from data.COMMON import * #essentials Header( 0.001, #Script Version (for updates) ('Nintendo',['mdl0','brmdl']), #model activation ('',[]), #anim activation ['revolution'], #included libs ['NTDO_IMG']) #image handlers ''' UMC model import/export script written by Tcll5850 ''' def ImportModel(FT,Cmd): def MTX44(): return [[1.0,0.0,0.0,0.0],[0.0,1.0,0.0,0.0],[0.0,0.0,1.0,0.0],[0.0,0.0,0.0,1.0]] def Offset_Name(offset): p=jump(offset-1,label=' -- String_Data') Str=string(bu8( label=' -- String_Length')) jump(p); return Str def RList(offset): #Nobody has helped me figure this area out, #brbx (the only correctly-structured/working src) is too confusing for me D: #Kryal hasn't returned since I asked #BJ isn't much help (I understand his area though.) #Toomai doesn't understand it that well (last I heard from him) #Bero disappeared <_< (he was on once lately, but not too lately as of now) #anyone else I talk to either has little knowledge, or doesn't care about it. >_< #I won't be able to build a working exporter until this area is figured out #at least importing can easily be loosely formatted for this... :P jump(offset, label=' -- Resource_Group') group_len = bu32( label=' -- Resource_Group_Length') offset_cnt = bu32(label=' -- Resource_Offset_Count') skip(16, label=' -- Resource_Offset -1 (old method)') #'FFFF 0000 000# 000# 00000000 00000000 LABEL('\n -- Resource_Offsets [Group_ID,padding,Prev_ID,Next_ID,String_Offset,Data_Offset]:') return StructArr(['bs16','bu16','bu16','bu16','bu32','bu32'],offset_cnt) def Bounds(Type): #MinX,Y(,Z),MaxX,Y(,Z) LABEL('\n -- Bounds:') return [bf32(),bf32(),bf32(),bf32()]+([bf32(),bf32()] if Type==3 else []) #global header: magic = string(4, label=' -- MDL0_Magic') DataLen = bu32( label=' -- Data_Length') version = bu32( label=' -- Version') brres_hdr = bs32( label=' -- Brres_Header') #data sections Definits = (bu32(label=' -- Definitions')if version>7 else 0) Bones = (bu32(label=' -- Bones') if version>7 else 0) Vertices = (bu32(label=' -- Vertices') if version>7 else 0) Normals = (bu32(label=' -- Normals') if version>7 else 0) Colors = (bu32(label=' -- Colors') if version>7 else 0) UVs = (bu32(label=' -- UVs') if version>7 else 0) FVectors = (bu32(label=' -- Fur_Vectors')if version>9 else 0) FLayers = (bu32(label=' -- Fur_Layers') if version>9 else 0) Materials= (bu32(label=' -- Materials') if version>7 else 0) Nodes = (bu32(label=' -- Nodes') if version>7 else 0) Objects = (bu32(label=' -- Objects') if version>7 else 0) Textures = (bu32(label=' -- Textures') if version>7 else 0) Pallets = (bu32(label=' -- Pallets') if version>7 else 0) Unk3 = (bu32(label=' -- Unknown3') if version>10 else 0) #local header: MDL0_Name = Offset_Name(bu32(label=' -- String_Offset')) header_len = bu32( label=' -- Header_Length') MDL0_offset = bs32( label=' -- MDL0_Header_Offset') header_unk1 = bu32( label=' -- Unknown') header_unk2 = bu32( label=' -- Unknown') num_verticies = bu32( label=' -- Vert_Count') num_faces = bu32( label=' -- Face_Count') header_unk3 = bu32( label=' -- Unknown') Def_Count = bu32( label=' -- Def_Count') unk_flags = bu32( label=' -- Unknown_Flags') header_unk5 = bu16( label=' -- Unknown') Data_Offset = bu16( label=' -- Data_Offset') boundbox = Bounds(3) if bool(Unk3): LABEL('''\n\nThis MDL0 uses an unknown resource. 
please contact me at Tcll5850@gmail.com and be sure to send me the MDL0 so I may add support for this data.\n'''.lstrip()) #Collect the needed data for the objects global _Bones LABEL('\n -- Links:') _Links = bs32(['']*bu32(label=' -- Link_Count')) #Definition Links _Definitions={'NodeTree':[],'NodeMix':{},'DrawOpa':{},'DrawXlu':{} } _Bones=[] _Vertices=[] _Normals=[] _Colors=[] _UVs=[] _materials={} #{OID:Mat_Name} _textures=[] if bool(Definits): for I,pad,P,N,S,D in RList(Definits): DN = Offset_Name(Definits+S) jump(Definits+D,label=' -- Definitions') while True: switch(bu8(label=' -- Type ( ')) if case(1): LABEL('End_Marker )'); break elif case(2): LABEL('Bone_Mapping )') _Definitions[DN]+=[ [bu16(label=' -- Bone_ID'),_Links[bu16(label=' -- Parent_Link_Index')]] ] #[BoneID,ParentID] elif case(3): #weights: { ID:[[BoneID, Value], []] }: LABEL('Bone_Weighting )') LI=bu16(label=' -- Link_Index') WL=[[_Links[bu16(label=' -- Weight_Link_Index')],bf32(label=' -- Weight_Value')] for i in range(bu8(label=' -- Weight_count'))] _Definitions[DN][LI] = WL elif case(4): LABEL('Material )') MA,OB,BN,UNK = bu16(label=' -- Material_ID'),bu16(label=' -- Object_ID'),bu16(label=' -- Bone_ID'),bu8(label=' -- Unknown') _Definitions[DN][MA] = OB elif case(5): LABEL('Link_Indexing )') LI=bu16(label=' -- Link_Index') WL=[[bu16(label=' -- Bone_ID'),1.0]] _Definitions[DN][LI] = WL if bool(Bones): def TransformMatrix( translate, rotate, scale ): degrad = pi/180 cosx = cos(rotate[0] * degrad) sinx = sin(rotate[0] * degrad) cosy = cos(rotate[1] * degrad) siny = sin(rotate[1] * degrad) cosz = cos(rotate[2] * degrad) sinz = sin(rotate[2] * degrad) return [ [ scale[0] * cosy * cosz, scale[1] * (sinx * cosz * siny - cosx * sinz), scale[2] * (sinx * sinz + cosx * cosz * siny), translate[0]], [ scale[0] * sinz * cosy, scale[1] * (sinx * sinz * siny + cosz * cosx), scale[2] * (cosx * sinz * siny - sinx * cosz), translate[1]], [ -scale[0] * siny, scale[1] * sinx * cosy, scale[2] * cosx * cosy, translate[2]], [0.0,0.0,0.0,1.0]] def Loc(Mtx): return [Mtx[0][3],Mtx[1][3],Mtx[2][3]] def Rot(Mtx): degrad = pi/180 y = asin(Mtx[1][0]) if (pi/2) - abs(y) < 0.0001: z = 0.0 if y>0: x = atan2(Mtx[0][1],Mtx[0][2]) else: x = atan2(Mtx[0][1],-Mtx[0][2]) else: c = cos(y) x = atan2(Mtx[1][2]/c,Mtx[2][2]/c) z = atan2(Mtx[0][0]/c,Mtx[0][1]/c) if pi - abs(z) < 0.05: y = pi-y c = cos(y) x = atan2(Mtx[1][2]/c,Mtx[2][2]/c) z = atan2(Mtx[0][0]/c,Mtx[0][1]/c) return [x*degrad,y*degrad,z*degrad] for I,pad,P,N,S,D in RList(Bones): jump(Bones+D,label=' -- Bone') #boneheader block_size = bu32( label=' -- Data_Size') MDL0_header = bs32( label=' -- MDL0_Header_Offset') Bone_Name = Offset_Name(Bones+D+bu32(label=' -- String_Offset')) Bone_ID = bu32( label=' -- Bone_ID') Link_Index = bu32( label=' -- Link_Index') _UNUSED,_FLAG15,_FLAG14,_FLAG13,_FLAG12,_FLAG11,_FLAG10,_HasGeometry,_FLAG8,_FLAG7,_HasChildren,_FLAG5,_FLAG4,_FixedScale,_FixedRotation,_FixedTranslation,_NoTransform=Field(['16',1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1], bu32(label=' -- Bone_Flags')) skip(8) #padding LABEL('\n -- Loc:'); LX,LY,LZ=bf32(['','','']) LABEL('\n -- Rot:'); RX,RY,RZ=bf32(['','','']) LABEL('\n -- Sca:'); SX,SY,SZ=bf32(['','','']) #LRS=[LX,LY,LZ,RX,RY,RZ,SX,SY,SZ] _Bounds = Bounds(3) _parent = bs32( label=' -- Parent offset (ignored)') _child = bs32( label=' -- Child offset (ignored)') _next = bs32( label=' -- Next offset (ignored)') _prev = bs32( label=' -- Prev offset (ignored)') P2 = bs32( label=' -- Part 2 offset') TransMtx=TransformMatrix( [LX,LY,LZ], [RX,RY,RZ], 
[SX,SY,SZ] ) FrameMtx=MTX44() if Bone_ID>0: for BID,PID in _Definitions['NodeTree']: if BID==Bone_ID: FrameMtx=MtxMultiply(TransMtx,_Bones[PID][5]) break BindMtx=Matrix(3,4,'bf32')+[[0,0,0,1]] InvBindMtx=Matrix(3,4,'bf32')+[[0,0,0,1]] LRS=[BindMtx[0][3],BindMtx[1][3],BindMtx[2][3],RX,RY,RZ,SX,SY,SZ] _Bones+=[[Bone_Name,LRS,TransMtx,BindMtx,InvBindMtx,FrameMtx]] #create the rig object and link the bones to it SetObject(MDL0_Name+'_Rig',24) for BID,PID in _Definitions['NodeTree']: BN,BLRS,BTMTX,BBMTX,BIMTX,BFMTX=_Bones[BID] PN,PLRS,PTMTX,PBMTX,PIMTX,PFMTX=_Bones[PID] SetBone(BN,0,BLRS,BBMTX,PN) def Vector(cnt, dmt, CT, ev): L = StructArr([['bu8','bs8','bu16','bs16','bf32'][CT]]*dmt,cnt) return [[D/pow(2.0,evaluator) for D in V] for V in L] if CT<4 else L if bool(Vertices): for I,pad,P,N,S,D in RList(Vertices): jump(Vertices+D,label=' -- Vertices') #vectorheader block_size = bu32( label=' -- Data_Size') MDL0_header = bs32( label=' -- MDL0_Header_Offset') data_offset = bu32( label=' -- Data_Offset') #usu 32 or 64(if bounds) Mesh_Name = Offset_Name(Vertices+D+bu32(label=' -- String_Offset')) VertID = bu32( label=' -- Vert_ID') XYZ = bu32( label=' -- is_XYZ') _GXCompType = bu32( label=' -- Component_Type') evaluator = bu8( label=' -- Evaluator') stride = bu8( label=' -- Vert_Stride') vector_count = bu16(label=' -- Vert_Count') Bounds(2+XYZ) j=Jump(Vertices+D+data_offset, label=' -- Vector_Data') _Vertices+=[Vector(vector_count, 2+XYZ, _GXCompType, evaluator)] #^lousy method (ID's discarded) used until I figure out the resource group enumeration. if bool(Normals): for I,pad,P,N,S,D in RList(Normals): jump(Normals+D,label=' -- Normals') #vectorheader block_size = bu32( label=' -- Data_Size') MDL0_header = bs32( label=' -- MDL0_Header_Offset') data_offset = bu32( label=' -- Data_Offset') #usu 32 or 64(if bounds) Mesh_Name = Offset_Name(Normals+D+bu32(label=' -- String_Offset')) NormID = bu32( label=' -- Normal_ID') NBT = bu32( label=' -- is_NBT') _GXCompType = bu32( label=' -- Component_Type') evaluator = bu8( label=' -- Evaluator') stride = bu8( label=' -- Normal_Stride') vector_count = bu16(label=' -- Normal_Count') j=Jump(Normals+D+data_offset, label=' -- Vector_Data') _Normals+=[Vector(vector_count, 3, _GXCompType, evaluator)] #^lousy method (ID's discarded) used until I figure out the resource group enumeration. if bool(UVs): for I,pad,P,N,S,D in RList(UVs): jump(UVs+D,label=' -- UVs') #vectorheader block_size = bu32( label=' -- Data_Size') MDL0_header = bs32( label=' -- MDL0_Header_Offset') data_offset = bu32( label=' -- Data_Offset') #usu 32 or 64(if bounds) Mesh_Name = Offset_Name(UVs+D+bu32(label=' -- String_Offset')) UVID = bu32( label=' -- UV_ID') ST = bu32( label=' -- is_ST') _GXCompType = bu32( label=' -- Component_Type') evaluator = bu8( label=' -- Evaluator') stride = bu8( label=' -- UV_Stride') vector_count = bu16(label=' -- UV_Count') j=Jump(UVs+D+data_offset, label=' -- Vector_Data') _UVs+=[Vector(vector_count, 2, _GXCompType, evaluator)] #^lousy method (ID's discarded) used until I figure out the resource group enumeration. 
def color(cnt, fmt): RGB565,RGB8,RGBX8,RGBA4,RGBA6,RGBA8 = range(6) data=StructArr([ #_GXCompType (Colors): 'bu16', #RGB565= 0, ['bu8','bu8','bu8'], #RGB8 = 1, ['bu8','bu8','bu8','bu8'], #RGBX8 = 2, #X is disreguarded ['bu8','bu8'], #RGBA4 = 3, 'bu24', #RGBA6 = 4, ['bu8','bu8','bu8','bu8'] #RGBA8 = 5 ][fmt],cnt) switch(fmt) if case(RGB565): return [[int(((D>>11)&31)*(255/31.)),int(((D>>5)&63)*(255/63.)),int((D&31)*(255/31.))] for D in data] #[R,G,B] elif case(RGB8): return data #[R,G,B] elif case(RGBX8): return [D[0:3] for D in data] #[R,G,B] elif case(RGBA4): return [[(RG>>4*16)+RG>>4,(RG&15*16)+RG&15,(BA>>4*16)+BA>>4,(BA&15*16)+BA&15] for RG,BA in data] #[R,G,B,A] elif case(RGBA6): return [(D>>18)*(255/63),((D>>12)&63)*(255/63),((D>>6)&63)*(255/63),(D&63)*(255/63)] #[R,G,B,A] elif case(RGBA8): return data #[R,G,B,A] if bool(Colors): for I,pad,P,N,S,D in RList(Colors): jump(Colors+D,label=' -- Colors') #vectorheader block_size = bu32( label=' -- Data_Size') MDL0_header = bs32( label=' -- MDL0_Header_Offset') data_offset = bu32( label=' -- Data_Offset') Col_Name = Offset_Name(Colors+D+bu32(label=' -- String_Offset')) ColID = bu32( label=' -- Color_ID') RGBA = bu32( label=' -- is_RGBA') _GXCompType = bu32( label=' -- Component_Type') stride = bu8( label=' -- Color_Stride') scale = bu8( label=' -- Scale') color_count = bu16( label=' -- Color_Count') j=Jump(Colors+D+data_offset, label=' -- Color_Data') _Colors+=[color(color_count, _GXCompType)] #^lousy method (ID's discarded) used until I figure out the resource group enumeration. if bool(Materials): for I,pad,P,N,S,D in RList(Materials): jump(Materials+D,label=' -- Material') Block_Size=bu32( label=' -- Data_Size') MDL0_header=bs32( label=' -- MDL0_Header_Offset') Material_Name=Offset_Name(Materials+D+bu32(label=' -- String_Offset')) ID=bu32( label=' -- Material_ID') XLU=bu32( label=' -- is_XLU') TexGens = bu8( label=' -- TexGens') Light_Channels = bu8( label=' -- Light_Channels') AStages = bu8( label=' -- Activ_TEV_Stages') Indirect_Textures = bu8(label=' -- Indirect_Textures') Cull=bu32( label=' -- Cull_Mode') Alpha_Testing = bu8( label=' -- Alpha_Testing') Light_Set = bu8( label=' -- Light_Set') Fog_Set = bu8( label=' -- Fog_Set') M_unk2 = bu8( label=' -- Unknown') M_unk3 = bu32( label=' -- Unknown') M_unk4 = bu32( label=' -- Unknown') TEV=bu32( label=' -- TEV_Offset') texture_count=bu32( label=' -- Texture_Layer_Count') Layers=bu32( label=' -- Texture_Layers_Offset') P2=bu32( label=' -- Part_2_Offset') Setup=bu32( label=' -- Material_Settings_Offset') P3=bu32( label=' -- Part_3_Offset') SetMaterial(Material_Name) try: OID = _Definitions['DrawXlu' if XLU else 'DrawOpa'][ID] _materials[OID] = Material_Name except: pass #not all materials are used L = Materials+D+Layers jump(L,label=' -- Texture Layers') for i in range(texture_count): Texture_Name=Offset_Name(L+(52*i)+bu32(label=' -- String_Offset')) pad = bu32( label=' -- pad') unk1 = bu32( label=' -- Unknown') unk2 = bu32( label=' -- Unknown') id1 = bu32( label=' -- index1') id2 = bu32( label=' -- index2') UWrap_Mode = bu32( label=' -- UWrap_Mode') VWrap_Mode = bu32( label=' -- VWrap_Mode') Min_Filter = bu32( label=' -- Min_Filter') Mag_Filter = bu32( label=' -- Mag_Filter') LOD_Bias = bf32( label=' -- LOD_Bias') Max_Antisotropy = bu32( label=' -- Max_Antisotropy') Clamp_Bias = bu8( label=' -- Clamp_Bias') Interpolation = bu8( label=' -- Interpolation') unk3 = bu16( label=' -- Unknown') SetTexture(Texture_Name) if Texture_Name not in _textures: SetImage(Texture_Name, 0,0, 
'%s.tex0'%Texture_Name) #SwitchFile() #because this isn't working in SetImage() for some reason >_> _textures+=[Texture_Name] #Process everything when we get here if bool(Objects): def getCPList(lo,hi): # CP register (old code (currently faster than using Field()) return [ (lo & 1), ((lo >> 1) & 1), ((lo >> 2) & 1), ((lo >> 3) & 1), ((lo >> 4) & 1), ((lo >> 5) & 1), ((lo >> 6) & 1), ((lo >> 7) & 1), ((lo >> 8) & 1), ((lo >> 9) & 3), ((lo >> 11) & 3), ((lo >> 13) & 3), ((lo >> 15) & 3), (hi & 3), ((hi >> 2) & 3), ((hi >> 4) & 3), ((hi >> 6) & 3), ((hi >> 8) & 3), ((hi >> 10) & 3), ((hi >> 12) & 3), ((hi >> 14) & 3), ((hi >> 16) & 3), ((hi >> 18) & 3), ((hi >> 20) & 3), ((hi >> 22) & 3), ((hi >> 24) & 3), ((hi >> 26) & 3), ((hi >> 28) & 3)] def CPT(V): #face-point index/value formats switch(V) if case(0): return '',0 #Null if case(1): return None,0 #Direct Data if case(2): return bu8(),1 #8bit index if case(3): return bu16(),2 #16bit index def MTXMultVec(V,M): return [(M[0][0]*V[0]) + (M[0][1]*V[1]) + (M[0][2]*V[2]) + M[0][3], (M[1][0]*V[0]) + (M[1][1]*V[1]) + (M[1][2]*V[2]) + M[1][3], (M[2][0]*V[0]) + (M[2][1]*V[1]) + (M[2][2]*V[2]) + M[2][3]] def Transform(vert,mtcs): global _Bones inflmtx = [[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0]] Wghts=[] #---restructured using the src for BrawlBox: if len(mtcs)>1: for BID,W in mtcs: BN,BLRS,BTMTX,BBMTX,BIMTX,BFMTX=_Bones[BID] tempmtx = MtxMultiply(BBMTX,BIMTX) for r in range(4): for c in range(4): inflmtx[r][c]+=tempmtx[r][c]*W Wghts+=[[BN,W]] elif len(mtcs)==1: BID,W = mtcs[0] #W is always 1.0 (statically defaulted) BN,BLRS,BTMTX,BBMTX,BIMTX,BFMTX=_Bones[BID] inflmtx=BBMTX Wghts+=[[BN,W]] return MTXMultVec(vert,inflmtx),Wghts #return weights for UMC's functions def _Rotate(V,M): return [(M[0][0]*V[0]) + (M[0][1]*V[1]) + (M[0][2]*V[2]), (M[1][0]*V[0]) + (M[1][1]*V[1]) + (M[1][2]*V[2]), (M[2][0]*V[0]) + (M[2][1]*V[1]) + (M[2][2]*V[2])] def NTransform(normal,mtcs): global _Bones inflmtx = [[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0]] Wghts=[] if len(mtcs)>1: for BID,W in mtcs: BN,BLRS,BTMTX,BBMTX,BIMTX,BFMTX=_Bones[BID] tempmtx = MtxMultiply(BBMTX,BIMTX) for r in range(3): for c in range(3): inflmtx[r][c]+=tempmtx[r][c]*W elif len(mtcs)==1: BID,W = mtcs[0] #W is always 1.0 (statically defaulted) BN,BLRS,BTMTX,BBMTX,BIMTX,BFMTX=_Bones[BID] inflmtx=[BBMTX[0][:3],BBMTX[1][:3],BBMTX[2][:3]] return _Rotate(normal,inflmtx) XF_VtxMtxCache=[None]*16 XF_NrmMtxCache=[None]*16 for I,pad,P,N,S,D in RList(Objects): jump(Objects+D,label=' -- Object') Block_Size=bu32( label=' -- Data_Size') MDL0_header=bs32( label=' -- MDL0_Header_Offset') Link=bs32( label=' -- Link_ID/Def_Table') CPL=getCPList( bu32(label=' -- CP_Lo'), bu32(label=' -- CP_Hi') ) #TODO: use attribute definitions INVTXSPEC=bu32( label=' -- XF_Specs') Attribute_size=bu32( label=' -- Attribute_Size') Attribute_flags=bu32( label=' -- Attribute_Flags') #0x00000080 or 0x000000A0 #^I honestly don't think these have anything to do with the arrributes Attributes_offset=bu32( label=' -- Attributes_Offset')+34+Objects+D Buffer_Size=bu32( label=' -- Data_Buffer_Size') Data_Size=bu32( label=' -- Data_Size') Primitives_Offset=bu32(label=' -- Data_Offset')+36+Objects+D Elm_Flags=bu32( label=' -- Element_Flags') #unused (CPL does this automatically) :P unk3=bu32( label=' -- Unknown') #usually 0 Object_Name=Offset_Name(Objects+D+bu32(label=' -- String_Offset')) ID=bu32( label=' -- Object_ID') num_verts=bu32( label=' -- Vert_Count') num_faces=bu32( label=' -- Face_Count') LABEL('\n -- 
Vert_Input:'); vertex_input=bs16() LABEL('\n -- Normal_Input:'); normal_input=bs16() LABEL('\n -- Color_Inputs:'); color_inputs=[bs16(),bs16()] LABEL('\n -- UV_Inputs:'); UV_inputs=[bs16(),bs16(),bs16(),bs16(),bs16(),bs16(),bs16(),bs16()] unknown_inputs=[] if version>9: LABEL('\n -- Unknown_Inputs:'); unknown_inputs=StructArr('bs16',2) Def_Tbl=bu32( label=' -- Definition_Table_Offset')+Objects+D if Link==-1: #use def table jump(Def_Tbl, label=' -- Definition_Table') Defs=StructArr('bu16',bu32(label=' -- Definition_Count')) # ^ I'll be working on this area later... # tbh though, it isn't really needed >_> #jump(Attributes_offset,) #soon :/ SetObject(Object_Name, ParentName=(MDL0_Name+'_Rig' if bool(len(_Definitions['NodeTree'])) else '')) try: SetMaterial(_materials[ID]) except: pass #just in case this object has no material Jump(Primitives_Offset,label=' -- Primitives:') pos = 0 while pos<Buffer_Size: Switch(bh8()) #primitive ID pos+=1 t='' #0x0002 B 024 if Case('20'): #vert matrix LABEL(' -- Primitive Type ( XF_Vert_Matrix )') Link,Len,XFAddr = Field(['16','4','12'],bu32()); pos+=4 XF_VtxMtxCache[XFAddr/12]=_Definitions['NodeMix'][Link] #[ [BoneID, Weight], [] ] # use the bone ID to get the bone's name and bind matrix. elif Case('28'): #normal matrix (3x3) LABEL(' -- Primitive Type ( XF_Normal_Matrix )') Link,Len,XFAddr = Field(['16','4','12'],bu32()); pos+=4 ##XF_NrmMtxCache[XFAddr/12]=_Definitions['NodeMix'][Link] #[ [BoneID, Weight], [] ] #if( ( readNum( f, 2, 2, true ) - 0xb000 ) / 0x0c > 7 ) elif Case('30'): #UV matrix LABEL(' -- Primitive Type ( XF_UV_Matrix )') Val,Len,Adr=Field(['16','4','12'],bu32()); pos+=4 elif Case('38'): #light matrix LABEL(' -- Primitive Type ( XF_Light_Matrix )') Val,Len,Adr=Field(['16','4','12'],bu32()); pos+=4 elif Case('78'): t,n = UMC_POLYGON, 'Polygon' #possible support based on pattern (hasn't actually been seen) elif Case('80'): t,n = UMC_QUADS, 'Quads' elif Case('88'): t,n = UMC_QUADSTRIP, 'QuadStrip' #possible support based on pattern (hasn't actually been seen) elif Case('90'): t,n = UMC_TRIANGLES, 'Triangles' elif Case('98'): t,n = UMC_TRIANGLESTRIP,'TriangleStrip' elif Case('A0'): t,n = UMC_TRIANGLEFAN, 'TriangleFan' elif Case('A8'): t,n = UMC_LINES, 'Lines' elif Case('B0'): t,n = UMC_LINESTRIP, 'LineStrip' elif Case('B8'): t,n = UMC_POINTS, 'Points' if t!='': LABEL(' -- Primitive Type ( '+n+' )') SetPrimitive(t) pos+=2 vmtcs=[] if Link==-1 else [[_Links[Link],1.0]] nmtcs=[] if Link==-1 else [[_Links[Link],1.0]] for v in range(bu16(label=' -- Facepoint_Count')): V,N,C,U = '','',['',''],[''] Weights=[] for I,CPV in enumerate(CPL): D,L=CPT(CPV); pos+=L #not sure if anything other than matrix indecies can be direct data #(there doesn't seem to be a defined relation to specific formatting values) >_> switch(I) if case( 0): #vert/nor_mtx if CPV==1: i=bu8(label=' -- Vert/Normal_Mtx value')/3; pos+=1 vmtcs=XF_VtxMtxCache[i]; nmtcs=XF_NrmMtxCache[i] elif case( 1): #uv[0]_mtx (unknown processing) if CPV==1: bu8(label=' -- UV[0]_Mtx value')/3; pos+=1 elif case( 2): #uv[1]_mtx if CPV==1: bu8(label=' -- UV[1]_Mtx value')/3; pos+=1 elif case( 3): #uv[2]_mtx if CPV==1: bu8(label=' -- UV[2]_Mtx value')/3; pos+=1 elif case( 4): #uv[3]_mtx if CPV==1: bu8(label=' -- UV[3]_Mtx value')/3; pos+=1 elif case( 5): #uv[4]_mtx if CPV==1: bu8(label=' -- UV[4]_Mtx value')/3; pos+=1 elif case( 6): #uv[5]_mtx if CPV==1: bu8(label=' -- UV[5]_Mtx value')/3; pos+=1 elif case( 7): #uv[6]_mtx if CPV==1: bu8(label=' -- UV[6]_Mtx value')/3; pos+=1 elif case( 8): #uv[7]_mtx if 
CPV==1: bu8(label=' -- UV[7]_Mtx value')/3; pos+=1 #I'm aware of 'dmt', 'CT', and 'ev' not being defined for direct data: ( where do I define them?? ) #I've only coded the basis for direct data support just in case it can be made avaliable. elif case( 9): #vert if CPV==1: V=('' if I=='' else Vector(1, dmt, CT, ev)[0]); LABEL(' -- Vert value') elif CPV>1: LABEL(' -- Vert Index'); V,Weights=Transform(_Vertices[vertex_input][D],vmtcs) elif case(10): #normal if CPV==1: V=('' if I=='' else Vector(1, dmt, CT, ev)[0]); LABEL(' -- Normal value') elif CPV>1: LABEL(' -- Normal Index'); N=NTransform(_Normals[normal_input][D],vmtcs) ##NTransform(_Normals[normal_input][D],nmtcs) elif case(11): #color[0] if CPV==1: C[0]=('' if I=='' else color(1,CT)[0]); LABEL(' -- Color[0] value') elif CPV>1: LABEL(' -- Color[0] Index'); C[0]=_Colors[color_inputs[0]][D] elif case(12): #color[1] if CPV==1: C[1]=('' if I=='' else color(1,CT)[0]); LABEL(' -- Color[1] value') elif CPV>1: LABEL(' -- Color[1] Index'); C[1]=_Colors[color_inputs[1]][D] elif case(13): #uv[0] if CPV==1: U[0]=('' if I=='' else Vector(1, dmt, CT, ev)[0]); LABEL(' -- UV[0] value') elif CPV>1: LABEL(' -- UV[0] Index'); U[0]=_UVs[UV_inputs[0]][D] elif case(14): #uv[1] if CPV==1: U[1]=('' if I=='' else Vector(1, dmt, CT, ev)[0]); LABEL(' -- UV[1] value') elif CPV>1: LABEL(' -- UV[1] Index'); U[1]=_UVs[UV_inputs[1]][D] elif case(15): #uv[2] if CPV==1: U[2]=('' if I=='' else Vector(1, dmt, CT, ev)[0]); LABEL(' -- UV[2] value') elif CPV>1: LABEL(' -- UV[2] Index'); U[2]=_UVs[UV_inputs[2]][D] elif case(16): #uv[3] if CPV==1: U[3]=('' if I=='' else Vector(1, dmt, CT, ev)[0]); LABEL(' -- UV[3] value') elif CPV>1: LABEL(' -- UV[3] Index'); U[3]=_UVs[UV_inputs[3]][D] elif case(17): #uv[4] if CPV==1: U[4]=('' if I=='' else Vector(1, dmt, CT, ev)[0]); LABEL(' -- UV[4] value') elif CPV>1: LABEL(' -- UV[4] Index'); U[4]=_UVs[UV_inputs[4]][D] elif case(18): #uv[5] if CPV==1: U[5]=('' if I=='' else Vector(1, dmt, CT, ev)[0]); LABEL(' -- UV[5] value') elif CPV>1: LABEL(' -- UV[5] Index'); U[5]=_UVs[UV_inputs[5]][D] elif case(19): #uv[6] if CPV==1: U[6]=('' if I=='' else Vector(1, dmt, CT, ev)[0]); LABEL(' -- UV[6] value') elif CPV>1: LABEL(' -- UV[6] Index'); U[6]=_UVs[UV_inputs[6]][D] elif case(20): #uv[7] if CPV==1: U[7]=('' if I=='' else Vector(1, dmt, CT, ev)[0]); LABEL(' -- UV[7] value') elif CPV>1: LABEL(' -- UV[7] Index'); U[7]=_UVs[UV_inputs[7]][D] elif case(21): pass #vert_mtx_arr elif case(22): pass #normal_mtx_arr elif case(23): pass #uv_mtx_arr elif case(24): pass #light_mtx_array elif case(25): pass #NBT (NX,NY,NZ, BX,BY,BZ, TX,TY,TZ) #elif case(255):pass #CP_NULL #debugging: #if D!='': LABEL(' - '+str(pos)+' > '+str(Buffer_Size)+' ?') SetFacepoint( V,N,tuple(C),tuple(U) ) if len(Weights): (SetWeight( N, W ) for N,W in Weights) def XExportModel(FT): #WIP (not functional yet) #to build resource groups: # 1: sort names alphabetically # 2: insert empty string # 3: add names to the table #group names by length #compair names in binary tree fashon #chars are in R>L order, bits in L>R def CompareBits(b1, b2=0): i=8 while i>0: i-=1 if (b1 & 1<<(i-1)) != (b2 & 1<<(i-1)): return i-1 #(not sure) return 0 #def GenerateID(name="") #Generate the MDL0 Group ID # return -1 if name=="" else ((len(name) - 1) << 3) | CompareBits(ord(name[len(name)-1])) _name, _id, _index, _left, _right = "",0,0,[],[] def IsRight(Cname): global _name, _id return False if len(_name) != len(Cname) else ((ord(Cname[(_id >> 3)]) >> (_id & 7)) & 1) != 0 def GenerateId(Cname, Cid, Cindex, 
Cleft, Cright): global _name, _id, _left, _right for i in range(len(_name)): if (_name[i] != Cname[i]): _id = (i << 3) | CompareBits(ord(_name[i]), ord(Cname[i])) if IsRight(comparison): _left,_right = this,comparison else: _left,_right = comparison,this return _id return 0
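# --- Standalone sketch (not part of the importer above; helper names are
# illustrative). The importer's Vector() and color() helpers boil down to two
# decode steps: dividing fixed-point GX vertex components by 2**evaluator, and
# expanding packed colors such as RGB565 into 8-bit channels. A minimal sketch
# of those two steps, assuming plain integer inputs:

def dequantize(raw_components, fraction_bits):
    """Convert integer GX vertex components to floats: value / 2**fraction_bits."""
    scale = float(1 << fraction_bits)
    return [c / scale for c in raw_components]

def decode_rgb565(value):
    """Expand a packed 16-bit RGB565 color into three 0-255 channels."""
    r = (value >> 11) & 0x1F
    g = (value >> 5) & 0x3F
    b = value & 0x1F
    return [int(r * 255 / 31.0), int(g * 255 / 63.0), int(b * 255 / 31.0)]

if __name__ == '__main__':
    print(dequantize([512, -256, 128], 8))  # -> [2.0, -1.0, 0.5]
    print(decode_rgb565(0xF800))            # -> [255, 0, 0] (pure red)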
""" Script entry point """ from src.calrissian.particle2_network import Particle2Network from src.calrissian.layers.particle2 import Particle2 from src.calrissian.optimizers.particle2_sgd import Particle2SGD import numpy as np def main(): # train_X = np.asarray([[0.2, -0.3]]) # train_Y = np.asarray([[0.0, 1.0, 0.0]]) train_X = np.asarray([[0.2, -0.3], [0.1, -0.9]]) train_Y = np.asarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) net = Particle2Network(cost="mse") net.append(Particle2(2, 5, activation="sigmoid")) net.append(Particle2(5, 3, activation="sigmoid")) print(net.predict(train_X)) print(net.cost(train_X, train_Y)) # print(net.cost_gradient(train_X, train_Y)) def main2(): sgd = Particle2SGD(alpha=0.2, n_epochs=1, mini_batch_size=1, verbosity=2, weight_update="momentum", beta=0.5) # sgd = Particle2SGD(alpha=0.2, n_epochs=1, mini_batch_size=1, verbosity=2) train_X = np.asarray([[0.2, -0.3]]) train_Y = np.asarray([[0.0, 1.0, 0.0]]) net = Particle2Network(cost="mse") net.append(Particle2(2, 5, activation="sigmoid")) net.append(Particle2(5, 3, activation="sigmoid")) sgd.optimize(net, train_X, train_Y) def main3(): train_X = np.asarray([[0.2, -0.3], [0.1, -0.9]]) train_Y = np.asarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) net = Particle2Network(cost="mse") net.append(Particle2(2, 5, activation="sigmoid")) net.append(Particle2(5, 3, activation="sigmoid")) print(net.predict(train_X)) # # with open("/Users/alange/network.json", "w") as f: # net.write_to_json(f) # # with open("/Users/alange/network.json", "r") as f: # new_net = Particle2Network.read_from_json(f) # print(new_net.predict(train_X)) def fd(): # train_X = np.asarray([[0.2, -0.3]]) # train_Y = np.asarray([[0.0, 1.0, 0.0]]) train_X = np.asarray([[0.2, -0.3], [0.1, -0.9]]) train_Y = np.asarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) net = Particle2Network(cost="categorical_cross_entropy") net.append(Particle2(2, 5, activation="sigmoid")) net.append(Particle2(5, 4, activation="sigmoid")) net.append(Particle2(4, 3, activation="softmax")) # Finite difference checking net.cost(train_X, train_Y) db, dq, dr_inp, dr_out, dt_inp, dt_out = net.cost_gradient(train_X, train_Y) h = 0.001 print("analytic b") print(db) fd_b = [] for l in range(len(net.layers)): lb = [] for c in range(len(net.layers[l].b)): for b in range(len(net.layers[l].b[c])): orig = net.layers[l].b[c][b] net.layers[l].b[c][b] += h fp = net.cost(train_X, train_Y) net.layers[l].b[c][b] -= 2*h fm = net.cost(train_X, train_Y) lb.append((fp - fm) / (2*h)) net.layers[l].b[c][b] = orig fd_b.append(lb) print("numerical b") print(fd_b) print("analytic q") for x in dq: print(x) fd_q = [] for l in range(len(net.layers)): lq = [] for i in range(len(net.layers[l].q)): orig = net.layers[l].q[i] net.layers[l].q[i] += h fp = net.cost(train_X, train_Y) net.layers[l].q[i] -= 2*h fm = net.cost(train_X, train_Y) lq.append((fp - fm) / (2*h)) net.layers[l].q[i] = orig fd_q.append(lq) print("numerical q") for x in fd_q: print(x) print("analytic theta input") for x in dt_inp: print(x) fd_t = [] for l in range(len(net.layers)): lt = [] for i in range(len(net.layers[l].theta_inp)): orig = net.layers[l].theta_inp[i] net.layers[l].theta_inp[i] += h fp = net.cost(train_X, train_Y) net.layers[l].theta_inp[i] -= 2*h fm = net.cost(train_X, train_Y) lt.append((fp - fm) / (2*h)) net.layers[l].theta_inp[i] = orig fd_t.append(lt) print("numerical theta input") for x in fd_t: print(x) print("analytic theta output") for x in dt_out: print(x) fd_t = [] for l in range(len(net.layers)): lt = [] for i in 
range(len(net.layers[l].theta_out)): orig = net.layers[l].theta_out[i] net.layers[l].theta_out[i] += h fp = net.cost(train_X, train_Y) net.layers[l].theta_out[i] -= 2*h fm = net.cost(train_X, train_Y) lt.append((fp - fm) / (2*h)) net.layers[l].theta_out[i] = orig fd_t.append(lt) print("numerical theta output") for x in fd_t: print(x) print("analytic x input") for layer in dr_inp[0]: print(layer) print("analytic y input") for layer in dr_inp[1]: print(layer) print("analytic z input") for layer in dr_inp[2]: print(layer) fd_r_x = [] fd_r_y = [] fd_r_z = [] for layer in net.layers: lr_x = [] lr_y = [] lr_z = [] for i in range(layer.input_size): # x orig = layer.rx_inp[i] layer.rx_inp[i] += h fp = net.cost(train_X, train_Y) layer.rx_inp[i] -= 2*h fm = net.cost(train_X, train_Y) lr_x.append((fp - fm) / (2*h)) layer.rx_inp[i] = orig # y orig = layer.ry_inp[i] layer.ry_inp[i] += h fp = net.cost(train_X, train_Y) layer.ry_inp[i] -= 2*h fm = net.cost(train_X, train_Y) lr_y.append((fp - fm) / (2*h)) layer.ry_inp[i] = orig # z orig = layer.rz_inp[i] layer.rz_inp[i] += h fp = net.cost(train_X, train_Y) layer.rz_inp[i] -= 2*h fm = net.cost(train_X, train_Y) lr_z.append((fp - fm) / (2*h)) layer.rz_inp[i] = orig fd_r_x.append(lr_x) fd_r_y.append(lr_y) fd_r_z.append(lr_z) print("numerical r x input") for f in fd_r_x: print(f) print("numerical r y input") for f in fd_r_y: print(f) print("numerical r z input") for f in fd_r_z: print(f) print("analytic x output") for layer in dr_out[0]: print(layer) print("analytic y output") for layer in dr_out[1]: print(layer) print("analytic z output") for layer in dr_out[2]: print(layer) fd_r_x = [] fd_r_y = [] fd_r_z = [] for layer in net.layers: lr_x = [] lr_y = [] lr_z = [] for i in range(layer.output_size): # x orig = layer.rx_out[i] layer.rx_out[i] += h fp = net.cost(train_X, train_Y) layer.rx_out[i] -= 2*h fm = net.cost(train_X, train_Y) lr_x.append((fp - fm) / (2*h)) layer.rx_out[i] = orig # y orig = layer.ry_out[i] layer.ry_out[i] += h fp = net.cost(train_X, train_Y) layer.ry_out[i] -= 2*h fm = net.cost(train_X, train_Y) lr_y.append((fp - fm) / (2*h)) layer.ry_out[i] = orig # z orig = layer.rz_out[i] layer.rz_out[i] += h fp = net.cost(train_X, train_Y) layer.rz_out[i] -= 2*h fm = net.cost(train_X, train_Y) lr_z.append((fp - fm) / (2*h)) layer.rz_out[i] = orig fd_r_x.append(lr_x) fd_r_y.append(lr_y) fd_r_z.append(lr_z) print("numerical r x output") for f in fd_r_x: print(f) print("numerical r y output") for f in fd_r_y: print(f) print("numerical r z ouput") for f in fd_r_z: print(f) if __name__ == "__main__": # Ensure same seed np.random.seed(100) # main() # main2() # main3() fd()
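# --- Standalone sketch (separate from the script above; names are
# illustrative). The fd() routine repeats the same central-difference pattern
# for every parameter family; a generic helper makes the technique explicit:
# perturb one parameter by +/- h and compare (f(x+h) - f(x-h)) / (2h) against
# the analytic gradient. Shown here with a stand-in quadratic cost.

import numpy as np

def numerical_gradient(cost, params, h=1e-3):
    """Central-difference estimate of d cost / d params[i] for a 1-D array."""
    grad = np.zeros_like(params, dtype=float)
    for i in range(len(params)):
        orig = params[i]
        params[i] = orig + h
        fp = cost(params)
        params[i] = orig - h
        fm = cost(params)
        grad[i] = (fp - fm) / (2 * h)
        params[i] = orig  # restore before moving to the next parameter
    return grad

if __name__ == "__main__":
    x = np.asarray([0.2, -0.3, 1.0])
    # Cost f(x) = sum(x**2) has analytic gradient 2x, so expect ~[0.4, -0.6, 2.0].
    print(numerical_gradient(lambda p: np.sum(p ** 2), x))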
#!/usr/bin/env python import pygame, sys from pygame.locals import * import random import math class Setup: DisplaySize = (800, 600) FPS = 60 Lives = 5 PaddleLimit = (0, 1024) PaddleSpeed = int((PaddleLimit[1] - PaddleLimit[0])/(0.75 * FPS)) BallSpeed = 2.75 MaxSpeed = 4.8 # should be lower than brick height SpeedupCoef = 1.17 BoardSize = (320, 240) # NOTE: it is important that the length of BrickStartupPattern is the # same as the sum of BrickGroupSizes (which equals to BrickLines) BrickStartupPattern = ([1]*4 + [0]*2)*3 + [0]*(4+2)*2 BrickGroupSizes = [6, 6, 6, 6, 6] BrickGroupPoints = [5, 4, 3, 2, 1, 0] BrickLines = sum(BrickGroupSizes) AdditionalLines = [0]*2 + [1]*4 # 10 bricks per line: width 30, border 1, margin 0 # 16 bricks per line: width 18, border 1, margin 0 # 20 bricks per line: width 14, border 1, margin 0 # 4 lines per group: height 5, border 1, margin 0, paddletop 222 # 5 lines per group: height 4, border 1, margin 0, paddletop 222 BricksPerLine = 16 BrickSize = (18, 5) BrickBorder = (1, 1) BallSize = (4, 4) PaddleSize = (30, 4) PaddleTop = 222 BoardMargin = (0, 0) class Color: black = (0, 0, 0) white = (255, 255, 255) blue = (0, 0, 170) red = (153, 0, 0) green = (0, 204, 0) yellow = (255, 235, 0) gray = (60, 60, 60) class SevenSeg: # 1 # 2 4 # 8 # 16 32 # 64 to7seg = { '0': 1 + 2 + 4 + 16 + 32 + 64, '1': 4 + 32, '2': 1 + 4 + 8 + 16 + 64, '3': 1 + 4 + 8 + 32 + 64, '4': 2 + 4 + 8 + 32, '5': 1 + 2 + 8 + 32 + 64, '6': 1 + 2 + 8 + 16 + 32 + 64, '7': 1 + 4 + 32, '8': 1 + 2 + 4 + 8 + 16 + 32 + 64, '9': 1 + 2 + 4 + 8 + 32 + 64, 'H': 4096 + 8192 + 8 + 1024 + 2048, 'I': 4096 + 1024, 'G': 1 + 2 + 16 + 32 + 64 + 128, 'A': 1 + 2 + 4 + 8 + 1024 + 2048, 'M': 1 + 2 + 4 + 1024 + 2048 + 256, 'E': 1 + 2 + 8 + 16 + 64, 'O': 1 + 2 + 4 + 16 + 32 + 64, 'V': 4096 + 8192 + 16 + 32 + 64, 'R': 1 + 2 + 4 + 8 + 1024 + 512 } segsize = 3 def __init__(self, surface): self.surface = surface self.charwidth = self.segsize + 3 def draw_text(self, topleft, text): x = topleft[0] for d in text: if d in self.to7seg: v = self.to7seg[d] else: v = 0 self.draw_single_digit(Color.white, (x, topleft[1]), v) x += self.charwidth def draw_single_digit(self, color, topleft, digit): s = self.segsize x0 = topleft[0] x1 = topleft[0] + s + 1 y0 = topleft[1] y1 = topleft[1] + s + 1 y2 = topleft[1] + 2*s + 2 xm = (x0 + x1) / 2 if digit & 1: pygame.draw.line(self.surface, color, (x0+1, y0), (x1-1, y0)) if digit & 2: pygame.draw.line(self.surface, color, (x0, y0+1), (x0, y1-1)) if digit & 4: pygame.draw.line(self.surface, color, (x1, y0+1), (x1, y1-1)) if digit & 8: pygame.draw.line(self.surface, color, (x0+1, y1), (x1-1, y1)) if digit & 16: pygame.draw.line(self.surface, color, (x0, y1+1), (x0, y2-1)) if digit & 32: pygame.draw.line(self.surface, color, (x1, y1+1), (x1, y2-1)) if digit & 64: pygame.draw.line(self.surface, color, (x0+1, y2), (x1-1, y2)) if digit & 128: pygame.draw.line(self.surface, color, (xm, y1), (x1-1, y1)) if digit & 256: pygame.draw.line(self.surface, color, (xm, y0+1), (xm, y1-1)) if digit & 512: pygame.draw.line(self.surface, color, (x0+1, y1+1), (x1, y2)) if digit & 1024: pygame.draw.line(self.surface, color, (x0, y1+1), (x0, y2)) if digit & 2048: pygame.draw.line(self.surface, color, (x1, y1+1), (x1, y2)) if digit & 4096: pygame.draw.line(self.surface, color, (x0, y0), (x0, y1-1)) if digit & 8192: pygame.draw.line(self.surface, color, (x1, y0), (x1, y1-1)) def vector_from_angle(rad_angle, length): return (length * math.cos(rad_angle), -length * math.sin(rad_angle)) def get_paddle_rect(logical_pos): 
normalized = float(logical_pos - Setup.PaddleLimit[0]) / (Setup.PaddleLimit[1] - Setup.PaddleLimit[0]) max_left = Setup.BoardSize[0] - Setup.PaddleSize[0] - 1 left = min(max_left, int(max_left * normalized)) return (left, Setup.PaddleTop) + Setup.PaddleSize def get_brick_rect(row, col): t = (Setup.BrickSize[0] + 2 * Setup.BrickBorder[0], \ Setup.BrickSize[1] + 2 * Setup.BrickBorder[1]) return (Setup.BoardMargin[0] + col * t[0] + Setup.BrickBorder[0], \ Setup.BoardMargin[1] + row * t[1] + Setup.BrickBorder[1]) \ + Setup.BrickSize def get_brick_index(pos): row = (pos[1] - Setup.BoardMargin[1]) / (Setup.BrickSize[1] + 2 * Setup.BrickBorder[1]) if row >= Setup.BrickLines: return None col = (pos[0] - Setup.BoardMargin[0]) / (Setup.BrickSize[0] + 2 * Setup.BrickBorder[0]) if col < 0 or col >= Setup.BricksPerLine: return None return (int(row), int(col)) def get_brick_points(row): row = max(0, min(Setup.BrickLines, row)) accu = 0 i = 0 while accu <= row: accu += Setup.BrickGroupSizes[i] i += 1 return Setup.BrickGroupPoints[i - 1] def get_ball_rect(pos): return (int(pos[0]), int(pos[1])) + Setup.BallSize def get_color_rects(): clrr = [] y = 0 colors = [Color.blue, Color.red, Color.green, Color.yellow, Color.white] for num_lines, color in zip(Setup.BrickGroupSizes, colors): pxl_height = num_lines * (Setup.BrickSize[1] + 2 * Setup.BrickBorder[1]) clrr += [(color, (0, y, Setup.BoardSize[0], pxl_height))] y += pxl_height return clrr def adjacent_bricks(b1, b2): return max(abs(b1[0] - b2[0]), abs(b1[1] - b2[1])) <= 1 class Gamestate: def __init__(self): self.hiscore = 0 self.reset_game() def reset_game(self): self.pad_pos = (Setup.PaddleLimit[1] + Setup.PaddleLimit[0]) / 2 self.brick_matrix = [ [i] * Setup.BricksPerLine for i in Setup.BrickStartupPattern ] self.score = 0 self.stopped = True self.next_falldown_threshold = 2 * Setup.BricksPerLine self.next_speedup_threshold = 2 * Setup.BricksPerLine self.falldown_threshold = self.next_falldown_threshold self.lives = Setup.Lives self.has_ball = False self.next_additional_line = 0 def throw_ball(self): self.speed = Setup.BallSpeed self.speedup_threshold = self.next_speedup_threshold self.ball = (random.randint(0, Setup.BoardSize[0]-1), Setup.BoardSize[1]/3.0) self.ball_collisions = False self.ball_vector = vector_from_angle((random.random() * 4 + 7) * math.pi / 6, self.speed) self.last_brick = (-5, -5) # dummy indices far outside the board self.has_ball = True self.lives -= 1 def tick(self): if self.stopped: return if not self.has_ball: self.throw_ball() newball = (self.ball[0] + self.ball_vector[0], self.ball[1] + self.ball_vector[1]) newball = self.bounce(newball) newball = self.collide(newball) self.ball = newball def bounce(self, newball): x = newball[0] y = newball[1] revert_x = False revert_y = False max_x = Setup.BoardSize[0] - Setup.BallSize[0] if x < 0: x = -x revert_x = True elif x > max_x: x = 2 * max_x - x revert_x = True # do not bouce off the bottom edge if y < 0: y = -y revert_y = True self.ball_vector = ([self.ball_vector[0], -self.ball_vector[0]][revert_x], \ [self.ball_vector[1], -self.ball_vector[1]][revert_y]) return (x, y) def collide(self, newball): if newball[1] >= Setup.PaddleTop - Setup.BallSize[1]: return self.collide_with_paddle(newball) # collide with bricks if not self.ball_collisions: # ball is in ghost mode after it has been thrown. 
# no collisions with bricks until it hits the paddle return newball centerball = (newball[0] + Setup.BallSize[0], newball[1] + Setup.BallSize[1]) brick_index = get_brick_index(centerball) if brick_index: if not adjacent_bricks(brick_index, self.last_brick): self.last_brick = (-5, -5) self.collide_with_brick(centerball, *brick_index) return newball def collide_with_paddle(self, newball): collision_y = Setup.PaddleTop - Setup.BallSize[1] collision_x = self.ball[0] + (collision_y - self.ball[1]) * self.ball_vector[0] / self.ball_vector[1] ball_x_middle = collision_x + Setup.BallSize[0] / 2.0 paddle = get_paddle_rect(self.pad_pos) # cushion of 1 ball size around the paddle from each side big_paddle_left = paddle[0] - Setup.BallSize[0] big_paddle_right = big_paddle_left + Setup.PaddleSize[0] + 2 * Setup.BallSize[0] if ball_x_middle < big_paddle_left or ball_x_middle > big_paddle_right: # ball dropped self.ball_dropped() return newball else: big_paddle_40pct = (big_paddle_right - big_paddle_left) * 0.4 # ball bounces off paddle if ball_x_middle < big_paddle_left + big_paddle_40pct: # left end of the paddle. Angle in second quadrant based on collision position v = (ball_x_middle - big_paddle_left) / big_paddle_40pct angle = (math.pi * 5/6) - v * math.pi / 3 self.ball_vector = vector_from_angle(angle, self.speed) elif ball_x_middle < big_paddle_right - big_paddle_40pct: # middle part of the paddle. Angle preserved self.ball_vector = (self.ball_vector[0], -self.ball_vector[1]) else: # right end of the paddle. Angle in first quadrant based on collision position v = (big_paddle_right - ball_x_middle) / big_paddle_40pct angle = (math.pi * 1/6) + v * math.pi / 3 self.ball_vector = vector_from_angle(angle, self.speed) # deactivate ball ghost mode after first bounce self.ball_collisions = True self.increase_difficulty(1) return (collision_x, collision_y) def collide_with_brick(self, newball, row, col): if self.brick_matrix[row][col] == 0: return self.last_brick = (row, col) self.ball_vector = (self.ball_vector[0], -self.ball_vector[1]) self.brick_matrix[row][col] = 0 brick_points = get_brick_points(row) self.score += brick_points self.hiscore = max(self.hiscore, self.score) self.increase_difficulty(brick_points) def ball_dropped(self): self.has_ball = False self.stopped = True if self.lives == 0: # game over self.lives -= 1 def increase_difficulty(self, amount): self.falldown_threshold -= 1 if self.falldown_threshold <= 0: self.drop_wall() # increase speed self.speedup_threshold -= amount if self.speedup_threshold <= 0: newspeed = min(self.speed * Setup.SpeedupCoef, Setup.MaxSpeed) coef = newspeed / self.speed self.ball_vector = (self.ball_vector[0] * coef, self.ball_vector[1] * coef) self.speed = newspeed self.speedup_threshold = self.next_speedup_threshold def drop_wall(self): self.falldown_threshold = self.next_falldown_threshold self.next_falldown_threshold = max(10, int(0.92 * self.next_falldown_threshold)) newline = [Setup.AdditionalLines[self.next_additional_line]] * Setup.BricksPerLine self.next_additional_line = (self.next_additional_line + 1) % len(Setup.AdditionalLines) self.brick_matrix = [newline] + self.brick_matrix[:-1] class KeyboardController: def __init__(self): self.key_left = False self.key_right = False def up(self, key): if key == pygame.K_LEFT: self.key_left = False elif key == pygame.K_RIGHT: self.key_right = False def down(self, key): if key == pygame.K_LEFT: self.key_left = True elif key == pygame.K_RIGHT: self.key_right = True def move_paddle(self, current_pos): if not 
(self.key_left ^ self.key_right): return current_pos if self.key_left: return max(Setup.PaddleLimit[0], current_pos - Setup.PaddleSpeed) else: return min(Setup.PaddleLimit[1], current_pos + Setup.PaddleSpeed) class PotController: def move_paddle(self, current_pos): # TODO return current_pos class App: def __init__(self): pygame.init() self.display = pygame.display.set_mode(Setup.DisplaySize, 0) pygame.display.set_caption("Breakout") self.surface = pygame.Surface(Setup.BoardSize) self.state = Gamestate() self._prepare_color_overlay() self.kbd_controller = KeyboardController() self.pot_controller = PotController() self.clock = pygame.time.Clock() self.sevenseg = SevenSeg(self.surface) def _prepare_color_overlay(self): self.color_overlay = pygame.Surface(Setup.BoardSize) self.color_overlay.fill(Color.white) for color, rect in get_color_rects(): pygame.draw.rect(self.color_overlay, color, rect) def stop(self): pygame.quit() sys.exit() def loop(self): self.clock.tick(Setup.FPS) paddle_pos = self.state.pad_pos paddle_pos = self.kbd_controller.move_paddle(paddle_pos) paddle_pos = self.pot_controller.move_paddle(paddle_pos) self.state.pad_pos = paddle_pos self.state.tick() def render(self): self.surface.fill(Color.black) # status bar pygame.draw.rect(self.surface, Color.gray, (0, Setup.PaddleTop, Setup.BoardSize[0], Setup.BoardSize[1])) texttop = Setup.PaddleTop + Setup.PaddleSize[1] + 2 ninechars = self.sevenseg.charwidth * 9 self.sevenseg.draw_text((Setup.Lives * (Setup.BallSize[0]+1) + 2, texttop), '{}'.format(self.state.score)) self.sevenseg.draw_text((Setup.BoardSize[0] - ninechars, texttop), 'HI {}'.format(self.state.hiscore)) # remaining lives or game over if self.state.lives < 0: self.sevenseg.draw_text(((Setup.BoardSize[0] - ninechars) / 2, texttop), 'GAME OVER') else: for i in range(self.state.lives): pygame.draw.rect(self.surface, Color.white, (i*(Setup.BallSize[0]+1)+1, texttop) + Setup.BallSize) # draw bricks row_idx = 0 for row in self.state.brick_matrix: col_idx = 0 for col in row: if col: pygame.draw.rect(self.surface, Color.white, get_brick_rect(row_idx, col_idx)) col_idx += 1 row_idx += 1 # draw paddle pygame.draw.rect(self.surface, Color.white, get_paddle_rect(self.state.pad_pos)) # draw ball if self.state.has_ball: pygame.draw.rect(self.surface, Color.white, get_ball_rect(self.state.ball)) # colors, yay! # the overlay will cause the moving ball change color, just like it should self.surface.blit(self.color_overlay, (0, 0), special_flags = BLEND_MULT) # stretch the image to the whole display pygame.transform.scale(self.surface, Setup.DisplaySize, self.display) pygame.display.update() def run(self): while True: for event in pygame.event.get(): if event.type == pygame.QUIT: self.stop() elif event.type == pygame.KEYUP: if event.key == pygame.K_SPACE: if self.state.lives < 0: self.state.reset_game() else: self.state.stopped = not self.state.stopped elif event.key == pygame.K_ESCAPE: self.stop() else: self.kbd_controller.up(event.key) elif event.type == pygame.KEYDOWN: self.kbd_controller.down(event.key) self.loop() self.render() if __name__ == '__main__': app = App() app.run()
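# --- Standalone sketch (not part of the game above; constants and names are
# illustrative). collide_with_paddle() maps where the ball strikes the paddle
# to an exit angle: the left 40% of the padded paddle yields a second-quadrant
# angle, the right 40% a first-quadrant angle, and the middle keeps the
# incoming angle with the vertical component flipped. A rough sketch of that
# mapping (the middle case is simplified to "straight up" here):

import math

def vector_from_angle(rad_angle, length):
    # Screen y grows downward, so an upward bounce needs a negative y component.
    return (length * math.cos(rad_angle), -length * math.sin(rad_angle))

def bounce_angle(hit_fraction):
    """hit_fraction: 0.0 at the paddle's left edge, 1.0 at its right edge."""
    if hit_fraction < 0.4:    # left end: angle sweeps from 150 deg down to 90 deg
        return (math.pi * 5 / 6) - (hit_fraction / 0.4) * math.pi / 3
    if hit_fraction > 0.6:    # right end: angle sweeps from 90 deg down to 30 deg
        return (math.pi * 1 / 6) + ((1.0 - hit_fraction) / 0.4) * math.pi / 3
    return math.pi / 2        # middle: simplified; the game preserves the incoming angle

if __name__ == '__main__':
    for f in (0.0, 0.2, 0.5, 0.8, 1.0):
        print(round(f, 2), vector_from_angle(bounce_angle(f), 2.75))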
#!/usr/bin/env python # # Copyright 2005 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test test filters. A user can specify which test(s) in a Google Test program to run via either the GTEST_FILTER environment variable or the --gtest_filter flag. This script tests such functionality by invoking gtest_filter_unittest_ (a program written with Google Test) with different environments and command line flags. Note that test sharding may also influence which tests are filtered. Therefore, we test that here also. """ __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import sets import gtest_test_utils # Constants. IS_WINDOWS = os.name == 'nt' # The environment variable for specifying the test filters. FILTER_ENV_VAR = 'GTEST_FILTER' # The environment variables for test sharding. TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' # The command line flag for specifying the test filters. FILTER_FLAG = 'gtest_filter' # The command line flag for including disabled tests. ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests' # Command to run the gtest_filter_unittest_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_') # Regex for determining whether parameterized tests are enabled in the binary. PARAM_TEST_REGEX = re.compile(r'/ParamTest') # Regex for parsing test case names from Google Test's output. TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') # Regex for parsing test names from Google Test's output. TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') # Full names of all tests in gtest_filter_unittests_. 
PARAM_TESTS = [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestX/1', 'SeqQ/ParamTest.TestY/0', 'SeqQ/ParamTest.TestY/1', ] DISABLED_TESTS = [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ] # All the non-disabled tests. ACTIVE_TESTS = [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', 'HasDeathTest.Test1', 'HasDeathTest.Test2', ] + PARAM_TESTS param_tests_present = None # Utilities. def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: os.environ[env_var] = value elif env_var in os.environ: del os.environ[env_var] def Run(command): """Runs a test program and returns its exit code and a list of tests run.""" stdout_file = os.popen(command, 'r') tests_run = [] test_case = '' test = '' for line in stdout_file: match = TEST_CASE_REGEX.match(line) if match is not None: test_case = match.group(1) else: match = TEST_REGEX.match(line) if match is not None: test = match.group(1) tests_run += [test_case + '.' + test] exit_code = stdout_file.close() return (tests_run, exit_code) def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs): """Runs the given function and arguments in a modified environment.""" try: original_env = os.environ.copy() os.environ.update(extra_env) return function(*args, **kwargs) finally: for key in extra_env.iterkeys(): if key in original_env: os.environ[key] = original_env[key] else: del os.environ[key] def RunWithSharding(total_shards, shard_index, command): """Runs a test program shard and returns exit code and a list of tests run.""" extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index), TOTAL_SHARDS_ENV_VAR: str(total_shards)} return InvokeWithModifiedEnv(extra_env, Run, command) # The unit test. class GTestFilterUnitTest(gtest_test_utils.TestCase): """Tests GTEST_FILTER env variable or --gtest_filter flag to filter tests.""" # Utilities. def AssertSetEqual(self, lhs, rhs): """Asserts that two sets are equal.""" for elem in lhs: self.assert_(elem in rhs, '%s in %s' % (elem, rhs)) for elem in rhs: self.assert_(elem in lhs, '%s in %s' % (elem, lhs)) def AssertPartitionIsValid(self, set_var, list_of_sets): """Asserts that list_of_sets is a valid partition of set_var.""" full_partition = [] for slice_var in list_of_sets: full_partition.extend(slice_var) self.assertEqual(len(set_var), len(full_partition)) self.assertEqual(sets.Set(set_var), sets.Set(full_partition)) def AdjustForParameterizedTests(self, tests_to_run): """Adjust tests_to_run in case value parameterized tests are disabled.""" global param_tests_present if not param_tests_present: return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS)) else: return tests_to_run def RunAndVerify(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for the given filter.""" tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # First, tests using GTEST_FILTER. # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the GTEST_FILTER environment variable. However, # we can still test the case when the variable is not supplied (i.e., # gtest_filter is None). 
# pylint: disable-msg=C6403 if not IS_WINDOWS or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) tests_run = Run(COMMAND)[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, tests_to_run) # pylint: enable-msg=C6403 # Next, tests using --gtest_filter. if gtest_filter is None: command = COMMAND else: command = '%s --%s=%s' % (COMMAND, FILTER_FLAG, gtest_filter) tests_run = Run(command)[0] self.AssertSetEqual(tests_run, tests_to_run) def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run, command=COMMAND, check_exit_0=False): """Checks that binary runs correct tests for the given filter and shard. Runs all shards of gtest_filter_unittest_ with the given filter, and verifies that the right set of tests were run. The union of tests run on each shard should be identical to tests_to_run, without duplicates. Args: gtest_filter: A filter to apply to the tests. total_shards: A total number of shards to split test run into. tests_to_run: A set of tests expected to run. command: A command to invoke the test binary. check_exit_0: When set to a true value, make sure that all shards return 0. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the GTEST_FILTER environment variable. However, # we can still test the case when the variable is not supplied (i.e., # gtest_filter is None). # pylint: disable-msg=C6403 if not IS_WINDOWS or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) partition = [] for i in range(0, total_shards): (tests_run, exit_code) = RunWithSharding(total_shards, i, command) if check_exit_0: self.assert_(exit_code is None) partition.append(tests_run) self.AssertPartitionIsValid(tests_to_run, partition) SetEnvVar(FILTER_ENV_VAR, None) # pylint: enable-msg=C6403 def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for the given filter. Runs gtest_filter_unittest_ with the given filter, and enables disabled tests. Verifies that the right set of tests were run. Args: gtest_filter: A filter to apply to the tests. tests_to_run: A set of tests expected to run. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Construct the command line. command = '%s --%s' % (COMMAND, ALSO_RUN_DISABED_TESTS_FLAG) if gtest_filter is not None: command = '%s --%s=%s' % (command, FILTER_FLAG, gtest_filter) tests_run = Run(command)[0] self.AssertSetEqual(tests_run, tests_to_run) def setUp(self): """Sets up test case. Determines whether value-parameterized tests are enabled in the binary and sets the flags accordingly. 
""" global param_tests_present if param_tests_present is None: param_tests_present = PARAM_TEST_REGEX.search( '\n'.join(os.popen(COMMAND, 'r').readlines())) is not None def testDefaultBehavior(self): """Tests the behavior of not specifying the filter.""" self.RunAndVerify(None, ACTIVE_TESTS) def testDefaultBehaviorWithShards(self): """Tests the behavior without the filter, with sharding enabled.""" self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS) def testEmptyFilter(self): """Tests an empty filter.""" self.RunAndVerify('', []) self.RunAndVerifyWithSharding('', 1, []) self.RunAndVerifyWithSharding('', 2, []) def testBadFilter(self): """Tests a filter that matches nothing.""" self.RunAndVerify('BadFilter', []) self.RunAndVerifyAllowingDisabled('BadFilter', []) def testFullName(self): """Tests filtering by full name.""" self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz']) def testUniversalFilters(self): """Tests filters that match everything.""" self.RunAndVerify('*', ACTIVE_TESTS) self.RunAndVerify('*.*', ACTIVE_TESTS) self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS) self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS) self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS) def testFilterByTestCase(self): """Tests filtering by test case name.""" self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz']) BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB'] self.RunAndVerify('BazTest.*', BAZ_TESTS) self.RunAndVerifyAllowingDisabled('BazTest.*', BAZ_TESTS + ['BazTest.DISABLED_TestC']) def testFilterByTest(self): """Tests filtering by test name.""" self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne']) def testFilterDisabledTests(self): """Select only the disabled tests to run.""" self.RunAndVerify('DISABLED_FoobarTest.Test1', []) self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1', ['DISABLED_FoobarTest.Test1']) self.RunAndVerify('*DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS) self.RunAndVerify('*.DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.DISABLED_Test2', ]) self.RunAndVerify('DISABLED_*', []) self.RunAndVerifyAllowingDisabled('DISABLED_*', [ 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ]) def testWildcardInTestCaseName(self): """Tests using wildcard in the test case name.""" self.RunAndVerify('*a*.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', 'HasDeathTest.Test1', 'HasDeathTest.Test2', ] + PARAM_TESTS) def testWildcardInTestName(self): """Tests using wildcard in the test name.""" self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA']) def testFilterWithoutDot(self): """Tests a filter that has no '.' 
in it.""" self.RunAndVerify('*z*', [ 'FooTest.Xyz', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ]) def testTwoPatterns(self): """Tests filters that consist of two patterns.""" self.RunAndVerify('Foo*.*:*A*', [ 'FooTest.Abc', 'FooTest.Xyz', 'BazTest.TestA', ]) # An empty pattern + a non-empty one self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA']) def testThreePatterns(self): """Tests filters that consist of three patterns.""" self.RunAndVerify('*oo*:*A*:*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', 'BazTest.TestA', ]) # The 2nd pattern is empty. self.RunAndVerify('*oo*::*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', ]) # The last 2 patterns are empty. self.RunAndVerify('*oo*::', [ 'FooTest.Abc', 'FooTest.Xyz', ]) def testNegativeFilters(self): self.RunAndVerify('*-HasDeathTest.Test1', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', 'HasDeathTest.Test2', ] + PARAM_TESTS) self.RunAndVerify('*-FooTest.Abc:HasDeathTest.*', [ 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + PARAM_TESTS) self.RunAndVerify('BarTest.*-BarTest.TestOne', [ 'BarTest.TestTwo', 'BarTest.TestThree', ]) # Tests without leading '*'. self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:HasDeathTest.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + PARAM_TESTS) # Value parameterized tests. self.RunAndVerify('*/*', PARAM_TESTS) # Value parameterized tests filtering by the sequence name. self.RunAndVerify('SeqP/*', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ]) # Value parameterized tests filtering by the test name. self.RunAndVerify('*/0', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestY/0', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestY/0', ]) def testFlagOverridesEnvVar(self): """Tests that the filter flag overrides the filtering env. 
variable.""" SetEnvVar(FILTER_ENV_VAR, 'Foo*') command = '%s --%s=%s' % (COMMAND, FILTER_FLAG, '*One') tests_run = Run(command)[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne']) def testShardStatusFileIsCreated(self): """Tests that the shard file is created if specified in the environment.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} stdout_file = InvokeWithModifiedEnv(extra_env, os.popen, COMMAND, 'r') try: stdout_file.readlines() finally: stdout_file.close() self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) def testShardStatusFileIsCreatedWithListTests(self): """Tests that the shard file is created with --gtest_list_tests.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file2') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} stdout_file = InvokeWithModifiedEnv(extra_env, os.popen, '%s --gtest_list_tests' % COMMAND, 'r') try: stdout_file.readlines() finally: stdout_file.close() self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) def testShardingWorksWithDeathTests(self): """Tests integration with death tests and sharding.""" gtest_filter = 'HasDeathTest.*:SeqP/*' expected_tests = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ] for command in (COMMAND + ' --gtest_death_test_style=threadsafe', COMMAND + ' --gtest_death_test_style=fast'): self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests, check_exit_0=True, command=command) self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests, check_exit_0=True, command=command) if __name__ == '__main__': gtest_test_utils.Main()
# Copyright (C) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for db purge.""" import datetime import uuid from oslo_db import exception as db_exc from oslo_utils import timeutils from sqlalchemy.dialects import sqlite from cinder import context from cinder import db from cinder.db.sqlalchemy import api as db_api from cinder import exception from cinder import test from oslo_db.sqlalchemy import utils as sqlalchemyutils class PurgeDeletedTest(test.TestCase): def setUp(self): super(PurgeDeletedTest, self).setUp() self.context = context.get_admin_context() self.engine = db_api.get_engine() self.session = db_api.get_session() self.conn = self.engine.connect() self.volumes = sqlalchemyutils.get_table( self.engine, "volumes") # The volume_metadata table has a FK of volume_id self.vm = sqlalchemyutils.get_table( self.engine, "volume_metadata") self.vol_types = sqlalchemyutils.get_table( self.engine, "volume_types") # The volume_type_projects table has a FK of volume_type_id self.vol_type_proj = sqlalchemyutils.get_table( self.engine, "volume_type_projects") self.snapshots = sqlalchemyutils.get_table( self.engine, "snapshots") self.sm = sqlalchemyutils.get_table( self.engine, "snapshot_metadata") self.vgm = sqlalchemyutils.get_table( self.engine, "volume_glance_metadata") self.qos = sqlalchemyutils.get_table( self.engine, "quality_of_service_specs") self.uuidstrs = [] for unused in range(6): self.uuidstrs.append(uuid.uuid4().hex) # Add 6 rows to table for uuidstr in self.uuidstrs: ins_stmt = self.volumes.insert().values(id=uuidstr) self.conn.execute(ins_stmt) ins_stmt = self.vm.insert().values(volume_id=uuidstr) self.conn.execute(ins_stmt) ins_stmt = self.vgm.insert().values( volume_id=uuidstr, key='image_name', value='test') self.conn.execute(ins_stmt) ins_stmt = self.vol_types.insert().values(id=uuidstr) self.conn.execute(ins_stmt) ins_stmt = self.vol_type_proj.insert().\ values(volume_type_id=uuidstr) self.conn.execute(ins_stmt) ins_stmt = self.snapshots.insert().values( id=uuidstr, volume_id=uuidstr) self.conn.execute(ins_stmt) ins_stmt = self.sm.insert().values(snapshot_id=uuidstr) self.conn.execute(ins_stmt) ins_stmt = self.vgm.insert().values( snapshot_id=uuidstr, key='image_name', value='test') self.conn.execute(ins_stmt) ins_stmt = self.qos.insert().values( id=uuidstr, key='QoS_Specs_Name', value='test') self.conn.execute(ins_stmt) ins_stmt = self.vol_types.insert().values( id=uuid.uuid4().hex, qos_specs_id=uuidstr) self.conn.execute(ins_stmt) ins_stmt = self.qos.insert().values( id=uuid.uuid4().hex, specs_id=uuidstr, key='desc', value='test') self.conn.execute(ins_stmt) # Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago old = timeutils.utcnow() - datetime.timedelta(days=20) older = timeutils.utcnow() - datetime.timedelta(days=60) make_vol_old = self.volumes.update().\ where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old) make_vol_older = self.volumes.update().\ where(self.volumes.c.id.in_(self.uuidstrs[4:6]))\ 
.values(deleted_at=older) make_vol_meta_old = self.vm.update().\ where(self.vm.c.volume_id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old) make_vol_meta_older = self.vm.update().\ where(self.vm.c.volume_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older) make_vol_types_old = self.vol_types.update().\ where(self.vol_types.c.id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old) make_vol_types_older = self.vol_types.update().\ where(self.vol_types.c.id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older) make_vol_type_proj_old = self.vol_type_proj.update().\ where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old) make_vol_type_proj_older = self.vol_type_proj.update().\ where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older) make_snap_old = self.snapshots.update().\ where(self.snapshots.c.id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old) make_snap_older = self.snapshots.update().\ where(self.snapshots.c.id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older) make_snap_meta_old = self.sm.update().\ where(self.sm.c.snapshot_id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old) make_snap_meta_older = self.sm.update().\ where(self.sm.c.snapshot_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older) make_vol_glance_meta_old = self.vgm.update().\ where(self.vgm.c.volume_id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old) make_vol_glance_meta_older = self.vgm.update().\ where(self.vgm.c.volume_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older) make_snap_glance_meta_old = self.vgm.update().\ where(self.vgm.c.snapshot_id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old) make_snap_glance_meta_older = self.vgm.update().\ where(self.vgm.c.snapshot_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older) make_qos_old = self.qos.update().where( self.qos.c.id.in_(self.uuidstrs[1:3])).values(deleted_at=old) make_qos_older = self.qos.update().where( self.qos.c.id.in_(self.uuidstrs[4:6])).values(deleted_at=older) make_qos_child_record_old = self.qos.update().where( self.qos.c.specs_id.in_(self.uuidstrs[1:3])).values( deleted_at=old) make_qos_child_record_older = self.qos.update().where( self.qos.c.specs_id.in_(self.uuidstrs[4:6])).values( deleted_at=older) make_vol_types1_old = self.vol_types.update().where( self.vol_types.c.qos_specs_id.in_(self.uuidstrs[1:3])).values( deleted_at=old) make_vol_types1_older = self.vol_types.update().where( self.vol_types.c.qos_specs_id.in_(self.uuidstrs[4:6])).values( deleted_at=older) self.conn.execute(make_vol_old) self.conn.execute(make_vol_older) self.conn.execute(make_vol_meta_old) self.conn.execute(make_vol_meta_older) self.conn.execute(make_vol_types_old) self.conn.execute(make_vol_types_older) self.conn.execute(make_vol_type_proj_old) self.conn.execute(make_vol_type_proj_older) self.conn.execute(make_snap_old) self.conn.execute(make_snap_older) self.conn.execute(make_snap_meta_old) self.conn.execute(make_snap_meta_older) self.conn.execute(make_vol_glance_meta_old) self.conn.execute(make_vol_glance_meta_older) self.conn.execute(make_snap_glance_meta_old) self.conn.execute(make_snap_glance_meta_older) self.conn.execute(make_qos_old) self.conn.execute(make_qos_older) self.conn.execute(make_qos_child_record_old) self.conn.execute(make_qos_child_record_older) self.conn.execute(make_vol_types1_old) self.conn.execute(make_vol_types1_older) def test_purge_deleted_rows_old(self): dialect = self.engine.url.get_dialect() if dialect == sqlite.dialect: # We're seeing issues with foreign key support in 
SQLite 3.6.20 # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 # It works fine in SQLite 3.7. # Force foreign_key checking if running SQLite >= 3.7 import sqlite3 tup = sqlite3.sqlite_version_info if tup[0] > 3 or (tup[0] == 3 and tup[1] >= 7): self.conn.execute("PRAGMA foreign_keys = ON") # Purge at 30 days old, should only delete 2 rows db.purge_deleted_rows(self.context, age_in_days=30) vol_rows = self.session.query(self.volumes).count() vol_meta_rows = self.session.query(self.vm).count() vol_type_rows = self.session.query(self.vol_types).count() vol_type_proj_rows = self.session.query(self.vol_type_proj).count() snap_rows = self.session.query(self.snapshots).count() snap_meta_rows = self.session.query(self.sm).count() vol_glance_meta_rows = self.session.query(self.vgm).count() qos_rows = self.session.query(self.qos).count() # Verify that we only deleted 2 self.assertEqual(4, vol_rows) self.assertEqual(4, vol_meta_rows) self.assertEqual(8, vol_type_rows) self.assertEqual(4, vol_type_proj_rows) self.assertEqual(4, snap_rows) self.assertEqual(4, snap_meta_rows) self.assertEqual(8, vol_glance_meta_rows) self.assertEqual(8, qos_rows) def test_purge_deleted_rows_older(self): dialect = self.engine.url.get_dialect() if dialect == sqlite.dialect: # We're seeing issues with foreign key support in SQLite 3.6.20 # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 # It works fine in SQLite 3.7. # Force foreign_key checking if running SQLite >= 3.7 import sqlite3 tup = sqlite3.sqlite_version_info if tup[0] > 3 or (tup[0] == 3 and tup[1] >= 7): self.conn.execute("PRAGMA foreign_keys = ON") # Purge at 10 days old now, should delete 2 more rows db.purge_deleted_rows(self.context, age_in_days=10) vol_rows = self.session.query(self.volumes).count() vol_meta_rows = self.session.query(self.vm).count() vol_type_rows = self.session.query(self.vol_types).count() vol_type_proj_rows = self.session.query(self.vol_type_proj).count() snap_rows = self.session.query(self.snapshots).count() snap_meta_rows = self.session.query(self.sm).count() vol_glance_meta_rows = self.session.query(self.vgm).count() qos_rows = self.session.query(self.qos).count() # Verify that we only have 2 rows now self.assertEqual(2, vol_rows) self.assertEqual(2, vol_meta_rows) self.assertEqual(4, vol_type_rows) self.assertEqual(2, vol_type_proj_rows) self.assertEqual(2, snap_rows) self.assertEqual(2, snap_meta_rows) self.assertEqual(4, vol_glance_meta_rows) self.assertEqual(4, qos_rows) def test_purge_deleted_rows_bad_args(self): # Test with no age argument self.assertRaises(TypeError, db.purge_deleted_rows, self.context) # Test purge with non-integer self.assertRaises(exception.InvalidParameterValue, db.purge_deleted_rows, self.context, age_in_days='ten') def test_purge_deleted_rows_integrity_failure(self): dialect = self.engine.url.get_dialect() if dialect == sqlite.dialect: # We're seeing issues with foreign key support in SQLite 3.6.20 # SQLAlchemy doesn't support it at all with < SQLite 3.6.19 # It works fine in SQLite 3.7. 
            # So return early to skip this test if running SQLite < 3.7
            import sqlite3
            tup = sqlite3.sqlite_version_info
            if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
                self.skipTest(
                    'sqlite version too old for reliable SQLA foreign_keys')
            self.conn.execute("PRAGMA foreign_keys = ON")

        # add new entry in volume and volume_admin_metadata for
        # integrity check
        uuid_str = uuid.uuid4().hex
        ins_stmt = self.volumes.insert().values(id=uuid_str)
        self.conn.execute(ins_stmt)
        ins_stmt = self.vm.insert().values(volume_id=uuid_str)
        self.conn.execute(ins_stmt)

        # set volume record to deleted 20 days ago
        old = timeutils.utcnow() - datetime.timedelta(days=20)
        make_old = self.volumes.update().where(
            self.volumes.c.id.in_([uuid_str])).values(deleted_at=old)
        self.conn.execute(make_old)

        # Verify that purge_deleted_rows fails due to Foreign Key constraint
        self.assertRaises(db_exc.DBReferenceError,
                          db.purge_deleted_rows,
                          self.context, age_in_days=10)
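
# The contract these tests pin down for db.purge_deleted_rows(): hard-delete
# rows whose deleted_at timestamp is older than the age_in_days cutoff, and
# let a dangling foreign key surface as DBReferenceError instead of silently
# orphaning child rows.  The per-table sketch below is an illustration of that
# idea with SQLAlchemy Core only; it is not Cinder's implementation, which
# walks every table and maps IntegrityError onto DBReferenceError itself.
def purge_table_older_than(engine, table, age_in_days):
    """Illustrative helper: delete soft-deleted rows older than the cutoff."""
    cutoff = timeutils.utcnow() - datetime.timedelta(days=age_in_days)
    stmt = table.delete().where(table.c.deleted_at < cutoff)
    with engine.begin() as conn:
        # An IntegrityError raised here is what the real purge code
        # translates into oslo_db's DBReferenceError.
        return conn.execute(stmt).rowcount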
#!/usr/bin/env python """ Test pycacheback """ import os import shutil import random import copy from pycacheback import pyCacheBack import unittest class TestpyCacheBack(unittest.TestCase): def testSimple(self): """A simple 'smoke test' for the extended dictionary.""" # make an extended dictionary and define a key/value pair a = pyCacheBack() a[1] = 'one' # make sure we can retrieve the pre-defined key/value pair msg = "a[1] != 'one'!" self.assertEqual(a[1], 'one', msg) # make sure that accessing non-existant key raises KeyError self.assertRaises(KeyError, a.__getitem__, 2) # make sure the len() function works correctly msg = 'len(a) should be 1, got %d!' % len(a) self.assertEqual(len(a), 1, msg) def testDict(self): """Test the basic dictionary methods that should still work.""" # make an extended dictionary and define some key/value pairs a = pyCacheBack() a[1] = 'one' a[2] = '2' a[3] = 3 a['4'] = 'four' # check other dictionary methods msg = 'len(a) should be 4, got %d!' % len(a) self.assertEqual(len(a), 4, msg) msg = "'1 in a' was False, should be True!" self.assertTrue(1 in a, msg) msg = "'\'4\' in a' was False, should be True!" self.assertTrue('4' in a, msg) b = a.copy() msg = "a.copy() doesn't return a true copy'" self.assertEqual(a, b, msg) msg = "a.get(1) should return 'one', got %s" % a.get(1) self.assertEqual(a.get(1), 'one', msg) msg = ("a.get(10, 'NONE') should return 'NONE', got %s" % str(a.get(10, 'NONE'))) self.assertEqual(a.get(10, 'NONE'), 'NONE', msg) msg = "a.has_key(2) should return True, got %s" % str(2 in a) self.assertTrue(2 in a, msg) msg = ("a.has_key(10) should return False, got %s" % str(10 in a)) self.assertFalse(10 in a, msg) msg = ("a.items() should return [(1, 'one'), (2, '2'), (3, 3), " "('4', 'four')], got %s" % str(a.items())) self.assertEqual([(1, 'one'), (2, '2'), (3, 3), ('4', 'four')], list(a.items()), msg) msg = "a.keys() should return [1, 2, 3, '4'], got %s" % str(a.keys()) self.assertEqual([1, 2, 3, '4'], list(a.keys()), msg) msg = "a.keys() should return [1, 2, 3, '4'], got %s" % str(a.keys()) self.assertEqual([1, 2, 3, '4'], list(a.keys()), msg) msg = ("a.values() should return ['one', '2', 3, 'four'], got %s" % str(a.values())) self.assertEqual(['one', '2', 3, 'four'], list(a.values()), msg) result = a.setdefault(10, 'TEN') msg = "a.setdefault(10, 'TEN') doesn't return 'TEN'?" self.assertEqual(result, 'TEN', msg) msg = "a.setdefault() doesn't set the default?" self.assertEqual(a[10], 'TEN', msg) result = a.pop(10) msg = "a.pop(10) should return 'TEN' but got %s?" % result self.assertEqual(result, 'TEN', msg) result = a.pop(10, 'not found') msg = ("a.pop(10, 'not found') should return 'not found' but got %s?" % result) self.assertEqual(result, 'not found', msg) #msg = "a.pop(10) should raise KeyError exception, but didn't?" self.assertRaises(KeyError, a.pop, 10) msg = "a.update(b) should set 'TEN' key but didn't" b = {'TEN': 10} a.update(b) self.assertEqual(a['TEN'], 10, msg) a.clear() msg = 'After clear(), len(a) should be 0, got %d!' 
% len(a) self.assertEqual(len(a), 0, msg) b = {'TEN': 10} a.update(b) msg = "a.keys() should return ['TEN'], got %s" % str(a.keys()) self.assertEqual(list(a.keys()), ['TEN'], msg) def testLRU(self): """Test the LRU mechanism.""" # make an extended dictionary, maxLRU is 2 for testing a = pyCacheBack(max_lru=2) # the LRU list should be empty when we start msg = ("Initial LRU list should be empty, but it's %s" % str(a._lru_list)) self.assertEqual(a._lru_list, [], msg) # make sure the len() function works correctly msg = "len(a) should be 0, got %d!" % len(a) self.assertEqual(len(a), 0, msg) # add a key/value pair, recheck LRU and length a['test'] = 'test value' msg = ("LRU list should be %s, but it's %s" % (str(['test']), str(a._lru_list))) self.assertEqual(a._lru_list, ['test'], msg) msg = "len(a) should be 1, got %d!" % len(a) self.assertEqual(len(a), 1, msg) # add another key/value pair, recheck LRU a['test2'] = 'another test value' msg = ("LRU list should be %s, but it's %s" % (str(['test2', 'test']), str(a._lru_list))) self.assertEqual(a._lru_list, ['test2', 'test'], msg) # access first key/value pair, check LRU changed b = a['test'] msg = ("LRU list should be %s, but it's %s" % (str(['test', 'test2']), str(a._lru_list))) self.assertEqual(a._lru_list, ['test', 'test2'], msg) # make sure the len() function works correctly msg = "len(a) should be 2, got %d!" % len(a) self.assertEqual(len(a), 2, msg) # add third key/value pair, check LRU changed a['test3'] = 100 msg = ("LRU list should be %s, but it's %s" % (str(['test3', 'test']), str(a._lru_list))) self.assertEqual(a._lru_list, ['test3', 'test'], msg) # make sure the len() function works correctly (still 2) msg = "len(a) should be 2, got %d!" % len(a) self.assertEqual(len(a), 2, msg) # delete first key/value pair, check LRU changed del a['test'] msg = ("LRU list should be %s, but it's %s" % (str(['test3']), str(a._lru_list))) self.assertEqual(a._lru_list, ['test3'], msg) # make sure the len() function works correctly msg = "len(a) should be 1, got %d!" % len(a) self.assertEqual(len(a), 1, msg) def testBacking(self): """Test the backing mechanism. Keys will be (x, y) form.""" # create the test directory test_dir = './_#test_dir#_' shutil.rmtree(test_dir, ignore_errors=True) os.mkdir(test_dir) # override the backing functions in pyCacheBack class my_cache(pyCacheBack): def _put_to_back(self, key, value): (x, y) = key dir_path = os.path.join(self._backing_dir, str(x)) try: os.mkdir(dir_path) except OSError: pass file_path = os.path.join(dir_path, str(y)) with open(file_path, 'wb') as f: f.write(bytes(value, encoding='utf-8')) def _get_from_back(self, key): (x, y) = key file_path = os.path.join(self._backing_dir, str(x), str(y)) try: with open(file_path, 'rb') as f: value = f.read() except IOError: raise KeyError(str(key)) return value # define utility testing function def check_file(self, file_path, expected_contents): if not os.path.isfile(file_path): self.fail("File %s doesn't exist!?" 
% file_path) with open(file_path, 'rb') as f: file_contents = f.read().decode("utf-8") if file_contents != expected_contents: self.fail("Expected file contents '%s', got '%s'" % (expected_contents, file_contents)) # OK, test it a = my_cache(backing_dir=test_dir, max_lru=2) a[(1,1)] = 'one and one' a[(1,2)] = 'one and two' a[(1,1)] = 'one and one, second value' # redefine (1,1) value # test if backing files are as expected check_file(self, os.path.join(test_dir, '1', '1'), a[(1,1)]) check_file(self, os.path.join(test_dir, '1', '2'), a[(1,2)]) # add third key, flushing (1,2), check backing file still there a[(1,3)] = 'one, three' check_file(self, os.path.join(test_dir, '1', '2'), a[(1,2)]) # check that we can still get (1,2) data from backing store msg = "a[(1,2)] != 'one and two'!" self.assertEqual(a[(1,2)], 'one and two', msg) # delete a key, ensure gone & check backing file still there del a[(1,3)] check_file(self, os.path.join(test_dir, '1', '3'), 'one, three') # clean up shutil.rmtree(test_dir) def testIter(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) for i, x in enumerate(iter(a)): msg = "'%d'th key should be %d, got %s" % (i+1, kv_list[i][0], x) self.assertEqual(kv_list[i][0], x, msg) def testCopy(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) b = a.copy() msg = 'Length of copied dict should be %d, got %d' % (len(a), len(b)) self.assertEqual(len(b), len(a), msg) # change element of 'a', see if 'b' gets it orig = b[2] a[2] = 'test' _ = a[3] msg = "copy: b[2] should be %s, got %s" % (orig, str(b[2])) self.assertEqual(b[2], orig, msg) def testClear(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) msg = 'Length before should be %d, got %d' % (len(kv_list), len(a)) self.assertEqual(len(a), len(kv_list), msg) a.clear() msg = 'Length after should be 0, got %d' % len(a) self.assertEqual(len(a), 0, msg) # check LRU list is empty msg = ".clear() didn't empty ._lru_list, it's '%s'" % str(a._lru_list) self.assertEqual([], a._lru_list, msg) def testGet(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) msg = (".get(1) should return '%s', got '%s'" % (str(kv_list[1][0]), str(a.get(1)))) self.assertEqual(a.get(1), 'one', msg) def testHasKey(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) msg = ".has_key(1) should return True, got '%s'" % str(1 in a) #a.has_key(1)) self.assertEqual(1 in a, True, msg) msg = (".has_key(100) should return False, got '%s'" % str(100 in a)) self.assertEqual(100 in a, False, msg) self.assertEqual(1 in a, True, msg) def testItems(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) expected = str(kv_list) msg = (".items() should return '%s', got '%s'" % (expected, str(a.items()))) self.assertEqual(expected, str(a.items()), msg) def testItems(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) for i, x in enumerate(a.items()): msg = (".iteritems() item %d should be '%s', got '%s'" % (i, str(kv_list[i]), str(x))) self.assertEqual(kv_list[i], x, msg) def testItervalues(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) for i, x in enumerate(a.itervalues()): msg = (".itervalues() item %d should be '%s', got '%s'" % (i, str(kv_list[i][1]), str(x))) self.assertEqual(kv_list[i][1], x, msg) def testItervalues(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) k_list = 
[x[0] for x in kv_list] msg = ".keys() should be '%s', got '%s'" % (str(k_list), str(a.keys())) self.assertEqual(k_list, list(a.keys()), msg) def testPop(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) random.shuffle(kv_list) for (k, v) in kv_list: _ = a[k] expected_lru_len = len(a._lru_list) for (k, v) in kv_list: value = a.pop(k, None) msg = (".pop(%s) should return '%s', got '%s'" % (str(k), str(v), str(value))) self.assertEqual(v, value, msg) expected_lru_len = expected_lru_len - 1 msg = (".pop(%s) should leave dict with len(LRU)=%d, got %d" % (str(k), expected_lru_len, len(a._lru_list))) self.assertEqual(len(a._lru_list), expected_lru_len, msg) def testPopitem(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) # get a big LRU list shuffle_kv_list = copy.deepcopy(kv_list) random.shuffle(shuffle_kv_list) for (k, v) in shuffle_kv_list: _ = a[k] a_len = len(a) lru_len = len(a._lru_list) for i in range(a_len): (k, v) = a.popitem() msg = (".popitem() returned '%s', shouldn't be in dict?" % str((k, v))) self.assertIn((k, v), kv_list, msg) msg = ".popitem() all done, len should be 0, got %d" % len(a) self.assertEqual(len(a), 0, msg) msg = (".popitem() all done, ._lru_list should be [], got '%s'" % str(a._lru_list)) self.assertEqual(a._lru_list, [], msg) def testSetdefault(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) ret_val = a.setdefault(100, True) msg = ".setdefault(100, True) should return True, got %s" % str(ret_val) self.assertEqual(ret_val, True, msg) def testUpdate(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) kv_update = [(4, '****'), (5, 'V')] a.update(kv_update) expected_len = len(kv_list) + len(kv_update) msg = (".update() should create length %d, got length %d" % (expected_len, len(a))) self.assertEqual(expected_len, len(a), msg) # check actual contents full_list = kv_list + kv_update b = pyCacheBack(full_list, max_lru=10) msg = ".update() didn't work, got dict '%s'" % str(a) self.assertEqual(b, a, msg) def testValues(self): kv_list = [(1, 'one'), (2, 2), (3, 3.0)] a = pyCacheBack(kv_list, max_lru=10) expected_values = [kv[1] for kv in kv_list] msg = (".values should return '%s', got '%s'" % (str(expected_values), str(a.values()))) self.assertEqual(expected_values, list(a.values()), msg) unittest.main()
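
# The testBacking case above is also the intended usage pattern for
# pyCacheBack: subclass it, override _put_to_back()/_get_from_back(), and
# entries evicted from the LRU become transparent disk reads.  The sketch
# below is illustrative only; the file layout and directory name are
# assumptions, not part of pycacheback itself.
class FileBackedCache(pyCacheBack):
    """Cache whose (x, y) entries are mirrored to small files on disk."""

    def _put_to_back(self, key, value):
        (x, y) = key
        dir_path = os.path.join(self._backing_dir, str(x))
        os.makedirs(dir_path, exist_ok=True)
        with open(os.path.join(dir_path, str(y)), 'w') as f:
            f.write(value)

    def _get_from_back(self, key):
        (x, y) = key
        file_path = os.path.join(self._backing_dir, str(x), str(y))
        try:
            with open(file_path) as f:
                return f.read()
        except IOError:
            raise KeyError(str(key))

# Example: with max_lru=2 the third assignment evicts (0, 0) from memory,
# but reading it back transparently falls through to the backing store.
#
#   cache = FileBackedCache(backing_dir='/tmp/tile_cache', max_lru=2)
#   cache[(0, 0)] = 'tile data'      # kept in memory and mirrored to disk
#   cache[(0, 1)] = 'more tile data'
#   cache[(0, 2)] = 'third tile'     # evicts (0, 0) from the LRU list
#   cache[(0, 0)]                    # re-read via _get_from_back()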
# -*- coding: utf-8 -*- u""" This script is to parse SRW Python scripts and to produce JSON-file with the parsed data. It's highly dependent on the external Sirepo/SRW libraries and is written to allow parsing of the .py files using SRW objects. :copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved. :license: http://www.apache.org/licenses/LICENSE-2.0.html """ from __future__ import absolute_import, division, print_function from pykern import pkcollections from pykern import pkio from pykern import pkjson from pykern import pkrunpy from pykern.pkdebug import pkdlog, pkdexc, pkdp import ast import inspect import os import py.path import re import srwl_bl import sirepo.sim_data _SIM_DATA, SIM_TYPE, SCHEMA = sirepo.sim_data.template_globals('srw') class SRWParser(object): def __init__(self, script, user_filename, arguments, optics_func_name='set_optics'): m = pkrunpy.run_path_as_module(script) if arguments: import shlex arguments = shlex.split(arguments) self.var_param = srwl_bl.srwl_uti_parse_options(m.varParam, use_sys_argv=False, args=arguments) self.replace_mirror_files() self.replace_image_files() try: self.optics = getattr(m, optics_func_name)(self.var_param) except ValueError as e: if re.search('could not convert string to float', str(e.args)): self.replace_mirror_files('mirror_2d.dat') self.optics = getattr(m, optics_func_name)(self.var_param) self.data = _parsed_dict(self.var_param, self.optics) self.data.models.simulation.name = _name(user_filename) def replace_mirror_files(self, mirror_file='mirror_1d.dat'): for key in self.var_param.__dict__.keys(): if key == 'fdir': self.var_param.__dict__[key] = os.getcwd() if re.search(r'\_ofn$', key): self.var_param.__dict__[key] = '' if re.search(r'\_(h|i)fn$', key): if getattr(self.var_param, key) != '' and getattr(self.var_param, key) != 'None': self.var_param.__dict__[key] = str(_SIM_DATA.lib_file_abspath(mirror_file)) def replace_image_files(self, image_file='sample.tif'): for key in self.var_param.__dict__.keys(): if key.find('op_sample') >= 0: if getattr(self.var_param, key) != '': self.var_param.__dict__[key] = str(_SIM_DATA.lib_file_abspath(image_file)) class Struct(object): def __init__(self, **entries): self.__dict__.update(entries) def import_python(code, tmp_dir, user_filename=None, arguments=None): """Converts script_text into json and stores as new simulation. Avoids too much data back to the user in the event of an error. This could be a potential security issue, because the script could be used to probe the system. 
Args: simulation_type (str): always "srw", but used to find lib dir code (str): Python code that runs SRW user_filename (str): uploaded file name for log arguments (str): argv to be passed to script Returns: dict: simulation data """ script = None # Patch for the mirror profile for the exported .py file from Sirepo: code = _patch_mirror_profile(code) try: with pkio.save_chdir(tmp_dir): # This string won't show up anywhere script = pkio.write_text( 'in.py', re.sub(r'^main\(', '#', code, flags=re.MULTILINE), ) o = SRWParser( script, user_filename=user_filename, arguments=arguments, ) return o.data except Exception as e: lineno = script and _find_line_in_trace(script) if hasattr(e, 'args'): if len(e.args) == 1: m = str(e.args[0]) elif e.args: m = str(e.args) else: m = e.__class__.__name__ else: m = str(e) pkdlog( 'Error: {}; exception={}; script={}; filename={}; stack:\n{}', m, e.__class__.__name__, script, user_filename, pkdexc(), ) m = m[:50] raise ValueError( 'Error on line {}: {}'.format(lineno, m) if lineno else 'Error: {}'.format(m), ) # Mapping all the values to a dictionary: def _beamline_element(obj, idx, title, elem_type, position): data = pkcollections.Dict() data['id'] = idx data['type'] = elem_type data['title'] = title data['position'] = position if elem_type in ['aperture', 'obstacle']: data['shape'] = obj.shape data['horizontalOffset'] = obj.x data['verticalOffset'] = obj.y data['horizontalSize'] = obj.Dx * 1e3 data['verticalSize'] = obj.Dy * 1e3 elif elem_type == 'crl': keys = ['attenuationLength', 'focalPlane', 'horizontalApertureSize', 'numberOfLenses', 'radius', 'refractiveIndex', 'shape', 'verticalApertureSize', 'wallThickness'] for key in keys: data[key] = obj.input_parms[key] # Should be multiplied by 1000.0: for key in ['horizontalApertureSize', 'verticalApertureSize']: data[key] *= 1000.0 elif elem_type == 'crystal': # Fixed values in srw.js: data['heightAmplification'] = 1 data['heightProfileFile'] = None data['orientation'] = 'x' data['material'] = 'Unknown' data['h'] = '1' data['k'] = '1' data['l'] = '1' try: data['energy'] = obj.aux_energy except Exception: data['energy'] = None try: data['grazingAngle'] = obj.aux_ang_dif_pl except Exception: data['grazingAngle'] = 0.0 data['asymmetryAngle'] = obj.angAs data['rotationAngle'] = 0.0 data['crystalThickness'] = obj.tc data['geometryType'] = obj.uc data['dSpacing'] = obj.dSp data['psi0r'] = obj.psi0r data['psi0i'] = obj.psi0i data['psiHr'] = obj.psiHr data['psiHi'] = obj.psiHi data['psiHBr'] = obj.psiHbr data['psiHBi'] = obj.psiHbi data['nvx'] = obj.nvx data['nvy'] = obj.nvy data['nvz'] = obj.nvz data['tvx'] = obj.tvx data['tvy'] = obj.tvy elif elem_type == 'ellipsoidMirror': # Fixed values in srw.js: data['heightAmplification'] = 1 data['heightProfileFile'] = None data['orientation'] = 'x' data['firstFocusLength'] = obj.p data['focalLength'] = obj.q data['grazingAngle'] = obj.angGraz * 1e3 data['normalVectorX'] = obj.nvx data['normalVectorY'] = obj.nvy data['normalVectorZ'] = obj.nvz data['sagittalSize'] = obj.ds data['tangentialSize'] = obj.dt data['tangentialVectorX'] = obj.tvx data['tangentialVectorY'] = obj.tvy elif elem_type == 'fiber': data['method'] = 'server' data['externalMaterial'] = 'User-defined' data['coreMaterial'] = 'User-defined' keys = ['focalPlane', 'externalRefractiveIndex', 'coreRefractiveIndex', 'externalAttenuationLength', 'coreAttenuationLength', 'externalDiameter', 'coreDiameter', 'horizontalCenterPosition', 'verticalCenterPosition'] for key in keys: data[key] = obj.input_parms[key] elif 
elem_type == 'grating': # Fixed values in srw.js: data['grazingAngle'] = 12.9555790185373 data['diffractionOrder'] = obj.m data['grooveDensity0'] = obj.grDen data['grooveDensity1'] = obj.grDen1 data['grooveDensity2'] = obj.grDen2 data['grooveDensity3'] = obj.grDen3 data['grooveDensity4'] = obj.grDen4 data['normalVectorX'] = obj.mirSub.nvx data['normalVectorY'] = obj.mirSub.nvy data['normalVectorZ'] = obj.mirSub.nvz data['sagittalSize'] = obj.mirSub.ds data['tangentialSize'] = obj.mirSub.dt data['tangentialVectorX'] = obj.mirSub.tvx data['tangentialVectorY'] = obj.mirSub.tvy elif elem_type == 'lens': data['horizontalFocalLength'] = obj.Fx data['horizontalOffset'] = obj.x data['verticalFocalLength'] = obj.Fy data['verticalOffset'] = obj.y elif elem_type in ['mirror', 'mirror2d']: keys = ['grazingAngle', 'heightAmplification', 'heightProfileFile', 'horizontalTransverseSize', 'orientation', 'verticalTransverseSize'] for key in keys: if type(obj.input_parms) == tuple: data[key] = obj.input_parms[0][key] else: data[key] = obj.input_parms[key] # Should be multiplied by 1000.0: for key in ['grazingAngle', 'horizontalTransverseSize', 'verticalTransverseSize']: data[key] *= 1000.0 data['type'] = 'mirror' data['heightProfileFile'] = 'mirror_1d.dat' if elem_type == 'mirror' else 'mirror_2d.dat' elif elem_type == 'sample': data['imageFile'] = 'sample.tif' data['material'] = 'User-defined' data['method'] = 'server' keys = ['resolution', 'thickness', 'refractiveIndex', 'attenuationLength'] for key in keys: if type(obj.input_parms) == tuple: data[key] = obj.input_parms[0][key] else: data[key] = obj.input_parms[key] data['resolution'] *= 1e9 data['thickness'] *= 1e6 elif elem_type == 'sphericalMirror': # Fixed values in srw.js: data['grazingAngle'] = 13.9626000172 data['heightAmplification'] = 1 data['heightProfileFile'] = None data['orientation'] = 'x' data['normalVectorX'] = obj.nvx data['normalVectorY'] = obj.nvy data['normalVectorZ'] = obj.nvz data['radius'] = obj.rad data['sagittalSize'] = obj.ds data['tangentialSize'] = obj.dt data['tangentialVectorX'] = obj.tvx data['tangentialVectorY'] = obj.tvy elif elem_type == 'zonePlate': data['numberOfZones'] = obj.nZones data['outerRadius'] = obj.rn * 1e3 data['thickness'] = obj.thick * 1e6 data['method'] = 'server' data['mainMaterial'] = 'User-defined' data['mainRefractiveIndex'] = obj.delta1 data['mainAttenuationLength'] = obj.atLen1 data['complementaryMaterial'] = 'User-defined' data['complementaryRefractiveIndex'] = obj.delta2 data['complementaryAttenuationLength'] = obj.atLen2 data['horizontalOffset'] = obj.x data['verticalOffset'] = obj.y elif elem_type == 'watch': pass else: raise ValueError('Element type <{}> does not exist.'.format(elem_type)) return data def _get_beamline(obj_arOpt, init_distance=20.0): """The function creates a beamline from the provided object and/or AST tree. :param obj_arOpt: SRW object containing properties of the beamline elements. :param init_distance: distance from the source to the first element (20.0 m by default). :return elements_list: list of all found beamline elements. 
""" num_elements = len(obj_arOpt) elements_list = [] # The dictionary to count the elements of different types: names = pkcollections.Dict({ 'S': 0, 'O': 0, 'HDM': 0, 'CRL': 0, 'KL': 0, 'KLA': 0, 'AUX': 0, 'M': 0, # mirror 'G': 0, # grating 'ZP': 0, # zone plate 'Crystal': 0, 'Fiber': 0, 'Watch': '', 'Sample': '', }) positions = [] # a list of dictionaries with sequence of distances between elements d_src = init_distance counter = 0 for i in range(num_elements): name = obj_arOpt[i].__class__.__name__ try: next_name = obj_arOpt[i + 1].__class__.__name__ except Exception: next_name = None if name == 'SRWLOptD': d = obj_arOpt[i].L else: d = 0.0 d_src += d if (len(positions) == 0) or \ (name != 'SRWLOptD') or \ (name == 'SRWLOptD' and next_name == 'SRWLOptD') or \ (name == 'SRWLOptD' and (i + 1) == num_elements): counter += 1 elem_type = '' if name == 'SRWLOptA': if obj_arOpt[i].ap_or_ob == 'a': elem_type = 'aperture' key = 'S' else: elem_type = 'obstacle' key = 'O' elif name == 'SRWLOptCryst': key = 'Crystal' elem_type = 'crystal' elif name == 'SRWLOptD': key = 'AUX' elem_type = 'watch' elif name == 'SRWLOptG': key = 'G' elem_type = 'grating' elif name == 'SRWLOptL': key = 'KL' elem_type = 'lens' elif name == 'SRWLOptMirEl': key = 'M' elem_type = 'ellipsoidMirror' elif name == 'SRWLOptMirSph': key = 'M' elem_type = 'sphericalMirror' elif name == 'SRWLOptT': if type(obj_arOpt[i].input_parms) == tuple: elem_type = obj_arOpt[i].input_parms[0]['type'] else: elem_type = obj_arOpt[i].input_parms['type'] if elem_type in ['mirror', 'mirror2d']: key = 'HDM' elif elem_type == 'crl': # CRL key = 'CRL' elif elem_type == 'cyl_fiber': elem_type = 'fiber' key = 'Fiber' elif elem_type == 'sample': key = 'Sample' elif name == 'SRWLOptZP': key = 'ZP' elem_type = 'zonePlate' # Last element is Sample: if name == 'SRWLOptD' and (i + 1) == num_elements: key = 'Watch' elem_type = 'watch' try: names[key] += 1 except Exception: pass title = key + str(names[key]) if not elem_type: raise ValueError('Unhandled element named: {}.'.format(name)) positions.append(pkcollections.Dict({ 'id': counter, 'object': obj_arOpt[i], 'elem_class': name, 'elem_type': elem_type, 'title': title, 'dist': d, 'dist_source': float(str(d_src)), })) for i in range(len(positions)): data = _beamline_element( positions[i]['object'], positions[i]['id'], positions[i]['title'], positions[i]['elem_type'], positions[i]['dist_source'] ) elements_list.append(data) return elements_list def _get_default_drift(): """The function parses srw.js file to find the default values for drift propagation parameters, which can be sometimes missed in the exported .py files (when distance = 0), but should be presented in .json files. 
Returns: str: default drift propagation paramters """ def _search_for_default_drift(): return re.search( r'function defaultDriftPropagationParams.*?return\s*(\[[^\]]+\])', pkio.read_text(sirepo.resource.static('js', 'srw.js')), re.DOTALL, ).group(1) return pkjson.load_any(_search_for_default_drift()) def _get_propagation(op): prop_dict = pkcollections.Dict() counter = 0 for i in range(len(op.arProp) - 1): name = op.arOpt[i].__class__.__name__ try: next_name = op.arOpt[i + 1].__class__.__name__ except Exception: next_name = None if (name != 'SRWLOptD') or \ (name == 'SRWLOptD' and next_name == 'SRWLOptD') or \ ((i + 1) == len(op.arProp) - 1): # exclude last drift counter += 1 prop_dict[str(counter)] = [op.arProp[i]] if next_name == 'SRWLOptD': prop_dict[str(counter)].append(op.arProp[i + 1]) else: prop_dict[str(counter)].append(_get_default_drift()) return prop_dict def _find_line_in_trace(script): """Parse the stack trace for the most recent error message Returns: int: first line number in trace that was called from the script """ trace = None t = None f = None try: trace = inspect.trace() for t in reversed(trace): f = t[0] if py.path.local(f.f_code.co_filename) == script: return f.f_lineno finally: del trace del f del t return None def _list2dict(data_list): """ The function converts list of lists to a dictionary with keys from 1st elements and values from 3rd elements. :param data_list: list of SRW parameters (e.g., 'appParam' in Sirepo's *.py files). :return out_dict: dictionary with all parameters. """ out_dict = pkcollections.Dict() for i in range(len(data_list)): out_dict[data_list[i][0]] = data_list[i][2] return out_dict def _name(user_filename): """Parse base name from user_filename Can't assume the file separators will be understood so have to parse out the name manually. Will need to be uniquely named by sirepo.server, but not done yet. Args: user_filename (str): Passed in from browser Returns: str: suitable name """ # crude but good enough for now. 
m = re.search(r'([^:/\\]+)\.\w+$', user_filename) return m.group(1) if m else user_filename def _parsed_dict(v, op): import sirepo.template.srw std_options = Struct(**_list2dict(srwl_bl.srwl_uti_std_options())) beamline_elements = _get_beamline(op.arOpt, v.op_r) # Since the rotation angle cannot be passed from SRW object, we update the angle here: beamline_elements = _update_crystals(beamline_elements, v) def _default_value(parm, obj, std, def_val=None): if not hasattr(obj, parm): try: return getattr(std, parm) except Exception: if def_val is not None: return def_val else: return '' try: return getattr(obj, parm) except Exception: if def_val is not None: return def_val else: return '' # This dictionary will is used for both initial intensity report and for watch point: initialIntensityReport = pkcollections.Dict({ 'characteristic': v.si_type, 'fieldUnits': 1, 'polarization': v.si_pol, 'precision': v.w_prec, 'sampleFactor': 0, }) predefined_beams = sirepo.template.srw.get_predefined_beams() # Default electron beam: if (hasattr(v, 'source_type') and v.source_type == 'u') \ or (hasattr(v, 'ebm_nm') and (not hasattr(v, 'gbm_pen') or v.gbm_pen == 0)): source_type = 'u' if v.ebm_nms == 'Day1': v.ebm_nms = 'Day 1' full_beam_name = '{}{}'.format(v.ebm_nm, v.ebm_nms) if not full_beam_name: full_beam_name = 'Electron Beam' electronBeam = pkcollections.Dict() for b in predefined_beams: if b['name'] == full_beam_name: electronBeam = b electronBeam['beamSelector'] = full_beam_name break if not electronBeam: electronBeam = pkcollections.Dict({ 'beamSelector': full_beam_name, 'current': v.ebm_i, 'energy': _default_value('ebm_e', v, std_options, 3.0), 'energyDeviation': _default_value('ebm_de', v, std_options, 0.0), 'horizontalAlpha': _default_value('ebm_alphax', v, std_options, 0.0), 'horizontalBeta': _default_value('ebm_betax', v, std_options, 2.02), 'horizontalDispersion': _default_value('ebm_etax', v, std_options, 0.0), 'horizontalDispersionDerivative': _default_value('ebm_etaxp', v, std_options, 0.0), 'horizontalEmittance': _default_value('ebm_emx', v, std_options, 9e-10) * 1e9, 'horizontalPosition': v.ebm_x, 'isReadOnly': False, 'name': full_beam_name, 'rmsSpread': _default_value('ebm_ens', v, std_options, 0.00089), 'verticalAlpha': _default_value('ebm_alphay', v, std_options, 0.0), 'verticalBeta': _default_value('ebm_betay', v, std_options, 1.06), 'verticalDispersion': _default_value('ebm_etay', v, std_options, 0.0), 'verticalDispersionDerivative': _default_value('ebm_etayp', v, std_options, 0.0), 'verticalEmittance': _default_value('ebm_emy', v, std_options, 8e-12) * 1e9, 'verticalPosition': v.ebm_y, }) undulator = pkcollections.Dict({ 'horizontalAmplitude': _default_value('und_bx', v, std_options, 0.0), 'horizontalInitialPhase': _default_value('und_phx', v, std_options, 0.0), 'horizontalSymmetry': str(int(_default_value('und_sx', v, std_options, 1.0))), 'length': _default_value('und_len', v, std_options, 1.5), 'longitudinalPosition': _default_value('und_zc', v, std_options, 1.305), 'period': _default_value('und_per', v, std_options, 0.021) * 1e3, 'verticalAmplitude': _default_value('und_by', v, std_options, 0.88770981) if hasattr(v, 'und_by') else _default_value('und_b', v, std_options, 0.88770981), 'verticalInitialPhase': _default_value('und_phy', v, std_options, 0.0), 'verticalSymmetry': str(int(_default_value('und_sy', v, std_options, -1))), }) gaussianBeam = pkcollections.Dict({ 'energyPerPulse': None, 'polarization': 1, 'rmsPulseDuration': None, 'rmsSizeX': None, 'rmsSizeY': None, 
'waistAngleX': None, 'waistAngleY': None, 'waistX': None, 'waistY': None, 'waistZ': None, }) else: source_type = 'g' electronBeam = pkcollections.Dict() default_ebeam_name = 'NSLS-II Low Beta Final' for beam in predefined_beams: if beam['name'] == default_ebeam_name: electronBeam = beam electronBeam['beamSelector'] = default_ebeam_name break if not electronBeam: raise ValueError('Electron beam is not set during import') undulator = pkcollections.Dict({ "horizontalAmplitude": "0", "horizontalInitialPhase": 0, "horizontalSymmetry": 1, "length": 3, "longitudinalPosition": 0, "period": "20", "undulatorParameter": 1.65776086, "verticalAmplitude": "0.88770981", "verticalInitialPhase": 0, "verticalSymmetry": -1, }) gaussianBeam = pkcollections.Dict({ 'energyPerPulse': _default_value('gbm_pen', v, std_options), 'polarization': _default_value('gbm_pol', v, std_options), 'rmsPulseDuration': _default_value('gbm_st', v, std_options) * 1e12, 'rmsSizeX': _default_value('gbm_sx', v, std_options) * 1e6, 'rmsSizeY': _default_value('gbm_sy', v, std_options) * 1e6, 'waistAngleX': _default_value('gbm_xp', v, std_options), 'waistAngleY': _default_value('gbm_yp', v, std_options), 'waistX': _default_value('gbm_x', v, std_options), 'waistY': _default_value('gbm_y', v, std_options), 'waistZ': _default_value('gbm_z', v, std_options), }) python_dict = pkcollections.Dict({ 'models': pkcollections.Dict({ 'beamline': beamline_elements, 'electronBeam': electronBeam, 'electronBeams': [], 'beamline3DReport': pkcollections.Dict({}), 'fluxReport': pkcollections.Dict({ 'azimuthalPrecision': v.sm_pra, 'distanceFromSource': v.op_r, 'finalEnergy': v.sm_ef, 'fluxType': v.sm_type, 'horizontalApertureSize': v.sm_rx * 1e3, 'horizontalPosition': v.sm_x, 'initialEnergy': v.sm_ei, 'longitudinalPrecision': v.sm_prl, 'photonEnergyPointCount': v.sm_ne, 'polarization': v.sm_pol, 'verticalApertureSize': v.sm_ry * 1e3, 'verticalPosition': v.sm_y, }), 'initialIntensityReport': initialIntensityReport, 'intensityReport': pkcollections.Dict({ 'distanceFromSource': v.op_r, 'fieldUnits': 1, 'finalEnergy': v.ss_ef, 'horizontalPosition': v.ss_x, 'initialEnergy': v.ss_ei, 'photonEnergyPointCount': v.ss_ne, 'polarization': v.ss_pol, 'precision': v.ss_prec, 'verticalPosition': v.ss_y, }), 'multiElectronAnimation': pkcollections.Dict({ 'horizontalPosition': 0, 'horizontalRange': v.w_rx * 1e3, 'stokesParameter': '0', 'verticalPosition': 0, 'verticalRange': v.w_ry * 1e3, }), 'multipole': pkcollections.Dict({ 'distribution': 'n', 'field': 0, 'length': 0, 'order': 1, }), 'postPropagation': op.arProp[-1], 'powerDensityReport': pkcollections.Dict({ 'distanceFromSource': v.op_r, 'horizontalPointCount': v.pw_nx, 'horizontalPosition': v.pw_x, 'horizontalRange': v.pw_rx * 1e3, 'method': v.pw_meth, 'precision': v.pw_pr, 'verticalPointCount': v.pw_ny, 'verticalPosition': v.pw_y, 'verticalRange': v.pw_ry * 1e3, }), 'propagation': _get_propagation(op), 'simulation': pkcollections.Dict({ 'horizontalPointCount': v.w_nx, 'horizontalPosition': v.w_x, 'horizontalRange': v.w_rx * 1e3, 'isExample': 0, 'name': '', 'photonEnergy': v.w_e, 'sampleFactor': v.w_smpf, 'samplingMethod': 1, 'simulationId': '', 'sourceType': source_type, 'verticalPointCount': v.w_ny, 'verticalPosition': v.w_y, 'verticalRange': v.w_ry * 1e3, }), 'sourceIntensityReport': pkcollections.Dict({ 'characteristic': v.si_type, # 0, 'distanceFromSource': v.op_r, 'fieldUnits': 1, 'polarization': v.si_pol, }), 'undulator': undulator, 'gaussianBeam': gaussianBeam, }), 'simulationType': 'srw', 'version': '', }) # 
Format the key name to be consistent with Sirepo: for i in range(len(beamline_elements)): if beamline_elements[i]['type'] == 'watch': idx = beamline_elements[i]['id'] python_dict['models']['watchpointReport{}'.format(idx)] = initialIntensityReport return python_dict def _patch_mirror_profile(code, mirror_file='mirror_1d.dat'): """Patch for the mirror profile for the exported .py file from Sirepo""" import sirepo.template.srw # old format mirror names var_names = ['Cryst', 'ElMirror', 'Mirror', 'SphMirror', 'TorMirror'] code_list = code.split('\n') for var_name in var_names: if var_name in ['Mirror']: final_mirror_file = '"{}"'.format(_SIM_DATA.lib_file_abspath(mirror_file)) else: final_mirror_file = None var_name = 'ifn' + var_name for i in range(len(code_list)): if re.search(r'^(\s*)' + var_name + r'(\d*)(\s*)=(\s*)(.*\.dat\w*)(\s*)', code_list[i]): full_var_name = code_list[i].strip().split('=')[0].strip() code_list[i] = code_list[i].replace( full_var_name, '{} = {} # '.format(full_var_name, final_mirror_file) ) code = '\n'.join(code_list) return code def _update_crystals(data, v): """Update rotation angle from the parameters value. Args: data: list of beamline elements from get_beamline() function. v: object containing all variables. Returns: data: updated list. """ for i in range(len(data)): if data[i]['type'] == 'crystal': try: # get crystal # crystal_id = int(data[i]['title'].replace('Crystal', '')) except Exception: crystal_id = 1 try: # update rotation angle data[i]['rotationAngle'] = getattr(v, 'op_DCM_ac{}'.format(crystal_id)) except Exception: pass if not data[i]['energy']: try: # update energy if an old srwlib.py is used data[i]['energy'] = v.op_DCM_e except Exception: data[i]['energy'] = v.w_e return data
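
# A hypothetical driver for the parser above (the file names, the scratch
# path, and the helper name are made up for illustration): import_python()
# takes the text of an exported SRW .py file plus a scratch directory it may
# chdir into while parsing, and returns the parsed Sirepo model tree.
def _example_import(py_path='srw_beamline_export.py',
                    json_path='srw_beamline_export.json',
                    tmp_dir='/tmp/srw-import'):
    """Illustrative only: parse an exported SRW .py file and dump it as JSON."""
    import json
    code = pkio.read_text(py_path)
    # tmp_dir is assumed to exist already.
    data = import_python(code, tmp_dir=tmp_dir, user_filename=py_path)
    # `data` is a pkcollections.Dict with models.simulation, models.beamline, ...
    with open(json_path, 'w') as fh:
        json.dump(data, fh, indent=4, default=str)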
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Random code generation for testing/fuzzing.""" # pylint: disable=invalid-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import random import string import gast import numpy as np from tensorflow.contrib.autograph.pyct import templates class NodeSampler(object): sample_map = None def sample(self): nodes, magnitudes = zip(*self.sample_map.items()) return np.random.choice( nodes, p=np.array(magnitudes, dtype='float32') / np.sum(magnitudes)) class StatementSampler(NodeSampler): sample_map = dict(( (gast.Assign, 10), (gast.Print, 1), (gast.If, 2), (gast.While, 2), (gast.For, 0), )) class ExpressionSampler(NodeSampler): sample_map = dict(( (gast.UnaryOp, 1), (gast.BinOp, 8), (gast.Name, 1), (gast.Call, 0), )) class CompareSampler(NodeSampler): sample_map = dict(( (gast.Eq, 1), (gast.NotEq, 1), (gast.Lt, 1), (gast.LtE, 1), (gast.Gt, 1), (gast.GtE, 1), (gast.Is, 1), (gast.IsNot, 1), )) class BinaryOpSampler(NodeSampler): sample_map = dict(( (gast.Add, 1), (gast.Sub, 1), (gast.Mult, 1), (gast.Div, 1), (gast.FloorDiv, 1), (gast.Mod, 1), (gast.Pow, 1), )) class UnaryOpSampler(NodeSampler): sample_map = dict(((gast.USub, 1), (gast.UAdd, 0))) class NameSampler(NodeSampler): sample_map = dict(( ('new', 1), ('existing', 1), )) N_CONTROLFLOW_STATEMENTS = 10 N_FUNCTIONDEF_STATEMENTS = 10 class CodeGenerator(object): """Generate random syntactically-valid Python ASTs.""" def __init__(self, max_depth=3, depth=0): self.max_depth = max_depth self.depth = depth def generate_statement(self): """Generate a statement node, dispatching to the correct class method.""" desired_node = StatementSampler().sample() self.depth += 1 # Enforce some constraints on generating statements. # E.g., if statements need at least 3 readable variables. # If we fail to satisfy our constraints, draw another sample. if desired_node in (gast.While, gast.For, gast.If): if self.depth > self.max_depth: return self.generate_statement() # Go get the generator method and run it method = 'generate_' + desired_node.__name__ visitor = getattr(self, method) node = visitor() self.depth -= 1 return node def sample_node_list(self, low, high, generator): """Generate a list of statements of random length. Args: low: Fewest number of statements to generate. high: Highest number of statements to generate. generator: Function to call to generate nodes. Returns: A list of statements. """ statements = [] for _ in range(np.random.randint(low, high)): statements.append(generator()) return statements def generate_Name(self, ctx=gast.Load()): variable_name = '_' + ''.join( random.choice(string.ascii_lowercase) for _ in range(4)) return gast.Name(variable_name, ctx=ctx, annotation=None) def generate_BinOp(self): # TODO(alexbw): convert to generate_expression when we get to limit # expression depth. 
op = BinaryOpSampler().sample()() return gast.BinOp(self.generate_Name(), op, self.generate_Name()) def generate_Compare(self): op = CompareSampler().sample()() return gast.Compare(self.generate_Name(), [op], [self.generate_Name()]) def generate_UnaryOp(self): operand = self.generate_Name() op = UnaryOpSampler().sample()() return gast.UnaryOp(op, operand) def generate_expression(self): desired_node = ExpressionSampler().sample() # Go get the generator method and run it method = 'generate_' + desired_node.__name__ generator = getattr(self, method) return generator() def generate_Assign(self): """Generate an Assign node.""" # Generate left-hand side target_node = self.generate_Name(gast.Store()) # Generate right-hand side value_node = self.generate_expression() # Put it all together node = gast.Assign(targets=[target_node], value=value_node) return node def generate_If(self): """Generate an If node.""" test = self.generate_Compare() # Generate true branch statements body = self.sample_node_list( low=1, high=N_CONTROLFLOW_STATEMENTS // 2, generator=self.generate_statement) # Generate false branch statements orelse = self.sample_node_list( low=1, high=N_CONTROLFLOW_STATEMENTS // 2, generator=self.generate_statement) node = gast.If(test, body, orelse) return node def generate_While(self): """Generate a While node.""" test = self.generate_Compare() body = self.sample_node_list( low=1, high=N_CONTROLFLOW_STATEMENTS, generator=self.generate_statement) orelse = [] # not generating else statements node = gast.While(test, body, orelse) return node def generate_Call(self): raise NotImplementedError def generate_Return(self): return gast.Return(self.generate_expression()) def generate_Print(self): return templates.replace('print(x)', x=self.generate_expression())[0] def generate_FunctionDef(self): """Generate a FunctionDef node.""" # Generate the arguments, register them as available arg_vars = self.sample_node_list( low=2, high=10, generator=lambda: self.generate_Name(gast.Param())) args = gast.arguments(arg_vars, None, [], [], None, []) # Generate the function body body = self.sample_node_list( low=1, high=N_FUNCTIONDEF_STATEMENTS, generator=self.generate_statement) body.append(self.generate_Return()) fn_name = self.generate_Name().id node = gast.FunctionDef(fn_name, args, body, (), None) return node def generate_random_functiondef(): return CodeGenerator().generate_FunctionDef()
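
# Usage sketch (an illustration, not part of the module): drive the generator
# directly and inspect the resulting gast nodes.  Converting the AST back to
# source text (e.g. with astor) is left out here.
def _example_fuzz(n=3, max_depth=4):
  """Hypothetical helper that samples a few random FunctionDef nodes."""
  for _ in range(n):
    fn = CodeGenerator(max_depth=max_depth).generate_FunctionDef()
    # Each result is a gast.FunctionDef with a random name and body.
    print(fn.name, '->', len(fn.body), 'body statements')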
from __future__ import division, absolute_import import time import types from itertools import groupby import six import numpy as np from neupy.utils import preformat_value, AttributeKeyDict from neupy.helpers import table from neupy.core.base import BaseSkeleton from neupy.core.properties import (BoundedProperty, NumberProperty, Property) from .summary_info import SummaryTable, InlineSummary from .utils import (iter_until_converge, shuffle, normalize_error, StopNetworkTraining) __all__ = ('BaseNetwork',) def show_network_options(network, highlight_options=None): """ Display all available parameters options for Neural Network. Parameters ---------- network : object Neural network instance. highlight_options : list List of enabled options. In that case all options from that list would be marked with a green color. """ available_classes = [cls.__name__ for cls in network.__class__.__mro__] logs = network.logs if highlight_options is None: highlight_options = {} def group_by_class_name(value): _, option = value option_priority = -available_classes.index(option.class_name) return option_priority, option.class_name grouped_options = groupby( sorted(network.options.items(), key=group_by_class_name), group_by_class_name, ) logs.title("Main information") logs.message("ALGORITHM", network.class_name()) logs.newline() for (_, class_name), options in grouped_options: if not options: continue logs.write("{}:".format(class_name)) for key, data in sorted(options): if key in highlight_options: msg_color = 'green' value = highlight_options[key] else: msg_color = 'gray' value = data.value formated_value = preformat_value(value) msg_text = "{} = {}".format(key, formated_value) logs.message("OPTION", msg_text, color=msg_color) logs.newline() def logging_info_about_the_data(network, input_train, input_test): logs = network.logs n_train_samples = input_train.shape[0] train_feature_shape = input_train.shape[1:] logs.title("Start training") logs.message("TRAIN DATA", "{} samples, feature shape: {}" "".format(n_train_samples, train_feature_shape)) if input_test is not None: n_test_samples = input_test.shape[0] test_feature_shape = input_test.shape[1:] logs.message("TEST DATA", "{} samples, feature shape: {}" "".format(n_test_samples, test_feature_shape)) if train_feature_shape != test_feature_shape: raise ValueError("Train and test samples should have the " "same feature shape.") def logging_info_about_training(network, epochs, epsilon): logs = network.logs if epsilon is None: logs.message("TRAINING", "Total epochs: {}".format(epochs)) else: logs.message("TRAINING", "Epsilon: {}, Max epochs: {}" "".format(epsilon, epochs)) def parse_show_epoch_property(network, n_epochs, epsilon=None): show_epoch = network.show_epoch if isinstance(show_epoch, int): return show_epoch if epsilon is not None and isinstance(show_epoch, six.string_types): network.logs.warning("Can't use `show_epoch` value in converging " "mode. 
Set up `show_epoch` property equal to 1") return 1 number_end_position = show_epoch.index('time') # Ignore grammar mistakes like `2 time`, this error could be # really annoying n_epochs_to_check = int(show_epoch[:number_end_position].strip()) if n_epochs <= n_epochs_to_check: return 1 return int(round(n_epochs / n_epochs_to_check)) def create_training_epochs_iterator(network, epochs, epsilon=None): if epsilon is not None: return iter_until_converge(network, epsilon, max_epochs=epochs) next_epoch = network.last_epoch + 1 return range(next_epoch, next_epoch + epochs) class ShowEpochProperty(BoundedProperty): """ Class helps validate specific syntax for `show_epoch` property from ``BaseNetwork`` class. Parameters ---------- {BoundedProperty.minval} {BoundedProperty.maxval} {BaseProperty.default} {BaseProperty.required} """ expected_type = tuple([int] + [six.string_types]) def validate(self, value): if not isinstance(value, six.string_types): if value < 1: raise ValueError("Property `{}` value should be integer " "greater than zero or string. See the " "documentation for more information." "".format(self.name)) return if 'time' not in value: raise ValueError("`{}` value has invalid string format." "".format(self.name)) valid_endings = ('times', 'time') number_end_position = value.index('time') number_part = value[:number_end_position].strip() if not value.endswith(valid_endings) or not number_part.isdigit(): valid_endings_formated = ', '.join(valid_endings) raise ValueError( "Property `{}` in string format should be a positive number " "with one of those endings: {}. For example: `10 times`." "".format(self.name, valid_endings_formated) ) if int(number_part) < 1: raise ValueError("Part that related to the number in `{}` " "property should be an integer greater or " "equal to one.".format(self.name)) def is_valid_error_value(value): """ Checks that error value has valid type. Parameters ---------- value : object Returns ------- bool """ return value is not None and not np.all(np.isnan(value)) class ErrorHistoryList(list): """ Wrapper around the built-in list class that adds a few additional methods. """ def last(self): """ Returns last element if list is not empty, ``None`` otherwise. """ if self and is_valid_error_value(self[-1]): return normalize_error(self[-1]) def previous(self): """ Returns last element if list is not empty, ``None`` otherwise. """ if len(self) >= 2 and is_valid_error_value(self[-2]): return normalize_error(self[-2]) def normalized(self): """ Normalize list that contains error outputs. Returns ------- list Return the same list with normalized values if there where some problems. """ if not self or isinstance(self[0], float): return self normalized_errors = map(normalize_error, self) return list(normalized_errors) class BaseNetwork(BaseSkeleton): """ Base class for Neural Network algorithms. Parameters ---------- step : float Learning rate, defaults to ``0.1``. show_epoch : int or str This property controls how often the network will display information about training. There are two main syntaxes for this property. You can describe it as positive integer number and it will describe how offen would you like to see summary output in terminal. For instance, number `100` mean that network will show you summary in 100, 200, 300 ... epochs. String value should be in a specific format. It should contain the number of times that the output will be displayed in the terminal. The second part is just a syntax word ``time`` or ``times`` just to make text readable. 
For instance, value ``'2 times'`` mean that the network will show output twice with approximately equal period of epochs and one additional output would be after the finall epoch. Defaults to ``1``. shuffle_data : bool If it's ``True`` class shuffles all your training data before training your network, defaults to ``True``. epoch_end_signal : function Calls this function when train epoch finishes. train_end_signal : function Calls this function when train process finishes. {Verbose.verbose} Attributes ---------- errors : ErrorHistoryList Contains list of training errors. This object has the same properties as list and in addition there are three additional useful methods: `last`, `previous` and `normalized`. train_errors : ErrorHistoryList Alias to `errors` attribute. validation_errors : ErrorHistoryList The same as `errors` attribute, but it contains only validation errors. last_epoch : int Value equals to the last trained epoch. After initialization it is equal to ``0``. """ step = NumberProperty(default=0.1, minval=0) show_epoch = ShowEpochProperty(minval=1, default=1) shuffle_data = Property(default=False, expected_type=bool) epoch_end_signal = Property(expected_type=types.FunctionType) train_end_signal = Property(expected_type=types.FunctionType) def __init__(self, *args, **options): self.errors = self.train_errors = ErrorHistoryList() self.validation_errors = ErrorHistoryList() self.training = AttributeKeyDict() self.last_epoch = 0 super(BaseNetwork, self).__init__(*args, **options) self.init_properties() if self.verbose: show_network_options(self, highlight_options=options) def init_properties(self): """ Setup default values before populate the options. """ def predict(self, input_data): """ Return prediction results for the input data. Output result includes post-processing step related to the final layer that transforms output to convenient format for end-use. Parameters ---------- input_data : array-like Returns ------- array-like """ def on_epoch_start_update(self, epoch): """ Function would be trigger before run all training procedure related to the current epoch. Parameters ---------- epoch : int Current epoch number. """ self.last_epoch = epoch def train_epoch(self, input_train, target_train=None): raise NotImplementedError() def prediction_error(self, input_test, target_test): raise NotImplementedError() def train(self, input_train, target_train=None, input_test=None, target_test=None, epochs=100, epsilon=None, summary_type='table'): """ Method train neural network. Parameters ---------- input_train : array-like target_train : array-like or Npne input_test : array-like or None target_test : array-like or None epochs : int Defaults to `100`. epsilon : float or None Defaults to ``None``. 
""" show_epoch = self.show_epoch logs = self.logs training = self.training = AttributeKeyDict() if epochs <= 0: raise ValueError("Number of epochs needs to be greater than 0.") if epsilon is not None and epochs <= 2: raise ValueError("Network should train at teast 3 epochs before " "check the difference between errors") if summary_type == 'table': logging_info_about_the_data(self, input_train, input_test) logging_info_about_training(self, epochs, epsilon) logs.newline() summary = SummaryTable( table_builder=table.TableBuilder( table.Column(name="Epoch #"), table.NumberColumn(name="Train err"), table.NumberColumn(name="Valid err"), table.TimeColumn(name="Time", width=10), stdout=logs.write ), network=self, delay_limit=1., delay_history_length=10, ) elif summary_type == 'inline': summary = InlineSummary(network=self) else: raise ValueError("`{}` is unknown summary type" "".format(summary_type)) iterepochs = create_training_epochs_iterator(self, epochs, epsilon) show_epoch = parse_show_epoch_property(self, epochs, epsilon) training.show_epoch = show_epoch # Storring attributes and methods in local variables we prevent # useless __getattr__ call a lot of times in each loop. # This variables speed up loop in case on huge amount of # iterations. training_errors = self.errors validation_errors = self.validation_errors shuffle_data = self.shuffle_data train_epoch = self.train_epoch epoch_end_signal = self.epoch_end_signal train_end_signal = self.train_end_signal on_epoch_start_update = self.on_epoch_start_update is_first_iteration = True can_compute_validation_error = (input_test is not None) last_epoch_shown = 0 with logs.disable_user_input(): for epoch in iterepochs: validation_error = np.nan epoch_start_time = time.time() on_epoch_start_update(epoch) if shuffle_data: input_train, target_train = shuffle(input_train, target_train) try: train_error = train_epoch(input_train, target_train) if can_compute_validation_error: validation_error = self.prediction_error(input_test, target_test) training_errors.append(train_error) validation_errors.append(validation_error) epoch_finish_time = time.time() training.epoch_time = epoch_finish_time - epoch_start_time if epoch % training.show_epoch == 0 or is_first_iteration: summary.show_last() last_epoch_shown = epoch if epoch_end_signal is not None: epoch_end_signal(self) is_first_iteration = False except StopNetworkTraining as err: # TODO: This notification breaks table view in terminal. # I need to show it in a different way. logs.message("TRAIN", "Epoch #{} stopped. {}" "".format(epoch, str(err))) break if epoch != last_epoch_shown: summary.show_last() if train_end_signal is not None: train_end_signal(self) summary.finish() logs.newline() logs.message("TRAIN", "Trainig finished")
#!/usr/bin/env python from subprocess import Popen, PIPE from threading import Thread from os import path, mkdir, getcwd, remove, getenv, environ, pathsep from getpass import getuser from random import choice from string import hexdigits from argparse import ArgumentParser from shutil import rmtree import atexit import sys # Parse arguments parser = ArgumentParser(description="Start a quick server for PHP projects with an index file. This uses nginx and php-fpm.") parser.add_argument('-v', '--verbose', action='store_true', help="Display output from php-fpm and nginx") parser.add_argument('-p', '--port', metavar='port', type=int, default=8080, help='Port number (default: 8080)') parser.add_argument('-i', '--interface', metavar='address', type=str, default='*', help='Interface to listen to (default: *)') parser.add_argument('-l', '--log', action='store_true', help="If set, show error log output") rootgroup = parser.add_mutually_exclusive_group() rootgroup.add_argument('-r', '--root', metavar='dir', type=str, help="Web root directory (default: .)") rootgroup.add_argument('-b', '--base-index', action='store_true', help="Assume the directory the index file is in is the webroot") parser.add_argument('--handlers', metavar='n', type=int, default=6, help="Number of PHP handlers (default: 6)") parser.add_argument('--workers', metavar='n', type=int, default=2, help="Number of nginx workers (default: 2)") parser.add_argument('--restart-after', metavar='n', type=int, default=0, help="Restart php-fpm processes after this amount of requests, 0 means no restarts (default: 0)") parser.add_argument('--php-fpm-bin', metavar='path', type=str, default='php-fpm', help="Location of php-fpm binary (will search path if required) (default: php-fpm)") parser.add_argument('--nginx-bin', metavar='path', type=str, default='nginx', help="Location of nginx binary (will search path if required) (defaults: nginx)") parser.add_argument('--nginx-extra-config', metavar='file', type=str, default='.nginx', help="File containing extra nginx directives (default: .nginx)") parser.add_argument('--php-fpm-extra-config', metavar='file', type=str, default='.php-fpm', help="File containing extra php-fpm directives (default: .php-fpm)") parser.add_argument('-n', '--no-php-fpm', action='store_true', help="Disable php-fpm") parser.add_argument('index', metavar='index_file', type=str, nargs='?', help="The root index file (default: index.php)") args = parser.parse_args() # Initialize the options dict options = {} # Generic config options['HANDLERS'] = args.handlers options['WORKERS'] = args.workers options['RESTART_AFTER'] = args.restart_after # Set root to default if it isn't set, and make it an absolute path args.root = getcwd() if args.root == None else path.abspath(args.root) # If we don't have an index specified, try finding one if args.index == None: defaults = environ.get('QS_INDEX_PATH') if defaults != None: for option in defaults.split(pathsep): if path.isfile(option): args.base_index = True args.index = option break # Still nothing found? 
Assume the default if args.index == None: args.index = 'index.php' # Base directory extraction if args.base_index: root, indexfile = path.split(args.index) if root == '' or root == None: root = getcwd() if not path.isabs(root): root = path.abspath(root) options['LOCATION'] = root options['INDEX'] = indexfile else: options['LOCATION'] = args.root options['INDEX'] = args.index # Other options options['INTERFACE'] = args.interface options['PORT'] = args.port options['TMP_DIR'] = '/tmp' options['DEBUG'] = args.verbose options['SHOW_LOGS'] = args.log options['PHP_FPM_ENABLED'] = not args.no_php_fpm options['NGINX_EXTRA_DIRECTIVES_FILE'] = args.nginx_extra_config options['PHPFPM_EXTRA_DIRECTIVES_FILE'] = args.php_fpm_extra_config # Detailed config options['MAX_CLIENT_BODY_SIZE'] = '100M' options['ERROR_REPORTING'] = 'E_ALL' options['DISPLAY_ERRORS'] = 'on' options['DATE_TIMEZONE'] = 'Europe/Amsterdam' # Paths options['NGINX_CMD'] = args.nginx_bin options['PHPFPM_CMD'] = args.php_fpm_bin # Username if getuser() == 'root': options['NGINX_USER'] = getuser() options['PHP_USER'] = getenv('SUDO_USER') options['USER'] = 'user = {0}'.format(options['PHP_USER']) else: options['NGINX_USER'] = getuser() options['PHP_USER'] = getuser() options['USER'] = '' # Random file names for this instance options['RAND'] = ''.join(choice(hexdigits[:16]) for _ in xrange(6)) # PHP-FPM configuration options['PHPFPM_SOCKET_FILE'] = path.join(options['TMP_DIR'], 'php-fpm-' + options['RAND'] + '.socket') options['PHPFPM_CONFIG_FILE'] = path.join(options['TMP_DIR'], 'php-fpm-config-' + options['RAND'] + '.ini') options['PHPFPM_PID_FILE'] = path.join(options['TMP_DIR'], 'php-fpm-pid-' + options['RAND'] + '.pid') options['PHPFPM_ERROR_LOG'] = path.join(options['TMP_DIR'], 'php-fpm-error-' + options['RAND'] + '.log') options['PHPFPM_EXTRA_DIRECTIVES'] = '' if path.isfile(options['PHPFPM_EXTRA_DIRECTIVES_FILE']): with open(options['PHPFPM_EXTRA_DIRECTIVES_FILE']) as fextrafpm: options['PHPFPM_EXTRA_DIRECTIVES'] = fextrafpm.read() options['PHPFPM_CONFIG'] = """ [global] error_log={PHPFPM_ERROR_LOG} daemonize=no [www] {USER} listen = {PHPFPM_SOCKET_FILE} pm = static pm.max_children = {HANDLERS} pm.max_requests = {RESTART_AFTER} php_value[upload_max_filesize] = {MAX_CLIENT_BODY_SIZE} php_value[post_max_size] = {MAX_CLIENT_BODY_SIZE} php_value[error_reporting] = {ERROR_REPORTING} php_flag[display_errors] = {DISPLAY_ERRORS} php_value[date.timezone] = {DATE_TIMEZONE} php_flag[short_open_tag] = off php_value[xdebug.max_nesting_level] = 250 php_flag[xdebug.remote_enable] = on php_flag[xdebug.remote_connect_back] = on php_flag[xdebug.coverage_enable] = off php_admin_value[error_log] = {PHPFPM_ERROR_LOG} php_admin_flag[log_errors] = on php_admin_value[max_input_vars] = 50000 php_admin_value[realpath_cache_size] = 4096k {PHPFPM_EXTRA_DIRECTIVES} """ options['PHPFPM_CONFIG'] = options['PHPFPM_CONFIG'].format(**options) # Nginx configuration options['NGINX_CONFIG_FILE'] = path.join(options['TMP_DIR'], 'nginx-' + options['RAND'] + '.conf') options['NGINX_PID_FILE'] = path.join(options['TMP_DIR'], 'nginx-pid-' + options['RAND'] + '.pid') options['NGINX_TMP_DIR'] = path.join(options['TMP_DIR'], 'nginx-tmp-' + options['RAND']) options['NGINX_ERROR_LOG'] = path.join(options['TMP_DIR'], 'nginx-error-' + options['RAND'] + '.log') options['NGINX_CLIENT_TMP'] = path.join(options['NGINX_TMP_DIR'], 'client_temp') options['NGINX_PROXY_TMP'] = path.join(options['NGINX_TMP_DIR'], 'proxy_temp') options['NGINX_FASTCGI_TMP'] = 
path.join(options['NGINX_TMP_DIR'], 'fastcgi_temp') options['NGINX_UWSGI_TMP'] = path.join(options['NGINX_TMP_DIR'], 'uwsgi_temp') options['NGINX_SCGI_TMP'] = path.join(options['NGINX_TMP_DIR'], 'scgi_temp') options['NGINX_EXTRA_DIRECTIVES'] = '' if path.isfile(options['NGINX_EXTRA_DIRECTIVES_FILE']): with open(options['NGINX_EXTRA_DIRECTIVES_FILE']) as fextranginx: options['NGINX_EXTRA_DIRECTIVES'] = fextranginx.read() options['NGINX_CONFIG'] = """ error_log {NGINX_ERROR_LOG} warn; pid {NGINX_PID_FILE}; worker_processes {WORKERS}; events {{ worker_connections 1024; }} daemon off; master_process off; http {{ client_body_temp_path {NGINX_CLIENT_TMP} 1 2 3; proxy_temp_path {NGINX_PROXY_TMP} 1 2 3; fastcgi_temp_path {NGINX_FASTCGI_TMP} 1 2 3; uwsgi_temp_path {NGINX_UWSGI_TMP} 1 2 3; scgi_temp_path {NGINX_SCGI_TMP} 1 2 3; access_log off; types {{ text/html html htm shtml; application/xhtml+xml xhtml; text/plain txt; text/xml xml; text/css css less sass scss; application/x-javascript js; text/x-yaml yaml yml; image/gif gif; image/jpeg jpeg jpg; image/png png; application/atom+xml atom; application/rss+xml rss; text/xsl xsl xslt; text/mathml mml; image/tiff tif tiff; image/x-icon ico; image/x-ms-bmp bmp; image/svg+xml svg svgz; image/webp webp; application/x-shockwave-flash swf; application/pdf pdf; audio/midi mid midi kar; audio/mpeg mp3; audio/ogg ogg; audio/x-m4a m4a; audio/wav wav; video/mp4 mp4; video/mpeg mpeg mpg; video/webm webm; video/x-flv flv; video/x-m4v m4v; video/x-msvideo avi; application/x-font-opentype otf; application/x-font-truetype ttf; application/font-woff woff; apllication/vnd.ms-fontobject eot; }} default_type application/octet-stream; sendfile on; keepalive_timeout 65; client_max_body_size {MAX_CLIENT_BODY_SIZE}; server {{ listen {INTERFACE}:{PORT}; server_name localhost; root {LOCATION}; index {INDEX}; location / {{ try_files $uri $uri/ /{INDEX}?$query_string; }} location ~ \.php {{ fastcgi_pass unix:/{PHPFPM_SOCKET_FILE}; fastcgi_keep_conn on; fastcgi_split_path_info ^(.+\.php)(/.*)$; fastcgi_param QUERY_STRING $query_string; fastcgi_param REQUEST_METHOD $request_method; fastcgi_param CONTENT_TYPE $content_type; fastcgi_param CONTENT_LENGTH $content_length; fastcgi_param SCRIPT_NAME $fastcgi_script_name; fastcgi_param REQUEST_URI $request_uri; fastcgi_param DOCUMENT_URI $document_uri; fastcgi_param DOCUMENT_ROOT $document_root; fastcgi_param SERVER_PROTOCOL $server_protocol; fastcgi_param HTTPS $https if_not_empty; fastcgi_param GATEWAY_INTERFACE CGI/1.1; fastcgi_param SERVER_SOFTWARE nginx/$nginx_version; fastcgi_param REMOTE_ADDR $remote_addr; fastcgi_param REMOTE_PORT $remote_port; fastcgi_param SERVER_ADDR $server_addr; fastcgi_param SERVER_PORT $server_port; fastcgi_param SERVER_NAME $server_name; fastcgi_param REDIRECT_STATUS 200; fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; fastcgi_param APPLICATION_ENV development; fastcgi_buffers 16 32k; fastcgi_buffer_size 128k; }} {NGINX_EXTRA_DIRECTIVES} }} }} """ options['NGINX_CONFIG'] = options['NGINX_CONFIG'].format(**options) # Create nginx temporary directory and config mkdir(options['NGINX_TMP_DIR']) with open(options['NGINX_CONFIG_FILE'], 'w') as f: f.write(options['NGINX_CONFIG']) # Create php-fpm temporary config file with open(options['PHPFPM_CONFIG_FILE'], 'w') as f: f.write(options['PHPFPM_CONFIG']) # Create log files open(options['PHPFPM_ERROR_LOG'], 'a').close() open(options['NGINX_ERROR_LOG'], 'a').close() # PHP-FPM command options['PHPFPM_COMMAND'] = [ options['PHPFPM_CMD'], '-y', 
options['PHPFPM_CONFIG_FILE'], '-g', options['PHPFPM_PID_FILE'], ] # Nginx command options['NGINX_COMMAND'] = [ options['NGINX_CMD'], '-p', options['TMP_DIR'], '-c', options['NGINX_CONFIG_FILE'], '-q', ] # Let's get going try: with open('/dev/null', 'w') as devnull: if not options['PHP_FPM_ENABLED']: print("Note: php-fpm will not be started") print("Serving {0} ({1})".format(options['LOCATION'], options['INDEX'])) if getenv('SUDO_USER') != None: print("Nginx user: {0}; PHP user: {1}".format(options['NGINX_USER'], options['PHP_USER'])) if options['DEBUG']: stdoutstream = sys.stdout stderrstream = sys.stderr else: stdoutstream = devnull stderrstream = devnull if options['PHP_FPM_ENABLED']: phpfpm = Popen(options['PHPFPM_COMMAND'], stdout=stdoutstream, stderr=stderrstream) atexit.register(phpfpm.terminate) nginx = Popen(options['NGINX_COMMAND'], stdout=stdoutstream, stderr=stderrstream) atexit.register(nginx.terminate) print("Server running on {0}:{1}...".format(options['INTERFACE'], options['PORT'])) if options['SHOW_LOGS']: def enqueue_output(t, out): for line in iter(out.readline, b''): print '[{0}] {1}'.format(t, line.strip()) out.close() nginx_tail = Popen(['tail', '-f', options['NGINX_ERROR_LOG']], stdout=PIPE, stderr=stderrstream) atexit.register(nginx_tail.terminate) thread_nginx_tail = Thread(target=enqueue_output, args=('nginx', nginx_tail.stdout)) thread_nginx_tail.daemon = True thread_nginx_tail.start() if options['PHP_FPM_ENABLED']: phpfpm_tail = Popen(['tail', '-f', options['PHPFPM_ERROR_LOG']], stdout=PIPE, stderr=stderrstream) atexit.register(phpfpm_tail.terminate) thread_phpfpm_tail = Thread(target=enqueue_output, args=('php-fpm', phpfpm_tail.stdout)) thread_phpfpm_tail.daemon = True thread_phpfpm_tail.start() # Wait for the main command to exit if options['PHP_FPM_ENABLED']: phpfpm.wait() nginx.terminate() else: nginx.wait() # Shut down logging if options['SHOW_LOGS']: nginx_tail.terminate() if options['PHP_FPM_ENABLED']: phpfpm_tail.terminate() except (KeyboardInterrupt, SystemExit): if options['PHP_FPM_ENABLED']: phpfpm.terminate() nginx.terminate() # Shut down logging if options['SHOW_LOGS']: nginx_tail.terminate() if options['PHP_FPM_ENABLED']: phpfpm_tail.terminate() rmtree(options['NGINX_TMP_DIR']) remove(options['PHPFPM_CONFIG_FILE']) remove(options['NGINX_CONFIG_FILE']) remove(options['NGINX_ERROR_LOG']) remove(options['PHPFPM_ERROR_LOG'])
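
# Hedged sketch of the templating approach used above: every value lives in a
# flat options dict, and each config template is rendered in one pass with
# str.format(**options). The keys below are a small made-up subset.
from os import path
from random import choice
from string import hexdigits

opts = {
    'TMP_DIR': '/tmp',
    'WORKERS': 2,
    # Unique suffix so several instances can run side by side.
    'RAND': ''.join(choice(hexdigits[:16]) for _ in range(6)),
}
opts['NGINX_PID_FILE'] = path.join(opts['TMP_DIR'],
                                   'nginx-pid-' + opts['RAND'] + '.pid')

template = "pid {NGINX_PID_FILE};\nworker_processes {WORKERS};\n"
print(template.format(**opts))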
# Copyright 2010 Jacob Kaplan-Moss # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base utilities to build API operation managers and objects on top of. """ import abc import contextlib import hashlib import inspect import os import threading import json import six from . import exceptions from ecl.common.apiclient import base from ecl.common import cliutils Resource = base.Resource def getid(obj): """ Abstracts the common pattern of allowing both an object or an object's ID as a parameter when dealing with relationships. """ try: return obj.id except AttributeError: return obj class Manager(base.HookableMixin): """ Managers interact with a particular type of API (servers, flavors, images, etc.) and provide CRUD operations for them. """ resource_class = None cache_lock = threading.RLock() def __init__(self, api): self.api = api def _list(self, url, response_key, obj_class=None, body=None): if body: _resp, body = self.api.client.post(url, body=body) else: _resp, body = self.api.client.get(url) if obj_class is None: obj_class = self.resource_class data = body[response_key] # NOTE(ja): keystone returns values as list as {'values': [ ... ]} # unlike other services which just return the list... if isinstance(data, dict): try: data = data['values'] except KeyError: pass with self.completion_cache('human_id', obj_class, mode="w"): with self.completion_cache('uuid', obj_class, mode="w"): return [obj_class(self, res, loaded=True) for res in data if res] @contextlib.contextmanager def alternate_service_type(self, service_type): original_service_type = self.api.client.service_type self.api.client.service_type = service_type try: yield finally: self.api.client.service_type = original_service_type @contextlib.contextmanager def completion_cache(self, cache_type, obj_class, mode): """ The completion cache store items that can be used for bash autocompletion, like UUIDs or human-friendly IDs. A resource listing will clear and repopulate the cache. A resource create will append to the cache. Delete is not handled because listings are assumed to be performed often enough to keep the cache reasonably up-to-date. """ # NOTE(wryan): This lock protects read and write access to the # completion caches with self.cache_lock: base_dir = cliutils.env('BEARCLIENT_UUID_CACHE_DIR', default="~/.bearclient") # NOTE(sirp): Keep separate UUID caches for each username + # endpoint pair username = cliutils.env('OS_USERNAME', 'BEAR_USERNAME') url = cliutils.env('OS_URL', 'BEAR_URL') uniqifier = hashlib.md5(username.encode('utf-8') + url.encode('utf-8')).hexdigest() cache_dir = os.path.expanduser(os.path.join(base_dir, uniqifier)) try: os.makedirs(cache_dir, 0o755) except OSError: # NOTE(kiall): This is typically either permission denied while # attempting to create the directory, or the # directory already exists. Either way, don't # fail. 
pass resource = obj_class.__name__.lower() filename = "%s-%s-cache" % (resource, cache_type.replace('_', '-')) path = os.path.join(cache_dir, filename) cache_attr = "_%s_cache" % cache_type try: setattr(self, cache_attr, open(path, mode)) except IOError: # NOTE(kiall): This is typically a permission denied while # attempting to write the cache file. pass try: yield finally: cache = getattr(self, cache_attr, None) if cache: cache.close() delattr(self, cache_attr) def write_to_completion_cache(self, cache_type, val): cache = getattr(self, "_%s_cache" % cache_type, None) if cache: cache.write("%s\n" % val) def _get(self, url, response_key): _resp, body = self.api.client.get(url) return self.resource_class(self, body[response_key], loaded=True) def _create(self, url, body, response_key, return_raw=False, **kwargs): self.run_hooks('modify_body_for_create', body, **kwargs) _resp, body = self.api.client.post(url, body=body) # requirements from u1, # set admin_pass(only in response body of creation) to # return value of bearclient.create() try: admin_pass = None resp_content = getattr(_resp, '_content', '') if resp_content: resp_dict = json.loads(resp_content) admin_pass = resp_dict.get('server', {}).get('adminPass', '') except: admin_pass = None if return_raw: return body[response_key] with self.completion_cache('human_id', self.resource_class, mode="a"): with self.completion_cache('uuid', self.resource_class, mode="a"): rsc_cls = self.resource_class(self, body[response_key]) if admin_pass: setattr(rsc_cls, 'admin_pass', admin_pass) return rsc_cls def _delete(self, url): _resp, _body = self.api.client.delete(url) def _update(self, url, body, response_key=None, **kwargs): self.run_hooks('modify_body_for_update', body, **kwargs) _resp, body = self.api.client.put(url, body=body) if body: if response_key: return self.resource_class(self, body[response_key]) else: return self.resource_class(self, body) @six.add_metaclass(abc.ABCMeta) class ManagerWithFind(Manager): """ Like a `Manager`, but with additional `find()`/`findall()` methods. """ @abc.abstractmethod def list(self): pass def find(self, **kwargs): """ Find a single item with attributes matching ``**kwargs``. """ matches = self.findall(**kwargs) num_matches = len(matches) if num_matches == 0: msg = "No %s matching %s." % (self.resource_class.__name__, kwargs) raise exceptions.NotFound(404, msg) elif num_matches > 1: raise exceptions.NoUniqueMatch else: return matches[0] def findall(self, **kwargs): """ Find all items with attributes matching ``**kwargs``. """ found = [] searches = kwargs.items() detailed = True list_kwargs = {} list_argspec = inspect.getargspec(self.list) if 'detailed' in list_argspec.args: detailed = ("human_id" not in kwargs and "name" not in kwargs and "display_name" not in kwargs) list_kwargs['detailed'] = detailed if 'is_public' in list_argspec.args and 'is_public' in kwargs: is_public = kwargs['is_public'] list_kwargs['is_public'] = is_public if is_public is None: tmp_kwargs = kwargs.copy() del tmp_kwargs['is_public'] searches = tmp_kwargs.items() if 'search_opts' in list_argspec.args: # pass search_opts in to do server side based filtering. # TODO(jogo) not all search_opts support regex, find way to # identify when to use regex and when to use string matching. # volumes does not support regex while servers does. So when # doing findall on servers some client side filtering is still # needed. 
if "human_id" in kwargs: list_kwargs['search_opts'] = {"name": kwargs["human_id"]} elif "name" in kwargs: list_kwargs['search_opts'] = {"name": kwargs["name"]} elif "display_name" in kwargs: list_kwargs['search_opts'] = {"name": kwargs["display_name"]} if "all_tenants" in kwargs: all_tenants = kwargs['all_tenants'] list_kwargs['search_opts']['all_tenants'] = all_tenants searches = [(k, v) for k, v in searches if k != 'all_tenants'] listing = self.list(**list_kwargs) for obj in listing: try: if all(getattr(obj, attr) == value for (attr, value) in searches): if detailed: found.append(obj) else: found.append(self.get(obj.id)) except AttributeError: continue return found class BootingManagerWithFind(ManagerWithFind): """Like a `ManagerWithFind`, but has the ability to boot servers.""" def _parse_block_device_mapping(self, block_device_mapping): bdm = [] for device_name, mapping in six.iteritems(block_device_mapping): # # The mapping is in the format: # <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>] # bdm_dict = {'device_name': device_name} mapping_parts = mapping.split(':') source_id = mapping_parts[0] bdm_dict['uuid'] = source_id bdm_dict['boot_index'] = 0 if len(mapping_parts) == 1: bdm_dict['volume_id'] = source_id bdm_dict['source_type'] = 'volume' elif len(mapping_parts) > 1: source_type = mapping_parts[1] bdm_dict['source_type'] = source_type if source_type.startswith('snap'): bdm_dict['snapshot_id'] = source_id else: bdm_dict['volume_id'] = source_id if len(mapping_parts) > 2 and mapping_parts[2]: bdm_dict['volume_size'] = str(int(mapping_parts[2])) if len(mapping_parts) > 3: bdm_dict['delete_on_termination'] = mapping_parts[3] bdm.append(bdm_dict) return bdm
#!/usr/bin/env python # Copyright (C) 2015 Wayne Warren # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Manage JJB Configuration sources, defaults, and access. from collections import defaultdict import io import logging import os from six.moves import configparser, StringIO from six import PY2 from jenkins_jobs import builder from jenkins_jobs.errors import JJBConfigException from jenkins_jobs.errors import JenkinsJobsException __all__ = [ "JJBConfig" ] logger = logging.getLogger(__name__) DEFAULT_CONF = """ [job_builder] keep_descriptions=False ignore_cache=False recursive=False exclude=.* allow_duplicates=False allow_empty_variables=False [jenkins] url=http://localhost:8080/ query_plugins_info=True [hipchat] authtoken=dummy send-as=Jenkins """ CONFIG_REQUIRED_MESSAGE = ("A valid configuration file is required. " "No configuration file passed.") class JJBConfig(object): def __init__(self, config_filename=None, config_file_required=False): """ The JJBConfig class is intended to encapsulate and resolve priority between all sources of configuration for the JJB library. This allows the various sources of configuration to provide a consistent accessor interface regardless of where they are used. It also allows users of JJB-as-an-API to create minimally valid configuration and easily make minor modifications to default values without strictly adhering to the confusing setup (see the _setup method, the behavior of which largely lived in the cmd.execute method previously) necessary for the jenkins-jobs command line tool. :arg str config_filename: Name of configuration file on which to base this config object. :arg bool config_file_required: Allows users of the JJBConfig class to decide whether or not it's really necessary for a config file to be passed in when creating an instance. This has two effects on the behavior of JJBConfig initialization: * It determines whether or not we try "local" and "global" config files. * It determines whether or not failure to read some config file will raise an exception or simply print a warning message indicating that no config file was found. """ config_parser = self._init_defaults() global_conf = '/etc/jenkins_jobs/jenkins_jobs.ini' user_conf = os.path.join(os.path.expanduser('~'), '.config', 'jenkins_jobs', 'jenkins_jobs.ini') local_conf = os.path.join(os.path.dirname(__file__), 'jenkins_jobs.ini') conf = None if config_filename is not None: conf = config_filename else: if os.path.isfile(local_conf): conf = local_conf elif os.path.isfile(user_conf): conf = user_conf else: conf = global_conf if config_file_required and conf is None: raise JJBConfigException(CONFIG_REQUIRED_MESSAGE) config_fp = None if conf is not None: try: config_fp = self._read_config_file(conf) except JJBConfigException: if config_file_required: raise JJBConfigException(CONFIG_REQUIRED_MESSAGE) else: logger.warn("Config file, {0}, not found. 
Using default " "config values.".format(conf)) if config_fp is not None: if PY2: config_parser.readfp(config_fp) else: config_parser.read_file(config_fp) self.config_parser = config_parser self.ignore_cache = False self.flush_cache = False self.user = None self.password = None self.plugins_info = None self.timeout = builder._DEFAULT_TIMEOUT self.allow_empty_variables = None self.jenkins = defaultdict(None) self.builder = defaultdict(None) self.yamlparser = defaultdict(None) self.hipchat = defaultdict(None) self._setup() def _init_defaults(self): """ Initialize default configuration values using DEFAULT_CONF """ config = configparser.ConfigParser() # Load default config always if PY2: config.readfp(StringIO(DEFAULT_CONF)) else: config.read_file(StringIO(DEFAULT_CONF)) return config def _read_config_file(self, config_filename): """ Given path to configuration file, read it in as a ConfigParser object and return that object. """ if os.path.isfile(config_filename): self.__config_file = config_filename # remember file we read from logger.debug("Reading config from {0}".format(config_filename)) config_fp = io.open(config_filename, 'r', encoding='utf-8') else: raise JJBConfigException( "A valid configuration file is required. " "\n{0} is not valid.".format(config_filename)) return config_fp def _setup(self): config = self.config_parser logger.debug("Config: {0}".format(config)) # check the ignore_cache setting if config.has_option('jenkins', 'ignore_cache'): logging.warn("ignore_cache option should be moved to the " "[job_builder] section in the config file, the " "one specified in the [jenkins] section will be " "ignored in the future") self.ignore_cache = config.getboolean('jenkins', 'ignore_cache') elif config.has_option('job_builder', 'ignore_cache'): self.ignore_cache = config.getboolean('job_builder', 'ignore_cache') # check the flush_cache setting if config.has_option('job_builder', 'flush_cache'): self.flush_cache = config.getboolean('job_builder', 'flush_cache') # Jenkins supports access as an anonymous user, which can be used to # ensure read-only behaviour when querying the version of plugins # installed for test mode to generate XML output matching what will be # uploaded. To enable must pass 'None' as the value for user and # password to python-jenkins # # catching 'TypeError' is a workaround for python 2.6 interpolation # error # https://bugs.launchpad.net/openstack-ci/+bug/1259631 try: self.user = config.get('jenkins', 'user') except (TypeError, configparser.NoOptionError): pass try: self.password = config.get('jenkins', 'password') except (TypeError, configparser.NoOptionError): pass # None -- no timeout, blocking mode; same as setblocking(True) # 0.0 -- non-blocking mode; same as setblocking(False) <--- default # > 0 -- timeout mode; operations time out after timeout seconds # < 0 -- illegal; raises an exception # to retain the default must use # "timeout=jenkins_jobs.builder._DEFAULT_TIMEOUT" or not set timeout at # all. 
try: self.timeout = config.getfloat('jenkins', 'timeout') except (ValueError): raise JenkinsJobsException("Jenkins timeout config is invalid") except (TypeError, configparser.NoOptionError): pass if not config.getboolean("jenkins", "query_plugins_info"): logger.debug("Skipping plugin info retrieval") self.plugins_info = [] self.recursive = config.getboolean('job_builder', 'recursive') self.excludes = config.get('job_builder', 'exclude').split(os.pathsep) # The way we want to do things moving forward: self.jenkins['url'] = config.get('jenkins', 'url') self.jenkins['user'] = self.user self.jenkins['password'] = self.password self.jenkins['timeout'] = self.timeout self.builder['ignore_cache'] = self.ignore_cache self.builder['flush_cache'] = self.flush_cache self.builder['plugins_info'] = self.plugins_info # keep descriptions ? (used by yamlparser) keep_desc = False if (config and config.has_section('job_builder') and config.has_option('job_builder', 'keep_descriptions')): keep_desc = config.getboolean('job_builder', 'keep_descriptions') self.yamlparser['keep_descriptions'] = keep_desc # figure out the include path (used by yamlparser) path = ["."] if (config and config.has_section('job_builder') and config.has_option('job_builder', 'include_path')): path = config.get('job_builder', 'include_path').split(':') self.yamlparser['include_path'] = path # allow duplicates? allow_duplicates = False if config and config.has_option('job_builder', 'allow_duplicates'): allow_duplicates = config.getboolean('job_builder', 'allow_duplicates') self.yamlparser['allow_duplicates'] = allow_duplicates # allow empty variables? self.yamlparser['allow_empty_variables'] = ( self.allow_empty_variables or config and config.has_section('job_builder') and config.has_option('job_builder', 'allow_empty_variables') and config.getboolean('job_builder', 'allow_empty_variables')) def validate(self): config = self.config_parser # Inform the user as to what is likely to happen, as they may specify # a real jenkins instance in test mode to get the plugin info to check # the XML generated. if self.jenkins['user'] is None and self.jenkins['password'] is None: logger.info("Will use anonymous access to Jenkins if needed.") elif ((self.jenkins['user'] is not None and self.jenkins['password'] is None) or (self.jenkins['user'] is None and self.jenkins['password'] is not None)): raise JenkinsJobsException( "Cannot authenticate to Jenkins with only one of User and " "Password provided, please check your configuration." ) if (self.builder['plugins_info'] is not None and not isinstance(self.builder['plugins_info'], list)): raise JenkinsJobsException("plugins_info must contain a list!") # Temporary until yamlparser is refactored to query config object if self.yamlparser['allow_empty_variables'] is not None: config.set('job_builder', 'allow_empty_variables', str(self.yamlparser['allow_empty_variables'])) def get_module_config(self, section, key): """ Given a section name and a key value, return the value assigned to the key in the JJB .ini file if it exists, otherwise emit a warning indicating that the value is not set. Default value returned if no value is set in the file will be a blank string. 
""" result = '' try: result = self.config_parser.get( section, key ) except (configparser.NoSectionError, configparser.NoOptionError, JenkinsJobsException) as e: logger.warning("You didn't set a " + key + " neither in the yaml job definition nor in" + " the " + section + " section, blank default" + " value will be applied:\n{0}".format(e)) return result def get_plugin_config(self, plugin, key): value = self.get_module_config('plugin "{}"'.format(plugin), key) # Backwards compatibility for users who have not switched to the new # plugin configuration format in their config. This code should be # removed in future versions of JJB after 2.0. if not value: value = self.get_module_config(plugin, key) logger.warning( "Defining plugin configuration using [" + plugin + "] is" " deprecated. The recommended way to define plugins now is by" " configuring [plugin \"" + plugin + "\"]") return value
# -*- coding: UTF-8 -*- from insights.client.phase.v1 import post_update from mock.mock import patch, MagicMock from pytest import raises def patch_insights_config(old_function): patcher = patch("insights.client.phase.v1.InsightsConfig", **{"return_value.load_all.return_value.status": False, "return_value.load_all.return_value.unregister": False, "return_value.load_all.return_value.offline": False, "return_value.load_all.return_value.enable_schedule": False, "return_value.load_all.return_value.disable_schedule": False, "return_value.load_all.return_value.analyze_container": False, "return_value.load_all.return_value.display_name": False, "return_value.load_all.return_value.register": False, "return_value.load_all.return_value.legacy_upload": False, "return_value.load_all.return_value.diagnosis": None, "return_value.load_all.return_value.reregister": False, "return_value.load_all.return_value.payload": None, "return_value.load_all.return_value.list_specs": False, "return_value.load_all.return_value.show_results": False, "return_value.load_all.return_value.check_results": False, "return_value.load_all.return_value.no_upload": False, "return_value.load_all.return_value.core_collect": False}) return patcher(old_function) # DRY this at some point... for the love of god @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_no_options_registered(insights_config, insights_client): """ Client run with no options. If registered, exit with 0 exit code (don't kill parent) """ insights_client.return_value.get_registration_status = MagicMock(return_value=True) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 0 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_no_options_unregistered(insights_config, insights_client): """ Client run with no options. If unregistered, exit with 101 exit code (kill parent) """ insights_client.return_value.get_registration_status = MagicMock(return_value=False) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 101 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_no_options_err_reg_check(insights_config, insights_client): """ Client run with no options. If registration check fails, exit with 101 exit code """ insights_client.return_value.get_registration_status = MagicMock(return_value=None) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 101 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_check_status_registered(insights_config, insights_client): """ Just check status. 
If registered, exit with 100 exit code (kill parent) """ insights_config.return_value.load_all.return_value.status = True insights_client.return_value.get_registration_status = MagicMock(return_value=True) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 100 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_check_status_unregistered(insights_config, insights_client): """ Just check status. If unregistered, exit with 101 exit code (kill parent) """ insights_config.return_value.load_all.return_value.status = True insights_client.return_value.get_registration_status = MagicMock(return_value=False) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 101 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() @patch("insights.client.phase.v1.get_scheduler") @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_register_registered(insights_config, insights_client, get_scheduler): """ Client run with --register. If registered, exit with 0 exit code (don't kill parent) Also enable scheduling. """ insights_config.return_value.load_all.return_value.register = True insights_client.return_value.get_registration_status = MagicMock(return_value=True) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 0 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() get_scheduler.return_value.schedule.assert_called_once() @patch("insights.client.phase.v1.get_scheduler") @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_register_unregistered(insights_config, insights_client, get_scheduler): """ Client run with --register. If unregistered, exit with 0 exit code (don't kill parent) Also enable scheduling. """ insights_config.return_value.load_all.return_value.register = True insights_client.return_value.get_registration_status = MagicMock(return_value=False) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 0 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() get_scheduler.return_value.schedule.assert_called_once() @patch("insights.client.phase.v1.get_scheduler") @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_unregister_registered(insights_config, insights_client, get_scheduler): """ Client run with --unregister. If registered, exit with 100 exit code Also disable scheduling. 
""" insights_config.return_value.load_all.return_value.unregister = True insights_client.return_value.get_registration_status = MagicMock(return_value=True) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 100 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() get_scheduler.return_value.remove_scheduling.assert_called_once() @patch("insights.client.phase.v1.get_scheduler") @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_unregister_unregistered(insights_config, insights_client, get_scheduler): """ Client run with --unregister. If unregistered, exit with 101 exit code """ insights_config.return_value.load_all.return_value.unregister = True insights_client.return_value.get_registration_status = MagicMock(return_value=False) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 101 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() get_scheduler.return_value.remove_scheduling.assert_not_called() @patch("insights.client.phase.v1.get_scheduler") @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_force_register_registered(insights_config, insights_client, get_scheduler): """ Client run with --force-reregister. If registered, exit with 0 exit code (don't kill parent) Also enable scheduling. """ insights_config.return_value.load_all.return_value.register = True insights_config.return_value.load_all.return_value.reregister = True insights_client.return_value.get_registration_status = MagicMock(return_value=True) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 0 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_called_once() insights_client.return_value.set_display_name.assert_not_called() get_scheduler.return_value.schedule.assert_called_once() @patch("insights.client.phase.v1.get_scheduler") @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_force_register_unregistered(insights_config, insights_client, get_scheduler): """ Client run with --force-reregister. If registered, exit with 0 exit code (don't kill parent) Also enable scheduling. 
""" insights_config.return_value.load_all.return_value.register = True insights_config.return_value.load_all.return_value.reregister = True insights_client.return_value.get_registration_status = MagicMock(return_value=False) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 0 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_called_once() insights_client.return_value.set_display_name.assert_not_called() get_scheduler.return_value.schedule.assert_called_once() # ASK @patch("insights.client.phase.v1.get_scheduler") @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_set_display_name_cli_no_register_unreg(insights_config, insights_client, get_scheduler): """ Client is unregistered, and run with --display-name but not --register Should exit with code 101 after registration check """ insights_config.return_value.load_all.return_value.display_name = True insights_config.return_value.load_all.return_value._cli_opts = ['display_name'] insights_client.return_value.get_registration_status = MagicMock(return_value=False) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 101 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.set_display_name.assert_not_called() get_scheduler.return_value.schedule.assert_not_called() @patch("insights.client.phase.v1.get_scheduler") @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_set_display_name_cli_no_register_reg(insights_config, insights_client, get_scheduler): """ Client is registered, and run with --display-name but not --register Should exit with code 100 after setting display name """ insights_config.return_value.load_all.return_value.display_name = True insights_config.return_value.load_all.return_value._cli_opts = ['display_name'] insights_client.return_value.get_registration_status = MagicMock(return_value=True) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 100 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.set_display_name.assert_called_once() get_scheduler.return_value.schedule.assert_not_called() @patch("insights.client.phase.v1.get_scheduler") @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_set_display_name_cli_register(insights_config, insights_client, get_scheduler): """ Client is and run with --display-name and --register Should set schedule and exit with code 0 Display name is not explicitly set here """ insights_config.return_value.load_all.return_value.register = True insights_config.return_value.load_all.return_value.reregister = True insights_client.return_value.get_registration_status = MagicMock(return_value=True) with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 0 insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_called_once() insights_client.return_value.set_display_name.assert_not_called() get_scheduler.return_value.schedule.assert_called_once() 
@patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_offline(insights_config, insights_client): """ Offline mode short circuits this phase """ insights_config.return_value.load_all.return_value.offline = True try: post_update() except SystemExit: pass insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_not_called() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config def test_post_update_no_upload(insights_config, insights_client): """ No-upload short circuits this phase """ insights_config.return_value.load_all.return_value.no_upload = True try: post_update() except SystemExit: pass insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_not_called() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config # @patch("insights.client.phase.v1.InsightsClient") def test_exit_ok(insights_config, insights_client): """ Support collection replaces the normal client run. """ with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 0
# -*- coding: utf-8 -*- # Copyright 2014, 2015 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList from synapse.util.caches import cache_counter, caches_by_name from twisted.internet import defer from blist import sorteddict import logging import ujson as json logger = logging.getLogger(__name__) class ReceiptsStore(SQLBaseStore): def __init__(self, hs): super(ReceiptsStore, self).__init__(hs) self._receipts_stream_cache = _RoomStreamChangeCache() @defer.inlineCallbacks def get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): """Get receipts for multiple rooms for sending to clients. Args: room_ids (list): List of room_ids. to_key (int): Max stream id to fetch receipts upto. from_key (int): Min stream id to fetch receipts from. None fetches from the start. Returns: list: A list of receipts. """ room_ids = set(room_ids) if from_key: room_ids = yield self._receipts_stream_cache.get_rooms_changed( self, room_ids, from_key ) results = yield self._get_linearized_receipts_for_rooms( room_ids, to_key, from_key=from_key ) defer.returnValue([ev for res in results.values() for ev in res]) @cachedInlineCallbacks(num_args=3, max_entries=5000) def get_linearized_receipts_for_room(self, room_id, to_key, from_key=None): """Get receipts for a single room for sending to clients. Args: room_ids (str): The room id. to_key (int): Max stream id to fetch receipts upto. from_key (int): Min stream id to fetch receipts from. None fetches from the start. Returns: list: A list of receipts. """ def f(txn): if from_key: sql = ( "SELECT * FROM receipts_linearized WHERE" " room_id = ? AND stream_id > ? AND stream_id <= ?" ) txn.execute( sql, (room_id, from_key, to_key) ) else: sql = ( "SELECT * FROM receipts_linearized WHERE" " room_id = ? AND stream_id <= ?" ) txn.execute( sql, (room_id, to_key) ) rows = self.cursor_to_dict(txn) return rows rows = yield self.runInteraction( "get_linearized_receipts_for_room", f ) if not rows: defer.returnValue([]) content = {} for row in rows: content.setdefault( row["event_id"], {} ).setdefault( row["receipt_type"], {} )[row["user_id"]] = json.loads(row["data"]) defer.returnValue([{ "type": "m.receipt", "room_id": room_id, "content": content, }]) @cachedList(cache=get_linearized_receipts_for_room.cache, list_name="room_ids", num_args=3, inlineCallbacks=True) def _get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): if not room_ids: defer.returnValue({}) def f(txn): if from_key: sql = ( "SELECT * FROM receipts_linearized WHERE" " room_id IN (%s) AND stream_id > ? AND stream_id <= ?" ) % ( ",".join(["?"] * len(room_ids)) ) args = list(room_ids) args.extend([from_key, to_key]) txn.execute(sql, args) else: sql = ( "SELECT * FROM receipts_linearized WHERE" " room_id IN (%s) AND stream_id <= ?" 
) % ( ",".join(["?"] * len(room_ids)) ) args = list(room_ids) args.append(to_key) txn.execute(sql, args) return self.cursor_to_dict(txn) txn_results = yield self.runInteraction( "_get_linearized_receipts_for_rooms", f ) results = {} for row in txn_results: # We want a single event per room, since we want to batch the # receipts by room, event and type. room_event = results.setdefault(row["room_id"], { "type": "m.receipt", "room_id": row["room_id"], "content": {}, }) # The content is of the form: # {"$foo:bar": { "read": { "@user:host": <receipt> }, .. }, .. } event_entry = room_event["content"].setdefault(row["event_id"], {}) receipt_type = event_entry.setdefault(row["receipt_type"], {}) receipt_type[row["user_id"]] = json.loads(row["data"]) results = { room_id: [results[room_id]] if room_id in results else [] for room_id in room_ids } defer.returnValue(results) def get_max_receipt_stream_id(self): return self._receipts_id_gen.get_max_token(self) @cachedInlineCallbacks() def get_graph_receipts_for_room(self, room_id): """Get receipts for sending to remote servers. """ rows = yield self._simple_select_list( table="receipts_graph", keyvalues={"room_id": room_id}, retcols=["receipt_type", "user_id", "event_id"], desc="get_linearized_receipts_for_room", ) result = {} for row in rows: result.setdefault( row["user_id"], {} ).setdefault( row["receipt_type"], [] ).append(row["event_id"]) defer.returnValue(result) def insert_linearized_receipt_txn(self, txn, room_id, receipt_type, user_id, event_id, data, stream_id): # We don't want to clobber receipts for more recent events, so we # have to compare orderings of existing receipts sql = ( "SELECT topological_ordering, stream_ordering, event_id FROM events" " INNER JOIN receipts_linearized as r USING (event_id, room_id)" " WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ?" ) txn.execute(sql, (room_id, receipt_type, user_id)) results = txn.fetchall() if results: res = self._simple_select_one_txn( txn, table="events", retcols=["topological_ordering", "stream_ordering"], keyvalues={"event_id": event_id}, ) topological_ordering = int(res["topological_ordering"]) stream_ordering = int(res["stream_ordering"]) for to, so, _ in results: if int(to) > topological_ordering: return False elif int(to) == topological_ordering and int(so) >= stream_ordering: return False self._simple_delete_txn( txn, table="receipts_linearized", keyvalues={ "room_id": room_id, "receipt_type": receipt_type, "user_id": user_id, } ) self._simple_insert_txn( txn, table="receipts_linearized", values={ "stream_id": stream_id, "room_id": room_id, "receipt_type": receipt_type, "user_id": user_id, "event_id": event_id, "data": json.dumps(data), } ) return True @defer.inlineCallbacks def insert_receipt(self, room_id, receipt_type, user_id, event_ids, data): """Insert a receipt, either from local client or remote server. Automatically does conversion between linearized and graph representations. """ if not event_ids: return if len(event_ids) == 1: linearized_event_id = event_ids[0] else: # we need to points in graph -> linearized form. # TODO: Make this better. def graph_to_linear(txn): query = ( "SELECT event_id WHERE room_id = ? 
AND stream_ordering IN (" " SELECT max(stream_ordering) WHERE event_id IN (%s)" ")" ) % (",".join(["?"] * len(event_ids))) txn.execute(query, [room_id] + event_ids) rows = txn.fetchall() if rows: return rows[0][0] else: raise RuntimeError("Unrecognized event_ids: %r" % (event_ids,)) linearized_event_id = yield self.runInteraction( "insert_receipt_conv", graph_to_linear ) stream_id_manager = yield self._receipts_id_gen.get_next(self) with stream_id_manager as stream_id: yield self._receipts_stream_cache.room_has_changed( self, room_id, stream_id ) have_persisted = yield self.runInteraction( "insert_linearized_receipt", self.insert_linearized_receipt_txn, room_id, receipt_type, user_id, linearized_event_id, data, stream_id=stream_id, ) if not have_persisted: defer.returnValue(None) yield self.insert_graph_receipt( room_id, receipt_type, user_id, event_ids, data ) max_persisted_id = yield self._stream_id_gen.get_max_token(self) defer.returnValue((stream_id, max_persisted_id)) def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, data): return self.runInteraction( "insert_graph_receipt", self.insert_graph_receipt_txn, room_id, receipt_type, user_id, event_ids, data ) def insert_graph_receipt_txn(self, txn, room_id, receipt_type, user_id, event_ids, data): self._simple_delete_txn( txn, table="receipts_graph", keyvalues={ "room_id": room_id, "receipt_type": receipt_type, "user_id": user_id, } ) self._simple_insert_txn( txn, table="receipts_graph", values={ "room_id": room_id, "receipt_type": receipt_type, "user_id": user_id, "event_ids": json.dumps(event_ids), "data": json.dumps(data), } ) class _RoomStreamChangeCache(object): """Keeps track of the stream_id of the latest change in rooms. Given a list of rooms and stream key, it will give a subset of rooms that may have changed since that key. If the key is too old then the cache will simply return all rooms. """ def __init__(self, size_of_cache=10000): self._size_of_cache = size_of_cache self._room_to_key = {} self._cache = sorteddict() self._earliest_key = None self.name = "ReceiptsRoomChangeCache" caches_by_name[self.name] = self._cache @defer.inlineCallbacks def get_rooms_changed(self, store, room_ids, key): """Returns subset of room ids that have had new receipts since the given key. If the key is too old it will just return the given list. """ if key > (yield self._get_earliest_key(store)): keys = self._cache.keys() i = keys.bisect_right(key) result = set( self._cache[k] for k in keys[i:] ).intersection(room_ids) cache_counter.inc_hits(self.name) else: result = room_ids cache_counter.inc_misses(self.name) defer.returnValue(result) @defer.inlineCallbacks def room_has_changed(self, store, room_id, key): """Informs the cache that the room has been changed at the given key. """ if key > (yield self._get_earliest_key(store)): old_key = self._room_to_key.get(room_id, None) if old_key: key = max(key, old_key) self._cache.pop(old_key, None) self._cache[key] = room_id while len(self._cache) > self._size_of_cache: k, r = self._cache.popitem() self._earliest_key = max(k, self._earliest_key) self._room_to_key.pop(r, None) @defer.inlineCallbacks def _get_earliest_key(self, store): if self._earliest_key is None: self._earliest_key = yield store.get_max_receipt_stream_id() self._earliest_key = int(self._earliest_key) defer.returnValue(self._earliest_key)
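# _RoomStreamChangeCache above answers "which of these rooms changed since
# stream key K?" without a database query when K is recent enough to still be
# covered by the cache.  The standalone sketch below shows the same idea with
# plain data structures (no Twisted, no blist); it is illustrative only and
# not the synapse API.
class _RoomChangeCacheSketch(object):
    def __init__(self):
        self._room_to_key = {}    # room_id -> latest stream key seen
        self._earliest_key = 0    # keys at or below this are a cache miss

    def room_has_changed(self, room_id, key):
        self._room_to_key[room_id] = max(key, self._room_to_key.get(room_id, 0))

    def get_rooms_changed(self, room_ids, key):
        if key <= self._earliest_key:
            return set(room_ids)  # key too old: assume every room changed
        return set(r for r in room_ids if self._room_to_key.get(r, 0) > key)

_sketch = _RoomChangeCacheSketch()
_sketch.room_has_changed("!a:hs", 5)
_sketch.room_has_changed("!b:hs", 9)
assert _sketch.get_rooms_changed({"!a:hs", "!b:hs"}, 7) == {"!b:hs"}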
# Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Compute-related Utilities and helpers.""" import itertools import string import traceback import netifaces from oslo_config import cfg from oslo_log import log import six from jacket.compute import block_device from jacket.compute.cloud import power_state from jacket.compute.cloud import task_states from jacket.compute import exception from jacket.i18n import _LW from jacket.compute.network import model as network_model from jacket.compute import notifications from jacket.objects import compute as objects from jacket import rpc from jacket.compute import utils from jacket.compute.virt import driver CONF = cfg.CONF CONF.import_opt('host', 'jacket.compute.netconf') LOG = log.getLogger(__name__) def exception_to_dict(fault, message=None): """Converts exceptions to a dict for use in notifications.""" # TODO(johngarbutt) move to compute/exception.py to share with wrap_exception code = 500 if hasattr(fault, "kwargs"): code = fault.kwargs.get('code', 500) # get the message from the exception that was thrown # if that does not exist, use the name of the exception class itself try: if not message: message = fault.format_message() # These exception handlers are broad so we don't fail to log the fault # just because there is an unexpected error retrieving the message except Exception: try: message = six.text_type(fault) except Exception: message = None if not message: message = fault.__class__.__name__ # NOTE(dripton) The message field in the database is limited to 255 chars. # MySQL silently truncates overly long messages, but PostgreSQL throws an # error if we don't truncate it. u_message = utils.safe_truncate(message, 255) fault_dict = dict(exception=fault) fault_dict["message"] = u_message fault_dict["code"] = code return fault_dict def _get_fault_details(exc_info, error_code): details = '' if exc_info and error_code == 500: tb = exc_info[2] if tb: details = ''.join(traceback.format_tb(tb)) return six.text_type(details) def add_instance_fault_from_exc(context, instance, fault, exc_info=None, fault_message=None): """Adds the specified fault to the database.""" fault_obj = objects.InstanceFault(context=context) fault_obj.host = CONF.host fault_obj.instance_uuid = instance.uuid fault_obj.update(exception_to_dict(fault, message=fault_message)) code = fault_obj.code fault_obj.details = _get_fault_details(exc_info, code) fault_obj.create() def get_device_name_for_instance(instance, bdms, device): """Validates (or generates) a device name for instance. This method is a wrapper for get_next_device_name that gets the list of used devices and the root device from a block device mapping. 
""" mappings = block_device.instance_block_mapping(instance, bdms) return get_next_device_name(instance, mappings.values(), mappings['root'], device) def default_device_names_for_instance(instance, root_device_name, *block_device_lists): """Generate missing device names for an instance.""" dev_list = [bdm.device_name for bdm in itertools.chain(*block_device_lists) if bdm.device_name] if root_device_name not in dev_list: dev_list.append(root_device_name) for bdm in itertools.chain(*block_device_lists): dev = bdm.device_name if not dev: dev = get_next_device_name(instance, dev_list, root_device_name) bdm.device_name = dev bdm.save() dev_list.append(dev) def get_next_device_name(instance, device_name_list, root_device_name=None, device=None): """Validates (or generates) a device name for instance. If device is not set, it will generate a unique device appropriate for the instance. It uses the root_device_name (if provided) and the list of used devices to find valid device names. If the device name is valid but applicable to a different backend (for example /dev/vdc is specified but the backend uses /dev/xvdc), the device name will be converted to the appropriate format. """ req_prefix = None req_letter = None if device: try: req_prefix, req_letter = block_device.match_device(device) except (TypeError, AttributeError, ValueError): raise exception.InvalidDevicePath(path=device) if not root_device_name: root_device_name = block_device.DEFAULT_ROOT_DEV_NAME try: prefix = block_device.match_device( block_device.prepend_dev(root_device_name))[0] except (TypeError, AttributeError, ValueError): raise exception.InvalidDevicePath(path=root_device_name) # NOTE(vish): remove this when xenapi is setting default_root_device if driver.is_xenapi(): prefix = '/dev/xvd' if req_prefix != prefix: LOG.debug("Using %(prefix)s instead of %(req_prefix)s", {'prefix': prefix, 'req_prefix': req_prefix}) used_letters = set() for device_path in device_name_list: letter = block_device.get_device_letter(device_path) used_letters.add(letter) # NOTE(vish): remove this when xenapi is properly setting # default_ephemeral_device and default_swap_device if driver.is_xenapi(): flavor = instance.get_flavor() if flavor.ephemeral_gb: used_letters.add('b') if flavor.swap: used_letters.add('c') if not req_letter: req_letter = _get_unused_letter(used_letters) if req_letter in used_letters: raise exception.DevicePathInUse(path=device) return prefix + req_letter def _get_unused_letter(used_letters): doubles = [first + second for second in string.ascii_lowercase for first in string.ascii_lowercase] all_letters = set(list(string.ascii_lowercase) + doubles) letters = list(all_letters - used_letters) # NOTE(vish): prepend ` so all shorter sequences sort first letters.sort(key=lambda x: x.rjust(2, '`')) return letters[0] def get_value_from_system_metadata(instance, key, type, default): """Get a value of a specified type from image metadata. @param instance: The instance object @param key: The name of the property to get @param type: The python type the value is be returned as @param default: The value to return if key is not set or not the right type """ value = instance.system_metadata.get(key, default) try: return type(value) except ValueError: LOG.warning(_LW("Metadata value %(value)s for %(key)s is not of " "type %(type)s. 
Using default value %(default)s."), {'value': value, 'key': key, 'type': type, 'default': default}, instance=instance) return default def notify_usage_exists(notifier, context, instance_ref, current_period=False, ignore_missing_network_data=True, system_metadata=None, extra_usage_info=None): """Generates 'exists' notification for an instance for usage auditing purposes. :param notifier: a messaging.Notifier :param current_period: if True, this will generate a usage for the current usage period; if False, this will generate a usage for the previous audit period. :param ignore_missing_network_data: if True, log any exceptions generated while getting network info; if False, raise the exception. :param system_metadata: system_metadata DB entries for the instance, if not None. *NOTE*: Currently unused here in trunk, but needed for potential custom modifications. :param extra_usage_info: Dictionary containing extra values to add or override in the notification if not None. """ audit_start, audit_end = notifications.audit_period_bounds(current_period) bw = notifications.bandwidth_usage(instance_ref, audit_start, ignore_missing_network_data) if system_metadata is None: system_metadata = utils.instance_sys_meta(instance_ref) # add image metadata to the notification: image_meta = notifications.image_meta(system_metadata) extra_info = dict(audit_period_beginning=str(audit_start), audit_period_ending=str(audit_end), bandwidth=bw, image_meta=image_meta) if extra_usage_info: extra_info.update(extra_usage_info) notify_about_instance_usage(notifier, context, instance_ref, 'exists', system_metadata=system_metadata, extra_usage_info=extra_info) def notify_about_instance_usage(notifier, context, instance, event_suffix, network_info=None, system_metadata=None, extra_usage_info=None, fault=None): """Send a notification about an instance. :param notifier: a messaging.Notifier :param event_suffix: Event type like "delete.start" or "exists" :param network_info: Networking information, if provided. :param system_metadata: system_metadata DB entries for the instance, if provided. :param extra_usage_info: Dictionary containing extra values to add or override in the notification. """ if not extra_usage_info: extra_usage_info = {} usage_info = notifications.info_from_instance(context, instance, network_info, system_metadata, **extra_usage_info) if fault: # NOTE(johngarbutt) mirrors the format in wrap_exception fault_payload = exception_to_dict(fault) LOG.debug(fault_payload["message"], instance=instance) usage_info.update(fault_payload) if event_suffix.endswith("error"): method = notifier.error else: method = notifier.info method(context, 'compute.instance.%s' % event_suffix, usage_info) def notify_about_server_group_update(context, event_suffix, sg_payload): """Send a notification about server group update. :param event_suffix: Event type like "create.start" or "create.end" :param sg_payload: payload for server group update """ notifier = rpc.get_notifier(service='servergroup') notifier.info(context, 'servergroup.%s' % event_suffix, sg_payload) def notify_about_aggregate_update(context, event_suffix, aggregate_payload): """Send a notification about aggregate update. 
:param event_suffix: Event type like "create.start" or "create.end" :param aggregate_payload: payload for aggregate update """ aggregate_identifier = aggregate_payload.get('aggregate_id', None) if not aggregate_identifier: aggregate_identifier = aggregate_payload.get('name', None) if not aggregate_identifier: LOG.debug("No aggregate id or name specified for this " "notification and it will be ignored") return notifier = rpc.get_notifier(service='aggregate', host=aggregate_identifier) notifier.info(context, 'aggregate.%s' % event_suffix, aggregate_payload) def notify_about_host_update(context, event_suffix, host_payload): """Send a notification about host update. :param event_suffix: Event type like "create.start" or "create.end" :param host_payload: payload for host update. It is a dict and there should be at least the 'host_name' key in this dict. """ host_identifier = host_payload.get('host_name') if not host_identifier: LOG.warning(_LW("No host name specified for the notification of " "HostAPI.%s and it will be ignored"), event_suffix) return notifier = rpc.get_notifier(service='api', host=host_identifier) notifier.info(context, 'HostAPI.%s' % event_suffix, host_payload) def get_nw_info_for_instance(instance): if instance.info_cache is None: return network_model.NetworkInfo.hydrate([]) return instance.info_cache.network_info def refresh_info_cache_for_instance(context, instance): """Refresh the info cache for an instance. :param instance: The instance object. """ if instance.info_cache is not None: instance.info_cache.refresh() def usage_volume_info(vol_usage): def null_safe_str(s): return str(s) if s else '' tot_refreshed = vol_usage.tot_last_refreshed curr_refreshed = vol_usage.curr_last_refreshed if tot_refreshed and curr_refreshed: last_refreshed_time = max(tot_refreshed, curr_refreshed) elif tot_refreshed: last_refreshed_time = tot_refreshed else: # curr_refreshed must be set last_refreshed_time = curr_refreshed usage_info = dict( volume_id=vol_usage.volume_id, tenant_id=vol_usage.project_id, user_id=vol_usage.user_id, availability_zone=vol_usage.availability_zone, instance_id=vol_usage.instance_uuid, last_refreshed=null_safe_str(last_refreshed_time), reads=vol_usage.tot_reads + vol_usage.curr_reads, read_bytes=vol_usage.tot_read_bytes + vol_usage.curr_read_bytes, writes=vol_usage.tot_writes + vol_usage.curr_writes, write_bytes=vol_usage.tot_write_bytes + vol_usage.curr_write_bytes) return usage_info def get_reboot_type(task_state, current_power_state): """Checks if the current instance state requires a HARD reboot.""" if current_power_state != power_state.RUNNING: return 'HARD' soft_types = [task_states.REBOOT_STARTED, task_states.REBOOT_PENDING, task_states.REBOOTING] reboot_type = 'SOFT' if task_state in soft_types else 'HARD' return reboot_type def get_machine_ips(): """Get the machine's ip addresses :returns: list of Strings of ip addresses """ addresses = [] for interface in netifaces.interfaces(): try: iface_data = netifaces.ifaddresses(interface) for family in iface_data: if family not in (netifaces.AF_INET, netifaces.AF_INET6): continue for address in iface_data[family]: addr = address['addr'] # If we have an ipv6 address remove the # %ether_interface at the end if family == netifaces.AF_INET6: addr = addr.split('%')[0] addresses.append(addr) except ValueError: pass return addresses def resize_quota_delta(context, new_flavor, old_flavor, sense, compare): """Calculate any quota adjustment required at a particular point in the resize cycle. 
:param context: the request context :param new_flavor: the target instance type :param old_flavor: the original instance type :param sense: the sense of the adjustment, 1 indicates a forward adjustment, whereas -1 indicates a reversal of a prior adjustment :param compare: the direction of the comparison, 1 indicates we're checking for positive deltas, whereas -1 indicates negative deltas """ def _quota_delta(resource): return sense * (new_flavor[resource] - old_flavor[resource]) deltas = {} if compare * _quota_delta('vcpus') > 0: deltas['cores'] = _quota_delta('vcpus') if compare * _quota_delta('memory_mb') > 0: deltas['ram'] = _quota_delta('memory_mb') return deltas def upsize_quota_delta(context, new_flavor, old_flavor): """Calculate deltas required to adjust quota for an instance upsize. """ return resize_quota_delta(context, new_flavor, old_flavor, 1, 1) def reverse_upsize_quota_delta(context, instance): """Calculate deltas required to reverse a prior upsizing quota adjustment. """ return resize_quota_delta(context, instance.new_flavor, instance.old_flavor, -1, -1) def downsize_quota_delta(context, instance): """Calculate deltas required to adjust quota for an instance downsize. """ old_flavor = instance.get_flavor('old') new_flavor = instance.get_flavor('new') return resize_quota_delta(context, new_flavor, old_flavor, 1, -1) def reserve_quota_delta(context, deltas, instance): """If there are deltas to reserve, construct a Quotas object and reserve the deltas for the given project. :param context: The compute request context. :param deltas: A dictionary of the proposed delta changes. :param instance: The instance we're operating on, so that quotas can use the correct project_id/user_id. :return: compute.compute.quotas.Quotas """ quotas = objects.Quotas(context=context) if deltas: project_id, user_id = objects.quotas.ids_from_instance(context, instance) quotas.reserve(project_id=project_id, user_id=user_id, **deltas) return quotas def remove_shelved_keys_from_system_metadata(instance): # Delete system_metadata for a shelved instance for key in ['shelved_at', 'shelved_image_id', 'shelved_host']: if key in instance.system_metadata: del (instance.system_metadata[key]) class EventReporter(object): """Context manager to report instance action events.""" def __init__(self, context, event_name, *instance_uuids): self.context = context self.event_name = event_name self.instance_uuids = instance_uuids def __enter__(self): for uuid in self.instance_uuids: objects.InstanceActionEvent.event_start( self.context, uuid, self.event_name, want_result=False) return self def __exit__(self, exc_type, exc_val, exc_tb): for uuid in self.instance_uuids: objects.InstanceActionEvent.event_finish_with_failure( self.context, uuid, self.event_name, exc_val=exc_val, exc_tb=exc_tb, want_result=False) return False class UnlimitedSemaphore(object): def __enter__(self): pass def __exit__(self): pass @property def balance(self): return 0
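# resize_quota_delta() above only keeps the deltas that match the requested
# comparison: upsize_quota_delta (sense=1, compare=1) reserves the resources
# that grow, while downsize_quota_delta (sense=1, compare=-1) finds those that
# shrink.  The flavors below are made-up dicts used purely to show the
# arithmetic; they are not objects from this code base.
_old_flavor = {'vcpus': 2, 'memory_mb': 2048}
_new_flavor = {'vcpus': 4, 'memory_mb': 1024}

def _delta(resource, sense=1):
    return sense * (_new_flavor[resource] - _old_flavor[resource])

# upsize (compare=1): only the grown resource produces a delta
assert _delta('vcpus') == 2           # -> deltas['cores'] = 2
assert _delta('memory_mb') < 0        # ram shrank, so no 'ram' entry

# downsize (compare=-1): only the shrunk resource produces a delta
assert -1 * _delta('memory_mb') > 0   # passes the compare test
assert _delta('memory_mb') == -1024   # -> deltas['ram'] = -1024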
import datetime import hashlib import logging import os from email.headerregistry import Address from email.parser import Parser from email.policy import default from email.utils import formataddr, parseaddr from typing import Any, Dict, List, Mapping, Optional, Tuple import orjson from django.conf import settings from django.core.mail import EmailMultiAlternatives from django.core.management import CommandError from django.template import loader from django.template.exceptions import TemplateDoesNotExist from django.utils.timezone import now as timezone_now from django.utils.translation import override as override_language from django.utils.translation import ugettext as _ from confirmation.models import generate_key from scripts.setup.inline_email_css import inline_template from zerver.lib.logging_util import log_to_file from zerver.models import EMAIL_TYPES, Realm, ScheduledEmail, UserProfile, get_user_profile_by_id ## Logging setup ## logger = logging.getLogger('zulip.send_email') log_to_file(logger, settings.EMAIL_LOG_PATH) class FromAddress: SUPPORT = parseaddr(settings.ZULIP_ADMINISTRATOR)[1] NOREPLY = parseaddr(settings.NOREPLY_EMAIL_ADDRESS)[1] support_placeholder = "SUPPORT" no_reply_placeholder = 'NO_REPLY' tokenized_no_reply_placeholder = 'TOKENIZED_NO_REPLY' # Generates an unpredictable noreply address. @staticmethod def tokenized_no_reply_address() -> str: if settings.ADD_TOKENS_TO_NOREPLY_ADDRESS: return parseaddr(settings.TOKENIZED_NOREPLY_EMAIL_ADDRESS)[1].format(token=generate_key()) return FromAddress.NOREPLY @staticmethod def security_email_from_name(language: Optional[str]=None, user_profile: Optional[UserProfile]=None) -> str: if language is None: assert user_profile is not None language = user_profile.default_language with override_language(language): return _("Zulip Account Security") def build_email(template_prefix: str, to_user_ids: Optional[List[int]]=None, to_emails: Optional[List[str]]=None, from_name: Optional[str]=None, from_address: Optional[str]=None, reply_to_email: Optional[str]=None, language: Optional[str]=None, context: Mapping[str, Any]={}, realm: Optional[Realm]=None ) -> EmailMultiAlternatives: # Callers should pass exactly one of to_user_id and to_email. assert (to_user_ids is None) ^ (to_emails is None) if to_user_ids is not None: to_users = [get_user_profile_by_id(to_user_id) for to_user_id in to_user_ids] if realm is None: assert len(set([to_user.realm_id for to_user in to_users])) == 1 realm = to_users[0].realm to_emails = [str(Address(display_name=to_user.full_name, addr_spec=to_user.delivery_email)) for to_user in to_users] extra_headers = {} if realm is not None: # formaddr is meant for formatting (display_name, email_address) pair for headers like "To", # but we can use its utility for formatting the List-Id header, as it follows the same format, # except having just a domain instead of an email address. 
extra_headers['List-Id'] = formataddr((realm.name, realm.host)) context = { **context, 'support_email': FromAddress.SUPPORT, 'email_images_base_uri': settings.ROOT_DOMAIN_URI + '/static/images/emails', 'physical_address': settings.PHYSICAL_ADDRESS, } def render_templates() -> Tuple[str, str, str]: email_subject = loader.render_to_string(template_prefix + '.subject.txt', context=context, using='Jinja2_plaintext').strip().replace('\n', '') message = loader.render_to_string(template_prefix + '.txt', context=context, using='Jinja2_plaintext') try: html_message = loader.render_to_string(template_prefix + '.html', context) except TemplateDoesNotExist: emails_dir = os.path.dirname(template_prefix) template = os.path.basename(template_prefix) compiled_template_prefix = os.path.join(emails_dir, "compiled", template) html_message = loader.render_to_string(compiled_template_prefix + '.html', context) return (html_message, message, email_subject) if not language and to_user_ids is not None: language = to_users[0].default_language if language: with override_language(language): # Make sure that we render the email using the target's native language (html_message, message, email_subject) = render_templates() else: (html_message, message, email_subject) = render_templates() logger.warning("Missing language for email template '%s'", template_prefix) if from_name is None: from_name = "Zulip" if from_address is None: from_address = FromAddress.NOREPLY if from_address == FromAddress.tokenized_no_reply_placeholder: from_address = FromAddress.tokenized_no_reply_address() if from_address == FromAddress.no_reply_placeholder: from_address = FromAddress.NOREPLY if from_address == FromAddress.support_placeholder: from_address = FromAddress.SUPPORT from_email = str(Address(display_name=from_name, addr_spec=from_address)) reply_to = None if reply_to_email is not None: reply_to = [reply_to_email] # Remove the from_name in the reply-to for noreply emails, so that users # see "noreply@..." rather than "Zulip" or whatever the from_name is # when they reply in their email client. elif from_address == FromAddress.NOREPLY: reply_to = [FromAddress.NOREPLY] mail = EmailMultiAlternatives(email_subject, message, from_email, to_emails, reply_to=reply_to, headers=extra_headers) if html_message is not None: mail.attach_alternative(html_message, 'text/html') return mail class EmailNotDeliveredException(Exception): pass class DoubledEmailArgumentException(CommandError): def __init__(self, argument_name: str) -> None: msg = f"Argument '{argument_name}' is ambiguously present in both options and email template." super().__init__(msg) class NoEmailArgumentException(CommandError): def __init__(self, argument_name: str) -> None: msg = f"Argument '{argument_name}' is required in either options or email template." super().__init__(msg) # When changing the arguments to this function, you may need to write a # migration to change or remove any emails in ScheduledEmail. 
def send_email(template_prefix: str, to_user_ids: Optional[List[int]]=None, to_emails: Optional[List[str]]=None, from_name: Optional[str]=None, from_address: Optional[str]=None, reply_to_email: Optional[str]=None, language: Optional[str]=None, context: Dict[str, Any]={}, realm: Optional[Realm]=None) -> None: mail = build_email(template_prefix, to_user_ids=to_user_ids, to_emails=to_emails, from_name=from_name, from_address=from_address, reply_to_email=reply_to_email, language=language, context=context, realm=realm) template = template_prefix.split("/")[-1] logger.info("Sending %s email to %s", template, mail.to) if mail.send() == 0: logger.error("Error sending %s email to %s", template, mail.to) raise EmailNotDeliveredException def send_email_from_dict(email_dict: Mapping[str, Any]) -> None: send_email(**dict(email_dict)) def send_future_email(template_prefix: str, realm: Realm, to_user_ids: Optional[List[int]]=None, to_emails: Optional[List[str]]=None, from_name: Optional[str]=None, from_address: Optional[str]=None, language: Optional[str]=None, context: Dict[str, Any]={}, delay: datetime.timedelta=datetime.timedelta(0)) -> None: template_name = template_prefix.split('/')[-1] email_fields = {'template_prefix': template_prefix, 'from_name': from_name, 'from_address': from_address, 'language': language, 'context': context} if settings.DEVELOPMENT_LOG_EMAILS: send_email(template_prefix, to_user_ids=to_user_ids, to_emails=to_emails, from_name=from_name, from_address=from_address, language=language, context=context) # For logging the email assert (to_user_ids is None) ^ (to_emails is None) email = ScheduledEmail.objects.create( type=EMAIL_TYPES[template_name], scheduled_timestamp=timezone_now() + delay, realm=realm, data=orjson.dumps(email_fields).decode()) # We store the recipients in the ScheduledEmail object itself, # rather than the JSON data object, so that we can find and clear # them using clear_scheduled_emails. try: if to_user_ids is not None: email.users.add(*to_user_ids) else: assert to_emails is not None assert(len(to_emails) == 1) email.address = parseaddr(to_emails[0])[1] email.save() except Exception as e: email.delete() raise e def send_email_to_admins(template_prefix: str, realm: Realm, from_name: Optional[str]=None, from_address: Optional[str]=None, language: Optional[str]=None, context: Dict[str, Any]={}) -> None: admins = realm.get_human_admin_users() admin_user_ids = [admin.id for admin in admins] send_email(template_prefix, to_user_ids=admin_user_ids, from_name=from_name, from_address=from_address, language=language, context=context) def clear_scheduled_invitation_emails(email: str) -> None: """Unlike most scheduled emails, invitation emails don't have an existing user object to key off of, so we filter by address here.""" items = ScheduledEmail.objects.filter(address__iexact=email, type=ScheduledEmail.INVITATION_REMINDER) items.delete() def clear_scheduled_emails(user_ids: List[int], email_type: Optional[int]=None) -> None: items = ScheduledEmail.objects.filter(users__in=user_ids).distinct() if email_type is not None: items = items.filter(type=email_type) for item in items: item.users.remove(*user_ids) if item.users.all().count() == 0: item.delete() def handle_send_email_format_changes(job: Dict[str, Any]) -> None: # Reformat any jobs that used the old to_email # and to_user_ids argument formats. 
if 'to_email' in job: if job['to_email'] is not None: job['to_emails'] = [job['to_email']] del job['to_email'] if 'to_user_id' in job: if job['to_user_id'] is not None: job['to_user_ids'] = [job['to_user_id']] del job['to_user_id'] def deliver_email(email: ScheduledEmail) -> None: data = orjson.loads(email.data) if email.users.exists(): data['to_user_ids'] = [user.id for user in email.users.all()] if email.address is not None: data['to_emails'] = [email.address] handle_send_email_format_changes(data) send_email(**data) email.delete() def get_header(option: Optional[str], header: Optional[str], name: str) -> str: if option and header: raise DoubledEmailArgumentException(name) if not option and not header: raise NoEmailArgumentException(name) return str(option or header) def send_custom_email(users: List[UserProfile], options: Dict[str, Any]) -> None: """ Can be used directly with from a management shell with send_custom_email(user_profile_list, dict( markdown_template_path="/path/to/markdown/file.md", subject="Email Subject", from_name="Sender Name") ) """ with open(options["markdown_template_path"]) as f: text = f.read() parsed_email_template = Parser(policy=default).parsestr(text) email_template_hash = hashlib.sha256(text.encode('utf-8')).hexdigest()[0:32] email_filename = f"custom/custom_email_{email_template_hash}.source.html" email_id = f"zerver/emails/custom/custom_email_{email_template_hash}" markdown_email_base_template_path = "templates/zerver/emails/custom_email_base.pre.html" html_source_template_path = f"templates/{email_id}.source.html" plain_text_template_path = f"templates/{email_id}.txt" subject_path = f"templates/{email_id}.subject.txt" os.makedirs(os.path.dirname(html_source_template_path), exist_ok=True) # First, we render the Markdown input file just like our # user-facing docs with render_markdown_path. with open(plain_text_template_path, "w") as f: f.write(parsed_email_template.get_payload()) from zerver.templatetags.app_filters import render_markdown_path rendered_input = render_markdown_path(plain_text_template_path.replace("templates/", "")) # And then extend it with our standard email headers. with open(html_source_template_path, "w") as f: with open(markdown_email_base_template_path) as base_template: # Note that we're doing a hacky non-Jinja2 substitution here; # we do this because the normal render_markdown_path ordering # doesn't commute properly with inline_email_css. f.write(base_template.read().replace('{{ rendered_input }}', rendered_input)) with open(subject_path, "w") as f: f.write(get_header(options.get("subject"), parsed_email_template.get("subject"), "subject")) inline_template(email_filename) # Finally, we send the actual emails. for user_profile in users: if options.get('admins_only') and not user_profile.is_realm_admin: continue context = { 'realm_uri': user_profile.realm.uri, 'realm_name': user_profile.realm.name, } send_email(email_id, to_user_ids=[user_profile.id], from_address=FromAddress.SUPPORT, reply_to_email=options.get("reply_to"), from_name=get_header(options.get("from_name"), parsed_email_template.get("from"), "from_name"), context=context)
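# handle_send_email_format_changes() above migrates queued jobs that still use
# the old singular 'to_email'/'to_user_id' keys to the list-based arguments
# that send_email() now expects.  The sketch below replays that conversion on
# a plain dict so the before/after shape is visible; the job contents are made
# up for illustration.
def _convert_job_sketch(job):
    if job.get('to_email') is not None:
        job['to_emails'] = [job.pop('to_email')]
    if job.get('to_user_id') is not None:
        job['to_user_ids'] = [job.pop('to_user_id')]
    return job

assert _convert_job_sketch(
    {'template_prefix': 'zerver/emails/invitation', 'to_email': 'user@example.com'}
) == {'template_prefix': 'zerver/emails/invitation', 'to_emails': ['user@example.com']}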
# Copyright (c) 2013-2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import ldap from OpenSSL import crypto from barbican.common import exception as excep from barbican.common import hrefs import barbican.common.utils as utils from barbican.model import models from barbican.model import repositories as repos from barbican.plugin.interface import certificate_manager as cert from barbican.plugin import resources as plugin from barbican.tasks import common LOG = utils.getLogger(__name__) # Order sub-status definitions ORDER_STATUS_REQUEST_PENDING = models.OrderStatus( "cert_request_pending", "Request has been submitted to the CA. " "Waiting for certificate to be generated" ) ORDER_STATUS_CERT_GENERATED = models.OrderStatus( "cert_generated", "Certificate has been generated" ) ORDER_STATUS_DATA_INVALID = models.OrderStatus( "cert_data_invalid", "CA rejected request data as invalid" ) ORDER_STATUS_CA_UNAVAIL_FOR_ISSUE = models.OrderStatus( "cert_ca_unavail_for_issue", "Unable to submit certificate request. CA unavailable" ) ORDER_STATUS_INVALID_OPERATION = models.OrderStatus( "cert_invalid_operation", "CA returned invalid operation" ) ORDER_STATUS_INTERNAL_ERROR = models.OrderStatus( "cert_internal_error", "Internal error during certificate operations" ) ORDER_STATUS_CA_UNAVAIL_FOR_CHECK = models.OrderStatus( "cert_ca_unavail_for_status_check", "Unable to get certificate request status. CA unavailable." ) def issue_certificate_request(order_model, project_model, result_follow_on): """Create the initial order with CA. Note that this method may be called more than once if retries are required. Barbican metadata is used to store intermediate information, including selected plugins by name, to support such retries. :param: order_model - order associated with this cert request :param: project_model - project associated with this request :param: result_follow_on - A :class:`FollowOnProcessingStatusDTO` instance instantiated by the client that this function may optionally update with information on how to process this task into the future. :returns: container_model - container with the relevant cert if the request has been completed. None otherwise """ container_model = None plugin_meta = _get_plugin_meta(order_model) barbican_meta = _get_barbican_meta(order_model) # TODO(john-wood-w) We need to de-conflict barbican_meta (stored with order # and not shown to plugins) with barbican_meta_dto (shared with plugins). # As a minimum we should change the name of the DTO to something like # 'extended_meta_dto' or some such. barbican_meta_for_plugins_dto = cert.BarbicanMetaDTO() # refresh the CA table. This is mostly a no-op unless the entries # for a plugin are expired. cert.CertificatePluginManager().refresh_ca_table() # Locate the required certificate plugin. 
cert_plugin_name = barbican_meta.get('plugin_name') if cert_plugin_name: cert_plugin = cert.CertificatePluginManager().get_plugin_by_name( cert_plugin_name) else: ca_id = _get_ca_id(order_model.meta, project_model.id) if ca_id: barbican_meta_for_plugins_dto.plugin_ca_id = ca_id cert_plugin = cert.CertificatePluginManager().get_plugin_by_ca_id( ca_id) else: cert_plugin = cert.CertificatePluginManager().get_plugin( order_model.meta) barbican_meta['plugin_name'] = utils.generate_fullname_for(cert_plugin) # Generate CSR if needed. request_type = order_model.meta.get(cert.REQUEST_TYPE) if request_type == cert.CertificateRequestType.STORED_KEY_REQUEST: csr = barbican_meta.get('generated_csr') if csr is None: # TODO(alee) Fix this to be a non-project specific call once # the ACL patches go in. csr = _generate_csr(order_model, project_model) barbican_meta['generated_csr'] = csr barbican_meta_for_plugins_dto.generated_csr = csr result = cert_plugin.issue_certificate_request( order_model.id, order_model.meta, plugin_meta, barbican_meta_for_plugins_dto) # Save plugin and barbican metadata for this order. _save_plugin_metadata(order_model, plugin_meta) _save_barbican_metadata(order_model, barbican_meta) # Handle result if cert.CertificateStatus.WAITING_FOR_CA == result.status: _update_result_follow_on( result_follow_on, order_status=ORDER_STATUS_REQUEST_PENDING, retry_task=common.RetryTasks.INVOKE_CERT_STATUS_CHECK_TASK, retry_msec=result.retry_msec) elif cert.CertificateStatus.CERTIFICATE_GENERATED == result.status: _update_result_follow_on( result_follow_on, order_status=ORDER_STATUS_CERT_GENERATED) container_model = _save_secrets(result, project_model) elif cert.CertificateStatus.CLIENT_DATA_ISSUE_SEEN == result.status: raise cert.CertificateStatusClientDataIssue(result.status_message) elif cert.CertificateStatus.CA_UNAVAILABLE_FOR_REQUEST == result.status: _update_result_follow_on( result_follow_on, order_status=ORDER_STATUS_CA_UNAVAIL_FOR_ISSUE, retry_task=common.RetryTasks.INVOKE_SAME_TASK, retry_msec=cert.ERROR_RETRY_MSEC) _notify_ca_unavailable(order_model, result) elif cert.CertificateStatus.INVALID_OPERATION == result.status: raise cert.CertificateStatusInvalidOperation(result.status_message) else: raise cert.CertificateStatusNotSupported(result.status) return container_model def check_certificate_request(order_model, project_model, result_follow_on): """Check the status of a certificate request with the CA. Note that this method may be called more than once if retries are required. Barbican metadata is used to store intermediate information, including selected plugins by name, to support such retries. :param: order_model - order associated with this cert request :param: project_model - project associated with this request :param: result_follow_on - A :class:`FollowOnProcessingStatusDTO` instance instantiated by the client that this function may optionally update with information on how to process this task into the future. :returns: container_model - container with the relevant cert if the request has been completed. None otherwise. """ container_model = None plugin_meta = _get_plugin_meta(order_model) barbican_meta = _get_barbican_meta(order_model) # TODO(john-wood-w) See note above about DTO's name. 
barbican_meta_for_plugins_dto = cert.BarbicanMetaDTO() cert_plugin = cert.CertificatePluginManager().get_plugin_by_name( barbican_meta.get('plugin_name')) result = cert_plugin.check_certificate_status( order_model.id, order_model.meta, plugin_meta, barbican_meta_for_plugins_dto) # Save plugin order plugin state _save_plugin_metadata(order_model, plugin_meta) # Handle result if cert.CertificateStatus.WAITING_FOR_CA == result.status: _update_result_follow_on( result_follow_on, order_status=ORDER_STATUS_REQUEST_PENDING, retry_task=common.RetryTasks.INVOKE_CERT_STATUS_CHECK_TASK, retry_msec=result.retry_msec) elif cert.CertificateStatus.CERTIFICATE_GENERATED == result.status: _update_result_follow_on( result_follow_on, order_status=ORDER_STATUS_CERT_GENERATED) container_model = _save_secrets(result, project_model) elif cert.CertificateStatus.CLIENT_DATA_ISSUE_SEEN == result.status: raise cert.CertificateStatusClientDataIssue(result.status_message) elif cert.CertificateStatus.CA_UNAVAILABLE_FOR_REQUEST == result.status: _update_result_follow_on( result_follow_on, order_status=ORDER_STATUS_CA_UNAVAIL_FOR_CHECK, retry_task=common.RetryTasks.INVOKE_SAME_TASK, retry_msec=cert.ERROR_RETRY_MSEC) _notify_ca_unavailable(order_model, result) elif cert.CertificateStatus.INVALID_OPERATION == result.status: raise cert.CertificateStatusInvalidOperation(result.status_message) else: raise cert.CertificateStatusNotSupported(result.status) return container_model def modify_certificate_request(order_model, updated_meta): """Update the order with CA.""" # TODO(chellygel): Add the modify certificate request logic. LOG.debug('in modify_certificate_request') raise NotImplementedError # pragma: no cover def _get_ca_id(order_meta, project_id): ca_id = order_meta.get(cert.CA_ID) if ca_id: return ca_id preferred_ca_repository = repos.get_preferred_ca_repository() cas, offset, limit, total = preferred_ca_repository.get_by_create_date( project_id=project_id, suppress_exception=True) if total > 0: return cas[0].ca_id global_ca = preferred_ca_repository.get_global_preferred_ca() if global_ca: return global_ca.ca_id return None def _update_result_follow_on( result_follow_on, order_status=None, retry_task=common.RetryTasks.NO_ACTION_REQUIRED, retry_msec=common.RETRY_MSEC_DEFAULT): if order_status: result_follow_on.status = order_status.id result_follow_on.status_message = order_status.message result_follow_on.retry_task = retry_task if retry_msec and retry_msec >= 0: result_follow_on.retry_msec = retry_msec def _get_plugin_meta(order_model): if order_model: order_plugin_meta_repo = repos.get_order_plugin_meta_repository() return order_plugin_meta_repo.get_metadata_for_order(order_model.id) else: return {} def _get_barbican_meta(order_model): if order_model: order_barbican_meta_repo = repos.get_order_barbican_meta_repository() return order_barbican_meta_repo.get_metadata_for_order(order_model.id) else: return {} def _generate_csr(order_model, project_model): """Generate a CSR from the public key. 
:param: order_model - order for the request :param: project_model - project for this request :return: CSR (certificate signing request) in PEM format :raise: :class:`StoredKeyPrivateKeyNotFound` if private key not found :class:`StoredKeyContainerNotFound` if container not found """ container_ref = order_model.meta.get('container_ref') # extract container_id as the last part of the URL container_id = hrefs.get_container_id_from_ref(container_ref) container_repo = repos.get_container_repository() container = container_repo.get(container_id, project_model.external_id, suppress_exception=True) if not container: raise excep.StoredKeyContainerNotFound(container_id) passphrase = None private_key = None for cs in container.container_secrets: secret_repo = repos.get_secret_repository() if cs.name == 'private_key': private_key_model = secret_repo.get( cs.secret_id, project_model.external_id) private_key = plugin.get_secret( 'application/pkcs8', private_key_model, project_model) elif cs.name == 'private_key_passphrase': passphrase_model = secret_repo.get( cs.secret_id, project_model.external_id) passphrase = plugin.get_secret( 'text/plain;charset=utf-8', passphrase_model, project_model) passphrase = str(passphrase) if not private_key: raise excep.StoredKeyPrivateKeyNotFound(container_id) if passphrase is None: pkey = crypto.load_privatekey( crypto.FILETYPE_PEM, private_key ) else: pkey = crypto.load_privatekey( crypto.FILETYPE_PEM, private_key, passphrase ) subject_name = order_model.meta.get('subject_dn') subject_name_dns = ldap.dn.str2dn(subject_name) extensions = order_model.meta.get('extensions', None) req = crypto.X509Req() subj = req.get_subject() # Note: must iterate over the DNs in reverse order, or the resulting # subject name will be reversed. for ava in reversed(subject_name_dns): for key, val, extra in ava: setattr(subj, key.upper(), val) req.set_pubkey(pkey) if extensions: # TODO(alee-3) We need code here to parse the encoded extensions and # convert them into X509Extension objects. This code will also be # used in the validation code. Commenting out for now till we figure # out how to do this. # req.add_extensions(extensions) pass req.sign(pkey, 'sha256') csr = crypto.dump_certificate_request(crypto.FILETYPE_PEM, req) return csr def _notify_ca_unavailable(order_model, result): """Notify observer(s) that the CA was unavailable at this time.""" cert.EVENT_PLUGIN_MANAGER.notify_ca_is_unavailable( order_model.project_id, hrefs.convert_order_to_href(order_model.id), result.status_message, result.retry_msec) def _save_plugin_metadata(order_model, plugin_meta): """Add plugin metadata to an order.""" if not isinstance(plugin_meta, dict): plugin_meta = {} order_plugin_meta_repo = repos.get_order_plugin_meta_repository() order_plugin_meta_repo.save(plugin_meta, order_model) def _save_barbican_metadata(order_model, barbican_meta): """Add barbican metadata to an order.""" if not isinstance(barbican_meta, dict): barbican_meta = {} order_barbican_meta_repo = repos.get_order_barbican_meta_repository() order_barbican_meta_repo.save(barbican_meta, order_model) def _save_secrets(result, project_model): cert_secret_model, transport_key_model = plugin.store_secret( unencrypted_raw=result.certificate, content_type_raw='application/pkix-cert', content_encoding='base64', secret_model=models.Secret(), project_model=project_model) # save the certificate chain as a secret. 
if result.intermediates: intermediates_secret_model, transport_key_model = plugin.store_secret( unencrypted_raw=result.intermediates, content_type_raw='application/pkix-cert', content_encoding='base64', secret_model=models.Secret(), project_model=project_model ) else: intermediates_secret_model = None container_model = models.Container() container_model.type = "certificate" container_model.status = models.States.ACTIVE container_model.project_id = project_model.id container_repo = repos.get_container_repository() container_repo.create_from(container_model) # create container_secret for certificate new_consec_assoc = models.ContainerSecret() new_consec_assoc.name = 'certificate' new_consec_assoc.container_id = container_model.id new_consec_assoc.secret_id = cert_secret_model.id container_secret_repo = repos.get_container_secret_repository() container_secret_repo.create_from(new_consec_assoc) if intermediates_secret_model: # create container_secret for intermediate certs new_consec_assoc = models.ContainerSecret() new_consec_assoc.name = 'intermediates' new_consec_assoc.container_id = container_model.id new_consec_assoc.secret_id = intermediates_secret_model.id container_secret_repo.create_from(new_consec_assoc) return container_model
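# _generate_csr() above loads a stored private key and builds a PKCS#10
# certificate signing request with pyOpenSSL.  The self-contained sketch below
# exercises the same pyOpenSSL calls on a freshly generated key, so the flow
# can be tried without a Barbican container; the subject values are
# placeholders.
from OpenSSL import crypto as _crypto

_pkey = _crypto.PKey()
_pkey.generate_key(_crypto.TYPE_RSA, 2048)

_req = _crypto.X509Req()
_subject = _req.get_subject()
_subject.CN = 'server.example.com'
_subject.O = 'Example Org'
_req.set_pubkey(_pkey)
_req.sign(_pkey, 'sha256')

_csr_pem = _crypto.dump_certificate_request(_crypto.FILETYPE_PEM, _req)
assert _csr_pem.startswith(b'-----BEGIN CERTIFICATE REQUEST-----')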
# -*- coding: UTF-8 -*- ############################################# ## (C)opyright by Dirk Holtwick, 2008 ## ## All rights reserved ## ############################################# from webob import Request, Response from webob import exc from paste.urlparser import StaticURLParser from paste.cascade import Cascade # from paste.cgitb_catcher import CgitbMiddleware from paste.registry import RegistryManager from paste.config import ConfigMiddleware #, CONFIG from paste.exceptions.errormiddleware import ErrorMiddleware from paste.util.import_string import import_module # import paste.deploy from pyxer.base import * import sys import logging import string import mimetypes import imp import os import os.path import types import pprint import site import zipimport import logging log = logging.getLogger(__name__) # XXX Needed? # sys.path = [os.getcwd()] + sys.path _counter = 0 # The WSGI application class PyxerApp: def __init__(self, base="public"): self.base = base self.router = Router(base) def __call__(self, environ, start_response): try: global _counter _counter += 1 log.debug("Processing pyxer call number %d", _counter) path = environ["PATH_INFO"] # Mod Python corrections if environ.has_key("SCRIPT_FILENAME"): prefix = environ["SCRIPT_FILENAME"][len(environ['DOCUMENT_ROOT']):] environ["WSGI_PREFIX"] = prefix path = path[len(prefix):] log.debug("Try matching %r", path) obj, vars = self.router.match(path) log.debug("For %r found %r with %r", path, obj, vars) # No matching if obj is None: abort(404) # Set globals if environ.has_key('paste.registry'): environ['paste.registry'].register(request, Request(environ)) environ['paste.registry'].register(response, Response()) environ['paste.registry'].register(c, AttrDict()) if environ.has_key('beaker.session'): environ['paste.registry'].register(session, environ['beaker.session']) else: environ['paste.registry'].register(session, None) environ['paste.registry'].register(config, environ.get("paste.config", {})) request.start_response = start_response # Guess template name name = None if vars["controller"] == "default": if path.endswith("/"): name = "index" else: name = vars["pyxer.match"] elif isinstance(vars["controller"], basestring): name = vars["controller"] # and path request.template_url = None if name is not None: tpath = os.path.join(vars["pyxer.path"], name + ".html") if os.path.isfile(tpath): request.template_url = tpath request.urlvars = vars environ['pyxer.urlvars'] = vars environ['pyxer.urlbase'] = path[:-(len(vars["pyxer.match"]) + len(vars["pyxer.tail"]))] # log.info("******* %r %r %r", path, vars["pyxer.match"], environ['pyxer.urlbase']) return obj() # obj(environ, start_response) # Handle HTTPException except exc.HTTPException, e: return e(environ, start_response) # Sessions available? SessionMiddleware = None # Make WSGI application, wrapping sessions etc. 
def make_app(global_conf={}, **app_conf): import os, sys # Cleanup the Python path (mainly to circumvent the systems SetupTools) sys.path = [path for path in sys.path if ("site-packages" not in path) and ('pyxer' not in path) and ('/Extras/lib/python' not in path)] # Add our local packages folder to the path import site here = app_conf.get('pyxer_here') or os.path.join(os.path.dirname(__file__), os.pardir, os.pardir) if not os.path.isdir(os.path.join(here, 'public')): here = os.getcwd() site_lib = os.path.join(here, 'site-packages') site.addsitedir(here) site.addsitedir(site_lib) # import pkg_resources # import setuptools # log.info(site.__file__) # pprint.pprint(global_conf) log.debug('\n'.join(sys.path)) try: import beaker.middleware global SessionMiddleware SessionMiddleware = beaker.middleware.SessionMiddleware log.debug("Beaker successfully loaded") except ImportError: log.debug("Beaker NOT loaded") conf = AttrDict(pyxer={ "session": "", "debug": False, "root": "public", }) root = os.getcwd() try: import ConfigParser filename = os.path.abspath(global_conf.get("__file__", None)) or os.path.abspath("pyxer.ini") # filename = os.path.abspath("pyxer.ini" ) root = os.path.dirname(filename) cfile = ConfigParser.SafeConfigParser() cfile.read(filename) for section in cfile.sections(): if not conf.has_key(section): conf[section] = AttrDict() try: for name, value in cfile.items(section): conf[section][name] = value except: log.exception("Config items") log.debug("Config: %r", conf) except: log.warning("Config file not found") # Add current directory to sys path # site.addsitedir(root) # Here we expect all data base = os.path.join(root, "public") # app = App(global_conf=None, root="public", path=None, **app_conf) app = PyxerApp() if SessionMiddleware and (conf.get("pyxer.session", "beaker") == "beaker"): log.debug("Beaker sessions") if "google.appengine" in sys.modules: # server = SessionMiddleware(server, type='ext:google', table_name="beaker_session", cookie_expires=False) app = SessionMiddleware(app, type='ext:google', table_name='PyxerSession') else: app = SessionMiddleware(app, type='dbm', data_dir=os.path.join(root, 'cache')) app = RegistryManager(app) app = ConfigMiddleware(app, conf.copy()) # app = CgitbMiddleware(app) if not stage: app = ErrorMiddleware(app, debug=True) #static = PyxerStatic(base) #app = Cascade([app, static]) return app # Paster EGG factory entry def app_factory(global_config, **local_conf): return make_app(global_config) # Serve with Python on board WSGI def serve(opt={}): print "Serving on http://%s:%s" % (opt.host, opt.port) from wsgiref.simple_server import make_server server = make_server(opt.host, int(opt.port), make_app()) server.serve_forever() if __name__ == "__main__": class opt: host = "127.0.0.1" port = 8080 serve(opt)
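# make_app() above reads an optional pyxer.ini with ConfigParser and copies
# every section/option into its conf mapping, so an ini section can override
# the defaults ("session", "debug", "root").  The sketch below isolates that
# parsing step with Python 3's configparser; the ini content is a made-up
# example, not a documented pyxer configuration.
import configparser

_SAMPLE_INI = """
[pyxer]
session = beaker
root = public
"""

_cfile = configparser.ConfigParser()
_cfile.read_string(_SAMPLE_INI)
_conf = {_s: dict(_cfile.items(_s)) for _s in _cfile.sections()}
assert _conf == {'pyxer': {'session': 'beaker', 'root': 'public'}}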
import os


class Streams():

    def __init__(self):
        ''' Constructor. '''
        self.root = "/home/pi/Music"
        self.predefinedStreams = [
            {"name": "Radio ZET", "stream": "http://n-1-11.dcs.redcdn.pl/sc/o2/Eurozet/live/audio.livx", "isRadio": True},
            {"name": "Polskie Radio 24", "stream": "http://stream3.polskieradio.pl:8080", "isRadio": True},
            {"name": "ZET Gold", "stream": "http://zgl01.cdn.eurozet.pl:8506/ZGLHIT.mp3", "isRadio": True},
            {"name": "Meloradio", "stream": "http://mel02.cdn.eurozet.pl:8800/mel-net.mp3", "isRadio": True},
            {"name": "Elton John Radio", "stream": "http://streaming.exclusive.radio/er/eltonjohn/icecast.audio", "isRadio": True},
            {"name": "Madonna Radio", "stream": "http://streaming.exclusive.radio/er/madonna/icecast.audio", "isRadio": True},
            {"name": "Elvis Presley Radio", "stream": "http://streaming.exclusive.radio/er/elvispresley/icecast.audio", "isRadio": True},
            {"name": "Paul McCartney Radio", "stream": "http://streaming.exclusive.radio/er/paulmccartney/icecast.audio", "isRadio": True},
            {"name": "George Michael Radio", "stream": "http://streaming.exclusive.radio/er/georgemichael/icecast.audio", "isRadio": True},
            {"name": "Abba Radio", "stream": "http://streaming.exclusive.radio/er/abba/icecast.audio", "isRadio": True},
            {"name": "Coldplay Radio", "stream": "http://streaming.exclusive.radio/er/coldplay/icecast.audio", "isRadio": True},
            {"name": "Ed Sheeran Radio", "stream": "http://streaming.exclusive.radio/er/edsheeran/icecast.audio", "isRadio": True},
            {"name": "Johnny Cash Radio", "stream": "http://streaming.exclusive.radio/er/johnnycash/icecast.audio", "isRadio": True},
            {"name": "Phil Collins Radio", "stream": "http://streaming.exclusive.radio/er/philcollins/icecast.audio", "isRadio": True},
            {"name": "Scorpions Radio", "stream": "http://streaming.exclusive.radio/er/scorpions/icecast.audio", "isRadio": True},
            {"name": "Beach Boys", "stream": "http://streaming.exclusive.radio/er/beachboys/icecast.audio", "isRadio": True},
            {"name": "Gloria Estefan", "stream": "http://streaming.exclusive.radio/er/gloriaestefan/icecast.audio", "isRadio": True},
            {"name": "Shakira Radio", "stream": "http://streaming.exclusive.radio/er/shakira/icecast.audio", "isRadio": True},
            {"name": "Whitney Houston Radio", "stream": "http://streaming.exclusive.radio/er/whitneyhouston/icecast.audio", "isRadio": True},
            {"name": "Country Radio", "stream": "http://icepool.silvacast.com/COUNTRY108.mp3", "isRadio": True},
            {"name": "Cinemix", "stream": "http://94.23.51.96:8001/;", "isRadio": True},
            {"name": "Soundtracks", "stream": "http://hi5.streamingsoundtracks.com/;", "isRadio": True},
            {"name": "James Bond 007", "stream": "http://stream.laut.fm/007", "isRadio": True},
            {"name": "Radio Mambo", "stream": "http://178.32.139.184:8060/;", "isRadio": True},
            {"name": "Salsa Warriors", "stream": "http://192.99.17.12:6031/;stream/1", "isRadio": True},
            {"name": "Salsa AMS", "stream": "http://82.94.166.107:8067/;stream/1", "isRadio": True},
            {"name": "Fox News", "stream": "http://streaming-ent.shoutcast.com/foxnews", "isRadio": True},
            {"name": "RFI Monde", "stream": "http://live02.rfi.fr/rfimonde-96k.mp3", "isRadio": True},
            {"name": "BBC 1", "stream": "http://bbcmedia.ic.llnwd.net/stream/bbcmedia_radio1_mf_p", "isRadio": True},
            {"name": "BBC 2", "stream": "http://bbcmedia.ic.llnwd.net/stream/bbcmedia_radio2_mf_p", "isRadio": True},
            {"name": "Beatles", "stream": "http://streaming.exclusive.radio/er/beatles/icecast.audio", "isRadio": True},
            {"name": "Celine Dion", "stream": "http://streaming.exclusive.radio/er/celinedion/icecast.audio", "isRadio": True},
            {"name": "Dire Straits", "stream": "http://streaming.exclusive.radio/er/direstraits/icecast.audio", "isRadio": True},
            {"name": "Tears for Fears", "stream": "http://streaming.exclusive.radio/er/tearsforfears/icecast.audio", "isRadio": True},
            {"name": "Moby", "stream": "http://streaming.exclusive.radio/er/moby/icecast.audio", "isRadio": True}
            # {"name": "Christmas 1", "stream": "http://node-05.zeno.fm/aae9yc3ygnruv?rj-ttl=5&rj-tok=AAABdo8S4DcAoTqG00pxZjP_MQ", "isRadio": True},
            # {"name": "Christmas 2", "stream": "http://tuner.m1.fm/M1-XMAS.mp3", "isRadio": True},
            # {"name": "Koledy 1", "stream": "http://31.192.216.7/KOLEDY", "isRadio": True},
            # {"name": "Koledy 2", "stream": "http://zt04.cdn.eurozet.pl/ZETKOL.mp3", "isRadio": True}
        ]

    def get(self):
        for root, dirs, files in os.walk(self.root):
            if len(dirs):
                extraStreams = []
                for dir in dirs:
                    extraStreams.append({"name": dir, "stream": self.root + "/" + dir, "isRadio": False})
                allStreams = extraStreams + self.predefinedStreams
                allStreams.sort(key=lambda stream: (stream["isRadio"], stream["name"]), reverse=False)
                return allStreams
            else:
                return self.predefinedStreams
# -*- coding: utf-8 -*- # # Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from .fetchers import NUPermissionsFetcher from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from .fetchers import NUVNFInterfaceDescriptorsFetcher from bambou import NURESTObject class NUVNFDescriptor(NURESTObject): """ Represents a VNFDescriptor in the VSD Notes: The behavioral and deployment information of a VNF is defined in the VNF descriptor template. The template is based on the libvirt domain XML and is on-boarded in a VNF catalog. The resource requirements for CPU, memory and storage are defined in this screen and the rest of the template is inherited from the VNF Metadata object. """ __rest_name__ = "vnfdescriptor" __resource_name__ = "vnfdescriptors" ## Constants CONST_TYPE_WAN_OPT = "WAN_OPT" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_TYPE_FIREWALL = "FIREWALL" CONST_TYPE_THREAT_PREVENTION = "THREAT_PREVENTION" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" def __init__(self, **kwargs): """ Initializes a VNFDescriptor instance Notes: You can specify all parameters while calling this methods. 
A special argument named `data` will enable you to load the object from a Python dictionary Examples: >>> vnfdescriptor = NUVNFDescriptor(id=u'xxxx-xxx-xxx-xxx', name=u'VNFDescriptor') >>> vnfdescriptor = NUVNFDescriptor(data=my_dict) """ super(NUVNFDescriptor, self).__init__() # Read/Write Attributes self._cpu_count = None self._name = None self._memory_mb = None self._vendor = None self._description = None self._metadata_id = None self._visible = None self._embedded_metadata = None self._entity_scope = None self._associated_vnf_threshold_policy_id = None self._storage_gb = None self._external_id = None self._type = None self.expose_attribute(local_name="cpu_count", remote_name="CPUCount", attribute_type=int, is_required=True, is_unique=False) self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="memory_mb", remote_name="memoryMB", attribute_type=int, is_required=True, is_unique=False) self.expose_attribute(local_name="vendor", remote_name="vendor", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="metadata_id", remote_name="metadataID", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="visible", remote_name="visible", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="associated_vnf_threshold_policy_id", remote_name="associatedVNFThresholdPolicyID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="storage_gb", remote_name="storageGB", attribute_type=int, is_required=True, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=False, is_unique=False, choices=[u'FIREWALL', u'THREAT_PREVENTION', u'WAN_OPT']) # Fetchers self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.vnf_interface_descriptors = NUVNFInterfaceDescriptorsFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) # Properties @property def cpu_count(self): """ Get cpu_count value. Notes: Number of CPUs to be allocated VNF instance when deployed This attribute is named `CPUCount` in VSD API. """ return self._cpu_count @cpu_count.setter def cpu_count(self, value): """ Set cpu_count value. Notes: Number of CPUs to be allocated VNF instance when deployed This attribute is named `CPUCount` in VSD API. """ self._cpu_count = value @property def name(self): """ Get name value. Notes: Name of the VNF Descriptor """ return self._name @name.setter def name(self, value): """ Set name value. 
Notes: Name of the VNF Descriptor """ self._name = value @property def memory_mb(self): """ Get memory_mb value. Notes: Memory (in MB) to be allocated for VNF instance when deployed This attribute is named `memoryMB` in VSD API. """ return self._memory_mb @memory_mb.setter def memory_mb(self, value): """ Set memory_mb value. Notes: Memory (in MB) to be allocated for VNF instance when deployed This attribute is named `memoryMB` in VSD API. """ self._memory_mb = value @property def vendor(self): """ Get vendor value. Notes: The vendor generating this VNF Descriptor """ return self._vendor @vendor.setter def vendor(self, value): """ Set vendor value. Notes: The vendor generating this VNF Descriptor """ self._vendor = value @property def description(self): """ Get description value. Notes: A description of the VNF Descriptor """ return self._description @description.setter def description(self, value): """ Set description value. Notes: A description of the VNF Descriptor """ self._description = value @property def metadata_id(self): """ Get metadata_id value. Notes: Id of referenced Metadata Object This attribute is named `metadataID` in VSD API. """ return self._metadata_id @metadata_id.setter def metadata_id(self, value): """ Set metadata_id value. Notes: Id of referenced Metadata Object This attribute is named `metadataID` in VSD API. """ self._metadata_id = value @property def visible(self): """ Get visible value. Notes: Controls if descriptor visible in catalog to create new VNF """ return self._visible @visible.setter def visible(self, value): """ Set visible value. Notes: Controls if descriptor visible in catalog to create new VNF """ self._visible = value @property def embedded_metadata(self): """ Get embedded_metadata value. Notes: Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration. This attribute is named `embeddedMetadata` in VSD API. """ return self._embedded_metadata @embedded_metadata.setter def embedded_metadata(self, value): """ Set embedded_metadata value. Notes: Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration. This attribute is named `embeddedMetadata` in VSD API. """ self._embedded_metadata = value @property def entity_scope(self): """ Get entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ return self._entity_scope @entity_scope.setter def entity_scope(self, value): """ Set entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ self._entity_scope = value @property def associated_vnf_threshold_policy_id(self): """ Get associated_vnf_threshold_policy_id value. Notes: The Id of referenced VNF threshold policy This attribute is named `associatedVNFThresholdPolicyID` in VSD API. """ return self._associated_vnf_threshold_policy_id @associated_vnf_threshold_policy_id.setter def associated_vnf_threshold_policy_id(self, value): """ Set associated_vnf_threshold_policy_id value. 
Notes: The Id of referenced VNF threshold policy This attribute is named `associatedVNFThresholdPolicyID` in VSD API. """ self._associated_vnf_threshold_policy_id = value @property def storage_gb(self): """ Get storage_gb value. Notes: Disk storage (in GB) to be allocated VNF instance when deployed This attribute is named `storageGB` in VSD API. """ return self._storage_gb @storage_gb.setter def storage_gb(self, value): """ Set storage_gb value. Notes: Disk storage (in GB) to be allocated VNF instance when deployed This attribute is named `storageGB` in VSD API. """ self._storage_gb = value @property def external_id(self): """ Get external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API. """ return self._external_id @external_id.setter def external_id(self, value): """ Set external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API. """ self._external_id = value @property def type(self): """ Get type value. Notes: Type of virtual network function """ return self._type @type.setter def type(self, value): """ Set type value. Notes: Type of virtual network function """ self._type = value
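# --- Illustrative usage sketch; not part of the generated class above. ---
# The keyword names mirror the expose_attribute() declarations and the type constant
# is defined on the class; the "enterprise" parent object and the create_child()/get()
# calls follow the usual bambou pattern and are assumptions here, not something this
# file provides.
#
#     descriptor = NUVNFDescriptor(name=u'fw-descriptor',
#                                  cpu_count=2,
#                                  memory_mb=4096,
#                                  storage_gb=20,
#                                  metadata_id=u'xxxx-xxx-xxx-xxx',
#                                  type=NUVNFDescriptor.CONST_TYPE_FIREWALL)
#     enterprise.create_child(descriptor)
#     interfaces = descriptor.vnf_interface_descriptors.get()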
import os import sys from optparse import OptionParser import imp import django from django.core.management.base import BaseCommand, CommandError, handle_default_options # For backwards compatibility: get_version() used to be in this module. get_version = django.get_version # A cache of loaded commands, so that call_command # doesn't have to reload every time it's called. _commands = None def find_commands(management_dir): """ Given a path to a management directory, returns a list of all the command names that are available. Returns an empty list if no commands are defined. """ command_dir = os.path.join(management_dir, 'commands') try: return [f[:-3] for f in os.listdir(command_dir) if not f.startswith('_') and f.endswith('.py')] except OSError: return [] def find_management_module(app_name): """ Determines the path to the management module for the given app_name, without actually importing the application or the management module. Raises ImportError if the management module cannot be found for any reason. """ parts = app_name.split('.') parts.append('management') parts.reverse() part = parts.pop() path = None # When using manage.py, the project module is added to the path, # loaded, then removed from the path. This means that # testproject.testapp.models can be loaded in future, even if # testproject isn't in the path. When looking for the management # module, we need look for the case where the project name is part # of the app_name but the project directory itself isn't on the path. try: f, path, descr = imp.find_module(part,path) except ImportError,e: if os.path.basename(os.getcwd()) != part: raise e while parts: part = parts.pop() f, path, descr = imp.find_module(part, path and [path] or None) return path def load_command_class(app_name, name): """ Given a command name and an application name, returns the Command class instance. All errors raised by the import process (ImportError, AttributeError) are allowed to propagate. """ return getattr(__import__('%s.management.commands.%s' % (app_name, name), {}, {}, ['Command']), 'Command')() def get_commands(): """ Returns a dictionary mapping command names to their callback applications. This works by looking for a management.commands package in django.core, and in each installed application -- if a commands package exists, all commands in that package are registered. Core commands are always included. If a settings module has been specified, user-defined commands will also be included, the startproject command will be disabled, and the startapp command will be modified to use the directory in which the settings module appears. The dictionary is in the format {command_name: app_name}. Key-value pairs from this dictionary can then be used in calls to load_command_class(app_name, command_name) If a specific version of a command must be loaded (e.g., with the startapp command), the instantiated module can be placed in the dictionary in place of the application name. The dictionary is cached on the first call and reused on subsequent calls. 
""" global _commands if _commands is None: _commands = dict([(name, 'django.core') for name in find_commands(__path__[0])]) # Find the installed apps try: from django.conf import settings apps = settings.INSTALLED_APPS except (AttributeError, EnvironmentError, ImportError): apps = [] # Find the project directory try: from django.conf import settings project_directory = setup_environ( __import__( settings.SETTINGS_MODULE, {}, {}, (settings.SETTINGS_MODULE.split(".")[-1],) ), settings.SETTINGS_MODULE ) except (AttributeError, EnvironmentError, ImportError): project_directory = None # Find and load the management module for each installed app. for app_name in apps: try: path = find_management_module(app_name) _commands.update(dict([(name, app_name) for name in find_commands(path)])) except ImportError: pass # No management module - ignore this app if project_directory: # Remove the "startproject" command from self.commands, because # that's a django-admin.py command, not a manage.py command. del _commands['startproject'] # Override the startapp command so that it always uses the # project_directory, not the current working directory # (which is default). from django.core.management.commands.startapp import ProjectCommand _commands['startapp'] = ProjectCommand(project_directory) return _commands def call_command(name, *args, **options): """ Calls the given command, with the given options and args/kwargs. This is the primary API you should use for calling specific commands. Some examples: call_command('syncdb') call_command('shell', plain=True) call_command('sqlall', 'myapp') """ try: app_name = get_commands()[name] if isinstance(app_name, BaseCommand): # If the command is already loaded, use it directly. klass = app_name else: klass = load_command_class(app_name, name) except KeyError: raise CommandError, "Unknown command: %r" % name return klass.execute(*args, **options) class LaxOptionParser(OptionParser): """ An option parser that doesn't raise any errors on unknown options. This is needed because the --settings and --pythonpath options affect the commands (and thus the options) that are available to the user. """ def error(self, msg): pass def print_help(self): """Output nothing. The lax options are included in the normal option parser, so under normal usage, we don't need to print the lax options. """ pass def print_lax_help(self): """Output the basic options available to every command. This just redirects to the default print_help() behaviour. """ OptionParser.print_help(self) def _process_args(self, largs, rargs, values): """ Overrides OptionParser._process_args to exclusively handle default options and ignore args and other options. This overrides the behavior of the super class, which stop parsing at the first unrecognized option. """ while rargs: arg = rargs[0] try: if arg[0:2] == "--" and len(arg) > 2: # process a single long option (possibly with value(s)) # the superclass code pops the arg off rargs self._process_long_opt(rargs, values) elif arg[:1] == "-" and len(arg) > 1: # process a cluster of short options (possibly with # value(s) for the last one only) # the superclass code pops the arg off rargs self._process_short_opts(rargs, values) else: # it's either a non-default option or an arg # either way, add it to the args list so we can keep # dealing with options del rargs[0] raise Exception except: largs.append(arg) class ManagementUtility(object): """ Encapsulates the logic of the django-admin.py and manage.py utilities. 
A ManagementUtility has a number of commands, which can be manipulated by editing the self.commands dictionary. """ def __init__(self, argv=None): self.argv = argv or sys.argv[:] self.prog_name = os.path.basename(self.argv[0]) def main_help_text(self): """ Returns the script's main help text, as a string. """ usage = ['',"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,''] usage.append('Available subcommands:') commands = get_commands().keys() commands.sort() for cmd in commands: usage.append(' %s' % cmd) return '\n'.join(usage) def fetch_command(self, subcommand): """ Tries to fetch the given subcommand, printing a message with the appropriate command called from the command line (usually "django-admin.py" or "manage.py") if it can't be found. """ try: app_name = get_commands()[subcommand] if isinstance(app_name, BaseCommand): # If the command is already loaded, use it directly. klass = app_name else: klass = load_command_class(app_name, subcommand) except KeyError: sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" % \ (subcommand, self.prog_name)) sys.exit(1) return klass def execute(self): """ Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it. """ # Preprocess options to extract --settings and --pythonpath. # These options could affect the commands that are available, so they # must be processed early. parser = LaxOptionParser(usage="%prog subcommand [options] [args]", version=get_version(), option_list=BaseCommand.option_list) try: options, args = parser.parse_args(self.argv) handle_default_options(options) except: pass # Ignore any option errors at this point. try: subcommand = self.argv[1] except IndexError: sys.stderr.write("Type '%s help' for usage.\n" % self.prog_name) sys.exit(1) if subcommand == 'help': if len(args) > 2: self.fetch_command(args[2]).print_help(self.prog_name, args[2]) else: parser.print_lax_help() sys.stderr.write(self.main_help_text() + '\n') sys.exit(1) # Special-cases: We want 'django-admin.py --version' and # 'django-admin.py --help' to work, for backwards compatibility. elif self.argv[1:] == ['--version']: # LaxOptionParser already takes care of printing the version. pass elif self.argv[1:] == ['--help']: parser.print_lax_help() sys.stderr.write(self.main_help_text() + '\n') else: self.fetch_command(subcommand).run_from_argv(self.argv) def setup_environ(settings_mod, original_settings_path=None): """ Configures the runtime environment. This can also be used by external scripts wanting to set up a similar environment to manage.py. Returns the project directory (assuming the passed settings module is directly in the project directory). The "original_settings_path" parameter is optional, but recommended, since trying to work out the original path from the module can be problematic. """ # Add this project to sys.path so that it's importable in the conventional # way. For example, if this file (manage.py) lives in a directory # "myproject", this code would add "/path/to/myproject" to sys.path. project_directory, settings_filename = os.path.split(settings_mod.__file__) if project_directory == os.curdir or not project_directory: project_directory = os.getcwd() project_name = os.path.basename(project_directory) # Strip filename suffix to get the module name. 
settings_name = os.path.splitext(settings_filename)[0] # Strip $py for Jython compiled files (like settings$py.class) if settings_name.endswith("$py"): settings_name = settings_name[:-3] sys.path.append(os.path.join(project_directory, os.pardir)) project_module = __import__(project_name, {}, {}, ['']) sys.path.pop() # Set DJANGO_SETTINGS_MODULE appropriately. if original_settings_path: os.environ['DJANGO_SETTINGS_MODULE'] = original_settings_path else: os.environ['DJANGO_SETTINGS_MODULE'] = '%s.%s' % (project_name, settings_name) return project_directory def execute_from_command_line(argv=None): """ A simple method that runs a ManagementUtility. """ utility = ManagementUtility(argv) utility.execute() def execute_manager(settings_mod, argv=None): """ Like execute_from_command_line(), but for use by manage.py, a project-specific django-admin.py utility. """ setup_environ(settings_mod) utility = ManagementUtility(argv) utility.execute()
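# --- Illustrative sketch of the command layout discovered above; not part of this module. ---
# find_commands() lists the modules under <app>/management/commands/, and
# load_command_class() imports the Command class from each one. The app name
# ("myapp") and command name ("hello") below are made up for illustration.
#
#     # myapp/management/commands/hello.py
#     from django.core.management.base import BaseCommand
#
#     class Command(BaseCommand):
#         help = "Prints a greeting."
#
#         def handle(self, *args, **options):
#             print "hello"
#
# With that file in place, get_commands() maps "hello" to "myapp", and either
# call_command('hello') or "./manage.py hello" ends up in Command.handle().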
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2014-2015 jeffZhuo # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from __future__ import with_statement from __future__ import nested_scopes from __future__ import generators from traceback import print_exception from server import SERVER from server.err_code import ERR_SUCCESS, ERR_NULL_REQUEST, ERR_INTERNAL_EXCEPTION, ERR_100_CONTINUE_REQUEST from server.err_code import get_err_msg from server.header import RequestHeaders from server.response import WsgiResponse from server.exception.request_exception import ReadBlankException, RequestContinueException import sys from server.log import logging try: import cStringIO as StringIO except (Exception, ): import StringIO class HTTPRequest(object): """ The request form the socket, handle the request until the connections close. """ MAX_URL_SIZE = 65537 MAX_BODY_SIZE = 10485760 # 10MB = 1024 * 1024 * 10 def __init__(self, server, rfile, wfile, addr): """ :param server: the server obj :param rfile: the connection read file :param wfile: the connection write file :param addr: the remote address """ self.server = server self.rfile = rfile self.wfile = wfile self.addr = addr self.commond = None self.path = None self.query = None self.version = None # request header self.headers = None self.content_length = None # request body self.body = None def handle_one_request(self): err = ERR_SUCCESS err_msg = get_err_msg(ERR_SUCCESS) response = None try: self.parse_request() response = self.handle_request() except ReadBlankException as ex: # Get blank request err = ERR_NULL_REQUEST err_msg = get_err_msg(err) except RequestContinueException as ex: # Get 100 continue request err = ERR_100_CONTINUE_REQUEST err_msg = get_err_msg(err) response = WsgiResponse.make_response(100) except Exception as ex: # print log here logging.error("Exception while parse request: %s", ex) err = ERR_INTERNAL_EXCEPTION err_msg = get_err_msg(err) response = WsgiResponse.make_response(500) finally: return err, err_msg, response def handle_100_continue(self): """ Process after get 100 continue header. 
Read body from steam :return: """ err = ERR_SUCCESS err_msg = get_err_msg(ERR_SUCCESS) response = None self.body = HTTPRequest.__parse_body(self.rfile, self.content_length) logging.debug("request body: %s", self.body.getvalue()) try: self.parse_request() response = self.handle_request() except Exception as ex: # print log here logging.error("Exception while parse request: %s", ex) err = ERR_INTERNAL_EXCEPTION err_msg = get_err_msg(err) response = WsgiResponse.make_response(500) finally: return err, err_msg, response def parse_request(self): """ Parse the http struct from 'rfile' """ start_line = self.rfile.readline(HTTPRequest.MAX_URL_SIZE) start_line = start_line.replace('\r\n', '\n').replace('\r', '\n') logging.debug("start_line: %s", start_line) # Check if read blank string if start_line == "": raise ReadBlankException("Get blank data from client socket") self.commond, self.path, self.query, self.version = HTTPRequest.__parse_startline(start_line) self.headers = RequestHeaders(HTTPRequest.__parse_header(self.rfile)) logging.debug("header: %s", self.headers) self.content_length = int(self.headers.get("Content-Length", 0)) # Process 'Expect' header with value "100-continue" if self.headers.get("Expect") is not None and self.headers.get("Expect") == "100 Continue": # self.send_simple_response(100) raise RequestContinueException("Get 100 continue request") self.body = HTTPRequest.__parse_body(self.rfile, self.content_length) logging.debug("request body: %s", self.body.getvalue()) def handle_request(self): """ The event processing here :return: """ raise NotImplementedError @staticmethod def __parse_startline(start_line): """ Parse the start line, to get the commond, path, query, http version :param start_line: :return: (commond, path, query, http version) """ commond, path, version = start_line.split(' ') if '?' 
in path: path, query = path.split('?', 1) else: path, query = path, '' return commond, path, query, version @staticmethod def __parse_header(rfile): """ Parse the header after parse start line :param rfile: readable file(has been parse start line) :return: headers """ headers = {} while True: one_line = rfile.readline() if not one_line: break if one_line == '\r\n' or one_line == '\n': break header, value = one_line.split(':', 1) a = '' headers[header] = value.strip('\r\n').strip('\n').strip() return headers @staticmethod def __parse_body(rfile, content_length=0): body = StringIO.StringIO() if content_length != 0 and content_length <= HTTPRequest.MAX_BODY_SIZE: body.write(rfile.read(content_length)) return body def write(self, data): self.__write(data) def flush(self): self.__flush() def __write(self, data): self.wfile.write(data) self._write = self.wfile.write def __flush(self): self.wfile.flush() self._flush = self.wfile.flush def close(self): """ Close this request :return: """ self.rfile.close() self.wfile.close() # def send_simple_response(self, status, header=[('Content-Type', 'text/plain',), ]): # """ # Return a simple response to client # :param status: the status code # :param header: http header # :return: # """ # response = SimpleResponse(status, self.version, header, "") # self.__write(response) # self.__flush() class WSGIRequest(HTTPRequest): def __init__(self, server, rfile, wfile, addr): super(WSGIRequest, self).__init__(server, rfile, wfile, addr) self.application = server.application self.environ = None self.stderr = None self.result = None self.response_status = None self.response_headers = None def handle_request(self): """ Call the function run() to invoke wsgi application :return: """ response = self.run() return response def run(self): try: environ = self.getenv() self.result = self.application(environ, self.start_response) response = WsgiResponse(self.response_status, self.response_headers, self.result) except (Exception, ) as ex: logging.error('Exception in run(): %s', ex) response = WsgiResponse.make_response(500) finally: return response def getenv(self): """ Get the environ """ environ = self.server.base_environ.copy() environ["wsgi.version"] = (1, 0) environ["wsgi.input"] = self.body environ["wsgi.error"] = self.wfile environ["wsgi.multithread"] = False environ["wsgi.multiprocess"] = False environ["wsgi.run_once"] = False if environ.get("HTTPS", "off") in ("on", "1"): environ["wsgi.url_scheme"] = "https" else: environ["wsgi.url_scheme"] = "http" environ['SERVER_SOFTWARE'] = SERVER # CGI environ self.set_cgi_environ(environ) self.environ = environ return environ def set_cgi_environ(self, environ): """ Setting the cgi environ """ # Request http version environ['SERVER_PROTOCOL'] = self.version environ['REQUEST_METHOD'] = self.commond environ['PATH_INFO'] = self.path environ['QUERY_STRING'] = self.query environ['CONTENT_TYPE'] = self.headers.get('Content-Type', 'text/plain') length = self.headers.get('Content-Length') if length: environ["CONTENT_LENGTH"] = length for k, v in self.headers.items(): k = k.replace('-', '_').upper() v = v.strip() if k in environ: continue if 'HTTP_' + k in environ: environ['HTTP_' + k] += ',' + v else: environ['HTTP_' + k] = v def start_response(self, status, response_headers, excInfo=None): if excInfo: try: raise(excInfo[0], excInfo[1], excInfo[2]) finally: excInfo = None self.response_status = status logging.debug("response header: %s", response_headers) self.response_headers = response_headers return self.write # def 
handle_error(self): # """ Send error output to client if possible """ # self.log_exception(sys.exc_info()) # self.result = self.error_output(self.environ, self.start_response) # def log_exception(self, exc_info): # """ # Log the exc_info to server log by stderr # Can override this method to change the format # :param exc_info: # """ # try: # print_exception(exc_info[0], exc_info[1], exc_info[2], None, self.stderr) # finally: # exc_info = None
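# --- Illustrative sketch; not part of this module. ---
# WSGIRequest.run() hands the environ built in getenv() to self.application together
# with start_response(), so any standard PEP 333 application works here. A minimal
# app (the name "demo_app" is made up for illustration) looks like this:
#
#     def demo_app(environ, start_response):
#         body = "Hello from " + environ.get("PATH_INFO", "/") + "\n"
#         start_response("200 OK", [("Content-Type", "text/plain"),
#                                   ("Content-Length", str(len(body)))])
#         return [body]
#
# A server built on this module would expose it as server.application so that
# WSGIRequest picks it up in __init__().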
# Copyright 2012 Josh Durgin # Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import math import os import tempfile import mock from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _ import cinder.image.glance from cinder.image import image_utils from cinder import objects from cinder import test from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test_volume from cinder.tests.unit import utils from cinder.volume import configuration as conf import cinder.volume.drivers.rbd as driver from cinder.volume.flows.manager import create_volume # This is used to collect raised exceptions so that tests may check what was # raised. # NOTE: this must be initialised in test setUp(). RAISED_EXCEPTIONS = [] class MockException(Exception): def __init__(self, *args, **kwargs): RAISED_EXCEPTIONS.append(self.__class__) class MockImageNotFoundException(MockException): """Used as mock for rbd.ImageNotFound.""" class MockImageBusyException(MockException): """Used as mock for rbd.ImageBusy.""" class MockImageExistsException(MockException): """Used as mock for rbd.ImageExists.""" def common_mocks(f): """Decorator to set mocks common to all tests. The point of doing these mocks here is so that we don't accidentally set mocks that can't/don't get unset. 
""" def _common_inner_inner1(inst, *args, **kwargs): @mock.patch('time.sleep') @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') @mock.patch('cinder.volume.drivers.rbd.RADOSClient') @mock.patch('cinder.backup.drivers.ceph.rbd') @mock.patch('cinder.backup.drivers.ceph.rados') def _common_inner_inner2(mock_rados, mock_rbd, mock_client, mock_proxy, mock_sleep): inst.mock_rbd = mock_rbd inst.mock_rados = mock_rados inst.mock_client = mock_client inst.mock_proxy = mock_proxy inst.mock_sleep = mock_sleep inst.mock_rbd.RBD.Error = Exception inst.mock_rados.Error = Exception inst.mock_rbd.ImageBusy = MockImageBusyException inst.mock_rbd.ImageNotFound = MockImageNotFoundException inst.mock_rbd.ImageExists = MockImageExistsException inst.mock_rbd.InvalidArgument = MockImageNotFoundException inst.driver.rbd = inst.mock_rbd inst.driver.rados = inst.mock_rados return f(inst, *args, **kwargs) return _common_inner_inner2() return _common_inner_inner1 CEPH_MON_DUMP = """dumped monmap epoch 1 { "epoch": 1, "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa", "modified": "2013-05-22 17:44:56.343618", "created": "2013-05-22 17:44:56.343618", "mons": [ { "rank": 0, "name": "a", "addr": "[::1]:6789\/0"}, { "rank": 1, "name": "b", "addr": "[::1]:6790\/0"}, { "rank": 2, "name": "c", "addr": "[::1]:6791\/0"}, { "rank": 3, "name": "d", "addr": "127.0.0.1:6792\/0"}, { "rank": 4, "name": "e", "addr": "example.com:6791\/0"}], "quorum": [ 0, 1, 2]} """ @ddt.ddt class RBDTestCase(test.TestCase): def setUp(self): global RAISED_EXCEPTIONS RAISED_EXCEPTIONS = [] super(RBDTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.volume_tmp_dir = None self.cfg.image_conversion_dir = None self.cfg.rbd_cluster_name = 'nondefault' self.cfg.rbd_pool = 'rbd' self.cfg.rbd_ceph_conf = None self.cfg.rbd_secret_uuid = None self.cfg.rbd_user = None self.cfg.volume_dd_blocksize = '1M' self.cfg.rbd_store_chunk_size = 4 mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.driver = driver.RBDDriver(execute=mock_exec, configuration=self.cfg) self.driver.set_initialized() self.context = context.get_admin_context() self.volume_a = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000a', 'id': '4c39c3c7-168f-4b32-b585-77f1b3bf0a38', 'size': 10}) self.volume_b = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000b', 'id': '0c7d1f44-5a06-403f-bb82-ae7ad0d693a6', 'size': 10}) self.snapshot = fake_snapshot.fake_snapshot_obj( self.context, name='snapshot-0000000a') @ddt.data({'cluster_name': None, 'pool_name': 'rbd'}, {'cluster_name': 'volumes', 'pool_name': None}) @ddt.unpack def test_min_config(self, cluster_name, pool_name): self.cfg.rbd_cluster_name = cluster_name self.cfg.rbd_pool = pool_name with mock.patch('cinder.volume.drivers.rbd.rados'): self.assertRaises(exception.InvalidConfigurationValue, self.driver.check_for_setup_error) @common_mocks def test_create_volume(self): client = self.mock_client.return_value client.__enter__.return_value = client self.driver.create_volume(self.volume_a) chunk_size = self.cfg.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) args = [client.ioctx, str(self.volume_a.name), self.volume_a.size * units.Gi, order] kwargs = {'old_format': False, 'features': client.features} self.mock_rbd.RBD.return_value.create.assert_called_once_with( *args, **kwargs) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) @common_mocks def test_manage_existing_get_size(self): with 
mock.patch.object(self.driver.rbd.Image(), 'size') as \ mock_rbd_image_size: with mock.patch.object(self.driver.rbd.Image(), 'close') \ as mock_rbd_image_close: mock_rbd_image_size.return_value = 2 * units.Gi existing_ref = {'source-name': self.volume_a.name} return_size = self.driver.manage_existing_get_size( self.volume_a, existing_ref) self.assertEqual(2, return_size) mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() @common_mocks def test_manage_existing_get_invalid_size(self): with mock.patch.object(self.driver.rbd.Image(), 'size') as \ mock_rbd_image_size: with mock.patch.object(self.driver.rbd.Image(), 'close') \ as mock_rbd_image_close: mock_rbd_image_size.return_value = 'abcd' existing_ref = {'source-name': self.volume_a.name} self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_get_size, self.volume_a, existing_ref) mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() @common_mocks def test_manage_existing(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \ mock_rbd_image_rename: exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} mock_rbd_image_rename.return_value = 0 self.driver.manage_existing(self.volume_a, existing_ref) mock_rbd_image_rename.assert_called_with( client.ioctx, exist_volume, self.volume_a.name) @common_mocks def test_manage_existing_with_exist_rbd_image(self): client = self.mock_client.return_value client.__enter__.return_value = client self.mock_rbd.RBD.return_value.rename.side_effect = ( MockImageExistsException) exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} self.assertRaises(self.mock_rbd.ImageExists, self.driver.manage_existing, self.volume_a, existing_ref) # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageExists]) @common_mocks def test_delete_backup_snaps(self): self.driver.rbd.Image.remove_snap = mock.Mock() with mock.patch.object(self.driver, '_get_backup_snaps') as \ mock_get_backup_snaps: mock_get_backup_snaps.return_value = [{'name': 'snap1'}] rbd_image = self.driver.rbd.Image() self.driver._delete_backup_snaps(rbd_image) mock_get_backup_snaps.assert_called_once_with(rbd_image) self.assertTrue( self.driver.rbd.Image.return_value.remove_snap.called) @common_mocks def test_delete_volume(self): client = self.mock_client.return_value self.driver.rbd.Image.return_value.list_snaps.return_value = [] with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: mock_get_clone_info.return_value = (None, None, None) self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( self.mock_rbd.Image.return_value, self.volume_a.name, None) (self.driver.rbd.Image.return_value .list_snaps.assert_called_once_with()) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( self.mock_rbd.Image.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.driver.rbd.RBD.return_value.remove.call_count) @common_mocks def delete_volume_not_found(self): self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound self.assertIsNone(self.driver.delete_volume(self.volume_a)) 
self.mock_rbd.Image.assert_called_once_with() # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks def test_delete_busy_volume(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( self.mock_rbd.ImageBusy) with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = (None, None, None) with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: with mock.patch.object(driver, 'RADOSClient') as \ mock_rados_client: self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, self.volume_a) mock_get_clone_info.assert_called_once_with( self.mock_rbd.Image.return_value, self.volume_a.name, None) (self.mock_rbd.Image.return_value.list_snaps .assert_called_once_with()) mock_rados_client.assert_called_once_with(self.driver) mock_delete_backup_snaps.assert_called_once_with( self.mock_rbd.Image.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 3, self.mock_rbd.RBD.return_value.remove.call_count) self.assertEqual(3, len(RAISED_EXCEPTIONS)) # Make sure the exception was raised self.assertIn(self.mock_rbd.ImageBusy, RAISED_EXCEPTIONS) @common_mocks def test_delete_volume_not_found(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( self.mock_rbd.ImageNotFound) with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = (None, None, None) with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: with mock.patch.object(driver, 'RADOSClient') as \ mock_rados_client: self.assertIsNone(self.driver.delete_volume(self.volume_a)) mock_get_clone_info.assert_called_once_with( self.mock_rbd.Image.return_value, self.volume_a.name, None) (self.mock_rbd.Image.return_value.list_snaps .assert_called_once_with()) mock_rados_client.assert_called_once_with(self.driver) mock_delete_backup_snaps.assert_called_once_with( self.mock_rbd.Image.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.mock_rbd.RBD.return_value.remove.call_count) # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_create_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.create_snapshot(self.snapshot) args = [str(self.snapshot.name)] proxy.create_snap.assert_called_with(*args) proxy.protect_snap.assert_called_with(*args) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.delete_snapshot(self.snapshot) proxy.remove_snap.assert_called_with(self.snapshot.name) proxy.unprotect_snap.assert_called_with(self.snapshot.name) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_notfound_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = ( self.mock_rbd.ImageNotFound) 
self.driver.delete_snapshot(self.snapshot) proxy.remove_snap.assert_called_with(self.snapshot.name) proxy.unprotect_snap.assert_called_with(self.snapshot.name) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_unprotected_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = self.mock_rbd.InvalidArgument self.driver.delete_snapshot(self.snapshot) self.assertTrue(proxy.unprotect_snap.called) self.assertTrue(proxy.remove_snap.called) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_busy_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = ( self.mock_rbd.ImageBusy) with mock.patch.object(self.driver, '_get_children_info') as \ mock_get_children_info: mock_get_children_info.return_value = [('pool', 'volume2')] with mock.patch.object(driver, 'LOG') as \ mock_log: self.assertRaises(exception.SnapshotIsBusy, self.driver.delete_snapshot, self.snapshot) mock_get_children_info.assert_called_once_with( proxy, self.snapshot.name) self.assertTrue(mock_log.info.called) self.assertTrue(proxy.unprotect_snap.called) self.assertFalse(proxy.remove_snap.called) @common_mocks def test_get_children_info(self): volume = self.mock_proxy volume.set_snap = mock.Mock() volume.list_children = mock.Mock() list_children = [('pool', 'volume2')] volume.list_children.return_value = list_children info = self.driver._get_children_info(volume, self.snapshot['name']) self.assertEqual(list_children, info) @common_mocks def test_get_clone_info(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) volume.parent_info.return_value = parent_info info = self.driver._get_clone_info(volume, self.volume_a.name) self.assertEqual(parent_info, info) self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once_with() @common_mocks def test_get_clone_info_w_snap(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) volume.parent_info.return_value = parent_info snapshot = self.mock_rbd.ImageSnapshot() info = self.driver._get_clone_info(volume, self.volume_a.name, snap=snapshot) self.assertEqual(parent_info, info) self.assertEqual(2, volume.set_snap.call_count) volume.parent_info.assert_called_once_with() @common_mocks def test_get_clone_info_w_exception(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() volume.parent_info.side_effect = self.mock_rbd.ImageNotFound snapshot = self.mock_rbd.ImageSnapshot() info = self.driver._get_clone_info(volume, self.volume_a.name, snap=snapshot) self.assertEqual((None, None, None), info) self.assertEqual(2, volume.set_snap.call_count) volume.parent_info.assert_called_once_with() # Make sure the exception was raised self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound]) @common_mocks def test_get_clone_info_deleted_volume(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) volume.parent_info.return_value = parent_info info = self.driver._get_clone_info(volume, "%s.deleted" % 
(self.volume_a.name)) self.assertEqual(parent_info, info) self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once_with() @common_mocks def test_create_cloned_volume_same_size(self): self.cfg.rbd_max_clone_depth = 2 with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required with mock.patch.object(self.driver, '_resize') as mock_resize: mock_get_clone_depth.return_value = 1 self.driver.create_cloned_volume(self.volume_b, self.volume_a) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) self.mock_rbd.Image.return_value.close \ .assert_called_once_with() self.assertTrue(mock_get_clone_depth.called) self.assertEqual( 0, mock_resize.call_count) @common_mocks def test_create_cloned_volume_different_size(self): self.cfg.rbd_max_clone_depth = 2 with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required with mock.patch.object(self.driver, '_resize') as mock_resize: mock_get_clone_depth.return_value = 1 self.volume_b.size = 20 self.driver.create_cloned_volume(self.volume_b, self.volume_a) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) self.mock_rbd.Image.return_value.close \ .assert_called_once_with() self.assertTrue(mock_get_clone_depth.called) self.assertEqual( 1, mock_resize.call_count) @common_mocks def test_create_cloned_volume_w_flatten(self): self.cfg.rbd_max_clone_depth = 1 with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = ( ('fake_pool', self.volume_b.name, '.'.join((self.volume_b.name, 'clone_snap')))) with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required mock_get_clone_depth.return_value = 1 self.driver.create_cloned_volume(self.volume_b, self.volume_a) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) (self.mock_rbd.Image.return_value.unprotect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.remove_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) # We expect the driver to close both volumes, so 2 is expected self.assertEqual( 2, self.mock_rbd.Image.return_value.close.call_count) self.assertTrue(mock_get_clone_depth.called) @common_mocks def test_create_cloned_volume_w_clone_exception(self): self.cfg.rbd_max_clone_depth = 2 self.mock_rbd.RBD.return_value.clone.side_effect = ( self.mock_rbd.RBD.Error) with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required mock_get_clone_depth.return_value = 1 self.assertRaises(self.mock_rbd.RBD.Error, self.driver.create_cloned_volume, self.volume_b, self.volume_a) 
(self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) (self.mock_rbd.Image.return_value.unprotect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.remove_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.mock_rbd.Image.return_value.close.assert_called_once_with() @common_mocks def test_good_locations(self): locations = ['rbd://fsid/pool/image/snap', 'rbd://%2F/%2F/%2F/%2F', ] map(self.driver._parse_location, locations) @common_mocks def test_bad_locations(self): locations = ['rbd://image', 'http://path/to/somewhere/else', 'rbd://image/extra', 'rbd://image/', 'rbd://fsid/pool/image/', 'rbd://fsid/pool/image/snap/', 'rbd://///', ] for loc in locations: self.assertRaises(exception.ImageUnacceptable, self.driver._parse_location, loc) self.assertFalse( self.driver._is_cloneable(loc, {'disk_format': 'raw'})) @common_mocks def test_cloneable(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' info = {'disk_format': 'raw'} self.assertTrue(self.driver._is_cloneable(location, info)) self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_different_fsid(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://def/pool/image/snap' self.assertFalse( self.driver._is_cloneable(location, {'disk_format': 'raw'})) self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_unreadable(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' self.driver.rbd.Error = Exception self.mock_proxy.side_effect = Exception args = [location, {'disk_format': 'raw'}] self.assertFalse(self.driver._is_cloneable(*args)) self.assertEqual(1, self.mock_proxy.call_count) self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_bad_format(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' formats = ['qcow2', 'vmdk', 'vdi'] for f in formats: self.assertFalse( self.driver._is_cloneable(location, {'disk_format': f})) self.assertTrue(mock_get_fsid.called) def _copy_image(self): with mock.patch.object(tempfile, 'NamedTemporaryFile'): with mock.patch.object(os.path, 'exists') as mock_exists: mock_exists.return_value = True with mock.patch.object(image_utils, 'fetch_to_raw'): with mock.patch.object(self.driver, 'delete_volume'): with mock.patch.object(self.driver, '_resize'): mock_image_service = mock.MagicMock() args = [None, self.volume_a, mock_image_service, None] self.driver.copy_image_to_volume(*args) @common_mocks def test_copy_image_no_volume_tmp(self): self.cfg.volume_tmp_dir = None self.cfg.image_conversion_dir = None self._copy_image() @common_mocks def test_copy_image_volume_tmp(self): self.cfg.volume_tmp_dir = None self.cfg.image_conversion_dir = '/var/run/cinder/tmp' self._copy_image() @common_mocks def test_update_volume_stats(self): client = self.mock_client.return_value client.__enter__.return_value = client client.cluster = mock.Mock() client.cluster.mon_command = mock.Mock() 
client.cluster.mon_command.return_value = ( 0, '{"stats":{"total_bytes":64385286144,' '"total_used_bytes":3289628672,"total_avail_bytes":61095657472},' '"pools":[{"name":"rbd","id":2,"stats":{"kb_used":1510197,' '"bytes_used":1546440971,"max_avail":28987613184,"objects":412}},' '{"name":"volumes","id":3,"stats":{"kb_used":0,"bytes_used":0,' '"max_avail":28987613184,"objects":0}}]}\n', '') self.driver.configuration.safe_get = mock.Mock() self.driver.configuration.safe_get.return_value = 'RBD' expected = dict( volume_backend_name='RBD', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb=27, free_capacity_gb=26, reserved_percentage=0, multiattach=True) actual = self.driver.get_volume_stats(True) client.cluster.mon_command.assert_called_once_with( '{"prefix":"df", "format":"json"}', '') self.assertDictMatch(expected, actual) @common_mocks def test_update_volume_stats_error(self): client = self.mock_client.return_value client.__enter__.return_value = client client.cluster = mock.Mock() client.cluster.mon_command = mock.Mock() client.cluster.mon_command.return_value = (22, '', '') self.driver.configuration.safe_get = mock.Mock() self.driver.configuration.safe_get.return_value = 'RBD' expected = dict(volume_backend_name='RBD', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0, multiattach=True) actual = self.driver.get_volume_stats(True) client.cluster.mon_command.assert_called_once_with( '{"prefix":"df", "format":"json"}', '') self.assertDictMatch(expected, actual) @common_mocks def test_get_mon_addrs(self): with mock.patch.object(self.driver, '_execute') as mock_execute: mock_execute.return_value = (CEPH_MON_DUMP, '') hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] self.assertEqual((hosts, ports), self.driver._get_mon_addrs()) @common_mocks def test_initialize_connection(self): hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] with mock.patch.object(self.driver, '_get_mon_addrs') as \ mock_get_mon_addrs: mock_get_mon_addrs.return_value = (hosts, ports) expected = { 'driver_volume_type': 'rbd', 'data': { 'name': '%s/%s' % (self.cfg.rbd_pool, self.volume_a.name), 'hosts': hosts, 'ports': ports, 'auth_enabled': False, 'auth_username': None, 'secret_type': 'ceph', 'secret_uuid': None, 'volume_id': self.volume_a.id } } actual = self.driver.initialize_connection(self.volume_a, None) self.assertDictMatch(expected, actual) self.assertTrue(mock_get_mon_addrs.called) @common_mocks def test_clone(self): src_pool = u'images' src_image = u'image-name' src_snap = u'snapshot-name' client_stack = [] def mock__enter__(inst): def _inner(): client_stack.append(inst) return inst return _inner client = self.mock_client.return_value # capture both rados client used to perform the clone client.__enter__.side_effect = mock__enter__(client) self.driver._clone(self.volume_a, src_pool, src_image, src_snap) args = [client_stack[0].ioctx, str(src_image), str(src_snap), client_stack[1].ioctx, str(self.volume_a.name)] kwargs = {'features': client.features} self.mock_rbd.RBD.return_value.clone.assert_called_once_with( *args, **kwargs) self.assertEqual(2, client.__enter__.call_count) @common_mocks def test_extend_volume(self): fake_size = '20' size = int(fake_size) * units.Gi with mock.patch.object(self.driver, '_resize') as mock_resize: 
self.driver.extend_volume(self.volume_a, fake_size) mock_resize.assert_called_once_with(self.volume_a, size=size) @common_mocks def test_retype(self): context = {} diff = {'encryption': {}, 'extra_specs': {}} updates = {'name': 'testvolume', 'host': 'currenthost', 'id': 'fakeid'} fake_type = 'high-IOPS' volume = fake_volume.fake_volume_obj(context, **updates) # The hosts have been checked same before rbd.retype # is called. # RBD doesn't support multiple pools in a driver. host = {'host': 'currenthost'} self.assertTrue(self.driver.retype(context, volume, fake_type, diff, host)) # The encryptions have been checked as same before rbd.retype # is called. diff['encryption'] = {} self.assertTrue(self.driver.retype(context, volume, fake_type, diff, host)) # extra_specs changes are supported. diff['extra_specs'] = {'non-empty': 'non-empty'} self.assertTrue(self.driver.retype(context, volume, fake_type, diff, host)) diff['extra_specs'] = {} self.assertTrue(self.driver.retype(context, volume, fake_type, diff, host)) @common_mocks def test_update_migrated_volume(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver.rbd.RBD(), 'rename') as mock_rename: context = {} mock_rename.return_value = 0 model_update = self.driver.update_migrated_volume(context, self.volume_a, self.volume_b, 'available') mock_rename.assert_called_with(client.ioctx, 'volume-%s' % self.volume_b.id, 'volume-%s' % self.volume_a.id) self.assertEqual({'_name_id': None, 'provider_location': None}, model_update) def test_rbd_volume_proxy_init(self): mock_driver = mock.Mock(name='driver') mock_driver._connect_to_rados.return_value = (None, None) with driver.RBDVolumeProxy(mock_driver, self.volume_a.name): self.assertEqual(1, mock_driver._connect_to_rados.call_count) self.assertFalse(mock_driver._disconnect_from_rados.called) self.assertEqual(1, mock_driver._disconnect_from_rados.call_count) mock_driver.reset_mock() snap = u'snapshot-name' with driver.RBDVolumeProxy(mock_driver, self.volume_a.name, snapshot=snap): self.assertEqual(1, mock_driver._connect_to_rados.call_count) self.assertFalse(mock_driver._disconnect_from_rados.called) self.assertEqual(1, mock_driver._disconnect_from_rados.call_count) @common_mocks @mock.patch('time.sleep') def test_connect_to_rados(self, sleep_mock): # Default self.cfg.rados_connect_timeout = -1 self.mock_rados.Rados.return_value.open_ioctx.return_value = \ self.mock_rados.Rados.return_value.ioctx # default configured pool ret = self.driver._connect_to_rados() self.assertTrue(self.mock_rados.Rados.return_value.connect.called) # Expect no timeout if default is used self.mock_rados.Rados.return_value.connect.assert_called_once_with() self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1]) self.mock_rados.Rados.return_value.open_ioctx.assert_called_with( self.cfg.rbd_pool) # different pool ret = self.driver._connect_to_rados('alt_pool') self.assertTrue(self.mock_rados.Rados.return_value.connect.called) self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1]) self.mock_rados.Rados.return_value.open_ioctx.assert_called_with( 'alt_pool') # With timeout self.cfg.rados_connect_timeout = 1 self.mock_rados.Rados.return_value.connect.reset_mock() self.driver._connect_to_rados() self.mock_rados.Rados.return_value.connect.assert_called_once_with( timeout=1) # error 
self.mock_rados.Rados.return_value.open_ioctx.reset_mock() self.mock_rados.Rados.return_value.shutdown.reset_mock() self.mock_rados.Rados.return_value.open_ioctx.side_effect = ( self.mock_rados.Error) self.assertRaises(exception.VolumeBackendAPIException, self.driver._connect_to_rados) self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.assertEqual( 3, self.mock_rados.Rados.return_value.shutdown.call_count) class RBDImageIOWrapperTestCase(test.TestCase): def setUp(self): super(RBDImageIOWrapperTestCase, self).setUp() self.meta = mock.Mock() self.meta.user = 'mock_user' self.meta.conf = 'mock_conf' self.meta.pool = 'mock_pool' self.meta.image = mock.Mock() self.meta.image.read = mock.Mock() self.meta.image.write = mock.Mock() self.meta.image.size = mock.Mock() self.mock_rbd_wrapper = driver.RBDImageIOWrapper(self.meta) self.data_length = 1024 self.full_data = b'abcd' * 256 def test_init(self): self.assertEqual(self.mock_rbd_wrapper._rbd_meta, self.meta) self.assertEqual(0, self.mock_rbd_wrapper._offset) def test_inc_offset(self): self.mock_rbd_wrapper._inc_offset(10) self.mock_rbd_wrapper._inc_offset(10) self.assertEqual(20, self.mock_rbd_wrapper._offset) def test_rbd_image(self): self.assertEqual(self.mock_rbd_wrapper.rbd_image, self.meta.image) def test_rbd_user(self): self.assertEqual(self.mock_rbd_wrapper.rbd_user, self.meta.user) def test_rbd_pool(self): self.assertEqual(self.mock_rbd_wrapper.rbd_conf, self.meta.conf) def test_rbd_conf(self): self.assertEqual(self.mock_rbd_wrapper.rbd_pool, self.meta.pool) def test_read(self): def mock_read(offset, length): return self.full_data[offset:length] self.meta.image.read.side_effect = mock_read self.meta.image.size.return_value = self.data_length data = self.mock_rbd_wrapper.read() self.assertEqual(self.full_data, data) data = self.mock_rbd_wrapper.read() self.assertEqual(b'', data) self.mock_rbd_wrapper.seek(0) data = self.mock_rbd_wrapper.read() self.assertEqual(self.full_data, data) self.mock_rbd_wrapper.seek(0) data = self.mock_rbd_wrapper.read(10) self.assertEqual(self.full_data[:10], data) def test_write(self): self.mock_rbd_wrapper.write(self.full_data) self.assertEqual(1024, self.mock_rbd_wrapper._offset) def test_seekable(self): self.assertTrue(self.mock_rbd_wrapper.seekable) def test_seek(self): self.assertEqual(0, self.mock_rbd_wrapper._offset) self.mock_rbd_wrapper.seek(10) self.assertEqual(10, self.mock_rbd_wrapper._offset) self.mock_rbd_wrapper.seek(10) self.assertEqual(10, self.mock_rbd_wrapper._offset) self.mock_rbd_wrapper.seek(10, 1) self.assertEqual(20, self.mock_rbd_wrapper._offset) self.mock_rbd_wrapper.seek(0) self.mock_rbd_wrapper.write(self.full_data) self.meta.image.size.return_value = self.data_length self.mock_rbd_wrapper.seek(0) self.assertEqual(0, self.mock_rbd_wrapper._offset) self.mock_rbd_wrapper.seek(10, 2) self.assertEqual(self.data_length + 10, self.mock_rbd_wrapper._offset) self.mock_rbd_wrapper.seek(-10, 2) self.assertEqual(self.data_length - 10, self.mock_rbd_wrapper._offset) # test exceptions. self.assertRaises(IOError, self.mock_rbd_wrapper.seek, 0, 3) self.assertRaises(IOError, self.mock_rbd_wrapper.seek, -1) # offset should not have been changed by any of the previous # operations. 
self.assertEqual(self.data_length - 10, self.mock_rbd_wrapper._offset) def test_tell(self): self.assertEqual(0, self.mock_rbd_wrapper.tell()) self.mock_rbd_wrapper._inc_offset(10) self.assertEqual(10, self.mock_rbd_wrapper.tell()) def test_flush(self): with mock.patch.object(driver, 'LOG') as mock_logger: self.meta.image.flush = mock.Mock() self.mock_rbd_wrapper.flush() self.meta.image.flush.assert_called_once_with() self.meta.image.flush.reset_mock() # this should be caught and logged silently. self.meta.image.flush.side_effect = AttributeError self.mock_rbd_wrapper.flush() self.meta.image.flush.assert_called_once_with() msg = _("flush() not supported in this version of librbd") mock_logger.warning.assert_called_with(msg) def test_fileno(self): self.assertRaises(IOError, self.mock_rbd_wrapper.fileno) def test_close(self): self.mock_rbd_wrapper.close() class ManagedRBDTestCase(test_volume.DriverTestCase): driver_name = "cinder.volume.drivers.rbd.RBDDriver" def setUp(self): super(ManagedRBDTestCase, self).setUp() self.volume.driver.set_initialized() self.volume.stats = {'allocated_capacity_gb': 0, 'pools': {}} self.called = [] def _create_volume_from_image(self, expected_status, raw=False, clone_error=False): """Try to clone a volume from an image, and check status afterwards. NOTE: if clone_error is True we force the image type to raw otherwise clone_image is not called """ # See tests.image.fake for image types. if raw: image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6' else: image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' # creating volume testdata db_volume = {'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'availability_zone': 'fake_zone', 'attach_status': 'detached', 'host': 'dummy'} volume = objects.Volume(context=self.context, **db_volume) volume.create() try: if not clone_error: self.volume.create_volume(self.context, volume.id, request_spec={'image_id': image_id}, volume=volume) else: self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume.id, request_spec={'image_id': image_id}, volume=volume) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual(expected_status, volume.status) finally: # cleanup volume.destroy() @mock.patch.object(cinder.image.glance, 'get_default_image_service') def test_create_vol_from_image_status_available(self, mock_gdis): """Clone raw image then verify volume is in available state.""" def _mock_clone_image(context, volume, image_location, image_meta, image_service): return {'provider_location': None}, True with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = _mock_clone_image with mock.patch.object(self.volume.driver, 'create_volume') as \ mock_create: with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('available', raw=True) self.assertFalse(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertFalse(mock_create.called) self.assertTrue(mock_gdis.called) @mock.patch.object(cinder.image.glance, 'get_default_image_service') @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') def test_create_vol_from_non_raw_image_status_available(self, mock_fetch, mock_gdis): """Clone non-raw image then verify volume is in available state.""" def _mock_clone_image(context, volume, image_location, image_meta, image_service): return {'provider_location': None}, False mock_fetch.return_value = 
mock.MagicMock(spec=utils.get_file_spec()) with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = _mock_clone_image with mock.patch.object(self.volume.driver, 'create_volume') as \ mock_create: with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('available', raw=False) self.assertTrue(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertTrue(mock_create.called) self.assertTrue(mock_gdis.called) @mock.patch.object(cinder.image.glance, 'get_default_image_service') def test_create_vol_from_image_status_error(self, mock_gdis): """Fail to clone raw image then verify volume is in error state.""" with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = exception.CinderException with mock.patch.object(self.volume.driver, 'create_volume'): with mock.patch.object(create_volume.CreateVolumeFromSpecTask, '_copy_image_to_volume') as mock_copy: self._create_volume_from_image('error', raw=True, clone_error=True) self.assertFalse(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertFalse(self.volume.driver.create_volume.called) self.assertTrue(mock_gdis.called) def test_clone_failure(self): driver = self.volume.driver with mock.patch.object(driver, '_is_cloneable', lambda *args: False): image_loc = (mock.Mock(), None) actual = driver.clone_image(mock.Mock(), mock.Mock(), image_loc, {}, mock.Mock()) self.assertEqual(({}, False), actual) self.assertEqual(({}, False), driver.clone_image('', object(), None, {}, '')) def test_clone_success(self): expected = ({'provider_location': None}, True) driver = self.volume.driver with mock.patch.object(self.volume.driver, '_is_cloneable') as \ mock_is_cloneable: mock_is_cloneable.return_value = True with mock.patch.object(self.volume.driver, '_clone') as \ mock_clone: with mock.patch.object(self.volume.driver, '_resize') as \ mock_resize: image_loc = ('rbd://fee/fi/fo/fum', None) volume = {'name': 'vol1'} actual = driver.clone_image(mock.Mock(), volume, image_loc, {'disk_format': 'raw', 'id': 'id.foo'}, mock.Mock()) self.assertEqual(expected, actual) mock_clone.assert_called_once_with(volume, 'fi', 'fo', 'fum') mock_resize.assert_called_once_with(volume) def test_clone_multilocation_success(self): expected = ({'provider_location': None}, True) driver = self.volume.driver def cloneable_side_effect(url_location, image_meta): return url_location == 'rbd://fee/fi/fo/fum' with mock.patch.object(self.volume.driver, '_is_cloneable') \ as mock_is_cloneable, \ mock.patch.object(self.volume.driver, '_clone') as mock_clone, \ mock.patch.object(self.volume.driver, '_resize') \ as mock_resize: mock_is_cloneable.side_effect = cloneable_side_effect image_loc = ('rbd://bee/bi/bo/bum', [{'url': 'rbd://bee/bi/bo/bum'}, {'url': 'rbd://fee/fi/fo/fum'}]) volume = {'name': 'vol1'} image_meta = mock.sentinel.image_meta image_service = mock.sentinel.image_service actual = driver.clone_image(self.context, volume, image_loc, image_meta, image_service) self.assertEqual(expected, actual) self.assertEqual(2, mock_is_cloneable.call_count) mock_clone.assert_called_once_with(volume, 'fi', 'fo', 'fum') mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum', image_meta) mock_resize.assert_called_once_with(volume) def test_clone_multilocation_failure(self): expected = ({}, False) driver = self.volume.driver with mock.patch.object(driver, '_is_cloneable', return_value=False) \ 
as mock_is_cloneable, \ mock.patch.object(self.volume.driver, '_clone') as mock_clone, \ mock.patch.object(self.volume.driver, '_resize') \ as mock_resize: image_loc = ('rbd://bee/bi/bo/bum', [{'url': 'rbd://bee/bi/bo/bum'}, {'url': 'rbd://fee/fi/fo/fum'}]) volume = {'name': 'vol1'} image_meta = mock.sentinel.image_meta image_service = mock.sentinel.image_service actual = driver.clone_image(self.context, volume, image_loc, image_meta, image_service) self.assertEqual(expected, actual) self.assertEqual(2, mock_is_cloneable.call_count) mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum', image_meta) mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum', image_meta) self.assertFalse(mock_clone.called) self.assertFalse(mock_resize.called)
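

# --- Illustrative sketch (not part of RBDDriver; the helper name below is
# made up). The good/bad location tests earlier in this module imply that a
# cloneable location is an rbd:// URI with exactly four non-empty,
# percent-encoded segments: fsid/pool/image/snapshot. A minimal splitter
# consistent with those tests could look like this; the real
# _parse_location() presumably also percent-decodes each piece and may
# differ in other details.
def _split_rbd_location_sketch(uri):
    prefix = 'rbd://'
    if not uri.startswith(prefix):
        raise ValueError('not an rbd:// URI: %s' % uri)
    pieces = uri[len(prefix):].split('/')
    if len(pieces) != 4 or not all(pieces):
        raise ValueError('expected rbd://fsid/pool/image/snapshot: %s' % uri)
    # e.g. 'rbd://fsid/pool/image/snap' -> ('fsid', 'pool', 'image', 'snap')
    return tuple(pieces)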
import json from django.http import HttpResponse from django.template.loader import get_template from django.template import Context from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.contrib.auth import authenticate from django.contrib.auth.models import User from django.views.decorators.csrf import csrf_exempt from django.core.files.base import ContentFile from django.utils.text import slugify from base64 import b64decode from .models import * # Load configured private media server from private_media.views import get_class ServerClass = get_class(settings.PRIVATE_MEDIA_SERVER) server = ServerClass(**getattr(settings, 'PRIVATE_MEDIA_SERVER_OPTIONS', {})) # Create your views here. def index(request): return HttpResponse("Hello, world. You're at the projects index.") # Create your views here. def tease(request): return HttpResponse(get_template('tease.html').render()) # Request a screenshot image by id def screenshot(request, id): try: screenshot = Screenshot.objects.get(id=id) if screenshot.has_read_permission(request.user): return server.serve(request, path=screenshot.image.url.lstrip('/')) else: return HttpResponse('Screenshot not yours', status=403) except (ObjectDoesNotExist, ValueError): return HttpResponse('Screenshot not found: ({0})'.format(id), status=404) def diff(request, before_id, after_id): before = getScreenshot(request, before_id) if type(before) is HttpResponse: return before after = getScreenshot(request, after_id) if type(after) is HttpResponse: return after template = get_template('diff.html') context = Context({ 'before': before, 'after': after }) html = template.render(context) return HttpResponse(html) # TODO: Add the platform to the url def diffPage(request, username, project, branch, page, build, new_build): try: project_user = User.objects.get(username=username) if not project_user.is_active: raise ObjectDoesNotExist project = Project.objects.get(name_slug=project, user_id=project_user.id) except (ObjectDoesNotExist, ValueError): return HttpResponse('Project not found: ({0})'.format(project), status=404) try: branch = Branch.objects.get(name_slug=branch, project_id=project.id) except (ObjectDoesNotExist, ValueError): return HttpResponse('Branch not found: ({0})'.format(branch), status=404) try: page = Page.objects.get(name_slug=page, project_id=project.id) except (ObjectDoesNotExist, ValueError): return HttpResponse('Page not found: ({0})'.format(page), status=404) try: build_before = Build.objects.get(name_slug=build, branch_id=branch.id) before = Screenshot.objects.get(page_id=page.id, build_id=build_before) except (ObjectDoesNotExist, ValueError): return HttpResponse('Build not found: ({0})'.format(build), status=404) try: build_after = Build.objects.get(name_slug=new_build, branch_id=branch.id) after = Screenshot.objects.get(page_id=page.id, build_id=build_after) except (ObjectDoesNotExist, ValueError): return HttpResponse('Build not found: ({0})'.format(new_build), status=404) return diff(request, before.id, after.id) def getScreenshot(request, id): try: screenshot = Screenshot.objects.get(id=id) if screenshot.has_read_permission(request.user): return screenshot else: return HttpResponse('Screenshot not yours', status=403) except (ObjectDoesNotExist, ValueError): return HttpResponse('Screenshot not found: ({0})'.format(id), status=404) # Upload a set of screenshots for a project version @csrf_exempt def upload(request, username, project): data = json.loads(request.body) user = 
authenticate(username=data['username'], password=data['password']) if user is None: return HttpResponse('Invalid username or password', status=401) if not user.is_active: return HttpResponse('Inactive username', status=403) try: project_user = User.objects.get(username=username) if not project_user.is_active: raise ObjectDoesNotExist project = Project.objects.get(name_slug=project, user_id=project_user.id) device = data['platform'].get('device', '') platform = getPlatform(device, data['platform']['os'], data['platform']['browser'], project.id) branch = getBranch(data['branch'], project.id) build = getBuild(data['build'], branch.id) for page_name, screenshot_data in data['screenshots'].iteritems(): saveScreenshot(project.id, build.id, platform.id, page_name, screenshot_data) return HttpResponse('{"status": "success"}'); except (ObjectDoesNotExist, ValueError): return HttpResponse('Project not found', status=404) def saveScreenshot(project_id, build_id, platform_id, page_name, screenshot_data): page = getPage(page_name, project_id) # TODO: Detect image type image_data = b64decode(screenshot_data) image = ContentFile(image_data, 'temp.png') screenshot = Screenshot(platform_id=platform_id, build_id=build_id, page_id=page.id, name=page.name, image=image) screenshot.save() def getPage(name, project_id): try: page = Page.objects.get(name_slug=slugify(name), project_id=project_id) except (ObjectDoesNotExist, ValueError): page = Page(name=name, project_id=project_id) page.save() return page def getBranch(name, project_id): try: branch = Branch.objects.get(name_slug=slugify(name), project_id=project_id) except (ObjectDoesNotExist, ValueError): branch = Branch(name=name, project_id=project_id) branch.save() return branch def getBuild(name, branch_id): try: build = Build.objects.get(name_slug=slugify(name), branch_id=branch_id) except (ObjectDoesNotExist, ValueError): build = Build(name=name, branch_id=branch_id) build.save() return build def getPlatform(device, os, browser, project_id): platform_name = "{0} {1} {2}".format(device, os, browser) try: platform = Platform.objects.get(name_slug=slugify(platform_name), project_id=project_id) except (ObjectDoesNotExist, ValueError): platform = Platform(name=platform_name, device=getDevice(device), os=getOperatingSystem(os), browser=getBrowser(browser), project_id=project_id) platform.save() return platform def getDevice(name): if not name: return None try: device = Device.objects.get(name_slug=slugify(name)) except (ObjectDoesNotExist, ValueError): device = Device(name=name) device.save() return device def getOperatingSystem(name): try: os = OperatingSystem.objects.get(name_slug=slugify(name)) except (ObjectDoesNotExist, ValueError): os = OperatingSystem(name=name) os.save() return os def getBrowser(name): try: browser = Browser.objects.get(name_slug=slugify(name)) except (ObjectDoesNotExist, ValueError): browser = Browser(name=name) browser.save() return browser
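

# --- Illustrative example (all values below are placeholders, not real data).
# The upload() view above expects a JSON body carrying credentials, platform
# information, branch/build names and a mapping of page name to a
# base64-encoded screenshot. Field names are taken from the view code; a
# request body should resemble:
EXAMPLE_UPLOAD_PAYLOAD = {
    'username': 'alice',
    'password': 'secret',
    'platform': {'device': 'iPhone 6', 'os': 'iOS 9', 'browser': 'Safari'},
    'branch': 'master',
    'build': 'build-42',
    'screenshots': {
        'Home page': '<base64-encoded PNG data>',
    },
}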
from __future__ import print_function import os import httplib2 from apiclient import discovery import logging from functools import wraps import creds # time to stop _MAX_COUNT = 10000 # Page size for API list call _PAGE_SIZE = 500; _LOG_FILENAME = 'listfiles.log' _LOG_FILE_PATH = 'log' if not os.path.exists(_LOG_FILE_PATH): os.makedirs(_LOG_FILE_PATH) logging.basicConfig(filename='%s/%s' % (_LOG_FILE_PATH, _LOG_FILENAME), level=logging.DEBUG) def memoize(func): cache = {} @wraps(func) def wrap(*args): if args not in cache: cache[args] = func(*args) return cache[args] return wrap class ItemKey(object): """Type representing a key generated from the item. Suspected duplicates will have same key.""" def __init__(self, item): self.md5Checksum = item.get('md5Checksum') self.name = item.get('name') # self.size = item.get('size') self.mimeType = item.get('mimeType') self.originalFilename = item.get('originalFilename') def __str__(self): return str(self.__dict__) def __eq__(self, other): return self.__str__() == other.__str__() def __ne__(self, other): return self.__str__() != other.__str__() def __hash__(self): return hash(self.__str__()) class FileLister(object): """Utility to find and list files in Google drive, detecting duplicates.""" def __init__(self, service=None, account_name="quickstart"): """Service: use None for actual drive service, or pass a mock.""" self._service = service self._count = 0 self._no_key_count = 0 if service == None: self._service = self.connect(account_name) def connect(self, account_name): """Connects to Google Drive.""" credentials = creds.get_credentials(account_name) http = credentials.authorize(httplib2.Http()) return discovery.build('drive', 'v3', http=http) def read_chunk(self, continuation_token): """Reads a page of from the list results.""" return self._service.files().list( pageSize=_PAGE_SIZE, pageToken=continuation_token, includeTeamDriveItems=False, corpora="user", spaces="drive", fields="nextPageToken, files(id, name, size, md5Checksum, mimeType, fullFileExtension, originalFilename," "description, modifiedTime, trashed, parents)" ).execute() def get_item(self, id): """Finds the item by fileId and returns it's name and parent's fileId.""" return self._service.files().get(fileId=id, fields='parents,name').execute() @memoize def build_path(self, id): """Builds path of the containing folder by the folder's fileId.""" parents = [] folder = self.get_item(id) path = '' if folder.get('parents'): parentId = folder.get('parents')[0] path = self.build_path(parentId) return '{0}/{1}'.format(path, folder.get('name')) def load_all_files(self): """Loads all docs from the drive. :return dictionary of lists of docs by the keys. 
""" self._count = 0 self._no_key_count = 0 all_docs = {} # TODO: add stats next_page_token = None done = False while not done: results = self.read_chunk(next_page_token) self.insert_items(all_docs, results.get('files', [])) next_page_token = results.get('nextPageToken') done = not (next_page_token and self._count < _MAX_COUNT) return all_docs def need(self, item): """Returns a boolean whether to keep the item.""" return not item['trashed'] and not item['mimeType'] == 'application/vnd.google-apps.document' def insert_items(self, all_docs, items): if not items: logging.debug(u'No more items.') return logging.debug(u'Inserting {0} items.'.format(len(items))) self._count += len(items) for item in items: if not self.need(item): logging.debug(u'Ignoring: {0} -> {1}'.format(item['name'], item)) continue logging.debug(u'{0} -> {1}'.format(item['name'], item)) key = ItemKey(item) if key: if not all_docs.has_key(key): all_docs[key] = [] all_docs[key] += [item] logging.debug(u'added doc id={0} {1}'.format(item['id'], item['name'])) else: self._no_key_count += 1 def find_dups(self, all_docs): """Loads all files from the drive, finds duplicate susptects.""" logging.info(u'Started findDups') dups = [all_docs[key] for key in all_docs if len(all_docs[key]) > 1] logging.info(u'Done findDups') return dups def get_report(self): fmt = """ =============== Finished. Processed files: {0} Found duplicate suspects: {1} Skipped - cannot generate key: {2} =============== ===== DUPS: ===== {3} ===== """ all_dups = self.find_dups(self.load_all_files()) report_lines = "" for file_dups in all_dups: name = file_dups[0]['name'] print(u'found {0} duplicates for {1}'.format(len(file_dups), name)) report_lines += u'** {0}\n'.format(name) # read paths until the first exception which is probably out of quota. # TODO: verify the exception, if quota - deal gracefully. # Save memoize cache maybe? need_path = True for item in file_dups: if need_path: try: path = self.build_path(item['id']) except Exception: logging.error("Failed to read path, interrupting path detection") need_path = False report_lines += u'\t{0}\n'.format(path) else: path = "???" return fmt.format(self._count, len(all_dups), self._no_key_count, report_lines) def main(): """Reads files from Google Drive API and tries to identify duplicates.""" report = FileLister(account_name='myaccount').get_report() print(report) file = open("out.lst", 'w') file.write(report) file.close() if __name__ == '__main__': main()
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # import glob import os from typedapi import ensure_api_is_typed import tensorflow_addons as tfa BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) def test_api_typed(): modules_list = [ tfa, tfa.activations, tfa.callbacks, tfa.image, tfa.losses, tfa.metrics, tfa.optimizers, tfa.rnn, tfa.seq2seq, tfa.text, ] # Files within this list will be exempt from verification. exception_list = [ tfa.rnn.PeepholeLSTMCell, ] help_message = ( "You can also take a look at the section about it in the CONTRIBUTING.md:\n" "https://github.com/tensorflow/addons/blob/master/CONTRIBUTING.md#about-type-hints" ) ensure_api_is_typed( modules_list, exception_list, init_only=True, additional_message=help_message ) def test_case_insensitive_filesystems(): # Make sure BASE_DIR is project root. # If it doesn't, we probably computed the wrong directory. if not os.path.isdir(os.path.join(BASE_DIR, "tensorflow_addons")): raise AssertionError("BASE_DIR = {} is not project root".format(BASE_DIR)) for dirpath, dirnames, filenames in os.walk(BASE_DIR, followlinks=True): lowercase_directories = [x.lower() for x in dirnames] lowercase_files = [x.lower() for x in filenames] lowercase_dir_contents = lowercase_directories + lowercase_files if len(lowercase_dir_contents) != len(set(lowercase_dir_contents)): raise AssertionError( "Files with same name but different case detected " "in directory: {}".format(dirpath) ) def get_lines_of_source_code(allowlist=None): allowlist = allowlist or [] source_dir = os.path.join(BASE_DIR, "tensorflow_addons") for path in glob.glob(source_dir + "/**/*.py", recursive=True): if in_allowlist(path, allowlist): continue with open(path) as f: for line_idx, line in enumerate(f): yield path, line_idx, line def in_allowlist(file_path, allowlist): for allowed_file in allowlist: if file_path.endswith(allowed_file): return True return False def test_no_private_tf_api(): # TODO: remove all elements of the list and remove the allowlist # This allowlist should not grow. Do not add elements to this list. allowlist = [ "tensorflow_addons/metrics/r_square.py", "tensorflow_addons/utils/test_utils.py", "tensorflow_addons/seq2seq/decoder.py", "tensorflow_addons/utils/types.py", ] for file_path, line_idx, line in get_lines_of_source_code(allowlist): if "import tensorflow.python" in line or "from tensorflow.python" in line: raise ImportError( "A private tensorflow API import was found in {} at line {}.\n" "tensorflow.python refers to TensorFlow's internal source " "code and private functions/classes.\n" "The use of those is forbidden in Addons for stability reasons." "\nYou should find a public alternative or ask the " "TensorFlow team to expose publicly the function/class " "that you are using.\n" "If you're trying to do `import tensorflow.python.keras` " "it can be replaced with `import tensorflow.keras`." 
"".format(file_path, line_idx + 1) ) def test_no_tf_cond(): # TODO: remove all elements of the list and remove the allowlist # This allowlist should not grow. Do not add elements to this list. allowlist = [ "tensorflow_addons/text/crf.py", "tensorflow_addons/layers/wrappers.py", "tensorflow_addons/image/connected_components.py", "tensorflow_addons/optimizers/novograd.py", "tensorflow_addons/metrics/cohens_kappa.py", "tensorflow_addons/seq2seq/sampler.py", "tensorflow_addons/seq2seq/beam_search_decoder.py", ] for file_path, line_idx, line in get_lines_of_source_code(allowlist): if "tf.cond(" in line: raise NameError( "The usage of a tf.cond() function call was found in " "file {} at line {}:\n\n" " {}\n" "In TensorFlow 2.x, using a simple `if` in a function decorated " "with `@tf.function` is equivalent to a tf.cond() thanks to Autograph. \n" "TensorFlow Addons aims to be written with idiomatic TF 2.x code. \n" "As such, using tf.cond() is not allowed in the codebase. \n" "Use a `if` and decorate your function with @tf.function instead. \n" "You can take a look at " "https://www.tensorflow.org/guide/function#use_python_control_flow" "".format(file_path, line_idx, line) ) def test_no_experimental_api(): # TODO: remove all elements of the list and remove the allowlist # This allowlist should not grow. Do not add elements to this list. allowlist = [ "tensorflow_addons/optimizers/weight_decay_optimizers.py", "tensorflow_addons/layers/max_unpooling_2d.py", "tensorflow_addons/image/dense_image_warp.py", ] for file_path, line_idx, line in get_lines_of_source_code(allowlist): if file_path.endswith("_test.py") or file_path.endswith("conftest.py"): continue if file_path.endswith("tensorflow_addons/utils/test_utils.py"): continue if "experimental" in line: raise NameError( "The usage of a TensorFlow experimental API was found in file {} " "at line {}:\n\n" " {}\n" "Experimental APIs are ok in tests but not in user-facing code. " "This is because Experimental APIs might have bugs and are not " "widely used yet.\n" "Addons should show how to write TensorFlow " "code in a stable and forward-compatible way." "".format(file_path, line_idx, line) ) def test_no_tf_control_dependencies(): # TODO: remove all elements of the list and remove the allowlist # This allowlist should not grow. Do not add elements to this list. allowlist = [ "tensorflow_addons/layers/wrappers.py", "tensorflow_addons/image/utils.py", "tensorflow_addons/image/dense_image_warp.py", "tensorflow_addons/optimizers/average_wrapper.py", "tensorflow_addons/optimizers/yogi.py", "tensorflow_addons/optimizers/lookahead.py", "tensorflow_addons/optimizers/weight_decay_optimizers.py", "tensorflow_addons/optimizers/rectified_adam.py", "tensorflow_addons/optimizers/lamb.py", "tensorflow_addons/seq2seq/sampler.py", "tensorflow_addons/seq2seq/beam_search_decoder.py", "tensorflow_addons/seq2seq/attention_wrapper.py", ] for file_path, line_idx, line in get_lines_of_source_code(allowlist): if "tf.control_dependencies(" in line: raise NameError( "The usage of a tf.control_dependencies() function call was found in " "file {} at line {}:\n\n" " {}\n" "In TensorFlow 2.x, in a function decorated " "with `@tf.function` the dependencies are controlled automatically" " thanks to Autograph. \n" "TensorFlow Addons aims to be written with idiomatic TF 2.x code. \n" "As such, using tf.control_dependencies() is not allowed in the codebase. \n" "Decorate your function with @tf.function instead. 
\n" "You can take a look at \n" "https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md#program-order-semantics--control-dependencies" "".format(file_path, line_idx, line) ) def test_no_deprecated_v1(): # TODO: remove all elements of the list and remove the allowlist # This allowlist should not grow. Do not add elements to this list. allowlist = [ "tensorflow_addons/text/skip_gram_ops.py", "tensorflow_addons/seq2seq/decoder.py", "tensorflow_addons/seq2seq/tests/attention_wrapper_test.py", ] for file_path, line_idx, line in get_lines_of_source_code(allowlist): if "tf.compat.v1" in line: raise NameError( "The usage of a tf.compat.v1 API was found in file {} at line {}:\n\n" " {}\n" "TensorFlow Addons doesn't support running programs with " "`tf.compat.v1.disable_v2_behavior()`.\n" "As such, there should be no need for the compatibility module " "tf.compat. Please find an alternative using only the TF2.x API." "".format(file_path, line_idx, line) )
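

# --- Illustrative example (bump() is made up and is not one of the checks
# above). test_no_tf_cond() flags explicit tf.cond() calls; under
# @tf.function, AutoGraph turns a plain Python `if` on a tensor predicate
# into the equivalent graph conditional, which is the style Addons requires.
def _autograph_cond_example():
    import tensorflow as tf

    @tf.function
    def bump(x, pred):
        # equivalent to tf.cond(pred, lambda: x + 1, lambda: x - 1)
        if pred:
            return x + 1
        return x - 1

    return bump(tf.constant(1), tf.constant(True))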
import os import unittest import re from mock import MagicMock from PyAnalysisTools.base.ProcessConfig import ProcessConfig, parse_and_build_process_config, find_process_config, \ Process from PyAnalysisTools.base.YAMLHandle import YAMLLoader as yl cwd = os.path.dirname(__file__) class TestProcess(unittest.TestCase): @classmethod def setUpClass(self): self.data_set_info = yl.read_yaml(os.path.join(cwd, 'fixtures/dataset_info_pmg.yml')) def setUp(self): pass def tearDown(self): pass def test_str(self): process = Process('tmp/ntuple-311570_0.MC16a.root', self.data_set_info) self.assertEqual("TBbLQmumu1300l1 parsed from file name tmp/ntuple-311570_0.MC16a.root", process.__str__()) def test_unicode(self): process = Process('tmp/ntuple-311570_0.MC16a.root', self.data_set_info) self.assertEqual("TBbLQmumu1300l1 parsed from file name tmp/ntuple-311570_0.MC16a.root", process.__unicode__()) def test_format(self): process = Process('tmp/ntuple-311570_0.MC16a.root', self.data_set_info) self.assertEqual("TBbLQmumu1300l1 parsed from file name tmp/ntuple-311570_0.MC16a.root", "{:s}".format(process)) def test_hash(self): process = Process('tmp/ntuple-311570_0.MC16a.root', self.data_set_info) self.assertEqual(hash("TBbLQmumu1300l1"), hash(process)) def test_process_file_name_ntuple(self): process = Process('tmp/ntuple-311570_0.MC16a.root', self.data_set_info) self.assertTrue(process.is_mc) self.assertFalse(process.is_data) self.assertEqual('TBbLQmumu1300l1', process.process_name) self.assertEqual('311570', process.dsid) self.assertEqual('mc16a', process.mc_campaign) def test_process_file_name_hist(self): process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) self.assertTrue(process.is_mc) self.assertFalse(process.is_data) self.assertEqual('TBbLQmumu1300l1', process.process_name) self.assertEqual('311570', process.dsid) self.assertEqual('mc16d', process.mc_campaign) def test_process_file_name_arbitrary_tag(self): process = Process('tmp/foo-311570_0.MC16e.root', self.data_set_info, tags=['foo']) self.assertTrue(process.is_mc) self.assertFalse(process.is_data) self.assertEqual('TBbLQmumu1300l1', process.process_name) self.assertEqual('311570', process.dsid) self.assertEqual('mc16e', process.mc_campaign) def test_process_file_name_data(self): process = Process('v21/ntuple-data18_13TeV_periodO_0.root', self.data_set_info, tags=['foo']) self.assertFalse(process.is_mc) self.assertTrue(process.is_data) self.assertEqual('data18.periodO', process.process_name) self.assertIsNone(process.dsid) self.assertIsNone(process.mc_campaign) def test_process_no_file_name(self): process = Process(None, self.data_set_info, tags=['foo']) self.assertFalse(process.is_mc) self.assertFalse(process.is_data) self.assertIsNone(process.process_name) self.assertIsNone(process.dsid) def test_process_unconvential_file_name(self): process = Process('tmp/hist-333311570_0.MC16e.root', self.data_set_info) self.assertTrue(process.is_mc) self.assertFalse(process.is_data) self.assertEqual(None, process.process_name) self.assertEqual('333311570', process.dsid) def test_str_operator(self): process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) self.assertEqual('TBbLQmumu1300l1 parsed from file name tmp/hist-311570_0.MC16d.root', process.__str__()) def test_equality(self): process1 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) process2 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) self.assertEqual(process1, process2) def test_equality_different_files(self): process1 = 
Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) process2 = Process('tmp/ntuple-311570_0.MC16d.root', self.data_set_info) self.assertEqual(process1, process2) def test_inequality(self): process1 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) process2 = Process('tmp/hist-311570_0.MC16e.root', self.data_set_info) self.assertNotEqual(process1, process2) def test_inequality_type(self): process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) self.assertNotEqual(process, None) def test_match_true(self): process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) self.assertTrue(process.match('TBbLQmumu1300l1')) def test_match_false(self): process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) self.assertFalse(process.match('TBbLQmumu1400l1')) def test_match_fals_no_process(self): process = Process('tmp/hist-333311570_0.MC16e.root', self.data_set_info) self.assertFalse(process.match('TBbLQmumu1400l1')) def test_match_any_true(self): process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) self.assertTrue(process.matches_any(['TBbLQmumu1300l1'])) def test_match_any_false(self): process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) self.assertFalse(process.matches_any(['TBbLQmumu1400l1'])) def test_match_any_false_invalid_input(self): process = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info) self.assertFalse(process.matches_any('TBbLQmumu1400l1')) def test_with_cut(self): process1 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info, cut='foo') process2 = Process('tmp/hist-311570_0.MC16d.root', self.data_set_info, cut='bar') self.assertNotEqual(process1, process2) self.assertNotEqual(process1.process_name, process2.process_name) self.assertEqual(process1.dsid, process2.dsid) def test_process_file_name_mc(self): process = Process('v21/ntuple-mc16_311570_MC16e.root', self.data_set_info, tags=['foo']) self.assertTrue(process.is_mc) self.assertFalse(process.is_data) self.assertEqual('TBbLQmumu1300l1', process.process_name) self.assertEqual('311570', process.dsid) self.assertEqual('mc16e', process.mc_campaign) def test_process_file_name_hist_full_path(self): process = Process('/Users/foo/tmp/test/hists_20200206_18-04-21/hist-364106_1.MC16d.root', self.data_set_info, tags=['foo']) self.assertTrue(process.is_mc) self.assertFalse(process.is_data) self.assertEqual('ZmumuHT140280CVetoBVeto', process.process_name) self.assertEqual('364106', process.dsid) self.assertEqual('mc16d', process.mc_campaign) def test_process_file_name_ntuple_data_full_path(self): process = Process('/storage/hepgrp/morgens/LQ/ntuples/v29_merged/ntuple-data17_13TeV_periodK_0.root', self.data_set_info, tags=['foo']) self.assertFalse(process.is_mc) self.assertTrue(process.is_data) self.assertEqual('data17.periodK', process.process_name) def test_process_file_name_data_user(self): fname = '~/user.foo.data18_13TeV.periodAllYear.physics_Late.pro24_v01.v8_hist/user.foo.2._001.hist-output.root' process = Process(fname, self.data_set_info, tags=['foo']) self.assertFalse(process.is_mc) self.assertTrue(process.is_data) self.assertTrue(re.match(r'.*data.*', process.process_name)) def test_process_file_name_data_run(self): fname = '~/v8/ntuple-data16_cos_306147_physics_Main_cosmicsReco.root' process = Process(fname, self.data_set_info, tags=['foo']) self.assertFalse(process.is_mc) self.assertTrue(process.is_data) self.assertTrue(re.match(r'.*data16.*306147.*', process.process_name)) def test_process_file_name_data_cos(self): process = 
Process('hist-data16_cos.00306147.physics_Main.cosmicsStandardOFCs.root', None) self.assertTrue(process.is_data) self.assertFalse(process.is_mc) self.assertEqual('data16_00306147', process.process_name) self.assertIsNone(process.period) self.assertIsNone(process.weight) def test_process_weight(self): process = Process('v21/ntuple-mc16_311570_MC16e.root', self.data_set_info, weight='foo') self.assertEqual('foo', process.weight) class TestProcessConfig(unittest.TestCase): def setUp(self): self.pc = ProcessConfig(name='foo', type='data') self.cfg_file = os.path.join(os.path.dirname(__file__), 'fixtures/process_merge_config.yml') def test_ctor(self): self.assertIsNone(self.pc.parent_process) self.assertIsNone(self.pc.scale_factor) self.assertIsNone(self.pc.regions_only) self.assertIsNone(self.pc.weight) self.assertIsNone(self.pc.assoc_process) self.assertTrue(self.pc.is_data) self.assertFalse(self.pc.is_syst_process) self.assertFalse(self.pc.is_mc) def test_str(self): process_cfg_str = self.pc.__str__() self.assertTrue("Process config: foo \n" in process_cfg_str) self.assertTrue("name=foo \n" in process_cfg_str) self.assertTrue("type=data \n" in process_cfg_str) self.assertTrue("is_syst_process=False \n" in process_cfg_str) self.assertTrue("parent_process=None \n" in process_cfg_str) self.assertTrue("scale_factor=None \n" in process_cfg_str) self.assertTrue("is_mc=False \n" in process_cfg_str) self.assertTrue("weight=None \n" in process_cfg_str) self.assertTrue("is_data=True \n" in process_cfg_str) self.assertTrue("regions_only=None \n" in process_cfg_str) self.assertTrue("assoc_process=None \n" in process_cfg_str) self.assertTrue("type=data \n" in process_cfg_str) def test_unicode(self): process_cfg_str = self.pc.__unicode__() self.assertTrue("Process config: foo \n" in process_cfg_str) self.assertTrue("name=foo \n" in process_cfg_str) self.assertTrue("type=data \n" in process_cfg_str) self.assertTrue("is_syst_process=False \n" in process_cfg_str) self.assertTrue("parent_process=None \n" in process_cfg_str) self.assertTrue("scale_factor=None \n" in process_cfg_str) self.assertTrue("is_mc=False \n" in process_cfg_str) self.assertTrue("weight=None \n" in process_cfg_str) self.assertTrue("is_data=True \n" in process_cfg_str) self.assertTrue("regions_only=None \n" in process_cfg_str) self.assertTrue("assoc_process=None \n" in process_cfg_str) self.assertTrue("type=data \n" in process_cfg_str) def test_repr(self): process_cfg_str = self.pc.__repr__() self.assertTrue("Process config: foo \n" in process_cfg_str) self.assertTrue("name=foo \n" in process_cfg_str) self.assertTrue("type=data \n" in process_cfg_str) self.assertTrue("is_syst_process=False \n" in process_cfg_str) self.assertTrue("parent_process=None \n" in process_cfg_str) self.assertTrue("scale_factor=None \n" in process_cfg_str) self.assertTrue("is_mc=False \n" in process_cfg_str) self.assertTrue("weight=None \n" in process_cfg_str) self.assertTrue("is_data=True \n" in process_cfg_str) self.assertTrue("regions_only=None \n" in process_cfg_str) self.assertTrue("assoc_process=None \n" in process_cfg_str) self.assertTrue("type=data \n" in process_cfg_str) def test_parse_and_build_process_config(self): cfgs = parse_and_build_process_config(self.cfg_file) self.assertTrue('Data' in cfgs) def test_parse_and_build_process_config_lsit(self): cfgs = parse_and_build_process_config([self.cfg_file]) self.assertTrue('Data' in cfgs) def test_parse_and_build_process_config_no_file(self): self.assertIsNone(parse_and_build_process_config(None)) def 
test_parse_and_build_process_config_non_existing_file_exception(self): try: self.assertRaises(FileNotFoundError, parse_and_build_process_config, 'foo') except NameError: self.assertRaises(IOError, parse_and_build_process_config, 'foo') def test_find_process_config_missing_input(self): self.assertIsNone(find_process_config(None, MagicMock())) self.assertIsNone(find_process_config(MagicMock(), None)) def test_find_process_config(self): cfgs = parse_and_build_process_config(self.cfg_file) self.assertEqual(cfgs['Data'], find_process_config('data18_13TeV_periodB', cfgs)) def test_find_process_config_direct_cfg_match(self): cfgs = parse_and_build_process_config(self.cfg_file) self.assertEqual(cfgs['Data'], find_process_config('Data', cfgs)) def test_find_process_config_no_regex(self): cfgs = parse_and_build_process_config(self.cfg_file) cfgs['Data'].subprocesses = ['data18_13TeV_periodB'] self.assertEqual(cfgs['Data'], find_process_config('data18_13TeV_periodB', cfgs)) def test_find_process_config_no_subprocess(self): cfgs = parse_and_build_process_config(self.cfg_file) delattr(cfgs['Data'], 'subprocesses') self.assertIsNone(find_process_config('data18_13TeV_periodB', cfgs)) def test_find_process_config_multiple_matches(self): cfgs = parse_and_build_process_config(self.cfg_file) cfgs['tmp'] = cfgs['Data'] self.assertIsNone(find_process_config('data18_13TeV_periodB', cfgs)) def test_find_process_config_process(self): cfgs = parse_and_build_process_config(self.cfg_file) self.assertEqual(cfgs['Data'], find_process_config(Process('data18_13TeV_periodB', None), cfgs)) def test_find_process_config_direct_cfg_match_process(self): cfgs = parse_and_build_process_config(self.cfg_file) self.assertEqual(cfgs['Data'], find_process_config(Process('Data', None), cfgs)) def test_find_process_config_no_regex_process(self): cfgs = parse_and_build_process_config(self.cfg_file) cfgs['Data'].subprocesses = ['data18.periodB'] self.assertEqual(cfgs['Data'], find_process_config(Process('data18_13TeV_periodB', None), cfgs)) def test_find_process_config_no_subprocess_process(self): cfgs = parse_and_build_process_config(self.cfg_file) delattr(cfgs['Data'], 'subprocesses') self.assertIsNone(find_process_config(Process('data18_13TeV_periodB', None), cfgs)) def test_find_process_config_multiple_matches_process(self): cfgs = parse_and_build_process_config(self.cfg_file) cfgs['tmp'] = cfgs['Data'] self.assertIsNone(find_process_config(Process('data18_13TeV_periodB', None), cfgs))
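

# --- Illustrative summary (derived from the test cases above, not from the
# parser implementation; the file names are the fixtures used in the tests).
# The Process tests encode a file-name convention roughly like:
#
#   ntuple-311570_0.MC16a.root         -> MC,   dsid 311570, campaign mc16a
#   hist-311570_0.MC16d.root           -> MC,   dsid 311570, campaign mc16d
#   ntuple-data18_13TeV_periodO_0.root -> data, process name data18.periodO
#
# with the human-readable process name (e.g. TBbLQmumu1300l1) looked up from
# the dataset_info fixture via the dsid.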
""" Functions: score_tfbs_genome Score the TFBS in a genomic location (from filesystem). load_matrices list_matrices Return a list of (matrix ID, gene symbol). matid2matrix Return the matrix for a matrix ID. gene2matrices Return all matrices for a gene symbol. is_matrix_id find_matrix_file """ # OBSOLETE # get_tfbs_genome_db Load the TFBS in a genomic location (from database). # get_tfbs_tss_db Load the TFBS around a TSS. # _query_db import os def score_tfbs_genome(chrom, start, length, matrices=None, nlp=None, num_procs=1, ra_path=None): # Return list of matrix, chrom, strand, position, NLP. # NLP is given in log_e. import genomelib import patser seq = genomelib.get_sequence(chrom, start, length, ra_path=ra_path) if not matrices: matrices = [x[0] for x in list_matrices()] x = [find_matrix_file(x) for x in matrices] matrix_files = [x for x in x if x] # Filter out missing ones. # list of sequence_num, matrix_num, (0-based) position, strand, score, nlp patser_data = patser.score_tfbs(seq, matrix_files, num_jobs=num_procs) # list of matrix, chrom, strand, pos, nlp data = [] for x in patser_data: sequence_num, matrix_num, position, strand, score, tf_nlp = x assert sequence_num == 0 matrix = matrices[matrix_num] pos = position + start if nlp is not None and tf_nlp < nlp: continue x = matrix, chrom, strand, pos, tf_nlp data.append(x) # Sort by chrom, position, strand, matrix x = [(x[1], x[3], x[2], x[0], x) for x in data] x.sort() data = [x[-1] for x in x] return data def _load_matrices_h(): import config import filelib # MATRIX_ID (all upper case) -> object with members: # matid # accession # gene_id # gene_symbol # organism (for JASPAR) # length # Load the lengths. matid2length = {} for d in filelib.read_row( config.matid2length, "matid:s length:d"): matid2length[d.matid] = d.length # Load the matrix information. matrices = [] for d in filelib.read_row(config.JASPAR_info, header=1): assert d.xID in matid2length length = matid2length.get(d.xID, 0) x = filelib.GenericObject( matid=d.xID, accession="", gene_id=d.LocusLink, gene_symbol=d.Gene_Symbol, organism=d.Organism, length=length) matrices.append(x) for d in filelib.read_row(config.TRANSFAC_info, header=1): #assert d.Accession in matid2length, "Missing: %s" % d.Accession # Some are missing, e.g. M00316. length = matid2length.get(d.Accession, 0) x = filelib.GenericObject( matid=d.Accession, accession=d.xID, gene_id=d.LocusLink, gene_symbol=d.Gene_Symbol, organism="", length=length) matrices.append(x) return matrices MATRIX_CACHE = None def load_matrices(): # Return a list of objects with members: # matid matrix ID # accession for TRANSFAC only # gene_id # gene_symbol # length global MATRIX_CACHE if not MATRIX_CACHE: MATRIX_CACHE = _load_matrices_h() return MATRIX_CACHE def list_matrices(): # Return a list of matrix_id, gene_symbol matrix_db = load_matrices() x = [(d.matid, d.gene_symbol) for d in matrix_db] return x def matid2matrix(matid): # Return a matrix object (see load_matrices). matrix_db = load_matrices() x = [x for x in matrix_db if x.matid.upper() == matid.upper()] assert x, "I couldn't find matrix: %s." 
% matid assert len(x) == 1, "Multiple matches for matid %s" % matid return x[0] def gene2matrices(gene_symbol): ugene_symbol = gene_symbol.upper() matrices = load_matrices() x = [x for x in matrices if x.gene_symbol.upper() == ugene_symbol] return x def is_matrix_id(matid): matrix_db = load_matrices() x = [x for x in matrix_db if x.matid.upper() == matid.upper()] return len(x) > 0 def find_matrix_file(matrix_file_or_id): # Take a matrix ID and return the name of the file or None. import config if os.path.exists(matrix_file_or_id): return matrix_file_or_id # If it's not a file, it must be an ID. matrix_id = matrix_file_or_id opj = os.path.join files = [ opj(config.JASPAR_DB, "%s.pfm" % matrix_id), opj(config.TRANSFAC_DB, "%s.pfm" % matrix_id), #opj(config.MOTIFSEARCH, "matrices/%s.matrix" % matrix_id), ] for filename in files: if os.path.exists(filename): return filename return None #raise AssertionError, "Cannot find matrix file for %s" % matrix_id ## def get_tfbs_genome_db(chrom, start, end, matrices=None, nlp=None): ## # Return list of matrix, chrom, strand, position, NLP. ## # NLP is given in log_e. ## assert start >= 0 and end >= 0 ## assert start <= end ## # Get a list of the genpos records that I should consider. ## # Should bind variable for chrom, but when I do that, I get 0 rows ## # back? Also reported on the internet, but no fix. ## x = """SELECT gps_id, gps_position FROM genpos ## WHERE RTRIM(gps_chrom) = :chrom AND ## (gps_position = ( ## SELECT MAX(gps_position) FROM genpos WHERE ## RTRIM(gps_chrom) = :chrom AND gps_position <= :start1) OR ## gps_position = ( ## SELECT MAX(gps_position) FROM genpos WHERE ## RTRIM(gps_chrom) = :chrom AND gps_position < :end1)) ## ORDER BY gps_position ## """ ## results = _query_db(x, chrom=chrom, start1=start, end1=end) ## gps_ids = [x[0] for x in results] ## assert gps_ids, "Could not find genome position: %s [%d-%d]" % ( ## chrom, start, end) ## x = ",".join(map(str, gps_ids)) ## gpsid_query = "AND gps_id IN (%s)" % x ## matrix_query = "" ## if matrices is not None: ## x = ",".join(["'%s'" % x for x in matrices]) ## matrix_query = "AND RTRIM(mat_name) IN (%s)" % x ## nlp_query = "" ## if nlp is not None: ## nlp_query = "AND tbs_nlp >= %g" % nlp ## x = """SELECT mat_name, gps_chrom, gps_strand, gps_position, tbs_offset, ## tbs_nlp ## FROM tfbs, genpos, matrix ## WHERE tbs_gpsid=gps_id AND tbs_matid=mat_id ## %(gpsid_query)s ## %(matrix_query)s ## %(nlp_query)s ## AND gps_position+tbs_offset >= :start1 ## AND gps_position+tbs_offset < :end1 ## ORDER BY gps_chrom, gps_position+tbs_offset, gps_strand, mat_name ## """ % locals() ## results = _query_db(x, start1=start, end1=end) ## BATCH_SIZE = 10000 ## data, num = [], 0 ## for x in results: ## matrix, chrom, strand, position, offset, nlp = x ## chrom = chrom.strip() ## pos = position + offset ## if num >= len(data): ## data += [None] * BATCH_SIZE ## data[num] = matrix, chrom, strand, pos, nlp ## num += 1 ## return data[:num] ## def get_tfbs_tss_db(chrom, txn_start, txn_end, txn_strand, bp_upstream, length, ## matrices=None, nlp=None): ## # Return list of matrix, chrom, strand, gen_pos, tss, tss_pos, NLP. ## # gen_pos is the position of the matrix on the chromosome. ## # tss_pos is the position of the matrix relative to the TSS. ## # NLP is given in log_e. 
## import genomelib ## x = genomelib.transcript2genome( ## txn_start, txn_end, txn_strand, bp_upstream, length) ## gen_start, gen_end = x ## tss = genomelib.determine_tss(txn_start, txn_end, txn_strand) ## data = get_tfbs_genome_db( ## chrom, gen_start, gen_end, matrices=matrices, nlp=nlp) ## results = [] ## for x in data: ## matrix, chrom, mat_strand, mat_pos, nlp = x ## tss_pos = genomelib.genpos2tsspos(mat_pos, tss, txn_strand) ## x = matrix, chrom, mat_strand, mat_pos, tss, tss_pos, nlp ## results.append(x) ## return results ## def _query_db(*args, **keywds): ## import cx_Oracle ## # Should put this in a configuration file. ## connection = cx_Oracle.connect("jefftc", "imaoraclem", "student1") ## cursor = connection.cursor() ## cursor.arraysize = 50 ## cursor.execute(*args, **keywds) ## return cursor.fetchall()
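

# --- Illustrative usage (coordinates and threshold are made up; requires the
# genomelib/patser/config modules this file imports).
# score_tfbs_genome() returns (matrix_id, chrom, strand, position, NLP)
# tuples sorted by chromosome, position, strand and matrix, with NLP in
# log_e units. A scan of 1 kb of chr1 against every known matrix, keeping
# hits with NLP >= 5, might look like:
#
#   hits = score_tfbs_genome("chr1", 1000000, 1000, nlp=5.0, num_procs=4)
#   for matid, chrom, strand, pos, nlp in hits:
#       print(matid, chrom, strand, pos, nlp)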
# Template for new Pygame project # KidsCanCode 2014 import pygame import sys import random # define some colors (R, G, B) BLACK = (0, 0, 0) GRAY = (155, 155, 155) WHITE = (255, 255, 255) RED = (255, 0, 0) GREEN = (0, 255, 0) BLUE = (0, 0, 255) YELLOW = (255, 255, 0) PURPLE = (119, 22, 245) ORANGE = (255, 159, 0) BGCOLOR = GRAY # basic constants for your game options FPS = 15 COLS = 8 ROWS = 8 MARGIN = 2 BORDER = 5 PAD = 50 # extra space on the bottom of the board for text CELLWIDTH = 50 CELLHEIGHT = 50 WIDTH = CELLWIDTH * COLS + 2 * BORDER + MARGIN * (COLS - 1) HEIGHT = CELLHEIGHT * ROWS + 2 * BORDER + MARGIN * (ROWS - 1) + PAD SHAPES = [RED, GREEN, BLUE, YELLOW, PURPLE, ORANGE] class Board: # rectanglar board def __init__(self): self.columns = COLS self.rows = ROWS self.blank = pygame.Surface((CELLWIDTH, CELLHEIGHT)) self.blank.fill(GRAY) self.selectbox = pygame.Surface((CELLWIDTH, CELLHEIGHT)) self.selectbox.fill(BLACK) self.selectbox.set_colorkey(BLACK) self.select_rect = self.selectbox.get_rect() pygame.draw.rect(self.selectbox, WHITE, self.select_rect, 3) self.board = [[self.blank for _ in range(ROWS)] for _ in range(COLS)] self.matches = [] self.refill = [] self.score = 0 self.shapes = [] self.selected = None self.animating = False for shape in SHAPES: img = pygame.Surface((CELLWIDTH, CELLHEIGHT)) img.fill(shape) self.shapes.append(img) def shuffle(self): # new board for row in range(ROWS): for col in range(COLS): self.board[row][col] = random.choice(self.shapes) def draw(self): # draw the board for i, row in enumerate(self.board): for j, shape in enumerate(row): screen.blit(shape, (BORDER + CELLWIDTH * j + MARGIN * j, BORDER + CELLHEIGHT * i + MARGIN * i)) if self.selected == (i, j): screen.blit(self.selectbox, (BORDER + CELLWIDTH * j + MARGIN * j, BORDER + CELLHEIGHT * i + MARGIN * i)) def find_matches(self): # find all 3 (or more) in a rows, return a list of cells match_list = [] # first check the columns for col in range(COLS): length = 1 for row in range(1, ROWS): match = self.board[row][col] == self.board[row-1][col] if match: length += 1 if not match or row == ROWS - 1: if row == ROWS - 1: row += 1 if length >= 3: match_cells = [] for clear_row in range(row-length, row): match_cells.append((clear_row, col)) match_list.append(match_cells) length = 1 # now check the rows for row in range(ROWS): length = 1 for col in range(1, COLS): match = self.board[row][col] == self.board[row][col-1] if match: length += 1 if not match or col == COLS - 1: if col == COLS - 1: col += 1 if length >= 3: match_cells = [] for clear_col in range(col-length, col): match_cells.append((row, clear_col)) match_list.append(match_cells) length = 1 return match_list def clear_matches(self, matches): for match in matches: for cell in match: row, col = cell self.board[row][col] = self.blank self.animating = True match_score = len(match) * 10 self.animate_score(match_score, match[0]) self.animating = False self.score += match_score self.draw() def animate_score(self, score, loc): score_text = str(score) y = BORDER + loc[0] * (CELLWIDTH + MARGIN) x = BORDER + loc[1] * (CELLHEIGHT + MARGIN) for i in range(10): draw_text(score_text, 24, x, y, BLACK) pygame.display.update() clock.tick(FPS) def fill_blanks(self): for col in range(COLS): for row in range(ROWS): if self.board[row][col] == self.blank: test = 0 length = 0 # how long is the clear space? 
while row + test < ROWS and self.board[row+test][col] == self.blank: length += 1 test += 1 for blank_row in range(row, ROWS): try: self.board[blank_row][col] = self.board[blank_row+length][col] except: self.board[blank_row][col] = random.choice(self.shapes) def clicked(self, pos): # convert the pos to a grid location click_col = pos[0] // (CELLWIDTH + MARGIN) click_row = pos[1] // (CELLHEIGHT + MARGIN) # if we click outside the board, select nothing if click_col >= COLS or click_row >= ROWS: self.selected = None else: if not self.selected: self.selected = (click_row, click_col) elif self.selected[0] == click_row and self.selected[1] == click_col: self.selected = None elif not self.animating: if self.adjacent(self.selected, (click_row, click_col)): self.swap(self.selected, (click_row, click_col)) self.selected = None def adjacent(self, loc1, loc2): if (loc1[0] == loc2[0] and abs(loc1[1]-loc2[1]) == 1) or (loc1[1] == loc2[1] and abs(loc1[0]-loc2[0]) == 1): return True return False def swap(self, loc1, loc2): self.board[loc1[0]][loc1[1]], self.board[loc2[0]][loc2[1]] = self.board[loc2[0]][loc2[1]], self.board[loc1[0]][loc1[1]] def draw_text(text, size, x, y, color=WHITE): # utility function to draw text on screen font_name = pygame.font.match_font('arial') font = pygame.font.Font(font_name, size) text_surface = font.render(text, True, color) text_rect = text_surface.get_rect() text_rect.topleft = (x, y) screen.blit(text_surface, text_rect) # initialize pygame pygame.init() # initialize sound - remove if you're not using sound pygame.mixer.init() screen = pygame.display.set_mode((WIDTH, HEIGHT)) pygame.display.set_caption("My Game") clock = pygame.time.Clock() board = Board() board.shuffle() running = True while running: clock.tick(FPS) # check for all your events for event in pygame.event.get(): # this one checks for the window being closed if event.type == pygame.QUIT: pygame.quit() sys.exit() # now check for keypresses elif event.type == pygame.KEYDOWN: # this one quits if the player presses Esc if event.key == pygame.K_ESCAPE: pygame.quit() sys.exit() # add any other key events here # if event.key == pygame.K_SPACE: # board.fill_blanks() elif event.type == pygame.MOUSEBUTTONDOWN: board.clicked(event.pos) ##### Game logic goes here ######### m = board.find_matches() board.clear_matches(m) board.fill_blanks() ##### Draw/update screen ######### screen.fill(BGCOLOR) board.draw() score_text = 'Score: %s' % board.score draw_text(score_text, 18, BORDER, HEIGHT-30) # after drawing, flip the display pygame.display.flip()
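# A minimal standalone sketch (not used by the game above) of the same run-length
# scan that Board.find_matches performs, applied to a plain grid of color names so
# the matching logic can be checked without creating pygame surfaces or a window.
def find_runs(grid, min_length=3):
    """Return groups of (row, col) cells forming vertical or horizontal runs."""
    rows, cols = len(grid), len(grid[0])
    runs = []
    # scan each column top to bottom
    for col in range(cols):
        run = [(0, col)]
        for row in range(1, rows):
            if grid[row][col] == grid[row - 1][col]:
                run.append((row, col))
            else:
                if len(run) >= min_length:
                    runs.append(run)
                run = [(row, col)]
        if len(run) >= min_length:
            runs.append(run)
    # scan each row left to right
    for row in range(rows):
        run = [(row, 0)]
        for col in range(1, cols):
            if grid[row][col] == grid[row][col - 1]:
                run.append((row, col))
            else:
                if len(run) >= min_length:
                    runs.append(run)
                run = [(row, col)]
        if len(run) >= min_length:
            runs.append(run)
    return runs

# Example: column 0 holds three "R" in a row and row 1 holds three "B" in a row.
# find_runs(grid_example) -> [[(0, 0), (1, 0), (2, 0)], [(1, 1), (1, 2), (1, 3)]]
grid_example = [["R", "G", "B", "G"],
                ["R", "B", "B", "B"],
                ["R", "G", "R", "G"]]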
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from itertools import tee import jmespath from botocore.exceptions import PaginationError from botocore.compat import zip from botocore.utils import set_value_from_jmespath, merge_dicts class PageIterator(object): def __init__(self, method, input_token, output_token, more_results, result_keys, non_aggregate_keys, limit_key, max_items, starting_token, page_size, op_kwargs): self._method = method self._op_kwargs = op_kwargs self._input_token = input_token self._output_token = output_token self._more_results = more_results self._result_keys = result_keys self._max_items = max_items self._limit_key = limit_key self._starting_token = starting_token self._page_size = page_size self._op_kwargs = op_kwargs self._resume_token = None self._non_aggregate_key_exprs = non_aggregate_keys self._non_aggregate_part = {} @property def result_keys(self): return self._result_keys @property def resume_token(self): """Token to specify to resume pagination.""" return self._resume_token @resume_token.setter def resume_token(self, value): if isinstance(value, list): self._resume_token = '___'.join([str(v) for v in value]) @property def non_aggregate_part(self): return self._non_aggregate_part def __iter__(self): current_kwargs = self._op_kwargs previous_next_token = None next_token = [None for _ in range(len(self._input_token))] # The number of items from result_key we've seen so far. total_items = 0 first_request = True primary_result_key = self.result_keys[0] starting_truncation = 0 self._inject_starting_params(current_kwargs) while True: response = self._make_request(current_kwargs) parsed = self._extract_parsed_response(response) if first_request: # The first request is handled differently. We could # possibly have a resume/starting token that tells us where # to index into the retrieved page. if self._starting_token is not None: starting_truncation = self._handle_first_request( parsed, primary_result_key, starting_truncation) first_request = False self._record_non_aggregate_key_values(parsed) current_response = primary_result_key.search(parsed) if current_response is None: current_response = [] num_current_response = len(current_response) truncate_amount = 0 if self._max_items is not None: truncate_amount = (total_items + num_current_response) \ - self._max_items if truncate_amount > 0: self._truncate_response(parsed, primary_result_key, truncate_amount, starting_truncation, next_token) yield response break else: yield response total_items += num_current_response next_token = self._get_next_token(parsed) if all(t is None for t in next_token): break if self._max_items is not None and \ total_items == self._max_items: # We're on a page boundary so we can set the current # next token to be the resume token. 
self.resume_token = next_token break if previous_next_token is not None and \ previous_next_token == next_token: message = ("The same next token was received " "twice: %s" % next_token) raise PaginationError(message=message) self._inject_token_into_kwargs(current_kwargs, next_token) previous_next_token = next_token def _make_request(self, current_kwargs): return self._method(**current_kwargs) def _extract_parsed_response(self, response): return response def _record_non_aggregate_key_values(self, response): non_aggregate_keys = {} for expression in self._non_aggregate_key_exprs: result = expression.search(response) set_value_from_jmespath(non_aggregate_keys, expression.expression, result) self._non_aggregate_part = non_aggregate_keys def _inject_starting_params(self, op_kwargs): # If the user has specified a starting token we need to # inject that into the operation's kwargs. if self._starting_token is not None: # Don't need to do anything special if there is no starting # token specified. next_token = self._parse_starting_token()[0] self._inject_token_into_kwargs(op_kwargs, next_token) if self._page_size is not None: # Pass the page size as the parameter name for limiting # page size, also known as the limit_key. op_kwargs[self._limit_key] = self._page_size def _inject_token_into_kwargs(self, op_kwargs, next_token): for name, token in zip(self._input_token, next_token): if token is None or token == 'None': continue op_kwargs[name] = token def _handle_first_request(self, parsed, primary_result_key, starting_truncation): # First we need to slice into the array and only return # the truncated amount. starting_truncation = self._parse_starting_token()[1] all_data = primary_result_key.search(parsed) set_value_from_jmespath( parsed, primary_result_key.expression, all_data[starting_truncation:] ) # We also need to truncate any secondary result keys # because they were not truncated in the previous last # response. for token in self.result_keys: if token == primary_result_key: continue set_value_from_jmespath(parsed, token.expression, []) return starting_truncation def _truncate_response(self, parsed, primary_result_key, truncate_amount, starting_truncation, next_token): original = primary_result_key.search(parsed) if original is None: original = [] amount_to_keep = len(original) - truncate_amount truncated = original[:amount_to_keep] set_value_from_jmespath( parsed, primary_result_key.expression, truncated ) # The issue here is that even though we know how much we've truncated # we need to account for this globally including any starting # left truncation. For example: # Raw response: [0,1,2,3] # Starting index: 1 # Max items: 1 # Starting left truncation: [1, 2, 3] # End right truncation for max items: [1] # However, even though we only kept 1, this is post # left truncation so the next starting index should be 2, not 1 # (left_truncation + amount_to_keep). next_token.append(str(amount_to_keep + starting_truncation)) self.resume_token = next_token def _get_next_token(self, parsed): if self._more_results is not None: if not self._more_results.search(parsed): return [None] next_tokens = [] for token in self._output_token: next_tokens.append(token.search(parsed)) return next_tokens def result_key_iters(self): teed_results = tee(self, len(self.result_keys)) return [ResultKeyIterator(i, result_key) for i, result_key in zip(teed_results, self.result_keys)] def build_full_result(self): complete_result = {} # Prepopulate the result keys with an empty list. 
for result_expression in self.result_keys: set_value_from_jmespath(complete_result, result_expression.expression, []) for _, page in self: # We're incrementally building the full response page # by page. For each page in the response we need to # inject the necessary components from the page # into the complete_result. for result_expression in self.result_keys: # In order to incrementally update a result key # we need to search the existing value from complete_result, # then we need to search the _current_ page for the # current result key value. Then we append the current # value onto the existing value, and re-set that value # as the new value. existing_value = result_expression.search(complete_result) result_value = result_expression.search(page) if result_value is not None: existing_value.extend(result_value) merge_dicts(complete_result, self.non_aggregate_part) if self.resume_token is not None: complete_result['NextToken'] = self.resume_token return complete_result def _parse_starting_token(self): if self._starting_token is None: return None parts = self._starting_token.split('___') next_token = [] index = 0 if len(parts) == len(self._input_token) + 1: try: index = int(parts.pop()) except ValueError: raise ValueError("Bad starting token: %s" % self._starting_token) for part in parts: if part == 'None': next_token.append(None) else: next_token.append(part) return next_token, index class Paginator(object): PAGE_ITERATOR_CLS = PageIterator def __init__(self, method, pagination_config): self._method = method self._pagination_cfg = pagination_config self._output_token = self._get_output_tokens(self._pagination_cfg) self._input_token = self._get_input_tokens(self._pagination_cfg) self._more_results = self._get_more_results_token(self._pagination_cfg) self._non_aggregate_keys = self._get_non_aggregate_keys( self._pagination_cfg) self._result_keys = self._get_result_keys(self._pagination_cfg) self._limit_key = self._get_limit_key(self._pagination_cfg) @property def result_keys(self): return self._result_keys def _get_non_aggregate_keys(self, config): keys = [] for key in config.get('non_aggregate_keys', []): keys.append(jmespath.compile(key)) return keys def _get_output_tokens(self, config): output = [] output_token = config['output_token'] if not isinstance(output_token, list): output_token = [output_token] for config in output_token: output.append(jmespath.compile(config)) return output def _get_input_tokens(self, config): input_token = self._pagination_cfg['input_token'] if not isinstance(input_token, list): input_token = [input_token] return input_token def _get_more_results_token(self, config): more_results = config.get('more_results') if more_results is not None: return jmespath.compile(more_results) def _get_result_keys(self, config): result_key = config.get('result_key') if result_key is not None: if not isinstance(result_key, list): result_key = [result_key] result_key = [jmespath.compile(rk) for rk in result_key] return result_key def _get_limit_key(self, config): return config.get('limit_key') def paginate(self, **kwargs): """Create paginator object for an operation. This returns an iterable object. Iterating over this object will yield a single page of a response at a time. 
""" page_params = self._extract_paging_params(kwargs) return self.PAGE_ITERATOR_CLS( self._method, self._input_token, self._output_token, self._more_results, self._result_keys, self._non_aggregate_keys, self._limit_key, page_params['max_items'], page_params['starting_token'], page_params['page_size'], kwargs) def _extract_paging_params(self, kwargs): max_items = kwargs.pop('max_items', None) if max_items is not None: max_items = int(max_items) page_size = kwargs.pop('page_size', None) if page_size is not None: page_size = int(page_size) return { 'max_items': max_items, 'starting_token': kwargs.pop('starting_token', None), 'page_size': page_size, } class ResultKeyIterator(object): """Iterates over the results of paginated responses. Each iterator is associated with a single result key. Iterating over this object will give you each element in the result key list. :param pages_iterator: An iterator that will give you pages of results (a ``PageIterator`` class). :param result_key: The JMESPath expression representing the result key. """ def __init__(self, pages_iterator, result_key): self._pages_iterator = pages_iterator self.result_key = result_key def __iter__(self): for _, page in self._pages_iterator: results = self.result_key.search(page) if results is None: results = [] for result in results: yield result # These two class use the Operation.call() interface that is # being deprecated. This is here so that both interfaces can be # supported during a transition period. Eventually these two # interfaces will be removed. class DeprecatedPageIterator(PageIterator): def __init__(self, operation, endpoint, input_token, output_token, more_results, result_keys, non_aggregate_keys, limit_key, max_items, starting_token, page_size, op_kwargs): super(DeprecatedPageIterator, self).__init__( None, input_token, output_token, more_results, result_keys, non_aggregate_keys, limit_key, max_items, starting_token, page_size, op_kwargs) self._operation = operation self._endpoint = endpoint def _make_request(self, current_kwargs): return self._operation.call(self._endpoint, **current_kwargs) def _extract_parsed_response(self, response): return response[1] class DeprecatedPaginator(Paginator): PAGE_ITERATOR_CLS = DeprecatedPageIterator def __init__(self, operation, pagination_config): super(DeprecatedPaginator, self).__init__(None, pagination_config) self._operation = operation def paginate(self, endpoint, **kwargs): """Paginate responses to an operation. The responses to some operations are too large for a single response. When this happens, the service will indicate that there are more results in its response. This method handles the details of how to detect when this happens and how to retrieve more results. """ page_params = self._extract_paging_params(kwargs) return self.PAGE_ITERATOR_CLS( self._operation, endpoint, self._input_token, self._output_token, self._more_results, self._result_keys, self._non_aggregate_keys, self._limit_key, page_params['max_items'], page_params['starting_token'], page_params['page_size'], kwargs)
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe from frappe import _ from frappe.utils import now, cint from frappe.model import no_value_fields from frappe.model.document import Document from frappe.custom.doctype.property_setter.property_setter import make_property_setter from frappe.desk.notifications import delete_notification_count_for from frappe.modules import make_boilerplate form_grid_templates = { "fields": "templates/form_grid/fields.html" } class DocType(Document): __doclink__ = "https://frappe.io/docs/models/core/doctype" def get_feed(self): return self.name def validate(self): """Validate DocType before saving. - Check if developer mode is set. - Validate series - Check fieldnames (duplication etc) - Clear permission table for child tables - Add `amended_from` and `ameneded_by` if Amendable""" if not frappe.conf.get("developer_mode") and not self.custom: frappe.throw(_("Not in Developer Mode! Set in site_config.json or make 'Custom' DocType.")) for c in [".", "/", "#", "&", "=", ":", "'", '"']: if c in self.name: frappe.throw(_("{0} not allowed in name").format(c)) self.validate_series() self.scrub_field_names() self.validate_title_field() validate_fields(self) if self.istable: # no permission records for child table self.permissions = [] else: validate_permissions(self) self.make_amendable() def change_modified_of_parent(self): """Change the timestamp of parent DocType if the current one is a child to clear caches.""" if frappe.flags.in_import: return parent_list = frappe.db.sql("""SELECT parent from tabDocField where fieldtype="Table" and options=%s""", self.name) for p in parent_list: frappe.db.sql('UPDATE tabDocType SET modified=%s WHERE `name`=%s', (now(), p[0])) def scrub_field_names(self): """Sluggify fieldnames if not set from Label.""" restricted = ('name','parent','creation','modified','modified_by', 'parentfield','parenttype',"file_list") for d in self.get("fields"): if d.fieldtype: if (not getattr(d, "fieldname", None)): if d.label: d.fieldname = d.label.strip().lower().replace(' ','_') if d.fieldname in restricted: d.fieldname = d.fieldname + '1' else: d.fieldname = d.fieldtype.lower().replace(" ","_") + "_" + str(d.idx) def validate_title_field(self): """Throw exception if `title_field` is not a valid field.""" if self.title_field and \ self.title_field not in [d.fieldname for d in self.get("fields")]: frappe.throw(_("Title field must be a valid fieldname")) def validate_series(self, autoname=None, name=None): """Validate if `autoname` property is correctly set.""" if not autoname: autoname = self.autoname if not name: name = self.name if not autoname and self.get("fields", {"fieldname":"naming_series"}): self.autoname = "naming_series:" if autoname and (not autoname.startswith('field:')) \ and (not autoname.startswith('eval:')) \ and (not autoname in ('Prompt', 'hash')) \ and (not autoname.startswith('naming_series:')): prefix = autoname.split('.')[0] used_in = frappe.db.sql('select name from tabDocType where substring_index(autoname, ".", 1) = %s and name!=%s', (prefix, name)) if used_in: frappe.throw(_("Series {0} already used in {1}").format(prefix, used_in[0][0])) def on_update(self): """Update database schema, make controller templates if `custom` is not set and clear cache.""" from frappe.model.db_schema import updatedb updatedb(self.name) self.change_modified_of_parent() make_module_and_roles(self) from frappe import conf if not self.custom and not 
(frappe.flags.in_import or frappe.flags.in_test) and conf.get('developer_mode'): self.export_doc() self.make_controller_template() # update index if not self.custom: self.run_module_method("on_doctype_update") if self.flags.in_insert: self.run_module_method("after_doctype_insert") delete_notification_count_for(doctype=self.name) frappe.clear_cache(doctype=self.name) def run_module_method(self, method): from frappe.modules import load_doctype_module module = load_doctype_module(self.name, self.module) if hasattr(module, method): getattr(module, method)() def before_rename(self, old, new, merge=False): """Throw exception if merge. DocTypes cannot be merged.""" if merge: frappe.throw(_("DocType can not be merged")) def after_rename(self, old, new, merge=False): """Change table name using `RENAME TABLE` if table exists. Or update `doctype` property for Single type.""" if self.issingle: frappe.db.sql("""update tabSingles set doctype=%s where doctype=%s""", (new, old)) else: frappe.db.sql("rename table `tab%s` to `tab%s`" % (old, new)) def before_reload(self): """Preserve naming series changes in Property Setter.""" if not (self.issingle and self.istable): self.preserve_naming_series_options_in_property_setter() def preserve_naming_series_options_in_property_setter(self): """Preserve naming_series as property setter if it does not exist""" naming_series = self.get("fields", {"fieldname": "naming_series"}) if not naming_series: return # check if atleast 1 record exists if not (frappe.db.table_exists(self.name) and frappe.db.sql("select name from `tab{}` limit 1".format(self.name))): return existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.name, "property": "options", "field_name": "naming_series"}) if not existing_property_setter: make_property_setter(self.name, "naming_series", "options", naming_series[0].options, "Text", validate_fields_for_doctype=False) if naming_series[0].default: make_property_setter(self.name, "naming_series", "default", naming_series[0].default, "Text", validate_fields_for_doctype=False) def export_doc(self): """Export to standard folder `[module]/doctype/[name]/[name].json`.""" from frappe.modules.export_file import export_to_files export_to_files(record_list=[['DocType', self.name]]) def import_doc(self): """Import from standard folder `[module]/doctype/[name]/[name].json`.""" from frappe.modules.import_module import import_from_files import_from_files(record_list=[[self.module, 'doctype', self.name]]) def make_controller_template(self): """Make boilderplate controller template.""" make_boilerplate("controller.py", self) if not (self.istable or self.issingle): make_boilerplate("test_controller.py", self) def make_amendable(self): """If is_submittable is set, add amended_from docfields.""" if self.is_submittable: if not frappe.db.sql("""select name from tabDocField where fieldname = 'amended_from' and parent = %s""", self.name): self.append("fields", { "label": "Amended From", "fieldtype": "Link", "fieldname": "amended_from", "options": self.name, "read_only": 1, "print_hide": 1, "no_copy": 1 }) def get_max_idx(self): """Returns the highest `idx`""" max_idx = frappe.db.sql("""select max(idx) from `tabDocField` where parent = %s""", self.name) return max_idx and max_idx[0][0] or 0 def validate_fields_for_doctype(doctype): validate_fields(frappe.get_meta(doctype)) # this is separate because it is also called via custom field def validate_fields(meta): """Validate doctype fields. Checks 1. There are no illegal characters in fieldnames 2. 
If fieldnames are unique. 3. Fields that do have database columns are not mandatory. 4. `Link` and `Table` options are valid. 5. **Hidden** and **Mandatory** are not set simultaneously. 7. `Check` type field has default as 0 or 1. 8. `Dynamic Links` are correctly defined. 9. Precision is set in numeric fields and is between 1 & 6. 10. Fold is not at the end (if set). 11. `search_fields` are valid. :param meta: `frappe.model.meta.Meta` object to check.""" def check_illegal_characters(fieldname): for c in ['.', ',', ' ', '-', '&', '%', '=', '"', "'", '*', '$', '(', ')', '[', ']', '/']: if c in fieldname: frappe.throw(_("{0} not allowed in fieldname {1}").format(c, fieldname)) def check_unique_fieldname(fieldname): duplicates = filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields)) if len(duplicates) > 1: frappe.throw(_("Fieldname {0} appears multiple times in rows {1}").format(fieldname, ", ".join(duplicates))) def check_illegal_mandatory(d): if (d.fieldtype in no_value_fields) and d.fieldtype!="Table" and d.reqd: frappe.throw(_("Field {0} of type {1} cannot be mandatory").format(d.label, d.fieldtype)) def check_link_table_options(d): if d.fieldtype in ("Link", "Table"): if not d.options: frappe.throw(_("Options requried for Link or Table type field {0} in row {1}").format(d.label, d.idx)) if d.options=="[Select]" or d.options==d.parent: return if d.options != d.parent: options = frappe.db.get_value("DocType", d.options, "name") if not options: frappe.throw(_("Options must be a valid DocType for field {0} in row {1}").format(d.label, d.idx)) else: # fix case d.options = options def check_hidden_and_mandatory(d): if d.hidden and d.reqd and not d.default: frappe.throw(_("Field {0} in row {1} cannot be hidden and mandatory without default").format(d.label, d.idx)) def check_width(d): if d.fieldtype == "Currency" and cint(d.width) < 100: frappe.throw(_("Max width for type Currency is 100px in row {0}").format(d.idx)) def check_in_list_view(d): if d.in_list_view and d.fieldtype!="Image" and (d.fieldtype in no_value_fields): frappe.throw(_("'In List View' not allowed for type {0} in row {1}").format(d.fieldtype, d.idx)) def check_dynamic_link_options(d): if d.fieldtype=="Dynamic Link": doctype_pointer = filter(lambda df: df.fieldname==d.options, fields) if not doctype_pointer or (doctype_pointer[0].fieldtype!="Link") \ or (doctype_pointer[0].options!="DocType"): frappe.throw(_("Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'")) def check_illegal_default(d): if d.fieldtype == "Check" and d.default and d.default not in ('0', '1'): frappe.throw(_("Default for 'Check' type of field must be either '0' or '1'")) if d.fieldtype == "Select" and d.default and (d.default not in d.options.split("\n")): frappe.throw(_("Default for {0} must be an option").format(d.fieldname)) def check_precision(d): if d.fieldtype in ("Currency", "Float", "Percent") and d.precision is not None and not (1 <= cint(d.precision) <= 6): frappe.throw(_("Precision should be between 1 and 6")) def check_unique_and_text(d): if getattr(d, "unique", False) and d.fieldtype in ("Text", "Long Text", "Small Text", "Code", "Text Editor"): frappe.throw(_("Fieldtype {0} for {1} cannot be unique").format(d.fieldtype, d.label)) if d.search_index and d.fieldtype in ("Text", "Long Text", "Small Text", "Code", "Text Editor"): frappe.throw(_("Fieldtype {0} for {1} cannot be indexed").format(d.fieldtype, d.label)) def check_fold(fields): fold_exists = False for i, f in 
enumerate(fields): if f.fieldtype=="Fold": if fold_exists: frappe.throw(_("There can be only one Fold in a form")) fold_exists = True if i < len(fields)-1: nxt = fields[i+1] if nxt.fieldtype != "Section Break": frappe.throw(_("Fold must come before a Section Break")) else: frappe.throw(_("Fold can not be at the end of the form")) def check_search_fields(meta): if not meta.search_fields: return fieldname_list = [d.fieldname for d in fields] for fieldname in (meta.search_fields or "").split(","): fieldname = fieldname.strip() if fieldname not in fieldname_list: frappe.throw(_("Search Fields should contain valid fieldnames")) fields = meta.get("fields") for d in fields: if not d.permlevel: d.permlevel = 0 if not d.fieldname: frappe.throw(_("Fieldname is required in row {0}").format(d.idx)) d.fieldname = d.fieldname.lower() check_illegal_characters(d.fieldname) check_unique_fieldname(d.fieldname) check_illegal_mandatory(d) check_link_table_options(d) check_dynamic_link_options(d) check_hidden_and_mandatory(d) check_in_list_view(d) check_illegal_default(d) check_unique_and_text(d) check_fold(fields) check_search_fields(meta) def validate_permissions_for_doctype(doctype, for_remove=False): """Validates if permissions are set correctly.""" doctype = frappe.get_doc("DocType", doctype) if frappe.conf.developer_mode and not frappe.flags.in_test: # save doctype doctype.save() else: validate_permissions(doctype, for_remove) # save permissions for perm in doctype.get("permissions"): perm.db_update() def validate_permissions(doctype, for_remove=False): permissions = doctype.get("permissions") if not permissions: frappe.throw(_('Enter at least one permission row'), frappe.MandatoryError) issingle = issubmittable = isimportable = False if doctype: issingle = cint(doctype.issingle) issubmittable = cint(doctype.is_submittable) isimportable = cint(doctype.allow_import) def get_txt(d): return _("For {0} at level {1} in {2} in row {3}").format(d.role, d.permlevel, d.parent, d.idx) def check_atleast_one_set(d): if not d.read and not d.write and not d.submit and not d.cancel and not d.create: frappe.throw(_("{0}: No basic permissions set").format(get_txt(d))) def check_double(d): has_similar = False for p in permissions: if (p.role==d.role and p.permlevel==d.permlevel and p.apply_user_permissions==d.apply_user_permissions and p!=d): has_similar = True break if has_similar: frappe.throw(_("{0}: Only one rule allowed with the same Role, Level and Apply User Permissions").format(get_txt(d))) def check_level_zero_is_set(d): if cint(d.permlevel) > 0 and d.role != 'All': has_zero_perm = False for p in permissions: if p.role==d.role and (p.permlevel or 0)==0 and p!=d: has_zero_perm = True break if not has_zero_perm: frappe.throw(_("{0}: Permission at level 0 must be set before higher levels are set").format(get_txt(d))) for invalid in ("create", "submit", "cancel", "amend"): if d.get(invalid): d.set(invalid, 0) def check_permission_dependency(d): if d.cancel and not d.submit: frappe.throw(_("{0}: Cannot set Cancel without Submit").format(get_txt(d))) if (d.submit or d.cancel or d.amend) and not d.write: frappe.throw(_("{0}: Cannot set Submit, Cancel, Amend without Write").format(get_txt(d))) if d.amend and not d.write: frappe.throw(_("{0}: Cannot set Amend without Cancel").format(get_txt(d))) if d.get("import") and not d.create: frappe.throw(_("{0}: Cannot set Import without Create").format(get_txt(d))) def remove_rights_for_single(d): if not issingle: return if d.report: frappe.msgprint(_("Report cannot be set for 
Single types")) d.report = 0 d.set("import", 0) d.set("export", 0) for ptype, label in ( ("set_user_permissions", _("Set User Permissions")), ("apply_user_permissions", _("Apply User Permissions"))): if d.get(ptype): d.set(ptype, 0) frappe.msgprint(_("{0} cannot be set for Single types").format(label)) def check_if_submittable(d): if d.submit and not issubmittable: frappe.throw(_("{0}: Cannot set Assign Submit if not Submittable").format(get_txt(d))) elif d.amend and not issubmittable: frappe.throw(_("{0}: Cannot set Assign Amend if not Submittable").format(get_txt(d))) def check_if_importable(d): if d.get("import") and not isimportable: frappe.throw(_("{0}: Cannot set import as {1} is not importable").format(get_txt(d), doctype)) for d in permissions: if not d.permlevel: d.permlevel=0 check_atleast_one_set(d) if not for_remove: check_double(d) check_permission_dependency(d) check_if_submittable(d) check_if_importable(d) check_level_zero_is_set(d) remove_rights_for_single(d) def make_module_and_roles(doc, perm_fieldname="permissions"): """Make `Module Def` and `Role` records if already not made. Called while installing.""" try: if not frappe.db.exists("Module Def", doc.module): m = frappe.get_doc({"doctype": "Module Def", "module_name": doc.module}) m.app_name = frappe.local.module_app[frappe.scrub(doc.module)] m.flags.ignore_mandatory = m.flags.ignore_permissions = True m.insert() default_roles = ["Administrator", "Guest", "All"] roles = [p.role for p in doc.get("permissions") or []] + default_roles for role in list(set(roles)): if not frappe.db.exists("Role", role): r = frappe.get_doc({"doctype": "Role", "role_name": role}) r.role_name = role r.flags.ignore_mandatory = r.flags.ignore_permissions = True r.insert() except frappe.DoesNotExistError, e: pass except frappe.SQLError, e: if e.args[0]==1146: pass else: raise def init_list(doctype): """Make boilerplate list views.""" doc = frappe.get_meta(doctype) make_boilerplate("controller_list.js", doc) make_boilerplate("controller_list.html", doc)
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: urban@reciprocitylabs.com # Maintained By: urban@reciprocitylabs.com """Make Request First-class object Revision ID: 27684e5f313a Revises: 3c8f204ba7a9 Create Date: 2015-10-16 17:48:06.875436 """ # revision identifiers, used by Alembic. from alembic import op import bleach import datetime from HTMLParser import HTMLParser import sqlalchemy.exc as sqlaexceptions import sqlalchemy.types as types from sqlalchemy.sql import column from sqlalchemy.sql import table revision = '27684e5f313a' down_revision = '1bad7fe16295' relationships_table = table( 'relationships', column('id'), column('source_id'), column('source_type'), column('destination_id'), column('destination_type'), column('context_id'), column('modified_by_id'), column('updated_at'), column('created_at'), ) comments_table = table( 'comments', column('id'), column('description'), column('created_at'), column('modified_by_id'), column('updated_at'), column('context_id') ) def cleaner(value, bleach_tags=[], bleach_attrs={}): if value is None: return value parser = HTMLParser() lastvalue = value value = parser.unescape(value) while value != lastvalue: lastvalue = value value = parser.unescape(value) ret = parser.unescape( bleach.clean(value, bleach_tags, bleach_attrs, strip=True) ) return ret def _build_comment(iid, description, created_at, modified_by, updated_at, context, request_id): context_id = context.id if context else None return { "id": iid, "description": description, "created_at": created_at, "modified_by_id": modified_by.id, "updated_at": updated_at, "context_id": context_id, "request_id": request_id, } def _build_request_comment_relationship(req_id, comm_id, context_id, modified_by_id): return { "source_type": "Request", "source_id": req_id, "destination_type": "Comment", "destination_id": comm_id, "context_id": context_id, "modified_by_id": modified_by_id, "updated_at": datetime.datetime.now(), "created_at": datetime.datetime.now() } def _build_request_object_relationship(req, dest): relationship = { "source_type": "Request", "source_id": req.id, "destination_type": dest.type, "destination_id": dest.id, "context_id": req.context.id if req.context else None, "updated_at": datetime.datetime.now(), "created_at": datetime.datetime.now() } identifier = ( 'Request', relationship['source_id'], relationship['destination_type'], relationship['destination_id'], ) return (identifier, relationship) def upgrade(): # 1. Move Audit Objects to Relationship table # source_type = audit_objects.auditable_type # source_id = audit_objects.auditable_id # destination_type = "Request" # destination_id = request.id op.execute(""" INSERT IGNORE INTO relationships ( modified_by_id, created_at, updated_at, source_id, source_type, destination_id, destination_type, context_id) SELECT AO.modified_by_id, NOW(), NOW(), AO.auditable_id, AO.auditable_type, R.id, "Request", AO.context_id FROM requests AS R, audit_objects AS AO WHERE AO.id = R.audit_object_id; """) # 2. 
Change status values op.execute( """ ALTER TABLE requests CHANGE status status ENUM("Draft","Requested","Responded","Amended Request", "Updated Response","Accepted","Unstarted","In Progress","Finished", "Verified") NOT NULL;""" ) op.execute( """ UPDATE requests SET status="Unstarted" WHERE status="Draft";""" ) op.execute( """ UPDATE requests SET status="In Progress" WHERE status="Requested";""" ) op.execute( """ UPDATE requests SET status="Finished" WHERE status="Responded";""" ) op.execute( """ UPDATE requests SET status="In Progress" WHERE status="Amended Request";""" ) op.execute( """ UPDATE requests SET status="Finished" WHERE status="Updated Response";""" ) op.execute( """ UPDATE requests SET status="Verified" WHERE status="Accepted";""" ) op.execute( """ ALTER TABLE requests CHANGE status status ENUM("Unstarted","In Progress","Finished","Verified") NOT NULL;""" ) # Drop foreign key relationship on assignee_id try: op.drop_constraint("requests_ibfk_1", "requests", type_="foreignkey") except sqlaexceptions.OperationalError as oe: # Ignores error in case constraint no longer exists error_code, _ = oe.orig.args # error_code, message if error_code != 1025: raise oe # Drop index on assignee_id try: op.drop_index("assignee_id", "requests") except sqlaexceptions.OperationalError as oe: # Ignores error in case index no longer exists error_code, _ = oe.orig.args # error_code, message if error_code != 1091: raise oe # Make assignee_id nullable op.alter_column("requests", "assignee_id", existing_nullable=False, nullable=True, type_=types.Integer) def downgrade(): pass
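# For reference, the status migration applied by the UPDATE statements above,
# written out as a plain mapping (old request status -> new request status).
OLD_TO_NEW_REQUEST_STATUS = {
    "Draft": "Unstarted",
    "Requested": "In Progress",
    "Responded": "Finished",
    "Amended Request": "In Progress",
    "Updated Response": "Finished",
    "Accepted": "Verified",
}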
from datetime import datetime from hashlib import sha1 try: from PIL import Image except ImportError: import Image import errno import logging import os import traceback from django.conf import settings from django import template from watermarker import utils from watermarker.models import Watermark register = template.Library() # determine the quality of the image after the watermark is applied QUALITY = getattr(settings, 'WATERMARKING_QUALITY', 85) OBSCURE = getattr(settings, 'WATERMARK_OBSCURE_ORIGINAL', True) RANDOM_POS_ONCE = getattr(settings, 'WATERMARK_RANDOM_POSITION_ONCE', True) log = logging.getLogger('watermarker') class Watermarker(object): def __call__(self, url, name, position=None, opacity=0.5, tile=False, scale=1.0, greyscale=False, rotation=0, obscure=OBSCURE, quality=QUALITY, random_position_once=RANDOM_POS_ONCE, wm_url=None, force_overwrite=False): """ Creates a watermarked copy of an image. * ``name``: This is the name of the Watermark object that you wish to apply to the image. * ``position``: There are several options. * ``R``: random placement, which is the default behavior. * ``C``: center the watermark * ``XxY`` where ``X`` is either a specific pixel position on the x-axis or a percentage of the total width of the target image and ``Y`` is a specific pixel position on the y-axis of the image or a percentage of the total height of the target image. These values represent the location of the top and left edges of the watermark. If either ``X`` or ``Y`` is a percentage, you must use a percent sign. This is not used if either one of the ``tiled`` or ``scale`` parameters are true. Examples: * ``50%x50%``: positions the watermark at the center of the image. * ``50%x100``: positions the watermark at the midpoint of the total width of the image and 100 pixels from the top of the image * ``100x50%``: positions the watermark at the midpoint of the total height of the image and 100 pixels from the left edge of the image * ``100x100``: positions the top-left corner of the watermark at 100 pixels from the top of the image and 100 pixels from the left edge of the image. * ``br``, ``bl``, ``tr``, ``tl`` where ``b`` means "bottom", ``t`` means "top", ``l`` means "left", and ``r`` means "right". This will position the watermark at the extreme edge of the original image with just enough room for the watermark to "fully show". This assumes the watermark is not as big as the original image. * ``opacity``: an integer from 0 to 100. This value represents the transparency level of the watermark when it is applied. A value of 100 means the watermark is completely opaque while a value of 0 means the watermark will be invisible. * ``tile``: ``True`` or ``False`` to specify whether or not the watermark shall be tiled across the entire image. * ``scale``: a floating-point number above 0 to specify the scaling for the watermark. If you want the watermark to be scaled to its maximum without falling off the edge of the target image, use ``F``. By default, scale is set to ``1.0``, or 1:1 scaling, meaning the watermark will be placed on the target image at its original size. * ``greyscale``: ``True`` or ``False`` to specify whether or not the watermark should be converted to a greyscale image before applying it to the target image. Default is ``False``. * ``rotation``: 0 to 359 to specify the number of degrees to rotate the watermark before applying it to the target image. Alternatively, you may set ``rotation=R`` for a random rotation value. 
* ``obscure``: set to ``False`` if you wish to expose the original image's filename. Defaults to ``True``. * ``quality``: the quality of the resulting watermarked image. Default is 85. """ # look for the specified watermark by name. If it's not there, go no # further try: watermark = Watermark.objects.get(name=name, is_active=True) except Watermark.DoesNotExist: log.error('Watermark "%s" does not exist... Bailing out.' % name) return url # make sure URL is a string url = str(url) basedir = '%s/watermarked' % os.path.dirname(url) base, ext = os.path.splitext(os.path.basename(url)) # open the target image file along with the watermark image target_path = self.get_url_path(url) target = Image.open(target_path) mark = Image.open(watermark.image.path) # determine the actual value that the parameters provided will render random_position = bool(position is None or str(position).lower() == 'r') scale = utils.determine_scale(scale, target, mark) rotation = utils.determine_rotation(rotation, mark) pos = utils.determine_position(position, target, mark) # see if we need to create only one randomly positioned watermarked # image if not random_position or \ (not random_position_once and random_position): log.debug('Generating random position for watermark each time') position = pos else: log.debug('Random positioning watermark once') params = { 'position': position, 'opacity': opacity, 'scale': scale, 'tile': tile, 'greyscale': greyscale, 'rotation': rotation, 'base': base, 'ext': ext, 'quality': quality, 'watermark': watermark.id, 'opacity_int': int(opacity * 100), 'left': pos[0], 'top': pos[1], } log.debug('Params: %s' % params) wm_name = self.watermark_name(mark, **params) if not wm_url: wm_url = self.watermark_path(basedir, base, ext, wm_name, obscure) wm_path = self.get_url_path(wm_url) log.debug('Watermark name: %s; URL: %s; Path: %s' % ( wm_name, wm_url, wm_path )) # see if the image already exists on the filesystem. If it does, use # it. if not force_overwrite and os.access(wm_path, os.R_OK): # see if the Watermark object was modified since the file was # created modified = datetime.fromtimestamp(os.path.getmtime(wm_path)) # only return the old file if things appear to be the same if modified >= watermark.date_updated: log.info('Watermark exists and has not changed. 
Bailing out.') return wm_url # make sure the position is in our params for the watermark params['position'] = pos self.create_watermark(target, mark, wm_path, **params) # send back the URL to the new, watermarked image return wm_url def get_url_path(self, url, root=settings.MEDIA_ROOT, url_root=settings.MEDIA_URL): """Makes a filesystem path from the specified URL""" if url.startswith(url_root): url = url[len(url_root):] # strip media root url return os.path.normpath(os.path.join(root, url)) def watermark_name(self, mark, **kwargs): """Comes up with a good filename for the watermarked image""" params = [ '%(base)s', 'wm', 'w%(watermark)i', 'o%(opacity_int)i', 'gs%(greyscale)i', 'r%(rotation)i', '_p%(position)s', ] scale = kwargs.get('scale', None) if scale and scale != mark.size: params.append('_s%i' % (float(kwargs['scale'][0]) / mark.size[0] * 100)) if kwargs.get('tile', None): params.append('_tiled') # make thumbnail filename name = '%s%s' % ('_'.join(params), kwargs['ext']) return name % kwargs def watermark_path(self, basedir, base, ext, wm_name, obscure=True): """Determines an appropriate watermark path""" hash = sha1(wm_name).hexdigest() # figure out where the watermark would be saved on the filesystem if obscure: log.debug('Obscuring original image name: %s => %s' % (wm_name, hash)) new_file = os.path.join(basedir, hash + ext) else: log.debug('Not obscuring original image name.') new_file = os.path.join(basedir, hash, base + ext) # make sure the destination directory exists try: root = self.get_url_path(new_file) os.makedirs(os.path.dirname(root)) except OSError, exc: if exc.errno == errno.EEXIST: # not to worry, directory exists pass else: log.error('Error creating path: %s' % traceback.format_exc()) raise else: log.debug('Created directory: %s' % root) return new_file def create_watermark(self, target, mark, path, quality=QUALITY, **kwargs): """Create the watermarked image on the filesystem""" im = utils.watermark(target, mark, **kwargs) if not '.png' in path: im = im.convert('RGB') im.save(path, quality=quality) return im def watermark(url, args=''): """Returns the URL to a watermarked copy of the image specified.""" # initialize some variables args = args.split(',') name = args.pop(0) opacity = 0.5 tile = False scale = 1.0 greyscale = False rotation = 0 position = None obscure = OBSCURE quality = QUALITY random_position_once = RANDOM_POS_ONCE # iterate over all parameters to see what we need to do for arg in args: key, value = arg.split('=') key = key.strip() value = value.strip() if key == 'position': position = value elif key == 'opacity': opacity = utils._percent(value) elif key == 'tile': tile = bool(int(value)) elif key == 'scale': scale = value elif key == 'greyscale': greyscale = bool(int(value)) elif key == 'rotation': rotation = value elif key == 'obscure': obscure = bool(int(value)) elif key == 'quality': quality = int(value) elif key == 'random_position_once': random_position_once = bool(int(value)) mark = Watermarker() return mark(url, name, position, opacity, tile, scale, greyscale, rotation, obscure, quality, random_position_once) register.filter(watermark)
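# A minimal usage sketch for the filter defined above. The image URL and the
# "site_logo" watermark name are hypothetical; an active Watermark record with
# that name is assumed to exist. The same argument string can be passed to the
# registered filter from a Django template.
#
#     marked_url = watermark('/media/photos/cat.jpg',
#                            'site_logo,position=br,scale=F,rotation=R')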
# Copyright (c) 2017, John Skinner import unittest import core.benchmark import core.trial_comparison import trials.slam.tracking_state import benchmarks.tracking.tracking_comparison_benchmark as track_comp class MockTrialResult: def __init__(self, tracking_states): self._tracking_states = tracking_states @property def identifier(self): return 'ThisIsAMockTrialResult' @property def tracking_states(self): return self._tracking_states @tracking_states.setter def tracking_states(self, tracking_states): self._tracking_states = tracking_states def get_tracking_states(self): return self.tracking_states class TestTrackingComparisonBenchmark(unittest.TestCase): def test_benchmark_results_returns_a_benchmark_result(self): trial_result = MockTrialResult(tracking_states={ 1.3333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 1.6667: trials.slam.tracking_state.TrackingState.OK }) reference_trial_result = MockTrialResult(tracking_states={ 1.3333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 1.6667: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED }) benchmark = track_comp.TrackingComparisonBenchmark() result = benchmark.compare_trial_results(trial_result, reference_trial_result) self.assertIsInstance(result, core.trial_comparison.TrialComparisonResult) self.assertNotIsInstance(result, core.benchmark.FailedBenchmark) self.assertEqual(benchmark.identifier, result.benchmark) self.assertEqual(trial_result.identifier, result.trial_result) self.assertEqual(reference_trial_result.identifier, result.reference_trial_result) def test_benchmark_produces_expected_results(self): trial_result = MockTrialResult(tracking_states={ 1.3333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 1.6667: trials.slam.tracking_state.TrackingState.OK, 2: trials.slam.tracking_state.TrackingState.LOST, 2.3333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2.6667: trials.slam.tracking_state.TrackingState.OK, 3: trials.slam.tracking_state.TrackingState.LOST, 3.3333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 3.6667: trials.slam.tracking_state.TrackingState.OK, 4: trials.slam.tracking_state.TrackingState.LOST }) reference_trial_result = MockTrialResult(tracking_states={ 1.3333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 1.6667: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2.3333: trials.slam.tracking_state.TrackingState.OK, 2.6667: trials.slam.tracking_state.TrackingState.OK, 3: trials.slam.tracking_state.TrackingState.OK, 3.3333: trials.slam.tracking_state.TrackingState.LOST, 3.6667: trials.slam.tracking_state.TrackingState.LOST, 4: trials.slam.tracking_state.TrackingState.LOST }) benchmark = track_comp.TrackingComparisonBenchmark() result = benchmark.compare_trial_results(trial_result, reference_trial_result) self.assertNotIsInstance(result, core.benchmark.FailedBenchmark) self.assertEqual(6, len(result.changes)) self.assertEqual((trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, trials.slam.tracking_state.TrackingState.OK), result.changes[1.6667]) self.assertEqual((trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, trials.slam.tracking_state.TrackingState.LOST), result.changes[2]) self.assertEqual((trials.slam.tracking_state.TrackingState.OK, trials.slam.tracking_state.TrackingState.NOT_INITIALIZED), result.changes[2.3333]) self.assertEqual((trials.slam.tracking_state.TrackingState.OK, trials.slam.tracking_state.TrackingState.LOST), result.changes[3]) 
self.assertEqual((trials.slam.tracking_state.TrackingState.LOST, trials.slam.tracking_state.TrackingState.NOT_INITIALIZED), result.changes[3.3333]) self.assertEqual((trials.slam.tracking_state.TrackingState.LOST, trials.slam.tracking_state.TrackingState.OK), result.changes[3.6667]) def test_benchmark_associates_results(self): trial_result = MockTrialResult(tracking_states={ 1.3433: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 1.6767: trials.slam.tracking_state.TrackingState.OK, 1.99: trials.slam.tracking_state.TrackingState.LOST, 2.3433: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2.6767: trials.slam.tracking_state.TrackingState.OK, 3.01: trials.slam.tracking_state.TrackingState.LOST, 3.3233: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 3.6767: trials.slam.tracking_state.TrackingState.OK, 4.01: trials.slam.tracking_state.TrackingState.LOST }) reference_trial_result = MockTrialResult(tracking_states={ 1.3333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 1.6667: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2.3333: trials.slam.tracking_state.TrackingState.OK, 2.6667: trials.slam.tracking_state.TrackingState.OK, 3: trials.slam.tracking_state.TrackingState.OK, 3.3333: trials.slam.tracking_state.TrackingState.LOST, 3.6667: trials.slam.tracking_state.TrackingState.LOST, 4: trials.slam.tracking_state.TrackingState.LOST }) benchmark = track_comp.TrackingComparisonBenchmark() result = benchmark.compare_trial_results(trial_result, reference_trial_result) self.assertNotIsInstance(result, core.benchmark.FailedBenchmark) self.assertEqual(6, len(result.changes)) self.assertEqual((trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, trials.slam.tracking_state.TrackingState.OK), result.changes[1.6667]) self.assertEqual((trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, trials.slam.tracking_state.TrackingState.LOST), result.changes[2]) self.assertEqual((trials.slam.tracking_state.TrackingState.OK, trials.slam.tracking_state.TrackingState.NOT_INITIALIZED), result.changes[2.3333]) self.assertEqual((trials.slam.tracking_state.TrackingState.OK, trials.slam.tracking_state.TrackingState.LOST), result.changes[3]) self.assertEqual((trials.slam.tracking_state.TrackingState.LOST, trials.slam.tracking_state.TrackingState.NOT_INITIALIZED), result.changes[3.3333]) self.assertEqual((trials.slam.tracking_state.TrackingState.LOST, trials.slam.tracking_state.TrackingState.OK), result.changes[3.6667]) def test_benchmark_fails_for_not_enough_matching_keys(self): trial_result = MockTrialResult(tracking_states={ 1.4333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 1.7667: trials.slam.tracking_state.TrackingState.OK, 1.9: trials.slam.tracking_state.TrackingState.LOST, 2.4333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2.7667: trials.slam.tracking_state.TrackingState.OK, 3.1: trials.slam.tracking_state.TrackingState.LOST, 3.2333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 3.7667: trials.slam.tracking_state.TrackingState.OK, 4.1: trials.slam.tracking_state.TrackingState.LOST }) reference_trial_result = MockTrialResult(tracking_states={ 1.3333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 1.6667: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2.3333: trials.slam.tracking_state.TrackingState.OK, 2.6667: trials.slam.tracking_state.TrackingState.OK, 3: 
trials.slam.tracking_state.TrackingState.OK, 3.3333: trials.slam.tracking_state.TrackingState.LOST, 3.6667: trials.slam.tracking_state.TrackingState.LOST, 4: trials.slam.tracking_state.TrackingState.LOST }) benchmark = track_comp.TrackingComparisonBenchmark() result = benchmark.compare_trial_results(trial_result, reference_trial_result) self.assertIsInstance(result, core.benchmark.FailedBenchmark) def test_offset_adjusts_timestamps(self): trial_result = MockTrialResult(tracking_states={ 101.3433: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 101.6767: trials.slam.tracking_state.TrackingState.OK, 101.99: trials.slam.tracking_state.TrackingState.LOST, 102.3433: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 102.6767: trials.slam.tracking_state.TrackingState.OK, 103.01: trials.slam.tracking_state.TrackingState.LOST, 103.3233: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 103.6767: trials.slam.tracking_state.TrackingState.OK, 104.01: trials.slam.tracking_state.TrackingState.LOST }) reference_trial_result = MockTrialResult(tracking_states={ 1.3333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 1.6667: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 2.3333: trials.slam.tracking_state.TrackingState.OK, 2.6667: trials.slam.tracking_state.TrackingState.OK, 3: trials.slam.tracking_state.TrackingState.OK, 3.3333: trials.slam.tracking_state.TrackingState.LOST, 3.6667: trials.slam.tracking_state.TrackingState.LOST, 4: trials.slam.tracking_state.TrackingState.LOST }) # Perform the benchmark, this should fail benchmark = track_comp.TrackingComparisonBenchmark() result = benchmark.compare_trial_results(trial_result, reference_trial_result) self.assertIsInstance(result, core.benchmark.FailedBenchmark) # Adjust the offset, this should work benchmark.offset = 100 # Updates the reference timestamps to match the query ones result = benchmark.compare_trial_results(trial_result, reference_trial_result) self.assertNotIsInstance(result, core.benchmark.FailedBenchmark) self.assertEqual(6, len(result.changes)) self.assertEqual((trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, trials.slam.tracking_state.TrackingState.OK), result.changes[1.6667]) self.assertEqual((trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, trials.slam.tracking_state.TrackingState.LOST), result.changes[2]) self.assertEqual((trials.slam.tracking_state.TrackingState.OK, trials.slam.tracking_state.TrackingState.NOT_INITIALIZED), result.changes[2.3333]) self.assertEqual((trials.slam.tracking_state.TrackingState.OK, trials.slam.tracking_state.TrackingState.LOST), result.changes[3]) self.assertEqual((trials.slam.tracking_state.TrackingState.LOST, trials.slam.tracking_state.TrackingState.NOT_INITIALIZED), result.changes[3.3333]) self.assertEqual((trials.slam.tracking_state.TrackingState.LOST, trials.slam.tracking_state.TrackingState.OK), result.changes[3.6667]) def test_max_difference_affects_associations(self): trial_result = MockTrialResult(tracking_states={ 1.4333: trials.slam.tracking_state.TrackingState.NOT_INITIALIZED, 11.7667: trials.slam.tracking_state.TrackingState.OK, 21.9: trials.slam.tracking_state.TrackingState.LOST, }) reference_trial_result = MockTrialResult(tracking_states={ 2.3333: trials.slam.tracking_state.TrackingState.LOST, 10.6667: trials.slam.tracking_state.TrackingState.LOST, 20: trials.slam.tracking_state.TrackingState.OK }) # Perform the benchmark, this should fail since the keys are far appart 
benchmark = track_comp.TrackingComparisonBenchmark() result = benchmark.compare_trial_results(trial_result, reference_trial_result) self.assertIsInstance(result, core.benchmark.FailedBenchmark) # Adjust the max difference, this should now allow associations between the timestamps benchmark.max_difference = 5 result = benchmark.compare_trial_results(trial_result, reference_trial_result) self.assertNotIsInstance(result, core.benchmark.FailedBenchmark)
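# A minimal standalone sketch (not the benchmark's actual implementation) of the
# timestamp association these tests exercise: each query stamp is paired with the
# nearest reference stamp after `offset` is applied, and pairs further apart than
# `max_difference` are dropped. The default threshold below is illustrative only.
def associate_stamps(query_stamps, ref_stamps, offset=0.0, max_difference=0.05):
    pairs = []
    for q in sorted(query_stamps):
        nearest = min(ref_stamps, key=lambda r: abs(q - (r + offset)))
        if abs(q - (nearest + offset)) <= max_difference:
            pairs.append((q, nearest))
    return pairs

# associate_stamps([101.3433, 101.6767], [1.3333, 1.6667], offset=100)
#   -> [(101.3433, 1.3333), (101.6767, 1.6667)]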
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. import unittest import os import json from pymatgen.core.structure import Molecule from pymatgen.io.nwchem import NwTask, NwInput, NwInputError, NwOutput test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", 'test_files', "nwchem") coords = [[0.000000, 0.000000, 0.000000], [0.000000, 0.000000, 1.089000], [1.026719, 0.000000, -0.363000], [-0.513360, -0.889165, -0.363000], [-0.513360, 0.889165, -0.363000]] mol = Molecule(["C", "H", "H", "H", "H"], coords) class NwTaskTest(unittest.TestCase): def setUp(self): self.task = NwTask(0, 1, basis_set={"H": "6-31g"}, theory="dft", theory_directives={"xc": "b3lyp"}) self.task_cosmo = NwTask(0, 1, basis_set={"H": "6-31g"}, theory="dft", theory_directives={"xc": "b3lyp"}, alternate_directives={'cosmo': "cosmo"}) self.task_esp = NwTask(0, 1, basis_set={"H": "6-31g"}, theory="esp") def test_multi_bset(self): t = NwTask.from_molecule( mol, theory="dft", basis_set={"C": "6-311++G**", "H": "6-31++G**"}, theory_directives={"xc": "b3lyp"}) ans = """title "H4C1 dft optimize" charge 0 basis cartesian C library "6-311++G**" H library "6-31++G**" end dft xc b3lyp end task dft optimize""" self.assertEqual(str(t), ans) def test_str_and_from_string(self): ans = """title "dft optimize" charge 0 basis cartesian H library "6-31g" end dft xc b3lyp end task dft optimize""" self.assertEqual(str(self.task), ans) def test_to_from_dict(self): d = self.task.as_dict() t = NwTask.from_dict(d) self.assertIsInstance(t, NwTask) def test_init(self): self.assertRaises(NwInputError, NwTask, 0, 1, {"H": "6-31g"}, theory="bad") self.assertRaises(NwInputError, NwTask, 0, 1, {"H": "6-31g"}, operation="bad") def test_dft_task(self): task = NwTask.dft_task(mol, charge=1, operation="energy") ans = """title "H4C1 dft energy" charge 1 basis cartesian C library "6-31g" H library "6-31g" end dft mult 2 xc b3lyp end task dft energy""" self.assertEqual(str(task), ans) def test_dft_cosmo_task(self): task = NwTask.dft_task( mol, charge=mol.charge, operation="energy", xc="b3lyp", basis_set="6-311++G**", alternate_directives={'cosmo': {"dielec": 78.0}}) ans = """title "H4C1 dft energy" charge 0 basis cartesian C library "6-311++G**" H library "6-311++G**" end dft mult 1 xc b3lyp end cosmo dielec 78.0 end task dft energy""" self.assertEqual(str(task), ans) def test_esp_task(self): task = NwTask.esp_task(mol, charge=mol.charge, operation="", basis_set="6-311++G**") ans = """title "H4C1 esp " charge 0 basis cartesian C library "6-311++G**" H library "6-311++G**" end task esp """ self.assertEqual(str(task), ans) class NwInputTest(unittest.TestCase): def setUp(self): tasks = [ NwTask.dft_task(mol, operation="optimize", xc="b3lyp", basis_set="6-31++G*"), NwTask.dft_task(mol, operation="freq", xc="b3lyp", basis_set="6-31++G*"), NwTask.dft_task(mol, operation="energy", xc="b3lyp", basis_set="6-311++G**"), NwTask.dft_task(mol, charge=mol.charge + 1, operation="energy", xc="b3lyp", basis_set="6-311++G**"), NwTask.dft_task(mol, charge=mol.charge - 1, operation="energy", xc="b3lyp", basis_set="6-311++G**") ] self.nwi = NwInput(mol, tasks, geometry_options=["units", "angstroms", "noautoz"], memory_options="total 1000 mb") self.nwi_symm = NwInput(mol, tasks, geometry_options=["units", "angstroms", "noautoz"], symmetry_options=["c1"]) def test_str(self): ans = """memory total 1000 mb geometry units angstroms noautoz C 0.0 0.0 0.0 H 0.0 0.0 1.089 H 1.026719 0.0 -0.363 H -0.51336 -0.889165 -0.363 
H -0.51336 0.889165 -0.363 end title "H4C1 dft optimize" charge 0 basis cartesian C library "6-31++G*" H library "6-31++G*" end dft mult 1 xc b3lyp end task dft optimize title "H4C1 dft freq" charge 0 basis cartesian C library "6-31++G*" H library "6-31++G*" end dft mult 1 xc b3lyp end task dft freq title "H4C1 dft energy" charge 0 basis cartesian C library "6-311++G**" H library "6-311++G**" end dft mult 1 xc b3lyp end task dft energy title "H4C1 dft energy" charge 1 basis cartesian C library "6-311++G**" H library "6-311++G**" end dft mult 2 xc b3lyp end task dft energy title "H4C1 dft energy" charge -1 basis cartesian C library "6-311++G**" H library "6-311++G**" end dft mult 2 xc b3lyp end task dft energy """ self.assertEqual(str(self.nwi), ans) ans_symm = """geometry units angstroms noautoz symmetry c1 C 0.0 0.0 0.0 H 0.0 0.0 1.089 H 1.026719 0.0 -0.363 H -0.51336 -0.889165 -0.363 H -0.51336 0.889165 -0.363 end title "H4C1 dft optimize" charge 0 basis cartesian C library "6-31++G*" H library "6-31++G*" end dft mult 1 xc b3lyp end task dft optimize title "H4C1 dft freq" charge 0 basis cartesian C library "6-31++G*" H library "6-31++G*" end dft mult 1 xc b3lyp end task dft freq title "H4C1 dft energy" charge 0 basis cartesian C library "6-311++G**" H library "6-311++G**" end dft mult 1 xc b3lyp end task dft energy title "H4C1 dft energy" charge 1 basis cartesian C library "6-311++G**" H library "6-311++G**" end dft mult 2 xc b3lyp end task dft energy title "H4C1 dft energy" charge -1 basis cartesian C library "6-311++G**" H library "6-311++G**" end dft mult 2 xc b3lyp end task dft energy """ self.assertEqual(str(self.nwi_symm), ans_symm) def test_to_from_dict(self): d = self.nwi.as_dict() nwi = NwInput.from_dict(d) self.assertIsInstance(nwi, NwInput) # Ensure it is json-serializable. json.dumps(d) d = self.nwi_symm.as_dict() nwi_symm = NwInput.from_dict(d) self.assertIsInstance(nwi_symm, NwInput) json.dumps(d) def test_from_string_and_file(self): nwi = NwInput.from_file(os.path.join(test_dir, "ch4.nw")) self.assertEqual(nwi.tasks[0].theory, "dft") self.assertEqual(nwi.memory_options, "total 1000 mb stack 400 mb") self.assertEqual(nwi.tasks[0].basis_set["C"], "6-31++G*") self.assertEqual(nwi.tasks[-1].basis_set["C"], "6-311++G**") # Try a simplified input. 
str_inp = """start H4C1 geometry units angstroms C 0.0 0.0 0.0 H 0.0 0.0 1.089 H 1.026719 0.0 -0.363 H -0.51336 -0.889165 -0.363 H -0.51336 0.889165 -0.363 end title "H4C1 dft optimize" charge 0 basis cartesian H library "6-31++G*" C library "6-31++G*" end dft xc b3lyp mult 1 end task scf optimize title "H4C1 dft freq" charge 0 task scf freq title "H4C1 dft energy" charge 0 basis cartesian H library "6-311++G**" C library "6-311++G**" end task dft energy title "H4C1 dft energy" charge 1 dft xc b3lyp mult 2 end task dft energy title "H4C1 dft energy" charge -1 task dft energy """ nwi = NwInput.from_string(str_inp) self.assertEqual(nwi.geometry_options, ['units', 'angstroms']) self.assertEqual(nwi.tasks[0].theory, "scf") self.assertEqual(nwi.tasks[0].basis_set["C"], "6-31++G*") self.assertEqual(nwi.tasks[-1].theory, "dft") self.assertEqual(nwi.tasks[-1].basis_set["C"], "6-311++G**") str_inp_symm = str_inp.replace("geometry units angstroms", "geometry units angstroms\n symmetry " "c1") nwi_symm = NwInput.from_string(str_inp_symm) self.assertEqual(nwi_symm.geometry_options, ['units', 'angstroms']) self.assertEqual(nwi_symm.symmetry_options, ['c1']) self.assertEqual(nwi_symm.tasks[0].theory, "scf") self.assertEqual(nwi_symm.tasks[0].basis_set["C"], "6-31++G*") self.assertEqual(nwi_symm.tasks[-1].theory, "dft") self.assertEqual(nwi_symm.tasks[-1].basis_set["C"], "6-311++G**") class NwOutputTest(unittest.TestCase): def test_read(self): nwo = NwOutput(os.path.join(test_dir, "CH4.nwout")) nwo_cosmo = NwOutput(os.path.join(test_dir, "N2O4.nwout")) self.assertEqual(0, nwo[0]["charge"]) self.assertEqual(-1, nwo[-1]["charge"]) self.assertEqual(len(nwo), 5) self.assertAlmostEqual(-1102.6224491715582, nwo[0]["energies"][-1], 2) self.assertAlmostEqual(-1102.9986291578023, nwo[2]["energies"][-1]) self.assertAlmostEqual(-11156.354030653656, nwo_cosmo[5]["energies"][0]["cosmo scf"]) self.assertAlmostEqual(-11153.374133394364, nwo_cosmo[5]["energies"][0]["gas phase"]) self.assertAlmostEqual(-11156.353632962995, nwo_cosmo[5]["energies"][0]["sol phase"], 2) self.assertAlmostEqual(-11168.818934311605, nwo_cosmo[6]["energies"][0]["cosmo scf"], 2) self.assertAlmostEqual(-11166.3624424611462, nwo_cosmo[6]["energies"][0]['gas phase'], 2) self.assertAlmostEqual(-11168.818934311605, nwo_cosmo[6]["energies"][0]['sol phase'], 2) self.assertAlmostEqual(-11165.227959110889, nwo_cosmo[7]["energies"][0]['cosmo scf'], 2) self.assertAlmostEqual(-11165.025443612385, nwo_cosmo[7]["energies"][0]['gas phase'], 2) self.assertAlmostEqual(-11165.227959110154, nwo_cosmo[7]["energies"][0]['sol phase'], 2) self.assertAlmostEqual(nwo[1]["hessian"][0][0], 4.60187e+01) self.assertAlmostEqual(nwo[1]["hessian"][1][2], -1.14030e-08) self.assertAlmostEqual(nwo[1]["hessian"][2][3], 2.60819e+01) self.assertAlmostEqual(nwo[1]["hessian"][6][6], 1.45055e+02) self.assertAlmostEqual(nwo[1]["hessian"][11][14], 1.35078e+01) # CH4.nwout, line 722 self.assertAlmostEqual(nwo[0]["forces"][0][3], -0.001991) # N2O4.nwout, line 1071 self.assertAlmostEqual(nwo_cosmo[0]["forces"][0][4], 0.011948) # There should be four DFT gradients. 
self.assertEqual(len(nwo_cosmo[0]["forces"]), 4) ie = (nwo[4]["energies"][-1] - nwo[2]["energies"][-1]) ea = (nwo[2]["energies"][-1] - nwo[3]["energies"][-1]) self.assertAlmostEqual(0.7575358648355177, ie) self.assertAlmostEqual(-14.997877958701338, ea) self.assertEqual(nwo[4]["basis_set"]["C"]["description"], "6-311++G**") nwo = NwOutput(os.path.join(test_dir, "H4C3O3_1.nwout")) self.assertTrue(nwo[-1]["has_error"]) self.assertEqual(nwo[-1]["errors"][0], "Bad convergence") nwo = NwOutput(os.path.join(test_dir, "CH3CH2O.nwout")) self.assertTrue(nwo[-1]["has_error"]) self.assertEqual(nwo[-1]["errors"][0], "Bad convergence") nwo = NwOutput(os.path.join(test_dir, "C1N1Cl1_1.nwout")) self.assertTrue(nwo[-1]["has_error"]) self.assertEqual(nwo[-1]["errors"][0], "autoz error") nwo = NwOutput(os.path.join(test_dir, "anthrachinon_wfs_16_ethyl.nwout")) self.assertTrue(nwo[-1]["has_error"]) self.assertEqual(nwo[-1]["errors"][0], "Geometry optimization failed") nwo = NwOutput(os.path.join(test_dir, "anthrachinon_wfs_15_carboxyl.nwout")) self.assertEqual(nwo[1]['frequencies'][0][0], -70.47) self.assertEqual(len(nwo[1]['frequencies'][0][1]), 27) self.assertEqual(nwo[1]['frequencies'][-1][0], 3696.74) self.assertEqual(nwo[1]['frequencies'][-1][1][-1], (0.20498, -0.94542, -0.00073)) self.assertEqual(nwo[1]["normal_frequencies"][1][0], -70.72) self.assertEqual(nwo[1]["normal_frequencies"][3][0], -61.92) self.assertEqual(nwo[1]["normal_frequencies"][1][1][-1], (0.00056, 0.00042, 0.06781)) def test_parse_tddft(self): nwo = NwOutput(os.path.join(test_dir, "phen_tddft.log")) roots = nwo.parse_tddft() self.assertEqual(len(roots["singlet"]), 20) self.assertAlmostEqual(roots["singlet"][0]["energy"], 3.9291) self.assertAlmostEqual(roots["singlet"][0]["osc_strength"], 0.0) self.assertAlmostEqual(roots["singlet"][1]["osc_strength"], 0.00177) def test_get_excitation_spectrum(self): nwo = NwOutput(os.path.join(test_dir, "phen_tddft.log")) spectrum = nwo.get_excitation_spectrum() self.assertEqual(len(spectrum.x), 2000) self.assertAlmostEqual(spectrum.x[0], 1.9291) self.assertAlmostEqual(spectrum.y[0], 0.0) self.assertAlmostEqual(spectrum.y[1000], 0.0007423569947114812) if __name__ == "__main__": unittest.main()
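# --- Illustrative sketch (not part of the original test suite) ---
# The tests above already show the expected text for each input; this helper
# just condenses the typical call sequence they exercise (NwTask.dft_task ->
# NwInput -> str()) into one place.  Nothing here is used by the tests.
def _example_ch4_input():
    task = NwTask.dft_task(mol, operation="optimize", xc="b3lyp",
                           basis_set="6-31++G*")
    nwi = NwInput(mol, [task], geometry_options=["units", "angstroms"])
    return str(nwi)  # the rendered NWChem input deck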
# Copyright 2015 SolidBuilds.com. All rights reserved. # # Authors: Ling Thio <ling.thio@gmail.com> """ SQLAlchemy-boolean-search ========================= SQLAlchemy-boolean-search translates a boolean search expression such as:: field1=*something* and not (field2==1 or field3<=10.0) into its corresponding SQLAlchemy query filter. Install ------- pip install sqlalchemy-boolean-search Usage example ------------- from sqlalchemy_boolean_search import parse_boolean_search # DataModel defined elsewhere (with field1, field2 and field3) from app.models import DataModel # Parse boolean search into a parsed expression boolean_search = 'field1=*something* and not (field2==1 or field3<=10.0)' parsed_expression = parse_boolean_search(boolean_search) # Retrieve records using a filter generated by the parsed expression records = DataModel.query.filter(parsed_expression.filter(DataModel)) Documentation ------------- http://sqlalchemy-boolean-search.readthedocs.org/ Authors ------- * Ling Thio - ling.thio [at] gmail.com """ from __future__ import print_function import pyparsing as pp from pyparsing import ParseException # explicit export from sqlalchemy import func from sqlalchemy.sql import or_, and_, not_, sqltypes # Define a custom exception class class BooleanSearchException(Exception): pass # ***** Utility functions ***** def get_field(DataModelClass, field_name): """ Returns a SQLAlchemy Field from a field name such as 'name' or 'parent.name'. Returns None if no field exists by that field name. """ # Handle hierarchical field names such as 'parent.name' if '.' in field_name: relationship_name, field_name = field_name.split('.', 1) relationship = getattr(DataModelClass, relationship_name) return get_field(relationship.property.mapper.entity, field_name) # Handle flat field names such as 'name' return getattr(DataModelClass, field_name, None) # ***** Define the expression element classes ***** class Condition(object): """ Represents a 'name operand value' condition, where operand can be one of: '<', '<=', '=', '==', '!=', '>=', '>'. """ def __init__(self, data): self.name = data[0][0] self.op = data[0][1] self.value = data[0][2] def filter(self, DataModelClass): """ Return the condition as a SQLAlchemy query condition """ condition = None field = get_field(DataModelClass, self.name) if field: # Prepare field and value lower_field = func.lower(field) value = self.value lower_value = func.lower(value) if field.type.python_type == float: try: value = float(value) lower_field = field lower_value = value except: raise BooleanSearchException( "Field '%(name)s' expects a float value. Received value '%(value)s' instead." % dict(name=self.name, value=self.value)) elif field.type.python_type == int: try: value = int(value) lower_field = field lower_value = value except: raise BooleanSearchException( "Field '%(name)s' expects an integer value. Received value '%(value)s' instead." 
% dict(name=self.name, value=self.value)) # Return SQLAlchemy condition based on operator value if self.op == '==': condition = lower_field.__eq__(lower_value) elif self.op == '<': condition = lower_field.__lt__(lower_value) elif self.op == '<=': condition = lower_field.__le__(lower_value) elif self.op == '>': condition = lower_field.__gt__(lower_value) elif self.op == '>=': condition = lower_field.__ge__(lower_value) elif self.op == '!=': condition = lower_field.__ne__(lower_value) elif self.op == '=': field = getattr(DataModelClass, self.name) value = self.value if value.find('*') >= 0: value = value.replace('*', '%') condition = field.ilike(value) else: condition = field.ilike('%' + value + '%') else: raise BooleanSearchException( "Table '%(table_name)s' does not have a field named '%(field_name)s'." % dict(table_name=DataModelClass.__tablename__, field_name=self.name)) return condition def __repr__(self): return self.name + self.op + self.value class BoolNot(object): """ Represents the boolean operator NOT """ def __init__(self, data): self.condition = data[0][1] def filter(self, DataModelClass): """ Return the operator as a SQLAlchemy not_() condition """ return not_(self.condition.filter(DataModelClass)) def __repr__(self): return 'not_(' + repr(self.condition) + ')' class BoolAnd(object): """ Represents the boolean operator AND """ def __init__(self, data): self.conditions = [condition for condition in data[0] if condition and condition != 'and'] def filter(self, DataModelClass): """ Return the operator as a SQLAlchemy and_() condition """ conditions = [condition.filter(DataModelClass) for condition in self.conditions] return and_(*conditions) # * converts list to argument sequence def __repr__(self): return 'and_(' + ', '.join([repr(condition) for condition in self.conditions]) + ')' class BoolOr(object): """ Represents the boolean operator OR """ def __init__(self, data): self.conditions = [condition for condition in data[0] if condition and condition != 'or'] def filter(self, DataModelClass): """ Return the operator as a SQLAlchemy or_() condition """ conditions = [condition.filter(DataModelClass) for condition in self.conditions] return or_(*conditions) # * converts list to argument sequence def __repr__(self): return 'or_(' + ', '.join([repr(condition) for condition in self.conditions]) + ')' # ***** Define the boolean condition expressions ***** # Define expression elements number = pp.Regex(r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?") name = pp.Word(pp.alphas + '._', pp.alphanums + '._') operator = pp.Regex("==|!=|<=|>=|<|>|=") value = pp.Word(pp.alphanums + '_.*') | pp.QuotedString('"') | number condition = pp.Group(name + operator + value) condition.setParseAction(Condition) # Define the expression as a hierarchy of boolean operators # with the following precedence: NOT > AND > OR expression_parser = pp.operatorPrecedence(condition, [ (pp.CaselessLiteral("not"), 1, pp.opAssoc.RIGHT, BoolNot), (pp.CaselessLiteral("and"), 2, pp.opAssoc.LEFT, BoolAnd), (pp.CaselessLiteral("or"), 2, pp.opAssoc.LEFT, BoolOr), ]) def parse_boolean_search(boolean_search): """ Parses the boolean search expression into a hierarchy of boolean operators. Returns a BoolNot or BoolAnd or BoolOr object. """ try: expression = expression_parser.parseString(boolean_search)[0] return expression except ParseException as e: raise BooleanSearchException( "Syntax error at offset %(offset)s." % dict(offset=e.col))
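# --- Illustrative usage sketch (not part of the original module) ---
# Parsing alone needs no database.  The repr() of the parsed tree mirrors the
# SQLAlchemy calls that .filter(DataModel) would later emit, so it is handy
# for a quick sanity check.  Uses the same expression as the docstring above.
if __name__ == '__main__':
    parsed = parse_boolean_search(
        'field1=*something* and not (field2==1 or field3<=10.0)')
    # Expected shape: and_(field1=*something*, not_(or_(field2==1, field3<=10.0)))
    print(repr(parsed))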
from types import IntType, StringType, UnicodeType import time import datetime import calendar import os import sys class Config(dict): ''' based on http://stackoverflow.com/questions/4984647/accessing-dict-keys-like-an-attribute-in-python create a config object based on an imported module. with the module, you have nice config.key attrib access, but you can't config.copy() it so we convert it into a dict... but then we loose the nice attrib access again. so this class gives an object that supports both config.key and config.copy() basically ''' def __init__(self, module_or_dict): try: # it's a module d = module_or_dict.__dict__ except: # it's a dict: d = module_or_dict for k, v in d.items(): if not k.startswith('__'): self[k] = v def __getattr__(self, attr): return self[attr] def __setattr__(self, attr, value): self[attr] = value def copy(self): return Config(self) class Event(): ''' timestamp must be a unix timestamp (int) desc is a string in whatever markup you want (html usually) tags is a list of strings (usally simple words) event_id is optional, it's the elasticsearch _id field ''' def __init__(self, timestamp=None, desc=None, tags=[], event_id=None, extra_attributes={}): assert type(timestamp) is IntType, "timestamp must be an integer: %r" % timestamp assert type(desc) in (StringType, UnicodeType), "desc must be a non-empty string: %r" % desc assert desc, "desc must be a non-empty string: %r" % desc self.timestamp = timestamp self.desc = desc self.tags = tags # just a list of strings self.event_id = event_id self.extra_attributes = extra_attributes def __str__(self): pretty_desc = self.desc if "\n" in self.desc: pretty_desc = "%s..." % self.desc[:self.desc.find('\n')] return "Event object. event_id=%s, ts=%i, tags=%s, desc=%s" % (self.event_id, self.timestamp, ','.join(self.tags), pretty_desc) def __getattr__(self, nm): if nm == 'outage': for tag in self.tags: if tag.startswith('outage='): return tag.replace('outage=', '') return None if nm == 'impact': for tag in self.tags: if tag.startswith('impact='): return tag.replace('impact=', '') return None raise AttributeError("no attribute %s" % nm) class Reportpoint(): def __init__(self, event, outages, muptime, ttf, tttf, ttd, tttd, ttr, tttr): self.event = event self.outages = outages # number of outages occured until now (including this one, if appropriate) self.muptime = muptime self.ttf = ttf self.tttf = tttf self.ttd = ttd self.tttd = tttd self.ttr = ttr self.tttr = tttr def __getattr__(self, nm): divisor = self.outages if divisor == 0: divisor = 1 if nm == 'mttf': return self.tttf / divisor if nm == 'mttd': return self.tttd / divisor if nm == 'mttr': return self.tttr / divisor raise AttributeError("no attribute %s" % nm) class Backend(): def __init__(self, config=None): sys.path.append("%s/%s" % (os.getcwd(), 'python-dateutil')) sys.path.append("%s/%s" % (os.getcwd(), 'requests')) sys.path.append("%s/%s" % (os.getcwd(), 'rawes')) import rawes import requests from rawes.elastic_exception import ElasticException # pyflakes doesn't like globals()['ElasticException'] = ElasticException so: self.ElasticException = ElasticException if config is None: import config config = Config(config) self.config = config self.es = rawes.Elastic(config.es_url, except_on_error=True) # make sure the index exists try: # to explain the custom mapping: # * _source enabled is maybe not really needed, but it's easiest at # least. we just need to be able to reconstruct the original document. 
# * tags are not analyzed so that when we want to get a list of all # tags (a facet search) it returns the original tags, not the # tokenized terms. self.es.post(config.es_index, data={ "mappings": { "event": { "_source": { "enabled": True }, "properties": { "tags": { "type": "string", "index": "not_analyzed" } } } } }) print "created new ElasticSearch Index" except ElasticException as e: import re if 'IndexAlreadyExistsException' in e.result['error']: pass elif 'already exists as alias' in e.result['error']: pass else: raise except requests.exceptions.ConnectionError as e: sys.stderr.write("Could not connect to ElasticSearch: %s" % e) sys.exit(2) def object_to_dict(self, event): iso = self.unix_timestamp_to_iso8601(event.timestamp) data = { 'date': iso, 'tags': event.tags, 'desc': event.desc } data.update(event.extra_attributes) return data def unix_timestamp_to_iso8601(self, unix_timestamp): return datetime.datetime.utcfromtimestamp(unix_timestamp).isoformat() def iso8601_to_unix_timestamp(self, iso8601): ''' elasticsearch returns something like 2013-03-20T20:41:16 ''' unix = calendar.timegm(datetime.datetime.strptime(iso8601, "%Y-%m-%dT%H:%M:%S").timetuple()) return unix def hit_to_object(self, hit): event_id = hit['_id'] hit = hit['_source'] unix = self.iso8601_to_unix_timestamp(hit['date']) extra_attributes= {} for (k, v) in hit.items(): if k not in ('desc', 'tags', 'date'): extra_attributes[k] = v return Event(timestamp=unix, desc=hit['desc'], tags=hit['tags'], event_id=event_id, extra_attributes=extra_attributes) def add_event(self, event): ret = self.es.post('%s/event' % self.config.es_index, data=self.object_to_dict(event)) return ret['_id'] def delete_event(self, event_id): try: self.es.delete('%s/event/%s' % (self.config.es_index, event_id)) except self.ElasticException as e: if 'found' in e.result and not e.result['found']: raise Exception("Document %s can't be found" % event_id) else: raise def edit_event(self, event): self.es.post('%s/event/%s/_update' % (self.config.es_index, event.event_id), data={'doc': self.object_to_dict(event)}) def es_get_events(self, query = None): if query is None: query = { "query_string": { "query": "*" } } return self.es.get('%s/event/_search?size=1000' % self.config.es_index, data={ "query": query, "sort": [ { "date": { "order": "desc", "ignore_unmapped": True # avoid 'No mapping found for [date] in order to sort on' when we don't have data yet } } ] }) def get_events_raw(self, query=None): ''' return format that's optimized for elasticsearch ''' hits = self.es_get_events(query) events = hits['hits']['hits'] for (i, event) in enumerate(events): event_id = event['_id'] events[i] = event['_source'] events[i]['id'] = event_id events[i]['date'] = self.iso8601_to_unix_timestamp(events[i]['date']) return events def get_events_objects(self): # retuns a list of event objects hits = self.es_get_events() return [self.hit_to_object(event_hit) for event_hit in hits['hits']['hits']] def get_event(self, event_id): # http://localhost:9200/dieterfoobarbaz/event/PZ1su5w5Stmln_c2Kc4B2g event_hit = self.es.get('%s/event/%s' % (self.config.es_index, event_id)) event_obj = self.hit_to_object(event_hit) return event_obj def get_tags(self): # get all different tags # curl -X POST "http://localhost:9200/anthracite/_search?pretty=true&size=0" -d '{ "query" : {"query_string" : {"query" : "*"}}, "facets":{"tags" : { "terms" : {"field" : "tags"} }}}"' tags = self.es.post('%s/_search?size=0' % self.config.es_index, data={ 'query': { 'query_string': { 'query': '*' } }, 
'facets': { 'tags': { 'terms': { 'field': 'tags' } } } }) tags = tags['facets']['tags']['terms'] tags = [t['term'] for t in tags] return tags def get_events_range(self): low = self.es.post('%s/_search?size=1' % self.config.es_index, data={ "query": { "match_all": { } }, "sort": [ { "date": { "order": "asc", "ignore_unmapped": True # avoid 'No mapping found for [date] in order to sort on' when we don't have data yet } } ] }) # if there's not a single record in the database: if not len(low['hits']['hits']): return (0, time.time()) high = self.es.post('%s/_search?size=1' % self.config.es_index, data={ "query": { "match_all": { } }, "sort": [ { "date": { "order": "desc", "ignore_unmapped": True # avoid 'No mapping found for [date] in order to sort on' when we don't have data yet } } ] }) low = self.iso8601_to_unix_timestamp(low['hits']['hits'][0]['_source']['date']) high = self.iso8601_to_unix_timestamp(high['hits']['hits'][0]['_source']['date']) return (low, high) def get_events_count(self): count = 0 events = self.es.get('%s/event/_search' % self.config.es_index) count = events['hits']['total'] return count def get_outage_events(self): # TODO sanity checking (order of detected, resolved tags, etc) hits = self.es.get('%s/event/_search' % self.config.es_index, data={ 'query': { 'query_string': { 'query': 'tag like outage=_%' } }, "sort": [ { "date": { "order": "asc", "ignore_unmapped": True # avoid 'No mapping found for [date] in order to sort on' when we don't have data yet } } ] }) events = [] for event_hit in hits['hits']['hits']: event_obj = self.hit_to_object(event_hit) events.append(event_obj) return events class PluginError(Exception): def __init__(self, plugin, msg, underlying_error): self.plugin = plugin self.msg = msg self.underlying_error = underlying_error def __str__(self): return "%s -> %s (%s)" % (self.plugin, self.msg, self.underlying_error) def load_plugins(plugins_to_load, config): ''' loads all the plugins sub-modules returns encountered errors, doesn't raise them because whoever calls this function defines how any errors are handled. meanwhile, loading must continue ''' import plugins errors = [] add_urls = {} remove_urls = [] loaded_plugins = [] plugins_dir = os.path.dirname(plugins.__file__) wd = os.getcwd() os.chdir(plugins_dir) for module in plugins_to_load: try: print "importing plugin '%s'" % module imp = __import__('plugins.' + module, {}, {}, ['*']) loaded_plugins.append(imp) try: add_urls[module] = imp.add_urls except Exception: pass try: remove_urls.extend(imp.remove_urls) except Exception: pass except Exception, e: errors.append(PluginError(module, "Failed to add plugin '%s'" % module, e)) continue os.chdir(wd) state = { 'add_urls': add_urls, 'remove_urls': remove_urls, 'loaded_plugins': loaded_plugins } # make some vars accessible for all imported plugins __builtins__['state'] = state __builtins__['config'] = config return (state, errors)
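# --- Illustrative sketch (not part of the original module) ---
# Event enforces an integer unix timestamp and a non-empty desc (see the
# asserts in Event.__init__); tags shaped like 'outage=<name>' and
# 'impact=<text>' surface as the .outage / .impact attributes.  Backend()
# needs a reachable ElasticSearch, so it is deliberately not built here.
if __name__ == '__main__':
    e = Event(timestamp=int(time.time()),
              desc="db01 unreachable\nfull details in the incident doc",
              tags=['outage=db01', 'impact=read-only mode'])
    print e          # Event object. event_id=None, ts=..., tags=..., desc=db01 unreachable...
    print e.outage   # -> db01
    print e.impact   # -> read-only mode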
""" Internal shared-state variables such as config settings and host lists. """ import os import sys from optparse import make_option from fabric.network import HostConnectionCache, ssh from fabric.version import get_version from fabric.utils import _AliasDict, _AttributeDict # # Win32 flag # # Impacts a handful of platform specific behaviors. Note that Cygwin's Python # is actually close enough to "real" UNIXes that it doesn't need (or want!) to # use PyWin32 -- so we only test for literal Win32 setups (vanilla Python, # ActiveState etc) here. win32 = (sys.platform == 'win32') # # Environment dictionary - support structures # # By default, if the user (including code using Fabric as a library) doesn't # set the username, we obtain the currently running username and use that. def _get_system_username(): """ Obtain name of current system user, which will be default connection user. """ import getpass username = None try: username = getpass.getuser() # getpass.getuser supported on both Unix and Windows systems. # getpass.getuser may call pwd.getpwuid which in turns may raise KeyError # if it cannot find a username for the given UID, e.g. on ep.io # and similar "non VPS" style services. Rather than error out, just keep # the 'default' username to None. Can check for this value later if needed. except KeyError: pass except ImportError: if win32: import win32api import win32security import win32profile username = win32api.GetUserName() return username def _rc_path(): """ Return platform-specific default file path for $HOME/.fabricrc. """ rc_file = '.fabricrc' rc_path = '~/' + rc_file expanded_rc_path = os.path.expanduser(rc_path) if expanded_rc_path == rc_path and win32: from win32com.shell.shell import SHGetSpecialFolderPath from win32com.shell.shellcon import CSIDL_PROFILE expanded_rc_path = "%s/%s" % ( SHGetSpecialFolderPath(0, CSIDL_PROFILE), rc_file ) return expanded_rc_path default_port = '22' # hurr durr default_ssh_config_path = '~/.ssh/config' # Options/settings which exist both as environment keys and which can be set on # the command line, are defined here. When used via `fab` they will be added to # the optparse parser, and either way they are added to `env` below (i.e. the # 'dest' value becomes the environment key and the value, the env value). # # Keep in mind that optparse changes hyphens to underscores when automatically # deriving the `dest` name, e.g. `--reject-unknown-hosts` becomes # `reject_unknown_hosts`. # # Furthermore, *always* specify some sort of default to avoid ending up with # optparse.NO_DEFAULT (currently a two-tuple)! In general, None is a better # default than ''. # # User-facing documentation for these are kept in docs/env.rst. 
env_options = [ make_option('-a', '--no_agent', action='store_true', default=False, help="don't use the running SSH agent" ), make_option('-A', '--forward-agent', action='store_true', default=False, help="forward local agent to remote end" ), make_option('--abort-on-prompts', action='store_true', default=False, help="abort instead of prompting (for password, host, etc)" ), make_option('-c', '--config', dest='rcfile', default=_rc_path(), metavar='PATH', help="specify location of config file to use" ), make_option('--colorize-errors', action='store_true', default=False, help="Color error output", ), make_option('-D', '--disable-known-hosts', action='store_true', default=False, help="do not load user known_hosts file" ), make_option('-e', '--eagerly-disconnect', action='store_true', default=False, help="disconnect from hosts as soon as possible" ), make_option('-f', '--fabfile', default='fabfile', metavar='PATH', help="python module file to import, e.g. '../other.py'" ), make_option('-g', '--gateway', default=None, metavar='HOST', help="gateway host to connect through" ), make_option('--hide', metavar='LEVELS', help="comma-separated list of output levels to hide" ), make_option('-H', '--hosts', default=[], help="comma-separated list of hosts to operate on" ), make_option('-i', action='append', dest='key_filename', metavar='PATH', default=None, help="path to SSH private key file. May be repeated." ), make_option('-k', '--no-keys', action='store_true', default=False, help="don't load private key files from ~/.ssh/" ), make_option('--keepalive', dest='keepalive', type=int, default=0, metavar="N", help="enables a keepalive every N seconds" ), make_option('--linewise', action='store_true', default=False, help="print line-by-line instead of byte-by-byte" ), make_option('-n', '--connection-attempts', type='int', metavar='M', dest='connection_attempts', default=1, help="make M attempts to connect before giving up" ), make_option('--no-pty', dest='always_use_pty', action='store_false', default=True, help="do not use pseudo-terminal in run/sudo" ), make_option('-p', '--password', default=None, help="password for use with authentication and/or sudo" ), make_option('-P', '--parallel', dest='parallel', action='store_true', default=False, help="default to parallel execution method" ), make_option('--port', default=default_port, help="SSH connection port" ), make_option('-r', '--reject-unknown-hosts', action='store_true', default=False, help="reject unknown hosts" ), make_option('--system-known-hosts', default=None, help="load system known_hosts file before reading user known_hosts" ), make_option('-R', '--roles', default=[], help="comma-separated list of roles to operate on" ), make_option('-s', '--shell', default='/bin/bash -l -c', help="specify a new shell, defaults to '/bin/bash -l -c'" ), make_option('--show', metavar='LEVELS', help="comma-separated list of output levels to show" ), make_option('--skip-bad-hosts', action="store_true", default=False, help="skip over hosts that can't be reached" ), make_option('--ssh-config-path', default=default_ssh_config_path, metavar='PATH', help="Path to SSH config file" ), make_option('-t', '--timeout', type='int', default=10, metavar="N", help="set connection timeout to N seconds" ), make_option('-T', '--command-timeout', dest='command_timeout', type='int', default=None, metavar="N", help="set remote command timeout to N seconds" ), make_option('-u', '--user', default=_get_system_username(), help="username to use when connecting to remote hosts" ), 
make_option('-w', '--warn-only', action='store_true', default=False, help="warn, instead of abort, when commands fail" ), make_option('-x', '--exclude-hosts', default=[], metavar='HOSTS', help="comma-separated list of hosts to exclude" ), make_option('-z', '--pool-size', dest='pool_size', type='int', metavar='INT', default=0, help="number of concurrent processes to use in parallel mode", ), ] # # Environment dictionary - actual dictionary object # # Global environment dict. Currently a catchall for everything: config settings # such as global deep/broad mode, host lists, username etc. # Most default values are specified in `env_options` above, in the interests of # preserving DRY: anything in here is generally not settable via the command # line. env = _AttributeDict({ 'abort_exception': None, 'again_prompt': 'Sorry, try again.', 'all_hosts': [], 'combine_stderr': True, 'colorize_errors': False, 'command': None, 'command_prefixes': [], 'cwd': '', # Must be empty string, not None, for concatenation purposes 'dedupe_hosts': True, 'default_port': default_port, 'eagerly_disconnect': False, 'echo_stdin': True, 'exclude_hosts': [], 'gateway': None, 'host': None, 'host_string': None, 'lcwd': '', # Must be empty string, not None, for concatenation purposes 'local_user': _get_system_username(), 'output_prefix': True, 'passwords': {}, 'path': '', 'path_behavior': 'append', 'port': default_port, 'real_fabfile': None, 'remote_interrupt': None, 'roles': [], 'roledefs': {}, 'shell_env': {}, 'skip_bad_hosts': False, 'ssh_config_path': default_ssh_config_path, 'ok_ret_codes': [0], # a list of return codes that indicate success # -S so sudo accepts passwd via stdin, -p with our known-value prompt for # later detection (thus %s -- gets filled with env.sudo_prompt at runtime) 'sudo_prefix': "sudo -S -p '%(sudo_prompt)s' ", 'sudo_prompt': 'sudo password:', 'sudo_user': None, 'tasks': [], 'prompts': {}, 'use_exceptions_for': {'network': False}, 'use_shell': True, 'use_ssh_config': False, 'user': None, 'version': get_version('short') }) # Fill in exceptions settings exceptions = ['network'] exception_dict = {} for e in exceptions: exception_dict[e] = False env.use_exceptions_for = _AliasDict(exception_dict, aliases={'everything': exceptions}) # Add in option defaults for option in env_options: env[option.dest] = option.default # # Command dictionary # # Keys are the command/function names, values are the callables themselves. # This is filled in when main() runs. commands = {} # # Host connection dict/cache # connections = HostConnectionCache() def _open_session(): return connections[env.host_string].get_transport().open_session() def default_channel(): """ Return a channel object based on ``env.host_string``. """ try: chan = _open_session() except ssh.SSHException, err: if str(err) == 'SSH session not active': connections[env.host_string].close() del connections[env.host_string] chan = _open_session() else: raise chan.settimeout(0.1) chan.input_enabled = True return chan # # Output controls # # Keys are "levels" or "groups" of output, values are always boolean, # determining whether output falling into the given group is printed or not # printed. # # By default, everything except 'debug' is printed, as this is what the average # user, and new users, are most likely to expect. # # See docs/usage.rst for details on what these levels mean. 
output = _AliasDict({
    'status': True,
    'aborts': True,
    'warnings': True,
    'running': True,
    'stdout': True,
    'stderr': True,
    'debug': False,
    'user': True
}, aliases={
    'everything': ['warnings', 'running', 'user', 'output'],
    'output': ['stdout', 'stderr'],
    'commands': ['stdout', 'running']
})
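# --- Illustrative sketch (not part of the original module) ---
# The alias keys above fan out to their member keys on assignment: turning
# off 'everything' silences warnings/running/user plus the nested 'output'
# alias (stdout and stderr), while 'debug' is untouched.  Guarded so that
# importing this module keeps the defaults.
if __name__ == '__main__':
    output['everything'] = False
    print([(k, output[k]) for k in ('warnings', 'running', 'stdout', 'stderr', 'debug')])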
import logging import sqlite3 from common import utils from models.artist import Artist from models.album import Album from models.base import BaseModel logging.basicConfig(format="%(asctime)s %(message)s", level=logging.DEBUG) class Track(BaseModel): def __init__(self, db, id=None, **kwargs): self._db = db self.__data = {} if id is not None: for row in self._db.execute("SELECT * FROM track WHERE id = ?", (id,)): for key in ["id", "tracknumber", "name", "grouping", "filename"]: setattr(self, key, row[key]) self.__data[key] = row[key] else: for (key, value) in kwargs.items(): setattr(self, key, value) self.__data[key] = value def delete(self): delete_sql = "DELETE FROM track WHERE id = ?" with self._db.conn: self._db.execute(delete_sql, (self.id,)) # If there is an old album, remove it if it no longer has any # tracks try: del self._album except Exception: pass old_album = self.album if old_album: self._db.execute("DELETE FROM album_track WHERE track_id = ?", (self.id,)) if not old_album.tracks: old_album.delete() # If there are old artists, remove them if they no longer have # any tracks try: del self._artists except Exception: pass old_artists = self.artists for old_artist in old_artists: self._db.execute("DELETE FROM artist_track WHERE track_id = " "?", (self.id,)) if not old_artist.tracks: old_artist.delete() return True @property def db(self): return self._db @db.setter def db(self, db): self._db = db @property def album(self): if not hasattr(self, "_album"): setattr(self, "_album", None) for row in self._db.execute("SELECT album.* FROM album INNER " "JOIN album_track ON album.id = " "album_track.album_id WHERE " "track_id = ? LIMIT 1", (self.id,)): setattr(self, "_album", Album(id=row["id"], db=self._db, name=row["name"], date=row["date"])) return self._album @property def artists(self): if not hasattr(self, "_artists"): cursor = self._db.cursor() setattr(self, "_artists", []) for row in cursor.execute("SELECT artist.* FROM artist INNER JOIN " "artist_track ON artist.id = " "artist_track.artist_id WHERE " "artist_track.track_id = ?", (self.id,)): self._artists.append(Artist(id=row["id"], db=self._db, name=row["name"], sortname=row["sortname"], musicbrainz_artistid=row[ "musicbrainz_artistid"])) return self._artists def update(self, metadata): c = self._db.cursor() artist_changed = False album_changed = False artist_names = metadata["artist"] musicbrainz_artist_ids = [] artistsorts = [] try: musicbrainz_artist_ids = metadata["musicbrainz_artistid"] except KeyError: pass try: artistsorts = metadata["artistsort"] except KeyError: pass i = 0 artists = [] for artist_name in artist_names: artist = None musicbrainz_artistid = None artistsort = None try: musicbrainz_artistid = musicbrainz_artist_ids[i] except IndexError: pass try: artistsort = artistsorts[i] except IndexError: pass rows = None if musicbrainz_artistid: rows = c.execute("SELECT * FROM artist WHERE " "musicbrainz_artistid = ?", (musicbrainz_artistid,)) else: rows = c.execute("SELECT * FROM artist WHERE name = ?", (artist_name,)) row = rows.fetchone() if row: artist = Artist(id=row["id"], db=self._db, name=row["name"], sortname=row["sortname"], musicbrainz_artistid=row[ "musicbrainz_artistid"]) if artist.name != artist_name: c.execute("UPDATE artist SET name = ? WHERE id = ?", (artist_name, artist.id)) artist.name = artist_name if artist.sortname != artistsort: c.execute("UPDATE artist SET sortname = ? 
WHERE id = ?", (artistsort, id)) artist.sortname = artistsort else: c.execute("INSERT INTO artist (name, sortname, " "musicbrainz_artistid) VALUES(?, ?, ?)", (artist_name, artistsort, musicbrainz_artistid)) artist = Artist( id=c.lastrowid, db=self._db, name=artist_name, sortname=artistsort, musicbrainz_artistid=musicbrainz_artistid ) i += 1 artists.append(artist) album_name = None album_date = None mb_albumid = None album = None try: album_name = metadata["album"][0] except KeyError: pass try: album_date = metadata["date"][0] except KeyError: pass try: mb_albumid = metadata["musicbrainz_albumid"][0] except KeyError: pass if mb_albumid: rows = c.execute( "SELECT * FROM album WHERE musicbrainz_albumid = ?", (mb_albumid,) ) row = rows.fetchone() if row: album = Album(id=row["id"], db=self._db, name=row["name"], date=row["date"], musicbrainz_albumid=row["musicbrainz_albumid"]) else: c.execute("INSERT INTO album (name, `date`, " "musicbrainz_albumid) VALUES (?, ?, ?)", (album_name, album_date, mb_albumid)) album = Album(id=c.lastrowid, db=self._db, name=album_name, date=album_date, musicbrainz_albumid=mb_albumid) elif album_name: rows = c.execute( "SELECT album.* FROM album INNER JOIN album_artist ON " "album_artist.album_id = album.id WHERE album.name = ? " "AND artist_id = ?", (album_name, artist.id) ) row = rows.fetchone() if row: album = Album(id=row["id"], db=self._db, name=row["name"], date=row["date"], musicbrainz_albumid=row["musicbrainz_albumid"]) else: c.execute("INSERT INTO album (name, `date`) VALUES (?, ?)", (album_name, album_date)) album = Album(id=c.lastrowid, db=self._db, name=album_name, date=album_date) if album: if album.name != album_name: c.execute("UPDATE album SET name = ? WHERE id = ?", (album_name, album.id)) album.name = album_name if album.date != album_date: c.execute("UPDATE album SET date = ? WHERE id = ?", (album_date, album.id)) album.date = album_date track_number = None track_name = None track_grouping = None try: track_number = metadata["tracknumber"][0] setattr(self, "tracknumber", track_number) except KeyError: pass try: track_name = metadata["title"][0] setattr(self, "name", track_name) except KeyError: pass try: track_grouping = metadata["grouping"][0] setattr(self, "grouping", track_grouping) except KeyError: pass c.execute("UPDATE track SET tracknumber = ?, name = ?, grouping = ? 
" "WHERE id = ?", (track_number, track_name, track_grouping, self.id)) # If there is an old album, remove it if it no longer has any tracks try: del self._album except Exception: pass old_album = self.album if old_album: c.execute("DELETE FROM album_track WHERE track_id = ?", (self.id,)) # If there are old artists, remove them if they no longer have # any tracks try: del self._artists except Exception: pass old_artists = self.artists for old_artist in old_artists: c.execute("DELETE FROM artist_track WHERE track_id = ?", (self.id,)) if album: try: c.execute("INSERT INTO album_track (album_id, track_id) " "VALUES(?, ?)", (album.id, self.id)) except sqlite3.IntegrityError: pass if not old_album.tracks: old_album.delete() setattr(self, "_album", album) for artist in artists: try: c.execute("INSERT INTO artist_track (artist_id, track_id) " "VALUES(?, ?)", (artist.id, self.id)) except sqlite3.IntegrityError: pass if not old_artist.tracks: old_artist.delete() if album: try: c.execute( "INSERT INTO album_artist (artist_id, album_id) " "VALUES(?, ?)", (artist.id, album.id)) except sqlite3.IntegrityError: pass if artists: setattr(self, "_artists", artists) c.close() self._db.commit() return True def save(self): dirty_attributes = {} # check if the internal dict has been modified for (attr, value) in self.__dict__.items(): try: if self.__data[attr] != getattr(self, attr): dirty_attributes[attr] = value except AttributeError: pass except KeyError: pass if len(dirty_attributes) > 0: set_clause = utils.update_clause_from_dict(dirty_attributes) dirty_attributes["id"] = self.id sql = " ".join(("UPDATE track", set_clause, "WHERE id = :id")) with self._db.conn: self._db.execute(sql, dirty_attributes) @classmethod def search(cls, database, **search_params): """Find a track with the given params Args: tracknumber: dict, with 'data' and 'operator' keys name: dict, with 'data' and 'operator' keys grouping: dict, with 'data' and 'operator' keys filename: dict, with 'data' and 'operator' keys """ tracks = [] # unpack search params where_params = {} value_params = {} for (attr, value) in search_params.items(): where_params[attr] = value["operator"] value_params[attr] = value["data"] where_clause = utils.make_where_clause(where_params) result = None if where_clause: statement = " ".join(("SELECT * FROM track", where_clause)) result = database.execute(statement, value_params) else: result = database.execute("SELECT * FROM track") for row in result: tracks.append( Track(id=row["id"], db=database, tracknumber=row["tracknumber"], name=row["name"], grouping=row["grouping"], filename=row["filename"]) ) return tracks @classmethod def find_by_path(cls, path, database): track = None for row in database.execute("SELECT * FROM track WHERE filename = ? 
" "LIMIT 1", (path,)): track = Track(id=row["id"], db=database, tracknumber=row["tracknumber"], name=row["name"], grouping=row["grouping"], filename=row["filename"]) return track @classmethod def store(cls, filename, metadata, database): if Track.find_by_path(filename, database): return True c = database.cursor() artist_names = metadata["artist"] musicbrainz_artist_ids = [] artistsorts = [] try: musicbrainz_artist_ids = metadata["musicbrainz_artistid"] except KeyError: pass try: artistsorts = metadata["artistsort"] except KeyError: pass i = 0 artists = [] for artist_name in artist_names: musicbrainz_artistid = None artistsort = None try: musicbrainz_artistid = musicbrainz_artist_ids[i] except IndexError: pass try: artistsort = artistsorts[i] except IndexError: pass rows = None row = None if musicbrainz_artistid: rows = c.execute("SELECT * FROM artist WHERE " "musicbrainz_artistid = ?", (musicbrainz_artistid,)) row = rows.fetchone() if not row: rows = c.execute("SELECT * FROM artist WHERE name = ? " "AND musicbrainz_artistid IS NULL", (artist_name,)) row = rows.fetchone() if not row: rows = c.execute("SELECT * FROM artist WHERE name = ?", (artist_name,)) row = rows.fetchone() if row: artist = Artist(id=row["id"], db=database, name=row["name"], sortname=row["sortname"], musicbrainz_artistid=row[ "musicbrainz_artistid"]) if (musicbrainz_artistid and (not hasattr(artist, "musicbrainz_artistid") or not artist.musicbrainz_artistid)): c.execute("UPDATE artist SET musicbrainz_artistid = ? " "WHERE id = ?", (musicbrainz_artistid, artist.id)) if (artistsort and (not hasattr(artist, "sortname") or not artist.sortname)): c.execute("UPDATE artist SET sortname = ? WHERE id = ?", (artistsort, artist.id)) else: c.execute("INSERT INTO artist (name, sortname, " "musicbrainz_artistid) VALUES(?, ?, ?)", (artist_name, artistsort, musicbrainz_artistid)) artist = Artist( id=c.lastrowid, db=database, name=artist_name, sortname=artistsort, musicbrainz_artistid=musicbrainz_artistid ) i += 1 artists.append(artist) album_name = None album_date = None mb_albumid = None album = None try: album_name = metadata["album"][0] except KeyError: pass try: album_date = metadata["date"][0] except KeyError: pass try: mb_albumid = metadata["musicbrainz_albumid"][0] except KeyError: pass if mb_albumid: rows = c.execute("SELECT * FROM album WHERE " "musicbrainz_albumid = ?", (mb_albumid,)) row = rows.fetchone() if row: album = Album(id=row["id"], db=database, name=row["name"], date=row["date"], musicbrainz_albumid=row[ "musicbrainz_albumid"]) else: c.execute("INSERT INTO album (name, `date`, " "musicbrainz_albumid) VALUES (?, ?, ?)", (album_name, album_date, mb_albumid)) album = Album(id=c.lastrowid, db=database, name=album_name, date=album_date, musicbrainz_albumid=mb_albumid) elif album_name: for artist in artists: rows = c.execute("SELECT album.* FROM album INNER JOIN " "album_artist ON album_artist.album_id = " "album.id WHERE album.name = ? 
AND " "artist_id = ?", (album_name, artist.id)) row = rows.fetchone() if row: album = Album(id=row["id"], db=database, name=row["name"], date=row["date"]) else: c.execute("INSERT INTO album (name, `date`) VALUES(?, ?)", (album_name, album_date)) album = Album(id=c.lastrowid, db=database, name=album_name, date=album_date) for artist in artists: if album: try: c.execute("INSERT INTO album_artist (artist_id, album_id) " "VALUES(?, ?)", (artist.id, album.id)) except sqlite3.IntegrityError: pass track_number = None track_name = None track_grouping = None try: track_number = metadata["tracknumber"][0] except KeyError: pass try: track_name = metadata["title"][0] except KeyError: pass try: track_grouping = metadata["grouping"][0] except KeyError: pass track = None rows = c.execute("SELECT * FROM track WHERE filename = ?", (filename,)) row = rows.fetchone() if row: track = Track(id=row["id"], db=database, tracknumber=row["tracknumber"], name=row["name"], grouping=row["grouping"], filename=row["filename"]) else: c.execute("INSERT INTO track (tracknumber, name, grouping, " "filename) VALUES(?, ?, ?, ?)", (track_number, track_name, track_grouping, filename)) track = Track(id=c.lastrowid, db=database, tracknumber=track_number, name=track_name, grouping=track_grouping, filename=filename) if album: try: c.execute("INSERT INTO album_track (album_id, track_id) " "VALUES(?,?)", (album.id, track.id)) except sqlite3.IntegrityError: pass for artist in artists: try: c.execute("INSERT INTO artist_track (artist_id, track_id) " "VALUES(?, ?)", (artist.id, track.id)) except sqlite3.IntegrityError: pass database.commit() c.close() return track @classmethod def all(cls, database, order="track.id", direction="ASC", limit=None, offset=None): tracks = [] select_string = "SELECT * FROM track LEFT JOIN artist_track ON " \ "artist_track.track_id = track.id LEFT JOIN artist ON " \ "artist_track.artist_id = artist.id LEFT JOIN album_track ON " \ "album_track.track_id = track.id LEFT JOIN album ON " \ "album_track.album_id = album.id ORDER BY %s %s" % (order, direction) if limit and offset: select_string = " ".join((select_string, "LIMIT %s OFFSET %s" % (limit, offset))) result = database.execute(select_string) for row in result: tracks.append(Track(id=row["id"], db=database, tracknumber=row["tracknumber"], name=row["name"], grouping=row["name"], filename=row["filename"])) return tracks
from __future__ import absolute_import import time import logging import random, base64, struct import hashlib import os import urllib import sys from pgoapi.exceptions import (ServerSideRequestThrottlingException, NotLoggedInException, ServerBusyOrOfflineException, NoPlayerPositionSetException, HashingOfflineException, UnexpectedResponseException, BadHashRequestException, BannedAccountException) from pgoapi.pgoapi import PGoApi from pgoapi.pgoapi import PGoApiRequest from pgoapi.pgoapi import RpcApi from pgoapi.protos.pogoprotos.networking.requests.request_type_pb2 import RequestType from pgoapi.utilities import get_time from .human_behaviour import sleep, gps_noise_rng from pokemongo_bot.base_dir import _base_dir from geopy.geocoders import GoogleV3 class PermaBannedException(Exception): pass class ApiWrapper(PGoApi, object): DEVICE_ID = None def __init__(self, config=None): self.config = config self.gen_device_id() self.logger = logging.getLogger(__name__) device_info = { "device_id": ApiWrapper.DEVICE_ID, "device_brand": 'Apple', "device_model": 'iPhone', "device_model_boot": 'iPhone10,2', "hardware_manufacturer": 'Apple', "hardware_model": 'D21AP', "firmware_brand": 'iPhone OS', "firmware_type": '11.1.0' } PGoApi.__init__(self, device_info=device_info) if not self.config.hashkey is None: PGoApi.activate_hash_server(self,self.config.hashkey) # Set to default, just for CI... self.actual_lat, self.actual_lng, self.actual_alt = PGoApi.get_position(self) self.teleporting = False self.noised_lat, self.noised_lng, self.noised_alt = self.actual_lat, self.actual_lng, self.actual_alt self.useVanillaRequest = False def gen_device_id(self): if self.config is None or self.config.username is None: ApiWrapper.DEVICE_ID = "3d65919ca1c2fc3a8e2bd7cc3f974c34" return file_salt = None did_path = os.path.join(_base_dir, 'data', 'deviceid-%s.txt' % self.config.username) if os.path.exists(did_path): file_salt = open(did_path, 'r').read() if self.config is not None: key_string = self.config.username if file_salt is not None: # Config and file are set, so use those. ApiWrapper.DEVICE_ID = hashlib.md5(key_string + file_salt).hexdigest()[:32] else: # Config is set, but file isn't, so make it. rand_float = random.SystemRandom().random() salt = base64.b64encode((struct.pack('!d', rand_float))) ApiWrapper.DEVICE_ID = hashlib.md5(key_string + salt).hexdigest()[:32] with open(did_path, "w") as text_file: text_file.write("{0}".format(salt)) else: if file_salt is not None: # No config, but there's a file, use it. ApiWrapper.DEVICE_ID = hashlib.md5(file_salt).hexdigest()[:32] else: # No config or file, so make up a reasonable default. 
ApiWrapper.DEVICE_ID = "3d65919ca1c2fc3a8e2bd7cc3f974c34" def create_request(self): RequestClass = ApiRequest if self.useVanillaRequest: RequestClass = PGoApiRequest return RequestClass( self, self._position_lat, self._position_lng, self._position_alt ) def get_component(self, location, component_type): for component in location.raw['address_components']: if component_type in component['types']: return component['short_name'] def login(self, provider, username, password): # login needs base class "create_request" self.useVanillaRequest = True # Get Timecode and Country Code country_code = "US" timezone = "America/Chicago" geolocator = GoogleV3(api_key=self.config.gmapkey) if self.config.locale_by_location: try: location = geolocator.reverse((self.actual_lat, self.actual_lng), timeout = 10, exactly_one=True) country_code = self.get_component(location,'country') except: self.logger.warning("Please make sure you have google api key and enable Google Maps Geocoding API at console.developers.google.com") try: timezone = geolocator.timezone([self.actual_lat, self.actual_lng], timeout=10) except: self.logger.warning("Please make sure you have google api key and enable Google Maps Time Zone API at console.developers.google.com") # Start login process try: if self.config.proxy: PGoApi.set_authentication( self, provider, username=username, password=password, proxy_config={'http': self.config.proxy, 'https': self.config.proxy} ) else: PGoApi.set_authentication( self, provider, username=username, password=password ) except: raise try: if self.config.locale_by_location: response = PGoApi.app_simulation_login(self,country_code,timezone.zone) else: response = PGoApi.app_simulation_login(self) # To prevent user who have not update the api being caught off guard by errors except BadHashRequestException: self.logger.warning("Your hashkey seems to have expired or is not accepted!") self.logger.warning("Please set a valid hash key in your auth JSON file!") exit(-3) raise except BannedAccountException: self.logger.warning("This account is banned!") exit(-3) raise except: raise # cleanup code self.useVanillaRequest = False return response def set_position(self, lat, lng, alt=None, teleporting=False): self.actual_lat = lat self.actual_lng = lng if None != alt: self.actual_alt = alt else: alt = self.actual_alt self.teleporting = teleporting if self.config.replicate_gps_xy_noise: lat_noise = gps_noise_rng(self.config.gps_xy_noise_range) lng_noise = gps_noise_rng(self.config.gps_xy_noise_range) lat = lat + lat_noise lng = lng + lng_noise if self.config.replicate_gps_z_noise: alt_noise = gps_noise_rng(self.config.gps_z_noise_range) alt = alt + alt_noise self.noised_lat, self.noised_lng, self.noised_alt = lat, lng, alt PGoApi.set_position(self, lat, lng, alt) def get_position(self): return (self.actual_lat, self.actual_lng, self.actual_alt) class ApiRequest(PGoApiRequest): def __init__(self, *args): PGoApiRequest.__init__(self, *args) self.logger = logging.getLogger(__name__) self.request_callers = [] self.last_api_request_time = None self.requests_per_seconds = 2 def can_call(self): if not self._req_method_list: raise EmptySubrequestChainException() if (self._position_lat is None) or (self._position_lng is None) or (self._position_alt is None): raise NoPlayerPositionSetException() if self._auth_provider is None or not self._auth_provider.is_login(): self.log.info('Not logged in') raise NotLoggedInException() return True def _call(self): for _attempt in range(10): try: return PGoApiRequest.call(self) except: 
self.log.info('Request failed, retrying.') sleep(1) else: break def _pop_request_callers(self): r = self.request_callers self.request_callers = [] return [i.upper() for i in r] def is_response_valid(self, result, request_callers): if not result or result is None or not isinstance(result, dict): return False if not 'responses' in result or not 'status_code' in result: return False if not isinstance(result['responses'], dict): return False try: # Permaban symptom is empty response to GET_INVENTORY and status_code = 3 if result['status_code'] == 3 and 'GET_INVENTORY' in request_callers and not result['responses'][ 'GET_INVENTORY']: raise PermaBannedException except KeyError: # Still wrong return False # the response can still programatically be valid at this point # but still be wrong. we need to check if the server did sent what we asked it for request_caller in request_callers: if not request_caller in result['responses']: return False return True def call(self, max_retry=15): request_callers = self._pop_request_callers() if not self.can_call(): return False # currently this is never ran, exceptions are raised before request_timestamp = None api_req_method_list = self._req_method_list result = None try_cnt = 0 throttling_retry = 0 unexpected_response_retry = 0 while True: request_timestamp = self.throttle_sleep() # self._call internally clear this field, so save it self._req_method_list = [req_method for req_method in api_req_method_list] should_throttle_retry = False should_unexpected_response_retry = False hashing_offline = False try: result = self._call() except ServerSideRequestThrottlingException: should_throttle_retry = True except HashingOfflineException: hashing_offline = True except UnexpectedResponseException: should_unexpected_response_retry = True except: should_unexpected_response_retry = True if hashing_offline: self.logger.warning('Hashing server issue, retrying in 5 Secs...') sleep(5) continue if should_throttle_retry: throttling_retry += 1 if throttling_retry >= max_retry: raise ServerSideRequestThrottlingException('Server throttled too many times') sleep(1) # huge sleep ? continue # skip response checking if should_unexpected_response_retry: unexpected_response_retry += 1 if unexpected_response_retry >= 5: self.logger.warning( 'Server is not responding correctly to our requests. Waiting for 30 seconds to reconnect.') sleep(30) else: sleep(2) continue if not self.is_response_valid(result, request_callers): try_cnt += 1 if try_cnt > 3: self.logger.warning( 'Server seems to be busy or offline - try again - {}/{}'.format(try_cnt, max_retry)) if try_cnt >= max_retry: raise ServerBusyOrOfflineException() sleep(1) else: break self.last_api_request_time = request_timestamp return result def __getattr__(self, func): if func.upper() in RequestType.keys(): self.request_callers.append(func) return PGoApiRequest.__getattr__(self, func) def throttle_sleep(self): now_milliseconds = time.time() * 1000 required_delay_between_requests = 1000 / self.requests_per_seconds difference = now_milliseconds - (self.last_api_request_time if self.last_api_request_time else 0) if self.last_api_request_time != None and difference < required_delay_between_requests: sleep_time = required_delay_between_requests - difference time.sleep(sleep_time / 1000) return now_milliseconds
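# --- Illustrative note (not part of the original module) ---
# Worked example of the maths in ApiRequest.throttle_sleep(): with the default
# requests_per_seconds = 2 the required gap is 1000 / 2 = 500 ms, so a call
# issued 200 ms after the previous one sleeps for roughly 300 ms first.
if __name__ == '__main__':
    requests_per_seconds = 2
    required_delay = 1000 / requests_per_seconds   # 500 ms between requests
    elapsed_since_last = 200                       # ms since the previous call (example)
    print(max(0, required_delay - elapsed_since_last))  # -> 300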
from django.db import models from django.forms.models import BaseInlineFormSet as _BaseInlineFormSet from django.utils.translation import ugettext_lazy as _ from django_superform import SuperModelForm from django_superform.forms import SuperModelFormMetaclass from floppyforms import fields from floppyforms.__future__.models import formfield_callback as floppyforms_formfield_callback from floppyforms.__future__.models import ModelFormMetaclass as FloppyformsModelFormMetaclass import floppyforms.__future__ as forms from .selectrelated import SelectRelatedField __all__ = ( 'FORMFIELD_OVERRIDES', 'FORMFIELD_OVERRIDE_DEFAULTS', 'add_formfield_override', 'formfield_callback', 'BackendFormMetaclass', 'BaseBackendForm', 'CopyOnTranslateInlineFormSetMixin', 'BaseBackendInlineFormSet') FORMFIELD_OVERRIDES = { models.ForeignKey: {'form_class': SelectRelatedField}, } FORMFIELD_OVERRIDE_DEFAULTS = {'choices_form_class': fields.TypedChoiceField} def add_formfield_override(db_field, overrides): """ Allow external apps to add new overrides so that custom db fields can use custom form fields in the backend. """ current_overrides = FORMFIELD_OVERRIDES.setdefault(db_field, {}) current_overrides.update(overrides) def formfield_callback(db_field, **kwargs): defaults = FORMFIELD_OVERRIDE_DEFAULTS.copy() if hasattr(db_field, 'rel') and hasattr(db_field.rel, 'to'): lookup = (db_field.__class__, db_field.rel.to) if lookup in FORMFIELD_OVERRIDES: defaults.update(FORMFIELD_OVERRIDES[lookup]) defaults.update(kwargs) return db_field.formfield(**defaults) if db_field.__class__ in FORMFIELD_OVERRIDES: defaults.update(FORMFIELD_OVERRIDES[db_field.__class__]) defaults.update(kwargs) return db_field.formfield(**defaults) return floppyforms_formfield_callback(db_field, **kwargs) class BackendFormMetaclass(SuperModelFormMetaclass, FloppyformsModelFormMetaclass): def __new__(mcs, name, bases, attrs): if 'formfield_callback' not in attrs: attrs['formfield_callback'] = formfield_callback return super(BackendFormMetaclass, mcs).__new__( mcs, name, bases, attrs) class BaseBackendForm(SuperModelForm, forms.ModelForm): ''' This is the base form that should be used by all backends. It handles the language of objects as expected. ''' __metaclass__ = BackendFormMetaclass class Meta: exclude = () def __init__(self, *args, **kwargs): self.request = kwargs.pop('request', None) super(BaseBackendForm, self).__init__(*args, **kwargs) self.bind_widgets() def bind_widgets(self): for field in self.fields.values(): if hasattr(field.widget, 'bind_to_form'): field.widget.bind_to_form(self) @property def extra_fields(self): return [self[name] for name in self.composite_fields] class CopyOnTranslateInlineFormSetMixin(object): ''' Takes care of the logic to replace the relations in the inline objects if the formset's instance is different to the objects foreign key. This is the case if the form is going to be translated. ''' def clone_object(self, obj, commit): related_object = self.instance if obj.pk is not None: return obj.clone(attrs={ self.fk.name: related_object, }, commit=commit) else: setattr(obj, self.fk.name, related_object) if commit: obj.save() return obj def related_has_changed(self, obj): return getattr(obj, self.fk.get_attname()) != self.instance.pk # This is a (nearly) exact copy from the base class. The only difference is # that we did put in the delete_object hook. 
def save_existing_objects(self, commit=True): self.changed_objects = [] self.deleted_objects = [] if not self.initial_forms: return [] saved_instances = [] forms_to_delete = self.deleted_forms for form in self.initial_forms: pk_name = self._pk_field.name raw_pk_value = form._raw_value(pk_name) # clean() for different types of PK fields can sometimes return # the model instance, and sometimes the PK. Handle either. pk_value = form.fields[pk_name].clean(raw_pk_value) pk_value = getattr(pk_value, 'pk', pk_value) obj = self._existing_object(pk_value) if form in forms_to_delete: self.deleted_objects.append(obj) # THIS IS THE REASON WHY WE DUPLICATE THIS METHOD FROM THE BASE # CLASS. # We need a hook for deleting objects. self.delete_object(form, obj) continue if form.has_changed() or self.related_has_changed(obj): self.changed_objects.append((obj, form.changed_data)) saved_instances.append(self.save_existing(form, obj, commit=commit)) if not commit: self.saved_forms.append(form) return saved_instances def delete_object(self, form, obj): # Only delete when we have not cloned. if not self.related_has_changed(obj): obj.delete() def save_object(self, form, commit=True, form_save_kwargs=None): form_save_kwargs = form_save_kwargs or {} obj = form.save(commit=False, **form_save_kwargs) if self.related_has_changed(obj): return self.clone_object(obj, commit=commit) if commit: obj.save() if commit and hasattr(form, 'save_m2m'): form.save_m2m() return obj def save_existing(self, form, instance, commit=True): return self.save_object(form, commit=commit) def save_new(self, form, commit=True): return self.save_object(form, commit=commit) class BaseBackendInlineFormSet(CopyOnTranslateInlineFormSetMixin, _BaseInlineFormSet): ''' Should be used for all inline formsets in the backend. It takes care of copying the related objects on a translate. ''' def __init__(self, *args, **kwargs): self.order_field = kwargs.pop('order_field', None) self.min_forms = kwargs.pop('min_forms', None) super(BaseBackendInlineFormSet, self).__init__(*args, **kwargs) # Order the inline queryset if possible. if self.order_field is None: self.order_field = getattr(self.model, 'order_field', None) if self.order_field: self.queryset = self.queryset.order_by(self.order_field) def add_fields(self, form, index): super(BaseBackendInlineFormSet, self).add_fields(form, index) if self.order_field and self.order_field in form.fields: form.fields[self.order_field].widget = forms.HiddenInput() if self.can_delete: form.fields['DELETE'].widget = forms.HiddenInput() def filled_form_count(self): ''' Return the number of forms that the user has actually filled out. 
''' deleted_forms = self.deleted_forms return len([ form for form in self.forms if getattr(form, 'cleaned_data', None) and form not in deleted_forms]) def full_clean(self): super(BaseBackendInlineFormSet, self).full_clean() if self.min_forms: if self.is_bound: if self.filled_form_count() < self.min_forms: self._non_form_errors.append(_('Please add at least %(min_forms)s item.') % { 'min_forms': self.min_forms, }) # class TranslatableForm(BaseBackendForm): # def __init__(self, *args, **kwargs): # self.origin = kwargs.pop('origin', None) # # instance = kwargs.pop('instance', None) # create = not (instance and instance.pk) # # if self.origin is not None and create: # instance = self.origin.clone(commit=False) # # kwargs['instance'] = instance # super(BaseBackendForm, self).__init__(*args, **kwargs) # # def set_language(self): # self.instance.language = language.active # if self.origin is not None: # self.instance.translation_set = self.origin.translation_set # # def save(self, *args, **kwargs): # create = self.instance.pk is None # if create: # self.set_language() # return super(BaseBackendForm, self).save(*args, **kwargs) # # # class VersionableForm(BaseBackendForm): # ''' # A base form for models that inherit from VersionableMixin. They do always # come together with a RevisionForm and therefore need a bit extra handling # (e.g. they want to ignore the clonable stuff). # ''' # # class Meta: # exclude = ( # 'translation_set', # 'language', # ) # # def __init__(self, *args, **kwargs): # self.request = kwargs.pop('request') # self.origin = kwargs.pop('origin', None) # # # Do not process origin here and try to clone it. That will happen in # # RevisionForm for the revisioned model. # # forms.ModelForm.__init__(self, *args, **kwargs) # # # class RevisionForm(BaseBackendForm): # ''' # And a base form for all revisionable models (mostly pages). # Should be used on page backends. # ''' # # class Meta: # exclude = ( # 'revision_parent', # 'revision_author', # 'is_working_copy', # 'working_copy_expires', # ) # # def __init__(self, *args, **kwargs): # self.save_working_copy = kwargs.pop('save_working_copy', None) # self.save_branch = kwargs.pop('save_branch', None) # # super(RevisionForm, self).__init__(*args, **kwargs) # # # Remove the field that points to the base object. # parent_field = self._meta.model._versionable_foreign_key # del self.fields[parent_field] # # def set_language(self, origin): # # Translations are handled in VersionableForm. # pass # # def save(self, *args, **kwargs): # create = self.instance.pk is None # # # If a new component item is created, we need to save to a branch. # # Saving to a working copy wouldn't make sense. # assert not create or self.save_branch # # if self.save_branch: # self.instance.is_working_copy = False # self.instance.working_copy_expires = None # # if self.save_working_copy then we don't need to do anything since # # django's default machinery is what we want. # if self.save_working_copy: # pass # # if create: # base_object = kwargs.pop('base_object') # setattr(self.instance, self.instance._versionable_foreign_key, base_object) # # self.instance.revision_author = self.request.user # # result = super(RevisionForm, self).save(*args, **kwargs) # # # make sure m2m relations are cloned, too # if kwargs.get('commit', True) and hasattr(result, 'clone_m2m'): # result.clone_m2m() # # # We cannot account for the commit argument here, since set_branch # # needs a primary key to be available. And if this object is about to # # be created and commit=False was passed... 
#         # we don't have a pk here. So in this case it will blow up loudly.
#         assert self.instance.pk is not None
#         if self.save_branch:
#             self.instance.set_branch(slug=self.save_branch)
#
#         return result
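# A hedged usage sketch of the FORMFIELD_OVERRIDES mechanism defined above:
# external apps call add_formfield_override() so that a given model field
# class is rendered with custom form-field kwargs in all backend forms.
# Overriding the widget used for models.TextField is an illustrative choice
# only, not something this module does by itself.
import floppyforms

add_formfield_override(models.TextField, {'widget': floppyforms.Textarea})

# From now on, formfield_callback() merges this entry into the kwargs it
# passes to TextField.formfield() whenever a BaseBackendForm is generated.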
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function from ..wrapped_decorator import signature_safe_contextmanager import multiprocessing import os import six import threading from ..data_feeder import DataFeeder from .control_flow import BlockGuard from .layer_function_generator import templatedoc from .. import core from ..executor import global_scope from ..framework import convert_np_dtype_to_dtype_, default_main_program, \ default_startup_program, program_guard, Program, Variable from ..layer_helper import LayerHelper from ..unique_name import generate as unique_name __all__ = [ 'data', 'open_files', 'read_file', 'shuffle', 'batch', 'double_buffer', 'random_data_generator', 'py_reader', 'create_py_reader_by_data', 'Preprocessor', 'load' ] def data(name, shape, append_batch_size=True, dtype='float32', lod_level=0, type=core.VarDesc.VarType.LOD_TENSOR, stop_gradient=True): """ **Data Layer** This function takes in the input and based on whether data has to be returned back as a minibatch, it creates the global variable by using the helper functions. The global variables can be accessed by all the following operators in the graph. All the input variables of this function are passed in as local variables to the LayerHelper constructor. Args: name(str): The name/alias of the function shape(list): Tuple declaring the shape. If :code:`append_batch_size` is True and there is no -1 inside :code:`shape`, it should be considered as the shape of the each sample. Otherwise, it should be considered as the shape of the batched data. append_batch_size(bool): 1. If true, it prepends -1 to the shape. For example if shape=[1], the resulting shape is [-1, 1]. 2. If shape contains -1, such as shape=[1, -1], append_batch_size will be enforced to be be False (ineffective). dtype(basestring): The type of data : float32, float_16, int etc type(VarType): The output type. By default it is LOD_TENSOR. lod_level(int): The LoD Level. 0 means the input data is not a sequence. stop_gradient(bool): A boolean that mentions whether gradient should flow. Returns: Variable: The global variable that gives access to the data. Examples: .. code-block:: python data = fluid.layers.data(name='x', shape=[784], dtype='float32') """ helper = LayerHelper('data', **locals()) shape = list(shape) for i in six.moves.range(len(shape)): if shape[i] is None: shape[i] = -1 append_batch_size = False elif shape[i] < 0: append_batch_size = False if append_batch_size: shape = [-1] + shape # append batch size as -1 data_var = helper.create_global_variable( name=name, shape=shape, dtype=dtype, type=type, stop_gradient=stop_gradient, lod_level=lod_level, is_data=True) return data_var class BlockGuardServ(BlockGuard): """ BlockGuardServ class. BlockGuardServ class is used to create an op with a block in a program. 
""" def __init__(self, server): if not (isinstance(server, ListenAndServ)): raise TypeError("BlockGuardServ takes a ListenAndServ") super(BlockGuardServ, self).__init__(server.helper.main_program) self.server = server def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None: return False self.server.complete_op() return super(BlockGuardServ, self).__exit__(exc_type, exc_val, exc_tb) class ListenAndServ(object): """ **ListenAndServ Layer** ListenAndServ is used to create a rpc server bind and listen on specific TCP port, this server will run the sub-block when received variables from clients. Args: endpoint(string): IP:port string which the server will listen on. inputs(list): a list of variables that the server will get from clients. fan_in(int): how many client are expected to report to this server, default: 1. optimizer_mode(bool): whether to run the server as a parameter server, default: True. Examples: .. code-block:: python with fluid.program_guard(main): serv = layers.ListenAndServ( "127.0.0.1:6170", ["X"], optimizer_mode=False) with serv.do(): x = layers.data( shape=[32, 32], dtype='float32', name="X", append_batch_size=False) fluid.initializer.Constant(value=1.0)(x, main.global_block()) layers.scale(x=x, scale=10.0, out=out_var) exe = fluid.Executor(place) exe.run(main) """ def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True): self.helper = LayerHelper("listen_and_serv") self.inputs = inputs self.outputs = [] self.endpoint = endpoint self.fan_in = fan_in # FIXME(typhoonzero): add optimizer_mode is stupid, should make it more # general. self.optimizer_mode = optimizer_mode def do(self): return BlockGuardServ(self) def get_params_and_grads(self): main_program = self.helper.main_program current_block = main_program.current_block() parent_block = self.parent_block() # params and grads in the same order. params = list() grads = list() for op in current_block.ops: # FIXME(typhoonzero): op.inputs is None if it's cloned. if self.optimizer_mode: if "Grad" in op.inputs and "Param" in op.inputs: params.append(op.inputs["Param"].name) grads.append(op.inputs["Grad"].name) else: # simple recv mode, recv operators inputs. for iname in op.input_names: for in_var_name in op.input(iname): params.append(parent_block.var(in_var_name)) grads.append(parent_block.var(in_var_name)) return params, grads def parent_block(self): prog = self.helper.main_program parent_idx = prog.current_block().parent_idx assert parent_idx >= 0 parent_block = prog.block(parent_idx) return parent_block def complete_op(self): main_program = self.helper.main_program current_block = main_program.current_block() parent_block = self.parent_block() parent_block.append_op( type='listen_and_serv', inputs={"X": self.inputs}, outputs={}, attrs={ 'endpoint': self.endpoint, 'Fanin': self.fan_in, 'optimize_blocks': [ current_block ], # did not support multiple optimize blocks in layers 'sync_mode': True, # did not support async now in layers 'grad_to_block_id': [""] }) def Send(endpoints, send_vars, dummy_output=None, sync=True): """ Send variables to the server side, and get vars from server side when server have finished running server side program. 
Args: endpoints (str): comma seperated IP:PORT pairs in the order of send_vars to send send_vars (list): variables to send to server sync (bool): whether to wait the request finish """ assert (type(send_vars) == list) if dummy_output is None: dummy_output = [] elif isinstance(dummy_output, Variable): dummy_output = [dummy_output] assert (type(dummy_output) == list) epmap = endpoints.split(",") endpoints = list(set(epmap)) helper = LayerHelper("Send", **locals()) rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName() helper.append_op( type="send", inputs={"X": send_vars}, outputs={"Out": dummy_output}, attrs={ "endpoints": endpoints, "epmap": epmap, rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC }) if sync: helper.append_op( type="send_barrier", inputs={"X": dummy_output}, outputs={"Out": []}, attrs={"endpoints": endpoints}) def Recv(endpoints, get_vars, dummy_input=None, sync=True): """ Receive variables from server side Args: endpoints (str): comma seperated IP:PORT pairs in the order of send_vars to send get_vars (list): vars to get from server after send completes. sync (bool): whether to wait the request finish Returns: list: list of received variables """ assert (type(get_vars) == list) if dummy_input is None: dummy_input = [] elif isinstance(dummy_input, Variable): dummy_input = [dummy_input] assert (type(dummy_input) == list) epmap = endpoints.split(",") endpoints = list(set(epmap)) helper = LayerHelper("Recv", **locals()) helper.append_op( type="recv", inputs={"X": dummy_input}, outputs={"Out": get_vars}, attrs={"endpoints": endpoints, "epmap": epmap}) if sync: helper.append_op( type="fetch_barrier", outputs={"Out": get_vars}, attrs={"endpoints": endpoints}) return get_vars def monkey_patch_reader_methods(reader): def __get_reader__(): scope = global_scope() var = scope.find_var(reader.name) return var.get_reader() def reset(): return __get_reader__().reset() reader.reset = reset reader.stop_gradient = True reader.persistable = True return reader def _copy_reader_var_(block, var): new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER) new_var.desc.set_shapes(var.desc.shapes()) new_var.desc.set_dtypes(var.desc.dtypes()) new_var.desc.set_lod_levels(var.desc.lod_levels()) new_var.persistable = True return new_var def _copy_reader_create_op_(block, op): input_param_names = op.input_names new_input_map = {} for param_name in input_param_names: new_input_map[param_name] = [] arg_names = op.input(param_name) for arg_name in arg_names: new_input_map[param_name].append(block.var(arg_name)) output_param_names = op.output_names new_output_map = {} for param_name in output_param_names: new_output_map[param_name] = [] arg_names = op.output(param_name) for arg_name in arg_names: new_output_map[param_name].append(block.var(arg_name)) new_op = block.append_op( type=op.type, inputs=new_input_map, outputs=new_output_map, attrs=op.all_attrs()) return new_op @templatedoc(op_type='create_recordio_file_reader') def open_recordio_file(filename, shapes, lod_levels, dtypes, pass_num=1, for_parallel=True): """ ${comment} Args: filename(${filename_type}): ${filename_comment}. shapes(list): List of tuples which declaring data shapes. lod_levels(${lod_levels_type}): ${lod_levels_comment}. dtypes(list): List of strs which declaring data type. pass_num(int): Number of passes to run. for_parallel(Bool): Set it as True if you are going to run subsequent operators in parallel. Returns: ${out_comment}. 
Examples: >>> import paddle.fluid as fluid >>> reader = fluid.layers.io.open_recordio_file( >>> filename='./data.recordio', >>> shapes=[(3,224,224), (1)], >>> lod_levels=[0, 0], >>> dtypes=['float32', 'int64']) >>> # Via the reader, we can use 'read_file' layer to get data: >>> image, label = fluid.layers.io.read_file(reader) """ dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] shape_concat = [] ranks = [] for shape in shapes: shape_concat.extend(shape) ranks.append(len(shape)) var_name = unique_name('open_recordio_file') startup_blk = default_startup_program().current_block() startup_var = startup_blk.create_var(name=var_name) startup_blk.append_op( type='create_recordio_file_reader', outputs={'Out': [startup_var]}, attrs={ 'shape_concat': shape_concat, 'lod_levels': lod_levels, 'filename': filename, 'ranks': ranks }) startup_var.desc.set_dtypes(dtypes) startup_var.persistable = True main_prog_var = _copy_reader_var_(default_main_program().current_block(), startup_var) if pass_num > 1: main_prog_var = multi_pass(reader=main_prog_var, pass_num=pass_num) return monkey_patch_reader_methods(main_prog_var) def random_data_generator(low, high, shapes, lod_levels, for_parallel=True): """ Create a uniform random data generator This layer returns a Reader Variable. Instead of opening a file and reading data from it, this Reader Variable generates float uniform random data by itself. It can be used as a dummy reader to test a network without opening a real file. Args: low(float): The lower bound of data's uniform distribution. high(float): The upper bound of data's uniform distribution. shapes(list): List of tuples which declaring data shapes. lod_levels(list): List of ints which declaring data lod_level. for_parallel(Bool): Set it as True if you are going to run subsequent operators in parallel. Returns: Variable: A Reader Variable from which we can get random data. Examples: .. 
code-block:: python reader = fluid.layers.random_data_generator( low=0.0, high=1.0, shapes=[[3,224,224], [1]], lod_levels=[0, 0]) # Via the reader, we can use 'read_file' layer to get data: image, label = fluid.layers.read_file(reader) """ dtypes = [core.VarDesc.VarType.FP32] * len(shapes) shape_concat = [] ranks = [] for shape in shapes: shape_concat.extend(shape) ranks.append(len(shape)) var_name = unique_name('random_data_generator') startup_blk = default_startup_program().current_block() startup_var = startup_blk.create_var(name=var_name) startup_blk.append_op( type='create_random_data_generator', outputs={'Out': [startup_var]}, attrs={ 'low': low, 'high': high, 'shape_concat': shape_concat, 'lod_levels': lod_levels, 'ranks': ranks }) startup_var.desc.set_dtypes(dtypes) startup_var.persistable = True main_prog_var = _copy_reader_var_(default_main_program().current_block(), startup_var) return monkey_patch_reader_methods(main_prog_var) def _py_reader(capacity, shapes, dtypes, lod_levels=None, name=None, use_double_buffer=True, feed_list=None): if feed_list is not None: if not isinstance(feed_list, list): raise TypeError("feed_list should be a list of Variable" " instead of " + str(type(feed_list))) lod_levels = [] dtypes = [] shape_concat = [] ranks = [] shapes = [] for feed_data in feed_list: dtypes.append(feed_data.dtype) shape_concat.extend(feed_data.shape) ranks.append(len(feed_data.shape)) shapes.append(feed_data.shape) lod_levels.append(feed_data.lod_level) else: dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] shape_concat = [] ranks = [] for shape in shapes: shape_concat.extend(shape) ranks.append(len(shape)) if lod_levels is None: lod_levels = [0] * len(shapes) if name is None: queue_name = unique_name('lod_tensor_blocking_queue') reader_name = unique_name('create_py_reader') double_buffer_name = unique_name('double_buffer') else: queue_name = "_".join([name, "queue"]) reader_name = "_".join([name, "reader"]) double_buffer_name = "_".join([name, "double_buffer"]) var = global_scope().var(queue_name) feed_queue = core.init_lod_tensor_blocking_queue(var, capacity) startup_blk = default_startup_program().current_block() startup_var = startup_blk.create_var(name=reader_name) startup_blk.append_op( type='create_py_reader', inputs={'blocking_queue': [queue_name]}, outputs={'Out': [startup_var]}, attrs={ 'shape_concat': shape_concat, 'lod_levels': lod_levels, 'ranks': ranks }) startup_var.desc.set_dtypes(dtypes) startup_var.persistable = True main_prog_var = _copy_reader_var_(default_main_program().current_block(), startup_var) reader = monkey_patch_reader_methods(main_prog_var) if use_double_buffer: double_buffer_reader = double_buffer(reader, name=double_buffer_name) # we return a double buffer reader. However, the reset method comes from # py_reader. 
double_buffer_reader.reset = reader.reset reader = double_buffer_reader # monkey patch py_reader special methods reader.queue = feed_queue current_reset_method = reader.reset reader.thread = None reader.tensor_provider = None reader.exited = False def start_provide_thread(func): def __provider_thread__(): try: for tensors in func(): array = core.LoDTensorArray() for item in tensors: if not isinstance(item, core.LoDTensor): tmp = core.LoDTensor() tmp.set(item, core.CPUPlace()) item = tmp array.append(item) if reader.exited: break feed_queue.push(array) if reader.exited: break feed_queue.close() except Exception as ex: feed_queue.close() raise ex reader.thread = threading.Thread(target=__provider_thread__) reader.thread.daemon = True reader.thread.start() def __set_tensor_provider__(func): reader.tensor_provider = func def __set_paddle_reader__(paddle_reader): with program_guard(Program(), Program()): actual_feed_list = feed_list if actual_feed_list is None: actual_feed_list = [] counter = 0 for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels): name = str(counter) actual_feed_list.append( data( name=name, dtype=dtype, shape=shape, lod_level=lod_level)) counter += 1 data_names = [feed_data.name for feed_data in actual_feed_list] feeder = DataFeeder( feed_list=actual_feed_list, place=core.CPUPlace()) paddle_reader = feeder.decorate_reader( paddle_reader, multi_devices=False) def __tensor_provider__(): for slots in paddle_reader(): yield [slots[data_name] for data_name in data_names] __set_tensor_provider__(__tensor_provider__) def __reset__(): current_reset_method() if reader.thread is not None and reader.tensor_provider is not None: reader.exited = True reader.thread.join() reader.exited = False def __start__(): start_provide_thread(reader.tensor_provider) reader.reset = __reset__ reader.decorate_tensor_provider = __set_tensor_provider__ reader.decorate_paddle_reader = __set_paddle_reader__ reader.decorate_batch_generator = __set_tensor_provider__ reader.decorate_sample_list_generator = __set_paddle_reader__ reader.start = __start__ return reader def py_reader(capacity, shapes, dtypes, lod_levels=None, name=None, use_double_buffer=True): """ Create a Python reader for data feeding in Python This layer returns a Reader Variable. The Reader provides :code:`decorate_paddle_reader()` and :code:`decorate_tensor_provider()` to set a Python generator as the data source in Python side. When :code:`Executor::Run()` is invoked in C++ side, the data from the generator would be read automatically. Unlike :code:`DataFeeder.feed()`, the data reading process and :code:`Executor::Run()` process can run in parallel using :code:`py_reader`. The :code:`start()` method of the Reader should be called when each pass begins, while the :code:`reset()` method should be called when the pass ends and :code:`fluid.core.EOFException` raises. Note that :code:`Program.clone()` method cannot clone :code:`py_reader`. Args: capacity(int): The buffer capacity maintained by :code:`py_reader`. shapes(list|tuple): List of tuples which declaring data shapes. dtypes(list|tuple): List of strs which declaring data type. lod_levels(list|tuple): List of ints which declaring data lod_level. name(basestring): The prefix Python queue name and Reader name. None will be generated automatically. use_double_buffer(bool): Whether use double buffer or not. Returns: Variable: A Reader from which we can get feeding data. Examples: 1. 
The basic usage of :code:`py_reader` is as follows: >>> import paddle.fluid as fluid >>> import paddle.dataset.mnist as mnist >>> >>> reader = fluid.layers.py_reader(capacity=64, >>> shapes=[(-1,3,224,224), (-1,1)], >>> dtypes=['float32', 'int64']) >>> reader.decorate_paddle_reader( >>> paddle.reader.shuffle(paddle.batch(mnist.train()) >>> >>> img, label = fluid.layers.read_file(reader) >>> loss = network(img, label) # some network definition >>> >>> fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) >>> >>> exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) >>> for epoch_id in range(10): >>> reader.start() >>> try: >>> while True: >>> exe.run(fetch_list=[loss.name]) >>> except fluid.core.EOFException: >>> reader.reset() >>> >>> ... >>> >>> fluid.io.save_inference_model(dirname='./model', feeded_var_names=[img, label], >>> target_vars=[loss], executor=fluid.Executor(fluid.CUDAPlace(0))) 2. When training and testing are both performed, two different :code:`py_reader` should be created with different names, e.g.: >>> import paddle.fluid as fluid >>> import paddle.dataset.mnist as mnist >>> >>> def network(reader): >>> img, label = fluid.layers.read_file(reader) >>> # Here, we omitted the network definition >>> return loss >>> >>> train_reader = fluid.layers.py_reader(capacity=64, >>> shapes=[(-1,3,224,224), (-1,1)], >>> dtypes=['float32', 'int64'], >>> name='train_reader') >>> train_reader.decorate_paddle_reader( >>> paddle.reader.shuffle(paddle.batch(mnist.train()) >>> >>> test_reader = fluid.layers.py_reader(capacity=32, >>> shapes=[(-1,3,224,224), (-1,1)], >>> dtypes=['float32', 'int64'], >>> name='test_reader') >>> test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512)) >>> >>> # Create train_main_prog and train_startup_prog >>> train_main_prog = fluid.Program() >>> train_startup_prog = fluid.Program() >>> with fluid.program_guard(train_main_prog, train_startup_prog): >>> # Use fluid.unique_name.guard() to share parameters with test program >>> with fluid.unique_name.guard(): >>> train_loss = network(train_reader) # some network definition >>> adam = fluid.optimizer.Adam(learning_rate=0.01) >>> adam.minimize(loss) >>> >>> # Create test_main_prog and test_startup_prog >>> test_main_prog = fluid.Program() >>> test_startup_prog = fluid.Program() >>> with fluid.program_guard(test_main_prog, test_startup_prog): >>> # Use fluid.unique_name.guard() to share parameters with train program >>> with fluid.unique_name.guard(): >>> test_loss = network(test_reader) >>> >>> fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog) >>> fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog) >>> >>> train_exe = fluid.ParallelExecutor(use_cuda=True, >>> loss_name=train_loss.name, main_program=train_main_prog) >>> test_exe = fluid.ParallelExecutor(use_cuda=True, >>> loss_name=test_loss.name, main_program=test_main_prog) >>> for epoch_id in range(10): >>> train_reader.start() >>> try: >>> while True: >>> train_exe.run(fetch_list=[train_loss.name]) >>> except fluid.core.EOFException: >>> train_reader.reset() >>> >>> test_reader.start() >>> try: >>> while True: >>> test_exe.run(fetch_list=[test_loss.name]) >>> except fluid.core.EOFException: >>> test_reader.reset() """ return _py_reader( capacity=capacity, shapes=shapes, dtypes=dtypes, lod_levels=lod_levels, name=name, use_double_buffer=use_double_buffer) def create_py_reader_by_data(capacity, feed_list, name=None, use_double_buffer=True): """ Create a Python reader for data feeding in Python This layer 
returns a Reader Variable. Works much like py_reader except that it's input is feed_list instead of shapes, dtypes and lod_levels Args: capacity(int): The buffer capacity maintained by :code:`py_reader`. feed_list(list(Variable)): The data feed list. name(basestring): The prefix Python queue name and Reader name. None will be generated automatically. use_double_buffer(bool): Whether use double buffer or not. Returns: Variable: A Reader from which we can get feeding data. Examples: 1. The basic usage of :code:`py_reader` is as follows: >>> import paddle.fluid as fluid >>> import paddle.dataset.mnist as mnist >>> >>> image = fluid.layers.data(name='image', shape=[3,224,224], dtypes='float32') >>> label = fluid.layers.data(name='label', shape=[1], dtypes='int64') >>> reader = fluid.layers.create_py_reader_by_data(capacity=64, feed_list=[image, label]) >>> reader.decorate_paddle_reader( >>> paddle.reader.shuffle(paddle.batch(mnist.train()) >>> >>> img, label = fluid.layers.read_file(reader) >>> loss = network(img, label) # some network definition >>> >>> fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) >>> >>> exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) >>> for epoch_id in range(10): >>> reader.start() >>> try: >>> while True: >>> exe.run(fetch_list=[loss.name]) >>> except fluid.core.EOFException: >>> reader.reset() """ return _py_reader( capacity=capacity, shapes=None, dtypes=None, lod_levels=None, name=name, use_double_buffer=use_double_buffer, feed_list=feed_list) def open_files(filenames, shapes, lod_levels, dtypes, thread_num=None, buffer_size=None, pass_num=1, is_test=None): """ Open files This layer takes a list of files to read from and returns a Reader Variable. Via the Reader Variable, we can get data from given files. All files must have name suffixs to indicate their formats, e.g., '*.recordio'. Args: filenames(list): The list of file names. shapes(list): List of tuples which declaring data shapes. lod_levels(list): List of ints which declaring data lod_level. dtypes(list): List of strs which declaring data type. thread_num(None): The number of thread to read files. Default: min(len(filenames), cpu_number). buffer_size(None): The buffer size of reader. Default: 3 * thread_num pass_num(int): Number of passes to run. is_test(bool|None): Whether `open_files` used for testing or not. If it is used for testing, the order of data generated is same as the file order. Otherwise, it is not guaranteed the order of data is same between every epoch. [Default: False]. Returns: Variable: A Reader Variable via which we can get file data. Examples: .. 
code-block:: python reader = fluid.layers.io.open_files(filenames=['./data1.recordio', './data2.recordio'], shapes=[(3,224,224), (1)], lod_levels=[0, 0], dtypes=['float32', 'int64']) # Via the reader, we can use 'read_file' layer to get data: image, label = fluid.layers.io.read_file(reader) """ if thread_num is None: thread_num = min(len(filenames), multiprocessing.cpu_count()) else: thread_num = int(thread_num) if buffer_size is None: buffer_size = 3 * thread_num else: buffer_size = int(buffer_size) if isinstance(filenames, six.string_types): filenames = [filenames] dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] shape_concat = [] ranks = [] for shape in shapes: shape_concat.extend(shape) ranks.append(len(shape)) multi_file_reader_name = unique_name('multi_file_reader') startup_blk = default_startup_program().current_block() startup_reader = startup_blk.create_var(name=multi_file_reader_name) attrs = { 'shape_concat': shape_concat, 'lod_levels': lod_levels, 'ranks': ranks, 'file_names': filenames, 'thread_num': thread_num, 'buffer_size': buffer_size } if is_test is not None: attrs['is_test'] = is_test startup_blk.append_op( type='open_files', outputs={'Out': [startup_reader]}, attrs=attrs) startup_reader.desc.set_dtypes(dtypes) startup_reader.persistable = True main_prog_reader = _copy_reader_var_(default_main_program().current_block(), startup_reader) if pass_num > 1: main_prog_reader = multi_pass( reader=main_prog_reader, pass_num=pass_num) return monkey_patch_reader_methods(main_prog_reader) def __create_shared_decorated_reader__(op_type, reader, attrs): var_name = unique_name(op_type) startup_blk = default_startup_program().current_block() startup_var = startup_blk.create_var(name=var_name) startop_op = startup_blk.append_op( type=op_type, inputs={'UnderlyingReader': reader}, outputs={'Out': [startup_var]}, attrs=attrs) startup_var.persistable = True main_prog_block = default_main_program().current_block() main_prog_var = _copy_reader_var_(main_prog_block, startup_var) _copy_reader_create_op_(main_prog_block, startop_op) return monkey_patch_reader_methods(main_prog_var) def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None): new_reader_name = name if name is not None else unique_name(op_type) main_blk = default_main_program().current_block() new_reader = main_blk.create_var(name=new_reader_name) main_blk.append_op( type=op_type, inputs={'UnderlyingReader': reader}, outputs={'Out': [new_reader]}, attrs=attrs) return monkey_patch_reader_methods(new_reader) def shuffle(reader, buffer_size): """ Creates a data reader whose data output is shuffled. Output from the iterator that created by original reader will be buffered into shuffle buffer, and then shuffled. The size of shuffle buffer is determined by argument buf_size. Args: reader(callable): the original reader whose output will be shuffled. buf_size(int): shuffle buffer size. Returns: callable: the new reader whose output is shuffled. """ return __create_unshared_decorated_reader__( 'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)}) def batch(reader, batch_size): """ This layer is a reader decorator. It takes a reader and adds 'batching' decoration on it. When reading with the result decorated reader, output data will be automatically organized to the form of batches. Args: reader(Variable): The reader to be decorated with 'batching'. batch_size(int): The batch size. Returns: Variable: The reader which has been decorated with 'batching'. Examples: .. 
code-block:: python raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio', './data2.recordio'], shapes=[(3,224,224), (1)], lod_levels=[0, 0], dtypes=['float32', 'int64'], thread_num=2, buffer_size=2) batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5) # If we read data with the raw_reader: # data = fluid.layers.read_file(raw_reader) # We can only get data instance by instance. # # However, if we read data with the batch_reader: # data = fluid.layers.read_file(batch_reader) # Each 5 adjacent instances will be automatically combined together # to become a batch. So what we get('data') is a batch data instead # of an instance. """ return __create_unshared_decorated_reader__( 'create_batch_reader', reader, {'batch_size': int(batch_size)}) def double_buffer(reader, place=None, name=None): """ Wrap a double buffer reader. The data will copy to target place with a double buffer queue. If the target place is None, the place that executor perform on will be used. Args: reader(Variable): the reader variable need to be wrapped. place(Place): the place of target data. Default is the sample place of executor perform. name(str): Variable name. None if the user does not care. Returns: wrapped reader with double buffer. Examples: >>> reader = fluid.layers.open_files(filenames=['somefile'], >>> shapes=[[-1, 784], [-1, 1]], >>> dtypes=['float32', 'int64']) >>> reader = fluid.layers.double_buffer(reader) >>> img, label = fluid.layers.read_file(reader) """ attrs = dict() if place is not None: attrs['place'] = str(place).upper() return __create_unshared_decorated_reader__( 'create_double_buffer_reader', reader, attrs, name=name) def multi_pass(reader, pass_num): return __create_shared_decorated_reader__( 'create_multi_pass_reader', reader, {'pass_num': int(pass_num)}) def read_file(reader): """ Execute the given reader and get data via it. A reader is also a Variable. It can be a raw reader generated by `fluid.layers.open_files()` or a decorated one generated by `fluid.layers.double_buffer()` and so on. Args: reader(Variable): The reader to execute. Returns: Tuple[Variable]: Data read via the given reader. Examples: .. code-block:: python data_file = fluid.layers.open_files( filenames=['mnist.recordio'], shapes=[(-1, 748), (-1, 1)], lod_levels=[0, 0], dtypes=["float32", "int64"]) data_file = fluid.layers.double_buffer( fluid.layers.batch(data_file, batch_size=64)) input, label = fluid.layers.read_file(data_file) """ helper = LayerHelper('read_file') out = [ helper.create_variable_for_type_inference( stop_gradient=True, dtype='float32') for _ in range(len(reader.desc.shapes())) ] helper.append_op( type='read', inputs={'Reader': [reader]}, outputs={'Out': out}) if len(out) == 1: return out[0] else: return out class Preprocessor(object): """ A block for data pre-processing in reader. Args: reader (Variable): A reader variable. name (str, default None): The name of the reader. Examples: .. 
code-block:: python preprocessor = fluid.layers.io.Preprocessor(reader=reader) with preprocessor.block(): img, lbl = preprocessor.inputs() img_out = img / 2 lbl_out = lbl + 1 preprocessor.outputs(img_out, lbl_out) data_file = fluid.layers.io.double_buffer(preprocessor()) """ BEFORE_SUB_BLOCK = 0 IN_SUB_BLOCK = 1 AFTER_SUB_BLOCK = 2 def __init__(self, reader, name=None): self.underlying_reader = reader new_reader_name = name if name is not None else unique_name( "create_custom_reader") self.main_prog = default_main_program() self.reader = self.main_prog.current_block().create_var( name=new_reader_name) self.sub_block = None self.source_var_names = None self.sink_var_names = None self.status = Preprocessor.BEFORE_SUB_BLOCK def _is_completed(self): return self.sub_block and self.source_var_names and self.sink_var_names @signature_safe_contextmanager def block(self): self.status = Preprocessor.IN_SUB_BLOCK self.sub_block = self.main_prog._create_block() yield self.main_prog._rollback() self.status = Preprocessor.AFTER_SUB_BLOCK if not self._is_completed(): raise RuntimeError( "The definition of preprocessor is incompleted! " "Please make sure that you have set input and output " "variables by invoking 'inputs' and 'outputs' in " "Preprocessor's sub-block.") def inputs(self): if self.status != Preprocessor.IN_SUB_BLOCK: raise RuntimeError( "Preprocessor.inputs() can only be invoked inside the sub-block." ) source_shapes = self.underlying_reader.desc.shapes() source_dtypes = self.underlying_reader.desc.dtypes() source_lod_levels = self.underlying_reader.desc.lod_levels() self.source_var_names = [ unique_name("preprocessor_source") for _ in six.moves.range(len(source_shapes)) ] source_vars = [] for var_name, shape, dtype, lod_level in zip( self.source_var_names, source_shapes, source_dtypes, source_lod_levels): source_vars.append(self.main_prog.current_block().create_var( name=var_name, shape=shape, dtype=dtype, lod_level=lod_level)) return source_vars def outputs(self, *outs): if self.status != Preprocessor.IN_SUB_BLOCK: raise RuntimeError( "Preprocessor.outputs() can only be invoked inside the sub-block." ) self.sink_var_names = [var.name for var in outs] def __call__(self, *args, **kwargs): if self.status != Preprocessor.AFTER_SUB_BLOCK: raise RuntimeError( "Preprocessor output can only be retrieved after rnn block.") self.main_prog.current_block().append_op( type="create_custom_reader", inputs={'UnderlyingReader': self.underlying_reader}, outputs={'Out': [self.reader]}, attrs={ "sub_block": self.sub_block, "source_var_names": self.source_var_names, "sink_var_names": self.sink_var_names }) return monkey_patch_reader_methods(self.reader) @templatedoc() def load(out, file_path, load_as_fp16=None): """ ${comment} >>> import paddle.fluid as fluid >>> tmp_tensor = fluid.layers.create_tensor(dtype='float32') >>> fluid.layers.load(tmp_tensor, "./tmp_tensor.bin") Args: out(${out_type}): ${out_comment}. file_path(${file_path_type}): ${file_path_comment}. load_as_fp16(${load_as_fp16_type}): ${load_as_fp16_comment}. Returns: None """ helper = LayerHelper("load", **locals()) attrs = {"file_path": file_path} if load_as_fp16 is not None: attrs['load_as_fp16'] = load_as_fp16 helper.append_op(type="load", inputs={}, output={"Out": out}, args=attrs)
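# A hedged sketch (hypothetical helper, not part of the public API) showing
# how the reader decorators defined above chain together.  The file name
# './flowers.recordio' and the shapes are illustrative assumptions only.
def _example_reader_pipeline():
    reader = open_files(
        filenames=['./flowers.recordio'],  # assumed to exist
        shapes=[(3, 224, 224), (1, )],
        lod_levels=[0, 0],
        dtypes=['float32', 'int64'])
    reader = shuffle(reader, buffer_size=128)
    reader = batch(reader, batch_size=32)
    reader = double_buffer(reader)
    # read_file() yields one variable per declared slot, here image and label.
    image, label = read_file(reader)
    return image, label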
''' monte carlo tool for simple strategy, more generally for use with any strategy that I want to back test... Engine takes a class instance, derived from a base class wih two methods initialise() onsimulation() aftersimulation() ontrial() finalise() ''' import unittest, datetime import numpy as np from BackTest import MonteCarloModel, MonteCarloEngine, Simulation import matplotlib.pyplot as plt from random import randint from scipy.optimize import fmin_powell from hashlib import md5 from time import localtime def Root2(r,verbose=True): ''' simple wrapper routine for solver. returns the energy level for a given R=r. the solver can use this method to minimise the energy by varying r as necessary. ''' if r <= 0: return 1e99 else: e = MonteCarloEngine(moduleName='MonteCarloHeadsTailsExample', className='SimpleHeadTailModel') losers = e.start(args={'wager_multiplier':r}) if verbose: print 'solving for r: ', r return losers class SimpleHeadTailModel(MonteCarloModel): ''' model wager previous bet success or not, impact to profit. ''' def toss(self): ''' 1 - win -1 - loss Each toss determines whether the position is successful or not. This way no need to keep track of a decision and an associated variable. Simply do I win or not. ''' coin_toss = randint(1,2) if coin_toss == 1: return 1 else: return -1 def initialise(self, context): self.name = 'My Simple Heads And Tails Model' # start with 10 USD bet self.wager= 100 self.wager_initial= 100 self.starting_pot = 1000 self.previous_value = 1 # default to 1 on first round self.simulations = 100 # MC simulation trials self.trials = 100 # subintervals self.r = np.zeros(shape=(self.simulations, self.trials), dtype=float) # matrix to hold all results self.pnl = np.zeros(shape=(self.simulations, self.trials), dtype=float) # matrix to hold all results print "simulations %d, trials %d starting pot %d " % (self.simulations, self.trials, self.starting_pot) # Tell the engine where to associate the data to security. 
context[self.name] = Simulation(self.simulations, self.trials, self.toss) self.fig = plt.figure() self.ax = self.fig.add_subplot(211) self.ax1 = self.fig.add_subplot(212) self.ax.autoscale_view(True,True,True) def onsimulation(self, model, simulation, engine): self.r[simulation,0] = 0 # assume starting pot here self.pnl[simulation,0] = self.starting_pot def aftersimulation(self, model, simulation, engine): self.ax.plot(np.arange(0, self.trials, 1), self.r[simulation]) self.ax1.plot(np.arange(0, self.trials, 1), self.pnl[simulation]) def reset_wager(self): self.wager = self.wager_initial def ontrial(self, model, simulation, trial, value, engine, args): ''' want to test some strategies for betting set wager for each bet if previous bet value : float sample from model ''' # if we lost last time then double up if self.previous_value == -1: if args.has_key('args'): self.wager = (self.wager*float(args['args']['wager_multiplier'][0])) else: self.wager = (self.wager*0.1) # keep track of coin toss paths self.r[simulation,trial] = self.r[simulation,trial-1] + value if args.has_key('args'): self.r0 = float(args['args']['wager_multiplier'][0]) else: self.r0 =0.1 # if we won, add the wager # else subtract the wager if self.pnl[simulation,trial-1] > 0: if value == 1 : self.pnl[simulation,trial] = self.pnl[simulation,trial-1] + self.wager else: self.pnl[simulation,trial] = self.pnl[simulation,trial-1] - self.wager else: # no bet to be made here self.pnl[simulation,trial] = self.pnl[simulation,trial-1] # always reset wager self.reset_wager() # keep track of the previous value for next time around self.previous_value = value def add_prefix(self, filename): from hashlib import md5 from time import localtime return "%s_%s"%(md5(str(localtime())).hexdigest(), filename) def finalise(self, model, engine): ''' returns the value that we are trying to minimise, here the number of losers. ''' # what is our survivability number_of_losers = len([f for f in self.pnl if f[len(f)-1]<=0]) number_of_survivors = len([f for f in self.pnl if f[len(f)-1]>0]) number_of_participants = len(self.pnl) print "participants [%d] survivors [%2.1f%%] losers [%2.1f%%] weight [%2.6f] "% (number_of_participants, float(number_of_survivors)/float(number_of_participants)*100, float(number_of_losers)/float(number_of_participants)*100, self.r0) plt.title('Simulations %d Steps %d' % (int(self.simulations), int(self.trials))) plt.xlabel('steps') plt.ylabel('profit and loss') plt.savefig("%s_%s"%(md5(str(localtime())).hexdigest(), 'model')) return float(number_of_losers)/float(number_of_participants)*100 class TestNode(unittest.TestCase): def setUp(self): pass def test_engine(self): ''' example of how to launch the MontoCarloTestEngine this is modelled on the quantopian style interface. ''' e = MonteCarloEngine(moduleName='MonteCarloHeadsTailsExample', className='SimpleHeadTailModel') e.start() def test_minimise(self): print '#################################' print '# Test Equilibrium Loss Wager' print '#################################' wager_multiplier=fmin_powell(Root2, x0=1., maxiter=20) print "highest survivability following loss, multiply wager by %2.4f %% "%(wager_multiplier*100) if __name__ == '__main__': unittest.main() ================================================================== == BackTest module ================================================================== ''' back testing tool for prediction strategy, more generally for use with any strategy that I want to back test... 
Engine takes a class instance, derived from a base class wih twomethods initialise() ondata() ''' import unittest, time, datetime from pandas import DataFrame import numpy as np from pylab import show import random from abc import ABCMeta, abstractmethod import pandas as pd # # Simple python component wrapper # class Component(object): __metaclass__ = ABCMeta @abstractmethod def start(self): raise NotImplementedError("Should implement intialise()!") class BackTestModel(object): __metaclass__ = ABCMeta @abstractmethod def initialise(self, context): raise NotImplementedError("Should implement intialise()!") @abstractmethod def ondata(self, sid, data): raise NotImplementedError("Should implement ondata()!") class MonteCarloModel(object): __metaclass__ = ABCMeta @abstractmethod def initialise(self, context): raise NotImplementedError("Should implement intialise()!") @abstractmethod def ontrial(self, model, simulation, trial, value, engine, args): raise NotImplementedError("Should implement ontrial()!") def onsimulation(self, model, simulation, engine): raise NotImplementedError("Should implement onsimulation()!") def aftersimulation(self, model, simulation, engine): raise NotImplementedError("Should implement aftersimulation()!") def finalise(self, model, engine): raise NotImplementedError("Should implement finalise()!") class Simulation(object): ''' simple wrapper that describes a simulation ''' def __init__(self, n, m, func): ''' n integer number of simulations m integer number of trials per simulation func class method or function used to sample the value ''' self.number_of_simulations = n self.number_of_trials = m self.func = func @property def sample(self): return self.func class MonteCarloEngineException(Exception): pass class MonteCarloEngine(Component): ''' twist on the Engine that will take a different type of context, this time a Simulation class instance. 
This will be called class ExampleModel(MonteCarloModel): def initialise(self, context): context['My Simple Model'] = Simulation(10, 100, standard_normal()) print 'setting My Simple Model' def ondata(self, model, simulation, trial, value, engine): print simulation, trial, value, engine ''' def __init__(self, moduleName, className): self.context = {} self.obj = self.__generate__(moduleName, className) #print isinstance(self.obj, MonteCarloModel), hasattr(self.obj, 'initialise') if isinstance(self.obj, MonteCarloModel): if hasattr(self.obj, 'initialise'): self.obj.initialise(self.context) print 'calling initialise' else: print 'no initialise' # # TODO: load data from somewhere for securities in context # print self.context def __generate__(self, module_name, class_name): module = __import__(module_name) class_ = getattr(module, class_name) instance = class_() #print instance return instance def start(self, **args): #print 'starting...', self.obj, self.context, args if self.obj is None: raise MonteCarloEngineException('No engine exists') for name, model in self.context.items(): for simulation in np.arange(0, model.number_of_simulations): # number of MC simulations # call to signal new simulation if hasattr(self.obj, 'onsimulation'): self.obj.onsimulation(model, simulation, self) for trial in np.arange(1,model.number_of_trials): #trials per simulation value = model.sample() if hasattr(self.obj, 'ontrial'): self.obj.ontrial(model, simulation, trial, value, self, args) # call to signal after simulation if hasattr(self.obj, 'aftersimulation'): self.obj.aftersimulation(model, simulation, self) #self.post_ondata(k, index2, value) if hasattr(self.obj, 'finalise'): return self.obj.finalise(model, self) ''' class ExampleModel(BackTestModel): def initialise(self, context): context['ARM.L'] = 'Book1.csv' print 'setting ARM.L' def ondata(self, sid, data): print sid, data ''' class Engine(Component): ''' responsible for handling instances of the back test models ''' def __init__(self, moduleName, className): self.context = {} self.data = {} self.orders = {} self.positions = {} self.pnl = {} self.risk = {} self.obj = self.__generate__(moduleName, className) #print isinstance(self.obj, BackTestModel), hasattr(self.obj, 'initialise') if isinstance(self.obj, BackTestModel): if hasattr(self.obj, 'initialise'): self.obj.initialise(self.context) #print 'calling initialise' else: print 'no initialise' # # TODO: load data from somewhere for securities in context # #print self.context for k in self.context.keys(): #print k t = time.clock() myfilename = self.context[k] data = DataFrame.from_csv(myfilename,header=0,index_col=0,parse_dates=True) print 'load data', time.clock()-t self.data[k] = data self.positions[k] = 0 self.pnl[k] = 0 self.risk[k] = 0 def order(self, sid, value): # queue order to be processed self.orders[sid] = (value, False) @property def position(self): return self.positions def __generate__(self, module_name, class_name): module = __import__(module_name) class_ = getattr(module, class_name) instance = class_() print instance return instance def post_ondata(self, sid, index, value): # process orders for k in self.context.keys(): if hasattr(self, 'orders'): if len(self.orders) == 0: print 'no orders' break m_size, m_processed = self.orders[k] # check we have not processed this order already. 
                if not m_processed:
                    if not (self.positions[k] + m_size <= 0):
                        self.positions[k] += m_size
                        print 'ordering %d of %s, total %d' % (m_size, k, self.positions[k])
                        self.orders[k] = None
                    else:
                        print 'no short selling'

        # handle pnl and risk
        # tick() charts

    def start(self):
        print 'starting...', self.obj, self.context
        if not self.obj is None:
            for k in self.context:
                for i, (index, value) in enumerate(self.data[k]['value'].iteritems()):
                    index2 = datetime.datetime(pd.to_datetime(index).year,
                                               pd.to_datetime(index).month,
                                               pd.to_datetime(index).day)
                    self.obj.ondata(k, index2, value,
                                    self.data[k]['value'][0:i], self)
                    self.post_ondata(k, index2, value)
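# A hedged minimal sketch of the MonteCarloEngine contract described above:
# initialise() registers a Simulation(n_simulations, n_trials, sample_func)
# in the context, after which the engine drives onsimulation()/ontrial()/
# aftersimulation() and finally finalise().  'CoinWalkModel' is an
# illustrative name; like the examples, it would be launched via
# MonteCarloEngine(moduleName=<its module>, className='CoinWalkModel').start().
class CoinWalkModel(MonteCarloModel):

    def initialise(self, context):
        self.name = 'Coin Walk'
        self.total = 0
        # 10 simulations of 100 trials, each trial sampling +1 or -1.
        context[self.name] = Simulation(10, 100, lambda: random.choice([-1, 1]))

    def onsimulation(self, model, simulation, engine):
        self.total = 0

    def ontrial(self, model, simulation, trial, value, engine, args):
        self.total += value

    def aftersimulation(self, model, simulation, engine):
        pass

    def finalise(self, model, engine):
        return self.total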
import json import requests from bs4 import BeautifulSoup from espncricinfo.exceptions import MatchNotFoundError, NoScorecardError class Match(object): def __init__(self, match_id): self.match_id = match_id self.match_url = "https://www.espncricinfo.com/matches/engine/match/{0}.html".format(str(match_id)) self.json_url = "https://www.espncricinfo.com/matches/engine/match/{0}.json".format(str(match_id)) self.json = self.get_json() self.html = self.get_html() self.comms_json = self.get_comms_json() if self.json: self.__unicode__ = self._description() self.status = self._status() self.match_class = self._match_class() self.season = self._season() self.description = self._description() self.legacy_scorecard_url = self._legacy_scorecard_url() self.series = self._series() self.series_name = self._series_name() self.series_id = self._series_id() self.event_url = "http://core.espnuk.org/v2/sports/cricket/leagues/{0}/events/{1}".format(str(self.series_id), str(match_id)) self.details_url = self._details_url() self.officials = self._officials() self.current_summary = self._current_summary() self.present_datetime_local = self._present_datetime_local() self.present_datetime_gmt = self._present_datetime_gmt() self.start_datetime_local = self._start_datetime_local() self.start_datetime_gmt = self._start_datetime_gmt() self.cancelled_match = self._cancelled_match() self.rain_rule = self._rain_rule() self.date = self._date() self.continent = self._continent() self.town_area = self._town_area() self.town_name = self._town_name() self.town_id = self._town_id() self.weather_location_code = self._weather_location_code() self.match_title = self._match_title() self.result = self._result() self.ground_id = self._ground_id() self.ground_name = self._ground_name() self.lighting = self._lighting() self.followon = self._followon() self.scheduled_overs = self._scheduled_overs() self.innings_list = self._innings_list() self.innings = self._innings() self.latest_batting = self._latest_batting() self.latest_bowling = self._latest_bowling() self.latest_innings = self._latest_innings() self.latest_innings_fow = self._latest_innings_fow() self.team_1 = self._team_1() self.team_1_id = self._team_1_id() self.team_1_abbreviation = self._team_1_abbreviation() self.team_1_players = self._team_1_players() self.team_1_innings = self._team_1_innings() self.team_1_run_rate = self._team_1_run_rate() self.team_1_overs_batted = self._team_1_overs_batted() self.team_1_batting_result = self._team_1_batting_result() self.team_2 = self._team_2() self.team_2_id = self._team_2_id() self.team_2_abbreviation = self._team_2_abbreviation() self.team_2_players = self._team_2_players() self.team_2_innings = self._team_2_innings() self.team_2_run_rate = self._team_2_run_rate() self.team_2_overs_batted = self._team_2_overs_batted() self.team_2_batting_result = self._team_2_batting_result() if not self.status == 'dormant': self.home_team = self._home_team() self.batting_first = self._batting_first() self.match_winner = self._match_winner() self.toss_winner = self._toss_winner() self.toss_decision = self._toss_decision() self.toss_decision_name = self._toss_decision_name() self.toss_choice_team_id = self._toss_choice_team_id() self.toss_winner_team_id = self._toss_winner_team_id() self.espn_api_url = self._espn_api_url() # from comms_json self.rosters = self._rosters() self.all_innings = self._all_innings() def __str__(self): return self.description def __repr__(self): return (f'{self.__class__.__name__}('f'{self.match_id!r})') def get_json(self): r 
= requests.get(self.json_url) if r.status_code == 404: raise MatchNotFoundError elif 'Scorecard not yet available' in r.text: raise NoScorecardError else: return r.json() def get_html(self): r = requests.get(self.match_url) if r.status_code == 404: raise MatchNotFoundError else: return BeautifulSoup(r.text, 'html.parser') def match_json(self): return self.json['match'] def innings_comms_url(self, innings=1, page=1): return f"https://hsapi.espncricinfo.com/v1/pages/match/comments?lang=en&leagueId={self.series_id}&eventId={self.match_id}&period={innings}&page={page}&filter=full&liveTest=false" def get_comms_json(self): try: text = self.html.find_all('script')[15].string return json.loads(text) except: return None def _espn_api_url(self): return "https://site.api.espn.com/apis/site/v2/sports/cricket/{0}/summary?event={1}".format(self.series_id, self.match_id) def _legacy_scorecard_url(self): return "https://static.espncricinfo.com"+self.match_json()['legacy_url'] def _details_url(self, page=1, number=1000): return self.event_url+"/competitions/{0}/details?page_size={1}&page={2}".format(str(self.match_id), str(number), str(page)) def __str__(self): return self.json['description'] def __unicode__(self): return self.json['description'] def _status(self): return self.match_json()['match_status'] def _match_class(self): if self.match_json()['international_class_card'] != "": return self.match_json()['international_class_card'] else: return self.match_json()['general_class_card'] def _season(self): return self.match_json()['season'] def _description(self): return self.json['description'] def _series(self): return self.json['series'] def _series_name(self): try: return self.json['series'][-1]['series_name'] except: return None def _series_id(self): return self.json['series'][-1]['core_recreation_id'] def _officials(self): return self.json['official'] # live matches only def _current_summary(self): return self.match_json().get('current_summary') def _present_datetime_local(self): return self.match_json()['present_datetime_local'] def _present_datetime_gmt(self): return self.match_json()['present_datetime_gmt'] def _start_datetime_local(self): return self.match_json()['start_datetime_local'] def _start_datetime_gmt(self): return self.match_json()['start_datetime_gmt'] def _cancelled_match(self): if self.match_json()['cancelled_match'] == 'N': return False else: return True def _rain_rule(self): if self.match_json().get('rain_rule') == "1": return self.match_json()['rain_rule_name'] else: return None def _date(self): return self.match_json()['start_date_raw'] def _continent(self): return self.match_json().get('continent_name') def _town_area(self): return self.match_json().get('town_area') def _town_name(self): return self.match_json().get('town_name') def _town_id(self): return self.match_json().get('town_id') def _weather_location_code(self): return self.match_json().get('weather_location_code') def _match_title(self): return self.match_json()['cms_match_title'] def _result(self): return self.json['live']['status'] def _ground_id(self): return self.match_json()['ground_id'] def _ground_name(self): return self.match_json()['ground_name'] def _lighting(self): return self.match_json()['floodlit_name'] def _followon(self): if self.match_json().get('followon') == '1': return True else: return False def _scheduled_overs(self): try: return int(self.match_json()['scheduled_overs']) except: return None def _innings_list(self): try: return self.json['centre']['common']['innings_list'] except: return None def 
_innings(self): return self.json['innings'] def _latest_batting(self): try: return self.json['centre']['common']['batting'] except: return None def _latest_bowling(self): try: return self.json['centre']['common']['bowling'] except: return None def _latest_innings(self): try: return self.json['centre']['common']['innings'] except: return None def _latest_innings_fow(self): return self.json['centre'].get('fow') def _team_1(self): return self.json['team'][0] def _team_1_id(self): return self._team_1()['team_id'] def _team_1_abbreviation(self): return self._team_1()['team_abbreviation'] def _team_1_players(self): return self._team_1().get('player', []) def _team_1_innings(self): try: return [inn for inn in self.json['innings'] if inn['batting_team_id'] == self._team_1_id()][0] except: return None def _team_1_run_rate(self): try: return float(self._team_1_innings()['run_rate']) except: return None def _team_1_overs_batted(self): try: return float(self._team_1_innings()['overs']) except: return None def _team_1_batting_result(self): try: return self._team_1_innings()['event_name'] except: return None def _team_2(self): return self.json['team'][1] def _team_2_id(self): return self._team_2()['team_id'] def _team_2_abbreviation(self): return self._team_2()['team_abbreviation'] def _team_2_players(self): return self._team_2().get('player', []) def _team_2_innings(self): try: return [inn for inn in self.json['innings'] if inn['batting_team_id'] == self._team_2_id()][0] except: return None def _team_2_run_rate(self): try: return float(self._team_2_innings()['run_rate']) except: return None def _team_2_overs_batted(self): try: return float(self._team_2_innings()['overs']) except: return None def _team_2_batting_result(self): try: return self._team_2_innings()['event_name'] except: return None def _home_team(self): if self._team_1_id() == self.match_json()['home_team_id']: return self._team_1_abbreviation() else: return self._team_2_abbreviation() def _batting_first(self): if self._team_1_id() == self.match_json()['batting_first_team_id']: return self._team_1_abbreviation() else: return self._team_2_abbreviation() def _match_winner(self): if self._team_1_id() == self.match_json()['winner_team_id']: return self._team_1_abbreviation() else: return self._team_2_abbreviation() def _toss_winner(self): if self._team_1_id() == self.match_json()['toss_winner_team_id']: return self._team_1_id() else: return self._team_2_id() def _toss_decision(self): if self.match_json()['toss_decision'] == '' and len(self.innings) > 0: if self.innings[0]['batting_team_id'] == self.toss_winner: decision = '1' else: decision = '2' else: decision = self.match_json()['toss_decision'] return decision def _toss_decision_name(self): if self.match_json()['toss_decision_name'] == '' and len(self.innings) > 0: if self.innings[0]['batting_team_id'] == self.toss_winner: decision_name = 'bat' else: decision_name = 'bowl' else: decision_name = self.match_json()['toss_decision_name'] return decision_name def _toss_choice_team_id(self): return self.match_json()['toss_choice_team_id'] def _toss_winner_team_id(self): return self.match_json()['toss_winner_team_id'] # comms_json methods def _rosters(self): try: return self.comms_json['props']['pageProps']['data']['pageData']['content']['matchPlayers'] except: return None def _all_innings(self): try: return self.comms_json['props']['pageProps']['data']['pageData']['content']['scorecard']['innings'] except: return self.json['innings'] def batsmen(self, innings): try: return 
self.comms_json['props']['pageProps']['data']['pageData']['content']['scorecard']['innings'][str(innings)]['inningBatsmen'] except: return None def bowlers(self, innings): try: return self.comms_json['props']['pageProps']['data']['pageData']['content']['scorecard']['innings'][str(innings)]['inningBowlers'] except: return None def extras(self, innings): try: return self.comms_json['props']['pageProps']['data']['pageData']['content']['scorecard']['innings'][str(innings)]['extras'] except: return None def fows(self, innings): try: return self.comms_json['props']['pageProps']['data']['pageData']['content']['scorecard']['innings'][str(innings)]['inningFallOfWickets'] except: return None @staticmethod def get_recent_matches(date=None): if date: url = "https://www.espncricinfo.com/ci/engine/match/index.html?date=%s;view=week" % date else: url = "https://www.espncricinfo.com/ci/engine/match/index.html?view=week" r = requests.get(url) soup = BeautifulSoup(r.text, 'html.parser') return [x['href'].split('/',4)[4].split('.')[0] for x in soup.findAll('a', href=True, text='Scorecard')]
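
# Illustrative usage sketch: Match resolves most of its attributes from the
# ESPNcricinfo JSON feed at construction time, so typical use is construction
# by id followed by attribute access. The id below is a placeholder and a
# reachable ESPNcricinfo API is assumed; this block is not part of the library.
if __name__ == "__main__":
    demo_match_id = "1000851"  # placeholder id, substitute a real ESPNcricinfo match id
    match = Match(demo_match_id)
    print(match.description)                                  # fixture description
    print(match.status, "-", match.result)                    # match status and result string
    print(match.team_1_abbreviation, "v", match.team_2_abbreviation)
    print(Match.get_recent_matches()[:5])                     # ids scraped from the weekly index page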
"""Snuggs are s-expressions for Numpy.""" from collections import OrderedDict import functools import itertools import operator import re import sys from pyparsing import ( alphanums, ZeroOrMore, nums, oneOf, Word, Literal, Combine, QuotedString, ParseException, Forward, Group, CaselessLiteral, Optional, alphas, OneOrMore, ParseResults, pyparsing_common) import numpy __all__ = ['eval'] __version__ = "1.4.7" # Python 2-3 compatibility string_types = (str,) if sys.version_info[0] >= 3 else (basestring,) # flake8: noqa class Context(object): def __init__(self): self._data = OrderedDict() def add(self, name, val): self._data[name] = val def get(self, name): return self._data[name] def lookup(self, index, subindex=None): s = list(self._data.values())[int(index) - 1] if subindex: return s[int(subindex) - 1] else: return s def clear(self): self._data = OrderedDict() _ctx = Context() class ctx(object): def __init__(self, kwd_dict=None, **kwds): self.kwds = kwd_dict or kwds def __enter__(self): _ctx.clear() for k, v in self.kwds.items(): _ctx.add(k, v) return self def __exit__(self, exc_type=None, exc_val=None, exc_tb=None): self.kwds = None _ctx.clear() class ExpressionError(SyntaxError): """A Snuggs-specific syntax error.""" filename = "<string>" lineno = 1 op_map = { '*': lambda *args: functools.reduce(lambda x, y: operator.mul(x, y), args), '+': lambda *args: functools.reduce(lambda x, y: operator.add(x, y), args), '/': lambda *args: functools.reduce(lambda x, y: operator.truediv(x, y), args), '-': lambda *args: functools.reduce(lambda x, y: operator.sub(x, y), args), '&': lambda *args: functools.reduce(lambda x, y: operator.and_(x, y), args), '|': lambda *args: functools.reduce(lambda x, y: operator.or_(x, y), args), '<': operator.lt, '<=': operator.le, '==': operator.eq, '!=': operator.ne, '>=': operator.ge, '>': operator.gt} def asarray(*args): if len(args) == 1 and hasattr(args[0], '__iter__'): return numpy.asanyarray(list(args[0])) else: return numpy.asanyarray(list(args)) func_map = { 'asarray': asarray, 'read': _ctx.lookup, 'take': lambda a, idx: numpy.take(a, idx - 1, axis=0)} higher_func_map = { 'map': map if sys.version_info[0] >= 3 else itertools.imap, 'partial': functools.partial} # Definition of the grammar. 
decimal = Literal('.') e = CaselessLiteral('E') sign = Literal('+') | Literal('-') number = Word(nums) name = pyparsing_common.identifier nil = Literal('nil').setParseAction(lambda s, l, t: [None]) def resolve_var(s, l, t): try: return _ctx.get(t[0]) except KeyError: err = ExpressionError( "name '%s' is not defined" % t[0]) err.text = s err.offset = l + 1 raise err var = name.setParseAction(resolve_var) string = QuotedString("'") | QuotedString('"') lparen = Literal('(').suppress() rparen = Literal(')').suppress() op = oneOf(' '.join(op_map.keys())).setParseAction( lambda s, l, t: op_map[t[0]]) def resolve_func(s, l, t): try: return func_map[t[0]] if t[0] in func_map else getattr(numpy, t[0]) except AttributeError: err = ExpressionError( "'%s' is not a function or operator" % t[0]) err.text = s err.offset = l + 1 raise err func = Word(alphanums + '_').setParseAction(resolve_func) higher_func = oneOf('map partial').setParseAction( lambda s, l, t: higher_func_map[t[0]]) func_expr = Forward() higher_func_expr = Forward() expr = higher_func_expr | func_expr operand = higher_func_expr | func_expr | nil | var | pyparsing_common.number | string func_expr << Group( lparen + (higher_func_expr | op | func) + operand + ZeroOrMore(operand) + rparen) higher_func_expr << Group( lparen + higher_func + (nil | higher_func_expr | op | func) + ZeroOrMore(operand) + rparen) def processArg(arg): if not isinstance(arg, ParseResults): return arg else: return processList(arg) def processList(lst): args = [processArg(x) for x in lst[1:]] func = processArg(lst[0]) return func(*args) def handleLine(line): try: result = expr.parseString(line) return processList(result[0]) except ParseException as exc: text = str(exc) m = re.search(r'(Expected .+) \(at char (\d+)\), \(line:(\d+)', text) msg = m.group(1) if 'map|partial' in msg: msg = "expected a function or operator" err = ExpressionError(msg) err.text = line err.offset = int(m.group(2)) + 1 raise err def eval(source, kwd_dict=None, **kwds): """Evaluate a snuggs expression. Parameters ---------- source : str Expression source. kwd_dict : dict A dict of items that form the evaluation context. Deprecated. kwds : dict A dict of items that form the valuation context. Returns ------- object """ kwd_dict = kwd_dict or kwds with ctx(kwd_dict): return handleLine(source)
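
# Illustrative usage sketch (an addition for clarity, assuming numpy arrays as
# the context values): eval() parses an s-expression, resolves bare names
# against the keyword context, maps operators through op_map, and falls back
# to numpy attributes for unknown function names.
if __name__ == "__main__":
    import numpy as np
    a = np.array([[1.0, 2.0], [3.0, 4.0]])
    b = np.full((2, 2), 10.0)
    print(eval("(+ a b)", a=a, b=b))         # elementwise add via op_map['+']
    print(eval("(asarray 1 2 3)"))           # asarray() packs scalars into an array
    print(eval("(read 1)", a=a, b=b))        # 'read' -> Context.lookup, 1-indexed
    print(eval("(sqrt (+ a b))", a=a, b=b))  # unknown names resolve to numpy.sqrt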
# -*- coding: utf-8 -*- """ Created on Wed Mar 15 22:52:57 2017 @author: mariapanteli """ import os import numpy as np import pandas as pd import pickle from sklearn.model_selection import train_test_split import extract_primary_features import load_features import util_filter_dataset WIN_SIZE = 8 DATA_DIR = 'data' METADATA_FILE = os.path.join(DATA_DIR, 'metadata.csv') OUTPUT_FILES = [os.path.join(DATA_DIR, 'train_data_'+str(WIN_SIZE)+'.pickle'), os.path.join(DATA_DIR, 'val_data_'+str(WIN_SIZE)+'.pickle'), os.path.join(DATA_DIR, 'test_data_'+str(WIN_SIZE)+'.pickle')] def get_train_val_test_idx(X, Y, seed=None): """ Split in train, validation, test sets. Parameters ---------- X : np.array Data or indices. Y : np.array Class labels for data in X. seed: int Random seed. Returns ------- (X_train, Y_train) : tuple Data X and labels y for the train set (X_val, Y_val) : tuple Data X and labels y for the validation set (X_test, Y_test) : tuple Data X and labels y for the test set """ X_train, X_val_test, Y_train, Y_val_test = train_test_split(X, Y, train_size=0.6, random_state=seed, stratify=Y) X_val, X_test, Y_val, Y_test = train_test_split(X_val_test, Y_val_test, train_size=0.5, random_state=seed, stratify=Y_val_test) return (X_train, Y_train), (X_val, Y_val), (X_test, Y_test) def subset_labels(Y, N_min=10, N_max=100, seed=None): """ Subset dataset to contain minimum N_min and maximum N_max instances per class. Return indices for this subset. Parameters ---------- Y : np.array Class labels N_min : int Minimum instances per class N_max : int Maximum instances per class seed: int Random seed. Returns ------- subset_idx : np.array Indices for a subset with classes of size bounded by N_min, N_max """ np.random.seed(seed=seed) subset_idx = [] labels = np.unique(Y) for label in labels: label_idx = np.where(Y==label)[0] counts = len(label_idx) if counts>=N_max: subset_idx.append(np.random.choice(label_idx, N_max, replace=False)) elif counts>=N_min and counts<N_max: subset_idx.append(label_idx) else: # not enough samples for this class, skip print("Found only %s samples from class %s (minimum %s)" % (counts, label, N_min)) continue if len(subset_idx)>0: subset_idx = np.concatenate(subset_idx, axis=0) return subset_idx else: raise ValueError('No classes found with minimum %s samples' % N_min) def check_extract_primary_features(df): """ Check if csv files for melspectrograms, chromagrams, melodia, speech/music segmentation exist, if the csv files don't exist, extract them. Parameters ---------- df : pd.DataFrame Metadata including class label and path to audio, melspec, chroma """ extract_melspec, extract_chroma, extract_melodia, extract_speech = False, False, False, False # TODO: Check for each file in df, instead of just the first one if os.path.exists(df['Audio'].iloc[0]): if not os.path.exists(df['Melspec'].iloc[0]): extract_melspec = True if not os.path.exists(df['Chroma'].iloc[0]): extract_chroma = True if not os.path.exists(df['Melodia'].iloc[0]): extract_melodia = True if not os.path.exists(df['Speech'].iloc[0]): extract_speech = True else: print("Audio file %s does not exist. Primary features will not be extracted." % df['Audio'].iloc[0]) extract_primary_features.extract_features_for_file_list(df, melspec=extract_melspec, chroma=extract_chroma, melodia=extract_melodia, speech=extract_speech) def extract_features(df, win2sec=8.0): """ Extract features from melspec and chroma. 
Parameters ---------- df : pd.DataFrame Metadata including class label and path to audio, melspec, chroma win2sec : float The window size for the second frame decomposition of the features Returns ------- X : np.array The features for every frame x every audio file in the dataset Y : np.array The class labels for every frame in the dataset Y_audio : np.array The audio labels """ feat_loader = load_features.FeatureLoader(win2sec=win2sec) frames_rhy, frames_mfcc, frames_chroma, frames_mel, Y_df, Y_audio_df = feat_loader.get_features(df, precomp_melody=False) print(frames_rhy.shape, frames_mel.shape, frames_mfcc.shape, frames_chroma.shape) X = np.concatenate((frames_rhy, frames_mel, frames_mfcc, frames_chroma), axis=1) Y = Y_df.get_values() Y_audio = Y_audio_df.get_values() return X, Y, Y_audio def sample_dataset(df): """ Select min 10 - max 100 recs from each country. Parameters ---------- df : pd.DataFrame The metadata (including country) of the tracks. Returns ------- df : pd.DataFrame The metadata for the selected subset of tracks. """ df = util_filter_dataset.remove_missing_data(df) subset_idx = subset_labels(df['Country'].get_values()) df = df.iloc[subset_idx, :] return df def features_for_train_test_sets(df, write_output=False): """Split in train/val/test sets, extract features and write output files. Parameters ------- df : pd.DataFrame The metadata for the selected subset of tracks. write_output : boolean Whether to write files with the extracted features for train/val/test sets. """ X_idx, Y = np.arange(len(df)), df['Country'].get_values() extract_features(df.iloc[np.array([0])], win2sec=WIN_SIZE) train_set, val_set, test_set = get_train_val_test_idx(X_idx, Y) X_train, Y_train, Y_audio_train = extract_features(df.iloc[train_set[0]], win2sec=WIN_SIZE) X_val, Y_val, Y_audio_val = extract_features(df.iloc[val_set[0]], win2sec=WIN_SIZE) X_test, Y_test, Y_audio_test = extract_features(df.iloc[test_set[0]], win2sec=WIN_SIZE) train = [X_train, Y_train, Y_audio_train] val = [X_val, Y_val, Y_audio_val] test = [X_test, Y_test, Y_audio_test] if write_output: with open(OUTPUT_FILES[0], 'wb') as f: pickle.dump(train, f) with open(OUTPUT_FILES[1], 'wb') as f: pickle.dump(val, f) with open(OUTPUT_FILES[2], 'wb') as f: pickle.dump(test, f) return train, val, test if __name__ == '__main__': # load dataset df = pd.read_csv(METADATA_FILE) check_extract_primary_features(df) df = sample_dataset(df) train, val, test = features_for_train_test_sets(df, write_output=True)
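
# Illustrative sanity check (a sketch, not part of the original pipeline; it
# uses synthetic labels and only needs scikit-learn): get_train_val_test_idx()
# performs a stratified 60/20/20 split, which can be verified on dummy data
# without touching any audio files.
def _check_split_proportions():
    X_dummy = np.arange(100)
    Y_dummy = np.repeat(['a', 'b', 'c', 'd'], 25)  # four balanced classes
    (tr, _), (va, _), (te, _) = get_train_val_test_idx(X_dummy, Y_dummy, seed=0)
    print(len(tr), len(va), len(te))  # expected: 60 20 20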
""" test date_range, bdate_range construction from the convenience range functions """ from datetime import ( datetime, time, timedelta, ) import numpy as np import pytest import pytz from pytz import timezone from pandas._libs.tslibs import timezones from pandas._libs.tslibs.offsets import ( BDay, CDay, DateOffset, MonthEnd, prefix_mapping, ) from pandas.errors import OutOfBoundsDatetime import pandas.util._test_decorators as td import pandas as pd from pandas import ( DatetimeIndex, Timedelta, Timestamp, bdate_range, date_range, offsets, ) import pandas._testing as tm from pandas.core.arrays.datetimes import generate_range START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) def _get_expected_range( begin_to_match, end_to_match, both_range, inclusive_endpoints, ): """Helper to get expected range from a both inclusive range""" left_match = begin_to_match == both_range[0] right_match = end_to_match == both_range[-1] if inclusive_endpoints == "left" and right_match: expected_range = both_range[:-1] elif inclusive_endpoints == "right" and left_match: expected_range = both_range[1:] elif inclusive_endpoints == "neither" and left_match and right_match: expected_range = both_range[1:-1] elif inclusive_endpoints == "neither" and right_match: expected_range = both_range[:-1] elif inclusive_endpoints == "neither" and left_match: expected_range = both_range[1:] elif inclusive_endpoints == "both": expected_range = both_range[:] else: expected_range = both_range[:] return expected_range class TestTimestampEquivDateRange: # Older tests in TestTimeSeries constructed their `stamp` objects # using `date_range` instead of the `Timestamp` constructor. # TestTimestampEquivDateRange checks that these are equivalent in the # pertinent cases. def test_date_range_timestamp_equiv(self): rng = date_range("20090415", "20090519", tz="US/Eastern") stamp = rng[0] ts = Timestamp("20090415", tz="US/Eastern") assert ts == stamp def test_date_range_timestamp_equiv_dateutil(self): rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern") stamp = rng[0] ts = Timestamp("20090415", tz="dateutil/US/Eastern") assert ts == stamp def test_date_range_timestamp_equiv_explicit_pytz(self): rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern")) stamp = rng[0] ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern")) assert ts == stamp @td.skip_if_windows def test_date_range_timestamp_equiv_explicit_dateutil(self): from pandas._libs.tslibs.timezones import dateutil_gettz as gettz rng = date_range("20090415", "20090519", tz=gettz("US/Eastern")) stamp = rng[0] ts = Timestamp("20090415", tz=gettz("US/Eastern")) assert ts == stamp def test_date_range_timestamp_equiv_from_datetime_instance(self): datetime_instance = datetime(2014, 3, 4) # build a timestamp with a frequency, since then it supports # addition/subtraction of integers timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0] ts = Timestamp(datetime_instance) assert ts == timestamp_instance def test_date_range_timestamp_equiv_preserve_frequency(self): timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0] ts = Timestamp("2014-03-05") assert timestamp_instance == ts class TestDateRanges: @pytest.mark.parametrize("freq", ["N", "U", "L", "T", "S", "H", "D"]) def test_date_range_edges(self, freq): # GH#13672 td = Timedelta(f"1{freq}") ts = Timestamp("1970-01-01") idx = date_range( start=ts + td, end=ts + 4 * td, freq=freq, ) exp = DatetimeIndex( [ts + n * td for n in range(1, 5)], freq=freq, ) tm.assert_index_equal(idx, 
exp) # start after end idx = date_range( start=ts + 4 * td, end=ts + td, freq=freq, ) exp = DatetimeIndex([], freq=freq) tm.assert_index_equal(idx, exp) # start matches end idx = date_range( start=ts + td, end=ts + td, freq=freq, ) exp = DatetimeIndex([ts + td], freq=freq) tm.assert_index_equal(idx, exp) def test_date_range_near_implementation_bound(self): # GH#??? freq = Timedelta(1) with pytest.raises(OutOfBoundsDatetime, match="Cannot generate range with"): date_range(end=Timestamp.min, periods=2, freq=freq) def test_date_range_nat(self): # GH#11587 msg = "Neither `start` nor `end` can be NaT" with pytest.raises(ValueError, match=msg): date_range(start="2016-01-01", end=pd.NaT, freq="D") with pytest.raises(ValueError, match=msg): date_range(start=pd.NaT, end="2016-01-01", freq="D") def test_date_range_multiplication_overflow(self): # GH#24255 # check that overflows in calculating `addend = periods * stride` # are caught with tm.assert_produces_warning(None): # we should _not_ be seeing a overflow RuntimeWarning dti = date_range(start="1677-09-22", periods=213503, freq="D") assert dti[0] == Timestamp("1677-09-22") assert len(dti) == 213503 msg = "Cannot generate range with" with pytest.raises(OutOfBoundsDatetime, match=msg): date_range("1969-05-04", periods=200000000, freq="30000D") def test_date_range_unsigned_overflow_handling(self): # GH#24255 # case where `addend = periods * stride` overflows int64 bounds # but not uint64 bounds dti = date_range(start="1677-09-22", end="2262-04-11", freq="D") dti2 = date_range(start=dti[0], periods=len(dti), freq="D") assert dti2.equals(dti) dti3 = date_range(end=dti[-1], periods=len(dti), freq="D") assert dti3.equals(dti) def test_date_range_int64_overflow_non_recoverable(self): # GH#24255 # case with start later than 1970-01-01, overflow int64 but not uint64 msg = "Cannot generate range with" with pytest.raises(OutOfBoundsDatetime, match=msg): date_range(start="1970-02-01", periods=106752 * 24, freq="H") # case with end before 1970-01-01, overflow int64 but not uint64 with pytest.raises(OutOfBoundsDatetime, match=msg): date_range(end="1969-11-14", periods=106752 * 24, freq="H") @pytest.mark.slow def test_date_range_int64_overflow_stride_endpoint_different_signs(self): # cases where stride * periods overflow int64 and stride/endpoint # have different signs start = Timestamp("2262-02-23") end = Timestamp("1969-11-14") expected = date_range(start=start, end=end, freq="-1H") assert expected[0] == start assert expected[-1] == end dti = date_range(end=end, periods=len(expected), freq="-1H") tm.assert_index_equal(dti, expected) start2 = Timestamp("1970-02-01") end2 = Timestamp("1677-10-22") expected2 = date_range(start=start2, end=end2, freq="-1H") assert expected2[0] == start2 assert expected2[-1] == end2 dti2 = date_range(start=start2, periods=len(expected2), freq="-1H") tm.assert_index_equal(dti2, expected2) def test_date_range_out_of_bounds(self): # GH#14187 msg = "Cannot generate range" with pytest.raises(OutOfBoundsDatetime, match=msg): date_range("2016-01-01", periods=100000, freq="D") with pytest.raises(OutOfBoundsDatetime, match=msg): date_range(end="1763-10-12", periods=100000, freq="D") def test_date_range_gen_error(self): rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min") assert len(rng) == 4 @pytest.mark.parametrize("freq", ["AS", "YS"]) def test_begin_year_alias(self, freq): # see gh-9313 rng = date_range("1/1/2013", "7/1/2017", freq=freq) exp = DatetimeIndex( ["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", 
"2017-01-01"], freq=freq, ) tm.assert_index_equal(rng, exp) @pytest.mark.parametrize("freq", ["A", "Y"]) def test_end_year_alias(self, freq): # see gh-9313 rng = date_range("1/1/2013", "7/1/2017", freq=freq) exp = DatetimeIndex( ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq ) tm.assert_index_equal(rng, exp) @pytest.mark.parametrize("freq", ["BA", "BY"]) def test_business_end_year_alias(self, freq): # see gh-9313 rng = date_range("1/1/2013", "7/1/2017", freq=freq) exp = DatetimeIndex( ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq ) tm.assert_index_equal(rng, exp) def test_date_range_negative_freq(self): # GH 11018 rng = date_range("2011-12-31", freq="-2A", periods=3) exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A") tm.assert_index_equal(rng, exp) assert rng.freq == "-2A" rng = date_range("2011-01-31", freq="-2M", periods=3) exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M") tm.assert_index_equal(rng, exp) assert rng.freq == "-2M" def test_date_range_bms_bug(self): # #1645 rng = date_range("1/1/2000", periods=10, freq="BMS") ex_first = Timestamp("2000-01-03") assert rng[0] == ex_first def test_date_range_normalize(self): snap = datetime.today() n = 50 rng = date_range(snap, periods=n, normalize=False, freq="2D") offset = timedelta(2) values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset) tm.assert_index_equal(rng, values) rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B") the_time = time(8, 15) for val in rng: assert val.time() == the_time def test_date_range_fy5252(self): dr = date_range( start="2013-01-01", periods=2, freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"), ) assert dr[0] == Timestamp("2013-01-31") assert dr[1] == Timestamp("2014-01-30") def test_date_range_ambiguous_arguments(self): # #2538 start = datetime(2011, 1, 1, 5, 3, 40) end = datetime(2011, 1, 1, 8, 9, 40) msg = ( "Of the four parameters: start, end, periods, and " "freq, exactly three must be specified" ) with pytest.raises(ValueError, match=msg): date_range(start, end, periods=10, freq="s") def test_date_range_convenience_periods(self): # GH 20808 result = date_range("2018-04-24", "2018-04-27", periods=3) expected = DatetimeIndex( ["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"], freq=None, ) tm.assert_index_equal(result, expected) # Test if spacing remains linear if tz changes to dst in range result = date_range( "2018-04-01 01:00:00", "2018-04-01 04:00:00", tz="Australia/Sydney", periods=3, ) expected = DatetimeIndex( [ Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"), Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"), Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"), ] ) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "start,end,result_tz", [ ["20180101", "20180103", "US/Eastern"], [datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"], [Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"], [ Timestamp("20180101", tz="US/Eastern"), Timestamp("20180103", tz="US/Eastern"), "US/Eastern", ], [ Timestamp("20180101", tz="US/Eastern"), Timestamp("20180103", tz="US/Eastern"), None, ], ], ) def test_date_range_linspacing_tz(self, start, end, result_tz): # GH 20983 result = date_range(start, end, periods=3, tz=result_tz) expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern") tm.assert_index_equal(result, expected) def test_date_range_businesshour(self): idx = 
DatetimeIndex( [ "2014-07-04 09:00", "2014-07-04 10:00", "2014-07-04 11:00", "2014-07-04 12:00", "2014-07-04 13:00", "2014-07-04 14:00", "2014-07-04 15:00", "2014-07-04 16:00", ], freq="BH", ) rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH") tm.assert_index_equal(idx, rng) idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH") rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH") tm.assert_index_equal(idx, rng) idx = DatetimeIndex( [ "2014-07-04 09:00", "2014-07-04 10:00", "2014-07-04 11:00", "2014-07-04 12:00", "2014-07-04 13:00", "2014-07-04 14:00", "2014-07-04 15:00", "2014-07-04 16:00", "2014-07-07 09:00", "2014-07-07 10:00", "2014-07-07 11:00", "2014-07-07 12:00", "2014-07-07 13:00", "2014-07-07 14:00", "2014-07-07 15:00", "2014-07-07 16:00", "2014-07-08 09:00", "2014-07-08 10:00", "2014-07-08 11:00", "2014-07-08 12:00", "2014-07-08 13:00", "2014-07-08 14:00", "2014-07-08 15:00", "2014-07-08 16:00", ], freq="BH", ) rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH") tm.assert_index_equal(idx, rng) def test_range_misspecified(self): # GH #1095 msg = ( "Of the four parameters: start, end, periods, and " "freq, exactly three must be specified" ) with pytest.raises(ValueError, match=msg): date_range(start="1/1/2000") with pytest.raises(ValueError, match=msg): date_range(end="1/1/2000") with pytest.raises(ValueError, match=msg): date_range(periods=10) with pytest.raises(ValueError, match=msg): date_range(start="1/1/2000", freq="H") with pytest.raises(ValueError, match=msg): date_range(end="1/1/2000", freq="H") with pytest.raises(ValueError, match=msg): date_range(periods=10, freq="H") with pytest.raises(ValueError, match=msg): date_range() def test_compat_replace(self): # https://github.com/statsmodels/statsmodels/issues/3349 # replace should take ints/longs for compat result = date_range(Timestamp("1960-04-01 00:00:00"), periods=76, freq="QS-JAN") assert len(result) == 76 def test_catch_infinite_loop(self): offset = offsets.DateOffset(minute=5) # blow up, don't loop forever msg = "Offset <DateOffset: minute=5> did not increment date" with pytest.raises(ValueError, match=msg): date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset) @pytest.mark.parametrize("periods", (1, 2)) def test_wom_len(self, periods): # https://github.com/pandas-dev/pandas/issues/20517 res = date_range(start="20110101", periods=periods, freq="WOM-1MON") assert len(res) == periods def test_construct_over_dst(self): # GH 20854 pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize( "US/Pacific", ambiguous=True ) pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize( "US/Pacific", ambiguous=False ) expect_data = [ Timestamp("2010-11-07 00:00:00", tz="US/Pacific"), pre_dst, pst_dst, ] expected = DatetimeIndex(expect_data, freq="H") result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific") tm.assert_index_equal(result, expected) def test_construct_with_different_start_end_string_format(self): # GH 12064 result = date_range( "2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H" ) expected = DatetimeIndex( [ Timestamp("2013-01-01 00:00:00+09:00"), Timestamp("2013-01-01 01:00:00+09:00"), Timestamp("2013-01-01 02:00:00+09:00"), ], freq="H", ) tm.assert_index_equal(result, expected) def test_error_with_zero_monthends(self): msg = r"Offset <0 \* MonthEnds> did not increment date" with pytest.raises(ValueError, match=msg): date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0)) def test_range_bug(self): # GH #770 
offset = DateOffset(months=3) result = date_range("2011-1-1", "2012-1-31", freq=offset) start = datetime(2011, 1, 1) expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset) tm.assert_index_equal(result, expected) def test_range_tz_pytz(self): # see gh-2906 tz = timezone("US/Eastern") start = tz.localize(datetime(2011, 1, 1)) end = tz.localize(datetime(2011, 1, 3)) dr = date_range(start=start, periods=3) assert dr.tz.zone == tz.zone assert dr[0] == start assert dr[2] == end dr = date_range(end=end, periods=3) assert dr.tz.zone == tz.zone assert dr[0] == start assert dr[2] == end dr = date_range(start=start, end=end) assert dr.tz.zone == tz.zone assert dr[0] == start assert dr[2] == end @pytest.mark.parametrize( "start, end", [ [ Timestamp(datetime(2014, 3, 6), tz="US/Eastern"), Timestamp(datetime(2014, 3, 12), tz="US/Eastern"), ], [ Timestamp(datetime(2013, 11, 1), tz="US/Eastern"), Timestamp(datetime(2013, 11, 6), tz="US/Eastern"), ], ], ) def test_range_tz_dst_straddle_pytz(self, start, end): dr = date_range(start, end, freq="D") assert dr[0] == start assert dr[-1] == end assert np.all(dr.hour == 0) dr = date_range(start, end, freq="D", tz="US/Eastern") assert dr[0] == start assert dr[-1] == end assert np.all(dr.hour == 0) dr = date_range( start.replace(tzinfo=None), end.replace(tzinfo=None), freq="D", tz="US/Eastern", ) assert dr[0] == start assert dr[-1] == end assert np.all(dr.hour == 0) def test_range_tz_dateutil(self): # see gh-2906 # Use maybe_get_tz to fix filename in tz under dateutil. from pandas._libs.tslibs.timezones import maybe_get_tz tz = lambda x: maybe_get_tz("dateutil/" + x) start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern")) end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern")) dr = date_range(start=start, periods=3) assert dr.tz == tz("US/Eastern") assert dr[0] == start assert dr[2] == end dr = date_range(end=end, periods=3) assert dr.tz == tz("US/Eastern") assert dr[0] == start assert dr[2] == end dr = date_range(start=start, end=end) assert dr.tz == tz("US/Eastern") assert dr[0] == start assert dr[2] == end @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"]) def test_range_closed(self, freq, inclusive_endpoints_fixture): begin = datetime(2011, 1, 1) end = datetime(2014, 1, 1) result_range = date_range( begin, end, inclusive=inclusive_endpoints_fixture, freq=freq ) both_range = date_range(begin, end, inclusive="both", freq=freq) expected_range = _get_expected_range( begin, end, both_range, inclusive_endpoints_fixture ) tm.assert_index_equal(expected_range, result_range) @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"]) def test_range_closed_with_tz_aware_start_end( self, freq, inclusive_endpoints_fixture ): # GH12409, GH12684 begin = Timestamp("2011/1/1", tz="US/Eastern") end = Timestamp("2014/1/1", tz="US/Eastern") result_range = date_range( begin, end, inclusive=inclusive_endpoints_fixture, freq=freq ) both_range = date_range(begin, end, inclusive="both", freq=freq) expected_range = _get_expected_range( begin, end, both_range, inclusive_endpoints_fixture, ) tm.assert_index_equal(expected_range, result_range) @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"]) def test_range_with_tz_closed_with_tz_aware_start_end( self, freq, inclusive_endpoints_fixture ): begin = Timestamp("2011/1/1") end = Timestamp("2014/1/1") begintz = Timestamp("2011/1/1", tz="US/Eastern") endtz = Timestamp("2014/1/1", tz="US/Eastern") result_range = date_range( begin, end, inclusive=inclusive_endpoints_fixture, 
freq=freq, tz="US/Eastern", ) both_range = date_range( begin, end, inclusive="both", freq=freq, tz="US/Eastern" ) expected_range = _get_expected_range( begintz, endtz, both_range, inclusive_endpoints_fixture, ) tm.assert_index_equal(expected_range, result_range) def test_range_closed_boundary(self, inclusive_endpoints_fixture): # GH#11804 right_boundary = date_range( "2015-09-12", "2015-12-01", freq="QS-MAR", inclusive=inclusive_endpoints_fixture, ) left_boundary = date_range( "2015-09-01", "2015-09-12", freq="QS-MAR", inclusive=inclusive_endpoints_fixture, ) both_boundary = date_range( "2015-09-01", "2015-12-01", freq="QS-MAR", inclusive=inclusive_endpoints_fixture, ) neither_boundary = date_range( "2015-09-11", "2015-09-12", freq="QS-MAR", inclusive=inclusive_endpoints_fixture, ) expected_right = both_boundary expected_left = both_boundary expected_both = both_boundary if inclusive_endpoints_fixture == "right": expected_left = both_boundary[1:] elif inclusive_endpoints_fixture == "left": expected_right = both_boundary[:-1] elif inclusive_endpoints_fixture == "both": expected_right = both_boundary[1:] expected_left = both_boundary[:-1] expected_neither = both_boundary[1:-1] tm.assert_index_equal(right_boundary, expected_right) tm.assert_index_equal(left_boundary, expected_left) tm.assert_index_equal(both_boundary, expected_both) tm.assert_index_equal(neither_boundary, expected_neither) def test_years_only(self): # GH 6961 dr = date_range("2014", "2015", freq="M") assert dr[0] == datetime(2014, 1, 31) assert dr[-1] == datetime(2014, 12, 31) def test_freq_divides_end_in_nanos(self): # GH 10885 result_1 = date_range("2005-01-12 10:00", "2005-01-12 16:00", freq="345min") result_2 = date_range("2005-01-13 10:00", "2005-01-13 16:00", freq="345min") expected_1 = DatetimeIndex( ["2005-01-12 10:00:00", "2005-01-12 15:45:00"], dtype="datetime64[ns]", freq="345T", tz=None, ) expected_2 = DatetimeIndex( ["2005-01-13 10:00:00", "2005-01-13 15:45:00"], dtype="datetime64[ns]", freq="345T", tz=None, ) tm.assert_index_equal(result_1, expected_1) tm.assert_index_equal(result_2, expected_2) def test_cached_range_bug(self): rng = date_range("2010-09-01 05:00:00", periods=50, freq=DateOffset(hours=6)) assert len(rng) == 50 assert rng[0] == datetime(2010, 9, 1, 5) def test_timezone_comparison_bug(self): # smoke test start = Timestamp("20130220 10:00", tz="US/Eastern") result = date_range(start, periods=2, tz="US/Eastern") assert len(result) == 2 def test_timezone_comparison_assert(self): start = Timestamp("20130220 10:00", tz="US/Eastern") msg = "Inferred time zone not equal to passed time zone" with pytest.raises(AssertionError, match=msg): date_range(start, periods=2, tz="Europe/Berlin") def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture): # GH 23270 tz = tz_aware_fixture result = date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz) expected = date_range(end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz)[ ::-1 ] tm.assert_index_equal(result, expected) def test_range_where_start_equal_end(self, inclusive_endpoints_fixture): # GH 43394 start = "2021-09-02" end = "2021-09-02" result = date_range( start=start, end=end, freq="D", inclusive=inclusive_endpoints_fixture ) both_range = date_range(start=start, end=end, freq="D", inclusive="both") if inclusive_endpoints_fixture == "neither": expected = both_range[1:-1] elif inclusive_endpoints_fixture in ("left", "right", "both"): expected = both_range[:] tm.assert_index_equal(result, expected) class TestDateRangeTZ: 
"""Tests for date_range with timezones""" def test_hongkong_tz_convert(self): # GH#1673 smoke test dr = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong") # it works! dr.hour @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_date_range_span_dst_transition(self, tzstr): # GH#1778 # Standard -> Daylight Savings Time dr = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern") assert (dr.hour == 0).all() dr = date_range("2012-11-02", periods=10, tz=tzstr) result = dr.hour expected = pd.Index([0] * 10) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_date_range_timezone_str_argument(self, tzstr): tz = timezones.maybe_get_tz(tzstr) result = date_range("1/1/2000", periods=10, tz=tzstr) expected = date_range("1/1/2000", periods=10, tz=tz) tm.assert_index_equal(result, expected) def test_date_range_with_fixedoffset_noname(self): from pandas.tests.indexes.datetimes.test_timezones import fixed_off_no_name off = fixed_off_no_name start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) rng = date_range(start=start, end=end) assert off == rng.tz idx = pd.Index([start, end]) assert off == idx.tz @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_date_range_with_tz(self, tzstr): stamp = Timestamp("3/11/2012 05:00", tz=tzstr) assert stamp.hour == 5 rng = date_range("3/11/2012 04:00", periods=10, freq="H", tz=tzstr) assert stamp == rng[1] class TestGenRangeGeneration: def test_generate(self): rng1 = list(generate_range(START, END, offset=BDay())) rng2 = list(generate_range(START, END, offset="B")) assert rng1 == rng2 def test_generate_cday(self): rng1 = list(generate_range(START, END, offset=CDay())) rng2 = list(generate_range(START, END, offset="C")) assert rng1 == rng2 def test_1(self): rng = list(generate_range(start=datetime(2009, 3, 25), periods=2)) expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)] assert rng == expected def test_2(self): rng = list(generate_range(start=datetime(2008, 1, 1), end=datetime(2008, 1, 3))) expected = [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)] assert rng == expected def test_3(self): rng = list(generate_range(start=datetime(2008, 1, 5), end=datetime(2008, 1, 6))) expected = [] assert rng == expected def test_precision_finer_than_offset(self): # GH#9907 result1 = date_range( start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="Q" ) result2 = date_range( start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W" ) expected1_list = [ "2015-06-30 00:00:03", "2015-09-30 00:00:03", "2015-12-31 00:00:03", "2016-03-31 00:00:03", ] expected2_list = [ "2015-04-19 00:00:03", "2015-04-26 00:00:03", "2015-05-03 00:00:03", "2015-05-10 00:00:03", "2015-05-17 00:00:03", "2015-05-24 00:00:03", "2015-05-31 00:00:03", "2015-06-07 00:00:03", "2015-06-14 00:00:03", "2015-06-21 00:00:03", ] expected1 = DatetimeIndex( expected1_list, dtype="datetime64[ns]", freq="Q-DEC", tz=None ) expected2 = DatetimeIndex( expected2_list, dtype="datetime64[ns]", freq="W-SUN", tz=None ) tm.assert_index_equal(result1, expected1) tm.assert_index_equal(result2, expected2) dt1, dt2 = "2017-01-01", "2017-01-01" tz1, tz2 = "US/Eastern", "Europe/London" @pytest.mark.parametrize( "start,end", [ (Timestamp(dt1, tz=tz1), Timestamp(dt2)), (Timestamp(dt1), Timestamp(dt2, tz=tz2)), (Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)), (Timestamp(dt1, tz=tz2), Timestamp(dt2, 
tz=tz1)), ], ) def test_mismatching_tz_raises_err(self, start, end): # issue 18488 msg = "Start and end cannot both be tz-aware with different timezones" with pytest.raises(TypeError, match=msg): date_range(start, end) with pytest.raises(TypeError, match=msg): date_range(start, end, freq=BDay()) class TestBusinessDateRange: def test_constructor(self): bdate_range(START, END, freq=BDay()) bdate_range(START, periods=20, freq=BDay()) bdate_range(end=START, periods=20, freq=BDay()) msg = "periods must be a number, got B" with pytest.raises(TypeError, match=msg): date_range("2011-1-1", "2012-1-1", "B") with pytest.raises(TypeError, match=msg): bdate_range("2011-1-1", "2012-1-1", "B") msg = "freq must be specified for bdate_range; use date_range instead" with pytest.raises(TypeError, match=msg): bdate_range(START, END, periods=10, freq=None) def test_misc(self): end = datetime(2009, 5, 13) dr = bdate_range(end=end, periods=20) firstDate = end - 19 * BDay() assert len(dr) == 20 assert dr[0] == firstDate assert dr[-1] == end def test_date_parse_failure(self): badly_formed_date = "2007/100/1" msg = "could not convert string to Timestamp" with pytest.raises(ValueError, match=msg): Timestamp(badly_formed_date) with pytest.raises(ValueError, match=msg): bdate_range(start=badly_formed_date, periods=10) with pytest.raises(ValueError, match=msg): bdate_range(end=badly_formed_date, periods=10) with pytest.raises(ValueError, match=msg): bdate_range(badly_formed_date, badly_formed_date) def test_daterange_bug_456(self): # GH #456 rng1 = bdate_range("12/5/2011", "12/5/2011") rng2 = bdate_range("12/2/2011", "12/5/2011") assert rng2._data.freq == BDay() result = rng1.union(rng2) assert isinstance(result, DatetimeIndex) @pytest.mark.parametrize("inclusive", ["left", "right", "neither", "both"]) def test_bdays_and_open_boundaries(self, inclusive): # GH 6673 start = "2018-07-21" # Saturday end = "2018-07-29" # Sunday result = date_range(start, end, freq="B", inclusive=inclusive) bday_start = "2018-07-23" # Monday bday_end = "2018-07-27" # Friday expected = date_range(bday_start, bday_end, freq="D") tm.assert_index_equal(result, expected) # Note: we do _not_ expect the freqs to match here def test_bday_near_overflow(self): # GH#24252 avoid doing unnecessary addition that _would_ overflow start = Timestamp.max.floor("D").to_pydatetime() rng = date_range(start, end=None, periods=1, freq="B") expected = DatetimeIndex([start], freq="B") tm.assert_index_equal(rng, expected) def test_bday_overflow_error(self): # GH#24252 check that we get OutOfBoundsDatetime and not OverflowError msg = "Out of bounds nanosecond timestamp" start = Timestamp.max.floor("D").to_pydatetime() with pytest.raises(OutOfBoundsDatetime, match=msg): date_range(start, periods=2, freq="B") class TestCustomDateRange: def test_constructor(self): bdate_range(START, END, freq=CDay()) bdate_range(START, periods=20, freq=CDay()) bdate_range(end=START, periods=20, freq=CDay()) msg = "periods must be a number, got C" with pytest.raises(TypeError, match=msg): date_range("2011-1-1", "2012-1-1", "C") with pytest.raises(TypeError, match=msg): bdate_range("2011-1-1", "2012-1-1", "C") def test_misc(self): end = datetime(2009, 5, 13) dr = bdate_range(end=end, periods=20, freq="C") firstDate = end - 19 * CDay() assert len(dr) == 20 assert dr[0] == firstDate assert dr[-1] == end def test_daterange_bug_456(self): # GH #456 rng1 = bdate_range("12/5/2011", "12/5/2011", freq="C") rng2 = bdate_range("12/2/2011", "12/5/2011", freq="C") assert rng2._data.freq == CDay() 
result = rng1.union(rng2) assert isinstance(result, DatetimeIndex) def test_cdaterange(self): result = bdate_range("2013-05-01", periods=3, freq="C") expected = DatetimeIndex(["2013-05-01", "2013-05-02", "2013-05-03"], freq="C") tm.assert_index_equal(result, expected) assert result.freq == expected.freq def test_cdaterange_weekmask(self): result = bdate_range( "2013-05-01", periods=3, freq="C", weekmask="Sun Mon Tue Wed Thu" ) expected = DatetimeIndex( ["2013-05-01", "2013-05-02", "2013-05-05"], freq=result.freq ) tm.assert_index_equal(result, expected) assert result.freq == expected.freq # raise with non-custom freq msg = ( "a custom frequency string is required when holidays or " "weekmask are passed, got frequency B" ) with pytest.raises(ValueError, match=msg): bdate_range("2013-05-01", periods=3, weekmask="Sun Mon Tue Wed Thu") def test_cdaterange_holidays(self): result = bdate_range("2013-05-01", periods=3, freq="C", holidays=["2013-05-01"]) expected = DatetimeIndex( ["2013-05-02", "2013-05-03", "2013-05-06"], freq=result.freq ) tm.assert_index_equal(result, expected) assert result.freq == expected.freq # raise with non-custom freq msg = ( "a custom frequency string is required when holidays or " "weekmask are passed, got frequency B" ) with pytest.raises(ValueError, match=msg): bdate_range("2013-05-01", periods=3, holidays=["2013-05-01"]) def test_cdaterange_weekmask_and_holidays(self): result = bdate_range( "2013-05-01", periods=3, freq="C", weekmask="Sun Mon Tue Wed Thu", holidays=["2013-05-01"], ) expected = DatetimeIndex( ["2013-05-02", "2013-05-05", "2013-05-06"], freq=result.freq ) tm.assert_index_equal(result, expected) assert result.freq == expected.freq # raise with non-custom freq msg = ( "a custom frequency string is required when holidays or " "weekmask are passed, got frequency B" ) with pytest.raises(ValueError, match=msg): bdate_range( "2013-05-01", periods=3, weekmask="Sun Mon Tue Wed Thu", holidays=["2013-05-01"], ) @pytest.mark.parametrize( "freq", [freq for freq in prefix_mapping if freq.startswith("C")] ) def test_all_custom_freq(self, freq): # should not raise bdate_range( START, END, freq=freq, weekmask="Mon Wed Fri", holidays=["2009-03-14"] ) bad_freq = freq + "FOO" msg = f"invalid custom frequency string: {bad_freq}" with pytest.raises(ValueError, match=msg): bdate_range(START, END, freq=bad_freq) @pytest.mark.parametrize( "start_end", [ ("2018-01-01T00:00:01.000Z", "2018-01-03T00:00:01.000Z"), ("2018-01-01T00:00:00.010Z", "2018-01-03T00:00:00.010Z"), ("2001-01-01T00:00:00.010Z", "2001-01-03T00:00:00.010Z"), ], ) def test_range_with_millisecond_resolution(self, start_end): # https://github.com/pandas-dev/pandas/issues/24110 start, end = start_end result = date_range(start=start, end=end, periods=2, inclusive="left") expected = DatetimeIndex([start]) tm.assert_index_equal(result, expected) def test_date_range_with_custom_holidays(): # GH 30593 freq = offsets.CustomBusinessHour(start="15:00", holidays=["2020-11-26"]) result = date_range(start="2020-11-25 15:00", periods=4, freq=freq) expected = DatetimeIndex( [ "2020-11-25 15:00:00", "2020-11-25 16:00:00", "2020-11-27 15:00:00", "2020-11-27 16:00:00", ], freq=freq, ) tm.assert_index_equal(result, expected)
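
# A short illustrative test added as a sketch (the test name is not part of the
# original suite): it mirrors test_bdays_and_open_boundaries above and shows
# that `inclusive` only trims endpoints that actually fall on the frequency
# grid, so weekend endpoints with a business-day frequency are never affected.
def test_inclusive_noop_when_endpoints_off_grid():
    both = date_range("2018-07-21", "2018-07-29", freq="B", inclusive="both")
    neither = date_range("2018-07-21", "2018-07-29", freq="B", inclusive="neither")
    tm.assert_index_equal(both, neither)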
""" Unit tests for the methods in the NMTF class (/code/nmtf_np.py). """ import sys, os project_location = os.path.dirname(__file__)+"/../../../" sys.path.append(project_location) import numpy, math, pytest, itertools, random from BNMTF.code.models.nmtf_np import NMTF """ Test the initialisation of Omega """ def test_init(): # Test getting an exception when R and M are different sizes, and when R is not a 2D array R1 = numpy.ones(3) M = numpy.ones((2,3)) K = 0 L = 0 with pytest.raises(AssertionError) as error: NMTF(R1,M,K,L) assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 1-dimensional." R2 = numpy.ones((4,3,2)) with pytest.raises(AssertionError) as error: NMTF(R2,M,K,L) assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 3-dimensional." R3 = numpy.ones((3,2)) with pytest.raises(AssertionError) as error: NMTF(R3,M,K,L) assert str(error.value) == "Input matrix R is not of the same size as the indicator matrix M: (3, 2) and (2, 3) respectively." # Test getting an exception if a row or column is entirely unknown R = numpy.ones((2,3)) M1 = [[1,1,1],[0,0,0]] M2 = [[1,1,0],[1,0,0]] with pytest.raises(AssertionError) as error: NMTF(R,M1,K,L) assert str(error.value) == "Fully unobserved row in R, row 1." with pytest.raises(AssertionError) as error: NMTF(R,M2,K,L) assert str(error.value) == "Fully unobserved column in R, column 2." # Test whether we made a copy of R with 1's at unknown values I,J = 2,4 R = [[1,2,0,4],[5,0,7,0]] M = [[1,1,0,1],[1,0,1,0]] K = 2 L = 3 R_excl_unknown = [[1,2,1,4],[5,1,7,1]] nmtf = NMTF(R,M,K,L) assert numpy.array_equal(R,nmtf.R) assert numpy.array_equal(M,nmtf.M) assert nmtf.I == I assert nmtf.J == J assert nmtf.K == K assert nmtf.L == L assert numpy.array_equal(R_excl_unknown,nmtf.R_excl_unknown) """ Test initialisation of F, S, G """ def test_initialisation(): I,J = 2,3 R = numpy.ones((I,J)) M = numpy.ones((I,J)) K = 4 L = 5 # Init FG ones, S random init_FG = 'ones' init_S = 'random' nmtf = NMTF(R,M,K,L) nmtf.initialise(init_S,init_FG) assert numpy.array_equal(numpy.ones((I,K)),nmtf.F) assert numpy.array_equal(numpy.ones((J,L)),nmtf.G) for (k,l) in itertools.product(range(0,K),range(0,L)): assert nmtf.S[k,l] > 0 and nmtf.S[k,l] < 1 # Init FG random, S ones init_FG = 'random' init_S = 'ones' nmtf = NMTF(R,M,K,L) nmtf.initialise(init_S,init_FG) assert numpy.array_equal(numpy.ones((K,L)),nmtf.S) for (i,k) in itertools.product(range(0,I),range(0,K)): assert nmtf.F[i,k] > 0 and nmtf.F[i,k] < 1 for (j,l) in itertools.product(range(0,J),range(0,L)): assert nmtf.G[j,k] > 0 and nmtf.G[j,k] < 1 # Init FG kmeans, S exponential init_FG = 'kmeans' init_S = 'exponential' nmtf = NMTF(R,M,K,L) nmtf.initialise(init_S,init_FG) for (i,k) in itertools.product(range(0,I),range(0,K)): assert nmtf.F[i,k] == 0.2 or nmtf.F[i,k] == 1.2 for (j,l) in itertools.product(range(0,J),range(0,L)): assert nmtf.G[j,k] == 0.2 or nmtf.G[j,k] == 1.2 for (k,l) in itertools.product(range(0,K),range(0,L)): assert nmtf.S[k,l] > 0 """ Test updates for F, G, S, without dynamic behaviour. 
""" def test_updates(): R = numpy.array([[1,2],[3,4]],dtype='f') M = numpy.array([[1,1],[0,1]]) I,J,K,L = 2,2,3,1 F = numpy.array([[1,2,3],[4,5,6]],dtype='f') S = numpy.array([[7],[8],[9]],dtype='f') G = numpy.array([[10],[11]],dtype='f') FSG = numpy.array([[500,550],[1220,1342]],dtype='f') FS = numpy.array([[50],[122]],dtype='f') SG = numpy.array([[70,77],[80,88],[90,99]],dtype='f') F_updated = [ [ F[0][0] * ( R[0][0]*SG[0][0]/FSG[0][0] + R[0][1]*SG[0][1]/FSG[0][1] ) / ( SG[0][0]+SG[0][1] ), F[0][1] * ( R[0][0]*SG[1][0]/FSG[0][0] + R[0][1]*SG[1][1]/FSG[0][1] ) / ( SG[1][0]+SG[1][1] ), F[0][2] * ( R[0][0]*SG[2][0]/FSG[0][0] + R[0][1]*SG[2][1]/FSG[0][1] ) / ( SG[2][0]+SG[2][1] ) ], [ F[1][0] * R[1][1]/FSG[1][1], F[1][1] * R[1][1]/FSG[1][1], F[1][2] * R[1][1]/FSG[1][1] ] ] G_updated = [ [ G[0][0] * R[0][0]/FSG[0][0] ], [ G[1][0] * ( R[0][1]*FS[0][0]/FSG[0][1] + R[1][1]*FS[1][0]/FSG[1][1] ) / ( FS[0][0]+FS[1][0] ) ] ] S_updated = [ [ S[0][0] * ( R[0][0]*F[0][0]*G[0][0]/FSG[0][0] + R[0][1]*F[0][0]*G[1][0]/FSG[0][1] + R[1][1]*F[1][0]*G[1][0]/FSG[1][1] ) / ( F[0][0]*G[0][0]+F[0][0]*G[1][0]+F[1][0]*G[1][0] ) ], [ S[1][0] * ( R[0][0]*F[0][1]*G[0][0]/FSG[0][0] + R[0][1]*F[0][1]*G[1][0]/FSG[0][1] + R[1][1]*F[1][1]*G[1][0]/FSG[1][1] ) / ( F[0][1]*G[0][0]+F[0][1]*G[1][0]+F[1][1]*G[1][0] ) ], [ S[2][0] * ( R[0][0]*F[0][2]*G[0][0]/FSG[0][0] + R[0][1]*F[0][2]*G[1][0]/FSG[0][1] + R[1][1]*F[1][2]*G[1][0]/FSG[1][1] ) / ( F[0][2]*G[0][0]+F[0][2]*G[1][0]+F[1][2]*G[1][0] ) ] ] nmtf = NMTF(R,M,K,L) def reset(): nmtf.F = numpy.copy(F) nmtf.S = numpy.copy(S) nmtf.G = numpy.copy(G) print F[0][0], ( R[0][0]*SG[0][0]/FSG[0][0] + R[0][1]*SG[0][1]/FSG[0][1] ), ( SG[0][0]+SG[0][1] ) print F[1][0], R[1][1]/FSG[1][1]*SG[0][1], SG[0][1] # Test F for k in range(0,K): reset() nmtf.update_F(k) for i in range(0,I): print i,k assert abs(F_updated[i][k] - nmtf.F[i,k]) < 0.000000001 # Test G for l in range(0,L): reset() nmtf.update_G(l) for j in range(0,J): assert abs(G_updated[j][l] - nmtf.G[j,l]) < 0.000000001 # Test S def test_S(k,l): reset() nmtf.update_S(k,l) assert abs(S_updated[k][l] - nmtf.S[k,l]) < 0.00000001 test_S(0,0) test_S(1,0) test_S(2,0) """ Test iterations - whether we get no exception """ def test_run(): ###### Test updating F, G, S in that order # Test whether a single iteration gives the correct behaviour, updating F, G, S in that order R = numpy.array([[1,2],[3,4]],dtype='f') M = numpy.array([[1,1],[0,1]]) K = 3 L = 1 F = numpy.array([[1,2,3],[4,5,6]],dtype='f') S = numpy.array([[7],[8],[9]],dtype='f') G = numpy.array([[10],[11]],dtype='f') #FSG = numpy.array([[500,550],[1220,1342]],dtype='f') #FS = numpy.array([[50],[122]],dtype='f') #SG = numpy.array([[70,77],[80,88],[90,99]],dtype='f') nmtf = NMTF(R,M,K,L) # Check we get an Exception if W, H are undefined with pytest.raises(AssertionError) as error: nmtf.run(0) assert str(error.value) == "F, S and G have not been initialised - please run NMTF.initialise() first." 
nmtf.F = numpy.copy(F) nmtf.S = numpy.copy(S) nmtf.G = numpy.copy(G) nmtf.run(1) """ Test divergence calculation """ def test_compute_I_div(): R = numpy.array([[1,2],[3,4]],dtype=float) M = numpy.array([[1,1],[0,1]]) (I,J,K,L) = (2,2,3,1) F = numpy.array([[1,2,3],[4,5,6]]) S = numpy.array([[7],[8],[9]]) G = numpy.array([[10],[11]]) #R_predicted = numpy.array([[500,550],[1220,1342]],dtype=float) nmtf = NMTF(R,M,K,L) nmtf.F = F nmtf.S = S nmtf.G = G expected_I_div = sum([ 1.0*math.log(1.0/500.0) - 1.0 + 500.0, 2.0*math.log(2.0/550.0) - 2.0 + 550.0, 4.0*math.log(4.0/1342.0) - 4.0 + 1342.0 ]) nmtf = NMTF(R,M,K,L) nmtf.F = F nmtf.S = S nmtf.G = G I_div = nmtf.compute_I_div() assert I_div == expected_I_div """ Test computing the performance of the predictions using the expectations """ def test_predict(): R = numpy.array([[1,2],[3,4]],dtype=float) M = numpy.array([[1,1],[0,1]]) (I,J,K,L) = (2,2,3,1) F = numpy.array([[1,2,3],[4,5,6]]) S = numpy.array([[7],[8],[9]]) G = numpy.array([[10],[11]]) #R_predicted = numpy.array([[500,550],[1220,1342]],dtype=float) nmtf = NMTF(R,M,K,L) nmtf.F = F nmtf.S = S nmtf.G = G performances = nmtf.predict(M) MSE = ( 499*499 + 548*548 + 1338*1338 ) / 3.0 R2 = 1 - (499**2 + 548**2 + 1338**2)/((4.0/3.0)**2 + (1.0/3.0)**2 + (5.0/3.0)**2) #mean=7.0/3.0 #mean_real=7.0/3.0,mean_pred=2392.0/3.0 -> diff_real=[-4.0/3.0,-1.0/3.0,5.0/3.0],diff_pred=[-892.0/3.0,-742.0/3.0,1634.0/3.0] Rp = ((-4.0/3.0*-892.0/3.0)+(-1.0/3.0*-742.0/3.0)+(5.0/3.0*1634.0/3.0)) / (math.sqrt((-4.0/3.0)**2+(-1.0/3.0)**2+(5.0/3.0)**2) * math.sqrt((-892.0/3.0)**2+(-742.0/3.0)**2+(1634.0/3.0)**2)) assert performances['MSE'] == MSE assert abs(performances['R^2'] - R2) < 0.000000001 assert abs(performances['Rp'] - Rp) < 0.000000001 """ Test the evaluation measures MSE, R^2, Rp """ def test_compute_statistics(): R = numpy.array([[1,2],[3,4]],dtype=float) M = numpy.array([[1,1],[0,1]]) (I,J,K,L) = 2, 2, 3, 4 nmtf = NMTF(R,M,K,L) R_pred = numpy.array([[500,550],[1220,1342]],dtype=float) M_pred = numpy.array([[0,0],[1,1]]) MSE_pred = (1217**2 + 1338**2) / 2.0 R2_pred = 1. - (1217**2+1338**2)/(0.5**2+0.5**2) #mean=3.5 Rp_pred = 61. / ( math.sqrt(.5) * math.sqrt(7442.) ) #mean=3.5,var=0.5,mean_pred=1281,var_pred=7442,cov=61 assert MSE_pred == nmtf.compute_MSE(M_pred,R,R_pred) assert R2_pred == nmtf.compute_R2(M_pred,R,R_pred) assert Rp_pred == nmtf.compute_Rp(M_pred,R,R_pred)
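
# A minimal reference implementation (a sketch, assuming only numpy; the helper
# name is not part of the original code) of the I-divergence that
# test_compute_I_div checks above: the sum over observed entries (M_ij == 1)
# of R_ij * log(R_ij / Rhat_ij) - R_ij + Rhat_ij.
def _reference_I_div(R, M, R_pred):
    R, R_pred = numpy.asarray(R, dtype=float), numpy.asarray(R_pred, dtype=float)
    mask = numpy.asarray(M) == 1
    return numpy.sum(R[mask] * numpy.log(R[mask] / R_pred[mask]) - R[mask] + R_pred[mask])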
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import deserialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page class SampleList(ListResource): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. """ def __init__(self, version, assistant_sid, task_sid): """ Initialize the SampleList :param Version version: Version that contains the resource :param assistant_sid: The SID of the Assistant that is the parent of the Task associated with the resource :param task_sid: The SID of the Task associated with the resource :returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleList :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleList """ super(SampleList, self).__init__(version) # Path Solution self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, } self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples'.format(**self._solution) def stream(self, language=values.unset, limit=None, page_size=None): """ Streams SampleInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param unicode language: The ISO language-country string that specifies the language used for the sample :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page(language=language, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit']) def list(self, language=values.unset, limit=None, page_size=None): """ Lists SampleInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode language: The ISO language-country string that specifies the language used for the sample :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance] """ return list(self.stream(language=language, limit=limit, page_size=page_size, )) def page(self, language=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of SampleInstance records from the API. 
Request is executed immediately :param unicode language: The ISO language-country string that specifies the language used for the sample :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SamplePage """ data = values.of({ 'Language': language, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page(method='GET', uri=self._uri, params=data, ) return SamplePage(self._version, response, self._solution) def get_page(self, target_url): """ Retrieve a specific page of SampleInstance records from the API. Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SamplePage """ response = self._version.domain.twilio.request( 'GET', target_url, ) return SamplePage(self._version, response, self._solution) def create(self, language, tagged_text, source_channel=values.unset): """ Create the SampleInstance :param unicode language: The ISO language-country string that specifies the language used for the new sample :param unicode tagged_text: The text example of how end users might express the task :param unicode source_channel: The communication channel from which the new sample was captured :returns: The created SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance """ data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, }) payload = self._version.create(method='POST', uri=self._uri, data=data, ) return SampleInstance( self._version, payload, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], ) def get(self, sid): """ Constructs a SampleContext :param sid: The unique string that identifies the resource :returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext """ return SampleContext( self._version, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], sid=sid, ) def __call__(self, sid): """ Constructs a SampleContext :param sid: The unique string that identifies the resource :returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext """ return SampleContext( self._version, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], sid=sid, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Autopilot.V1.SampleList>' class SamplePage(Page): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. 
""" def __init__(self, version, response, solution): """ Initialize the SamplePage :param Version version: Version that contains the resource :param Response response: Response from the API :param assistant_sid: The SID of the Assistant that is the parent of the Task associated with the resource :param task_sid: The SID of the Task associated with the resource :returns: twilio.rest.autopilot.v1.assistant.task.sample.SamplePage :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SamplePage """ super(SamplePage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of SampleInstance :param dict payload: Payload response from the API :returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance """ return SampleInstance( self._version, payload, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Autopilot.V1.SamplePage>' class SampleContext(InstanceContext): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. """ def __init__(self, version, assistant_sid, task_sid, sid): """ Initialize the SampleContext :param Version version: Version that contains the resource :param assistant_sid: The SID of the Assistant that is the parent of the Task associated with the resource to fetch :param task_sid: The SID of the Task associated with the Sample resource to create :param sid: The unique string that identifies the resource :returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext """ super(SampleContext, self).__init__(version) # Path Solution self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, 'sid': sid, } self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples/{sid}'.format(**self._solution) def fetch(self): """ Fetch the SampleInstance :returns: The fetched SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance """ payload = self._version.fetch(method='GET', uri=self._uri, ) return SampleInstance( self._version, payload, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], sid=self._solution['sid'], ) def update(self, language=values.unset, tagged_text=values.unset, source_channel=values.unset): """ Update the SampleInstance :param unicode language: The ISO language-country string that specifies the language used for the sample :param unicode tagged_text: The text example of how end users might express the task :param unicode source_channel: The communication channel from which the sample was captured :returns: The updated SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance """ data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, }) payload = self._version.update(method='POST', uri=self._uri, data=data, ) return SampleInstance( self._version, payload, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], sid=self._solution['sid'], ) def delete(self): """ Deletes the SampleInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return 
self._version.delete(method='DELETE', uri=self._uri, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Autopilot.V1.SampleContext {}>'.format(context) class SampleInstance(InstanceResource): """ PLEASE NOTE that this class contains preview products that are subject to change. Use them with caution. If you currently do not have developer preview access, please contact help@twilio.com. """ def __init__(self, version, payload, assistant_sid, task_sid, sid=None): """ Initialize the SampleInstance :returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance """ super(SampleInstance, self).__init__(version) # Marshaled Properties self._properties = { 'account_sid': payload.get('account_sid'), 'date_created': deserialize.iso8601_datetime(payload.get('date_created')), 'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')), 'task_sid': payload.get('task_sid'), 'language': payload.get('language'), 'assistant_sid': payload.get('assistant_sid'), 'sid': payload.get('sid'), 'tagged_text': payload.get('tagged_text'), 'url': payload.get('url'), 'source_channel': payload.get('source_channel'), } # Context self._context = None self._solution = { 'assistant_sid': assistant_sid, 'task_sid': task_sid, 'sid': sid or self._properties['sid'], } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SampleContext for this SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleContext """ if self._context is None: self._context = SampleContext( self._version, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], sid=self._solution['sid'], ) return self._context @property def account_sid(self): """ :returns: The SID of the Account that created the resource :rtype: unicode """ return self._properties['account_sid'] @property def date_created(self): """ :returns: The RFC 2822 date and time in GMT when the resource was created :rtype: datetime """ return self._properties['date_created'] @property def date_updated(self): """ :returns: The RFC 2822 date and time in GMT when the resource was last updated :rtype: datetime """ return self._properties['date_updated'] @property def task_sid(self): """ :returns: The SID of the Task associated with the resource :rtype: unicode """ return self._properties['task_sid'] @property def language(self): """ :returns: An ISO language-country string that specifies the language used for the sample :rtype: unicode """ return self._properties['language'] @property def assistant_sid(self): """ :returns: The SID of the Assistant that is the parent of the Task associated with the resource :rtype: unicode """ return self._properties['assistant_sid'] @property def sid(self): """ :returns: The unique string that identifies the resource :rtype: unicode """ return self._properties['sid'] @property def tagged_text(self): """ :returns: The text example of how end users might express the task :rtype: unicode """ return self._properties['tagged_text'] @property def url(self): """ :returns: The absolute URL of the Sample resource :rtype: unicode """ return self._properties['url'] @property def source_channel(self): """ :returns: The communication channel from which 
the sample was captured :rtype: unicode """ return self._properties['source_channel'] def fetch(self): """ Fetch the SampleInstance :returns: The fetched SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance """ return self._proxy.fetch() def update(self, language=values.unset, tagged_text=values.unset, source_channel=values.unset): """ Update the SampleInstance :param unicode language: The ISO language-country string that specifies the language used for the sample :param unicode tagged_text: The text example of how end users might express the task :param unicode source_channel: The communication channel from which the sample was captured :returns: The updated SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance """ return self._proxy.update(language=language, tagged_text=tagged_text, source_channel=source_channel, ) def delete(self): """ Deletes the SampleInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._proxy.delete() def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Autopilot.V1.SampleInstance {}>'.format(context)
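

# ----------------------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated module above). It assumes the
# standard twilio.rest.Client and that the Samples list is reached through the Assistant/Task
# hierarchy this module mirrors; the SIDs and credentials below are placeholders.
# ----------------------------------------------------------------------------------------------
if __name__ == '__main__':
    from twilio.rest import Client

    client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
    samples = client.autopilot \
        .assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
        .tasks('UDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
        .samples

    # SampleList.create() issues the POST with the Language/TaggedText parameters shown above
    sample = samples.create(language='en-US', tagged_text='order a large pizza')

    # list() is the eager counterpart of stream(); both accept the same language filter
    for s in samples.list(language='en-US', limit=20):
        print('%s %s' % (s.sid, s.tagged_text))

    # an individual resource is addressed through SampleContext (via get()/__call__)
    samples(sample.sid).update(tagged_text='order a small pizza')
    samples(sample.sid).delete()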
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function import sys import warnings if sys.version >= '3': basestring = unicode = str from pyspark import since, _NoValue from pyspark.rdd import ignore_unicode_prefix from pyspark.sql.session import _monkey_patch_RDD, SparkSession from pyspark.sql.dataframe import DataFrame from pyspark.sql.readwriter import DataFrameReader from pyspark.sql.streaming import DataStreamReader from pyspark.sql.types import IntegerType, Row, StringType from pyspark.sql.udf import UDFRegistration from pyspark.sql.utils import install_exception_handler __all__ = ["SQLContext", "HiveContext"] class SQLContext(object): """The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x. As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class here for backward compatibility. A SQLContext can be used create :class:`DataFrame`, register :class:`DataFrame` as tables, execute SQL over tables, cache tables, and read parquet files. :param sparkContext: The :class:`SparkContext` backing this SQLContext. :param sparkSession: The :class:`SparkSession` around which this SQLContext wraps. :param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new SQLContext in the JVM, instead we make all calls to this object. """ _instantiatedContext = None @ignore_unicode_prefix def __init__(self, sparkContext, sparkSession=None, jsqlContext=None): """Creates a new SQLContext. >>> from datetime import datetime >>> sqlContext = SQLContext(sc) >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1, ... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1), ... time=datetime(2014, 8, 1, 14, 1, 5))]) >>> df = allTypes.toDF() >>> df.createOrReplaceTempView("allTypes") >>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a ' ... 'from allTypes where b and i > 0').collect() [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \ dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)] >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect() [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])] """ self._sc = sparkContext self._jsc = self._sc._jsc self._jvm = self._sc._jvm if sparkSession is None: sparkSession = SparkSession.builder.getOrCreate() if jsqlContext is None: jsqlContext = sparkSession._jwrapped self.sparkSession = sparkSession self._jsqlContext = jsqlContext _monkey_patch_RDD(self.sparkSession) install_exception_handler() if SQLContext._instantiatedContext is None: SQLContext._instantiatedContext = self @property def _ssql_ctx(self): """Accessor for the JVM Spark SQL context. 
Subclasses can override this property to provide their own JVM Contexts. """ return self._jsqlContext @property def _conf(self): """Accessor for the JVM SQL-specific configurations""" return self.sparkSession._jsparkSession.sessionState().conf() @classmethod @since(1.6) def getOrCreate(cls, sc): """ Get the existing SQLContext or create a new one with given SparkContext. :param sc: SparkContext """ if cls._instantiatedContext is None: jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc()) sparkSession = SparkSession(sc, jsqlContext.sparkSession()) cls(sc, sparkSession, jsqlContext) return cls._instantiatedContext @since(1.6) def newSession(self): """ Returns a new SQLContext as new session, that has separate SQLConf, registered temporary views and UDFs, but shared SparkContext and table cache. """ return self.__class__(self._sc, self.sparkSession.newSession()) @since(1.3) def setConf(self, key, value): """Sets the given Spark SQL configuration property. """ self.sparkSession.conf.set(key, value) @ignore_unicode_prefix @since(1.3) def getConf(self, key, defaultValue=_NoValue): """Returns the value of Spark SQL configuration property for the given key. If the key is not set and defaultValue is set, return defaultValue. If the key is not set and defaultValue is not set, return the system default value. >>> sqlContext.getConf("spark.sql.shuffle.partitions") u'200' >>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10") u'10' >>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50") >>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10") u'50' """ return self.sparkSession.conf.get(key, defaultValue) @property @since("1.3.1") def udf(self): """Returns a :class:`UDFRegistration` for UDF registration. :return: :class:`UDFRegistration` """ return self.sparkSession.udf @since(1.4) def range(self, start, end=None, step=1, numPartitions=None): """ Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with step value ``step``. :param start: the start value :param end: the end value (exclusive) :param step: the incremental step (default: 1) :param numPartitions: the number of partitions of the DataFrame :return: :class:`DataFrame` >>> sqlContext.range(1, 7, 2).collect() [Row(id=1), Row(id=3), Row(id=5)] If only one argument is specified, it will be used as the end value. >>> sqlContext.range(3).collect() [Row(id=0), Row(id=1), Row(id=2)] """ return self.sparkSession.range(start, end, step, numPartitions) @since(1.2) def registerFunction(self, name, f, returnType=None): """An alias for :func:`spark.udf.register`. See :meth:`pyspark.sql.UDFRegistration.register`. .. note:: Deprecated in 2.3.0. Use :func:`spark.udf.register` instead. """ warnings.warn( "Deprecated in 2.3.0. Use spark.udf.register instead.", DeprecationWarning) return self.sparkSession.udf.register(name, f, returnType) @since(2.1) def registerJavaFunction(self, name, javaClassName, returnType=None): """An alias for :func:`spark.udf.registerJavaFunction`. See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`. .. note:: Deprecated in 2.3.0. Use :func:`spark.udf.registerJavaFunction` instead. """ warnings.warn( "Deprecated in 2.3.0. 
Use spark.udf.registerJavaFunction instead.", DeprecationWarning) return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType) # TODO(andrew): delete this once we refactor things to take in SparkSession def _inferSchema(self, rdd, samplingRatio=None): """ Infer schema from an RDD of Row or tuple. :param rdd: an RDD of Row or tuple :param samplingRatio: sampling ratio, or no sampling (default) :return: :class:`pyspark.sql.types.StructType` """ return self.sparkSession._inferSchema(rdd, samplingRatio) @since(1.3) @ignore_unicode_prefix def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True): """ Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`. When ``schema`` is a list of column names, the type of each column will be inferred from ``data``. When ``schema`` is ``None``, it will try to infer the schema (column names and types) from ``data``, which should be an RDD of :class:`Row`, or :class:`namedtuple`, or :class:`dict`. When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match the real data, or an exception will be thrown at runtime. If the given schema is not :class:`pyspark.sql.types.StructType`, it will be wrapped into a :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value", each record will also be wrapped into a tuple, which can be converted to row later. If schema inference is needed, ``samplingRatio`` is used to determined the ratio of rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``. :param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`, :class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or :class:`pandas.DataFrame`. :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of column names, default is None. The data type string format equals to :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`. :param samplingRatio: the sample ratio of rows used for inferring :param verifySchema: verify data types of every row against schema. :return: :class:`DataFrame` .. versionchanged:: 2.0 The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a datatype string after 2.0. If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a :class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple. .. versionchanged:: 2.1 Added verifySchema. >>> l = [('Alice', 1)] >>> sqlContext.createDataFrame(l).collect() [Row(_1=u'Alice', _2=1)] >>> sqlContext.createDataFrame(l, ['name', 'age']).collect() [Row(name=u'Alice', age=1)] >>> d = [{'name': 'Alice', 'age': 1}] >>> sqlContext.createDataFrame(d).collect() [Row(age=1, name=u'Alice')] >>> rdd = sc.parallelize(l) >>> sqlContext.createDataFrame(rdd).collect() [Row(_1=u'Alice', _2=1)] >>> df = sqlContext.createDataFrame(rdd, ['name', 'age']) >>> df.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql import Row >>> Person = Row('name', 'age') >>> person = rdd.map(lambda r: Person(*r)) >>> df2 = sqlContext.createDataFrame(person) >>> df2.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql.types import * >>> schema = StructType([ ... 
StructField("name", StringType(), True), ... StructField("age", IntegerType(), True)]) >>> df3 = sqlContext.createDataFrame(rdd, schema) >>> df3.collect() [Row(name=u'Alice', age=1)] >>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP [Row(name=u'Alice', age=1)] >>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP [Row(0=1, 1=2)] >>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect() [Row(a=u'Alice', b=1)] >>> rdd = rdd.map(lambda row: row[1]) >>> sqlContext.createDataFrame(rdd, "int").collect() [Row(value=1)] >>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... Py4JJavaError: ... """ return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema) @since(1.3) def registerDataFrameAsTable(self, df, tableName): """Registers the given :class:`DataFrame` as a temporary table in the catalog. Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`. >>> sqlContext.registerDataFrameAsTable(df, "table1") """ df.createOrReplaceTempView(tableName) @since(1.6) def dropTempTable(self, tableName): """ Remove the temp table from catalog. >>> sqlContext.registerDataFrameAsTable(df, "table1") >>> sqlContext.dropTempTable("table1") """ self.sparkSession.catalog.dropTempView(tableName) @since(1.3) def createExternalTable(self, tableName, path=None, source=None, schema=None, **options): """Creates an external table based on the dataset in a data source. It returns the DataFrame associated with the external table. The data source is specified by the ``source`` and a set of ``options``. If ``source`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and created external table. :return: :class:`DataFrame` """ return self.sparkSession.catalog.createExternalTable( tableName, path, source, schema, **options) @ignore_unicode_prefix @since(1.0) def sql(self, sqlQuery): """Returns a :class:`DataFrame` representing the result of the given query. :return: :class:`DataFrame` >>> sqlContext.registerDataFrameAsTable(df, "table1") >>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1") >>> df2.collect() [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')] """ return self.sparkSession.sql(sqlQuery) @since(1.0) def table(self, tableName): """Returns the specified table or view as a :class:`DataFrame`. :return: :class:`DataFrame` >>> sqlContext.registerDataFrameAsTable(df, "table1") >>> df2 = sqlContext.table("table1") >>> sorted(df.collect()) == sorted(df2.collect()) True """ return self.sparkSession.table(tableName) @ignore_unicode_prefix @since(1.3) def tables(self, dbName=None): """Returns a :class:`DataFrame` containing names of tables in the given database. If ``dbName`` is not specified, the current database will be used. The returned DataFrame has two columns: ``tableName`` and ``isTemporary`` (a column with :class:`BooleanType` indicating if a table is a temporary one or not). :param dbName: string, name of the database to use. 
:return: :class:`DataFrame` >>> sqlContext.registerDataFrameAsTable(df, "table1") >>> df2 = sqlContext.tables() >>> df2.filter("tableName = 'table1'").first() Row(database=u'', tableName=u'table1', isTemporary=True) """ if dbName is None: return DataFrame(self._ssql_ctx.tables(), self) else: return DataFrame(self._ssql_ctx.tables(dbName), self) @since(1.3) def tableNames(self, dbName=None): """Returns a list of names of tables in the database ``dbName``. :param dbName: string, name of the database to use. Default to the current database. :return: list of table names, in string >>> sqlContext.registerDataFrameAsTable(df, "table1") >>> "table1" in sqlContext.tableNames() True >>> "table1" in sqlContext.tableNames("default") True """ if dbName is None: return [name for name in self._ssql_ctx.tableNames()] else: return [name for name in self._ssql_ctx.tableNames(dbName)] @since(1.0) def cacheTable(self, tableName): """Caches the specified table in-memory.""" self._ssql_ctx.cacheTable(tableName) @since(1.0) def uncacheTable(self, tableName): """Removes the specified table from the in-memory cache.""" self._ssql_ctx.uncacheTable(tableName) @since(1.3) def clearCache(self): """Removes all cached tables from the in-memory cache. """ self._ssql_ctx.clearCache() @property @since(1.4) def read(self): """ Returns a :class:`DataFrameReader` that can be used to read data in as a :class:`DataFrame`. :return: :class:`DataFrameReader` """ return DataFrameReader(self) @property @since(2.0) def readStream(self): """ Returns a :class:`DataStreamReader` that can be used to read data streams as a streaming :class:`DataFrame`. .. note:: Evolving. :return: :class:`DataStreamReader` >>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp()) >>> text_sdf.isStreaming True """ return DataStreamReader(self) @property @since(2.0) def streams(self): """Returns a :class:`StreamingQueryManager` that allows managing all the :class:`StreamingQuery` StreamingQueries active on `this` context. .. note:: Evolving. """ from pyspark.sql.streaming import StreamingQueryManager return StreamingQueryManager(self._ssql_ctx.streams()) class HiveContext(SQLContext): """A variant of Spark SQL that integrates with data stored in Hive. Configuration for Hive is read from ``hive-site.xml`` on the classpath. It supports running both SQL and HiveQL commands. :param sparkContext: The SparkContext to wrap. :param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new :class:`HiveContext` in the JVM, instead we make all calls to this object. .. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate(). """ def __init__(self, sparkContext, jhiveContext=None): warnings.warn( "HiveContext is deprecated in Spark 2.0.0. Please use " + "SparkSession.builder.enableHiveSupport().getOrCreate() instead.", DeprecationWarning) if jhiveContext is None: sparkContext._conf.set("spark.sql.catalogImplementation", "hive") sparkSession = SparkSession.builder._sparkContext(sparkContext).getOrCreate() else: sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession()) SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext) @classmethod def _createForTesting(cls, sparkContext): """(Internal use only) Create a new HiveContext for testing. All test code that touches HiveContext *must* go through this method. Otherwise, you may end up launching multiple derby instances and encounter with incredibly confusing error messages. 
""" jsc = sparkContext._jsc.sc() jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False) return cls(sparkContext, jtestHive) def refreshTable(self, tableName): """Invalidate and refresh all the cached the metadata of the given table. For performance reasons, Spark SQL or the external data source library it uses might cache certain metadata about a table, such as the location of blocks. When those change outside of Spark SQL, users should call this function to invalidate the cache. """ self._ssql_ctx.refreshTable(tableName) def _test(): import os import doctest import tempfile from pyspark.context import SparkContext from pyspark.sql import Row, SQLContext import pyspark.sql.context os.chdir(os.environ["SPARK_HOME"]) globs = pyspark.sql.context.__dict__.copy() sc = SparkContext('local[4]', 'PythonTest') globs['tempfile'] = tempfile globs['os'] = os globs['sc'] = sc globs['sqlContext'] = SQLContext(sc) globs['rdd'] = rdd = sc.parallelize( [Row(field1=1, field2="row1"), Row(field1=2, field2="row2"), Row(field1=3, field2="row3")] ) globs['df'] = rdd.toDF() jsonStrings = [ '{"field1": 1, "field2": "row1", "field3":{"field4":11}}', '{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},' '"field6":[{"field7": "row2"}]}', '{"field1" : null, "field2": "row3", ' '"field3":{"field4":33, "field5": []}}' ] globs['jsonStrings'] = jsonStrings globs['json'] = sc.parallelize(jsonStrings) (failure_count, test_count) = doctest.testmod( pyspark.sql.context, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE) globs['sc'].stop() if failure_count: sys.exit(-1) if __name__ == "__main__": _test()
import re import urlparse import pprint import os import sys import redis from beaker.middleware import SessionMiddleware from MacroExecutor import MacroExecutorPage, MacroExecutorWiki, MacroExecutorPreprocess from PortalAuthenticatorOSIS import PortalAuthenticatorOSIS from RequestContext import RequestContext from PortalRest import PortalRest from .OsisBeaker import OsisBeaker from JumpScale import j from gevent.pywsgi import WSGIServer import gevent import time import mimeparse import mimetypes import urllib import cgi import JumpScale.grid.agentcontroller BLOCK_SIZE = 4096 CONTENT_TYPE_JSON = 'application/json' CONTENT_TYPE_JS = 'application/javascript' CONTENT_TYPE_YAML = 'application/yaml' CONTENT_TYPE_PLAIN = 'text/plain' CONTENT_TYPE_HTML = 'text/html' CONTENT_TYPE_PNG = 'image/png' class PortalServer: ##################### INIT def __init__(self): self.hrd = j.application.instanceconfig self.contentdirs = list() self.libpath = j.html.getHtmllibDir() self.started = False self.epoch = time.time() self.cfgdir="cfg" j.core.portal.active=self self.osis = j.core.osis.getClientByInstance() self.pageKey2doc = {} self.routes = {} self.loadConfig() macroPathsPreprocessor = ["macros/preprocess"] macroPathsWiki = ["macros/wiki"] macroPathsPage = ["macros/page"] self.macroexecutorPreprocessor = MacroExecutorPreprocess(macroPathsPreprocessor) self.macroexecutorPage = MacroExecutorPage(macroPathsPage) self.macroexecutorWiki = MacroExecutorWiki(macroPathsWiki) self.bootstrap() session_opts = { 'session.cookie_expires': False, 'session.type': 'OsisBeaker', 'session.namespace_class': OsisBeaker, 'session.namespace_args': {'client': self.osis}, 'session.data_dir': '%s' % j.system.fs.joinPaths(j.dirs.varDir, "beakercache") } self._router = SessionMiddleware(self.router, session_opts) self._webserver = WSGIServer((self.listenip, self.port), self._router) # wwwroot = wwwroot.replace("\\", "/") # while len(wwwroot) > 0 and wwwroot[-1] == "/": # wwwroot = wwwroot[:-1] # self.wwwroot = wwwroot self.confluence2htmlconvertor = j.tools.docgenerator.getConfluence2htmlConvertor() self.activejobs = list() self.jobids2greenlets = dict() self.schedule1min = {} self.schedule15min = {} self.schedule60min = {} self.rediscache=redis.StrictRedis(host='localhost', port=9999, db=0) self.redisprod=redis.StrictRedis(host='localhost', port=9999, db=0) self.jslibroot=j.system.fs.joinPaths(j.dirs.baseDir,"apps","portals","jslib") self.auth=PortalAuthenticatorOSIS(self.osis) self.loadSpaces() self.rest=PortalRest(self) def loadConfig(self): def replaceVar(txt): # txt = txt.replace("$base", j.dirs.baseDir).replace("\\", "/") txt = txt.replace("$appdir", j.system.fs.getcwd()).replace("\\", "/") txt = txt.replace("$vardir", j.dirs.varDir).replace("\\", "/") txt = txt.replace("$htmllibdir", j.html.getHtmllibDir()).replace("\\", "/") txt = txt.replace("\\", "/") return txt ######INIT FILE ini = j.tools.inifile.open(self.cfgdir + "/portal.cfg") if ini.checkParam("main", "appdir"): self.appdir = replaceVar(ini.getValue("main", "appdir")) self.appdir=self.appdir.replace("$base",j.dirs.baseDir) else: self.appdir = j.system.fs.getcwd() self.getContentDirs() #contentdirs need to be loaded before we go to other dir of base server j.system.fs.changeDir(self.appdir) self.listenip = '0.0.0.0' if ini.checkSection('main') and ini.checkParam('main', 'listenip'): self.listenip = ini.getValue('main', 'listenip') self.port = int(ini.getValue("main", "webserverport")) self.addr = ini.getValue("main", "pubipaddr") self.logdir= 
j.system.fs.joinPaths(j.dirs.logDir,"portal",str(self.port)) j.system.fs.createDir(self.logdir) self.secret = ini.getValue("main", "secret") self.admingroups = ini.getValue("main", "admingroups").split(",") self.filesroot = replaceVar(ini.getValue("main", "filesroot")) j.system.fs.createDir(self.filesroot) if ini.checkParam('main', 'defaultspace'): self.defaultspace = ini.getValue('main', 'defaultspace') or 'system' else: self.defaultspace = 'system' if ini.checkParam('main', 'defaulpage'): self.defaultpage = ini.getValue('main', 'defaultpage') or "" else: self.defaultpage = "" self.getContentDirs() def reset(self): self.routes={} self.loadConfig() self.bootstrap() j.core.codegenerator.resetMemNonSystem() j.core.specparser.resetMemNonSystem() # self.actorsloader.scan(path=self.contentdirs,reset=True) #do we need to load them all self.bucketsloader = j.core.portalloader.getBucketsLoader() self.spacesloader = j.core.portalloader.getSpacesLoader() self.loadSpaces() def bootstrap(self): self.actors = {} # key is the applicationName_actorname (lowercase) self.actorsloader = j.core.portalloader.getActorsLoader() self.app_actor_dict = {} self.taskletengines = {} self.actorsloader.reset() # self.actorsloader._generateLoadActor("system", "contentmanager", actorpath="%s/apps/portalbase/system/system__contentmanager/"%j.dirs.baseDir) # self.actorsloader._generateLoadActor("system", "master", actorpath="system/system__master/") # self.actorsloader._generateLoadActor("system", "usermanager", actorpath="system/system__usermanager/") self.actorsloader.scan(self.contentdirs) self.actorsloader.getActor("system", "contentmanager") self.actorsloader.getActor("system", "usermanager") def loadSpaces(self): self.bucketsloader = j.core.portalloader.getBucketsLoader() self.spacesloader = j.core.portalloader.getSpacesLoader() self.bucketsloader.scan(self.contentdirs) self.spacesloader.scan(self.contentdirs) if "system" not in self.spacesloader.spaces: raise RuntimeError("could not find system space") self.spacesloader.spaces["system"].loadDocProcessor() #need to make sure we have content for the systemspace def getContentDirs(self): """ walk over known content dirs & execute loader on it """ cfgpath = j.system.fs.joinPaths(self.cfgdir, "contentdirs.cfg") def append(path): path=j.system.fs.pathNormalize(path) if path not in self.contentdirs: self.contentdirs.append(path) if j.system.fs.exists(cfgpath): wikicfg = j.system.fs.fileGetContents(cfgpath) paths = wikicfg.split("\n") for path in paths: path = path.strip() if path=="" or path[0]=="#": continue path=path.replace("\\","/") if path[0] != "/" and path.find(":") == -1: path = j.system.fs.joinPaths(j.system.fs.getParent(self.cfgdir), path) append(path) #add own base path self.basepath = j.system.fs.joinPaths(j.system.fs.getParent(self.cfgdir), "base") j.system.fs.createDir(self.basepath) append(self.basepath) #add base path of parent portal appdir = self.appdir append(j.system.fs.joinPaths(appdir, "wiki")) append(j.system.fs.joinPaths(appdir, "system")) def unloadActorFromRoutes(self, appname, actorname): for key in self.routes.keys(): appn, actorn, remaining = key.split("_", 2) # print appn+" "+appname+" "+actorn+" "+actorname if appn == appname and actorn == actorname: self.routes.pop(key) ##################### USER RIGHTS def getUserRight(self, ctx, space): if space == "" or space not in self.spacesloader.spaces: space = "system" spaceobject = self.spacesloader.spaces[space] # print "spaceobject" # print spaceobject.model if "user" in 
ctx.env['beaker.session']: username = ctx.env['beaker.session']["user"] else: username = "" if username == "": right = "" else: if username=="guest": groupsusers=["guest","guests"] else: groupsusers=self.auth.getGroups(username) right = "" if "admin" in groupsusers: right = "*" # print "groupsusers:%s"%groupsusers if right == "": for groupuser in groupsusers: if groupuser in spaceobject.model.acl: # found match right = spaceobject.model.acl[groupuser] break # if right == "": # #check bitbucket # for key,acl in spaceobject.model.acl.iteritems(): # if key.find("bitbucket")==0: # from IPython import embed # print "DEBUG NOW ooooooo" # embed() if right == "*": right = "rwa" # print "right:%s" % right return username, right def getUserFromCTX(self,ctx): return str(ctx.env["beaker.session"]["user"]) def getGroupsFromCTX(self,ctx): groups=self.auth.getGroups(ctx.env["beaker.session"]["user"]) return [str(item.lower()) for item in groups] def isAdminFromCTX(self,ctx): groups=self.getGroupsFromCTX(ctx) for gr in groups: if gr in self.admingroups: return True return False def isLoggedInFromCTX(self,ctx): user=self.getUserFromCTX(ctx) if user<>"" and user<>"guest": return True return False ##################### process pages, get docs def getpage(self): page = j.tools.docgenerator.pageNewHTML("index.html", htmllibPath="/jslib") return page def sendpage(self, page, start_response): contenttype = "text/html" start_response('200 OK', [('Content-Type', contenttype), ]) return [page.getContent()] def getDoc(self, space, name, ctx, params={}): print "GETDOC:%s" % space space = space.lower() name = name.lower() if name in ["login", "error", "accessdenied", "pagenotfound"]: right = "r" if space == "" and name == "": space = self.defaultspace name = self.defaultpage username, right = self.getUserRight(ctx, space) print "# space:%s name:%s user:%s right:%s" % (space, name, username, right) if "r" not in right: space = 'system' name = "accessdenied" if name != "accessdenied" and name != "pagenotfound": # check security if right == "": params["space"] = space params["page"] = name doc, params = self.getDoc(space, "accessdenied", ctx, params=params) return doc, params else: right = "r" if space not in self.spacesloader.spaces: if space == "system": raise RuntimeError("wiki has not loaded system space, cannot continue") print "could not find space %s" % space doc, params = self.getDoc("system", "pagenotfound", ctx, params) if "space" not in params: params["space"] = space if "page" not in params: params["page"] = name print "could not find space %s" % space ctx.params["error"] = "Could not find space %s\n" % space else: spaceObject = self.spacesloader.getLoaderFromId(space) if spaceObject.docprocessor == None: spaceObject.loadDocProcessor(force=True) # dynamic load of space spacedocgen = spaceObject.docprocessor if name != "" and name in spacedocgen.name2doc: doc = spacedocgen.name2doc[name] else: if name == "accessdenied": # means the accessdenied page does not exist doc, params = self.getDoc("system", "accessdenied", ctx, params) return doc, params if name == "pagenotfound": # means the nofound page does not exist doc, params = self.getDoc("system", "pagenotfound", ctx, params) ctx.start_response("404 Not found", []) return doc, params if name == "": if space in spacedocgen.name2doc: doc = spacedocgen.name2doc[space] elif "home" in spacedocgen.name2doc: doc = spacedocgen.name2doc["home"] else: ctx.params["path"] = "space:%s pagename:%s" % (space, name) # print ctx.params["path"] if "space" not in params: 
params["space"] = space if "page" not in params: params["page"] = name doc, params = self.getDoc(space, "pagenotfound", ctx, params) else: ctx.params["path"] = "space:%s pagename:%s" % (space, name) doc, params = self.getDoc(space, "pagenotfound", ctx, params) ctx.params["rights"] = right doc.loadFromDisk() if name == "pagenotfound": ctx.start_response("404 Not found", []) return doc, params def returnDoc(self, ctx, start_response, space, docname, extraParams={}): doc, params = self.getDoc(space, docname, ctx, params=ctx.params) if doc.dirty or "reload" in ctx.params: doc.loadFromDisk() doc.preprocess() ctx.params.update(extraParams) # doc.applyParams(ctx.params) content,doc = doc.executeMacrosDynamicWiki(paramsExtra=extraParams, ctx=ctx) page = self.confluence2htmlconvertor.convert(content, doc=doc, requestContext=ctx, page=self.getpage(), paramsExtra=ctx.params) if not 'postprocess' in page.processparameters or page.processparameters['postprocess']: page.body = page.body.replace("$$space", space) page.body = page.body.replace("$$page", doc.original_name) page.body = page.body.replace("$$path", doc.path) page.body = page.body.replace("$$querystr", ctx.env['QUERY_STRING']) page.body = page.body.replace("$$$menuright", "") if "todestruct" in doc.__dict__: doc.destructed = True ctx.start_response('200 OK', [('Content-Type', "text/html"), ]) return page def processor_page(self, environ, start_response, wwwroot, path, prefix="", webprefix="", index=False,includedocs=False,ctx=None,space=None): def indexignore(item): ext = item.split(".")[-1].lower() if ext in ["pyc", "pyo", "bak"]: return True if item[0] == "_": return True if item[0] == ".": return True return False def formatContent(contenttype, path, template, start_response): content = j.system.fs.fileGetContents(path) page = self.getpage() page.addCodeBlock(content, template, edit=True) start_response('200 OK', [('Content-Type', contenttype), ]) return [str(page)] def processHtml(contenttype, path, start_response,ctx,space): content = j.system.fs.fileGetContents(path) r = r"\[\[.*\]\]" #@todo does not seem right to me for match in j.codetools.regex.yieldRegexMatches(r, content): docname = match.founditem.replace("[", "").replace("]", "") doc, params = self.getDoc(space, docname, ctx, params=ctx.params) if doc.name=='pagenotfound': content=content.replace(match.founditem,"*****CONTENT '%s' NOT FOUND******"%docname) else: content2,doc = doc.executeMacrosDynamicWiki(paramsExtra={}, ctx=ctx) page = self.confluence2htmlconvertor.convert(content2, doc=doc, requestContext=ctx, page=self.getpage(), paramsExtra=ctx.params) page.body = page.body.replace("$$space", space) page.body = page.body.replace("$$page", doc.original_name) page.body = page.body.replace("$$path", doc.path) page.body = page.body.replace("$$querystr", ctx.env['QUERY_STRING']) page.body = page.body.replace("$$$menuright", "") content=content.replace(match.founditem,page.body) start_response('200 OK', [('Content-Type', "text/html"), ]) return [content] def removePrefixes(path): path = path.replace("\\", "/") path = path.replace("//", "/") path = path.replace("//", "/") while len(path) > 0 and path[0] == "/": path = path[1:] while path.find(webprefix + "/") == 0: path = path[len(webprefix) + 1:] while path.find(prefix + "/") == 0: path = path[len(prefix) + 1:] return path def send_file(file_path, size): # print "sendfile:%s" % path f = open(file_path, "rb") block = f.read(BLOCK_SIZE * 10) BLOCK_SIZE2 = 0 # print "%s %s" % (file_path,size) while BLOCK_SIZE2 < size: BLOCK_SIZE2 += 
len(block) # print BLOCK_SIZE2 # print len(block) yield block block = f.read(BLOCK_SIZE) # print "endfile" wwwroot = wwwroot.replace("\\", "/").strip() path = removePrefixes(path) # print "wwwroot:%s" % wwwroot if not wwwroot.replace("/", "") == "": pathfull = wwwroot + "/" + path else: pathfull = path contenttype = "text/html" content = "" headers = list() ext = path.split(".")[-1].lower() contenttype = mimetypes.guess_type(pathfull)[0] if path == "favicon.ico": pathfull = "wiki/System/favicon.ico" if not j.system.fs.exists(pathfull): if j.system.fs.exists(pathfull + '.gz') and 'gzip' in environ.get('HTTP_ACCEPT_ENCODING'): pathfull += ".gz" headers.append(('Vary', 'Accept-Encoding')) headers.append(('Content-Encoding', 'gzip')) else: print "error" headers = [('Content-Type', contenttype), ] start_response("404 Not found", headers) return ["path %s not found" % path] size = os.path.getsize(pathfull) if ext == "html": return processHtml(contenttype, pathfull, start_response,ctx,space) elif ext == "wiki": contenttype = "text/html" # return formatWikiContent(pathfull,start_response) return formatContent(contenttype, pathfull, "python", start_response) elif ext == "py": contenttype = "text/html" return formatContent(contenttype, pathfull, "python", start_response) elif ext == "spec": contenttype = "text/html" return formatContent(contenttype, pathfull, "python", start_response) # print contenttype status = '200 OK' headers.append(('Content-Type', contenttype)) headers.append(("Content-length", str(size))) headers.append(("Cache-Control", 'public,max-age=3600')) start_response(status, headers) if content != "": return [content] else: return send_file(pathfull, size) def process_elfinder(self, path, ctx): from JumpScale.portal.html import elFinder db = j.db.keyvaluestore.getMemoryStore('elfinder') rootpath = db.cacheGet(path) options = {'root': rootpath, 'dotFiles': True} con = elFinder.connector(options) params = ctx.params.copy() if 'rawdata' in params: from JumpScale.portal.html import multipart from cStringIO import StringIO ctx.env.pop('wsgi.input', None) stream = StringIO(ctx.params.pop('rawdata')) forms, files = multipart.parse_form_data(ctx.env, stream=stream) params.update(forms) for key, value in files.iteritems(): if key == 'upload[]': params['upload[]'] = dict() params['upload[]'][value.filename] = value.file if params.get('init') == '1': params.pop('target', None) status, header, response = con.run(params) status = '%s' % status headers = [ (k, v) for k,v in header.iteritems() ] ctx.start_response(status, headers) if 'download' not in params: response = j.db.serializers.getSerializerType('j').dumps(response) else: response = response['content'] return [response] def path2spacePagename(self, path): pagename = "" if path.find("?") != -1: path = path.split("?")[0] while len(path) > 0 and path[-1] == "/": path = path[:-1] if path.find("/") == -1: space = path.strip() else: splitted = path.split("/") space = splitted[0].lower() pagename = splitted[-1].lower() return space, pagename ##################### FORMATTING + logs/raiseerror def log(self, ctx, user, path, space="", pagename=""): path2 = j.system.fs.joinPaths(self.logdir, "user_%s.log" % user) epoch = j.base.time.getTimeEpoch() + 3600 * 6 hrtime = j.base.time.epoch2HRDateTime(epoch) if False and self.geoIP != None: # @todo fix geoip, also make sure nginx forwards the right info ee = self.geoIP.record_by_addr(ctx.env["REMOTE_ADDR"]) loc = "%s_%s_%s" % (ee["area_code"], ee["city"], ee["region_name"]) else: loc = "" msg = 
"%s|%s|%s|%s|%s|%s|%s\n" % (hrtime, ctx.env["REMOTE_ADDR"], epoch, space, pagename, path, loc) j.system.fs.writeFile(path2, msg, True) if space != "": msg = "%s|%s|%s|%s|%s|%s|%s\n" % (hrtime, ctx.env["REMOTE_ADDR"], epoch, user, pagename, path, loc) pathSpace = j.system.fs.joinPaths(self.logdir, "space_%s.log" % space) j.system.fs.writeFile(pathSpace, msg, True) def raiseError(self, ctx, msg="", msginfo="", errorObject=None, httpcode="500 Internal Server Error"): """ """ if not ctx.checkFormat(): # error in format eco = j.errorconditionhandler.getErrorConditionObject() eco.errormessage = "only format supported = human or json, format is put with param &format=..." eco.type = "INPUT" print "WRONG FORMAT" else: if errorObject != None: eco = errorObject else: eco = j.errorconditionhandler.getErrorConditionObject() method = ctx.env["PATH_INFO"] remoteAddress = ctx.env["REMOTE_ADDR"] queryString = ctx.env["QUERY_STRING"] eco.caller = remoteAddress if msg != "": eco.errormessage = msg else: eco.errormessage = "" if msginfo != "": eco.errormessage += "\msginfo was:\n%s" % msginfo if queryString != "": eco.errormessage += "\nquerystr was:%s" % queryString if method != "": eco.errormessage += "\nmethod was:%s" % method eco.process() if ctx.fformat == "human" or ctx.fformat == "text": if msginfo != None and msginfo != "": msg += "\n<br>%s" % msginfo else: msg += "\n%s" % eco msg = self._text2html(msg) else: # is json # result=[] # result["error"]=eco.obj2dict() def todict(obj): data = {} for key, value in obj.__dict__.iteritems(): try: data[key] = todict(value) except AttributeError: data[key] = value return data eco.tb="" eco.frames=[] msg = j.db.serializers.getSerializerType('j').dumps(todict(eco)) ctx.start_response(httpcode, [('Content-Type', 'text/html')]) j.console.echo("***ERROR***:%s : method %s from ip %s with params %s" % ( eco, method, remoteAddress, queryString), 2) if j.application.debug: return msg else: return "An unexpected error has occurred, please try again later." 
def _text2html(self, text): text = text.replace("\n", "<br>") # text=text.replace(" ","&nbsp; ") return text def _text2htmlSerializer(self, content): return self._text2html(pprint.pformat(content)) def _resultjsonSerializer(self, content): return j.db.serializers.getSerializerType('j').dumps({"result": content}) def _resultyamlSerializer(self, content): return j.code.object2yaml({"result": content}) def getMimeType(self, contenttype, format_types): supported_types = ["text/plain", "text/html", "application/yaml", "application/json"] CONTENT_TYPES = { "text/plain": str, "text/html": self._text2htmlSerializer, "application/yaml": self._resultyamlSerializer, "application/json": j.db.serializers.getSerializerType('j').dumps } if not contenttype: serializer = format_types["text"]["serializer"] return CONTENT_TYPE_HTML, serializer else: mimeType = mimeparse.best_match(supported_types, contenttype) serializer = CONTENT_TYPES[mimeType] return mimeType, serializer def reformatOutput(self, ctx, result, restreturn=False): FFORMAT_TYPES = { "text": {"content_type": CONTENT_TYPE_HTML, "serializer": self._text2htmlSerializer}, "html": {"content_type": CONTENT_TYPE_HTML, "serializer": self._text2htmlSerializer}, "raw": {"content_type": CONTENT_TYPE_PLAIN, "serializer": str}, "jsonraw": {"content_type": CONTENT_TYPE_JSON, "serializer": j.db.serializers.getSerializerType('j').dumps}, "json": {"content_type": CONTENT_TYPE_JSON, "serializer": self._resultjsonSerializer}, "yaml": {"content_type": CONTENT_TYPE_YAML, "serializer": self._resultyamlSerializer} } if '_jsonp' in ctx.params: result = {'httpStatus': ctx.httpStatus, 'httpMessage': ctx.httpMessage, 'body': result} return CONTENT_TYPE_JS, "%s(%s);" % (ctx.params['_jsonp'], j.db.serializers.getSerializerType('j').dumps(result)) if ctx._response_started: return None, result fformat = ctx.fformat if '_png' in ctx.params: return CONTENT_TYPE_PNG, result if "CONTENT_TYPE" not in ctx.env: ctx.env['CONTENT_TYPE'] = CONTENT_TYPE_PLAIN if ctx.env['CONTENT_TYPE'].find("form-") != -1: ctx.env['CONTENT_TYPE'] = CONTENT_TYPE_PLAIN # normally HTTP_ACCEPT defines the return type we should rewrite this if fformat: # extra format paramter overrides http_accept header return FFORMAT_TYPES[fformat]['content_type'], FFORMAT_TYPES[fformat]['serializer'](result) else: if 'HTTP_ACCEPT' in ctx.env: returntype = ctx.env['HTTP_ACCEPT'] else: returntype = ctx.env['CONTENT_TYPE'] content_type, serializer = self.getMimeType(returntype, FFORMAT_TYPES) return content_type, serializer(result) ##################### router def startSession(self, ctx, path): session = ctx.env['beaker.session'] if "authkey" in ctx.params: # user is authenticated by a special key key = ctx.params["authkey"] if self.auth.existsKey(key): username = self.auth.getUserFromKey(key) session['user'] = username session.save() elif key == self.secret: session['user'] = 'admin' session.save() else: # check if authkey is a session newsession = session.get_by_id(key) if newsession: session = newsession ctx.env['beaker.session'] = session else: ctx.start_response('419 Authentication Timeout', []) return False, [str(self.returnDoc(ctx, ctx.start_response, "system", "accessdenied", extraParams={"path": path}))] if "user_logoff_" in ctx.params and not "user_login_" in ctx.params: session.delete() return False, [str(self.returnDoc(ctx, ctx.start_response, "system", "login", extraParams={"path": path}))] if "user_login_" in ctx.params: # user has filled in his login details, this is response on posted info name = 
ctx.params['user_login_'] if not ctx.params.has_key('passwd'): secret="" else: secret = ctx.params['passwd'] if self.auth.authenticate(name, secret): session['user'] = name if "querystr" in session: ctx.env['QUERY_STRING'] = session['querystr'] else: ctx.env['QUERY_STRING'] = "" session.save() # user is loging in from login page redirect him to home if path.endswith('system/login'): status = '302' headers = [ ('Location', "/"), ] ctx.start_response(status, headers) return False, [""] else: session['user'] = "" session["querystr"] = "" session.save() return False, [str(self.returnDoc(ctx, ctx.start_response, "system", "accessdenied", extraParams={"path": path}))] if "user" not in session or session["user"] == "": session['user'] = "guest" session.save() if "querystr" in session: session["querystr"] = "" session.save() return True, session def _getParamsFromEnv(self, env, ctx): params = urlparse.parse_qs(env["QUERY_STRING"]) # HTTP parameters can be repeated multiple times, i.e. in case of using <select multiple> # Example: a=1&b=2&a=3 # # urlparse.parse_qs returns a dictionary of names & list of values. Then it's simplified # for lists with only a single element, e.g. # # {'a': ['1', '3'], 'b': ['2']} # # simplified to be # # {'a': ['1', '3'], 'b': '2'} params = dict(((k, v) if len(v) > 1 else (k, v[0])) for k, v in params.items()) if env["REQUEST_METHOD"] in ("POST", "PUT"): postData = env["wsgi.input"].read() if postData.strip() == "": return params msg = "postdata cannot be empty" self.raiseError(ctx, msg) if env['CONTENT_TYPE'].find("application/json") != -1: postParams = j.db.serializers.getSerializerType('j').loads(postData) if postParams: params.update(postParams) return params elif env['CONTENT_TYPE'].find("www-form-urlencoded") != -1: params.update(dict(urlparse.parse_qsl(postData))) return params else: params['rawdata'] = postData return params def router(self, environ, start_response): path = environ["PATH_INFO"].lstrip("/") print "path:%s" % path pathparts = path.split('/') if pathparts[0] == 'wiki': pathparts = pathparts[1:] if path.find("favicon.ico") != -1: return self.processor_page(environ, start_response, self.filesroot, "favicon.ico", prefix="") ctx = RequestContext(application="", actor="", method="", env=environ, start_response=start_response, path=path, params=None) ctx.params = self._getParamsFromEnv(environ, ctx) if path.find("jslib/") == 0: path = path[6:] user = "None" # self.log(ctx, user, path) return self.processor_page(environ, start_response, self.jslibroot, path, prefix="jslib/") if path.find("images/") == 0: space, image = pathparts[1:3] spaceObject = self.getSpace(space) image = image.lower() if image in spaceObject.docprocessor.images: path2 = spaceObject.docprocessor.images[image] return self.processor_page(environ, start_response, j.system.fs.getDirName(path2), j.system.fs.getBaseName(path2), prefix="images") ctx.start_response('404', []) if path.find("files/specs/") == 0: path = path[6:] user = "None" self.log(ctx, user, path) return self.processor_page(environ, start_response, self.filesroot, path, prefix="files/") if path.find(".files") != -1: user = "None" self.log(ctx, user, path) space = pathparts[0].lower() path = "/".join(pathparts[2:]) sploader = self.spacesloader.getSpaceFromId(space) filesroot = j.system.fs.joinPaths(sploader.model.path, ".files") return self.processor_page(environ, start_response, filesroot, path, prefix="") if path.find(".static") != -1: user = "None" self.log(ctx, user, path) space, pagename = self.path2spacePagename(path) 
space = pathparts[0].lower() path = "/".join(pathparts[2:]) sploader = self.spacesloader.getSpaceFromId(space) filesroot = j.system.fs.joinPaths(sploader.model.path, ".static") return self.processor_page(environ, start_response, filesroot, path, prefix="",includedocs=True,ctx=ctx,space=space) # user is logged in now is_session, session = self.startSession(ctx, path) if not is_session: return session user = session['user'] match = pathparts[0] path = "" if len(pathparts) > 1: path = "/".join(pathparts[1:]) if match == "restmachine": return self.rest.processor_rest(environ, start_response, path, human=False, ctx=ctx) elif match == "elfinder": return self.process_elfinder(path, ctx) elif match == "restextmachine": return self.rest.processor_restext(environ, start_response, path, human=False, ctx=ctx) elif match == "rest": space, pagename = self.path2spacePagename(path.strip("/")) self.log(ctx, user, path, space, pagename) return self.rest.processor_rest(environ, start_response, path, ctx=ctx) elif match == "restext": space, pagename = self.path2spacePagename(path.strip("/")) self.log(ctx, user, path, space, pagename) return self.rest.processor_restext(environ, start_response, path, ctx=ctx) elif match == "ping": status = '200 OK' headers = [ ('Content-Type', "text/html"), ] start_response(status, headers) return ["pong"] elif match == "files": self.log(ctx, user, path) return self.processor_page(environ, start_response, self.filesroot, path, prefix="files") elif match == "specs": return self.processor_page(environ, start_response, "specs", path, prefix="specs") elif match == "appservercode": return self.processor_page(environ, start_response, "code", path, prefix="code", webprefix="appservercode") elif match == "lib": # print self.libpath return self.processor_page(environ, start_response, self.libpath, path, prefix="lib") elif match == 'render': return self.render(environ, start_response) else: path = '/'.join(pathparts) ctx.params["path"] = '/'.join(pathparts) space, pagename = self.path2spacePagename(path) self.log(ctx, user, path, space, pagename) return [str(self.returnDoc(ctx, start_response, space, pagename, {}))] def render(self, environ, start_response): path = environ["PATH_INFO"].lstrip("/") query_string = environ["QUERY_STRING"].lstrip("/") params = cgi.parse_qs(query_string) content = params.get('content', [''])[0] space = params.get('render_space', None) if space: space = space[0] else: start_response('200 OK', [('Content-Type', "text/html")]) return 'Parameter "space" not supplied' doc = params.get('render_doc', None) if doc: doc = doc[0] else: start_response('200 OK', [('Content-Type', "text/html")]) return 'Parameter "doc" not supplied' ctx = RequestContext(application="", actor="", method="", env=environ, start_response=start_response, path=path, params=None) ctx.params = self._getParamsFromEnv(environ, ctx) doc, _ = self.getDoc(space, doc, ctx) doc = doc.copy() doc.source = content doc.loadFromSource() doc.preprocess() content, doc = doc.executeMacrosDynamicWiki(ctx=ctx) page = self.confluence2htmlconvertor.convert(content, doc=doc, requestContext=ctx, page=self.getpage(), paramsExtra=ctx.params) if not 'postprocess' in page.processparameters or page.processparameters['postprocess']: page.body = page.body.replace("$$space", space) page.body = page.body.replace("$$page", doc.original_name) page.body = page.body.replace("$$path", doc.path) page.body = page.body.replace("$$querystr", ctx.env['QUERY_STRING']) page.body = page.body.replace("$$$menuright", "") if "todestruct" in 
doc.__dict__: doc.destructed = True start_response('200 OK', [('Content-Type', "text/html")]) return str(page) def addRoute(self, function, appname, actor, method, paramvalidation={}, paramdescription={}, \ paramoptional={}, description="", auth=True, returnformat=None): """ @param function is the function which will be called as follows: function(webserver,path,params): function can also be a string, then only the string will be returned if str=='taskletengine' will directly call the taskletengine e.g. for std method calls from actors @appname e.g. system is 1e part of url which is routed http://localhost/appname/actor/method/ @actor e.g. system is 2nd part of url which is routed http://localhost/appname/actor/method/ @method e.g. "test" is part of url which is routed e.g. http://localhost/appname/actor/method/ @paramvalidation e.g. {"name":"\w+","color":""} the values are regexes @paramdescription is optional e.g. {"name":"this is the description for name"} @auth is for authentication if false then there will be no auth key checked example function called def test(self,webserver,path,params): return 'hello world!!' or without the self in the functioncall (when no class method) what you return is being send to the browser example call: http://localhost:9999/test?key=1234&color=dd&name=dd """ appname = appname.replace("_", ".") actor = actor.replace("_", ".") method = method.replace("_", ".") self.app_actor_dict["%s_%s" % (appname, actor)] = 1 methoddict = {'get': 'GET', 'set': 'PUT', 'new': 'POST', 'delete': 'DELETE', 'find': 'GET', 'list': 'GET', 'datatables': 'GET', 'create': 'POST'} self.routes["%s_%s_%s_%s" % ('GET', appname, actor, method)] = [function, paramvalidation, paramdescription, paramoptional, \ description, auth, returnformat] ##################### SCHEDULING def _timer(self): """ will remember time every 0.5 sec """ lfmid = 0 while True: self.epoch = int(time.time()) if lfmid < self.epoch - 200: lfmid = self.epoch self.fiveMinuteId = j.base.time.get5MinuteId(self.epoch) self.hourId = j.base.time.getHourId(self.epoch) self.dayId = j.base.time.getDayId(self.epoch) gevent.sleep(0.5) def _minRepeat(self): while True: gevent.sleep(5) for key in self.schedule1min.keys(): item, args, kwargs = self.schedule1min[key] item(*args, **kwargs) def _15minRepeat(self): while True: gevent.sleep(60 * 15) for key in self.schedule15min.keys(): item, args, kwargs = self.schedule15min[key] item(*args, **kwargs) def _60minRepeat(self): while True: gevent.sleep(60 * 60) for key in self.schedule60min.keys(): item, args, kwargs = self.schedule60min[key] item(*args, **kwargs) def getNow(self): return self.epoch def addSchedule1MinPeriod(self, name, method, *args, **kwargs): self.schedule1min[name] = (method, args, kwargs) def addSchedule15MinPeriod(self, name, method, *args, **kwargs): self.schedule15min[name] = (method, args, kwargs) def addSchedule60MinPeriod(self, name, method, *args, **kwargs): self.schedule60min[name] = (method, args, kwargs) ##################### START-STOP / get spaces/actors/buckets / addgreenlet def start(self): """ Start the web server, serving the `routes`. When no `routes` dict is passed, serve a single 'test' route. This method will block until an exception stops the server. 
@param routes: routes to serve, will be merged with the already added routes @type routes: dict(string, list(callable, dict(string, string), dict(string, string))) """ TIMER = gevent.greenlet.Greenlet(self._timer) TIMER.start() S1 = gevent.greenlet.Greenlet(self._minRepeat) S1.start() S2 = gevent.greenlet.Greenlet(self._15minRepeat) S2.start() S3 = gevent.greenlet.Greenlet(self._60minRepeat) S3.start() j.console.echo("webserver started on port %s" % self.port) self._webserver.serve_forever() def stop(self): self._webserver.stop() def getSpaces(self): return self.spacesloader.id2object.keys() def getBuckets(self): return self.bucketsloader.id2object.keys() def getActors(self): return self.actorsloader.id2object.keys() def getSpace(self, name, ignore_doc_processor=False): name = name.lower() if name not in self.spacesloader.spaces: raise RuntimeError("Could not find space %s" % name) space = self.spacesloader.spaces[name] if space.docprocessor == None and not ignore_doc_processor: space.loadDocProcessor() return space def loadSpace(self, name): space = self.getSpace(name) space.loadDocProcessor() return space def getBucket(self, name): if name not in self.bucketsloader.buckets: raise RuntimeError("Could not find bucket %s" % name) bucket = self.bucketsloader.buckets(name) return bucket def addGreenlet(self, appName, greenlet): """ """ greenletObject = greenlet() if greenletObject.method == "": raise RuntimeError("greenlet class needs to have a method") if greenletObject.actor == "": raise RuntimeError("greenlet class needs to have a actor") greenletObject.server = self self.addRoute(function=greenletObject.wscall, appname=appName, actor=greenletObject.actor, method=greenletObject.method, paramvalidation=greenletObject.paramvalidation, paramdescription=greenletObject.paramdescription, paramoptional=greenletObject.paramoptional, description=greenletObject.description, auth=greenletObject.auth) def restartInProcess(self, app): import fcntl args = sys.argv[:] import ipdb;ipdb.set_trace() args.insert(0, sys.executable) apppath = j.system.fs.joinPaths(j.dirs.appDir, app) if apppath == '.': apppath = os.getcwd() max_fd = 1024 for fd in range(3, max_fd): try: flags = fcntl.fcntl(fd, fcntl.F_GETFD) except IOError: continue fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) os.chdir(apppath) os.execv(sys.executable, args) def __str__(self): out="" for key,val in self.__dict__.iteritems(): if key[0]<>"_" and key not in ["routes"]: out+="%-35s : %s\n"%(key,val) routes=",".join(self.routes.keys()) out+="%-35s : %s\n"%("routes",routes) items=out.split("\n") items.sort() out="portalserver:"+"\n".join(items) return out __repr__ = __str__
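# Usage sketch (illustrative, not part of the server module above): how a handler is
# wired into the portal server with addRoute() and served with start().  `server` is
# assumed to be an already-constructed instance of the server class above; the
# app/actor/method names and the validation regex are placeholders.
def _example_register_route(server):
    def hello(webserver, path, params):
        # handlers receive the webserver, the request path and the parsed params
        return "hello world!!"
    # registers GET /system/greeter/hello?name=... (see the addRoute docstring)
    server.addRoute(function=hello, appname="system", actor="greeter", method="hello",
                    paramvalidation={"name": "\\w+"},
                    paramdescription={"name": "who to greet"},
                    auth=False)
    server.addSchedule15MinPeriod("noop", lambda: None)  # example periodic task
    server.start()  # blocks and serves until the process is stopped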
# Copyright 2017 Bracket Computing, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # https://github.com/brkt/brkt-cli/blob/master/LICENSE # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and # limitations under the License. import abc import json import logging import time import urllib2 from brkt_cli import validation from brkt_cli.util import ( BracketError, Deadline, sleep ) ENCRYPT_INITIALIZING = 'initial' ENCRYPT_DOWNLOADING = 'downloading' ENCRYPTION_PROGRESS_TIMEOUT = 10 * 60 # 10 minutes ENCRYPT_RUNNING = 'encrypting' ENCRYPT_SUCCESSFUL = 'finished' ENCRYPT_FAILED = 'failed' ENCRYPT_ENCRYPTING = 'encrypting' ENCRYPTOR_STATUS_PORT = 80 FAILURE_CODE_AWS_PERMISSIONS = 'insufficient_aws_permissions' FAILURE_CODE_GET_YETI_CONFIG = 'failed_get_yeti_config' FAILURE_CODE_INVALID_NTP_SERVERS = 'invalid_ntp_servers' FAILURE_CODE_INVALID_SSH_KEY = 'invalid-ssh-public-key' FAILURE_CODE_INVALID_USERDATA_INPUT = 'invalid_userdata_input' FAILURE_CODE_NET_ROUTE_TIMEOUT = 'failed_network_route' FAILURE_CODE_NOT_AUTHORIZED_YETI = 'not_authorized_yeti' FAILURE_CODE_FORBIDDEN_YETI = 'forbidden_yeti' FAILURE_CODE_TERMINAL_YETI_ERROR = 'terminal_yeti_error_' FAILURE_CODE_UNSUPPORTED_GUEST = 'unsupported_guest' log = logging.getLogger(__name__) class EncryptionError(BracketError): def __init__(self, message): super(EncryptionError, self).__init__(message) self.console_output_file = None class UnsupportedGuestError(BracketError): pass class BaseEncryptorService(object): __metaclass__ = abc.ABCMeta def __init__(self, hostnames, port=ENCRYPTOR_STATUS_PORT): self.hostnames = hostnames self.port = port def fetch(self, path, timeout_secs=2): exceptions_by_host = {} for hostname in self.hostnames: url = 'http://%s:%d%s' % (hostname, self.port, path) try: r = urllib2.urlopen(url, timeout=timeout_secs) data = r.read() self.hostnames = [hostname] return data except IOError as e: log.debug('Unable to connect to %s:%s - %s', hostname, self.port, e) exceptions_by_host[hostname] = e raise EncryptorConnectionError(self.port, exceptions_by_host) @abc.abstractmethod def is_encryptor_up(self): pass @abc.abstractmethod def get_status(self): pass class EncryptorConnectionError(Exception): def __init__(self, port, exceptions_by_host): self.port = port # Maps the hostname to the exception that was generated. 
self.exceptions_by_host = exceptions_by_host msg = 'Unable to connect to the encryptor instance ' errors = [] for hostname, exception in self.exceptions_by_host.iteritems(): errors.append('at %s: %s' % (hostname, exception)) msg += ', '.join(errors) super(EncryptorConnectionError, self).__init__(msg) class EncryptorService(BaseEncryptorService): def is_encryptor_up(self): try: self.get_status() log.debug("Successfully got encryptor status") return True except Exception as e: log.debug("Couldn't get encryptor status: %s", e) return False def get_status(self): data = self.fetch('/') info = json.loads(data) info['percent_complete'] = 0 bytes_total = info.get('bytes_total') if info['state'] == ENCRYPT_SUCCESSFUL: info['percent_complete'] = 100 elif bytes_total is not None and bytes_total > 0: ratio = float(info['bytes_written']) / info['bytes_total'] info['percent_complete'] = float(100 * ratio) return info def wait_for_encryptor_up(enc_svc, deadline): start = time.time() while not deadline.is_expired(): if enc_svc.is_encryptor_up(): log.debug( 'Encryption service is up after %.1f seconds', time.time() - start ) return sleep(5) raise EncryptionError( 'Unable to contact encryptor instance at %s, port %d.' % (', '.join(enc_svc.hostnames), enc_svc.port) ) def _handle_failure_code(failure_code): """ Raise EncryptionError with a user-friendly message that's based on the failure code returned by the Metavisor. """ if failure_code == FAILURE_CODE_AWS_PERMISSIONS: raise EncryptionError( 'The specified IAM profile has insufficient permissions') if failure_code == FAILURE_CODE_GET_YETI_CONFIG: raise EncryptionError( 'Unable to determine the location of the Bracket service') if failure_code == FAILURE_CODE_INVALID_NTP_SERVERS: raise EncryptionError( 'Invalid NTP servers') if failure_code == FAILURE_CODE_INVALID_SSH_KEY: raise EncryptionError( 'Unable to load SSH key') if failure_code == FAILURE_CODE_INVALID_USERDATA_INPUT: raise EncryptionError( 'User data passed to Metavisor is invalid') if failure_code == FAILURE_CODE_NET_ROUTE_TIMEOUT: raise EncryptionError( 'Unable to connect to the Bracket service') if failure_code == FAILURE_CODE_NOT_AUTHORIZED_YETI: raise EncryptionError( 'Authentication with the Bracket service failed') if failure_code == FAILURE_CODE_FORBIDDEN_YETI: raise EncryptionError( 'Instance launch forbidden by the Bracket service') if (failure_code and failure_code.startswith(FAILURE_CODE_TERMINAL_YETI_ERROR)): # During auth raise EncryptionError( 'Authentication with the Bracket service failed: %s' % failure_code) if failure_code == FAILURE_CODE_UNSUPPORTED_GUEST: raise UnsupportedGuestError( 'Guest image uses an unsupported operating system') msg = 'Encryption failed' if failure_code: msg += ' with code %s' % failure_code raise EncryptionError(msg) def wait_for_encryption(enc_svc, progress_timeout=ENCRYPTION_PROGRESS_TIMEOUT): err_count = 0 max_errs = 10 start_time = time.time() last_log_time = start_time progress_deadline = Deadline(progress_timeout) last_progress = 0 last_state = '' while err_count < max_errs: try: status = enc_svc.get_status() err_count = 0 except Exception as e: log.warn("Failed getting encryption status: %s", e) log.warn("Retrying. . .") err_count += 1 sleep(10) continue state = status['state'] percent_complete = status['percent_complete'] # For image updates, there is no bytes_written bytes_written = status.get('bytes_written', 0) log.debug('state=%s, percent_complete=%.2f', state, percent_complete) # Make sure that encryption progress hasn't stalled. 
if progress_deadline.is_expired(): raise EncryptionError( 'Waited for encryption progress for longer than %s seconds' % progress_timeout ) if bytes_written > last_progress or state != last_state: last_progress = bytes_written last_state = state progress_deadline = Deadline(progress_timeout) # Log progress once a minute. now = time.time() if now - last_log_time >= 60: if state == ENCRYPT_INITIALIZING: log.info('Encryption process is initializing') else: state_display = 'Encryption' if state == ENCRYPT_DOWNLOADING: state_display = 'Download from cloud storage' log.info( '%s is %.2f%% complete', state_display, percent_complete) last_log_time = now if state == ENCRYPT_SUCCESSFUL: log.info('Encrypted root drive created.') return elif state == ENCRYPT_FAILED: log.error('Encryption status: %s', json.dumps(status)) _handle_failure_code(status.get('failure_code')) sleep(10) # We've failed to get encryption status for _max_errs_ consecutive tries. # Assume that the server has crashed. raise EncryptionError('Encryption service unavailable') def encryptor_did_single_disk(enc_svc): sd = enc_svc.fetch('/single_disk') if sd == 'True': return True return False def status_port(value): if not value: return ENCRYPTOR_STATUS_PORT return validation.range_int_argument(value, 1, 65535, exclusions=[81, ])
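# Usage sketch (illustrative): how the helpers above are typically combined when
# encrypting an image -- poll the Metavisor status port until it answers, then block
# until encryption completes.  The hostname and the ten-minute startup deadline are
# placeholder values, not part of this module.
def _example_wait_for_encryption():
    enc_svc = EncryptorService(['10.0.0.5'], port=ENCRYPTOR_STATUS_PORT)
    # wait until GET / on the status port succeeds at least once
    wait_for_encryptor_up(enc_svc, Deadline(600))
    # poll get_status(); raises EncryptionError via _handle_failure_code on failure
    wait_for_encryption(enc_svc, progress_timeout=ENCRYPTION_PROGRESS_TIMEOUT)
    return enc_svc.get_status()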
# # Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill # All rights reserved # import unittest import mock from meld import system from meld.system import restraints class TestAlwaysActiveCollection(unittest.TestCase): def setUp(self): self.coll = restraints.AlwaysActiveCollection() def test_adding_non_restraint_raises(self): with self.assertRaises(RuntimeError): self.coll.add_restraint(object) def test_should_be_able_to_add_restraint(self): rest = restraints.Restraint() self.coll.add_restraint(rest) self.assertIn(rest, self.coll.restraints) class TestSelectivelyActiveCollection(unittest.TestCase): def test_restraint_should_be_present_after_adding(self): rest = [restraints.SelectableRestraint()] coll = restraints.SelectivelyActiveCollection(rest, 1) self.assertEqual(len(coll.groups), 1) def test_can_add_two_restraints(self): rest = [restraints.SelectableRestraint(), restraints.SelectableRestraint()] coll = restraints.SelectivelyActiveCollection(rest, 1) self.assertEqual(len(coll.groups), 2) def test_adding_non_selectable_restraint_should_raise(self): rest = [restraints.NonSelectableRestraint()] with self.assertRaises(RuntimeError): restraints.SelectivelyActiveCollection(rest, 1) def test_empty_restraint_list_should_raise(self): with self.assertRaises(RuntimeError): restraints.SelectivelyActiveCollection([], 0) def test_negative_num_active_should_raise(self): rest = [restraints.SelectableRestraint()] with self.assertRaises(RuntimeError): restraints.SelectivelyActiveCollection(rest, -1) def test_num_active_greater_than_num_restraints_should_raise(self): rest = [restraints.SelectableRestraint()] with self.assertRaises(RuntimeError): restraints.SelectivelyActiveCollection(rest, 2) def test_num_active_should_be_set(self): rest = [restraints.SelectableRestraint()] coll = restraints.SelectivelyActiveCollection(rest, 1) self.assertEqual(coll.num_active, 1) def test_should_wrap_bare_restraint_in_group(self): rest = [restraints.SelectableRestraint()] with mock.patch('meld.system.restraints.RestraintGroup.__init__', spec=True) as group_init: group_init.return_value = None restraints.SelectivelyActiveCollection(rest, 1) self.assertEqual(group_init.call_count, 1) def test_should_not_wrap_a_group_in_a_group(self): rest = [restraints.SelectableRestraint()] grps = [restraints.RestraintGroup(rest, 1)] with mock.patch('meld.system.restraints.RestraintGroup.__init__', spec=True) as group_init: restraints.SelectivelyActiveCollection(grps, 1) self.assertEqual(group_init.call_count, 0) class TestRestraintGroup(unittest.TestCase): def test_should_accept_selectable_restraint(self): rest = [restraints.SelectableRestraint()] grp = restraints.RestraintGroup(rest, 1) self.assertEqual(len(grp.restraints), 1) def test_should_not_accept_non_selectable_restraint(self): rest = [restraints.NonSelectableRestraint()] with self.assertRaises(RuntimeError): restraints.RestraintGroup(rest, 1) def test_should_raise_on_empy_restraint_list(self): with self.assertRaises(RuntimeError): restraints.RestraintGroup([], 0) def test_num_active_below_zero_should_raise(self): rest = [restraints.SelectableRestraint()] with self.assertRaises(RuntimeError): restraints.RestraintGroup(rest, -1) def test_num_active_above_n_rest_should_raise(self): rest = [restraints.SelectableRestraint()] with self.assertRaises(RuntimeError): restraints.RestraintGroup(rest, 2) def test_num_active_should_be_set(self): rest = [restraints.SelectableRestraint()] grp = restraints.RestraintGroup(rest, 1) self.assertEqual(grp.num_active, 1) class 
TestRestraintManager(unittest.TestCase): def setUp(self): self.mock_system = mock.Mock(spec=system.System) self.mock_system.index_of_atom.return_value = 0 self.rest_manager = restraints.RestraintManager(self.mock_system) def test_can_add_as_always_active_non_selectable_restraint(self): rest = restraints.NonSelectableRestraint() self.rest_manager.add_as_always_active(rest) self.assertIn(rest, self.rest_manager.always_active) def test_can_add_as_always_active_selectable_restraint(self): rest = restraints.SelectableRestraint() self.rest_manager.add_as_always_active(rest) self.assertIn(rest, self.rest_manager.always_active) def test_can_add_list_of_always_active_restraints(self): rests = [restraints.SelectableRestraint(), restraints.NonSelectableRestraint()] self.rest_manager.add_as_always_active_list(rests) self.assertEqual(len(self.rest_manager.always_active), 2) def test_creating_bad_restraint_raises_error(self): with self.assertRaises(RuntimeError): self.rest_manager.create_restraint('blarg', x=42, y=99, z=-403) def test_can_create_distance_restraint(self): rest = self.rest_manager.create_restraint( 'distance', atom_1_res_index=1, atom_1_name='CA', atom_2_res_index=2, atom_2_name='CA', r1=0, r2=0, r3=0.3, r4=999., k=2500) self.assertTrue(isinstance(rest, restraints.DistanceRestraint)) def test_can_add_seletively_active_collection(self): rest_list = [restraints.SelectableRestraint(), restraints.SelectableRestraint()] self.rest_manager.add_selectively_active_collection(rest_list, 2) self.assertEqual(len(self.rest_manager.selectively_active_collections), 1) def test_can_create_restraint_group(self): rest_list = [restraints.SelectableRestraint(), restraints.SelectableRestraint()] grp = self.rest_manager.create_restraint_group(rest_list, 2) self.assertEqual(len(grp.restraints), 2) class TestDistanceRestraint(unittest.TestCase): def setUp(self): self.mock_system = mock.Mock() self.scaler = restraints.ConstantScaler() self.ramp = restraints.ConstantRamp() def test_should_find_two_indices(self): restraints.DistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 0, 0, 0.3, 999., 1.0) calls = [ mock.call(1, 'CA'), mock.call(2, 'CA')] self.mock_system.index_of_atom.assert_has_calls(calls) def test_should_raise_on_bad_index(self): self.mock_system.index_of_atom.side_effect = KeyError() with self.assertRaises(KeyError): restraints.DistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'BAD', 2, 'CA', 0, 0, 0.3, 999., 1.0) def test_should_raise_if_r2_less_than_r1(self): with self.assertRaises(RuntimeError): restraints.DistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 10., 0., 10., 10., 1.0) def test_should_raise_if_r3_less_than_r2(self): with self.assertRaises(RuntimeError): restraints.DistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 10., 10., 0., 10., 1.0) def test_should_raise_if_r4_less_than_r3(self): with self.assertRaises(RuntimeError): restraints.DistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 10., 10., 10., 0., 1.0) def test_should_raise_with_negative_r(self): with self.assertRaises(RuntimeError): restraints.DistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', -1., 10., 10., 10., 1.0) def test_should_raise_with_negative_k(self): with self.assertRaises(RuntimeError): restraints.DistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 10., 10., 10., 10., -1.0) class TestHyperbolicDistanceRestraint(unittest.TestCase): def setUp(self): 
self.mock_system = mock.Mock() self.scaler = restraints.ConstantScaler() self.ramp = restraints.ConstantRamp() def test_should_find_two_indices(self): restraints.HyperbolicDistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 0.0, 0.0, 0.6, 0.7, 1.0, 1.0) calls = [ mock.call(1, 'CA'), mock.call(2, 'CA')] self.mock_system.index_of_atom.assert_has_calls(calls) def test_should_raise_on_bad_index(self): self.mock_system.index_of_atom.side_effect = KeyError() with self.assertRaises(KeyError): restraints.HyperbolicDistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'BAD', 2, 'CA', 0.0, 0.1, 0.2, 0.3, 1.0, 1.0) def test_should_raise_with_negative_r(self): with self.assertRaises(RuntimeError): restraints.HyperbolicDistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', -1., 1.0, 2.0, 3.0, 1.0, 1.0) def test_should_raise_if_r2_less_than_r1(self): with self.assertRaises(RuntimeError): restraints.HyperbolicDistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 10.0, 0.0, 1.0, 2.0, 1.0, 1.0) def test_should_raise_if_r3_less_than_r2(self): with self.assertRaises(RuntimeError): restraints.HyperbolicDistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 0.0, 1.0, 0.0, 2.0, 1.0, 1.0) def test_should_raise_if_r4_less_than_r3(self): with self.assertRaises(RuntimeError): restraints.HyperbolicDistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 0.0, 1.0, 2.0, 0.0, 1.0, 1.0) def test_should_raise_if_r4_equals_r3(self): with self.assertRaises(RuntimeError): restraints.HyperbolicDistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 0.0, 1.0, 2.0, 2.0, 1.0, 1.0) def test_should_raise_with_negative_k(self): with self.assertRaises(RuntimeError): restraints.HyperbolicDistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 1., 2., 3., 4., -1.0, 1.0) def test_should_raise_with_negative_asymptote(self): with self.assertRaises(RuntimeError): restraints.HyperbolicDistanceRestraint(self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 1., 2., 3., 4., 1.0, -1.0) class TestTorsionRestraint(unittest.TestCase): def setUp(self): self.mock_system = mock.Mock() self.mock_system.index_of_atom.side_effect = [0, 1, 2, 3] self.scaler = mock.Mock() self.ramp = mock.Mock() def test_should_find_four_indices(self): restraints.TorsionRestraint( self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 3, 'CA', 4, 'CA', 180., 0., 1.0) calls = [ mock.call(1, 'CA'), mock.call(2, 'CA'), mock.call(3, 'CA'), mock.call(4, 'CA')] self.mock_system.index_of_atom.assert_has_calls(calls) def test_should_raise_with_non_unique_indices(self): self.mock_system.index_of_atom.side_effect = [0, 0, 1, 2] # repeated index with self.assertRaises(RuntimeError): restraints.TorsionRestraint( self.mock_system, self.scaler, self.ramp, 1, 'CA', 1, 'CA', 3, 'CA', 4, 'CA', 180., 0., 1.0) def test_should_fail_with_phi_below_minus_180(self): with self.assertRaises(RuntimeError): restraints.TorsionRestraint( self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 3, 'CA', 4, 'CA', -270., 0., 1.0) def test_should_fail_with_phi_above_180(self): with self.assertRaises(RuntimeError): restraints.TorsionRestraint( self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 3, 'CA', 4, 'CA', 270., 0., 1.0) def test_should_fail_with_delta_phi_above_180(self): with self.assertRaises(RuntimeError): restraints.TorsionRestraint( self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 3, 'CA', 4, 
'CA', 0., 200., 1.0) def test_should_fail_with_delta_phi_below_0(self): with self.assertRaises(RuntimeError): restraints.TorsionRestraint( self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 3, 'CA', 4, 'CA', 0., -90., 1.0) def test_should_fail_with_negative_k(self): with self.assertRaises(RuntimeError): restraints.TorsionRestraint( self.mock_system, self.scaler, self.ramp, 1, 'CA', 2, 'CA', 3, 'CA', 4, 'CA', 0., 90., -1.0) class TestRdcRestraint(unittest.TestCase): def setUp(self): self.mock_system = mock.Mock() self.mock_system.index_of_atom.side_effect = [0, 1] self.scaler = mock.Mock() self.ramp = mock.Mock() def test_should_find_four_indices(self): restraints.RdcRestraint( self.mock_system, self.scaler, self.ramp, 1, 'N', 1, 'H', 100., 10., 10., 1.0, 1.0, 0) calls = [ mock.call(1, 'N'), mock.call(1, 'H')] self.mock_system.index_of_atom.assert_has_calls(calls) def test_should_raise_with_non_unique_indices(self): self.mock_system.index_of_atom.side_effect = [0, 0] # repeated index with self.assertRaises(ValueError): restraints.RdcRestraint( self.mock_system, self.scaler, self.ramp, 1, 'N', 1, 'N', # repeated index 100., 10., 10., 1.0, 1.0, 0) def test_should_raise_with_negative_tolerance(self): with self.assertRaises(ValueError): restraints.RdcRestraint( self.mock_system, self.scaler, self.ramp, 1, 'N', 1, 'H', 100., 10., -10., 1.0, 1.0, 0) def test_should_raise_with_negative_force_const(self): with self.assertRaises(ValueError): restraints.RdcRestraint( self.mock_system, self.scaler, self.ramp, 1, 'N', 1, 'H', 100., 10., 10., -1.0, 1.0, 0) def test_should_raise_with_negative_weight(self): with self.assertRaises(ValueError): restraints.RdcRestraint( self.mock_system, self.scaler, self.ramp, 1, 'N', 1, 'H', 100., 10., 10., 1.0, -1.0, 0) class TestConstantScaler(unittest.TestCase): def test_should_return_1_when_alpha_is_0(self): scaler = restraints.ConstantScaler() self.assertAlmostEqual(scaler(0.0), 1.0) def test_should_return_1_when_alpha_is_1(self): scaler = restraints.ConstantScaler() self.assertAlmostEqual(scaler(1.0), 1.0) def test_should_raise_if_alpha_is_less_than_zero(self): scaler = restraints.ConstantScaler() with self.assertRaises(RuntimeError): scaler(-1.0) def test_should_raise_if_alpha_is_greater_than_one(self): scaler = restraints.ConstantScaler() with self.assertRaises(RuntimeError): scaler(2.0) class TestLinearScaler(unittest.TestCase): def test_should_raise_when_alpha_min_below_zero(self): with self.assertRaises(RuntimeError): restraints.LinearScaler(-1, 1) def test_should_raise_when_alpha_min_above_one(self): with self.assertRaises(RuntimeError): restraints.LinearScaler(2, 1) def test_should_raise_when_alpha_max_below_zero(self): with self.assertRaises(RuntimeError): restraints.LinearScaler(1, -1) def test_should_raise_when_alpha_max_above_one(self): with self.assertRaises(RuntimeError): restraints.LinearScaler(1, 2) def test_should_raise_if_alpha_max_less_than_alpha_min(self): with self.assertRaises(RuntimeError): restraints.LinearScaler(0.7, 0.6) def test_should_raise_if_alpha_is_below_zero(self): scaler = restraints.LinearScaler(0.2, 0.8) with self.assertRaises(RuntimeError): scaler(-1) def test_should_raise_if_alpha_is_above_one(self): scaler = restraints.LinearScaler(0.2, 0.8) with self.assertRaises(RuntimeError): scaler(2) def test_should_return_1_below_alpha_min(self): scaler = restraints.LinearScaler(0.2, 0.8) self.assertAlmostEqual(scaler(0.1), 1.0) def test_should_return_0_above_alpha_max(self): scaler = restraints.LinearScaler(0.2, 0.8) 
self.assertAlmostEqual(scaler(0.9), 0.0) def test_should_return_correct_value_in_middle(self): scaler = restraints.LinearScaler(0.0, 1.0) self.assertAlmostEqual(scaler(0.3), 0.7) class TestPlateauLinearScaler(unittest.TestCase): def test_should_raise_when_alpha_min_below_zero(self): with self.assertRaises(RuntimeError): restraints.PlateauLinearScaler(-1,0,0.5, 1) def test_should_raise_when_alpha_min_above_one(self): with self.assertRaises(RuntimeError): restraints.PlateauLinearScaler(2,0,0.5, 1) def test_should_raise_when_alpha_max_below_zero(self): with self.assertRaises(RuntimeError): restraints.PlateauLinearScaler(1,0,0.5, -1) def test_should_raise_when_alpha_max_above_one(self): with self.assertRaises(RuntimeError): restraints.PlateauLinearScaler(1,0,0.5, 2) def test_should_raise_if_alpha_max_less_than_alpha_min(self): with self.assertRaises(RuntimeError): restraints.PlateauLinearScaler(0.7,0,0.5, 0.6) def test_should_raise_if_alpha_is_below_zero(self): scaler = restraints.PlateauLinearScaler(0.2,0.5,0.7,0.8) with self.assertRaises(RuntimeError): scaler(-1) def test_should_raise_if_alpha_is_above_one(self): scaler = restraints.PlateauLinearScaler(0.2,0.6,0.7, 0.8) with self.assertRaises(RuntimeError): scaler(2) def test_should_return_0_below_alpha_min(self): scaler = restraints.PlateauLinearScaler(0.2,0.4,0.6, 0.8) self.assertAlmostEqual(scaler(0.1), 0.0) def test_should_return_0_above_alpha_max(self): scaler = restraints.PlateauLinearScaler(0.2, 0.4,0.6,0.8) self.assertAlmostEqual(scaler(0.9), 0.0) def test_should_return_correct_value_between_alpha_one_alpha_two_down(self): scaler = restraints.PlateauLinearScaler(0.0, 0.4,0.6,1.0) self.assertAlmostEqual(scaler(0.3), 0.75) def test_should_return_correct_value_between_alpha_one_alpha_two_down2(self): scaler = restraints.PlateauLinearScaler(0.0, 0.4,0.6,1.0) self.assertAlmostEqual(scaler(0.1), 0.25) def test_should_return_correct_value_between_alpha_two_alpha_max_up(self): scaler = restraints.PlateauLinearScaler(0.0, 0.4,0.6,1.0) self.assertAlmostEqual(scaler(0.7), 0.75) def test_should_return_correct_value_between_alpha_two_alpha_max_up2(self): scaler = restraints.PlateauLinearScaler(0.0, 0.4,0.6,1.0) self.assertAlmostEqual(scaler(0.9), 0.25) class TestNonLinearScaler(unittest.TestCase): def test_should_raise_when_alpha_min_below_zero(self): with self.assertRaises(RuntimeError): restraints.NonLinearScaler(-1, 1, 4) def test_should_raise_when_alpha_min_above_one(self): with self.assertRaises(RuntimeError): restraints.NonLinearScaler(2, 1, 4) def test_should_raise_when_alpha_max_below_zero(self): with self.assertRaises(RuntimeError): restraints.NonLinearScaler(1, -1, 4) def test_should_raise_when_alpha_max_above_one(self): with self.assertRaises(RuntimeError): restraints.NonLinearScaler(1, 2, 4) def test_should_raise_if_alpha_max_less_than_alpha_min(self): with self.assertRaises(RuntimeError): restraints.NonLinearScaler(0.7, 0.6, 4) def test_should_raise_if_factor_below_one(self): with self.assertRaises(RuntimeError): restraints.NonLinearScaler(0.0, 1.0, 0.2) def test_should_raise_if_alpha_is_below_zero(self): scaler = restraints.NonLinearScaler(0.2, 0.8, 4) with self.assertRaises(RuntimeError): scaler(-1) def test_should_raise_if_alpha_is_above_one(self): scaler = restraints.NonLinearScaler(0.2, 0.8, 4) with self.assertRaises(RuntimeError): scaler(2) def test_should_return_1_below_alpha_min(self): scaler = restraints.NonLinearScaler(0.2, 0.8, 4) self.assertAlmostEqual(scaler(0.1), 1.0) def test_should_return_0_above_alpha_max(self): 
scaler = restraints.NonLinearScaler(0.2, 0.8, 4) self.assertAlmostEqual(scaler(0.9), 0.0) def test_midpoint_should_return_correct_value(self): scaler = restraints.NonLinearScaler(0.2, 0.8, 4) self.assertAlmostEqual(scaler(0.5), 0.119202922) class TestPlateauNonLinearScaler(unittest.TestCase): def test_should_raise_when_alpha_min_below_zero(self): with self.assertRaises(RuntimeError): restraints.PlateauNonLinearScaler(-1,0.5,0.6,1, 4) def test_should_raise_when_alpha_min_above_one(self): with self.assertRaises(RuntimeError): restraints.PlateauNonLinearScaler(2,0.5,0.6,1, 4) def test_should_raise_when_alpha_max_below_zero(self): with self.assertRaises(RuntimeError): restraints.PlateauNonLinearScaler(1,0.5,0.6,-1, 4) def test_should_raise_when_alpha_max_above_one(self): with self.assertRaises(RuntimeError): restraints.PlateauNonLinearScaler(1,0.5,0.6,2, 4) def test_should_raise_if_alpha_max_less_than_alpha_min(self): with self.assertRaises(RuntimeError): restraints.PlateauNonLinearScaler(0.7,0.65,0.63, 0.6, 4) def test_should_raise_if_factor_below_one(self): with self.assertRaises(RuntimeError): restraints.PlateauNonLinearScaler(0.0,0.5,0.7, 1.0, 0.2) def test_should_raise_if_alpha_is_below_zero(self): scaler = restraints.PlateauNonLinearScaler(0.2, 0.4,0.6,0.8, 4) with self.assertRaises(RuntimeError): scaler(-1) def test_should_raise_if_alpha_is_above_one(self): scaler = restraints.PlateauNonLinearScaler(0.2,0.4,0.6, 0.8, 4) with self.assertRaises(RuntimeError): scaler(2) def test_should_return_0_below_alpha_min(self): scaler = restraints.PlateauNonLinearScaler(0.2,0.4,0.6, 0.8, 4) self.assertAlmostEqual(scaler(0.1), 0.0) def test_should_return_0_above_alpha_max(self): scaler = restraints.PlateauNonLinearScaler(0.2, 0.4,0.6,0.8, 4) self.assertAlmostEqual(scaler(0.9), 0.0) def test_should_return_1_between_alpha_one_alpha_two(self): scaler = restraints.PlateauNonLinearScaler(0.2, 0.4,0.6,0.8, 4) self.assertAlmostEqual(scaler(0.5), 1.0) def test_midpoint_should_return_correct_value_scaling_up(self): scaler = restraints.PlateauNonLinearScaler(0.7, 0.8,0.9,1.0, 4) self.assertAlmostEqual(scaler(0.95), 0.119202922) def test_midpoint_should_return_correct_value_scaling_down(self): scaler = restraints.PlateauNonLinearScaler(0.7, 0.8,0.9,1.0, 4) self.assertAlmostEqual(scaler(0.75), 0.88079708) class TestSmoothScaler(unittest.TestCase): def test_should_raise_when_alpha_min_below_zero(self): with self.assertRaises(RuntimeError): restraints.SmoothScaler(-1, 1) def test_should_raise_when_alpha_min_above_one(self): with self.assertRaises(RuntimeError): restraints.SmoothScaler(2, 1) def test_should_raise_when_alpha_max_below_zero(self): with self.assertRaises(RuntimeError): restraints.SmoothScaler(1,-1) def test_should_raise_when_alpha_max_above_one(self): with self.assertRaises(RuntimeError): restraints.SmoothScaler(1, 2) def test_should_raise_if_alpha_max_less_than_alpha_min(self): with self.assertRaises(RuntimeError): restraints.SmoothScaler(0.7, 0.6) def test_should_raise_if_alpha_is_below_zero(self): scaler = restraints.SmoothScaler(0.2,0.8) with self.assertRaises(RuntimeError): scaler(-1) def test_should_raise_if_alpha_is_above_one(self): scaler = restraints.SmoothScaler(0.2, 0.8) with self.assertRaises(RuntimeError): scaler(2) def test_should_return_1_below_alpha_min(self): scaler = restraints.SmoothScaler(0.2, 0.8) self.assertAlmostEqual(scaler(0.1), 1.0) def test_should_return_0_above_alpha_max(self): scaler = restraints.SmoothScaler(0.2,0.8) self.assertAlmostEqual(scaler(0.9), 0.0) def 
test_should_return_correct_value_middle(self): scaler = restraints.SmoothScaler(0.5,1.0) self.assertAlmostEqual(scaler(0.75), 0.5) def test_should_return_correct_value_middle2(self): scaler = restraints.SmoothScaler(0.5,1.0) self.assertAlmostEqual(scaler(0.90), 0.104) class TestPlateauSmoothScaler(unittest.TestCase): def test_should_raise_when_alpha_min_below_zero(self): with self.assertRaises(RuntimeError): restraints.PlateauSmoothScaler(-1,0.5,0.6, 1) def test_should_raise_when_alpha_min_above_one(self): with self.assertRaises(RuntimeError): restraints.PlateauSmoothScaler(2,0.5,0.6, 1) def test_should_raise_when_alpha_max_below_zero(self): with self.assertRaises(RuntimeError): restraints.PlateauSmoothScaler(1,0.5,0.6,-1) def test_should_raise_when_alpha_max_above_one(self): with self.assertRaises(RuntimeError): restraints.PlateauSmoothScaler(1,0.5,0.6, 2) def test_should_raise_if_alpha_max_less_than_alpha_min(self): with self.assertRaises(RuntimeError): restraints.PlateauSmoothScaler(0.7,0.5,0.6, 0.6) def test_should_raise_if_alpha_is_below_zero(self): scaler = restraints.PlateauSmoothScaler(0.2,0.5,0.6,0.8) with self.assertRaises(RuntimeError): scaler(-1) def test_should_raise_if_alpha_is_above_one(self): scaler = restraints.PlateauSmoothScaler(0.2, 0.5,0.6,0.8) with self.assertRaises(RuntimeError): scaler(2) def test_should_return_0_below_alpha_min(self): scaler = restraints.PlateauSmoothScaler(0.2,0.5,0.6, 0.8) self.assertAlmostEqual(scaler(0.1), 0.0) def test_should_return_0_above_alpha_max(self): scaler = restraints.PlateauSmoothScaler(0.2,0.5,0.6,0.8) self.assertAlmostEqual(scaler(0.9), 0.0) def test_should_return_1_between_alpha_one_alpha_two(self): scaler = restraints.PlateauSmoothScaler(0.2,0.4,0.6,0.8) self.assertAlmostEqual(scaler(0.5), 1.0) def test_should_return_correct_value_middle_up(self): scaler = restraints.PlateauSmoothScaler(0.2,0.4,0.6,0.8) self.assertAlmostEqual(scaler(0.75), 0.15625) def test_should_return_correct_value_middle_up2(self): scaler = restraints.PlateauSmoothScaler(0.2,0.4,0.6,0.8) self.assertAlmostEqual(scaler(0.65), 0.84375) def test_should_return_correct_value_middle_down(self): scaler = restraints.PlateauSmoothScaler(0.2,0.4,0.6,0.8) self.assertAlmostEqual(scaler(0.35), 0.84375) def test_should_return_correct_value_middle_down2(self): scaler = restraints.PlateauSmoothScaler(0.2,0.4,0.6,0.8) self.assertAlmostEqual(scaler(0.25), 0.15625) class TestCreateRestraintsAndScalers(unittest.TestCase): def setUp(self): self.mock_system = mock.Mock() self.manager = restraints.RestraintManager(self.mock_system) def test_can_create_constant_scaler(self): scaler = self.manager.create_scaler('constant') self.assertTrue(isinstance(scaler, restraints.ConstantScaler)) def test_can_create_linear_scaler(self): scaler = self.manager.create_scaler('linear', alpha_min=0.2, alpha_max=0.8) self.assertTrue(isinstance(scaler, restraints.LinearScaler)) def test_can_create_non_linear_scaler(self): scaler = self.manager.create_scaler('nonlinear', alpha_min=0.2, alpha_max=0.8, factor=4) self.assertTrue(isinstance(scaler, restraints.NonLinearScaler)) def test_creating_restraint_without_specifying_scaler_uses_constant(self): self.mock_system.index_of_atom.side_effect = [0, 1] rest = self.manager.create_restraint( 'distance', atom_1_res_index=1, atom_1_name='CA', atom_2_res_index=2, atom_2_name='CA', r1=0, r2=1, r3=3, r4=4, k=1.0) self.assertTrue(isinstance(rest.scaler, restraints.ConstantScaler)) def test_creating_restraint_with_scaler_should_use_it(self): 
self.mock_system.index_of_atom.side_effect = [0, 1] scaler = restraints.LinearScaler(0, 1) rest = self.manager.create_restraint( 'distance', scaler, atom_1_res_index=1, atom_1_name='CA', atom_2_res_index=2, atom_2_name='CA', r1=0, r2=1, r3=3, r4=4, k=1.0) self.assertTrue(isinstance(rest.scaler, restraints.LinearScaler)) def test_creating_restraint_should_raise_if_scaler_is_wrong_type(self): scaler = restraints.TimeRamp() self.mock_system.index_of_atom.side_effect = [0, 1] with self.assertRaises(ValueError): rest = self.manager.create_restraint( 'distance', scaler, atom_1_res_index=1, atom_1_name='CA', atom_2_res_index=2, atom_2_name='CA', r1=0., r2=1., r3=3., r4=4., k=1.0) def test_creating_restraint_should_raise_if_ramp_is_wrong_type(self): scaler = restraints.ConstantScaler() ramp = restraints.ConstantScaler() self.mock_system.index_of_atom.side_effect = [0, 1] with self.assertRaises(ValueError): rest = self.manager.create_restraint( 'distance', scaler, ramp=ramp, atom_1_res_index=1, atom_1_name='CA', atom_2_res_index=2, atom_2_name='CA', r1=0., r2=1., r3=3., r4=4., k=1.0) def test_create_restraint_without_specifying_ramp_should_use_constant_ramp(self): scaler = restraints.ConstantScaler() self.mock_system.index_of_atom.side_effect = [0, 1] rest = self.manager.create_restraint( 'distance', scaler, atom_1_res_index=1, atom_1_name='CA', atom_2_res_index=2, atom_2_name='CA', r1=0., r2=1., r3=3., r4=4., k=1.0) self.assertTrue(isinstance(rest.ramp, restraints.ConstantRamp)) class TestConstantRamp(unittest.TestCase): def setUp(self): self.ramp = restraints.ConstantRamp() def test_should_raise_with_negative_time(self): with self.assertRaises(ValueError): self.ramp(-1) def test_should_always_return_one(self): self.assertEqual(self.ramp(0), 1.0) self.assertEqual(self.ramp(1000), 1.0) self.assertEqual(self.ramp(1000000000), 1.0) class TestLinearRamp(unittest.TestCase): def setUp(self): self.ramp = restraints.LinearRamp(100, 200, 0.1, 0.9) def test_should_raise_with_negative_time(self): with self.assertRaises(ValueError): self.ramp(-1) def test_should_return_start_weight_before_start_time(self): self.assertEqual(self.ramp(0), 0.1) def test_return_end_weight_after_end_time(self): self.assertEqual(self.ramp(500), 0.9) def test_should_return_midpoint_half_way_between_start_and_end(self): self.assertAlmostEqual(self.ramp(150), 0.5) class TestNonLinearRampUpWard(unittest.TestCase): def setUp(self): self.ramp = restraints.NonLinearRamp(100, 200, 0.1, 0.9, 4) def test_should_raise_with_negative_time(self): with self.assertRaises(ValueError): self.ramp(-1) def test_should_return_start_weight_before_start_time(self): self.assertEqual(self.ramp(0), 0.1) def test_should_return_end_weight_after_end_time(self): self.assertEqual(self.ramp(500), 0.9) def test_should_return_correct_value_at_midpoint(self): self.assertAlmostEqual(self.ramp(150), 0.195362337617) class TestNonLinearRampDownWard(unittest.TestCase): def setUp(self): self.ramp = restraints.NonLinearRamp(100, 200, 0.9, 0.1, 4) def test_should_raise_with_negative_time(self): with self.assertRaises(ValueError): self.ramp(-1) def test_should_return_start_weight_before_start_time(self): self.assertEqual(self.ramp(0), 0.9) def test_should_return_end_weight_after_end_time(self): self.assertEqual(self.ramp(500), 0.1) def test_should_return_correct_value_at_midpoint(self): self.assertAlmostEqual(self.ramp(150), 0.195362337617) class TestTimeRampSwitcher(unittest.TestCase): def setUp(self): self.first_ramp = mock.Mock() self.second_ramp = mock.Mock() 
self.ramp_switch = restraints.TimeRampSwitcher(self.first_ramp, self.second_ramp, 500) def test_should_call_first_ramp_before_switching_time(self): self.ramp_switch(0) self.first_ramp.assert_called_once_with(0) self.assertEqual(self.second_ramp.call_count, 0) def test_should_call_second_ramp_on_switching_time(self): self.ramp_switch(500) self.second_ramp.assert_called_once_with(500) self.assertEqual(self.first_ramp.call_count, 0) class TestConstantPositioner(unittest.TestCase): def setUp(self): self.positioner = restraints.ConstantPositioner(42.0) def test_should_raise_when_alpha_below_zero(self): with self.assertRaises(ValueError): self.positioner(-1) def test_should_raise_when_alpha_above_one(self): with self.assertRaises(ValueError): self.positioner(2) def test_always_returns_same_value(self): self.assertEqual(self.positioner(0.0), 42.0) self.assertEqual(self.positioner(0.5), 42.0) self.assertEqual(self.positioner(1.0), 42.0) class TestLinearPositioner(unittest.TestCase): def setUp(self): self.positioner = restraints.LinearPositioner(0.1, 0.9, 0, 100) def test_should_raise_when_alpha_below_zero(self): with self.assertRaises(ValueError): self.positioner(-1) def test_should_raise_when_alpha_above_one(self): with self.assertRaises(ValueError): self.positioner(2) def test_returns_min_value_below_alpha_min(self): self.assertAlmostEqual(self.positioner(0), 0.0) def test_returns_max_value_above_alpha_max(self): self.assertAlmostEqual(self.positioner(1.0), 100.0) def test_returns_mid_value_at_half_way(self): self.assertAlmostEqual(self.positioner(0.5), 50.0)
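# Illustration (not one of the original tests): the scaler behaviour the cases above
# pin down.  A LinearScaler(alpha_min, alpha_max) maps the replica parameter alpha to
# a restraint weight of 1.0 below alpha_min and 0.0 above alpha_max; the end-point
# values below follow from the assertions in TestLinearScaler, and the midpoint
# assumes the linear interpolation those tests imply.
def _example_linear_scaler_values():
    scaler = restraints.LinearScaler(0.2, 0.8)
    return [scaler(0.1),   # 1.0 -- below alpha_min
            scaler(0.5),   # 0.5 -- halfway between alpha_min and alpha_max
            scaler(0.9)]   # 0.0 -- above alpha_max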
import datetime import functools import logging from threading import Lock import MySQLdb as mariadb import box logger = logging.getLogger(__name__) class DatabaseFile: def __init__(self, user, password, database): self.db_args = (user, password, database) self.database_name = database self.execute_mutex = Lock() self._conn = mariadb.connect(user=user, password=password) self.execute(f"CREATE DATABASE IF NOT EXISTS {database}") self.close() self.connect(user, password, database) def connect(self, user, password, database): try: self._conn = mariadb.connect(user=user, password=password, database=database) logger.debug("DatabaseFile.connect | connection made") return True except mariadb.Error: logger.exception("") return False def close(self): self._conn.close() @property def conn(self): return self._conn def execute(self, cmd, args=None): with self.execute_mutex as __: if type(cmd) == str: try: cursor = self.conn.cursor() cursor.execute(cmd, args) except (AttributeError, mariadb.OperationalError) as exc: if eval(str(exc))[0] in [2013, 2006]: logger.debug("DatabaseFile.execute | reconnecting to db") self.connect(*self.db_args) cursor = self.conn.cursor() cursor.execute(cmd, args) else: raise else: raise TypeError ret = cursor.fetchall() cursor.close() self.conn.commit() return ret class DatabaseTable: def __init__(self, database, table_name, create_query, defaults): if type(database) != DatabaseFile: raise TypeError self.database = database self.table_name = table_name self.create_query = create_query # query to create the table self.defaults = defaults # defaults of each column on the table self.write_mutex = Lock() def create(self): self._execute(self.create_query) def execute(self, cmd, args=tuple()): logger.debug(f"DatabaseTable.execute | {' '.join(cmd.split())} with args {args}") self._primary_keys.cache_clear() self._columns.cache_clear() self._table_info.cache_clear() return self._execute(cmd, args) def _execute(self, cmd, args=tuple()): args = tuple(map(str, args)) return self.database.execute(cmd, args) def has_key(self, key): return self.__contains__(key) @functools.lru_cache() def __getitem__(self, key): cursor = self._execute(f"SELECT * FROM {self.table_name} WHERE {self._primary_keys()[0]}=%s", (key,)) try: obj = [box.Box(dict(zip(self._columns(), i))) for i in cursor][0] except IndexError as exc: raise KeyError from exc if obj: logger.debug(f"DatabaseTable.__getitem__ | getting {key}: {obj}") return obj else: raise KeyError(f"'{key}'") def get(self, key): fallback = box.Box(dict(zip(self._columns(), (key, *self.defaults)))) try: obj = self[key] logger.debug(f"DatabaseTable.get | getting {key}: {obj}") return obj except KeyError: logger.debug(f"DatabaseTable.get | getting {key}: {fallback}; key does not exist") return fallback def __setitem__(self, key, value): if type(value) in (tuple, list): # I doubt I'll ever use this directly, but it's here if ever I do. 
if key in self: logger.debug(f"DatabaseTable.__setitem__ | setting {key}; full modify") for k, v in dict(zip(self._columns(), (key, *value))): self.modify_row(key, k, v) else: logger.debug(f"DatabaseTable.__setitem__ | setting {key}; full insert") self.insert_row(key, value) elif type(value) == dict: if key in self: logger.debug(f"DatabaseTable.__setitem__ | setting {key}; partial modify") for k, v in value.items(): self.modify_row(key, k, v) else: logger.debug(f"DatabaseTable.__setitem__ | setting {key}; partial insert") self.__setitem__(key, tuple(value.get(*i) for i in zip([v for v in self._columns() if v not in set(self._primary_keys())], self.defaults))) else: raise TypeError def __delitem__(self, key): with self.write_mutex as _: logger.debug(f"DatabaseTable.__delitem__ | deleting {key}") self._execute(f"DELETE FROM {self.table_name} WHERE {self._primary_keys()[0]}=%s", (key,)) self.__getitem__.cache_clear() self.__contains__.cache_clear() def __iter__(self): return iter(self._execute(f"SELECT {self._primary_keys()[0]} FROM {self.table_name}")) @functools.lru_cache() def __contains__(self, key): ret = bool(self._execute(f"SELECT {self._primary_keys()[0]} FROM {self.table_name} " f"WHERE {self._primary_keys()[0]}=%s", (key,))) logger.debug(f"DatabaseTable.__contains__ | checking {key}'s existence: " f"{'exists' if ret else 'does not exist'}") return ret def modify_row(self, key, column, value): if column not in self._columns(): raise ValueError with self.write_mutex as _: if self[key][column] != value: logger.debug(f"DatabaseTable.modify_row | change {key}'s {column} to {value}") self._execute(f"""UPDATE {self.table_name} SET {column}=%s WHERE {self._primary_keys()[0]}=%s; """, (value, key)) logger.debug(f"DatabaseTable.modify_row | {key}'s {column} changed to {value}") else: logger.debug(f"DatabaseTable.modify_row | {key}'s {column} is already {value}") self.__getitem__.cache_clear() def insert_row(self, key, value): logger.debug(f"DatabaseTable.insert_row | inserting row {value} to {key}") with self.write_mutex as _: self._execute(f"""INSERT INTO {self.table_name}({', '.join(self._columns())}) VALUES({', '.join(['%s' if i is not None else 'NULL' for i in (key, *value)])}) """, (key, *tuple(i for i in value if i is not None))) self.__getitem__.cache_clear() self.__contains__.cache_clear() @property def columns(self): logger.debug(f"DatabaseTable.columns | retrieving") return self._columns() @property def primary_keys(self): logger.debug(f"DatabaseTable.primary_keys | retrieving") return self._primary_keys() @property def table_info(self): logger.debug("DatabaseTable.table_info | retrieving") return self._table_info() @functools.lru_cache() def _columns(self): return tuple(i[1] for i in self._table_info()) @functools.lru_cache() def _primary_keys(self): return tuple(i[0] for i in sorted(list((i[1], i[5]) for i in self._table_info() if i[5] != 0), key=lambda x: x[1])) @functools.lru_cache() def _table_info(self): with self.write_mutex as _: return self._execute(f""" SELECT col.ORDINAL_POSITION, col.COLUMN_NAME, col.COLUMN_TYPE, col.IS_NULLABLE, col.COLUMN_DEFAULT, ifnull(kcu.ORDINAL_POSITION, 0) FROM information_schema.COLUMNS col LEFT JOIN information_schema.KEY_COLUMN_USAGE kcu ON col.TABLE_SCHEMA=kcu.TABLE_SCHEMA AND col.TABLE_NAME=kcu.TABLE_NAME AND col.COLUMN_NAME=kcu.COLUMN_NAME WHERE col.TABLE_SCHEMA='{self.database.database_name}' AND col.TABLE_NAME='{self.table_name}' """) class UserPrefTable(DatabaseTable): def update_last_command(self, key): self[key] = { "last_command": 
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") } # "%Y-%m-%dT%H:%M:%S.%f%z"
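

# --- usage sketch (illustrative only) ----------------------------------------
# How DatabaseFile / DatabaseTable are meant to compose: the table object wraps
# one table of the database and exposes dict-style access keyed on the first
# primary key, returning box.Box rows.  The credentials, table name and schema
# below are hypothetical placeholders, not values taken from this project.
if __name__ == "__main__":
    db = DatabaseFile("user", "password", "example_db")
    prefs = UserPrefTable(
        db,
        "user_prefs",
        # the CREATE TABLE query is supplied by the caller; create() just runs it
        "CREATE TABLE IF NOT EXISTS user_prefs ("
        "  user_id VARCHAR(64) PRIMARY KEY,"
        "  last_command DATETIME NULL"
        ")",
        defaults=(None,),  # one default per non-key column
    )
    prefs.create()
    prefs["alice"] = {"last_command": "2020-01-01 00:00:00"}  # partial insert/modify
    print(prefs.get("alice").last_command)                    # Box attribute access
    prefs.update_last_command("alice")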
""" Field classes. """ from __future__ import unicode_literals import copy import datetime import os import re import sys import uuid import warnings from decimal import Decimal, DecimalException from io import BytesIO from django.core import validators from django.core.exceptions import ValidationError # Provide this import for backwards compatibility. from django.core.validators import EMPTY_VALUES # NOQA from django.forms.utils import from_current_timezone, to_current_timezone from django.forms.widgets import ( FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput, DateTimeInput, EmailInput, HiddenInput, MultipleHiddenInput, NullBooleanSelect, NumberInput, Select, SelectMultiple, SplitDateTimeWidget, SplitHiddenDateTimeWidget, TextInput, TimeInput, URLInput, ) from django.utils import formats, six from django.utils.dateparse import parse_duration from django.utils.deprecation import ( RemovedInDjango20Warning, RenameMethodsBase, ) from django.utils.duration import duration_string from django.utils.encoding import force_str, force_text, smart_text from django.utils.ipv6 import clean_ipv6_address from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit from django.utils.translation import ugettext_lazy as _, ungettext_lazy __all__ = ( 'Field', 'CharField', 'IntegerField', 'DateField', 'TimeField', 'DateTimeField', 'DurationField', 'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField', 'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField', 'ComboField', 'MultiValueField', 'FloatField', 'DecimalField', 'SplitDateTimeField', 'GenericIPAddressField', 'FilePathField', 'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField', 'UUIDField', ) class RenameFieldMethods(RenameMethodsBase): renamed_methods = ( ('_has_changed', 'has_changed', RemovedInDjango20Warning), ) class Field(six.with_metaclass(RenameFieldMethods, object)): widget = TextInput # Default widget to use when rendering this type of Field. hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden". default_validators = [] # Default set of validators # Add an 'invalid' entry to default_error_message if you want a specific # field error message not raised by the field validators. default_error_messages = { 'required': _('This field is required.'), } empty_values = list(validators.EMPTY_VALUES) # Tracks each time a Field instance is created. Used to retain order. creation_counter = 0 def __init__(self, required=True, widget=None, label=None, initial=None, help_text='', error_messages=None, show_hidden_initial=False, validators=[], localize=False, label_suffix=None): # required -- Boolean that specifies whether the field is required. # True by default. # widget -- A Widget class, or instance of a Widget class, that should # be used for this Field when displaying it. Each Field has a # default Widget that it'll use if you don't specify this. In # most cases, the default widget is TextInput. # label -- A verbose name for this field, for use in displaying this # field in a form. By default, Django will use a "pretty" # version of the form field name, if the Field is part of a # Form. # initial -- A value to use in this Field's initial display. This value # is *not* used as a fallback if data isn't given. # help_text -- An optional string to use as "help text" for this Field. # error_messages -- An optional dictionary to override the default # messages that the field will raise. 
# show_hidden_initial -- Boolean that specifies if it is needed to render a # hidden widget with initial value after widget. # validators -- List of additional validators to use # localize -- Boolean that specifies if the field should be localized. # label_suffix -- Suffix to be added to the label. Overrides # form's label_suffix. self.required, self.label, self.initial = required, label, initial self.show_hidden_initial = show_hidden_initial self.help_text = help_text self.label_suffix = label_suffix widget = widget or self.widget if isinstance(widget, type): widget = widget() # Trigger the localization machinery if needed. self.localize = localize if self.localize: widget.is_localized = True # Let the widget know whether it should display as required. widget.is_required = self.required # Hook into self.widget_attrs() for any Field-specific HTML attributes. extra_attrs = self.widget_attrs(widget) if extra_attrs: widget.attrs.update(extra_attrs) self.widget = widget # Increase the creation counter, and save our local copy. self.creation_counter = Field.creation_counter Field.creation_counter += 1 messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, 'default_error_messages', {})) messages.update(error_messages or {}) self.error_messages = messages self.validators = self.default_validators + validators super(Field, self).__init__() def prepare_value(self, value): return value def to_python(self, value): return value def validate(self, value): if value in self.empty_values and self.required: raise ValidationError(self.error_messages['required'], code='required') def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except ValidationError as e: if hasattr(e, 'code') and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise ValidationError(errors) def clean(self, value): """ Validates the given value and returns its "cleaned" value as an appropriate Python object. Raises ValidationError for any errors. """ value = self.to_python(value) self.validate(value) self.run_validators(value) return value def bound_data(self, data, initial): """ Return the value that should be shown for this field on render of a bound form, given the submitted POST data for the field and the initial data, if any. For most fields, this will simply be data; FileFields need to handle it a bit differently. """ return data def widget_attrs(self, widget): """ Given a Widget instance (*not* a Widget class), returns a dictionary of any HTML attributes that should be added to the Widget, based on this Field. """ return {} def has_changed(self, initial, data): """ Return True if data differs from initial. """ # For purposes of seeing whether something has changed, None is # the same as an empty string, if the data or initial value we get # is None, replace it w/ ''. 
initial_value = initial if initial is not None else '' try: data = self.to_python(data) if hasattr(self, '_coerce'): data = self._coerce(data) except ValidationError: return True data_value = data if data is not None else '' return initial_value != data_value def __deepcopy__(self, memo): result = copy.copy(self) memo[id(self)] = result result.widget = copy.deepcopy(self.widget, memo) result.validators = self.validators[:] return result class CharField(Field): def __init__(self, max_length=None, min_length=None, *args, **kwargs): self.max_length, self.min_length = max_length, min_length super(CharField, self).__init__(*args, **kwargs) if min_length is not None: self.validators.append(validators.MinLengthValidator(int(min_length))) if max_length is not None: self.validators.append(validators.MaxLengthValidator(int(max_length))) def to_python(self, value): "Returns a Unicode object." if value in self.empty_values: return '' return smart_text(value) def widget_attrs(self, widget): attrs = super(CharField, self).widget_attrs(widget) if self.max_length is not None: # The HTML attribute is maxlength, not max_length. attrs.update({'maxlength': str(self.max_length)}) return attrs class IntegerField(Field): widget = NumberInput default_error_messages = { 'invalid': _('Enter a whole number.'), } re_decimal = re.compile(r'\.0*\s*$') def __init__(self, max_value=None, min_value=None, *args, **kwargs): self.max_value, self.min_value = max_value, min_value if kwargs.get('localize') and self.widget == NumberInput: # Localized number input is not well supported on most browsers kwargs.setdefault('widget', super(IntegerField, self).widget) super(IntegerField, self).__init__(*args, **kwargs) if max_value is not None: self.validators.append(validators.MaxValueValidator(max_value)) if min_value is not None: self.validators.append(validators.MinValueValidator(min_value)) def to_python(self, value): """ Validates that int() can be called on the input. Returns the result of int(). Returns None for empty values. """ value = super(IntegerField, self).to_python(value) if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) # Strip trailing decimal and zeros. try: value = int(self.re_decimal.sub('', str(value))) except (ValueError, TypeError): raise ValidationError(self.error_messages['invalid'], code='invalid') return value def widget_attrs(self, widget): attrs = super(IntegerField, self).widget_attrs(widget) if isinstance(widget, NumberInput): if self.min_value is not None: attrs['min'] = self.min_value if self.max_value is not None: attrs['max'] = self.max_value return attrs class FloatField(IntegerField): default_error_messages = { 'invalid': _('Enter a number.'), } def to_python(self, value): """ Validates that float() can be called on the input. Returns the result of float(). Returns None for empty values. 
""" value = super(IntegerField, self).to_python(value) if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) try: value = float(value) except (ValueError, TypeError): raise ValidationError(self.error_messages['invalid'], code='invalid') return value def validate(self, value): super(FloatField, self).validate(value) # Check for NaN (which is the only thing not equal to itself) and +/- infinity if value != value or value in (Decimal('Inf'), Decimal('-Inf')): raise ValidationError(self.error_messages['invalid'], code='invalid') return value def widget_attrs(self, widget): attrs = super(FloatField, self).widget_attrs(widget) if isinstance(widget, NumberInput) and 'step' not in widget.attrs: attrs.setdefault('step', 'any') return attrs class DecimalField(IntegerField): default_error_messages = { 'invalid': _('Enter a number.'), 'max_digits': ungettext_lazy( 'Ensure that there are no more than %(max)s digit in total.', 'Ensure that there are no more than %(max)s digits in total.', 'max'), 'max_decimal_places': ungettext_lazy( 'Ensure that there are no more than %(max)s decimal place.', 'Ensure that there are no more than %(max)s decimal places.', 'max'), 'max_whole_digits': ungettext_lazy( 'Ensure that there are no more than %(max)s digit before the decimal point.', 'Ensure that there are no more than %(max)s digits before the decimal point.', 'max'), } def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs): self.max_digits, self.decimal_places = max_digits, decimal_places super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs) def to_python(self, value): """ Validates that the input is a decimal number. Returns a Decimal instance. Returns None for empty values. Ensures that there are no more than max_digits in the number, and no more than decimal_places digits after the decimal point. """ if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) value = smart_text(value).strip() try: value = Decimal(value) except DecimalException: raise ValidationError(self.error_messages['invalid'], code='invalid') return value def validate(self, value): super(DecimalField, self).validate(value) if value in self.empty_values: return # Check for NaN, Inf and -Inf values. We can't compare directly for NaN, # since it is never equal to itself. However, NaN is the only value that # isn't equal to itself, so we can use this to identify NaN if value != value or value == Decimal("Inf") or value == Decimal("-Inf"): raise ValidationError(self.error_messages['invalid'], code='invalid') sign, digittuple, exponent = value.as_tuple() decimals = abs(exponent) # digittuple doesn't include any leading zeros. digits = len(digittuple) if decimals > digits: # We have leading zeros up to or past the decimal point. Count # everything past the decimal point as a digit. We do not count # 0 before the decimal point as a digit since that would mean # we would not allow max_digits = decimal_places. 
digits = decimals whole_digits = digits - decimals if self.max_digits is not None and digits > self.max_digits: raise ValidationError( self.error_messages['max_digits'], code='max_digits', params={'max': self.max_digits}, ) if self.decimal_places is not None and decimals > self.decimal_places: raise ValidationError( self.error_messages['max_decimal_places'], code='max_decimal_places', params={'max': self.decimal_places}, ) if (self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places)): raise ValidationError( self.error_messages['max_whole_digits'], code='max_whole_digits', params={'max': (self.max_digits - self.decimal_places)}, ) return value def widget_attrs(self, widget): attrs = super(DecimalField, self).widget_attrs(widget) if isinstance(widget, NumberInput) and 'step' not in widget.attrs: if self.decimal_places is not None: # Use exponential notation for small values since they might # be parsed as 0 otherwise. ref #20765 step = str(Decimal('1') / 10 ** self.decimal_places).lower() else: step = 'any' attrs.setdefault('step', step) return attrs class BaseTemporalField(Field): def __init__(self, input_formats=None, *args, **kwargs): super(BaseTemporalField, self).__init__(*args, **kwargs) if input_formats is not None: self.input_formats = input_formats def to_python(self, value): # Try to coerce the value to unicode. unicode_value = force_text(value, strings_only=True) if isinstance(unicode_value, six.text_type): value = unicode_value.strip() # If unicode, try to strptime against each input format. if isinstance(value, six.text_type): for format in self.input_formats: try: return self.strptime(value, format) except (ValueError, TypeError): continue raise ValidationError(self.error_messages['invalid'], code='invalid') def strptime(self, value, format): raise NotImplementedError('Subclasses must define this method.') class DateField(BaseTemporalField): widget = DateInput input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS') default_error_messages = { 'invalid': _('Enter a valid date.'), } def to_python(self, value): """ Validates that the input can be converted to a date. Returns a Python datetime.date object. """ if value in self.empty_values: return None if isinstance(value, datetime.datetime): return value.date() if isinstance(value, datetime.date): return value return super(DateField, self).to_python(value) def strptime(self, value, format): return datetime.datetime.strptime(force_str(value), format).date() class TimeField(BaseTemporalField): widget = TimeInput input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS') default_error_messages = { 'invalid': _('Enter a valid time.') } def to_python(self, value): """ Validates that the input can be converted to a time. Returns a Python datetime.time object. """ if value in self.empty_values: return None if isinstance(value, datetime.time): return value return super(TimeField, self).to_python(value) def strptime(self, value, format): return datetime.datetime.strptime(force_str(value), format).time() class DateTimeField(BaseTemporalField): widget = DateTimeInput input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS') default_error_messages = { 'invalid': _('Enter a valid date/time.'), } def prepare_value(self, value): if isinstance(value, datetime.datetime): value = to_current_timezone(value) return value def to_python(self, value): """ Validates that the input can be converted to a datetime. Returns a Python datetime.datetime object. 
""" if value in self.empty_values: return None if isinstance(value, datetime.datetime): return from_current_timezone(value) if isinstance(value, datetime.date): result = datetime.datetime(value.year, value.month, value.day) return from_current_timezone(result) result = super(DateTimeField, self).to_python(value) return from_current_timezone(result) def strptime(self, value, format): return datetime.datetime.strptime(force_str(value), format) class DurationField(Field): default_error_messages = { 'invalid': _('Enter a valid duration.'), } def prepare_value(self, value): if isinstance(value, datetime.timedelta): return duration_string(value) return value def to_python(self, value): if value in self.empty_values: return None if isinstance(value, datetime.timedelta): return value value = parse_duration(value) if value is None: raise ValidationError(self.error_messages['invalid'], code='invalid') return value class RegexField(CharField): def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs): """ regex can be either a string or a compiled regular expression object. error_message is an optional error message to use, if 'Enter a valid value' is too generic for you. """ # error_message is just kept for backwards compatibility: if error_message is not None: warnings.warn( "The 'error_message' argument is deprecated. Use " "Field.error_messages['invalid'] instead.", RemovedInDjango20Warning, stacklevel=2 ) error_messages = kwargs.get('error_messages') or {} error_messages['invalid'] = error_message kwargs['error_messages'] = error_messages super(RegexField, self).__init__(max_length, min_length, *args, **kwargs) self._set_regex(regex) def _get_regex(self): return self._regex def _set_regex(self, regex): if isinstance(regex, six.string_types): regex = re.compile(regex, re.UNICODE) self._regex = regex if hasattr(self, '_regex_validator') and self._regex_validator in self.validators: self.validators.remove(self._regex_validator) self._regex_validator = validators.RegexValidator(regex=regex) self.validators.append(self._regex_validator) regex = property(_get_regex, _set_regex) class EmailField(CharField): widget = EmailInput default_validators = [validators.validate_email] def clean(self, value): value = self.to_python(value).strip() return super(EmailField, self).clean(value) class FileField(Field): widget = ClearableFileInput default_error_messages = { 'invalid': _("No file was submitted. Check the encoding type on the form."), 'missing': _("No file was submitted."), 'empty': _("The submitted file is empty."), 'max_length': ungettext_lazy( 'Ensure this filename has at most %(max)d character (it has %(length)d).', 'Ensure this filename has at most %(max)d characters (it has %(length)d).', 'max'), 'contradiction': _('Please either submit a file or check the clear checkbox, not both.') } def __init__(self, *args, **kwargs): self.max_length = kwargs.pop('max_length', None) self.allow_empty_file = kwargs.pop('allow_empty_file', False) super(FileField, self).__init__(*args, **kwargs) def to_python(self, data): if data in self.empty_values: return None # UploadedFile objects should have name and size attributes. 
try: file_name = data.name file_size = data.size except AttributeError: raise ValidationError(self.error_messages['invalid'], code='invalid') if self.max_length is not None and len(file_name) > self.max_length: params = {'max': self.max_length, 'length': len(file_name)} raise ValidationError(self.error_messages['max_length'], code='max_length', params=params) if not file_name: raise ValidationError(self.error_messages['invalid'], code='invalid') if not self.allow_empty_file and not file_size: raise ValidationError(self.error_messages['empty'], code='empty') return data def clean(self, data, initial=None): # If the widget got contradictory inputs, we raise a validation error if data is FILE_INPUT_CONTRADICTION: raise ValidationError(self.error_messages['contradiction'], code='contradiction') # False means the field value should be cleared; further validation is # not needed. if data is False: if not self.required: return False # If the field is required, clearing is not possible (the widget # shouldn't return False data in that case anyway). False is not # in self.empty_value; if a False value makes it this far # it should be validated from here on out as None (so it will be # caught by the required check). data = None if not data and initial: return initial return super(FileField, self).clean(data) def bound_data(self, data, initial): if data in (None, FILE_INPUT_CONTRADICTION): return initial return data def has_changed(self, initial, data): if data is None: return False return True class ImageField(FileField): default_error_messages = { 'invalid_image': _( "Upload a valid image. The file you uploaded was either not an " "image or a corrupted image." ), } def to_python(self, data): """ Checks that the file-upload field data contains a valid image (GIF, JPG, PNG, possibly others -- whatever the Python Imaging Library supports). """ f = super(ImageField, self).to_python(data) if f is None: return None from PIL import Image # We need to get a file object for Pillow. We might have a path or we might # have to read the data into memory. if hasattr(data, 'temporary_file_path'): file = data.temporary_file_path() else: if hasattr(data, 'read'): file = BytesIO(data.read()) else: file = BytesIO(data['content']) try: # load() could spot a truncated JPEG, but it loads the entire # image in memory, which is a DoS vector. See #3848 and #18520. image = Image.open(file) # verify() must be called immediately after the constructor. image.verify() # Annotating so subclasses can reuse it for their own validation f.image = image f.content_type = Image.MIME[image.format] except Exception: # Pillow doesn't recognize it as an image. six.reraise(ValidationError, ValidationError( self.error_messages['invalid_image'], code='invalid_image', ), sys.exc_info()[2]) if hasattr(f, 'seek') and callable(f.seek): f.seek(0) return f class URLField(CharField): widget = URLInput default_error_messages = { 'invalid': _('Enter a valid URL.'), } default_validators = [validators.URLValidator()] def to_python(self, value): def split_url(url): """ Returns a list of url parts via ``urlparse.urlsplit`` (or raises a ``ValidationError`` exception for certain). """ try: return list(urlsplit(url)) except ValueError: # urlparse.urlsplit can raise a ValueError with some # misformatted URLs. 
raise ValidationError(self.error_messages['invalid'], code='invalid') value = super(URLField, self).to_python(value) if value: url_fields = split_url(value) if not url_fields[0]: # If no URL scheme given, assume http:// url_fields[0] = 'http' if not url_fields[1]: # Assume that if no domain is provided, that the path segment # contains the domain. url_fields[1] = url_fields[2] url_fields[2] = '' # Rebuild the url_fields list, since the domain segment may now # contain the path too. url_fields = split_url(urlunsplit(url_fields)) value = urlunsplit(url_fields) return value def clean(self, value): value = self.to_python(value).strip() return super(URLField, self).clean(value) class BooleanField(Field): widget = CheckboxInput def to_python(self, value): """Returns a Python boolean object.""" # Explicitly check for the string 'False', which is what a hidden field # will submit for False. Also check for '0', since this is what # RadioSelect will provide. Because bool("True") == bool('1') == True, # we don't need to handle that explicitly. if isinstance(value, six.string_types) and value.lower() in ('false', '0'): value = False else: value = bool(value) return super(BooleanField, self).to_python(value) def validate(self, value): if not value and self.required: raise ValidationError(self.error_messages['required'], code='required') def has_changed(self, initial, data): # Sometimes data or initial could be None or '' which should be the # same thing as False. if initial == 'False': # show_hidden_initial may have transformed False to 'False' initial = False return bool(initial) != bool(data) class NullBooleanField(BooleanField): """ A field whose valid values are None, True and False. Invalid values are cleaned to None. """ widget = NullBooleanSelect def to_python(self, value): """ Explicitly checks for the string 'True' and 'False', which is what a hidden field will submit for True and False, for 'true' and 'false', which are likely to be returned by JavaScript serializations of forms, and for '1' and '0', which is what a RadioField will submit. Unlike the Booleanfield we need to explicitly check for True, because we are not using the bool() function """ if value in (True, 'True', 'true', '1'): return True elif value in (False, 'False', 'false', '0'): return False else: return None def validate(self, value): pass def has_changed(self, initial, data): # None (unknown) and False (No) are not the same if initial is not None: initial = bool(initial) if data is not None: data = bool(data) return initial != data class CallableChoiceIterator(object): def __init__(self, choices_func): self.choices_func = choices_func def __iter__(self): for e in self.choices_func(): yield e class ChoiceField(Field): widget = Select default_error_messages = { 'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'), } def __init__(self, choices=(), required=True, widget=None, label=None, initial=None, help_text='', *args, **kwargs): super(ChoiceField, self).__init__(required=required, widget=widget, label=label, initial=initial, help_text=help_text, *args, **kwargs) self.choices = choices def __deepcopy__(self, memo): result = super(ChoiceField, self).__deepcopy__(memo) result._choices = copy.deepcopy(self._choices, memo) return result def _get_choices(self): return self._choices def _set_choices(self, value): # Setting choices also sets the choices on the widget. # choices can be any iterable, but we call list() on it because # it will be consumed more than once. 
if callable(value): value = CallableChoiceIterator(value) else: value = list(value) self._choices = self.widget.choices = value choices = property(_get_choices, _set_choices) def to_python(self, value): "Returns a Unicode object." if value in self.empty_values: return '' return smart_text(value) def validate(self, value): """ Validates that the input is in self.choices. """ super(ChoiceField, self).validate(value) if value and not self.valid_value(value): raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value}, ) def valid_value(self, value): "Check to see if the provided value is a valid choice" text_value = force_text(value) for k, v in self.choices: if isinstance(v, (list, tuple)): # This is an optgroup, so look inside the group for options for k2, v2 in v: if value == k2 or text_value == force_text(k2): return True else: if value == k or text_value == force_text(k): return True return False class TypedChoiceField(ChoiceField): def __init__(self, *args, **kwargs): self.coerce = kwargs.pop('coerce', lambda val: val) self.empty_value = kwargs.pop('empty_value', '') super(TypedChoiceField, self).__init__(*args, **kwargs) def _coerce(self, value): """ Validate that the value can be coerced to the right type (if not empty). """ if value == self.empty_value or value in self.empty_values: return self.empty_value try: value = self.coerce(value) except (ValueError, TypeError, ValidationError): raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value}, ) return value def clean(self, value): value = super(TypedChoiceField, self).clean(value) return self._coerce(value) class MultipleChoiceField(ChoiceField): hidden_widget = MultipleHiddenInput widget = SelectMultiple default_error_messages = { 'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'), 'invalid_list': _('Enter a list of values.'), } def to_python(self, value): if not value: return [] elif not isinstance(value, (list, tuple)): raise ValidationError(self.error_messages['invalid_list'], code='invalid_list') return [smart_text(val) for val in value] def validate(self, value): """ Validates that the input is a list or tuple. """ if self.required and not value: raise ValidationError(self.error_messages['required'], code='required') # Validate that each value in the value list is in self.choices. for val in value: if not self.valid_value(val): raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': val}, ) def has_changed(self, initial, data): if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = set(force_text(value) for value in initial) data_set = set(force_text(value) for value in data) return data_set != initial_set class TypedMultipleChoiceField(MultipleChoiceField): def __init__(self, *args, **kwargs): self.coerce = kwargs.pop('coerce', lambda val: val) self.empty_value = kwargs.pop('empty_value', []) super(TypedMultipleChoiceField, self).__init__(*args, **kwargs) def _coerce(self, value): """ Validates that the values are in self.choices and can be coerced to the right type. 
""" if value == self.empty_value or value in self.empty_values: return self.empty_value new_value = [] for choice in value: try: new_value.append(self.coerce(choice)) except (ValueError, TypeError, ValidationError): raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': choice}, ) return new_value def clean(self, value): value = super(TypedMultipleChoiceField, self).clean(value) return self._coerce(value) def validate(self, value): if value != self.empty_value: super(TypedMultipleChoiceField, self).validate(value) elif self.required: raise ValidationError(self.error_messages['required'], code='required') class ComboField(Field): """ A Field whose clean() method calls multiple Field clean() methods. """ def __init__(self, fields=(), *args, **kwargs): super(ComboField, self).__init__(*args, **kwargs) # Set 'required' to False on the individual fields, because the # required validation will be handled by ComboField, not by those # individual fields. for f in fields: f.required = False self.fields = fields def clean(self, value): """ Validates the given value against all of self.fields, which is a list of Field instances. """ super(ComboField, self).clean(value) for field in self.fields: value = field.clean(value) return value class MultiValueField(Field): """ A Field that aggregates the logic of multiple Fields. Its clean() method takes a "decompressed" list of values, which are then cleaned into a single value according to self.fields. Each value in this list is cleaned by the corresponding field -- the first value is cleaned by the first field, the second value is cleaned by the second field, etc. Once all fields are cleaned, the list of clean values is "compressed" into a single value. Subclasses should not have to implement clean(). Instead, they must implement compress(), which takes a list of valid values and returns a "compressed" version of those values -- a single value. You'll probably want to use this with MultiWidget. """ default_error_messages = { 'invalid': _('Enter a list of values.'), 'incomplete': _('Enter a complete value.'), } def __init__(self, fields=(), *args, **kwargs): self.require_all_fields = kwargs.pop('require_all_fields', True) super(MultiValueField, self).__init__(*args, **kwargs) for f in fields: f.error_messages.setdefault('incomplete', self.error_messages['incomplete']) if self.require_all_fields: # Set 'required' to False on the individual fields, because the # required validation will be handled by MultiValueField, not # by those individual fields. f.required = False self.fields = fields def __deepcopy__(self, memo): result = super(MultiValueField, self).__deepcopy__(memo) result.fields = tuple(x.__deepcopy__(memo) for x in self.fields) return result def validate(self, value): pass def clean(self, value): """ Validates every value in the given list. A value is validated against the corresponding Field in self.fields. For example, if this MultiValueField was instantiated with fields=(DateField(), TimeField()), clean() would call DateField.clean(value[0]) and TimeField.clean(value[1]). 
""" clean_data = [] errors = [] if not value or isinstance(value, (list, tuple)): if not value or not [v for v in value if v not in self.empty_values]: if self.required: raise ValidationError(self.error_messages['required'], code='required') else: return self.compress([]) else: raise ValidationError(self.error_messages['invalid'], code='invalid') for i, field in enumerate(self.fields): try: field_value = value[i] except IndexError: field_value = None if field_value in self.empty_values: if self.require_all_fields: # Raise a 'required' error if the MultiValueField is # required and any field is empty. if self.required: raise ValidationError(self.error_messages['required'], code='required') elif field.required: # Otherwise, add an 'incomplete' error to the list of # collected errors and skip field cleaning, if a required # field is empty. if field.error_messages['incomplete'] not in errors: errors.append(field.error_messages['incomplete']) continue try: clean_data.append(field.clean(field_value)) except ValidationError as e: # Collect all validation errors in a single list, which we'll # raise at the end of clean(), rather than raising a single # exception for the first error we encounter. Skip duplicates. errors.extend(m for m in e.error_list if m not in errors) if errors: raise ValidationError(errors) out = self.compress(clean_data) self.validate(out) self.run_validators(out) return out def compress(self, data_list): """ Returns a single value for the given list of values. The values can be assumed to be valid. For example, if this MultiValueField was instantiated with fields=(DateField(), TimeField()), this might return a datetime object created by combining the date and time in data_list. """ raise NotImplementedError('Subclasses must implement this method.') def has_changed(self, initial, data): if initial is None: initial = ['' for x in range(0, len(data))] else: if not isinstance(initial, list): initial = self.widget.decompress(initial) for field, initial, data in zip(self.fields, initial, data): try: initial = field.to_python(initial) except ValidationError: return True if field.has_changed(initial, data): return True return False class FilePathField(ChoiceField): def __init__(self, path, match=None, recursive=False, allow_files=True, allow_folders=False, required=True, widget=None, label=None, initial=None, help_text='', *args, **kwargs): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders super(FilePathField, self).__init__(choices=(), required=required, widget=widget, label=label, initial=initial, help_text=help_text, *args, **kwargs) if self.required: self.choices = [] else: self.choices = [("", "---------")] if self.match is not None: self.match_re = re.compile(self.match) if recursive: for root, dirs, files in sorted(os.walk(self.path)): if self.allow_files: for f in files: if self.match is None or self.match_re.search(f): f = os.path.join(root, f) self.choices.append((f, f.replace(path, "", 1))) if self.allow_folders: for f in dirs: if f == '__pycache__': continue if self.match is None or self.match_re.search(f): f = os.path.join(root, f) self.choices.append((f, f.replace(path, "", 1))) else: try: for f in sorted(os.listdir(self.path)): if f == '__pycache__': continue full_file = os.path.join(self.path, f) if (((self.allow_files and os.path.isfile(full_file)) or (self.allow_folders and os.path.isdir(full_file))) and (self.match is None or self.match_re.search(f))): self.choices.append((full_file, f)) 
except OSError: pass self.widget.choices = self.choices class SplitDateTimeField(MultiValueField): widget = SplitDateTimeWidget hidden_widget = SplitHiddenDateTimeWidget default_error_messages = { 'invalid_date': _('Enter a valid date.'), 'invalid_time': _('Enter a valid time.'), } def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs): errors = self.default_error_messages.copy() if 'error_messages' in kwargs: errors.update(kwargs['error_messages']) localize = kwargs.get('localize', False) fields = ( DateField(input_formats=input_date_formats, error_messages={'invalid': errors['invalid_date']}, localize=localize), TimeField(input_formats=input_time_formats, error_messages={'invalid': errors['invalid_time']}, localize=localize), ) super(SplitDateTimeField, self).__init__(fields, *args, **kwargs) def compress(self, data_list): if data_list: # Raise a validation error if time or date is empty # (possible if SplitDateTimeField has required=False). if data_list[0] in self.empty_values: raise ValidationError(self.error_messages['invalid_date'], code='invalid_date') if data_list[1] in self.empty_values: raise ValidationError(self.error_messages['invalid_time'], code='invalid_time') result = datetime.datetime.combine(*data_list) return from_current_timezone(result) return None class GenericIPAddressField(CharField): def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs): self.unpack_ipv4 = unpack_ipv4 self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0] super(GenericIPAddressField, self).__init__(*args, **kwargs) def to_python(self, value): if value in self.empty_values: return '' value = value.strip() if value and ':' in value: return clean_ipv6_address(value, self.unpack_ipv4) return value class SlugField(CharField): default_validators = [validators.validate_slug] def clean(self, value): value = self.to_python(value).strip() return super(SlugField, self).clean(value) class UUIDField(CharField): default_error_messages = { 'invalid': _('Enter a valid UUID.'), } def prepare_value(self, value): if isinstance(value, uuid.UUID): return value.hex return value def to_python(self, value): value = super(UUIDField, self).to_python(value) if value in self.empty_values: return None if not isinstance(value, uuid.UUID): try: value = uuid.UUID(value) except ValueError: raise ValidationError(self.error_messages['invalid'], code='invalid') return value
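

# --- usage sketch (illustrative only, not part of Django) ---------------------
# The MultiValueField docstring above says subclasses implement compress()
# rather than clean(): clean() runs every sub-field and hands the cleaned list
# to compress().  A minimal hypothetical subclass combining a latitude and a
# longitude into one tuple, assuming nothing beyond the classes defined in this
# module:
class CoordinateField(MultiValueField):
    def __init__(self, *args, **kwargs):
        fields = (
            FloatField(min_value=-90, max_value=90),    # latitude
            FloatField(min_value=-180, max_value=180),  # longitude
        )
        super(CoordinateField, self).__init__(fields, *args, **kwargs)

    def compress(self, data_list):
        # data_list holds the already-cleaned values, one per sub-field.
        if not data_list:
            return None
        return (data_list[0], data_list[1])

# e.g. CoordinateField().clean(['12.5', '-70.1']) -> (12.5, -70.1)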
import requests import json import re import sys if sys.version_info > (3,): long = int BASE_URL = 'http://things.ubidots.com/api/v1.6/' def get_response_json_or_info_message(response): if response.status_code == 204: resp = {"detail": "this response don't need a body"} try: resp = response.json() except Exception: resp = {"detail": "this response doesn't have a valid json response"} return resp class UbidotsError(Exception): pass class UbidotsHTTPError(UbidotsError): def __init__(self, *args, **kwargs): self.response = kwargs['response'] self.detail = get_response_json_or_info_message(self.response) self.status_code = self.response.status_code del kwargs['response'] super(UbidotsHTTPError, self).__init__(*args, **kwargs) class UbidotsError400(UbidotsHTTPError): """Exception thrown when server returns status code 400 Bad request""" pass class UbidotsError404(UbidotsHTTPError): """Exception thrown when server returns status code 404 Not found""" pass class UbidotsError500(UbidotsHTTPError): """Exception thrown when server returns status code 500""" pass class UbidotsForbiddenError(UbidotsHTTPError): """Exception thrown when server returns status code 401 or 403""" pass class UbidotsBulkOperationError(UbidotsHTTPError): ''' TODO: the 'status_code' for this exception is 200!! ''' pass class UbidotsInvalidInputError(UbidotsError): """Exception thrown when client-side verification fails""" pass def create_exception_object(response): """Creates an Exception object for an erronous status code.""" code = response.status_code if code == 500: return UbidotsError500("An Internal Server Error Occurred.", response=response) elif code == 400: return UbidotsError400("Your response is invalid", response=response) elif code == 404: return UbidotsError404("Resource responseed not found:\n ", response=response) elif code in [403, 401]: return UbidotsForbiddenError( "Your token is invalid or you don't have permissions to access this resource:\n ", response=response ) else: return UbidotsError("Not Handled Exception: ", response=response) def raise_informative_exception(list_of_error_codes): def real_decorator(fn): def wrapped_f(self, *args, **kwargs): response = fn(self, *args, **kwargs) if response.status_code in list_of_error_codes: try: body = response.text except: body = "" # error = create_exception_object(response.status_code, body) error = create_exception_object(response) raise error else: return response return wrapped_f return real_decorator def try_again(list_of_error_codes, number_of_tries=2): def real_decorator(fn): def wrapped_f(self, *args, **kwargs): for i in range(number_of_tries): response = fn(self, *args, **kwargs) if response.status_code not in list_of_error_codes: return response else: self.initialize() try: body = response.text except: body = "" error = create_exception_object(response) raise error return wrapped_f return real_decorator def validate_input(type, required_keys=[]): ''' Decorator for validating input on the client side. If validation fails, UbidotsInvalidInputError is raised and the function is not called. ''' def real_decorator(fn): def wrapped_f(self, *args, **kwargs): if not isinstance(args[0], type): raise UbidotsInvalidInputError("Invalid argument type. 
Required: " + str(type)) def check_keys(obj): for key in required_keys: if key not in obj: raise UbidotsInvalidInputError('Key "%s" is missing' % key) if isinstance(args[0], list): list(map(check_keys, args[0])) elif isinstance(args[0], dict): check_keys(args[0]) return fn(self, *args, **kwargs) return wrapped_f return real_decorator class ServerBridge(object): ''' Responsabilites: Make petitions to the browser with the right headers and arguments ''' def __init__(self, apikey=None, token=None, base_url=None): self.base_url = base_url or BASE_URL if apikey: self._token = None self._apikey = apikey self._apikey_header = {'X-UBIDOTS-APIKEY': self._apikey} self.initialize() elif token: self._apikey = None self._token = token self._set_token_header() def _get_token(self): self._token = self._post_with_apikey('auth/token').json()['token'] self._set_token_header() def _set_token_header(self): self._token_header = {'X-AUTH-TOKEN': self._token} def initialize(self): if self._apikey: self._get_token() @raise_informative_exception([400, 404, 500, 401, 403]) def _post_with_apikey(self, path): headers = self._prepare_headers(self._apikey_header) response = requests.post(self.base_url + path, headers=headers) return response @try_again([403, 401]) @raise_informative_exception([400, 404, 500]) def get(self, path, **kwargs): headers = self._prepare_headers(self._token_header) response = requests.get(self.base_url + path, headers=headers, **kwargs) return response def get_with_url(self, url, **kwargs): headers = self._prepare_headers(self._token_header) response = requests.get(url, headers=headers, **kwargs) return response @try_again([403, 401]) @raise_informative_exception([400, 404, 500]) def post(self, path, data, **kwargs): headers = self._prepare_headers(self._token_header) data = self._prepare_data(data) response = requests.post(self.base_url + path, data=data, headers=headers, **kwargs) return response @try_again([403, 401]) @raise_informative_exception([400, 404, 500]) def delete(self, path, **kwargs): headers = self._prepare_headers(self._token_header) response = requests.delete(self.base_url + path, headers=headers, **kwargs) return response def _prepare_headers(self, *args, **kwargs): headers = self._transform_a_list_of_dictionaries_to_a_dictionary(args) headers.update(self._get_custom_headers()) headers.update(kwargs.items()) return headers def _prepare_data(self, data): return json.dumps(data) def _get_custom_headers(self): headers = {'content-type': 'application/json'} return headers def _transform_a_list_of_dictionaries_to_a_dictionary(self, list_of_dicts): headers = {} for dictionary in list_of_dicts: for key, val in dictionary.items(): headers[key] = val return headers class ApiObject(object): def __init__(self, raw_data, bridge, *args, **kwargs): self.raw = raw_data self.api = kwargs.get('api', None) self.bridge = bridge self._from_raw_to_attributes() def _from_raw_to_attributes(self): for key, value in self.raw.items(): setattr(self, key, value) def transform_to_datasource_objects(raw_datasources, bridge): datasources = [] for ds in raw_datasources: datasources.append(Datasource(ds, bridge)) return datasources def transform_to_variable_objects(raw_variables, bridge): variables = [] for variable in raw_variables: variables.append(Variable(variable, bridge)) return variables class Datasource(ApiObject): def remove_datasource(self): return self.bridge.delete('datasources/' + self.id) == 204 def get_variables(self, numofvars="ALL"): endpoint = 'datasources/' + self.id + '/variables' 
response = self.bridge.get(endpoint) pag = self.get_new_paginator(self.bridge, response.json(), transform_to_variable_objects, endpoint) return InfoList(pag, numofvars) def get_new_paginator(self, bridge, json_data, transform_function, endpoint): return Paginator(bridge, json_data, transform_function, endpoint) @validate_input(dict, ["name", "unit"]) def create_variable(self, data): response = self.bridge.post('datasources/' + self.id + '/variables', data) return Variable(response.json(), self.bridge, datasource=self) def __repr__(self): return self.name class Variable(ApiObject): def __init__(self, raw_data, bridge, *args, **kwargs): super(Variable, self).__init__(raw_data, bridge, *args, **kwargs) def get_values(self, numofvals="ALL"): endpoint = 'variables/' + self.id + '/values' response = self.bridge.get(endpoint).json() pag = Paginator(self.bridge, response, self.get_transform_function(), endpoint) return InfoList(pag, numofvals) def get_transform_function(self): def transform_function(values, bridge): return values return transform_function @validate_input(dict, ["value"]) def save_value(self, data): if not isinstance(data.get('timestamp', 0), (int, long)): raise UbidotsInvalidInputError('Key "timestamp" must point to an int value.') return self.bridge.post('variables/' + self.id + '/values', data).json() @validate_input(list, ["value", "timestamp"]) def save_values(self, data, force=False): if not all(isinstance(e['timestamp'], (int, long)) for e in data): raise UbidotsInvalidInputError('Key "timestamp" must point to an int value.') path = 'variables/' + self.id + '/values' path += ('', '?force=true')[int(force)] response = self.bridge.post(path, data) data = response.json() if not self._all_values_where_accepted(data): raise UbidotsBulkOperationError("There was a problem with some of your posted values.", response=response) return data def _all_values_where_accepted(self, data): return all(map(lambda x: x['status_code'] == 201, data)) def remove_variable(self): return self.bridge.delete('variables/' + self.id).status_code == 204 def remove_values(self, t_start, t_end): return self.bridge.delete('variables/{0}/values/{1}/{2}'.format(self.id, t_start, t_end)) def remove_all_values(self): from time import time t_start = 0 t_end = int(time()) * 1000 return self.remove_values(t_start=t_start, t_end=t_end) def get_datasource(self, **kwargs): if not self._datasource: api = ApiClient(server_bridge=self.bridge) self._datasource = api.get_datasource(url=self.datasource['url']) return self._datasource def __repr__(self): return self.name class Paginator(object): def __init__(self, bridge, response, transform_function, endpoint): self.bridge = bridge self.response = response self.endpoint = endpoint self.hasNext = self.response['next'] self.transform_function = transform_function self.items_per_page = self._get_number_of_items_per_page() self.items = [] self.actualPage = 1 self.add_new_items(response) def _there_is_more_than_one_page(self): return self.hasNext def _get_number_of_items_per_page(self): return len(self.response['results']) def add_new_items(self, response): self.hasNext = response['next'] new_items = self.transform_function(response['results'], self.bridge) self.items = self.items + new_items self.actualPage = self.actualPage + 1 def get_page(self): try: response = self.bridge.get("{0}?page={1}".format(self.endpoint, self.actualPage)).json() except JSONDecodeError: # When the server returns something that is not JSON decodable # this will crash. 
raise UbidotsHTTPError("Invalid response from the server") self.add_new_items(response) return self.items def get_all_items(self): self.get_pages() return self.items def get_pages(self): while self.hasNext is not None: self.get_page() def _filter_valid_pages(self, list_of_pages): return list(set(list_of_pages) & set(self.pages)) def _add_items_to_results(self, raw_results): self.result[self.current_page] = raw_results def _flat_items(self, pages): nestedlist = [value for key, value in self.items.items() if key in pages] return [item for sublist in nestedlist for item in sublist] class InfoList(list): def __init__(self, paginator, numofitems='ALL'): self.paginator = paginator items = self.get_items(numofitems) super(InfoList, self).__init__(items) def get_items(self, numofitems): return self.paginator.get_all_items() class ApiClient(object): bridge_class = ServerBridge def __init__(self, apikey=None, token=None, base_url=None, bridge=None): if bridge is None: self.bridge = ServerBridge(apikey, token, base_url) else: self.bridge = bridge def get_datasources(self, numofdsources='ALL', **kwargs): endpoint = 'datasources' response = self.bridge.get(endpoint, **kwargs).json() pag = Paginator(self.bridge, response, transform_to_datasource_objects, endpoint) return InfoList(pag, numofdsources) def get_datasource(self, ds_id=None, url=None, **kwargs): if not id and not url: raise UbidotsInvalidInputError("id or url required") if ds_id: raw_datasource = self.bridge.get('datasources/' + str(ds_id), **kwargs).json() elif url: raw_datasource = self.bridge.get_with_url(url, **kwargs).json() return Datasource(raw_datasource, self.bridge) @validate_input(dict, ["name"]) def create_datasource(self, data): raw_datasource = self.bridge.post('datasources/', data).json() return Datasource(raw_datasource, self.bridge) def get_variables(self, numofvars='ALL', **kwargs): endpoint = 'variables' response = self.bridge.get('variables', **kwargs).json() pag = Paginator(self.bridge, response, transform_to_variable_objects, endpoint) return InfoList(pag, numofvars) def get_variable(self, var_id, **kwargs): raw_variable = self.bridge.get('variables/' + str(var_id), **kwargs).json() return Variable(raw_variable, self.bridge) @validate_input(list, ["variable", "value"]) def save_collection(self, data, force=False): path = "collections/values" path += ('', '?force=true')[int(force)] response = self.bridge.post(path, data) data = response.json() if not self._all_collection_items_where_accepted(data): raise UbidotsBulkOperationError( "There was a problem with some of your posted items values.", response=response ) return data def _all_collection_items_where_accepted(self, data): return all(map(lambda x: x['status_code'] == 201, data))
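

# --- usage sketch (illustrative only) ----------------------------------------
# End-to-end driving of the client above.  The token, names and timestamps are
# placeholders; every call maps onto a method defined in ApiClient, Datasource
# or Variable in this module.
if __name__ == '__main__':
    api = ApiClient(token='YOUR-UBIDOTS-TOKEN')                      # or ApiClient(apikey=...)
    ds = api.create_datasource({'name': 'weather-station'})          # validate_input requires "name"
    temp = ds.create_variable({'name': 'temperature', 'unit': 'C'})  # requires "name" and "unit"
    temp.save_value({'value': 21.5})                                 # single value
    temp.save_values([                                               # bulk post; timestamps must be ints
        {'value': 21.5, 'timestamp': 1420000000000},
        {'value': 22.0, 'timestamp': 1420000060000},
    ])
    for value in temp.get_values(10):                                # paginated read via InfoList
        print(value)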
from __future__ import absolute_import, division, print_function from sqlalchemy import MetaData, Table, Column, types, create_engine, select from sqlalchemy.sql import and_ from sqlalchemy.dialects import mysql from ..model import Goal, Test class LargePickleType(types.PickleType): def load_dialect_impl(self, dialect): if dialect.name == 'mysql': return dialect.type_descriptor(mysql.LONGBLOB) # pragma: nocover else: return dialect.type_descriptor(types.LargeBinary) class SQLPersistentStore(object): def __init__(self, sqlalchemy_url): self.engine = create_engine(sqlalchemy_url, pool_recycle=3600, strategy='threadlocal') self.metadata = MetaData(bind=self.engine) self.pointer_table = Table( 'pointer', self.metadata, Column('pointer', types.String(255), primary_key=True), mysql_engine='InnoDB') self.history_table = Table( 'visitor_histories', self.metadata, Column('vid', types.String(40), primary_key=True), Column('history', LargePickleType, nullable=False), mysql_engine='InnoDB') self.tests_table = Table( 'tests', self.metadata, Column('id', types.Integer, primary_key=True), Column('name', types.String(255), nullable=False, index=True), Column('first_timestamp', types.Integer, nullable=False), Column('last_timestamp', types.Integer, nullable=False), Column('variants', LargePickleType, nullable=False), mysql_engine='InnoDB') self.goal_table = Table( 'goals', self.metadata, Column('id', types.Integer, primary_key=True), Column('name', types.String(255), nullable=False, index=True), Column('value_type', types.CHAR(1), nullable=False, default=''), Column('value_format', types.CHAR(1), nullable=False, default=''), mysql_engine='InnoDB') self.conversion_counts_table = Table( 'conversion_counts', self.metadata, Column('name', types.String(255), primary_key=True), Column('rollup_key', types.String(255), primary_key=True), Column('bucket_id', types.String(255), primary_key=True), Column('site_id', types.Integer, primary_key=True), Column('count', types.Integer, nullable=False, default=0), Column('value', types.Float, nullable=False, default=0), mysql_engine='InnoDB') self.impression_counts_table = Table( 'impression_counts', self.metadata, Column('name', types.String(255), primary_key=True), Column('selected', types.String(255), primary_key=True), Column('rollup_key', types.String(255), primary_key=True), Column('bucket_id', types.String(255), primary_key=True), Column('site_id', types.Integer, primary_key=True), Column('count', types.Integer, nullable=False, default=0), mysql_engine='InnoDB') self.variant_conversion_counts_table = Table( 'variant_conversion_counts', self.metadata, Column('goal_name', types.String(100), primary_key=True), Column('test_name', types.String(100), primary_key=True), Column('selected', types.String(100), primary_key=True), Column('rollup_key', types.String(100), primary_key=True), Column('bucket_id', types.String(100), primary_key=True), Column('site_id', types.Integer, primary_key=True), Column('count', types.Integer, nullable=False, default=0), Column('value', types.Float, nullable=False, default=0), mysql_engine='InnoDB') self.metadata.create_all() def update_pointer(self, ptr): if ptr is None: return q = self.pointer_table.update().values(pointer=ptr) r = q.execute() if r.rowcount == 0: q = self.pointer_table.insert().values(pointer=ptr) q.execute() def get_pointer(self): return select([self.pointer_table.c.pointer]).scalar() def begin(self): return self.engine.begin() def commit(self): return self.engine.commit() def criteria_from_dict(self, table, key_dict): 
criteria = [] for col, val in key_dict.iteritems(): criteria.append(getattr(table.c, col) == val) if len(criteria) > 1: return and_(*criteria) else: return criteria[0] def put_kv(self, table, key_dict, value_dict, increment=False): whereclause = self.criteria_from_dict(table, key_dict) if increment: update_dict = {} for col, val in value_dict.iteritems(): update_dict[col] = getattr(table.c, col) + val else: update_dict = value_dict q = table.update().values(**update_dict).where(whereclause) r = q.execute() if r.rowcount == 0: value_dict.update(key_dict) q = table.insert().values(**value_dict) q.execute() def put_visitor_history(self, histories): for vid, history in histories.iteritems(): self.put_kv(self.history_table, {'vid': vid}, {'history': history}) def put_test(self, tests): for name, test in tests.iteritems(): self.put_kv(self.tests_table, {'name': name}, {'first_timestamp': test.first_timestamp, 'last_timestamp': test.last_timestamp, 'variants': test.variants}) def put_goal(self, goals): for name, goal in goals.iteritems(): self.put_kv(self.goal_table, {'name': name}, {'value_type': goal.value_type, 'value_format': goal.value_format}) def increment_conversion_counters(self, inc_conversions, inc_values): """ Given a map of (goal name, rollup key, bucket start) tuples to tuples of (integer counts, Decimal values), adjust the state of the counts in the SQL database. """ for key in set(inc_conversions) | set(inc_values): delta = inc_conversions.get(key, 0) value = inc_values.get(key, 0) name, rollup_key, bucket_id, site_id = key self.put_kv(self.conversion_counts_table, {'name': name, 'rollup_key': rollup_key, 'bucket_id': bucket_id, 'site_id': site_id}, {'count': delta, 'value': value}, increment=True) def increment_impression_counters(self, inc_impressions): for key, delta in inc_impressions.iteritems(): name, selected, rollup_key, bucket_id, site_id = key self.put_kv(self.impression_counts_table, {'name': name, 'selected': selected, 'rollup_key': rollup_key, 'bucket_id': bucket_id, 'site_id': site_id}, {'count': delta}, increment=True) def increment_variant_conversion_counters(self, inc_variant_conversions, inc_variant_values): for key in set(inc_variant_conversions) | set(inc_variant_values): delta = inc_variant_conversions.get(key, 0) value = inc_variant_values.get(key, 0) goal_name, test_name, selected, \ rollup_key, bucket_id, site_id = key self.put_kv(self.variant_conversion_counts_table, {'goal_name': goal_name, 'test_name': test_name, 'selected': selected, 'rollup_key': rollup_key, 'bucket_id': bucket_id, 'site_id': site_id}, {'count': delta, 'value': value}, increment=True) def get_kv(self, table, get_cols, key_dict, default=None): to_select = [getattr(table.c, col) for col in get_cols] whereclause = self.criteria_from_dict(table, key_dict) r = select(to_select).where(whereclause).execute().first() if r: return r else: if default: return default else: raise KeyError def get_visitor_history(self, vid): r = self.get_kv(self.history_table, ['history'], {'vid': vid}) return r[0] def get_test(self, name): r = self.get_kv(self.tests_table, ['first_timestamp', 'last_timestamp', 'variants'], {'name': name}) first, last, variants = r return Test(first_timestamp=first, last_timestamp=last, variants=variants) def get_goal(self, name): r = self.get_kv(self.goal_table, ['value_type', 'value_format'], {'name': name}) value_type, value_format = r return Goal(value_type=value_type, value_format=value_format) def count_conversions(self, name, rollup_key, bucket_id, site_id): return 
            self.get_kv(self.conversion_counts_table,
                        ['count', 'value'],
                        {'name': name,
                         'rollup_key': rollup_key,
                         'bucket_id': bucket_id,
                         'site_id': site_id},
                        default=(0, 0))

    def count_impressions(self, name, selected, rollup_key, bucket_id,
                          site_id):
        r = self.get_kv(self.impression_counts_table,
                        ['count'],
                        {'name': name,
                         'selected': selected,
                         'rollup_key': rollup_key,
                         'bucket_id': bucket_id,
                         'site_id': site_id},
                        default=(0,))
        return r[0]

    def count_variant_conversions(self, goal_name, test_name, selected,
                                  rollup_key, bucket_id, site_id):
        return self.get_kv(self.variant_conversion_counts_table,
                           ['count', 'value'],
                           {'goal_name': goal_name,
                            'test_name': test_name,
                            'selected': selected,
                            'rollup_key': rollup_key,
                            'bucket_id': bucket_id,
                            'site_id': site_id},
                           default=(0, 0))

    def all_tests(self):
        t = self.tests_table
        r = select([t.c.name, t.c.first_timestamp, t.c.last_timestamp,
                    t.c.variants]).execute()
        ret = {}
        for name, first, last, variants in r:
            ret[name] = Test(first_timestamp=first, last_timestamp=last,
                             variants=variants)
        return ret
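# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the store above: put_kv() implements a
# portable "upsert" by issuing an UPDATE first and falling back to an INSERT
# when no row matched.  The standalone example below shows the same
# update-then-insert pattern against an in-memory SQLite database with plain
# SQLAlchemy Core; the table, column, and function names here are
# hypothetical and chosen only for the demonstration.
from sqlalchemy import MetaData, Table, Column, String, Integer, create_engine

engine = create_engine('sqlite://')
metadata = MetaData()
counts = Table('counts', metadata,
               Column('name', String(255), primary_key=True),
               Column('count', Integer, nullable=False, default=0))
metadata.create_all(engine)

def put_count(name, delta):
    # Try to increment an existing row; insert a fresh row if none matched.
    with engine.begin() as conn:
        result = conn.execute(
            counts.update()
                  .where(counts.c.name == name)
                  .values(count=counts.c.count + delta))
        if result.rowcount == 0:
            conn.execute(counts.insert().values(name=name, count=delta))

put_count('signup', 1)
put_count('signup', 2)
with engine.connect() as conn:
    # Roughly: [('signup', 3)] -- both calls landed on the same row.
    print(conn.execute(counts.select()).fetchall())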
import os import re import shutil import logging from binascii import b2a_hex from datetime import datetime from cStringIO import StringIO from ConfigParser import ConfigParser import tg os.environ['HGRCPATH'] = '' # disable loading .hgrc from mercurial import ui, hg from pymongo.errors import DuplicateKeyError from ming.base import Object from ming.orm import Mapper, session from ming.utils import LazyProperty from allura import model as M from allura.lib import helpers as h from allura.model.repository import topological_sort, GitLikeTree log = logging.getLogger(__name__) class Repository(M.Repository): tool_name='Hg' repo_id='hg' type_s='Hg Repository' class __mongometa__: name='hg-repository' @LazyProperty def _impl(self): return HgImplementation(self) def merge_command(self, merge_request): '''Return the command to merge a given commit into a given target branch''' return 'hg checkout %s\nhg pull -r %s %s' % ( merge_request.target_branch, merge_request.downstream.commit_id, merge_request.downstream_repo_url, ) def count(self, branch='default'): return super(Repository, self).count(branch) def log(self, branch='default', offset=0, limit=10): return super(Repository, self).log(branch, offset, limit) class HgUI(ui.ui): '''Hg UI subclass that suppresses reporting of untrusted hgrc files.''' def __init__(self, *args, **kwargs): super(HgUI, self).__init__(*args, **kwargs) self._reportuntrusted = False class HgImplementation(M.RepositoryImplementation): re_hg_user = re.compile('(.*) <(.*)>') skip_internal_files = set([ '00changelog.i', 'requires', 'branch', 'branch.cache', 'dirstate', 'inotify.sock', 'patches', 'wlock', 'undo.dirstate', 'undo.branch', 'journal.dirstate', 'journal.branch', 'store', 'lock', 'journal', 'undo', 'fncache', 'data', 'branchheads.cache', # older versions have these cache files 'tags.cache', # directly in the .hg directory 'cache', # newer versions have cache directory (see http://selenic.com/repo/hg/rev/5ccdca7df211) ]) def __init__(self, repo): self._repo = repo @LazyProperty def _hg(self): return hg.repository(HgUI(), self._repo.full_fs_path) def init(self): fullname = self._setup_paths() log.info('hg init %s', fullname) if os.path.exists(fullname): shutil.rmtree(fullname) repo = hg.repository( ui.ui(), self._repo.full_fs_path, create=True) self.__dict__['_hg'] = repo self._setup_special_files() self._repo.status = 'ready' def clone_from(self, source_url, copy_hooks=False): '''Initialize a repo as a clone of another''' self._repo.status = 'cloning' session(self._repo).flush(self._repo) log.info('Initialize %r as a clone of %s', self._repo, source_url) try: fullname = self._setup_paths(create_repo_dir=False) if os.path.exists(fullname): shutil.rmtree(fullname) # !$ hg doesn't like unicode as urls src, repo = hg.clone( ui.ui(), source_url.encode('utf-8'), self._repo.full_fs_path.encode('utf-8'), update=False) self.__dict__['_hg'] = repo self._setup_special_files(source_url, copy_hooks) except: self._repo.status = 'raise' session(self._repo).flush(self._repo) raise log.info('... %r cloned', self._repo) self._repo.refresh(notify=False) def commit(self, rev): '''Return a Commit object. rev can be _id or a branch/tag name''' # See if the rev is a named ref that we have cached, and use the sha1 # from the cache. This ensures that we don't return a sha1 that we # don't have indexed into mongo yet. 
for ref in self._repo.heads + self._repo.branches + self._repo.repo_tags: if ref.name == rev: rev = ref.object_id break result = M.repo.Commit.query.get(_id=rev) if result is None: try: impl = self._hg[str(rev)] result = M.repo.Commit.query.get(_id=impl.hex()) except Exception, e: log.exception(e) if result is None: return None result.set_context(self._repo) return result def real_parents(self, ci): """Return all parents of a commit, excluding the 'null revision' (a fake revision added as the parent of the root commit by the Hg api). """ return [p for p in ci.parents() if p] def all_commit_ids(self): """Return a list of commit ids, starting with the head(s) and ending with the root (first commit) of the tree. """ graph = {} to_visit = [ self._hg[hd] for hd in self._hg.heads() ] while to_visit: obj = to_visit.pop() if obj.hex() in graph: continue parents = self.real_parents(obj) graph[obj.hex()] = set( p.hex() for p in parents if p.hex() != obj.hex()) to_visit += parents return reversed([ ci for ci in topological_sort(graph) ]) def new_commits(self, all_commits=False): graph = {} to_visit = [ self._hg[hd] for hd in self._hg.heads() ] while to_visit: obj = to_visit.pop() if obj.hex() in graph: continue if not all_commits: # Look up the object if M.repo.Commit.query.find(dict(_id=obj.hex())).count(): graph[obj.hex()] = set() # mark as parentless continue parents = self.real_parents(obj) graph[obj.hex()] = set( p.hex() for p in parents if p.hex() != obj.hex()) to_visit += parents return list(topological_sort(graph)) def refresh_heads(self): self._repo.heads = [ Object(name=None, object_id=self._hg[head].hex()) for head in self._hg.heads()] self._repo.branches = [] for name, tag in self._hg.branchtags().iteritems(): if ("close" not in self._hg.changelog.read(tag)[5]): self._repo.branches.append( Object(name=name, object_id=self._hg[tag].hex())) self._repo.repo_tags = [ Object(name=name, object_id=self._hg[tag].hex()) for name, tag in self._hg.tags().iteritems()] session(self._repo).flush() def refresh_commit_info(self, oid, seen, lazy=True): from allura.model.repo import CommitDoc ci_doc = CommitDoc.m.get(_id=oid) if ci_doc and lazy: return False obj = self._hg[oid] # Save commit metadata mo = self.re_hg_user.match(obj.user()) if mo: user_name, user_email = mo.groups() else: user_name = user_email = obj.user() user = Object( name=h.really_unicode(user_name), email=h.really_unicode(user_email), date=datetime.utcfromtimestamp(obj.date()[0])) fake_tree = self._tree_from_changectx(obj) args = dict( tree_id=fake_tree.hex(), committed=user, authored=user, message=h.really_unicode(obj.description() or ''), child_ids=[], parent_ids=[ p.hex() for p in self.real_parents(obj) if p.hex() != obj.hex() ]) if ci_doc: ci_doc.update(args) ci_doc.m.save() else: ci_doc = CommitDoc(dict(args, _id=oid)) try: ci_doc.m.insert(safe=True) except DuplicateKeyError: if lazy: return False self.refresh_tree_info(fake_tree, seen, lazy) return True def refresh_tree_info(self, tree, seen, lazy=True): from allura.model.repo import TreeDoc if lazy and tree.hex() in seen: return seen.add(tree.hex()) doc = TreeDoc(dict( _id=tree.hex(), tree_ids=[], blob_ids=[], other_ids=[])) for name, t in tree.trees.iteritems(): self.refresh_tree_info(t, seen, lazy) doc.tree_ids.append( dict(name=h.really_unicode(name), id=t.hex())) for name, oid in tree.blobs.iteritems(): doc.blob_ids.append( dict(name=h.really_unicode(name), id=oid)) doc.m.save(safe=False) return doc def log(self, object_id, skip, count): obj = self._hg[object_id] candidates 
= [ obj ] result = [] seen = set() while count and candidates: candidates.sort(key=lambda c:sum(c.date())) obj = candidates.pop(-1) if obj.hex() in seen: continue seen.add(obj.hex()) if skip == 0: result.append(obj.hex()) count -= 1 else: skip -= 1 candidates += self.real_parents(obj) return result, [ p.hex() for p in candidates ] def open_blob(self, blob): fctx = self._hg[blob.commit._id][h.really_unicode(blob.path()).encode('utf-8')[1:]] return StringIO(fctx.data()) def blob_size(self, blob): fctx = self._hg[blob.commit._id][h.really_unicode(blob.path()).encode('utf-8')[1:]] return fctx.size() def _copy_hooks(self, source_path): '''Copy existing hooks if source path is given and exists.''' if source_path is None or not os.path.exists(source_path): return hgrc = os.path.join(self._repo.fs_path, self._repo.name, '.hg', 'hgrc') try: os.remove(hgrc) except OSError as e: if os.path.exists(hgrc): raise for name in os.listdir(os.path.join(source_path, '.hg')): source = os.path.join(source_path, '.hg', name) target = os.path.join( self._repo.full_fs_path, '.hg', os.path.basename(source)) if name in self.skip_internal_files: continue if os.path.isdir(source): shutil.copytree(source, target) else: shutil.copy2(source, target) def _setup_hooks(self, source_path=None, copy_hooks=False): 'Set up the hg changegroup hook' if copy_hooks: self._copy_hooks(source_path) hgrc = os.path.join(self._repo.fs_path, self._repo.name, '.hg', 'hgrc') cp = ConfigParser() cp.read(hgrc) if not cp.has_section('hooks'): cp.add_section('hooks') url = (tg.config.get('base_url', 'http://localhost:8080') + '/auth/refresh_repo' + self._repo.url()) cp.set('hooks','; = [the next line is required for site integration, do not remove/modify]', '') cp.set('hooks','changegroup.sourceforge','curl -s %s' % url) with open(hgrc, 'w') as fp: cp.write(fp) os.chmod(hgrc, 0755) def _tree_from_changectx(self, changectx): '''Build a fake git-like tree from a changectx and its manifest''' root = GitLikeTree() for filepath in changectx.manifest(): fctx = changectx[filepath] oid = b2a_hex(fctx.filenode()) root.set_blob(filepath, oid) return root def symbolics_for_commit(self, commit): branch_heads, tags = super(self.__class__, self).symbolics_for_commit(commit) ctx = self._hg[commit._id] return [ctx.branch()], tags def compute_tree_new(self, commit, tree_path='/'): ctx = self._hg[commit._id] fake_tree = self._tree_from_changectx(ctx) fake_tree = fake_tree.get_tree(tree_path) tree = self.refresh_tree_info(fake_tree, set()) return tree._id Mapper.compile_all()
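# ---------------------------------------------------------------------------
# Illustrative sketch, standalone and not Allura's implementation: the commit
# walkers above (all_commit_ids / new_commits) build a dict mapping each
# changeset hex to the set of its parent hexes and hand it to
# topological_sort() imported from allura.model.repository.  The toy version
# below orders such a parent graph so every parent precedes its children
# (root first); the real helper may differ in detail, and the sample graph is
# hypothetical.
from collections import deque

def toposort_parent_graph(graph):
    """graph: {commit_id: set(parent_ids)}; returns ids root-first."""
    pending = {ci: set(parents) for ci, parents in graph.items()}
    ready = deque(ci for ci, parents in pending.items() if not parents)
    order = []
    while ready:
        ci = ready.popleft()
        order.append(ci)
        # Any commit whose last remaining parent was just emitted is ready.
        for other, parents in pending.items():
            if ci in parents:
                parents.discard(ci)
                if not parents and other not in order and other not in ready:
                    ready.append(other)
    return order

# 'a' is the root commit, 'd' is a merge of 'b' and 'c'.
sample = {'a': set(), 'b': {'a'}, 'c': {'a'}, 'd': {'b', 'c'}}
print(toposort_parent_graph(sample))                   # e.g. ['a', 'b', 'c', 'd']
print(list(reversed(toposort_parent_graph(sample))))   # heads first, root last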
# -*- coding: utf-8 -*- # Copyright 2020 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Make sure that your AWS/GCP credentials are configured correctly """CLI tools for libcloudforensics.""" import argparse import sys from typing import Tuple, List, Optional, Any, Dict from tools import aws_cli from tools import az_cli from tools import gcp_cli PROVIDER_TO_FUNC = { 'aws': { 'copydisk': aws_cli.CreateVolumeCopy, 'createbucket': aws_cli.CreateBucket, 'deleteinstance': aws_cli.DeleteInstance, 'gcstos3': aws_cli.GCSToS3, 'imageebssnapshottos3': aws_cli.ImageEBSSnapshotToS3, 'instanceprofilemitigator': aws_cli.InstanceProfileMitigator, 'listdisks': aws_cli.ListVolumes, 'listimages': aws_cli.ListImages, 'listinstances': aws_cli.ListInstances, 'quarantinevm': aws_cli.InstanceNetworkQuarantine, 'querylogs': aws_cli.QueryLogs, 'startvm': aws_cli.StartAnalysisVm, 'uploadtobucket': aws_cli.UploadToBucket }, 'az': { 'copydisk': az_cli.CreateDiskCopy, 'listinstances': az_cli.ListInstances, 'listdisks': az_cli.ListDisks, 'startvm': az_cli.StartAnalysisVm, 'listmetrics': az_cli.ListMetrics, 'querymetrics': az_cli.QueryMetrics }, 'gcp': { 'bucketacls': gcp_cli.GetBucketACLs, 'bucketsize': gcp_cli.GetBucketSize, 'copydisk': gcp_cli.CreateDiskCopy, 'creatediskgcs': gcp_cli.CreateDiskFromGCSImage, 'deleteinstance': gcp_cli.DeleteInstance, 'deleteobject': gcp_cli.DeleteObject, 'createbucket': gcp_cli.CreateBucket, 'gkequarantine': gcp_cli.GKEWorkloadQuarantine, 'gkeenumerate': gcp_cli.GKEEnumerate, 'listbigqueryjobs': gcp_cli.ListBigQueryJobs, 'listbuckets': gcp_cli.ListBuckets, 'listcloudsqlinstances': gcp_cli.ListCloudSqlInstances, 'listdisks': gcp_cli.ListDisks, 'listinstances': gcp_cli.ListInstances, 'listlogs': gcp_cli.ListLogs, 'listobjects': gcp_cli.ListBucketObjects, 'listservices': gcp_cli.ListServices, 'objectmetadata': gcp_cli.GetGCSObjectMetadata, 'quarantinevm': gcp_cli.InstanceNetworkQuarantine, 'querylogs': gcp_cli.QueryLogs, 'startvm': gcp_cli.StartAnalysisVm, 'S3ToGCS': gcp_cli.S3ToGCS, 'vmremoveserviceaccount': gcp_cli.VMRemoveServiceAccount } } def AddParser( provider: str, # pylint: disable=protected-access provider_parser: argparse._SubParsersAction, # type: ignore # pylint: enable=protected-access func: str, func_helper: str, args: Optional[List[Tuple[str, str, Optional[Any]]]] = None) -> None: """Create a new parser object for a provider's functionality. Args: provider (str): The cloud provider offering the function. This should be one of ['aws', 'gcp']. provider_parser (_SubParsersAction): A provider's subparser object from argparse.ArgumentParser. func (str): The name of the function to look for in the given provider and to add parsing options for. func_helper (str): A helper text describing what the function does. args (List[Tuple]): Optional. A list of arguments to add to the parser. Each argument is a tuple containing the action (str) to add to the parser, a helper text (str), and a default value (Any or None). 
Raises: NotImplementedError: If the requested provider or function is not implemented. """ if provider not in PROVIDER_TO_FUNC: raise NotImplementedError('Requested provider is not implemented') if func not in PROVIDER_TO_FUNC[provider]: raise NotImplementedError('Requested functionality {0:s} is not ' 'implemented for provider {1:s}'.format( func, provider)) func_parser = provider_parser.add_parser(func, help=func_helper) if args: for argument, helper_text, default_value in args: kwargs = {'help': helper_text} # type: Dict[str, Any] if isinstance(default_value, bool): kwargs['action'] = 'store_true' else: kwargs['default'] = default_value func_parser.add_argument(argument, **kwargs) # type: ignore func_parser.set_defaults(func=PROVIDER_TO_FUNC[provider][func]) def Main() -> None: """Main function for libcloudforensics CLI.""" parser = argparse.ArgumentParser( description='CLI tool for AWS, Azure and GCP.') subparsers = parser.add_subparsers() aws_parser = subparsers.add_parser('aws', help='Tools for AWS') az_parser = subparsers.add_parser('az', help='Tools for Azure') gcp_parser = subparsers.add_parser('gcp', help='Tools for GCP') # AWS parser options aws_parser.add_argument('zone', help='The AWS zone in which resources are ' 'located, e.g. us-east-2b') aws_subparsers = aws_parser.add_subparsers() AddParser('aws', aws_subparsers, 'listinstances', 'List EC2 instances in AWS account.') AddParser('aws', aws_subparsers, 'listdisks', 'List EBS volumes in AWS account.') AddParser('aws', aws_subparsers, 'copydisk', 'Create an AWS volume copy.', args=[ ('--dst_zone', 'The AWS zone in which to copy the volume. By ' 'default this is the same as "zone".', None), ('--instance_id', 'The AWS unique instance ID', None), ('--volume_id', 'The AWS unique volume ID of the volume to ' 'copy. If none specified, then --instance_id ' 'must be specified and the boot volume of the ' 'AWS instance will be copied.', None), ('--volume_type', 'The volume type for the volume copy. ' 'Can be standard, io1, gp2, sc1, st1. The ' 'default behavior is to use the same volume ' 'type as the source volume.', None), ('--src_profile', 'The name of the profile for the source ' 'account, as defined in the AWS credentials ' 'file.', None), ('--dst_profile', 'The name of the profile for the destination ' 'account, as defined in the AWS credentials ' 'file.', None), ('--tags', 'A string dictionary of tags to add to the volume ' 'copy. ', None) ]) AddParser('aws', aws_subparsers, 'querylogs', 'Query AWS CloudTrail logs', args=[ ('--filter', 'Query filter: \'value,key\'', ''), ('--start', 'Start date for query (2020-05-01 11:13:00)', None), ('--end', 'End date for query (2020-05-01 11:13:00)', None) ]) AddParser('aws', aws_subparsers, 'startvm', 'Start a forensic analysis VM.', args=[ ('instance_name', 'Name of EC2 instance to re-use or create.', ''), ('--boot_volume_size', 'Size of instance boot volume in GB.', '50'), ('--boot_volume_type', 'The boot volume type for the VM. ' 'Can be standard, io1, gp2, sc1, st1. ' 'Default is gp2', 'gp2'), ('--cpu_cores', 'Instance CPU core count.', '4'), ('--ami', 'AMI ID to use as base image. Will search ' 'Ubuntu 18.04 LTS server x86_64 for chosen region ' 'by default.', ''), ('--ssh_key_name', 'SSH key pair name. This is the name of an ' 'existing SSH key pair in the AWS account ' 'where the VM will live. 
Alternatively, ' 'use --generate_ssh_key_pair to create a ' 'new key pair in the AWS account.', None), ('--generate_ssh_key_pair', 'Generate a new SSH key pair in ' 'the AWS account where the ' 'analysis VM will be created. ' 'Returns the private key at the ' 'end of the process. ' 'Takes precedence over ' '--ssh_key_name', False), ('--attach_volumes', 'Comma separated list of volume IDs ' 'to attach. Maximum of 11.', None), ('--dst_profile', 'The name of the profile for the destination ' 'account, as defined in the AWS credentials ' 'file.', None), ('--subnet_id','Subnet to launch the instance in', None), ('--security_group_id', 'Security group to attach to the ' 'instance', None), ('--launch_script','Userdata script for the instance to run at' ' launch', None) ]) AddParser('aws', aws_subparsers, 'listimages', 'List AMI images.', args=[ ('--filter', 'Filter to apply to Name of AMI image.', None), ]) AddParser('aws', aws_subparsers, 'createbucket', 'Create an S3 bucket.', args=[ ('name', 'The name of the bucket.', None), ]) AddParser('aws', aws_subparsers, 'uploadtobucket', 'Upload a file to an S3 bucket.', args=[ ('bucket', 'The name of the bucket.', None), ('filepath', 'Local file name.', None), ]) AddParser('aws', aws_subparsers, 'gcstos3', 'Transfer a file from GCS to an S3 bucket.', args=[ ('project', 'GCP Project name.', None), ('gcs_path', 'Source object path.', None), ('s3_path', 'Destination bucket.', None), ]) AddParser('aws', aws_subparsers, 'imageebssnapshottos3', 'Copy an image of an EBS volume to S3. This is not natively ' 'supported in AWS, so requires launching of an instance to ' 'perform a `dd`. In the S3 destination dir will be a copy of ' 'the snapshot and a hash.', args=[ ('snapshot_id','EBS snapshot ID to make the copy of.', None), ('s3_destination','The S3 destination in the format ' 'bucket[/optional/child/folders]', None), ('--instance_profile_name', 'The name of the instance profile to use/create.', None), ('--subnet_id','Subnet to launch the instance in.', None), ('--security_group_id', 'Security group to attach to the ' 'instance.', None), ('--cleanup_iam', 'Remove created IAM components afterwards', False) ]) AddParser('aws', aws_subparsers, 'deleteinstance', 'Delete an instance.', args=[ ('--instance_id', 'ID of EC2 instance to delete.', ''), ('--instance_name', 'Name of EC2 instance to delete.', ''), ('--region', 'Region in which the instance is.', ''), ('--force_delete', 'Force instance deletion when deletion protection is ' 'activated.', False), ]) AddParser('aws', aws_subparsers, 'quarantinevm', 'Put a VM in ' 'network quarantine.', args=[ ('instance_id', 'ID (i-xxxxxx) of the instance to quarantine.', None), ('--exempted_src_subnets', 'Comma separated list of source ' 'subnets to exempt from ingress firewall rules.', None) ]) AddParser('aws', aws_subparsers, 'instanceprofilemitigator', 'Remove an instance profile from an instance, and optionally ' 'revoke all previously issued temporary credentials.', args=[ ('instance_id', 'ID (i-xxxxxx) of the instance to quarantine.', None), ('--revoke', 'Revoke existing temporary creds for the instance' ' profile.', False) ]) # Azure parser options az_parser.add_argument('default_resource_group_name', help='The default resource group name in which to ' 'create resources') az_subparsers = az_parser.add_subparsers() AddParser('az', az_subparsers, 'listinstances', 'List instances in Azure subscription.', args=[ ('--resource_group_name', 'The resource group name from ' 'which to list instances.', None) ]) AddParser('az', 
az_subparsers, 'listdisks', 'List disks in Azure subscription.', args=[ ('--resource_group_name', 'The resource group name from ' 'which to list disks.', None) ]) AddParser('az', az_subparsers, 'copydisk', 'Create an Azure disk copy.', args=[ ('--instance_name', 'The instance name.', None), ('--disk_name', 'The name of the disk to copy. If none ' 'specified, then --instance_name must be ' 'specified and the boot disk of the Azure ' 'instance will be copied.', None), ('--disk_type', 'The SKU name for the disk to create. ' 'Can be Standard_LRS, Premium_LRS, ' 'StandardSSD_LRS, or UltraSSD_LRS. The default ' 'behavior is to use the same disk type as ' 'the source disk.', None), ('--region', 'The region in which to create the disk copy. If ' 'not provided, the disk copy will be created in ' 'the "eastus" region.', 'eastus'), ('--src_profile', 'The Azure profile information to use as ' 'source account for the disk copy. Default ' 'will look into environment variables to ' 'authenticate the requests.', None), ('--dst_profile', 'The Azure profile information to use as ' 'destination account for the disk copy. If ' 'not provided, the default behavior is to ' 'use the same destination profile as the ' 'source profile.', None) ]) AddParser('az', az_subparsers, 'startvm', 'Start a forensic analysis VM.', args=[ ('instance_name', 'Name of the Azure instance to create.', None), ('--disk_size', 'Size of disk in GB.', 50), ('--cpu_cores', 'Instance CPU core count.', 4), ('--memory_in_mb', 'Instance amount of RAM memory.', 8192), ('--region', 'The region in which to create the VM. If not ' 'provided, the VM will be created in the ' '"eastus" region.', 'eastus'), ('--attach_disks', 'Comma separated list of disk names ' 'to attach.', None), ('--ssh_public_key', 'A SSH public key to register with the ' 'VM. e.g. ssh-rsa AAdddbbh... If not ' 'provided, a new SSH key pair will be ' 'generated.', None), ('--dst_profile', 'The Azure profile information to use as ' 'destination account for the vm creation.', None) ]) AddParser('az', az_subparsers, 'listmetrics', 'List Azure Monitoring metrics for a resource.', args=[ ('resource_id', 'The resource ID for the resource.', None) ]) AddParser('az', az_subparsers, 'querymetrics', 'Query Azure Monitoring metrics for a resource.', args=[ ('resource_id', 'The resource ID for the resource.', None), ('metrics', 'A comma separated list of metrics to query for ' 'the resource.', None), ('--from_date', 'A start date from which to lookup the ' 'metrics. Format: %Y-%m-%dT%H:%M:%SZ', None), ('--to_date', 'An end date until which to lookup the metrics.' 'Format: %Y-%m-%dT%H:%M:%SZ', None), ('--interval', 'An interval for the metrics, e.g. PT1H will ' 'output metrics values with one hour ' 'granularity.', None), ('--aggregation', 'The type of aggregation for the metrics ' 'values. Default is "Total". Possible values:' ' "Total", "Average"', None), ('--qfilter', 'A filter for the query. E.g. (name.value eq ' '"RunsSucceeded") and (aggregationType eq ' '"Total") and (startTime eq 2016-02-20) and ' '(endTime eq 2016-02-21) and (timeGrain eq ' 'duration "PT1M")', None) ]) # GCP parser options gcp_parser.add_argument( '--project', help='GCP project ID. If not provided, the library will look' ' for a project ID configured with your gcloud SDK. If ' 'none found, errors. 
For GCP logs operations, a list of' ' project IDs can be passed, as a comma-separated ' 'string: project_id1,project_id2,...') gcp_subparsers = gcp_parser.add_subparsers() AddParser('gcp', gcp_subparsers, 'listinstances', 'List GCE instances in GCP project.') AddParser('gcp', gcp_subparsers, 'listdisks', 'List GCE disks in GCP project.') AddParser('gcp', gcp_subparsers, 'copydisk', 'Create a GCP disk copy.', args=[ ('dst_project', 'Destination GCP project.', ''), ('zone', 'Zone to create the disk in.', ''), ('--instance_name', 'Name of the instance to copy disk from.', ''), ('--disk_name', 'Name of the disk to copy. If none specified, ' 'then --instance_name must be specified and ' 'the boot disk of the instance will be copied.', None), ('--disk_type', 'Type of disk. Can be pd-standard or pd-ssd. ' 'The default behavior is to use the same disk ' 'type as the source disk.', None) ]) AddParser('gcp', gcp_subparsers, 'startvm', 'Start a forensic analysis VM.', args=[ ('instance_name', 'Name of the GCE instance to create.', ''), ('zone', 'Zone to create the instance in.', ''), ('--disk_size', 'Size of disk in GB.', '50'), ('--disk_type', 'Type of disk. Can be pd-standard or pd-ssd. ' 'The default value is pd-ssd.', 'pd-ssd'), ('--cpu_cores', 'Instance CPU core count.', '4'), ('--attach_disks', 'Comma separated list of disk names ' 'to attach.', None) ]) AddParser('gcp', gcp_subparsers, 'deleteinstance', 'Delete a GCE instance.', args=[ ('instance_name', 'Name of the GCE instance to delete.', ''), ('--delete_all_disks', 'Force delete disks marked as "Keep when deleting".', False), ('--force_delete', 'Force instance deletion when deletion protection is ' 'activated.', False) ]) AddParser('gcp', gcp_subparsers, 'querylogs', 'Query GCP logs.', args=[ ('--filter', 'Query filter. If querying multiple logs / ' 'multiple project IDs, enter each filter in a ' 'single string that is comma-separated: ' '--filter="filter1,filter2,..."', None), ('--start', 'Start date for query (2020-05-01T11:13:00Z)', None), ('--end', 'End date for query (2020-05-01T11:13:00Z)', None) ]) AddParser('gcp', gcp_subparsers, 'listlogs', 'List GCP logs for a project.') AddParser('gcp', gcp_subparsers, 'listservices', 'List active services for a project.') AddParser('gcp', gcp_subparsers, 'creatediskgcs', 'Creates GCE persistent ' 'disk from image in GCS.', args=[('gcs_path', 'Path to the source image in GCS.', ''), ('zone', 'Zone to create the disk in.', ''), ('--disk_name', 'Name of the disk to create. 
If None, name ' 'will be printed at the end.', None)]) AddParser('gcp', gcp_subparsers, 'createbucket', 'Create a GCS bucket in a project.', args=[ ('name', 'Name of bucket.', None), ]) AddParser('gcp', gcp_subparsers, 'listbuckets', 'List GCS buckets for a project.') AddParser('gcp', gcp_subparsers, 'bucketacls', 'List ACLs of a GCS bucket.', args=[ ('path', 'Path to bucket.', None), ]) AddParser('gcp', gcp_subparsers, 'bucketsize', 'Get the size of a GCS bucket.', args=[ ('path', 'Path to bucket.', None) ]) AddParser('gcp', gcp_subparsers, 'objectmetadata', 'List the details of an ' 'object in a GCS bucket.', args=[ ('path', 'Path to object.', None) ]) AddParser('gcp', gcp_subparsers, 'listobjects', 'List the objects in a ' 'GCS bucket.', args=[ ('path', 'Path to bucket.', None), ]) AddParser('gcp', gcp_subparsers, 'listcloudsqlinstances', 'List CloudSQL instances for a project.') AddParser('gcp', gcp_subparsers, 'deleteobject', 'Deletes a GCS object', args=[ ('path', 'Path to GCS object.', None), ]) AddParser('gcp', gcp_subparsers, 'quarantinevm', 'Put a VM in ' 'network quarantine.', args=[ ('instance_name', 'Name of the GCE instance to quranitne.', ''), ('--exempted_src_ips', 'Comma separated list of source IPs ' 'to exempt from ingress firewall rules.', None), ('--enable_logging', 'Enable firewall logging.', False), ]) AddParser('gcp', gcp_subparsers, 'S3ToGCS', 'Transfer an S3 object to a GCS bucket.', args=[ ('s3_path', 'Path to S3 object.', None), ('zone', 'Amazon availability zone.', None), ('gcs_path', 'Target GCS bucket.', None), ]) AddParser('gcp', gcp_subparsers, 'vmremoveserviceaccount', 'Removes a service account attachment from a VM.', args=[ ('instance_name', 'Name of the instance to affect', ''), ('--leave_stopped', 'Leave the machine TERMINATED after ' 'removing the service account (default: False)', False) ]) AddParser('gcp', gcp_subparsers, 'gkequarantine', 'Start the quarantining process for a GKE workload.', args=[ ('cluster', 'The name of the workload\'s GKE cluster.', ''), ('zone', 'The zone of the workload\'s GKE cluster.', ''), ('workload', 'The name of the GKE workload to isolate.', ''), ('namespace', 'The namespace of the workload.', ''), ('--exempted_src_ips', 'Comma separated list of source IPs ' 'to exempt from ingress firewall rules when isolating ' 'nodes.', None) ]) AddParser('gcp', gcp_subparsers, 'gkeenumerate', 'Enumerate a GKE cluster or one of its objects.', args=[ ('cluster', 'The name of the GKE cluster.', ''), ('zone', 'The zone of the GKE cluster.', ''), ('--workload', 'The name of the workload to enumerate.', ''), ('--service', 'The name of the service to enumerate.', None), ('--node', 'The name of the node to enumerate.', None), ('--namespace', 'The namespace of the object to enumerate.', None), ('--as_json', 'Output in JSON format.', False) ]) AddParser('gcp', gcp_subparsers, 'listbigqueryjobs', 'List BigQuery jobs for a project.') if len(sys.argv) == 1: parser.print_help() sys.exit(1) parsed_args = parser.parse_args() if hasattr(parsed_args, 'func'): parsed_args.func(parsed_args) if __name__ == '__main__': Main()
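# ---------------------------------------------------------------------------
# Illustrative sketch with hypothetical provider/handler names, not the real
# libcloudforensics CLI: Main() above maps each provider to a dict of
# sub-command handlers, registers one sub-parser per handler with
# set_defaults(func=...), and dispatches by calling parsed_args.func(...).
# A minimal standalone version of that pattern:
import argparse

def list_widgets(args):
    print('listing widgets in project %s' % args.project)

HANDLERS = {'demo': {'listwidgets': list_widgets}}

def build_parser():
    parser = argparse.ArgumentParser(description='toy multi-provider CLI')
    providers = parser.add_subparsers(dest='provider')
    for provider, funcs in HANDLERS.items():
        provider_parser = providers.add_parser(provider)
        provider_parser.add_argument('--project', default='default-project')
        commands = provider_parser.add_subparsers(dest='command')
        for name, handler in funcs.items():
            command_parser = commands.add_parser(name)
            # The handler travels with the parsed namespace.
            command_parser.set_defaults(func=handler)
    return parser

args = build_parser().parse_args(['demo', 'listwidgets'])
if hasattr(args, 'func'):
    args.func(args)   # prints: listing widgets in project default-project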
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import contextlib import datetime import functools import hashlib import inspect import json import lockfile import os import pyclbr import random import re import shlex import socket import struct import sys import time import types import uuid import warnings from xml.sax import saxutils from eventlet import event from eventlet import greenthread from eventlet import semaphore from eventlet.green import subprocess import netaddr from nova import exception from nova import flags from nova import log as logging from nova.openstack.common import cfg LOG = logging.getLogger(__name__) ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" FLAGS = flags.FLAGS FLAGS.register_opt( cfg.BoolOpt('disable_process_locking', default=False, help='Whether to disable inter-process locks')) def import_class(import_str): """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition('.') try: __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ImportError, ValueError, AttributeError), exc: LOG.debug(_('Inner Exception: %s'), exc) raise exception.ClassNotFound(class_name=class_str, exception=exc) def import_object(import_str): """Returns an object including a module or module and class.""" try: __import__(import_str) return sys.modules[import_str] except ImportError: cls = import_class(import_str) return cls() def find_config(config_path): """Find a configuration file using the given hint. :param config_path: Full or relative path to the config. :returns: Full path of the config, if it exists. :raises: `nova.exception.ConfigNotFound` """ possible_locations = [ config_path, os.path.join(FLAGS.state_path, "etc", "nova", config_path), os.path.join(FLAGS.state_path, "etc", config_path), os.path.join(FLAGS.state_path, config_path), "/etc/nova/%s" % config_path, ] for path in possible_locations: if os.path.exists(path): return os.path.abspath(path) raise exception.ConfigNotFound(path=os.path.abspath(config_path)) def vpn_ping(address, port, timeout=0.05, session_id=None): """Sends a vpn negotiation packet and returns the server session. Returns False on a failure. Basic packet structure is below. Client packet (14 bytes):: 0 1 8 9 13 +-+--------+-----+ |x| cli_id |?????| +-+--------+-----+ x = packet identifier 0x38 cli_id = 64 bit identifier ? = unknown, probably flags/padding Server packet (26 bytes):: 0 1 8 9 13 14 21 2225 +-+--------+-----+--------+----+ |x| srv_id |?????| cli_id |????| +-+--------+-----+--------+----+ x = packet identifier 0x40 cli_id = 64 bit identifier ? 
= unknown, probably flags/padding bit 9 was 1 and the rest were 0 in testing """ if session_id is None: session_id = random.randint(0, 0xffffffffffffffff) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) data = struct.pack('!BQxxxxx', 0x38, session_id) sock.sendto(data, (address, port)) sock.settimeout(timeout) try: received = sock.recv(2048) except socket.timeout: return False finally: sock.close() fmt = '!BQxxxxxQxxxx' if len(received) != struct.calcsize(fmt): print struct.calcsize(fmt) return False (identifier, server_sess, client_sess) = struct.unpack(fmt, received) if identifier == 0x40 and client_sess == session_id: return server_sess def fetchfile(url, target): LOG.debug(_('Fetching %s') % url) execute('curl', '--fail', url, '-o', target) def execute(*cmd, **kwargs): """ Helper method to execute command with optional retry. :cmd Passed to subprocess.Popen. :process_input Send to opened process. :check_exit_code Single bool, int, or list of allowed exit codes. Defaults to [0]. Raise exception.ProcessExecutionError unless program exits with one of these code. :delay_on_retry True | False. Defaults to True. If set to True, wait a short amount of time before retrying. :attempts How many times to retry cmd. :run_as_root True | False. Defaults to False. If set to True, the command is prefixed by the command specified in the root_helper FLAG. :raises exception.Error on receiving unknown arguments :raises exception.ProcessExecutionError :returns a tuple, (stdout, stderr) from the spawned process, or None if the command fails. """ process_input = kwargs.pop('process_input', None) check_exit_code = kwargs.pop('check_exit_code', [0]) ignore_exit_code = False if isinstance(check_exit_code, bool): ignore_exit_code = not check_exit_code check_exit_code = [0] elif isinstance(check_exit_code, int): check_exit_code = [check_exit_code] delay_on_retry = kwargs.pop('delay_on_retry', True) attempts = kwargs.pop('attempts', 1) run_as_root = kwargs.pop('run_as_root', False) shell = kwargs.pop('shell', False) if len(kwargs): raise exception.Error(_('Got unknown keyword args ' 'to utils.execute: %r') % kwargs) if run_as_root: cmd = shlex.split(FLAGS.root_helper) + list(cmd) cmd = map(str, cmd) while attempts > 0: attempts -= 1 try: LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) _PIPE = subprocess.PIPE # pylint: disable=E1101 obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, close_fds=True, shell=shell) result = None if process_input is not None: result = obj.communicate(process_input) else: result = obj.communicate() obj.stdin.close() # pylint: disable=E1101 _returncode = obj.returncode # pylint: disable=E1101 if _returncode: LOG.debug(_('Result was %s') % _returncode) if not ignore_exit_code and _returncode not in check_exit_code: (stdout, stderr) = result raise exception.ProcessExecutionError( exit_code=_returncode, stdout=stdout, stderr=stderr, cmd=' '.join(cmd)) return result except exception.ProcessExecutionError: if not attempts: raise else: LOG.debug(_('%r failed. Retrying.'), cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) finally: # NOTE(termie): this appears to be necessary to let the subprocess # call clean something up in between calls, without # it two execute calls in a row hangs the second one greenthread.sleep(0) def trycmd(*args, **kwargs): """ A wrapper around execute() to more easily handle warnings and errors. Returns an (out, err) tuple of strings containing the output of the command's stdout and stderr. 
If 'err' is not empty then the command can be considered to have failed. :discard_warnings True | False. Defaults to False. If set to True, then for succeeding commands, stderr is cleared """ discard_warnings = kwargs.pop('discard_warnings', False) try: out, err = execute(*args, **kwargs) failed = False except exception.ProcessExecutionError, exn: out, err = '', str(exn) LOG.debug(err) failed = True if not failed and discard_warnings and err: # Handle commands that output to stderr but otherwise succeed LOG.debug(err) err = '' return out, err def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True): LOG.debug(_('Running cmd (SSH): %s'), ' '.join(cmd)) if addl_env: raise exception.Error(_('Environment not supported over SSH')) if process_input: # This is (probably) fixable if we need it... raise exception.Error(_('process_input not supported over SSH')) stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) channel = stdout_stream.channel #stdin.write('process_input would go here') #stdin.flush() # NOTE(justinsb): This seems suspicious... # ...other SSH clients have buffering issues with this approach stdout = stdout_stream.read() stderr = stderr_stream.read() stdin_stream.close() exit_status = channel.recv_exit_status() # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug(_('Result was %s') % exit_status) if check_exit_code and exit_status != 0: raise exception.ProcessExecutionError(exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=' '.join(cmd)) return (stdout, stderr) def abspath(s): return os.path.join(os.path.dirname(__file__), s) def novadir(): import nova return os.path.abspath(nova.__file__).split('nova/__init__.py')[0] def default_flagfile(filename='nova.conf', args=None): if args is None: args = sys.argv for arg in args: if arg.find('flagfile') != -1: return arg[arg.index('flagfile') + len('flagfile') + 1:] else: if not os.path.isabs(filename): # turn relative filename into an absolute path script_dir = os.path.dirname(inspect.stack()[-1][1]) filename = os.path.abspath(os.path.join(script_dir, filename)) if not os.path.exists(filename): filename = "./nova.conf" if not os.path.exists(filename): filename = '/etc/nova/nova.conf' if os.path.exists(filename): flagfile = '--flagfile=%s' % filename args.insert(1, flagfile) return filename def debug(arg): LOG.debug(_('debug in callback: %s'), arg) return arg def generate_uid(topic, size=8): characters = '01234567890abcdefghijklmnopqrstuvwxyz' choices = [random.choice(characters) for x in xrange(size)] return '%s-%s' % (topic, ''.join(choices)) # Default symbols to use for passwords. Avoids visually confusing characters. 
# ~6 bits per symbol DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O 'abcdefghijkmnopqrstuvwxyz') # Removed: l # ~5 bits per symbol EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1 'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O def current_audit_period(unit=None): if not unit: unit = FLAGS.instance_usage_audit_period rightnow = utcnow() if unit not in ('month', 'day', 'year', 'hour'): raise ValueError('Time period must be hour, day, month or year') n = 1 # we are currently only using multiples of 1 unit (mdragon) if unit == 'month': year = rightnow.year - (n // 12) n = n % 12 if n >= rightnow.month: year -= 1 month = 12 + (rightnow.month - n) else: month = rightnow.month - n begin = datetime.datetime(day=1, month=month, year=year) end = datetime.datetime(day=1, month=rightnow.month, year=rightnow.year) elif unit == 'year': begin = datetime.datetime(day=1, month=1, year=rightnow.year - n) end = datetime.datetime(day=1, month=1, year=rightnow.year) elif unit == 'day': b = rightnow - datetime.timedelta(days=n) begin = datetime.datetime(day=b.day, month=b.month, year=b.year) end = datetime.datetime(day=rightnow.day, month=rightnow.month, year=rightnow.year) elif unit == 'hour': end = rightnow.replace(minute=0, second=0, microsecond=0) begin = end - datetime.timedelta(hours=n) return (begin, end) def usage_from_instance(instance_ref, network_info=None, **kw): image_ref_url = "%s/images/%s" % (generate_glance_url(), instance_ref['image_ref']) usage_info = dict( tenant_id=instance_ref['project_id'], user_id=instance_ref['user_id'], instance_id=instance_ref['uuid'], instance_type=instance_ref['instance_type']['name'], instance_type_id=instance_ref['instance_type_id'], memory_mb=instance_ref['memory_mb'], disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'], display_name=instance_ref['display_name'], created_at=str(instance_ref['created_at']), launched_at=str(instance_ref['launched_at']) if instance_ref['launched_at'] else '', image_ref_url=image_ref_url, state=instance_ref['vm_state'], state_description=instance_ref['task_state'] if instance_ref['task_state'] else '') if network_info is not None: usage_info['fixed_ips'] = network_info.fixed_ips() usage_info.update(kw) return usage_info def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): """Generate a random password from the supplied symbol groups. At least one symbol from each group will be included. Unpredictable results if length is less than the number of symbol groups. Believed to be reasonably secure (with a reasonable password length!) """ r = random.SystemRandom() # NOTE(jerdfelt): Some password policies require at least one character # from each group of symbols, so start off with one random character # from each symbol group password = [r.choice(s) for s in symbolgroups] # If length < len(symbolgroups), the leading characters will only # be from the first length groups. Try our best to not be predictable # by shuffling and then truncating. 
r.shuffle(password) password = password[:length] length -= len(password) # then fill with random characters from all symbol groups symbols = ''.join(symbolgroups) password.extend([r.choice(symbols) for _i in xrange(length)]) # finally shuffle to ensure first x characters aren't from a # predictable group r.shuffle(password) return ''.join(password) def last_octet(address): return int(address.split('.')[-1]) def get_my_linklocal(interface): try: if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface) condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link' links = [re.search(condition, x) for x in if_str[0].split('\n')] address = [w.group(1) for w in links if w is not None] if address[0] is not None: return address[0] else: raise exception.Error(_('Link Local address is not found.:%s') % if_str) except Exception as ex: raise exception.Error(_("Couldn't get Link Local IP of %(interface)s" " :%(ex)s") % locals()) def utcnow(): """Overridable version of utils.utcnow.""" if utcnow.override_time: return utcnow.override_time return datetime.datetime.utcnow() utcnow.override_time = None def is_older_than(before, seconds): """Return True if before is older than seconds.""" return utcnow() - before > datetime.timedelta(seconds=seconds) def utcnow_ts(): """Timestamp version of our utcnow function.""" return time.mktime(utcnow().timetuple()) def set_time_override(override_time=datetime.datetime.utcnow()): """Override utils.utcnow to return a constant time.""" utcnow.override_time = override_time def advance_time_delta(timedelta): """Advance overriden time using a datetime.timedelta.""" assert(not utcnow.override_time is None) utcnow.override_time += timedelta def advance_time_seconds(seconds): """Advance overriden time by seconds.""" advance_time_delta(datetime.timedelta(0, seconds)) def clear_time_override(): """Remove the overridden time.""" utcnow.override_time = None def strtime(at=None, fmt=PERFECT_TIME_FORMAT): """Returns formatted utcnow.""" if not at: at = utcnow() return at.strftime(fmt) def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): """Turn a formatted time back into a datetime.""" return datetime.datetime.strptime(timestr, fmt) def isotime(at=None): """Returns iso formatted utcnow.""" return strtime(at, ISO_TIME_FORMAT) def parse_isotime(timestr): """Turn an iso formatted time back into a datetime.""" return parse_strtime(timestr, ISO_TIME_FORMAT) def parse_mailmap(mailmap='.mailmap'): mapping = {} if os.path.exists(mailmap): fp = open(mailmap, 'r') for l in fp: l = l.strip() if not l.startswith('#') and ' ' in l: canonical_email, alias = l.split(' ') mapping[alias.lower()] = canonical_email.lower() return mapping def str_dict_replace(s, mapping): for s1, s2 in mapping.iteritems(): s = s.replace(s1, s2) return s class LazyPluggable(object): """A pluggable backend loaded lazily based on some value.""" def __init__(self, pivot, **backends): self.__backends = backends self.__pivot = pivot self.__backend = None def __get_backend(self): if not self.__backend: backend_name = FLAGS[self.__pivot] if backend_name not in self.__backends: raise exception.Error(_('Invalid backend: %s') % backend_name) backend = self.__backends[backend_name] if isinstance(backend, tuple): name = backend[0] fromlist = backend[1] else: name = backend fromlist = backend self.__backend = __import__(name, None, None, fromlist) LOG.debug(_('backend %s'), self.__backend) return self.__backend def __getattr__(self, key): backend = self.__get_backend() return getattr(backend, key) class 
LoopingCallDone(Exception): """Exception to break out and stop a LoopingCall. The poll-function passed to LoopingCall can raise this exception to break out of the loop normally. This is somewhat analogous to StopIteration. An optional return-value can be included as the argument to the exception; this return-value will be returned by LoopingCall.wait() """ def __init__(self, retvalue=True): """:param retvalue: Value that LoopingCall.wait() should return.""" self.retvalue = retvalue class LoopingCall(object): def __init__(self, f=None, *args, **kw): self.args = args self.kw = kw self.f = f self._running = False def start(self, interval, now=True): self._running = True done = event.Event() def _inner(): if not now: greenthread.sleep(interval) try: while self._running: self.f(*self.args, **self.kw) if not self._running: break greenthread.sleep(interval) except LoopingCallDone, e: self.stop() done.send(e.retvalue) except Exception: LOG.exception(_('in looping call')) done.send_exception(*sys.exc_info()) return else: done.send(True) self.done = done greenthread.spawn(_inner) return self.done def stop(self): self._running = False def wait(self): return self.done.wait() def xhtml_escape(value): """Escapes a string so it is valid within XML or XHTML. Code is directly from the utf8 function in http://github.com/facebook/tornado/blob/master/tornado/escape.py """ return saxutils.escape(value, {'"': '&quot;'}) def utf8(value): """Try to turn a string into utf-8 if possible. Code is directly from the utf8 function in http://github.com/facebook/tornado/blob/master/tornado/escape.py """ if isinstance(value, unicode): return value.encode('utf-8') assert isinstance(value, str) return value def to_primitive(value, convert_instances=False, level=0): """Convert a complex object into primitives. Handy for JSON serialization. We can optionally handle instances, but since this is a recursive function, we could have cyclical data structures. To handle cyclical data structures we could track the actual objects visited in a set, but not all objects are hashable. Instead we just track the depth of the object inspections and don't go too deep. Therefore, convert_instances=True is lossy ... be aware. """ nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod, inspect.isfunction, inspect.isgeneratorfunction, inspect.isgenerator, inspect.istraceback, inspect.isframe, inspect.iscode, inspect.isbuiltin, inspect.isroutine, inspect.isabstract] for test in nasty: if test(value): return unicode(value) # FIXME(vish): Workaround for LP bug 852095. Without this workaround, # tests that raise an exception in a mocked method that # has a @wrap_exception with a notifier will fail. If # we up the dependency to 0.5.4 (when it is released) we # can remove this workaround. if getattr(value, '__module__', None) == 'mox': return 'mock' if level > 3: return '?' # The try block may not be necessary after the class check above, # but just in case ... 
try: if isinstance(value, (list, tuple)): o = [] for v in value: o.append(to_primitive(v, convert_instances=convert_instances, level=level)) return o elif isinstance(value, dict): o = {} for k, v in value.iteritems(): o[k] = to_primitive(v, convert_instances=convert_instances, level=level) return o elif isinstance(value, datetime.datetime): return str(value) elif hasattr(value, 'iteritems'): return to_primitive(dict(value.iteritems()), convert_instances=convert_instances, level=level) elif hasattr(value, '__iter__'): return to_primitive(list(value), level) elif convert_instances and hasattr(value, '__dict__'): # Likely an instance of something. Watch for cycles. # Ignore class member vars. return to_primitive(value.__dict__, convert_instances=convert_instances, level=level + 1) else: return value except TypeError, e: # Class objects are tricky since they may define something like # __iter__ defined but it isn't callable as list(). return unicode(value) def dumps(value): try: return json.dumps(value) except TypeError: pass return json.dumps(to_primitive(value)) def loads(s): return json.loads(s) try: import anyjson except ImportError: pass else: anyjson._modules.append(("nova.utils", "dumps", TypeError, "loads", ValueError)) anyjson.force_implementation("nova.utils") _semaphores = {} def synchronized(name, external=False): """Synchronization decorator. Decorating a method like so: @synchronized('mylock') def foo(self, *args): ... ensures that only one thread will execute the bar method at a time. Different methods can share the same lock: @synchronized('mylock') def foo(self, *args): ... @synchronized('mylock') def bar(self, *args): ... This way only one of either foo or bar can be executing at a time. The external keyword argument denotes whether this lock should work across multiple processes. This means that if two different workers both run a a method decorated with @synchronized('mylock', external=True), only one of them will execute at a time. """ def wrap(f): @functools.wraps(f) def inner(*args, **kwargs): # NOTE(soren): If we ever go natively threaded, this will be racy. # See http://stackoverflow.com/questions/5390569/dyn # amically-allocating-and-destroying-mutexes if name not in _semaphores: _semaphores[name] = semaphore.Semaphore() sem = _semaphores[name] LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method ' '"%(method)s"...' % {'lock': name, 'method': f.__name__})) with sem: LOG.debug(_('Got semaphore "%(lock)s" for method ' '"%(method)s"...' % {'lock': name, 'method': f.__name__})) if external and not FLAGS.disable_process_locking: LOG.debug(_('Attempting to grab file lock "%(lock)s" for ' 'method "%(method)s"...' % {'lock': name, 'method': f.__name__})) lock_file_path = os.path.join(FLAGS.lock_path, 'nova-%s' % name) lock = lockfile.FileLock(lock_file_path) with lock: LOG.debug(_('Got file lock "%(lock)s" for ' 'method "%(method)s"...' % {'lock': name, 'method': f.__name__})) retval = f(*args, **kwargs) else: retval = f(*args, **kwargs) # If no-one else is waiting for it, delete it. # See note about possible raciness above. if not sem.balance < 1: del _semaphores[name] return retval return inner return wrap def get_from_path(items, path): """Returns a list of items matching the specified path. Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the intermediate results are lists it will treat each list item individually. 
A 'None' in items or any child expressions will be ignored, this function will not throw because of None (anywhere) in items. The returned list will contain no None values. """ if path is None: raise exception.Error('Invalid mini_xpath') (first_token, sep, remainder) = path.partition('/') if first_token == '': raise exception.Error('Invalid mini_xpath') results = [] if items is None: return results if not isinstance(items, list): # Wrap single objects in a list items = [items] for item in items: if item is None: continue get_method = getattr(item, 'get', None) if get_method is None: continue child = get_method(first_token) if child is None: continue if isinstance(child, list): # Flatten intermediate lists for x in child: results.append(x) else: results.append(child) if not sep: # No more tokens return results else: return get_from_path(results, remainder) def flatten_dict(dict_, flattened=None): """Recursively flatten a nested dictionary.""" flattened = flattened or {} for key, value in dict_.iteritems(): if hasattr(value, 'iteritems'): flatten_dict(value, flattened) else: flattened[key] = value return flattened def partition_dict(dict_, keys): """Return two dicts, one with `keys` the other with everything else.""" intersection = {} difference = {} for key, value in dict_.iteritems(): if key in keys: intersection[key] = value else: difference[key] = value return intersection, difference def map_dict_keys(dict_, key_map): """Return a dict in which the dictionaries keys are mapped to new keys.""" mapped = {} for key, value in dict_.iteritems(): mapped_key = key_map[key] if key in key_map else key mapped[mapped_key] = value return mapped def subset_dict(dict_, keys): """Return a dict that only contains a subset of keys.""" subset = partition_dict(dict_, keys)[0] return subset def check_isinstance(obj, cls): """Checks that obj is of type cls, and lets PyLint infer types.""" if isinstance(obj, cls): return obj raise Exception(_('Expected object of type: %s') % (str(cls))) # TODO(justinsb): Can we make this better?? return cls() # Ugly PyLint hack def parse_server_string(server_str): """ Parses the given server_string and returns a list of host and port. If it's not a combination of host part and port, the port element is a null string. If the input is invalid expression, return a null list. """ try: # First of all, exclude pure IPv6 address (w/o port). if netaddr.valid_ipv6(server_str): return (server_str, '') # Next, check if this is IPv6 address with a port number combination. if server_str.find("]:") != -1: (address, port) = server_str.replace('[', '', 1).split(']:') return (address, port) # Third, check if this is a combination of an address and a port if server_str.find(':') == -1: return (server_str, '') # This must be a combination of an address and a port (address, port) = server_str.split(':') return (address, port) except Exception: LOG.debug(_('Invalid server_string: %s' % server_str)) return ('', '') def gen_uuid(): return uuid.uuid4() def is_uuid_like(val): """For our purposes, a UUID is a string in canonical form: aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa """ try: uuid.UUID(val) return True except (TypeError, ValueError, AttributeError): return False def bool_from_str(val): """Convert a string representation of a bool into a bool value""" if not val: return False try: return True if int(val) else False except ValueError: return val.lower() == 'true' def is_valid_ipv4(address): """valid the address strictly as per format xxx.xxx.xxx.xxx. where xxx is a value between 0 and 255. 
""" parts = address.split(".") if len(parts) != 4: return False for item in parts: try: if not 0 <= int(item) <= 255: return False except ValueError: return False return True def is_valid_cidr(address): """Check if the provided ipv4 or ipv6 address is a valid CIDR address or not""" try: # Validate the correct CIDR Address netaddr.IPNetwork(address) except netaddr.core.AddrFormatError: return False # Prior validation partially verify /xx part # Verify it here ip_segment = address.split('/') if (len(ip_segment) <= 1 or ip_segment[1] == ''): return False return True def monkey_patch(): """ If the Flags.monkey_patch set as True, this function patches a decorator for all functions in specified modules. You can set decorators for each modules using FLAGS.monkey_patch_modules. The format is "Module path:Decorator function". Example: 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator' Parameters of the decorator is as follows. (See nova.notifier.api.notify_decorator) name - name of the function function - object of the function """ # If FLAGS.monkey_patch is not True, this function do nothing. if not FLAGS.monkey_patch: return # Get list of modules and decorators for module_and_decorator in FLAGS.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # import decorator function decorator = import_class(decorator_name) __import__(module) # Retrieve module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): # set the decorator for the class methods if isinstance(module_data[key], pyclbr.Class): clz = import_class("%s.%s" % (module, key)) for method, func in inspect.getmembers(clz, inspect.ismethod): setattr(clz, method, decorator("%s.%s.%s" % (module, key, method), func)) # set the decorator for the function if isinstance(module_data[key], pyclbr.Function): func = import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key, decorator("%s.%s" % (module, key), func)) def convert_to_list_dict(lst, label): """Convert a value or list into a list of dicts""" if not lst: return None if not isinstance(lst, list): lst = [lst] return [{label: x} for x in lst] def timefunc(func): """Decorator that logs how long a particular function took to execute""" @functools.wraps(func) def inner(*args, **kwargs): start_time = time.time() try: return func(*args, **kwargs) finally: total_time = time.time() - start_time LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") % dict(name=func.__name__, total_time=total_time)) return inner def generate_glance_url(): """Generate the URL to glance.""" # TODO(jk0): This will eventually need to take SSL into consideration # when supported in glance. return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port) @contextlib.contextmanager def save_and_reraise_exception(): """Save current exception, run some code and then re-raise. In some cases the exception context can be cleared, resulting in None being attempted to be reraised after an exception handler is run. This can happen when eventlet switches greenthreads or when running an exception handler, code raises and catches and exception. In both cases the exception context will be cleared. To work around this, we save the exception state, run handler code, and then re-raise the original exception. If another exception occurs, the saved exception is logged and the new exception is reraised. 
""" type_, value, traceback = sys.exc_info() try: yield except Exception: LOG.exception(_('Original exception being dropped'), exc_info=(type_, value, traceback)) raise raise type_, value, traceback @contextlib.contextmanager def logging_error(message): """Catches exception, write message to the log, re-raise. This is a common refinement of save_and_reraise that writes a specific message to the log. """ try: yield except Exception as error: with save_and_reraise_exception(): LOG.exception(message) def make_dev_path(dev, partition=None, base='/dev'): """Return a path to a particular device. >>> make_dev_path('xvdc') /dev/xvdc >>> make_dev_path('xvdc', 1) /dev/xvdc1 """ path = os.path.join(base, dev) if partition: path += str(partition) return path def total_seconds(td): """Local total_seconds implementation for compatibility with python 2.6""" if hasattr(td, 'total_seconds'): return td.total_seconds() else: return ((td.days * 86400 + td.seconds) * 10 ** 6 + td.microseconds) / 10.0 ** 6 def sanitize_hostname(hostname): """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" if isinstance(hostname, unicode): hostname = hostname.encode('latin-1', 'ignore') hostname = re.sub('[ _]', '-', hostname) hostname = re.sub('[^\w.-]+', '', hostname) hostname = hostname.lower() hostname = hostname.strip('.-') return hostname def read_cached_file(filename, cache_info, reload_func=None): """Read from a file if it has been modified. :param cache_info: dictionary to hold opaque cache. :param reload_func: optional function to be called with data when file is reloaded due to a modification. :returns: data from file """ mtime = os.path.getmtime(filename) if not cache_info or mtime != cache_info.get('mtime'): with open(filename) as fap: cache_info['data'] = fap.read() cache_info['mtime'] = mtime if reload_func: reload_func(cache_info['data']) return cache_info['data'] def hash_file(file_like_object): """Generate a hash for the contents of a file.""" checksum = hashlib.sha1() any(map(checksum.update, iter(lambda: file_like_object.read(32768), ''))) return checksum.hexdigest() @contextlib.contextmanager def temporary_mutation(obj, **kwargs): """Temporarily set the attr on a particular object to a given value then revert when finished. One use of this is to temporarily set the read_deleted flag on a context object: with temporary_mutation(context, read_deleted="yes"): do_something_that_needed_deleted_objects() """ NOT_PRESENT = object() old_values = {} for attr, new_value in kwargs.items(): old_values[attr] = getattr(obj, attr, NOT_PRESENT) setattr(obj, attr, new_value) try: yield finally: for attr, old_value in old_values.items(): if old_value is NOT_PRESENT: del obj[attr] else: setattr(obj, attr, old_value) def warn_deprecated_class(cls, msg): """ Issues a warning to indicate that the given class is deprecated. If a message is given, it is appended to the deprecation warning. """ fullname = '%s.%s' % (cls.__module__, cls.__name__) if msg: fullmsg = _("Class %(fullname)s is deprecated: %(msg)s") else: fullmsg = _("Class %(fullname)s is deprecated") # Issue the warning warnings.warn(fullmsg % locals(), DeprecationWarning, stacklevel=3) def warn_deprecated_function(func, msg): """ Issues a warning to indicate that the given function is deprecated. If a message is given, it is appended to the deprecation warning. 
""" name = func.__name__ # Find the function's definition sourcefile = inspect.getsourcefile(func) # Find the line number, if possible if inspect.ismethod(func): code = func.im_func.func_code else: code = func.func_code lineno = getattr(code, 'co_firstlineno', None) if lineno is None: location = sourcefile else: location = "%s:%d" % (sourcefile, lineno) # Build up the message if msg: fullmsg = _("Function %(name)s in %(location)s is deprecated: %(msg)s") else: fullmsg = _("Function %(name)s in %(location)s is deprecated") # Issue the warning warnings.warn(fullmsg % locals(), DeprecationWarning, stacklevel=3) def _stubout(klass, message): """ Scans a class and generates wrapping stubs for __new__() and every class and static method. Returns a dictionary which can be passed to type() to generate a wrapping class. """ overrides = {} def makestub_class(name, func): """ Create a stub for wrapping class methods. """ def stub(cls, *args, **kwargs): warn_deprecated_class(klass, message) return func(*args, **kwargs) # Overwrite the stub's name stub.__name__ = name stub.func_name = name return classmethod(stub) def makestub_static(name, func): """ Create a stub for wrapping static methods. """ def stub(*args, **kwargs): warn_deprecated_class(klass, message) return func(*args, **kwargs) # Overwrite the stub's name stub.__name__ = name stub.func_name = name return staticmethod(stub) for name, kind, _klass, _obj in inspect.classify_class_attrs(klass): # We're only interested in __new__(), class methods, and # static methods... if (name != '__new__' and kind not in ('class method', 'static method')): continue # Get the function... func = getattr(klass, name) # Override it in the class if kind == 'class method': stub = makestub_class(name, func) elif kind == 'static method' or name == '__new__': stub = makestub_static(name, func) # Save it in the overrides dictionary... overrides[name] = stub # Apply the overrides for name, stub in overrides.items(): setattr(klass, name, stub) def deprecated(message=''): """ Marks a function, class, or method as being deprecated. For functions and methods, emits a warning each time the function or method is called. For classes, generates a new subclass which will emit a warning each time the class is instantiated, or each time any class or static method is called. If a message is passed to the decorator, that message will be appended to the emitted warning. This may be used to suggest an alternate way of achieving the desired effect, or to explain why the function, class, or method is deprecated. """ def decorator(f_or_c): # Make sure we can deprecate it... if not callable(f_or_c) or isinstance(f_or_c, types.ClassType): warnings.warn("Cannot mark object %r as deprecated" % f_or_c, DeprecationWarning, stacklevel=2) return f_or_c # If we're deprecating a class, create a subclass of it and # stub out all the class and static methods if inspect.isclass(f_or_c): klass = f_or_c _stubout(klass, message) return klass # OK, it's a function; use a traditional wrapper... func = f_or_c @functools.wraps(func) def wrapper(*args, **kwargs): warn_deprecated_function(func, message) return func(*args, **kwargs) return wrapper return decorator def _showwarning(message, category, filename, lineno, file=None, line=None): """ Redirect warnings into logging. 
""" fmtmsg = warnings.formatwarning(message, category, filename, lineno, line) LOG.warning(fmtmsg) # Install our warnings handler warnings.showwarning = _showwarning def service_is_up(service): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. elapsed = total_seconds(utcnow() - last_heartbeat) return abs(elapsed) <= FLAGS.service_down_time def generate_mac_address(): """Generate an Ethernet MAC address.""" mac = [0x02, 0x16, 0x3e, random.randint(0x00, 0x7f), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] return ':'.join(map(lambda x: "%02x" % x, mac)) def read_file_as_root(file_path): """Secure helper to read file as root.""" try: out, _err = execute('cat', file_path, run_as_root=True) return out except exception.ProcessExecutionError: raise exception.FileNotFound(file_path=file_path)
""" Various utilities and helper functions. """ from __future__ import division, print_function, absolute_import import numbers import math import numpy as np from scipy.optimize import brentq from scipy.stats import (binom, hypergeom, ttest_ind, ttest_1samp) def binom_conf_interval(n, x, cl=0.975, alternative="two-sided", p=None, **kwargs): """ Compute a confidence interval for a binomial p, the probability of success in each trial. Parameters ---------- n : int The number of Bernoulli trials. x : int The number of successes. cl : float in (0, 1) The desired confidence level. alternative : {"two-sided", "lower", "upper"} Indicates the alternative hypothesis. p : float in (0, 1) Starting point in search for confidence bounds for probability of success in each trial. kwargs : dict Key word arguments Returns ------- tuple lower and upper confidence level with coverage (approximately) 1-alpha. Notes ----- xtol : float Tolerance rtol : float Tolerance maxiter : int Maximum number of iterations. """ assert alternative in ("two-sided", "lower", "upper") if p is None: p = x / n ci_low = 0.0 ci_upp = 1.0 if alternative == 'two-sided': cl = 1 - (1-cl)/2 if alternative != "upper" and x > 0: f = lambda q: cl - binom.cdf(x - 1, n, q) ci_low = brentq(f, 0.0, p, *kwargs) if alternative != "lower" and x < n: f = lambda q: binom.cdf(x, n, q) - (1 - cl) ci_upp = brentq(f, 1.0, p, *kwargs) return ci_low, ci_upp def hypergeom_conf_interval(n, x, N, cl=0.975, alternative="two-sided", G=None, **kwargs): """ Confidence interval for a hypergeometric distribution parameter G, the number of good objects in a population in size N, based on the number x of good objects in a simple random sample of size n. Parameters ---------- n : int The number of draws without replacement. x : int The number of "good" objects in the sample. N : int The number of objects in the population. cl : float in (0, 1) The desired confidence level. alternative : {"two-sided", "lower", "upper"} Indicates the alternative hypothesis. G : int in [0, N] Starting point in search for confidence bounds for the hypergeometric parameter G. kwargs : dict Key word arguments Returns ------- tuple lower and upper confidence level with coverage (at least) 1-alpha. Notes ----- xtol : float Tolerance rtol : float Tolerance maxiter : int Maximum number of iterations. """ assert alternative in ("two-sided", "lower", "upper") if G is None: G = (x / n)*N ci_low = 0 ci_upp = N if alternative == 'two-sided': cl = 1 - (1-cl)/2 if alternative != "upper" and x > 0: f = lambda q: cl - hypergeom.cdf(x-1, N, q, n) ci_low = math.ceil(brentq(f, 0.0, G, *kwargs)) if alternative != "lower" and x < n: f = lambda q: hypergeom.cdf(x, N, q, n) - (1-cl) ci_upp = math.floor(brentq(f, G, N, *kwargs)) return ci_low, ci_upp def get_prng(seed=None): """Turn seed into a np.random.RandomState instance Parameters ---------- seed : {None, int, RandomState} If seed is None, return the RandomState singleton used by np.random. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError. 
Returns ------- RandomState """ if seed is None or seed is np.random: return np.random.mtrand._rand if isinstance(seed, (numbers.Integral, np.integer)): return np.random.RandomState(seed) if isinstance(seed, np.random.RandomState): return seed raise ValueError('%r cannot be used to seed a numpy.random.RandomState' ' instance' % seed) def permute_within_groups(x, group, seed=None): """ Permutation of condition within each group. Parameters ---------- x : array-like A 1-d array indicating treatment. group : array-like A 1-d array indicating group membership seed : RandomState instance or {None, int, RandomState instance} If None, the pseudorandom number generator is the RandomState instance used by `np.random`; If int, seed is the seed used by the random number generator; If RandomState instance, seed is the pseudorandom number generator Returns ------- permuted : array-like The within group permutation of x. """ prng = get_prng(seed) permuted = x.copy() # (avoid additional flops) -- maybe memoize for g in np.unique(group): gg = group == g permuted[gg] = prng.permutation(permuted[gg]) return permuted def permute_rows(m, seed=None): """ Permute the rows of a matrix in-place Parameters ---------- m : array-like A 2-d array seed : RandomState instance or {None, int, RandomState instance} If None, the pseudorandom number generator is the RandomState instance used by `np.random`; If int, seed is the seed used by the random number generator; If RandomState instance, seed is the pseudorandom number generator Returns ------- None Original matrix is permuted in-place, nothing is returned. """ prng = get_prng(seed) for row in m: prng.shuffle(row) def permute_incidence_fixed_sums(incidence, k=1): """ Permute elements of a (binary) incidence matrix, keeping the row and column sums in-tact. Parameters ---------- incidence : 2D ndarray Incidence matrix to permute. k : int The number of successful pairwise swaps to perform. Notes ----- The row and column sums are kept fixed by always swapping elements two pairs at a time. Returns ------- permuted : 2D ndarray The permuted incidence matrix. """ if not incidence.ndim == 2: raise ValueError("Incidence matrix must be 2D") if incidence.min() != 0 or incidence.max() != 1: raise ValueError("Incidence matrix must be binary") incidence = incidence.copy() n, m = incidence.shape rows = np.arange(n) cols = np.arange(m) K, k = k, 0 while k < K: swappable = False while (not swappable): chosen_rows = np.random.choice(rows, 2, replace=False) s0, s1 = chosen_rows potential_cols0, = np.where((incidence[s0, :] == 1) & (incidence[s1, :] == 0)) potential_cols1, = np.where((incidence[s0, :] == 0) & (incidence[s1, :] == 1)) potential_cols0 = np.setdiff1d(potential_cols0, potential_cols1) if (len(potential_cols0) == 0) or (len(potential_cols1) == 0): continue p0 = np.random.choice(potential_cols0) p1 = np.random.choice(potential_cols1) # These statements should always be true, so we should # never raise an assertion here assert incidence[s0, p0] == 1 assert incidence[s0, p1] == 0 assert incidence[s1, p0] == 0 assert incidence[s1, p1] == 1 swappable = True i0 = incidence.copy() incidence[[s0, s0, s1, s1], [p0, p1, p0, p1]] = [0, 1, 1, 0] k += 1 return incidence
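# Minimal usage sketch (hypothetical data) for the permutation helpers above.
# get_prng yields a reproducible RandomState; permute_within_groups shuffles
# values separately inside each group; permute_rows shuffles each matrix row
# in place.
if __name__ == "__main__":
    x = np.array([1, 2, 3, 4, 5, 6])
    group = np.array([0, 0, 0, 1, 1, 1])
    # Elements of the first group are shuffled among themselves, and likewise
    # for the second group.
    print(permute_within_groups(x, group, seed=42))

    m = np.arange(12).reshape(3, 4)
    permute_rows(m, seed=get_prng(0))
    print(m)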
import numpy as np import random import itertools import scipy.misc import matplotlib.pyplot as plt import tensorflow as tf import os #%matplotlib inline class gameOb(): def __init__(self, coordinates, size, intensity, channel, reward, name): self.x = coordinates[0] self.y = coordinates[1] self.size = size self.intensity = intensity self.channel = channel self.reward = reward self.name = name class gameEnv(): def __init__(self, size): self.sizeX = size self.sizeY = size self.actions = 4 self.objects = [] a = self.reset() plt.imshow(a, interpolation = "nearest") #plt.show() def reset(self): self.objects = [] hero = gameOb(self.newPosition(), 1, 1, 2, None, 'hero') self.objects.append(hero) goal = gameOb(self.newPosition(), 1, 1, 1, 1, 'goal') self.objects.append(goal) hole = gameOb(self.newPosition(), 1, 1, 0, -1, 'fire') self.objects.append(hole) goal2 = gameOb(self.newPosition(), 1, 1, 1, 1, 'goal') self.objects.append(goal2) hole2 = gameOb(self.newPosition(), 1, 1, 0, -1, 'fire') self.objects.append(hole2) goal3 = gameOb(self.newPosition(), 1, 1, 1, 1, 'goal') self.objects.append(goal3) goal4 = gameOb(self.newPosition(), 1, 1, 1, 1, 'goal') self.objects.append(goal4) state = self.renderEnv() self.state = state return state def moveChar(self, direction): hero = self.objects[0] heroX = hero.x heroY = hero.y if direction == 0 and hero.y >= 1: hero.y -= 1 if direction == 1 and hero.y <= self.sizeY - 2: hero.y += 1 if direction == 2 and hero.x >= 1: hero.x -= 1 if direction == 3 and hero.x <= self.sizeX - 2: hero.x += 1 self.objects[0] = hero def newPosition(self): iterables = [range(self.sizeX), range(self.sizeY)] points = [] for t in itertools.product(*iterables): points.append(t) currentPositions = [] for objectA in self.objects: if (objectA.x, objectA.y) not in currentPositions: currentPositions.append((objectA.x, objectA.y)) for pos in currentPositions: points.remove(pos) location = np.random.choice(range(len(points)), replace = False) return points[location] def checkGoal(self): others = [] for obj in self.objects: if obj.name == 'hero': hero = obj else: others.append(obj) for other in others: if hero.x == other.x and hero.y == other.y: self.objects.remove(other) if other.reward == 1: self.objects.append(gameOb(self.newPosition(), 1, 1, 1, 1, 'goal')) else: self.objects.append(gameOb(self.newPosition(), 1, 1, 0, -1, 'fire')) return other.reward, False return 0.0, False def renderEnv(self): a = np.ones([self.sizeY + 2, self.sizeX + 2, 3]) a[1:-1, 1:-1, :] = 0 hero = None for item in self.objects: a[item.y + 1:item.y + item.size + 1, item.x + 1:item.x + item.size + 1, item.channel] = item.intensity b = scipy.misc.imresize(a[:, :, 0], [84, 84, 1], interp = 'nearest') c = scipy.misc.imresize(a[:, :, 1], [84, 84, 1], interp = 'nearest') d = scipy.misc.imresize(a[:, :, 2], [84, 84, 1], interp = 'nearest') a = np.stack([b, c, d], axis = 2) return a def step(self, action): self.moveChar(action) reward, done = self.checkGoal() state = self.renderEnv() return state, reward, done env = gameEnv(size = 5) class Qnetwork(): def __init__(self, h_size): self.scalarInput = tf.placeholder(shape = [None, 21168], dtype = tf.float32) self.imageIn = tf.reshape(self.scalarInput, shape = [-1, 84, 84, 3]) self.conv1 = tf.contrib.layers.convolution2d(inputs = self.imageIn, num_outputs = 32, kernel_size = [8, 8], stride = [4, 4], padding = 'VALID', biases_initializer = None) self.conv2 = tf.contrib.layers.convolution2d(inputs = self.conv1, num_outputs = 64, kernel_size = [4, 4], stride = [2, 2], padding = 
'VALID', biases_initializer = None) self.conv3 = tf.contrib.layers.convolution2d(inputs = self.conv2, num_outputs = 64, kernel_size = [3, 3], stride = [1, 1], padding = 'VALID', biases_initializer = None) self.conv4 = tf.contrib.layers.convolution2d(inputs = self.conv3, num_outputs = 512, kernel_size = [7, 7], stride = [1, 1], padding = 'VALID', biases_initializer = None) self.streamAC, self.streamVC = tf.split(self.conv4, 2, 3) self.streamA = tf.contrib.layers.flatten(self.streamAC) self.streamV = tf.contrib.layers.flatten(self.streamVC) self.AW = tf.Variable(tf.random_normal([h_size // 2, env.actions])) self.VW = tf.Variable(tf.random_normal([h_size // 2, 1])) self.Advantage = tf.matmul(self.streamA, self.AW) self.Value = tf.matmul(self.streamV, self.VW) self.Qout = self.Value + tf.subtract(self.Advantage, tf.reduce_mean(self.Advantage, reduction_indices = 1, keep_dims = True)) self.predict = tf.argmax(self.Qout, 1) self.targetQ = tf.placeholder(shape = [None], dtype = tf.float32) self.actions = tf.placeholder(shape = [None], dtype = tf.int32) self.actions_onehot = tf.one_hot(self.actions, env.actions, dtype = tf.float32) self.Q = tf.reduce_sum(tf.multiply(self.Qout, self.actions_onehot), reduction_indices = 1) self.td_error = tf.square(self.targetQ - self.Q) self.loss = tf.reduce_mean(self.td_error) self.trainer = tf.train.AdamOptimizer(learning_rate = 0.0001) self.updateModel = self.trainer.minimize(self.loss) class experience_buffer(): def __init__(self, buffer_size = 50000): self.buffer = [] self.buffer_size = buffer_size def add(self, experience): if len(self.buffer) + len(experience) >= self.buffer_size: self.buffer[0:(len(experience) + len(self.buffer)) - self.buffer_size] = [] self.buffer.extend(experience) def sample(self, size): return np.reshape(np.array(random.sample(self.buffer, size)), [size, 5]) def processState(states): return np.reshape(states, [21168]) def updateTargetGraph(tfVars, tau): total_vars = len(tfVars) op_holder = [] for idx, var in enumerate(tfVars[0:total_vars // 2]): op_holder.append(tfVars[idx + total_vars // 2].assign((var.value() * tau) + ((1 - tau) * tfVars[idx + total_vars // 2].value()))) return op_holder def updateTarget(op_holder, sess): for op in op_holder: sess.run(op) batch_size = 32 update_freq = 4 y = .99 startE = 1 endE = 0.1 anneling_steps = 10000. 
num_episodes = 10000 pre_train_steps = 10000 max_epLength = 50 load_model = False path = "./dqn" h_size = 512 tau = 0.001 mainQN = Qnetwork(h_size) targetQN = Qnetwork(h_size) init = tf.global_variables_initializer() trainables = tf.trainable_variables() targetOps = updateTargetGraph(trainables, tau) myBuffer = experience_buffer() e = startE stepDrop = (startE - endE) / anneling_steps rList = [] total_steps = 0 saver = tf.train.Saver() if not os.path.exists(path): os.makedirs(path) with tf.Session() as sess: if load_model == True: print('Loading Model...') ckpt = tf.train.get_checkpoint_state(path) saver.restore(sess, ckpt.model_checkpoint_path) sess.run(init) updateTarget(targetOps, sess) for i in range(num_episodes + 1): episodeBuffer = experience_buffer() s = env.reset() s = processState(s) d = False rAll = 0 j = 0 while j < max_epLength: j += 1 if np.random.rand(1) < e or total_steps < pre_train_steps: a = np.random.randint(0, 4) else: a = sess.run(mainQN.predict, feed_dict = {mainQN.scalarInput: [s]})[0] s1, r, d = env.step(a) s1 = processState(s1) total_steps += 1 episodeBuffer.add(np.reshape(np.array([s, a, r, s1, d]), [1, 5])) if total_steps > pre_train_steps: if e > endE: e -= stepDrop if total_steps % (update_freq) == 0: trainBatch = myBuffer.sample(batch_size) A = sess.run(mainQN.predict, feed_dict = {mainQN.scalarInput: np.vstack(trainBatch[:, 3])}) Q = sess.run(targetQN.Qout, feed_dict = {targetQN.scalarInput: np.vstack(trainBatch[:, 3])}) doubleQ = Q[range(batch_size), A] targetQ = trainBatch[:, 2] + y * doubleQ _ = sess.run(mainQN.updateModel, feed_dict = {mainQN.scalarInput: np.vstack(trainBatch[:, 0]), mainQN.targetQ: targetQ, mainQN.actions: trainBatch[:, 1]}) updateTarget(targetOps, sess) rAll += r s = s1 if d == True: break myBuffer.add(episodeBuffer.buffer) rList.append(rAll) if i > 0 and i % 25 == 0: print('episode', i, ', average reward of last 25 episode', np.mean(rList[-25:])) if i > 0 and i % 1000 == 0: saver.save(sess, path + '/model-' + str(i) + '.cptk') print("Saved Model") saver.save(sess, path + '/model-' + str(i) + '.cptk') rMat = np.resize(np.array(rList), [len(rList) // 100, 100]) rMean = np.average(rMat, 1) plt.plot(rMean)
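# Evaluation sketch (not part of the original training loop): restore the
# last checkpoint written above and run one greedy episode with the main
# network. mainQN, env, processState, saver, path and max_epLength are the
# names defined earlier in this script; the rest is illustrative.
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(path)
    if ckpt is not None:
        saver.restore(sess, ckpt.model_checkpoint_path)
        s = processState(env.reset())
        episode_reward = 0.0
        for _ in range(max_epLength):
            # Act greedily with respect to the learned Q-values.
            a = sess.run(mainQN.predict,
                         feed_dict={mainQN.scalarInput: [s]})[0]
            s1, r, d = env.step(a)
            s = processState(s1)
            episode_reward += r
            if d:
                break
        print('greedy episode reward:', episode_reward)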
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'certified'} DOCUMENTATION = """ --- module: sqs_queue short_description: Creates or deletes AWS SQS queues. description: - Create or delete AWS SQS queues. - Update attributes on existing queues. version_added: "2.0" author: - Alan Loi (@loia) - Fernando Jose Pando (@nand0p) - Nadir Lloret (@nadirollo) requirements: - "boto >= 2.33.0" options: state: description: - Create or delete the queue required: false choices: ['present', 'absent'] default: 'present' name: description: - Name of the queue. required: true default_visibility_timeout: description: - The default visibility timeout in seconds. required: false default: null message_retention_period: description: - The message retention period in seconds. required: false default: null maximum_message_size: description: - The maximum message size in bytes. required: false default: null delivery_delay: description: - The delivery delay in seconds. required: false default: null receive_message_wait_time: description: - The receive message wait time in seconds. required: false default: null policy: description: - The json dict policy to attach to queue required: false default: null version_added: "2.1" redrive_policy: description: - json dict with the redrive_policy (see example) required: false default: null version_added: "2.2" extends_documentation_fragment: - aws - ec2 """ RETURN = ''' default_visibility_timeout: description: The default visibility timeout in seconds. type: int returned: always sample: 30 delivery_delay: description: The delivery delay in seconds. type: int returned: always sample: 0 maximum_message_size: description: The maximum message size in bytes. type: int returned: always sample: 262144 message_retention_period: description: The message retention period in seconds. type: int returned: always sample: 345600 name: description: Name of the SQS Queue type: string returned: always sample: "queuename-987d2de0" queue_arn: description: The queue's Amazon resource name (ARN). type: string returned: on successful creation or update of the queue sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0' receive_message_wait_time: description: The receive message wait time in seconds. 
type: int returned: always sample: 0 region: description: Region that the queue was created within type: string returned: always sample: 'us-east-1' ''' EXAMPLES = ''' # Create SQS queue with redrive policy - sqs_queue: name: my-queue region: ap-southeast-2 default_visibility_timeout: 120 message_retention_period: 86400 maximum_message_size: 1024 delivery_delay: 30 receive_message_wait_time: 20 policy: "{{ json_dict }}" redrive_policy: maxReceiveCount: 5 deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue # Delete SQS queue - sqs_queue: name: my-queue region: ap-southeast-2 state: absent ''' import json import traceback try: import boto.sqs from boto.exception import BotoServerError, NoAuthHandlerFound HAS_BOTO = True except ImportError: HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info def create_or_update_sqs_queue(connection, module): queue_name = module.params.get('name') queue_attributes = dict( default_visibility_timeout=module.params.get('default_visibility_timeout'), message_retention_period=module.params.get('message_retention_period'), maximum_message_size=module.params.get('maximum_message_size'), delivery_delay=module.params.get('delivery_delay'), receive_message_wait_time=module.params.get('receive_message_wait_time'), policy=module.params.get('policy'), redrive_policy=module.params.get('redrive_policy') ) result = dict( region=module.params.get('region'), name=queue_name, ) result.update(queue_attributes) try: queue = connection.get_queue(queue_name) if queue: # Update existing result['changed'] = update_sqs_queue(queue, check_mode=module.check_mode, **queue_attributes) else: # Create new if not module.check_mode: queue = connection.create_queue(queue_name) update_sqs_queue(queue, **queue_attributes) result['changed'] = True if not module.check_mode: result['queue_arn'] = queue.get_attributes('QueueArn')['QueueArn'] result['default_visibility_timeout'] = queue.get_attributes('VisibilityTimeout')['VisibilityTimeout'] result['message_retention_period'] = queue.get_attributes('MessageRetentionPeriod')['MessageRetentionPeriod'] result['maximum_message_size'] = queue.get_attributes('MaximumMessageSize')['MaximumMessageSize'] result['delivery_delay'] = queue.get_attributes('DelaySeconds')['DelaySeconds'] result['receive_message_wait_time'] = queue.get_attributes('ReceiveMessageWaitTimeSeconds')['ReceiveMessageWaitTimeSeconds'] except BotoServerError: result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc() module.fail_json(**result) else: module.exit_json(**result) def update_sqs_queue(queue, check_mode=False, default_visibility_timeout=None, message_retention_period=None, maximum_message_size=None, delivery_delay=None, receive_message_wait_time=None, policy=None, redrive_policy=None): changed = False changed = set_queue_attribute(queue, 'VisibilityTimeout', default_visibility_timeout, check_mode=check_mode) or changed changed = set_queue_attribute(queue, 'MessageRetentionPeriod', message_retention_period, check_mode=check_mode) or changed changed = set_queue_attribute(queue, 'MaximumMessageSize', maximum_message_size, check_mode=check_mode) or changed changed = set_queue_attribute(queue, 'DelaySeconds', delivery_delay, check_mode=check_mode) or changed changed = set_queue_attribute(queue, 'ReceiveMessageWaitTimeSeconds', receive_message_wait_time, check_mode=check_mode) or changed changed = 
set_queue_attribute(queue, 'Policy', policy, check_mode=check_mode) or changed changed = set_queue_attribute(queue, 'RedrivePolicy', redrive_policy, check_mode=check_mode) or changed return changed def set_queue_attribute(queue, attribute, value, check_mode=False): if not value: return False try: existing_value = queue.get_attributes(attributes=attribute)[attribute] except: existing_value = '' # convert dict attributes to JSON strings (sort keys for comparing) if attribute in ['Policy', 'RedrivePolicy']: value = json.dumps(value, sort_keys=True) if existing_value: existing_value = json.dumps(json.loads(existing_value), sort_keys=True) if str(value) != existing_value: if not check_mode: queue.set_attribute(attribute, value) return True return False def delete_sqs_queue(connection, module): queue_name = module.params.get('name') result = dict( region=module.params.get('region'), name=queue_name, ) try: queue = connection.get_queue(queue_name) if queue: if not module.check_mode: connection.delete_queue(queue) result['changed'] = True else: result['changed'] = False except BotoServerError: result['msg'] = 'Failed to delete sqs queue due to error: ' + traceback.format_exc() module.fail_json(**result) else: module.exit_json(**result) def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( state=dict(default='present', choices=['present', 'absent']), name=dict(required=True, type='str'), default_visibility_timeout=dict(type='int'), message_retention_period=dict(type='int'), maximum_message_size=dict(type='int'), delivery_delay=dict(type='int'), receive_message_wait_time=dict(type='int'), policy=dict(type='dict', required=False), redrive_policy=dict(type='dict', required=False), )) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True) if not HAS_BOTO: module.fail_json(msg='boto required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) if not region: module.fail_json(msg='region must be specified') try: connection = connect_to_aws(boto.sqs, region, **aws_connect_params) except (NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) state = module.params.get('state') if state == 'present': create_or_update_sqs_queue(connection, module) elif state == 'absent': delete_sqs_queue(connection, module) if __name__ == '__main__': main()
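# Note on set_queue_attribute above: dict-valued attributes (Policy,
# RedrivePolicy) are compared as canonical JSON -- both the desired value and
# the value read back from SQS are dumped with sort_keys=True -- so a
# different key order in the playbook does not report a spurious change.
# Illustration with hypothetical values:
#
#   desired  = {'maxReceiveCount': 5, 'deadLetterTargetArn': 'arn:...:my-dead-queue'}
#   existing = '{"deadLetterTargetArn": "arn:...:my-dead-queue", "maxReceiveCount": 5}'
#   json.dumps(desired, sort_keys=True) == json.dumps(json.loads(existing), sort_keys=True)
#   # -> True, so no attribute update is issued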
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib import linalg as linalg_lib from tensorflow.contrib.linalg.python.ops import linear_operator_addition from tensorflow.python.framework import random_seed from tensorflow.python.ops import linalg_ops from tensorflow.python.platform import test linalg = linalg_lib random_seed.set_random_seed(23) rng = np.random.RandomState(0) add_operators = linear_operator_addition.add_operators # pylint: disable=unused-argument class _BadAdder(linear_operator_addition._Adder): """Adder that will fail if used.""" def can_add(self, op1, op2): raise AssertionError("BadAdder.can_add called!") def _add(self, op1, op2, operator_name, hints): raise AssertionError("This line should not be reached") # pylint: enable=unused-argument class LinearOperatorAdditionCorrectnessTest(test.TestCase): """Tests correctness of addition with combinations of a few Adders. Tests here are done with the _DEFAULT_ADDITION_TIERS, which means add_operators should reduce all operators resulting in one single operator. This shows that we are able to correctly combine adders using the tiered system. All Adders should be tested separately, and there is no need to test every Adder within this class. """ def test_one_operator_is_returned_unchanged(self): op_a = linalg.LinearOperatorDiag([1., 1.]) op_sum = add_operators([op_a]) self.assertEqual(1, len(op_sum)) self.assertTrue(op_sum[0] is op_a) def test_at_least_one_operators_required(self): with self.assertRaisesRegexp(ValueError, "must contain at least one"): add_operators([]) def test_attempting_to_add_numbers_raises(self): with self.assertRaisesRegexp(TypeError, "contain only LinearOperator"): add_operators([1, 2]) def test_two_diag_operators(self): op_a = linalg.LinearOperatorDiag( [1., 1.], is_positive_definite=True, name="A") op_b = linalg.LinearOperatorDiag( [2., 2.], is_positive_definite=True, name="B") with self.test_session(): op_sum = add_operators([op_a, op_b]) self.assertEqual(1, len(op_sum)) op = op_sum[0] self.assertTrue(isinstance(op, linalg_lib.LinearOperatorDiag)) self.assertAllClose([[3., 0.], [0., 3.]], op.to_dense().eval()) # Adding positive definite operators produces positive def. self.assertTrue(op.is_positive_definite) # Real diagonal ==> self-adjoint. 
self.assertTrue(op.is_self_adjoint) # Positive definite ==> non-singular self.assertTrue(op.is_non_singular) # Enforce particular name for this simple case self.assertEqual("Add/B__A/", op.name) def test_three_diag_operators(self): op1 = linalg.LinearOperatorDiag( [1., 1.], is_positive_definite=True, name="op1") op2 = linalg.LinearOperatorDiag( [2., 2.], is_positive_definite=True, name="op2") op3 = linalg.LinearOperatorDiag( [3., 3.], is_positive_definite=True, name="op3") with self.test_session(): op_sum = add_operators([op1, op2, op3]) self.assertEqual(1, len(op_sum)) op = op_sum[0] self.assertTrue(isinstance(op, linalg_lib.LinearOperatorDiag)) self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense().eval()) # Adding positive definite operators produces positive def. self.assertTrue(op.is_positive_definite) # Real diagonal ==> self-adjoint. self.assertTrue(op.is_self_adjoint) # Positive definite ==> non-singular self.assertTrue(op.is_non_singular) def test_diag_tril_diag(self): op1 = linalg.LinearOperatorDiag( [1., 1.], is_non_singular=True, name="diag_a") op2 = linalg.LinearOperatorTriL( [[2., 0.], [0., 2.]], is_self_adjoint=True, is_non_singular=True, name="tril") op3 = linalg.LinearOperatorDiag( [3., 3.], is_non_singular=True, name="diag_b") with self.test_session(): op_sum = add_operators([op1, op2, op3]) self.assertEqual(1, len(op_sum)) op = op_sum[0] self.assertTrue(isinstance(op, linalg_lib.LinearOperatorTriL)) self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense().eval()) # The diag operators will be self-adjoint (because real and diagonal). # The TriL operator has the self-adjoint hint set. self.assertTrue(op.is_self_adjoint) # Even though op1/2/3 are non-singular, this does not imply op is. # Since no custom hint was provided, we default to None (unknown). 
self.assertEqual(None, op.is_non_singular) def test_matrix_diag_tril_diag_uses_custom_name(self): op0 = linalg.LinearOperatorMatrix([[-1., -1.], [-1., -1.]], name="matrix") op1 = linalg.LinearOperatorDiag([1., 1.], name="diag_a") op2 = linalg.LinearOperatorTriL([[2., 0.], [1.5, 2.]], name="tril") op3 = linalg.LinearOperatorDiag([3., 3.], name="diag_b") with self.test_session(): op_sum = add_operators([op0, op1, op2, op3], operator_name="my_operator") self.assertEqual(1, len(op_sum)) op = op_sum[0] self.assertTrue(isinstance(op, linalg_lib.LinearOperatorMatrix)) self.assertAllClose([[5., -1.], [0.5, 5.]], op.to_dense().eval()) self.assertEqual("my_operator", op.name) def test_incompatible_domain_dimensions_raises(self): op1 = linalg.LinearOperatorMatrix(rng.rand(2, 3)) op2 = linalg.LinearOperatorDiag(rng.rand(2, 4)) with self.assertRaisesRegexp(ValueError, "must.*same domain dimension"): add_operators([op1, op2]) def test_incompatible_range_dimensions_raises(self): op1 = linalg.LinearOperatorMatrix(rng.rand(2, 3)) op2 = linalg.LinearOperatorDiag(rng.rand(3, 3)) with self.assertRaisesRegexp(ValueError, "must.*same range dimension"): add_operators([op1, op2]) def test_non_broadcastable_batch_shape_raises(self): op1 = linalg.LinearOperatorMatrix(rng.rand(2, 3, 3)) op2 = linalg.LinearOperatorDiag(rng.rand(4, 3, 3)) with self.assertRaisesRegexp(ValueError, "Incompatible shapes"): add_operators([op1, op2]) class LinearOperatorOrderOfAdditionTest(test.TestCase): """Test that the order of addition is done as specified by tiers.""" def test_tier_0_additions_done_in_tier_0(self): diag1 = linalg.LinearOperatorDiag([1.]) diag2 = linalg.LinearOperatorDiag([1.]) diag3 = linalg.LinearOperatorDiag([1.]) addition_tiers = [ [linear_operator_addition._AddAndReturnDiag()], [_BadAdder()], ] # Should not raise since all were added in tier 0, and tier 1 (with the # _BadAdder) was never reached. op_sum = add_operators([diag1, diag2, diag3], addition_tiers=addition_tiers) self.assertEqual(1, len(op_sum)) self.assertTrue(isinstance(op_sum[0], linalg.LinearOperatorDiag)) def test_tier_1_additions_done_by_tier_1(self): diag1 = linalg.LinearOperatorDiag([1.]) diag2 = linalg.LinearOperatorDiag([1.]) tril = linalg.LinearOperatorTriL([[1.]]) addition_tiers = [ [linear_operator_addition._AddAndReturnDiag()], [linear_operator_addition._AddAndReturnTriL()], [_BadAdder()], ] # Should not raise since all were added by tier 1, and the # _BadAdder) was never reached. op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers) self.assertEqual(1, len(op_sum)) self.assertTrue(isinstance(op_sum[0], linalg.LinearOperatorTriL)) def test_tier_1_additions_done_by_tier_1_with_order_flipped(self): diag1 = linalg.LinearOperatorDiag([1.]) diag2 = linalg.LinearOperatorDiag([1.]) tril = linalg.LinearOperatorTriL([[1.]]) addition_tiers = [ [linear_operator_addition._AddAndReturnTriL()], [linear_operator_addition._AddAndReturnDiag()], [_BadAdder()], ] # Tier 0 could convert to TriL, and this converted everything to TriL, # including the Diags. # Tier 1 was never used. # Tier 2 was never used (therefore, _BadAdder didn't raise). 
op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers) self.assertEqual(1, len(op_sum)) self.assertTrue(isinstance(op_sum[0], linalg.LinearOperatorTriL)) def test_cannot_add_everything_so_return_more_than_one_operator(self): diag1 = linalg.LinearOperatorDiag([1.]) diag2 = linalg.LinearOperatorDiag([2.]) tril5 = linalg.LinearOperatorTriL([[5.]]) addition_tiers = [ [linear_operator_addition._AddAndReturnDiag()], ] # Tier 0 (the only tier) can only convert to Diag, so it combines the two # diags, but the TriL is unchanged. # Result should contain two operators, one Diag, one TriL. op_sum = add_operators([diag1, diag2, tril5], addition_tiers=addition_tiers) self.assertEqual(2, len(op_sum)) found_diag = False found_tril = False with self.test_session(): for op in op_sum: if isinstance(op, linalg.LinearOperatorDiag): found_diag = True self.assertAllClose([[3.]], op.to_dense().eval()) if isinstance(op, linalg.LinearOperatorTriL): found_tril = True self.assertAllClose([[5.]], op.to_dense().eval()) self.assertTrue(found_diag and found_tril) def test_intermediate_tier_is_not_skipped(self): diag1 = linalg.LinearOperatorDiag([1.]) diag2 = linalg.LinearOperatorDiag([1.]) tril = linalg.LinearOperatorTriL([[1.]]) addition_tiers = [ [linear_operator_addition._AddAndReturnDiag()], [_BadAdder()], [linear_operator_addition._AddAndReturnTriL()], ] # tril cannot be added in tier 0, and the intermediate tier 1 with the # BadAdder will catch it and raise. with self.assertRaisesRegexp(AssertionError, "BadAdder.can_add called"): add_operators([diag1, diag2, tril], addition_tiers=addition_tiers) class AddAndReturnScaledIdentityTest(test.TestCase): def setUp(self): self._adder = linear_operator_addition._AddAndReturnScaledIdentity() def test_identity_plus_identity(self): id1 = linalg.LinearOperatorIdentity(num_rows=2) id2 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3]) hints = linear_operator_addition._Hints( is_positive_definite=True, is_non_singular=True) self.assertTrue(self._adder.can_add(id1, id2)) operator = self._adder.add(id1, id2, "my_operator", hints) self.assertTrue(isinstance(operator, linalg.LinearOperatorScaledIdentity)) with self.test_session(): self.assertAllClose(2 * linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(), operator.to_dense().eval()) self.assertTrue(operator.is_positive_definite) self.assertTrue(operator.is_non_singular) self.assertEqual("my_operator", operator.name) def test_identity_plus_scaled_identity(self): id1 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3]) id2 = linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=2.2) hints = linear_operator_addition._Hints( is_positive_definite=True, is_non_singular=True) self.assertTrue(self._adder.can_add(id1, id2)) operator = self._adder.add(id1, id2, "my_operator", hints) self.assertTrue(isinstance(operator, linalg.LinearOperatorScaledIdentity)) with self.test_session(): self.assertAllClose(3.2 * linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(), operator.to_dense().eval()) self.assertTrue(operator.is_positive_definite) self.assertTrue(operator.is_non_singular) self.assertEqual("my_operator", operator.name) def test_scaled_identity_plus_scaled_identity(self): id1 = linalg.LinearOperatorScaledIdentity( num_rows=2, multiplier=[2.2, 2.2, 2.2]) id2 = linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=-1.0) hints = linear_operator_addition._Hints( is_positive_definite=True, is_non_singular=True) self.assertTrue(self._adder.can_add(id1, id2)) operator = self._adder.add(id1, id2, 
"my_operator", hints) self.assertTrue(isinstance(operator, linalg.LinearOperatorScaledIdentity)) with self.test_session(): self.assertAllClose(1.2 * linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(), operator.to_dense().eval()) self.assertTrue(operator.is_positive_definite) self.assertTrue(operator.is_non_singular) self.assertEqual("my_operator", operator.name) class AddAndReturnDiagTest(test.TestCase): def setUp(self): self._adder = linear_operator_addition._AddAndReturnDiag() def test_identity_plus_identity_returns_diag(self): id1 = linalg.LinearOperatorIdentity(num_rows=2) id2 = linalg.LinearOperatorIdentity(num_rows=2, batch_shape=[3]) hints = linear_operator_addition._Hints( is_positive_definite=True, is_non_singular=True) self.assertTrue(self._adder.can_add(id1, id2)) operator = self._adder.add(id1, id2, "my_operator", hints) self.assertTrue(isinstance(operator, linalg.LinearOperatorDiag)) with self.test_session(): self.assertAllClose(2 * linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(), operator.to_dense().eval()) self.assertTrue(operator.is_positive_definite) self.assertTrue(operator.is_non_singular) self.assertEqual("my_operator", operator.name) def test_diag_plus_diag(self): diag1 = rng.rand(2, 3, 4) diag2 = rng.rand(4) op1 = linalg.LinearOperatorDiag(diag1) op2 = linalg.LinearOperatorDiag(diag2) hints = linear_operator_addition._Hints( is_positive_definite=True, is_non_singular=True) self.assertTrue(self._adder.can_add(op1, op2)) operator = self._adder.add(op1, op2, "my_operator", hints) self.assertTrue(isinstance(operator, linalg.LinearOperatorDiag)) with self.test_session(): self.assertAllClose( linalg.LinearOperatorDiag(diag1 + diag2).to_dense().eval(), operator.to_dense().eval()) self.assertTrue(operator.is_positive_definite) self.assertTrue(operator.is_non_singular) self.assertEqual("my_operator", operator.name) class AddAndReturnTriLTest(test.TestCase): def setUp(self): self._adder = linear_operator_addition._AddAndReturnTriL() def test_diag_plus_tril(self): diag = linalg.LinearOperatorDiag([1., 2.]) tril = linalg.LinearOperatorTriL([[10., 0.], [30., 0.]]) hints = linear_operator_addition._Hints( is_positive_definite=True, is_non_singular=True) self.assertTrue(self._adder.can_add(diag, diag)) self.assertTrue(self._adder.can_add(diag, tril)) operator = self._adder.add(diag, tril, "my_operator", hints) self.assertTrue(isinstance(operator, linalg.LinearOperatorTriL)) with self.test_session(): self.assertAllClose([[11., 0.], [30., 2.]], operator.to_dense().eval()) self.assertTrue(operator.is_positive_definite) self.assertTrue(operator.is_non_singular) self.assertEqual("my_operator", operator.name) class AddAndReturnMatrixTest(test.TestCase): def setUp(self): self._adder = linear_operator_addition._AddAndReturnMatrix() def test_diag_plus_diag(self): diag1 = linalg.LinearOperatorDiag([1., 2.]) diag2 = linalg.LinearOperatorDiag([-1., 3.]) hints = linear_operator_addition._Hints( is_positive_definite=False, is_non_singular=False) self.assertTrue(self._adder.can_add(diag1, diag2)) operator = self._adder.add(diag1, diag2, "my_operator", hints) self.assertTrue(isinstance(operator, linalg.LinearOperatorMatrix)) with self.test_session(): self.assertAllClose([[0., 0.], [0., 5.]], operator.to_dense().eval()) self.assertFalse(operator.is_positive_definite) self.assertFalse(operator.is_non_singular) self.assertEqual("my_operator", operator.name) if __name__ == "__main__": test.main()
import warnings warnings.filterwarnings('ignore') import tensorflow as tf import numpy as np import pickle import time import sys import os sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../') from dnc_v3 import DNC from recurrent_controller import StatelessRecurrentController from torchtext.datasets import SST import torchtext.data as data def set_acc(target_batch, predict_batch): s = [] for b in range(target_batch.shape[0]): trim_target = [] trim_predict = [] for t in target_batch[b]: if t >-1: trim_target.append(t) for t in predict_batch[b]: if t >-1: trim_predict.append(t) if np.random.rand() > 1: print('{} vs {}'.format(trim_target, trim_predict)) acc = len(set(trim_target).intersection(set(trim_predict))) / len(set(trim_target)) s.append(acc) return np.mean(s) # acc def llprint(message): sys.stdout.write(message) sys.stdout.flush() def load(path): return pickle.load(open(path, 'rb')) def onehot(index, size): # print('-----') # print(index) vec = np.zeros(size, dtype=np.float32) vec[int(index)] = 1.0 return vec def prepare_sample_batch(args, raw_inp, raw_outp, word_space_size_input, word_space_size_output): seq_len = raw_inp.shape[1] holdstep = int(seq_len // args.mem_size) holdstep = min(holdstep, args.max_remmember) hold_mem = np.ones(seq_len, dtype=bool) # print("\n") if args.mode == 'train': if args.memo_type == "poisson": holdstep = np.random.poisson(lam=holdstep, size=1)[0] if holdstep > 0: for iii in range(holdstep, int(seq_len), holdstep): hold_mem[iii] = False else: hold_mem[(seq_len) // 2] = False # print(seq_len) # print(hold_mem) # print(holdstep) input_vecs=[] output_vecs=[] num=0 is_break=False brin=[] brout=[] while not is_break and num<args.batch_size: for input_vec, output_vec in zip(raw_inp, raw_outp): input_vec = input_vec.numpy() brin.append(input_vec) output_vec = [output_vec.numpy()] brout.append(output_vec) # print(input_vec) # print(output_vec) # print('====') # raise False input_vec = [onehot(code, word_space_size_input) for code in input_vec] output_vec = [onehot(code, word_space_size_output) for code in output_vec] input_vecs.append(input_vec) output_vecs.append(output_vec) num+=1 if num==args.batch_size: is_break=True break # raise False return np.asarray(input_vecs), np.asarray(output_vecs), seq_len, 1, brout, brin, hold_mem def load_data(args, text_field, label_field, **kwargs): train_data, test_data, _ = SST.splits(text_field, label_field, filter_pred=lambda ex: ex.label != 'neutral') print("num train {}".format(len(train_data))) print("num test {}".format(len(test_data))) text_field.build_vocab(train_data), label_field.build_vocab(train_data, test_data) train_iter, test_iter = data.BucketIterator.splits( (train_data, test_data), batch_sizes=(args.batch_size, args.batch_size), shuffle=True, **kwargs ) return train_iter, test_iter, len(train_data), len(test_data) def sst_task(args): dirname = os.path.dirname(os.path.abspath(__file__))+'/data/save/' print(dirname) ckpts_dir = os.path.join(dirname , 'checkpoints_{}_{}'.format(args.task,args.use_pretrain_emb)) llprint("Loading Data ... 
") llprint("Done!\n") print("\nLoading data...") text_field = data.Field(batch_first=True, lower=True, tokenize='spacy') label_field = data.Field(sequential=False, unk_token=None) train_iter, test_iter, num_train, num_test = load_data(args, text_field, label_field, device=-1, repeat=False) args.vocab_size = len(text_field.vocab) args.n_class = len(label_field.vocab) str2tok = {} for i in range(len(text_field.vocab)): str2tok[text_field.vocab.itos[i]]=i print(args.vocab_size) print(args.n_class) args.word_dict = text_field.vocab pstr2tok = {"POS":1,"NEG":0} print('dim in {}'.format(len(str2tok))) print('dim out {}'.format(len(pstr2tok))) batch_size = args.batch_size input_size = len(str2tok) output_size = len(pstr2tok) words_count = args.mem_size word_size = args.word_size read_heads = args.read_heads momentum = 0.9 iterations = args.iterations start_step = 0 config = tf.ConfigProto() config.gpu_options.allow_growth = True config.gpu_options.per_process_gpu_memory_fraction = args.gpu_ratio graph = tf.Graph() with graph.as_default(): with tf.Session(graph=graph, config=config) as session: llprint("Building Computational Graph ... ") ncomputer = DNC( StatelessRecurrentController, input_size, output_size, output_size, words_count, word_size, read_heads, batch_size, use_mem=args.use_mem, controller_cell_type=args.cell_type, use_emb_encoder=True, use_emb_decoder=False, dual_controller=True, emb_size=args.emb_dim, decoder_mode=True, train_emb=args.train_emb, memory_read_heads_decode=args.read_heads_decode, hidden_controller_dim=args.hidden_dim, attend_dim=args.attend, cache_attend_dim=args.cache_attend_dim, hold_mem_mode=args.hold_mem_mode, enable_drop_out=args.drop_out_keep > 0, batch_norm=args.batch_norm, nlayer=args.nlayer, name='dnc3'+str(args.hold_mem_mode)+args.memo_type, parallel_rnn=10, ) # optimizer = tf.train.RMSPropOptimizer(args.learning_rate, momentum=momentum) optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate) output, prob, loss, apply_gradients = ncomputer.build_loss_function_multi_label(optimizer, prefer_one_class=True) llprint("Done!\n") llprint("Initializing Variables ... ") session.run(tf.global_variables_initializer()) llprint("Done!\n") if args.from_checkpoint is not '': if args.from_checkpoint=='default': from_checkpoint = ncomputer.print_config() else: from_checkpoint = args.from_checkpoint llprint("Restoring Checkpoint %s ... " % from_checkpoint) ncomputer.restore(session, ckpts_dir, from_checkpoint) llprint("Done!\n") elif "glove" in args.use_pretrain_emb: from optimized_writting_task import imdb_prepare if args.use_pretrain_emb=="glove": dn='./data/glove.6B' else: dn = "./data/glove.42B" ncomputer.assign_pretrain_emb_encoder(session, imdb_prepare.loadGloVe(dirname=dn, emb_dim=args.emb_dim, str2tok_dir=str2tok)) elif args.use_pretrain_emb == 'word2vec': from optimized_writting_task import imdb_prepare mat = imdb_prepare.load_word2vec(emb_dim=args.emb_dim, dirname='./data/word2vec.bin', str2tok_dir=str2tok) ncomputer.assign_pretrain_emb_encoder(session, mat) last_100_losses = [] start = 0 if start_step == 0 else start_step + 1 end = start_step + iterations + 1 sc=0 if args.mode == 'test': sc=-1 end = start start_time_100 = time.time() avg_100_time = 0. 
avg_counter = 0 if args.mode=='train': log_dir = './data/summary/log_{}_{}/'.format(args.task, args.use_pretrain_emb) if not os.path.isdir(log_dir): os.mkdir(log_dir) log_dir = '{}/{}/'.format(log_dir,ncomputer.print_config()) if not os.path.isdir(log_dir): os.mkdir(log_dir) train_writer = tf.summary.FileWriter(log_dir, session.graph) min_tloss=0 valid_time=num_train//args.batch_size for i in range(start, end + 1): if i > 0: valid_time = args.valid_time try: print("\rEpoch %d/%d" % (i, end)) ii=0 for batch in train_iter: llprint("\rbatch %d/%d" % (ii, num_train//args.batch_size)) summerize = ii > sc and (ii % valid_time == 0) ii+=1 if args.mode == 'train': sent, label = batch.text, batch.label input_data, target_output, seq_len, decoder_length, brout, _, hold_mem = \ prepare_sample_batch(args, sent, label, input_size, output_size) fd={ ncomputer.input_encoder: input_data, ncomputer.input_decoder: target_output, ncomputer.target_output: target_output, ncomputer.sequence_length: seq_len, ncomputer.decode_length: decoder_length, ncomputer.drop_out_keep: args.drop_out_keep, } if args.hold_mem_mode > 0: fd[ncomputer.hold_mem] = hold_mem loss_value,out, _= session.run([ loss, prob, apply_gradients ], feed_dict=fd) last_100_losses.append(loss_value) tpre=0 if summerize: tescores=[] trscores = [] llprint("\n\t episode %d -->Avg. Cross-Entropy: %.7f\n" % (i, np.mean(last_100_losses))) summary = tf.Summary() summary.value.add(tag='batch_train_loss', simple_value=np.mean(last_100_losses)) out = np.reshape(np.asarray(out), [-1, decoder_length, output_size]) out = np.argmax(out, axis=-1) bout_list = [] for b in range(out.shape[0]): out_list = [out[b][0]] bout_list.append(out_list) trscores.append(set_acc(np.asarray(brout),np.asarray(bout_list))) print('done quick test train...') losses = [] all_out=[] all_label=[] for batch2 in test_iter: sent, label = batch2.text, batch2.label rs=len(sent) input_data, target_output, seq_len, decoder_length, rout_list, rin_list, hold_mem = \ prepare_sample_batch(args, sent, label, input_size, output_size) fd={ ncomputer.input_encoder: input_data, ncomputer.input_decoder: target_output, ncomputer.target_output: target_output, ncomputer.sequence_length: seq_len, ncomputer.decode_length: decoder_length, ncomputer.drop_out_keep: 1 } if args.hold_mem_mode > 0: fd[ncomputer.hold_mem] = hold_mem out, loss_v = session.run([prob, loss], feed_dict=fd) losses.append(loss_v) out = np.reshape(np.asarray(out), [-1, decoder_length, output_size]) out = np.argmax(out, axis=-1) bout_list = [] for b in range(out.shape[0]): out_list = [out[b][0]] bout_list.append(out_list) tescores.append(set_acc(np.asarray(rout_list[:rs]), np.asarray(bout_list[:rs]))) if args.mode=='test': print('some predic') print(len(all_out)) print(len(all_label)) for tt, tv in enumerate(all_out): print(all_label[tt]) print(all_out[tt]) print('---') tloss=np.mean(losses) tpre=np.mean(tescores) print('\ntr score {} vs te store {}'.format(np.mean(trscores),np.mean(tescores))) print('test loss {}'.format(tloss)) if args.mode=='train': summary.value.add(tag='train_acc', simple_value=np.mean(trscores)) summary.value.add(tag='test_acc', simple_value=np.mean(tescores)) summary.value.add(tag='test_loss', simple_value=tloss) train_writer.add_summary(summary, i) train_writer.flush() end_time_100 = time.time() elapsed_time = (end_time_100 - start_time_100) / 60 avg_counter += 1 avg_100_time += (1. / avg_counter) * (elapsed_time - avg_100_time) estimated_time = (avg_100_time * ((end - i) / 100.)) / 60. print ("\tAvg. 
100 iterations time: %.2f minutes" % (avg_100_time)) print ("\tApprox. time to completion: %.2f hours" % (estimated_time)) start_time_100 = time.time() last_100_losses = [] if summerize: if args.mode=='train' and tpre>min_tloss: min_tloss=tpre llprint("\nSaving Checkpoint ... "), ncomputer.save(session, ckpts_dir, ncomputer.print_config()) llprint("Done!\n") else: print("valid time now {}".format(args.valid_time)) llprint("\nnot save {} vs {}\n".format(tpre, min_tloss)) except KeyboardInterrupt: sys.exit(0) def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('--mode', default="train") parser.add_argument('--use_mem', default=True, type=str2bool) parser.add_argument('--task', default="sst") parser.add_argument('--from_checkpoint', default="") parser.add_argument('--prob_thresehold', default=0.5, type=float) parser.add_argument('--cell_type', default="lstm") parser.add_argument('--hidden_dim', default=128, type=int) parser.add_argument('--emb_dim', default=50, type=int) parser.add_argument('--attend', default=0, type=int) parser.add_argument('--cache_attend_dim', default=0, type=int) parser.add_argument('--mem_size', default=4, type=int) parser.add_argument('--word_size', default=32, type=int) parser.add_argument('--batch_size', default=32, type=int) parser.add_argument('--read_heads', default=1, type=int) parser.add_argument('--read_heads_decode', default=1, type=int) parser.add_argument('--beam_size', default=0, type=int) parser.add_argument('--top_word', default=10000, type=int) parser.add_argument('--min_count', default=1, type=int) parser.add_argument('--max_len', default=150, type=int) parser.add_argument('--memo_type', default="", type=str) parser.add_argument('--hold_mem_mode', default=0, type=int) parser.add_argument('--max_remmember', default=10, type=int) parser.add_argument('--nlayer', default=1, type=int) parser.add_argument('--drop_out_keep', default=-1, type=float) parser.add_argument('--batch_norm', default=False, type=str2bool) parser.add_argument('--train_emb', default=True, type=str2bool) parser.add_argument('--learning_rate', default=0.0001, type=float) parser.add_argument('--lr_decay_step', default=10000, type=float) parser.add_argument('--lr_decay_rate', default=0.9, type=float) parser.add_argument('--iterations', default=1000000, type=int) parser.add_argument('--valid_time', default=200, type=int) parser.add_argument('--valid_size', default=100, type=int) parser.add_argument('--gpu_ratio', default=0.4, type=float) parser.add_argument('--cpu_num', default=10, type=int) parser.add_argument('--gpu_device', default="1,2,3", type=str) parser.add_argument('--use_pretrain_emb', default="", type=str) args = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_device # args.hold_mem_mode = 2 # args.cache_attend_dim = 8 args.use_mem=False args.attend=8 # args.batch_size=1 args.cell_type="gru" args.drop_out_keep=0.5 sst_task(args)
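# Example invocation (illustrative; the filename sst_task.py is an
# assumption -- substitute whatever this script is actually named):
#
#   python sst_task.py --mode train --batch_size 32 --mem_size 4 \
#       --hidden_dim 128 --use_pretrain_emb glove
#
# Note that the last lines of __main__ above override several parsed flags
# unconditionally (use_mem=False, attend=8, cell_type="gru",
# drop_out_keep=0.5), so command-line values for those flags are ignored
# as the script currently stands.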
#!/usr/bin/env python3 """Tests for the ClientSocket class. Login with user "edemo" password "demouser" for a demo account that can be used for basic testing. """ import asyncio import unittest import numpy as np import ibapipy.data.contract as dc import ibapipy.data.order as do from asyncio import Future from ibapipy.client_socket import ClientSocket # General test contract TEST_CONTRACT = dc.Contract('cash', 'eur', 'usd', 'idealpro') # Test date for historical data (we randomize to prevent IB pacing violations) TEST_END_DATE_TIME = '20151125 0{0}:00:00 UTC'.format(np.random.randint(0, 10)) class ClientSocketTests(unittest.TestCase): """Test cases for the ClientSocket class.""" def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) def tearDown(self): self.loop.close() def test_constructor(self): client = ClientSocket() self.assertFalse(client.is_connected) def test_connect(self): fut = Future() class MockClientSocket(ClientSocket): def __init__(self, loop): ClientSocket.__init__(self, loop) @asyncio.coroutine def next_valid_id(self, req_id): fut.set_result(req_id) self.loop.stop() client = MockClientSocket(self.loop) self.assertFalse(client.is_connected) asyncio.async(client.connect()) self.loop.run_forever() self.assertTrue(client.is_connected) self.assertTrue(fut.result() > 0) self.loop.run_until_complete(client.disconnect()) self.assertFalse(client.is_connected) def test_disconnect(self): client = ClientSocket(self.loop) self.assertFalse(client.is_connected) self.loop.run_until_complete(client.connect()) self.assertTrue(client.is_connected) self.loop.run_until_complete(client.disconnect()) self.assertFalse(client.is_connected) def test_calculate_implied_volatility(self): client = ClientSocket(self.loop) task = asyncio.async(client.calculate_implied_volatility(None, None, None, None)) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def test_calculate_option_price(self): client = ClientSocket(self.loop) task = asyncio.async(client.calculate_option_price(None, None, None, None)) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def test_cancel_calculate_implied_volatility(self): client = ClientSocket(self.loop) task = asyncio.async(client.cancel_calculate_implied_volatility(None)) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def test_cancel_calculate_option_price(self): client = ClientSocket(self.loop) task = asyncio.async(client.cancel_calculate_option_price(None)) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def test_cancel_fundamental_data(self): client = ClientSocket(self.loop) task = asyncio.async(client.cancel_fundamental_data(None)) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def test_cancel_historical_data(self): # TODO pass def test_cancel_mkt_data(self): # TODO pass def test_cancel_mkt_depth(self): client = ClientSocket(self.loop) task = asyncio.async(client.cancel_mkt_depth(None)) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def test_cancel_news_bulletins(self): client = ClientSocket(self.loop) task = asyncio.async(client.cancel_news_bulletins()) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def test_cancel_order(self): # TODO pass def test_cancel_real_time_bars(self): client = ClientSocket(self.loop) task = asyncio.async(client.cancel_real_time_bars(None)) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def 
test_cancel_scanner_subscription(self): client = ClientSocket(self.loop) task = asyncio.async(client.cancel_scanner_subscription(None)) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def test_exercise_options(self): client = ClientSocket(self.loop) task = asyncio.async(client.exercise_options(None, None, None, None, None, None)) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def test_place_order(self): fut = Future() class MockClientSocket(ClientSocket): def __init__(self, loop): ClientSocket.__init__(self, loop) self.next_id = -1 self.count = 0 @asyncio.coroutine def next_valid_id(self, req_id): self.next_id = req_id self.loop.stop() @asyncio.coroutine def order_status(self, req_id, status, filled, remaining, avg_fill_price, perm_id, parent_id, last_fill_price, client_id, why_held): if self.next_id == req_id: fut.set_result(True) self.loop.stop() in_order = do.Order('buy', 100000, 'mkt') out_order = do.Order('sell', 100000, 'mkt') client = MockClientSocket(self.loop) asyncio.async(client.connect()) self.loop.run_forever() asyncio.async(client.place_order(client.next_id, TEST_CONTRACT, in_order)) asyncio.async(client.place_order(client.next_id + 1, TEST_CONTRACT, out_order)) self.loop.run_forever() self.loop.run_until_complete(client.disconnect()) self.assertTrue(fut.result()) def test_replace_fa(self): client = ClientSocket(self.loop) task = asyncio.async(client.replace_fa(None, None)) self.assertRaises(NotImplementedError, self.loop.run_until_complete, task) def test_req_account_updates(self): fut_end = Future() fut_accts = Future() fut_time = Future() fut_values = Future() class MockClientSocket(ClientSocket): def __init__(self, loop): ClientSocket.__init__(self, loop) self.values = [] @asyncio.coroutine def account_download_end(self, account_name): fut_end.set_result(account_name) self.loop.stop() @asyncio.coroutine def managed_accounts(self, accounts): fut_accts.set_result(accounts) @asyncio.coroutine def update_account_time(self, timestamp): fut_time.set_result(timestamp) @asyncio.coroutine def update_account_value(self, key, value, currency, account_name): self.values.append((key, value, currency, account_name)) fut_values.set_result(self.values) client = MockClientSocket(self.loop) asyncio.async(client.connect()) asyncio.async(client.req_managed_accts()) self.loop.run_until_complete(fut_accts) account = fut_accts.result() asyncio.async(client.req_account_updates(True, account)) t = asyncio.async(asyncio.wait([fut_end], timeout=10)) self.loop.run_until_complete(t) self.loop.run_until_complete(client.disconnect()) if fut_end.done(): self.assertIsNotNone(fut_end.result()) else: print('IB is slow, account_download_end timed out.') if fut_time.done(): self.assertIsNotNone(fut_time.result()) else: print('IB is slow, update_account_time timed out.') if fut_values.done(): self.assertTrue(len(fut_values.result()) > 0) else: print('IB is slow, update_account_value timed out.') def test_req_historical_data(self): fut = Future() class MockClientSocket(ClientSocket): def __init__(self, loop): ClientSocket.__init__(self, loop) self.next_id = -1 self.count = 0 @asyncio.coroutine def next_valid_id(self, req_id): self.next_id = req_id self.loop.stop() @asyncio.coroutine def historical_data(self, req_id, date, open, high, low, close, volume, bar_count, wap, has_gaps): if date.startswith('finished'): fut.set_result(self.count) self.loop.stop() else: self.count += 1 client = MockClientSocket(self.loop) asyncio.async(client.connect()) 
self.loop.run_forever() asyncio.async(client.req_historical_data(client.next_id, TEST_CONTRACT, TEST_END_DATE_TIME, '60 S', '1 secs', 'BID_ASK', False, 2)) self.loop.run_forever() self.loop.run_until_complete(client.disconnect()) self.assertEqual(60, fut.result()) def test_req_managed_accts(self): fut = Future() class MockClientSocket(ClientSocket): def __init__(self, loop): ClientSocket.__init__(self, loop) @asyncio.coroutine def managed_accounts(self, accounts): fut.set_result(accounts) self.loop.stop() client = MockClientSocket(self.loop) asyncio.async(client.connect()) asyncio.async(client.req_managed_accts()) asyncio.async(client.disconnect()) self.loop.run_forever() self.assertIsNotNone(fut.result()) def test_req_mkt_data(self): fut = Future() class MockClientSocket(ClientSocket): def __init__(self, loop): ClientSocket.__init__(self, loop) self.next_id = -1 @asyncio.coroutine def next_valid_id(self, req_id): self.bid_price = 0 self.ask_price = 0 self.bid_size = 0 self.ask_size = 0 self.next_id = req_id self.loop.stop() @asyncio.coroutine def tick_price(self, req_id, tick_type, price, can_auto_execute): if tick_type == 1: self.bid_price = price elif tick_type == 2: self.ask_price = price if self.bid_size > 0 and self.ask_size > 0 and \ self.bid_price > 0 and self.ask_price > 0: self.loop.stop() @asyncio.coroutine def tick_size(self, req_id, tick_type, size): if tick_type == 0: self.bid_size = size elif tick_type == 3: self.ask_size = size if self.bid_size > 0 and self.ask_size > 0 and \ self.bid_price > 0 and self.ask_price > 0: self.loop.stop() client = MockClientSocket(self.loop) asyncio.async(client.connect()) self.loop.run_forever() asyncio.async(client.req_mkt_data(client.next_id, TEST_CONTRACT)) self.loop.run_forever() self.loop.run_until_complete(client.disconnect()) def test_req_mkt_depth(self): class MockClientSocket(ClientSocket): def __init__(self, loop): ClientSocket.__init__(self, loop) self.contract = dc.Contract('stk', 'spy', 'usd', 'smart') self.counter = 0 self.future = None self.next_id = -1 @asyncio.coroutine def contract_details(self, req_id, contract): # Can't use SMART for market depth contract.exchange = contract.primary_exch self.contract = contract self.future.set_result(True) @asyncio.coroutine def next_valid_id(self, req_id): self.next_id = req_id self.future.set_result(True) @asyncio.coroutine def update_mkt_depth(self, req_id, position, operation, side, price, size): self.counter += 1 if self.counter > 99: self.future.set_result(self.counter) @asyncio.coroutine def update_mkt_depth_L2(self, req_id, position, market_maker, operation, side, price, size): print('update_mkt_depth_L2', req_id, position, market_maker, operation, side, price, size) client = MockClientSocket(self.loop) client.future = Future() asyncio.async(client.connect()) self.loop.run_until_complete(client.future) client.future = Future() asyncio.async(client.req_contract_details(client.next_id, client.contract)) self.loop.run_until_complete(client.future) client.future = Future() asyncio.async(client.req_mkt_depth(client.next_id, client.contract, num_rows=10)) self.loop.run_until_complete(client.future) self.assertEqual(100, client.future.result()) self.loop.run_until_complete(client.disconnect()) if __name__ == '__main__': # Collection of all test methods all_tests = unittest.TestSuite() all_tests.addTest(ClientSocketTests('test_constructor')) all_tests.addTest(ClientSocketTests('test_connect')) all_tests.addTest(ClientSocketTests('test_disconnect')) 
    all_tests.addTest(ClientSocketTests('test_calculate_implied_volatility'))
    all_tests.addTest(ClientSocketTests('test_calculate_option_price'))
    all_tests.addTest(ClientSocketTests('test_cancel_calculate_implied_volatility'))
    all_tests.addTest(ClientSocketTests('test_cancel_calculate_option_price'))
    all_tests.addTest(ClientSocketTests('test_cancel_fundamental_data'))
    all_tests.addTest(ClientSocketTests('test_cancel_historical_data'))
    all_tests.addTest(ClientSocketTests('test_cancel_mkt_data'))
    all_tests.addTest(ClientSocketTests('test_cancel_mkt_depth'))
    all_tests.addTest(ClientSocketTests('test_cancel_news_bulletins'))
    all_tests.addTest(ClientSocketTests('test_cancel_order'))
    all_tests.addTest(ClientSocketTests('test_cancel_real_time_bars'))
    all_tests.addTest(ClientSocketTests('test_cancel_scanner_subscription'))
    all_tests.addTest(ClientSocketTests('test_exercise_options'))
    all_tests.addTest(ClientSocketTests('test_place_order'))
    all_tests.addTest(ClientSocketTests('test_replace_fa'))
    all_tests.addTest(ClientSocketTests('test_req_account_updates'))
    all_tests.addTest(ClientSocketTests('test_req_historical_data'))
    all_tests.addTest(ClientSocketTests('test_req_managed_accts'))
    all_tests.addTest(ClientSocketTests('test_req_mkt_data'))
    all_tests.addTest(ClientSocketTests('test_req_mkt_depth'))

    # Single test to run
    single_test = unittest.TestSuite()
    single_test.addTest(ClientSocketTests('test_req_historical_data'))

    # Run either all_tests or single_test
    unittest.TextTestRunner(verbosity=2).run(all_tests)
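# --- Illustrative aside (not part of the test module above) -------------------
# Every mock in these tests follows the same shape: subclass ClientSocket,
# override a callback coroutine, record the interesting value in an
# asyncio.Future, and call loop.stop() so the test can assert on fut.result()
# synchronously. The self-contained sketch below isolates that Future +
# run_forever()/stop() pattern with a fake socket class (FakeSocket and its
# next_valid_id are stand-ins, not ibapipy APIs), written with modern
# async/await syntax for brevity.
import asyncio


class FakeSocket:
    def __init__(self, loop, fut):
        self.loop = loop
        self.fut = fut

    async def connect(self):
        # A real ClientSocket gets this id back from TWS/IB Gateway; here we
        # just simulate the callback arriving on a later loop iteration.
        await asyncio.sleep(0)
        self.next_valid_id(42)

    def next_valid_id(self, req_id):
        self.fut.set_result(req_id)
        self.loop.stop()


demo_loop = asyncio.new_event_loop()
demo_fut = demo_loop.create_future()
demo_client = FakeSocket(demo_loop, demo_fut)
demo_loop.create_task(demo_client.connect())
demo_loop.run_forever()              # returns once next_valid_id() stops the loop
assert demo_fut.result() == 42
demo_loop.close()
# ------------------------------------------------------------------------------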
import unittest import numpy as np import copy import builtins import theano import theano.tensor as T import autodiff import autodiff.utils as utils import autodiff.context as c from autodiff.functions import escape context = autodiff.context.Context(force_floatX=False) context_floatX = autodiff.context.Context(force_floatX=True) def checkfn(f, var_ndim=None, *args, **kwargs): test_floatX = kwargs.pop('test_floatX', True) result1 = _checkfn(context, f, var_ndim, *args, **kwargs) if test_floatX: result2 = _checkfn(context_floatX, f, var_ndim, *args, **kwargs) return result1 and result2 else: return result1 def _checkfn(context, f, var_ndim=None, *args, **kwargs): context.reset() override = kwargs.pop('override', None) var_ndim = utils.as_seq(var_ndim) dim = [[4] * nd for nd in var_ndim] values = tuple([np.random.random(d) for d in dim]) # make shallow copies to avoid inplace corruption sym_values = copy.copy(values) sym_args = copy.copy(args) sym_kwargs = copy.copy(kwargs) F = context.recompile(f) sym_vars = F(*(sym_values + sym_args), **sym_kwargs) sym_result = [v.eval() if utils.isvar(v) else v for v in utils.as_seq(sym_vars)] if len(sym_result) == 0: sym_result = None py_result = override or f(*(values + args), **kwargs) if sym_result is None: return sym_result is None and py_result is None else: return np.allclose(py_result, sym_result) class GarbageCollection(unittest.TestCase): # make sure shadowed variables aren't garbage-collected # so their id's do not get reused. If gc takes effect, then # x and y will coexist in the same location in memory (weird...) def test_gc(self): def f(x, y): return [x, y] F = context.recompile(f) assert F(3, 4)[1].eval() == 4 class Tags(unittest.TestCase): def test_tagging(self): def f(arg1, arg2=1, *arg3, **arg4): pass F = context.recompile(f) F(1.0) self.assertTrue('arg1' in context.sym_vars) self.assertTrue('arg2' in context.sym_vars) self.assertTrue('arg3' not in context.sym_vars) self.assertTrue('arg4' not in context.sym_vars) class ForceFloatX(unittest.TestCase): def test_force_floatX(self): def f(x): return x ctx = autodiff.context.Context(force_floatX=False) ctx_floatX = autodiff.context.Context(force_floatX=True) F = ctx.recompile(f) F_floatX = ctx_floatX.recompile(f) x = np.array([1, 2, 3]) self.assertTrue(F(x).dtype == 'int64') self.assertTrue(F_floatX(x).dtype == theano.config.floatX) class Signatures(unittest.TestCase): def test_sig_no_arg(self): def f(): return 1 self.assertTrue(checkfn(f)) def test_sig_one_arg(self): def f(x): return x self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, a=2) self.assertTrue(checkfn(f, [], 2)) self.assertTrue(checkfn(f, [], x=2)) def test_sig_mult_args(self): # multiple args, no default def f(x, y): return x * y self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, 2) self.assertRaises(TypeError, f, a=2, b=2) self.assertTrue(checkfn(f, [], 2, 3)) self.assertTrue(checkfn(f, [], y=4, x=5)) def test_sig_var_args(self): # var args, no default def f(x, y, *z): return x * y * sum(z) self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, 2) self.assertRaises(TypeError, f, a=2, b=2) self.assertTrue(checkfn(f, [], 2, 3)) self.assertTrue(checkfn(f, [], 2, 3, 4)) self.assertTrue(checkfn(f, [], 2, 3, 4, 5)) def test_sig_default_args(self): # multiple args, one default def f(x, y=2): return x * y self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, y=3) self.assertTrue(checkfn(f, [], 2)) self.assertTrue(checkfn(f, [], 2, 3)) self.assertTrue(checkfn(f, [], y=4, x=5)) 
self.assertTrue(checkfn(f, [], x=5)) # multiple args, all default def f(x=1, y=2): return x * y self.assertTrue(checkfn(f)) self.assertTrue(checkfn(f, [], 1)) self.assertTrue(checkfn(f, [], 1, 2)) self.assertTrue(checkfn(f, [], y=2, x=1)) self.assertTrue(checkfn(f, [], x=5)) self.assertTrue(checkfn(f, [], y=5)) def test_sig_default_var_args(self): # multiple var args, all default def f(x=1, y=2, *z): return x * y * sum(z) self.assertTrue(checkfn(f)) self.assertTrue(checkfn(f, [], 1)) self.assertTrue(checkfn(f, [], 1, 2)) self.assertTrue(checkfn(f, [], 1, 2, 3)) self.assertTrue(checkfn(f, [], 1, 2, 3, 4)) def test_sig_kwargs(self): # kwargs def f(**kwargs): x = kwargs['x'] y = kwargs['y'] z = kwargs['z'] return x * y * z self.assertRaises(KeyError, f) self.assertRaises(TypeError, f, 1) self.assertTrue(checkfn(f, [], x=1, y=2, z=3)) def test_sig_varargs_kwargs(self): # varargs and kwargs def f(a, *b, **kwargs): x = kwargs['x'] y = kwargs['y'] z = kwargs['z'] return x * y * z self.assertRaises(TypeError, f) self.assertRaises(KeyError, f, 1) self.assertRaises(TypeError, f, x=1, y=2, z=3) self.assertTrue(checkfn(f, [], 1, x=1, y=2, z=3)) self.assertTrue(checkfn(f, [], 1, 2, 3, x=1, y=2, z=3)) # varargs and kwargs, use varargs def f(a, *b, **kwargs): x = kwargs['x'] y = kwargs['y'] z = kwargs['z'] return x * y * z * b[0] self.assertTrue(checkfn(f, [], 1, 2, x=1, y=2, z=3)) self.assertTrue(checkfn(f, [], 1, 2, 3, x=1, y=2, z=3)) def test_expand_varargs(self): def f(*args): return args[1] def g(x): args = (x, np.ones((2, 3)), 5) return f(*args) self.assertTrue(checkfn(g, [], 1)) def test_expand_kwargs(self): def f(**args): return args['x'] def g(x): args = dict(x=x, y=np.ones((2, 3)), z=5) return f(**args) self.assertTrue(checkfn(g, [], 1)) class Python(unittest.TestCase): def test_range(self): def f(x): for i in range(3): x += 5 return x self.assertTrue(checkfn(f, [1])) def f(x): a = 3 for i in range(a): x += 5 return x self.assertTrue(checkfn(f, [1])) def f(x): a = x[0] + 10 for i in range(int(a)): x += 5 return x self.assertTrue(checkfn(f, [1])) def f(x, a): for i in range(a): x += 5 return x self.assertTrue(checkfn(f, [1], 3)) def f(): l = [] for i in range(3): l.append(i) return l self.assertTrue(checkfn(f, [])) def f(): l1 = {i:i for i in range(3)} l2 = [l1[i] for i in range(3)] return l2 self.assertRaises(KeyError, checkfn, f, []) def f(): l1 = {escape(i):i for i in range(3)} l2 = [l1[escape(i)] for i in range(3)] return l2 self.assertTrue(checkfn(f, [])) def test_pass(self): def fn(x): pass self.assertTrue(checkfn(fn, [1])) def test_if(self): # test that if statements escape their test arguments def f(switch): if switch > 0: return 1 else: return -1 self.assertTrue(checkfn(f, [], -10)) def test_for(self): def f(): x = 0 for i in range(5): x += i return x self.assertTrue(checkfn(f)) def f(x): for i in range(5): x += i return x self.assertTrue(checkfn(f, [1])) def test_enumerate(self): def f1(x): z = np.arange(x.shape[0]) for i, xi in enumerate(range(4)): z[i] += xi return z self.assertTrue(checkfn(f1, [1])) def f2(x): z = np.arange(x.shape[0]) for i, xi in enumerate(x): z[i] += xi return z self.assertRaises(TypeError, checkfn, f2, [1]) def test_sum(self): def f(): x = np.ones(5) y = np.ones(5) * 5 return builtins.sum([x, y]) self.assertTrue(checkfn(f, [])) def test_max(self): def f(): x = np.arange(5) return builtins.max(x) self.assertTrue(checkfn(f, [])) def f(x): return builtins.max(x) self.assertTrue(checkfn(f, [1])) def test_min(self): def f(): x = np.arange(5) return builtins.min(x) 
self.assertTrue(checkfn(f, [])) def f(x): return builtins.min(x) self.assertTrue(checkfn(f, [1])) def test_isinstance(self): def f(x): if isinstance(x, int): return 1 elif isinstance(x, float): return -1 self.assertTrue(checkfn(f, [], 1, test_floatX=False)) self.assertTrue(checkfn(f, [], 1.0, test_floatX=False)) def test_tuple_index(self): def f(*x): return x[1] self.assertTrue(checkfn(f, [], 1, 2, 3)) def test_nested_tuple_index(self): def f(*x): return x[1] def g(*x): return f(*x) self.assertTrue(checkfn(g, [], 1, 2, 3)) def test_nested_def_tuple_index(self): def g(*x): def f(*x): return x[1] return f(*x) self.assertTrue(checkfn(g, [], 1, 2, 3)) def test_append(self): def f(): l = [] for i in range(5): l.append(i) return l self.assertTrue(checkfn(f, [])) def test_list_comprehension(self): def f(): x = np.arange(10.0) y = [xi + 10 for xi in escape(x)] return y self.assertTrue(checkfn(f, [])) def test_dict_comprehension(self): def f(): x = np.arange(10.0) y = {escape(xi): xi + 10 for xi in escape(x)} return y[5] self.assertTrue(checkfn(f, [])) def test_tuple_type(self): def f(): x = tuple((3, 4, 5)) return x def f2(x, y): return tuple(i for i in [x, y]) self.assertTrue(checkfn(f, [])) self.assertTrue(checkfn(f2, [], 1.0, 2.0)) def test_inplace_container(self): def f(): x = {3,4,5} x.remove(4) return sum(x) self.assertTrue(checkfn(f, [])) class BasicMath(unittest.TestCase): def test_basic_ops(self): for d in range(3): self.assertTrue(checkfn(lambda x: x + 2, [d])) self.assertTrue(checkfn(lambda x: x - 2, [d])) self.assertTrue(checkfn(lambda x: x * 2, [d])) self.assertTrue(checkfn(lambda x: x / 2, [d])) self.assertTrue(checkfn(lambda x: x / 2.0, [d])) self.assertTrue(checkfn(lambda x: x // 2.0, [d])) self.assertTrue(checkfn(lambda x: x ** 2, [d])) self.assertTrue(checkfn(lambda x: x % 2, [d])) def test_comparisons(self): for d in range(3): self.assertTrue(checkfn(lambda x, y: x > y, [d, d])) self.assertTrue(checkfn(lambda x, y: x < y, [d, d])) self.assertTrue(checkfn(lambda x, y: x >= y, [d, d])) self.assertTrue(checkfn(lambda x, y: x <= y, [d, d])) self.assertTrue(checkfn(lambda x, y: x == y, [d, d])) self.assertTrue(checkfn(lambda x, y: x != y, [d, d])) def test_inplace(self): def iadd(x): x += 10 return x def isub(x): x -= 10 return x def imul(x): x *= 10 return x def idiv(x): x /= 10.0 return x for d in range(3): for f in [iadd, isub, imul, idiv]: self.assertTrue(checkfn(f, [d])) class NumpyFns(unittest.TestCase): """ Test for coverage of functions in np namespace """ def test_all(self): def fn(x): return np.all(x > .5) self.assertTrue(checkfn(fn, [2])) def test_any(self): def fn(x): return np.any(x > .5) self.assertTrue(checkfn(fn, [2])) def test_arange(self): self.assertTrue(checkfn(lambda: np.arange(3), [])) # numpy arange doesn't return an array with the same dtype as its # argument, but theano arange does. In Context, the numpy arange # should be cast to match the theano one. 
self.assertTrue(checkfn(lambda: np.arange(np.float32(3.)), [])) def test_abs(self): def fn1(x): return np.abs(x) def fn2(x): return abs(x) self.assertTrue(checkfn(fn1, [2])) self.assertTrue(checkfn(fn2, [2])) def test_dot(self): def fn(x, y): return np.dot(x, y) for nd in np.ndindex(*([3] * fn.__code__.co_argcount)): self.assertTrue(checkfn(fn, nd)) def test_exp(self): def fn(x): return np.exp(x) self.assertTrue(checkfn(fn, [2])) def test_log(self): def fn(x): return np.log(x) self.assertTrue(checkfn(fn, [2])) def test_log1p(self): def fn(x): return np.log1p(x) self.assertTrue(checkfn(fn, [2])) def test_log10(self): def fn(x): return np.log10(x) self.assertTrue(checkfn(fn, [2])) def test_max(self): def fn(x): return np.max(x, 0) self.assertTrue(checkfn(fn, [2])) def test_min(self): def fn(x): return np.min(x, 0) self.assertTrue(checkfn(fn, [2])) def test_maximum(self): def fn(x, y): return np.maximum(x, y) self.assertTrue(checkfn(fn, [2, 2])) def test_minimum(self): def fn(x, y): return np.minimum(x, y) self.assertTrue(checkfn(fn, [2, 2])) def test_reshape(self): def fn(x, shape): return np.reshape(x, shape) self.assertTrue(checkfn(fn, [2], [2, 8])) def fn(x, shape1, shape2): return np.reshape(x, [shape1, shape2]) self.assertTrue(checkfn(fn, [2], 2, 8)) self.assertTrue(checkfn(fn, [2], 2, -1)) self.assertTrue(checkfn(lambda x: np.reshape(x, x.shape), [2])) self.assertTrue(checkfn( lambda x: np.reshape(x, (x.shape[0], x.shape[1])), [2])) def test_sum(self): self.assertTrue(checkfn(lambda x: np.sum(x), [2])) self.assertTrue(checkfn(lambda x: np.sum(x, 1), [2])) self.assertTrue(checkfn(lambda x: np.sum(x, axis=1), [2])) self.assertTrue(checkfn(lambda x: np.sum(x, axis=1), [2])) self.assertTrue(checkfn(lambda x: np.sum(x, axis=None), [2])) self.assertTrue(checkfn(lambda x, a: np.sum(x, a), [2], 0)) self.assertTrue(checkfn(lambda x, a: np.sum(x, a), [2], None)) self.assertTrue(checkfn(lambda x, a: np.sum(x, axis=a), [2], 0)) def test_sqrt(self): def fn(x): return np.sqrt(x) self.assertTrue(checkfn(fn, [2])) def test_tanh(self): def fn(x): return np.tanh(x) self.assertTrue(checkfn(fn, [2])) def test_transpose(self): self.assertTrue(checkfn(lambda x: np.transpose(x), [2])) self.assertTrue(checkfn(lambda x: np.transpose(x, (0, 1)), [2])) self.assertTrue(checkfn(lambda x, a: np.transpose(x, a), [2], (0, 1))) self.assertTrue(checkfn( lambda x, a0, a1: np.transpose(x, (a0, a1)), [2], 0, 1)) def test_zeros_like(self): def fn(x): return np.zeros_like(x) self.assertTrue(checkfn(fn, [2])) def test_astype(self): self.assertTrue(checkfn(lambda x: x.astype('float32'), [2])) def test_astype_numpy_class(self): self.assertTrue(checkfn(lambda x: x.astype(np.float32), [2])) def test_cast(self): self.assertTrue(checkfn(lambda x: int(x), [0])) self.assertTrue(checkfn(lambda x: float(x), [0])) self.assertTrue(checkfn(lambda x: bool(x), [0])) self.assertTrue(checkfn(lambda x: np.float_(x), [2])) self.assertTrue(checkfn(lambda x: np.float32(x), [2])) self.assertTrue(checkfn(lambda x: np.float64(x), [2])) self.assertTrue(checkfn(lambda x: np.int_(x), [2])) self.assertTrue(checkfn(lambda x: np.int16(x), [2])) self.assertTrue(checkfn(lambda x: np.bool_(x), [2])) self.assertTrue(checkfn(lambda x: np.bool(x), [0])) def test_alloc(self): self.assertTrue(checkfn(lambda: np.ones(5), [])) self.assertTrue(checkfn(lambda: np.ones((2, 5)), [])) self.assertTrue(checkfn(lambda x: np.ones(x.shape), [0])) self.assertTrue(checkfn(lambda x: np.ones(x.shape), [1])) self.assertTrue(checkfn(lambda x: np.ones(x.shape), [2])) def 
test_sort(self): self.assertTrue(checkfn(lambda x: np.sort(x), [2])) self.assertTrue(checkfn(lambda x: np.sort(x, 0), [2])) def test_concatenate(self): self.assertTrue(checkfn(lambda x, y: np.vstack((x, y)), [2, 2])) self.assertTrue(checkfn(lambda x, y: np.hstack((x, y)), [2, 2])) def test_axis(self): def f(x, axis=1): return np.std(x, axis=axis) self.assertTrue(checkfn(f, [2])) class RandomNumbers(unittest.TestCase): def check_random(self, fn, *args, **kwargs): context.reset() F = context.recompile(fn) result1 = F(*args, **kwargs).eval() result2 = F(*args, **kwargs).eval() return np.allclose(result1, result2) def test_random(self): self.assertFalse( self.check_random(lambda: np.random.random((10, 10)))) self.assertFalse( self.check_random(lambda s: np.random.random(s), 10.0)) self.assertFalse( self.check_random(lambda s: np.random.random(s), (10, 10))) def test_random_shape(self): self.assertFalse( self.check_random( lambda x: np.random.random(x.shape), np.ones((10, 10)))) def test_random_binomial(self): self.assertFalse( self.check_random(lambda: np.random.binomial(1, .5, (10, 10)))) self.assertFalse( self.check_random(lambda s: np.random.binomial(1, .5, s), 10.0)) self.assertFalse(self.check_random( lambda s: np.random.binomial(1, .5, s), (10, 10))) class ArrayMethodsAttributes(unittest.TestCase): """ Test for coverage of array methods and attributes """ def test_argmax(self): self.assertTrue(checkfn(lambda x: x.argmax(), [2])) self.assertTrue(checkfn(lambda x: x.argmax(1), [2])) self.assertTrue(checkfn(lambda x: x.argmax(axis=1), [2])) self.assertTrue(checkfn(lambda x, a: x.argmax(a), [2], 0)) self.assertTrue(checkfn(lambda x, a: x.argmax(axis=a), [2], 0)) def test_argmin(self): self.assertTrue(checkfn(lambda x: x.argmin(), [2])) self.assertTrue(checkfn(lambda x: x.argmin(1), [2])) self.assertTrue(checkfn(lambda x: x.argmin(axis=1), [2])) self.assertTrue(checkfn(lambda x, a: x.argmin(a), [2], 0)) self.assertTrue(checkfn(lambda x, a: x.argmin(axis=a), [2], 0)) def test_argsort(self): self.assertTrue(checkfn(lambda x: x.argsort(), [2])) self.assertTrue(checkfn(lambda x: x.argsort(1), [2])) self.assertTrue(checkfn(lambda x: x.argsort(axis=1), [2])) self.assertTrue(checkfn(lambda x, a: x.argsort(a), [2], 0)) self.assertTrue(checkfn(lambda x, a: x.argsort(axis=a), [2], 0)) def test_clip(self): def fn(x, a, b): return x.clip(a, b) self.assertTrue(checkfn(fn, [2], .4, .45)) def test_conj(self): def fn(x): return x.conj() self.assertTrue(checkfn(fn, [2])) def test_conjugate(self): def fn(x): return x.conjugate() self.assertTrue(checkfn(fn, [2])) def test_copy(self): def fn(x): return x.copy() self.assertTrue(checkfn(fn, [2])) def test_diagonal(self): def fn(x): return x.diagonal() self.assertTrue(checkfn(fn, [2])) def test_dot(self): def fn(x, y): return x.dot(y) self.assertTrue(checkfn(fn, [2, 2])) self.assertTrue(checkfn(fn, [1, 2])) def test_imag(self): def fn(x): return x.imag self.assertTrue(checkfn(fn, [2])) def test_flatten(self): def fn(x): return x.flatten() self.assertTrue(checkfn(fn, [2])) def test_max(self): self.assertTrue(checkfn(lambda x: x.max(), [2])) self.assertTrue(checkfn(lambda x: x.max(1), [2])) self.assertTrue(checkfn(lambda x: x.max(axis=1), [2])) self.assertTrue(checkfn(lambda x, a: x.max(a), [2], 0)) self.assertTrue(checkfn(lambda x, a: x.max(axis=a), [2], 0)) def test_mean(self): self.assertTrue(checkfn(lambda x: x.mean(), [2])) self.assertTrue(checkfn(lambda x: x.mean(1), [2])) self.assertTrue(checkfn(lambda x: x.mean(axis=1), [2])) self.assertTrue(checkfn(lambda x, 
a: x.mean(a), [2], 0)) self.assertTrue(checkfn(lambda x, a: x.mean(axis=a), [2], 0)) def test_min(self): self.assertTrue(checkfn(lambda x: x.min(), [2])) self.assertTrue(checkfn(lambda x: x.min(1), [2])) self.assertTrue(checkfn(lambda x: x.min(axis=1), [2])) self.assertTrue(checkfn(lambda x, a: x.min(a), [2], 0)) self.assertTrue(checkfn(lambda x, a: x.min(axis=a), [2], 0)) def test_prod(self): self.assertTrue(checkfn(lambda x: x.prod(), [2])) self.assertTrue(checkfn(lambda x: x.prod(1), [2])) self.assertTrue(checkfn(lambda x: x.prod(axis=1), [2])) self.assertTrue(checkfn(lambda x, a: x.prod(a), [2], 0)) self.assertTrue(checkfn(lambda x, a: x.prod(axis=a), [2], 0)) def test_ravel(self): def fn(x): return x.ravel() self.assertTrue(checkfn(fn, [2])) def test_repeat(self): def fn(x, repeats): return x.repeat(repeats, axis=1) self.assertTrue(checkfn(fn, [2], 5)) def test_real(self): def fn(x): return x.real self.assertTrue(checkfn(fn, [2])) def test_reshape(self): def fn(x, shape): return x.reshape(shape) self.assertTrue(checkfn(fn, [2], [2, 8])) def fn(x, s1, s2): return x.reshape(s1, s2) self.assertTrue(checkfn(fn, [2], 2, 8)) self.assertTrue(checkfn(fn, [2], 2, -1)) def fn(x): return x.reshape(2, 8) self.assertTrue(checkfn(fn, [2])) def test_sort(self): def fn(x): x.sort() return x self.assertRaises(ValueError, checkfn, fn, [2]) def fn(x): x.sort(1) return x self.assertRaises(ValueError, checkfn, fn, [2]) def fn(x): x.sort(axis=1) return x self.assertRaises(ValueError, checkfn, fn, [2]) def fn(x, a): x.sort(a) return x self.assertRaises(ValueError, checkfn, fn, [2], 0) def test_sum(self): self.assertTrue(checkfn(lambda x: x.sum(), [2])) self.assertTrue(checkfn(lambda x: x.sum(1), [2])) self.assertTrue(checkfn(lambda x: x.sum(axis=1), [2])) self.assertTrue(checkfn(lambda x, a: x.sum(a), [2], 0)) self.assertTrue(checkfn(lambda x, a: x.sum(axis=a), [2], 0)) def test_swapaxes(self): def fn(x, a1, a2): return x.swapaxes(a1, a2) self.assertTrue(checkfn(fn, [2], 0, 1)) def test_astype(self): self.assertTrue(checkfn(lambda x: x.astype('int8'), [2])) self.assertTrue(checkfn(lambda x: x.astype('float32'), [2])) self.assertTrue(checkfn(lambda x: x.astype(np.float32), [2])) self.assertTrue(checkfn(lambda x: x.astype(dtype='float32'), [2])) self.assertTrue(checkfn(lambda x: x.astype(dtype=np.float32), [2])) def test_std(self): self.assertTrue(checkfn(lambda x: x.std(), [2])) self.assertTrue(checkfn(lambda x: x.std(1), [2])) self.assertTrue(checkfn(lambda x: x.std(axis=1), [2])) self.assertTrue(checkfn(lambda x, a: x.std(a), [2], 0)) self.assertTrue(checkfn(lambda x, a: x.std(axis=a), [2], 0)) def test_size(self): self.assertTrue(checkfn(lambda x: np.arange(x.size), [1])) self.assertTrue(checkfn(lambda x: np.arange(x.size), [2])) def test_T(self): def fn(x): return x.T self.assertTrue(checkfn(fn, [1])) self.assertTrue(checkfn(fn, [2])) def test_transpose(self): def fn(x): return x.transpose() self.assertTrue(checkfn(fn, [1])) self.assertTrue(checkfn(fn, [2])) def test_var(self): self.assertTrue(checkfn(lambda x: x.var(), [2])) self.assertTrue(checkfn(lambda x: x.var(1), [2])) self.assertTrue(checkfn(lambda x: x.var(axis=1), [2])) self.assertTrue(checkfn(lambda x, a: x.var(a), [2], 0)) self.assertTrue(checkfn(lambda x, a: x.var(axis=a), [2], 0)) class Namespaces(unittest.TestCase): def test_global(self): x = np.ones((3, 4)) def f(): return x.swapaxes(0, 1) self.assertTrue(checkfn(f, [])) def test_nested_functions(self): def g(x): def h(x): return x.swapaxes(1, 0) return h(x) def f(x): return g(x) 
self.assertTrue(checkfn(f, [2])) def test_define_class(self): """ This fails due to shadowing of s (and then not being to set values) """ def f(): class StringAttr(object): def __init__(self): self.s = "string" S = StringAttr() def f2(**kwargs): if kwargs['string'] == 5: return 1 else: return 0 return f2(**{S.s: 5}) self.assertTrue(checkfn(f, [])) def test_freevars(self): class Test(object): def __init__(self): self.x = np.arange(5.) - 10.0 def getx(self): return self.x t = Test() def f(x): return np.dot(x, t.x) x = np.arange(5.) self.assertTrue(checkfn(f, [], x)) self.assertTrue(id(t.x) in context.sym_vars) class ArraySubscripts(unittest.TestCase): def test_indexing(self): self.assertTrue(checkfn(lambda x: x[2], [1])) self.assertTrue(checkfn(lambda x: x[-2], [1])) self.assertTrue(checkfn(lambda x: x[2], [2])) self.assertTrue(checkfn(lambda x: x[-2], [2])) self.assertTrue(checkfn(lambda x: x[2, 2], [2])) self.assertTrue(checkfn(lambda x: x[-2, -2], [2])) def test_adv_index(self): self.assertTrue(checkfn(lambda x: x[[3, 2, 1], [1, 2, 3]], [2])) self.assertTrue(checkfn(lambda x: x[x > .5], [2])) self.assertTrue(checkfn(lambda x: x[(x > .1) * (x < .5)], [2])) self.assertTrue(checkfn(lambda x: x[[2, 3], 1:], [2])) # @unittest.expectedFailure # def test_adv_index_known_failures(self): # self.assertTrue(checkfn(lambda x: x[1:, x > .5], [2])) # self.assertTrue(checkfn(lambda x: x[x > .5, 1:], [2])) def test_slicing(self): # SLICE+0 self.assertTrue(checkfn(lambda x: x[:], [1])) self.assertTrue(checkfn(lambda x: x[:], [2])) # SLICE+1 self.assertTrue(checkfn(lambda x: x[1:], [1])) self.assertTrue(checkfn(lambda x: x[-2:], [1])) self.assertTrue(checkfn(lambda x: x[1:, 1:], [2])) self.assertTrue(checkfn(lambda x: x[-2:, -2:], [2])) # SLICE+2 self.assertTrue(checkfn(lambda x: x[:2], [1])) self.assertTrue(checkfn(lambda x: x[:-2], [1])) self.assertTrue(checkfn(lambda x: x[:2, :2], [2])) self.assertTrue(checkfn(lambda x: x[:-2, :-2], [2])) # SLICE+3 self.assertTrue(checkfn(lambda x: x[1:3], [1])) self.assertTrue(checkfn(lambda x: x[-3:-1], [1])) self.assertTrue(checkfn(lambda x: x[1:3, 1:3], [2])) self.assertTrue(checkfn(lambda x: x[-3:-1, -3:-1], [2])) def test_index_and_slice(self): self.assertTrue(checkfn(lambda x: x[1:3, 2], [2])) def test_index_assign(self): def f(): x = np.ones((3, 4)) x[2] = 100 return x self.assertTrue(checkfn(f, [])) def f(): x = np.ones((3, 4)) x[2, 2] = 100 return x self.assertTrue(checkfn(f, [])) def f(): x = np.ones((3, 4)) x[2, 2] += 100 return x self.assertTrue(checkfn(f, [])) def f(x): x[2, 2] = 100 return x self.assertTrue(checkfn(f, [2])) def f(x): x[2, 2] += 100 return x self.assertTrue(checkfn(f, [2])) def test_slice_assign(self): def f(): x = np.ones((3, 4)) x[2:3] = 100 return x self.assertTrue(checkfn(f, [])) def f(): x = np.ones((3, 4)) x[2:3, 2:3] += 100 return x self.assertTrue(checkfn(f, [])) def f(x): x[2:3, 2:3] += 100 return x self.assertTrue(checkfn(f, [2])) def test_store_slice(self): # STORE_SLICE+0 def f(x): x[:] = 5 x[:] += 5 return x self.assertTrue(checkfn(f, [1])) self.assertTrue(checkfn(f, [2])) # STORE_SLICE+1 def f(x): x[2:] = 5 x[-2:] += 5 return x def f2(x): x[2:, 2:] = 5 x[-2:, -2:] += 5 return x self.assertTrue(checkfn(f, [1])) self.assertTrue(checkfn(f, [2])) self.assertTrue(checkfn(f2, [2])) # STORE_SLICE+2 def f(x): x[:2] = 5 x[:-2] += 5 return x def f2(x): x[:2, :2] = 5 x[:-2, :-2] += 5 return x self.assertTrue(checkfn(f, [1])) self.assertTrue(checkfn(f, [2])) self.assertTrue(checkfn(f2, [2])) # STORE_SLICE+3 def f(x): x[1:3] = 5 x[-3:-1] 
+= 5 return x def f2(x): x[1:3, 1:3] = 5 x[-3:-1, -3:-1] += 5 return x self.assertTrue(checkfn(f, [1])) self.assertTrue(checkfn(f, [2])) self.assertTrue(checkfn(f2, [2])) def test_array_assign(self): def f(x): o = np.ones((2, 3)) x[1:3, 1:4] = o return x self.assertTrue(checkfn(f, [2])) def test_nested_assign(self): def f(x): x[2:4][1, 2] = 100 return x self.assertTrue(checkfn(f, [2])) def f(x): x[2:4][1, 2] += 100 return x self.assertTrue(checkfn(f, [2])) def f(): d = {1: {2: 3}} d[1][2] = 4 return d[1][2] self.assertTrue(checkfn(f, [])) class TestMethods(unittest.TestCase): def test_instance_method(self): class Test(object): def test(self, x): return x * 2 t = Test() self.assertTrue(checkfn(t.test, [2])) def test_class_method(self): class Test(object): @classmethod def test(cls, x): return x * 2 t = Test() self.assertTrue(checkfn(t.test, [2])) self.assertTrue(checkfn(Test.test, [2])) def test_static_method(self): class Test(object): @staticmethod def test(x): return x * 2 t = Test() self.assertTrue(checkfn(t.test, [2])) self.assertTrue(checkfn(Test.test, [2])) class NumberMethodsAttributes(unittest.TestCase): """ Test for coverage of NumPy number methods and attributes """ def test_reduce_method(self): self.assertTrue(checkfn(lambda x: np.dot(x, x).sum(), [1])) self.assertTrue(checkfn(lambda x: np.dot(x, x).mean(), [1])) class Ops(unittest.TestCase): """ test bytecode op coverage for misc cases """ def test_DUP_TOP(self): def f(x): x[:] += 100 return x self.assertTrue(checkfn(f, [2])) class Collections(unittest.TestCase): def test_views(self): from collections import OrderedDict def f(): d = {1: 2, 3: 4, 5: 6} return list(v for v in d.values()) self.assertTrue(checkfn(f, [])) def test_OrderedDict(self): from collections import OrderedDict o = OrderedDict(a=1, b=2, c=3) def f(): x = 0 for v in o.values(): x += v return x self.assertTrue(checkfn(f, [])) class InferUpdates(unittest.TestCase): def test_assign_updates(self): c = autodiff.context.Context(infer_updates=False) c_upd = autodiff.context.Context(infer_updates=True) class Test: def __init__(self): self.reset() def reset(self): self.tmp = 0.0 test = Test() def f(x): test.tmp = test.tmp + x return test.tmp F = c.recompile(f) F_upd = c_upd.recompile(f) inp = 5.0 test.reset() out = F(inp) test.reset() out_upd = F_upd(inp) compiled = theano.function([], out, updates=c.updates) compiled_upd = theano.function([], out_upd, updates=c_upd.updates) self.assertTrue(np.allclose(compiled(), 5.0)) self.assertTrue(np.allclose(compiled(), 5.0)) self.assertTrue(np.allclose(compiled_upd(), 5.0)) self.assertTrue(np.allclose(compiled_upd(), 10.0)) def test_augassign_updates(self): c = autodiff.context.Context(infer_updates=False) c_upd = autodiff.context.Context(infer_updates=True) class Test: def __init__(self): self.reset() def reset(self): self.tmp = 0.0 test = Test() def f(x): test.tmp += x return test.tmp F = c.recompile(f) F_upd = c_upd.recompile(f) inp = 5.0 test.reset() out = F(inp) test.reset() out_upd = F_upd(inp) compiled = theano.function([], out, updates=c.updates) compiled_upd = theano.function([], out_upd, updates=c_upd.updates) self.assertTrue(np.allclose(compiled(), 5.0)) self.assertTrue(np.allclose(compiled(), 5.0)) self.assertTrue(np.allclose(compiled_upd(), 5.0)) self.assertTrue(np.allclose(compiled_upd(), 10.0))
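# --- Illustrative aside (not part of the test module above) -------------------
# Minimal sketch of the workflow the checkfn()/_checkfn() helpers wrap:
# recompile a plain Python/NumPy function through an autodiff Context, evaluate
# the resulting symbolic (Theano) graph, and compare it with the eager NumPy
# result. This mirrors what the tests above do and assumes the same
# autodiff/Theano versions they were written against; f_demo is a made-up
# example function.
import numpy as np
import autodiff.context


def f_demo(x, y):
    return np.dot(x, y) + 1.0


demo_ctx = autodiff.context.Context(force_floatX=False)
F_demo = demo_ctx.recompile(f_demo)      # symbolic version of f_demo

a = np.random.random((4, 4))
b = np.random.random((4, 4))

sym_result = F_demo(a, b).eval()         # Theano variable -> concrete ndarray
py_result = f_demo(a, b)                 # ordinary eager evaluation

assert np.allclose(py_result, sym_result)
# ------------------------------------------------------------------------------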
# Copyright 2012-2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Identity v3 Role action implementations""" import logging import six import sys from cliff import command from cliff import lister from cliff import show from openstackclient.common import utils class AddRole(command.Command): """Adds a role to a user or group on a domain or project""" log = logging.getLogger(__name__ + '.AddRole') def get_parser(self, prog_name): parser = super(AddRole, self).get_parser(prog_name) parser.add_argument( 'role', metavar='<role>', help='Name or ID of role to add', ) user_or_group = parser.add_mutually_exclusive_group() user_or_group.add_argument( '--user', metavar='<user>', help='Name or ID of user to add a role', ) user_or_group.add_argument( '--group', metavar='<group>', help='Name or ID of group to add a role', ) domain_or_project = parser.add_mutually_exclusive_group() domain_or_project.add_argument( '--domain', metavar='<domain>', help='Name or ID of domain associated with user or group', ) domain_or_project.add_argument( '--project', metavar='<project>', help='Name or ID of project associated with user or group', ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)', parsed_args) identity_client = self.app.client_manager.identity if (not parsed_args.user and not parsed_args.domain and not parsed_args.group and not parsed_args.project): return role = utils.find_resource( identity_client.roles, parsed_args.role, ) if parsed_args.user and parsed_args.domain: user = utils.find_resource( identity_client.users, parsed_args.user, ) domain = utils.find_resource( identity_client.domains, parsed_args.domain, ) identity_client.roles.grant( role.id, user=user.id, domain=domain.id, ) elif parsed_args.user and parsed_args.project: user = utils.find_resource( identity_client.users, parsed_args.user, ) project = utils.find_resource( identity_client.projects, parsed_args.project, ) identity_client.roles.grant( role.id, user=user.id, project=project.id, ) elif parsed_args.group and parsed_args.domain: group = utils.find_resource( identity_client.groups, parsed_args.group, ) domain = utils.find_resource( identity_client.domains, parsed_args.domain, ) identity_client.roles.grant( role.id, group=group.id, domain=domain.id, ) elif parsed_args.group and parsed_args.project: group = utils.find_resource( identity_client.groups, parsed_args.group, ) project = utils.find_resource( identity_client.projects, parsed_args.project, ) identity_client.roles.grant( role.id, group=group.id, project=project.id, ) else: sys.stderr.write("Role not added, incorrect set of arguments \ provided. 
See openstack --help for more details\n") return class CreateRole(show.ShowOne): """Create new role""" log = logging.getLogger(__name__ + '.CreateRole') def get_parser(self, prog_name): parser = super(CreateRole, self).get_parser(prog_name) parser.add_argument( 'name', metavar='<role-name>', help='New role name', ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)', parsed_args) identity_client = self.app.client_manager.identity role = identity_client.roles.create(name=parsed_args.name) role._info.pop('links') return zip(*sorted(six.iteritems(role._info))) class DeleteRole(command.Command): """Delete existing role""" log = logging.getLogger(__name__ + '.DeleteRole') def get_parser(self, prog_name): parser = super(DeleteRole, self).get_parser(prog_name) parser.add_argument( 'role', metavar='<role>', help='Name or ID of role to delete', ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)', parsed_args) identity_client = self.app.client_manager.identity role = utils.find_resource( identity_client.roles, parsed_args.role, ) identity_client.roles.delete(role.id) return class ListRole(lister.Lister): """List roles""" log = logging.getLogger(__name__ + '.ListRole') def get_parser(self, prog_name): parser = super(ListRole, self).get_parser(prog_name) domain_or_project = parser.add_mutually_exclusive_group() domain_or_project.add_argument( '--domain', metavar='<domain>', help='Filter role list by <domain>', ) domain_or_project.add_argument( '--project', metavar='<project>', help='Filter role list by <project>', ) user_or_group = parser.add_mutually_exclusive_group() user_or_group.add_argument( '--user', metavar='<user>', help='Name or ID of user to list roles assigned to', ) user_or_group.add_argument( '--group', metavar='<group>', help='Name or ID of group to list roles assigned to', ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)' % parsed_args) identity_client = self.app.client_manager.identity if parsed_args.user: user = utils.find_resource( identity_client.users, parsed_args.user, ) elif parsed_args.group: group = utils.find_resource( identity_client.groups, parsed_args.group, ) if parsed_args.domain: domain = utils.find_resource( identity_client.domains, parsed_args.domain, ) elif parsed_args.project: project = utils.find_resource( identity_client.projects, parsed_args.project, ) # no user or group specified, list all roles in the system if not parsed_args.user and not parsed_args.group: columns = ('ID', 'Name') data = identity_client.roles.list() elif parsed_args.user and parsed_args.domain: columns = ('ID', 'Name', 'Domain', 'User') data = identity_client.roles.list( user=user, domain=domain, ) for user_role in data: user_role.user = user.name user_role.domain = domain.name elif parsed_args.user and parsed_args.project: columns = ('ID', 'Name', 'Project', 'User') data = identity_client.roles.list( user=user, project=project, ) for user_role in data: user_role.user = user.name user_role.project = project.name elif parsed_args.user: columns = ('ID', 'Name') data = identity_client.roles.list( user=user, domain='default', ) elif parsed_args.group and parsed_args.domain: columns = ('ID', 'Name', 'Domain', 'Group') data = identity_client.roles.list( group=group, domain=domain, ) for group_role in data: group_role.group = group.name group_role.domain = domain.name elif parsed_args.group and parsed_args.project: columns = ('ID', 'Name', 'Project', 'Group') data = identity_client.roles.list( 
group=group, project=project, ) for group_role in data: group_role.group = group.name group_role.project = project.name else: sys.stderr.write("Error: If a user or group is specified, either " "--domain or --project must also be specified to " "list role grants.\n") return ([], []) return (columns, (utils.get_item_properties( s, columns, formatters={}, ) for s in data)) class RemoveRole(command.Command): """Remove role command""" log = logging.getLogger(__name__ + '.RemoveRole') def get_parser(self, prog_name): parser = super(RemoveRole, self).get_parser(prog_name) parser.add_argument( 'role', metavar='<role>', help='Name or ID of role to remove', ) user_or_group = parser.add_mutually_exclusive_group() user_or_group.add_argument( '--user', metavar='<user>', help='Name or ID of user to remove a role', ) user_or_group.add_argument( '--group', metavar='<group>', help='Name or ID of group to remove a role', ) domain_or_project = parser.add_mutually_exclusive_group() domain_or_project.add_argument( '--domain', metavar='<domain>', help='Name or ID of domain associated with user or group', ) domain_or_project.add_argument( '--project', metavar='<project>', help='Name or ID of project associated with user or group', ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)', parsed_args) identity_client = self.app.client_manager.identity if (not parsed_args.user and not parsed_args.domain and not parsed_args.group and not parsed_args.project): return role = utils.find_resource( identity_client.roles, parsed_args.role, ) if parsed_args.user and parsed_args.domain: user = utils.find_resource( identity_client.users, parsed_args.user, ) domain = utils.find_resource( identity_client.domains, parsed_args.domain, ) identity_client.roles.revoke( role.id, user=user.id, domain=domain.id, ) elif parsed_args.user and parsed_args.project: user = utils.find_resource( identity_client.users, parsed_args.user, ) project = utils.find_resource( identity_client.projects, parsed_args.project, ) identity_client.roles.revoke( role.id, user=user.id, project=project.id, ) elif parsed_args.group and parsed_args.domain: group = utils.find_resource( identity_client.groups, parsed_args.group, ) domain = utils.find_resource( identity_client.domains, parsed_args.domain, ) identity_client.roles.revoke( role.id, group=group.id, domain=domain.id, ) elif parsed_args.group and parsed_args.project: group = utils.find_resource( identity_client.groups, parsed_args.group, ) project = utils.find_resource( identity_client.projects, parsed_args.project, ) identity_client.roles.revoke( role.id, group=group.id, project=project.id, ) else: sys.stderr.write("Role not removed, incorrect set of arguments \ provided. 
See openstack --help for more details\n") return class SetRole(command.Command): """Set role command""" log = logging.getLogger(__name__ + '.SetRole') def get_parser(self, prog_name): parser = super(SetRole, self).get_parser(prog_name) parser.add_argument( 'role', metavar='<role>', help='Name or ID of role to update', ) parser.add_argument( '--name', metavar='<new-role-name>', help='New role name', ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)', parsed_args) identity_client = self.app.client_manager.identity if not parsed_args.name: return role = utils.find_resource( identity_client.roles, parsed_args.role, ) identity_client.roles.update(role.id, name=parsed_args.name) return class ShowRole(show.ShowOne): """Show single role""" log = logging.getLogger(__name__ + '.ShowRole') def get_parser(self, prog_name): parser = super(ShowRole, self).get_parser(prog_name) parser.add_argument( 'role', metavar='<role>', help='Name or ID of role to display', ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)', parsed_args) identity_client = self.app.client_manager.identity role = utils.find_resource( identity_client.roles, parsed_args.role, ) role._info.pop('links') return zip(*sorted(six.iteritems(role._info)))
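# --- Illustrative aside (not part of the module above) ------------------------
# CreateRole and ShowRole both end with zip(*sorted(six.iteritems(role._info))):
# cliff's ShowOne expects a (column_names, values) pair, and that one-liner
# builds it from the role's _info dict after 'links' has been popped. The
# snippet below shows the idiom on a made-up stand-in dict (the keys and values
# are illustrative, not real Keystone data).
import six

fake_info = {'id': 'b1a5c3e7', 'name': 'admin', 'domain_id': None}

columns, values = zip(*sorted(six.iteritems(fake_info)))
print(columns)   # ('domain_id', 'id', 'name')
print(values)    # (None, 'b1a5c3e7', 'admin')
# ------------------------------------------------------------------------------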
import json import time import uuid from multiprocessing import Process from multiprocessing import active_children from pyetcd import EtcdNodeExist, EtcdKeyNotFound from etcdb import ProgrammingError, OperationalError, LOCK_WAIT_TIMEOUT from etcdb.sqlparser.parser import SQLParser, SQLParserError class ColInfo(object): def __init__(self, name='', width=None): self.name = name if width and width > len(name): self.width = width else: self.width = len(self.name) def __repr__(self): return "ColInfo: name={name}, width={width}".format(name=self.name, width=self.width) def eval_bool_primary(row, where): pass # op = where[0] # if where['bool_primary'] == class Cursor(object): """These objects represent a database cursor, which is used to manage the context of a fetch operation. Cursors created from the same connection are not isolated, i.e. , any changes done to the database by a cursor are immediately visible by the other cursors. Cursors created from different connections can or can not be isolated, depending on how the transaction support is implemented (see also the connection's .rollback () and .commit () methods). """ description = None """This read-only attribute is a sequence of 7-item sequences. Each of these sequences contains information describing one result column: name type_code display_size internal_size precision scale null_ok The first two items ( name and type_code ) are mandatory, the other five are optional and are set to None if no meaningful values can be provided. """ rowcount = 0 """This read-only attribute specifies the number of rows that the last .execute*() produced (for DQL statements like SELECT ) or affected (for DML statements like UPDATE or INSERT ).""" arraysize = 1 """This read/write attribute specifies the number of rows to fetch at a time with .fetchmany(). It defaults to 1 meaning to fetch a single row at a time. """ connection = None """Etcd connection object""" # ColInfo = ColInfo _rows = () _column_names = () _sql_parser = None _col_infos = () def __init__(self, connection): self.connection = connection self._sql_parser = SQLParser() self._db = connection.db self._timeout = connection.timeout self.lastrowid = None @property def n_cols(self): return len(self._column_names) @property def n_rows(self): return len(self._rows) @property def col_infos(self): return self._col_infos @staticmethod def close(): """Close the cursor now (rather than whenever __del__ is called). 
""" pass def execute(self, query, args=None): """Prepare and execute a database operation (query or command).""" if args: # print("args = %r" % args) query = query % tuple(["'%s'" % a for a in args]) print("query = %s" % query) self._rows = () try: tree = self._sql_parser.parse(query) except SQLParserError as err: raise ProgrammingError(err) if tree.query_type == 'SELECT': self._column_names, self._rows = self._execute_select(tree) elif tree.query_type == "USE_DATABASE": self._db = tree.db elif tree.query_type == "SHOW_DATABASES": self._column_names, self._rows = self._execute_show_databases() elif tree.query_type == "SHOW_TABLES": self._column_names, self._rows = self._execute_show_tables(tree) elif tree.query_type == "CREATE_DATABASE": self._execute_create_database(tree.db) elif tree.query_type == "DROP_DATABASE": self._execute_drop_database(tree.db) elif tree.query_type == "CREATE_TABLE": self._execute_create_table(tree) elif tree.query_type == "DESC_TABLE": self._column_names, self._rows = self._execute_desc_table(tree) elif tree.query_type == "INSERT": self._execute_insert(tree) elif tree.query_type == "UPDATE": return self._execute_update(tree) self._col_infos = self._update_columns(self._column_names, self._rows) return len(self._rows) @staticmethod def executemany(operation, **kwargs): """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters . """ pass def fetchone(self): """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available.""" try: return self._rows[0] except IndexError: return None finally: self._rows = tuple(r for r in self._rows[1:]) def fetchmany(self, n): """Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). An empty sequence is returned when no more rows are available. """ rows = () for i in xrange(n): row = self.fetchone() if row: rows += (row,) return rows def fetchall(self): """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.""" result = self._rows self._rows = () return result @staticmethod def setinputsizes(sizes): """This can be used before a call to .execute*() to predefine memory areas for the operation's parameters. """ pass @staticmethod def setoutputsize(size): """Set a column buffer size for fetches of large columns (e.g. LONG s, BLOB s, etc.). The column is specified as an index into the result sequence. Not specifying the column will set the default size for all large columns in the cursor. 
""" pass def _execute_select(self, tree): db = self._get_current_db(tree) tbl = tree.table rows = () columns = () lock_id = self._get_read_lock(db, tbl) try: function_exists = False variable_exists = False for expression in tree.expressions: columns += (expression['name'],) if expression['type'] == 'function': function_exists = True if expression['type'] == 'variable': variable_exists = True result_keys = self._get_pks(db, tbl, tree) if function_exists or variable_exists: result_keys.append(None) for pk in result_keys: row = self.get_table_row(tree, pk) if tree.where: if eval_bool_primary(row, tree.where): rows += (row,) else: rows += (row,) if tree.order['by'] and tree.order['by'] in columns: pos = columns.index(tree.order['by']) def getKey(item): return item[pos] reverse = False if tree.order['direction'] == 'DESC': reverse = True rows = sorted(rows, reverse=reverse, key=getKey) if tree and tree.limit is not None: rows = rows[:tree.limit] return columns, rows finally: self._release_read_lock(db, tbl, lock_id) def _execute_show_tables(self, tree): db = self._get_current_db(tree) etcd_response = self.connection.client.read('/%s' % db) rows = () try: for node in etcd_response.node['nodes']: row = (node['key'].replace('/%s/' % db, '', 1),) if tree.options['full']: row += ('BASE TABLE',) rows += (row,) except KeyError: pass col_names = ('Table',) if tree.options['full']: col_names += ('Type',) return col_names, rows def _execute_create_database(self, db): try: self.connection.client.mkdir('/%s' % db) except EtcdNodeExist as err: raise ProgrammingError("Failed to create database: %s" % err) def _execute_create_table(self, tree): db = self._get_current_db(tree) pk_field = None for field_name, value in tree.fields.iteritems(): try: if value['options']['primary']: pk_field = field_name except KeyError: pass if not pk_field: raise ProgrammingError('Primary key must be defined') if tree.fields[pk_field]['options']['nullable']: raise ProgrammingError('Primary key must be NOT NULL') try: table_name = '/%s/%s' % (db, tree.table) self.connection.client.mkdir(table_name) self.connection.client.write(table_name + "/_fields", json.dumps(tree.fields)) except EtcdNodeExist as err: raise ProgrammingError("Failed to create table: %s" % err) def _execute_show_databases(self): etcd_response = self.connection.client.read('/') rows = () try: for node in etcd_response.node['nodes']: val = node['key'].lstrip('/') rows += (val,), except KeyError: pass return ('Database',), rows def _eval_function(self, name, db=None, tbl=None): if name == "VERSION": return self._eval_function_version() elif name == "COUNT": return self._eval_function_count(db, tbl) def _eval_variable(self, name): if name == "SQL_MODE": return self._eval_variable_sql_mode() def _eval_function_version(self): return self.connection.client.version() def _get_pks(self, db, table, tree=None): """ Get list of primary key values for a given table :param db: database name :param table: table name :param tree: instance of SQLTree :return: list of values or empty list if table is empty """ if not table: return [] pks = [] table_key = "/{db}/{tbl}".format(db=db, tbl=table) etcd_result = self.connection.client.read(table_key) pk = self._get_pk(db, table) pk_name = self._get_pk_name(db, table) pk_type = pk[pk_name]['type'] try: nodes = etcd_result.node['nodes'] for n in nodes: pk_key = n['key'] pk = pk_key.replace(table_key + '/', '', 1) if pk_type in ['INT', 'INTEGER', 'SMALLINT', 'TINYINT']: pk = int(pk) pks.append(pk) pks = sorted(pks) except KeyError: pass 
return pks def get_table_row(self, tree, pk): db = self._get_current_db(tree) table = tree.table if pk: key = "/{db}/{tbl}/{pk}".format(db=db, tbl=table, pk=pk) etcd_response = self.connection.client.read(key) full_row = json.loads(etcd_response.node['value']) row = () for e in tree.expressions: field_name = e['name'] field = None if e['type'] == 'function': field = self._eval_function(e['name'], db=db, tbl=table) if e['type'] == 'variable': field = self._eval_variable(e['name']) if e['type'] == 'field': # print(e['name']) try: field = full_row[field_name] except KeyError: raise ProgrammingError('Error: Field %s not found' % field_name) row += (field, ) return row else: # One row if pk is None row = () for e in tree.expressions: field = None if e['type'] == 'function': field = self._eval_function(e['name'], db=db, tbl=table) if e['type'] == 'variable': field = self._eval_variable(e['name']) row += (field, ) return row @staticmethod def _eval_variable_sql_mode(): return "STRICT_ALL_TABLES" def _execute_drop_database(self, db): self.connection.client.rmdir('/%s' % db, recursive=True) def _execute_desc_table(self, tree): db = self._get_current_db(tree) table = tree.table key = '/{db}/{table}/_fields'.format(db=db, table=table) try: etcd_result = self.connection.client.read(key) except EtcdKeyNotFound: raise ProgrammingError('Table `{db}`.`{table}` doesn\'t exist'.format(db=db, table=table)) fields = json.loads(etcd_result.node['value']) rows = () for k, v in fields.iteritems(): field_type = v['type'] if v['options']['nullable']: nullable = 'YES' else: nullable = 'NO' indexes = '' if 'primary' in v['options'] and v['options']['primary']: indexes = 'PRI' if 'unique' in v['options'] and v['options']['unique']: indexes = 'UNI' try: default_value = v['options']['default'] except KeyError: default_value = '' extra = '' if 'auto_increment' in v['options'] and v['options']['auto_increment']: extra = 'auto_increment' row = (k, field_type, nullable, indexes, default_value, extra) rows += (row, ) return ('Field', 'Type', 'Null', 'Key', 'Default', 'Extra'), rows def _get_current_db(self, tree): db = self._db if tree.db: db = tree.db if not db: raise OperationalError('No database selected') return db def _get_table_fields(self, db, tbl): etcd_result = self.connection.client.read('/{db}/{tbl}/_fields'.format(db=db, tbl=tbl)) value = etcd_result.node['value'] return json.loads(value) def _get_pk(self, db, tbl): for f, v in self._get_table_fields(db, tbl).iteritems(): try: if v['options']['primary']: return { f: v } except KeyError: pass return None def _get_pk_name(self, db, tbl): return self._get_pk(db, tbl).keys()[0] def _execute_insert(self, tree): db = self._get_current_db(tree) table = tree.table self._get_write_lock(db, table) try: pk_field = self._get_pk_name(db, table) record = {} for field, v in self._get_table_fields(db, table).iteritems(): field_options = v['options'] try: record[field] = tree.fields[field] except KeyError: # value is not given if 'auto_increment' in field_options: self.lastrowid = self._get_next_auto_inc(db, table) record[field] = str(self.lastrowid) elif not field_options['nullable']: try: record[field] = field_options['default'] except KeyError: record[field] = None # Ignore this check for now # raise ProgrammingError('Error: Field %s cannot be NULL and no default value is set') else: record[field] = None pk_value = record[pk_field] self.connection.client.write('/{db}/{tbl}/{pk}'.format(db=db, tbl=table, pk=pk_value), json.dumps(record, sort_keys=True)) 
self._set_next_auto_inc(db, table) finally: self._release_write_lock(db, table) @staticmethod def _update_columns(columns_names, rows): """ Take a tuple of column names and set their widths to maximums from rows :param columns_names: Tuple of column names :param rows: Tuple of records. A record is a tuple, too. :return: Tuple of ColInfo() instances where widths are large enough to fit any value from rows """ widths = [] columns = () for col in columns_names: columns += (ColInfo(name=col), ) try: for col in columns_names: widths.append(len(col)) for row in rows: i = 0 for _ in columns_names: if len(row[i]) > widths[i]: widths[i] = len(row[i]) i += 1 i = 0 for col in columns: col.width = widths[i] i += 1 except TypeError: pass return columns def _get_meta_lock(self, db, tbl): """ Set a meta lock :param db: database name :param tbl: table name """ key = "/{db}/{tbl}/_lock_meta".format( db=db, tbl=tbl ) expires = time.time() + LOCK_WAIT_TIMEOUT while time.time() < expires: try: response = self.connection.client.\ compare_and_swap(key, '', ttl=self._timeout, prev_exist=False) self._keep_key_alive(key, self._timeout) return response except EtcdNodeExist: pass raise OperationalError('Lock wait timeout') def _release_meta_lock(self, db, tbl): """ Release a meta lock :param db: database name :param tbl: table name """ key = "/{db}/{tbl}/_lock_meta".format( db=db, tbl=tbl ) response = self.connection.client.delete(key) active_children() return response def _refresh_ttl(self, key, timeout): while True: try: self.connection.client.update_ttl(key, timeout) except (EtcdKeyNotFound, KeyboardInterrupt): break def _get_read_lock(self, db, tbl): """ Get read lock on a table db.tbl. Read lock is shared i.e. if other clients are allowed to read from the table :param db: database name :param tbl: table name :return: string with id of the lock or None if db or tbl is empty """ if not db or not tbl: return None self._get_meta_lock(db, tbl) lock_id = str(uuid.uuid4()) if self._write_lock_set(db, tbl): self._wait_until_write_lock_deleted(db, tbl) read_lock_key = '/{db}/{tbl}/_lock_read/{lock_id}'.format( db=db, tbl=tbl, lock_id=lock_id ) self.connection.client.write(read_lock_key, '', ttl=self._timeout) self._keep_key_alive(read_lock_key, self._timeout) self._release_meta_lock(db, tbl) return lock_id def _release_read_lock(self, db, tbl, lock_id): """ Release lock previously set on a table :param db: database name :param tbl: table name :param lock_id: string with lock identifier """ if not lock_id: return read_lock_key = '/{db}/{tbl}/_lock_read/{lock_id}'.format( db=db, tbl=tbl, lock_id=lock_id ) self.connection.client.delete(read_lock_key) def _write_lock_set(self, db, tbl): """ Check if write lock is set on a table :param db: database name :param tbl: table name :return: True or False """ write_lock_key = '/{db}/{tbl}/_lock_write'.format( db=db, tbl=tbl ) try: self.connection.client.read(write_lock_key) return True except EtcdKeyNotFound: return False def _wait_until_write_lock_deleted(self, db, tbl): """ Wait until the write lock is unset :param db: database name :param tbl: table name """ write_lock_key = '/{db}/{tbl}/_lock_write'.format( db=db, tbl=tbl ) try: while True: self.connection.client.read(write_lock_key, wait=True) except EtcdKeyNotFound: pass def _get_active_read_locks(self, db, tbl): """ Get list of active read locks on a table :param db: database name :param tbl: table name :return: list of locks or empty list """ read_lock_key = '/{db}/{tbl}/_lock_read'.format( db=db, tbl=tbl ) 
active_read_locks = [] try: for lock in self.connection.client.read(read_lock_key).node['nodes']: lock_id = lock['key'].replace('/{db}/{tbl}/_lock_read/'.format( db=db, tbl=tbl ), '', 1) active_read_locks.append(lock_id) except (EtcdKeyNotFound, KeyError): pass return active_read_locks def _get_write_lock(self, db, tbl): """ Set a write lock on a table :param db: :param tbl: """ self._get_meta_lock(db, tbl) self._ensure_no_write_lock(db, tbl) self._ensure_no_read_lock(db, tbl) write_lock_key = '/{db}/{tbl}/_lock_write'.format( db=db, tbl=tbl ) self.connection.client.write(write_lock_key, '', ttl=self._timeout) self._keep_key_alive(write_lock_key, self._timeout) self._release_meta_lock(db, tbl) def _release_write_lock(self, db, tbl): write_lock_key = '/{db}/{tbl}/_lock_write'.format( db=db, tbl=tbl ) self.connection.client.delete(write_lock_key) def _ensure_no_write_lock(self, db, tbl): if self._write_lock_set(db, tbl): self._wait_until_write_lock_deleted(db, tbl) def _ensure_no_read_lock(self, db, tbl): for lock in self._get_active_read_locks(db, tbl): self._wait_until_read_lock_released(db, tbl, lock) def _wait_until_read_lock_released(self, db, tbl, lock_id): read_lock_key = '/{db}/{tbl}/_lock_read/{lock_id}'.format( db=db, tbl=tbl, lock_id=lock_id ) try: while True: self.connection.client.read(read_lock_key) # TODO take into account modifiedIndex because race condition is possible # modified_index = response.node['modifiedIndex'] self.connection.client.read(read_lock_key, params={}, wait=True) except EtcdKeyNotFound: pass def _keep_key_alive(self, key, timeout): p = Process(target=self._refresh_ttl, args=(key, timeout)) p.start() def _get_next_auto_inc(self, db, tbl): key = '/{db}/{tbl}/_auto_inc'.format( db=db, tbl=tbl, ) try: etcd_result = self.connection.client.read(key) return int(etcd_result.node['value']) except EtcdKeyNotFound: return 1 def _set_next_auto_inc(self, db, tbl): n = self._get_next_auto_inc(db, tbl) key = '/{db}/{tbl}/_auto_inc'.format( db=db, tbl=tbl, ) self.connection.client.write(key, n + 1) def _execute_update(self, tree): return 1 def _eval_function_count(self, db, tbl): key = '/{db}/{tbl}'.format( db=db, tbl=tbl, ) etcd_result = self.connection.client.read(key) try: return int(len(etcd_result.node['nodes'])) except KeyError: return 0
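# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the table-locking
# scheme used by _get_read_lock() / _get_write_lock() above, replayed against
# a plain dict standing in for etcd. The key names mirror the ones built by
# the methods above (_lock_meta, _lock_read/<uuid>, _lock_write); the
# FakeEtcd class is an assumption for demonstration only and does not model
# TTLs, watches, keep-alive processes or the real etcd client API.
# ---------------------------------------------------------------------------
import uuid


class FakeEtcd(object):
    """Flat key/value store with just enough behaviour for the demo."""

    def __init__(self):
        self.keys = {}

    def create(self, key, value=''):
        # stands in for compare_and_swap(..., prev_exist=False):
        # only succeeds if the key does not exist yet
        if key in self.keys:
            raise RuntimeError('EtcdNodeExist: %s' % key)
        self.keys[key] = value

    def write(self, key, value=''):
        self.keys[key] = value

    def delete(self, key):
        del self.keys[key]


def demo_read_lock(client, db, tbl):
    """Mirror of _get_read_lock(): meta lock -> shared read key -> meta unlock."""
    meta_key = '/{db}/{tbl}/_lock_meta'.format(db=db, tbl=tbl)
    write_key = '/{db}/{tbl}/_lock_write'.format(db=db, tbl=tbl)

    client.create(meta_key)               # serialise lock bookkeeping
    assert write_key not in client.keys   # the real code waits for the writer here

    lock_id = str(uuid.uuid4())
    read_key = '/{db}/{tbl}/_lock_read/{lock_id}'.format(
        db=db, tbl=tbl, lock_id=lock_id)
    client.write(read_key)                # shared: many readers may hold one
    client.delete(meta_key)               # let other clients take the meta lock
    return lock_id


if __name__ == '__main__':
    fake = FakeEtcd()
    demo_read_lock(fake, 'testdb', 'users')
    demo_read_lock(fake, 'testdb', 'users')   # a second concurrent reader is fine
    print(sorted(fake.keys))                  # two _lock_read/<uuid> keys remain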
''' .. Red9 Studio Pack: Maya Pipeline Solutions Author: Mark Jackson email: rednineinfo@gmail.com Red9 blog : http://red9-consultancy.blogspot.co.uk/ MarkJ blog: http://markj3d.blogspot.co.uk This is a new implementation of the PoseSaver core, same file format and ConfigObj but now supports relative pose data handled via a posePointCloud and the snapping core .. note:: I use the node short name as the key in the dictionary so ALL NODES must have unique names or you may get unexpected results! ''' import Red9.startup.setup as r9Setup import Red9_CoreUtils as r9Core import Red9_General as r9General import Red9_AnimationUtils as r9Anim import Red9_Meta as r9Meta import maya.cmds as cmds import os import Red9.packages.configobj as configobj import time import getpass import logging logging.basicConfig() log = logging.getLogger(__name__) log.setLevel(logging.INFO) def getFolderPoseHandler(posePath): ''' Check if the given directory contains a poseHandler.py file if so return the filename. PoseHandlers are a way of extending or over-loading the standard behaviour of the poseSaver, see Vimeo for a more detailed explanation. ''' poseHandler=None poseHandlers=[py for py in os.listdir(posePath) if py.endswith('poseHandler.py')] if poseHandlers: poseHandler=poseHandlers[0] return poseHandler class DataMap(object): ''' New base class for handling data ''' def __init__(self, filterSettings=None, *args, **kws): ''' The idea of the DataMap is to make the node handling part of any system generic. This allows us to use this baseClass to build up things like poseSavers and all we have to worry about is the data extraction part, all the node handling and file handling is already done by this class ;) Note that we're not passing any data in terms of nodes here, We'll deal with those in the Save and Load calls. 
''' self.poseDict={} self.infoDict={} self.skeletonDict={} self.filepath='' self.mayaUpAxis = r9Setup.mayaUpAxis() self.thumbnailRes=[128,128] self.__metaPose=False self.metaRig=None # filled by the code as we process self.matchMethod='base' # method used to match nodes internally in the poseDict self.useFilter=True self.prioritySnapOnly=False self.skipAttrs=[] # attrs to completely ignore in any pose handling # make sure we have a settings object if filterSettings: if issubclass(type(filterSettings), r9Core.FilterNode_Settings): self.settings=filterSettings self.__metaPose=self.settings.metaRig else: raise StandardError('filterSettings param requires an r9Core.FilterNode_Settings object') else: self.settings=r9Core.FilterNode_Settings() self.__metaPose=self.settings.metaRig self.settings.printSettings() #Property so we sync the settings metaRig bool to the class metaPose bool def __get_metaPose(self): return self.__metaPose def __set_metaPose(self, val): self.__metaPose=val self.settings.metaRig=val metaPose = property(__get_metaPose, __set_metaPose) def setMetaRig(self,node): log.info('setting internal metaRig from given node : %s' % node) if r9Meta.isMetaNodeInherited(node,'MetaRig'): self.metaRig=r9Meta.MetaClass(node) else: self.metaRig=r9Meta.getConnectedMetaSystemRoot(node) return self.metaRig def hasFolderOverload(self): ''' modified so you can now prefix the poseHandler.py file makes it easier to keep track of in a production environment ''' self.poseHandler=None if self.filepath: self.poseHandler = getFolderPoseHandler(os.path.dirname(self.filepath)) return self.poseHandler def getNodesFromFolderConfig(self, rootNode, mode): ''' if the poseFolder has a poseHandler.py file use that to return the nodes to use for the pose instead ''' import imp log.debug('getNodesFromFolderConfig - useFilter=True : custom poseHandler running') posedir=os.path.dirname(self.filepath) print 'imp : ', self.poseHandler.split('.py')[0], ' : ', os.path.join(posedir, self.poseHandler) tempPoseFuncs = imp.load_source(self.poseHandler.split('.py')[0], os.path.join(posedir, self.poseHandler)) if mode=='load': nodes=tempPoseFuncs.poseGetNodesLoad(self,rootNode) if mode=='save': nodes=tempPoseFuncs.poseGetNodesSave(self,rootNode) del(tempPoseFuncs) return nodes def getNodes(self, nodes): ''' get the nodes to process This is designed to allow for specific hooks to be used from user code stored in the pose folder itself. ''' if not type(nodes)==list: nodes=[nodes] if self.useFilter: log.debug('getNodes - useFilter=True : filteActive=True - no custom poseHandler') if self.settings.filterIsActive(): return r9Core.FilterNode(nodes,self.settings).ProcessFilter() # main node filter else: log.debug('getNodes - useFilter=True : filteActive=False - no custom poseHandler') return nodes else: log.debug('getNodes - useFilter=False : no custom poseHandler') return nodes def getSkippedAttrs(self, rootNode=None): ''' the returned list of attrs from this function will be COMPLETELY ignored by the pose system. They will not be saved or loaded. 
Currently only supported under MetaRig ''' if self.metaRig and self.metaRig.hasAttr('poseSkippedAttrs'): return self.metaRig.poseSkippedAttrs return [] def getMaintainedAttrs(self, nodesToLoad, parentSpaceAttrs): ''' Attrs returned here will be cached prior to pose load, then restored in-tact afterwards ''' parentSwitches=[] if not type(parentSpaceAttrs)==list: parentSpaceAttrs=[parentSpaceAttrs] for child in nodesToLoad: for attr in parentSpaceAttrs: if cmds.attributeQuery(attr, exists=True, node=child): parentSwitches.append((child, attr, cmds.getAttr('%s.%s' % (child,attr)))) log.debug('parentAttrCache : %s > %s' % (child,attr)) return parentSwitches # Data Collection - Build the dataMap --------------------------------------------- @r9General.Timer def _collectNodeData_keyframes(self, node, key): ''' Capture and build keyframe data from this node and fill the data to the datamap[key] ''' attrs = r9Anim.getChannelBoxAttrs(node=node, asDict=True, incLocked=False) if not attrs['keyable']: return else: if not 'keydata' in self.poseDict[key]: self.poseDict[key]['keydata'] = {} for attr in attrs['keyable']: #print 'node : ', node, 'attr : ', attr channel = '%s.%s' % (node, attr) keyList = cmds.keyframe(channel, q=True, vc=True, tc=True, t=()) tangents = cmds.keyTangent(channel, q=True, t=(), itt=True, ott=True) if keyList: self.poseDict[key]['keydata'][attr] = '' for keyframe, value, t1, t2 in zip(keyList[0::2], keyList[1::2], tangents[0::2], tangents[1::2]): self.poseDict[key]['keydata'][attr] += '(%.02f,%f,"%s","%s"),' % (keyframe, value, t1, t2) # save key & tangent data #for keyframe, value in zip(keyList[0::2], keyList[1::2]): # tangentData = cmds.keyTangent(channel, q=True, t=(keyframe, keyframe), itt=True, ott=True) # self.poseDict[key]['attrs'][attr] += '(%.02f,%f,"%s","%s"),' % (keyframe, value, tangentData[0], tangentData[1]) def _collectNodeData_attrs(self, node, key): ''' Capture and build attribute data from this node and fill the data to the datamap[key] ''' channels=r9Anim.getSettableChannels(node,incStatics=True) if channels: self.poseDict[key]['attrs']={} for attr in channels: if attr in self.skipAttrs: log.debug('Skipping attr as requested : %s' % attr) continue try: if cmds.getAttr('%s.%s' % (node,attr),type=True)=='TdataCompound': # blendShape weights support attrs=cmds.aliasAttr(node, q=True)[::2] # extract the target channels from the multi for attr in attrs: self.poseDict[key]['attrs'][attr]=cmds.getAttr('%s.%s' % (node,attr)) else: self.poseDict[key]['attrs'][attr]=cmds.getAttr('%s.%s' % (node,attr)) except: log.debug('%s : attr is invalid in this instance' % attr) def _collectNodeData(self, node, key): ''' To Be Overloaded : what data to push into the main dataMap for each node found collected ''' self._collectNodeData_keyframes(node,key) self._collectNodeData_attrs(node, key) def _buildBlock_info(self): ''' Generic Info block for the data file, this could do with expanding ''' self.infoDict['author']=getpass.getuser() self.infoDict['date']=time.ctime() self.infoDict['metaPose']=self.metaPose if self.metaRig: self.infoDict['metaRigNode']=self.metaRig.mNode self.infoDict['metaRigNodeID']=self.metaRig.mNodeID if self.metaRig.hasAttr('version'): self.infoDict['version'] = self.metaRig.version if self.metaRig.hasAttr('rigType'): self.infoDict['rigType'] = self.metaRig.rigType if self.rootJnt: self.infoDict['skeletonRootJnt']=self.rootJnt def _buildBlock_poseDict(self, nodes): ''' Build the internal poseDict up from the given nodes. 
This is the core of the Pose System ''' getMirrorID=r9Anim.MirrorHierarchy().getMirrorCompiledID if self.metaPose: getMetaDict=self.metaRig.getNodeConnectionMetaDataMap # optimisation for i,node in enumerate(nodes): key=r9Core.nodeNameStrip(node) self.poseDict[key]={} self.poseDict[key]['ID']=i # selection order index self.poseDict[key]['longName']=node # longNode name mirrorID=getMirrorID(node) if mirrorID: self.poseDict[key]['mirrorID']=mirrorID if self.metaPose: self.poseDict[key]['metaData']=getMetaDict(node) # metaSystem the node is wired too self._collectNodeData(node, key) def _buildBlocks_to_run(self, nodes): ''' To Be Overloaded : What capture routines to run in order to build the DataMap ''' self.poseDict={} self._buildBlock_info() self._buildBlock_poseDict(nodes) def buildDataMap(self, nodes): ''' build the internal dataMap dict, useful as a separate func so it can be used in the PoseCompare class easily. This is the main internal call for managing the actual pose data for save ..note: this replaces the original pose call self.buildInternalPoseData() ''' self.metaRig=None self.rootJnt=None if not type(nodes)==list: nodes=[nodes] # cast to list for consistency rootNode=nodes[0] if self.settings.filterIsActive() and self.useFilter: if self.metaPose: if self.setMetaRig(rootNode): self.rootJnt=self.metaRig.getSkeletonRoots() if self.rootJnt: self.rootJnt=self.rootJnt[0] else: if cmds.attributeQuery('exportSkeletonRoot',node=rootNode,exists=True): connectedSkel=cmds.listConnections('%s.%s' % (rootNode,'exportSkeletonRoot'),destination=True,source=True) if connectedSkel and cmds.nodeType(connectedSkel)=='joint': self.rootJnt=connectedSkel[0] elif cmds.nodeType(rootNode)=='joint': self.rootJnt=rootNode if cmds.attributeQuery('animSkeletonRoot',node=rootNode,exists=True): connectedSkel=cmds.listConnections('%s.%s' % (rootNode,'animSkeletonRoot'),destination=True,source=True) if connectedSkel and cmds.nodeType(connectedSkel)=='joint': self.rootJnt=connectedSkel[0] elif cmds.nodeType(rootNode)=='joint': self.rootJnt=rootNode else: if self.metaPose: self.setMetaRig(rootNode) #fill the skip list, these attrs will be totally ignored by the code self.skipAttrs=self.getSkippedAttrs(rootNode) if self.hasFolderOverload(): # and self.useFilter: nodesToStore=self.getNodesFromFolderConfig(nodes,mode='save') else: nodesToStore=self.getNodes(nodes) if not nodesToStore: raise IOError('No Matching Nodes found to store the pose data from') self._buildBlocks_to_run(nodesToStore) # Data Mapping - Apply the dataMap ------------------------------------------------ def _matchNodes_to_data(self, nodes): ''' pre-loader function that processes all the nodes and data prior to actually calling the load... why? this is for the poseMixer for speed. 
This reads the file, matches the nodes to the internal file data and fills up the self.matchedPairs data [(src,dest),(src,dest)] ..note: this replaced the original call self._poseLoad_buildcache() ''' if not type(nodes)==list: nodes=[nodes] # cast to list for consistency if self.metaPose: self.setMetaRig(nodes[0]) if self.filepath and not os.path.exists(self.filepath): raise StandardError('Given Path does not Exist') if self.filepath and self.hasFolderOverload(): # and useFilter: nodesToLoad = self.getNodesFromFolderConfig(nodes, mode='load') else: nodesToLoad=self.getNodes(nodes) if not nodesToLoad: raise StandardError('Nothing selected or returned by the filter to load the pose onto') if self.filepath: self._readPose(self.filepath) log.info('Pose Read Successfully from : %s' % self.filepath) if self.metaPose: if 'metaPose' in self.infoDict and self.metaRig: try: if eval(self.infoDict['metaPose']): self.matchMethod = 'metaData' except: self.matchMethod = 'metaData' else: log.debug('Warning, trying to load a NON metaPose to a MRig - switching to NameMatching') #fill the skip list, these attrs will be totally ignored by the code self.skipAttrs=self.getSkippedAttrs(nodes[0]) #Build the master list of matched nodes that we're going to apply data to #Note: this is built up from matching keys in the poseDict to the given nodes self.matchedPairs = self._matchNodesToPoseData(nodesToLoad) return nodesToLoad @r9General.Timer def _applyData_attrs(self, *args, **kws): ''' Load Example for attrs : use self.matchedPairs for the process list of pre-matched tuples of (poseDict[key], node in scene) ''' for key, dest in self.matchedPairs: log.debug('Applying Key Block : %s' % key) try: if not 'attrs' in self.poseDict[key]: continue for attr, val in self.poseDict[key]['attrs'].items(): try: val = eval(val) except: pass log.debug('node : %s : attr : %s : val %s' % (dest, attr, val)) try: cmds.setAttr('%s.%s' % (dest, attr), val) except StandardError, err: log.debug(err) except: log.debug('Pose Object Key : %s : has no Attr block data' % key) @r9General.Timer def _applyData_keyframes(self, offset=0.0, *args, **kws): ''' Load Example for keyframe data : use self.matchedPairs for the process list of pre-matched tuples of (poseDict[key], node in scene) ''' for key, dest in self.matchedPairs: log.debug('Applying Key Block : %s' % key) if not 'keydata' in self.poseDict[key]: continue for attr, keydata in self.poseDict[key]['keydata'].items(): try: chn='%s.%s' % (dest, attr) #log.debug('node : %s : attr : %s : keydata : %s' % (dest, attr, str(keydata))) for ktime, value, inTan, outTan in eval(keydata): cmds.setKeyframe(chn, t=ktime, v=value, itt=inTan, ott=outTan) except StandardError, err: log.debug('failed to set animData for key : %s.%s' % (dest,attr)) def _applyData(self, *args, **kws): ''' To Be Overloaded ''' self._applyData_keyframes() # Process the data ------------------------------------------------- def _writePose(self, filepath): ''' Write the Pose ConfigObj to file ''' ConfigObj = configobj.ConfigObj(indent_type='\t') ConfigObj['filterNode_settings']=self.settings.__dict__ ConfigObj['poseData']=self.poseDict ConfigObj['info']=self.infoDict if self.skeletonDict: ConfigObj['skeletonDict']=self.skeletonDict ConfigObj.filename = filepath ConfigObj.write() @r9General.Timer def _readPose(self, filename): ''' Read the pose file and build up the internal poseDict TODO: do we allow the data to be filled from the pose filter thats stored??????? 
''' if filename: if os.path.exists(filename): #for key, val in configobj.ConfigObj(filename)['filterNode_settings'].items(): # self.settings.__dict__[key]=decodeString(val) self.poseDict=configobj.ConfigObj(filename)['poseData'] if 'info' in configobj.ConfigObj(filename): self.infoDict=configobj.ConfigObj(filename)['info'] if 'skeletonDict' in configobj.ConfigObj(filename): self.skeletonDict=configobj.ConfigObj(filename)['skeletonDict'] else: raise StandardError('Given filepath doesnt not exist : %s' % filename) else: raise StandardError('No FilePath given to read the pose from') @r9General.Timer def _matchNodesToPoseData(self, nodes): ''' Main filter to extract matching data pairs prior to processing return : tuple such that : (poseDict[key], destinationNode) NOTE: I've changed this so that matchMethod is now an internal PoseData attr :param nodes: nodes to try and match from the poseDict ''' matchedPairs=[] log.info('using matchMethod : %s' % self.matchMethod) if self.matchMethod=='stripPrefix' or self.matchMethod=='base': log.info('matchMethodStandard : %s' % self.matchMethod) matchedPairs=r9Core.matchNodeLists([key for key in self.poseDict.keys()], nodes, matchMethod=self.matchMethod) if self.matchMethod=='index': for i, node in enumerate(nodes): for key in self.poseDict.keys(): if int(self.poseDict[key]['ID'])==i: matchedPairs.append((key,node)) log.info('poseKey : %s %s >> matchedSource : %s %i' % (key, self.poseDict[key]['ID'], node, i)) break if self.matchMethod=='mirrorIndex': getMirrorID=r9Anim.MirrorHierarchy().getMirrorCompiledID for node in nodes: mirrorID=getMirrorID(node) if not mirrorID: continue for key in self.poseDict.keys(): if self.poseDict[key]['mirrorID'] and self.poseDict[key]['mirrorID']==mirrorID: matchedPairs.append((key,node)) log.info('poseKey : %s %s >> matched MirrorIndex : %s' % (key, node, self.poseDict[key]['mirrorID'])) break if self.matchMethod=='metaData': getMetaDict=self.metaRig.getNodeConnectionMetaDataMap # optimisation poseKeys=dict(self.poseDict) # optimisation for node in nodes: try: metaDict=getMetaDict(node) for key in poseKeys: if poseKeys[key]['metaData']==metaDict: matchedPairs.append((key,node)) poseKeys.pop(key) break except: log.info('FAILURE to load MetaData pose blocks - Reverting to Name') matchedPairs=r9Core.matchNodeLists([key for key in self.poseDict.keys()], nodes) return matchedPairs def matchInternalPoseObjects(self, nodes=None, fromFilter=True): ''' This is a throw-away and only used in the UI to select for debugging! from a given poseFile return or select the internal stored objects ''' InternalNodes=[] if not fromFilter: #no filter, we just pass in the longName thats stored for key in self.poseDict.keys(): if cmds.objExists(self.poseDict[key]['longName']): InternalNodes.append(self.poseDict[key]['longName']) elif cmds.objExists(key): InternalNodes.append(key) elif cmds.objExists(r9Core.nodeNameStrip(key)): InternalNodes.append(r9Core.nodeNameStrip(key)) else: #use the internal Poses filter and then Match against scene nodes if self.settings.filterIsActive(): filterData=r9Core.FilterNode(nodes,self.settings).ProcessFilter() matchedPairs=self._matchNodesToPoseData(filterData) if matchedPairs: InternalNodes=[node for _,node in matchedPairs] if not InternalNodes: raise StandardError('No Matching Nodes found!!') return InternalNodes #Main Calls ---------------------------------------- @r9General.Timer def saveData(self, nodes, filepath=None, useFilter=True, storeThumbnail=True): ''' Entry point for the generic PoseSave. 
:param nodes: nodes to store the data against OR the rootNode if the filter is active. :param filepath: posefile to save - if not given the pose is cached on this class instance. :param useFilter: use the filterSettings or not. ''' #push args to object - means that any poseHandler.py file has access to them self.filepath=filepath self.useFilter=useFilter if self.filepath: log.debug('PosePath given : %s' % filepath) self.buildDataMap(nodes) if self.filepath: self._writePose(filepath) if storeThumbnail: sel=cmds.ls(sl=True,l=True) cmds.select(cl=True) r9General.thumbNailScreen(filepath,self.thumbnailRes[0],self.thumbnailRes[1]) if sel: cmds.select(sel) log.info('Data Saved Successfully to : %s' % filepath) @r9General.Timer def loadData(self, nodes, filepath=None, useFilter=True, *args, **kws): ''' Entry point for the generic DataLoad. :param nodes: if given load the data to only these. If given and filter=True this is the rootNode for the filter. :param filepath: posefile to load - if not given the pose is loaded from a cached instance on this class. :param useFilter: If the pose has an active Filter_Settings block and this is True then use the filter on the destination hierarchy. ''' if not type(nodes)==list: nodes=[nodes] # cast to list for consistency #push args to object - means that any poseHandler.py file has access to them self.filepath = filepath self.useFilter = useFilter # used in the getNodes call nodesToLoad = self._matchNodes_to_data(nodes) if not self.matchedPairs: raise StandardError('No Matching Nodes found in the PoseFile!') else: if self.prioritySnapOnly: #we've already filtered the hierarchy, may as well just filter the results for speed nodesToLoad=r9Core.prioritizeNodeList(nodesToLoad, self.settings.filterPriority, regex=True, prioritysOnly=True) nodesToLoad.reverse() # nodes now matched, apply the data in the dataMap self._applyData() class PoseData(DataMap): ''' The PoseData is stored per node inside an internal dict as follows: >>> node = '|group|Rig|Body|TestCtr' >>> poseDict['TestCtr'] >>> poseDict['TestCtr']['ID'] = 0 index in the Hierarchy used to build the data up >>> poseDict['TestCtr']['longName'] = '|group|Rig|Body|TestCtr' >>> poseDict['TestCtr']['attrs']['translateX'] = 0.5 >>> poseDict['TestCtr']['attrs']['translateY'] = 1.0 >>> poseDict['TestCtr']['attrs']['translateZ'] = 22 >>> >>> #if we're storing as MetaData we also include: >>> poseDict['TestCtr']['metaData']['metaAttr'] = CTRL_L_Thing = the attr that wires this node to the MetaSubsystem >>> poseDict['TestCtr']['metaData']['metaNodeID'] = L_Arm_System = the metaNode this node is wired to via the above attr Matching of nodes against this dict is via either the nodeName, nodeIndex (ID) or the metaData block. New functionality allows you to use the main calls to cache a pose and reload it from this class instance, wraps things up nicely for you: >>> pose=r9Pose.PoseData() >>> pose.metaPose=True >>> >>> #cache the pose (just don't pass in a filePath) >>> pose.poseSave(cmds.ls(sl=True)) >>> #reload the cache you just stored >>> pose.poseLoad(cmds.ls(sl=True)) .. note:: If the root node of the hierarchy passed into the poseSave() has a message attr 'exportSkeletonRoot' or 'animSkeletonRoot' and that message is connected to a skeleton then the pose will also include an internal 'skeleton' pose, storing all child joints into a separate block in the poseFile that can be used by the PoseCompare class/function. 
For metaData based rigs this calls a function on the metaRig class getSkeletonRoots() which wraps the 'exportSkeletonRoot' attr, allowing you to overload this behaviour in your own MetaRig subclasses. ''' def __init__(self, filterSettings=None, *args, **kws): ''' I'm not passing any data in terms of nodes here, We'll deal with those in the PoseSave and PoseLoad calls. Leaves this open for expansion ''' super(PoseData, self).__init__(filterSettings=filterSettings, *args,**kws) self.poseDict={} self.infoDict={} self.skeletonDict={} self.posePointCloudNodes=[] self.poseCurrentCache={} # cached dict storing the current state of the objects prior to applying the pose self.relativePose=False self.relativeRots='projected' self.relativeTrans='projected' def _collectNodeData(self, node, key): ''' collect the attr data from the node and add it to the poseDict[key] ''' self._collectNodeData_attrs(node, key) def _buildBlock_skeletonData(self, rootJnt): ''' :param rootNode: root of the skeleton to process ''' self.skeletonDict={} if not rootJnt: log.info('skeleton rootJnt joint was not found') return fn=r9Core.FilterNode(rootJnt) fn.settings.nodeTypes='joint' fn.settings.incRoots=False skeleton=fn.ProcessFilter() for jnt in skeleton: key=r9Core.nodeNameStrip(jnt) self.skeletonDict[key]={} self.skeletonDict[key]['attrs']={} for attr in ['translateX','translateY','translateZ', 'rotateX','rotateY','rotateZ']: try: self.skeletonDict[key]['attrs'][attr]=cmds.getAttr('%s.%s' % (jnt,attr)) except: log.debug('%s : attr is invalid in this instance' % attr) def _buildBlocks_to_run(self, nodes): ''' What capture routines to run in order to build the poseDict data ''' self.poseDict={} self._buildBlock_info() self._buildBlock_poseDict(nodes) self._buildBlock_skeletonData(self.rootJnt) def _cacheCurrentNodeStates(self): ''' this is purely for the _applyPose with percent and optimization for the UI's ''' log.info('updating the currentCache') self.poseCurrentCache={} for key, dest in self.matchedPairs: log.debug('caching current node data : %s' % key) self.poseCurrentCache[key]={} if not 'attrs' in self.poseDict[key]: continue for attr, _ in self.poseDict[key]['attrs'].items(): try: self.poseCurrentCache[key][attr]=cmds.getAttr('%s.%s' % (dest, attr)) except: log.debug('Attr mismatch on destination : %s.%s' % (dest, attr)) @r9General.Timer def _applyData(self, percent=None): ''' :param percent: percent of the pose to load ''' mix_percent=False # gets over float values of zero from failing if percent or type(percent)==float: mix_percent=True if not self.poseCurrentCache: self._cacheCurrentNodeStates() for key, dest in self.matchedPairs: log.debug('Applying Key Block : %s' % key) try: if not 'attrs' in self.poseDict[key]: continue for attr, val in self.poseDict[key]['attrs'].items(): if attr in self.skipAttrs: log.debug('Skipping attr as requested : %s' % attr) continue try: val = eval(val) except: pass log.debug('node : %s : attr : %s : val %s' % (dest, attr, val)) try: if not mix_percent: cmds.setAttr('%s.%s' % (dest, attr), val) else: current = self.poseCurrentCache[key][attr] blendVal = ((val - current) / 100) * percent # print 'loading at percent : %s (current=%s , stored=%s' % (percent,current,current+blendVal) cmds.setAttr('%s.%s' % (dest, attr), current + blendVal) except StandardError, err: log.debug(err) except: log.debug('Pose Object Key : %s : has no Attr block data' % key) #Main Calls ---------------------------------------- @r9General.Timer def poseSave(self, nodes, filepath=None, useFilter=True, 
storeThumbnail=True): ''' Entry point for the generic PoseSave. :param nodes: nodes to store the data against OR the rootNode if the filter is active. :param filepath: posefile to save - if not given the pose is cached on this class instance. :param useFilter: use the filterSettings or not. :param storeThumbnail: generate and store a thu8mbnail from the screen to go alongside the pose ''' #push args to object - means that any poseHandler.py file has access to them self.filepath=filepath self.useFilter=useFilter if self.filepath: log.debug('PosePath given : %s' % filepath) self.buildDataMap(nodes) if self.filepath: self._writePose(filepath) if storeThumbnail: sel=cmds.ls(sl=True,l=True) cmds.select(cl=True) r9General.thumbNailScreen(filepath,self.thumbnailRes[0],self.thumbnailRes[1]) if sel: cmds.select(sel) log.info('Pose Saved Successfully to : %s' % filepath) @r9General.Timer def poseLoad(self, nodes, filepath=None, useFilter=True, relativePose=False, relativeRots='projected', relativeTrans='projected', maintainSpaces=False, percent=None): ''' Entry point for the generic PoseLoad. :param nodes: if given load the data to only these. If given and filter=True this is the rootNode for the filter. :param filepath: posefile to load - if not given the pose is loaded from a cached instance on this class. :param useFilter: If the pose has an active Filter_Settings block and this is True then use the filter on the destination hierarchy. :param relativePose: kick in the posePointCloud to align the loaded pose relatively to the selected node. :param relativeRots: 'projected' or 'absolute' - how to calculate the offset. :param relativeTrans: 'projected' or 'absolute' - how to calculate the offset. :param maintainSpaces: this preserves any parentSwitching mismatches between the stored pose and the current rig settings, current spaces are maintained. This only checks those nodes in the snapList and only runs under relative mode. 
''' if relativePose and not cmds.ls(sl=True): raise StandardError('Nothing selected to align Relative Pose too') if not type(nodes)==list: nodes=[nodes] # cast to list for consistency #push args to object - means that any poseHandler.py file has access to them self.relativePose = relativePose self.relativeRots = relativeRots self.relativeTrans = relativeTrans self.PosePointCloud = None self.filepath = filepath self.useFilter = useFilter # used in the getNodes call self.maintainSpaces = maintainSpaces self.mayaUpAxis = r9Setup.mayaUpAxis() nodesToLoad = self._matchNodes_to_data(nodes) if not self.matchedPairs: raise StandardError('No Matching Nodes found in the PoseFile!') else: if self.relativePose: if self.prioritySnapOnly: #we've already filtered the hierarchy, may as well just filter the results for speed nodesToLoad=r9Core.prioritizeNodeList(nodesToLoad, self.settings.filterPriority, regex=True, prioritysOnly=True) nodesToLoad.reverse() #setup the PosePointCloud ------------------------------------------------- reference=cmds.ls(sl=True,l=True)[0] self.PosePointCloud=PosePointCloud(nodesToLoad) self.PosePointCloud.buildOffsetCloud(reference, raw=True) resetCache=[cmds.getAttr('%s.translate' % self.PosePointCloud.posePointRoot), cmds.getAttr('%s.rotate' % self.PosePointCloud.posePointRoot)] if self.maintainSpaces: if self.metaRig: parentSpaceCache=self.getMaintainedAttrs(nodesToLoad, self.metaRig.parentSwitchAttr) elif 'parentSpaces' in self.settings.rigData: parentSpaceCache=self.getMaintainedAttrs(nodesToLoad, self.settings.rigData['parentSpaces']) self._applyData(percent) if self.relativePose: #snap the poseCloud to the new xform of the referenced node, snap the cloud #to the pose, reset the clouds parent to the cached xform and then snap the #nodes back to the cloud r9Anim.AnimFunctions.snap([reference,self.PosePointCloud.posePointRoot]) if self.relativeRots=='projected': if self.mayaUpAxis=='y': cmds.setAttr('%s.rx' % self.PosePointCloud.posePointRoot,0) cmds.setAttr('%s.rz' % self.PosePointCloud.posePointRoot,0) elif self.mayaUpAxis=='z': # fucking Z!!!!!! cmds.setAttr('%s.rx' % self.PosePointCloud.posePointRoot,0) cmds.setAttr('%s.ry' % self.PosePointCloud.posePointRoot,0) self.PosePointCloud._snapPosePntstoNodes() if not self.relativeTrans=='projected': cmds.setAttr('%s.translate' % self.PosePointCloud.posePointRoot, resetCache[0][0][0], resetCache[0][0][1], resetCache[0][0][2]) if not self.relativeRots=='projected': cmds.setAttr('%s.rotate' % self.PosePointCloud.posePointRoot, resetCache[1][0][0], resetCache[1][0][1], resetCache[1][0][2]) if self.relativeRots=='projected': if self.mayaUpAxis=='y': cmds.setAttr('%s.ry' % self.PosePointCloud.posePointRoot,resetCache[1][0][1]) elif self.mayaUpAxis=='z': # fucking Z!!!!!! cmds.setAttr('%s.rz' % self.PosePointCloud.posePointRoot,resetCache[1][0][2]) if self.relativeTrans=='projected': if self.mayaUpAxis=='y': cmds.setAttr('%s.tx' % self.PosePointCloud.posePointRoot,resetCache[0][0][0]) cmds.setAttr('%s.tz' % self.PosePointCloud.posePointRoot,resetCache[0][0][2]) elif self.mayaUpAxis=='z': # fucking Z!!!!!! 
cmds.setAttr('%s.tx' % self.PosePointCloud.posePointRoot,resetCache[0][0][0]) cmds.setAttr('%s.ty' % self.PosePointCloud.posePointRoot,resetCache[0][0][1]) #if maintainSpaces then restore the original parentSwitch attr values #BEFORE pushing the point cloud data back to the rig if self.maintainSpaces and parentSpaceCache: # and self.metaRig: for child,attr,value in parentSpaceCache: log.debug('Resetting parentSwitches : %s.%s = %f' % (r9Core.nodeNameStrip(child),attr,value)) cmds.setAttr('%s.%s' % (child,attr), value) self.PosePointCloud._snapNodestoPosePnts() self.PosePointCloud.delete() cmds.select(reference) class PosePointCloud(object): ''' PosePointCloud is the technique inside the PoseSaver used to snap the pose into relative space. It's been added as a tool in it's own right as it's sometimes useful to be able to shift poses in global space. ''' def __init__(self, nodes, filterSettings=None, mesh=None): ''' :param rootReference: the object to be used as the PPT's pivot reference :param nodes: feed the nodes to process in as a list, if a filter is given then these are the rootNodes for it :param filterSettings: pass in a filterSettings object to filter the given hierarchy :param mesh: this is really for reference, rather than make a locator, pass in a reference geo which is then shapeSwapped for the PPC root node giving great reference! ''' self.mesh = mesh self.refMesh = 'posePointCloudGeoRef' self.refMeshShape = 'posePointCloudGeoRefShape' self.mayaUpAxis = r9Setup.mayaUpAxis() self.inputNodes = nodes # inputNodes for processing self.posePointCloudNodes = [] # generated ppt nodes self.posePointRoot = None self.settings = None self.prioritySnapOnly=False # ONLY make ppt points for the filterPriority nodes if filterSettings: if not issubclass(type(filterSettings), r9Core.FilterNode_Settings): raise StandardError('filterSettings param requires an r9Core.FilterNode_Settings object') elif filterSettings.filterIsActive(): self.settings=filterSettings def buildOffsetCloud(self, rootReference=None, raw=False): ''' Build a point cloud up for each node in nodes :param nodes: list of objects to be in the cloud :param rootReference: the node used for the initial pivot location :param raw: build the cloud but DON'T snap the nodes into place - an optimisation for the PoseLoad sequence ''' self.posePointRoot=cmds.ls(cmds.spaceLocator(name='posePointCloud'),l=True)[0] ppcShape=cmds.listRelatives(self.posePointRoot,type='shape')[0] cmds.setAttr("%s.localScaleZ" % ppcShape, 30) cmds.setAttr("%s.localScaleX" % ppcShape, 30) cmds.setAttr("%s.localScaleY" % ppcShape, 30) if self.settings: if self.prioritySnapOnly: self.settings.searchPattern=self.settings.filterPriority self.inputNodes=r9Core.FilterNode(self.inputNodes, self.settings).ProcessFilter() if self.inputNodes: self.inputNodes.reverse() # for the snapping operations if self.mayaUpAxis=='y': cmds.setAttr('%s.rotateOrder' % self.posePointRoot, 2) if rootReference: # and not mesh: r9Anim.AnimFunctions.snap([rootReference,self.posePointRoot]) for node in self.inputNodes: pnt=cmds.spaceLocator(name='pp_%s' % r9Core.nodeNameStrip(node))[0] if not raw: r9Anim.AnimFunctions.snap([node,pnt]) cmds.parent(pnt,self.posePointRoot) self.posePointCloudNodes.append((pnt,node)) cmds.select(self.posePointRoot) if self.mesh: self.shapeSwapMesh() return self.posePointCloudNodes def _snapPosePntstoNodes(self): ''' snap each pntCloud point to their respective Maya nodes ''' for pnt,node in self.posePointCloudNodes: log.debug('snapping PPT : %s' % pnt) 
r9Anim.AnimFunctions.snap([node,pnt]) def _snapNodestoPosePnts(self): ''' snap each MAYA node to it's respective pntCloud point ''' for pnt, node in self.posePointCloudNodes: log.debug('snapping Ctrl : %s > %s : %s' % (r9Core.nodeNameStrip(node), pnt, node)) r9Anim.AnimFunctions.snap([pnt,node]) def shapeSwapMesh(self): ''' Swap the mesh Geo so it's a shape under the PPC transform root TODO: Make sure that the duplicate message link bug is covered!! ''' cmds.duplicate(self.mesh,rc=True,n=self.refMesh)[0] r9Core.LockChannels().processState(self.refMesh,['tx','ty','tz','rx','ry','rz','sx','sy','sz'],\ mode='fullkey',hierarchy=False) try: #turn on the overrides so the duplicate geo can be selected cmds.setAttr("%s.overrideDisplayType" % self.refMeshShape, 0) cmds.setAttr("%s.overrideEnabled" % self.refMeshShape, 1) cmds.setAttr("%s.overrideLevelOfDetail" % self.refMeshShape, 0) except: log.debug('Couldnt set the draw overrides for the refGeo') cmds.parent(self.refMesh,self.posePointRoot) cmds.makeIdentity(self.refMesh,apply=True,t=True,r=True) cmds.parent(self.refMeshShape,self.posePointRoot,r=True,s=True) cmds.delete(self.refMesh) def applyPosePointCloud(self): self._snapNodestoPosePnts() def updatePosePointCloud(self): self._snapPosePntstoNodes() if self.mesh: cmds.delete(self.refMeshShape) self.shapeSwapMesh() cmds.refresh() def delete(self): cmds.delete(self.posePointRoot) class PoseCompare(object): ''' This is aimed at comparing a rigs current pose with a given one, be that a pose file on disc, a pose class object, or even a poseObject against another. It will compare either the main [poseData].keys or the ['skeletonDict'].keys and for key in keys compare, with tolerance, the [attrs] block. >>> #build an mPose object and fill the internal poseDict >>> mPoseA=r9Pose.PoseData() >>> mPoseA.metaPose=True >>> mPoseA.buildInternalPoseData(cmds.ls(sl=True)) >>> >>> mPoseB=r9Pose.PoseData() >>> mPoseB.metaPose=True >>> mPoseB.buildInternalPoseData(cmds.ls(sl=True)) >>> >>> compare=r9Pose.PoseCompare(mPoseA,mPoseB) >>> >>> #.... or .... >>> compare=r9Pose.PoseCompare(mPoseA,'H:/Red9PoseTests/thisPose.pose') >>> #.... or .... >>> compare=r9Pose.PoseCompare('H:/Red9PoseTests/thisPose.pose','H:/Red9PoseTests/thatPose.pose') >>> >>> compare.compare() #>> bool, True = same >>> compare.fails['failedAttrs'] ''' def __init__(self, currentPose, referencePose, angularTolerance=0.1, linearTolerance=0.01, compareDict='poseDict', filterMap=[], ignoreBlocks=[]): ''' Make sure we have 2 PoseData objects to compare :param currentPose: either a PoseData object or a valid pose file :param referencePose: either a PoseData object or a valid pose file :param tolerance: tolerance by which floats are matched :param angularTolerance: the tolerance used to check rotate attr float values :param linearTolerance: the tolerance used to check all other float attrs :param compareDict: the internal main dict in the pose file to compare the data with :param filterMap: if given this is used as a high level filter, only matching nodes get compared others get skipped. Good for passing in a mater core skeleton to test whilst ignoring extra nodes :param ignoreBlocks: allows the given failure blocks to be ignored. We mainly use this for ['missingKeys'] .. 
note:: In the new setup if the skeletonRoot jnt is found we add a whole new dict to serialize the current skeleton data to the pose, this means that we can compare a pose on a rig via the internal skeleton transforms as well as the actual rig controllers...makes validation a lot more accurate for export * 'poseDict' = [poseData] main controller data * 'skeletonDict' = [skeletonDict] block generated if exportSkeletonRoot is connected * 'infoDict' = [info] block ''' self.status = False self.compareDict = compareDict self.angularTolerance = angularTolerance self.angularAttrs = ['rotateX', 'rotateY', 'rotateZ'] self.linearTolerance = linearTolerance self.linearAttrs = ['translateX', 'translateY', 'translateZ'] self.filterMap = filterMap self.ignoreBlocks = ignoreBlocks if isinstance(currentPose, PoseData): self.currentPose = currentPose elif os.path.exists(currentPose): self.currentPose = PoseData() self.currentPose._readPose(currentPose) elif not os.path.exists(referencePose): raise IOError('Given CurrentPose Path is invalid!') if isinstance(referencePose, PoseData): self.referencePose = referencePose elif os.path.exists(referencePose): self.referencePose = PoseData() self.referencePose._readPose(referencePose) elif not os.path.exists(referencePose): raise IOError('Given ReferencePose Path is invalid!') def __addFailedAttr(self, key, attr): ''' add failed attrs data to the dict ''' if not 'failedAttrs' in self.fails: self.fails['failedAttrs'] = {} if not key in self.fails['failedAttrs']: self.fails['failedAttrs'][key] = {} if not 'attrMismatch' in self.fails['failedAttrs'][key]: self.fails['failedAttrs'][key]['attrMismatch'] = [] self.fails['failedAttrs'][key]['attrMismatch'].append(attr) def compare(self): ''' Compare the 2 PoseData objects via their internal [key][attrs] blocks return a bool. 
After processing self.fails is a dict holding all the fails for processing later if required ''' self.fails = {} logprint = 'PoseCompare returns : %s ========================================\n' % self.compareDict currentDic = getattr(self.currentPose, self.compareDict) referenceDic = getattr(self.referencePose, self.compareDict) if not currentDic or not referenceDic: raise StandardError('missing pose section <<%s>> compare aborted' % self.compareDict) for key, attrBlock in currentDic.items(): if self.filterMap and not key in self.filterMap: log.debug('node not in filterMap - skipping key %s' % key) continue if key in referenceDic: referenceAttrBlock = referenceDic[key] else: if not 'missingKeys' in self.ignoreBlocks: logprint += 'ERROR: Key Mismatch : %s\n' % key if not 'missingKeys' in self.fails: self.fails['missingKeys'] = [] self.fails['missingKeys'].append(key) else: log.debug('missingKeys in ignoreblock : node is missing from data but being skipped "%s"' % key) continue if not 'attrs' in attrBlock: log.debug('%s node has no attrs block in the pose' % key) continue for attr, value in attrBlock['attrs'].items(): # attr missing completely from the key if not attr in referenceAttrBlock['attrs']: if not 'failedAttrs' in self.fails: self.fails['failedAttrs'] = {} if not key in self.fails['failedAttrs']: self.fails['failedAttrs'][key] = {} if not 'missingAttrs' in self.fails['failedAttrs'][key]: self.fails['failedAttrs'][key]['missingAttrs'] = [] self.fails['failedAttrs'][key]['missingAttrs'].append(attr) # log.info('missing attribute in data : "%s.%s"' % (key,attr)) logprint += 'ERROR: Missing attribute in data : "%s.%s"\n' % (key, attr) continue # test the attrs value matches value = r9Core.decodeString(value) # decode as this may be a configObj refValue = r9Core.decodeString(referenceAttrBlock['attrs'][attr]) # decode as this may be a configObj if type(value) == float: matched = False if attr in self.angularAttrs: matched = r9Core.floatIsEqual(value, refValue, self.angularTolerance, allowGimbal=True) else: matched = r9Core.floatIsEqual(value, refValue, self.linearTolerance, allowGimbal=False) if not matched: self.__addFailedAttr(key, attr) # log.info('AttrValue float mismatch : "%s.%s" currentValue=%s >> expectedValue=%s' % (key,attr,value,refValue)) logprint += 'ERROR: AttrValue float mismatch : "%s.%s" currentValue=%s >> expectedValue=%s\n' % (key, attr, value, refValue) continue elif not value == refValue: self.__addFailedAttr(key, attr) # log.info('AttrValue mismatch : "%s.%s" currentValue=%s >> expectedValue=%s' % (key,attr,value,refValue)) logprint += 'ERROR: AttrValue mismatch : "%s.%s" currentValue=%s >> expectedValue=%s\n' % (key, attr, value, refValue) continue if 'missingKeys' in self.fails or 'failedAttrs' in self.fails: logprint += 'PoseCompare returns : ========================================' print logprint return False self.status = True return True def batchPatchPoses(posedir, config, poseroot, load=True, save=True, patchfunc=None,\ relativePose=False, relativeRots=False, relativeTrans=False): ''' whats this?? a fast method to run through all the poses in a given dictionary and update or patch them. If patchfunc isn't given it'll just run through and resave the pose - updating the systems if needed. If it is then it gets run between the load and save calls. 
:param posedir: directory of poses to process :param config: hierarchy settings cfg to use to ID the nodes (hierarchy tab preset = filterSettings object) :param poseroot: root node to the filters - poseTab rootNode/MetaRig root :param patchfunc: optional function to run between the load and save call in processing, great for fixing issues on mass with poses. Note we now pass pose file back into this func as an arg :param load: should the batch load the pose :param save: should the batch resave the pose ''' filterObj=r9Core.FilterNode_Settings() filterObj.read(os.path.join(r9Setup.red9ModulePath(), 'presets', config)) # 'Crytek_New_Meta.cfg')) mPose=PoseData(filterObj) files=os.listdir(posedir) files.sort() for f in files: if f.lower().endswith('.pose'): if load: mPose.poseLoad(poseroot, os.path.join(posedir,f), useFilter=True, relativePose=relativePose, relativeRots=relativeRots, relativeTrans=relativeTrans) if patchfunc: patchfunc(f) if save: mPose.poseSave(poseroot, os.path.join(posedir,f), useFilter=True, storeThumbnail=False) log.info('Processed Pose File : %s' % f)
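# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original Red9 module): a minimal
# save / reload / validate round-trip assembled from the class docstrings
# above. The pose path is a placeholder, nothing is executed on import, and
# it is meant to be run inside Maya with the rig controllers selected.
# ---------------------------------------------------------------------------
def _examplePoseRoundTrip(posePath='C:/temp/example.pose'):
    nodes = cmds.ls(sl=True, l=True)

    # store the current pose of the selection to disk (no hierarchy filter)
    pose = PoseData()
    pose.poseSave(nodes, filepath=posePath, useFilter=False, storeThumbnail=False)

    # ... pose the rig differently here ...

    # push the stored pose back onto the same selection
    pose.poseLoad(nodes, filepath=posePath, useFilter=False)

    # validate that the scene now matches the file, within the default tolerances
    current = PoseData()
    current.buildDataMap(nodes)
    compare = PoseCompare(current, posePath)
    if not compare.compare():
        log.info('pose mismatch : %s' % compare.fails)
    return compare.status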
# Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import os import time import uuid import fixtures from keystoneauth1 import loading as ks_loading import openstack.config as occ from oslo_config import cfg from requests import structures from requests_mock.contrib import fixture as rm_fixture from six.moves import urllib import tempfile import openstack.cloud import openstack.connection from openstack.tests import fakes from openstack.fixture import connection as os_fixture from openstack.tests import base _ProjectData = collections.namedtuple( 'ProjectData', 'project_id, project_name, enabled, domain_id, description, ' 'json_response, json_request') _UserData = collections.namedtuple( 'UserData', 'user_id, password, name, email, description, domain_id, enabled, ' 'json_response, json_request') _GroupData = collections.namedtuple( 'GroupData', 'group_id, group_name, domain_id, description, json_response, ' 'json_request') _DomainData = collections.namedtuple( 'DomainData', 'domain_id, domain_name, description, json_response, ' 'json_request') _ServiceData = collections.namedtuple( 'Servicedata', 'service_id, service_name, service_type, description, enabled, ' 'json_response_v3, json_response_v2, json_request') _EndpointDataV3 = collections.namedtuple( 'EndpointData', 'endpoint_id, service_id, interface, region, url, enabled, ' 'json_response, json_request') _EndpointDataV2 = collections.namedtuple( 'EndpointData', 'endpoint_id, service_id, region, public_url, internal_url, ' 'admin_url, v3_endpoint_list, json_response, ' 'json_request') # NOTE(notmorgan): Shade does not support domain-specific roles # This should eventually be fixed if it becomes a main-stream feature. 
_RoleData = collections.namedtuple( 'RoleData', 'role_id, role_name, json_response, json_request') class TestCase(base.TestCase): strict_cloud = False def setUp(self, cloud_config_fixture='clouds.yaml'): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() # Sleeps are for real testing, but unit tests shouldn't need them realsleep = time.sleep def _nosleep(seconds): return realsleep(seconds * 0.0001) self.sleep_fixture = self.useFixture(fixtures.MonkeyPatch( 'time.sleep', _nosleep)) self.fixtures_directory = 'openstack/tests/unit/fixtures' self.os_fixture = self.useFixture( os_fixture.ConnectionFixture(project_id=fakes.PROJECT_ID)) # Isolate openstack.config from test environment config = tempfile.NamedTemporaryFile(delete=False) cloud_path = '%s/clouds/%s' % (self.fixtures_directory, cloud_config_fixture) with open(cloud_path, 'rb') as f: content = f.read() config.write(content) config.close() vendor = tempfile.NamedTemporaryFile(delete=False) vendor.write(b'{}') vendor.close() self.config = occ.OpenStackConfig( config_files=[config.name], vendor_files=[vendor.name], secure_files=['non-existant']) self.oslo_config_dict = { # All defaults for nova 'nova': {}, # monasca-api not in the service catalog 'monasca-api': {}, # Overrides for heat 'heat': { 'region_name': 'SpecialRegion', 'interface': 'internal', 'endpoint_override': 'https://example.org:8888/heat/v2' }, # test a service with dashes 'ironic_inspector': { 'endpoint_override': 'https://example.org:5050', }, } # FIXME(notmorgan): Convert the uri_registry, discovery.json, and # use of keystone_v3/v2 to a proper fixtures.Fixture. For now this # is acceptable, but eventually this should become it's own fixture # that encapsulates the registry, registering the URIs, and # assert_calls (and calling assert_calls every test case that uses # it on cleanup). Subclassing here could be 100% eliminated in the # future allowing any class to simply # self.useFixture(openstack.cloud.RequestsMockFixture) and get all # the benefits. # NOTE(notmorgan): use an ordered dict here to ensure we preserve the # order in which items are added to the uri_registry. This makes # the behavior more consistent when dealing with ensuring the # requests_mock uri/query_string matchers are ordered and parse the # request in the correct orders. 
self._uri_registry = collections.OrderedDict() self.discovery_json = os.path.join( self.fixtures_directory, 'discovery.json') self.use_keystone_v3() self.__register_uris_called = False def _load_ks_cfg_opts(self): conf = cfg.ConfigOpts() for group, opts in self.oslo_config_dict.items(): conf.register_group(cfg.OptGroup(group)) if opts is not None: ks_loading.register_adapter_conf_options(conf, group) for name, val in opts.items(): conf.set_override(name, val, group=group) return conf # TODO(shade) Update this to handle service type aliases def get_mock_url(self, service_type, interface='public', resource=None, append=None, base_url_append=None, qs_elements=None): endpoint_url = self.cloud.endpoint_for( service_type=service_type, interface=interface) # Strip trailing slashes, so as not to produce double-slashes below if endpoint_url.endswith('/'): endpoint_url = endpoint_url[:-1] to_join = [endpoint_url] qs = '' if base_url_append: to_join.append(base_url_append) if resource: to_join.append(resource) to_join.extend(append or []) if qs_elements is not None: qs = '?%s' % '&'.join(qs_elements) return '%(uri)s%(qs)s' % {'uri': '/'.join(to_join), 'qs': qs} def mock_for_keystone_projects(self, project=None, v3=True, list_get=False, id_get=False, project_list=None, project_count=None): if project: assert not (project_list or project_count) elif project_list: assert not (project or project_count) elif project_count: assert not (project or project_list) else: raise Exception('Must specify a project, project_list, ' 'or project_count') assert list_get or id_get base_url_append = 'v3' if v3 else None if project: project_list = [project] elif project_count: # Generate multiple projects project_list = [self._get_project_data(v3=v3) for c in range(0, project_count)] uri_mock_list = [] if list_get: uri_mock_list.append( dict(method='GET', uri=self.get_mock_url( service_type='identity', interface='admin', resource='projects', base_url_append=base_url_append), status_code=200, json={'projects': [p.json_response['project'] for p in project_list]}) ) if id_get: for p in project_list: uri_mock_list.append( dict(method='GET', uri=self.get_mock_url( service_type='identity', interface='admin', resource='projects', append=[p.project_id], base_url_append=base_url_append), status_code=200, json=p.json_response) ) self.__do_register_uris(uri_mock_list) return project_list def _get_project_data(self, project_name=None, enabled=None, domain_id=None, description=None, v3=True, project_id=None): project_name = project_name or self.getUniqueString('projectName') project_id = uuid.UUID(project_id or uuid.uuid4().hex).hex response = {'id': project_id, 'name': project_name} request = {'name': project_name} domain_id = (domain_id or uuid.uuid4().hex) if v3 else None if domain_id: request['domain_id'] = domain_id response['domain_id'] = domain_id if enabled is not None: enabled = bool(enabled) response['enabled'] = enabled request['enabled'] = enabled response.setdefault('enabled', True) request.setdefault('enabled', True) if description: response['description'] = description request['description'] = description request.setdefault('description', None) if v3: project_key = 'project' else: project_key = 'tenant' return _ProjectData(project_id, project_name, enabled, domain_id, description, {project_key: response}, {project_key: request}) def _get_group_data(self, name=None, domain_id=None, description=None): group_id = uuid.uuid4().hex name = name or self.getUniqueString('groupname') domain_id = uuid.UUID(domain_id or 
uuid.uuid4().hex).hex response = {'id': group_id, 'name': name, 'domain_id': domain_id} request = {'name': name, 'domain_id': domain_id} if description is not None: response['description'] = description request['description'] = description return _GroupData(group_id, name, domain_id, description, {'group': response}, {'group': request}) def _get_user_data(self, name=None, password=None, **kwargs): name = name or self.getUniqueString('username') password = password or self.getUniqueString('user_password') user_id = uuid.uuid4().hex response = {'name': name, 'id': user_id} request = {'name': name, 'password': password} if kwargs.get('domain_id'): kwargs['domain_id'] = uuid.UUID(kwargs['domain_id']).hex response['domain_id'] = kwargs.pop('domain_id') request['domain_id'] = response['domain_id'] response['email'] = kwargs.pop('email', None) request['email'] = response['email'] response['enabled'] = kwargs.pop('enabled', True) request['enabled'] = response['enabled'] response['description'] = kwargs.pop('description', None) if response['description']: request['description'] = response['description'] self.assertIs(0, len(kwargs), message='extra key-word args received ' 'on _get_user_data') return _UserData(user_id, password, name, response['email'], response['description'], response.get('domain_id'), response.get('enabled'), {'user': response}, {'user': request}) def _get_domain_data(self, domain_name=None, description=None, enabled=None): domain_id = uuid.uuid4().hex domain_name = domain_name or self.getUniqueString('domainName') response = {'id': domain_id, 'name': domain_name} request = {'name': domain_name} if enabled is not None: request['enabled'] = bool(enabled) response['enabled'] = bool(enabled) if description: response['description'] = description request['description'] = description response.setdefault('enabled', True) return _DomainData(domain_id, domain_name, description, {'domain': response}, {'domain': request}) def _get_service_data(self, type=None, name=None, description=None, enabled=True): service_id = uuid.uuid4().hex name = name or uuid.uuid4().hex type = type or uuid.uuid4().hex response = {'id': service_id, 'name': name, 'type': type, 'enabled': enabled} if description is not None: response['description'] = description request = response.copy() request.pop('id') return _ServiceData(service_id, name, type, description, enabled, {'service': response}, {'OS-KSADM:service': response}, request) def _get_endpoint_v3_data(self, service_id=None, region=None, url=None, interface=None, enabled=True): endpoint_id = uuid.uuid4().hex service_id = service_id or uuid.uuid4().hex region = region or uuid.uuid4().hex url = url or 'https://example.com/' interface = interface or uuid.uuid4().hex response = {'id': endpoint_id, 'service_id': service_id, 'region': region, 'interface': interface, 'url': url, 'enabled': enabled} request = response.copy() request.pop('id') response['region_id'] = response['region'] return _EndpointDataV3(endpoint_id, service_id, interface, region, url, enabled, {'endpoint': response}, {'endpoint': request}) def _get_endpoint_v2_data(self, service_id=None, region=None, public_url=None, admin_url=None, internal_url=None): endpoint_id = uuid.uuid4().hex service_id = service_id or uuid.uuid4().hex region = region or uuid.uuid4().hex response = {'id': endpoint_id, 'service_id': service_id, 'region': region} v3_endpoints = {} request = response.copy() request.pop('id') if admin_url: response['adminURL'] = admin_url v3_endpoints['admin'] = self._get_endpoint_v3_data( 
service_id, region, public_url, interface='admin') if internal_url: response['internalURL'] = internal_url v3_endpoints['internal'] = self._get_endpoint_v3_data( service_id, region, internal_url, interface='internal') if public_url: response['publicURL'] = public_url v3_endpoints['public'] = self._get_endpoint_v3_data( service_id, region, public_url, interface='public') request = response.copy() request.pop('id') for u in ('publicURL', 'internalURL', 'adminURL'): if request.get(u): request[u.lower()] = request.pop(u) return _EndpointDataV2(endpoint_id, service_id, region, public_url, internal_url, admin_url, v3_endpoints, {'endpoint': response}, {'endpoint': request}) def _get_role_data(self, role_name=None): role_id = uuid.uuid4().hex role_name = role_name or uuid.uuid4().hex request = {'name': role_name} response = request.copy() response['id'] = role_id return _RoleData(role_id, role_name, {'role': response}, {'role': request}) def use_broken_keystone(self): self.adapter = self.useFixture(rm_fixture.Fixture()) self.calls = [] self._uri_registry.clear() self.__do_register_uris([ dict(method='GET', uri='https://identity.example.com/', text=open(self.discovery_json, 'r').read()), dict(method='POST', uri='https://identity.example.com/v3/auth/tokens', status_code=400), ]) self._make_test_cloud(identity_api_version='3') def use_nothing(self): self.calls = [] self._uri_registry.clear() def get_keystone_v3_token( self, project_name='admin', ): return dict( method='POST', uri='https://identity.example.com/v3/auth/tokens', headers={ 'X-Subject-Token': self.getUniqueString('KeystoneToken') }, json=self.os_fixture.v3_token, validate=dict(json={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'domain': { 'name': 'default', }, 'name': 'admin', 'password': 'password' } } }, 'scope': { 'project': { 'domain': { 'name': 'default' }, 'name': project_name } } } }), ) def get_keystone_discovery(self): with open(self.discovery_json, 'r') as discovery_file: return dict( method='GET', uri='https://identity.example.com/', text=discovery_file.read(), ) def use_keystone_v3(self): self.adapter = self.useFixture(rm_fixture.Fixture()) self.calls = [] self._uri_registry.clear() self.__do_register_uris([ self.get_keystone_discovery(), self.get_keystone_v3_token(), ]) self._make_test_cloud(identity_api_version='3') def use_keystone_v2(self): self.adapter = self.useFixture(rm_fixture.Fixture()) self.calls = [] self._uri_registry.clear() self.__do_register_uris([ self.get_keystone_discovery(), dict(method='POST', uri='https://identity.example.com/v2.0/tokens', json=self.os_fixture.v2_token, ), ]) self._make_test_cloud(cloud_name='_test_cloud_v2_', identity_api_version='2.0') def _make_test_cloud(self, cloud_name='_test_cloud_', **kwargs): test_cloud = os.environ.get('OPENSTACKSDK_OS_CLOUD', cloud_name) self.cloud_config = self.config.get_one( cloud=test_cloud, validate=True, **kwargs) self.cloud = openstack.connection.Connection( config=self.cloud_config, strict=self.strict_cloud) def get_cinder_discovery_mock_dict( self, block_storage_version_json='block-storage-version.json', block_storage_discovery_url='https://block-storage.example.com/'): discovery_fixture = os.path.join( self.fixtures_directory, block_storage_version_json) return dict(method='GET', uri=block_storage_discovery_url, text=open(discovery_fixture, 'r').read()) def get_glance_discovery_mock_dict( self, image_version_json='image-version.json', image_discovery_url='https://image.example.com/'): discovery_fixture = os.path.join( 
self.fixtures_directory, image_version_json) return dict(method='GET', uri=image_discovery_url, status_code=300, text=open(discovery_fixture, 'r').read()) def get_nova_discovery_mock_dict( self, compute_version_json='compute-version.json', compute_discovery_url='https://compute.example.com/v2.1/'): discovery_fixture = os.path.join( self.fixtures_directory, compute_version_json) return dict( method='GET', uri=compute_discovery_url, text=open(discovery_fixture, 'r').read()) def get_placement_discovery_mock_dict(self): discovery_fixture = os.path.join( self.fixtures_directory, "placement.json") return dict(method='GET', uri="https://placement.example.com/", text=open(discovery_fixture, 'r').read()) def get_designate_discovery_mock_dict(self): discovery_fixture = os.path.join( self.fixtures_directory, "dns.json") return dict(method='GET', uri="https://dns.example.com/", text=open(discovery_fixture, 'r').read()) def get_ironic_discovery_mock_dict(self): discovery_fixture = os.path.join( self.fixtures_directory, "baremetal.json") return dict(method='GET', uri="https://baremetal.example.com/", text=open(discovery_fixture, 'r').read()) def get_senlin_discovery_mock_dict(self): discovery_fixture = os.path.join( self.fixtures_directory, "clustering.json") return dict(method='GET', uri="https://clustering.example.com/", text=open(discovery_fixture, 'r').read()) def use_compute_discovery( self, compute_version_json='compute-version.json', compute_discovery_url='https://compute.example.com/v2.1/'): self.__do_register_uris([ self.get_nova_discovery_mock_dict( compute_version_json, compute_discovery_url), ]) def use_glance( self, image_version_json='image-version.json', image_discovery_url='https://image.example.com/'): # NOTE(notmorgan): This method is only meant to be used in "setUp" # where the ordering of the url being registered is tightly controlled # if the functionality of .use_glance is meant to be used during an # actual test case, use .get_glance_discovery_mock and apply to the # right location in the mock_uris when calling .register_uris self.__do_register_uris([ self.get_glance_discovery_mock_dict( image_version_json, image_discovery_url)]) def use_cinder(self): self.__do_register_uris([ self.get_cinder_discovery_mock_dict()]) def use_placement(self): self.__do_register_uris([ self.get_placement_discovery_mock_dict()]) def use_designate(self): # NOTE(slaweq): This method is only meant to be used in "setUp" # where the ordering of the url being registered is tightly controlled # if the functionality of .use_designate is meant to be used during an # actual test case, use .get_designate_discovery_mock and apply to the # right location in the mock_uris when calling .register_uris self.__do_register_uris([ self.get_designate_discovery_mock_dict()]) def use_ironic(self): # NOTE(TheJulia): This method is only meant to be used in "setUp" # where the ordering of the url being registered is tightly controlled # if the functionality of .use_ironic is meant to be used during an # actual test case, use .get_ironic_discovery_mock and apply to the # right location in the mock_uris when calling .register_uris self.__do_register_uris([ self.get_ironic_discovery_mock_dict()]) def use_senlin(self): # NOTE(elachance): This method is only meant to be used in "setUp" # where the ordering of the url being registered is tightly controlled # if the functionality of .use_senlin is meant to be used during an # actual test case, use .get_senlin_discovery_mock and apply to the # right location in the mock_uris when 
calling .register_uris self.__do_register_uris([ self.get_senlin_discovery_mock_dict()]) def register_uris(self, uri_mock_list=None): """Mock a list of URIs and responses via requests mock. This method may be called only once per test-case to avoid odd and difficult to debug interactions. Discovery and Auth request mocking happens separately from this method. :param uri_mock_list: List of dictionaries that template out what is passed to requests_mock fixture's `register_uri`. Format is: {'method': <HTTP_METHOD>, 'uri': <URI to be mocked>, ... } Common keys to pass in the dictionary: * json: the json response (dict) * status_code: the HTTP status (int) * validate: The request body (dict) to validate with assert_calls all key-word arguments that are valid to send to requests_mock are supported. This list should be in the order in which calls are made. When `assert_calls` is executed, order here will be validated. Duplicate URIs and Methods are allowed and will be collapsed into a single matcher. Each response will be returned in order as the URI+Method is hit. :type uri_mock_list: list :return: None """ assert not self.__register_uris_called self.__do_register_uris(uri_mock_list or []) self.__register_uris_called = True def __do_register_uris(self, uri_mock_list=None): for to_mock in uri_mock_list: kw_params = {k: to_mock.pop(k) for k in ('request_headers', 'complete_qs', '_real_http') if k in to_mock} method = to_mock.pop('method') uri = to_mock.pop('uri') # NOTE(notmorgan): make sure the delimiter is non-url-safe, in this # case "|" is used so that the split can be a bit easier on # maintainers of this code. key = '{method}|{uri}|{params}'.format( method=method, uri=uri, params=kw_params) validate = to_mock.pop('validate', {}) valid_keys = set(['json', 'headers', 'params', 'data']) invalid_keys = set(validate.keys()) - valid_keys if invalid_keys: raise TypeError( "Invalid values passed to validate: {keys}".format( keys=invalid_keys)) headers = structures.CaseInsensitiveDict(to_mock.pop('headers', {})) if 'content-type' not in headers: headers[u'content-type'] = 'application/json' if 'exc' not in to_mock: to_mock['headers'] = headers self.calls += [ dict( method=method, url=uri, **validate) ] self._uri_registry.setdefault( key, {'response_list': [], 'kw_params': kw_params}) if self._uri_registry[key]['kw_params'] != kw_params: raise AssertionError( 'PROGRAMMING ERROR: key-word-params ' 'should be part of the uri_key and cannot change, ' 'it will affect the matcher in requests_mock. ' '%(old)r != %(new)r' % {'old': self._uri_registry[key]['kw_params'], 'new': kw_params}) self._uri_registry[key]['response_list'].append(to_mock) for mocked, params in self._uri_registry.items(): mock_method, mock_uri, _ignored = mocked.split('|', 2) self.adapter.register_uri( mock_method, mock_uri, params['response_list'], **params['kw_params']) def assert_no_calls(self): # TODO(mordred) For now, creating the adapter for self.conn is # triggering catalog lookups. Make sure no_calls is only 2. # When we can make that on-demand through a descriptor object, # drop this to 0. 
self.assertEqual(2, len(self.adapter.request_history)) def assert_calls(self, stop_after=None, do_count=True): for (x, (call, history)) in enumerate( zip(self.calls, self.adapter.request_history)): if stop_after and x > stop_after: break call_uri_parts = urllib.parse.urlparse(call['url']) history_uri_parts = urllib.parse.urlparse(history.url) self.assertEqual( (call['method'], call_uri_parts.scheme, call_uri_parts.netloc, call_uri_parts.path, call_uri_parts.params, urllib.parse.parse_qs(call_uri_parts.query)), (history.method, history_uri_parts.scheme, history_uri_parts.netloc, history_uri_parts.path, history_uri_parts.params, urllib.parse.parse_qs(history_uri_parts.query)), ('REST mismatch on call %(index)d. Expected %(call)r. ' 'Got %(history)r). ' 'NOTE: query string order differences wont cause mismatch' % { 'index': x, 'call': '{method} {url}'.format(method=call['method'], url=call['url']), 'history': '{method} {url}'.format( method=history.method, url=history.url)}) ) if 'json' in call: self.assertEqual( call['json'], history.json(), 'json content mismatch in call {index}'.format(index=x)) # headers in a call isn't exhaustive - it's checking to make sure # a specific header or headers are there, not that they are the # only headers if 'headers' in call: for key, value in call['headers'].items(): self.assertEqual( value, history.headers[key], 'header mismatch in call {index}'.format(index=x)) if do_count: self.assertEqual( len(self.calls), len(self.adapter.request_history)) class IronicTestCase(TestCase): def setUp(self): super(IronicTestCase, self).setUp() self.use_ironic() self.uuid = str(uuid.uuid4()) self.name = self.getUniqueString('name') def get_mock_url(self, **kwargs): kwargs.setdefault('service_type', 'baremetal') kwargs.setdefault('interface', 'public') kwargs.setdefault('base_url_append', 'v1') return super(IronicTestCase, self).get_mock_url(**kwargs)
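# A minimal sketch (not from the real unit test suite) of how a test built on
# this TestCase is meant to look: every HTTP call the code under test will
# make is declared up front with register_uris() and then verified with
# assert_calls().  The identity path, interface choice and payload below are
# illustrative assumptions, not real fixtures.
class _ExampleProjectTest(TestCase):

    def test_list_projects(self):
        project = self._get_project_data(v3=True)
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     service_type='identity', interface='admin',
                     resource='projects', base_url_append='v3'),
                 status_code=200,
                 json={'projects': [project.json_response['project']]}),
        ])
        self.cloud.list_projects()
        self.assert_calls()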
# coding: utf-8 """ Simple gLite job manager. See https://wiki.italiangrid.it/twiki/bin/view/CREAM/UserGuide. """ __all__ = ["GLiteJobManager", "GLiteJobFileFactory"] import os import sys import time import re import random import subprocess from law.config import Config from law.job.base import BaseJobManager, BaseJobFileFactory from law.target.file import add_scheme from law.util import interruptable_popen, make_list, make_unique, quote_cmd from law.logger import get_logger logger = get_logger(__name__) _cfg = Config.instance() class GLiteJobManager(BaseJobManager): # chunking settings chunk_size_submit = 0 chunk_size_cancel = _cfg.get_expanded_int("job", "glite_chunk_size_cancel") chunk_size_cleanup = _cfg.get_expanded_int("job", "glite_chunk_size_cleanup") chunk_size_query = _cfg.get_expanded_int("job", "glite_chunk_size_query") submission_job_id_cre = re.compile(r"^https?\:\/\/.+\:\d+\/.+") status_block_cre = re.compile(r"(\w+)\s*\=\s*\[([^\]]*)\]") def __init__(self, ce=None, delegation_id=None, threads=1): super(GLiteJobManager, self).__init__() self.ce = ce self.delegation_id = delegation_id self.threads = threads def submit(self, job_file, ce=None, delegation_id=None, retries=0, retry_delay=3, silent=False): # default arguments if ce is None: ce = self.ce if delegation_id is None: delegation_id = self.delegation_id # check arguments if not ce: raise ValueError("ce must not be empty") # prepare round robin for ces and delegations ce = make_list(ce) if delegation_id: delegation_id = make_list(delegation_id) if len(ce) != len(delegation_id): raise Exception("numbers of CEs ({}) and delegation ids ({}) do not match".format( len(ce), len(delegation_id))) # get the job file location as the submission command is run it the same directory job_file_dir, job_file_name = os.path.split(os.path.abspath(job_file)) # define the actual submission in a loop to simplify retries while True: # build the command i = random.randint(0, len(ce) - 1) cmd = ["glite-ce-job-submit", "-r", ce[i]] if delegation_id: cmd += ["-D", delegation_id[i]] cmd += [job_file_name] cmd = quote_cmd(cmd) # run the command # glite prints everything to stdout logger.debug("submit glite job with command '{}'".format(cmd)) code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash", stdout=subprocess.PIPE, stderr=sys.stderr, cwd=job_file_dir) # in some cases, the return code is 0 but the ce did not respond with a valid id if code == 0: job_id = out.strip().split("\n")[-1].strip() if not self.submission_job_id_cre.match(job_id): code = 1 out = "bad job id '{}' from output:\n{}".format(job_id, out) # retry or done? 
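            # NOTE: on failure the code below either retries (decrementing
            # ``retries`` and sleeping ``retry_delay`` seconds before the next
            # attempt), returns None when ``silent`` is set, or raises.
            # Because ``i`` is re-drawn at the top of the while loop, every
            # retry may pick a different CE / delegation id pair from the
            # round-robin lists prepared above.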
if code == 0: return job_id else: logger.debug("submission of glite job '{}' failed with code {}:\n{}".format( job_file, code, out)) if retries > 0: retries -= 1 time.sleep(retry_delay) continue elif silent: return None else: raise Exception("submission of glite job '{}' failed:\n{}".format( job_file, out)) def cancel(self, job_id, silent=False): # build the command cmd = ["glite-ce-job-cancel", "-N"] + make_list(job_id) cmd = quote_cmd(cmd) # run it logger.debug("cancel glite job(s) with command '{}'".format(cmd)) code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash", stdout=subprocess.PIPE, stderr=sys.stderr) # check success if code != 0 and not silent: # glite prints everything to stdout raise Exception("cancellation of glite job(s) '{}' failed with code {}:\n{}".format( job_id, code, out)) def cleanup(self, job_id, silent=False): # build the command cmd = ["glite-ce-job-purge", "-N"] + make_list(job_id) cmd = quote_cmd(cmd) # run it logger.debug("cleanup glite job(s) with command '{}'".format(cmd)) code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash", stdout=subprocess.PIPE, stderr=sys.stderr) # check success if code != 0 and not silent: # glite prints everything to stdout raise Exception("cleanup of glite job(s) '{}' failed with code {}:\n{}".format( job_id, code, out)) def query(self, job_id, silent=False): chunking = isinstance(job_id, (list, tuple)) job_ids = make_list(job_id) # build the command cmd = ["glite-ce-job-status", "-n", "-L", "0"] + job_ids cmd = quote_cmd(cmd) # run it logger.debug("query glite job(s) with command '{}'".format(cmd)) code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash", stdout=subprocess.PIPE, stderr=sys.stderr) # handle errors if code != 0: if silent: return None else: # glite prints everything to stdout raise Exception("status query of glite job(s) '{}' failed with code {}:\n{}".format( job_id, code, out)) # parse the output and extract the status per job query_data = self.parse_query_output(out) # compare to the requested job ids and perform some checks for _job_id in job_ids: if _job_id not in query_data: if not chunking: if silent: return None else: raise Exception("glite job(s) '{}' not found in query response".format( job_id)) else: query_data[_job_id] = self.job_status_dict(job_id=_job_id, status=self.FAILED, error="job not found in query response") return query_data if chunking else query_data[job_id] @classmethod def parse_query_output(cls, out): # blocks per job are separated by ****** blocks = [] for block in out.split("******"): block = dict(cls.status_block_cre.findall(block)) if block: blocks.append(block) # retrieve information per block mapped to the job id query_data = {} for block in blocks: # extract the job id job_id = block.get("JobID") if job_id is None: continue # extract the status name status = block.get("Status") or None # extract the exit code and try to cast it to int code = block.get("ExitCode") or None if code is not None: try: code = int(code) except: pass # extract the fail reason reason = block.get("FailureReason") or block.get("Description") # special cases if status is None and code is None and reason is None: reason = "cannot parse data for job {}".format(job_id) if block: found = ["{}={}".format(*tpl) for tpl in block.items()] reason += ", found " + ", ".join(found) elif status is None: status = "DONE-FAILED" if reason is None: reason = "cannot find status of job {}".format(job_id) elif status == "DONE-OK" and code not in (0, None): status = 
"DONE-FAILED" # map the status status = cls.map_status(status) # save the result query_data[job_id] = cls.job_status_dict(job_id, status, code, reason) return query_data @classmethod def map_status(cls, status): # see https://wiki.italiangrid.it/twiki/bin/view/CREAM/UserGuide#4_CREAM_job_states if status in ("REGISTERED", "PENDING", "IDLE", "HELD"): return cls.PENDING elif status in ("RUNNING", "REALLY-RUNNING"): return cls.RUNNING elif status in ("DONE-OK",): return cls.FINISHED elif status in ("CANCELLED", "DONE-FAILED", "ABORTED"): return cls.FAILED else: return cls.FAILED class GLiteJobFileFactory(BaseJobFileFactory): config_attrs = BaseJobFileFactory.config_attrs + [ "file_name", "executable", "arguments", "input_files", "output_files", "postfix_output_files", "output_uri", "stderr", "stdout", "vo", "custom_content", "absolute_paths", ] def __init__(self, file_name="job.jdl", executable=None, arguments=None, input_files=None, output_files=None, postfix_output_files=True, output_uri=None, stdout="stdout.txt", stderr="stderr.txt", vo=None, custom_content=None, absolute_paths=False, **kwargs): # get some default kwargs from the config cfg = Config.instance() if kwargs.get("dir") is None: kwargs["dir"] = cfg.get_expanded("job", cfg.find_option("job", "glite_job_file_dir", "job_file_dir")) if kwargs.get("mkdtemp") is None: kwargs["mkdtemp"] = cfg.get_expanded_boolean("job", cfg.find_option("job", "glite_job_file_dir_mkdtemp", "job_file_dir_mkdtemp")) if kwargs.get("cleanup") is None: kwargs["cleanup"] = cfg.get_expanded_boolean("job", cfg.find_option("job", "glite_job_file_dir_cleanup", "job_file_dir_cleanup")) super(GLiteJobFileFactory, self).__init__(**kwargs) self.file_name = file_name self.executable = executable self.arguments = arguments self.input_files = input_files or [] self.output_files = output_files or [] self.postfix_output_files = postfix_output_files self.output_uri = output_uri self.stdout = stdout self.stderr = stderr self.vo = vo self.custom_content = custom_content self.absolute_paths = absolute_paths def create(self, postfix=None, render_variables=None, **kwargs): # merge kwargs and instance attributes c = self.get_config(kwargs) # some sanity checks if not c.file_name: raise ValueError("file_name must not be empty") elif not c.executable: raise ValueError("executable must not be empty") # default render variables if not render_variables: render_variables = {} # add postfix to render variables if postfix and "file_postfix" not in render_variables: render_variables["file_postfix"] = postfix # add output_uri to render variables if c.output_uri and "output_uri" not in render_variables: render_variables["output_uri"] = c.output_uri # linearize render variables render_variables = self.linearize_render_variables(render_variables) # prepare the job file and the executable job_file = self.postfix_file(os.path.join(c.dir, c.file_name), postfix) executable_is_file = c.executable in map(os.path.basename, c.input_files) if executable_is_file: c.executable = self.postfix_file(c.executable, postfix) # prepare input files def prepare_input(path): path = self.provide_input(os.path.abspath(path), postfix, c.dir, render_variables) path = add_scheme(path, "file") if c.absolute_paths else os.path.basename(path) return path c.input_files = list(map(prepare_input, c.input_files)) # ensure that log files are contained in the output files if c.stdout and c.stdout not in c.output_files: c.output_files.append(c.stdout) if c.stderr and c.stderr not in c.output_files: 
c.output_files.append(c.stderr) # postfix output files if c.postfix_output_files: c.output_files = [self.postfix_file(path, postfix) for path in c.output_files] c.stdout = c.stdout and self.postfix_file(c.stdout, postfix) c.stderr = c.stderr and self.postfix_file(c.stderr, postfix) # custom log file if c.custom_log_file: c.custom_log_file = self.postfix_file(c.custom_log_file, postfix) c.output_files.append(c.custom_log_file) # job file content content = [] content.append(("Executable", c.executable)) if c.arguments: content.append(("Arguments", c.arguments)) if c.input_files: content.append(("InputSandbox", make_unique(c.input_files))) if c.output_files: content.append(("OutputSandbox", make_unique(c.output_files))) if c.output_uri: content.append(("OutputSandboxBaseDestUri", c.output_uri)) if c.vo: content.append(("VirtualOrganisation", c.vo)) if c.stdout: content.append(("StdOutput", c.stdout)) if c.stderr: content.append(("StdError", c.stderr)) # add custom content if c.custom_content: content += c.custom_content # write the job file with open(job_file, "w") as f: f.write("[\n") for key, value in content: f.write(self.create_line(key, value) + "\n") f.write("]\n") logger.debug("created glite job file at '{}'".format(job_file)) return job_file, c @classmethod def create_line(cls, key, value): if isinstance(value, (list, tuple)): value = "{{{}}}".format(", ".join("\"{}\"".format(v) for v in value)) else: value = "\"{}\"".format(value) return "{} = {};".format(key, value)
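# A minimal sketch (not part of law itself) of the JDL text that
# GLiteJobFileFactory.create() writes: a bracketed block of ``Key = value;``
# lines produced by create_line(), where scalar values are quoted and lists
# become brace-enclosed, comma-separated sets.  The executable, sandbox files
# and VO name below are made up for illustration.
def _example_jdl():
    lines = [
        GLiteJobFileFactory.create_line("Executable", "job.sh"),
        GLiteJobFileFactory.create_line("Arguments", "arg1 arg2"),
        GLiteJobFileFactory.create_line("InputSandbox", ["job.sh", "payload.tgz"]),
        GLiteJobFileFactory.create_line("OutputSandbox", ["stdout.txt", "stderr.txt"]),
        GLiteJobFileFactory.create_line("VirtualOrganisation", "cms"),
    ]
    # create() wraps lines like these in "[" ... "]" when writing the job file.
    return "[\n" + "\n".join(lines) + "\n]\n"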
import os
import sys
import time
import math
import argparse
import subprocess

import numpy as np
np.set_printoptions(precision=2, linewidth=160)

# MPI
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# prettyNeat
from neat_src import *  # NEAT
from domain import *    # Task environments


# -- Run NEAT ------------------------------------------------------------ -- #

def master():
  """Main NEAT optimization script
  """
  global fileName, hyp
  data = NeatDataGatherer(fileName, hyp)
  alg = Neat(hyp)

  for gen in range(hyp['maxGen']):
    pop = alg.ask()             # Get newly evolved individuals from NEAT
    reward = batchMpiEval(pop)  # Send pop to be evaluated by workers
    alg.tell(reward)            # Send fitness to NEAT

    data = gatherData(data,alg,gen,hyp)
    print(gen, '\t - \t', data.display())

  # Clean up and data gathering at run end
  data = gatherData(data,alg,gen,hyp,savePop=True)
  data.save()
  data.savePop(alg.pop,fileName)  # Save population as 2D numpy arrays
  stopAllWorkers()

def gatherData(data,alg,gen,hyp,savePop=False):
  """Collects run data, saves it to disk, and exports pickled population

  Args:
    data       - (DataGatherer)  - collected run data
    alg        - (Neat)          - neat algorithm container
      .pop     - [Ind]           - list of individuals in population
      .species - (Species)       - current species
    gen        - (int)           - current generation
    hyp        - (dict)          - algorithm hyperparameters
    savePop    - (bool)          - save current population to disk?

  Return:
    data - (DataGatherer) - updated run data
  """
  data.gatherData(alg.pop, alg.species)
  if (gen % hyp['save_mod']) == 0:
    data = checkBest(data)
    data.save(gen)

  if savePop is True:  # Get a sample pop to play with in notebooks
    global fileName
    pref = 'log/' + fileName
    import pickle
    with open(pref+'_pop.obj', 'wb') as fp:
      pickle.dump(alg.pop,fp)

  return data

def checkBest(data):
  """Checks whether a better-performing individual holds up over many trials.

  Test a new 'best' individual with many different seeds to see if it really
  outperforms the current best.

  Args:
    data - (DataGatherer) - collected run data

  Return:
    data - (DataGatherer) - collected run data with best individual updated

  * This is a bit hacky, but is only for data gathering, and not optimization
  """
  global fileName, hyp
  if data.newBest is True:
    bestReps = max(hyp['bestReps'], (nWorker-1))
    rep = np.tile(data.best[-1], bestReps)
    fitVector = batchMpiEval(rep, sameSeedForEachIndividual=False)
    trueFit = np.mean(fitVector)
    if trueFit > data.best[-2].fitness:  # Actually better!
      data.best[-1].fitness = trueFit
      data.fit_top[-1]      = trueFit
      data.bestFitVec = fitVector
    else:                                # Just lucky!
      prev = hyp['save_mod']
      data.best[-prev:]    = data.best[-prev]
      data.fit_top[-prev:] = data.fit_top[-prev]
      data.newBest = False
  return data


# -- Parallelization ----------------------------------------------------- -- #

def batchMpiEval(pop, sameSeedForEachIndividual=True):
  """Sends population to workers for evaluation one batch at a time.
Args: pop - [Ind] - list of individuals .wMat - (np_array) - weight matrix of network [N X N] .aVec - (np_array) - activation function of each node [N X 1] Return: reward - (np_array) - fitness value of each individual [N X 1] Todo: * Asynchronous evaluation instead of batches """ global nWorker, hyp nSlave = nWorker-1 nJobs = len(pop) nBatch= math.ceil(nJobs/nSlave) # First worker is master # Set same seed for each individual if sameSeedForEachIndividual is False: seed = np.random.randint(1000, size=nJobs) else: seed = np.random.randint(1000) reward = np.empty(nJobs, dtype=np.float64) i = 0 # Index of fitness we are filling for iBatch in range(nBatch): # Send one batch of individuals for iWork in range(nSlave): # (one to each worker if there) if i < nJobs: wVec = pop[i].wMat.flatten() n_wVec = np.shape(wVec)[0] aVec = pop[i].aVec.flatten() n_aVec = np.shape(aVec)[0] comm.send(n_wVec, dest=(iWork)+1, tag=1) comm.Send( wVec, dest=(iWork)+1, tag=2) comm.send(n_aVec, dest=(iWork)+1, tag=3) comm.Send( aVec, dest=(iWork)+1, tag=4) if sameSeedForEachIndividual is False: comm.send(seed.item(i), dest=(iWork)+1, tag=5) else: comm.send( seed, dest=(iWork)+1, tag=5) else: # message size of 0 is signal to shutdown workers n_wVec = 0 comm.send(n_wVec, dest=(iWork)+1) i = i+1 # Get fitness values back for that batch i -= nSlave for iWork in range(1,nSlave+1): if i < nJobs: workResult = np.empty(1, dtype='d') comm.Recv(workResult, source=iWork) reward[i] = workResult i+=1 return reward def slave(): """Evaluation process: evaluates networks sent from master process. PseudoArgs (recieved from master): wVec - (np_array) - weight matrix as a flattened vector [1 X N**2] n_wVec - (int) - length of weight vector (N**2) aVec - (np_array) - activation function of each node [1 X N] - stored as ints, see applyAct in ann.py n_aVec - (int) - length of activation vector (N) seed - (int) - random seed (for consistency across workers) PseudoReturn (sent to master): result - (float) - fitness value of network """ global hyp task = GymTask(games[hyp['task']], nReps=hyp['alg_nReps']) # Evaluate any weight vectors sent this way while True: n_wVec = comm.recv(source=0, tag=1)# how long is the array that's coming? if n_wVec > 0: wVec = np.empty(n_wVec, dtype='d')# allocate space to receive weights comm.Recv(wVec, source=0, tag=2) # recieve weights n_aVec = comm.recv(source=0,tag=3)# how long is the array that's coming? aVec = np.empty(n_aVec, dtype='d')# allocate space to receive activation comm.Recv(aVec, source=0, tag=4) # recieve it seed = comm.recv(source=0, tag=5) # random seed as int result = task.getFitness(wVec, aVec) # process it comm.Send(result, dest=0) # send it back if n_wVec < 0: # End signal recieved print('Worker # ', rank, ' shutting down.') break def stopAllWorkers(): """Sends signal to all workers to shutdown. 
""" global nWorker nSlave = nWorker-1 print('stopping workers') for iWork in range(nSlave): comm.send(-1, dest=(iWork)+1, tag=1) def mpi_fork(n): """Re-launches the current script with workers Returns "parent" for original parent, "child" for MPI children (from https://github.com/garymcintire/mpi_util/) """ if n<=1: return "child" if os.getenv("IN_MPI") is None: env = os.environ.copy() env.update( MKL_NUM_THREADS="1", OMP_NUM_THREADS="1", IN_MPI="1" ) print( ["mpirun", "-np", str(n), sys.executable] + sys.argv) subprocess.check_call(["mpirun", "-np", str(n), sys.executable] +['-u']+ sys.argv, env=env) return "parent" else: global nWorker, rank nWorker = comm.Get_size() rank = comm.Get_rank() #print('assigning the rank and nworkers', nWorker, rank) return "child" # -- Input Parsing ------------------------------------------------------- -- # def main(argv): """Handles command line input, launches optimization or evaluation script depending on MPI rank. """ global fileName, hyp # Used by both master and slave processes fileName = args.outPrefix hyp_default = args.default hyp_adjust = args.hyperparam hyp = loadHyp(pFileName=hyp_default) updateHyp(hyp,hyp_adjust) # Launch main thread and workers if (rank == 0): master() else: slave() if __name__ == "__main__": ''' Parse input and launch ''' parser = argparse.ArgumentParser(description=('Evolve NEAT networks')) parser.add_argument('-d', '--default', type=str,\ help='default hyperparameter file', default='p/default_neat.json') parser.add_argument('-p', '--hyperparam', type=str,\ help='hyperparameter file', default=None) parser.add_argument('-o', '--outPrefix', type=str,\ help='file name for result output', default='test') parser.add_argument('-n', '--num_worker', type=int,\ help='number of cores to use', default=2) args = parser.parse_args() # Use MPI if parallel if "parent" == mpi_fork(args.num_worker+1): os._exit(0) main(args)
#!/usr/bin/python ''' Copyright 2012 Google Inc. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. ''' ''' Rebaselines the given GM tests, on all bots and all configurations. ''' # System-level imports import argparse import json import os import re import subprocess import sys import urllib2 # Imports from within Skia # # We need to add the 'gm' directory, so that we can import gm_json.py within # that directory. That script allows us to parse the actual-results.json file # written out by the GM tool. # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* # so any dirs that are already in the PYTHONPATH will be preferred. # # This assumes that the 'gm' directory has been checked out as a sibling of # the 'tools' directory containing this script, which will be the case if # 'trunk' was checked out as a single unit. GM_DIRECTORY = os.path.realpath( os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm')) if GM_DIRECTORY not in sys.path: sys.path.append(GM_DIRECTORY) import gm_json # TODO(epoger): In the long run, we want to build this list automatically, # but for now we hard-code it until we can properly address # https://code.google.com/p/skia/issues/detail?id=1544 # ('live query of builder list makes rebaseline.py slow to start up') TEST_BUILDERS = [ 'Test-Android-GalaxyNexus-SGX540-Arm7-Debug', 'Test-Android-GalaxyNexus-SGX540-Arm7-Release', 'Test-Android-IntelRhb-SGX544-x86-Debug', 'Test-Android-IntelRhb-SGX544-x86-Release', 'Test-Android-Nexus10-MaliT604-Arm7-Debug', 'Test-Android-Nexus10-MaliT604-Arm7-Release', 'Test-Android-Nexus4-Adreno320-Arm7-Debug', 'Test-Android-Nexus4-Adreno320-Arm7-Release', 'Test-Android-Nexus7-Tegra3-Arm7-Debug', 'Test-Android-Nexus7-Tegra3-Arm7-Release', 'Test-Android-NexusS-SGX540-Arm7-Debug', 'Test-Android-NexusS-SGX540-Arm7-Release', 'Test-Android-Xoom-Tegra2-Arm7-Debug', 'Test-Android-Xoom-Tegra2-Arm7-Release', 'Test-ChromeOS-Alex-GMA3150-x86-Debug', 'Test-ChromeOS-Alex-GMA3150-x86-Release', 'Test-ChromeOS-Daisy-MaliT604-Arm7-Debug', 'Test-ChromeOS-Daisy-MaliT604-Arm7-Release', 'Test-ChromeOS-Link-HD4000-x86_64-Debug', 'Test-ChromeOS-Link-HD4000-x86_64-Release', 'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug', 'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Release', 'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Debug', 'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Release', 'Test-Mac10.7-MacMini4.1-GeForce320M-x86-Debug', 'Test-Mac10.7-MacMini4.1-GeForce320M-x86-Release', 'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Debug', 'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Release', 'Test-Mac10.8-MacMini4.1-GeForce320M-x86-Debug', 'Test-Mac10.8-MacMini4.1-GeForce320M-x86-Release', 'Test-Mac10.8-MacMini4.1-GeForce320M-x86_64-Debug', 'Test-Mac10.8-MacMini4.1-GeForce320M-x86_64-Release', 'Test-Ubuntu12-ShuttleA-HD2000-x86_64-Release-Valgrind', 'Test-Ubuntu12-ShuttleA-GTX660-x86-Debug', 'Test-Ubuntu12-ShuttleA-GTX660-x86-Release', 'Test-Ubuntu12-ShuttleA-GTX660-x86_64-Debug', 'Test-Ubuntu12-ShuttleA-GTX660-x86_64-Release', 'Test-Ubuntu13.10-ShuttleA-NoGPU-x86_64-Debug', 'Test-Win7-ShuttleA-HD2000-x86-Debug', 'Test-Win7-ShuttleA-HD2000-x86-Debug-ANGLE', 'Test-Win7-ShuttleA-HD2000-x86-Debug-DirectWrite', 'Test-Win7-ShuttleA-HD2000-x86-Release', 'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE', 'Test-Win7-ShuttleA-HD2000-x86-Release-DirectWrite', 'Test-Win7-ShuttleA-HD2000-x86_64-Debug', 'Test-Win7-ShuttleA-HD2000-x86_64-Release', 'Test-Win8-ShuttleA-GTX660-x86-Debug', 
'Test-Win8-ShuttleA-GTX660-x86-Release', 'Test-Win8-ShuttleA-GTX660-x86_64-Debug', 'Test-Win8-ShuttleA-GTX660-x86_64-Release', 'Test-Win8-ShuttleA-HD7770-x86-Debug', 'Test-Win8-ShuttleA-HD7770-x86-Release', 'Test-Win8-ShuttleA-HD7770-x86_64-Debug', 'Test-Win8-ShuttleA-HD7770-x86_64-Release', ] # TODO: Get this from builder_name_schema in buildbot. TRYBOT_SUFFIX = '-Trybot' class _InternalException(Exception): pass class ExceptionHandler(object): """ Object that handles exceptions, either raising them immediately or collecting them to display later on.""" # params: def __init__(self, keep_going_on_failure=False): """ params: keep_going_on_failure: if False, report failures and quit right away; if True, collect failures until ReportAllFailures() is called """ self._keep_going_on_failure = keep_going_on_failure self._failures_encountered = [] def RaiseExceptionOrContinue(self): """ We have encountered an exception; either collect the info and keep going, or exit the program right away.""" # Get traceback information about the most recently raised exception. exc_info = sys.exc_info() if self._keep_going_on_failure: print >> sys.stderr, ('WARNING: swallowing exception %s' % repr(exc_info[1])) self._failures_encountered.append(exc_info) else: print >> sys.stderr, ( '\nHalting at first exception.\n' + 'Please file a bug to epoger@google.com at ' + 'https://code.google.com/p/skia/issues/entry, containing the ' + 'command you ran and the following stack trace.\n\n' + 'Afterwards, you can re-run with the --keep-going-on-failure ' + 'option set.\n') raise exc_info[1], None, exc_info[2] def ReportAllFailures(self): if self._failures_encountered: print >> sys.stderr, ('Encountered %d failures (see above).' % len(self._failures_encountered)) sys.exit(1) # Object that rebaselines a JSON expectations file (not individual image files). class JsonRebaseliner(object): # params: # expectations_root: root directory of all expectations JSON files # expectations_input_filename: filename (under expectations_root) of JSON # expectations file to read; typically # "expected-results.json" # expectations_output_filename: filename (under expectations_root) to # which updated expectations should be # written; typically the same as # expectations_input_filename, to overwrite # the old content # actuals_base_url: base URL from which to read actual-result JSON files # actuals_filename: filename (under actuals_base_url) from which to read a # summary of results; typically "actual-results.json" # exception_handler: reference to rebaseline.ExceptionHandler object # tests: list of tests to rebaseline, or None if we should rebaseline # whatever files the JSON results summary file tells us to # configs: which configs to run for each test, or None if we should # rebaseline whatever configs the JSON results summary file tells # us to # add_new: if True, add expectations for tests which don't have any yet # add_ignored: if True, add expectations for tests for which failures are # currently ignored # bugs: optional list of bug numbers which pertain to these expectations # notes: free-form text notes to add to all updated expectations # mark_unreviewed: if True, mark these expectations as NOT having been # reviewed by a human; otherwise, leave that field blank. # Currently, there is no way to make this script mark # expectations as reviewed-by-human=True. # TODO(epoger): Add that capability to a review tool. # mark_ignore_failure: if True, mark failures of a given test as being # ignored. 
# from_trybot: if True, read actual-result JSON files generated from a # trybot run rather than a waterfall run. def __init__(self, expectations_root, expectations_input_filename, expectations_output_filename, actuals_base_url, actuals_filename, exception_handler, tests=None, configs=None, add_new=False, add_ignored=False, bugs=None, notes=None, mark_unreviewed=None, mark_ignore_failure=False, from_trybot=False): self._expectations_root = expectations_root self._expectations_input_filename = expectations_input_filename self._expectations_output_filename = expectations_output_filename self._tests = tests self._configs = configs self._actuals_base_url = actuals_base_url self._actuals_filename = actuals_filename self._exception_handler = exception_handler self._add_new = add_new self._add_ignored = add_ignored self._bugs = bugs self._notes = notes self._mark_unreviewed = mark_unreviewed self._mark_ignore_failure = mark_ignore_failure; if self._tests or self._configs: self._image_filename_re = re.compile(gm_json.IMAGE_FILENAME_PATTERN) else: self._image_filename_re = None self._using_svn = os.path.isdir(os.path.join(expectations_root, '.svn')) self._from_trybot = from_trybot # Executes subprocess.call(cmd). # Raises an Exception if the command fails. def _Call(self, cmd): if subprocess.call(cmd) != 0: raise _InternalException('error running command: ' + ' '.join(cmd)) # Returns the full contents of filepath, as a single string. # If filepath looks like a URL, try to read it that way instead of as # a path on local storage. # # Raises _InternalException if there is a problem. def _GetFileContents(self, filepath): if filepath.startswith('http:') or filepath.startswith('https:'): try: return urllib2.urlopen(filepath).read() except urllib2.HTTPError as e: raise _InternalException('unable to read URL %s: %s' % ( filepath, e)) else: return open(filepath, 'r').read() # Returns a dictionary of actual results from actual-results.json file. # # The dictionary returned has this format: # { # u'imageblur_565.png': [u'bitmap-64bitMD5', 3359963596899141322], # u'imageblur_8888.png': [u'bitmap-64bitMD5', 4217923806027861152], # u'shadertext3_8888.png': [u'bitmap-64bitMD5', 3713708307125704716] # } # # If the JSON actual result summary file cannot be loaded, logs a warning # message and returns None. # If the JSON actual result summary file can be loaded, but we have # trouble parsing it, raises an Exception. # # params: # json_url: URL pointing to a JSON actual result summary file # sections: a list of section names to include in the results, e.g. # [gm_json.JSONKEY_ACTUALRESULTS_FAILED, # gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] ; # if None, then include ALL sections. def _GetActualResults(self, json_url, sections=None): try: json_contents = self._GetFileContents(json_url) except _InternalException: print >> sys.stderr, ( 'could not read json_url %s ; skipping this platform.' % json_url) return None json_dict = gm_json.LoadFromString(json_contents) results_to_return = {} actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] if not sections: sections = actual_results.keys() for section in sections: section_results = actual_results[section] if section_results: results_to_return.update(section_results) return results_to_return # Rebaseline all tests/types we specified in the constructor, # within this builder's subdirectory in expectations/gm . # # params: # builder : e.g. 
'Test-Win7-ShuttleA-HD2000-x86-Release' def RebaselineSubdir(self, builder): # Read in the actual result summary, and extract all the tests whose # results we need to update. results_builder = str(builder) if self._from_trybot: results_builder = results_builder + TRYBOT_SUFFIX actuals_url = '/'.join([self._actuals_base_url, results_builder, self._actuals_filename]) # Only update results for tests that are currently failing. # We don't want to rewrite results for tests that are already succeeding, # because we don't want to add annotation fields (such as # JSONKEY_EXPECTEDRESULTS_BUGS) except for tests whose expectations we # are actually modifying. sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED] if self._add_new: sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON) if self._add_ignored: sections.append(gm_json.JSONKEY_ACTUALRESULTS_FAILUREIGNORED) results_to_update = self._GetActualResults(json_url=actuals_url, sections=sections) # Read in current expectations. expectations_input_filepath = os.path.join( self._expectations_root, builder, self._expectations_input_filename) expectations_dict = gm_json.LoadFromFile(expectations_input_filepath) expected_results = expectations_dict.get(gm_json.JSONKEY_EXPECTEDRESULTS) if not expected_results: expected_results = {} expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS] = expected_results # Update the expectations in memory, skipping any tests/configs that # the caller asked to exclude. skipped_images = [] if results_to_update: for (image_name, image_results) in results_to_update.iteritems(): if self._image_filename_re: (test, config) = self._image_filename_re.match(image_name).groups() if self._tests: if test not in self._tests: skipped_images.append(image_name) continue if self._configs: if config not in self._configs: skipped_images.append(image_name) continue if not expected_results.get(image_name): expected_results[image_name] = {} expected_results[image_name]\ [gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS]\ = [image_results] if self._mark_unreviewed: expected_results[image_name]\ [gm_json.JSONKEY_EXPECTEDRESULTS_REVIEWED]\ = False if self._mark_ignore_failure: expected_results[image_name]\ [gm_json.JSONKEY_EXPECTEDRESULTS_IGNOREFAILURE]\ = True if self._bugs: expected_results[image_name]\ [gm_json.JSONKEY_EXPECTEDRESULTS_BUGS]\ = self._bugs if self._notes: expected_results[image_name]\ [gm_json.JSONKEY_EXPECTEDRESULTS_NOTES]\ = self._notes # Write out updated expectations. expectations_output_filepath = os.path.join( self._expectations_root, builder, self._expectations_output_filename) gm_json.WriteToFile(expectations_dict, expectations_output_filepath) # Mark the JSON file as plaintext, so text-style diffs can be applied. # Fixes https://code.google.com/p/skia/issues/detail?id=1442 if self._using_svn: self._Call(['svn', 'propset', '--quiet', 'svn:mime-type', 'text/x-json', expectations_output_filepath]) # main... parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, epilog='Here is the full set of builders we know about:' + '\n '.join([''] + sorted(TEST_BUILDERS))) parser.add_argument('--actuals-base-url', help=('base URL from which to read files containing JSON ' 'summaries of actual GM results; defaults to ' '%(default)s. To get a specific revision (useful for ' 'trybots) replace "svn" with "svn-history/r123". 
' 'If SKIMAGE is True, defaults to ' + gm_json.SKIMAGE_ACTUALS_BASE_URL), default='http://skia-autogen.googlecode.com/svn/gm-actual') parser.add_argument('--actuals-filename', help=('filename (within builder-specific subdirectories ' 'of ACTUALS_BASE_URL) to read a summary of results ' 'from; defaults to %(default)s'), default='actual-results.json') parser.add_argument('--add-new', action='store_true', help=('in addition to the standard behavior of ' 'updating expectations for failing tests, add ' 'expectations for tests which don\'t have ' 'expectations yet.')) parser.add_argument('--add-ignored', action='store_true', help=('in addition to the standard behavior of ' 'updating expectations for failing tests, add ' 'expectations for tests for which failures are ' 'currently ignored.')) parser.add_argument('--bugs', metavar='BUG', type=int, nargs='+', help=('Skia bug numbers (under ' 'https://code.google.com/p/skia/issues/list ) which ' 'pertain to this set of rebaselines.')) parser.add_argument('--builders', metavar='BUILDER', nargs='+', help=('which platforms to rebaseline; ' 'if unspecified, rebaseline all known platforms ' '(see below for a list)')) # TODO(epoger): Add test that exercises --configs argument. parser.add_argument('--configs', metavar='CONFIG', nargs='+', help=('which configurations to rebaseline, e.g. ' '"--configs 565 8888", as a filter over the full set ' 'of results in ACTUALS_FILENAME; if unspecified, ' 'rebaseline *all* configs that are available.')) parser.add_argument('--deprecated', action='store_true', help=('run the tool even though it has been deprecated; ' 'see http://tinyurl.com/SkiaRebaselineServer for ' 'the recommended/supported process')) parser.add_argument('--expectations-filename', help=('filename (under EXPECTATIONS_ROOT) to read ' 'current expectations from, and to write new ' 'expectations into (unless a separate ' 'EXPECTATIONS_FILENAME_OUTPUT has been specified); ' 'defaults to %(default)s'), default='expected-results.json') parser.add_argument('--expectations-filename-output', help=('filename (under EXPECTATIONS_ROOT) to write ' 'updated expectations into; by default, overwrites ' 'the input file (EXPECTATIONS_FILENAME)'), default='') parser.add_argument('--expectations-root', help=('root of expectations directory to update-- should ' 'contain one or more builder subdirectories. ' 'Defaults to %(default)s. If SKIMAGE is set, ' ' defaults to ' + gm_json.SKIMAGE_EXPECTATIONS_ROOT), default=os.path.join('expectations', 'gm')) parser.add_argument('--keep-going-on-failure', action='store_true', help=('instead of halting at the first error encountered, ' 'keep going and rebaseline as many tests as ' 'possible, and then report the full set of errors ' 'at the end')) parser.add_argument('--notes', help=('free-form text notes to add to all updated ' 'expectations')) # TODO(epoger): Add test that exercises --tests argument. parser.add_argument('--tests', metavar='TEST', nargs='+', help=('which tests to rebaseline, e.g. 
' '"--tests aaclip bigmatrix", as a filter over the ' 'full set of results in ACTUALS_FILENAME; if ' 'unspecified, rebaseline *all* tests that are ' 'available.')) parser.add_argument('--unreviewed', action='store_true', help=('mark all expectations modified by this run as ' '"%s": False' % gm_json.JSONKEY_EXPECTEDRESULTS_REVIEWED)) parser.add_argument('--ignore-failure', action='store_true', help=('mark all expectations modified by this run as ' '"%s": True' % gm_json.JSONKEY_ACTUALRESULTS_FAILUREIGNORED)) parser.add_argument('--from-trybot', action='store_true', help=('pull the actual-results.json file from the ' 'corresponding trybot, rather than the main builder')) parser.add_argument('--skimage', action='store_true', help=('Rebaseline skimage results instead of gm. Defaults ' 'to False. If True, TESTS and CONFIGS are ignored, ' 'and ACTUALS_BASE_URL and EXPECTATIONS_ROOT are set ' 'to alternate defaults, specific to skimage.')) args = parser.parse_args() if not args.deprecated: raise Exception( 'This tool has been deprecated; see' ' http://tinyurl.com/SkiaRebaselineServer for the recommended/supported' ' process, or re-run with the --deprecated option to press on.') exception_handler = ExceptionHandler( keep_going_on_failure=args.keep_going_on_failure) if args.builders: builders = args.builders missing_json_is_fatal = True else: builders = sorted(TEST_BUILDERS) missing_json_is_fatal = False if args.skimage: # Use a different default if --skimage is specified. if args.actuals_base_url == parser.get_default('actuals_base_url'): args.actuals_base_url = gm_json.SKIMAGE_ACTUALS_BASE_URL if args.expectations_root == parser.get_default('expectations_root'): args.expectations_root = gm_json.SKIMAGE_EXPECTATIONS_ROOT for builder in builders: if not builder in TEST_BUILDERS: raise Exception(('unrecognized builder "%s"; ' + 'should be one of %s') % ( builder, TEST_BUILDERS)) expectations_json_file = os.path.join(args.expectations_root, builder, args.expectations_filename) if os.path.isfile(expectations_json_file): rebaseliner = JsonRebaseliner( expectations_root=args.expectations_root, expectations_input_filename=args.expectations_filename, expectations_output_filename=(args.expectations_filename_output or args.expectations_filename), tests=args.tests, configs=args.configs, actuals_base_url=args.actuals_base_url, actuals_filename=args.actuals_filename, exception_handler=exception_handler, add_new=args.add_new, add_ignored=args.add_ignored, bugs=args.bugs, notes=args.notes, mark_unreviewed=args.unreviewed, mark_ignore_failure=args.ignore_failure, from_trybot=args.from_trybot) try: rebaseliner.RebaselineSubdir(builder=builder) except: exception_handler.RaiseExceptionOrContinue() else: try: raise _InternalException('expectations_json_file %s not found' % expectations_json_file) except: exception_handler.RaiseExceptionOrContinue() exception_handler.ReportAllFailures()
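# A minimal sketch (not part of rebaseline.py) of driving JsonRebaseliner
# directly from Python instead of through the argparse front end above.  The
# builder, test and config names are examples taken from this script's own
# comments and help text; the URLs and filenames reuse its defaults.  This
# helper is illustrative only and is never called.
def _example_rebaseline():
    handler = ExceptionHandler(keep_going_on_failure=True)
    rebaseliner = JsonRebaseliner(
        expectations_root=os.path.join('expectations', 'gm'),
        expectations_input_filename='expected-results.json',
        expectations_output_filename='expected-results.json',
        actuals_base_url='http://skia-autogen.googlecode.com/svn/gm-actual',
        actuals_filename='actual-results.json',
        exception_handler=handler,
        tests=['imageblur'],        # rebaseline a single GM test
        configs=['565', '8888'])    # only these configs
    rebaseliner.RebaselineSubdir(builder='Test-Win7-ShuttleA-HD2000-x86-Release')
    handler.ReportAllFailures()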
import collections import logging import numpy as np import platform import random from typing import List # Import ray before psutil will make sure we use psutil's bundled version import ray # noqa F401 import psutil # noqa E402 from ray.rllib.execution.segment_tree import SumSegmentTree, MinSegmentTree from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch, \ DEFAULT_POLICY_ID from ray.rllib.utils.annotations import DeveloperAPI from ray.util.iter import ParallelIteratorWorker from ray.util.debug import log_once from ray.rllib.utils.timer import TimerStat from ray.rllib.utils.window_stat import WindowStat from ray.rllib.utils.typing import SampleBatchType # Constant that represents all policies in lockstep replay mode. _ALL_POLICIES = "__all__" logger = logging.getLogger(__name__) def warn_replay_buffer_size(*, item: SampleBatchType, num_items: int) -> None: """Warn if the configured replay buffer size is too large.""" if log_once("replay_buffer_size"): item_size = item.size_bytes() psutil_mem = psutil.virtual_memory() total_gb = psutil_mem.total / 1e9 mem_size = num_items * item_size / 1e9 msg = ("Estimated max memory usage for replay buffer is {} GB " "({} batches of size {}, {} bytes each), " "available system memory is {} GB".format( mem_size, num_items, item.count, item_size, total_gb)) if mem_size > total_gb: raise ValueError(msg) elif mem_size > 0.2 * total_gb: logger.warning(msg) else: logger.info(msg) @DeveloperAPI class ReplayBuffer: @DeveloperAPI def __init__(self, size: int): """Create Prioritized Replay buffer. Args: size (int): Max number of timesteps to store in the FIFO buffer. """ self._storage = [] self._maxsize = size self._next_idx = 0 self._hit_count = np.zeros(size) self._eviction_started = False self._num_timesteps_added = 0 self._num_timesteps_added_wrap = 0 self._num_timesteps_sampled = 0 self._evicted_hit_stats = WindowStat("evicted_hit", 1000) self._est_size_bytes = 0 def __len__(self): return len(self._storage) @DeveloperAPI def add(self, item: SampleBatchType, weight: float): warn_replay_buffer_size( item=item, num_items=self._maxsize / item.count) assert item.count > 0, item self._num_timesteps_added += item.count self._num_timesteps_added_wrap += item.count if self._next_idx >= len(self._storage): self._storage.append(item) self._est_size_bytes += item.size_bytes() else: self._storage[self._next_idx] = item # Wrap around storage as a circular buffer once we hit maxsize. if self._num_timesteps_added_wrap >= self._maxsize: self._eviction_started = True self._num_timesteps_added_wrap = 0 self._next_idx = 0 else: self._next_idx += 1 if self._eviction_started: self._evicted_hit_stats.push(self._hit_count[self._next_idx]) self._hit_count[self._next_idx] = 0 def _encode_sample(self, idxes: List[int]) -> SampleBatchType: out = SampleBatch.concat_samples([self._storage[i] for i in idxes]) out.decompress_if_needed() return out @DeveloperAPI def sample(self, num_items: int) -> SampleBatchType: """Sample a batch of experiences. Args: num_items (int): Number of items to sample from this buffer. Returns: SampleBatchType: concatenated batch of items. 
""" idxes = [ random.randint(0, len(self._storage) - 1) for _ in range(num_items) ] self._num_sampled += num_items return self._encode_sample(idxes) @DeveloperAPI def stats(self, debug=False): data = { "added_count": self._num_timesteps_added, "sampled_count": self._num_timesteps_sampled, "est_size_bytes": self._est_size_bytes, "num_entries": len(self._storage), } if debug: data.update(self._evicted_hit_stats.stats()) return data @DeveloperAPI class PrioritizedReplayBuffer(ReplayBuffer): @DeveloperAPI def __init__(self, size: int, alpha: float): """Create Prioritized Replay buffer. Args: size (int): Max number of items to store in the FIFO buffer. alpha (float): how much prioritization is used (0 - no prioritization, 1 - full prioritization). See also: ReplayBuffer.__init__() """ super(PrioritizedReplayBuffer, self).__init__(size) assert alpha > 0 self._alpha = alpha it_capacity = 1 while it_capacity < size: it_capacity *= 2 self._it_sum = SumSegmentTree(it_capacity) self._it_min = MinSegmentTree(it_capacity) self._max_priority = 1.0 self._prio_change_stats = WindowStat("reprio", 1000) @DeveloperAPI def add(self, item: SampleBatchType, weight: float): idx = self._next_idx super(PrioritizedReplayBuffer, self).add(item, weight) if weight is None: weight = self._max_priority self._it_sum[idx] = weight**self._alpha self._it_min[idx] = weight**self._alpha def _sample_proportional(self, num_items: int): res = [] for _ in range(num_items): # TODO(szymon): should we ensure no repeats? mass = random.random() * self._it_sum.sum(0, len(self._storage)) idx = self._it_sum.find_prefixsum_idx(mass) res.append(idx) return res @DeveloperAPI def sample(self, num_items: int, beta: float) -> SampleBatchType: """Sample a batch of experiences and return priority weights, indices. Args: num_items (int): Number of items to sample from this buffer. beta (float): To what degree to use importance weights (0 - no corrections, 1 - full correction). Returns: SampleBatchType: Concatenated batch of items including "weights" and "batch_indexes" fields denoting IS of each sampled transition and original idxes in buffer of sampled experiences. """ assert beta >= 0.0 idxes = self._sample_proportional(num_items) weights = [] batch_indexes = [] p_min = self._it_min.min() / self._it_sum.sum() max_weight = (p_min * len(self._storage))**(-beta) for idx in idxes: p_sample = self._it_sum[idx] / self._it_sum.sum() weight = (p_sample * len(self._storage))**(-beta) count = self._storage[idx].count weights.extend([weight / max_weight] * count) batch_indexes.extend([idx] * count) self._num_timesteps_sampled += count batch = self._encode_sample(idxes) # Note: prioritization is not supported in lockstep replay mode. if isinstance(batch, SampleBatch): assert len(weights) == batch.count assert len(batch_indexes) == batch.count batch["weights"] = np.array(weights) batch["batch_indexes"] = np.array(batch_indexes) return batch @DeveloperAPI def update_priorities(self, idxes, priorities): """Update priorities of sampled transitions. sets priority of transition at index idxes[i] in buffer to priorities[i]. Parameters ---------- idxes: [int] List of idxes of sampled transitions priorities: [float] List of updated priorities corresponding to transitions at the sampled idxes denoted by variable `idxes`. 
""" assert len(idxes) == len(priorities) for idx, priority in zip(idxes, priorities): assert priority > 0 assert 0 <= idx < len(self._storage) delta = priority**self._alpha - self._it_sum[idx] self._prio_change_stats.push(delta) self._it_sum[idx] = priority**self._alpha self._it_min[idx] = priority**self._alpha self._max_priority = max(self._max_priority, priority) @DeveloperAPI def stats(self, debug=False): parent = ReplayBuffer.stats(self, debug) if debug: parent.update(self._prio_change_stats.stats()) return parent # Visible for testing. _local_replay_buffer = None class LocalReplayBuffer(ParallelIteratorWorker): """A replay buffer shard. Ray actors are single-threaded, so for scalability multiple replay actors may be created to increase parallelism.""" def __init__(self, num_shards=1, learning_starts=1000, buffer_size=10000, replay_batch_size=1, prioritized_replay_alpha=0.6, prioritized_replay_beta=0.4, prioritized_replay_eps=1e-6, replay_mode="independent", replay_sequence_length=1): self.replay_starts = learning_starts // num_shards self.buffer_size = buffer_size // num_shards self.replay_batch_size = replay_batch_size self.prioritized_replay_beta = prioritized_replay_beta self.prioritized_replay_eps = prioritized_replay_eps self.replay_mode = replay_mode self.replay_sequence_length = replay_sequence_length if replay_sequence_length > 1: self.replay_batch_size = int( max(1, replay_batch_size // replay_sequence_length)) logger.info( "Since replay_sequence_length={} and replay_batch_size={}, " "we will replay {} sequences at a time.".format( replay_sequence_length, replay_batch_size, self.replay_batch_size)) if replay_mode not in ["lockstep", "independent"]: raise ValueError("Unsupported replay mode: {}".format(replay_mode)) def gen_replay(): while True: yield self.replay() ParallelIteratorWorker.__init__(self, gen_replay, False) def new_buffer(): return PrioritizedReplayBuffer( self.buffer_size, alpha=prioritized_replay_alpha) self.replay_buffers = collections.defaultdict(new_buffer) # Metrics self.add_batch_timer = TimerStat() self.replay_timer = TimerStat() self.update_priorities_timer = TimerStat() self.num_added = 0 # Make externally accessible for testing. global _local_replay_buffer _local_replay_buffer = self # If set, return this instead of the usual data for testing. self._fake_batch = None @staticmethod def get_instance_for_testing(): global _local_replay_buffer return _local_replay_buffer def get_host(self): return platform.node() def add_batch(self, batch): # Make a copy so the replay buffer doesn't pin plasma memory. batch = batch.copy() # Handle everything as if multiagent if isinstance(batch, SampleBatch): batch = MultiAgentBatch({DEFAULT_POLICY_ID: batch}, batch.count) with self.add_batch_timer: if self.replay_mode == "lockstep": # Note that prioritization is not supported in this mode. 
for s in batch.timeslices(self.replay_sequence_length): self.replay_buffers[_ALL_POLICIES].add(s, weight=None) else: for policy_id, b in batch.policy_batches.items(): for s in b.timeslices(self.replay_sequence_length): if "weights" in s: weight = np.mean(s["weights"]) else: weight = None self.replay_buffers[policy_id].add(s, weight=weight) self.num_added += batch.count def replay(self): if self._fake_batch: fake_batch = SampleBatch(self._fake_batch) return MultiAgentBatch({ DEFAULT_POLICY_ID: fake_batch }, fake_batch.count) if self.num_added < self.replay_starts: return None with self.replay_timer: if self.replay_mode == "lockstep": return self.replay_buffers[_ALL_POLICIES].sample( self.replay_batch_size, beta=self.prioritized_replay_beta) else: samples = {} for policy_id, replay_buffer in self.replay_buffers.items(): samples[policy_id] = replay_buffer.sample( self.replay_batch_size, beta=self.prioritized_replay_beta) return MultiAgentBatch(samples, self.replay_batch_size) def update_priorities(self, prio_dict): with self.update_priorities_timer: for policy_id, (batch_indexes, td_errors) in prio_dict.items(): new_priorities = ( np.abs(td_errors) + self.prioritized_replay_eps) self.replay_buffers[policy_id].update_priorities( batch_indexes, new_priorities) def stats(self, debug=False): stat = { "add_batch_time_ms": round(1000 * self.add_batch_timer.mean, 3), "replay_time_ms": round(1000 * self.replay_timer.mean, 3), "update_priorities_time_ms": round( 1000 * self.update_priorities_timer.mean, 3), } for policy_id, replay_buffer in self.replay_buffers.items(): stat.update({ "policy_{}".format(policy_id): replay_buffer.stats(debug=debug) }) return stat ReplayActor = ray.remote(num_cpus=0)(LocalReplayBuffer)
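# --- Illustrative sketch (not part of the module above) -------------------------
# A standalone numpy sketch of the math PrioritizedReplayBuffer.sample() implements:
# items are drawn with probability proportional to priority**alpha, and each draw
# gets an importance-sampling weight (p_i * N) ** (-beta), normalized by the largest
# possible weight so weights stay in (0, 1]. It does not use the RLlib classes
# above; the priorities and batch size are made-up numbers.
import numpy as np

priorities = np.array([0.1, 1.0, 2.0, 0.5])    # e.g. |TD error| + eps per stored item
alpha, beta = 0.6, 0.4

probs = priorities ** alpha
probs /= probs.sum()                           # sampling distribution over the buffer

n = len(priorities)
idxes = np.random.choice(n, size=8, p=probs)   # proportional sampling

max_weight = (probs.min() * n) ** (-beta)      # weight of the rarest item
weights = (probs[idxes] * n) ** (-beta) / max_weight

print(idxes, weights)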
# Copyright (C) 2015 Artem Chepurnoy <artemchep@gmail.com> # # This script is published under the terms of the MIT license. # See http://opensource.org/licenses/mit-license.php # Python 3 is required import sys import itertools from datetime import datetime class Clue: """ A clue for solving the sudoku. Attributes: x The X coordinate in a matrix of sudoku. y The Y coordinate in a matrix of sudoku. possibilities The list of possible values. """ x = 0 y = 0 possibilities = [] def __str__(self): return '(x=%d y=%d possibilities=%s)' % (self.x, self.y, self.possibilities) class Sudoku: def __init__(self, sudoku="""8 0 0 0 0 0 0 0 0 0 0 3 6 0 0 0 0 0 0 7 0 0 9 0 2 0 0 0 5 0 0 0 7 0 0 0 0 0 0 0 4 5 7 0 0 0 0 0 1 0 0 0 3 0 0 0 1 0 0 0 0 6 8 0 0 8 5 0 0 0 1 0 0 9 0 0 0 0 4 0 0""", diagonal=False): sudoku = [[int(e) for e in row.split()] for row in sudoku.split('\n')] self._n = len(sudoku) for row in sudoku: if len(row) != self._n: raise ValueError("The sudoku is missing some values.") # Basics. self._line = range(self._n) self._matrix = [[i // self._n, i % self._n] for i in range(self._n ** 2)] self._link_map = self._create_link_map(diagonal) # Depth matrix. self._depth_matrix = [[[float(len(self._link_map[i][j])), i, j] for j in self._line] for i in self._line] self._depth_line = list(itertools.chain.from_iterable(self._depth_matrix)) # Calculate the current depth state. Initially, the ceil with most links is # the best choice to set into. k = max(e[0] for e in self._depth_line) + 2 for e in self._depth_line: e[0] = self._n - e[0] / k # Superposition matrix. # noinspection PyUnusedLocal self._x = [[list(range(-self._n, 0)) for j in self._line] for i in self._line] # Apply the initial values. for i, j in self._matrix: value = sudoku[i][j] if value: self.set(value, i, j) def _create_link_map(self, diagonal=False): n_region = int(self._n ** .5) # Check for the correct input. if n_region ** 2 != self._n: raise ValueError("Unsupported size of sudoku.") region = [[i // n_region, i % n_region] for i in self._line] # Create mapping. m = [] for i in self._line: column = [] for j in self._line: ceil = [] # Add row. ceil.extend([[e, j] for e in self._line if e != i]) # Add column. ceil.extend([[i, e] for e in self._line if e != j]) # Add region. for a, b in region: x = a + i // n_region * n_region y = b + j // n_region * n_region if x != i and y != j: ceil.append([x, y]) if diagonal: # Add main diagonal. if i == j: ceil.extend([[e, e] for e in self._line if e != i]) # Add sub-diagonal. if i == self._n - j - 1: ceil.extend([[e, self._n - e - 1] for e in self._line if e != j]) column.append(ceil) m.append(column) return m def set(self, value, x, y): """ :param value: The value to be set :param x: The X coordinate :param y: The Y coordinate """ if 0 < value <= self._n and -value in self._x[x][y]: self._set(-value, x, y) self._depth_line.remove(self._depth_matrix[x][y]) else: raise ValueError('Failed to set %d to [%d;%d]!' % (value, y + 1, x + 1)) # Re-sort the depth map. self._depth_line.sort(key=lambda e: e[0]) def clue(self, fast_search=True): """ :return: The best possible step. """ clue = Clue() clue.x = self._depth_line[0][1] clue.y = self._depth_line[0][2] clue.possibilities = [-e for e in self._x[clue.x][clue.y]] return clue def solve(self): """ :return: <i>True</i> if one or more solutions of this sudoku exists, <i>False</i> otherwise. """ solution = self._solve() self._x = solution return bool(solution) def _solve(self): if not self._depth_line: return self._x # Choose the best candidate. 
clue = self._depth_line[0] if not clue[0]: # Found an empty ceil with no # possible values. return None i, j = clue[1], clue[2] del self._depth_line[0] # Try all possibilities. x_value = self._x[i][j] for value in x_value: log = [] self._set(value, i, j, log) self._depth_line.sort(key=lambda e: e[0]) # Try to solve it. if self._solve() is not None: return self._x # Restore. for k in log: a, b = k >> 16, k & (1 << 16) - 1 self._x[a][b].append(value) self._depth_matrix[a][b][0] += 1 self._x[i][j] = x_value self._depth_line.insert(0, clue) self._depth_line.sort(key=lambda e: e[0]) return None def _set(self, value, i, j, fallback=None): self._x[i][j] = [-value] # Remove this element from # other linked cells. for a, b in self._link_map[i][j]: try: self._x[a][b].remove(value) self._depth_matrix[a][b][0] -= 1 # Remember the ceil's location if fallback is not None: fallback.append(a << 16 | b) except ValueError: pass @property def solution(self): return self._x @staticmethod def format(x): return '\n'.join([' '.join([str(e[0]) for e in row]) for row in x]) def solve(text): now = datetime.now() sudoku = Sudoku(text) solved = sudoku.solve() if solved: divider = ' '.join('-' for i in range(len(sudoku.solution))) print('The sudoku has been solved:') print(divider) print(Sudoku.format(sudoku.solution)) # noinspection PyUnusedLocal print(divider) print('Elapsed real time: %ss.' % (datetime.now() - now).total_seconds()) else: print('Failed to solve!') if __name__ == '__main__': if len(sys.argv) > 1: with open(sys.argv[1], 'r') as file: data = file.read() print(data) print() solve(data) else: solve('\n'.join([input('Line #%d: ' % (i + 1)) for i in range(int(input('Enter the size of the sudoku: ')))]))
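# --- Illustrative sketch (not part of the solver above) -------------------------
# A minimal usage sketch of the Sudoku class, shown here as a standalone snippet.
# It relies on the puzzle the constructor embeds by default (0 marks an empty cell);
# pass your own newline-separated grid of space-separated digits to Sudoku(...) instead.
s = Sudoku()
print('Best cell to try next:', s.clue())
if s.solve():
    print(Sudoku.format(s.solution))
else:
    print('Failed to solve!')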
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2017, 2018. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Qsphere visualization """ import re import sys import time from functools import reduce from string import Template import numpy as np from scipy import linalg from qiskit.visualization.utils import _validate_input_state from qiskit.visualization.exceptions import VisualizationError if ('ipykernel' in sys.modules) and ('spyder' not in sys.modules): try: from IPython.core.display import display, HTML except ImportError: print("Error importing IPython.core.display") def iplot_state_qsphere(rho, figsize=None): """ Create a Q sphere representation. Graphical representation of the input array, using a Q sphere for each eigenvalue. Args: rho (array): State vector or density matrix. figsize (tuple): Figure size in pixels. """ # HTML html_template = Template(""" <p> <div id="content_$divNumber" style="position: absolute; z-index: 1;"> <div id="qsphere_$divNumber"></div> </div> </p> """) # JavaScript javascript_template = Template(""" <script> requirejs.config({ paths: { qVisualization: "https://qvisualization.mybluemix.net/q-visualizations" } }); require(["qVisualization"], function(qVisualizations) { data = $data; qVisualizations.plotState("qsphere_$divNumber", "qsphere", data, $options); }); </script> """) rho = _validate_input_state(rho) if figsize is None: options = {} else: options = {'width': figsize[0], 'height': figsize[1]} qspheres_data = [] # Process data and execute num = int(np.log2(len(rho))) # get the eigenvectors and eigenvalues weig, stateall = linalg.eigh(rho) for _ in range(2**num): # start with the max probmix = weig.max() prob_location = weig.argmax() if probmix > 0.001: # print("The " + str(k) + "th eigenvalue = " + str(probmix)) # get the max eigenvalue state = stateall[:, prob_location] loc = np.absolute(state).argmax() # get the element location closes to lowest bin representation. 
for j in range(2**num): test = np.absolute(np.absolute(state[j]) - np.absolute(state[loc])) if test < 0.001: loc = j break # remove the global phase angles = (np.angle(state[loc]) + 2 * np.pi) % (2 * np.pi) angleset = np.exp(-1j*angles) state = angleset*state state.flatten() spherepoints = [] for i in range(2**num): # get x,y,z points element = bin(i)[2:].zfill(num) weight = element.count("1") number_of_divisions = n_choose_k(num, weight) weight_order = bit_string_index(element) angle = weight_order * 2 * np.pi / number_of_divisions zvalue = -2 * weight / num + 1 xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle) yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle) # get prob and angle - prob will be shade and angle color prob = np.real(np.dot(state[i], state[i].conj())) angles = (np.angle(state[i]) + 2 * np.pi) % (2 * np.pi) qpoint = { 'x': xvalue, 'y': yvalue, 'z': zvalue, 'prob': prob, 'phase': angles } spherepoints.append(qpoint) # Associate all points to one sphere sphere = { 'points': spherepoints, 'eigenvalue': probmix } # Add sphere to the spheres array qspheres_data.append(sphere) weig[prob_location] = 0 div_number = str(time.time()) div_number = re.sub('[.]', '', div_number) html = html_template.substitute({ 'divNumber': div_number }) javascript = javascript_template.substitute({ 'data': qspheres_data, 'divNumber': div_number, 'options': options }) display(HTML(html + javascript)) def n_choose_k(n, k): """Return the number of combinations for n choose k. Args: n (int): the total number of options . k (int): The number of elements. Returns: int: returns the binomial coefficient """ if n == 0: return 0 return reduce(lambda x, y: x * y[0] / y[1], zip(range(n - k + 1, n + 1), range(1, k + 1)), 1) def bit_string_index(text): """Return the index of a string of 0s and 1s.""" n = len(text) k = text.count("1") if text.count("0") != n - k: raise VisualizationError("s must be a string of 0 and 1") ones = [pos for pos, char in enumerate(text) if char == "1"] return lex_index(n, k, ones) def lex_index(n, k, lst): """Return the lex index of a combination.. Args: n (int): the total number of options . k (int): The number of elements. lst (list): list Returns: int: returns int index for lex order Raises: VisualizationError: if length of list is not equal to k """ if len(lst) != k: raise VisualizationError("list should have length k") comb = list(map(lambda x: n - 1 - x, lst)) dualm = sum([n_choose_k(comb[k - 1 - i], i + 1) for i in range(k)]) return int(dualm)
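# --- Illustrative sketch (no qiskit required) ------------------------------------
# How the loop above places a basis state on the Q sphere: the Hamming weight of the
# bit string picks the latitude band, and the string's rank among equal-weight
# strings (which is what bit_string_index() returns) picks the longitude within that
# band. The 3-qubit state '101' below is just an example.
from itertools import product
from math import comb
import numpy as np

num = 3
element = '101'
weight = element.count('1')

same_weight = [''.join(bits) for bits in product('01', repeat=num)
               if bits.count('1') == weight]     # already in lexicographic order
weight_order = same_weight.index(element)        # -> 1, matching bit_string_index('101')

number_of_divisions = comb(num, weight)          # C(3, 2) = 3 slots on this latitude
angle = weight_order * 2 * np.pi / number_of_divisions
zvalue = -2 * weight / num + 1
xvalue = np.sqrt(1 - zvalue ** 2) * np.cos(angle)
yvalue = np.sqrt(1 - zvalue ** 2) * np.sin(angle)
print(xvalue, yvalue, zvalue)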
# # Copyright (c) 2014 Juniper Networks, Inc. All rights reserved. # """ This file contains implementation of data model for SVC monitor """ from pysandesh.gen_py.sandesh.ttypes import SandeshLevel from cfgm_common.vnc_db import DBBase from cfgm_common import svc_info class DBBaseSM(DBBase): obj_type = __name__ class LoadbalancerPoolSM(DBBaseSM): _dict = {} obj_type = 'loadbalancer_pool' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.members = set() self.loadbalancer_healthmonitors = set() self.service_instance = None self.virtual_machine_interface = None self.virtual_ip = None self.update(obj_dict) self.last_sent = None # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.params = obj['loadbalancer_pool_properties'] self.provider = obj['loadbalancer_pool_provider'] self.members = set([lm['uuid'] for lm in obj.get('loadbalancer_members', [])]) self.id_perms = obj['id_perms'] self.parent_uuid = obj['parent_uuid'] self.display_name = obj['display_name'] self.update_single_ref('service_instance', obj) self.update_single_ref('virtual_ip', obj) self.update_single_ref('virtual_machine_interface', obj) self.update_multiple_refs('loadbalancer_healthmonitor', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] cls._manager.loadbalancer_agent.delete_loadbalancer_pool(obj) obj.update_single_ref('service_instance', {}) obj.update_single_ref('virtual_ip', {}) obj.update_single_ref('virtual_machine_interface', {}) obj.update_multiple_refs('loadbalancer_healthmonitor', {}) del cls._dict[uuid] # end delete def add(self): self.last_sent = \ self._manager.loadbalancer_agent.loadbalancer_pool_add(self) if len(self.members): for member in self.members: member_obj = LoadbalancerMemberSM.get(member) if member_obj: member_obj.last_sent = \ self._manager.loadbalancer_agent.loadbalancer_member_add(member_obj) if self.virtual_ip: vip_obj = VirtualIpSM.get(self.virtual_ip) if vip_obj: vip_obj.last_sent = \ self._manager.loadbalancer_agent.virtual_ip_add(vip_obj) # end add # end class LoadbalancerPoolSM class LoadbalancerMemberSM(DBBaseSM): _dict = {} obj_type = 'loadbalancer_member' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.loadbalancer_pool = {} self.update(obj_dict) self.last_sent = None if self.loadbalancer_pool: parent = LoadbalancerPoolSM.get(self.loadbalancer_pool) parent.members.add(self.uuid) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.params = obj['loadbalancer_member_properties'] self.loadbalancer_pool = self.get_parent_uuid(obj) self.id_perms = obj['id_perms'] # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] cls._manager.loadbalancer_agent.delete_loadbalancer_member(obj) if obj.loadbalancer_pool: parent = LoadbalancerPoolSM.get(obj.loadbalancer_pool) if parent: parent.members.discard(obj.uuid) del cls._dict[uuid] # end delete # end class LoadbalancerMemberSM class VirtualIpSM(DBBaseSM): _dict = {} obj_type = 'virtual_ip' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machine_interface = None self.loadbalancer_pool = None self.update(obj_dict) self.last_sent = None # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.params = obj['virtual_ip_properties'] self.update_single_ref('virtual_machine_interface', obj) 
self.update_single_ref('loadbalancer_pool', obj) self.id_perms = obj['id_perms'] self.parent_uuid = obj['parent_uuid'] self.display_name = obj['display_name'] # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] cls._manager.loadbalancer_agent.delete_virtual_ip(obj) obj.update_single_ref('virtual_machine_interface', {}) obj.update_single_ref('loadbalancer_pool', {}) del cls._dict[uuid] # end delete # end class VirtualIpSM class HealthMonitorSM(DBBaseSM): _dict = {} obj_type = 'loadbalancer_healthmonitor' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.loadbalancer_pools = set() self.last_sent = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.params = obj['loadbalancer_healthmonitor_properties'] self.update_multiple_refs('loadbalancer_pool', obj) self.id_perms = obj['id_perms'] self.parent_uuid = obj['parent_uuid'] self.display_name = obj['display_name'] self.last_sent = self._manager.loadbalancer_agent.update_hm(self) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('loadbalancer_pool', {}) del cls._dict[uuid] # end delete # end class HealthMonitorSM class VirtualMachineSM(DBBaseSM): _dict = {} obj_type = 'virtual_machine' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_instance = None self.virtual_router = None self.virtual_machine_interfaces = set() self.virtualization_type = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.update_single_ref('service_instance', obj) self.update_single_ref('virtual_router', obj) self.update_multiple_refs('virtual_machine_interface', obj) self.display_name = obj['display_name'] display_list = self.display_name.split('__') if self.service_instance and len(display_list) == 5: self.virtualization_type = display_list[-1] self.proj_fq_name = display_list[0:2] self.index = int(display_list[-2]) - 1 # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_single_ref('service_instance', {}) obj.update_single_ref('virtual_router', {}) obj.update_multiple_refs('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end VirtualMachineSM class VirtualRouterSM(DBBaseSM): _dict = {} obj_type = 'virtual_router' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machines = set() self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.update_multiple_refs('virtual_machine', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('virtual_machine', {}) del cls._dict[uuid] # end delete # end VirtualRouterSM class VirtualMachineInterfaceSM(DBBaseSM): _dict = {} obj_type = 'virtual_machine_interface' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.params = None self.if_type = None self.virtual_ip = None self.virtual_network = None self.virtual_machine = None self.loadbalancer_pool = None self.logical_interface = None self.instance_ip = None self.floating_ip = None self.interface_route_table = None self.security_group = None self.update(obj_dict) # end __init__ def update(self, 
obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] if obj.get('virtual_machine_interface_properties', None): self.params = obj['virtual_machine_interface_properties'] self.if_type = self.params.get('service_interface_type', None) self.update_single_ref('virtual_ip', obj) self.update_single_ref('loadbalancer_pool', obj) self.update_single_ref('instance_ip', obj) self.update_single_ref('floating_ip', obj) self.update_single_ref('virtual_network', obj) self.update_single_ref('virtual_machine', obj) self.update_single_ref('logical_interface', obj) self.update_single_ref('interface_route_table', obj) self.update_single_ref('security_group', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_single_ref('virtual_ip', {}) obj.update_single_ref('loadbalancer_pool', {}) obj.update_single_ref('instance_ip', {}) obj.update_single_ref('floating_ip', {}) obj.update_single_ref('virtual_network', {}) obj.update_single_ref('virtual_machine', {}) obj.update_single_ref('logical_interface', {}) obj.update_single_ref('interface_route_table', {}) obj.update_single_ref('security_group', {}) del cls._dict[uuid] # end delete # end VirtualMachineInterfaceSM class ServiceInstanceSM(DBBaseSM): _dict = {} obj_type = 'service_instance' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_template = None self.loadbalancer_pool = None self.virtual_machines = set() self.params = None self.state = 'init' self.launch_count = 0 self.image = None self.flavor = None self.max_instances = 0 self.availability_zone = None self.ha_mode = None self.vr_id = None self.vn_changed = False self.local_preference = [None, None] self.vn_info = [] self.update(obj_dict) if self.ha_mode == 'active-standby': self.max_instances = 2 self.local_preference = [svc_info.get_active_preference(), svc_info.get_standby_preference()] # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.proj_name = obj['fq_name'][-2] self.check_vn_changes(obj) self.params = obj['service_instance_properties'] self.update_single_ref('service_template', obj) self.update_single_ref('loadbalancer_pool', obj) self.update_multiple_refs('virtual_machine', obj) self.id_perms = obj['id_perms'] self.vr_id = self.params.get('virtual_router_id', None) self.ha_mode = self.params.get('ha_mode', None) if self.ha_mode != 'active-standby': scale_out = self.params.get('scale_out', None) if scale_out: self.max_instances = scale_out.get('max_instances', 1) # end update def check_vn_changes(self, obj): self.vn_changed = False if not self.params: return old_ifs = self.params.get('interface_list', []) new_ifs = obj['service_instance_properties'].get('interface_list', []) for index in range(0, len(old_ifs)): try: old_if = old_ifs[index] new_if = new_ifs[index] except IndexError: continue if not old_if['virtual_network'] or not new_if['virtual_network']: continue if old_if['virtual_network'] != new_if['virtual_network']: self.vn_changed = True return #end check_vn_changes @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_single_ref('service_template', {}) obj.update_single_ref('loadbalancer_pool', {}) obj.update_multiple_refs('virtual_machine', {}) del cls._dict[uuid] # end delete # end class ServiceInstanceSM class ServiceTemplateSM(DBBaseSM): _dict = {} obj_type = 'service_template' def 
__init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_instances = set() self.virtualization_type = 'virtual-machine' self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.params = obj.get('service_template_properties') if self.params: self.virtualization_type = self.params.get( 'service_virtualization_type') or 'virtual-machine' self.update_multiple_refs('service_instance', obj) self.id_perms = obj['id_perms'] # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('service_instance', {}) del cls._dict[uuid] # end delete # end class ServiceTemplateSM class VirtualNetworkSM(DBBaseSM): _dict = {} obj_type = 'virtual_network' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machine_interfaces = set() obj_dict = self.update(obj_dict) self.add_to_parent(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.update_multiple_refs('virtual_machine_interface', obj) return obj # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('virtual_machine_interface', {}) obj.remove_from_parent() del cls._dict[uuid] # end delete # end class VirtualNetworkSM class FloatingIpSM(DBBaseSM): _dict = {} obj_type = 'floating_ip' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.address = None self.virtual_machine_interfaces = set() self.virtual_ip = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.address = obj['floating_ip_address'] self.update_multiple_refs('virtual_machine_interface', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end class FloatingIpSM class InstanceIpSM(DBBaseSM): _dict = {} obj_type = 'instance_ip' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.address = None self.virtual_machine_interfaces = set() self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.address = obj.get('instance_ip_address', None) self.update_multiple_refs('virtual_machine_interface', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end class InstanceIpSM class LogicalInterfaceSM(DBBaseSM): _dict = {} obj_type = 'logical_interface' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machine_interface = None self.logical_interface_vlan_tag = 0 self.update(obj_dict) if self.physical_interface: parent = PhysicalInterfaceSM.get(self.physical_interface) elif self.physical_router: parent = PhysicalRouterSM.get(self.physical_router) if parent: parent.logical_interfaces.add(self.uuid) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) if obj['parent_type'] == 'physical-router': self.physical_router = self.get_parent_uuid(obj) self.physical_interface = None else: 
self.physical_interface = self.get_parent_uuid(obj) self.physical_router = None self.update_single_ref('virtual_machine_interface', obj) self.name = obj['fq_name'][-1] self.logical_interface_vlan_tag = obj.get('logical_interface_vlan_tag', 0) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] if obj.physical_interface: parent = PhysicalInterfaceSM.get(obj.physical_interface) elif obj.physical_router: parent = PhysicalInterfaceSM.get(obj.physical_router) if parent: parent.logical_interfaces.discard(obj.uuid) obj.update_single_ref('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end LogicalInterfaceSM class PhysicalInterfaceSM(DBBaseSM): _dict = {} obj_type = 'physical_interface' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.update(obj_dict) pr = PhysicalRouterSM.get(self.physical_router) if pr: pr.physical_interfaces.add(self.uuid) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.physical_router = self.get_parent_uuid(obj) self.logical_interfaces = set([li['uuid'] for li in obj.get('logical_interfaces', [])]) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] pr = PhysicalRouterSM.get(obj.physical_router) if pr: pr.physical_interfaces.discard(obj.uuid) del cls._dict[uuid] # end delete # end PhysicalInterfaceSM class PhysicalRouterSM(DBBaseSM): _dict = {} obj_type = 'physical_router' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.management_ip = obj.get('physical_router_management_ip') self.vendor = obj.get('physical_router_vendor_name') self.physical_interfaces = set([pi['uuid'] for pi in obj.get('physical_interfaces', [])]) self.logical_interfaces = set([li['uuid'] for li in obj.get('logical_interfaces', [])]) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] del cls._dict[uuid] # end delete # end PhysicalRouterSM class ProjectSM(DBBaseSM): _dict = {} obj_type = 'project' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_instances = set() self.virtual_networks = set() obj_dict = self.update(obj_dict) self.set_children('virtual_network', obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.update_multiple_refs('service_instance', obj) return obj # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('service_instance', {}) del cls._dict[uuid] # end delete # end ProjectSM class DomainSM(DBBaseSM): _dict = {} obj_type = 'domain' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.fq_name = obj['fq_name'] # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] del cls._dict[uuid] # end delete # end DomainSM class SecurityGroupSM(DBBaseSM): _dict = {} obj_type = 'security_group' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] if self.name != 'default': self.delete(self.uuid) # end 
update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] del cls._dict[uuid] # end delete # end SecurityGroupSM class InterfaceRouteTableSM(DBBaseSM): _dict = {} obj_type = 'interface_route_table' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machine_interfaces = set() self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.update_multiple_refs('virtual_machine_interface', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end InterfaceRouteTableSM class ServiceApplianceSM(DBBaseSM): _dict = {} obj_type = 'service_appliance' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_appliance_set = None self.kvpairs = [] self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] kvpairs = obj.get('service_appliance_properties', None) if kvpairs: self.kvpairs = kvpairs.get('key_value_pair', []) self.user_credential = obj.get('service_appliance_user_credentials', None) self.ip_address = obj.get('service_appliance_ip_address', None) self.service_appliance_set = self.get_parent_uuid(obj) if self.service_appliance_set: parent = ServiceApplianceSetSM.get(self.service_appliance_set) parent.service_appliances.add(self.uuid) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] if obj.service_appliance_set: parent = ServiceApplianceSetSM.get(obj.service_appliance_set) if parent: parent.service_appliances.discard(obj.uuid) del cls._dict[uuid] # end delete # end ServiceApplianceSM class ServiceApplianceSetSM(DBBaseSM): _dict = {} obj_type = 'service_appliance_set' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_appliances = set() self.kvpairs = [] self.ha_mode = "standalone" self.update(obj_dict) # end __init__ def add(self): self._manager.loadbalancer_agent.load_driver(self) # end add def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.driver = obj.get('service_appliance_driver', None) kvpairs = obj.get('service_appliance_set_properties', None) if kvpairs: self.kvpairs = kvpairs.get('key_value_pair', []) self.service_appliances = set([sa['uuid'] for sa in obj.get('service_appliances', [])]) if 'service_appliance_ha_mode' in obj: self.ha_mode = obj['service_appliance_ha_mode'] # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] cls._manager.loadbalancer_agent.unload_driver(obj) del cls._dict[uuid] # end delete # end ServiceApplianceSetSM class LogicalRouterSM(DBBaseSM): _dict = {} obj_type = 'logical_router' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_instance = None self.virtual_network = None self.virtual_machine_interfaces = set() self.last_virtual_machine_interfaces = set() self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.parent_uuid = obj['parent_uuid'] self.update_single_ref('service_instance', obj) self.update_multiple_refs('virtual_machine_interface', obj) self.update_single_ref('virtual_network', obj) self.name = obj['fq_name'][-1] 
    # end update

    @classmethod
    def delete(cls, uuid):
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.snat_agent.delete_snat_instance(obj)
        obj.update_single_ref('service_instance', {})
        obj.update_single_ref('virtual_network', {})
        obj.update_multiple_refs('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete
# end LogicalRouterSM
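# --- Illustrative sketch (not part of the SVC monitor model) ---------------------
# Schematic sketch of the pattern every *SM class in this module follows; the
# 'example_object' type is made up for illustration and is not part of the Contrail
# schema. Each class keeps a per-type cache in _dict, __init__ defers to update(),
# update() re-reads the object and refreshes its references, and delete() drops the
# references before evicting the cached entry.
class ExampleSM(DBBaseSM):
    _dict = {}
    obj_type = 'example_object'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interfaces = set()
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.update_multiple_refs('virtual_machine_interface', obj)
    # end update

    @classmethod
    def delete(cls, uuid):
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_multiple_refs('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete
# end ExampleSM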
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. import os import time from datetime import datetime import random import string import webbrowser import json import requests from parlai.core.agents import create_agent_from_shared from .setup_aws import setup_aws, check_mturk_balance, create_hit_type, create_hit_with_hit_type, setup_aws_credentials def _get_random_alphanumeric_string(N): return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(N)) def _setup_relay(task_config, num_hits, is_sandbox): """Sets up relay server """ # set up relay server html_api_endpoint_url, json_api_endpoint_url, requester_key_gt = setup_aws(task_config, num_hits, is_sandbox) return html_api_endpoint_url, json_api_endpoint_url, requester_key_gt def _send_new_message(json_api_endpoint_url, task_group_id, conversation_id, agent_id, message_text=None, reward=None, episode_done=False): post_data_dict = { 'method_name': 'send_new_message', 'task_group_id': task_group_id, 'conversation_id': conversation_id, 'cur_agent_id': agent_id, 'episode_done': episode_done, } if message_text: post_data_dict['text'] = message_text if reward: post_data_dict['reward'] = reward request = requests.post(json_api_endpoint_url, data=json.dumps(post_data_dict)) return json.loads(request.json()) def _get_new_messages(json_api_endpoint_url, task_group_id, after_message_id, excluded_agent_id=None): params = { 'method_name': 'get_new_messages', 'task_group_id': task_group_id, 'last_message_id': after_message_id, } if excluded_agent_id: params['excluded_agent_id'] = excluded_agent_id request = requests.get(json_api_endpoint_url, params=params) return json.loads(request.json()) def _get_pending_review_count(json_api_endpoint_url, task_group_id, requester_key): params = { 'method_name': 'get_pending_review_count', 'task_group_id': task_group_id, 'requester_key': requester_key } request = requests.get(json_api_endpoint_url, params=params) return request.json() def _get_all_review_status(json_api_endpoint_url, task_group_id, requester_key): params = { 'method_name': 'get_all_review_status', 'task_group_id': task_group_id, 'requester_key': requester_key } request = requests.get(json_api_endpoint_url, params=params) return request.json() def create_hits(opt, task_config, task_module_name, bot, chat_page_only=False): num_hits = opt['num_hits'] hit_reward = opt['reward'] is_sandbox = opt['is_sandbox'] verbose = opt['verbose'] print("\nYou are going to allow workers from Amazon Mechanical Turk to chat with your dialog model running on your local machine.\nDuring this process, Internet connection is required, and you should turn off your computer's auto-sleep feature.\n") key_input = input("Please press Enter to continue... 
") print("") setup_aws_credentials() if not check_mturk_balance(num_hits=num_hits, hit_reward=hit_reward, is_sandbox=is_sandbox): return task_group_id = str(int(time.time())) + '_' + _get_random_alphanumeric_string(10) # Random string to further avoid collision print('Setting up MTurk backend...') html_api_endpoint_url, json_api_endpoint_url, requester_key_gt = _setup_relay(task_config, num_hits, is_sandbox) approval_index_url_template = html_api_endpoint_url + "?method_name=approval_index&task_group_id={{task_group_id}}&conversation_id=1&cur_agent_id={{cur_agent_id}}&requester_key="+requester_key_gt worker_agent_id = task_config['worker_agent_id'] bot_agent_id = bot.getID() cids = range(1, num_hits+1) cid_map = {cid: i for i, cid in enumerate(cids)} c_done_map = {cid: False for cid in cids} logs = {cid: [] for cid in cids} shared = bot.share() bots = [] last_message_id = -1 # If the bot needs to send the first message of the conversation, it will send it here for cid in cids: new_bot = create_agent_from_shared(shared) new_bot.conversation_id = cid bots.append(new_bot) response = new_bot.act() if response: if response.get('episode_done', False): c_done_map[cid] = True if verbose: print('Conversation '+str(cid)+' - Bot says: ' + str(response)) logs[cid].append(response) new_message = _send_new_message( json_api_endpoint_url=json_api_endpoint_url, task_group_id=task_group_id, conversation_id=cid, agent_id=bot_agent_id, message_text=response.get('text', None), reward=response.get('reward', None), episode_done=response.get('episode_done', False), ) if new_message['message_id'] > last_message_id: last_message_id = new_message['message_id'] hits_created = False conversations_remaining = set(cids) # Main loop for polling and handling new messages while len(conversations_remaining) > 0: ret = _get_new_messages( json_api_endpoint_url=json_api_endpoint_url, task_group_id=task_group_id, after_message_id=last_message_id, excluded_agent_id=bot_agent_id, ) conversation_dict = ret['conversation_dict'] new_last_message_id = ret['last_message_id'] if new_last_message_id: last_message_id = new_last_message_id time.sleep(1) for conversation_id, new_messages in conversation_dict.items(): conversation_id = int(conversation_id) if conversation_id in conversations_remaining and len(new_messages) > 0: agent = bots[cid_map[conversation_id]] for new_message in new_messages: if verbose: print('Conversation '+str(conversation_id)+' - Bot received: ' + str(new_message)) logs[conversation_id].append(new_message) agent.observe(new_message) if new_message.get('episode_done', False) or c_done_map[conversation_id]: # We're done here conversations_remaining.remove(conversation_id) print('Conversation '+str(conversation_id)+' is DONE!\n') else: # Agent still needs to reply response = agent.act() if response: if response.get('episode_done', False): c_done_map[conversation_id] = True if verbose: print('Conversation '+str(conversation_id)+' - Bot says: ' + str(response)) logs[conversation_id].append(response) _send_new_message( json_api_endpoint_url=json_api_endpoint_url, task_group_id=task_group_id, conversation_id=conversation_id, agent_id=bot_agent_id, message_text=response.get('text', None), reward=response.get('reward', None), episode_done=response.get('episode_done', False), ) # We don't create new HITs until this point, so that the HIT page will always have the conversation fully populated. 
if not hits_created: print('Creating HITs...') hit_type_id = create_hit_type( hit_title=task_config['hit_title'], hit_description=task_config['hit_description'] + ' (ID: ' + task_group_id + ')', hit_keywords=task_config['hit_keywords'], hit_reward=hit_reward, is_sandbox=is_sandbox ) mturk_chat_url = None mturk_page_url = None for cid in cids: mturk_chat_url = html_api_endpoint_url + "?method_name=chat_index&task_group_id="+str(task_group_id)+"&conversation_id="+str(cid)+"&cur_agent_id="+str(worker_agent_id) if not chat_page_only: mturk_page_url = create_hit_with_hit_type( page_url=mturk_chat_url, hit_type_id=hit_type_id, is_sandbox=is_sandbox ) print("MTurk setup done.\n") if chat_page_only: webbrowser.open(mturk_chat_url) else: print("Link to your HIT: " + mturk_page_url + "\n") print("Waiting for Turkers to complete the tasks... (Please don't close your laptop or put your computer into sleep or standby mode.)\n") hits_created = True while _get_pending_review_count(json_api_endpoint_url=json_api_endpoint_url, task_group_id=task_group_id, requester_key=requester_key_gt) != num_hits: time.sleep(2) mturk_approval_url = html_api_endpoint_url + "?method_name=approval_index&task_group_id="+str(task_group_id)+"&conversation_id=1&cur_agent_id="+worker_agent_id+"&requester_key="+requester_key_gt print("\nAll HITs are done! Please go to the following link to approve/reject them (or they will be auto-approved in 4 weeks if no action is taken):\n") print(mturk_approval_url) print("") approval_status_dict = {cid: '' for cid in cids} # Loop for checking approval status while _get_pending_review_count(json_api_endpoint_url=json_api_endpoint_url, task_group_id=task_group_id, requester_key=requester_key_gt) > 0: time.sleep(2) print("Approvals are done!") for hit_info in _get_all_review_status(json_api_endpoint_url=json_api_endpoint_url, task_group_id=task_group_id, requester_key=requester_key_gt): conversation_id = hit_info['conversation_id'] approval_status_dict[conversation_id] = hit_info['approval_status'] logs_approved = {cid:log for (cid,log) in logs.items() if approval_status_dict[cid] == 'approved'} logs_rejected = {cid:log for (cid,log) in logs.items() if approval_status_dict[cid] == 'rejected'} # Saving logs to file # Log format: {conversation_id: [list of messages in the conversation]} mturk_log_path = opt['mturk_log_path'] task_group_path = os.path.join(mturk_log_path, task_module_name + '_' + datetime.now().strftime('%Y-%m-%d_%H:%M:%S')) os.makedirs(task_group_path) with open(os.path.join(task_group_path, 'approved.json'), 'w') as fout: fout.write(json.dumps(logs_approved)) with open(os.path.join(task_group_path, 'rejected.json'), 'w') as fout: fout.write(json.dumps(logs_rejected)) print("All conversations are saved to "+opt['mturk_log_path']+" in JSON format.\n")
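# --- Illustrative sketch (not part of the MTurk task code above) -----------------
# A self-contained sketch of the incremental polling used in the main loop above:
# the caller remembers the newest message id it has processed and only asks the
# relay server for anything newer. fetch_after() is a toy stand-in for
# _get_new_messages() with canned data, not a real ParlAI or MTurk API.
import time

_FAKE_FEED = [
    {'message_id': 1, 'conversation_id': 1, 'text': 'hi'},
    {'message_id': 2, 'conversation_id': 2, 'text': 'thanks', 'episode_done': True},
    {'message_id': 3, 'conversation_id': 1, 'text': 'bye', 'episode_done': True},
]

def fetch_after(last_id):
    new = [m for m in _FAKE_FEED if m['message_id'] > last_id]
    by_conversation = {}
    for m in new:
        by_conversation.setdefault(m['conversation_id'], []).append(m)
    return {
        'conversation_dict': by_conversation,
        'last_message_id': max((m['message_id'] for m in new), default=None),
    }

last_message_id = -1
conversations_remaining = {1, 2}
while conversations_remaining:
    ret = fetch_after(last_message_id)
    if ret['last_message_id']:
        last_message_id = ret['last_message_id']
    for cid, new_messages in ret['conversation_dict'].items():
        for message in new_messages:
            print('Conversation %d received: %s' % (cid, message['text']))
            if message.get('episode_done', False):
                conversations_remaining.discard(cid)
    time.sleep(0.1)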
#!/usr/bin/env python # cmpcodesize/main.py - Command-line entry point for cmpcodesize -*- python -*- # # This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See https://swift.org/LICENSE.txt for license information # See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors from __future__ import print_function import argparse import collections import csv import glob import os import sys from cmpcodesize.compare import \ compare_function_sizes, compare_sizes_of_file, list_function_sizes,\ read_sizes SHORTCUTS = { "O": "bin/Benchmark_O", "Osize": "bin/Benchmark_Osize", "Onone": "bin/Benchmark_Onone", "dylib": "lib/swift/macosx/x86_64/libswiftCore.dylib", } def main(): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=""" Compares code sizes of "new" files, taking "old" files as a reference. Environment variables: SWIFT_NEW_BUILDDIR The new build-dir E.g. .../swiftnew/build/Ninja-ReleaseAssert+stdlib-Release/swift-macosx-x86_64 SWIFT_OLD_BUILDDIR The old build-dir E.g. .../swiftold/build/Ninja-ReleaseAssert+stdlib-Release/swift-macosx-x86_64 How to specify files: 1) No files: Compares codesize of the Benchmark_* executables and the swiftCore dylib in the new and old build-dirs. Example: cmpcodesize 2) One or more paths relative to the build-dirs (can be a pattern): Compares the files in the new and old build-dirs. Aliases: O => bin/Benchmark_O Osize => bin/Benchmark_Osize Onone => bin/Benchmark_Onone dylib => lib/swift/macosx/x86_64/libswiftCore.dylib Examples: cmpcodesize Onone cmpcodesize benchmark/PerfTestSuite/O/*.o 3) Two files: Compares these two files (the first is the old file). Example: cmpcodesize test.o newversion.o 4) Two lists of files, separated by '--': Compares a set of files. Example: cmpcodesize olddir/*.o -- newdir/*.o 5) One file (only available with the -l option): Lists function sizes for that file Example: cmpcodesize -l test.o""") # Optional arguments. parser.add_argument('-a', '--additional-sections', help='Show sizes of additional sections.', action='store_true', dest='all_sections', default=False) parser.add_argument('-c', '--category', help='Show functions by category.', action='store_true', dest='list_categories', default=False) parser.add_argument('-l', '--list', help='List all functions (can be a very long list). ' + 'Cannot be used in conjunction with ' + '--additional-sections or --category. ' + 'You must specify between one and two files ' + 'when using this option.', action='store_true', dest='list_functions', default=False) parser.add_argument('-s', '--summarize', help='Summarize the sizes of multiple files instead ' + 'of listing each file separately.', action='store_true', dest='sum_sizes', default=False) parser.add_argument('-p', '--parseable', help='Generate output as CSV that can be parsed by ' + 'other programs.', action='store_true', default=False) parser.add_argument('-o', '--old-build-directory', help='The directory containing the baseline objects ' + 'against which to compare sizes.', action='store', dest='old_build_dir', default=None) parser.add_argument('-n', '--new-build-directory', help='The directory containing the new objects whose' + 'sizes are to be compared against the baseline.', action='store', dest='new_build_dir', default=None) # Positional arguments. 
    # These can be specified in means beyond what argparse supports,
    # so we gather them in a list and parse them manually.
    parser.add_argument('files', nargs='*',
                        help='A list of old and new files.')

    # argparse can't handle an '--' argument, so we replace it with
    # a custom identifier.
    separator_token = '*-*-*'
    parsed_arguments = parser.parse_args(
        [separator_token if arg == '--' else arg for arg in sys.argv[1:]])

    if parsed_arguments.list_functions:
        # --list is mutually exclusive with both --additional-sections
        # and --category. argparse is only capable of expressing mutual
        # exclusivity among options, not among groups of options, so
        # we detect this case manually.
        assert (not parsed_arguments.all_sections and
                not parsed_arguments.list_categories), \
            'Incorrect usage: --list cannot be specified in conjunction ' + \
            'with --additional-sections or --category.'

        # A file must be specified when using --list.
        assert parsed_arguments.files, \
            'Incorrect usage: Must specify between one and two files when ' + \
            'using --list, but you specified no files.'

    csv_out = None
    if parsed_arguments.parseable:
        csv_out = csv.writer(sys.stdout)

    if separator_token in parsed_arguments.files:
        separator_index = parsed_arguments.files.index(separator_token)
        old_files = parsed_arguments.files[:separator_index]
        new_files = parsed_arguments.files[separator_index + 1:]
    else:
        old_file_args = parsed_arguments.files

        old_build_dir = parsed_arguments.old_build_dir
        if not old_build_dir:
            old_build_dir = os.environ.get("SWIFT_OLD_BUILDDIR")

        new_build_dir = parsed_arguments.new_build_dir
        if not new_build_dir:
            new_build_dir = os.environ.get("SWIFT_NEW_BUILDDIR")

        if not parsed_arguments.files:
            assert old_build_dir and new_build_dir, \
                'Incorrect usage: You must specify either a list of ' + \
                'files, or have both $SWIFT_OLD_BUILDDIR and ' + \
                '$SWIFT_NEW_BUILDDIR environment variables set.\n' + \
                '$SWIFT_OLD_BUILDDIR = {0}\n$SWIFT_NEW_BUILDDIR = {1}'.format(
                    old_build_dir, new_build_dir)
            old_file_args = list(SHORTCUTS.keys())

        old_files = []
        new_files = []
        num_expanded = 0
        for file in old_file_args:
            if file in SHORTCUTS:
                file = SHORTCUTS[file]

            if not file.startswith("./") and old_build_dir and new_build_dir:
                old_expanded = glob.glob(os.path.join(old_build_dir, file))
                new_expanded = glob.glob(os.path.join(new_build_dir, file))
                if old_expanded and new_expanded:
                    old_files.extend(old_expanded)
                    new_files.extend(new_expanded)
                    num_expanded += 1

        if num_expanded != 0 and num_expanded != len(old_file_args):
            sys.exit("mix of expanded/not-expanded arguments")
        if num_expanded == 0:
            if len(old_file_args) > 2:
                sys.exit("too many arguments")
            old_files = old_file_args[0:1]
            new_files = old_file_args[1:2]

    for file in (old_files + new_files):
        if not os.path.isfile(file):
            sys.exit("file " + file + " not found")

    if parsed_arguments.list_functions:
        if not new_files:
            sizes = collections.defaultdict(int)
            for file in old_files:
                read_sizes(sizes, file, True, False)
            print(os.linesep.join(list_function_sizes(sizes.items())))
        else:
            compare_function_sizes(old_files, new_files, csv=csv_out)
    else:
        if csv_out:
            csv_out.writerow(["Title", "Section", "Old", "Old Relative",
                              "New", "New Relative", "Percentage Change"])
        else:
            print("%-26s%16s %14s %14s %s" %
                  ("Title", "Section", "Old", "New", "Percent"))
        if parsed_arguments.sum_sizes:
            compare_sizes_of_file(old_files, new_files,
                                  parsed_arguments.all_sections,
                                  parsed_arguments.list_categories,
                                  csv=csv_out)
        else:
            if len(old_files) != len(new_files):
                sys.exit("number of new files must be the same as the "
                         "number of old files")

            old_files.sort()
            new_files.sort()

            for old_file, new_file in zip(old_files, new_files):
                compare_sizes_of_file([old_file], [new_file],
                                      parsed_arguments.all_sections,
                                      parsed_arguments.list_categories,
                                      csv=csv_out)


if __name__ == '__main__':
    main()
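# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (separate from the tool above; the helper
# name is illustrative) of the '--' workaround used in main(): argparse
# consumes a bare '--' itself, so the separator is swapped for a placeholder
# token before parsing and located again afterwards to split the positional
# file list into "old" and "new" groups.
import argparse


def split_old_new(argv, separator_token='*-*-*'):
    parser = argparse.ArgumentParser()
    parser.add_argument('files', nargs='*')
    args = parser.parse_args(
        [separator_token if arg == '--' else arg for arg in argv])
    if separator_token in args.files:
        i = args.files.index(separator_token)
        return args.files[:i], args.files[i + 1:]
    return args.files, []


# split_old_new(['old1.o', 'old2.o', '--', 'new1.o', 'new2.o'])
# -> (['old1.o', 'old2.o'], ['new1.o', 'new2.o'])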
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models from ..._vendor import _convert_request from ...operations._agent_pools_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_available_agent_pool_versions_request, build_get_request, build_get_upgrade_profile_request, build_list_request, build_upgrade_node_image_version_request_initial T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class AgentPoolsOperations: """AgentPoolsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.containerservice.v2021_05_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def list( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> AsyncIterable["_models.AgentPoolListResult"]: """Gets a list of agent pools in the specified managed cluster. Gets a list of agent pools in the specified managed cluster. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param resource_name: The name of the managed cluster resource. 
:type resource_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either AgentPoolListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2021_05_01.models.AgentPoolListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.list.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("AgentPoolListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools'} # type: ignore @distributed_trace_async async def get( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> "_models.AgentPool": """Gets the specified managed cluster agent pool. Gets the specified managed cluster agent pool. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param resource_name: The name of the managed cluster resource. :type resource_name: str :param agent_pool_name: The name of the agent pool. 
:type agent_pool_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AgentPool, or the result of cls(response) :rtype: ~azure.mgmt.containerservice.v2021_05_01.models.AgentPool :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, agent_pool_name=agent_pool_name, template_url=self.get.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, resource_name: str, agent_pool_name: str, parameters: "_models.AgentPool", **kwargs: Any ) -> "_models.AgentPool": cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(parameters, 'AgentPool') request = build_create_or_update_request_initial( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, agent_pool_name=agent_pool_name, content_type=content_type, json=_json, template_url=self._create_or_update_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('AgentPool', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore @distributed_trace_async async def begin_create_or_update( self, resource_group_name: str, resource_name: str, agent_pool_name: str, parameters: "_models.AgentPool", **kwargs: Any ) -> AsyncLROPoller["_models.AgentPool"]: """Creates or updates an agent pool in the specified managed cluster. 
Creates or updates an agent pool in the specified managed cluster. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param resource_name: The name of the managed cluster resource. :type resource_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :param parameters: The agent pool to create or update. :type parameters: ~azure.mgmt.containerservice.v2021_05_01.models.AgentPool :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2021_05_01.models.AgentPool] :raises: ~azure.core.exceptions.HttpResponseError """ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, resource_name=resource_name, agent_pool_name=agent_pool_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore async def _delete_initial( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_delete_request_initial( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, agent_pool_name=agent_pool_name, template_url=self._delete_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore @distributed_trace_async async def begin_delete( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes an agent pool in the specified managed cluster. Deletes an agent pool in the specified managed cluster. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param resource_name: The name of the managed cluster resource. :type resource_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, resource_name=resource_name, agent_pool_name=agent_pool_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore @distributed_trace_async async def get_upgrade_profile( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> "_models.AgentPoolUpgradeProfile": """Gets the upgrade profile for an agent pool. Gets the upgrade profile for an agent pool. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :param resource_name: The name of the managed cluster resource. :type resource_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AgentPoolUpgradeProfile, or the result of cls(response) :rtype: ~azure.mgmt.containerservice.v2021_05_01.models.AgentPoolUpgradeProfile :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolUpgradeProfile"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_upgrade_profile_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, agent_pool_name=agent_pool_name, template_url=self.get_upgrade_profile.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('AgentPoolUpgradeProfile', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default'} # type: ignore @distributed_trace_async async def get_available_agent_pool_versions( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> "_models.AgentPoolAvailableVersions": """Gets a list of supported Kubernetes versions for the specified agent pool. See `supported Kubernetes versions <https://docs.microsoft.com/azure/aks/supported-kubernetes-versions>`_ for more details about the version lifecycle. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param resource_name: The name of the managed cluster resource. 
:type resource_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AgentPoolAvailableVersions, or the result of cls(response) :rtype: ~azure.mgmt.containerservice.v2021_05_01.models.AgentPoolAvailableVersions :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolAvailableVersions"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_available_agent_pool_versions_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.get_available_agent_pool_versions.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('AgentPoolAvailableVersions', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_available_agent_pool_versions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions'} # type: ignore async def _upgrade_node_image_version_initial( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> Optional["_models.AgentPool"]: cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AgentPool"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_upgrade_node_image_version_request_initial( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, agent_pool_name=agent_pool_name, template_url=self._upgrade_node_image_version_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = None if response.status_code == 202: deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _upgrade_node_image_version_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion'} # type: ignore @distributed_trace_async async def begin_upgrade_node_image_version( self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> AsyncLROPoller["_models.AgentPool"]: """Upgrades the node image version of an agent pool to the latest. Upgrading the node image version of an agent pool applies the newest OS and runtime updates to the nodes. 
AKS provides one new image per week with the latest updates. For more details on node image versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param resource_name: The name of the managed cluster resource. :type resource_name: str :param agent_pool_name: The name of the agent pool. :type agent_pool_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either AgentPool or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2021_05_01.models.AgentPool] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._upgrade_node_image_version_initial( resource_group_name=resource_group_name, resource_name=resource_name, agent_pool_name=agent_pool_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('AgentPool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_upgrade_node_image_version.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion'} # type: ignore
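# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated operations class above): one
# plausible way a caller could drive the begin_create_or_update long-running
# operation. The client entry point, credential setup, resource names and the
# dict-shaped AgentPool payload are assumptions made for illustration.
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.containerservice.aio import ContainerServiceClient


async def _example_scale_agent_pool():
    async with DefaultAzureCredential() as credential:
        async with ContainerServiceClient(credential, "<subscription-id>") as client:
            poller = await client.agent_pools.begin_create_or_update(
                resource_group_name="my-rg",          # assumed resource group
                resource_name="my-cluster",           # assumed managed cluster
                agent_pool_name="nodepool1",          # assumed agent pool
                parameters={"count": 3, "vm_size": "Standard_DS2_v2"},
            )
            agent_pool = await poller.result()        # wait for the LRO to finish
            return agent_pool.provisioning_state


# To run: import asyncio; asyncio.run(_example_scale_agent_pool())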
#!/usr/bin/python import fileinput import string import sys import os mpi = False if ( mpi ): print 'Using MPI' else: print 'Not using MPI' fortran_compiler = 'ifort' fortran_opt_flags = '-g -c -O2 -openmp -heap-arrays' fortran_link_flags = '-g -O2 -openmp -heap-arrays' c_compiler = 'icc' c_opt_flags = '-g -c -O2 -openmp' if ( mpi ): fortran_linker = 'mpif90' else: fortran_linker = fortran_compiler src_dir = './src/' lst_dir = './lst/' exe_dir = './exe/' lib_name = 'tce_sort_f77_omp.a' flush_rank='1000' count = '20' rank = '32' ranks = [rank,rank,rank,rank] size = int(ranks[0])*int(ranks[1])*int(ranks[2])*int(ranks[3]) sizechar = str(size) def perm(l): sz = len(l) if sz <= 1: return [l] return [p[:i]+[l[0]]+p[i:] for i in xrange(sz) for p in perm(l[1:])] indices = ['4','3','2','1'] #transpose_list = [indices] #loop_list = [indices] transpose_list = perm(indices) loop_list = perm(indices) print fortran_compiler+' '+fortran_opt_flags+' -c tce_sort_hirata.F' os.system(fortran_compiler+' '+fortran_opt_flags+' -c tce_sort_hirata.F') os.system('ar -r '+lib_name+' tce_sort_hirata.o') timer = '' if ( timer == "ticks" ): timer_call = "getticks()" print c_compiler+' '+c_opt_flags+' -c getticks_bgp.c' os.system(c_compiler+' '+c_opt_flags+' -c getticks_bgp.c') os.system('ar -r '+lib_name+' getticks_bgp.o') else: timer_call = "rtc()" for transpose_order in transpose_list: dummy = 0 A = transpose_order[0] B = transpose_order[1] C = transpose_order[2] D = transpose_order[3] driver_name = 'transpose_'+A+B+C+D+'superflush_omp' print driver_name source_name = driver_name+'_driver.F' lst_name = driver_name+'_driver.lst' source_file = open(source_name,'w') source_file.write(' PROGRAM ARRAYTEST2\n') if ( mpi ): source_file.write('#include "mpif.h"\n') source_file.write(' REAL*8 before('+ranks[0]+','+ranks[0]+','+ranks[0]+','+ranks[0]+')\n') source_file.write(' REAL*8 after_jeff('+sizechar+')\n') source_file.write(' REAL*8 after_hirata('+sizechar+')\n') #source_file.write(' REAL*8 after_glass('+sizechar+')\n') source_file.write(' REAL*8 X('+flush_rank+','+flush_rank+'),Y('+flush_rank+','+flush_rank+')\n') if ( timer == "ticks" ): source_file.write(' INTEGER*8 Tstart,Tfinish\n') source_file.write(' INTEGER*8 Thirata,Thirata2,Tglass,Tjeff\n') source_file.write(' INTEGER*8 Tbest\n') else: source_file.write(' REAL*8 Tstart,Tfinish\n') source_file.write(' REAL*8 Thirata,Thirata2,Tglass,Tjeff\n') source_file.write(' REAL*8 Tbest\n') source_file.write(' REAL*8 Tspeedup\n') source_file.write(' INTEGER*4 i,j,k,l\n') source_file.write(' INTEGER*4 ii,jj\n') source_file.write(' INTEGER*4 aSize(4)\n') source_file.write(' INTEGER*4 perm(4)\n') source_file.write(' INTEGER*4 fastest(4)\n') if ( mpi ): source_file.write(' INTEGER ierror\n') #source_file.write(' LOGICAL glass_correct\n') #source_file.write(' EXTERNAL glass_correct\n') if ( mpi ): source_file.write(' call mpi_init(ierror)\n') #source_file.write(' call hpm_init()\n') source_file.write(' aSize(1) = '+ranks[0]+'\n') source_file.write(' aSize(2) = '+ranks[1]+'\n') source_file.write(' aSize(3) = '+ranks[2]+'\n') source_file.write(' aSize(4) = '+ranks[3]+'\n') source_file.write(' perm(1) = '+A+'\n') source_file.write(' perm(2) = '+B+'\n') source_file.write(' perm(3) = '+C+'\n') source_file.write(' perm(4) = '+D+'\n') source_file.write(' DO 70 i = 1, '+ranks[0]+'\n') source_file.write(' DO 60 j = 1, '+ranks[1]+'\n') source_file.write(' DO 50 k = 1, '+ranks[2]+'\n') source_file.write(' DO 40 l = 1, '+ranks[3]+'\n') source_file.write(' before(i,j,k,l) = l + k*10 + j*100 + 
i*1000\n') source_file.write('40 CONTINUE\n') source_file.write('50 CONTINUE\n') source_file.write('60 CONTINUE\n') source_file.write('70 CONTINUE\n') # THIS PART FLUSHES THE CACHE source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' X(jj,ii) = 1d0/(ii+jj)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' Y(jj,ii) = 1d0/(ii+jj)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' X(jj,ii) = 7d0*X(ii,jj)+X(jj,ii)-3d0*Y(ii,jj)+2d0*Y(jj,ii)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') # END CACHE FLUSH if ( timer == "ticks" ): source_file.write(' Tbest=999999\n') source_file.write(' Tstart=0\n') source_file.write(' Tfinish=0\n') else: source_file.write(' Tbest=999999.0d0\n') source_file.write(' Tstart=0.0d0\n') source_file.write(' Tfinish=0.0d0\n') #source_file.write(' call hpm_start("tce_sort_4_omp #1")\n') source_file.write(' Tstart='+timer_call+'\n') source_file.write(' DO 30 i = 1, '+count+'\n') source_file.write(' CALL tce_sort_4(before, after_hirata,\n') source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4),\n') source_file.write(' & perm(1), perm(2), perm(3), perm(4))\n') source_file.write('30 CONTINUE\n') source_file.write(' Tfinish='+timer_call+'\n') #source_file.write(' call hpm_stop("tce_sort_4_omp #1")\n') source_file.write(' Thirata=(Tfinish-Tstart)\n') # THIS PART FLUSHES THE CACHE source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' X(jj,ii) = 1d0/(ii+jj)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' Y(jj,ii) = 1d0/(ii+jj)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' X(jj,ii) = 7d0*X(ii,jj)+X(jj,ii)-3d0*Y(ii,jj)+2d0*Y(jj,ii)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') # END CACHE FLUSH source_file.write(' write(6,*) "TESTING TRANPOSE TYPE '+A+B+C+D+'"\n') source_file.write(' write(6,*) "FOR 4D ARRAY OF RANK '+rank+'"\n') source_file.write(' write(6,*) "==================="\n') source_file.write(' write(6,*) "The compilation flags were:"\n') for option in range(0,len(fortran_opt_flags.split())): source_file.write(' write(6,*) "'+fortran_opt_flags.split()[option]+'"\n') source_file.write(' write(6,*) "==================="\n') source_file.write(' write(6,*) "Hirata OpenMP Reference #1 = ",Thirata,"seconds"\n') #source_file.write(' IF(glass_correct(perm(1), perm(2), perm(3), perm(4))) THEN\n') #source_file.write(' write(6,*) "KGlass Reference = ",Tglass,"seconds"\n') #source_file.write(' ENDIF\n') source_file.write(' write(6,1001) "Algorithm","Jeff","Speedup","Best","Best Speedup"\n') for loop_order in loop_list: dummy = dummy+1 a = loop_order[0] b = loop_order[1] c = loop_order[2] d = loop_order[3] subroutine_name = 'trans_'+A+B+C+D+'_loop_'+a+b+c+d+'_omp' # THIS PART FLUSHES THE CACHE source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' X(jj,ii) = 1d0/(ii+jj)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') 
source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' Y(jj,ii) = 1d0/(ii+jj)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' X(jj,ii) = 7d0*X(ii,jj)+X(jj,ii)-3d0*Y(ii,jj)+2d0*Y(jj,ii)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') # END CACHE FLUSH if ( timer == "ticks" ): source_file.write(' Tstart=0\n') source_file.write(' Tfinish=0\n') source_file.write(' Tjeff=0\n') else: source_file.write(' Tstart=0.0d0\n') source_file.write(' Tfinish=0.0d0\n') source_file.write(' Tjeff=0.0d0\n') source_file.write(' DO '+str(100+dummy)+' i = 1, '+count+'\n') # THIS PART FLUSHES THE CACHE source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' X(jj,ii) = 1d0/(ii+jj)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' Y(jj,ii) = 1d0/(ii+jj)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' X(jj,ii) = 7d0*X(ii,jj)+X(jj,ii)-3d0*Y(ii,jj)+2d0*Y(jj,ii)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') # END CACHE FLUSH #source_file.write(' call hpm_start("'+subroutine_name+'")\n') source_file.write(' Tstart='+timer_call+'\n') source_file.write(' CALL '+subroutine_name+'(before, after_jeff,\n') source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4))\n') source_file.write(' Tfinish='+timer_call+'\n') source_file.write(' Tjeff=Tjeff+(Tfinish-Tstart)\n') #source_file.write(' call hpm_stop("'+subroutine_name+'")\n') source_file.write(str(100+dummy)+' CONTINUE\n') source_file.write(' Tspeedup=(1d0*Thirata)/(1d0*Tjeff)\n') source_file.write(' if (Tjeff<Tbest) then\n') source_file.write(' Tbest=Tjeff\n') source_file.write(' fastest(1)='+a+'\n') source_file.write(' fastest(2)='+b+'\n') source_file.write(' fastest(3)='+c+'\n') source_file.write(' fastest(4)='+d+'\n') source_file.write(' endif\n') if 0 < dummy < 10: nice_dummy=' '+str(dummy) if 9 < dummy < 100: nice_dummy=' '+str(dummy) if 99 < dummy < 999: nice_dummy=''+str(dummy) #source_file.write(' write(6,1100) "'+nice_dummy+' Loop '+a+b+c+d+' ",\n') #source_file.write(' & Tjeff,Tspeedup,Tbest,Thirata/Tbest\n') source_file.write(' write(6,*) "'+nice_dummy+' Loop '+a+b+c+d+' ",\n') source_file.write(' & Tjeff,Tspeedup,Tbest,Thirata/Tbest\n') source_file.write(' DO '+str(500+dummy)+' i = 1, '+sizechar+'\n') source_file.write(' IF (after_jeff(i).ne.after_hirata(i)) THEN\n') source_file.write(' PRINT*,"jeff error ",i,after_jeff(i),after_hirata(i)\n') source_file.write(' ENDIF\n') source_file.write(str(500+dummy)+' CONTINUE\n') if ( timer == "ticks" ): source_file.write(' Tstart=0\n') source_file.write(' Tfinish=0\n') source_file.write(' Thirata2=0\n') else: source_file.write(' Tstart=0.0d0\n') source_file.write(' Tfinish=0.0d0\n') source_file.write(' Thirata2=0.0d0\n') source_file.write(' DO 34 i = 1, '+count+'\n') # THIS PART FLUSHES THE CACHE source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' X(jj,ii) = 1d0/(ii+jj)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do 
jj=1,'+flush_rank+'\n') source_file.write(' Y(jj,ii) = 1d0/(ii+jj)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') source_file.write(' do ii=1,'+flush_rank+'\n') source_file.write(' do jj=1,'+flush_rank+'\n') source_file.write(' X(jj,ii) = 7d0*X(ii,jj)+X(jj,ii)-3d0*Y(ii,jj)+2d0*Y(jj,ii)\n') source_file.write(' enddo \n') source_file.write(' enddo \n') # END CACHE FLUSH #source_file.write(' call hpm_start("tce_sort_4_omp #2")\n') source_file.write(' Tstart='+timer_call+'\n') source_file.write(' CALL tce_sort_4(before, after_hirata,\n') source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4),\n') source_file.write(' & perm(1), perm(2), perm(3), perm(4))\n') source_file.write(' Tfinish='+timer_call+'\n') #source_file.write(' call hpm_stop("tce_sort_4_omp #2")\n') source_file.write(' Thirata2=Thirata2+(Tfinish-Tstart)\n') source_file.write('34 CONTINUE\n') source_file.write(' write(6,*) "Hirata OpenMP Reference #2 = ",Thirata2,"seconds"\n') source_file.write(' write(6,1020) "The best loop order is:",\n') source_file.write(' & fastest(1),fastest(2),fastest(3),fastest(4)\n') #source_file.write(' write(6,1030) "The best time is:",Tbest\n') source_file.write(' write(6,*) "The best time is:",Tbest\n') #source_file.write(' write(6,1030) "The best speedup is:",(1d0*Thirata)/(1d0*Tbest)\n') source_file.write(' write(6,*) "Best speedup (#1) is:",(1d0*Thirata)/(1d0*Tbest)\n') source_file.write(' write(6,*) "Best speedup (#2) is:",(1d0*Thirata2)/(1d0*Tbest)\n') #source_file.write(' call hpm_print()\n') #source_file.write(' call hpm_print_flops()\n') #source_file.write(' call hpm_print_flops_agg()\n') if ( mpi ): source_file.write(' call mpi_finalize(ierror)\n') source_file.write(' STOP\n') source_file.write(' 1001 format(1x,a13,a12,a15,a9,a18)\n') source_file.write(' 1020 format(1x,a30,8x,4i1)\n') source_file.write('! 1030 format(1x,a30,d18.12)\n') source_file.write('! 1100 format(1x,a16,4i18)\n') source_file.write(' END\n') source_file.close() print fortran_linker+' '+fortran_link_flags+' '+' '+source_name+' '+lib_name+' '+' -o '+exe_dir+driver_name+'.x' #os.system(fortran_linker+' '+fortran_link_flags+' '+' '+source_name+' '+lib_name+' '+hpm_lib+' -o '+exe_dir+driver_name+'.x') os.system(fortran_linker+' '+fortran_link_flags+' '+' '+source_name+' '+lib_name+' '+' -o '+exe_dir+driver_name+'.x') os.system('mv '+source_name+' '+src_dir) #os.system('mv '+lst_name+' '+lst_dir)
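# ---------------------------------------------------------------------------
# A small standalone sketch of the permutation bookkeeping done by the
# generator above (which targets Python 2 and a hand-rolled perm()): the 24
# transpose orders and 24 loop orders are just the permutations of the four
# index labels, and each (transpose, loop) pair names one generated
# subroutine.  The names mirror the generator; this snippet builds nothing.
import itertools

index_labels = ['4', '3', '2', '1']
transpose_orders = [''.join(p) for p in itertools.permutations(index_labels)]
loop_orders = list(transpose_orders)  # same 24 orderings

subroutine_names = ['trans_%s_loop_%s_omp' % (t, l)
                    for t in transpose_orders
                    for l in loop_orders]
print(len(subroutine_names))  # 24 * 24 = 576 candidate sort kernels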
import os from aimes.emgr.utils.misc import * __author__ = "Matteo Turilli" __copyright__ = "Copyright 2015, The AIMES Project" __license__ = "MIT" # ----------------------------------------------------------------------------- def initialize_log(run): '''Pass. ''' f = open(run['files']['log'], "a", 1) # Title. separator = "=" * len("Run - "+run['tag']) print >> f, "%s\nRun - %s\n%s\n\n" % (separator, run['tag'], separator) f.flush() os.fsync(f) return f # ----------------------------------------------------------------------------- def initialize_runtime(run): '''Pass. ''' f = open(run['files']['runtime'], "a", 1) header = '[%s] [%s] - Run %d/-%d' % (timestamp(), run['tag'], run['number'], run['left']) rdir = '[%s] [%s] - Root: %s' % (timestamp(), run['tag'], run['root']) separator = "-" * len(header) print >> f, "%s" % separator print >> f, "%s" % header print >> f, "%s" % rdir f.flush() os.fsync(f) return f # ----------------------------------------------------------------------------- # LOGGING # ----------------------------------------------------------------------------- def log_rp(run): '''Write a log file of the experiment run. ''' f = run['log'] title = 'Radical Pilot IDs' separator = "=" * len(title) print >> f, "%s\n%s\n%s\n\n" % (separator, title, separator) # Session and managers ID. print >> f, "Session ID : %s" % run['session_id'] print >> f, "Pilot manager ID : %s" % run['pilot_manager_id'] print >> f, "Unit manager ID : %s" % run['unit_manager_id'] # Pilots. for pilot, resource in run['pilot_ids']: print >> f, "Pilot ID/resource : %s %s" % (pilot, resource) f.flush() os.fsync(f) # ----------------------------------------------------------------------------- def log_skeleton(run, workflow): '''Pass. ''' f = run['log'] title = 'Skeleton' separator = "=" * len(title) print >> f, "%s\n%s\n%s\n\n" % (separator, title, separator) print >> f, "Totals:" print >> f, "\tNumber of stages : %d" % \ len(workflow['skeleton'].stages) print >> f, "\tNumber of tasks : %d" % \ len(workflow['skeleton'].tasks) print >> f, "\tInput data : %.2f MB" % \ float((workflow['skeleton_input_data']/1024)/1024) print >> f, "\tOutput data : %.2f MB" % \ float((workflow['skeleton_output_data']/1024)/1024) print >> f, "\tLongest task execution time : %s seconds" % \ workflow['task_time']['max'] print >> f, "\tShortest task execution time : %s seconds" % \ workflow['task_time']['min'] print >> f, "\tLargest compute task : %s core(s)" % \ workflow['task_compute']['max'] print >> f, "\tSmallest compute task : %s core(s)" % \ workflow['task_compute']['min'] print >> f, '' for stage in workflow['skeleton'].stages: print >> f, "%s:" % stage.name print >> f, "\tNumber of tasks : %d" % len(stage.tasks) print >> f, "\tTime distribution : %s" % run['uniformity'] print >> f, "\tInput files : %d for a total of %.2f MB" % \ (workflow[stage.name]['input'][0], float((workflow[stage.name]['input'][1]/1024.0)/1024.0)) print >> f, "\tOutput files : %d for a total of %.2f MB" % \ (workflow[stage.name]['output'][0], float((workflow[stage.name]['output'][1]/1024.0)/1024.0)) print >> f, '' print >> f, "Execution boundaries:" print >> f, "\tLowest number of cores : %s" % \ workflow['task_compute']['min'] print >> f, "\tlongest execution time : %s seconds" % \ workflow['stages_time']['max'] print >> f, "\tHighest number of cores : %s" % \ workflow['stages_compute']['max'] print >> f, "\tshortest execution time : %s seconds" % \ workflow['task_time']['max'] print >> f, "\n" f.flush() os.fsync(f) # 
----------------------------------------------------------------------------- def log_bundle(run, resources): '''Pass. ''' f = run['log'] # report.header("Skeleton Workflow S01") title = 'Bundle' separator = "=" * len(title) print >> f, "%s\n%s\n%s\n\n" % (separator, title, separator) # Report back to the demo about the available resource bundle. print >> f, "Target resources IDs : %s" % ', '.join( map(str, resources['resource_ids'])) print >> f, "Total core capacity : %i" % resources['core_capacity'] print >> f, "\n" f.flush() os.fsync(f) # ----------------------------------------------------------------------------- def log_execution_stategy(cfg, run, strategy): '''Pass. ''' f = run['log'] title = 'Execution Strategy' separator = "=" * len(title) print >> f, "%s\n%s\n%s\n\n" % (separator, title, separator) print >> f, "Configurations:" if 'supported' in cfg['bundle']['resources']: print "I am here: %s" % cfg['bundle']['resources']['supported'] print >> f, "\tTarget resource for early binding : %s" %\ cfg['bundle']['resources']['supported'] print >> f, "\tTarget resources for late binding : %s" %\ ', '.join(map(str, cfg['bundle']['resources']['supported'].keys())) if 'supported' in cfg['bundle']['resources']: print >> f, "\tTarget resource for early binding : %s" %\ cfg['bundle']['resources']['unsupported'] print >> f, "\tTarget resources for late binding : %s" %\ ', '.join(map(str, cfg['bundle']['resources']['unsupported'].keys())) print >> f, "\tType of task-to-resource binding : %s" %\ run['binding'] print >> f, '' print >> f, "Heuristics:" print >> f, "\tDegree of concurrency for task execution : %s%%" %\ strategy['heuristic']['percentage_concurrency'] print >> f, "\tPercentage of bundle resources targeted : %s%%" %\ strategy['heuristic']['percentage_resources'] print >> f, '' print >> f, "Inferences:" print >> f, "\tNumber of target resources : %d" % \ len(strategy['inference']['target_resources']) print >> f, "\tTarget resource(s) for pilot(s) : %s" % \ ', '.join(map(str, strategy['inference']['target_resources'])) print >> f, "\tNumber of pilots : %d" % \ strategy['inference']['number_pilots'] print >> f, "\tTotal workflow number of cores : %s" % \ strategy['inference']['cores_workload'] print >> f, "\tType of scheduler for RP : %s" % \ strategy['inference']['rp_scheduler'] print >> f, "\tTotal workflow compute time : %s seconds" % \ strategy['inference']['compute_time_workload'] print >> f, "\tTotal workflow staging time : %s seconds" % \ strategy['inference']['staging_time_workload'] print >> f, "\tTotal RP overhead time : %s seconds" % \ strategy['inference']['rp_overhead_time_workload'] print >> f, "\n" f.flush() os.fsync(f) # ----------------------------------------------------------------------------- def log_pilot_descriptions(run): '''Pass. ''' f = run['log'] title = 'Pilot Descriptions' separator = "=" * len(title) print >> f, "%s\n%s\n%s\n\n" % (separator, title, separator) for pdesc in run['pdescs']: print >> f, "%s:" % pdesc.resource print >> f, "\tAllocation; None -> RP default : %s" % pdesc.project print >> f, "\tQueue; None -> RP default : %s" % pdesc.queue print >> f, "\tNumber of cores : %s" % pdesc.cores print >> f, "\tWalltime in minutes : %s" % pdesc.runtime print >> f, "\tStop once the workflow is done : %s" % pdesc.cleanup print >> f, '' print >> f, "\n" f.flush() os.fsync(f) # ----------------------------------------------------------------------------- def log_cu_descriptions(cfg, run, workflow): '''Pass. 
''' f = run['log'] title = 'Compute Unit Descriptions' separator = "=" * len(title) cuds = [j for i in run['cuds'].values() for j in i] print >> f, "%s\n%s\n%s\n\n" % (separator, title, separator) if cfg['workload_type'] == 'skeleton': print >> f, "Total tasks submitted : %d" % workflow['skeleton_tasks'] print >> f, "Total CU translated : %d" % len(cuds) for core in cfg['cores']: print >> f, "Total CU with %s cores : %s" % (core, cuds.count(core)) print >> f, '' print >> f, "Print the first units for reference:" # for cud in cuds[0:4]: if cfg['workload_type'] == 'skeleton': for cud in cuds: print >> f, "%s:" % cud.name print >> f, "\tExecutable : %s" % cud.executable print >> f, "\tArguments executable : %s" % cud.arguments print >> f, "\tNumber of cores : %s" % cud.cores print >> f, "\tPre-execution : %s" % cud.pre_exec print >> f, "\tInput staging : %s" % cud.input_staging print >> f, "\tOutput staging : %s" % cud.output_staging print >> f, "\tCleanup : %s" % cud.cleanup print >> f, '' print >> f, "\n" f.flush() os.fsync(f) # ----------------------------------------------------------------------------- def record_run_state(run): '''Pass. ''' f = run['runtime'] print >> f, "[%s] [%s] - State: %s" % (timestamp(), run['tag'], run['state']) f.flush() os.fsync(f) # ----------------------------------------------------------------------------- def record_run_session(run): '''Pass. ''' f = run['runtime'] print >> f, "[%s] [%s] - Session: %s" % (timestamp(), run['tag'], run['session_id']) f.flush() os.fsync(f)
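# -----------------------------------------------------------------------------
# Hedged usage sketch: every helper above reads its file handles and metadata
# from a `run` dict.  The keys below are the ones these functions actually
# access; the concrete paths and values are illustrative only.
def _example_run_logging():
    run = {
        'tag': 'exp-001',                     # label prefixed to every record
        'number': 1, 'left': 9,               # position within the experiment batch
        'root': '/tmp/aimes/exp-001',         # assumed working directory
        'state': 'RUNNING',
        'files': {
            'log': '/tmp/aimes/exp-001/log.txt',
            'runtime': '/tmp/aimes/exp-001/runtime.txt',
        },
    }
    run['log'] = initialize_log(run)          # opens the log and writes the title
    run['runtime'] = initialize_runtime(run)  # opens the runtime file, writes the header
    record_run_state(run)                     # appends a timestamped state line
    return run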
#!/usr/bin/python # Copyright (c) 2012 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import print_function import glob import hashlib import optparse import os import posixpath import shutil import subprocess import stat import sys import tarfile """A Cygwin aware version compress/extract object. This module supports creating and unpacking a tarfile on all platforms. For Cygwin, Mac, and Linux, it will use the standard tarfile implementation. For Win32 it will detect Cygwin style symlinks as it archives and convert them to symlinks. For Win32, it is unfortunate that os.stat does not return a FileID in the ino field which would allow us to correctly determine which files are hardlinks, so instead we assume that any files in the archive that are an exact match are hardlinks to the same data. We know they are not Symlinks because we are using Cygwin style symlinks only, which appear to Win32 a normal file. All paths stored and retrieved from a TAR file are expected to be POSIX style, Win32 style paths will be rejected. NOTE: All paths represent by the tarfile and all API functions are POSIX style paths except for CygTar.Add which assumes a Native path. """ def ToNativePath(native_path): """Convert to a posix style path if this is win32.""" if sys.platform == 'win32': return native_path.replace('/', '\\') return native_path def IsCygwinSymlink(symtext): """Return true if the provided text looks like a Cygwin symlink.""" return symtext[:12] == '!<symlink>\xff\xfe' def SymDatToPath(symtext): """Convert a Cygwin style symlink data to a relative path.""" return ''.join([ch for ch in symtext[12:] if ch != '\x00']) def PathToSymDat(filepath): """Convert a filepath to cygwin style symlink data.""" symtag = '!<symlink>\xff\xfe' unipath = ''.join([ch + '\x00' for ch in filepath]) strterm = '\x00\x00' return symtag + unipath + strterm def CreateWin32Link(filepath, targpath, verbose): """Create a link on Win32 if possible Uses mklink to create a link (hardlink or junction) if possible. On failure, it will assume mklink is unavailible and copy the file instead. Future calls will not attempt to use mklink.""" targ_is_dir = os.path.isdir(targpath) call_mklink = False if targ_is_dir and CreateWin32Link.try_junction: # Creating a link to a directory will fail, but a junction (which is more # like a symlink) will work. mklink_flag = '/J' call_mklink = True elif not targ_is_dir and CreateWin32Link.try_hardlink: mklink_flag = '/H' call_mklink = True # Assume an error, if subprocess succeeds, then it should return 0 err = 1 if call_mklink: try: cmd = ['cmd', '/C', 'mklink %s %s %s' % ( mklink_flag, ToNativePath(filepath), ToNativePath(targpath))] err = subprocess.call(cmd, stdout = open(os.devnull, 'wb'), stderr = open(os.devnull, 'wb')) except EnvironmentError: if targ_is_dir: CreateWin32Link.try_junction = False else: CreateWin32Link.try_hardlink = False # If we failed to create a link, then just copy it. We wrap this in a # retry for Windows which often has stale file lock issues. if err or not os.path.exists(filepath): if targ_is_dir and verbose: print('Failed to create junction %s -> %s. 
Copying instead.\n' % (filepath, targpath)) for cnt in range(1,4): try: if targ_is_dir: shutil.copytree(targpath, filepath) else: shutil.copyfile(targpath, filepath) return False except EnvironmentError: if verbose: print( 'Try %d: Failed hardlink %s -> %s\n' % (cnt, filepath, targpath)) if verbose: print('Giving up.') CreateWin32Link.try_hardlink = True CreateWin32Link.try_junction = True def ComputeFileHash(filepath): """Generate a sha1 hash for the file at the given path.""" sha1 = hashlib.sha1() with open(filepath, 'rb') as fp: sha1.update(fp.read()) return sha1.hexdigest() def ReadableSizeOf(num): """Convert to a human readable number.""" if num < 1024.0: return '[%5dB]' % num for x in ['B','K','M','G','T']: if num < 1024.0: return '[%5.1f%s]' % (num, x) num /= 1024.0 return '[%dT]' % int(num) class CygTar(object): """ CygTar is an object which represents a Win32 and Cygwin aware tarball.""" def __init__(self, filename, mode='r', verbose=False): self.size_map = {} self.file_hashes = {} # Set errorlevel=1 so that fatal errors actually raise! if 'r' in mode: self.read_file = open(filename, 'rb') self.read_filesize = os.path.getsize(filename) self.tar = tarfile.open(mode=mode, fileobj=self.read_file, errorlevel=1) else: self.read_file = None self.read_filesize = 0 self.tar = tarfile.open(filename, mode=mode, errorlevel=1) self.verbose = verbose def __DumpInfo(self, tarinfo): """Prints information on a single object in the tarball.""" typeinfo = '?' lnk = '' if tarinfo.issym(): typeinfo = 'S' lnk = '-> ' + tarinfo.linkname if tarinfo.islnk(): typeinfo = 'H' lnk = '-> ' + tarinfo.linkname if tarinfo.isdir(): typeinfo = 'D' if tarinfo.isfile(): typeinfo = 'F' reable_size = ReadableSizeOf(tarinfo.size) print('%s %s : %s %s' % (reable_size, typeinfo, tarinfo.name, lnk)) return tarinfo def __AddFile(self, tarinfo, fileobj=None): """Add a file to the archive.""" if self.verbose: self.__DumpInfo(tarinfo) self.tar.addfile(tarinfo, fileobj) def __AddLink(self, tarinfo, linktype, linkpath): """Add a Win32 symlink or hardlink to the archive.""" tarinfo.linkname = linkpath tarinfo.type = linktype tarinfo.size = 0 self.__AddFile(tarinfo) def Add(self, filepath, prefix=None): """Add path filepath to the archive which may be Native style. Add files individually recursing on directories. For POSIX we use tarfile.addfile directly on symlinks and hardlinks. For files, we must check if they are duplicates which we convert to hardlinks or symlinks which we convert from a file to a symlink in the tarfile. All other files are added as a standard file. """ # At this point tarinfo.name will contain a POSIX style path regardless # of the original filepath. tarinfo = self.tar.gettarinfo(filepath) if prefix: tarinfo.name = posixpath.join(prefix, tarinfo.name) if sys.platform == 'win32': # On win32 os.stat() always claims that files are world writable # which means that unless we remove this bit here we end up with # world writables files in the archive, which is almost certainly # not intended. tarinfo.mode &= ~stat.S_IWOTH tarinfo.mode &= ~stat.S_IWGRP # If we want cygwin to be able to extract this archive and use # executables and dll files we need to mark all the archive members as # executable. This is essentially what happens anyway when the # archive is extracted on win32. 
tarinfo.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP # If this a symlink or hardlink, add it if tarinfo.issym() or tarinfo.islnk(): tarinfo.size = 0 self.__AddFile(tarinfo) return True # If it's a directory, then you want to recurse into it if tarinfo.isdir(): self.__AddFile(tarinfo) native_files = glob.glob(os.path.join(filepath, '*')) for native_file in native_files: if not self.Add(native_file, prefix): return False return True # At this point we only allow addition of "FILES" if not tarinfo.isfile(): print('Failed to add non real file: %s' % filepath) return False # Now check if it is a Cygwin style link disguised as a file. # We go ahead and check on all platforms just in case we are tar'ing a # mount shared with windows. if tarinfo.size <= 524: with open(filepath) as fp: symtext = fp.read() if IsCygwinSymlink(symtext): self.__AddLink(tarinfo, tarfile.SYMTYPE, SymDatToPath(symtext)) return True # Otherwise, check if its a hardlink by seeing if it matches any unique # hash within the list of hashed files for that file size. nodelist = self.size_map.get(tarinfo.size, []) # If that size bucket is empty, add this file, no need to get the hash until # we get a bucket collision for the first time.. if not nodelist: self.size_map[tarinfo.size] = [filepath] with open(filepath, 'rb') as fp: self.__AddFile(tarinfo, fp) return True # If the size collides with anything, we'll need to check hashes. We assume # no hash collisions for SHA1 on a given bucket, since the number of files # in a bucket over possible SHA1 values is near zero. newhash = ComputeFileHash(filepath) self.file_hashes[filepath] = newhash for oldname in nodelist: oldhash = self.file_hashes.get(oldname, None) if not oldhash: oldhash = ComputeFileHash(oldname) self.file_hashes[oldname] = oldhash if oldhash == newhash: self.__AddLink(tarinfo, tarfile.LNKTYPE, oldname) return True # Otherwise, we missed, so add it to the bucket for this size self.size_map[tarinfo.size].append(filepath) with open(filepath, 'rb') as fp: self.__AddFile(tarinfo, fp) return True def Extract(self): """Extract the tarfile to the current directory.""" if self.verbose: sys.stdout.write('|' + ('-' * 48) + '|\n') sys.stdout.flush() dots_outputted = 0 win32_symlinks = {} for m in self.tar: if self.verbose: cnt = self.read_file.tell() curdots = cnt * 50 // self.read_filesize if dots_outputted < curdots: for dot in range(dots_outputted, curdots): sys.stdout.write('.') sys.stdout.flush() dots_outputted = curdots # For hardlinks in Windows, we try to use mklink, and instead copy on # failure. if m.islnk() and sys.platform == 'win32': CreateWin32Link(m.name, m.linkname, self.verbose) # On Windows we treat symlinks as if they were hard links. # Proper Windows symlinks supported by everything can be made with # mklink, but only by an Administrator. The older toolchains are # built with Cygwin, so they could use Cygwin-style symlinks; but # newer toolchains do not use Cygwin, and nothing else on the system # understands Cygwin-style symlinks, so avoid them. elif m.issym() and sys.platform == 'win32': # For a hard link, the link target (m.linkname) always appears # in the archive before the link itself (m.name), so the links # can just be made on the fly. However, a symlink might well # appear in the archive before its target file, so there would # not yet be any file to hard-link to. Hence, we have to collect # all the symlinks and create them in dependency order at the end. 
linkname = m.linkname if not posixpath.isabs(linkname): linkname = posixpath.join(posixpath.dirname(m.name), linkname) linkname = posixpath.normpath(linkname) win32_symlinks[posixpath.normpath(m.name)] = linkname # Otherwise, extract normally. else: self.tar.extract(m) win32_symlinks_left = list(win32_symlinks.items()) while win32_symlinks_left: this_symlink = win32_symlinks_left.pop(0) name, linkname = this_symlink if linkname in win32_symlinks: # The target is itself a symlink not yet created. # Wait for it to come 'round on the guitar. win32_symlinks_left.append(this_symlink) else: del win32_symlinks[name] CreateWin32Link(name, linkname, self.verbose) if self.verbose: sys.stdout.write('\n') sys.stdout.flush() def List(self): """List the set of objects in the tarball.""" for tarinfo in self.tar: self.__DumpInfo(tarinfo) def Close(self): self.tar.close() if self.read_file is not None: self.read_file.close() self.read_file = None self.read_filesize = 0 def Main(args): parser = optparse.OptionParser() # Modes parser.add_option('-c', '--create', help='Create a tarball.', action='store_const', const='c', dest='action', default='') parser.add_option('-x', '--extract', help='Extract a tarball.', action='store_const', const='x', dest='action') parser.add_option('-t', '--list', help='List sources in tarball.', action='store_const', const='t', dest='action') # Compression formats parser.add_option('-j', '--bzip2', help='Create a bz2 tarball.', action='store_const', const=':bz2', dest='format', default='') parser.add_option('-z', '--gzip', help='Create a gzip tarball.', action='store_const', const=':gz', dest='format', ) # Misc parser.add_option('-v', '--verbose', help='Use verbose output.', action='store_true', dest='verbose', default=False) parser.add_option('-f', '--file', help='Name of tarball.', dest='filename', default='') parser.add_option('-C', '--directory', help='Change directory.', dest='cd', default='') parser.add_option('--prefix', help='Subdirectory prefix for all paths') options, args = parser.parse_args(args[1:]) if not options.action: parser.error('Expecting compress or extract') if not options.filename: parser.error('Expecting a filename') if options.action in ['c'] and not args: parser.error('Expecting list of sources to add') if options.action in ['x', 't'] and args: parser.error('Unexpected source list on extract') if options.action == 'c': mode = 'w' + options.format else: mode = 'r'+ options.format tar = CygTar(options.filename, mode, verbose=options.verbose) if options.cd: os.chdir(options.cd) if options.action == 't': tar.List() return 0 if options.action == 'x': tar.Extract() return 0 if options.action == 'c': for filepath in args: if not tar.Add(filepath, options.prefix): return -1 tar.Close() return 0 parser.error('Missing action c, t, or x.') return -1 if __name__ == '__main__': sys.exit(Main(sys.argv))
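# ---------------------------------------------------------------------------
# Hedged usage sketch: the same create/extract flow that Main() wires up from
# command-line flags, driven directly through the CygTar class.  File and
# directory names are illustrative.
def _example_cygtar_roundtrip():
    # Create a gzip tarball, archiving one tree under a 'pkg/' prefix
    # (equivalent to: cygtar.py -c -z -f toolchain.tar.gz --prefix pkg build/output).
    tar = CygTar('toolchain.tar.gz', 'w:gz', verbose=True)
    tar.Add('build/output', prefix='pkg')
    tar.Close()

    # Extract it again; on win32 the Cygwin-symlink and hardlink handling
    # described in the module docstring is applied during extraction.
    tar = CygTar('toolchain.tar.gz', 'r:gz', verbose=True)
    tar.Extract()
    tar.Close()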
import unittest from django import forms from django.forms.utils import ErrorList from django.core.exceptions import ValidationError from wagtail.wagtailcore import blocks class TestFieldBlock(unittest.TestCase): def test_charfield_render(self): block = blocks.CharBlock() html = block.render("Hello world!") self.assertEqual(html, "Hello world!") def test_charfield_render_form(self): block = blocks.CharBlock() html = block.render_form("Hello world!") self.assertIn('<div class="field char_field widget-text_input">', html) self.assertIn('<input id="" name="" placeholder="" type="text" value="Hello world!" />', html) def test_charfield_render_form_with_prefix(self): block = blocks.CharBlock() html = block.render_form("Hello world!", prefix='foo') self.assertIn('<input id="foo" name="foo" placeholder="" type="text" value="Hello world!" />', html) def test_charfield_render_form_with_error(self): block = blocks.CharBlock() html = block.render_form("Hello world!", errors=ErrorList([ValidationError("This field is required.")]) ) self.assertIn('This field is required.', html) def test_charfield_searchable_content(self): block = blocks.CharBlock() content = block.get_searchable_content("Hello world!") self.assertEqual(content, ["Hello world!"]) def test_choicefield_render(self): class ChoiceBlock(blocks.FieldBlock): field = forms.ChoiceField(choices=( ('choice-1', "Choice 1"), ('choice-2', "Choice 2"), )) block = ChoiceBlock() html = block.render('choice-2') self.assertEqual(html, "choice-2") def test_choicefield_render_form(self): class ChoiceBlock(blocks.FieldBlock): field = forms.ChoiceField(choices=( ('choice-1', "Choice 1"), ('choice-2', "Choice 2"), )) block = ChoiceBlock() html = block.render_form('choice-2') self.assertIn('<div class="field choice_field widget-select">', html) self.assertIn('<select id="" name="" placeholder="">', html) self.assertIn('<option value="choice-1">Choice 1</option>', html) self.assertIn('<option value="choice-2" selected="selected">Choice 2</option>', html) @unittest.expectedFailure # Returning "choice-1" instead of "Choice 1" def test_choicefield_searchable_content(self): class ChoiceBlock(blocks.FieldBlock): field = forms.ChoiceField(choices=( ('choice-1', "Choice 1"), ('choice-2', "Choice 2"), )) block = ChoiceBlock() content = block.get_searchable_content("choice-1") self.assertEqual(content, ["Choice 1"]) class TestChoiceBlock(unittest.TestCase): def setUp(self): from django.db.models.fields import BLANK_CHOICE_DASH self.blank_choice_dash_label = BLANK_CHOICE_DASH[0][1] def test_render_required_choice_block(self): block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')]) html = block.render_form('coffee', prefix='beverage') self.assertIn('<select id="beverage" name="beverage" placeholder="">', html) # blank option should still be rendered for required fields # (we may want it as an initial value) self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html) self.assertIn('<option value="tea">Tea</option>', html) self.assertIn('<option value="coffee" selected="selected">Coffee</option>', html) def test_validate_required_choice_block(self): block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')]) self.assertEqual(block.clean('coffee'), 'coffee') with self.assertRaises(ValidationError): block.clean('whisky') with self.assertRaises(ValidationError): block.clean('') with self.assertRaises(ValidationError): block.clean(None) def test_render_non_required_choice_block(self): block = 
blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False) html = block.render_form('coffee', prefix='beverage') self.assertIn('<select id="beverage" name="beverage" placeholder="">', html) self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html) self.assertIn('<option value="tea">Tea</option>', html) self.assertIn('<option value="coffee" selected="selected">Coffee</option>', html) def test_validate_non_required_choice_block(self): block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False) self.assertEqual(block.clean('coffee'), 'coffee') with self.assertRaises(ValidationError): block.clean('whisky') self.assertEqual(block.clean(''), '') self.assertEqual(block.clean(None), '') def test_render_choice_block_with_existing_blank_choice(self): block = blocks.ChoiceBlock( choices=[('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')], required=False) html = block.render_form(None, prefix='beverage') self.assertIn('<select id="beverage" name="beverage" placeholder="">', html) self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html) self.assertIn('<option value="" selected="selected">No thanks</option>', html) self.assertIn('<option value="tea">Tea</option>', html) self.assertIn('<option value="coffee">Coffee</option>', html) def test_named_groups_without_blank_option(self): block = blocks.ChoiceBlock( choices=[ ('Alcoholic', [ ('gin', 'Gin'), ('whisky', 'Whisky'), ]), ('Non-alcoholic', [ ('tea', 'Tea'), ('coffee', 'Coffee'), ]), ]) # test rendering with the blank option selected html = block.render_form(None, prefix='beverage') self.assertIn('<select id="beverage" name="beverage" placeholder="">', html) self.assertIn('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html) self.assertIn('<optgroup label="Alcoholic">', html) self.assertIn('<option value="tea">Tea</option>', html) # test rendering with a non-blank option selected html = block.render_form('tea', prefix='beverage') self.assertIn('<select id="beverage" name="beverage" placeholder="">', html) self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html) self.assertIn('<optgroup label="Alcoholic">', html) self.assertIn('<option value="tea" selected="selected">Tea</option>', html) def test_named_groups_with_blank_option(self): block = blocks.ChoiceBlock( choices=[ ('Alcoholic', [ ('gin', 'Gin'), ('whisky', 'Whisky'), ]), ('Non-alcoholic', [ ('tea', 'Tea'), ('coffee', 'Coffee'), ]), ('Not thirsty', [ ('', 'No thanks') ]), ], required=False) # test rendering with the blank option selected html = block.render_form(None, prefix='beverage') self.assertIn('<select id="beverage" name="beverage" placeholder="">', html) self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html) self.assertNotIn('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html) self.assertIn('<optgroup label="Alcoholic">', html) self.assertIn('<option value="tea">Tea</option>', html) self.assertIn('<option value="" selected="selected">No thanks</option>', html) # test rendering with a non-blank option selected html = block.render_form('tea', prefix='beverage') self.assertIn('<select id="beverage" name="beverage" placeholder="">', html) self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html) self.assertNotIn('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html) self.assertIn('<optgroup 
label="Alcoholic">', html) self.assertIn('<option value="tea" selected="selected">Tea</option>', html) def test_subclassing(self): class BeverageChoiceBlock(blocks.ChoiceBlock): choices = [ ('tea', 'Tea'), ('coffee', 'Coffee'), ] block = BeverageChoiceBlock(required=False) html = block.render_form('tea', prefix='beverage') self.assertIn('<select id="beverage" name="beverage" placeholder="">', html) self.assertIn('<option value="tea" selected="selected">Tea</option>', html) # subclasses of ChoiceBlock should deconstruct to a basic ChoiceBlock for migrations self.assertEqual( block.deconstruct(), ( 'wagtail.wagtailcore.blocks.ChoiceBlock', [], { 'choices': [('tea', 'Tea'), ('coffee', 'Coffee')], 'required': False, }, ) ) class TestMeta(unittest.TestCase): def test_set_template_with_meta(self): class HeadingBlock(blocks.CharBlock): class Meta: template = 'heading.html' block = HeadingBlock() self.assertEqual(block.meta.template, 'heading.html') def test_set_template_with_constructor(self): block = blocks.CharBlock(template='heading.html') self.assertEqual(block.meta.template, 'heading.html') def test_set_template_with_constructor_overrides_meta(self): class HeadingBlock(blocks.CharBlock): class Meta: template = 'heading.html' block = HeadingBlock(template='subheading.html') self.assertEqual(block.meta.template, 'subheading.html') def test_meta_multiple_inheritance(self): class HeadingBlock(blocks.CharBlock): class Meta: template = 'heading.html' test = 'Foo' class SubHeadingBlock(HeadingBlock): class Meta: template = 'subheading.html' block = SubHeadingBlock() self.assertEqual(block.meta.template, 'subheading.html') self.assertEqual(block.meta.test, 'Foo') class TestStructBlock(unittest.TestCase): def test_initialisation(self): block = blocks.StructBlock([ ('title', blocks.CharBlock()), ('link', blocks.URLBlock()), ]) self.assertEqual(list(block.child_blocks.keys()), ['title', 'link']) def test_initialisation_from_subclass(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = LinkBlock() self.assertEqual(list(block.child_blocks.keys()), ['title', 'link']) def test_initialisation_from_subclass_with_extra(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = LinkBlock([ ('classname', blocks.CharBlock()) ]) self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname']) def test_initialisation_with_multiple_subclassses(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() class StyledLinkBlock(LinkBlock): classname = blocks.CharBlock() block = StyledLinkBlock() self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname']) @unittest.expectedFailure # Field order doesn't match inheritance order def test_initialisation_with_mixins(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() class StylingMixin(blocks.StructBlock): classname = blocks.CharBlock() class StyledLinkBlock(LinkBlock, StylingMixin): pass block = StyledLinkBlock() self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname']) def test_render(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = LinkBlock() html = block.render({ 'title': "Wagtail site", 'link': 'http://www.wagtail.io', }) self.assertIn('<dt>title</dt>', html) self.assertIn('<dd>Wagtail site</dd>', html) self.assertIn('<dt>link</dt>', html) 
self.assertIn('<dd>http://www.wagtail.io</dd>', html) @unittest.expectedFailure def test_render_unknown_field(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = LinkBlock() html = block.render({ 'title': "Wagtail site", 'link': 'http://www.wagtail.io', 'image': 10, }) self.assertIn('<dt>title</dt>', html) self.assertIn('<dd>Wagtail site</dd>', html) self.assertIn('<dt>link</dt>', html) self.assertIn('<dd>http://www.wagtail.io</dd>', html) # Don't render the extra item self.assertNotIn('<dt>image</dt>', html) def test_render_form(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = LinkBlock() html = block.render_form({ 'title': "Wagtail site", 'link': 'http://www.wagtail.io', }, prefix='mylink') self.assertIn('<div class="struct-block">', html) self.assertIn('<div class="field char_field widget-text_input fieldname-title">', html) self.assertIn('<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Wagtail site" />', html) self.assertIn('<div class="field url_field widget-url_input fieldname-link">', html) self.assertIn('<input id="mylink-link" name="mylink-link" placeholder="Link" type="url" value="http://www.wagtail.io" />', html) def test_render_form_unknown_field(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = LinkBlock() html = block.render_form({ 'title': "Wagtail site", 'link': 'http://www.wagtail.io', 'image': 10, }, prefix='mylink') self.assertIn('<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Wagtail site" />', html) self.assertIn('<input id="mylink-link" name="mylink-link" placeholder="Link" type="url" value="http://www.wagtail.io" />', html) # Don't render the extra field self.assertNotIn('mylink-image', html) def test_render_form_uses_default_value(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock(default="Torchbox") link = blocks.URLBlock(default="http://www.torchbox.com") block = LinkBlock() html = block.render_form({}, prefix='mylink') self.assertIn('<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Torchbox" />', html) self.assertIn('<input id="mylink-link" name="mylink-link" placeholder="Link" type="url" value="http://www.torchbox.com" />', html) def test_media_inheritance(self): class ScriptedCharBlock(blocks.CharBlock): media = forms.Media(js=['scripted_char_block.js']) class LinkBlock(blocks.StructBlock): title = ScriptedCharBlock(default="Torchbox") link = blocks.URLBlock(default="http://www.torchbox.com") block = LinkBlock() self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js())) def test_html_declaration_inheritance(self): class CharBlockWithDeclarations(blocks.CharBlock): def html_declarations(self): return '<script type="text/x-html-template">hello world</script>' class LinkBlock(blocks.StructBlock): title = CharBlockWithDeclarations(default="Torchbox") link = blocks.URLBlock(default="http://www.torchbox.com") block = LinkBlock() self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations()) def test_searchable_content(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = LinkBlock() content = block.get_searchable_content({ 'title': "Wagtail site", 'link': 'http://www.wagtail.io', }) self.assertEqual(content, ["Wagtail site"]) def test_value_from_datadict(self): block = 
blocks.StructBlock([ ('title', blocks.CharBlock()), ('link', blocks.URLBlock()), ]) struct_val = block.value_from_datadict({ 'mylink-title': "Torchbox", 'mylink-link': "http://www.torchbox.com" }, {}, 'mylink') self.assertEqual(struct_val['title'], "Torchbox") self.assertEqual(struct_val['link'], "http://www.torchbox.com") self.assertTrue(isinstance(struct_val, blocks.StructValue)) self.assertTrue(isinstance(struct_val.bound_blocks['link'].block, blocks.URLBlock)) class TestListBlock(unittest.TestCase): def test_initialise_with_class(self): block = blocks.ListBlock(blocks.CharBlock) # Child block should be initialised for us self.assertIsInstance(block.child_block, blocks.CharBlock) def test_initialise_with_instance(self): child_block = blocks.CharBlock() block = blocks.ListBlock(child_block) self.assertEqual(block.child_block, child_block) def render(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = blocks.ListBlock(LinkBlock()) return block.render([ { 'title': "Wagtail", 'link': 'http://www.wagtail.io', }, { 'title': "Django", 'link': 'http://www.djangoproject.com', }, ]) def test_render_uses_ul(self): html = self.render() self.assertIn('<ul>', html) self.assertIn('</ul>', html) def test_render_uses_li(self): html = self.render() self.assertIn('<li>', html) self.assertIn('</li>', html) def render_form(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = blocks.ListBlock(LinkBlock) html = block.render_form([ { 'title': "Wagtail", 'link': 'http://www.wagtail.io', }, { 'title': "Django", 'link': 'http://www.djangoproject.com', }, ] , prefix='links') return html def test_render_form_wrapper_class(self): html = self.render_form() self.assertIn('<div class="sequence">', html) def test_render_form_count_field(self): html = self.render_form() self.assertIn('<input type="hidden" name="links-count" id="links-count" value="2">', html) def test_render_form_delete_field(self): html = self.render_form() self.assertIn('<input type="hidden" id="links-0-deleted" name="links-0-deleted" value="">', html) def test_render_form_order_fields(self): html = self.render_form() self.assertIn('<input type="hidden" id="links-0-order" name="links-0-order" value="0">', html) self.assertIn('<input type="hidden" id="links-1-order" name="links-1-order" value="1">', html) def test_render_form_labels(self): html = self.render_form() self.assertIn('<label for=links-0-value-title>Title</label>', html) self.assertIn('<label for=links-0-value-link>Link</label>', html) def test_render_form_values(self): html = self.render_form() self.assertIn('<input id="links-0-value-title" name="links-0-value-title" placeholder="Title" type="text" value="Wagtail" />', html) self.assertIn('<input id="links-0-value-link" name="links-0-value-link" placeholder="Link" type="url" value="http://www.wagtail.io" />', html) self.assertIn('<input id="links-1-value-title" name="links-1-value-title" placeholder="Title" type="text" value="Django" />', html) self.assertIn('<input id="links-1-value-link" name="links-1-value-link" placeholder="Link" type="url" value="http://www.djangoproject.com" />', html) def test_html_declarations(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = blocks.ListBlock(LinkBlock) html = block.html_declarations() self.assertIn('<input id="__PREFIX__-value-title" name="__PREFIX__-value-title" placeholder="Title" type="text" />', html) self.assertIn('<input 
id="__PREFIX__-value-link" name="__PREFIX__-value-link" placeholder="Link" type="url" />', html) def test_html_declarations_uses_default(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock(default="Github") link = blocks.URLBlock(default="http://www.github.com") block = blocks.ListBlock(LinkBlock) html = block.html_declarations() self.assertIn('<input id="__PREFIX__-value-title" name="__PREFIX__-value-title" placeholder="Title" type="text" value="Github" />', html) self.assertIn('<input id="__PREFIX__-value-link" name="__PREFIX__-value-link" placeholder="Link" type="url" value="http://www.github.com" />', html) def test_media_inheritance(self): class ScriptedCharBlock(blocks.CharBlock): media = forms.Media(js=['scripted_char_block.js']) block = blocks.ListBlock(ScriptedCharBlock()) self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js())) def test_html_declaration_inheritance(self): class CharBlockWithDeclarations(blocks.CharBlock): def html_declarations(self): return '<script type="text/x-html-template">hello world</script>' block = blocks.ListBlock(CharBlockWithDeclarations()) self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations()) def test_searchable_content(self): class LinkBlock(blocks.StructBlock): title = blocks.CharBlock() link = blocks.URLBlock() block = blocks.ListBlock(LinkBlock()) content = block.get_searchable_content([ { 'title': "Wagtail", 'link': 'http://www.wagtail.io', }, { 'title': "Django", 'link': 'http://www.djangoproject.com', }, ]) self.assertEqual(content, ["Wagtail", "Django"]) class TestStreamBlock(unittest.TestCase): def test_initialisation(self): block = blocks.StreamBlock([ ('heading', blocks.CharBlock()), ('paragraph', blocks.CharBlock()), ]) self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph']) def test_initialisation_with_binary_string_names(self): # migrations will sometimes write out names as binary strings, just to keep us on our toes block = blocks.StreamBlock([ (b'heading', blocks.CharBlock()), (b'paragraph', blocks.CharBlock()), ]) self.assertEqual(list(block.child_blocks.keys()), [b'heading', b'paragraph']) def test_initialisation_from_subclass(self): class ArticleBlock(blocks.StreamBlock): heading = blocks.CharBlock() paragraph = blocks.CharBlock() block = ArticleBlock() self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph']) def test_initialisation_from_subclass_with_extra(self): class ArticleBlock(blocks.StreamBlock): heading = blocks.CharBlock() paragraph = blocks.CharBlock() block = ArticleBlock([ ('intro', blocks.CharBlock()) ]) self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro']) def test_initialisation_with_multiple_subclassses(self): class ArticleBlock(blocks.StreamBlock): heading = blocks.CharBlock() paragraph = blocks.CharBlock() class ArticleWithIntroBlock(ArticleBlock): intro = blocks.CharBlock() block = ArticleWithIntroBlock() self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro']) @unittest.expectedFailure # Field order doesn't match inheritance order def test_initialisation_with_mixins(self): class ArticleBlock(blocks.StreamBlock): heading = blocks.CharBlock() paragraph = blocks.CharBlock() class IntroMixin(blocks.StreamBlock): intro = blocks.CharBlock() class ArticleWithIntroBlock(ArticleBlock, IntroMixin): pass block = ArticleWithIntroBlock() self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro']) def 
render_article(self, data): class ArticleBlock(blocks.StreamBlock): heading = blocks.CharBlock() paragraph = blocks.RichTextBlock() block = ArticleBlock() value = block.to_python(data) return block.render(value) def test_render(self): html = self.render_article([ { 'type': 'heading', 'value': "My title", }, { 'type': 'paragraph', 'value': 'My <i>first</i> paragraph', }, { 'type': 'paragraph', 'value': 'My second paragraph', }, ]) self.assertIn('<div class="block-heading">My title</div>', html) self.assertIn('<div class="block-paragraph"><div class="rich-text">My <i>first</i> paragraph</div></div>', html) self.assertIn('<div class="block-paragraph"><div class="rich-text">My second paragraph</div></div>', html) def test_render_unknown_type(self): # This can happen if a developer removes a type from their StreamBlock html = self.render_article([ { 'type': 'foo', 'value': "Hello", }, { 'type': 'paragraph', 'value': 'My first paragraph', }, ]) self.assertNotIn('foo', html) self.assertNotIn('Hello', html) self.assertIn('<div class="block-paragraph"><div class="rich-text">My first paragraph</div></div>', html) def render_form(self): class ArticleBlock(blocks.StreamBlock): heading = blocks.CharBlock() paragraph = blocks.CharBlock() block = ArticleBlock() value = block.to_python([ { 'type': 'heading', 'value': "My title", }, { 'type': 'paragraph', 'value': 'My first paragraph', }, { 'type': 'paragraph', 'value': 'My second paragraph', }, ]) return block.render_form(value, prefix='myarticle') def test_render_form_wrapper_class(self): html = self.render_form() self.assertIn('<div class="sequence">', html) def test_render_form_count_field(self): html = self.render_form() self.assertIn('<input type="hidden" name="myarticle-count" id="myarticle-count" value="3">', html) def test_render_form_delete_field(self): html = self.render_form() self.assertIn('<input type="hidden" id="myarticle-0-deleted" name="myarticle-0-deleted" value="">', html) def test_render_form_order_fields(self): html = self.render_form() self.assertIn('<input type="hidden" id="myarticle-0-order" name="myarticle-0-order" value="0">', html) self.assertIn('<input type="hidden" id="myarticle-1-order" name="myarticle-1-order" value="1">', html) self.assertIn('<input type="hidden" id="myarticle-2-order" name="myarticle-2-order" value="2">', html) def test_render_form_type_fields(self): html = self.render_form() self.assertIn('<input type="hidden" id="myarticle-0-type" name="myarticle-0-type" value="heading">', html) self.assertIn('<input type="hidden" id="myarticle-1-type" name="myarticle-1-type" value="paragraph">', html) self.assertIn('<input type="hidden" id="myarticle-2-type" name="myarticle-2-type" value="paragraph">', html) def test_render_form_value_fields(self): html = self.render_form() self.assertIn('<input id="myarticle-0-value" name="myarticle-0-value" placeholder="Heading" type="text" value="My title" />', html) self.assertIn('<input id="myarticle-1-value" name="myarticle-1-value" placeholder="Paragraph" type="text" value="My first paragraph" />', html) self.assertIn('<input id="myarticle-2-value" name="myarticle-2-value" placeholder="Paragraph" type="text" value="My second paragraph" />', html) def test_html_declarations(self): class ArticleBlock(blocks.StreamBlock): heading = blocks.CharBlock() paragraph = blocks.CharBlock() block = ArticleBlock() html = block.html_declarations() self.assertIn('<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Heading" type="text" />', html) self.assertIn('<input 
id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Paragraph" type="text" />', html) def test_html_declarations_uses_default(self): class ArticleBlock(blocks.StreamBlock): heading = blocks.CharBlock(default="Fish found on moon") paragraph = blocks.CharBlock(default="Lorem ipsum dolor sit amet") block = ArticleBlock() html = block.html_declarations() self.assertIn('<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Heading" type="text" value="Fish found on moon" />', html) self.assertIn('<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Paragraph" type="text" value="Lorem ipsum dolor sit amet" />', html) def test_media_inheritance(self): class ScriptedCharBlock(blocks.CharBlock): media = forms.Media(js=['scripted_char_block.js']) class ArticleBlock(blocks.StreamBlock): heading = ScriptedCharBlock() paragraph = blocks.CharBlock() block = ArticleBlock() self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js())) def test_html_declaration_inheritance(self): class CharBlockWithDeclarations(blocks.CharBlock): def html_declarations(self): return '<script type="text/x-html-template">hello world</script>' class ArticleBlock(blocks.StreamBlock): heading = CharBlockWithDeclarations(default="Torchbox") paragraph = blocks.CharBlock() block = ArticleBlock() self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations()) def test_ordering_in_form_submission(self): class ArticleBlock(blocks.StreamBlock): heading = blocks.CharBlock() paragraph = blocks.CharBlock() block = ArticleBlock() # check that items are ordered by the 'order' field, not the order they appear in the form post_data = {'article-count': '3'} for i in range(0, 3): post_data.update({ 'article-%d-deleted' % i: '', 'article-%d-order' % i: str(2 - i), 'article-%d-type' % i: 'heading', 'article-%d-value' % i: "heading %d" % i }) block_value = block.value_from_datadict(post_data, {}, 'article') self.assertEqual(block_value[2].value, "heading 0") # check that items are ordered by 'order' numerically, not alphabetically post_data = {'article-count': '12'} for i in range(0, 12): post_data.update({ 'article-%d-deleted' % i: '', 'article-%d-order' % i: str(i), 'article-%d-type' % i: 'heading', 'article-%d-value' % i: "heading %d" % i }) block_value = block.value_from_datadict(post_data, {}, 'article') self.assertEqual(block_value[2].value, "heading 2") def test_searchable_content(self): class ArticleBlock(blocks.StreamBlock): heading = blocks.CharBlock() paragraph = blocks.CharBlock() block = ArticleBlock() value = block.to_python([ { 'type': 'heading', 'value': "My title", }, { 'type': 'paragraph', 'value': 'My first paragraph', }, { 'type': 'paragraph', 'value': 'My second paragraph', }, ]) content = block.get_searchable_content(value) self.assertEqual(content, [ "My title", "My first paragraph", "My second paragraph", ])
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType import copy as _copy class Hoverlabel(_BaseTraceHierarchyType): # class properties # -------------------- _parent_path_str = "histogram2d" _path_str = "histogram2d.hoverlabel" _valid_props = { "align", "alignsrc", "bgcolor", "bgcolorsrc", "bordercolor", "bordercolorsrc", "font", "namelength", "namelengthsrc", } # align # ----- @property def align(self): """ Sets the horizontal alignment of the text content within hover label box. Has an effect only if the hover label text spans more two or more lines The 'align' property is an enumeration that may be specified as: - One of the following enumeration values: ['left', 'right', 'auto'] - A tuple, list, or one-dimensional numpy array of the above Returns ------- Any|numpy.ndarray """ return self["align"] @align.setter def align(self, val): self["align"] = val # alignsrc # -------- @property def alignsrc(self): """ Sets the source reference on Chart Studio Cloud for `align`. The 'alignsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["alignsrc"] @alignsrc.setter def alignsrc(self, val): self["alignsrc"] = val # bgcolor # ------- @property def bgcolor(self): """ Sets the background color of the hover labels for this trace The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["bgcolor"] @bgcolor.setter def bgcolor(self, val): self["bgcolor"] = val # bgcolorsrc # ---------- @property def bgcolorsrc(self): """ Sets the source reference on Chart Studio Cloud for `bgcolor`. 
The 'bgcolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["bgcolorsrc"] @bgcolorsrc.setter def bgcolorsrc(self, val): self["bgcolorsrc"] = val # bordercolor # ----------- @property def bordercolor(self): """ Sets the border color of the hover labels for this trace. The 'bordercolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen - A list or array of any of the above Returns ------- str|numpy.ndarray """ return self["bordercolor"] @bordercolor.setter def bordercolor(self, val): self["bordercolor"] = val # bordercolorsrc # -------------- @property def bordercolorsrc(self): """ Sets the source reference on Chart Studio Cloud for `bordercolor`. The 'bordercolorsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["bordercolorsrc"] @bordercolorsrc.setter def bordercolorsrc(self, val): self["bordercolorsrc"] = val # font # ---- @property def font(self): """ Sets the font used in hover labels. The 'font' property is an instance of Font that may be specified as: - An instance of :class:`plotly.graph_objs.histogram2d.hoverlabel.Font` - A dict of string/value properties that will be passed to the Font constructor Supported dict properties: color colorsrc Sets the source reference on Chart Studio Cloud for `color`. family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. 
The Chart Studio Cloud (at https://chart-studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". familysrc Sets the source reference on Chart Studio Cloud for `family`. size sizesrc Sets the source reference on Chart Studio Cloud for `size`. Returns ------- plotly.graph_objs.histogram2d.hoverlabel.Font """ return self["font"] @font.setter def font(self, val): self["font"] = val # namelength # ---------- @property def namelength(self): """ Sets the default length (in number of characters) of the trace name in the hover labels for all traces. -1 shows the whole name regardless of length. 0-3 shows the first 0-3 characters, and an integer >3 will show the whole name if it is less than that many characters, but if it is longer, will truncate to `namelength - 3` characters and add an ellipsis. The 'namelength' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [-1, 9223372036854775807] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|numpy.ndarray """ return self["namelength"] @namelength.setter def namelength(self, val): self["namelength"] = val # namelengthsrc # ------------- @property def namelengthsrc(self): """ Sets the source reference on Chart Studio Cloud for `namelength`. The 'namelengthsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["namelengthsrc"] @namelengthsrc.setter def namelengthsrc(self, val): self["namelengthsrc"] = val # Self properties description # --------------------------- @property def _prop_descriptions(self): return """\ align Sets the horizontal alignment of the text content within hover label box. Has an effect only if the hover label text spans more two or more lines alignsrc Sets the source reference on Chart Studio Cloud for `align`. bgcolor Sets the background color of the hover labels for this trace bgcolorsrc Sets the source reference on Chart Studio Cloud for `bgcolor`. bordercolor Sets the border color of the hover labels for this trace. bordercolorsrc Sets the source reference on Chart Studio Cloud for `bordercolor`. font Sets the font used in hover labels. namelength Sets the default length (in number of characters) of the trace name in the hover labels for all traces. -1 shows the whole name regardless of length. 0-3 shows the first 0-3 characters, and an integer >3 will show the whole name if it is less than that many characters, but if it is longer, will truncate to `namelength - 3` characters and add an ellipsis. namelengthsrc Sets the source reference on Chart Studio Cloud for `namelength`. """ def __init__( self, arg=None, align=None, alignsrc=None, bgcolor=None, bgcolorsrc=None, bordercolor=None, bordercolorsrc=None, font=None, namelength=None, namelengthsrc=None, **kwargs ): """ Construct a new Hoverlabel object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.histogram2d.Hoverlabel` align Sets the horizontal alignment of the text content within hover label box. Has an effect only if the hover label text spans more two or more lines alignsrc Sets the source reference on Chart Studio Cloud for `align`. 
bgcolor Sets the background color of the hover labels for this trace bgcolorsrc Sets the source reference on Chart Studio Cloud for `bgcolor`. bordercolor Sets the border color of the hover labels for this trace. bordercolorsrc Sets the source reference on Chart Studio Cloud for `bordercolor`. font Sets the font used in hover labels. namelength Sets the default length (in number of characters) of the trace name in the hover labels for all traces. -1 shows the whole name regardless of length. 0-3 shows the first 0-3 characters, and an integer >3 will show the whole name if it is less than that many characters, but if it is longer, will truncate to `namelength - 3` characters and add an ellipsis. namelengthsrc Sets the source reference on Chart Studio Cloud for `namelength`. Returns ------- Hoverlabel """ super(Hoverlabel, self).__init__("hoverlabel") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.histogram2d.Hoverlabel constructor must be a dict or an instance of :class:`plotly.graph_objs.histogram2d.Hoverlabel`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("align", None) _v = align if align is not None else _v if _v is not None: self["align"] = _v _v = arg.pop("alignsrc", None) _v = alignsrc if alignsrc is not None else _v if _v is not None: self["alignsrc"] = _v _v = arg.pop("bgcolor", None) _v = bgcolor if bgcolor is not None else _v if _v is not None: self["bgcolor"] = _v _v = arg.pop("bgcolorsrc", None) _v = bgcolorsrc if bgcolorsrc is not None else _v if _v is not None: self["bgcolorsrc"] = _v _v = arg.pop("bordercolor", None) _v = bordercolor if bordercolor is not None else _v if _v is not None: self["bordercolor"] = _v _v = arg.pop("bordercolorsrc", None) _v = bordercolorsrc if bordercolorsrc is not None else _v if _v is not None: self["bordercolorsrc"] = _v _v = arg.pop("font", None) _v = font if font is not None else _v if _v is not None: self["font"] = _v _v = arg.pop("namelength", None) _v = namelength if namelength is not None else _v if _v is not None: self["namelength"] = _v _v = arg.pop("namelengthsrc", None) _v = namelengthsrc if namelengthsrc is not None else _v if _v is not None: self["namelengthsrc"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
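# --- Illustrative usage sketch (not part of this generated module) ----------
# Hoverlabel is normally reached through the trace constructor rather than
# instantiated directly; either a plain dict or an explicit
# graph_objects.histogram2d.Hoverlabel instance may be passed.  The data
# values below are arbitrary.
import plotly.graph_objects as go

fig = go.Figure(
    go.Histogram2d(
        x=[1, 2, 2, 3, 3, 3],
        y=[1, 1, 2, 2, 3, 3],
        hoverlabel=dict(
            bgcolor="white",
            bordercolor="black",
            font=dict(family="Arial", size=12),
            align="left",
            namelength=-1,  # show the full trace name in hover labels
        ),
    )
)
# fig.show()
# ----------------------------------------------------------------------------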
from django.test import TestCase from django.conf import settings from django.utils import timezone as tz from django.utils import dateparse import mock from msgvis.apps.datatable import models from msgvis.apps.corpus import models as corpus_models from msgvis.apps.dimensions.models import CategoricalDimension from msgvis.apps.dimensions import registry from msgvis.apps.base.tests import DistributionTestCaseMixins class TestDataTable(DistributionTestCaseMixins, TestCase): """Some basic functionality tests""" @mock.patch('msgvis.apps.dimensions.registry.get_dimension') def test_create_with_keys(self, get_dimension): """If given strings, finds the matching dimensions.""" datatable = models.DataTable('foo', 'bar') get_dimension.assert_has_calls([ mock.call('foo'), mock.call('bar') ], any_order=True) @mock.patch('msgvis.apps.dimensions.registry.get_dimension') def test_create_with_dimensions(self, get_dimension): """Accepts arguments that are dimensions""" d1 = mock.Mock(spec=CategoricalDimension) d2 = mock.Mock(spec=CategoricalDimension) datatable = models.DataTable(d1, d2) self.assertEquals(get_dimension.call_count, 0) def test_create_with_one_dimension(self): """Can be created with only one dimension""" d1 = mock.Mock(spec=CategoricalDimension) datatable = models.DataTable(d1) self.assertIsNone(datatable.secondary_dimension) class TestCategoricalDataTable(DistributionTestCaseMixins, TestCase): """Tests for categorical dimensions only, on the Message object""" def test_render_single_categorical(self): """Can produce a datatable with a single categorical dimension.""" values = [True, False] bool_distribution = self.get_distribution(values) dataset = self.generate_messages_for_distribution( field_name='contains_url', distribution=bool_distribution, ) dimension = registry.get_dimension('contains_url') datatable = models.DataTable(dimension) result = datatable.render(dataset.message_set.all()) self.assertDistributionsEqual(result, bool_distribution, level_key='contains_url', measure_key='value') def test_render_double_categorical(self): """Can produce a datatable with a two categorical dimensions.""" field_names = ('contains_url', 'contains_mention') values = [(True, True), (True, False), (False, True), (False, False)] bi_bool_distribution = self.get_distribution(values) dataset = self.generate_messages_for_multi_distribution( field_names=field_names, distribution=bi_bool_distribution, ) d1 = registry.get_dimension(field_names[0]) d2 = registry.get_dimension(field_names[1]) datatable = models.DataTable(d1, d2) result = datatable.render(dataset.message_set.all()) self.assertMultiDistributionsEqual(result, bi_bool_distribution, field_names, measure_key='value') class TestQuantitativeDataTable(DistributionTestCaseMixins, TestCase): """Tests for quantitative dimensions only, on the Message object""" def test_render_single_quantitative_narrow(self): """ Can produce a datatable with only a single quantitative dimension. The distribution is small enough no binning is needed. 
""" values = [0, 2, 3, 4, 6] quant_distribution = self.get_distribution(values) dataset = self.generate_messages_for_distribution( field_name='shared_count', distribution=quant_distribution, ) dimension = registry.get_dimension('shares') datatable = models.DataTable(dimension) result = datatable.render(dataset.message_set.all()) self.assertDistributionsEqual(result, quant_distribution, level_key='shares', measure_key='value') def test_render_single_quantitative_wide(self): """ Can produce a datatable with only a single quantitative dimension. The distribution is very wide and binning must be used. """ values = [0, 2, 3, 4, 60000] quant_distribution = self.get_distribution(values) dataset = self.generate_messages_for_distribution( field_name='shared_count', distribution=quant_distribution, ) binned_distribution = { 0: sum(quant_distribution[value] for value in values[:4]), 60000: quant_distribution[values[4]], } dimension = registry.get_dimension('shares') datatable = models.DataTable(dimension) result = datatable.render(dataset.message_set.all(), desired_primary_bins=5) self.assertDistributionsEqual(result, binned_distribution, level_key='shares', measure_key='value') def test_double_quantitative_narrow(self): """Can it render two quantitative dimensions when binning is not needed.""" values = [(0, 1), (2, 3), (3, 2), (4, 5), (6, 7)] quant_distribution = self.get_distribution(values) dataset = self.generate_messages_for_multi_distribution( ('shared_count', 'replied_to_count'), quant_distribution) d1 = registry.get_dimension('shares') d2 = registry.get_dimension('replies') datatable = models.DataTable(d1, d2) result = datatable.render(dataset.message_set.all()) self.assertMultiDistributionsEqual(result, quant_distribution, ('shares', 'replies'), measure_key='value') def test_double_quantitative_one_wide(self): """Can it render two quant dimensions, when one requires binning?""" values = [(0, 1), (2, 3), (6, 59999), (6, 60000)] quant_distribution = self.get_distribution(values) dataset = self.generate_messages_for_multi_distribution( ('shared_count', 'replied_to_count'), quant_distribution) binned_distribution = { (0, 0): quant_distribution[values[0]], (2, 0): quant_distribution[values[1]], (6, 59995): quant_distribution[values[2]] + quant_distribution[values[3]] } d1 = registry.get_dimension('shares') d2 = registry.get_dimension('replies') datatable = models.DataTable(d1, d2) result = datatable.render(dataset.message_set.all(), desired_secondary_bins=5) self.assertMultiDistributionsEqual(result, binned_distribution, ('shares', 'replies'), measure_key='value') class TestRelatedCategoricalDataTable(DistributionTestCaseMixins, TestCase): """Tests for categorical dimensions only, on a related table.""" def test_render_single_related_categorical(self): """Can produce a datatable with a single related categorical dimension.""" # Create some language labels language_ids = self.create_test_languages() language_distribution = self.get_distribution(language_ids) language_name_distribution = self.recover_related_field_distribution(language_distribution, corpus_models.Language, 'name') dataset = self.generate_messages_for_distribution( field_name='language_id', distribution=language_distribution, ) dimension = registry.get_dimension('language') datatable = models.DataTable(dimension) result = datatable.render(dataset.message_set.all()) self.assertDistributionsEqual(result, language_name_distribution, level_key='language', measure_key='value') def test_render_two_related_categorical(self): """Can 
produce a datatable with two related categorical dimensions.""" # Create some language labels language_ids = self.create_test_languages() dataset = self.create_authors_with_values('username', ['username_%d' % d for d in xrange(5)]) author_ids = dataset.person_set.values_list('id', flat=True).distinct() # create language/person pairs value_pairs = [] for lang in language_ids: for author in author_ids: # skip cases where both are even, just so's there's gaps if lang % 2 == 0 and author % 2 == 0: continue value_pairs.append((lang, author)) # Distribute some messages id_distribution = self.get_distribution(value_pairs) self.generate_messages_for_multi_distribution(('language_id', 'sender_id'), id_distribution, dataset=dataset) # Get the actual expected distribution value_distribution = self.convert_id_distribution_to_related(id_distribution, (corpus_models.Language, corpus_models.Person), ('name', 'username')) d1 = registry.get_dimension('language') d2 = registry.get_dimension('sender') datatable = models.DataTable(d1, d2) result = datatable.render(dataset.message_set.all()) self.assertMultiDistributionsEqual(result, value_distribution, ('language', 'sender'), measure_key='value') class CategoricalDimensionsRegistryTest(DistributionTestCaseMixins, TestCase): """Tests transplanted from the dimension distribution tests""" def doCategoricalDistributionTest(self, dimension_key, dataset, distribution): dimension = registry.get_dimension(dimension_key) datatable = models.DataTable(dimension) # Calculate the categorical distribution over the field name result = datatable.render(dataset.message_set.all()) self.assertDistributionsEqual(result, distribution, level_key=dimension.key, measure_key='value') def test_related_categorical_distribution(self): """ Checks that the distribution of a categorical related model field, in this case Language, can be calculated correctly. """ # Create some language labels language_ids = self.create_test_languages() language_distribution = self.get_distribution(language_ids) language_name_distribution = self.recover_related_field_distribution(language_distribution, corpus_models.Language, 'name') dataset = self.generate_messages_for_distribution( field_name='language_id', distribution=language_distribution, ) self.doCategoricalDistributionTest('language', dataset, language_name_distribution) def test_many_related_model_distribution(self): """ Checks that the distribution of a categorical many-to-many related model field, in this case Hashtags, can be calculated correctly. """ hashtag_ids = self.create_test_hashtags() hashtag_distribution = self.get_distribution(hashtag_ids) hashtag_text_distribution = self.recover_related_field_distribution(hashtag_distribution, corpus_models.Hashtag, 'text') dataset = self.generate_messages_for_distribution( field_name='hashtags', distribution=hashtag_distribution, many=True, ) self.doCategoricalDistributionTest('hashtags', dataset, hashtag_text_distribution) def test_boolean_distribution(self): """ Checks that the distribution of a boolean field, in this case 'contains_hashtag', can be calculated correctly. """ values = [True, False] bool_distribution = self.get_distribution(values) dataset = self.generate_messages_for_distribution( field_name='contains_url', distribution=bool_distribution, ) self.doCategoricalDistributionTest('contains_url', dataset, bool_distribution) def test_boolean_distribution_with_zeros(self): """ Checks that the tests work if there are zeros in the expected distribution. 
""" values = [True, False] bool_distribution = self.get_distribution(values, min_count=0) self.assertEquals(bool_distribution[True], 0) dataset = self.generate_messages_for_distribution( field_name='contains_url', distribution=bool_distribution, ) self.doCategoricalDistributionTest('contains_url', dataset, bool_distribution) def test_empty_boolean_distribution(self): """ Checks that we can calculate a distribution with no messages. """ bool_distribution = {True: 0, False: 0} dataset = self.generate_messages_for_distribution( field_name='contains_url', distribution=bool_distribution, ) self.doCategoricalDistributionTest('contains_url', dataset, bool_distribution) class QuantitativeDistributionsTest(DistributionTestCaseMixins, TestCase): """Ported from the dimension distribution tests""" def doQuantitativeDimensionsTest(self, dimension_key, dataset, distribution, **kwargs): dimension = registry.get_dimension(dimension_key) datatable = models.DataTable(dimension) # Calculate the categorical distribution over the field name result = datatable.render(dataset.message_set.all(), **kwargs) self.assertDistributionsEqual(result, distribution, level_key=dimension.key, measure_key='value') def test_count_distribution(self): """ Checks that the distribution of a count field, in this case shared_count, can be calculated correctly. """ shared_counts = range(0, 5) shared_count_distribution = self.get_distribution(shared_counts) dataset = self.generate_messages_for_distribution( field_name='shared_count', distribution=shared_count_distribution, ) self.doQuantitativeDimensionsTest('shares', dataset, shared_count_distribution, desired_primary_bins=50) def test_wide_count_distribution(self): """ If the range of the counts is very large, they should come out binned. """ shared_counts = [1, 2, 100, 101] shared_count_distribution = self.get_distribution(shared_counts) dataset = self.generate_messages_for_distribution( field_name='shared_count', distribution=shared_count_distribution, ) binned_distribution = { 0: shared_count_distribution[1] + shared_count_distribution[2], 100: shared_count_distribution[100] + shared_count_distribution[101], } self.doQuantitativeDimensionsTest('shares', dataset, binned_distribution, desired_primary_bins=5) def test_excludes_all_data(self): """ If the filters exclude all the data, an empty result set should be produced. """ field_names = ('shared_count', 'replied_to_count') values = [(1, 1), (1, 4), (1, 3), (2, 1), (2, 2)] bi_distribution = self.get_distribution(values) dataset = self.generate_messages_for_multi_distribution(field_names, bi_distribution) d1 = registry.get_dimension('shares') d2 = registry.get_dimension('replies') datatable = models.DataTable(d1, d2) filtered = dataset.message_set.filter( shared_count__range=(2, 5), replied_to_count__range=(3, 5), ) result = datatable.render(filtered) self.assertEquals(result.count(), 0) class TimeDistributionsTest(DistributionTestCaseMixins, TestCase): def setUp(self): # Get an arbitrary time to work with self.base_time = tz.datetime(2012, 5, 2, 20, 10, 2, 0) if settings.USE_TZ: self.base_time = self.base_time.replace(tzinfo=tz.utc) def generate_times(self, start_time, offset_type, offsets): """ Generate a list of datetimes starting with start. The offset type is a property for timedelta. The offsets is an array of numbers. 
""" yield start_time for offset in offsets: start_time += tz.timedelta(**{offset_type: offset}) yield start_time def fix_datetimes(self, results): """ Given a list of value/count dictionaries, makes sure that all the values are datetimes, not strings. """ for row in results: timeval = row['time'] count = row['value'] if isinstance(timeval, basestring): timeval = dateparse.parse_datetime(timeval) if settings.USE_TZ: timeval = timeval.replace(tzinfo=tz.utc) row['time'] = timeval def doTimeDimensionsTest(self, dataset, distribution, **kwargs): dimension_key = 'time' dimension = registry.get_dimension(dimension_key) datatable = models.DataTable(dimension) # Calculate the categorical distribution over the field name result = datatable.render(dataset.message_set.all(), **kwargs) self.fix_datetimes(result) self.assertDistributionsEqual(result, distribution, level_key=dimension.key, measure_key='value') def test_narrow_time_distribution(self): """ Checks that the distribution of a time field can be calculated correctly. """ times = list(self.generate_times(self.base_time, 'minutes', [2, 5, 10, 12, 1])) time_distribution = self.get_distribution(times) dataset = self.generate_messages_for_distribution( field_name='time', distribution=time_distribution, ) self.doTimeDimensionsTest(dataset, time_distribution, desired_primary_bins=2000) def test_wide_time_distribution(self): """ If the range of the counts is very large, they should come out binned. """ # base_time plus 4 days later times = list(self.generate_times(self.base_time, 'days', [4])) time_distribution = self.get_distribution(times) dataset = self.generate_messages_for_distribution( field_name='time', distribution=time_distribution, ) # Remove the time parts day1 = times[0].replace(hour=0, minute=0, second=0, microsecond=0) day2 = times[1].replace(hour=0, minute=0, second=0, microsecond=0) binned_distribution = { day1: time_distribution[times[0]], day2: time_distribution[times[1]] } self.doTimeDimensionsTest(dataset, binned_distribution, desired_primary_bins=4) class AuthorFieldDistributionsTest(DistributionTestCaseMixins, TestCase): def doDistributionTest(self, dimension_key, dataset, distribution, **kwargs): dimension = registry.get_dimension(dimension_key) # Calculate the categorical distribution over the field name datatable = models.DataTable(dimension) result = datatable.render(dataset.message_set.all(), **kwargs) self.assertDistributionsEqual(result, distribution, level_key=dimension_key, measure_key='value') def test_author_name_distribution(self): """Count messages by author name""" dataset = self.create_authors_with_values('username', ['username_%d' % d for d in xrange(5)]) author_distribution = self.distibute_messages_to_authors(dataset) author_name_distribution = self.recover_related_field_distribution(author_distribution, corpus_models.Person, 'username') self.doDistributionTest('sender', dataset, author_name_distribution) def test_author_count_distribution(self): """Can count messages for different author message_counts""" dataset = self.create_authors_with_values('message_count', [5, 10, 15, 20, 25]) author_distribution = self.distibute_messages_to_authors(dataset) author_count_distribution = self.recover_related_field_distribution(author_distribution, corpus_models.Person, 'message_count') self.doDistributionTest('sender_message_count', dataset, author_count_distribution) def test_author_count_distribution_with_duplicates(self): """Multiple authors with the same message_count.""" dataset = 
self.create_authors_with_values('message_count', [5, 10, 15, 20, 25, 5, 10, 15]) author_distribution = self.distibute_messages_to_authors(dataset) author_count_distribution = self.recover_related_field_distribution(author_distribution, corpus_models.Person, 'message_count') self.doDistributionTest('sender_message_count', dataset, author_count_distribution) def test_wide_author_count_distribution(self): """ If the range of the counts is very large, they should come out binned. """ dataset = self.create_authors_with_values('message_count', [5, 10, 2005]) author_distribution = self.distibute_messages_to_authors(dataset) author_count_distribution = self.recover_related_field_distribution(author_distribution, corpus_models.Person, 'message_count') binned_distribution = { 0: author_count_distribution[5] + author_count_distribution[10], 2000: author_count_distribution[2005] } self.doDistributionTest('sender_message_count', dataset, binned_distribution, desired_primary_bins=2) def test_narrow_author_count_distribution(self): """ If the range is very small but we ask for a lot of bins, we should get a bin size of 1. """ dataset = self.create_authors_with_values('message_count', [5, 6, 7]) author_distribution = self.distibute_messages_to_authors(dataset) author_count_distribution = self.recover_related_field_distribution(author_distribution, corpus_models.Person, 'message_count') self.doDistributionTest('sender_message_count', dataset, author_count_distribution, desired_primary_bins=50) class GenerateDataTableTest(DistributionTestCaseMixins, TestCase): """Test the combined data table generation routine""" def test_generate(self): """It should render a data table""" dataset = self.create_empty_dataset() render_calls = [] class MockDataTable(models.DataTable): def render(self, *args, **kwargs): render_calls.append((args, kwargs)) datatable = MockDataTable(primary_dimension='time') datatable.generate(dataset) self.assertEquals(len(render_calls), 1)
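# --- Illustrative sketch (not part of the test suite) -------------------------
# The "wide"/"narrow" distribution tests above rely on the DataTable binning
# values by a width derived from the data range and desired_primary_bins, with
# a minimum bin width of 1.  The helper below is a minimal, self-contained
# sketch of that behaviour under those assumptions; it is NOT the project's
# actual DataTable implementation, and the real bin-width choice may differ.
from collections import Counter
import math

def bin_counts(values, desired_primary_bins=50):
    """Return {bin_start: count} for integer values, mimicking the tests above."""
    lo, hi = min(values), max(values)
    span = max(hi - lo, 1)
    width = max(1, math.ceil(span / desired_primary_bins))  # never below 1
    return dict(Counter((v // width) * width for v in values))

# Wide range -> coarse bins, narrow range -> bin width of 1:
print(bin_counts([1, 2, 100, 101], desired_primary_bins=5))   # {0: 2, 100: 2}
print(bin_counts([5, 6, 7], desired_primary_bins=50))          # {5: 1, 6: 1, 7: 1}
# ------------------------------------------------------------------------------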
"""The tests for the Entity component helper.""" # pylint: disable=protected-access from collections import OrderedDict from datetime import timedelta import logging from unittest.mock import Mock, patch import asynctest import pytest from homeassistant.components import group from homeassistant.const import ENTITY_MATCH_ALL import homeassistant.core as ha from homeassistant.exceptions import PlatformNotReady from homeassistant.helpers import discovery from homeassistant.helpers.entity_component import EntityComponent from homeassistant.setup import async_setup_component import homeassistant.util.dt as dt_util from tests.common import ( MockConfigEntry, MockEntity, MockModule, MockPlatform, async_fire_time_changed, mock_coro, mock_entity_platform, mock_integration, ) _LOGGER = logging.getLogger(__name__) DOMAIN = "test_domain" async def test_setting_up_group(hass): """Set up the setting of a group.""" assert await async_setup_component(hass, "group", {"group": {}}) component = EntityComponent(_LOGGER, DOMAIN, hass, group_name="everyone") # No group after setup assert len(hass.states.async_entity_ids()) == 0 await component.async_add_entities([MockEntity()]) await hass.async_block_till_done() # group exists assert len(hass.states.async_entity_ids()) == 2 assert hass.states.async_entity_ids("group") == ["group.everyone"] grp = hass.states.get("group.everyone") assert grp.attributes.get("entity_id") == ("test_domain.unnamed_device",) # group extended await component.async_add_entities([MockEntity(name="goodbye")]) await hass.async_block_till_done() assert len(hass.states.async_entity_ids()) == 3 grp = hass.states.get("group.everyone") # Ordered in order of added to the group assert grp.attributes.get("entity_id") == ( "test_domain.goodbye", "test_domain.unnamed_device", ) async def test_setup_loads_platforms(hass): """Test the loading of the platforms.""" component_setup = Mock(return_value=True) platform_setup = Mock(return_value=None) mock_integration(hass, MockModule("test_component", setup=component_setup)) # mock the dependencies mock_integration(hass, MockModule("mod2", dependencies=["test_component"])) mock_entity_platform(hass, "test_domain.mod2", MockPlatform(platform_setup)) component = EntityComponent(_LOGGER, DOMAIN, hass) assert not component_setup.called assert not platform_setup.called component.setup({DOMAIN: {"platform": "mod2"}}) await hass.async_block_till_done() assert component_setup.called assert platform_setup.called async def test_setup_recovers_when_setup_raises(hass): """Test the setup if exceptions are happening.""" platform1_setup = Mock(side_effect=Exception("Broken")) platform2_setup = Mock(return_value=None) mock_entity_platform(hass, "test_domain.mod1", MockPlatform(platform1_setup)) mock_entity_platform(hass, "test_domain.mod2", MockPlatform(platform2_setup)) component = EntityComponent(_LOGGER, DOMAIN, hass) assert not platform1_setup.called assert not platform2_setup.called component.setup( OrderedDict( [ (DOMAIN, {"platform": "mod1"}), ("{} 2".format(DOMAIN), {"platform": "non_exist"}), ("{} 3".format(DOMAIN), {"platform": "mod2"}), ] ) ) await hass.async_block_till_done() assert platform1_setup.called assert platform2_setup.called @asynctest.patch( "homeassistant.helpers.entity_component.EntityComponent" ".async_setup_platform", return_value=mock_coro(), ) @asynctest.patch( "homeassistant.setup.async_setup_component", return_value=mock_coro(True) ) async def test_setup_does_discovery(mock_setup_component, mock_setup, hass): """Test setup for discovery.""" 
component = EntityComponent(_LOGGER, DOMAIN, hass) component.setup({}) discovery.load_platform( hass, DOMAIN, "platform_test", {"msg": "discovery_info"}, {DOMAIN: {}} ) await hass.async_block_till_done() assert mock_setup.called assert ("platform_test", {}, {"msg": "discovery_info"}) == mock_setup.call_args[0] @asynctest.patch("homeassistant.helpers.entity_platform." "async_track_time_interval") async def test_set_scan_interval_via_config(mock_track, hass): """Test the setting of the scan interval via configuration.""" def platform_setup(hass, config, add_entities, discovery_info=None): """Test the platform setup.""" add_entities([MockEntity(should_poll=True)]) mock_entity_platform(hass, "test_domain.platform", MockPlatform(platform_setup)) component = EntityComponent(_LOGGER, DOMAIN, hass) component.setup( {DOMAIN: {"platform": "platform", "scan_interval": timedelta(seconds=30)}} ) await hass.async_block_till_done() assert mock_track.called assert timedelta(seconds=30) == mock_track.call_args[0][2] async def test_set_entity_namespace_via_config(hass): """Test setting an entity namespace.""" def platform_setup(hass, config, add_entities, discovery_info=None): """Test the platform setup.""" add_entities([MockEntity(name="beer"), MockEntity(name=None)]) platform = MockPlatform(platform_setup) mock_entity_platform(hass, "test_domain.platform", platform) component = EntityComponent(_LOGGER, DOMAIN, hass) component.setup({DOMAIN: {"platform": "platform", "entity_namespace": "yummy"}}) await hass.async_block_till_done() assert sorted(hass.states.async_entity_ids()) == [ "test_domain.yummy_beer", "test_domain.yummy_unnamed_device", ] async def test_extract_from_service_available_device(hass): """Test the extraction of entity from service and device is available.""" component = EntityComponent(_LOGGER, DOMAIN, hass) await component.async_add_entities( [ MockEntity(name="test_1"), MockEntity(name="test_2", available=False), MockEntity(name="test_3"), MockEntity(name="test_4", available=False), ] ) call_1 = ha.ServiceCall("test", "service", data={"entity_id": ENTITY_MATCH_ALL}) assert ["test_domain.test_1", "test_domain.test_3"] == sorted( ent.entity_id for ent in (await component.async_extract_from_service(call_1)) ) call_2 = ha.ServiceCall( "test", "service", data={"entity_id": ["test_domain.test_3", "test_domain.test_4"]}, ) assert ["test_domain.test_3"] == sorted( ent.entity_id for ent in (await component.async_extract_from_service(call_2)) ) async def test_platform_not_ready(hass): """Test that we retry when platform not ready.""" platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady, None]) mock_integration(hass, MockModule("mod1")) mock_entity_platform(hass, "test_domain.mod1", MockPlatform(platform1_setup)) component = EntityComponent(_LOGGER, DOMAIN, hass) await component.async_setup({DOMAIN: {"platform": "mod1"}}) assert len(platform1_setup.mock_calls) == 1 assert "test_domain.mod1" not in hass.config.components utcnow = dt_util.utcnow() with patch("homeassistant.util.dt.utcnow", return_value=utcnow): # Should not trigger attempt 2 async_fire_time_changed(hass, utcnow + timedelta(seconds=29)) await hass.async_block_till_done() assert len(platform1_setup.mock_calls) == 1 # Should trigger attempt 2 async_fire_time_changed(hass, utcnow + timedelta(seconds=30)) await hass.async_block_till_done() assert len(platform1_setup.mock_calls) == 2 assert "test_domain.mod1" not in hass.config.components # This should not trigger attempt 3 async_fire_time_changed(hass, utcnow + 
timedelta(seconds=59)) await hass.async_block_till_done() assert len(platform1_setup.mock_calls) == 2 # Trigger attempt 3, which succeeds async_fire_time_changed(hass, utcnow + timedelta(seconds=60)) await hass.async_block_till_done() assert len(platform1_setup.mock_calls) == 3 assert "test_domain.mod1" in hass.config.components async def test_extract_from_service_fails_if_no_entity_id(hass): """Test the extraction of everything from service.""" component = EntityComponent(_LOGGER, DOMAIN, hass) await component.async_add_entities( [MockEntity(name="test_1"), MockEntity(name="test_2")] ) call = ha.ServiceCall("test", "service") assert [] == sorted( ent.entity_id for ent in (await component.async_extract_from_service(call)) ) async def test_extract_from_service_filter_out_non_existing_entities(hass): """Test the extraction of non existing entities from service.""" component = EntityComponent(_LOGGER, DOMAIN, hass) await component.async_add_entities( [MockEntity(name="test_1"), MockEntity(name="test_2")] ) call = ha.ServiceCall( "test", "service", {"entity_id": ["test_domain.test_2", "test_domain.non_exist"]}, ) assert ["test_domain.test_2"] == [ ent.entity_id for ent in await component.async_extract_from_service(call) ] async def test_extract_from_service_no_group_expand(hass): """Test not expanding a group.""" component = EntityComponent(_LOGGER, DOMAIN, hass) test_group = await group.Group.async_create_group( hass, "test_group", ["light.Ceiling", "light.Kitchen"] ) await component.async_add_entities([test_group]) call = ha.ServiceCall("test", "service", {"entity_id": ["group.test_group"]}) extracted = await component.async_extract_from_service(call, expand_group=False) assert extracted == [test_group] async def test_setup_dependencies_platform(hass): """Test we setup the dependencies of a platform. We're explictely testing that we process dependencies even if a component with the same name has already been loaded. 
""" mock_integration( hass, MockModule("test_component", dependencies=["test_component2"]) ) mock_integration(hass, MockModule("test_component2")) mock_entity_platform(hass, "test_domain.test_component", MockPlatform()) component = EntityComponent(_LOGGER, DOMAIN, hass) await component.async_setup({DOMAIN: {"platform": "test_component"}}) assert "test_component" in hass.config.components assert "test_component2" in hass.config.components assert "test_domain.test_component" in hass.config.components async def test_setup_entry(hass): """Test setup entry calls async_setup_entry on platform.""" mock_setup_entry = Mock(return_value=mock_coro(True)) mock_entity_platform( hass, "test_domain.entry_domain", MockPlatform( async_setup_entry=mock_setup_entry, scan_interval=timedelta(seconds=5) ), ) component = EntityComponent(_LOGGER, DOMAIN, hass) entry = MockConfigEntry(domain="entry_domain") assert await component.async_setup_entry(entry) assert len(mock_setup_entry.mock_calls) == 1 p_hass, p_entry, _ = mock_setup_entry.mock_calls[0][1] assert p_hass is hass assert p_entry is entry assert component._platforms[entry.entry_id].scan_interval == timedelta(seconds=5) async def test_setup_entry_platform_not_exist(hass): """Test setup entry fails if platform doesnt exist.""" component = EntityComponent(_LOGGER, DOMAIN, hass) entry = MockConfigEntry(domain="non_existing") assert (await component.async_setup_entry(entry)) is False async def test_setup_entry_fails_duplicate(hass): """Test we don't allow setting up a config entry twice.""" mock_setup_entry = Mock(return_value=mock_coro(True)) mock_entity_platform( hass, "test_domain.entry_domain", MockPlatform(async_setup_entry=mock_setup_entry), ) component = EntityComponent(_LOGGER, DOMAIN, hass) entry = MockConfigEntry(domain="entry_domain") assert await component.async_setup_entry(entry) with pytest.raises(ValueError): await component.async_setup_entry(entry) async def test_unload_entry_resets_platform(hass): """Test unloading an entry removes all entities.""" mock_setup_entry = Mock(return_value=mock_coro(True)) mock_entity_platform( hass, "test_domain.entry_domain", MockPlatform(async_setup_entry=mock_setup_entry), ) component = EntityComponent(_LOGGER, DOMAIN, hass) entry = MockConfigEntry(domain="entry_domain") assert await component.async_setup_entry(entry) assert len(mock_setup_entry.mock_calls) == 1 add_entities = mock_setup_entry.mock_calls[0][1][2] add_entities([MockEntity()]) await hass.async_block_till_done() assert len(hass.states.async_entity_ids()) == 1 assert await component.async_unload_entry(entry) assert len(hass.states.async_entity_ids()) == 0 async def test_unload_entry_fails_if_never_loaded(hass): """.""" component = EntityComponent(_LOGGER, DOMAIN, hass) entry = MockConfigEntry(domain="entry_domain") with pytest.raises(ValueError): await component.async_unload_entry(entry) async def test_update_entity(hass): """Test that we can update an entity with the helper.""" component = EntityComponent(_LOGGER, DOMAIN, hass) entity = MockEntity() entity.async_update_ha_state = Mock(return_value=mock_coro()) await component.async_add_entities([entity]) # Called as part of async_add_entities assert len(entity.async_update_ha_state.mock_calls) == 1 await hass.helpers.entity_component.async_update_entity(entity.entity_id) assert len(entity.async_update_ha_state.mock_calls) == 2 assert entity.async_update_ha_state.mock_calls[-1][1][0] is True async def test_set_service_race(hass): """Test race condition on setting service.""" exception = False def 
async_loop_exception_handler(_, _2) -> None: """Handle all exception inside the core loop.""" nonlocal exception exception = True hass.loop.set_exception_handler(async_loop_exception_handler) await async_setup_component(hass, "group", {}) component = EntityComponent(_LOGGER, DOMAIN, hass, group_name="yo") for _ in range(2): hass.async_create_task(component.async_add_entities([MockEntity()])) await hass.async_block_till_done() assert not exception async def test_extract_all_omit_entity_id(hass, caplog): """Test extract all with None and *.""" component = EntityComponent(_LOGGER, DOMAIN, hass) await component.async_add_entities( [MockEntity(name="test_1"), MockEntity(name="test_2")] ) call = ha.ServiceCall("test", "service") assert [] == sorted( ent.entity_id for ent in await component.async_extract_from_service(call) ) async def test_extract_all_use_match_all(hass, caplog): """Test extract all with None and *.""" component = EntityComponent(_LOGGER, DOMAIN, hass) await component.async_add_entities( [MockEntity(name="test_1"), MockEntity(name="test_2")] ) call = ha.ServiceCall("test", "service", {"entity_id": "all"}) assert ["test_domain.test_1", "test_domain.test_2"] == sorted( ent.entity_id for ent in await component.async_extract_from_service(call) ) assert ( "Not passing an entity ID to a service to target all entities is " "deprecated" ) not in caplog.text
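# --- Illustrative sketch (not part of the test suite) -------------------------
# test_platform_not_ready above exercises the pattern of retrying platform
# setup on a fixed interval whenever it raises PlatformNotReady.  The snippet
# below is a simplified, standalone sketch of that retry loop; it does not use
# Home Assistant's real scheduler, EntityPlatform, or its unbounded retry policy.
import asyncio

class PlatformNotReady(Exception):
    """Raised by a platform that cannot be set up yet."""

async def setup_with_retry(setup, retry_interval=30, max_attempts=3):
    """Call *setup* until it stops raising PlatformNotReady (bounded for the demo)."""
    for attempt in range(1, max_attempts + 1):
        try:
            await setup()
            return True
        except PlatformNotReady:
            if attempt == max_attempts:
                return False
            await asyncio.sleep(retry_interval)
    return False

async def _demo():
    calls = []
    async def flaky_setup():
        calls.append(None)
        if len(calls) < 3:          # fail twice, succeed on the third attempt
            raise PlatformNotReady
    ok = await setup_with_retry(flaky_setup, retry_interval=0)
    print(ok, len(calls))            # True 3

asyncio.run(_demo())
# ------------------------------------------------------------------------------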
from collections import OrderedDict from numpy import log10 as nplog10, zeros, min, max, linspace, array, concatenate, isfinite, greater from uncertainties.unumpy import uarray, nominal_values, std_devs, log10 as umlog10 from CodeTools.PlottingManager import myPickle from ManageFlow import DataToTreat from Math_Libraries.bces_script import bces from Plotting_Libraries.dazer_plotter import Plot_Conf from cloudy_library.cloudy_methods import Cloudy_Tools import pyneb as pn pn.atomicData.setDataFile('s_iii_coll_HRS12.dat') def Figure_Legends_Colors(ColorVector): Model_dict = OrderedDict() Legends_dict = OrderedDict() Colors_dict = OrderedDict() Model_dict['_z0.004_age5.0'] = 'log age = 5.0 log z = -2.4, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.004_age5.5'] = 'log age = 5.5 log z = -2.4, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.004_age6.0'] = 'log age = 6.0 log z = -2.4, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.004_age6.5'] = 'log age = 6.5 log z = -2.4, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.004_age7.0'] = 'log age = 7.0 log z = -2.4, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.004_age7.5'] = 'log age = 7.5 log z = -2.4, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.008_age5.0'] = 'log age = 5.0 log z = -2.1, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.008_age5.5'] = 'log age = 5.5 log z = -2.1, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.008_age6.0'] = 'log age = 6.0 log z = -2.1, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.008_age6.5'] = 'log age = 6.5 log z = -2.1, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.008_age7.0'] = 'log age = 7.0 log z = -2.1, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.008_age7.5'] = 'log age = 7.5 log z = -2.1, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.02_age5.0'] = 'log age = 5.0 log z = -1.7, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.02_age5.5'] = 'log age = 5.5 log z = -1.7, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.02_age6.0'] = 'log age = 6.0 log z = -1.7, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.02_age6.5'] = 'log age = 6.5 log z = -1.7, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.02_age7.0'] = 'log age = 7.0 log z = -1.7, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.02_age7.5'] = 'log age = 7.5 log z = -1.7, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.05_age5.0'] = 'log age = 5.0 log z = -1.31, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.05_age5.5'] = 'log age = 5.5 log z = -1.31, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.05_age6.0'] = 'log age = 6.0 log z = -1.31, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.05_age6.5'] = 'log age = 6.5 log z = -1.31, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.05_age7.0'] = 'log age = 7.0 log z = -1.31, file="sp-kro_z0001-z05_stellar.mod"' Model_dict['_z0.05_age7.5'] = 'log age = 7.5 log z = -1.31, file="sp-kro_z0001-z05_stellar.mod"' Legends_dict['_z0.004_age5.0'] = 'log(age) = 5.0' Legends_dict['_z0.004_age5.5'] = 'log(age) = 5.5' Legends_dict['_z0.004_age6.0'] = 'log(age) = 6.0' Legends_dict['_z0.004_age6.5'] = 'log(age) = 6.5' Legends_dict['_z0.004_age7.0'] = 'log(age) = 7.0' Legends_dict['_z0.004_age7.5'] = 'log(age) = 7.5' Legends_dict['_z0.008_age5.0'] = 'log(age) = 5.0' Legends_dict['_z0.008_age5.5'] = 'log(age) = 5.5' Legends_dict['_z0.008_age6.0'] = 'log(age) = 6.0' Legends_dict['_z0.008_age6.5'] = 'log(age) = 6.5' Legends_dict['_z0.008_age7.0'] = 'log(age) = 7.0' Legends_dict['_z0.008_age7.5'] = 
'log(age) = 7.5' Legends_dict['_z0.02_age5.0'] = 'log(age) = 5.0' Legends_dict['_z0.02_age5.5'] = 'log(age) = 5.5' Legends_dict['_z0.02_age6.0'] = 'log(age) = 6.0' Legends_dict['_z0.02_age6.5'] = 'log(age) = 6.5' Legends_dict['_z0.02_age7.0'] = 'log(age) = 7.0' Legends_dict['_z0.02_age7.5'] = 'log(age) = 7.5' Legends_dict['_z0.05_age5.0'] = 'log(age) = 5.0' Legends_dict['_z0.05_age5.5'] = 'log(age) = 5.5' Legends_dict['_z0.05_age6.0'] = 'log(age) = 6.0' Legends_dict['_z0.05_age6.5'] = 'log(age) = 6.5' Legends_dict['_z0.05_age7.0'] = 'log(age) = 7.0' Legends_dict['_z0.05_age7.5'] = 'log(age) = 7.5' Colors_dict['_z0.004_age5.0'] = ColorVector[2][0] Colors_dict['_z0.004_age5.5'] = ColorVector[2][1] Colors_dict['_z0.004_age6.0'] = ColorVector[2][2] Colors_dict['_z0.004_age6.5'] = ColorVector[2][3] Colors_dict['_z0.004_age7.0'] = ColorVector[2][4] Colors_dict['_z0.004_age7.5'] = ColorVector[2][5] Colors_dict['_z0.008_age5.0'] = ColorVector[2][0] Colors_dict['_z0.008_age5.5'] = ColorVector[2][1] Colors_dict['_z0.008_age6.0'] = ColorVector[2][2] Colors_dict['_z0.008_age6.5'] = ColorVector[2][3] Colors_dict['_z0.008_age7.0'] = ColorVector[2][4] Colors_dict['_z0.008_age7.5'] = ColorVector[2][5] Colors_dict['_z0.02_age5.0'] = ColorVector[2][0] Colors_dict['_z0.02_age5.5'] = ColorVector[2][1] Colors_dict['_z0.02_age6.0'] = ColorVector[2][2] Colors_dict['_z0.02_age6.5'] = ColorVector[2][3] Colors_dict['_z0.02_age7.0'] = ColorVector[2][4] Colors_dict['_z0.02_age7.5'] = ColorVector[2][5] Colors_dict['_z0.05_age5.0'] = ColorVector[2][0] Colors_dict['_z0.05_age5.5'] = ColorVector[2][1] Colors_dict['_z0.05_age6.0'] = ColorVector[2][2] Colors_dict['_z0.05_age6.5'] = ColorVector[2][3] Colors_dict['_z0.05_age7.0'] = ColorVector[2][4] Colors_dict['_z0.05_age7.5'] = ColorVector[2][5] # Legends_dict['_z0.004_age5.0'] = 'z = 0.004' # Legends_dict['_z0.004_age5.5'] = 'z = 0.004' # Legends_dict['_z0.004_age6.0'] = 'z = 0.004' # Legends_dict['_z0.004_age6.5'] = 'z = 0.004' # Legends_dict['_z0.004_age7.0'] = 'z = 0.004' # Legends_dict['_z0.004_age7.5'] = 'z = 0.004' # # Legends_dict['_z0.008_age5.0'] = 'z = 0.008' # Legends_dict['_z0.008_age5.5'] = 'z = 0.008' # Legends_dict['_z0.008_age6.0'] = 'z = 0.008' # Legends_dict['_z0.008_age6.5'] = 'z = 0.008' # Legends_dict['_z0.008_age7.0'] = 'z = 0.008' # Legends_dict['_z0.008_age7.5'] = 'z = 0.008' # # Legends_dict['_z0.02_age5.0'] = 'z = 0.02' # Legends_dict['_z0.02_age5.5'] = 'z = 0.02' # Legends_dict['_z0.02_age6.0'] = 'z = 0.02' # Legends_dict['_z0.02_age6.5'] = 'z = 0.02' # Legends_dict['_z0.02_age7.0'] = 'z = 0.02' # Legends_dict['_z0.02_age7.5'] = 'z = 0.02' # # Legends_dict['_z0.05_age5.0'] = 'z = 0.05' # Legends_dict['_z0.05_age5.5'] = 'z = 0.05' # Legends_dict['_z0.05_age6.0'] = 'z = 0.05' # Legends_dict['_z0.05_age6.5'] = 'z = 0.05' # Legends_dict['_z0.05_age7.0'] = 'z = 0.05' # Legends_dict['_z0.05_age7.5'] = 'z = 0.05' # # Colors_dict['_z0.004_age5.0'] = ColorVector[2][0] # Colors_dict['_z0.004_age5.5'] = ColorVector[2][0] # Colors_dict['_z0.004_age6.0'] = ColorVector[2][0] # Colors_dict['_z0.004_age6.5'] = ColorVector[2][0] # Colors_dict['_z0.004_age7.0'] = ColorVector[2][0] # Colors_dict['_z0.004_age7.5'] = ColorVector[2][0] # # Colors_dict['_z0.008_age5.0'] = ColorVector[2][1] # Colors_dict['_z0.008_age5.5'] = ColorVector[2][1] # Colors_dict['_z0.008_age6.0'] = ColorVector[2][1] # Colors_dict['_z0.008_age6.5'] = ColorVector[2][1] # Colors_dict['_z0.008_age7.0'] = ColorVector[2][1] # Colors_dict['_z0.008_age7.5'] = ColorVector[2][1] # # 
Colors_dict['_z0.02_age5.0'] = ColorVector[2][2] # Colors_dict['_z0.02_age5.5'] = ColorVector[2][2] # Colors_dict['_z0.02_age6.0'] = ColorVector[2][2] # Colors_dict['_z0.02_age6.5'] = ColorVector[2][2] # Colors_dict['_z0.02_age7.0'] = ColorVector[2][2] # Colors_dict['_z0.02_age7.5'] = ColorVector[2][2] # # Colors_dict['_z0.05_age5.0'] = ColorVector[2][3] # Colors_dict['_z0.05_age5.5'] = ColorVector[2][3] # Colors_dict['_z0.05_age6.0'] = ColorVector[2][3] # Colors_dict['_z0.05_age6.5'] = ColorVector[2][3] # Colors_dict['_z0.05_age7.0'] = ColorVector[2][3] # Colors_dict['_z0.05_age7.5'] = ColorVector[2][3] return Model_dict, Legends_dict, Colors_dict def Ar_S_model_pyneb(Line_dict, diags, Ar3_atom, Ar4_atom, S3_atom, S4_atom): TSIII, NSII = diags.getCrossTemDen(diag_tem = '[SIII] 6312/9200+', diag_den = '[SII] 6731/6716', value_tem = Line_dict['6312.06A'] / (Line_dict['9068.62A'] + Line_dict['9532A']), value_den = Line_dict['S2_6731A']/Line_dict['S2_6716A']) TOIII, NSII_2 = diags.getCrossTemDen(diag_tem = '[OIII] 4363/5007+', diag_den = '[SII] 6731/6716', value_tem = Line_dict['O3_4363A']/(Line_dict['O3_5007A'] + Line_dict['O3_4959A']), value_den = Line_dict['S2_6731A']/Line_dict['S2_6716A']) Ar3 = Ar3_atom.getIonAbundance(int_ratio = Line_dict['Ar3_7751A'] + Line_dict['Ar3_7135A'], tem=TSIII, den=NSII, to_eval = 'L(7751) + L(7136)', Hbeta = Line_dict['H1_4861A']) Ar4 = Ar4_atom.getIonAbundance(int_ratio = Line_dict['Ar4_4740A'] + Line_dict['Ar4_4711A'], tem=TOIII, den=NSII, to_eval = 'L(4740) + L(4711)', Hbeta = Line_dict['H1_4861A']) S3 = S3_atom.getIonAbundance(int_ratio = (Line_dict['9068.62A'] + Line_dict['9532A']), tem=TSIII, den=NSII, to_eval = 'L(9069)+L(9531)', Hbeta = Line_dict['H1_4861A']) S4 = S3_atom.getIonAbundance(int_ratio = (Line_dict['10.51m']), tem=TOIII, den=NSII, to_eval = 'L(10.51)', Hbeta = Line_dict['H1_4861A']) x_axis = nplog10(Ar3) - nplog10(Ar4) y_axis = nplog10(S3) - nplog10(S4) indexes = x_axis>0.0 return x_axis[indexes], y_axis[indexes], TSIII[indexes], TOIII[indexes] def Ar_S_model(Line_dict, threshold = 0.0): R3 = (Line_dict['9068.62A'] + Line_dict['9532A']) / Line_dict['6312A'] TSIII_4 = (0.5147 + 0.0003187 * R3 + (23.6404 / R3)) TOIII_4 = ((TSIII_4 + 0.0846)/1.0807) TSIII = TSIII_4 * 10000 TOIII = TOIII_4 * 10000 logS3HI = nplog10(Line_dict['10.51m']/Line_dict['4861.36A']) + 6.3956 + 0.0416/TOIII_4 - 0.4216 * nplog10(TOIII_4) - 12 logS2HI = nplog10((Line_dict['9068.62A'] + Line_dict['9532A']) / Line_dict['4861.36A']) + 6.012 + 0.6309 / TSIII_4 - 0.5722 * nplog10(TSIII_4) -12 logAr3HI = nplog10(Line_dict['4740.12A'] / Line_dict['4861.36A']) + 5.705 + 1.246/TOIII_4 - 0.156 * nplog10(TOIII_4) - 12 logAr2HI = nplog10(Line_dict['7135A'] / Line_dict['4861.36A']) + 6.157 + 0.808/TSIII_4 - 0.508 * nplog10(TSIII_4) - 12 x_axis = logS2HI - logS3HI y_axis = logAr2HI - logAr3HI indexes = x_axis>0.0 return x_axis[indexes], y_axis[indexes], TSIII[indexes], TOIII[indexes] def import_data_from_objLog_triple(FilesList, pv): Valid_objects = [] List_Abundances = ['ArIII_HII', 'ArIV_HII', 'TSIII', 'SIII_HII'] Empty_Array = [0] * (len(List_Abundances) + 1) #Dictionary of dictionaries to store object abundances Abund_dict = OrderedDict() for abund in List_Abundances: Abund_dict[abund] = [] #Loop through files for i in range(len(FilesList)): #Analyze file address CodeName, FileName, FileFolder = pv.Analyze_Address(FilesList[i]) #Loop through abundances in the log All_observed = True Empty_Array[0] = CodeName for j in range(len(List_Abundances)): abundance = List_Abundances[j] 
Empty_Array[j+1] = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter = abundance, Assumption = 'float') #If the abundance was measure store it if Empty_Array[j+1] == None: All_observed = False if All_observed: Valid_objects.append(array(Empty_Array, copy=True)) return array(Valid_objects) pv = myPickle() dz = Plot_Conf() ct = Cloudy_Tools() diags = pn.Diagnostics() #Define data type and location Catalogue_Dic = DataToTreat() Pattern = Catalogue_Dic['Datatype'] + '.fits' #Define figure format dz.FigConf(n_colors=6) #Define script name and location # ScriptFolder = '/home/vital/Dropbox/Astrophysics/Tools/Cloudy/S_Ar_test/Few_Models/' ScriptFolder = '/home/vital/Dropbox/Astrophysics/Tools/Cloudy/S_Ar_test/Complete_Model/' ScriptPrefix = 'S_Ar_test' #4 metallicities 0.004, 0.008, 0.02, 0.05 #5 ages 5.0, 5.5, 6.0, 6.5, 7.0, 7.5 Model_dict, Legends_dict, Colors_dict = Figure_Legends_Colors(dz.ColorVector) list_xvalues = array([]) list_yvalues = array([]) list_TSIII = array([]) list_TOIII = array([]) #----Observations data FilesList = pv.Folder_Explorer(Pattern, Catalogue_Dic['Obj_Folder'], CheckComputer=False) Abundances_Matrix = import_data_from_objLog_triple(FilesList, pv) Objects = Abundances_Matrix[:,0] ArIII_HII_array = Abundances_Matrix[:,1] ArIV_HII_array = Abundances_Matrix[:,2] Temps = Abundances_Matrix[:,3] SIII_HII_array = Abundances_Matrix[:,4] print 'longitudes', len(Objects), len(ArIII_HII_array), len(ArIV_HII_array), len(Temps), len(SIII_HII_array) logArII_ArIII = umlog10(ArIII_HII_array/ArIV_HII_array) for key in Model_dict.keys(): #Declare scripting name ScriptName = ScriptPrefix + key + '.in' #Generate lines dictionary with the output data Line_dict = ct.load_predicted_lines(ScriptName, ScriptFolder) x_values, y_values, TSIII, TOIII = Ar_S_model(Line_dict, diags) # dz.data_plot(x_values, y_values, color=Colors_dict[key], label=Legends_dict[key], markerstyle='o') dz.data_plot(TSIII, y_values, color=Colors_dict[key], label=Legends_dict[key], markerstyle='o') list_xvalues = concatenate([list_xvalues, x_values]) list_yvalues = concatenate([list_yvalues, y_values]) list_TSIII = concatenate([list_TSIII, TSIII]) list_TOIII = concatenate([list_TOIII, TOIII]) Not_infinite = isfinite(list_yvalues) list_xvalues_clean = list_xvalues[Not_infinite] list_yvalues_clean = list_yvalues[Not_infinite] list_TSIII_clean = list_TSIII[Not_infinite] list_xvalues_Above = greater(list_xvalues_clean, 0) list_xvalues_clean_greater = list_xvalues_clean[list_xvalues_Above] list_yvalues_clean_greater = list_yvalues_clean[list_xvalues_Above] list_TSIII_clean_greater = list_TSIII_clean[list_xvalues_Above] # # #----------------------Plotting temperatures #Plot wording xtitle = r'$T[SIII] (K)$' ytitle = r'$log(Ar^{+2}/Ar^{+3})$' title = r'Argon ionic abundance versus $S^{+2}$ temperature in Cloudy models' print len(Temps), len(logArII_ArIII) dz.data_plot(nominal_values(Temps), nominal_values(logArII_ArIII), color=dz.ColorVector[1], label='Observations', markerstyle='o', x_error=std_devs(Temps), y_error=std_devs(logArII_ArIII)) dz.FigWording(xtitle, ytitle, title, axis_Size = 20.0, title_Size = 20.0, legend_size=20.0, legend_loc='upper right') 'ArIons_vs_TSIII_Obs' dz.Axis.set_xlim(5000,20000) #Display figure # dz.display_fig() dz.savefig(output_address = '/home/vital/Dropbox/Astrophysics/Papers/Elemental_RegressionsSulfur/Cloudy_Models/ArIons_vs_TSIII_Obs') print 'Data treated' # #----------------------Plotting abundances # #Perform linear regression # zero_vector = zeros(len(list_xvalues_clean_greater)) # m 
,n, m_err, n_err, covab = bces(list_xvalues_clean_greater, zero_vector, list_yvalues_clean_greater, zero_vector, zero_vector) # # # x_regresion = linspace(0, max(list_xvalues_clean_greater), 50) # y_regression = m[0] * x_regresion + n[0] # # # LinearRegression_Label = r'Linear fitting'.format(n = round(n[0],2) ,nerr = round(n_err[0],2)) # dz.data_plot(x_regresion, y_regression, label=LinearRegression_Label, linestyle='--', color=dz.ColorVector[1]) # # # logSII_SIII_theo = m[0] * logArII_ArIII + n[0] # # # dz.data_plot(nominal_values(logArII_ArIII), nominal_values(logSII_SIII_theo), color=dz.ColorVector[1], label='Observations', markerstyle='o', x_error=std_devs(logArII_ArIII), y_error=std_devs(logSII_SIII_theo)) # # # #Plot fitting formula # formula = r"$log\left(Ar^{{+2}}/Ar^{{+3}}\right) = {m} \cdot log\left(S^{{+2}}/S^{{+3}}\right) + {n}$".format(m='m', n='n') # formula2 = r"$m = {m} \pm {merror}; n = {n} \pm {nerror}$".format(m=round(m[0],3), merror=round(m_err[0],3), n=round(n[0],3), nerror=round(n_err[0],3)) # dz.Axis.text(0.50, 0.15, formula, transform=dz.Axis.transAxes, fontsize=20) # dz.Axis.text(0.50, 0.08, formula2, transform=dz.Axis.transAxes, fontsize=20) # # #Plot wording # xtitle = r'$log(S^{+2}/S^{+3})$' # ytitle = r'$log(Ar^{+2}/Ar^{+3})$' # title = 'Argon - Sulfur ionic relation in Cloudy photoionization models' # dz.FigWording(xtitle, ytitle, title, axis_Size = 20.0, title_Size = 20.0, legend_size=20.0, legend_loc='best') # # # dz.Axis.set_xlim(-0.5, 5) # dz.Axis.set_ylim(0.5, 7) # # #Display figure # # dz.display_fig() # dz.savefig(output_address = '/home/vital/Dropbox/Astrophysics/Papers/Elemental_RegressionsSulfur/Cloudy_Models/ArIons_vs_SIons_Obs') # print 'Data treated'
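# --- Worked example (illustrative only) ----------------------------------------
# Ar_S_model above converts the [SIII] nebular-to-auroral ratio R3 into the
# electron temperatures T[SIII] and T[OIII] (in 10^4 K) with the empirical
# relations it hard-codes.  The fluxes below are made-up values chosen only to
# show the arithmetic; they are not measurements from any object or model.
S3_9069, S3_9532, S3_6312 = 1.0, 2.5, 0.07        # hypothetical relative fluxes

R3 = (S3_9069 + S3_9532) / S3_6312                 # (I9069 + I9532) / I6312
TSIII_4 = 0.5147 + 0.0003187 * R3 + 23.6404 / R3   # T[SIII] in 10^4 K
TOIII_4 = (TSIII_4 + 0.0846) / 1.0807              # T[OIII] in 10^4 K

print('R3      = %.1f' % R3)                        # 50.0
print('T[SIII] = %.0f K' % (TSIII_4 * 1e4))         # ~10034 K
print('T[OIII] = %.0f K' % (TOIII_4 * 1e4))         # ~10068 K
# ------------------------------------------------------------------------------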
#!/usr/bin/python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import glob import optparse import os.path import socket import sys import thread import time import urllib # Allow the import of third party modules script_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.join(script_dir, '../../../../third_party/')) sys.path.append(os.path.join(script_dir, '../../../../tools/valgrind/')) sys.path.append(os.path.join(script_dir, '../../../../testing/')) import browsertester.browserlauncher import browsertester.rpclistener import browsertester.server import memcheck_analyze import tsan_analyze import test_env def BuildArgParser(): usage = 'usage: %prog [options]' parser = optparse.OptionParser(usage) parser.add_option('-p', '--port', dest='port', action='store', type='int', default='0', help='The TCP port the server will bind to. ' 'The default is to pick an unused port number.') parser.add_option('--browser_path', dest='browser_path', action='store', type='string', default=None, help='Use the browser located here.') parser.add_option('--map_file', dest='map_files', action='append', type='string', nargs=2, default=[], metavar='DEST SRC', help='Add file SRC to be served from the HTTP server, ' 'to be made visible under the path DEST.') parser.add_option('--serving_dir', dest='serving_dirs', action='append', type='string', default=[], metavar='DIRNAME', help='Add directory DIRNAME to be served from the HTTP ' 'server to be made visible under the root.') parser.add_option('--test_arg', dest='test_args', action='append', type='string', nargs=2, default=[], metavar='KEY VALUE', help='Parameterize the test with a key/value pair.') parser.add_option('--redirect_url', dest='map_redirects', action='append', type='string', nargs=2, default=[], metavar='DEST SRC', help='Add a redirect to the HTTP server, ' 'requests for SRC will result in a redirect (302) to DEST.') parser.add_option('--prefer_portable_in_manifest', dest='prefer_portable_in_manifest', action='store_true', default=False, help='Use portable programs in manifest if available.') parser.add_option('-f', '--file', dest='files', action='append', type='string', default=[], metavar='FILENAME', help='Add a file to serve from the HTTP server, to be ' 'made visible in the root directory. 
' '"--file path/to/foo.html" is equivalent to ' '"--map_file foo.html path/to/foo.html"') parser.add_option('--mime_type', dest='mime_types', action='append', type='string', nargs=2, default=[], metavar='DEST SRC', help='Map file extension SRC to MIME type DEST when ' 'serving it from the HTTP server.') parser.add_option('-u', '--url', dest='url', action='store', type='string', default=None, help='The webpage to load.') parser.add_option('--ppapi_plugin', dest='ppapi_plugin', action='store', type='string', default=None, help='Use the browser plugin located here.') parser.add_option('--sel_ldr', dest='sel_ldr', action='store', type='string', default=None, help='Use the sel_ldr located here.') parser.add_option('--sel_ldr_bootstrap', dest='sel_ldr_bootstrap', action='store', type='string', default=None, help='Use the bootstrap loader located here.') parser.add_option('--irt_library', dest='irt_library', action='store', type='string', default=None, help='Use the integrated runtime (IRT) library ' 'located here.') parser.add_option('--interactive', dest='interactive', action='store_true', default=False, help='Do not quit after testing is done. ' 'Handy for iterative development. Disables timeout.') parser.add_option('--debug', dest='debug', action='store_true', default=False, help='Request debugging output from browser.') parser.add_option('--timeout', dest='timeout', action='store', type='float', default=5.0, help='The maximum amount of time to wait, in seconds, for ' 'the browser to make a request. The timer resets with each ' 'request.') parser.add_option('--hard_timeout', dest='hard_timeout', action='store', type='float', default=None, help='The maximum amount of time to wait, in seconds, for ' 'the entire test. This will kill runaway tests. ') parser.add_option('--allow_404', dest='allow_404', action='store_true', default=False, help='Allow 404s to occur without failing the test.') parser.add_option('-b', '--bandwidth', dest='bandwidth', action='store', type='float', default='0.0', help='The amount of bandwidth (megabits / second) to ' 'simulate between the client and the server. This used for ' 'replies with file payloads. All other responses are ' 'assumed to be short. Bandwidth values <= 0.0 are assumed ' 'to mean infinite bandwidth.') parser.add_option('--extension', dest='browser_extensions', action='append', type='string', default=[], help='Load the browser extensions located at the list of ' 'paths. 
Note: this currently only works with the Chrome ' 'browser.') parser.add_option('--tool', dest='tool', action='store', type='string', default=None, help='Run tests under a tool.') parser.add_option('--browser_flag', dest='browser_flags', action='append', type='string', default=[], help='Additional flags for the chrome command.') parser.add_option('--enable_ppapi_dev', dest='enable_ppapi_dev', action='store', type='int', default=1, help='Enable/disable PPAPI Dev interfaces while testing.') parser.add_option('--nacl_exe_stdin', dest='nacl_exe_stdin', type='string', default=None, help='Redirect standard input of NaCl executable.') parser.add_option('--nacl_exe_stdout', dest='nacl_exe_stdout', type='string', default=None, help='Redirect standard output of NaCl executable.') parser.add_option('--nacl_exe_stderr', dest='nacl_exe_stderr', type='string', default=None, help='Redirect standard error of NaCl executable.') parser.add_option('--expect_browser_process_crash', dest='expect_browser_process_crash', action='store_true', help='Do not signal a failure if the browser process ' 'crashes') parser.add_option('--enable_crash_reporter', dest='enable_crash_reporter', action='store_true', default=False, help='Force crash reporting on.') return parser def ProcessToolLogs(options, logs_dir): if options.tool == 'memcheck': analyzer = memcheck_analyze.MemcheckAnalyzer('', use_gdb=True) logs_wildcard = 'xml.*' elif options.tool == 'tsan': analyzer = tsan_analyze.TsanAnalyzer('', use_gdb=True) logs_wildcard = 'log.*' files = glob.glob(os.path.join(logs_dir, logs_wildcard)) retcode = analyzer.Report(files, options.url) return retcode # An exception that indicates possible flake. class RetryTest(Exception): pass def DumpNetLog(netlog): sys.stdout.write('\n') if not os.path.isfile(netlog): sys.stdout.write('Cannot find netlog, did Chrome actually launch?\n') else: sys.stdout.write('Netlog exists (%d bytes).\n' % os.path.getsize(netlog)) sys.stdout.write('Dumping it to stdout.\n\n\n') sys.stdout.write(open(netlog).read()) sys.stdout.write('\n\n\n') # Try to discover the real IP address of this machine. If we can't figure it # out, fall back to localhost. # A windows bug makes using the loopback interface flaky in rare cases. # http://code.google.com/p/chromium/issues/detail?id=114369 def GetHostName(): host = 'localhost' try: host = socket.gethostbyname(socket.gethostname()) except Exception: pass if host == '0.0.0.0': host = 'localhost' return host def RunTestsOnce(url, options): # Set the default here so we're assured hard_timeout will be defined. # Tests, such as run_inbrowser_trusted_crash_in_startup_test, may not use the # RunFromCommand line entry point - and otherwise get stuck in an infinite # loop when something goes wrong and the hard timeout is not set. # http://code.google.com/p/chromium/issues/detail?id=105406 if options.hard_timeout is None: options.hard_timeout = options.timeout * 4 options.files.append(os.path.join(script_dir, 'browserdata', 'nacltest.js')) # Setup the environment with the setuid sandbox path. test_env.enable_sandbox_if_required(os.environ) # Create server host = GetHostName() try: server = browsertester.server.Create(host, options.port) except Exception: sys.stdout.write('Could not bind %r, falling back to localhost.\n' % host) server = browsertester.server.Create('localhost', options.port) # If port 0 has been requested, an arbitrary port will be bound so we need to # query it. 
Older version of Python do not set server_address correctly when # The requested port is 0 so we need to break encapsulation and query the # socket directly. host, port = server.socket.getsockname() file_mapping = dict(options.map_files) for filename in options.files: file_mapping[os.path.basename(filename)] = filename for server_path, real_path in file_mapping.iteritems(): if not os.path.exists(real_path): raise AssertionError('\'%s\' does not exist.' % real_path) mime_types = {} for ext, mime_type in options.mime_types: mime_types['.' + ext] = mime_type def ShutdownCallback(): server.TestingEnded() close_browser = options.tool is not None and not options.interactive return close_browser listener = browsertester.rpclistener.RPCListener(ShutdownCallback) server.Configure(file_mapping, dict(options.map_redirects), mime_types, options.allow_404, options.bandwidth, listener, options.serving_dirs) browser = browsertester.browserlauncher.ChromeLauncher(options) full_url = 'http://%s:%d/%s' % (host, port, url) if len(options.test_args) > 0: full_url += '?' + urllib.urlencode(options.test_args) browser.Run(full_url, port) server.TestingBegun(0.125) # In Python 2.5, server.handle_request may block indefinitely. Serving pages # is done in its own thread so the main thread can time out as needed. def Serve(): while server.test_in_progress or options.interactive: server.handle_request() thread.start_new_thread(Serve, ()) tool_failed = False time_started = time.time() def HardTimeout(total_time): return total_time >= 0.0 and time.time() - time_started >= total_time try: while server.test_in_progress or options.interactive: if not browser.IsRunning(): if options.expect_browser_process_crash: break listener.ServerError('Browser process ended during test ' '(return code %r)' % browser.GetReturnCode()) # If Chrome exits prematurely without making a single request to the # web server, this is probally a Chrome crash-on-launch bug not related # to the test at hand. Retry, unless we're in interactive mode. In # interactive mode the user may manually close the browser, so don't # retry (it would just be annoying.) if not server.received_request and not options.interactive: raise RetryTest('Chrome failed to launch.') else: break elif not options.interactive and server.TimedOut(options.timeout): js_time = server.TimeSinceJSHeartbeat() err = 'Did not hear from the test for %.1f seconds.' % options.timeout err += '\nHeard from Javascript %.1f seconds ago.' % js_time if js_time > 2.0: err += '\nThe renderer probably hung or crashed.' else: err += '\nThe test probably did not get a callback that it expected.' listener.ServerError(err) break elif not options.interactive and HardTimeout(options.hard_timeout): listener.ServerError('The test took over %.1f seconds. This is ' 'probably a runaway test.' % options.hard_timeout) break else: # If Python 2.5 support is dropped, stick server.handle_request() here. time.sleep(0.125) if options.tool: sys.stdout.write('##################### Waiting for the tool to exit\n') browser.WaitForProcessDeath() sys.stdout.write('##################### Processing tool logs\n') tool_failed = ProcessToolLogs(options, browser.tool_log_dir) finally: try: if listener.ever_failed and not options.interactive: if not server.received_request: sys.stdout.write('\nNo URLs were served by the test runner. 
It is ' 'unlikely this test failure has anything to do with ' 'this particular test.\n') DumpNetLog(browser.NetLogName()) except Exception: listener.ever_failed = 1 browser.Cleanup() # We avoid calling server.server_close() here because it causes # the HTTP server thread to exit uncleanly with an EBADF error, # which adds noise to the logs (though it does not cause the test # to fail). server_close() does not attempt to tell the server # loop to shut down before closing the socket FD it is # select()ing. Since we are about to exit, we don't really need # to close the socket FD. if tool_failed: return 2 elif listener.ever_failed: return 1 else: return 0 # This is an entrypoint for tests that treat the browser tester as a Python # library rather than an opaque script. # (e.g. run_inbrowser_trusted_crash_in_startup_test) def Run(url, options): result = 1 attempt = 1 while True: try: result = RunTestsOnce(url, options) break except RetryTest: # Only retry once. if attempt < 2: sys.stdout.write('\n@@@STEP_WARNINGS@@@\n') sys.stdout.write('WARNING: suspected flake, retrying test!\n\n') attempt += 1 continue else: sys.stdout.write('\nWARNING: failed too many times, not retrying.\n\n') result = 1 break return result def RunFromCommandLine(): parser = BuildArgParser() options, args = parser.parse_args() if len(args) != 0: print args parser.error('Invalid arguments') # Validate the URL url = options.url if url is None: parser.error('Must specify a URL') return Run(url, options) if __name__ == '__main__': sys.exit(RunFromCommandLine())
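# --- Illustrative sketch (not part of the browser tester) ----------------------
# RunTestsOnce above binds the HTTP server to port 0 by default and then reads
# the actual port back from the socket, since the OS picks an arbitrary free
# port.  The standard-library snippet below demonstrates that port-0 handling
# in isolation; it is not the browsertester.server implementation.
import socket

def bind_ephemeral(host='localhost'):
    """Bind a listening socket on an OS-chosen port and return (sock, host, port)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((host, 0))                 # port 0 -> the OS assigns a free port
    sock.listen(1)
    bound_host, bound_port = sock.getsockname()
    return sock, bound_host, bound_port

sock, host, port = bind_ephemeral()
print('serving on http://%s:%d/' % (host, port))
sock.close()
# ------------------------------------------------------------------------------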
############################################################################# ## ## Copyright (C) 2016 Riverbank Computing Limited. ## Copyright (C) 2006 Thorsten Marek. ## All right reserved. ## ## This file is part of PyQt. ## ## You may use this file under the terms of the GPL v2 or the revised BSD ## license as follows: ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are ## met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in ## the documentation and/or other materials provided with the ## distribution. ## * Neither the name of the Riverbank Computing Limited nor the names ## of its contributors may be used to endorse or promote products ## derived from this software without specific prior written ## permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################# import sys import logging import os.path import re from xml.etree.ElementTree import parse, SubElement from .objcreator import QObjectCreator from .properties import Properties logger = logging.getLogger(__name__) DEBUG = logger.debug QtCore = None QtWidgets = None def _parse_alignment(alignment): """ Convert a C++ alignment to the corresponding flags. """ align_flags = None for qt_align in alignment.split('|'): _, qt_align = qt_align.split('::') align = getattr(QtCore.Qt, qt_align) if align_flags is None: align_flags = align else: align_flags |= align return align_flags def _layout_position(elem): """ Return either (), (0, alignment), (row, column, rowspan, colspan) or (row, column, rowspan, colspan, alignment) depending on the type of layout and its configuration. The result will be suitable to use as arguments to the layout. """ row = elem.attrib.get('row') column = elem.attrib.get('column') alignment = elem.attrib.get('alignment') # See if it is a box layout. if row is None or column is None: if alignment is None: return () return (0, _parse_alignment(alignment)) # It must be a grid or a form layout. 
row = int(row) column = int(column) rowspan = int(elem.attrib.get('rowspan', 1)) colspan = int(elem.attrib.get('colspan', 1)) if alignment is None: return (row, column, rowspan, colspan) return (row, column, rowspan, colspan, _parse_alignment(alignment)) class WidgetStack(list): topwidget = None def push(self, item): DEBUG("push %s %s" % (item.metaObject().className(), item.objectName())) self.append(item) if isinstance(item, QtWidgets.QWidget): self.topwidget = item def popLayout(self): layout = list.pop(self) DEBUG("pop layout %s %s" % (layout.metaObject().className(), layout.objectName())) return layout def popWidget(self): widget = list.pop(self) DEBUG("pop widget %s %s" % (widget.metaObject().className(), widget.objectName())) for item in reversed(self): if isinstance(item, QtWidgets.QWidget): self.topwidget = item break else: self.topwidget = None DEBUG("new topwidget %s" % (self.topwidget,)) return widget def peek(self): return self[-1] def topIsLayout(self): return isinstance(self[-1], QtWidgets.QLayout) def topIsLayoutWidget(self): # A plain QWidget is a layout widget unless it's parent is a # QMainWindow or a QTabWidget. Note that the corresponding uic test is # a little more complicated as it involves features not supported by # pyuic. if type(self[-1]) is not QtWidgets.QWidget: return False if len(self) < 2: return False return type(self[-2]) not in (QtWidgets.QMainWindow, QtWidgets.QTabWidget) class ButtonGroup(object): """ Encapsulate the configuration of a button group and its implementation. """ def __init__(self): """ Initialise the button group. """ self.exclusive = True self.object = None class UIParser(object): def __init__(self, qtcore_module, qtgui_module, qtwidgets_module, creatorPolicy): self.factory = QObjectCreator(creatorPolicy) self.wprops = Properties(self.factory, qtcore_module, qtgui_module, qtwidgets_module) global QtCore, QtWidgets QtCore = qtcore_module QtWidgets = qtwidgets_module self.reset() def uniqueName(self, name): """UIParser.uniqueName(string) -> string Create a unique name from a string. >>> p = UIParser(QtCore, QtGui, QtWidgets) >>> p.uniqueName("foo") 'foo' >>> p.uniqueName("foo") 'foo1' """ try: suffix = self.name_suffixes[name] except KeyError: self.name_suffixes[name] = 0 return name suffix += 1 self.name_suffixes[name] = suffix return "%s%i" % (name, suffix) def reset(self): try: self.wprops.reset() except AttributeError: pass self.toplevelWidget = None self.stack = WidgetStack() self.name_suffixes = {} self.defaults = {'spacing': -1, 'margin': -1} self.actions = [] self.currentActionGroup = None self.resources = [] self.button_groups = {} def setupObject(self, clsname, parent, branch, is_attribute=True): name = self.uniqueName(branch.attrib.get('name') or clsname[1:].lower()) if parent is None: args = () else: args = (parent, ) obj = self.factory.createQObject(clsname, name, args, is_attribute) self.wprops.setProperties(obj, branch) obj.setObjectName(name) if is_attribute: setattr(self.toplevelWidget, name, obj) return obj def getProperty(self, elem, name): for prop in elem.findall('property'): if prop.attrib['name'] == name: return prop return None def createWidget(self, elem): self.column_counter = 0 self.row_counter = 0 self.item_nr = 0 self.itemstack = [] self.sorting_enabled = None widget_class = elem.attrib['class'].replace('::', '.') if widget_class == 'Line': widget_class = 'QFrame' # Ignore the parent if it is a container. 
parent = self.stack.topwidget if isinstance(parent, (QtWidgets.QDockWidget, QtWidgets.QMdiArea, QtWidgets.QScrollArea, QtWidgets.QStackedWidget, QtWidgets.QToolBox, QtWidgets.QTabWidget, QtWidgets.QWizard)): parent = None self.stack.push(self.setupObject(widget_class, parent, elem)) if isinstance(self.stack.topwidget, QtWidgets.QTableWidget): if self.getProperty(elem, 'columnCount') is None: self.stack.topwidget.setColumnCount(len(elem.findall("column"))) if self.getProperty(elem, 'rowCount') is None: self.stack.topwidget.setRowCount(len(elem.findall("row"))) self.traverseWidgetTree(elem) widget = self.stack.popWidget() if isinstance(widget, QtWidgets.QTreeView): self.handleHeaderView(elem, "header", widget.header()) elif isinstance(widget, QtWidgets.QTableView): self.handleHeaderView(elem, "horizontalHeader", widget.horizontalHeader()) self.handleHeaderView(elem, "verticalHeader", widget.verticalHeader()) elif isinstance(widget, QtWidgets.QAbstractButton): bg_i18n = self.wprops.getAttribute(elem, "buttonGroup") if bg_i18n is not None: # This should be handled properly in case the problem arises # elsewhere as well. try: # We are compiling the .ui file. bg_name = bg_i18n.string except AttributeError: # We are loading the .ui file. bg_name = bg_i18n # Designer allows the creation of .ui files without explicit # button groups, even though uic then issues warnings. We # handle it in two stages by first making sure it has a name # and then making sure one exists with that name. if not bg_name: bg_name = 'buttonGroup' try: bg = self.button_groups[bg_name] except KeyError: bg = self.button_groups[bg_name] = ButtonGroup() if bg.object is None: bg.object = self.factory.createQObject("QButtonGroup", bg_name, (self.toplevelWidget, )) setattr(self.toplevelWidget, bg_name, bg.object) bg.object.setObjectName(bg_name) if not bg.exclusive: bg.object.setExclusive(False) bg.object.addButton(widget) if self.sorting_enabled is not None: widget.setSortingEnabled(self.sorting_enabled) self.sorting_enabled = None if self.stack.topIsLayout(): lay = self.stack.peek() lp = elem.attrib['layout-position'] if isinstance(lay, QtWidgets.QFormLayout): lay.setWidget(lp[0], self._form_layout_role(lp), widget) else: lay.addWidget(widget, *lp) topwidget = self.stack.topwidget if isinstance(topwidget, QtWidgets.QToolBox): icon = self.wprops.getAttribute(elem, "icon") if icon is not None: topwidget.addItem(widget, icon, self.wprops.getAttribute(elem, "label")) else: topwidget.addItem(widget, self.wprops.getAttribute(elem, "label")) tooltip = self.wprops.getAttribute(elem, "toolTip") if tooltip is not None: topwidget.setItemToolTip(topwidget.indexOf(widget), tooltip) elif isinstance(topwidget, QtWidgets.QTabWidget): icon = self.wprops.getAttribute(elem, "icon") if icon is not None: topwidget.addTab(widget, icon, self.wprops.getAttribute(elem, "title")) else: topwidget.addTab(widget, self.wprops.getAttribute(elem, "title")) tooltip = self.wprops.getAttribute(elem, "toolTip") if tooltip is not None: topwidget.setTabToolTip(topwidget.indexOf(widget), tooltip) elif isinstance(topwidget, QtWidgets.QWizard): topwidget.addPage(widget) elif isinstance(topwidget, QtWidgets.QStackedWidget): topwidget.addWidget(widget) elif isinstance(topwidget, (QtWidgets.QDockWidget, QtWidgets.QScrollArea)): topwidget.setWidget(widget) elif isinstance(topwidget, QtWidgets.QMainWindow): if type(widget) == QtWidgets.QWidget: topwidget.setCentralWidget(widget) elif isinstance(widget, QtWidgets.QToolBar): tbArea = self.wprops.getAttribute(elem, 
"toolBarArea") if tbArea is None: topwidget.addToolBar(widget) else: topwidget.addToolBar(tbArea, widget) tbBreak = self.wprops.getAttribute(elem, "toolBarBreak") if tbBreak: topwidget.insertToolBarBreak(widget) elif isinstance(widget, QtWidgets.QMenuBar): topwidget.setMenuBar(widget) elif isinstance(widget, QtWidgets.QStatusBar): topwidget.setStatusBar(widget) elif isinstance(widget, QtWidgets.QDockWidget): dwArea = self.wprops.getAttribute(elem, "dockWidgetArea") topwidget.addDockWidget(QtCore.Qt.DockWidgetArea(dwArea), widget) def handleHeaderView(self, elem, name, header): value = self.wprops.getAttribute(elem, name + "Visible") if value is not None: header.setVisible(value) value = self.wprops.getAttribute(elem, name + "CascadingSectionResizes") if value is not None: header.setCascadingSectionResizes(value) value = self.wprops.getAttribute(elem, name + "DefaultSectionSize") if value is not None: header.setDefaultSectionSize(value) value = self.wprops.getAttribute(elem, name + "HighlightSections") if value is not None: header.setHighlightSections(value) value = self.wprops.getAttribute(elem, name + "MinimumSectionSize") if value is not None: header.setMinimumSectionSize(value) value = self.wprops.getAttribute(elem, name + "ShowSortIndicator") if value is not None: header.setSortIndicatorShown(value) value = self.wprops.getAttribute(elem, name + "StretchLastSection") if value is not None: header.setStretchLastSection(value) def createSpacer(self, elem): width = elem.findtext("property/size/width") height = elem.findtext("property/size/height") if width is None or height is None: size_args = () else: size_args = (int(width), int(height)) sizeType = self.wprops.getProperty(elem, "sizeType", QtWidgets.QSizePolicy.Expanding) policy = (QtWidgets.QSizePolicy.Minimum, sizeType) if self.wprops.getProperty(elem, "orientation") == QtCore.Qt.Horizontal: policy = policy[1], policy[0] spacer = self.factory.createQObject("QSpacerItem", self.uniqueName("spacerItem"), size_args + policy, is_attribute=False) if self.stack.topIsLayout(): lay = self.stack.peek() lp = elem.attrib['layout-position'] if isinstance(lay, QtWidgets.QFormLayout): lay.setItem(lp[0], self._form_layout_role(lp), spacer) else: lay.addItem(spacer, *lp) def createLayout(self, elem): # We use an internal property to handle margins which will use separate # left, top, right and bottom margins if they are found to be # different. The following will select, in order of preference, # separate margins, the same margin in all directions, and the default # margin. margin = self.wprops.getProperty(elem, 'margin', self.defaults['margin']) left = self.wprops.getProperty(elem, 'leftMargin', margin) top = self.wprops.getProperty(elem, 'topMargin', margin) right = self.wprops.getProperty(elem, 'rightMargin', margin) bottom = self.wprops.getProperty(elem, 'bottomMargin', margin) # A layout widget should, by default, have no margins. if self.stack.topIsLayoutWidget(): if left < 0: left = 0 if top < 0: top = 0 if right < 0: right = 0 if bottom < 0: bottom = 0 if left >= 0 or top >= 0 or right >= 0 or bottom >= 0: # We inject the new internal property. cme = SubElement(elem, 'property', name='pyuicMargins') SubElement(cme, 'number').text = str(left) SubElement(cme, 'number').text = str(top) SubElement(cme, 'number').text = str(right) SubElement(cme, 'number').text = str(bottom) # We use an internal property to handle spacing which will use separate # horizontal and vertical spacing if they are found to be different. 
# The following will select, in order of preference, separate # horizontal and vertical spacing, the same spacing in both directions, # and the default spacing. spacing = self.wprops.getProperty(elem, 'spacing', self.defaults['spacing']) horiz = self.wprops.getProperty(elem, 'horizontalSpacing', spacing) vert = self.wprops.getProperty(elem, 'verticalSpacing', spacing) if horiz >= 0 or vert >= 0: # We inject the new internal property. cme = SubElement(elem, 'property', name='pyuicSpacing') SubElement(cme, 'number').text = str(horiz) SubElement(cme, 'number').text = str(vert) classname = elem.attrib["class"] if self.stack.topIsLayout(): parent = None else: parent = self.stack.topwidget if "name" not in elem.attrib: elem.attrib["name"] = classname[1:].lower() self.stack.push(self.setupObject(classname, parent, elem)) self.traverseWidgetTree(elem) layout = self.stack.popLayout() self.configureLayout(elem, layout) if self.stack.topIsLayout(): top_layout = self.stack.peek() lp = elem.attrib['layout-position'] if isinstance(top_layout, QtWidgets.QFormLayout): top_layout.setLayout(lp[0], self._form_layout_role(lp), layout) else: top_layout.addLayout(layout, *lp) def configureLayout(self, elem, layout): if isinstance(layout, QtWidgets.QGridLayout): self.setArray(elem, 'columnminimumwidth', layout.setColumnMinimumWidth) self.setArray(elem, 'rowminimumheight', layout.setRowMinimumHeight) self.setArray(elem, 'columnstretch', layout.setColumnStretch) self.setArray(elem, 'rowstretch', layout.setRowStretch) elif isinstance(layout, QtWidgets.QBoxLayout): self.setArray(elem, 'stretch', layout.setStretch) def setArray(self, elem, name, setter): array = elem.attrib.get(name) if array: for idx, value in enumerate(array.split(',')): value = int(value) if value > 0: setter(idx, value) def disableSorting(self, w): if self.item_nr == 0: self.sorting_enabled = self.factory.invoke("__sortingEnabled", w.isSortingEnabled) w.setSortingEnabled(False) def handleItem(self, elem): if self.stack.topIsLayout(): elem[0].attrib['layout-position'] = _layout_position(elem) self.traverseWidgetTree(elem) else: w = self.stack.topwidget if isinstance(w, QtWidgets.QComboBox): text = self.wprops.getProperty(elem, "text") icon = self.wprops.getProperty(elem, "icon") if icon: w.addItem(icon, '') else: w.addItem('') w.setItemText(self.item_nr, text) elif isinstance(w, QtWidgets.QListWidget): self.disableSorting(w) item = self.createWidgetItem('QListWidgetItem', elem, w.item, self.item_nr) w.addItem(item) elif isinstance(w, QtWidgets.QTreeWidget): if self.itemstack: parent, _ = self.itemstack[-1] _, nr_in_root = self.itemstack[0] else: parent = w nr_in_root = self.item_nr item = self.factory.createQObject("QTreeWidgetItem", "item_%d" % len(self.itemstack), (parent, ), False) if self.item_nr == 0 and not self.itemstack: self.sorting_enabled = self.factory.invoke("__sortingEnabled", w.isSortingEnabled) w.setSortingEnabled(False) self.itemstack.append((item, self.item_nr)) self.item_nr = 0 # We have to access the item via the tree when setting the # text. 
titm = w.topLevelItem(nr_in_root) for child, nr_in_parent in self.itemstack[1:]: titm = titm.child(nr_in_parent) column = -1 for prop in elem.findall('property'): c_prop = self.wprops.convert(prop) c_prop_name = prop.attrib['name'] if c_prop_name == 'text': column += 1 if c_prop: titm.setText(column, c_prop) elif c_prop_name == 'statusTip': item.setStatusTip(column, c_prop) elif c_prop_name == 'toolTip': item.setToolTip(column, c_prop) elif c_prop_name == 'whatsThis': item.setWhatsThis(column, c_prop) elif c_prop_name == 'font': item.setFont(column, c_prop) elif c_prop_name == 'icon': item.setIcon(column, c_prop) elif c_prop_name == 'background': item.setBackground(column, c_prop) elif c_prop_name == 'foreground': item.setForeground(column, c_prop) elif c_prop_name == 'flags': item.setFlags(c_prop) elif c_prop_name == 'checkState': item.setCheckState(column, c_prop) self.traverseWidgetTree(elem) _, self.item_nr = self.itemstack.pop() elif isinstance(w, QtWidgets.QTableWidget): row = int(elem.attrib['row']) col = int(elem.attrib['column']) self.disableSorting(w) item = self.createWidgetItem('QTableWidgetItem', elem, w.item, row, col) w.setItem(row, col, item) self.item_nr += 1 def addAction(self, elem): self.actions.append((self.stack.topwidget, elem.attrib["name"])) @staticmethod def any_i18n(*args): """ Return True if any argument appears to be an i18n string. """ for a in args: if a is not None and not isinstance(a, str): return True return False def createWidgetItem(self, item_type, elem, getter, *getter_args): """ Create a specific type of widget item. """ item = self.factory.createQObject(item_type, "item", (), False) props = self.wprops # Note that not all types of widget items support the full set of # properties. text = props.getProperty(elem, 'text') status_tip = props.getProperty(elem, 'statusTip') tool_tip = props.getProperty(elem, 'toolTip') whats_this = props.getProperty(elem, 'whatsThis') if self.any_i18n(text, status_tip, tool_tip, whats_this): self.factory.invoke("item", getter, getter_args) if text: item.setText(text) if status_tip: item.setStatusTip(status_tip) if tool_tip: item.setToolTip(tool_tip) if whats_this: item.setWhatsThis(whats_this) text_alignment = props.getProperty(elem, 'textAlignment') if text_alignment: item.setTextAlignment(text_alignment) font = props.getProperty(elem, 'font') if font: item.setFont(font) icon = props.getProperty(elem, 'icon') if icon: item.setIcon(icon) background = props.getProperty(elem, 'background') if background: item.setBackground(background) foreground = props.getProperty(elem, 'foreground') if foreground: item.setForeground(foreground) flags = props.getProperty(elem, 'flags') if flags: item.setFlags(flags) check_state = props.getProperty(elem, 'checkState') if check_state: item.setCheckState(check_state) return item def addHeader(self, elem): w = self.stack.topwidget if isinstance(w, QtWidgets.QTreeWidget): props = self.wprops col = self.column_counter text = props.getProperty(elem, 'text') if text: w.headerItem().setText(col, text) status_tip = props.getProperty(elem, 'statusTip') if status_tip: w.headerItem().setStatusTip(col, status_tip) tool_tip = props.getProperty(elem, 'toolTip') if tool_tip: w.headerItem().setToolTip(col, tool_tip) whats_this = props.getProperty(elem, 'whatsThis') if whats_this: w.headerItem().setWhatsThis(col, whats_this) text_alignment = props.getProperty(elem, 'textAlignment') if text_alignment: w.headerItem().setTextAlignment(col, text_alignment) font = props.getProperty(elem, 'font') if font: 
w.headerItem().setFont(col, font) icon = props.getProperty(elem, 'icon') if icon: w.headerItem().setIcon(col, icon) background = props.getProperty(elem, 'background') if background: w.headerItem().setBackground(col, background) foreground = props.getProperty(elem, 'foreground') if foreground: w.headerItem().setForeground(col, foreground) self.column_counter += 1 elif isinstance(w, QtWidgets.QTableWidget): if len(elem) != 0: if elem.tag == 'column': item = self.createWidgetItem('QTableWidgetItem', elem, w.horizontalHeaderItem, self.column_counter) w.setHorizontalHeaderItem(self.column_counter, item) self.column_counter += 1 elif elem.tag == 'row': item = self.createWidgetItem('QTableWidgetItem', elem, w.verticalHeaderItem, self.row_counter) w.setVerticalHeaderItem(self.row_counter, item) self.row_counter += 1 def setZOrder(self, elem): # Designer can generate empty zorder elements. if elem.text is None: return # Designer allows the z-order of spacer items to be specified even # though they can't be raised, so ignore any missing raise_() method. try: getattr(self.toplevelWidget, elem.text).raise_() except AttributeError: # Note that uic issues a warning message. pass def createAction(self, elem): self.setupObject("QAction", self.currentActionGroup or self.toplevelWidget, elem) def createActionGroup(self, elem): action_group = self.setupObject("QActionGroup", self.toplevelWidget, elem) self.currentActionGroup = action_group self.traverseWidgetTree(elem) self.currentActionGroup = None widgetTreeItemHandlers = { "widget" : createWidget, "addaction" : addAction, "layout" : createLayout, "spacer" : createSpacer, "item" : handleItem, "action" : createAction, "actiongroup": createActionGroup, "column" : addHeader, "row" : addHeader, "zorder" : setZOrder, } def traverseWidgetTree(self, elem): for child in iter(elem): try: handler = self.widgetTreeItemHandlers[child.tag] except KeyError: continue handler(self, child) def createUserInterface(self, elem): # Get the names of the class and widget. cname = elem.attrib["class"] wname = elem.attrib["name"] # If there was no widget name then derive it from the class name. 
if not wname: wname = cname if wname.startswith("Q"): wname = wname[1:] wname = wname[0].lower() + wname[1:] self.toplevelWidget = self.createToplevelWidget(cname, wname) self.toplevelWidget.setObjectName(wname) DEBUG("toplevel widget is %s", self.toplevelWidget.metaObject().className()) self.wprops.setProperties(self.toplevelWidget, elem) self.stack.push(self.toplevelWidget) self.traverseWidgetTree(elem) self.stack.popWidget() self.addActions() self.setBuddies() self.setDelayedProps() def addActions(self): for widget, action_name in self.actions: if action_name == "separator": widget.addSeparator() else: DEBUG("add action %s to %s", action_name, widget.objectName()) action_obj = getattr(self.toplevelWidget, action_name) if isinstance(action_obj, QtWidgets.QMenu): widget.addAction(action_obj.menuAction()) elif not isinstance(action_obj, QtWidgets.QActionGroup): widget.addAction(action_obj) def setDelayedProps(self): for widget, layout, setter, args in self.wprops.delayed_props: if layout: widget = widget.layout() setter = getattr(widget, setter) setter(args) def setBuddies(self): for widget, buddy in self.wprops.buddies: DEBUG("%s is buddy of %s", buddy, widget.objectName()) try: widget.setBuddy(getattr(self.toplevelWidget, buddy)) except AttributeError: DEBUG("ERROR in ui spec: %s (buddy of %s) does not exist", buddy, widget.objectName()) def classname(self, elem): DEBUG("uiname is %s", elem.text) name = elem.text if name is None: name = "" self.uiname = name self.wprops.uiname = name self.setContext(name) def setContext(self, context): """ Reimplemented by a sub-class if it needs to know the translation context. """ pass def readDefaults(self, elem): self.defaults['margin'] = int(elem.attrib['margin']) self.defaults['spacing'] = int(elem.attrib['spacing']) def setTaborder(self, elem): lastwidget = None for widget_elem in elem: widget = getattr(self.toplevelWidget, widget_elem.text) if lastwidget is not None: self.toplevelWidget.setTabOrder(lastwidget, widget) lastwidget = widget def readResources(self, elem): """ Read a "resources" tag and add the module to import to the parser's list of them. """ try: iterator = getattr(elem, 'iter') except AttributeError: iterator = getattr(elem, 'getiterator') for include in iterator("include"): loc = include.attrib.get("location") # Apply the convention for naming the Python files generated by # pyrcc5. 
if loc and loc.endswith('.qrc'): mname = os.path.basename(loc[:-4] + self._resource_suffix) if mname not in self.resources: self.resources.append(mname) def createConnections(self, elem): def name2object(obj): if obj == self.uiname: return self.toplevelWidget else: return getattr(self.toplevelWidget, obj) for conn in iter(elem): signal = conn.findtext('signal') signal_name, signal_args = signal.split('(') signal_args = signal_args[:-1].replace(' ', '') sender = name2object(conn.findtext('sender')) bound_signal = getattr(sender, signal_name) slot = self.factory.getSlot(name2object(conn.findtext('receiver')), conn.findtext('slot').split('(')[0]) if signal_args == '': bound_signal.connect(slot) else: signal_args = signal_args.split(',') if len(signal_args) == 1: bound_signal[signal_args[0]].connect(slot) else: bound_signal[tuple(signal_args)].connect(slot) QtCore.QMetaObject.connectSlotsByName(self.toplevelWidget) def customWidgets(self, elem): def header2module(header): """header2module(header) -> string Convert paths to C++ header files to according Python modules >>> header2module("foo/bar/baz.h") 'foo.bar.baz' """ if header.endswith(".h"): header = header[:-2] mpath = [] for part in header.split('/'): # Ignore any empty parts or those that refer to the current # directory. if part not in ('', '.'): if part == '..': # We should allow this for Python3. raise SyntaxError("custom widget header file name may not contain '..'.") mpath.append(part) return '.'.join(mpath) for custom_widget in iter(elem): classname = custom_widget.findtext("class") self.factory.addCustomWidget(classname, custom_widget.findtext("extends") or "QWidget", header2module(custom_widget.findtext("header"))) def createToplevelWidget(self, classname, widgetname): raise NotImplementedError def buttonGroups(self, elem): for button_group in iter(elem): if button_group.tag == 'buttongroup': bg_name = button_group.attrib['name'] bg = ButtonGroup() self.button_groups[bg_name] = bg prop = self.getProperty(button_group, 'exclusive') if prop is not None: if prop.findtext('bool') == 'false': bg.exclusive = False # finalize will be called after the whole tree has been parsed and can be # overridden. def finalize(self): pass def parse(self, filename, resource_suffix, base_dir=''): self.wprops.set_base_dir(base_dir) self._resource_suffix = resource_suffix # The order in which the different branches are handled is important. # The widget tree handler relies on all custom widgets being known, and # in order to create the connections, all widgets have to be populated. branchHandlers = ( ("layoutdefault", self.readDefaults), ("class", self.classname), ("buttongroups", self.buttonGroups), ("customwidgets", self.customWidgets), ("widget", self.createUserInterface), ("connections", self.createConnections), ("tabstops", self.setTaborder), ("resources", self.readResources), ) document = parse(filename) version = document.getroot().attrib["version"] DEBUG("UI version is %s" % (version,)) # Right now, only version 4.0 is supported. assert version in ("4.0",) for tagname, actor in branchHandlers: elem = document.find(tagname) if elem is not None: actor(elem) self.finalize() w = self.toplevelWidget self.reset() return w @staticmethod def _form_layout_role(layout_position): if layout_position[3] > 1: role = QtWidgets.QFormLayout.SpanningRole elif layout_position[1] == 1: role = QtWidgets.QFormLayout.FieldRole else: role = QtWidgets.QFormLayout.LabelRole return role
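# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the parser implementation above).  The
# UIParser machinery is normally reached through PyQt5's public uic module
# rather than instantiated directly; loadUi() drives the same widget-tree,
# layout and connection handlers shown above.  The .ui file name here is
# hypothetical -- any Designer file with UI version 4.0 (the only version
# accepted by parse()) will do.
# ---------------------------------------------------------------------------
def _load_ui_example(ui_path="mainwindow.ui"):
    import sys
    from PyQt5 import QtWidgets, uic

    app = QtWidgets.QApplication(sys.argv)
    window = uic.loadUi(ui_path)   # parses the XML and builds the widget tree
    window.show()
    return app.exec_()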
"""file_extraction.py: extracts the zip attachment(s) from raw email file provided by the Simple Email Service""" import os import zipfile from collections import namedtuple from email import policy from email.parser import BytesParser __author__ = 'sweetjonnie' __license__ = 'Apache License 2.0' __email__ = 'jsavell@gmail.com' __status__ = 'Prototype' # pylint: disable=too-few-public-methods FileAttributes = namedtuple('FileAttributes', ['file_name', 'file_content']) def extract_files(source_filepath, target_directory): """the signature of this function is the API of this module the implementation of this function is the agent of dependency injection source_filepath is the full path to the input file target_directory is the full path of the directory to be used for processing return value is a collection of utf-8 strings, each of which contains the contents of an extracted and unarchived file""" result = [] def file_content_consumer(file_content): """trivial consumer for file_content collection""" result.append(file_content) file_consumer = FileConsumer( file_content_consumer ) tee_consumer1 = TeeConsumer( file_consumer ) file_archive_consumer = FileArchiveConsumer( tee_consumer1, target_directory ) file_archive_patch_consumer = FileArchivePatchConsumer( file_archive_consumer ) file_attchmnt_content_consumer = FileAttachmentContentConsumer( file_archive_patch_consumer, target_directory ) tee_consumer2 = TeeConsumer( file_attchmnt_content_consumer ) workflow = MailFileConsumer( tee_consumer2 ) # initiate consumption workflow( source_filepath ) return result def extract_filename(line): """NOT part of the API; simply external to its client. extract the name of filename from the metadata.""" key = 'filename' filename_position_start = line.index(key) + len(key) while (filename_position_start < len(line) and not line[filename_position_start].isalpha()): filename_position_start += 1 filename_position_end = len(line) - 1 while (filename_position_end > 0 and not line[filename_position_end].isalpha()): filename_position_end -= 1 if filename_position_end < filename_position_start: raise FileNotFoundError('embedded file name not found') else: return line[filename_position_start: 1 + filename_position_end] def valid_directory_or_die(directory_path): """NOT part of the API; simply external to its client. life is too short for bad directories. fight back.""" if (not os.path.exists(directory_path) or not os.path.isdir(directory_path)): raise NotADirectoryError(directory_path) else: return directory_path def examine_consumer_input(func): """NOT part of the API; simply external to its client. decorator used for debugging the flow of information. don't wish to debug? then don't decorate (TM).""" def wrapper(target, value): """Crocket: 'just a wrapper function ...'""" msg = 'target {} declares input {}'.format( target.__class__.__name__, value ) print(msg) func(target, value) return wrapper class TeeConsumer: """given a collection of things, this command iterates over the collection and ships each item downstream. 
see also: https://en.wikipedia.org/wiki/Tee_(command) http://www.enterpriseintegrationpatterns.com/patterns/messaging/ RecipientList.html http://www.enterpriseintegrationpatterns.com/patterns/messaging/ WireTap.html """ def __init__(self, consumer): self._consumer = consumer @examine_consumer_input def __call__(self, collection): for item in collection: self._consumer(item) class MailFileConsumer: """given an absolute path to a mail file, this command shreds the file and extracts desired attachments. attribution: code borrowed from https://stackoverflow.com/questions/17874360/ python-how-to-parse-the-body-from-a-raw-email-given-that-raw-email-does-not""" def __init__(self, consumer): self._consumer = consumer @examine_consumer_input def __call__(self, file_name): result = [] with open(file_name, 'rb') as file_pointer: msg = BytesParser(policy=policy.default).parse(file_pointer) if msg.is_multipart(): for part in msg.walk(): content_type = part.get_content_type() content_disposition = part.get('Content-Disposition') content = part.get_payload(decode=True) if (content_type == 'application/zip' and content_disposition is not None and content_disposition.startswith('attachment;')): try: file_name = extract_filename(content_disposition) file_attributes = FileAttributes( file_name=file_name, file_content=content ) result.append(file_attributes) except ValueError: # this is an error we may discard pass if len(result) <= 0: raise FileNotFoundError('no embedded file found') else: return self._consumer(result) class FileAttachmentContentConsumer: """given a set of file-attributes, this command creates and populates a file.""" def __init__(self, consumer, directory_path): self._consumer = consumer self._directory_path = valid_directory_or_die(directory_path) @examine_consumer_input def __call__(self, file_attributes): file_path = os.path.join( self._directory_path, file_attributes.file_name ) with open(file_path, 'wb') as file_pointer: file_pointer.write(file_attributes.file_content) return self._consumer(file_path) class FileArchivePatchConsumer: """given an absolute path to an archive file, this command patches the archive in order to overcome a limitation of the zipfile module. 
attribution: code stolen from https://stackoverflow.com/questions/3083235/ unzipping-file-results-in-badzipfile-file-is-not-a-zip-file""" def __init__(self, consumer): self._consumer = consumer @examine_consumer_input def __call__(self, archive_path): with open(archive_path, 'r+b') as file_pointer: content = file_pointer.read() pos = content.rfind(b'\x50\x4b\x05\x06') if pos > 0: file_pointer.seek(pos + 20) file_pointer.truncate() file_pointer.write(b'\x00\x00') file_pointer.seek(0) else: pass self._consumer(archive_path) class FileArchiveConsumer: """given an archive file, this command unarchives the file and creates a file for each piece of content found within.""" def __init__(self, consumer, directory_path): self._consumer = consumer self._directory_path = valid_directory_or_die(directory_path) @examine_consumer_input def __call__(self, archive_path): result = [] with zipfile.ZipFile(archive_path) as zip_file: for info in zip_file.infolist(): file_name = info.filename file_path = os.path.join(self._directory_path, file_name) with open(file_path, 'wb') as file_pointer: file_pointer.write(zip_file.read(file_name)) result.append(file_path) self._consumer(result) # os.remove(archive_path) class FileConsumer: """given an absolute path to a file, this command exports the file's contents.""" def __init__(self, consumer): self._consumer = consumer @examine_consumer_input def __call__(self, file_path): with open(file_path, encoding='utf-8') as file_pointer: content = file_pointer.read() self._consumer(content) # os.remove(file_path)
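# ---------------------------------------------------------------------------
# Hedged usage sketch for the consumer pipeline above.  The command-line
# arguments are hypothetical: extract_files() only needs the path to a raw
# SES e-mail file and a writable working directory, and returns the decoded
# text of every file found inside the zip attachment(s).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    # e.g.  python file_extraction.py /tmp/raw_email.eml /tmp/workdir
    for extracted_text in extract_files(sys.argv[1], sys.argv[2]):
        print(extracted_text[:80])  # show the first 80 characters of each file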
# Copyright 2011 Justin Santa Barbara # Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import hashlib import os import os.path import shutil import tempfile import uuid import mock import netaddr from oslo_concurrency import processutils import six import six.moves.builtins as __builtin__ from magnum.common import exception from magnum.common import utils from magnum.tests import base class UtilsTestCase(base.TestCase): def test_random_alnum(self): s = utils.random_alnum(10) self.assertEqual(10, len(s)) s = utils.random_alnum(100) self.assertEqual(100, len(s)) def test_unlink(self): with mock.patch.object(os, "unlink") as unlink_mock: unlink_mock.return_value = None utils.unlink_without_raise("/fake/path") unlink_mock.assert_called_once_with("/fake/path") def test_unlink_ENOENT(self): with mock.patch.object(os, "unlink") as unlink_mock: unlink_mock.side_effect = OSError(errno.ENOENT) utils.unlink_without_raise("/fake/path") unlink_mock.assert_called_once_with("/fake/path") def test_create_link(self): with mock.patch.object(os, "symlink") as symlink_mock: symlink_mock.return_value = None utils.create_link_without_raise("/fake/source", "/fake/link") symlink_mock.assert_called_once_with("/fake/source", "/fake/link") def test_create_link_EEXIST(self): with mock.patch.object(os, "symlink") as symlink_mock: symlink_mock.side_effect = OSError(errno.EEXIST) utils.create_link_without_raise("/fake/source", "/fake/link") symlink_mock.assert_called_once_with("/fake/source", "/fake/link") def test_generate_uid(self): topic = 'test' size = 8 s = utils.generate_uid(topic) self.assertEqual(len(topic) + size + 1, len(s)) self.assertEqual(topic, s[:len(topic)]) size = 22 s = utils.generate_uid(topic, size) self.assertEqual(len(topic) + size + 1, len(s)) def test_valid_ipv4(self): self.assertTrue(utils.is_valid_ipv4('10.0.0.1')) self.assertTrue(utils.is_valid_ipv4('255.255.255.255')) def test_invalid_ipv4(self): self.assertFalse(utils.is_valid_ipv4('')) self.assertFalse(utils.is_valid_ipv4('x.x.x.x')) self.assertFalse(utils.is_valid_ipv4('256.256.256.256')) self.assertFalse(utils.is_valid_ipv4( 'AA42:0000:0000:0000:0202:B3FF:FE1E:8329')) def test_valid_ipv6(self): self.assertTrue(utils.is_valid_ipv6( 'AA42:0000:0000:0000:0202:B3FF:FE1E:8329')) self.assertTrue(utils.is_valid_ipv6( 'AA42::0202:B3FF:FE1E:8329')) def test_invalid_ipv6(self): self.assertFalse(utils.is_valid_ipv6('')) self.assertFalse(utils.is_valid_ipv6('10.0.0.1')) self.assertFalse(utils.is_valid_ipv6('AA42::0202:B3FF:FE1E:')) def test_valid_cidr(self): self.assertTrue(utils.is_valid_cidr('10.0.0.0/24')) self.assertTrue(utils.is_valid_cidr('10.0.0.1/32')) self.assertTrue(utils.is_valid_cidr('0.0.0.0/0')) def test_invalid_cidr(self): self.assertFalse(utils.is_valid_cidr('10.0.0.1')) self.assertFalse(utils.is_valid_cidr('10.0.0.1/33')) def test_valid_network(self): self.assertEqual('IPv4', utils.get_ip_version('10.0.0.1')) self.assertEqual('IPv6', utils.get_ip_version( 
'AA42:0000:0000:0000:0202:B3FF:FE1E:8329')) def test_invalid_network(self): self.assertRaises(netaddr.core.AddrFormatError, utils.get_ip_version, 'x.x.x.x') def test_convert_to_list_dict(self): self.assertIsNone(utils.convert_to_list_dict(None, 'fred')) self.assertIsNone(utils.convert_to_list_dict('', 'fred')) self.assertEqual([{'fred': 'list'}], utils.convert_to_list_dict('list', 'fred')) self.assertEqual([{'fred': 'first'}, {'fred': 'second'}], utils.convert_to_list_dict(['first', 'second'], 'fred')) def test_get_k8s_quantity(self): self.assertEqual(1024000.0, utils.get_k8s_quantity('1000Ki')) self.assertEqual(0.001, utils.get_k8s_quantity('1E-3')) self.assertEqual(0.5, utils.get_k8s_quantity('0.0005k')) self.assertEqual(0.5, utils.get_k8s_quantity('500m')) self.assertEqual(1300000.0, utils.get_k8s_quantity('1.3E+6')) self.assertEqual(1300000.0, utils.get_k8s_quantity('1.3E6')) self.assertRaises(exception.UnsupportedK8sQuantityFormat, utils.get_k8s_quantity, '1E1E') def test_get_docker_quanity(self): self.assertEqual(512, utils.get_docker_quanity('512')) self.assertEqual(512, utils.get_docker_quanity('512b')) self.assertEqual(512 * 1024, utils.get_docker_quanity('512k')) self.assertEqual(512 * 1024 * 1024, utils.get_docker_quanity('512m')) self.assertEqual(512 * 1024 * 1024 * 1024, utils.get_docker_quanity('512g')) self.assertRaises(exception.UnsupportedDockerQuantityFormat, utils.get_docker_quanity, '512bb') self.assertRaises(exception.UnsupportedDockerQuantityFormat, utils.get_docker_quanity, '512B') class ExecuteTestCase(base.TestCase): def test_retry_on_failure(self): fd, tmpfilename = tempfile.mkstemp() _, tmpfilename2 = tempfile.mkstemp() try: fp = os.fdopen(fd, 'w+') fp.write('''#!/bin/sh # If stdin fails to get passed during one of the runs, make a note. if ! grep -q foo then echo 'failure' > "$1" fi # If stdin has failed to get passed during this or a previous run, exit early. if grep failure "$1" then exit 1 fi runs="$(cat $1)" if [ -z "$runs" ] then runs=0 fi runs=$(($runs + 1)) echo $runs > "$1" exit 1 ''') fp.close() os.chmod(tmpfilename, 0o755) try: self.assertRaises(processutils.ProcessExecutionError, utils.execute, tmpfilename, tmpfilename2, attempts=10, process_input=b'foo', delay_on_retry=False) except OSError as e: if e.errno == errno.EACCES: self.skipTest("Permissions error detected. " "Are you running with a noexec /tmp?") else: raise with open(tmpfilename2, 'r') as fp: runs = fp.read() self.assertNotEqual(runs.strip(), 'failure', 'stdin did not ' 'always get passed ' 'correctly') runs = int(runs.strip()) self.assertEqual(10, runs, 'Ran %d times instead of 10.' % runs) finally: os.unlink(tmpfilename) os.unlink(tmpfilename2) def test_unknown_kwargs_raises_error(self): self.assertRaises(processutils.UnknownArgumentError, utils.execute, '/usr/bin/env', 'true', this_is_not_a_valid_kwarg=True) def test_check_exit_code_boolean(self): utils.execute('/usr/bin/env', 'false', check_exit_code=False) self.assertRaises(processutils.ProcessExecutionError, utils.execute, '/usr/bin/env', 'false', check_exit_code=True) def test_no_retry_on_success(self): fd, tmpfilename = tempfile.mkstemp() _, tmpfilename2 = tempfile.mkstemp() try: fp = os.fdopen(fd, 'w+') fp.write('''#!/bin/sh # If we've already run, bail out. grep -q foo "$1" && exit 1 # Mark that we've run before. echo foo > "$1" # Check that stdin gets passed correctly. 
grep foo ''') fp.close() os.chmod(tmpfilename, 0o755) try: utils.execute(tmpfilename, tmpfilename2, process_input=b'foo', attempts=2) except OSError as e: if e.errno == errno.EACCES: self.skipTest("Permissions error detected. " "Are you running with a noexec /tmp?") else: raise finally: os.unlink(tmpfilename) os.unlink(tmpfilename2) @mock.patch.object(processutils, 'execute') @mock.patch.object(os.environ, 'copy', return_value={}) def test_execute_use_standard_locale_no_env_variables(self, env_mock, execute_mock): utils.execute('foo', use_standard_locale=True) execute_mock.assert_called_once_with('foo', env_variables={'LC_ALL': 'C'}) @mock.patch.object(processutils, 'execute') def test_execute_use_standard_locale_with_env_variables(self, execute_mock): utils.execute('foo', use_standard_locale=True, env_variables={'foo': 'bar'}) execute_mock.assert_called_once_with('foo', env_variables={'LC_ALL': 'C', 'foo': 'bar'}) @mock.patch.object(processutils, 'execute') def test_execute_not_use_standard_locale(self, execute_mock): utils.execute('foo', use_standard_locale=False, env_variables={'foo': 'bar'}) execute_mock.assert_called_once_with('foo', env_variables={'foo': 'bar'}) def test_execute_get_root_helper(self): with mock.patch.object(processutils, 'execute') as execute_mock: helper = utils._get_root_helper() utils.execute('foo', run_as_root=True) execute_mock.assert_called_once_with('foo', run_as_root=True, root_helper=helper) def test_execute_without_root_helper(self): with mock.patch.object(processutils, 'execute') as execute_mock: utils.execute('foo', run_as_root=False) execute_mock.assert_called_once_with('foo', run_as_root=False) class GenericUtilsTestCase(base.TestCase): def test_hostname_unicode_sanitization(self): hostname = u"\u7684.test.example.com" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_periods(self): hostname = "....test.example.com..." 
self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_dashes(self): hostname = "----test.example.com---" self.assertEqual("test.example.com", utils.sanitize_hostname(hostname)) def test_hostname_sanitize_characters(self): hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+" self.assertEqual("91----test-host.example.com-0", utils.sanitize_hostname(hostname)) def test_hostname_translate(self): hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" self.assertEqual("hello", utils.sanitize_hostname(hostname)) def test_read_cached_file(self): with mock.patch.object(os.path, "getmtime") as getmtime_mock: getmtime_mock.return_value = 1 cache_data = {"data": 1123, "mtime": 1} data = utils.read_cached_file("/this/is/a/fake", cache_data) self.assertEqual(cache_data["data"], data) getmtime_mock.assert_called_once_with(mock.ANY) def test_read_modified_cached_file(self): with mock.patch.object(os.path, "getmtime") as getmtime_mock: with mock.patch.object(__builtin__, 'open') as open_mock: getmtime_mock.return_value = 2 fake_contents = "lorem ipsum" fake_file = mock.Mock() fake_file.read.return_value = fake_contents fake_context_manager = mock.MagicMock() fake_context_manager.__enter__.return_value = fake_file fake_context_manager.__exit__.return_value = None open_mock.return_value = fake_context_manager cache_data = {"data": 1123, "mtime": 1} self.reload_called = False def test_reload(reloaded_data): self.assertEqual(fake_contents, reloaded_data) self.reload_called = True data = utils.read_cached_file("/this/is/a/fake", cache_data, reload_func=test_reload) self.assertEqual(fake_contents, data) self.assertTrue(self.reload_called) getmtime_mock.assert_called_once_with(mock.ANY) open_mock.assert_called_once_with(mock.ANY) fake_file.read.assert_called_once_with() fake_context_manager.__exit__.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY) fake_context_manager.__enter__.assert_called_once_with() def test_hash_file(self): data = 'Mary had a little lamb, its fleece as white as snow' flo = six.StringIO(data) h1 = utils.hash_file(flo) h2 = hashlib.sha1(six.b(data)).hexdigest() self.assertEqual(h1, h2) def test_is_valid_boolstr(self): self.assertTrue(utils.is_valid_boolstr('true')) self.assertTrue(utils.is_valid_boolstr('false')) self.assertTrue(utils.is_valid_boolstr('yes')) self.assertTrue(utils.is_valid_boolstr('no')) self.assertTrue(utils.is_valid_boolstr('y')) self.assertTrue(utils.is_valid_boolstr('n')) self.assertTrue(utils.is_valid_boolstr('1')) self.assertTrue(utils.is_valid_boolstr('0')) self.assertFalse(utils.is_valid_boolstr('maybe')) self.assertFalse(utils.is_valid_boolstr('only on tuesdays')) def test_is_valid_ipv6_cidr(self): self.assertTrue(utils.is_valid_ipv6_cidr("2600::/64")) self.assertTrue(utils.is_valid_ipv6_cidr( "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254/48")) self.assertTrue(utils.is_valid_ipv6_cidr( "0000:0000:0000:0000:0000:0000:0000:0001/32")) self.assertTrue(utils.is_valid_ipv6_cidr( "0000:0000:0000:0000:0000:0000:0000:0001")) self.assertFalse(utils.is_valid_ipv6_cidr("foo")) self.assertFalse(utils.is_valid_ipv6_cidr("127.0.0.1")) def test_get_shortened_ipv6(self): self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe", utils.get_shortened_ipv6( "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254")) self.assertEqual("::1", utils.get_shortened_ipv6( "0000:0000:0000:0000:0000:0000:0000:0001")) self.assertEqual("caca::caca:0:babe:201:102", utils.get_shortened_ipv6( "caca:0000:0000:caca:0000:babe:0201:0102")) 
self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6, "127.0.0.1") self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6, "failure") def test_get_shortened_ipv6_cidr(self): self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr( "2600:0000:0000:0000:0000:0000:0000:0000/64")) self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr( "2600::1/64")) self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6_cidr, "127.0.0.1") self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6_cidr, "failure") def test_is_valid_mac(self): self.assertTrue(utils.is_valid_mac("52:54:00:cf:2d:31")) self.assertTrue(utils.is_valid_mac(u"52:54:00:cf:2d:31")) self.assertFalse(utils.is_valid_mac("127.0.0.1")) self.assertFalse(utils.is_valid_mac("not:a:mac:address")) self.assertFalse(utils.is_valid_mac("52-54-00-cf-2d-31")) self.assertFalse(utils.is_valid_mac("aa bb cc dd ee ff")) self.assertTrue(utils.is_valid_mac("AA:BB:CC:DD:EE:FF")) self.assertFalse(utils.is_valid_mac("AA BB CC DD EE FF")) self.assertFalse(utils.is_valid_mac("AA-BB-CC-DD-EE-FF")) def test_validate_and_normalize_mac(self): mac = 'AA:BB:CC:DD:EE:FF' with mock.patch.object(utils, 'is_valid_mac') as m_mock: m_mock.return_value = True self.assertEqual(mac.lower(), utils.validate_and_normalize_mac(mac)) def test_validate_and_normalize_mac_invalid_format(self): with mock.patch.object(utils, 'is_valid_mac') as m_mock: m_mock.return_value = False self.assertRaises(exception.InvalidMAC, utils.validate_and_normalize_mac, 'invalid-mac') def test_safe_rstrip(self): value = '/test/' rstripped_value = '/test' not_rstripped = '/' self.assertEqual(rstripped_value, utils.safe_rstrip(value, '/')) self.assertEqual(not_rstripped, utils.safe_rstrip(not_rstripped, '/')) def test_safe_rstrip_not_raises_exceptions(self): # Supplying an integer should normally raise an exception because it # does not save the rstrip() method. value = 10 # In the case of raising an exception safe_rstrip() should return the # original value. 
self.assertEqual(value, utils.safe_rstrip(value)) class MkfsTestCase(base.TestCase): @mock.patch.object(utils, 'execute') def test_mkfs(self, execute_mock): utils.mkfs('ext4', '/my/block/dev') utils.mkfs('msdos', '/my/msdos/block/dev') utils.mkfs('swap', '/my/swap/block/dev') expected = [mock.call('mkfs', '-t', 'ext4', '-F', '/my/block/dev', run_as_root=True, use_standard_locale=True), mock.call('mkfs', '-t', 'msdos', '/my/msdos/block/dev', run_as_root=True, use_standard_locale=True), mock.call('mkswap', '/my/swap/block/dev', run_as_root=True, use_standard_locale=True)] self.assertEqual(expected, execute_mock.call_args_list) @mock.patch.object(utils, 'execute') def test_mkfs_with_label(self, execute_mock): utils.mkfs('ext4', '/my/block/dev', 'ext4-vol') utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol') utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol') expected = [mock.call('mkfs', '-t', 'ext4', '-F', '-L', 'ext4-vol', '/my/block/dev', run_as_root=True, use_standard_locale=True), mock.call('mkfs', '-t', 'msdos', '-n', 'msdos-vol', '/my/msdos/block/dev', run_as_root=True, use_standard_locale=True), mock.call('mkswap', '-L', 'swap-vol', '/my/swap/block/dev', run_as_root=True, use_standard_locale=True)] self.assertEqual(expected, execute_mock.call_args_list) @mock.patch.object(utils, 'execute', side_effect=processutils.ProcessExecutionError( stderr=os.strerror(errno.ENOENT))) def test_mkfs_with_unsupported_fs(self, execute_mock): self.assertRaises(exception.FileSystemNotSupported, utils.mkfs, 'foo', '/my/block/dev') @mock.patch.object(utils, 'execute', side_effect=processutils.ProcessExecutionError( stderr='fake')) def test_mkfs_with_unexpected_error(self, execute_mock): self.assertRaises(processutils.ProcessExecutionError, utils.mkfs, 'ext4', '/my/block/dev', 'ext4-vol') class IntLikeTestCase(base.TestCase): def test_is_int_like(self): self.assertTrue(utils.is_int_like(1)) self.assertTrue(utils.is_int_like("1")) self.assertTrue(utils.is_int_like("514")) self.assertTrue(utils.is_int_like("0")) self.assertFalse(utils.is_int_like(1.1)) self.assertFalse(utils.is_int_like("1.1")) self.assertFalse(utils.is_int_like("1.1.1")) self.assertFalse(utils.is_int_like(None)) self.assertFalse(utils.is_int_like("0.")) self.assertFalse(utils.is_int_like("aaaaaa")) self.assertFalse(utils.is_int_like("....")) self.assertFalse(utils.is_int_like("1g")) self.assertFalse( utils.is_int_like("0cc3346e-9fef-4445-abe6-5d2b2690ec64")) self.assertFalse(utils.is_int_like("a1")) class UUIDTestCase(base.TestCase): def test_generate_uuid(self): uuid_string = utils.generate_uuid() self.assertIsInstance(uuid_string, str) self.assertEqual(36, len(uuid_string)) # make sure there are 4 dashes self.assertEqual(32, len(uuid_string.replace('-', ''))) def test_is_uuid_like(self): self.assertTrue(utils.is_uuid_like(str(uuid.uuid4()))) def test_id_is_uuid_like(self): self.assertFalse(utils.is_uuid_like(1234567)) def test_name_is_uuid_like(self): self.assertFalse(utils.is_uuid_like('zhongyueluo')) class TempFilesTestCase(base.TestCase): def test_tempdir(self): dirname = None with utils.tempdir() as tempdir: self.assertTrue(os.path.isdir(tempdir)) dirname = tempdir self.assertFalse(os.path.exists(dirname)) @mock.patch.object(shutil, 'rmtree') @mock.patch.object(tempfile, 'mkdtemp') def test_tempdir_mocked(self, mkdtemp_mock, rmtree_mock): self.config(tempdir='abc') mkdtemp_mock.return_value = 'temp-dir' kwargs = {'a': 'b'} with utils.tempdir(**kwargs) as tempdir: self.assertEqual('temp-dir', tempdir) tempdir_created = tempdir 
mkdtemp_mock.assert_called_once_with(**kwargs) rmtree_mock.assert_called_once_with(tempdir_created) @mock.patch.object(utils, 'LOG') @mock.patch.object(shutil, 'rmtree') @mock.patch.object(tempfile, 'mkdtemp') def test_tempdir_mocked_error_on_rmtree(self, mkdtemp_mock, rmtree_mock, log_mock): self.config(tempdir='abc') mkdtemp_mock.return_value = 'temp-dir' rmtree_mock.side_effect = OSError with utils.tempdir() as tempdir: self.assertEqual('temp-dir', tempdir) tempdir_created = tempdir rmtree_mock.assert_called_once_with(tempdir_created) self.assertTrue(log_mock.error.called) class Urllib2_invalid_scheme(base.TestCase): def test_raise_exception_invalid_scheme_file(self): self.assertRaises( exception.Urllib2InvalidScheme, utils.raise_exception_invalid_scheme, 'file:///etc/passwd') def test_raise_exception_invalid_scheme_starting_colon(self): self.assertRaises( exception.Urllib2InvalidScheme, utils.raise_exception_invalid_scheme, ':///etc/passwd') def test_raise_exception_invalid_scheme_None(self): self.assertRaises( exception.Urllib2InvalidScheme, utils.raise_exception_invalid_scheme, None) def test_raise_exception_invalid_scheme_empty_string(self): self.assertRaises( exception.Urllib2InvalidScheme, utils.raise_exception_invalid_scheme, '') def test_raise_exception_invalid_scheme_http(self): utils.raise_exception_invalid_scheme(url='http://www.openstack.org') def test_raise_exception_invalid_scheme_https(self): utils.raise_exception_invalid_scheme(url='https://www.openstack.org') class GeneratePasswordTestCase(base.TestCase): def test_generate_password(self): password = utils.generate_password(length=12) self.assertTrue([c for c in password if c in '0123456789']) self.assertTrue([c for c in password if c in 'abcdefghijklmnopqrstuvwxyz']) self.assertTrue([c for c in password if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
# -*- coding: utf-8 -*- # Copyright 2010-2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Generator script for connection data.""" __author__ = "hidehiko" import cStringIO as StringIO import itertools import logging import optparse import os import struct import sys from build_tools import code_generator_util INVALID_COST = 30000 INVALID_1BYTE_COST = 255 RESOLUTION_FOR_1BYTE = 64 FILE_MAGIC = '\xAB\xCD' FALSE_VALUES = ['f', 'false', '0'] TRUE_VALUES = ['t', 'true', '1'] def ParseBoolFlag(value): if value is None: return False value = value.lower() if value in TRUE_VALUES: return True if value in FALSE_VALUES: return False # Unknown value. logging.critical('Unknown boolean flag: %s', value) sys.exit(1) def GetPosSize(filepath): # The pos-size should be equal to the number of lines. # TODO(hidehiko): Merge this method with pos_util in dictionary. with open(filepath, 'r') as stream: stream = code_generator_util.SkipLineComment(stream) # Count the number of lines. return sum(1 for _ in stream) def ParseConnectionFile(text_connection_file, pos_size, special_pos_size): # The result is a square matrix. mat_size = pos_size + special_pos_size matrix = [[0] * mat_size for _ in xrange(mat_size)] with open(text_connection_file) as stream: stream = code_generator_util.SkipLineComment(stream) # The first line contains the matrix column/row size. size = stream.next().rstrip() assert (int(size) == pos_size), '%s != %d' % (size, pos_size) for array_index, cost in enumerate(stream): cost = int(cost.rstrip()) rid = array_index / pos_size lid = array_index % pos_size if rid == 0 and lid == 0: cost = 0 matrix[rid][lid] = cost # Fill INVALID_COST in matrix elements for special POS. 
for rid in xrange(pos_size, mat_size): for lid in xrange(1, mat_size): # Skip EOS matrix[rid][lid] = INVALID_COST for lid in xrange(pos_size, mat_size): for rid in xrange(1, mat_size): # Skip BOS matrix[rid][lid] = INVALID_COST return matrix def CreateModeValueList(matrix): """Create a list of modes of each row.""" result = [] for row in matrix: m = {} for cost in row: if cost == INVALID_COST: # Heuristically, we do not compress INVALID_COST. continue m[cost] = m.get(cost, 0) + 1 mode_value = max(m.iteritems(), key=lambda (_, count): count)[0] result.append(mode_value) return result def CompressMatrixByModeValue(matrix, mode_value_list): # To compress the data size, we hold mode values for each row in a separate # list, and fill None into the matrix if it equals to the corresponding # mode value. assert len(matrix) == len(mode_value_list) for row, mode_value in itertools.izip(matrix, mode_value_list): for index in xrange(len(row)): if row[index] == mode_value: row[index] = None def OutputBitList(bit_list, stream): # Make sure that the bit list is aligned to the byte boundary. assert len(bit_list) % 8 == 0 for bits in code_generator_util.SplitChunk(bit_list, 8): byte = 0 for bit_index, bit in enumerate(bits): if bit: # Fill in LSB to MSB order. byte |= (1 << bit_index) stream.write(struct.pack('B', byte)) def BuildBinaryData(matrix, mode_value_list, use_1byte_cost): # To compress the connection data, we use two-level succinct bit vector. # # The basic idea to compress the rid-lid matrix is compressing each row as # follows: # find the mode value of the row, and set the cells containins the value # empty, thus we get a sparse array. # We can compress sparse array by using succinct bit vector. # (Please see also storage/louds/simple_succinct_bit_vector_index and # storage/louds/bit_vector_based_array.) # In addition, we compress the bit vector, too. Fortunately the distribution # of bits is biased, so we group consecutive 8-bits and create another # bit vector, named chunk-bits; # - if no bits are 1, the corresponding bit is 0, otherwise 1. # By using the bit vector, we can compact the original bit vector by skipping # consecutive eight 0-bits. We can calculate the actual bit position in # the compact bit vector by using Rank1 operation on chunk-bits. # # The file format is as follows: # FILE_MAGIC (\xAB\xCD): 2bytes # Resolution: 2bytes # Num rids: 2bytes # Num lids: 2bytes # A list of mode values: 2bytes * rids (aligned to 32bits) # A list of row data. # # The row data format is as follows: # The size of compact bits in bytes: 2bytes # The size of values in bytes: 2bytes # chunk_bits, compact_bits, followed by values. if use_1byte_cost: resolution = RESOLUTION_FOR_1BYTE else: resolution = 1 stream = StringIO.StringIO() # Output header. stream.write(FILE_MAGIC) matrix_size = len(matrix) assert 0 <= matrix_size <= 65535 stream.write(struct.pack('<HHH', resolution, matrix_size, matrix_size)) # Output mode value list. for value in mode_value_list: assert 0 <= value <= 65536 stream.write(struct.pack('<H', value)) # 4 bytes alignment. if len(mode_value_list) % 2: stream.write('\x00\x00') # Process each row: for row in matrix: chunk_bits = [] compact_bits = [] values = [] for chunk in code_generator_util.SplitChunk(row, 8): if all(cost is None for cost in chunk): # All bits are 0, so output 0-chunk bit. 
chunk_bits.append(False) continue chunk_bits.append(True) for cost in chunk: if cost is None: compact_bits.append(False) else: compact_bits.append(True) if use_1byte_cost: if cost == INVALID_COST: cost = INVALID_1BYTE_COST else: cost /= resolution assert cost != INVALID_1BYTE_COST values.append(cost) # 4 bytes alignment. while len(chunk_bits) % 32: chunk_bits.append(False) while len(compact_bits) % 32: compact_bits.append(False) if use_1byte_cost: while len(values) % 4: values.append(0) values_size = len(values) else: while len(values) % 2: values.append(0) values_size = len(values) * 2 # Output the bits for a row. stream.write(struct.pack('<HH', len(compact_bits) / 8, values_size)) OutputBitList(chunk_bits, stream) OutputBitList(compact_bits, stream) if use_1byte_cost: for value in values: assert 0 <= value <= 255 stream.write(struct.pack('<B', value)) else: for value in values: assert 0 <= value <= 65535 stream.write(struct.pack('<H', value)) return stream.getvalue() def ParseOptions(): parser = optparse.OptionParser() parser.add_option('--text_connection_file', dest='text_connection_file') parser.add_option('--id_file', dest='id_file') parser.add_option('--special_pos_file', dest='special_pos_file') parser.add_option('--target_compiler', dest='target_compiler') parser.add_option('--use_1byte_cost', dest='use_1byte_cost') parser.add_option('--binary_output_file', dest='binary_output_file') parser.add_option('--header_output_file', dest='header_output_file') return parser.parse_args()[0] def main(): options = ParseOptions() pos_size = GetPosSize(options.id_file) special_pos_size = GetPosSize(options.special_pos_file) matrix = ParseConnectionFile( options.text_connection_file, pos_size, special_pos_size) mode_value_list = CreateModeValueList(matrix) CompressMatrixByModeValue(matrix, mode_value_list) binary = BuildBinaryData( matrix, mode_value_list, ParseBoolFlag(options.use_1byte_cost)) if options.binary_output_file: dirpath = os.path.dirname(options.binary_output_file) if not os.path.exists(dirpath): os.makedirs(dirpath) with open(options.binary_output_file, 'wb') as stream: stream.write(binary) if options.header_output_file: dirpath = os.path.dirname(options.header_output_file) if not os.path.exists(dirpath): os.makedirs(dirpath) with open(options.header_output_file, 'wb') as stream: code_generator_util.WriteCppDataArray( binary, 'ConnectionData', options.target_compiler, stream) if __name__ == '__main__': main()
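# ---------------------------------------------------------------------------
# Hedged illustration of the mode-value compression used above (Python 2,
# like the rest of this module).  The 3x3 matrix is made up purely for the
# example: each row's most frequent cost becomes its mode value, and the
# matching cells are blanked to None so BuildBinaryData() can encode the row
# as a sparse array behind the succinct bit vectors.
# ---------------------------------------------------------------------------
def _CompressionExample():
  toy_matrix = [[5, 5, 7],
                [2, 9, 2],
                [4, 4, 4]]
  mode_values = CreateModeValueList(toy_matrix)        # [5, 2, 4]
  CompressMatrixByModeValue(toy_matrix, mode_values)   # edits rows in place
  # toy_matrix is now:
  #   [[None, None, 7], [None, 9, None], [None, None, None]]
  return mode_values, toy_matrix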
# Copyright 2009, Peter A. Bigot # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain a # copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pyxb.bundles.wssplat.raw.wsdl11 import * import pyxb.bundles.wssplat.raw.wsdl11 as raw_wsdl11 import pyxb.namespace import pyxb.utils.domutils as domutils import xml.dom def ImportRelatedNamespaces (): """Import modules for related namespaces so they are available to create binding instances from the WSDL sources.""" try: import pyxb.bundles.wssplat.soapbind11 except ImportError: pass try: import pyxb.bundles.wssplat.soapbind12 except ImportError: pass try: import pyxb.bundles.wssplat.soap11 except ImportError: pass try: import pyxb.bundles.wssplat.soap12 except ImportError: pass try: import pyxb.bundles.wssplat.soapenv except ImportError: pass try: import pyxb.bundles.wssplat.httpbind except ImportError: pass try: import pyxb.bundles.wssplat.mimebind except ImportError: pass class _WSDL_binding_mixin (object): """Mix-in class to mark element Python bindings that are expected to be wildcard matches in WSDL binding elements.""" pass class _WSDL_port_mixin (object): """Mix-in class to mark element Python bindings that are expected to be wildcard matches in WSDL port elements.""" pass class _WSDL_operation_mixin (object): """Mix-in class to mark element Python bindings that are expected to be wildcard matches in WSDL (binding) operation elements.""" pass class tPort (raw_wsdl11.tPort): def __getBindingReference (self): return self.__bindingReference def _setBindingReference (self, binding_reference): self.__bindingReference = binding_reference __bindingReference = None bindingReference = property(__getBindingReference) def __getAddressReference (self): return self.__addressReference def _setAddressReference (self, address_reference): self.__addressReference = address_reference __addressReference = None addressReference = property(__getAddressReference) raw_wsdl11.tPort._SetSupersedingClass(tPort) class tBinding (raw_wsdl11.tBinding): def __getPortTypeReference (self): return self.__portTypeReference def setPortTypeReference (self, port_type_reference): self.__portTypeReference = port_type_reference __portTypeReference = None portTypeReference = property(__getPortTypeReference) def __getProtocolBinding (self): """Return the protocol-specific binding information.""" return self.__protocolBinding def _setProtocolBinding (self, protocol_binding): self.__protocolBinding = protocol_binding __protocolBinding = None protocolBinding = property(__getProtocolBinding) def operationMap (self): return self.__operationMap __operationMap = None def __init__ (self, *args, **kw): super(tBinding, self).__init__(*args, **kw) self.__operationMap = { } raw_wsdl11.tBinding._SetSupersedingClass(tBinding) class tPortType (raw_wsdl11.tPortType): def operationMap (self): return self.__operationMap __operationMap = None def __init__ (self, *args, **kw): super(tPortType, self).__init__(*args, **kw) self.__operationMap = { } raw_wsdl11.tPortType._SetSupersedingClass(tPortType) class tParam (raw_wsdl11.tParam): def __getMessageReference (self): return 
self.__messageReference def _setMessageReference (self, message_reference): self.__messageReference = message_reference __messageReference = None messageReference = property(__getMessageReference) raw_wsdl11.tParam._SetSupersedingClass(tParam) class tFault (raw_wsdl11.tFault): def __getMessageReference (self): return self.__messageReference def _setMessageReference (self, message_reference): self.__messageReference = message_reference __messageReference = None messageReference = property(__getMessageReference) raw_wsdl11.tFault._SetSupersedingClass(tFault) class tPart (raw_wsdl11.tPart): def __getElementReference (self): return self.__elementReference def _setElementReference (self, element_reference): self.__elementReference = element_reference __elementReference = None elementReference = property(__getElementReference) def __getTypeReference (self): return self.__typeReference def _setTypeReference (self, type_reference): self.__typeReference = type_reference __typeReference = None typeReference = property(__getTypeReference) raw_wsdl11.tPart._SetSupersedingClass(tPart) class tBindingOperation (raw_wsdl11.tBindingOperation): def __getOperationReference (self): return self.__operationReference def _setOperationReference (self, operation_reference): self.__operationReference = operation_reference __operationReference = None operationReference = property(__getOperationReference) raw_wsdl11.tBindingOperation._SetSupersedingClass(tBindingOperation) class tDefinitions (raw_wsdl11.tDefinitions): def messageMap (self): return self.targetNamespace().messages() def namespaceContext (self): return self.__namespaceContext __namespaceContext = None def bindingMap (self): return self.__bindingMap __bindingMap = None def targetNamespace (self): return self.namespaceContext().targetNamespace() def namespace (self): return self.__namespace __namespace = None def _addToMap (self, map, qname, value): map[qname] = value (ns, ln) = qname if (ns == self.targetNamespace()): map[(None, ln)] = value elif (ns is None): map[(self.targetNamespace(), ln)] = value return map def schema (self): return self.__schema __schema = None @classmethod def _PreFactory_vx (self, args, kw): # Import standard bindings. If we do this, then wildcard # binding, port, and operation elements will be recognized and # converted into bindings. import pyxb.bundles.wssplat.soapbind11 import pyxb.bundles.wssplat.soapbind12 import pyxb.bundles.wssplat.httpbind # Ensure we have definitions for any externally-referenced # things we might need. @todo: This might have to # chronologically precede the import above. 
pyxb.namespace.archive.NamespaceArchive.PreLoadArchives() raw_wsdl11.Namespace.validateComponentModel() state = ( kw.pop('process_schema', False), kw.pop('generation_uid', None), kw.get('_dom_node', None) ) return state def _postFactory_vx (self, state): (process_schema, generation_uid, dom_node) = state assert isinstance(dom_node, xml.dom.Node) node_en = pyxb.namespace.ExpandedName(dom_node) self.__namespaceContext = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(dom_node) self.__buildMaps() if process_schema: self.__processSchema(generation_uid) self.__finalizeReferences() return self __WSDLCategories = ( 'service', 'port', 'message', 'binding', 'portType' ) def __buildMaps (self): tns = self.namespaceContext().targetNamespace() tns.configureCategories(self.__WSDLCategories) for m in self.message: tns.messages()[m.name] = m for pt in self.portType: tns.portTypes()[pt.name] = pt for op in pt.operation: pt.operationMap()[op.name] = op params = op.fault[:] if op.input is not None: params.append(op.input) if op.output is not None: params.append(op.output) for p in params: msg_en = m._namespaceContext().interpretQName(p.message) p._setMessageReference(msg_en.message()) for b in self.binding: tns.bindings()[b.name] = b port_type_en = b._namespaceContext().interpretQName(b.type) b.setPortTypeReference(port_type_en.portType()) for wc in b.wildcardElements(): if isinstance(wc, _WSDL_binding_mixin): b._setProtocolBinding(wc) break for op in b.operation: b.operationMap()[op.name] = op for wc in op.wildcardElements(): if isinstance(wc, _WSDL_operation_mixin): op._setOperationReference(wc) break for s in self.service: tns.services()[s.name] = s for p in s.port: binding_en = p._namespaceContext().interpretQName(p.binding) p._setBindingReference(binding_en.binding()) for wc in p.wildcardElements(): if isinstance(wc, _WSDL_port_mixin): p._setAddressReference(wc) break def __processSchema (self, generation_uid): global pyxb import pyxb.xmlschema print 'PS %s' % (generation_uid,) if self.__schema is not None: print 'Already have schema' return self.__schema for t in self.types: for wc in t.wildcardElements(): if isinstance(wc, xml.dom.Node) and pyxb.namespace.XMLSchema.nodeIsNamed(wc, 'schema'): # Try to load component models for any namespace referenced by this. # Probably shouldn't need to do this except for imported ones. 
for ns in self.namespaceContext().inScopeNamespaces().values(): try: ns.validateComponentModel() except Exception, e: print 'Error validating component model for %s: %s' % (ns.uri(), e) self.__schema = pyxb.xmlschema.schema.CreateFromDOM(wc, namespace_context=self.namespaceContext(), generation_uid=generation_uid) elif isinstance(wc, pyxb.xmlschema.schema): self.__schema = wc else: print 'No match: %s %s' % (wc.namespaceURI, namespace.localName) if self.__schema is not None: return self.__schema return None def __finalizeReferences (self): tns = self.namespaceContext().targetNamespace() for m in tns.messages().values(): for p in m.part: if (p.element is not None) and (p.elementReference is None): elt_en = p._namespaceContext().interpretQName(p.element) p._setElementReference(elt_en.elementDeclaration()) if (p.type is not None) and (p.typeReference is None): type_en = p._namespaceContext().interpretQName(p.type) p._setTypeReference(type_en.typeDefinition()) raw_wsdl11.tDefinitions._SetSupersedingClass(tDefinitions) pyxb.namespace.resolution.NamespaceContext._AddTargetNamespaceAttribute(raw_wsdl11.Namespace.createExpandedName('definitions'), pyxb.namespace.ExpandedName('targetNamespace'))
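# Usage sketch for the superseded bindings above: load a WSDL document and walk
# the maps populated by __buildMaps/__finalizeReferences.  The entry point
# (raw_wsdl11.definitions.createFromDOM forwarding process_schema to
# tDefinitions._PreFactory_vx) follows PyXB conventions but is an assumption
# here, and 'service.wsdl' is a hypothetical file name.
def _wsdl11_usage_sketch():
    import xml.dom.minidom

    dom = xml.dom.minidom.parse('service.wsdl')
    defs = raw_wsdl11.definitions.createFromDOM(dom.documentElement,
                                                process_schema=True)
    # Because tDefinitions supersedes raw_wsdl11.tDefinitions, the factory
    # returns an instance of the class defined above with resolved references.
    tns = defs.namespaceContext().targetNamespace()
    for name in sorted(tns.messages()):
        print('message %s' % (name,))
    for name in sorted(tns.portTypes()):
        print('portType %s' % (name,))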
import demistomock as demisto from CommonServerPython import * from CommonServerUserPython import * from typing import Optional ''' IMPORTS ''' import re import os import json import requests from base64 import b64encode ''' GLOBAL VARS / INSTANCE CONFIGURATION ''' PARAMS = demisto.params() USERNAME = PARAMS.get('credentials', {}).get('identifier', '') PASSWORD = PARAMS.get('credentials', {}).get('password', '') AUTH = (USERNAME + ':' + PASSWORD).encode('utf-8') BASIC_AUTH = 'Basic ' + b64encode(AUTH).decode() # Remove trailing slash to prevent wrong URL path to service SERVER = PARAMS.get('url', '') SERVER = SERVER[:-1] if (SERVER and SERVER.endswith('/')) else SERVER # Service base URL BASE_URL = SERVER + '/v1/' # Should we use SSL USE_SSL = not PARAMS.get('insecure', False) PROXY = PARAMS.get('proxy', False) # Headers to be sent in requests HEADERS = { 'Authorization': BASIC_AUTH } # Context fields that should always be uppercase ALWAYS_UPPER_CASE = { 'md5', 'sha1', 'sha256', 'sha512', 'pcap', 'ip', 'url', 'id', 'pid', 'ppid', 'uuid', 'asn', 'mime' } THREAT_TEXT_TO_DBOTSCORE = { 'no threats detected': 1, 'suspicious activity': 2, 'malicious activity': 3 } ''' SETUP ''' # Disable insecure warnings requests.packages.urllib3.disable_warnings() # Remove proxy if not set to true in params if not PROXY: os.environ.pop('HTTP_PROXY', '') os.environ.pop('HTTPS_PROXY', '') os.environ.pop('http_proxy', '') os.environ.pop('https_proxy', '') ''' HELPER FUNCTIONS ''' def underscore_to_camel_case(s): """ Convert an underscore separated string to camel case, leaving one-word strings untouched Parameters ---------- s : str The string to convert (e.g. heLLo_world) (required). Returns ------- str The converted string (e.g. heLLoWorld). """ if not isinstance(s, str): return s components = s.split('_') return ''.join(x.title() if i != 0 else x for i, x in enumerate(components)) def make_upper(string): """ Make argument uppercase if it is a member of 'ALWAYS_UPPER_CASE' global variable Parameters ---------- string : str The string to check and potentially make uppercase. Returns ------- str Uppercased string (or original string if it didn't match the criteria). """ if isinstance(string, str): if string.casefold() in ALWAYS_UPPER_CASE: return string.upper() elif string.casefold() == 'ssdeep': # special case return 'SSDeep' else: return string else: return string def make_capital(string): """Capitalize first letter of a string, leaving the rest of the string as is Parameters ---------- string : str The string to capitalize (e.g. 'foRUm'). Returns ------- str The capitalized string (e.g. 'FoRUm'). """ if isinstance(string, str) and string: return string[:1].upper() + string[1:] else: return string def make_singular(word): """Relatively naive/imperfect function to make a word singular Parameters ---------- word : str The string to make singular (e.g. 'zebras'). Returns ------- str The string in singular form (e.e. 'zebra'). """ if not isinstance(word, str) or not word: return word word_as_lower = word.casefold() # Not a plural if not word_as_lower.endswith('s'): return word # Word ends in 's' and is therefore possibly plural else: es_endings = ('sses', 'shes', 'ches', 'xes', 'zes') if word_as_lower.endswith(es_endings): # Then the word was pluralized by adding 'es' return word[:-2] elif word_as_lower.endswith('ss'): # Then it's probably not a plural, e.g. 'assess' or 'process' return word elif len(word) <= 2: # Then it's probably not a plural, e.g. 
'OS' return word elif word_as_lower.endswith('sis') or word_as_lower.endswith('us'): # Then it's probably singular like 'analysis' and 'cactus' and 'focus' return word else: # Assume regular noun pluralization of adding an 's' return word[:-1] def travel_object(obj, key_functions=[], val_functions=[]): """Recursively apply functions to the keys and values of a dictionary Parameters ---------- obj : dict/list List or dict to recurse through. key_functions : list Functions to apply to the keys in 'obj'. val_functions : list Functions to apply to the values in 'obj' Returns ------- list/dict A list or dict in which all nested keys and values have been altered by the key_functions and val_functions respectively. """ def operate_on_dict(the_dict): new_dict = {} for key, val in the_dict.items(): new_key = key for key_func in key_functions: new_key = key_func(new_key) if isinstance(val, dict) or isinstance(val, list): new_val = travel_object(val, key_functions=key_functions, val_functions=val_functions) else: new_val = val for val_func in val_functions: new_val = val_func(val) new_dict[new_key] = new_val return new_dict if isinstance(obj, list): new_list = [] for item in obj: new_item = operate_on_dict(item) if isinstance(item, dict) else item new_list.append(new_item) return new_list elif isinstance(obj, dict): altered_dict = operate_on_dict(obj) return altered_dict else: err_msg = 'Invalid type: the passed "obj" argument was not of type "dict" or "list".' raise TypeError(err_msg) def generate_dbotscore(response): """Creates DBotScore object based on the contents of 'response' argument Parameters ---------- response : dict Object returned by ANYRUN API call in 'get_report' function. Returns ------- dict A DBotScore object. """ analysis = response.get('data', {}).get('analysis', {}) main_object = analysis.get('content', {}).get('mainObject', {}) submission_type = main_object.get('type') submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold() if submission_type == 'hash': hashes = main_object.get('hashes', {}) indicator = hashes.get('sha256', hashes.get('sha1', hashes.get('md5'))) else: indicator = main_object.get('url') dbot_score = { "DBotScore": { "Indicator": indicator, "Type": submission_type, "Vendor": "ANYRUN", "Score": THREAT_TEXT_TO_DBOTSCORE.get(threat_text, 0) } } return dbot_score def add_malicious_key(entity, verdict): """Return the entity with the additional 'Malicious' key if determined as such by ANYRUN Parameters ---------- entity : dict File or URL object. verdict : dict Task analysis verdict for a detonated file or url. Returns ------- dict The modified entity if it was malicious, otherwise the original entity. """ threat_level_text = verdict.get('threatLevelText', '') if threat_level_text.casefold() == 'malicious activity': entity['Malicious'] = { 'Vendor': 'ANYRUN', 'Description': threat_level_text } return entity def ec_file(main_object): """Return File entity in Demisto format for use in entry context Parameters ---------- main_object : dict The main object from a report's contents. Returns ------- dict File object populated by report contents. 
""" name = main_object.get('filename') hashes = main_object.get('hashes', {}) md5 = hashes.get('md5') sha1 = hashes.get('sha1') sha256 = hashes.get('sha256') ssdeep = hashes.get('ssdeep') ext = main_object.get('info', {}).get('ext') file_ec = { 'File': { 'Name': name, 'MD5': md5, 'SHA1': sha1, 'SHA256': sha256, 'SSDeep': ssdeep, 'Extension': ext } } return file_ec def ec_url(main_object): """Return URL entity in Demisto format for use in entry context Parameters ---------- main_object : dict The main object from a report's contents. Returns ------- dict URL object populated by report contents. """ url = main_object.get('url') url_ec = { 'URL': { 'Data': url } } return url_ec def ec_entity(response): """ Return URL or File entity in Demisto format for use in entry context depending on data in 'response' (the report) Parameters ---------- response : dict Object returned by ANYRUN API call in 'get_report' function. Returns ------- dict File or URL object populated by report contents. """ analysis = response.get('data', {}).get('analysis', {}) verdict = analysis.get('scores', {}).get('verdict', {}) main_object = analysis.get('content', {}).get('mainObject', {}) submission_type = main_object.get('type') entity = None if submission_type == 'url': entity = ec_url(main_object) entity['URL'] = add_malicious_key(entity.get('URL', {}), verdict) else: entity = ec_file(main_object) entity['File'] = add_malicious_key(entity.get('File', {}), verdict) return entity def taskid_from_url(anyrun_url): """Extract task ID from ANYRUN url inside a 'task' result returned by the get_history command Parameters ---------- anyrun_url : str URL that contains an ANYRUN task ID. Returns ------- str An ANYRUN task ID. """ pattern = r'tasks/(.*?)/' match = re.search(pattern, anyrun_url) if match: task_id = match.groups()[0] else: task_id = None return task_id def images_from_report(response): """Retrieve images from an ANYRUN report Parameters ---------- response : dict Object returned by ANYRUN API call in 'get_report' function. Returns ------- list List of images from ANYRUN report. """ data = response.get('data', {}) analysis = data.get('analysis', {}) content = analysis.get('content', {}) screenshots = content.get('screenshots', []) screen_captures = [] for idx, shot in enumerate(screenshots): screen_cap_url = shot.get('permanentUrl') img_response = requests.request('GET', screen_cap_url, verify=USE_SSL) stored_img = fileResult('screenshot{}.png'.format(idx), img_response.content) img_entry = { 'Type': entryTypes['image'], 'ContentsFormat': formats['text'], 'File': stored_img['File'], 'FileID': stored_img['FileID'], 'Contents': '' } screen_captures.append(img_entry) return screen_captures def contents_from_report(response): """Selectively retrieve content from an ANYRUN report Parameters ---------- response : dict Object returned by ANYRUN API call in 'get_report' function. Returns ------- dict Selected content from ANYRUN report. 
""" data = response.get('data', {}) environment = data.get('environments', {}) analysis = data.get('analysis', {}) processes = data.get('processes', []) incidents = data.get('incidents', []) status = data.get('status') # Retrieve environment info from response os = environment.get('os', {}).get('title') # Retrieve threat score + info from response score = analysis.get('scores', {}) verdict = score.get('verdict', {}) threat_level_text = verdict.get('threatLevelText') # Retrieve analysis time stuff start_text = analysis.get('creationText') # Retrieve submitted file info from response content = analysis.get('content', {}) main_object = content.get('mainObject', {}) info = main_object.get('info', {}) mime = info.get('mime') file_info = info.get('file') hashes = main_object.get('hashes') # Retrieve network details network = data.get('network', {}) threats = network.get('threats', []) connections = network.get('connections', []) http_reqs = network.get('httpRequests', []) dns_requests = network.get('dnsRequests', []) reformatted_threats = [] for threat in threats: reformatted_threat = { 'ProcessUUID': threat.get('process'), 'Message': threat.get('msg'), 'Class': threat.get('class'), 'SrcPort': threat.get('srcport'), 'DstPort': threat.get('dstport'), 'SrcIP': threat.get('srcip'), 'DstIP': threat.get('dstip') } reformatted_threats.append(reformatted_threat) network['threats'] = reformatted_threats reformatted_connections = [] for connection in connections: reformatted_connection = { 'Reputation': connection.get('reputation'), 'ProcessUUID': connection.get('process'), 'ASN': connection.get('asn'), 'Country': connection.get('country'), 'Protocol': connection.get('protocol'), 'Port': connection.get('port'), 'IP': connection.get('ip') } reformatted_connections.append(reformatted_connection) network['connections'] = reformatted_connections reformatted_http_reqs = [] for http_req in http_reqs: reformatted_http_req = { 'Reputation': http_req.get('reputation'), 'Country': http_req.get('country'), 'ProcessUUID': http_req.get('process'), 'Body': http_req.get('body'), 'HttpCode': http_req.get('httpCode'), 'Status': http_req.get('status'), 'ProxyDetected': http_req.get('proxyDetected'), 'Port': http_req.get('port'), 'IP': http_req.get('ip'), 'URL': http_req.get('url'), 'Host': http_req.get('host'), 'Method': http_req.get('method') } reformatted_http_reqs.append(reformatted_http_req) network['httpRequests'] = reformatted_http_reqs reformatted_dns_requests = [] for dns_request in dns_requests: reformatted_dns_request = { 'Reputation': dns_request.get('reputation'), 'IP': dns_request.get('ips'), 'Domain': dns_request.get('domain') } reformatted_dns_requests.append(reformatted_dns_request) network['dnsRequests'] = reformatted_dns_requests # Retrieve process details reformatted_processes = [] for process in processes: context = process.get('context', {}) reformatted_process = { 'FileName': process.get('fileName'), 'PID': process.get('pid'), 'PPID': process.get('ppid'), 'ProcessUUID': process.get('uuid'), 'CMD': process.get('commandLine'), 'Path': process.get('image'), 'User': context.get('userName'), 'IntegrityLevel': context.get('integrityLevel'), 'ExitCode': process.get('exitCode'), 'MainProcess': process.get('mainProcess'), 'Version': process.get('versionInfo', {}) } reformatted_processes.append(reformatted_process) # Retrieve incident details reformatted_incidents = [] for incident in incidents: reformatted_incident = { 'ProcessUUID': incident.get('process'), 'Category': incident.get('desc'), 'Action': 
incident.get('title'), 'ThreatLevel': incident.get('threatLevel') } reformatted_incidents.append(reformatted_incident) contents = { 'OS': os, 'AnalysisDate': start_text, 'Verdict': threat_level_text, 'MIME': mime, 'FileInfo': file_info, 'Process': reformatted_processes, 'Behavior': reformatted_incidents, 'Status': status } if hashes: for key, val in hashes.items(): contents[key] = val if network: for key, val in network.items(): contents[key] = val return contents def humanreadable_from_report_contents(contents): """Make the selected contents pulled from a report suitable for war room output Parameters ---------- contents : dict Contents selected from an ANYRUN report for Demisto output. Returns ------- dict Contents formatted so that nested dicts/lists appear nicely in a war room entry. """ def dict_to_string(nested_dict): return json.dumps(nested_dict).lstrip('{').rstrip('}').replace('\'', '').replace('\"', '') humanreadable_contents = {} for key, val in contents.items(): if isinstance(val, dict): humanreadable_contents[key] = dict_to_string(val) elif isinstance(val, list): humanreadable_vals = [] for item in val: if isinstance(item, dict): humanreadable_vals.append(dict_to_string(item)) else: humanreadable_vals.append(item) humanreadable_contents[key] = humanreadable_vals else: humanreadable_contents[key] = val return humanreadable_contents def contents_from_history(filter, response): """Return desired fields from filtered response Parameters ---------- filter : str File name (for a file analysis), URL (for a URL analysis), Task ID, or hash by which to filter task history. response : dict Object returned by ANYRUN API call in 'get_history' function. Returns ------- list List of Task summaries matching the filter. """ # Filter response tasks = response.get('data', {}).get('tasks', {}) desired_fields = {'related', 'verdict', 'date'} filtered_tasks = [] for task in tasks: # First fetch fields that we can filter on name = task.get('name') hashes = task.get('hashes') file_url = task.get('file') task_id = taskid_from_url(file_url) if filter and filter not in {name, task_id, *hashes.values()}: continue # Reconstruct task dict with desired output fields if filter satisfied filtered_task = {'name': name, 'id': task_id, 'file': file_url, 'hashes': hashes} for field in task: if field in desired_fields: filtered_task[field] = task.get(field) filtered_tasks.append(filtered_task) return filtered_tasks def http_request(method, url_suffix, params=None, data=None, files=None): """ A wrapper for requests lib to send our requests and handle requests and responses better Parameters ---------- method : str HTTP method, e.g. 'GET', 'POST' ... etc. url_suffix : str API endpoint. params : dict URL parameters. data : dict Data to be sent in a 'POST' request. files : dict File data to be sent in a 'POST' request. Returns ------- dict Response JSON from having made the request. 
""" try: res = requests.request( method, BASE_URL + url_suffix, verify=USE_SSL, params=params, data=data, files=files, headers=HEADERS, ) # Handle error responses gracefully if res.status_code not in {200, 201}: err_msg = 'Error in ANYRUN Integration API call [{}] - {}'.format(res.status_code, res.reason) try: if res.json().get('error'): err_msg += '\n{}'.format(res.json().get('message')) raise DemistoException(err_msg, res=res) except json.decoder.JSONDecodeError: raise DemistoException(err_msg, res=res) return res.json() except requests.exceptions.ConnectionError: err_msg = 'Connection Error - Check that the Server URL parameter is correct.' raise DemistoException(err_msg) ''' COMMANDS + REQUESTS FUNCTIONS ''' def test_module(): """Performs get_history API call to verify integration is operational Returns ------- str 'ok' message. """ get_history(args={'limit': 1}) demisto.results('ok') def get_history(args={}): """Make API call to ANYRUN to get analysis history Parameters ---------- args : dict URL parameters that determine which, and how many results are returned in the response. Returns ------- dict Response JSON from ANYRUN API call. """ url_suffix = 'analysis/' params = args response = http_request('GET', url_suffix=url_suffix, params=params) return response def get_history_command(): """Return ANYRUN task analysis history to Demisto""" args = demisto.args() filter = args.pop('filter', None) response = get_history(args) contents = contents_from_history(filter, response) formatting_funcs = [underscore_to_camel_case, make_capital, make_singular, make_upper] formatted_contents = travel_object(contents, key_functions=formatting_funcs) if contents: entry_context: Optional[dict] = { 'ANYRUN.Task(val.ID && val.ID === obj.ID)': formatted_contents } title = 'Task History - Filtered By "{}"'.format(filter) if filter else 'Task History' # Make Related Clickable for task in formatted_contents: related = task.get('Related', '') task['Related'] = '[{}]({})'.format(related, related) human_readable = tableToMarkdown(title, formatted_contents, removeNull=True) else: human_readable = 'No results found.' entry_context = None return_outputs(readable_output=human_readable, outputs=entry_context, raw_response=response) def get_report(task_id): """Make API call to ANYRUN to get task report Parameters ---------- task_id : str The unique task ID of the analysis whose report to fetch. Returns ------- dict Response JSON from ANYRUN API call. 
""" try: # according to the any-run documentation, this request should work: # https://any.run/api-documentation/#api-Analysis-GetReport url_suffix = f'analysis/{task_id}' response = http_request('GET', url_suffix=url_suffix) except DemistoException as exc: if exc.res and exc.res.status_code != 403: raise # in case of 403, try a work-around suggested by customer url_suffix = 'analysis/' params = { 'task': task_id, } response = http_request('GET', url_suffix=url_suffix, params=params) return response def get_report_command(): """Return ANYRUN analysis report to Demisto""" args = demisto.args() task_id = args.get('task') response = get_report(task_id) images = images_from_report(response) contents = contents_from_report(response) formatting_funcs = [underscore_to_camel_case, make_capital, make_singular, make_upper] formatted_contents = travel_object(contents, key_functions=formatting_funcs) dbot_score = generate_dbotscore(response) entity = ec_entity(response) entry_context = { 'ANYRUN.Task(val.ID && val.ID === obj.ID)': { 'ID': task_id, **formatted_contents }, **dbot_score, **entity } title = 'Report for Task {}'.format(task_id) human_readable_content = humanreadable_from_report_contents(formatted_contents) human_readable = tableToMarkdown(title, human_readable_content, removeNull=True) return_outputs(readable_output=human_readable, outputs=entry_context, raw_response=response) if images: demisto.results(images) def run_analysis(args): """Make API call to ANYRUN to submit file or url for analysis Parameters ---------- args : dict The analysis specifications and data. Returns ------- dict Response JSON from ANYRUN API call. """ try: entry_id = args.pop('file', None) obj_url = args.get('obj_url') obj_type = args.get('obj_type') if obj_type == 'remote file': obj_type = 'download' args['obj_type'] = 'download' # In the case only a url was entered but the object type arg wasn't changed if not entry_id and obj_url and obj_type == 'file': args['obj_type'] = obj_type = 'url' files = None if obj_type == 'file': cmd_res = demisto.getFilePath(entry_id) file_path = cmd_res.get('path') name = cmd_res.get('name') files = { 'file': (name, open(file_path, 'rb')) } # Format command arguments to API's parameter expectations env_bitness = int(args.get('env_bitness', 32)) args['env_bitness'] = env_bitness env_version = args.get('env_version').lower() if env_version == 'windows vista': args['env_version'] = 'vista' elif env_version == 'windows 8.1': args['env_version'] = '8.1' elif env_version == 'windows 10': args['env_version'] = '10' else: args['env_version'] = '7' url_suffix = 'analysis' response = http_request('POST', url_suffix, data=args, files=files) return response except ValueError: err_msg = 'Invalid entryID - File not found for the given entryID' return_error(err_msg) def run_analysis_command(): """Submit file or URL to ANYRUN for analysis and return task ID to Demisto""" args = demisto.args() response = run_analysis(args) task_id = response.get('data', {}).get('taskid') title = 'Submission Successful' human_readable = tableToMarkdown(title, {'Task': task_id}, removeNull=True) entry_context = {'ANYRUN.Task(val.ID && val.ID === obj.ID)': {'ID': task_id}} return_outputs(readable_output=human_readable, outputs=entry_context, raw_response=response) ''' COMMANDS MANAGER / SWITCH PANEL ''' COMMANDS = { 'test-module': test_module, 'anyrun-get-history': get_history_command, 'anyrun-get-report': get_report_command, 'anyrun-run-analysis': run_analysis_command, } ''' EXECUTION ''' def main(): """Main Execution 
block""" try: cmd_name = demisto.command() LOG('Command being called is {}'.format(cmd_name)) handle_proxy() if cmd_name in COMMANDS.keys(): COMMANDS[cmd_name]() except Exception as e: return_error(str(e), error=traceback.format_exc()) # python2 uses __builtin__ python3 uses builtins if __name__ in ('__builtin__', 'builtins'): main()
# Convert GPO Fdsys STATUTE metadata into bill files. # # GPO has the Statutes at Large from 1951 (the 65th # volume, 82nd Congress) to the present, with metadata # at the level of the law. # # The bill files have sort of made up action entries # since we don't know the legislative history of the bill. # We also assume all bills are enacted by being signed # by the President for the sake of outputting status # information. # # First download the Statutes at Large from GPO: # # ./run fdsys --collections=STATUTE --store=mods # # To process statute text, get the text PDFs: # # ./run fdsys --collections=STATUTE --store=pdfs --granules # # Then run this script: # # ./run statutes # # Processes all downloaded statutes files and saves bill files: # data/82/bills/hr/hr1/data.json and # data/82/bills/hr/hr1/text-versions/enr/data.json # # Specify --textversions to only write the text-versions file. # # If the individual statute PDF files are available, then # additional options are possible: # # If --linkpdf is given, then *hard links* are created from # where the PDF should be for bill text to where the PDF has # been downloaded in the fdsys directory. # # If --extracttext is given, then the pdf is converted to text # using "pdftotext -layout" and they are stored in files like # data/82/bills/hr/hr1/text-versions/enr/document.txt. They are # UTF-8 encoded and have form-feed characters marking page breaks. # # Examples: # ./run statutes --volume=65 # ./run statutes --volumes=65-86 # ./run statutes --year=1951 # ./run statutes --years=1951-1972 # Processes just the indicated volume or range of volumes. # Starting with the 93rd Congress (1973-1974, corresponding # to volume 78 of the Statutes of Large), we have bill # data from THOMAS. Be careful not to overwrite those files. # # With bill text missing from THOMAS/GPO from the 93rd to # 102nd Congresses, fill in the text-versions files like so: # ./run statutes --volumes=87-106 --textversions import logging import time import datetime from lxml import etree import glob import json import os.path import subprocess import utils import bill_info import bill_versions import fdsys def run(options): root_dir = utils.data_dir() + '/fdsys/STATUTE' if "volume" in options: to_fetch = glob.glob(root_dir + "/*/STATUTE-" + str(int(options["volume"]))) elif "volumes" in options: start, end = options["volumes"].split("-") to_fetch = [] for v in xrange(int(start), int(end) + 1): to_fetch.extend(glob.glob(root_dir + "/*/STATUTE-" + str(v))) elif "year" in options: to_fetch = glob.glob(root_dir + "/" + str(int(options["year"])) + "/STATUTE-*") elif "years" in options: start, end = options["years"].split("-") to_fetch = [] for y in xrange(int(start), int(end) + 1): to_fetch.extend(glob.glob(root_dir + "/" + str(y) + "/STATUTE-*")) else: to_fetch = sorted(glob.glob(root_dir + "/*/STATUTE-*")) logging.warn("Going to process %i volumes" % len(to_fetch)) utils.process_set(to_fetch, proc_statute_volume, options) def proc_statute_volume(path, options): mods = etree.parse(path + "/mods.xml") mods_ns = {"mods": "http://www.loc.gov/mods/v3"} # Load the THOMAS committee names for this Congress, which is our best # bet for normalizing committee names in the GPO data. 
congress = mods.find("/mods:extension[2]/mods:congress", mods_ns).text utils.fetch_committee_names(congress, options) logging.warn("Processing %s (Congress %s)" % (path, congress)) package_id = mods.find("/mods:extension[2]/mods:accessId", mods_ns).text for bill in mods.findall("/mods:relatedItem", mods_ns): # MODS files also contain information about: # ['BACKMATTER', 'FRONTMATTER', 'CONSTAMEND', 'PROCLAMATION', 'REORGPLAN'] if bill.find("mods:extension/mods:granuleClass", mods_ns).text not in ["PUBLICLAW", "PRIVATELAW", "HCONRES", "SCONRES"]: continue # Get the title and source URL (used in error messages). title_text = bill.find("mods:titleInfo/mods:title", mods_ns).text.replace('""', '"') source_url = bill.find("mods:location/mods:url[@displayLabel='Content Detail']", mods_ns).text # Bill number bill_elements = bill.findall("mods:extension/mods:bill[@priority='primary']", mods_ns) if len(bill_elements) == 0: logging.error("No bill number identified for '%s' (%s)" % (title_text, source_url)) continue elif len(bill_elements) > 1: logging.error("Multiple bill numbers identified for '%s'" % title_text) for be in bill_elements: logging.error(" -- " + etree.tostring(be).strip()) logging.error(" @ " + source_url) continue else: bill_congress = bill_elements[0].attrib["congress"] bill_type = bill_elements[0].attrib["type"].lower() bill_number = bill_elements[0].attrib["number"] bill_id = "%s%s-%s" % (bill_type, bill_number, bill_congress) # Title titles = [] titles.append({ "title": title_text, "as": "enacted", "type": "official", "is_for_portion": False, }) # Subject descriptor = bill.find("mods:extension/mods:descriptor", mods_ns) if descriptor is not None: subject = descriptor.text else: subject = None # Committees committees = [] cong_committee = bill.find("mods:extension/mods:congCommittee", mods_ns) if cong_committee is not None: chambers = {"H": "House", "S": "Senate", "J": "Joint"} committee = chambers[cong_committee.attrib["chamber"]] + " " + cong_committee.find("mods:name", mods_ns).text committee_info = { "committee": committee, "activity": [], # XXX "committee_id": utils.committee_names[committee] if committee in utils.committee_names else None, } committees.append(committee_info) # The 'granuleDate' is the enactment date? granule_date = bill.find("mods:extension/mods:granuleDate", mods_ns).text sources = [{ "source": "statutes", "package_id": package_id, "access_id": bill.find("mods:extension/mods:accessId", mods_ns).text, "source_url": source_url, "volume": bill.find("mods:extension/mods:volume", mods_ns).text, "page": bill.find("mods:part[@type='article']/mods:extent[@unit='pages']/mods:start", mods_ns).text, "position": bill.find("mods:extension/mods:pagePosition", mods_ns).text, }] law_elements = bill.findall("mods:extension/mods:law", mods_ns) # XXX: If <law> is missing, this assumes it is a concurrent resolution. # This may be a problem if the code is updated to accept joint resolutions for constitutional amendments. 
if (law_elements is None) or (len(law_elements) != 1): other_chamber = {"HOUSE": "s", "SENATE": "h"} actions = [{ "type": "vote", "vote_type": "vote2", "where": other_chamber[bill.find("mods:extension/mods:originChamber", mods_ns).text], "result": "pass", # XXX "how": "unknown", # XXX # "text": "", "acted_at": granule_date, # XXX "status": "PASSED:CONCURRENTRES", "references": [], # XXX }] else: law_congress = law_elements[0].attrib["congress"] law_number = law_elements[0].attrib["number"] law_type = ("private" if (law_elements[0].attrib["isPrivate"] == "true") else "public") # Check for typos in the metadata. if law_congress != bill_congress: logging.error("Congress mismatch for %s%s: %s or %s? (%s)" % (bill_type, bill_number, bill_congress, law_congress, source_url)) continue actions = [{ "congress": law_congress, "number": law_number, "type": "enacted", "law": law_type, "text": "Became %s Law No: %s-%s." % (law_type.capitalize(), law_congress, law_number), "acted_at": granule_date, # XXX "status": "ENACTED:SIGNED", # XXX: Check for overridden vetoes! "references": [], # XXX }] status, status_date = bill_info.latest_status(actions) bill_data = { 'bill_id': bill_id, 'bill_type': bill_type, 'number': bill_number, 'congress': bill_congress, 'introduced_at': None, # XXX 'sponsor': None, # XXX 'cosponsors': [], # XXX 'actions': actions, # XXX 'history': bill_info.history_from_actions(actions), 'status': status, 'status_at': status_date, 'enacted_as': bill_info.slip_law_from(actions), 'titles': titles, 'official_title': bill_info.current_title_for(titles, "official"), 'short_title': bill_info.current_title_for(titles, "short"), # XXX 'popular_title': bill_info.current_title_for(titles, "popular"), # XXX 'subjects_top_term': subject, 'subjects': [], 'related_bills': [], # XXX: <associatedBills> usually only lists the current bill. 'committees': committees, 'amendments': [], # XXX 'sources': sources, 'updated_at': datetime.datetime.fromtimestamp(time.time()), } if not options.get('textversions', False): bill_info.output_bill(bill_data, options) # XXX: Can't use bill_versions.fetch_version() because it depends on fdsys. version_code = "enr" bill_version_id = "%s%s-%s-%s" % (bill_type, bill_number, bill_congress, version_code) bill_version = { 'bill_version_id': bill_version_id, 'version_code': version_code, 'issued_on': status_date, 'urls': {"pdf": bill.find("mods:location/mods:url[@displayLabel='PDF rendition']", mods_ns).text}, 'sources': sources, } utils.write( json.dumps(bill_version, sort_keys=True, indent=2, default=utils.format_datetime), bill_versions.output_for_bill_version(bill_version_id) ) # Process the granule PDF. # - Hard-link it into the right place to be seen as bill text. # - Run "pdftotext -layout" to convert it to plain text and save it in the bill text location. pdf_file = path + "/" + sources[0]["access_id"] + "/document.pdf" if os.path.exists(pdf_file): dst_path = fdsys.output_for_bill(bill_data["bill_id"], "text-versions/" + version_code, is_data_dot=False) if options.get("linkpdf", False): os.link(pdf_file, dst_path + "/document.pdf") # a good idea if options.get("extracttext", False): logging.error("Running pdftotext on %s..." % pdf_file) if subprocess.call(["pdftotext", "-layout", pdf_file, dst_path + "/document.txt"]) != 0: raise Exception("pdftotext failed on %s" % pdf_file) return {'ok': True, 'saved': True}
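# Programmatic invocation sketch, equivalent to
# "./run statutes --volumes=87-106 --textversions".  It assumes this module is
# importable (the import path below is hypothetical) and that the STATUTE mods
# files have already been fetched with "./run fdsys --collections=STATUTE".
def _statutes_run_sketch():
    import statutes  # hypothetical import path for this module

    options = {
        "volumes": "87-106",   # expanded to volumes 87 through 106 via glob
        "textversions": True,  # only write the text-versions data.json files
    }
    statutes.run(options)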
# Copyright 2015 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_service import periodic_task from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.guestagent.datastore.experimental.db2 import service from trove.guestagent import dbaas from trove.guestagent import volume LOG = logging.getLogger(__name__) CONF = cfg.CONF MANAGER = CONF.datastore_manager class Manager(periodic_task.PeriodicTasks): """ This is DB2 Manager class. It is dynamically loaded based off of the datastore of the Trove instance. """ def __init__(self): self.appStatus = service.DB2AppStatus() self.app = service.DB2App(self.appStatus) self.admin = service.DB2Admin() super(Manager, self).__init__(CONF) @periodic_task.periodic_task def update_status(self, context): """ Updates the status of DB2 Trove instance. It is decorated with perodic task so it is automatically called every 3 ticks. """ self.appStatus.update() def rpc_ping(self, context): LOG.debug("Responding to RPC ping.") return True def prepare(self, context, packages, databases, memory_mb, users, device_path=None, mount_point=None, backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None, snapshot=None): """ This is called when the Trove instance first comes online. It is the first rpc message passed from the task manager. prepare handles all the base configuration of the DB2 instance. """ LOG.debug("Preparing the guest agent for DB2.") self.appStatus.begin_install() if device_path: device = volume.VolumeDevice(device_path) device.unmount_device(device_path) device.format() device.mount(mount_point) LOG.debug('Mounted the volume.') self.app.change_ownership(mount_point) self.app.start_db() if databases: self.create_database(context, databases) if users: self.create_user(context, users) self.update_status(context) self.app.complete_install_or_restart() LOG.info(_('Completed setup of DB2 database instance.')) def restart(self, context): """ Restart this DB2 instance. This method is called when the guest agent gets a restart message from the taskmanager. """ LOG.debug("Restart a DB2 server instance.") self.app.restart() def stop_db(self, context, do_not_start_on_reboot=False): """ Stop this DB2 instance. This method is called when the guest agent gets a stop message from the taskmanager. """ LOG.debug("Stop a given DB2 server instance.") self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def get_filesystem_stats(self, context, fs_path): """Gets the filesystem stats for the path given.""" LOG.debug("Get the filesystem stats.") mount_point = CONF.get( 'db2' if not MANAGER else MANAGER).mount_point return dbaas.get_filesystem_volume_stats(mount_point) def create_database(self, context, databases): LOG.debug("Creating database(s)." % databases) self.admin.create_database(databases) def delete_database(self, context, database): LOG.debug("Deleting database %s." 
% database) return self.admin.delete_database(database) def list_databases(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing all databases.") return self.admin.list_databases(limit, marker, include_marker) def create_user(self, context, users): LOG.debug("Create user(s).") self.admin.create_user(users) def delete_user(self, context, user): LOG.debug("Delete a user %s." % user) self.admin.delete_user(user) def get_user(self, context, username, hostname): LOG.debug("Show details of user %s." % username) return self.admin.get_user(username, hostname) def list_users(self, context, limit=None, marker=None, include_marker=False): LOG.debug("List all users.") return self.admin.list_users(limit, marker, include_marker) def list_access(self, context, username, hostname): LOG.debug("List all the databases the user has access to.") return self.admin.list_access(username, hostname) def mount_volume(self, context, device_path=None, mount_point=None): device = volume.VolumeDevice(device_path) device.mount(mount_point, write_to_fstab=False) LOG.debug("Mounted the device %s at the mount point %s." % (device_path, mount_point)) def unmount_volume(self, context, device_path=None, mount_point=None): device = volume.VolumeDevice(device_path) device.unmount(mount_point) LOG.debug("Unmounted the device %s from the mount point %s." % (device_path, mount_point)) def resize_fs(self, context, device_path=None, mount_point=None): device = volume.VolumeDevice(device_path) device.resize_fs(mount_point) LOG.debug("Resized the filesystem %s." % mount_point) def start_db_with_conf_changes(self, context, config_contents): LOG.debug("Starting DB2 with configuration changes.") self.app.start_db_with_conf_changes(config_contents) def grant_access(self, context, username, hostname, databases): LOG.debug("Granting acccess.") raise exception.DatastoreOperationNotSupported( operation='grant_access', datastore=MANAGER) def revoke_access(self, context, username, hostname, database): LOG.debug("Revoking access.") raise exception.DatastoreOperationNotSupported( operation='revoke_access', datastore=MANAGER) def reset_configuration(self, context, configuration): """ Currently this method does nothing. This method needs to be implemented to enable rollback of flavor-resize on guestagent side. 
""" LOG.debug("Resetting DB2 configuration.") pass def change_passwords(self, context, users): LOG.debug("Changing password.") raise exception.DatastoreOperationNotSupported( operation='change_passwords', datastore=MANAGER) def update_attributes(self, context, username, hostname, user_attrs): LOG.debug("Updating database attributes.") raise exception.DatastoreOperationNotSupported( operation='update_attributes', datastore=MANAGER) def enable_root(self, context): LOG.debug("Enabling root.") raise exception.DatastoreOperationNotSupported( operation='enable_root', datastore=MANAGER) def is_root_enabled(self, context): LOG.debug("Checking if root is enabled.") raise exception.DatastoreOperationNotSupported( operation='is_root_enabled', datastore=MANAGER) def _perform_restore(self, backup_info, context, restore_location, app): raise exception.DatastoreOperationNotSupported( operation='_perform_restore', datastore=MANAGER) def create_backup(self, context, backup_info): LOG.debug("Creating backup.") raise exception.DatastoreOperationNotSupported( operation='create_backup', datastore=MANAGER) def get_config_changes(self, cluster_config, mount_point=None): LOG.debug("Get configuration changes") raise exception.DatastoreOperationNotSupported( operation='get_configuration_changes', datastore=MANAGER)
""" Visualization of :mod:`time series <pySPACE.resources.data_types.time_series>` based on EEG signals to combine it with mapping to real sensor positions """ import logging import os, pylab, numpy, warnings from pySPACE.missions.nodes.visualization.base import VisualizationBase from pySPACE.resources.dataset_defs.stream import StreamDataset from matplotlib.mlab import griddata class ElectrodeCoordinationPlotNode(VisualizationBase): """ Node for plotting EEG time series as topographies. This node uses time series data and plots snapshots of the activity in the brain, i.e. in the electrode configuration space. The node inherits the functionality of the VisualisationBase. Therefore, see documentation of :mod:`VisualisationBase <pySPACE.missions.nodes.visualization.base>` to view basic functionality. **Parameters** :Layout Options: :contourlines: If set to True, contour lines are added to the plot. (*optional, default: False*) :nose_ears: Mark nose as triangle and ears as bars (common for EEG plots) (*optional, default: False*) :smooth_corners: If true, the generated graphics will be rectangular, i.e., the corners around the round head shape are filled. This is achieved by adding sham electrodes to the corners, who's signals are calculated as the means of neighboring electrodes. .. note:: This is only for visualization purposes and does not reflect the true data! Be careful with use in scientific publications! (*optional, default: False*) :add_info: If set to True, additional information (e.g. number of trial) is displayed. (*optional, default: False*) :figlabels: If this option is True, channel names and channel coordinates. (*optional, default: False*) :figtitle: The title that is displayed. If None, the class name is used. (*optional, default: None*) :single_plot: All results per window are plotted into one figure with columns being classes and rows being time points. (*optional, default: False*) :Influence the way how the data is plotted: :clip: If set to True, the values are clipped to maximum and minimum defined by parameter limits. This is only working if limits are defined. (*optional, default: False*) :limits: Here, the user can set the limits for the color in the contour plot (e.g. [-1.0,1.0]). If this option is not set, the colorbar is normalized according to the data. (*optional, default: False*) **Exemplary Call** .. code-block:: yaml - node : Time_Series_Source - node : Electrode_Coordination_Plot parameters : figlabels : True create_movie : True time_stamps : [200, 400] timeshift : -200 smooth_corners : False - node: Nil_Sink Here is an alternative call: .. code-block:: yaml - node : Electrode_Coordination_Plot parameters : single_trial : True accum_avg : True separate_training_and_test : True add_info : True time_stamps : [400] limit2class : Target :Author: Sirko Straube (sirko.straube@dfki.de) :Date of Last Revision: 2013/01/01 .. todo:: Depending on which plot backend is used, the resizing of the figure does not work well. Currently this is not well supported by matplotlib. Change when support is improved. The key command is fig.set_size_inches([a,b]) and the dpi property. 
""" input_types = ["TimeSeries"] def __init__(self, clip=False, contourlines=False, limits=False, nose_ears=False, smooth_corners=False, add_info=False, single_plot=False, figlabels=False, figtitle=None, **kwargs): super(ElectrodeCoordinationPlotNode, self).__init__(**kwargs) if limits: limits = [float(i) for i in limits] #make sure limits consists of floats # define electrode grid xi = numpy.linspace(-125, 125, 200) yi = numpy.linspace(-100, 100, 200) self.set_permanent_attributes(xi=xi, yi=yi, clip=clip, contourlines=contourlines, limits=limits, nose_ears=nose_ears, smooth_corners=smooth_corners, add_info=add_info, single_plot=single_plot, figlabels=figlabels, figtitle=figtitle, time_checked=False) def _plotValues(self, values, #dict TimeSeries values plot_label, #str Plot-Label fig_num, #int Figure-number for classify plots # 1: average, # 2: single trial, # 3: average accumulating store_dir = None, #str Directory to store the plots counter=0): #int Plotcounter for all trials #compute sampling_frequency and classes to plot sampling_frequency = values[values.keys()[0]].sampling_frequency list_of_classes = values.keys() num_of_classes = len(list_of_classes) #autoscale color bar or use user scaling if self.limits: levels = self._compute_levels() else: levels = None #compute maximum and minimum for colorbar scaling if not existing vmax = float(max(numpy.array(v).max() for v in values.itervalues())) vmin = float(min(numpy.array(v).min() for v in values.itervalues())) levels = self._compute_levels(limits=[vmin, vmax]) #normalizer=matplotlib.colors.Normalize(vmin=vmin, vmax=vmax) #computing time points to show num_tpoints = values.values()[0].shape[0] all_tpoints = numpy.arange(0, num_tpoints * (1000/sampling_frequency), 1000 / sampling_frequency ) + self.timeshift if self.time_stamps == [-1]: tpoints = all_tpoints else: #check if desired time points are existing and confirm if not self.time_checked: for t in self.time_stamps: if not t in all_tpoints: warnings.warn("Electrode_Coordination_Plot:: At least" \ " one desired time stamp not available!" \ " Legal time stamps are " \ + str(all_tpoints) + ". Switching to " \ "next legal time point. Please check " \ "for consistency!") if t < 0: new_t = self.timeshift else: new_t = range(0, t+1, int(1000/sampling_frequency))[-1] #if we obtain an empty list reset to timeshift if new_t == []: new_t = self.timeshift else: new_t = new_t+self.timeshift #finally check for too high or low values if new_t < self.timeshift: new_t = self.timeshift elif new_t > all_tpoints[-1]: new_t = all_tpoints[-1] self.time_stamps[self.time_stamps.index(t)] = new_t self.time_checked = True #has to be performed only once tpoints = numpy.array(self.time_stamps) num_of_tpoints = len(tpoints) # selecting formatting and clearing figure default_size = [8., 6.] 
if self.single_plot: num_of_rows = num_of_tpoints if num_of_rows > 4: default_size[0] = default_size[0]/(int((num_of_rows+3)/4)) default_size[1] = default_size[1]/(int((num_of_rows+3)/4)) else: num_of_rows = 1 f=pylab.figure(fig_num, figsize=[default_size[0]*num_of_classes, default_size[1]*num_of_rows]) if pylab.get_backend() in pylab.matplotlib.backends.interactive_bk: f.show() if counter%20 == 19: #clear every 20th trial pylab.figure(fig_num).clear() # Iterate over the time window for time_index in range(num_of_tpoints): pylab.subplots_adjust(left=0.025, right=0.8) #shift a bit to the left if self.single_plot: pl_offset=time_index else: pl_offset=0 for index, class_label in enumerate(list_of_classes): current_plot_num=(num_of_classes*pl_offset)+index+1 pylab.subplot(num_of_rows, num_of_classes, current_plot_num) pylab.gca().clear() # Get the values for the respective class data = values[class_label].view(numpy.ndarray) ec = self.get_metadata("electrode_coordinates") if ec is None: ec = StreamDataset.ec # observe channels channel_names = [channel for channel in values[class_label].channel_names if channel in ec.keys()] fcn = [channel for channel in values[class_label].channel_names if not channel in ec.keys()] if not fcn == []: self._log("Unsupported channels ignored:%s ."%str(fcn), level=logging.CRITICAL) if channel_names == []: self._log("No channel for plotting left.", level=logging.CRITICAL) return ec_2d = StreamDataset.project2d(ec) # Define x and y coordinates of electrodes in the order of # the channels of the data x = numpy.array([ec_2d[key][0] for key in channel_names]) y = numpy.array([ec_2d[key][1] for key in channel_names]) # The values of the electrodes at this point of time pos=list(all_tpoints).index(tpoints[time_index]) z = data[pos, :] if self.smooth_corners: x,y,z = self._smooth_corners(x,y,z, data, channel_names, pos) # griddata returns a masked array # you can get the data via zi[~zi.mask] try: zi = griddata(x, y, z, self.xi, self.yi) except RuntimeError: warnings.warn( "Natbib packackage is not available for interpolating a" " grid. Using linear interpolation instead.") zi = griddata(x, y, z, self.xi, self.yi, interpl='linear') # clip values if self.clip and self.limits: # minimum and maximum zi = numpy.clip(zi, self.limits[0], self.limits[1]) # contour the gridded data, # plotting dots at the nonuniform data points. cs=pylab.contourf(self.xi, self.yi, zi, 15, cmap=pylab.cm.jet, levels=levels) if self.contourlines: pylab.contour(self.xi, self.yi, zi, 15, linewidths=0.5, colors='k', levels=levels) if self.figlabels: # plot data points. if not self.smooth_corners: pylab.scatter(x, y, c='b', s=5, marker='o') else: # dont plot invented electrode positions pylab.scatter(x[:-4], y[:-4], c='b', s=5, marker='o') # Add channel labels for label, position in ec_2d.iteritems(): if label in channel_names: pylab.text(position[0], position[1], label) if self.add_info: if counter: if len(list_of_classes) > 1: if index == 0: pylab.text(-120, -98, 'Trial No. ' + str(counter), fontsize=12) else: pylab.text(-120, -98, 'Trial No. 
' + str(counter), fontsize=12) if self.nose_ears: #nose ytips=[87.00,87.00, 97] xtips=[-10.00,10.00, 0] pylab.fill(xtips,ytips, facecolor='k', edgecolor='none') #left xtips=[-108.0,-113.0,-113.0,-108.0] ytips=[-10.0,-10.0,10.0,10.0] pylab.fill(xtips,ytips, facecolor='k', edgecolor='none') #right xtips=[108.0,114.0,113.0,108.0] ytips=[-10.0,-10.0,10.0,10.0] pylab.fill(xtips,ytips, facecolor='k', edgecolor='none') pylab.xlim(-125, 125) pylab.ylim(-100, 100) if not self.single_plot or time_index==0: #if single_plot=True do only for the first row if self.figtitle: pylab.title(self.figtitle, fontsize=20) else: pylab.title(class_label, fontsize=20) pylab.setp(pylab.gca(), xticks=[], yticks=[]) pylab.draw() caxis = pylab.axes([.85, .1, .04, .75]) cb = pylab.colorbar(mappable=cs, cax=caxis) # TODO: The label read 'Amplitude ($\mu$V)' # Removed the unit. Or can we really still assume # a (correct) \muV scale after all preprocessing? cb.ax.set_ylabel(r'Amplitude', fontsize=16) # Position of the time axes ax = pylab.axes([.79, .94, .18, .04]) pylab.gca().clear() pylab.bar(tpoints[time_index], 1.0, width=1000.0/sampling_frequency) pylab.xlim(tpoints[0], tpoints[-1]) pylab.xlabel("time (ms)", fontsize=12) pylab.setp(ax, yticks=[],xticks=[all_tpoints[0], tpoints[time_index], all_tpoints[-1]]) # Draw or store the figure if store_dir is None: pylab.draw() #pylab.show() elif self.single_plot and not current_plot_num==(num_of_rows*num_of_classes): #save only if everything is plotted pylab.draw() #pylab.show() else: current_split=self.current_split if current_split != 0 and not\ plot_label.endswith('_split_' + str(current_split)): #more than one split and first call plot_label = plot_label + '_split_' + str(current_split) f_name=str(store_dir) + str(os.sep) + str(plot_label) + "_" + str(int(tpoints[time_index])) pylab.savefig(f_name + ".png") if self.store_data: import pickle f_name=str(store_dir) + str(os.sep) + str(plot_label) pickle.dump(values, open(f_name + ".pickle",'w')) def _smooth_corners(self, x, y, z, data, channel_names, time_index): """ Add sham electrodes to the corners of the coordinate system """ # invent new corner electrodes using x and y positions of the margin # electrodes of the 64 electrode cap. Data is mean of neighbouring # electrodes based, again, on the 64 electrode cap. 
# # frontleft FL positioned at [x(FT9), y(Fp1)] x=numpy.append(x,x[numpy.where(numpy.array(channel_names)=='FT9')]) y=numpy.append(y,y[numpy.where(numpy.array(channel_names)=='Fp1')]) nz = data[time_index, numpy.where(numpy.array(channel_names)=='Fp1')] +\ data[time_index, numpy.where(numpy.array(channel_names)=='AF7')] +\ data[time_index, numpy.where(numpy.array(channel_names)=='F7')] +\ data[time_index, numpy.where(numpy.array(channel_names)=='FT9')] z=numpy.append(z, 0.1 * nz / 4.0) # frontright FR positioned at [x(FT10), y(Fp1)] x=numpy.append(x,x[numpy.where(numpy.array(channel_names)=='FT10')]) y=numpy.append(y,y[numpy.where(numpy.array(channel_names)=='Fp1')]) nz = data[time_index, numpy.where(numpy.array(channel_names)=='Fp2')] +\ data[time_index, numpy.where(numpy.array(channel_names)=='AF8')] +\ data[time_index, numpy.where(numpy.array(channel_names)=='F8')] +\ data[time_index, numpy.where(numpy.array(channel_names)=='FT10')] z=numpy.append(z, 0.1 * nz / 4.0) # backleft BL positioned at [x(FT9), y(Oz)] x=numpy.append(x,x[numpy.where(numpy.array(channel_names)=='FT9')]) y=numpy.append(y,y[numpy.where(numpy.array(channel_names)=='Oz')]) nz = data[time_index, numpy.where(numpy.array(channel_names)=='TP9')] +\ data[time_index, numpy.where(numpy.array(channel_names)=='P7')] +\ data[time_index, numpy.where(numpy.array(channel_names)=='PO9')] z=numpy.append(z, 0.1 * nz / 4.0) # backright BR positioned at [x(FT10), y(Oz)] x=numpy.append(x,x[numpy.where(numpy.array(channel_names)=='FT10')]) y=numpy.append(y,y[numpy.where(numpy.array(channel_names)=='Oz')]) nz = data[time_index, numpy.where(numpy.array(channel_names)=='TP10')] +\ data[time_index, numpy.where(numpy.array(channel_names)=='P8')] +\ data[time_index, numpy.where(numpy.array(channel_names)=='PO10')] z=numpy.append(z, 0.1 * nz / 4.0) return x,y,z def _compute_levels(self, limits=None): if not limits: if self.limits: limits=self.limits else: #should never be reached return None rel_precision = int(('%1.e'%limits[0])[-3:]) #determine the precision of the values epsilon=pow(10, rel_precision-5) #add a small amount to make sure that clipping works step = (limits[1]-limits[0])/100 #split into 100 steps levels=numpy.arange(limits[0],limits[1]+epsilon,step) levels=numpy.round(levels, decimals=abs(rel_precision)+2) levels[0]=levels[0]-epsilon levels[-1]=levels[-1]+epsilon return levels _NODE_MAPPING = {"Electrode_Coordination_Plot": ElectrodeCoordinationPlotNode}
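# Direct-instantiation sketch of the node above; in practice the node is
# configured from a YAML node chain as shown in the exemplary calls of the
# class docstring.  The keyword values are illustrative, and it is assumed
# that the VisualizationBase defaults (time_stamps, timeshift, ...) suffice.
def _electrode_plot_node_sketch():
    node = ElectrodeCoordinationPlotNode(
        figlabels=True,
        contourlines=True,
        nose_ears=True,
        limits=[-5.0, 5.0],  # fixed colour scale ...
        clip=True,           # ... with values clipped to it
    )
    return node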
#!/usr/bin/env python """ github_buildbot.py is based on git_buildbot.py github_buildbot.py will determine the repository information from the JSON HTTP POST it receives from github.com and build the appropriate repository. If your github repository is private, you must add a ssh key to the github repository for the user who initiated the build on the buildslave. """ import tempfile import logging import os import re import sys import traceback from twisted.web import server, resource from twisted.internet import reactor from twisted.spread import pb from twisted.cred import credentials from optparse import OptionParser try: import json except ImportError: import simplejson as json class GitHubBuildBot(resource.Resource): """ GitHubBuildBot creates the webserver that responds to the GitHub Service Hook. """ isLeaf = True master = None port = None def render_POST(self, request): """ Reponds only to POST events and starts the build process :arguments: request the http request object """ try: payload = json.loads(request.args['payload'][0]) user = payload['repository']['owner']['name'] repo = payload['repository']['name'] repo_url = payload['repository']['url'] self.private = payload['repository']['private'] project = request.args.get('project', None) if project: project = project[0] logging.debug("Payload: " + str(payload)) self.process_change(payload, user, repo, repo_url, project) except Exception: logging.error("Encountered an exception:") for msg in traceback.format_exception(*sys.exc_info()): logging.error(msg.strip()) def process_change(self, payload, user, repo, repo_url, project): """ Consumes the JSON as a python object and actually starts the build. :arguments: payload Python Object that represents the JSON sent by GitHub Service Hook. """ changes = [] newrev = payload['after'] refname = payload['ref'] # We only care about regular heads, i.e. branches match = re.match(r"^refs\/heads\/(.+)$", refname) if not match: logging.info("Ignoring refname `%s': Not a branch" % refname) branch = match.group(1) # Find out if the branch was created, deleted or updated. Branches # being deleted aren't really interesting. if re.match(r"^0*$", newrev): logging.info("Branch `%s' deleted, ignoring" % branch) else: for commit in payload['commits']: files = [] files.extend(commit['added']) files.extend(commit['modified']) files.extend(commit['removed']) change = {'revision': commit['id'], 'revlink': commit['url'], 'comments': commit['message'], 'branch': branch, 'who': commit['author']['name'] + " <" + commit['author']['email'] + ">", 'files': files, 'links': [commit['url']], 'repository': repo_url, 'project': project, } changes.append(change) # Submit the changes, if any if not changes: logging.warning("No changes found") return host, port = self.master.split(':') port = int(port) factory = pb.PBClientFactory() deferred = factory.login(credentials.UsernamePassword("change", "changepw")) reactor.connectTCP(host, port, factory) deferred.addErrback(self.connectFailed) deferred.addCallback(self.connected, changes) def connectFailed(self, error): """ If connection is failed. Logs the error. """ logging.error("Could not connect to master: %s" % error.getErrorMessage()) return error def addChange(self, dummy, remote, changei): """ Sends changes from the commit to the buildmaster. 
""" logging.debug("addChange %s, %s" % (repr(remote), repr(changei))) try: change = changei.next() except StopIteration: remote.broker.transport.loseConnection() return None logging.info("New revision: %s" % change['revision'][:8]) for key, value in change.iteritems(): logging.debug(" %s: %s" % (key, value)) deferred = remote.callRemote('addChange', change) deferred.addCallback(self.addChange, remote, changei) return deferred def connected(self, remote, changes): """ Reponds to the connected event. """ return self.addChange(None, remote, changes.__iter__()) def main(): """ The main event loop that starts the server and configures it. """ usage = "usage: %prog [options]" parser = OptionParser(usage) parser.add_option("-p", "--port", help="Port the HTTP server listens to for the GitHub Service Hook" + " [default: %default]", default=4000, type=int, dest="port") parser.add_option("-m", "--buildmaster", help="Buildbot Master host and port. ie: localhost:9989 [default:" + " %default]", default="localhost:9989", dest="buildmaster") parser.add_option("-l", "--log", help="The absolute path, including filename, to save the log to" + " [default: %default]", default = tempfile.gettempdir() + "/github_buildbot.log", dest="log") parser.add_option("-L", "--level", help="The logging level: debug, info, warn, error, fatal [default:" + " %default]", default='warn', dest="level") parser.add_option("-g", "--github", help="The github server. Changing this is useful if you've specified" + " a specific HOST handle in ~/.ssh/config for github " + "[default: %default]", default='github.com', dest="github") parser.add_option("--pidfile", help="Write the process identifier (PID) to this file on start." + " The file is removed on clean exit. [default: %default]", default=None, dest="pidfile") (options, _) = parser.parse_args() if options.pidfile: with open(options.pidfile, 'w') as f: f.write(str(os.getpid())) levels = { 'debug':logging.DEBUG, 'info':logging.INFO, 'warn':logging.WARNING, 'error':logging.ERROR, 'fatal':logging.FATAL, } filename = options.log log_format = "%(asctime)s - %(levelname)s - %(message)s" logging.basicConfig(filename=filename, format=log_format, level=levels[options.level]) github_bot = GitHubBuildBot() github_bot.github = options.github github_bot.master = options.buildmaster site = server.Site(github_bot) reactor.listenTCP(options.port, site) reactor.run() if options.pidfile and os.path.exists(options.pidfile): os.unlink(options.pidfile) if __name__ == '__main__': main()