| repo_name (string, lengths 5-92) | path (string, lengths 4-221) | copies (19 classes) | size (string, lengths 4-6) | content (string, lengths 766-896k) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
kebrister/lscat-blcontrols
|
pmac/mk_pgpmac_redis.py
|
1
|
36959
|
#! /usr/bin/python
# coding=utf-8
import sys
import iniParser
import datetime
if len(sys.argv) <= 1:
print >> sys.stderr, "Usage: %s headOfRedisVariableNames [prefIniFileName [hardIniFileName]]" % (sys.argv[0],)
sys.exit(-1)
if len(sys.argv) > 1:
head = sys.argv[1]
if len(sys.argv) > 2:
pref_ini = sys.argv[2]
else:
pref_ini = None
if len(sys.argv) > 3:
hard_ini = sys.argv[3]
else:
hard_ini = None
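# The script emits plain redis commands (HMSET/HSET/HSETNX/PUBLISH) on stdout;
# presumably the output is piped into redis-cli, roughly like this (the ini
# file names here are illustrative, not taken from the repository):
#
#   ./mk_pgpmac_redis.py stns.2 preferences.ini microdiff_hard.ini | redis-cli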
configs = {
"orange-2" : { "re" : "redis\.kvseq|stns\.2\.(.+)", "head" : "stns.2", "pub" : "MD2-21-ID-E", "pg" : "1", "autoscint" : "1"},
"orange-2.ls-cat.org" : { "re" : "redis\.kvseq|stns\.2\.(.+)", "head" : "stns.2", "pub" : "MD2-21-ID-E", "pg" : "1", "autoscint" : "1"},
"venison.ls-cat.org" : { "re" : "redis\.kvseq|stns\.2\.(.+)", "head" : "stns.2", "pub" : "MD2-21-ID-E", "pg" : "1", "autoscint" : "1"},
"mung-2" : { "re" : "redis\.kvseq|stns\.1\.(.+)", "head" : "stns.1", "pub" : "MD2-21-ID-D", "pg" : "1", "autoscint" : "1"},
"mung-2.ls-cat.org" : { "re" : "redis\.kvseq|stns\.1\.(.+)", "head" : "stns.1", "pub" : "MD2-21-ID-D", "pg" : "1", "autoscint" : "1"},
"vidalia.ls-cat.org" : { "re" : "redis\.kvseq|stns\.1\.(.+)", "head" : "stns.1", "pub" : "MD2-21-ID-D", "pg" : "1", "autoscint" : "1"},
}
plcc2_dict = {
"omega" : { "status1" : "M5001", "status2" : "M5021", "position" : "M5041"},
"align.x" : { "status1" : "M5002", "status2" : "M5022", "position" : "M5042"},
"align.y" : { "status1" : "M5003", "status2" : "M5023", "position" : "M5043"},
"align.z" : { "status1" : "M5004", "status2" : "M5024", "position" : "M5044"},
"lightPolar" : { "status1" : "M5005", "status2" : "M5025", "position" : "M5045"},
"cam.zoom" : { "status1" : "M5006", "status2" : "M5026", "position" : "M5046"},
"appy" : { "status1" : "M5007", "status2" : "M5027", "position" : "M5047"},
"appz" : { "status1" : "M5008", "status2" : "M5028", "position" : "M5048"},
"capy" : { "status1" : "M5009", "status2" : "M5029", "position" : "M5049"},
"capz" : { "status1" : "M5010", "status2" : "M5030", "position" : "M5050"},
"scint" : { "status1" : "M5011", "status2" : "M5031", "position" : "M5051"},
"centering.x" : { "status1" : "M5012", "status2" : "M5032", "position" : "M5052"},
"centering.y" : { "status1" : "M5013", "status2" : "M5033", "position" : "M5053"},
"kappa" : { "status1" : "M5014", "status2" : "M5034", "position" : "M5054"},
"phi" : { "status1" : "M5015", "status2" : "M5035", "position" : "M5055"}
}
# M5001=M1 ; Omega
# M5002=M2 ; Alignment Table X
# M5003=M3 ; Alignment Table Y
# M5004=M4 ; Alignment Table Z
# M5005=M5 ; Analyser
# M5006=M6 ; Zoom
# M5007=M7 ; Aperture Y
# M5008=M8 ; Aperture Z
# M5009=M9 ; Capillary Y
# M5010=M10 ; Capillary Z
# M5011=M11 ; Scintillator Z
# M5012=M17 ; Center X
# M5013=M18 ; Center Y
# M5014=M19 ; Kappa
# M5015=M20 ; Phi
#
# M5021=M91 ; Omega
# M5022=M92 ; Alignment Table X
# M5023=M93 ; Alignment Table Y
# M5024=M94 ; Alignment Table Z
# M5025=M95 ; Analyser
# M5026=M96 ; Zoom
# M5027=M97 ; Aperture Y
# M5028=M98 ; Aperture Z
# M5029=M99 ; Capillary Y
# M5030=M100 ; Capillary Z
# M5031=M101 ; Scintillator Z
# M5032=M107 ; Center X
# M5033=M108 ; Center Y
# M5034=M109 ; Kappa
# M5035=M110 ; Phi
#
#
# ; Motor actual position
# M5041=(M181/(I108*32)) ; Phi
# M5042=(M182/(I208*32)) ; Table XYZ : X
# M5043=(M183/(I308*32)) ; Table XYZ : Y
# M5044=(M184/(I408*32)) ; Table XYZ : Z
# M5045=(M185/(I508*32)) ; Analyser
# M5046=(M186/(I608*32)) ; Zoom camera
# M5047=(M187/(I708*32)) ; Aperture Y
# M5048=(M188/(I808*32)) ; Aperture Z
# M5049=(M189/(I908*32)) ; Capillary Y
# M5050=(M190/(I1008*32)) ; Capillary Z
# M5051=(M191/(I1108*32)) ; Scintillator Z
# M5052=(M197/(I1708*32)) ; Centring #17
# M5053=(M198/(I1808*32)) ; Centring #18
# M5054=(M199/(I1908*32)) ; Mini Kappa 1
# M5055=(M200/(I2008*32)) ; Mini Kappa 2
#
# M5060=M6000 ; 11C byte 1
# M5061=M6001 ; 11C byte 2
# M5062=M6002 ; 11C byte 3
# M5063=M6003 ; 11C byte 5
# M5064=M6004 ; 11C byte 6
# M5065=M1200 ; Front Light DAC
# M5066=M1201 ; Back Light DAC
# M5067=M1203 ; Scintillator Piezo
# ;***************** Motor Status 1,Limits,Open loop *****************************
# ;PMAC side
# M1->X:$0B0,24 ; Phi
# M2->X:$130,24 ; Table XYZ : X
# M3->X:$1B0,24 ; Table XYZ : Y
# M4->X:$230,24 ; Table XYZ : Z
# M5->X:$2B0,24 ; Analyser
# M6->X:$330,24 ; Zoom DC Camera
# M7->X:$3B0,24 ; Aperture Y
# M8->X:$430,24 ; Aperture Z
# M9->X:$4B0,24 ; Capillary Y
# M10->X:$530,24 ; Capillary Z
# M11->X:$5B0,24 ; Scintillator Z
# M12->X:$630,24 ; Unused
# M13->X:$6B0,24 ; Unused
# M14->X:$730,24 ; Unused
# M15->X:$7B0,24 ; Unused
# M16->X:$830,24 ; Unused
# M17->X:$8B0,24 ; Centring Table Motor #17
# M18->X:$930,24 ; Centring Table Motor #18
# M19->X:$9B0,24 ; Mini Kappa 1
# M20->X:$A30,24 ; Mini Kappa 2
# M21->X:$AB0,24 ; Unused
# M22->X:$B30,24 ; Unused
# M23->X:$BB0,24 ; Unused
# M24->X:$C30,24 ; Unused
#
# ;open loop status
# M61->x:$0B0,18,1 ; Phi
# M62->x:$130,18,1 ; Table XYZ : X
# M63->x:$1B0,18,1 ; Table XYZ : Y
# M64->x:$230,18,1 ; Table XYZ : Z
# M65->x:$2B0,18,1 ; Analyser
# M66->x:$330,18,1 ; Zoom DC Camera
# M67->x:$3B0,18,1 ; Aperture Y
# M68->x:$430,18,1 ; Aperture Z
# M69->x:$4B0,18,1 ; Capillary Y
# M70->x:$530,18,1 ; Capillary Z
# M71->x:$5B0,18,1 ; Scintillator Z
# M72->x:$630,18,1 ; Unused
# M73->x:$6B0,18,1 ; Unused
# M74->x:$730,18,1 ; Unused
# M75->x:$7B0,18,1 ; Unused
# M76->x:$830,18,1 ; Unused
# M77->x:$8B0,18,1 ; Centring Table Motor X #17
# M78->x:$930,18,1 ; Centring Table Motor Y #18
# M79->x:$9B0,18,1 ; Mini Kappa 1
# M80->x:$A30,18,1 ; Mini Kappa 2
# ; M81->x:$AB0,18,1 ; Unused
# ; M82->x:$B30,18,1 ; Unused
# ; M83->X:$BB0,18,1 ; Unused
# ; M84->X:$C30,18,1 ; Unused
#
# ;*************** Motor Status 2,I2T,Fatal following error **********************
# ;PMAC side
# M91->Y:$0C0,24 ; Phi
# M92->Y:$140,24 ; Table XYZ : X
# M93->Y:$1C0,24 ; Table XYZ : Y
# M94->Y:$240,24 ; Table XYZ : Z
# M95->Y:$2C0,24 ; Analyser
# M96->Y:$340,24 ; Zoom DC Camera
# M97->Y:$3C0,24 ; Aperture Y
# M98->Y:$440,24 ; Aperture Z
# M99->Y:$4C0,24 ; Capillary Y
# M100->Y:$540,24 ; Capillary Z
# M101->Y:$5C0,24 ; Scintillator Z
# M102->Y:$640,24 ; Unused
# M103->Y:$6C0,24 ; Unused
# M104->Y:$740,24 ; Unused
# M105->Y:$7C0,24 ; Unused
# M106->Y:$840,24 ; Unused
# M107->Y:$8C0,24 ; Centring Table Motor #17
# M108->Y:$940,24 ; Centring Table Motor #18
# M109->Y:$9C0,24 ; Mini Kappa 1
# M110->Y:$A40,24 ; Mini Kappa 2
# M111->Y:$AC0,24 ; Unused
# M112->Y:$B40,24 ; Unused
# M113->Y:$BC0,24 ; Unused
# M114->Y:$C40,24 ; Unused
#
# ;**************************** In position status *******************************
# M121->Y:$0C0,0,1 ; Phi
# M122->Y:$140,0,1 ; Table XYZ : X
# M123->Y:$1C0,0,1 ; Table XYZ : Y
# M124->Y:$240,0,1 ; Table XYZ : Z
# M125->Y:$2C0,0,1 ; Analyser
# ; ;M125=1 Patch when Analyser goes really wrong !
# M126->Y:$340,0,1 ; Zoom DC Camera
# M127->Y:$3C0,0,1 ; Aperture Y
# M128->Y:$440,0,1 ; Aperture Z
# M129->Y:$4C0,0,1 ; Capillary Y
# M130->Y:$540,0,1 ; Capillary Z
# M131->Y:$5C0,0,1 ; Scintillator Z
# M132->Y:$640,0,1 ; Unused
# M133->Y:$6C0,0,1 ; Unused
# M134->Y:$740,0,1 ; Unused
# M135->Y:$7C0,0,1 ; Unused
# M136->Y:$840,0,1 ; Unused
# M137->Y:$8C0,0,1 ; Centring Table Motor #17
# M138->Y:$940,0,1 ; Centring Table Motor #18
# M139->Y:$9C0,0,1 ; Mini Kappa 1
# M140->Y:$A40,0,1 ; Mini Kappa 2
# M141->Y:$AC0,0,1 ; Unused
# M142->Y:$B40,0,1 ; Unused
# M143->Y:$BC0,0,1 ; Unused
# M144->Y:$C40,0,1 ; Unused
#
# Bug/Feature: only fields listed in motor_dict will be searched for in the ini file.
#
# Also see the comments for the motor_field_lists list below
#
# motor_dict keys
# motor_num: The pmac motor number, between 1 and 32 inclusive. Leave undefined or set to -1 for DAC and Binary Output motor-like objects.
# coord_num: The coordinate system the motor belongs to, between 1 and 16 inclusive. Leave undefined or 0 for DAC and Binary Output motor-like objects.
# max_accel: counts/msec/msec
# max_speed: counts/msec
# u2c: The conversion between counts and user units: Multiply user units by u2c to get counts. Should never be zero.
# active: 1 if the motor should be set up and used, 0 otherwise
# hard_ini: The section name for this motor in the microdiff_hard.ini file
# moveMode: freeRotation, rotation, or translation (default) used for the LS-CAT GUI
# reference: (omega only) The angle for which centering.y is up and centering.x is positive downstream
# axis: The axis letter for the PMAC in the specified coordinate system (X, Y, Z, etc)
# neutralPosition: The offset in user units between the home position and what we want to call zero
# printf: The printf format string for the position in the ncurses interface (uses a field width specifier *)
# format: The printf format string to update the redis value
# maxPosition: The software upper limit in user units relative to the home position
# minPosition: The software lower limit in user units relative to the home position
# smallStep: Recommended small step value for a user interface
# largeStep: Recommended large step value for a user interface
# update_resolution: Don't update redis until the position has changed by this amount in user units
#
# NOTE: active_init, home, and inactive_init should only be specified if the default string will not serve the purpose,
# as is the case for omega and the polarizer
#
# active_init: A comma separated list of strings (double quoted if spaces present) enclosed in braces to send to the PMAC when the motor is active.
# home: A comma separated list of strings (double quoted if spaces present) enclosed in braces to send to the PMAC to home the motor
# inactive_init: A comma separated list of strings (double quoted if spaces present) enclosed in braces to send to the PMAC when the motor is inactive.
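#
# An illustrative minimal entry (not an actual configured motor) using only the
# fields documented above might look like:
#
#   "example.y" : { "motor_num" : "21", "coord_num" : "6", "axis" : "Y",
#                   "u2c" : "10000", "format" : "%.3f", "active" : "0" },
#
# Entries that omit home, active_init, or inactive_init have those strings
# generated by mk_home(), mk_active_init(), and mk_inactive_init() below.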
motor_dict = {
"omega" : { "motor_num" : "1", "max_accel" : "2", "max_speed" : "1664", "coord_num" : "1", "u2c" : "12800",
"home" : '{"M401=1 M1115=1 #1$",&1E,#1&1B1R}',"active_init" : '{M31=1,&1#1->X,"M700=(M700 | $000001) ^ $000001", M1115=1}',
"inactive_init" : '{M31=0,&1#1->0,"M700=M700 | $000001",M1115=0}',"moveMode" : "freeRotation",
"reference" : "228.5", "format" : "%.3f", "printf" : "%*.4f deg", "axis" : "X",
"hard_ini" : "PHIRotationAxis.PHIMotor", "neutralPosition" : "0", "active" : "1"
},
"align.x" : { "motor_num" : "2", "max_accel" : "2", "max_speed" : "121", "coord_num" : "3", "u2c" : "60620.8",
"smallStep" : "0.001",
"axis" : "X", "format" : "%.3f",
"minPosition" : "0.1", "maxPosition" : "4.0",
"hard_ini" : "PHIAxisXYZTable.PHIXMotor", "neutralPosition" : "0", "active" : "1"
},
"align.y" : { "motor_num" : "3", "max_accel" : "0.5", "max_speed" : "121", "coord_num" : "3", "u2c" : "60620.8",
"smallStep" : "0.001",
"axis" : "Y", "format" : "%.3f",
"minPosition" : "0.16", "maxPosition" : "16.15",
"hard_ini" : "PHIAxisXYZTable.PHIYMotor", "neutralPosition" : "0", "active" : "1"
},
"align.z" : { "motor_num" : "4", "max_accel" : "0.5", "max_speed" : "121", "coord_num" : "3", "u2c" : "60620.8",
"smallStep" : "0.001",
"axis" : "Z", "format" : "%.3f",
"minPosition" : "0.45", "maxPosition" : "5.85",
"hard_ini" : "PHIAxisXYZTable.PHIZMotor", "neutralPosition" : "0", "active" : "1"
},
"lightPolar" : { "motor_num" : "5", "max_accel" : "0.2", "max_speed" : "3", "u2c" : "142", "coord_num" : "0",
"home" : '{#5$,#5HMZ}', "active_init" : '{}', "inactive_init" : '{}',
"largeStep" : "45", "smallStep" : "10", "format" : "%.1f",
"printf" : "%*.1f deg", "update_resolution" : "1",
"hard_ini" : "Analyser.AnalyserMotor", "neutralPosition" : "0", "active" : "1"
},
"cam.zoom" : { "motor_num" : "6","max_accel" : "0.2", "max_speed" : "10", "coord_num" : "4", "u2c" : "1.0",
"smallStep" : "1",
"axis" : "Z","format" : "%.0f",
"minPosition" : "1","update_resolution" : "1",
"hard_ini" : "CoaxZoom.ZoomMotor", "neutralPosition" : "0", "in_position_band" : "1600", "active" : "1"
},
"appy" : { "motor_num" : "7","max_accel" : "1", "max_speed" : "201", "coord_num" : "5", "u2c" : "121241.6",
"smallStep" : "0.002",
"axis" : "Y","format" : "%.3f",
"minPosition" : "0.2","maxPosition" : "3.25",
"hard_ini" : "ApertureYZTable.ApertureYMotor", "neutralPosition" : "0", "active" : "1"
},
"appz" : { "motor_num" : "8","max_accel" : "1", "max_speed" : "201", "coord_num" : "5", "u2c" : "60620.8",
"smallStep" : "0.002",
"axis" : "Z","format" : "%.3f",
"minPosition" : "0.3","maxPosition" : "82.5",
"hard_ini" : "ApertureYZTable.ApertureZMotor", "neutralPosition" : "0", "active" : "1"
},
"capy" : { "motor_num" : "9","max_accel" : "1", "max_speed" : "201", "coord_num" : "5", "u2c" : "121241.6",
"smallStep" : "0.002",
"axis" : "U","format" : "%.3f",
"minPosition" : "0.05","maxPosition" : "3.19",
"hard_ini" : "CapillaryBSYZtable.CapillaryBSYMotor", "neutralPosition" : "0", "active" : "1"
},
"capz" : { "motor_num" : "10","max_accel" : "0.5", "max_speed" : "201", "coord_num" : "5", "u2c" : "19865.6",
"smallStep" : "0.002",
"axis" : "V","format" : "%.3f",
"minPosition" : "0.57","maxPosition" : "81.49",
"hard_ini" : "CapillaryBSYZtable.CapillaryBSZMotor", "neutralPosition" : "0", "active" : "1"
},
"scint" : { "motor_num" : "11","max_accel" : "0.5", "max_speed" : "151", "coord_num" : "5", "u2c" : "19865.6",
"smallStep" : "0.002",
"axis" : "W","format" : "%.3f",
"minPosition" : "0.2","maxPosition" : "86.1",
"hard_ini" : "ScintillatorPhotodiode.Zmotor", "neutralPosition" : "0", "active" : "1"
},
"centering.x" : { "motor_num" : "17","max_accel" : "0.5", "max_speed" : "150", "coord_num" : "2", "u2c" : "182400",
"smallStep" : "0.001",
"axis" : "X","format" : "%.3f",
"minPosition" : "-2.56","maxPosition" : "2.496",
"hard_ini" : "CentringXYTable.XCentringMotor", "neutralPosition" : "0", "active" : "1"
},
"centering.y" : {"motor_num" : "18","max_accel" : "0.5", "max_speed" : "150", "coord_num" : "2", "u2c" : "182400",
"smallStep" : "0.001",
"axis" : "Y","format" : "%.3f",
"minPosition" : "-2.58","maxPosition" : "2.4",
"hard_ini" : "CentringXYTable.YCentringMotor", "neutralPosition" : "0", "active" : "1"
},
"kappa" : { "motor_num" : "19","max_accel" : "0.2", "max_speed" : "50", "coord_num" : "7", "u2c" : "2844.444",
"moveMode" : "rotation",
"axis" : "X","format" : "%.2f",
"minPosition" : "-5","update_resolution" : "1.0",
"hard_ini" : "MiniKappa.Kappa1", "neutralPosition" : "0", "active" : "1"
},
"phi" : { "motor_num" : "20","max_accel" : "0.2", "max_speed" : "50", "coord_num" : "7", "u2c" : "711.111",
"moveMode" : "freeRotation",
"axis" : "Y","format" : "%.2f",
"update_resolution" : "1.0",
"hard_ini" : "MiniKappa.Kappa2", "neutralPosition" : "0", "active" : "1"
},
"fastShutter" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"frontLight.intensity" : { "canHome" : "false","type" : "DAC",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"backLight.intensity" : { "canHome" : "false","type" : "DAC",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"scint.focus" : { "canHome" : "false","type" : "DAC",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"backLight" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"cryo" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"dryer" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"fluo" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"frontLight" : { "canHome" : "false","type" : "BO",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"backLight.factor" : { "canHome" : "false","type" : "DAC",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"frontLight.factor" : { "canHome" : "false","type" : "DAC",
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
},
"smartMagnet" : { "canHome" : "false","type" : "BO", "active_init" : '{m1100=0,m1106=1}', "inactive_init" : '{m1100=1,m1106=0}',
"update_resolution" : "0.5","canStop" : "false", "active" : "1", "in_position_band" : "0"
}
}
def mk_home( mname, d):
if not d.has_key("motor_num") or not d.has_key("coord_num"):
return ""
motor_num = int(d["motor_num"])
coord_num = int(d["coord_num"])
if motor_num < 1 or motor_num > 32:
return ""
if mname == "kappa":
prog_num = 119
else:
prog_num = motor_num
return '{#%d$,M%d=1,&%dE,#%d&%dB%dR}' % (motor_num, motor_num+400, coord_num, motor_num, coord_num, prog_num)
def mk_active_init( d):
if not d.has_key("motor_num") or not d.has_key("coord_num") or not d.has_key( "axis"):
return ""
motor_num = int(d["motor_num"])
coord_num = int(d["coord_num"])
axis = str(d["axis"])
mask = 1 << (motor_num - 1)
if motor_num < 1 or motor_num > 32:
return ""
return '{M%d=1,&%d#%d->%s,"M700=(M700 | $%0x) ^ $%0x"}' % (motor_num + 30, coord_num, motor_num, axis, mask, mask)
def mk_inactive_init( d):
if not d.has_key("motor_num") or not d.has_key("coord_num") or not d.has_key( "axis"):
return ""
motor_num = int(d["motor_num"])
coord_num = int(d["coord_num"])
axis = str(d["axis"])
mask = 1 << (motor_num - 1)
if motor_num < 1 or motor_num > 32:
return ""
return '{M%d=0,&%d#%d->0,"M700=M700 | $%0x"}' % (motor_num + 30, coord_num, motor_num, mask)
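# For example, for align.x (motor 2, coordinate system 3, axis X) these helpers
# generate:
#
#   mk_home:          {#2$,M402=1,&3E,#2&3B2R}
#   mk_active_init:   {M32=1,&3#2->X,"M700=(M700 | $2) ^ $2"}
#   mk_inactive_init: {M32=0,&3#2->0,"M700=M700 | $2"}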
def active_simulation( sim):
if str(sim) != "0":
rtn = "0"
else:
rtn = "1"
return rtn
def asis( arg):
return arg
hard_ini_fields = {
"active" : ["Simulation", active_simulation],
"coord_num" : ["CoordinateSystem", asis],
"largeStep" : ["LargeStep", asis],
"maxPosition" : ["MaxPosition", asis],
"minPosition" : ["MinPosition", asis],
"motor_num" : ["MotorNumber", asis],
"neutralPosition" : ["NeutralPosition", asis],
"precision" : ["Precision", asis],
"smallStep" : ["SmallStep", asis],
"u2c" : ["UnitRatio", asis]
}
# DBR TYPES
# 0 String
# 1 Short (16 bit)
# 2 Float (32 bit)
# 3 Enum (not supported as of 121219)
# 4 Char (8 bit)
# 5 Int (32 bit)
# 6 Double (64 bit)
motor_field_lists = [
# name, default, dbrtype
["active", "1", 1], # 1 if the motor is to be enabled and used (not fully supported as of 121219)
["active_init", "", 0], # postgresql style string array of initialization strings to send to PMAC if the motor is active
["axis", "", 4], # PMAC axis (single character: X,Y,Z, etc)
["canHome", "0", 1], # 1 if a homing routine can be called
["canMove", "true", 0], # "true" if we can move this motor, "false" if we cannot.
["canStop", "true", 0], # "true" if it makes sense to display a stop button, "false" otherwise
["coord_num", "", 1], # PMAC coordinate system number for this motor
["currentPreset", "", 0], # Name of the current preset position
["in_position_band", "160", 1], # Motors within this amount are considered "In Position". UNITS ARE 1/16 OF A COUNT
["format", "%f", 0], # format string for publish position to redis
["hard_ini", None, 0], # Name of section in microdiff_hard.ini
["home", "", 0], # postgresql style string array of strings to send to PMAC to home motor
["inPosition", "true", 0], # "true" if the motor is in position, "false" if it is moving
["inactive_init", "", 0], # postgresql style string array of initialization strings to send to PMAC if the motor is inactive
["largeStep", "1.0", 6], # increment for large step in a UI
["maxPosition", "Infinity", 6], # upper soft limit
["max_accel", "", 0], # maximum motor acceleration, used for motors that are to be scanned (i.e., omega)
["max_speed", "", 6], # maximum motor speed, used for motors that are to be scanned (i.e., omega)
["minPosition", "-Infinity", 6], # lower soft limit
["motor_num", "-1", 1], # PMAC motor number
["moveMode", "translation",0], # translation, rotation, freeRotation
["name", "", 0], # What we think the motor should be called in a UI
["negLimitSet", "0", 1], # 1 if on the limit, 0 otherwise
["neutralPosition", "0", 6], # Move here after a home and call it zero. Should be called -offset or offset or something like that.
["posLimitSet", "0", 1], # 1 if on the limit, 0 otherwise
["position", "", 6], # our position
["precision", "0.001", 6], # precision of the motion: moves of less than this amount are ignored (use in_position_band instead)
["presets.length", "0", 1], # number of presets defined
["printPrecision", "3", 1], # for ui to print out position (see the printf field for another way of doing this)
["printf", "%*.3f", 0], # printf style format string for ncurses interface
["smallStep", "0.1", 6], # step size for UI for a fine movement
["status_str", "", 0], # Explanation of what the motor is doing
["type", "PMAC", 0], # type of motor: PMAC, DAC, BO, SOFT, etc
["u2c", "1.0", 6], # multiply user units times u2c to get motor counts
["unit", "mm", 0], # user units
["update_resolution", "0.001", 4] # update redis when motor is moving only when a change of this magnitude is seen
]
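# For example, with head "stns.2" the defaults loop further below emits lines
# such as (illustrative):
#
#   HSETNX stns.2.omega.largeStep VALUE '1.0'
#   PUBLISH mk_pgpmac_redis stns.2.omega.largeStep
#   HSETNX stns.2.omega.largeStep DBRTYPE '6'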
bi_list = ["CryoSwitch"]
motor_presets = {
"align.x" : [
# name value canTune pref_ini section pref_ini option
[ "Beam", "0.0", "1", "PHIAxisXYZTable", "XBeam_X1"],
[ "Back", "-1.8", "1", "PHIAxisXYZTable", "XScintillatorOut_X2"],
[ "Back_Vector", "-1.8", "1", "PHIAxisXYZTable", "XScintillatorOut_X2"]
],
"align.y" : [
# name value canTune pref_ini section pref_ini option
[ "Beam", "0.0", "1", "PHIAxisXYZTable", "YBeam_Y1"],
[ "Back", "1.0", "1", "PHIAxisXYZTable", "YScintillatorOut_Y2"],
[ "Back_Vector", "1.0", "1", "PHIAxisXYZTable", "YScintillatorOut_Y2"]
],
"align.z" : [
# name value canTune pref_ini section pref_ini option
[ "Beam", "0.0", "1", "PHIAxisXYZTable", "ZBeam_Z1"],
[ "Back", "1.9", "1", "PHIAxisXYZTable", "ZScintillatorOut_Z2"],
[ "Back_Vector", "1.9", "1", "PHIAxisXYZTable", "ZScintillatorOut_Z2"]
],
"appy" : [
# name value canTune pref_ini section pref_ini option
[ "In", "0.117", "1", "ApertureYZTable", "BeamHorizontalPosition_Y0"]
],
"appz" : [
[ "In", "80", "1", "ApertureYZTable", "BeamVerticalPosition_Z1"],
[ "Out", "71.777", "0", "ApertureYZTable", "VerticalOffScreenPosition_Z2"],
[ "Cover", "2.0", "0", "ApertureYZTable", "OffVerticalPosition_Z0"]
],
"backLight" : [
[ "On", "1", None, None, None],
[ "Off", "0", None, None, None]
],
"frontLight" : [
[ "On", "1", None, None, None],
[ "Off", "0", None, None, None]
],
"capy" : [
[ "In", "0.082", "1", "CapillaryBSYZtable", "HorizontalBeamPosition_Y0"]
],
"capz" : [
[ "In", "78.2617", "1", "CapillaryBSYZtable", "VerticalBeamPosition_Z1"],
[ "Out", "69.944", "0", "CapillaryBSYZtable", "VerticalOffScreenPosition_Z2"],
[ "Cover", "0.3", "0", "CapillaryBSYZtable", "VeticalOffPosition_Z0"]
],
"fastShutter" : [
[ "Open", "1", None, None, None],
[ "Close", "0", None, None, None]
],
"kappa" : [
[ "manualMount", "180.0", None, "MiniKappa", "Kappa1MountPosition"],
[ "reference", "228.5", None, "CentringXYTable", "PhiReference"]
],
"omega" : [
[ "manualMount", "180.0", None, "PHIRotationAxis", "KappaMountPosition"]
],
"scint.focus" : [
[ "tuner", "53", "1", "ScintillatorPhotodiode", "OnFocusPiezoPosition"]
],
"scint" : [
[ "Photodiode", "53.0", "1", "ScintillatorPhotodiode", "DiodeOnBeamVerticalPosition_Z2"],
[ "Scintillator", "78.788", "1", "ScintillatorPhotodiode", "ScintiOnBeamVerticalPosition_Z1"],
[ "Cover", "2.0", "0", "ScintillatorPhotodiode", "OffVerticalPosition_Z0"]
]
}
zoom_settings = [
#lev front back pos scalex scaley section
[1, 4.0, 8.0, 34100, 2.7083, 3.3442, "CoaxCam.Zoom1"],
[2, 6.0, 8.1, 31440, 2.2487, 2.2776, "CoaxCam.Zoom2"],
[3, 6.5, 8.2, 27460, 1.7520, 1.7550, "CoaxCam.Zoom3"],
[4, 7.0, 8.3, 23480, 1.3360, 1.3400, "CoaxCam.Zoom4"],
[5, 8.0, 10.0, 19500, 1.0140, 1.0110, "CoaxCam.Zoom5"],
[6, 9.0, 12.0, 15520, 0.7710, 0.7760, "CoaxCam.Zoom6"],
[7, 10.0, 17.0, 11540, 0.5880, 0.5920, "CoaxCam.Zoom7"],
[8, 12.0, 25.0, 7560, 0.4460, 0.4480, "CoaxCam.Zoom8"],
[9, 15.0, 37.0, 3580, 0.3410, 0.3460, "CoaxCam.Zoom9"],
[10, 16.0, 42.0, 0, 0.2700, 0.2690, "CoaxCam.Zoom10"]
]
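# Each zoom level becomes a set of per-level redis hashes; e.g. level 1 with
# head "stns.2" produces lines like (illustrative):
#
#   HSETNX stns.2.cam.zoom.1.FrontLightIntensity VALUE 4.0
#   PUBLISH mk_pgpmac_redis stns.2.cam.zoom.1.FrontLightIntensity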
# config
for c in configs.keys():
print "HMSET config.%s HEAD '%s' PUB '%s' RE '%s' PG '%s' AUTOSCINT '%s'" % \
(c.lower(), configs[c]["head"], configs[c]["pub"], configs[c]["re"], configs[c]["pg"], configs[c]["autoscint"])
# motor stuff
if hard_ini:
hi = iniParser.iniParser( hard_ini)
hi.read()
for m in motor_dict.keys():
print "HSETNX %s.%s.name VALUE '%s'" % (head, m, m) # These values are not part of any defaults
print "PUBLISH mk_pgpmac_redis %s.%s.name" % (head, m) #
print "HSETNX %s.%s.name DBRTYPE 0" % (head, m) #
print "HSETNX %s.%s.position VALUE ''" % (head, m) #
print "PUBLISH mk_pgpmac_redis %s.%s.position" % (head, m) #
print "HSETNX %s.%s.position DBRTYPE 6" % (head, m) #
if hard_ini != None and motor_dict[m].has_key("hard_ini"):
motor_dict[m]["motor_num"] = hi.get(motor_dict[m]["hard_ini"], "motornumber")
motor_dict[m]["coord_num"] = hi.get(motor_dict[m]["hard_ini"], "coordinatesystem")
# set home, active_init, and inactive_init based on current motor and coordinate numbers
#
if not motor_dict[m].has_key( "home"):
motor_dict[m]["home"] = mk_home( m, motor_dict[m])
if not motor_dict[m].has_key( "active_init"):
motor_dict[m]["active_init"] = mk_active_init( motor_dict[m])
if not motor_dict[m].has_key( "inactive_init"):
motor_dict[m]["inactive_init"] = mk_inactive_init( motor_dict[m])
for k in motor_dict[m]:
if k == "hard_ini": # this is sort of a meta field
continue
# Use the value from the hard ini file, if it is available
# Override the current value if it is available
#
if hard_ini == None or \
not motor_dict[m].has_key("hard_ini") or \
motor_dict[m]["hard_ini"] == None or \
not hard_ini_fields.has_key( k) or \
not hi.has_section( motor_dict[m]["hard_ini"]) or \
not hi.has_option( motor_dict[m]["hard_ini"], hard_ini_fields[k][0]):
# Use the hard coded value found in this file
#
v = motor_dict[m][k]
f = "HSETNX"
else:
# Use the ini file value
#
xlate = hard_ini_fields[k][1]
v = xlate(hi.get( motor_dict[m]["hard_ini"], hard_ini_fields[k][0]))
f = "HSET"
print "%s %s.%s.%s VALUE '%s'" % (f, head, m, k, v)
print "PUBLISH mk_pgpmac_redis %s.%s.%s" % (head, m, k)
# Fall back to the default value for fields not set any other way
#
for field, default, dbrtype in motor_field_lists:
print "HSETNX %s.%s.%s VALUE '%s'" % (head, m, field, default)
print "PUBLISH mk_pgpmac_redis %s.%s.%s" % (head, m, field)
print "HSETNX %s.%s.%s DBRTYPE '%s'" % (head, m, field, dbrtype)
# Add the presets
#
if pref_ini:
pi = iniParser.iniParser( pref_ini)
pi.read()
i = 0;
if motor_presets.has_key( m):
for pname, ppos, ptune, section, option in motor_presets[m]:
print "HSETNX %s.%s.presets.%d.name VALUE %s" % (head, m, i, pname)
print "PUBLISH mk_pgpmac_redis %s.%s.presets.%d.name" % (head, m, i)
f = "HSETNX"
if pref_ini and section and option and pi.has_section( section) and pi.has_option( section, option):
ppos = pi.get( section, option)
f = "HSET"
print "%s %s.%s.presets.%d.position VALUE %s" % ( f, head, m, i, ppos)
print "PUBLISH mk_pgpmac_redis %s.%s.presets.%d.position" % (head, m, i)
if ptune != None:
print "HSETNX %s.%s.presets.%d.canTune VALUE %s" % ( head, m, i, ptune)
print "PUBLISH mk_pgpmac_redis %s.%s.presets.%d.canTune" % (head, m, i)
i += 1
print "HSET %s.%s.presets.length VALUE %d" % ( head, m, i)
print "PUBLISH mk_pgpmac_redis %s.%s.presets.length" % (head, m)
# omega reference angle is unique
if m=="omega":
if pref_ini and pi.has_section( "CentringXYTable") and pi.has_option( "CentringXYTable", "PhiReference"):
ppos = pi.get( "CentringXYTable", "PhiReference")
print "HSET %s.omega.reference VALUE %s" % (head, ppos)
print "PUBLISH mk_pgpmac_redis %s.omega.reference" % (head)
# light and zoom settings
for lev, f, b, p, x, y, section in zoom_settings:
fnc = "HSETNX"
if pref_ini != None and pi.has_section( section) and pi.has_option( section, "FrontLightIntensity"):
f = pi.get( section, "FrontLightIntensity")
fnc = "HSET"
print "%s %s.cam.zoom.%d.FrontLightIntensity VALUE %s" % (fnc, head, lev, f)
print "PUBLISH mk_pgpmac_redis %s.cam.zoom.%d.FrontLightIntensity" % (head, lev)
fnc = "HSETNX"
if pref_ini != None and pi.has_section( section) and pi.has_option( section, "LightIntensity"):
b = pi.get( section, "LightIntensity")
fnc = "HSET"
print "%s %s.cam.zoom.%d.LightIntensity VALUE %s" % (fnc, head, lev, b)
print "PUBLISH mk_pgpmac_redis %s.cam.zoom.%d.LightIntensity" % (head, lev)
fnc = "HSETNX"
if pref_ini != None and pi.has_section( section) and pi.has_option( section, "MotorPosition"):
p = pi.get( section, "MotorPosition")
fnc = "HSET"
print "%s %s.cam.zoom.%d.MotorPosition VALUE %s" % (fnc, head, lev, p)
print "PUBLISH mk_pgpmac_redis %s.cam.zoom.%d.MotorPosition" % (head, lev)
fnc = "HSETNX"
if pref_ini != None and pi.has_section( section) and pi.has_option( section, "ScaleX"):
x = pi.get( section, "ScaleX")
fnc = "HSET"
print "%s %s.cam.zoom.%d.ScaleX VALUE %s" % (fnc, head, lev, x)
print "PUBLISH mk_pgpmac_redis %s.cam.zoom.%d.ScaleX" % (head, lev)
fnc = "HSETNX"
if pref_ini != None and pi.has_section( section) and pi.has_option( section, "ScaleY"):
y = pi.get( section, "ScaleY")
fnc = "HSET"
print "%s %s.cam.zoom.%d.ScaleY VALUE %s" % (fnc, head, lev, y)
print "PUBLISH mk_pgpmac_redis %s.cam.zoom.%d.ScaleY" % (head, lev)
plcc2_file = open( "%s-plcc2.pmc" % (head), "w")
plcc2_file.write( "OPEN PLCC2 CLEAR\n")
plcc2_file.write( ";\n")
plcc2_file.write( "; Auto generated by mk_pgpmac_redis.py on %s\n" % datetime.datetime.isoformat(datetime.datetime.now()))
plcc2_file.write( "; Insert into your .pmc file (replacing plcc 2 completely) and reload with the pmac executive program.\n")
plcc2_file.write( ";\n")
plcc2_file.write( "M522=M520; Used for A&B registers set up.\n")
plcc2_file.write( "\n");
for m in plcc2_dict.keys():
if not motor_dict.has_key( m) or not motor_dict[m].has_key( "motor_num"):
continue
motor_num = int( motor_dict[m]["motor_num"])
if motor_num < 1 or motor_num > 32:
continue
plcc2_file.write( "%s=M%d ; %s Status 1\n" % (plcc2_dict[m]["status1"], motor_num, m))
plcc2_file.write( "%s=M%d ; %s Status 2\n" % (plcc2_dict[m]["status2"], motor_num + 90, m))
plcc2_file.write( "%s=(M%d/(I%d*32)) ; %s Position\n" % (plcc2_dict[m]["position"], motor_num+180, motor_num*100 + 8, m))
plcc2_file.write( "M5070=M1048 ; FShutterIsOpen\n")
plcc2_file.write( "M5071=P3002 ; PhiScan\n")
plcc2_file.write( "M5072=P3001 ; FastShutterHasOpened\n")
plcc2_file.write( "M5073=P3005 ; FastShutterHasGloballyOpened\n")
plcc2_file.write( "M5074=P177 ; Number of passes (FShutterIsOpen false and FastShutterHasOpened true and npasses=1 means we can read the detector)\n")
plcc2_file.write( "CLOSE\n")
plcc2_file.close();
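# The generated <head>-plcc2.pmc mirrors the reference listing in the comments
# near the top of this file; e.g. for omega (motor 1) it contains lines like:
#
#   M5001=M1 ; omega Status 1
#   M5021=M91 ; omega Status 2
#   M5041=(M181/(I108*32)) ; omega Position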
|
gpl-3.0
| 2,189,701,493,260,907,300
| 47.952318
| 172
| 0.516572
| false
| 2.69695
| false
| false
| false
|
listen-lavender/webcrawl
|
webcrawl/queue/lib/queue.py
|
1
|
8585
|
"""A multi-producer, multi-consumer queue."""
from time import time as _time
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from collections import deque
import heapq
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue(object):
"""Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = _threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = _threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = _threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = _threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not reliable!)."""
self.mutex.acquire()
n = not self._qsize()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not reliable!)."""
self.mutex.acquire()
n = 0 < self.maxsize == self._qsize()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
"""
self.not_full.acquire()
try:
if self.maxsize > 0:
if not block:
if self._qsize() == self.maxsize:
raise Full
elif timeout is None:
while self._qsize() == self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = _time() + timeout
while self._qsize() == self.maxsize:
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
"""
self.not_empty.acquire()
try:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = _time() + timeout
while not self._qsize():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self, len=len):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item, heappush=heapq.heappush):
heappush(self.queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self.queue)
class LifoQueue(Queue):
'''Variant of Queue that retrieves most recently added entries first.'''
def _init(self, maxsize):
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
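# A minimal usage sketch (illustrative, not part of the original module): one
# producer feeding a daemon consumer thread through a bounded queue, with
# join() used to wait until every item has been acknowledged via task_done().
#
#   q = Queue(maxsize=10)
#
#   def worker():
#       while True:
#           item = q.get()
#           handle(item)        # hypothetical per-item processing
#           q.task_done()
#
#   t = _threading.Thread(target=worker)
#   t.daemon = True
#   t.start()
#
#   for item in range(100):
#       q.put(item)             # blocks while the queue is full
#   q.join()                    # returns once all items are task_done()'d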
|
mit
| 4,989,844,749,189,693,000
| 34.184426
| 85
| 0.588818
| false
| 4.409348
| false
| false
| false
|
ptressel/sahana-eden-madpub
|
models/000_config.py
|
1
|
14638
|
# -*- coding: utf-8 -*-
"""
Deployment settings
All settings which are typically edited for a deployment should be done here
Deployers shouldn't typically need to edit any other files.
"""
# Remind admin to edit this file
FINISHED_EDITING_CONFIG_FILE = True # change to True after you finish editing this file
if not FINISHED_EDITING_CONFIG_FILE:
raise HTTP(501, body="Please edit models/000_config.py first")
s3cfg = local_import("s3cfg")
deployment_settings = s3cfg.S3Config(T)
# Database settings
deployment_settings.database.db_type = "sqlite"
deployment_settings.database.host = "localhost"
deployment_settings.database.port = None # use default
deployment_settings.database.database = "sahana"
deployment_settings.database.username = "sahana"
deployment_settings.database.password = "password"
deployment_settings.database.pool_size = 30
# Authentication settings
# This setting should be changed _before_ registering the 1st user
deployment_settings.auth.hmac_key = "aliceinwonderland"
# These settings should be changed _after_ the 1st (admin) user is
# registered in order to secure the deployment
deployment_settings.auth.registration_requires_verification = False
deployment_settings.auth.registration_requires_approval = False
deployment_settings.auth.openid = False
# Base settings
# Set this to the Public URL of the instance
deployment_settings.base.public_url = "http://127.0.0.1:8000"
# Set this to True to switch to Debug mode
# Debug mode means that uncompressed CSS/JS files are loaded
# JS Debug messages are also available in the Console
# can also load an individual page in debug mode by appending URL with
# ?debug=1
deployment_settings.base.debug = False
# Switch to "False" in Production for a Performance gain
# (need to set to "True" again when Table definitions are changed)
deployment_settings.base.migrate = True
# Enable/disable pre-population of the database.
# Should be True on 1st_run to pre-populate the database
# - unless doing a manual DB migration
# Then set to False in Production (to save 1x DAL hit every page)
# NOTE: the web UI will not be accessible while the DB is empty,
# instead run:
# python web2py.py -N -S eden -M
# to create the db structure, then exit and re-import the data.
deployment_settings.base.prepopulate = True
# Set this to True to use Content Delivery Networks to speed up Internet-facing sites
deployment_settings.base.cdn = False
# Email settings
# Outbound server
deployment_settings.mail.server = "127.0.0.1:25"
# Useful for Windows Laptops:
#deployment_settings.mail.server = "smtp.gmail.com:587"
#deployment_settings.mail.login = "username:password"
# From Address
deployment_settings.mail.sender = "'Sahana' <sahana@sahanafoundation.org>"
# Address to which mails get sent to approve new users
deployment_settings.mail.approver = "ptressel@myuw.net"
# Twitter settings:
# Register an app at http://twitter.com/apps
# (select Application Type: Client)
# You'll get your consumer_key and consumer_secret from Twitter
# You can keep these empty if you don't need Twitter integration
deployment_settings.twitter.oauth_consumer_key = ""
deployment_settings.twitter.oauth_consumer_secret = ""
# L10n settings
# Uncomment this if the deployment is just in a few countries
# (used in the GIS Location Selector & maybe in future: Messaging)
#deployment_settings.L10n.countries = ["PK"]
# Languages used in the deployment (used for Language Toolbar & GIS Locations)
# http://www.loc.gov/standards/iso639-2/php/code_list.php
deployment_settings.L10n.languages = {
"en":T("English"),
"es":T("Spanish"),
#"fr":T("French"),
#"pa":T("Punjabi"),
#"ps":T("Pashto"),
#"sd":T("Sindhi"),
"ja":T("Japanese"),
"ur":T("Urdu"),
"zh-tw":T("Chinese (Taiwan)"),
}
# Default language for Language Toolbar (& GIS Locations in future)
deployment_settings.L10n.default_language = "en"
# Display the language toolbar
deployment_settings.L10n.display_toolbar = True
# Default timezone for users
deployment_settings.L10n.utc_offset = "UTC +0000"
# Religions used in Person Registry
# @ToDo: find a better code
# http://eden.sahanafoundation.org/ticket/594
deployment_settings.L10n.religions = {
"none":T("none"),
"christian":T("Christian"),
"muslim":T("Muslim"),
"jew":T("Jew"),
"buddhist":T("Buddhist"),
"hindu":T("Hindu"),
"bahai":T("Bahai"),
"other":T("other")
}
# GIS (Map) settings
# Provide a tool to select locations via a map on all forms with location_id
deployment_settings.gis.map_selector = True
# Display Resources recorded to Admin-Level Locations on the map
deployment_settings.gis.display_L0 = False
# Currently unused
#deployment_settings.gis.display_L1 = True
# Allow non-MapAdmins to edit Admin locations?
# (defaults to True, if not set)
deployment_settings.gis.edit_L0 = False
deployment_settings.gis.edit_L1 = True
#deployment_settings.gis.edit_L2 = True
deployment_settings.gis.locations_hierarchy = {
"L0":T("Country"),
"L1":T("Province"),
"L2":T("District"),
"L3":T("Town"),
"L4":T("Village"),
"L5":T("Location"), # Street Address
"XX":T("Imported")
}
# Maximum Marker Size
# (takes effect only on display)
deployment_settings.gis.marker_max_height = 35
deployment_settings.gis.marker_max_width = 30
# Duplicate Features so that they show wrapped across the Date Line?
# Points only for now
# lon<0 have a duplicate at lon+360
# lon>0 have a duplicate at lon-360
deployment_settings.gis.duplicate_features = False
# Mouse Position: 'normal', 'mgrs' or 'off'
deployment_settings.gis.mouse_position = "normal"
# Print Service URL: http://eden.sahanafoundation.org/wiki/BluePrintGISPrinting
#deployment_settings.gis.print_service = "/geoserver/pdf/"
# Do we have a spatial DB available? (currently unused. Will support PostGIS & Spatialite.)
deployment_settings.gis.spatialdb = False
# GeoServer (Currently used by GeoExplorer. Will allow REST control of GeoServer.)
# NB Needs to be a publicly-accessible URL for querying via client JS
#deployment_settings.gis.geoserver_url = "http://localhost/geoserver"
#deployment_settings.gis.geoserver_username = "admin"
#deployment_settings.gis.geoserver_password = "password"
# OpenStreetMap settings:
# Register your app by logging in to www.openstreetmap.org & then selecting 'oauth settings'
deployment_settings.osm.oauth_consumer_key = ""
deployment_settings.osm.oauth_consumer_secret = ""
# Security Policy settings
# Lock-down access to Map Editing
#deployment_settings.security.map = True
# Security Policy (defaults to 1 = Simple)
#deployment_settings.security.policy = 2 # Editor
# Should users be allowed to register themselves?
deployment_settings.security.self_registration = True
# Use 'soft' deletes
deployment_settings.security.archive_not_delete = True
# Audit settings
# We Audit if either the Global or Module asks us to
# (ignore gracefully if module author hasn't implemented this)
# NB Auditing (especially Reads) slows system down & consumes diskspace
#deployment_settings.security.audit_write = False
#deployment_settings.security.audit_read = False
# UI/Workflow options
# Should user be prompted to save before navigating away?
#deployment_settings.ui.navigate_away_confirm = False
# Should potentially large dropdowns be turned into autocompletes?
# (unused currently)
#deployment_settings.ui.autocomplete = True
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in 01_menu.py
from gluon.storage import Storage
deployment_settings.modules = Storage(
default = Storage(
name_nice = T("Home"),
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = 0 # This item is always 1st in the menu
),
admin = Storage(
name_nice = T("Administration"),
description = T("Site Administration"),
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = 0 # This item is handled separately in the menu
),
gis = Storage(
name_nice = T("Map"),
description = T("Situation Awareness & Geospatial Analysis"),
module_type = 1, # 1st item in the menu
resources = Storage(
gis_location = {"importer" : True}
)
),
doc = Storage(
name_nice = T("Documents and Photos"),
description = T("A library of digital resources, such as photos, documents and reports"),
module_type = 10,
),
msg = Storage(
name_nice = T("Messaging"),
description = T("Sends & Receives Alerts via Email & SMS"),
module_type = 10,
),
pr = Storage(
name_nice = T("Person Registry"),
description = T("Central point to record details on People"),
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10,
resources = Storage(
pr_address = {"importer" : True},
pr_pe_contact = {"importer" : True},
pr_presence = {"importer" : True},
pr_identity = {"importer" : True},
pr_person = {"importer" : True},
pr_group = {"importer" : True},
pr_group_membership = {"importer" : True},
)
),
pf = Storage(
name_nice = T("Person Finder"),
description = T("Helps to report and search for Missing Persons"),
module_type = 10,
),
dvi = Storage(
name_nice = T("Disaster Victim Identification"),
description = T("Disaster Victim Identification"),
module_type = 10,
#access = "|DVI|", # Only users with the DVI role can see this module in the default menu & access the controller
#audit_read = True, # Can enable Audit for just an individual module here
#audit_write = True,
resources = Storage(
dvi_recreq = {"importer" : True},
)
),
#dvr = Storage(
# name_nice = T("Disaster Victim Registry"),
# description = T("Traces internally displaced people (IDPs) and their needs"),
# module_type = 10
# ),
org = Storage(
name_nice = T("Organization Registry"),
description = T('Lists "who is doing what & where". Allows relief agencies to coordinate their activities'),
module_type = 10,
resources = Storage(
org_organisation = {"importer" : True},
org_office = {"importer" : True},
org_staff = {"importer" : True}
)
),
project = Storage(
name_nice = T("Project Tracking"),
description = T("Tracking of Projects, Activities and Tasks"),
module_type = 10
),
# NB Budget module depends on Project Tracking Module
budget = Storage(
name_nice = T("Budgeting Module"),
description = T("Allows a Budget to be drawn up"),
module_type = 10,
resources = Storage(
budget_item = {"importer" : True},
budget_kit = {"importer" : True},
budget_bundle = {"importer" : True},
)
),
logs = Storage(
name_nice = T("Logistics Management"),
description = T("Managing, Storing and Distributing Relief Items"),
module_type = 10
),
rms = Storage(
name_nice = T("Requests"),
description = T("Tracks requests for aid and matches them against donors who have pledged aid"),
module_type = 3,
resources = Storage(
rms_req = {"importer" : True},
)
),
cr = Storage(
name_nice = T("Shelter Registry"),
description = T("Tracks the location, distribution, capacity and breakdown of victims in Shelters"),
module_type = 10,
resources = Storage(
cr_shelter = {"importer" : True }
)
),
hms = Storage(
name_nice = T("Hospitals"),
description = T("Helps to monitor status of hospitals"),
module_type = 10,
resources = Storage(
hms_hospital = {"importer" : True}
)
),
vol = Storage(
name_nice = T("Volunteers"),
description = T("Manage volunteers by capturing their skills, availability and allocation"),
module_type = 10,
),
irs = Storage(
name_nice = T("Incident Reporting"),
description = T("Incident Reporting System"),
module_type = 10
),
assess = Storage(
name_nice = "Assessments",
description = "Rapid Assessments & Flexible Impact Assessments",
module_type = 2,
),
survey = Storage(
name_nice = "Survey Module",
description = "Create, enter, and manage surveys.",
module_type = 10,
),
delphi = Storage(
name_nice = T("Delphi Decision Maker"),
description = T("Supports the decision making of large groups of Crisis Management Experts by helping the groups create a ranked list."),
module_type = 10,
),
importer = Storage(
name_nice = "Spreadsheet Importer",
description = "Used to import data from spreadsheets into the database",
module_type = 10,
),
#flood = Storage(
# name_nice = T("Flood Alerts"),
# description = T("Flood Alerts show water levels in various parts of the country"),
# module_type = 10
# ),
#ticket = Storage(
# name_nice = T("Ticketing Module"),
# description = T("Master Message Log to process incoming reports & requests"),
# module_type = 10,
# ),
#lms = Storage(
# name_nice = T("Logistics Management System"),
# description = T("An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities."),
# module_type = 10
# ),
mad = Storage(
name_nice = "Mobile Assessment of Damage",
description = "Uploads damage information and images from mobile devices",
module_type = 10,
),
)
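# To disable a module, comment out its Storage entry above (as already done for
# flood, ticket and lms); to enable an additional one, add an entry following
# the same pattern, e.g. (illustrative only, not an existing module):
#
#   deployment_settings.modules["mymodule"] = Storage(
#       name_nice = T("My Module"),
#       description = T("Example of a custom module entry"),
#       module_type = 10,
#   )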
|
mit
| 1,233,628,247,905,388,000
| 39.325069
| 192
| 0.646331
| false
| 3.877616
| false
| false
| false
|
MirkoDziadzka/pyhkdf
|
src/hkdf.py
|
1
|
3000
|
"""
This is a straight forward implementation of RFC 5869
HMAC-based Extract-and-Expand Key Derivation Function (HKDF)
http://tools.ietf.org/html/rfc5869
"""
import warnings
from Crypto.Hash import SHA512, HMAC
class HKDF:
"""
HMAC-based Extract-and-Expand Key Derivation Function (RFC 5869)
usage:
>> engine = HKDF(b"password", b"salt", digestmod=SHA256)
>> key1 = engine.expand(b"info", length)
This is equivalent to
>> prk = HKDF.rfc_extract(b"password", b"salt", digest=SHA256)
>> key1 = HKDF.rfc_expand(prk, b"info", length, digest=SHA256)
"""
@staticmethod
def rfc_extract(key: bytes, salt: bytes=b"", digest=SHA512) -> bytes:
""" The extract step from RFC 5869
Converts the key and the salt to a pseudorandom key using
the given hash function.
"""
if not salt:
salt = b'\0' * digest.digest_size
return HMAC.new(salt, key, digestmod=digest).digest()
@staticmethod
def rfc_expand(prk: bytes, info: bytes, length: int, digest=SHA512) -> bytes:
""" The expand step from RFC 5869
Take the result of rfc_extract (given as prk) and
compute a key from this based on info and a requested length.
digest must be the same as in the extract step.
"""
if length < 0:
raise ValueError("Parameter length must be greater or equal 0")
if length > digest.digest_size * 255:
raise ValueError(f"Parameter length must be less or equal {digest.digest_size * 255}")
# generate key stream, stop when we have enough bytes
keystream = []
keystream_length = 0
block_index = 0
key_block = b""
while keystream_length < length:
block_index += 1
data = key_block + info + bytes([block_index % 256])
key_block = HMAC.new(prk, data, digestmod=digest).digest()
keystream.append(key_block)
keystream_length += len(key_block)
return b"".join(keystream)[:length]
def __init__(self, key: bytes, salt: bytes=b"", digestmod=SHA512):
self.__digest = digestmod
self.__prk = self.rfc_extract(key, salt, digestmod)
@property
def digest_length(self):
""" return the digest_length of the hash module """
return self.__digest.digest_size
@property
def _prk(self):
""" the pseudorandom key, computed from the input key and the salt
"""
return self.__prk
def expand(self, info: bytes, length: int) -> bytes:
""" expand a key for the given context (info) in the given length
"""
return self.rfc_expand(self.__prk, info, length, digest=self.__digest)
def extract_key(self, info: bytes, length: int) -> bytes:
""" Deprecated: use expand() instead """
warnings.warn("deprecated, use expand() instead", DeprecationWarning)
return self.expand(info, length)
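# A small self-check sketch (illustrative, not part of the original module):
# derive two independent keys from one master secret and confirm that the
# one-shot rfc_extract/rfc_expand path matches the object-based API.
#
#   engine = HKDF(b"master secret", b"salt")
#   enc_key = engine.expand(b"encryption", 32)
#   mac_key = engine.expand(b"authentication", 64)
#   prk = HKDF.rfc_extract(b"master secret", b"salt")
#   assert HKDF.rfc_expand(prk, b"encryption", 32) == enc_key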
|
mit
| -7,948,960,739,531,117,000
| 29.927835
| 98
| 0.609
| false
| 3.821656
| false
| false
| false
|
ninjin/contra
|
gtbtokenize.py
|
1
|
12308
|
#!/usr/bin/env python
# Implements a GENIA Treebank-like tokenization.
# This is a python translation of my GTB-tokenize.pl, which in turn
# draws in part on Robert MacIntyre's 1995 PTB tokenizer,
# (http://www.cis.upenn.edu/~treebank/tokenizer.sed) and Yoshimasa
# Tsuruoka's GENIA tagger tokenization (tokenize.cpp;
# www-tsujii.is.s.u-tokyo.ac.jp/GENIA/tagger)
# by Sampo Pyysalo, 2011. Licensed under the MIT license.
# http://www.opensource.org/licenses/mit-license.php
# NOTE: intended differences to GTB tokenization:
# - Does not break "protein(s)" -> "protein ( s )"
from __future__ import with_statement
import re
INPUT_ENCODING = "UTF-8"
OUTPUT_ENCODING = "UTF-8"
DEBUG_GTB_TOKENIZATION = False
# Penn treebank bracket escapes (others excluded)
PTB_ESCAPES = [('(', '-LRB-'),
(')', '-RRB-'),
('[', '-LSB-'),
(']', '-RSB-'),
('{', '-LCB-'),
('}', '-RCB-'),
]
def PTB_escape(s):
for u, e in PTB_ESCAPES:
s = s.replace(u, e)
return s
def PTB_unescape(s):
for u, e in PTB_ESCAPES:
s = s.replace(e, u)
return s
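# For example, PTB_escape("CD34(+)") returns "CD34-LRB-+-RRB-", and
# PTB_unescape() reverses the substitution.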
# processing in three stages: "initial" regexes run first, then
# "repeated" run as long as there are changes, and then "final"
# run. As the tokenize() function itself is trivial, comments relating
# to regexes given with the re.compiles.
__initial, __repeated, __final = [], [], []
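# A rough sketch of how the three stages are applied (the actual tokenize()
# driver appears further down in the full file and may differ in detail):
#
#   for regex, sub in __initial:
#       s = regex.sub(sub, s)
#   while True:
#       before = s
#       for regex, sub in __repeated:
#           s = regex.sub(sub, s)
#       if s == before:
#           break
#   for regex, sub in __final:
#       s = regex.sub(sub, s)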
# separate but do not break ellipsis
__initial.append((re.compile(r'\.\.\.'), r' ... '))
# To avoid breaking names of chemicals, protein complexes and similar,
# only add space to related special chars if there's already space on
# at least one side.
__initial.append((re.compile(r'([,;:@#]) '), r' \1 '))
__initial.append((re.compile(r' ([,;:@#])'), r' \1 '))
# always separated
__initial.append((re.compile(r'\$'), r' $ '))
__initial.append((re.compile(r'\%'), r' % '))
__initial.append((re.compile(r'\&'), r' & '))
# separate punctuation followed by space even if there's closing
# brackets or quotes in between, but only sentence-final for
# periods (don't break e.g. "E. coli").
__initial.append((re.compile(r'([,:;])([\[\]\)\}\>\"\']* +)'), r' \1\2'))
__initial.append((re.compile(r'(\.+)([\[\]\)\}\>\"\']* +)$'), r' \1\2'))
# these always
__initial.append((re.compile(r'\?'), ' ? '))
__initial.append((re.compile(r'\!'), ' ! '))
# separate greater than and less than signs, avoiding breaking
# "arrows" (e.g. "-->", ">>") and compound operators (e.g. "</=")
__initial.append((re.compile(r'((?:=\/)?<+(?:\/=|--+>?)?)'), r' \1 '))
__initial.append((re.compile(r'((?:<?--+|=\/)?>+(?:\/=)?)'), r' \1 '))
# separate dashes, not breaking up "arrows"
__initial.append((re.compile(r'(<?--+\>?)'), r' \1 '))
# Parens only separated when there's space around a balanced
# bracketing. This aims to avoid splitting e.g. beta-(1,3)-glucan,
# CD34(+), CD8(-)CD3(-).
# Previously had a proper recursive implementation for this, but it
# was much too slow for large-scale use. The following is
# comparatively fast but a bit of a hack:
# First "protect" token-internal brackets by replacing them with
# their PTB escapes. "Token-internal" brackets are defined as
# matching brackets of which at least one has no space on either
# side. To match GTB tokenization for cases like "interleukin
# (IL)-mediated", and "p65(RelA)/p50", treat following dashes and
# slashes as space. Nested brackets are resolved inside-out;
# to get this right, add a heuristic considering boundary
# brackets as "space".
# (First a special case (rareish): "protect" cases with dashes after
# parenthesized expressions that cannot be abbreviations to avoid
# breaking up e.g. "(+)-pentazocine". Here, "cannot be abbreviations"
# is taken as "contains no uppercase character".)
__initial.append((re.compile(r'\(([^ A-Z()\[\]{}]+)\)-'), r'-LRB-\1-RRB--'))
# These are repeated until there's no more change (per above comment)
__repeated.append((re.compile(r'(?<![ (\[{])\(([^ ()\[\]{}]*)\)'), r'-LRB-\1-RRB-'))
__repeated.append((re.compile(r'\(([^ ()\[\]{}]*)\)(?![ )\]}\/-])'), r'-LRB-\1-RRB-'))
__repeated.append((re.compile(r'(?<![ (\[{])\[([^ ()\[\]{}]*)\]'), r'-LSB-\1-RSB-'))
__repeated.append((re.compile(r'\[([^ ()\[\]{}]*)\](?![ )\]}\/-])'), r'-LSB-\1-RSB-'))
__repeated.append((re.compile(r'(?<![ (\[{])\{([^ ()\[\]{}]*)\}'), r'-LCB-\1-RCB-'))
__repeated.append((re.compile(r'\{([^ ()\[\]{}]*)\}(?![ )\]}\/-])'), r'-LCB-\1-RCB-'))
# Remaining brackets are not token-internal and should be
# separated.
__final.append((re.compile(r'\('), r' -LRB- '))
__final.append((re.compile(r'\)'), r' -RRB- '))
__final.append((re.compile(r'\['), r' -LSB- '))
__final.append((re.compile(r'\]'), r' -RSB- '))
__final.append((re.compile(r'\{'), r' -LCB- '))
__final.append((re.compile(r'\}'), r' -RCB- '))
# initial single quotes always separated
__final.append((re.compile(r' (\'+)'), r' \1 '))
# final with the exception of 3' and 5' (rough heuristic)
__final.append((re.compile(r'(?<![35\'])(\'+) '), r' \1 '))
# This more frequently disagreed than agreed with GTB
# # Separate slashes preceded by space (can arise from
# # e.g. splitting "p65(RelA)/p50"
# __final.append((re.compile(r' \/'), r' \/ '))
# Standard from PTB (TODO: pack)
__final.append((re.compile(r'\'s '), ' \'s '))
__final.append((re.compile(r'\'S '), ' \'S '))
__final.append((re.compile(r'\'m '), ' \'m '))
__final.append((re.compile(r'\'M '), ' \'M '))
__final.append((re.compile(r'\'d '), ' \'d '))
__final.append((re.compile(r'\'D '), ' \'D '))
__final.append((re.compile(r'\'ll '), ' \'ll '))
__final.append((re.compile(r'\'re '), ' \'re '))
__final.append((re.compile(r'\'ve '), ' \'ve '))
__final.append((re.compile(r'n\'t '), ' n\'t '))
__final.append((re.compile(r'\'LL '), ' \'LL '))
__final.append((re.compile(r'\'RE '), ' \'RE '))
__final.append((re.compile(r'\'VE '), ' \'VE '))
__final.append((re.compile(r'N\'T '), ' N\'T '))
__final.append((re.compile(r' Cannot '), ' Can not '))
__final.append((re.compile(r' cannot '), ' can not '))
__final.append((re.compile(r' D\'ye '), ' D\' ye '))
__final.append((re.compile(r' d\'ye '), ' d\' ye '))
__final.append((re.compile(r' Gimme '), ' Gim me '))
__final.append((re.compile(r' gimme '), ' gim me '))
__final.append((re.compile(r' Gonna '), ' Gon na '))
__final.append((re.compile(r' gonna '), ' gon na '))
__final.append((re.compile(r' Gotta '), ' Got ta '))
__final.append((re.compile(r' gotta '), ' got ta '))
__final.append((re.compile(r' Lemme '), ' Lem me '))
__final.append((re.compile(r' lemme '), ' lem me '))
__final.append((re.compile(r' More\'n '), ' More \'n '))
__final.append((re.compile(r' more\'n '), ' more \'n '))
__final.append((re.compile(r'\'Tis '), ' \'T is '))
__final.append((re.compile(r'\'tis '), ' \'t is '))
__final.append((re.compile(r'\'Twas '), ' \'T was '))
__final.append((re.compile(r'\'twas '), ' \'t was '))
__final.append((re.compile(r' Wanna '), ' Wan na '))
__final.append((re.compile(r' wanna '), ' wan na '))
# clean up possible extra space
__final.append((re.compile(r' +'), r' '))
def _tokenize(s):
"""
    Tokenizer core. Performs GTB-like tokenization, using PTB escapes
for brackets (but not quotes). Assumes given string has initial
and terminating space. You probably want to use tokenize() instead
of this function.
"""
    # see the re.compile calls above for comments
for r, t in __initial:
s = r.sub(t, s)
while True:
o = s
for r, t in __repeated:
s = r.sub(t, s)
if o == s: break
for r, t in __final:
s = r.sub(t, s)
return s
def tokenize(s, ptb_escaping=False, use_single_quotes_only=False,
escape_token_internal_parens=False):
"""
Tokenizes the given string with a GTB-like tokenization. Input
    will be adjusted by removing surrounding space, if any. Arguments
hopefully self-explanatory.
"""
if DEBUG_GTB_TOKENIZATION:
orig = s
# Core tokenization needs starting and ending space and no newline;
# store to return string ending similarly
    # TODO: this shouldn't be this difficult ... rewrite nicely
s = re.sub(r'^', ' ', s)
m = re.match(r'^((?:.+|\n)*?) *(\n*)$', s)
assert m, "INTERNAL ERROR on '%s'" % s # should always match
s, s_end = m.groups()
s = re.sub(r'$', ' ', s)
if ptb_escaping:
if use_single_quotes_only:
# special case for McCCJ: escape into single quotes.
s = re.sub(r'([ \(\[\{\<])\"', r'\1 '+"' ", s)
else:
# standard PTB quote escaping
s = re.sub(r'([ \(\[\{\<])\"', r'\1 `` ', s)
else:
# no escaping, just separate
s = re.sub(r'([ \(\[\{\<])\"', r'\1 " ', s)
s = _tokenize(s)
# as above (not quite sure why this is after primary tokenization...)
if ptb_escaping:
if use_single_quotes_only:
s = s.replace('"', " ' ")
else:
s = s.replace('"', " '' ")
else:
s = s.replace('"', ' " ')
if not ptb_escaping:
if not escape_token_internal_parens:
# standard unescape for PTB escapes introduced in core
# tokenization
s = PTB_unescape(s)
else:
# only unescape if a space can be matched on both
# sides of the bracket.
s = re.sub(r'(?<= )-LRB-(?= )', '(', s)
s = re.sub(r'(?<= )-RRB-(?= )', ')', s)
s = re.sub(r'(?<= )-LSB-(?= )', '[', s)
s = re.sub(r'(?<= )-RSB-(?= )', ']', s)
s = re.sub(r'(?<= )-LCB-(?= )', '{', s)
s = re.sub(r'(?<= )-RCB-(?= )', '}', s)
# Clean up added space (well, maybe other also)
s = re.sub(r' +', ' ', s)
s = re.sub(r'^ +', '', s)
s = re.sub(r' +$', '', s)
# Only do final comparison in debug mode.
if DEBUG_GTB_TOKENIZATION:
# revised must match original when whitespace, quotes (etc.)
# and escapes are ignored
# TODO: clean this up
r1 = PTB_unescape(orig.replace(' ', '').replace('\n','').replace("'",'').replace('"','').replace('``',''))
r2 = PTB_unescape(s.replace(' ', '').replace('\n','').replace("'",'').replace('"','').replace('``',''))
if r1 != r2:
print >> sys.stderr, "tokenize(): error: text mismatch (returning original):\nORIG: '%s'\nNEW: '%s'" % (orig, s)
s = orig
return s+s_end
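# For illustration, a rough sketch of the expected behaviour (exact spacing is
# determined by the regexes above, so treat this as approximate):
#
#     >>> tokenize('The drug (aspirin) helped.')
#     'The drug ( aspirin ) helped .'
#     >>> tokenize('CD34(+) cells')
#     'CD34(+) cells'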
def __argparser():
import argparse
ap=argparse.ArgumentParser(description="Perform GENIA Treebank-like text tokenization.")
ap.add_argument("-ptb", default=False, action="store_true", help="Use Penn Treebank escapes")
ap.add_argument("-mccc", default=False, action="store_true", help="Special processing for McClosky-Charniak-Johnson parser input")
ap.add_argument("-sp", default=False, action="store_true", help="Special processing for Stanford parser+PTBEscapingProcessor input. (not necessary for Stanford Parser version 1.6.5 and newer)")
ap.add_argument("files", metavar="FILE", nargs="*", help="Files to tokenize.")
return ap
def main(argv):
import sys
import codecs
arg = __argparser().parse_args(argv[1:])
# sorry, the special cases are a bit of a mess
ptb_escaping, use_single_quotes_only, escape_token_internal_parens = False, False, False
if arg.ptb:
ptb_escaping = True
if arg.mccc:
ptb_escaping = True
# current version of McCCJ has trouble with double quotes
use_single_quotes_only = True
if arg.sp:
# current version of Stanford parser PTBEscapingProcessor
# doesn't correctly escape word-internal parentheses
escape_token_internal_parens = True
# for testing, read stdin if no args
if len(arg.files) == 0:
arg.files.append('/dev/stdin')
for fn in arg.files:
try:
with codecs.open(fn, encoding=INPUT_ENCODING) as f:
for l in f:
t = tokenize(l, ptb_escaping=ptb_escaping,
use_single_quotes_only=use_single_quotes_only,
escape_token_internal_parens=escape_token_internal_parens)
sys.stdout.write(t.encode(OUTPUT_ENCODING))
except Exception, e:
print >> sys.stderr, "Failed to read", fn, ":", e
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv))
|
isc
| 1,279,164,132,318,494,200
| 37.704403
| 197
| 0.576129
| false
| 3.052579
| false
| false
| false
|
openmips/stbgui
|
lib/python/Components/Harddisk.py
|
1
|
27772
|
import os
import time
from Tools.CList import CList
from SystemInfo import SystemInfo
from Components.Console import Console
from boxbranding import getBoxType, getMachineBuild
import Task
def readFile(filename):
file = open(filename)
data = file.read().strip()
file.close()
return data
def getProcMounts():
try:
mounts = open("/proc/mounts", 'r')
except IOError, ex:
print "[Harddisk] Failed to open /proc/mounts", ex
return []
result = [line.strip().split(' ') for line in mounts]
for item in result:
# Spaces are encoded as \040 in mounts
item[1] = item[1].replace('\\040', ' ')
return result
def isFileSystemSupported(filesystem):
try:
for fs in open('/proc/filesystems', 'r'):
if fs.strip().endswith(filesystem):
return True
return False
except Exception, ex:
print "[Harddisk] Failed to read /proc/filesystems:", ex
def findMountPoint(path):
'Example: findMountPoint("/media/hdd/some/file") returns "/media/hdd"'
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
DEVTYPE_UDEV = 0
DEVTYPE_DEVFS = 1
class Harddisk:
def __init__(self, device, removable=False):
self.device = device
if os.access("/dev/.udev", 0) or os.access("/run/udev/data", 0):
self.type = DEVTYPE_UDEV
elif os.access("/dev/.devfsd", 0):
self.type = DEVTYPE_DEVFS
else:
print "[Harddisk] Unable to determine structure of /dev"
self.card = False
self.max_idle_time = 0
self.idle_running = False
self.last_access = time.time()
self.last_stat = 0
self.timer = None
self.is_sleeping = False
self.dev_path = ''
self.disk_path = ''
self.mount_path = None
self.mount_device = None
self.phys_path = os.path.realpath(self.sysfsPath('device'))
self.removable = removable
self.internal = "pci" in self.phys_path or "ahci" in self.phys_path or "sata" in self.phys_path
try:
data = open("/sys/block/%s/queue/rotational" % device, "r").read().strip()
self.rotational = int(data)
except:
self.rotational = True
if self.type == DEVTYPE_UDEV:
self.dev_path = '/dev/' + self.device
self.disk_path = self.dev_path
self.card = "sdhci" in self.phys_path
elif self.type == DEVTYPE_DEVFS:
tmp = readFile(self.sysfsPath('dev')).split(':')
s_major = int(tmp[0])
s_minor = int(tmp[1])
for disc in os.listdir("/dev/discs"):
dev_path = os.path.realpath('/dev/discs/' + disc)
disk_path = dev_path + '/disc'
try:
rdev = os.stat(disk_path).st_rdev
except OSError:
continue
if s_major == os.major(rdev) and s_minor == os.minor(rdev):
self.dev_path = dev_path
self.disk_path = disk_path
break
self.card = self.device[:2] == "hd" and "host0" not in self.dev_path
print "[Harddisk] new device", self.device, '->', self.dev_path, '->', self.disk_path
if not removable and not self.card:
self.startIdle()
def __lt__(self, ob):
return self.device < ob.device
def partitionPath(self, n):
if self.type == DEVTYPE_UDEV:
if self.dev_path.startswith('/dev/mmcblk0'):
return self.dev_path + "p" + n
else:
return self.dev_path + n
elif self.type == DEVTYPE_DEVFS:
return self.dev_path + '/part' + n
def sysfsPath(self, filename):
return os.path.join('/sys/block/', self.device, filename)
def stop(self):
if self.timer:
self.timer.stop()
self.timer.callback.remove(self.runIdle)
def bus(self):
ret = _("External")
# SD/MMC(F1 specific)
if self.type == DEVTYPE_UDEV:
type_name = " (SD/MMC)"
# CF(7025 specific)
elif self.type == DEVTYPE_DEVFS:
type_name = " (CF)"
if self.card:
ret += type_name
else:
if self.internal:
ret = _("Internal")
if not self.rotational:
ret += " (SSD)"
return ret
def diskSize(self):
cap = 0
try:
line = readFile(self.sysfsPath('size'))
cap = int(line)
return cap / 1000 * 512 / 1000
except:
dev = self.findMount()
if dev:
try:
stat = os.statvfs(dev)
cap = int(stat.f_blocks * stat.f_bsize)
return cap / 1000 / 1000
except:
pass
return cap
def capacity(self):
cap = self.diskSize()
if cap == 0:
return ""
if cap < 1000:
return _("%03d MB") % cap
return _("%d.%03d GB") % (cap/1000, cap%1000)
def model(self):
try:
if self.device[:2] == "hd":
return readFile('/proc/ide/' + self.device + '/model')
elif self.device[:2] == "sd":
vendor = readFile(self.sysfsPath('device/vendor'))
model = readFile(self.sysfsPath('device/model'))
return vendor + '(' + model + ')'
elif self.device.startswith('mmcblk0'):
return readFile(self.sysfsPath('device/name'))
else:
raise Exception, "[Harddisk] no hdX or sdX or mmcX"
except Exception, e:
print "[Harddisk] Failed to get model:", e
return "-?-"
def free(self):
dev = self.findMount()
if dev:
stat = os.statvfs(dev)
return (stat.f_bfree/1000) * (stat.f_bsize/1024)
return -1
def numPartitions(self):
numPart = -1
if self.type == DEVTYPE_UDEV:
try:
devdir = os.listdir('/dev')
except OSError:
return -1
for filename in devdir:
if filename.startswith(self.device):
numPart += 1
elif self.type == DEVTYPE_DEVFS:
try:
idedir = os.listdir(self.dev_path)
except OSError:
return -1
for filename in idedir:
if filename.startswith("disc"):
numPart += 1
if filename.startswith("part"):
numPart += 1
return numPart
def mountDevice(self):
for parts in getProcMounts():
if os.path.realpath(parts[0]).startswith(self.dev_path):
self.mount_device = parts[0]
self.mount_path = parts[1]
return parts[1]
return None
def enumMountDevices(self):
for parts in getProcMounts():
if os.path.realpath(parts[0]).startswith(self.dev_path):
yield parts[1]
def findMount(self):
if self.mount_path is None:
return self.mountDevice()
return self.mount_path
def unmount(self):
dev = self.mountDevice()
if dev is None:
# not mounted, return OK
return 0
cmd = 'umount ' + dev
print "[Harddisk] ", cmd
res = os.system(cmd)
return (res >> 8)
def createPartition(self):
cmd = 'printf "8,\n;0,0\n;0,0\n;0,0\ny\n" | sfdisk -f -uS ' + self.disk_path
res = os.system(cmd)
return (res >> 8)
def mkfs(self):
# No longer supported, use createInitializeJob instead
return 1
def mount(self):
# try mounting through fstab first
if self.mount_device is None:
dev = self.partitionPath("1")
else:
# if previously mounted, use the same spot
dev = self.mount_device
try:
fstab = open("/etc/fstab")
lines = fstab.readlines()
fstab.close()
except IOError:
return -1
for line in lines:
parts = line.strip().split(" ")
fspath = os.path.realpath(parts[0])
if fspath == dev:
print "[Harddisk] mounting:", fspath
cmd = "mount -t auto " + fspath
res = os.system(cmd)
return (res >> 8)
# device is not in fstab
res = -1
if self.type == DEVTYPE_UDEV:
# we can let udev do the job, re-read the partition table
res = os.system('hdparm -z ' + self.disk_path)
# give udev some time to make the mount, which it will do asynchronously
from time import sleep
sleep(3)
return (res >> 8)
def fsck(self):
# No longer supported, use createCheckJob instead
return 1
def killPartitionTable(self):
zero = 512 * '\0'
h = open(self.dev_path, 'wb')
# delete first 9 sectors, which will likely kill the first partition too
for i in range(9):
h.write(zero)
h.close()
def killPartition(self, n):
zero = 512 * '\0'
part = self.partitionPath(n)
h = open(part, 'wb')
for i in range(3):
h.write(zero)
h.close()
def createInitializeJob(self):
job = Task.Job(_("Initializing storage device..."))
size = self.diskSize()
print "[Harddisk] size: %s MB" % size
task = UnmountTask(job, self)
task = Task.PythonTask(job, _("Removing partition table"))
task.work = self.killPartitionTable
task.weighting = 1
task = Task.LoggingTask(job, _("Rereading partition table"))
task.weighting = 1
task.setTool('hdparm')
task.args.append('-z')
task.args.append(self.disk_path)
task = Task.ConditionTask(job, _("Waiting for partition"), timeoutCount=20)
task.check = lambda: not os.path.exists(self.partitionPath("1"))
task.weighting = 1
if os.path.exists('/usr/sbin/parted'):
use_parted = True
else:
if size > 2097151:
addInstallTask(job, 'parted')
use_parted = True
else:
use_parted = False
task = Task.LoggingTask(job, _("Creating partition"))
task.weighting = 5
if use_parted:
task.setTool('parted')
if size < 1024:
# On very small devices, align to block only
alignment = 'min'
else:
# Prefer optimal alignment for performance
alignment = 'opt'
if size > 2097151:
parttype = 'gpt'
else:
parttype = 'msdos'
task.args += ['-a', alignment, '-s', self.disk_path, 'mklabel', parttype, 'mkpart', 'primary', '0%', '100%']
else:
task.setTool('sfdisk')
task.args.append('-f')
task.args.append('-uS')
task.args.append(self.disk_path)
if size > 128000:
# Start at sector 8 to better support 4k aligned disks
print "[Harddisk] Detected >128GB disk, using 4k alignment"
task.initial_input = "8,,L\n;0,0\n;0,0\n;0,0\ny\n"
else:
# Smaller disks (CF cards, sticks etc) don't need that
task.initial_input = ",,L\n;\n;\n;\ny\n"
task = Task.ConditionTask(job, _("Waiting for partition"))
task.check = lambda: os.path.exists(self.partitionPath("1"))
task.weighting = 1
task = MkfsTask(job, _("Creating filesystem"))
big_o_options = ["dir_index"]
if isFileSystemSupported("ext4"):
task.setTool("mkfs.ext4")
else:
task.setTool("mkfs.ext3")
if size > 250000:
# No more than 256k i-nodes (prevent problems with fsck memory requirements)
task.args += ["-T", "largefile", "-N", "262144"]
big_o_options.append("sparse_super")
elif size > 16384:
# between 16GB and 250GB: 1 i-node per megabyte
task.args += ["-T", "largefile"]
big_o_options.append("sparse_super")
elif size > 2048:
# Over 2GB: 32 i-nodes per megabyte
task.args += ["-T", "largefile", "-N", str(size * 32)]
task.args += ["-m0", "-O", ",".join(big_o_options), self.partitionPath("1")]
task = MountTask(job, self)
task.weighting = 3
task = Task.ConditionTask(job, _("Waiting for mount"), timeoutCount=20)
task.check = self.mountDevice
task.weighting = 1
return job
def initialize(self):
# no longer supported
return -5
def check(self):
# no longer supported
return -5
def createCheckJob(self):
job = Task.Job(_("Checking filesystem..."))
if self.findMount():
# Create unmount task if it was not mounted
UnmountTask(job, self)
dev = self.mount_device
else:
# otherwise, assume there is one partition
dev = self.partitionPath("1")
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext3')
task.args.append('-f')
task.args.append('-p')
task.args.append(dev)
MountTask(job, self)
task = Task.ConditionTask(job, _("Waiting for mount"))
task.check = self.mountDevice
return job
def createExt4ConversionJob(self):
if not isFileSystemSupported('ext4'):
raise Exception, _("[Harddisk] You system does not support ext4")
job = Task.Job(_("Converting ext3 to ext4..."))
if not os.path.exists('/sbin/tune2fs'):
addInstallTask(job, 'e2fsprogs-tune2fs')
if self.findMount():
# Create unmount task if it was not mounted
UnmountTask(job, self)
dev = self.mount_device
else:
# otherwise, assume there is one partition
dev = self.partitionPath("1")
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext3')
task.args.append('-p')
task.args.append(dev)
task = Task.LoggingTask(job, "tune2fs")
task.setTool('tune2fs')
task.args.append('-O')
task.args.append('extents,uninit_bg,dir_index')
task.args.append('-o')
task.args.append('journal_data_writeback')
task.args.append(dev)
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext4')
task.postconditions = [] # ignore result, it will always "fail"
task.args.append('-f')
task.args.append('-p')
task.args.append('-D')
task.args.append(dev)
MountTask(job, self)
task = Task.ConditionTask(job, _("Waiting for mount"))
task.check = self.mountDevice
return job
def getDeviceDir(self):
return self.dev_path
def getDeviceName(self):
return self.disk_path
# the HDD idle poll daemon.
# as some harddrives have a buggy standby timer, we are doing this by hand here.
# first, we disable the hardware timer. then, we check every now and then if
	# any access has been made to the disc. If there has been no access over a specified time,
# we set the hdd into standby.
def readStats(self):
try:
l = open("/sys/block/%s/stat" % self.device).read()
except IOError:
return -1,-1
data = l.split(None,5)
return (int(data[0]), int(data[4]))
def startIdle(self):
from enigma import eTimer
# disable HDD standby timer
if self.bus() == _("External"):
Console().ePopen(("sdparm", "sdparm", "--set=SCT=0", self.disk_path))
else:
Console().ePopen(("hdparm", "hdparm", "-S0", self.disk_path))
self.timer = eTimer()
self.timer.callback.append(self.runIdle)
self.idle_running = True
self.setIdleTime(self.max_idle_time) # kick the idle polling loop
def runIdle(self):
if not self.max_idle_time:
return
t = time.time()
idle_time = t - self.last_access
stats = self.readStats()
l = sum(stats)
if l != self.last_stat and l >= 0: # access
self.last_stat = l
self.last_access = t
idle_time = 0
self.is_sleeping = False
if idle_time >= self.max_idle_time and not self.is_sleeping:
self.setSleep()
self.is_sleeping = True
def setSleep(self):
if self.bus() == _("External"):
Console().ePopen(("sdparm", "sdparm", "--flexible", "--readonly", "--command=stop", self.disk_path))
else:
Console().ePopen(("hdparm", "hdparm", "-y", self.disk_path))
def setIdleTime(self, idle):
self.max_idle_time = idle
if self.idle_running:
if not idle:
self.timer.stop()
else:
self.timer.start(idle * 100, False) # poll 10 times per period.
def isSleeping(self):
return self.is_sleeping
class Partition:
# for backward compatibility, force_mounted actually means "hotplug"
def __init__(self, mountpoint, device = None, description = "", force_mounted = False):
self.mountpoint = mountpoint
self.description = description
self.force_mounted = mountpoint and force_mounted
self.is_hotplug = force_mounted # so far; this might change.
self.device = device
def __str__(self):
return "Partition(mountpoint=%s,description=%s,device=%s)" % (self.mountpoint,self.description,self.device)
def stat(self):
if self.mountpoint:
return os.statvfs(self.mountpoint)
else:
raise OSError, "Device %s is not mounted" % self.device
def free(self):
try:
s = self.stat()
return s.f_bavail * s.f_bsize
except OSError:
return None
def total(self):
try:
s = self.stat()
return s.f_blocks * s.f_bsize
except OSError:
return None
def tabbedDescription(self):
if self.mountpoint.startswith('/media/net') or self.mountpoint.startswith('/media/autofs'):
# Network devices have a user defined name
return self.description
return self.description + '\t' + self.mountpoint
def mounted(self, mounts = None):
# THANK YOU PYTHON FOR STRIPPING AWAY f_fsid.
# TODO: can os.path.ismount be used?
if self.force_mounted:
return True
if self.mountpoint:
if mounts is None:
mounts = getProcMounts()
for parts in mounts:
if self.mountpoint.startswith(parts[1]): # use startswith so a mount not ending with '/' is also detected.
return True
return False
def filesystem(self, mounts = None):
if self.mountpoint:
if mounts is None:
mounts = getProcMounts()
for fields in mounts:
if self.mountpoint.endswith('/') and not self.mountpoint == '/':
if fields[1] + '/' == self.mountpoint:
return fields[2]
else:
if fields[1] == self.mountpoint:
return fields[2]
return ''
def addInstallTask(job, package):
task = Task.LoggingTask(job, "update packages")
task.setTool('opkg')
task.args.append('update')
task = Task.LoggingTask(job, "Install " + package)
task.setTool('opkg')
task.args.append('install')
task.args.append(package)
class HarddiskManager:
def __init__(self):
self.hdd = [ ]
self.cd = ""
self.partitions = [ ]
self.devices_scanned_on_init = [ ]
self.on_partition_list_change = CList()
self.enumerateBlockDevices()
# Find stuff not detected by the enumeration
p = (
("/media/hdd", _("Hard disk")),
("/media/card", _("Card")),
("/media/cf", _("Compact flash")),
("/media/mmc1", _("MMC card")),
("/media/net", _("Network mount")),
("/media/net1", _("Network mount %s") % ("1")),
("/media/net2", _("Network mount %s") % ("2")),
("/media/net3", _("Network mount %s") % ("3")),
("/media/ram", _("Ram disk")),
("/media/usb", _("USB stick")),
("/", _("Internal flash"))
)
known = set([os.path.normpath(a.mountpoint) for a in self.partitions if a.mountpoint])
for m,d in p:
if (m not in known) and os.path.ismount(m):
self.partitions.append(Partition(mountpoint=m, description=d))
def getBlockDevInfo(self, blockdev):
devpath = "/sys/block/" + blockdev
error = False
removable = False
blacklisted = False
is_cdrom = False
partitions = []
try:
if os.path.exists(devpath + "/removable"):
removable = bool(int(readFile(devpath + "/removable")))
if os.path.exists(devpath + "/dev"):
dev = int(readFile(devpath + "/dev").split(':')[0])
else:
dev = None
blacklisted = dev in [1, 7, 31, 253, 254] + (SystemInfo["HasMMC"] and [179] or []) #ram, loop, mtdblock, romblock, ramzswap, mmc
if blockdev[0:2] == 'sr':
is_cdrom = True
if blockdev[0:2] == 'hd':
try:
media = readFile("/proc/ide/%s/media" % blockdev)
if "cdrom" in media:
is_cdrom = True
except IOError:
error = True
# check for partitions
if not is_cdrom and os.path.exists(devpath):
for partition in os.listdir(devpath):
if partition[0:len(blockdev)] != blockdev:
continue
partitions.append(partition)
else:
self.cd = blockdev
except IOError:
error = True
# check for medium
medium_found = True
try:
open("/dev/" + blockdev).close()
except IOError, err:
if err.errno == 159: # no medium present
medium_found = False
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def enumerateBlockDevices(self):
print "[Harddisk] enumerating block devices..."
for blockdev in os.listdir("/sys/block"):
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.addHotplugPartition(blockdev)
if not error and not blacklisted and medium_found:
for part in partitions:
self.addHotplugPartition(part)
self.devices_scanned_on_init.append((blockdev, removable, is_cdrom, medium_found))
def getAutofsMountpoint(self, device):
r = self.getMountpoint(device)
if r is None:
return "/media/" + device
return r
def getMountpoint(self, device):
dev = "/dev/%s" % device
for item in getProcMounts():
if item[0] == dev:
return item[1]
return None
def addHotplugPartition(self, device, physdev = None):
# device is the device name, without /dev
# physdev is the physical device path, which we (might) use to determine the userfriendly name
if not physdev:
dev, part = self.splitDeviceName(device)
try:
physdev = os.path.realpath('/sys/block/' + dev + '/device')[4:]
except OSError:
physdev = dev
print "[Harddisk] couldn't determine blockdev physdev for device", device
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.getBlockDevInfo(device)
if not blacklisted and medium_found:
description = self.getUserfriendlyDeviceName(device, physdev)
p = Partition(mountpoint = self.getMountpoint(device), description = description, force_mounted = True, device = device)
self.partitions.append(p)
if p.mountpoint: # Plugins won't expect unmounted devices
self.on_partition_list_change("add", p)
# see if this is a harddrive
l = len(device)
if l and (not device[l-1].isdigit() or device == 'mmcblk0'):
self.hdd.append(Harddisk(device, removable))
self.hdd.sort()
SystemInfo["Harddisk"] = True
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def addHotplugAudiocd(self, device, physdev = None):
# device is the device name, without /dev
# physdev is the physical device path, which we (might) use to determine the userfriendly name
if not physdev:
dev, part = self.splitDeviceName(device)
try:
physdev = os.path.realpath('/sys/block/' + dev + '/device')[4:]
except OSError:
physdev = dev
print "couldn't determine blockdev physdev for device", device
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.getBlockDevInfo(device)
if not blacklisted and medium_found:
description = self.getUserfriendlyDeviceName(device, physdev)
p = Partition(mountpoint = "/media/audiocd", description = description, force_mounted = True, device = device)
self.partitions.append(p)
self.on_partition_list_change("add", p)
SystemInfo["Harddisk"] = False
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def removeHotplugPartition(self, device):
for x in self.partitions[:]:
if x.device == device:
self.partitions.remove(x)
if x.mountpoint: # Plugins won't expect unmounted devices
self.on_partition_list_change("remove", x)
l = len(device)
if l and not device[l-1].isdigit():
for hdd in self.hdd:
if hdd.device == device:
hdd.stop()
self.hdd.remove(hdd)
break
SystemInfo["Harddisk"] = len(self.hdd) > 0
def HDDCount(self):
return len(self.hdd)
def HDDList(self):
list = [ ]
for hd in self.hdd:
hdd = hd.model() + " - " + hd.bus()
cap = hd.capacity()
if cap != "":
hdd += " (" + cap + ")"
list.append((hdd, hd))
return list
def getCD(self):
return self.cd
def getMountedPartitions(self, onlyhotplug = False, mounts=None):
if mounts is None:
mounts = getProcMounts()
parts = [x for x in self.partitions if (x.is_hotplug or not onlyhotplug) and x.mounted(mounts)]
devs = set([x.device for x in parts])
for devname in devs.copy():
if not devname:
continue
dev, part = self.splitDeviceName(devname)
if part and dev in devs: # if this is a partition and we still have the wholedisk, remove wholedisk
devs.remove(dev)
# return all devices which are not removed due to being a wholedisk when a partition exists
return [x for x in parts if not x.device or x.device in devs]
def splitDeviceName(self, devname):
# this works for: sdaX, hdaX, sr0 (which is in fact dev="sr0", part=""). It doesn't work for other names like mtdblock3, but they are blacklisted anyway.
dev = devname[:3]
part = devname[3:]
for p in part:
if not p.isdigit():
return devname, 0
return dev, part and int(part) or 0
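	# For illustration, splitDeviceName is expected to behave roughly like:
	#     splitDeviceName("sda1")      -> ("sda", 1)
	#     splitDeviceName("sr0")       -> ("sr0", 0)
	#     splitDeviceName("mtdblock3") -> ("mtdblock3", 0)  # non-digit suffix, returned whole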
def getUserfriendlyDeviceName(self, dev, phys):
print "[Harddisk] device: ", dev
print "[Harddisk] physical: ", phys
dev, part = self.splitDeviceName(dev)
description = _("External Storage %s") % dev
try:
description = readFile("/sys" + phys + "/model")
except IOError, s:
print "[Harddisk] couldn't read model: ", s
# not wholedisk and not partition 1
if part and part != 1:
description += _(" (Partition %d)") % part
print "[Harddisk] description: ", description
return description
def addMountedPartition(self, device, desc):
for x in self.partitions:
if x.mountpoint == device:
#already_mounted
return
self.partitions.append(Partition(mountpoint=device, description=desc))
def removeMountedPartition(self, mountpoint):
for x in self.partitions[:]:
if x.mountpoint == mountpoint:
self.partitions.remove(x)
self.on_partition_list_change("remove", x)
def setDVDSpeed(self, device, speed = 0):
ioctl_flag=int(0x5322)
if not device.startswith('/'):
device = "/dev/" + device
try:
from fcntl import ioctl
cd = open(device)
ioctl(cd.fileno(), ioctl_flag, speed)
cd.close()
except Exception, ex:
print "[Harddisk] Failed to set %s speed to %s" % (device, speed), ex
class UnmountTask(Task.LoggingTask):
def __init__(self, job, hdd):
Task.LoggingTask.__init__(self, job, _("Unmount"))
self.hdd = hdd
self.mountpoints = []
def prepare(self):
try:
dev = self.hdd.disk_path.split('/')[-1]
open('/dev/nomount.%s' % dev, "wb").close()
except Exception, e:
print "[Harddisk] ERROR: Failed to create /dev/nomount file:", e
self.setTool('umount')
self.args.append('-f')
for dev in self.hdd.enumMountDevices():
self.args.append(dev)
self.postconditions.append(Task.ReturncodePostcondition())
self.mountpoints.append(dev)
if not self.mountpoints:
print "[Harddisk] UnmountTask: No mountpoints found?"
self.cmd = 'true'
self.args = [self.cmd]
def afterRun(self):
for path in self.mountpoints:
try:
os.rmdir(path)
except Exception, ex:
print "[Harddisk] Failed to remove path '%s':" % path, ex
class MountTask(Task.LoggingTask):
def __init__(self, job, hdd):
Task.LoggingTask.__init__(self, job, _("Mount"))
self.hdd = hdd
def prepare(self):
try:
dev = self.hdd.disk_path.split('/')[-1]
os.unlink('/dev/nomount.%s' % dev)
except Exception, e:
print "[Harddisk] ERROR: Failed to remove /dev/nomount file:", e
# try mounting through fstab first
if self.hdd.mount_device is None:
dev = self.hdd.partitionPath("1")
else:
# if previously mounted, use the same spot
dev = self.hdd.mount_device
fstab = open("/etc/fstab")
lines = fstab.readlines()
fstab.close()
for line in lines:
parts = line.strip().split(" ")
fspath = os.path.realpath(parts[0])
if os.path.realpath(fspath) == dev:
self.setCmdline("mount -t auto " + fspath)
self.postconditions.append(Task.ReturncodePostcondition())
return
# device is not in fstab
if self.hdd.type == DEVTYPE_UDEV:
# we can let udev do the job, re-read the partition table
# Sorry for the sleep 2 hack...
self.setCmdline('sleep 2; hdparm -z ' + self.hdd.disk_path)
self.postconditions.append(Task.ReturncodePostcondition())
class MkfsTask(Task.LoggingTask):
def prepare(self):
self.fsck_state = None
def processOutput(self, data):
print "[Mkfs]", data
if 'Writing inode tables:' in data:
self.fsck_state = 'inode'
elif 'Creating journal' in data:
self.fsck_state = 'journal'
self.setProgress(80)
elif 'Writing superblocks ' in data:
self.setProgress(95)
elif self.fsck_state == 'inode':
if '/' in data:
try:
d = data.strip(' \x08\r\n').split('/',1)
if '\x08' in d[1]:
d[1] = d[1].split('\x08',1)[0]
self.setProgress(80*int(d[0])/int(d[1]))
except Exception, e:
print "[Mkfs] E:", e
				return # don't log the progress
self.log.append(data)
harddiskmanager = HarddiskManager()
def isSleepStateDevice(device):
ret = os.popen("hdparm -C %s" % device).read()
if 'SG_IO' in ret or 'HDIO_DRIVE_CMD' in ret:
return None
if 'drive state is: standby' in ret or 'drive state is: idle' in ret:
return True
elif 'drive state is: active/idle' in ret:
return False
return None
def internalHDDNotSleeping(external=False):
state = False
if harddiskmanager.HDDCount():
for hdd in harddiskmanager.HDDList():
if hdd[1].internal or external:
if hdd[1].idle_running and hdd[1].max_idle_time and not hdd[1].isSleeping():
state = True
return state
SystemInfo["ext4"] = isFileSystemSupported("ext4")
|
gpl-2.0
| 3,402,724,838,510,467,000
| 28.67094
| 155
| 0.662214
| false
| 2.941015
| false
| false
| false
|
localmed/django-assetfiles
|
assetfiles/filters/sass.py
|
1
|
4949
|
from __future__ import unicode_literals
import os
from django.conf import settings
from django.contrib.staticfiles.finders import find
from assetfiles.filters import BaseFilter, CommandMixin, ExtensionMixin
import assetfiles.settings
from assetfiles.exceptions import SassFilterError
class SassFilter(ExtensionMixin, CommandMixin, BaseFilter):
"""
Filters Sass files into CSS.
Attributes:
sass_path: The full path to the Sass command. This defaults to a
customized binstub that allows for better Bundler integration.
functions_path: The full path to the Sass extension functions for
Django integration. Set to None or False to bypass adding
these functions.
"""
SCRIPTS_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../scripts'))
input_exts = ('sass', 'scss')
output_ext = 'css'
sass_path = 'sass'
sass_env_path = os.path.join(SCRIPTS_PATH, 'sass_env.rb')
sass_functions_path = os.path.join(SCRIPTS_PATH, 'sass_functions.rb')
def __init__(self, options=None, *args, **kwargs):
super(SassFilter, self).__init__(*args, **kwargs)
if options is None:
options = {}
sass_options = assetfiles.settings.SASS_OPTIONS
self.sass_path = options.get(
'sass_path',
sass_options.get('sass_path', self.sass_path)
)
self.sass_env_path = options.get(
'sass_env_path',
sass_options.get('sass_env_path', self.sass_env_path)
)
self.sass_functions_path = options.get(
'sass_functions_path',
sass_options.get('sass_functions_path', self.sass_functions_path)
)
options['compass'] = options.get(
'compass',
sass_options.get('compass', self._detect_compass())
)
for option in ('style', 'precision', 'quiet', 'debug_info',
'line_numbers', 'cache_location', 'no_cache'):
if option not in options:
options[option] = sass_options.get(option)
options['require'] = (
sass_options.get('require', []) +
options.get('require', [])
)
if self.sass_functions_path:
options['require'].insert(0, self.sass_functions_path)
if self.sass_env_path:
options['require'].insert(0, self.sass_env_path)
options['load_paths'] = (
sass_load_paths +
sass_options.get('load_paths', []) +
options.get('load_paths', [])
)
self.options = options
def filter(self, input):
command = '{command} {args} {input}'.format(
command=self.sass_path,
args=self._build_args(),
input=self.format_option_value(input),
)
return self.run_command(
command,
extra_env={'DJANGO_STATIC_URL': settings.STATIC_URL},
exception_type=SassFilterError
)
def is_filterable(self, output_path):
"""
Skips files prefixed with a '_'. These are Sass dependencies.
"""
_, file_name = os.path.split(output_path)
return not file_name.startswith('_')
def _build_args(self):
"""
Returns a list of arguments for the Sass command.
"""
args = []
args += self.format_option_array('require', self.options['require'])
args += self.format_option_array('load_path', self.options['load_paths'])
value_options = ('style', 'precision', 'cache_location')
for option in value_options:
if self.options[option]:
args.append(self.format_option(option, self.options[option]))
bool_options = ('quiet', 'compass', 'debug_info',
'line_numbers', 'no_cache')
for option in bool_options:
if self.options[option]:
args.append(self.format_option_name(option))
return ' '.join(args)
def _detect_compass(self):
"""
Returns true if Compass integration is available.
"""
        return os.system('which compass > /dev/null') == 0
def get_static_sass_dirs(dirs=None):
"""
Returns the directories with Sass files within the static directories.
Args:
dirs: A list or tuple of directory names that contain Sass files.
Can be configured with the ASSETFILES_SASS_DIRS setting, which by
default is `('css',)`
Returns:
A list of directory paths containing Sass files.
"""
if not dirs:
dirs = assetfiles.settings.SASS_DIRS
load_paths = []
for dir in dirs:
load_paths += find(dir, all=True) or []
return load_paths
"""
Directories that will be added to the Sass load path.
By default, these are 'css' directories within the static directories.
"""
sass_load_paths = get_static_sass_dirs()
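# A rough usage sketch (the option values and paths below are assumed for
# illustration; in practice the filter is driven by assetfiles' staticfiles
# integration rather than called directly):
#
#     sass_filter = SassFilter(options={'style': 'compressed'})
#     if sass_filter.is_filterable('css/site.scss'):
#         css = sass_filter.filter('/path/to/static/css/site.scss')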
|
mit
| 3,942,540,000,699,276,300
| 31.136364
| 81
| 0.589614
| false
| 3.887667
| false
| false
| false
|
greenbender/pynntp
|
nntp/iodict.py
|
1
|
3453
|
"""
Case-insensitive ordered dictionary (useful for headers).
Copyright (C) 2013-2020 Byron Platt
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from collections import OrderedDict, namedtuple
try:
from collections.abc import MutableMapping, Mapping
except ImportError:
from collections import MutableMapping, Mapping
from .polyfill import cached_property
__all__ = ['IODict']
class IKey(object):
def __init__(self, orig):
self.orig = orig
@classmethod
def _uncase(cls, value):
if hasattr(value, 'casefold'):
return value.casefold()
if hasattr(value, 'lower'):
return value.lower()
if isinstance(value, tuple):
return tuple(cls._uncase(v) for v in value)
return value
@cached_property
def value(self):
return self._uncase(self.orig)
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if not isinstance(other, IKey):
return self == IKey(other)
return self.value == other.value
def __repr__(self):
return repr(self.orig)
def __str__(self):
return str(self.orig)
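# For illustration, IKey compares case-insensitively against plain values and
# other IKeys, and uncases tuple keys element-wise, roughly:
#
#     >>> IKey('AbC') == 'abc'
#     True
#     >>> IKey(('GHi', 'JKL')) == ('ghi', 'jkl')
#     True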
class IODict(MutableMapping):
"""Case in-sensitive ordered dictionary.
>>> iod = IODict([('ABC', 1), ('DeF', 'A'), (('gHi', 'jkl', 20), 'b')])
>>> iod['ABC'], iod['abc'], iod['aBc']
(1, 1, 1)
>>> iod['DeF'], iod['def'], iod['dEf']
('A', 'A', 'A')
>>> iod[('gHi', 'jkl', 20)], iod[('ghi', 'jKL', 20)]
('b', 'b')
>>> iod == {'aBc': 1, 'deF': 'A', ('Ghi', 'JKL', 20): 'b'}
True
>>> iod.popitem()
(('gHi', 'jkl', 20), 'b')
"""
def __init__(self, *args, **kwargs):
self.__proxy = OrderedDict()
for arg in args:
self.update(arg)
self.update(kwargs)
def __getitem__(self, key):
return self.__proxy[IKey(key)]
def __setitem__(self, key, value):
self.__proxy[IKey(key)] = value
def __delitem__(self, key):
del self.__proxy[IKey(key)]
def __iter__(self):
for key in self.__proxy:
yield key.orig
def __len__(self):
return len(self.__proxy)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
if not isinstance(other, IODict):
return self == IODict(other)
return self.__proxy == other.__proxy
def __repr__(self):
clsname = type(self).__name__
return '%s(%r)' % (clsname, list(self.__proxy.items()))
def keys(self):
for key in self.__proxy:
yield key.orig
def items(self):
for key in self.__proxy:
yield key.orig, self[key.orig]
def popitem(self):
key, value = self.__proxy.popitem()
return key.orig, value
if __name__ == "__main__":
import doctest
doctest.testmod()
|
gpl-3.0
| 4,563,081,144,591,528,400
| 25.976563
| 75
| 0.585578
| false
| 3.724919
| false
| false
| false
|
InsightSoftwareConsortium/ITKExamples
|
src/Filtering/MathematicalMorphology/ErodeAGrayscaleImage/Code.py
|
1
|
1553
|
#!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import itk
itk.auto_progress(2)
if len(sys.argv) != 4:
print("Usage: " + sys.argv[0] + " <inputImage> <outputImage> <radius>")
sys.exit(1)
inputImage = sys.argv[1]
outputImage = sys.argv[2]
radiusValue = int(sys.argv[3])
PixelType = itk.UC
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]
ReaderType = itk.ImageFileReader[ImageType]
reader = ReaderType.New()
reader.SetFileName(inputImage)
StructuringElementType = itk.FlatStructuringElement[Dimension]
structuringElement = StructuringElementType.Ball(radiusValue)
GrayscaleFilterType = itk.GrayscaleErodeImageFilter[
ImageType, ImageType, StructuringElementType
]
grayscaleFilter = GrayscaleFilterType.New()
grayscaleFilter.SetInput(reader.GetOutput())
grayscaleFilter.SetKernel(structuringElement)
WriterType = itk.ImageFileWriter[ImageType]
writer = WriterType.New()
writer.SetFileName(outputImage)
writer.SetInput(grayscaleFilter.GetOutput())
writer.Update()
|
apache-2.0
| 1,056,718,304,177,390,500
| 27.759259
| 75
| 0.773342
| false
| 3.413187
| false
| false
| false
|
mganeva/mantid
|
scripts/Muon/GUI/Common/muon_data_context.py
|
1
|
20168
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import Muon.GUI.Common.utilities.load_utils as load_utils
import Muon.GUI.Common.utilities.xml_utils as xml_utils
from Muon.GUI.Common.ADSHandler.muon_workspace_wrapper import MuonWorkspaceWrapper
from Muon.GUI.Common.muon_group import MuonGroup
from Muon.GUI.Common.muon_pair import MuonPair
from Muon.GUI.Common.muon_load_data import MuonLoadData
from Muon.GUI.Common.utilities.run_string_utils import run_list_to_string
from Muon.GUI.Common.ADSHandler.workspace_naming import (get_raw_data_workspace_name, get_group_data_workspace_name,
get_pair_data_workspace_name, get_base_data_directory,
get_raw_data_directory, get_group_data_directory,
get_pair_data_directory, get_group_asymmetry_name)
from Muon.GUI.Common.calculate_pair_and_group import calculate_group_data, calculate_pair_data, estimate_group_asymmetry_data
from Muon.GUI.Common.utilities.muon_file_utils import allowed_instruments
from collections import OrderedDict
from mantid.api import WorkspaceGroup
from mantid.kernel import ConfigServiceImpl, ConfigService
from Muon.GUI.Common.observer_pattern import Observable
def get_default_grouping(workspace, instrument, main_field_direction):
parameter_name = "Default grouping file"
if instrument == "MUSR" or instrument == 'CHRONUS':
parameter_name += " - " + main_field_direction
try:
if isinstance(workspace, WorkspaceGroup):
grouping_file = workspace[0].getInstrument().getStringParameter(parameter_name)[0]
else:
grouping_file = workspace.getInstrument().getStringParameter(parameter_name)[0]
except IndexError:
return [], []
instrument_directory = ConfigServiceImpl.Instance().getInstrumentDirectory()
filename = instrument_directory + grouping_file
new_groups, new_pairs, description = xml_utils.load_grouping_from_XML(filename)
return new_groups, new_pairs
def construct_empty_group(group_names, group_index=0):
"""
Create an empty MuonGroup appropriate for adding to the current grouping table.
"""
new_group_name = "group_" + str(group_index)
while new_group_name in group_names:
# modify the name until it is unique
group_index += 1
new_group_name = "group_" + str(group_index)
return MuonGroup(group_name=new_group_name, detector_ids=[1])
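# For illustration, construct_empty_group picks the first unused index, so
# roughly:
#
#     construct_empty_group(['group_0', 'group_1']).name  -> 'group_2'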
def construct_empty_pair(group_names, pair_names, pair_index=0):
"""
Create an empty MuonPair appropriate for adding to the current pairing table.
"""
new_pair_name = "pair_" + str(pair_index)
while new_pair_name in pair_names:
# modify the name until it is unique
pair_index += 1
new_pair_name = "pair_" + str(pair_index)
if len(group_names) == 1:
group1 = group_names[0]
group2 = group_names[0]
elif len(group_names) >= 2:
group1 = group_names[0]
group2 = group_names[1]
else:
group1 = None
group2 = None
return MuonPair(pair_name=new_pair_name,
forward_group_name=group1, backward_group_name=group2, alpha=1.0)
class MuonDataContext(object):
"""
The MuonContext is the core class for the MuonAnalysis 2 interface. It stores all the data and parameters used
in the interface and serves as the model part of the MVP design pattern for every widget in the interface.
By sharing a common instance of this class, the interface remains synchronized by use of the observer pattern to
    notify subscribers of changes, which will then respond by updating their view from this commonly shared model.
The actual processing of data occurs via this class (as it should as the model).
"""
# ADS base directory for all workspaces
base_directory = "Muon Data"
def __init__(self, load_data=MuonLoadData()):
"""
Currently, only a single run is loaded into the Home/Grouping tab at once. This is held in the _current_data
        member. The load widget may load multiple runs at once; these are stored in the _loaded_data member.
        Groups and Pairs associated with the current run are stored in _groups and _pairs as ordered dictionaries.
"""
self._groups = OrderedDict()
self._pairs = OrderedDict()
self._loaded_data = load_data
self._gui_variables = {'SummedPeriods': [1], 'SubtractedPeriods': []}
self._current_data = {"workspace": load_utils.empty_loaded_data()} # self.get_result(False)
self._current_runs = []
self._main_field_direction = ''
self._instrument = ConfigService.getInstrument().name() if ConfigService.getInstrument().name()\
in allowed_instruments else 'EMU'
self.instrumentNotifier = MuonDataContext.InstrumentNotifier(self)
self.message_notifier = MuonDataContext.MessageNotifier(self)
self.gui_variables_notifier = MuonDataContext.GuiVariablesNotifier(self)
def is_data_loaded(self):
return self._loaded_data.num_items() > 0
def is_multi_period(self):
return len(self.current_data["OutputWorkspace"]) > 1
@property
def current_data(self):
return self._current_data["workspace"]
@property
def instrument(self):
return self._instrument
@instrument.setter
def instrument(self, value):
ConfigService['default.instrument'] = value
self._instrument = value
self.main_field_direction = ''
self.set_groups_and_pairs_to_default()
self.instrumentNotifier.notify_subscribers(self._instrument)
@property
def current_run(self):
return self._current_data["run"]
@property
def run(self):
try:
# refer to the output of the loading widget (e.g. for co-adding)
runs = run_list_to_string(self.current_run)
except Exception:
# extract from sample logs
run_log = self.get_sample_log("run_number")
if run_log:
runs = run_log.value
else:
runs = 0
return runs
@property
def group_names(self):
return self._groups.keys()
@property
def pair_names(self):
return self._pairs.keys()
@property
def groups(self):
return self._groups
@property
def pairs(self):
return self._pairs
@property
def gui_variables(self):
return self._gui_variables
@property
def current_runs(self):
return self._current_runs
@current_runs.setter
def current_runs(self, value):
if not self.check_run_list_are_all_same_field(value):
self.message_notifier.notify_subscribers(self.create_multiple_field_directions_error_message(value))
self._current_runs = value
@property
def current_filenames(self):
current_filenames = []
for run in self.current_runs:
if self._loaded_data.get_data(run=run, instrument=self.instrument):
current_filenames.append(self._loaded_data.get_data(run=run, instrument=self.instrument)['filename'])
return current_filenames
@property
def current_workspaces(self):
current_workspaces = []
for run in self.current_runs:
current_workspaces.append(self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace'])
return current_workspaces
@property
def first_good_data(self):
if self.gui_variables['FirstGoodDataFromFile']:
return self.loaded_data(self.current_runs[-1])["FirstGoodData"]
else:
return self.gui_variables['FirstGoodData']
def add_group(self, group):
assert isinstance(group, MuonGroup)
if self.check_group_contains_valid_detectors(group):
self._groups[group.name] = group
else:
raise ValueError('Invalid detectors in group {}'.format(group.name))
def add_pair(self, pair):
assert isinstance(pair, MuonPair)
self._pairs[pair.name] = pair
def update_current_data(self):
# Update the current data; resetting the groups and pairs to their default values
if len(self.current_runs) > 0:
self._current_data = self._loaded_data.get_data(run=self.current_runs[0], instrument=self.instrument)
self.main_field_direction = self.current_data['MainFieldDirection']
if not self.groups:
self.set_groups_and_pairs_to_default()
else:
self._current_data = {"workspace": load_utils.empty_loaded_data()}
def loaded_data(self, run):
loaded_dict = self._loaded_data.get_data(run=run, instrument=self.instrument)
if loaded_dict:
return self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']
else:
return None
@property
def loaded_workspace(self):
return self.current_data["OutputWorkspace"][0].workspace
def loaded_workspace_as_group(self, run):
if self.is_multi_period():
workspace_group = WorkspaceGroup()
for workspace_wrapper in self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']['OutputWorkspace']:
workspace_group.addWorkspace(workspace_wrapper.workspace)
return workspace_group
else:
return self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']['OutputWorkspace'][0].workspace
def period_string(self, run=None):
summed_periods = self.gui_variables["SummedPeriods"] if 'SummedPeriods' in self.gui_variables else [1]
subtracted_periods = self.gui_variables["SubtractedPeriods"] if 'SubtractedPeriods' in self.gui_variables else []
if subtracted_periods:
return '+'.join([str(period) for period in summed_periods]) + '-' + '-'.join([str(period) for period in subtracted_periods])
else:
return '+'.join([str(period) for period in summed_periods])
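    # For illustration, with gui_variables {'SummedPeriods': [1, 2], 'SubtractedPeriods': [3]}
    # period_string() is expected to return '1+2-3'; with no subtracted periods, just '1+2'.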
@property
def num_detectors(self):
try:
n_det = self.loaded_workspace.detectorInfo().size()
except AttributeError:
# default to 1
n_det = 1
return n_det
def num_periods(self, run):
return len(self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']['OutputWorkspace'])
@property
def main_field_direction(self):
return self._main_field_direction
@main_field_direction.setter
def main_field_direction(self, value):
if value and value != self._main_field_direction and self._main_field_direction:
self.message_notifier.notify_subscribers('MainFieldDirection has changed between'
' data sets, click default to reset grouping if required')
self._main_field_direction = value
@property
def dead_time_table(self):
return self.current_data["DeadTimeTable"]
def get_sample_logs(self):
logs = None
try:
logs = self.loaded_workspace.getSampleDetails()
except Exception:
print("Cannot find sample logs")
return logs
def get_sample_log(self, log_name):
logs = self.get_sample_logs()
try:
log = logs.getLogData(log_name)
except Exception:
log = None
return log
# ------------------------------------------------------------------------------------------------------------------
# Clearing data
# ------------------------------------------------------------------------------------------------------------------
def clear_groups(self):
self._groups = OrderedDict()
def clear_pairs(self):
self._pairs = OrderedDict()
def clear(self):
self.clear_groups()
self.clear_pairs()
self._current_data = {"workspace": load_utils.empty_loaded_data()}
def _base_run_name(self, run=None):
""" e.g. EMU0001234 """
if not run:
run = self.run
if isinstance(run, int):
return str(self.instrument) + str(run)
else:
return str(self.instrument) + run
# ------------------------------------------------------------------------------------------------------------------
# Showing workspaces in the ADS
# ------------------------------------------------------------------------------------------------------------------
def show_raw_data(self):
for run in self.current_runs:
run_string = run_list_to_string(run)
loaded_workspace = self._loaded_data.get_data(run=run, instrument=self.instrument)['workspace']['OutputWorkspace']
directory = get_base_data_directory(self, run_string) + get_raw_data_directory(self, run_string)
if len(loaded_workspace) > 1:
# Multi-period data
for i, single_ws in enumerate(loaded_workspace):
name = directory + get_raw_data_workspace_name(self, run_string, period=str(i + 1))
single_ws.show(name)
else:
# Single period data
name = directory + get_raw_data_workspace_name(self, run_string)
loaded_workspace[0].show(name)
def show_all_groups(self):
for group_name in self._groups.keys():
self.show_group_data(group_name)
if self.do_rebin():
for group_name in self._groups.keys():
self.show_group_data(group_name, rebin=True)
def show_group_data(self, group_name, show=True, rebin=False):
for run in self.current_runs:
run_as_string = run_list_to_string(run)
group_workspace = calculate_group_data(self, group_name, run, rebin)
group_asymmetry = estimate_group_asymmetry_data(self, group_name, run, rebin)
directory = get_base_data_directory(self, run_as_string) + get_group_data_directory(self, run_as_string)
name = get_group_data_workspace_name(self, group_name, run_as_string, rebin)
asym_name = get_group_asymmetry_name(self, group_name, run_as_string, rebin)
if not rebin:
self._groups[group_name]._workspace[str(run)] = MuonWorkspaceWrapper(group_workspace)
self._groups[group_name]._asymmetry_estimate[str(run)] = MuonWorkspaceWrapper(group_asymmetry)
if show:
self._groups[group_name].workspace[str(run)].show(directory + name)
self._groups[group_name]._asymmetry_estimate[str(run)].show(directory + asym_name)
else:
self._groups[group_name]._workspace_rebin[str(run)] = MuonWorkspaceWrapper(group_workspace)
self._groups[group_name]._asymmetry_estimate_rebin[str(run)] = MuonWorkspaceWrapper(group_asymmetry)
if show:
self._groups[group_name]._workspace_rebin[str(run)].show(directory + name)
self._groups[group_name]._asymmetry_estimate_rebin[str(run)].show(directory + asym_name)
def show_all_pairs(self):
for pair_name in self._pairs.keys():
self.show_pair_data(pair_name)
if self.do_rebin():
for pair_name in self._pairs.keys():
self.show_pair_data(pair_name, rebin=True)
def show_pair_data(self, pair_name, show=True, rebin=False):
for run in self.current_runs:
run_as_string = run_list_to_string(run)
name = get_pair_data_workspace_name(self, pair_name, run_as_string, rebin)
directory = get_base_data_directory(self, run_as_string) + get_pair_data_directory(self, run_as_string)
pair_workspace = calculate_pair_data(self, pair_name, run, rebin)
if not rebin:
self._pairs[pair_name].workspace[str(run)] = MuonWorkspaceWrapper(pair_workspace)
if show:
self._pairs[pair_name].workspace[str(run)].show(directory + name)
else:
self._pairs[pair_name].workspace_rebin[str(run)] = MuonWorkspaceWrapper(pair_workspace)
if show:
self._pairs[pair_name].workspace_rebin[str(run)].show(directory + name)
def calculate_all_groups(self):
for group_name in self._groups.keys():
calculate_group_data(self, group_name)
def set_groups_and_pairs_to_default(self):
groups, pairs = get_default_grouping(self.loaded_workspace, self.instrument, self.main_field_direction)
self.clear_groups()
for group in groups:
self.add_group(group)
self.clear_pairs()
for pair in pairs:
self.add_pair(pair)
def check_group_contains_valid_detectors(self, group):
if max(group.detectors) > self.num_detectors or min(group.detectors) < 1:
return False
else:
return True
def add_or_replace_gui_variables(self, **kwargs):
if all([key in self._gui_variables and self._gui_variables[key] == kwargs[key] for key in kwargs.keys()]) and kwargs:
return
self._gui_variables.update(kwargs)
self.gui_variables_notifier.notify_subscribers()
def do_rebin(self):
return (self.gui_variables['RebinType'] == 'Fixed' and
'RebinFixed' in self.gui_variables and self.gui_variables['RebinFixed']) or\
(self.gui_variables['RebinType'] == 'Variable' and
'RebinVariable' in self.gui_variables and self.gui_variables['RebinVariable'])
def check_run_list_are_all_same_field(self, run_list):
if not run_list:
return True
first_field = self._loaded_data.get_main_field_direction(run=run_list[0], instrument=self.instrument)
return all(first_field==self._loaded_data.get_main_field_direction(run=run, instrument=self.instrument)
for run in run_list)
def create_multiple_field_directions_error_message(self, run_list):
transverse = []
longitudinal = []
for run in run_list:
field_direction = self._loaded_data.get_main_field_direction(run=run, instrument=self.instrument)
if field_direction.lower() == 'transverse':
transverse += run
elif field_direction.lower() == 'longitudinal':
longitudinal += run
else:
return 'Unrecognised field direction {} for run {}'.format(field_direction, run)
message = 'MainFieldDirection changes within current run set:\n'
message += 'transverse field runs {}\n'.format(run_list_to_string(transverse))
message += 'longitudinal field runs {}\n'.format(run_list_to_string(longitudinal))
return message
class InstrumentNotifier(Observable):
def __init__(self, outer):
Observable.__init__(self)
self.outer = outer # handle to containing class
def notify_subscribers(self, *args, **kwargs):
Observable.notify_subscribers(self, *args)
class MessageNotifier(Observable):
def __init__(self, outer):
Observable.__init__(self)
self.outer = outer # handle to containing class
def notify_subscribers(self, *args, **kwargs):
Observable.notify_subscribers(self, *args)
class GuiVariablesNotifier(Observable):
def __init__(self, outer):
Observable.__init__(self)
self.outer = outer # handle to containing class
def notify_subscribers(self, *args, **kwargs):
Observable.notify_subscribers(self, *args)
|
gpl-3.0
| 6,414,852,969,859,135,000
| 40.669421
| 136
| 0.61662
| false
| 4.008746
| true
| false
| false
|
andrewgailey/robogen
|
robogen/rgkit/backup bots/SfparI.py
|
1
|
19116
|
##################################
## ##
## ____ __ ##
## / ___| / _|_ __ __ _ _ __ ##
## \___ \| |_| '_ \ / _` | '__| ##
## ___) | _| |_) | (_| | | ##
## |____/|_| | .__/ \__,_|_| ##
## |_| ##
## ##
## by Spferical ##
## ##
## Feel free to modify/improve! ##
## ##
##################################
import rg
# global variable to store the future moves of each ally robot
# we can use this to avoid friendly collisions
future_moves = []
future_attacks = []
# this is used to store the current turn considered by the future_moves array
future_moves_turn = 0
def cant_easily_leave_spawn(loc, game):
"""Returns whether a bot would need 2+ moves to exit the spawn area.
(i.e. the bot is in spawn and all of the locations around it are occupied/
obstacle/invalid)"""
if 'spawn' in rg.loc_types(loc):
        adjacent_locs = rg.locs_around(loc,
            filter_out=['spawn', 'obstacle', 'invalid'])
        all_bots = game.get('robots')
        # filter out occupied squares with a comprehension; removing items
        # from the list while iterating over it would skip elements (and the
        # loop variable shadowed the 'loc' argument)
        adjacent_locs = [l for l in adjacent_locs if l not in all_bots]
        return (len(adjacent_locs) == 0)
# if the bot is not in spawn, then it can easily leave it
# by standing still, hehe.
return False
def bot_is_in_trouble(bot, game):
"""Returns whether a bot is in trouble.
If a bot could die in the next turn, it is in trouble."""
return could_die_in_loc(bot.hp, bot.location, bot.player_id, game)
def could_die_in_loc(hp, loc, player_id, game):
"""Returns whether or not a bot could die in a given location,
based on its hp and player_id.
Considers the number of enemy bots nearby and whether or not
the robot is standing on a spawn tile just before more will spawn."""
adjacent_bots = get_bots_next_to(loc, game)
adjacent_enemies = [b for b in adjacent_bots if b.player_id != player_id]
# each adjacent enemy can deal up to 10 damage in a turn
possible_hp_loss = len(adjacent_enemies) * 10
if possible_hp_loss >= hp:
# could die if all of the adjacent_enemies attack
return True
if 'spawn' in rg.loc_types(loc):
if game['turn'] % 10 == 0:
            # more robots spawn next turn, so a bot staying on this spawn square could die
return True
return False
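# Rough arithmetic example: a bot with 25 hp and three adjacent enemies can
# take up to 3 * 10 = 30 damage next turn, so could_die_in_loc(25, loc,
# player_id, game) returns True for that location.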
def get_weakest_bot(bots):
"""Returns the weakest bot out of a list of bots."""
assert len(bots) != 0
# bots have 50 hp max
least_hp = 51
weakest_bot = None
for bot in bots:
if bot.hp < least_hp:
weakest_bot = bot
least_hp = bot.hp
return weakest_bot
def get_bots_next_to(location, game):
"""Returns all bots next to a location."""
all_bots = game.get('robots')
bots = []
for loc in all_bots.keys():
if loc in rg.locs_around(location):
bots.append(all_bots[loc])
return bots
def get_bot_in_location(location, game):
"""Returns the bot in the given location."""
bots = game.get('robots')
if location in bots.keys():
return bots[location]
else:
return None
def is_possible_suicider(bot, game):
"""Returns whether a bot is a possible suicider based on a kinda
restrictive algorithm.
Returns true if the sum of the hp of all enemy bots is greater than
the bot's hp and there are more than 1 adjacent enemy bots and
there is at least one adjacent bot that would die."""
# get all adjacent enemies of suicider
    adjacent_bots = get_bots_next_to(bot.location, game)
    # keep only enemies; filtering with a comprehension avoids mutating the
    # list while iterating over it, which would skip elements
    adjacent_bots = [bot2 for bot2 in adjacent_bots
                     if bot2.player_id != bot.player_id]
# whether the total possible hp hit would outweigh the
# hp lost
if (sum([min(bot2.hp, 15) for bot2 in adjacent_bots]) > bot.hp):
if len(adjacent_bots) > 1:
for bot2 in adjacent_bots:
if bot2.hp <= 15:
return True
return False
class Robot:
def sort_bots_closest_first(self, bots):
"""Sorts a list of bots sorted closest to farthest away."""
return sorted(bots, key=lambda b: rg.wdist(self.location, b.location))
def get_enemy_bots_next_to(self, location, game):
"""Returns the enemy bots next to a location."""
enemies = []
for loc in rg.locs_around(location):
bot = get_bot_in_location(loc, game)
if (bot) and (bot.player_id != self.player_id):
enemies.append(bot)
return enemies
def get_friendlies_next_to(self, location, game):
"""Returns the friendly bots next to a location.
        Note: does not return /this/ robot (it filters out any robot whose
location is equal to this robot's location)"""
friendlies = []
for loc in rg.locs_around(location):
bot = get_bot_in_location(loc, game)
if (bot) and (bot.player_id == self.player_id):
if bot.location != self.location:
friendlies.append(bot)
return friendlies
def get_adjacent_enemy_bots(self, game):
"""Returns a list of the adjacent enemy bots."""
return self.get_enemy_bots_next_to(self.location, game)
def is_suiciding_beneficial(self, game):
"""Returns whether or not the bot should suicide on this turn."""
# get the adjacent bots
adjacent_bots = self.get_adjacent_enemy_bots(game)
if (sum([min(bot.hp, 15) for bot in adjacent_bots]) > self.hp):
# see if the bot can escape to any adjacent location
for loc in rg.locs_around(self.location,
filter_out=['invalid', 'obstacle']):
# the bot can't escape to the location if there's an enemy in it
if not could_die_in_loc(self.hp, loc, self.player_id, game):
bot_in_loc = get_bot_in_location(loc, game)
if bot_in_loc and bot_in_loc.player_id != self.player_id:
continue
else:
return False
return True
def get_distance_to_closest_bot(self, game, loc=None,
friendly=False, enemy=False):
"""Returns the distance from the given location (or, by default,
this robot's location) to the nearest enemy."""
if not loc: loc = self.location
bots = game.get('robots')
shortest_distance = 99999
for bot in bots.values():
if bot.location != loc and bot.location != self.location:
if (friendly == enemy == False) or \
(enemy and (bot.player_id != self.player_id)) or \
(friendly and (bot.player_id == self.player_id)):
dist = rg.wdist(loc, bot.location)
shortest_distance = min(dist, shortest_distance)
return shortest_distance
def act(self, game):
"""The function called by game.py itself: returns the action the robot
should take this turn."""
action = []
# update the future_moves array if necessary
# only the first robot will do this
global future_moves_turn, future_moves, future_attacks
if future_moves_turn != game['turn']:
future_moves = []
future_attacks = []
future_moves_turn = game['turn']
#adjacent_bots = self.get_adjacent_enemy_bots(game)
if self.is_suiciding_beneficial(game):
action = ['suicide']
else:
locs = [self.location] + rg.locs_around(self.location,
filter_out=['invalid', 'obstacle'])
target_loc = self.get_best_loc(locs, game)
if target_loc != self.location:
action = ['move', target_loc]
else:
attack_locs = rg.locs_around(self.location,
filter_out=['invalid', 'obstacle'])
action = ['attack', self.get_best_attack_loc(attack_locs, game)]
if action[0] == 'move':
assert not action[1] in future_moves
future_moves.append(action[1])
if action[1] == self.location:
action = ['guard']
else:
pass
elif action[0] != 'suicide':
pass#future_moves.append(self.location)
if action[0] == 'attack':
future_attacks.append(action[1])
return action
def get_best_loc(self, locs, game):
"""Returns the best location out of a list.
The 'goodness' of a tile is determined by get_tile_goodness()."""
best_loc_weight = -9999
best_loc = None
for loc in locs:
loc_weight = self.get_tile_goodness(loc, game)
if loc_weight > best_loc_weight:
best_loc = loc
best_loc_weight = loc_weight
assert best_loc
return best_loc
def get_tile_goodness(self, loc, game):
"""Returns how 'good' a tile is to move to or stay on.
Based on a whole bunch of factors. Fine-tuning necessary."""
types = rg.loc_types(loc)
enemies_next_to_loc = self.get_enemy_bots_next_to(loc, game)
enemies_next_to_loc_fighting_friendlies = []
for enemy in enemies_next_to_loc:
if self.get_friendlies_next_to(enemy.location, game):
enemies_next_to_loc_fighting_friendlies.append(enemy)
enemies_next_to_loc_to_fight_friendlies = []
for enemy in enemies_next_to_loc:
for pos in rg.locs_around(enemy.location):
if pos in future_moves:
enemies_next_to_loc_to_fight_friendlies.append(enemy)
break
friendlies_next_to_loc = self.get_friendlies_next_to(loc, game)
nearby_friendlies_in_spawn = []
nearby_friendlies_in_deep_spawn = []
for friendly in friendlies_next_to_loc:
if 'spawn' in rg.loc_types(friendly.location):
nearby_friendlies_in_spawn.append(friendly)
if cant_easily_leave_spawn(friendly.location, game):
nearby_friendlies_in_deep_spawn.append(friendly)
friendly_in_loc = enemy_in_loc = False
if loc != self.location:
bot_in_location = get_bot_in_location(loc, game)
if bot_in_location:
if bot_in_location.player_id == self.player_id:
friendly_in_loc = True
else:
enemy_in_loc = True
else:
bot_in_location = None
distance_to_closest_enemy = self.get_distance_to_closest_bot(game,
loc=loc, enemy=True)
distance_to_closest_friendly = self.get_distance_to_closest_bot(game,
loc=loc,friendly=True)
nearby_friendlies_in_trouble = []
for friendly in friendlies_next_to_loc:
if bot_is_in_trouble(friendly, game):
nearby_friendlies_in_trouble.append(friendly)
goodness = 0
# get out of spawn areas, especially if things are about to spawn
# highest priority: +20 pts if things are about to spawn
if game['turn'] <= 90:
goodness -= ('spawn' in types) * ((game['turn'] % 10 == 0) * 20 + 1)
# if the bot can't easily leave spawn (e.g. has to move through
# more spawn area or an enemy to get out) in the location, that's bad
# the closer to the spawn timer we are, the worse this is, so
# multiply it by the game turn % 10
if game['turn'] <= 90:
goodness -= cant_easily_leave_spawn(loc, game) * (
game['turn'] % 10) * 0.5
# if enemies next to the location are fighting or will fight
# other friendlies, help them
goodness += len(enemies_next_to_loc_fighting_friendlies) * 2.5
goodness += len(enemies_next_to_loc_to_fight_friendlies) * 0.5
# more enemies next to a location, the worse.
# even worse if a friendly is already in the location
# (so the enemies will target that loc)
# even worse if our hp is low
goodness -= len(enemies_next_to_loc) ** 2 + friendly_in_loc
goodness -= friendly_in_loc * 4
# slight bias towards NOT moving right next to friendlies
        # a sort of lattice, like
# X X X X
# X X X
# X X X X
# is the best shape, I think
#goodness -= len(friendlies_next_to_loc) * 0.05
# nearby friendlies in trouble will definitely want to escape this turn
goodness -= len(nearby_friendlies_in_trouble) * 9
if could_die_in_loc(self.hp, loc, self.player_id, game):
# /try/ not to go where the bot can die
# seriously
goodness -= 20
# all else remaining the same, move towards the center
goodness -= rg.dist(loc, rg.CENTER_POINT) * 0.01
# bias towards remaining in place and attacking
goodness += (loc == self.location) * \
(0.25 + 0.75 * (len(enemies_next_to_loc) == 1))
        # especially if we're only fighting one bot
if self.hp > 15:
# if we are strong enough, move close to (2 squares away) the
#nearest enemy
goodness -= max(distance_to_closest_enemy, 2)
else:
#otherwise, run away from the nearest enemy, up to 2 squares away
goodness += min(distance_to_closest_enemy, 2)
# friendlies should group together
# if a bot is caught alone, bots that actively hunt and surround,
# e.g. Chaos Witch Quelaang, will murder them
# so move up to two tiles from the nearest friendly
goodness -= min(distance_to_closest_friendly, 2) * 0.5
# don't move into an enemy
# it's slightly more ok to move into an enemy that could die in the
# next turn by staying here, cause he's likely to either run or die
# it's perfectly alright, maybe even encouraged, to move into a bot
# that would die from bumping into you anyways (<=5hp)
if enemy_in_loc:
goodness -= enemy_in_loc * (30 - 29 * \
bot_is_in_trouble(bot_in_location, game))
goodness += 3 * (bot_in_location.hp <= 5)
# don't block friendlies trying to move out of spawn!
# only matters when things will still spawn in the future, of course
if game['turn'] <= 90:
# if they can escape through us
if not 'spawn' in types:
goodness -= len(nearby_friendlies_in_spawn) * 2
#especially don't block those who can't easily leave spawn
# (the two lists overlap, so no extra weighting needed)
goodness -= len(nearby_friendlies_in_deep_spawn) * 2
# don't move next to possible suiciders if our hp is low enough to die
# from them
for enemy in enemies_next_to_loc_fighting_friendlies:
if is_possible_suicider(enemy, game) and (self.hp <= 15):
goodness -= 2
# the more enemies that could move next to the loc, the worse
# (the more this bot could be surrounded)
goodness -= min(len(self.get_enemies_that_could_move_next_to(
loc, game)), 1) * 0.5
# don't move into a square if another bot already plans to move there
goodness -= 999 * (loc in future_moves)
#allies attacking the same spot is bad, but not the end of the world..
# e.g. if a robot needs to go through a spot being attacked by an
# ally to leave spawn, he DEFINITELY still needs to move there
goodness -= 9 * (loc in future_attacks)
return goodness
def get_enemies_that_could_move_next_to(self, loc, game):
enemies = []
for bot in game.get('robots').values():
if bot.player_id != self.player_id:
if rg.wdist(bot.location, loc) == 2:
enemies.append(bot)
return enemies
def get_attack_goodness(self, loc, game):
"""Returns how 'good' attacking a certain location is.
Based upon the number of friendlies and enemies next to the location,
any bot that is in the location, etc."""
types = rg.loc_types(loc)
enemies_next_to_loc = self.get_enemy_bots_next_to(loc, game)
friendlies_next_to_loc = self.get_friendlies_next_to(loc, game)
nearby_friendlies_in_trouble = []
for friendly in friendlies_next_to_loc:
if bot_is_in_trouble(friendly, game):
nearby_friendlies_in_trouble.append(friendly)
nearby_enemies_in_trouble = []
for enemy in enemies_next_to_loc:
if bot_is_in_trouble(enemy, game):
nearby_enemies_in_trouble.append(enemy)
robot = get_bot_in_location(loc, game)
goodness = 0
if robot:
if robot.player_id == self.player_id:
# we're attacking a friendly's location
# no enemy's gonna move into them...
goodness -= 5
else:
#attacking an enemy is good
goodness += (100 - robot.hp) / 50.0 * 20
else:
# no bot is at the location
# so base the goodness on how likely it is for bots to move there
#more enemies that can move into the location, the better
# weighted by 3 because even if there are two other friendlies
# next to the loc, we still want to attack if it's the only square
# an enemy is next to
goodness += len(enemies_next_to_loc) * 3
#enemies aren't too likely to move next to a friendly
goodness -= len(friendlies_next_to_loc)
# if there are enemies in trouble nearby, we want to try and catch
# them escaping!
goodness += len(nearby_enemies_in_trouble) * 5
# nearby friendlies in trouble will definitely want to escape this
# turn
# maybe to this square
goodness -= len(nearby_friendlies_in_trouble)
# don't attack where an ally is already moving to
# or attacking, at least not too much
if loc in future_moves:
goodness -= 20
elif loc in future_attacks:
goodness -= 3
return goodness
def get_best_attack_loc(self, locs, game):
"""Determines the best location to attack out of a list of locations.
Uses get_attack_goodness() to weigh the locations."""
best_loc_weight = -9999
best_loc = None
for loc in locs:
loc_weight = self.get_attack_goodness(loc, game)
if loc_weight > best_loc_weight:
best_loc = loc
best_loc_weight = loc_weight
return best_loc
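# Rough sketch of the calling convention this class relies on (an assumption
# about the rgkit framework, noted here only for orientation): each turn the
# game sets self.location, self.player_id and self.hp on the robot and calls
# act(game); the return value must be one of ['move', (x, y)],
# ['attack', (x, y)], ['guard'] or ['suicide'], matching the actions built
# in act() above.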
|
unlicense
| -6,406,700,526,533,794,000
| 38.172131
| 80
| 0.568372
| false
| 3.755599
| false
| false
| false
|
praekelt/malaria24-django
|
malaria24/settings/base.py
|
1
|
8348
|
"""
Django settings for base malaria24.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
from os.path import abspath, dirname, join
from os import environ
from django.conf import global_settings
from django.utils.translation import ugettext_lazy as _
from datetime import timedelta
import dj_database_url
from celery.schedules import crontab
# Absolute filesystem path to the Django project directory:
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
DEFAULT_SECRET_KEY = 'please-change-me'
SECRET_KEY = environ.get('SECRET_KEY') or DEFAULT_SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Base URL to use when referring to full URLs within the Wagtail admin
# backend - e.g. in notification emails. Don't include '/admin' or
# a trailing slash
BASE_URL = environ.get("BASE_URL") or 'http://example.com'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'compressor',
'taggit',
'modelcluster',
'wagtail.wagtailcore',
'wagtail.wagtailadmin',
'wagtail.wagtaildocs',
'wagtail.wagtailsnippets',
'wagtail.wagtailusers',
'wagtail.wagtailsites',
'wagtail.wagtailimages',
'wagtail.wagtailembeds',
'wagtail.wagtailsearch',
'wagtail.wagtailredirects',
'wagtail.wagtailforms',
'molo.core',
'malaria24',
'malaria24.ona',
'djcelery',
'raven.contrib.django.raven_compat',
'rest_framework',
'rest_framework.authtoken',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
ROOT_URLCONF = 'malaria24.urls'
WSGI_APPLICATION = 'malaria24.wsgi.application'
# SITE stuff
SITE_ID = 1
# CELERY stuff
BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERYBEAT_SCHEDULE = {
'poll-ona-fetch-forms': {
'task': 'malaria24.ona.tasks.ona_fetch_forms',
'schedule': timedelta(hours=1),
},
'poll-ona-reported-cases': {
'task': 'malaria24.ona.tasks.ona_fetch_reported_cases',
'schedule': timedelta(minutes=10),
},
'send-weekly-digest': {
'task': 'malaria24.ona.tasks.compile_and_send_digest_email',
'schedule': crontab(hour=8, minute=15, day_of_week='mon'),
},
}
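# Illustrative note: the schedule above is consumed by celery beat; with
# django-celery this is typically started with something like
#   python manage.py celery worker -B -l info
# though the exact invocation depends on how the project wires up Celery.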
DEFAULT_FROM_EMAIL = 'MalariaConnect <malaria24@praekelt.com>'
# JEMBI settings
# Send to them by default
FORWARD_TO_JEMBI = environ.get('FORWARD_TO_JEMBI', 'true').lower() == 'true'
JEMBI_URL = environ.get('JEMBI_URL') or 'http://jembi.org/malaria24'
JEMBI_USERNAME = environ.get('JEMBI_USERNAME') or 'fake@example.com'
JEMBI_PASSWORD = environ.get('JEMBI_PASSWORD') or 'not_a_real_password'
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': ('raven.contrib.django.raven_compat.'
'handlers.SentryHandler'),
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
},
}
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# SQLite (simplest install)
DATABASES = {'default': dj_database_url.config(
default='sqlite:///%s' % (join(PROJECT_ROOT, 'db.sqlite3'),))}
# PostgreSQL (Recommended, but requires the psycopg2 library and Postgresql
# development headers)
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'base',
# 'USER': 'postgres',
# 'PASSWORD': '',
# 'HOST': '', # Set to empty string for localhost.
# 'PORT': '', # Set to empty string for default.
# # number of seconds database connections should persist for
# 'CONN_MAX_AGE': 600,
# }
# }
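# Because dj_database_url.config() is used above, the default SQLite database
# can also be overridden through the DATABASE_URL environment variable, e.g.
# (illustrative value only):
#   DATABASE_URL=postgres://user:password@localhost:5432/malaria24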
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Native South African languages are currently not included in the default
# list of languages in Django
# https://github.com/django/django/blob/master/django/conf/global_settings.py#L50
LANGUAGES = global_settings.LANGUAGES + (
('zu', _('Zulu')),
('xh', _('Xhosa')),
('st', _('Sotho')),
('ve', _('Venda')),
('tn', _('Tswana')),
('ts', _('Tsonga')),
('ss', _('Swati')),
('nr', _('Ndebele')),
)
LOCALE_PATHS = (
join(PROJECT_ROOT, "locale"),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
MEDIA_ROOT = join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
# Django compressor settings
# http://django-compressor.readthedocs.org/en/latest/settings/
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Template configuration
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'molo.core.context_processors.locale',
)
# Wagtail settings
LOGIN_URL = 'wagtailadmin_login'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
WAGTAIL_SITE_NAME = "base"
# Use Elasticsearch as the search backend for extra performance and better
# search results:
# http://wagtail.readthedocs.org/en/latest/howto/performance.html#search
# http://wagtail.readthedocs.org/en/latest/core_components/
# search/backends.html#elasticsearch-backend
#
# WAGTAILSEARCH_BACKENDS = {
# 'default': {
# 'BACKEND': ('wagtail.wagtailsearch.backends.'
# 'elasticsearch.ElasticSearch'),
# 'INDEX': 'base',
# },
# }
# Whether to use face/feature detection to improve image
# cropping - requires OpenCV
WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = False
# REST Framework conf defaults
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
# email host settings
EMAIL_HOST = environ.get('EMAIL_HOST', 'localhost')
EMAIL_PORT = environ.get('EMAIL_PORT', 25)
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')
EMAIL_USE_TLS = environ.get('EMAIL_USE_TLS', 'false').lower() == 'true'
|
bsd-2-clause
| -8,339,247,801,502,718,000
| 26.826667
| 81
| 0.65333
| false
| 3.458161
| false
| false
| false
|
litui/openparliament
|
parliament/default_settings.py
|
1
|
5763
|
import os
DEBUG = True
ADMINS = [
('Michael Mulley', 'michael@michaelmulley.com'),
]
MANAGERS = ADMINS
PROJ_ROOT = os.path.dirname(os.path.realpath(__file__))
CACHE_MIDDLEWARE_KEY_PREFIX = 'parl'
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
# Set to True to disable functionality where user-provided data is saved
PARLIAMENT_DB_READONLY = False
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'America/Montreal'
# Language code for this installation.
# MUST BE either 'en' or 'fr'
LANGUAGE_CODE = 'en'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = [os.path.join(PROJ_ROOT, 'locale')]
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.realpath(os.path.join(PROJ_ROOT, '..', '..', 'mediafiles'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
STATICFILES_DIRS = [os.path.join(PROJ_ROOT, 'static')]
STATIC_ROOT = os.path.realpath(os.path.join(PROJ_ROOT, '..', '..', 'staticfiles'))
STATIC_URL = '/static/'
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
]
COMPRESS_CSS_FILTERS = [
'parliament.core.utils.AutoprefixerFilter',
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.rCSSMinFilter'
]
COMPRESS_JS_FILTERS = []
COMPRESS_OFFLINE = True
COMPRESS_ENABLED = False
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
('es6', 'cat {infile} | ./node_modules/.bin/babel --presets es2015 > {outfile}'),
)
COMPRESS_CACHEABLE_PRECOMPILERS = ['es6']
PARLIAMENT_LANGUAGE_MODEL_PATH = os.path.realpath(os.path.join(PROJ_ROOT, '..', '..', 'language_models'))
PARLIAMENT_GENERATE_TEXT_ANALYSIS = False
APPEND_SLASH = False
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_AGE = 60*60*24*60 # 60 days
SESSION_COOKIE_SECURE = True
PARLIAMENT_API_HOST = 'api.openparliament.ca'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(PROJ_ROOT, 'templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'parliament.accounts.context_processors.auth',
'parliament.core.utils.lang_context',
],
},
},
]
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'parliament.accounts.middleware.AuthenticatedEmailMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'parliament.core.api.FetchFromCacheMiddleware',
]
ROOT_URLCONF = 'parliament.urls'
WSGI_APPLICATION = 'parliament.wsgi.application'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.flatpages',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django_extensions',
'haystack',
'imagekit',
'compressor',
'parliament.core',
'parliament.accounts',
'parliament.hansards',
'parliament.elections',
'parliament.bills',
'parliament.politicians',
'parliament.activity',
'parliament.alerts',
'parliament.committees',
'parliament.search',
'parliament.text_analysis',
]
LOGGING = {
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(module)s %(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
}
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'parliament': {
'handlers': ['console'],
'level': 'WARNING',
}
},
}
|
agpl-3.0
| 7,099,144,173,799,089,000
| 28.860104
| 105
| 0.639597
| false
| 3.548645
| false
| false
| false
|
matthiask/zivinetz
|
tests/testapp/test_changes.py
|
1
|
1099
|
from datetime import date
from django.test import TestCase
from testapp import factories
from zivinetz.models import AssignmentChange
class ChangesTestCase(TestCase):
def test_change_tracking(self):
assignment = factories.AssignmentFactory.create()
self.assertEqual(AssignmentChange.objects.count(), 1)
assignment.status = assignment.ARRANGED
assignment.arranged_on = date.today()
assignment.save()
self.assertEqual(AssignmentChange.objects.count(), 2)
assignment.status = assignment.MOBILIZED
assignment.mobilized_on = date.today()
assignment.save()
self.assertEqual(AssignmentChange.objects.count(), 3)
assignment.delete()
self.assertEqual(AssignmentChange.objects.count(), 4)
# Test the listing view.
admin = factories.UserFactory.create(is_staff=True, is_superuser=True)
self.client.login(username=admin.username, password="test")
self.assertContains(
self.client.get("/zivinetz/reporting/assignmentchanges/"), "by unknown", 4
)
|
mit
| 6,544,142,790,884,116,000
| 27.921053
| 86
| 0.688808
| false
| 4.309804
| true
| false
| false
|
kozistr/Awesome-GANs
|
awesome_gans/magan/magan_train.py
|
1
|
7449
|
import time
import numpy as np
import tensorflow as tf
import awesome_gans.image_utils as iu
import awesome_gans.magan.magan_model as magan
from awesome_gans.datasets import CelebADataSet as DataSet
from awesome_gans.datasets import DataIterator
results = {'output': './gen_img/', 'model': './model/MAGAN-model.ckpt'}
train_step = {
'epochs': 50,
'batch_size': 64,
'global_step': 200001,
'logging_interval': 1000,
}
def main():
start_time = time.time() # Clocking start
# loading CelebA DataSet
ds = DataSet(
height=64,
width=64,
channel=3,
ds_image_path="D:/DataSet/CelebA/CelebA-64.h5",
ds_label_path="D:/DataSet/CelebA/Anno/list_attr_celeba.txt",
# ds_image_path="D:/DataSet/CelebA/Img/img_align_celeba/",
ds_type="CelebA",
use_save=False,
save_file_name="D:/DataSet/CelebA/CelebA-64.h5",
save_type="to_h5",
use_img_scale=False,
img_scale="-1,1",
)
# saving sample images
test_images = np.reshape(iu.transform(ds.images[:100], inv_type='127'), (100, 64, 64, 3))
iu.save_images(test_images, size=[10, 10], image_path=results['output'] + 'sample.png', inv_type='127')
ds_iter = DataIterator(x=ds.images, y=None, batch_size=train_step['batch_size'], label_off=True)
# GPU configure
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as s:
# MAGAN Model
model = magan.MAGAN(s)
# Initializing
s.run(tf.global_variables_initializer())
# Load model & Graph & Weights
saved_global_step = 0
ckpt = tf.train.get_checkpoint_state('./model/')
if ckpt and ckpt.model_checkpoint_path:
model.saver.restore(s, ckpt.model_checkpoint_path)
saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
print("[+] global step : %s" % saved_global_step, " successfully loaded")
else:
print('[-] No checkpoint file found')
n_steps = ds.num_images // model.batch_size # training set size
# Pre-Train
print("[*] pre-training - getting proper Margin")
margin = 0 # 3.0585415484215974
if margin == 0:
sum_d_loss = 0.0
for i in range(2):
for batch_x in ds_iter.iterate():
batch_x = np.reshape(
iu.transform(batch_x, inv_type='127'),
(model.batch_size, model.height, model.width, model.channel),
)
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
_, d_real_loss = s.run(
[model.d_op, model.d_real_loss],
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: 0.0,
},
)
sum_d_loss += d_real_loss
print("[*] Epoch {:1d} Sum of d_real_loss : {:.8f}".format(i + 1, sum_d_loss))
# Initial margin value
margin = sum_d_loss / n_steps
print("[+] Margin : {0}".format(margin))
old_margin = margin
s_g_0 = np.inf # Sg_0 = infinite
global_step = saved_global_step
start_epoch = global_step // (ds.num_images // model.batch_size) # recover n_epoch
ds_iter.pointer = saved_global_step % (ds.num_images // model.batch_size) # recover n_iter
for epoch in range(start_epoch, train_step['epochs']):
s_d, s_g = 0.0, 0.0
for batch_x in ds_iter.iterate():
batch_x = iu.transform(batch_x, inv_type='127')
batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
# Update D network
_, d_loss, d_real_loss = s.run(
[model.d_op, model.d_loss, model.d_real_loss],
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: margin,
},
)
# Update D real sample
s_d += np.sum(d_real_loss)
# Update G network
_, g_loss, d_fake_loss = s.run(
[model.g_op, model.g_loss, model.d_fake_loss],
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: margin,
},
)
# Update G fake sample
s_g += np.sum(d_fake_loss)
# Logging
if global_step % train_step['logging_interval'] == 0:
summary = s.run(
model.merged,
feed_dict={
model.x: batch_x,
model.z: batch_z,
model.m: margin,
},
)
# Print loss
print(
"[+] Epoch %03d Global Step %05d => " % (epoch, global_step),
" D loss : {:.8f}".format(d_loss),
" G loss : {:.8f}".format(g_loss),
)
# Training G model with sample image and noise
sample_z = np.random.uniform(-1.0, 1.0, [model.sample_num, model.z_dim]).astype(np.float32)
samples = s.run(
model.g,
feed_dict={
model.z: sample_z,
model.m: margin,
},
)
# Summary saver
model.writer.add_summary(summary, global_step)
# Export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_dir = results['output'] + 'train_{:08d}.png'.format(global_step)
# Generated image save
iu.save_images(
samples, size=[sample_image_height, sample_image_width], image_path=sample_dir, inv_type='127'
)
# Model save
model.saver.save(s, results['model'], global_step)
global_step += 1
# Update margin
if s_d / n_steps < margin and s_d < s_g and s_g_0 <= s_g:
margin = s_d / n_steps
print("[*] Margin updated from {:8f} to {:8f}".format(old_margin, margin))
old_margin = margin
s_g_0 = s_g
# Convergence Measure
e_d = s_d / n_steps
e_g = s_g / n_steps
l_ = e_d + np.abs(e_d - e_g)
print("[+] Epoch %03d " % epoch, " L : {:.8f}".format(l_))
end_time = time.time() - start_time # Clocking end
# Elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# Close tf.Session
s.close()
if __name__ == '__main__':
main()
|
mit
| 8,328,749,761,969,198,000
| 34.303318
| 118
| 0.470936
| false
| 3.733835
| true
| false
| false
|
napjon/moocs_solution
|
introcs-udacity/Search Engine(jonappsearch)/main.py
|
1
|
1817
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
form = """
<html>
<head>
<title>Search Engine</title>
</head>
<body>
<h2>Search Engine</h2>
<form method="post">
<textarea name="text">%(text)s</textarea>
<br>
<input type="submit">
<br>
<br>
<br>
%(links)s
</form>
</body>
</html>
"""
import webapp2
import cgi
from search import lucky_search
from crawler import crawl_web, compute_ranks
class MainHandler(webapp2.RequestHandler):
def render(self, text = "", links = ""):
return self.response.write(form%{'text' :self.escape_html(text),
'links':self.escape_html(links)})
def get(self):
self.render()
def escape_html(self,s):
return cgi.escape(s, quote = True)
def post(self):
corpus, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
ranks = compute_ranks(graph)
query = self.request.get('text')
result = lucky_search(corpus, ranks, query)
if not result:
self.render(text = "", links = "try www.google.com")
else:
self.render(text = query, links = result)
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
|
mit
| -7,123,328,385,974,137,000
| 23.890411
| 79
| 0.628509
| false
| 3.576772
| false
| false
| false
|
IPVL/swift_test
|
swift/proxy/server.py
|
1
|
2944
|
from swift.ipvl.inspect_custom import whoami, whosdaddy
pass # (WIS) print __name__
required_filters = [
{'name': 'catch_errors'},
{'name': 'gatekeeper',
'after_fn': lambda pipe: (['catch_errors']
if pipe.startswith('catch_errors')
else [])},
{'name': 'dlo', 'after_fn': lambda _junk: [
'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']}]
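# Illustrative walk-through: starting from a bare pipeline "proxy-server",
# modify_wsgi_pipeline() below inserts the required filters in reverse order
# -- dlo, then gatekeeper, then catch_errors -- so the pipeline ends up as
# "catch_errors gatekeeper dlo proxy-server".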
class Application(object):
"""docstring for Application"""
def __init__(self, arg=None):
pass # (WIS) print "%s %s (%s -> %s)" % (__name__, self.__class__.__name__, whosdaddy(), whoami())
self.arg = arg
def __call__(self, env, start_response):
pass # (WIS) print "%s %s" % (self.__class__.__name__, env)
start_response('200 OK', [('Content-Type', 'text/plain')])
return self.__class__.__name__
def modify_wsgi_pipeline(self, pipe):
"""
Called during WSGI pipeline creation. Modifies the WSGI pipeline
context to ensure that mandatory middleware is present in the pipeline.
:param pipe: A PipelineWrapper object
"""
pipeline_was_modified = False
print 'pipe: %s ' % pipe
for filter_spec in reversed(required_filters):
filter_name = filter_spec['name']
print 'filter name : %s ' % filter_name
if filter_name not in pipe:
afters = filter_spec.get('after_fn', lambda _junk: [])(pipe)
print '%s after : %s ' % (filter_name, afters)
insert_at = 0
for after in afters:
try:
insert_at = max(insert_at, pipe.index(after) + 1)
except ValueError: # not in pipeline; ignore it
pass
# self.logger.info(
# 'Adding required filter %s to pipeline at position %d' %
# (filter_name, insert_at))
print 'Adding required filter %s to pipeline at position %d' % (filter_name, insert_at)
ctx = pipe.create_filter(filter_name)
pipe.insert_filter(ctx, index=insert_at)
pipeline_was_modified = True
if pipeline_was_modified:
# self.logger.info("Pipeline was modified. New pipeline is \"%s\".",
# pipe)
print "Pipeline was modified. New pipeline is \"%s\".", pipe
else:
# self.logger.debug("Pipeline is \"%s\"", pipe)
print "Pipeline is \"%s\"", pipe
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI proxy apps."""
pass # (WIS) print "%s (%s -> %s)" % (__name__, whosdaddy(), whoami())
conf = global_conf.copy()
conf.update(local_conf)
app = Application(conf)
# app.check_config()
return app
|
mit
| -6,019,711,509,896,874,000
| 39.328767
| 107
| 0.529891
| false
| 4.010899
| false
| false
| false
|
vnevoa/DiffTrike
|
SoapBox/sb_joystick.py
|
1
|
2438
|
#
# Copyright 2011 Vasco Nevoa.
#
# This file is part of DiffTrike.
#
# DiffTrike is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DiffTrike is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DiffTrike. If not, see <http://www.gnu.org/licenses/>.
#
# This module implements the controller's joystick device driver.
# It depends on python-pygame.
#
import pygame, time
from pygame.joystick import *
class Joystick():
"""initializes and gets data from the joystick"""
def __init__(self, joystick_num):
init = pygame.joystick.get_init()
if not init:
print "Initializing Joystick module."
pygame.joystick.init()
count = pygame.joystick.get_count()
self.buttons = 0
if (count > joystick_num):
self.dev = pygame.joystick.Joystick(joystick_num)
print 'Initializing Joystick ' + str(joystick_num) + ': ' + self.dev.get_name()
self.dev.init()
self.buttons = self.dev.get_numbuttons()
self.hats = self.dev.get_numhats()
self.trackballs = self.dev.get_numballs()
print "Joystick has "+ str(self.buttons) + " buttons, " + str(self.hats) + " hats, " + str(self.trackballs) + " trackballs."
self.present = 1
else:
print "Joystick not found."
self.present = 0
def getXY(self):
if self.present:
return ( self.dev.get_axis(0), self.dev.get_axis(1) )
else:
return ( 0.0, 0.0 )
def getButtons(self, highest = 1):
ret = []
for b in range(min(highest, self.buttons)):
ret.append(self.dev.get_button(b))
return ret
# This is a simple test routine that only runs if this module is
# called directly with "python sb_joystick.py"
if __name__ == '__main__':
pygame.init();
joy = Joystick(0);
while True:
t0 = time.time()
pygame.event.pump()
p = joy.getXY()
b = joy.getButtons(4)
t1 = time.time()
print "X=%0.2f Y=%0.2f B0=%d B1=%d B2=%d B3=%d T=%0.1f" % (p[0],p[1],b[0],b[1],b[2],b[3],(t1-t0)*1000)
time.sleep(0.25)
|
gpl-3.0
| -457,062,874,747,265,800
| 31.078947
| 127
| 0.653815
| false
| 2.944444
| false
| false
| false
|
Infinidat/lanister
|
manage.py
|
1
|
7158
|
#! /usr/bin/python
from __future__ import print_function
import os
import sys
import time
import random
import string
import subprocess
from _lib.bootstrapping import bootstrap_env, from_project_root, requires_env, from_env_bin
from _lib.ansible import ensure_ansible
bootstrap_env(["base"])
from _lib.params import APP_NAME
from _lib.source_package import prepare_source_package
from _lib.db import db
from _lib.celery import celery
from _lib.utils import interact
from _lib.deployment import run_gunicorn
import click
import requests
import logbook
##### ACTUAL CODE ONLY BENEATH THIS POINT ######
@click.group()
def cli():
pass
cli.add_command(run_gunicorn)
cli.add_command(db)
cli.add_command(celery)
@cli.command('ensure-secret')
@click.argument("conf_file")
def ensure_secret(conf_file):
dirname = os.path.dirname(conf_file)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if os.path.exists(conf_file):
return
with open(conf_file, "w") as f:
print('SECRET_KEY: "{0}"'.format(_generate_secret()), file=f)
print('SECURITY_PASSWORD_SALT: "{0}"'.format(_generate_secret()), file=f)
def _generate_secret(length=50):
return "".join([random.choice(string.ascii_letters) for i in range(length)])
@cli.command()
@click.option("--develop", is_flag=True)
@click.option("--app", is_flag=True)
def bootstrap(develop, app):
deps = ["base"]
if develop:
deps.append("develop")
if app:
deps.append("app")
bootstrap_env(deps)
click.echo(click.style("Environment up to date", fg='green'))
@cli.command()
@click.option('--livereload/--no-livereload', is_flag=True, default=True)
@click.option('-p', '--port', default=8000, envvar='TESTSERVER_PORT')
@click.option('--tmux/--no-tmux', is_flag=True, default=True)
@requires_env("app", "develop")
def testserver(tmux, livereload, port):
if tmux:
return _run_tmux_frontend(port=port)
from flask_app.app import create_app
app = create_app({'DEBUG': True, 'TESTING': True, 'SECRET_KEY': 'dummy', 'SECURITY_PASSWORD_SALT': 'dummy'})
extra_files=[
from_project_root("flask_app", "app.yml")
]
app = create_app({'DEBUG': True, 'TESTING': True, 'SECRET_KEY': 'dummy'})
if livereload:
from livereload import Server
s = Server(app)
for filename in extra_files:
s.watch(filename)
s.watch('flask_app')
logbook.StreamHandler(sys.stderr, level='DEBUG').push_application()
s.serve(port=port, liveport=35729)
else:
app.run(port=port, extra_files=extra_files)
def _run_tmux_frontend(port):
tmuxp = from_env_bin('tmuxp')
os.execve(tmuxp, [tmuxp, 'load', from_project_root('_lib', 'frontend_tmux.yml')], dict(os.environ, TESTSERVER_PORT=str(port), CONFIG_DIRECTORY=from_project_root("conf.d")))
@cli.command()
@click.option("--dest", type=click.Choice(["production", "staging", "localhost", "vagrant", "custom"]), help="Deployment target", required=True)
@click.option("-i", "--inventory", type=str, default=None, help="Path to an inventory file. Should be specified only when \"--dest custom\" is set")
@click.option("--vagrant-machine", type=str, default="", help="Vagrant machine to provision")
@click.option("--sudo/--no-sudo", default=False)
@click.option("--ask-sudo-pass/--no-ask-sudo-pass", default=False)
def deploy(dest, sudo, ask_sudo_pass, vagrant_machine, inventory):
prepare_source_package()
ansible = ensure_ansible()
if dest == "vagrant":
# Vagrant will invoke ansible
environ = os.environ.copy()
environ["PATH"] = "{}:{}".format(os.path.dirname(ansible), environ["PATH"])
# "vagrant up --provision" doesn't call provision if the virtual machine is already up,
# so we have to call vagrant provision explicitly
click.echo(click.style("Running deployment on Vagrant. This may take a while...", fg='magenta'))
subprocess.check_call('vagrant up ' + vagrant_machine, shell=True, env=environ)
subprocess.check_call('vagrant provision ' + vagrant_machine, shell=True, env=environ)
else:
if dest == "custom":
if inventory is None:
raise click.ClickException("-i/--inventory should be specified together with \"--dest custom\"")
if not os.path.exists(inventory):
raise click.ClickException("Custom inventory file {} doesn't exist".format(inventory))
else:
if inventory is not None:
raise click.ClickException("-i/--inventory should be specified only when \"--dest custom\" is specified")
inventory = from_project_root("ansible", "inventories", dest)
click.echo(click.style("Running deployment on {}. This may take a while...".format(inventory), fg='magenta'))
cmd = [ansible, "-i", inventory]
if dest in ("localhost",):
cmd.extend(["-c", "local"])
if dest == "localhost":
cmd.append("--sudo")
if sudo:
cmd.append('--sudo')
if ask_sudo_pass:
cmd.append('--ask-sudo-pass')
cmd.append(from_project_root("ansible", "site.yml"))
subprocess.check_call(cmd)
@cli.command()
def unittest():
_run_unittest()
@requires_env("app", "develop")
def _run_unittest():
subprocess.check_call(
[from_env_bin("py.test"), "tests/test_ut"], cwd=from_project_root())
@cli.command()
@click.argument('pytest_args', nargs=-1)
def pytest(pytest_args):
_run_pytest(pytest_args)
@requires_env("app", "develop")
def _run_pytest(pytest_args=()):
subprocess.check_call(
[from_env_bin("py.test")]+list(pytest_args), cwd=from_project_root())
@cli.command()
def fulltest():
_run_fulltest()
@requires_env("app", "develop")
def _run_fulltest(extra_args=()):
subprocess.check_call([from_env_bin("py.test"), "tests"]
+ list(extra_args), cwd=from_project_root())
@cli.command('travis-test')
def travis_test():
subprocess.check_call('createdb {0}'.format(APP_NAME), shell=True)
_run_unittest()
subprocess.check_call('dropdb {0}'.format(APP_NAME), shell=True)
def _wait_for_travis_availability():
click.echo(click.style("Waiting for service to become available on travis", fg='magenta'))
time.sleep(10)
for _ in range(10):
click.echo("Checking service...")
resp = requests.get("http://localhost/")
click.echo("Request returned {0}".format(resp.status_code))
if resp.status_code == 200:
break
time.sleep(5)
else:
raise RuntimeError("Web service did not become responsive")
click.echo(click.style("Service is up", fg='green'))
def _db_container_name():
return '{0}-db'.format(APP_NAME)
@cli.command()
@requires_env("app", "develop")
def shell():
from flask_app.app import create_app
from flask_app import models
app = create_app()
with app.app_context():
interact({
'db': db,
'app': app,
'models': models,
'db': models.db,
})
if __name__ == "__main__":
cli()
|
bsd-3-clause
| -4,284,564,785,641,032,700
| 31.536364
| 176
| 0.637748
| false
| 3.479825
| true
| false
| false
|
ipeterov/convenient-rpc
|
task_server/lib/tasks.py
|
1
|
3496
|
import uuid
import time
from functools import partial
from collections import Counter
from threading import Thread
from queue import Queue, Empty
class TaskManager:
@staticmethod
def hash_task(task):
return hash(''.join(str(task.get(key, '')) for key in ('package', 'version', 'function')))
def __init__(self):
self.wait_interval = 0.01
self.tasks = {}
self.answers = {}
self.unsent_tasks = []
self.streams = {}
self.task_performance = {}
def get_tasks(self):
return self.tasks
def start_stream(self, ids, unordered=False):
def fill_queue(queue, iterable):
for item in iterable:
queue.put(item)
stream_id = str(uuid.uuid4())
answer_queue = Queue()
answer_gen = self.get_answers(ids, unordered=unordered)
self.streams[stream_id] = {
'generator': answer_gen,
'queue': answer_queue,
'worker': Thread(target=partial(fill_queue, answer_queue, answer_gen)),
'left': len(ids)
}
self.streams[stream_id]['worker'].start()
return stream_id
def get_from_stream(self, stream_id):
if stream_id not in self.streams:
raise WrongIDException()
answers = []
while True:
try:
answers.append(self.streams[stream_id]['queue'].get_nowait())
except Empty:
break
self.streams[stream_id]['left'] -= len(answers)
last = self.streams[stream_id]['left'] == 0
if last:
self.streams.pop(stream_id)
return answers, last
def add_task(self, task):
id_ = str(uuid.uuid4())
self.tasks[id_] = task
self.unsent_tasks.append(id_)
return id_
def get_answers(self, ids, unordered=False):
if unordered:
while ids:
for id_ in ids.copy():
if id_ in self.answers:
ids.remove(id_)
yield self.answers.pop(id_)
time.sleep(self.wait_interval)
else:
for id_ in ids:
while id_ not in self.answers:
time.sleep(self.wait_interval)
yield self.answers.pop(id_)
def get_task(self):
while True:
try:
id_ = self.unsent_tasks.pop(0)
break
except IndexError:
time.sleep(self.wait_interval)
task = self.tasks[id_]
return id_, task
def add_answer(self, id_, answer, time=None):
if id_ in self.tasks:
task = self.tasks[id_]
del self.tasks[id_]
else:
raise WrongIDException()
hash_key = self.hash_task(task)
self.answers[id_] = answer
        if time is not None:
self.task_performance.setdefault(hash_key, []).append(time)
def estimate_runtime(self, hash_key):
if hash_key in self.task_performance:
times = self.task_performance[hash_key]
return sum(times) / len(times)
else:
return 0
def estimate_time_left(self):
tasks = Counter(self.hash_task(self.tasks[id_]) for id_ in self.unsent_tasks).items()
return sum(self.estimate_runtime(hash_key) * count for hash_key, count in tasks)
class NotReadyException(Exception):
pass
class WrongIDException(Exception):
pass
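# Minimal usage sketch (illustrative only; the 'args' key and all values are
# invented for the example -- only 'package'/'version'/'function' are used by
# hash_task()):
#   manager = TaskManager()
#   ids = [manager.add_task({'package': 'mathlib', 'version': '1.0',
#                            'function': 'square', 'args': (n,)})
#          for n in range(4)]
#   stream_id = manager.start_stream(ids)
#   # workers repeatedly call manager.get_task() and manager.add_answer(...)
#   answers, finished = manager.get_from_stream(stream_id)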
|
mit
| 3,637,333,381,968,971,300
| 25.484848
| 98
| 0.543764
| false
| 4.065116
| false
| false
| false
|
acesonl/remotecare
|
remotecare/core/backends.py
|
1
|
1343
|
# -*- coding: utf-8 -*-
"""
The standard email backend is replaced by a custom
ModelBackend that supports getting the user based
on the stored hmac email value.
:subtitle:`Class definitions:`
"""
from django.contrib.auth.backends import ModelBackend
from apps.account.models import User
from django.contrib.auth.hashers import check_password
class EmailBackend(ModelBackend):
'''
Custom authentication backend which uses the hmac
email address rather than the username to authenticate.
'''
def authenticate(self, email=None, password=None, username=None, **kwargs):
"""
Processes an authentication attempt
args:
- email: not used
- password: the password to check
- username: the plain-text email address to search for
Returns:
the user if found and password correct else None.
"""
try:
# match the user's HMAC email address to the
# entered 'username'
# The hmac_email field will automatically HMAC the username.lower()
# value
user = User.objects.get(hmac_email=username)
if check_password(password, user.password):
return user
else:
return None
except User.DoesNotExist:
return None
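# To enable this backend, the project settings would point Django at it with
# something like (illustrative dotted path, not taken from the original
# project):
#   AUTHENTICATION_BACKENDS = ('core.backends.EmailBackend',)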
|
gpl-3.0
| 3,425,033,322,685,303,300
| 30.97619
| 79
| 0.630678
| false
| 4.95572
| false
| false
| false
|
rukai/GameToy
|
timer.py
|
1
|
1829
|
class Timer:
def __init__(self, interrupts):
self.interrupts = interrupts
self.div = 0 # FF04
self.sub_div = 0
self.tima = 0 # FF05
self.sub_tima = 0
self.tma = 0 # FF06
# FF07 bits 2-0
self.timer_run = False
self.clock_select = 0
self.clock = 0
    def update(self, cycles):
        self.sub_div += cycles
        if self.sub_div >= 256:
            # keep the leftover cycles so DIV keeps ticking at the right rate
            self.sub_div -= 256
            self.div = (self.div + 1) % 0x100
        if self.timer_run:
            self.sub_tima += cycles
        else:
            self.sub_tima = 0 # Assuming timer progress is lost when disabled
        if self.timer_run and self.sub_tima >= self.clock:
            # consume one timer period; without this (and the timer_run check)
            # TIMA would tick on every update once the threshold was reached
            self.sub_tima -= self.clock
            if self.tima == 0xFF:
                self.tima = self.tma
                self.interrupts.callTimer()
            else:
                self.tima += 1
# Divider Register
def readDIV(self):
return self.div
def writeDIV(self, value):
self.div = 0
# Timer Counter
def readTIMA(self):
return self.tima
def writeTIMA(self, value):
self.tima = value
# Timer Modulo
def readTMA(self):
return self.tma
def writeTMA(self, value):
self.tma = value
# Timer Controller
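    # TAC (FF07): bit 2 starts/stops the timer, bits 1-0 select the input
    # clock. The cycle counts used in writeTAC() correspond to the Game Boy
    # timer frequencies: 00 -> 4096 Hz (1024 cycles), 01 -> 262144 Hz (16),
    # 10 -> 65536 Hz (64), 11 -> 16384 Hz (256).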
    def readTAC(self):
        value = int(self.timer_run) << 2
        value |= self.clock_select
        return value
def writeTAC(self, value):
self.timer_run = bool(value & 0b00000100)
self.clock_select = value & 0b00000011
if self.clock_select == 0:
self.clock = 1024
elif self.clock_select == 1:
self.clock = 16
elif self.clock_select == 2:
self.clock = 64
elif self.clock_select == 3:
self.clock = 256
else:
assert(False)
|
gpl-3.0
| -91,548,163,793,290,540
| 24.402778
| 77
| 0.503007
| false
| 3.747951
| false
| false
| false
|
icodemachine/Stem
|
stem/descriptor/tordnsel.py
|
1
|
3950
|
# Copyright 2013-2014, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for `TorDNSEL <https://www.torproject.org/projects/tordnsel.html.en>`_
exit list files.
::
TorDNSEL - Exit list provided by TorDNSEL
"""
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
from stem.descriptor import (
Descriptor,
_read_until_keywords,
_get_descriptor_components,
)
def _parse_file(tordnsel_file, validate = False, **kwargs):
"""
Iterates over a tordnsel file.
:returns: iterator for :class:`~stem.descriptor.tordnsel.TorDNSEL`
instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is **True**
* **IOError** if the file can't be read
"""
# skip content prior to the first ExitNode
_read_until_keywords('ExitNode', tordnsel_file, skip = True)
while True:
contents = _read_until_keywords('ExitAddress', tordnsel_file)
contents += _read_until_keywords('ExitNode', tordnsel_file)
if contents:
yield TorDNSEL(bytes.join(b'', contents), validate, **kwargs)
else:
break # done parsing file
class TorDNSEL(Descriptor):
"""
TorDNSEL descriptor (`exitlist specification
<https://www.torproject.org/tordnsel/exitlist-spec.txt>`_)
:var str fingerprint: **\*** authority's fingerprint
:var datetime published: **\*** time in UTC when this descriptor was made
:var datetime last_status: **\*** time in UTC when the relay was seen in a v2 network status
:var list exit_addresses: **\*** list of (str address, datetime date) tuples consisting of the found IPv4 exit address and the time
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
def __init__(self, raw_contents, validate):
super(TorDNSEL, self).__init__(raw_contents)
raw_contents = stem.util.str_tools._to_unicode(raw_contents)
entries = _get_descriptor_components(raw_contents, validate)
self.fingerprint = None
self.published = None
self.last_status = None
self.exit_addresses = []
self._parse(entries, validate)
def _parse(self, entries, validate):
for keyword, values in list(entries.items()):
value, block_type, block_content = values[0]
if validate and block_content:
raise ValueError('Unexpected block content: %s' % block_content)
if keyword == 'ExitNode':
if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value)
self.fingerprint = value
elif keyword == 'Published':
try:
self.published = stem.util.str_tools._parse_timestamp(value)
except ValueError:
if validate:
raise ValueError("Published time wasn't parsable: %s" % value)
elif keyword == 'LastStatus':
try:
self.last_status = stem.util.str_tools._parse_timestamp(value)
except ValueError:
if validate:
raise ValueError("LastStatus time wasn't parsable: %s" % value)
elif keyword == 'ExitAddress':
for value, block_type, block_content in values:
address, date = value.split(' ', 1)
if validate:
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("ExitAddress isn't a valid IPv4 address: %s" % address)
elif block_content:
raise ValueError('Unexpected block content: %s' % block_content)
try:
date = stem.util.str_tools._parse_timestamp(date)
self.exit_addresses.append((address, date))
except ValueError:
if validate:
raise ValueError("ExitAddress found time wasn't parsable: %s" % value)
elif validate:
raise ValueError('Unrecognized keyword: %s' % keyword)
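# Hedged usage sketch (not part of the original module): parse a tiny in-memory
# exit list. The fingerprint, address and timestamps below are illustrative
# sample data, not real relays.
def _example_parse():
    import io
    sample = (b'ExitNode 379FB450010D17078B3766C2273303C358C3A442\n'
              b'Published 2011-05-24 07:05:49\n'
              b'LastStatus 2011-05-24 18:21:48\n'
              b'ExitAddress 64.113.32.29 2011-05-24 18:22:34\n')
    for desc in _parse_file(io.BytesIO(sample), validate=True):
        print(desc.fingerprint, desc.exit_addresses)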
|
lgpl-3.0
| -5,719,108,573,632,208,000
| 32.760684
| 133
| 0.661772
| false
| 3.883972
| false
| false
| false
|
our-city-app/oca-backend
|
src/rogerthat/pages/service_page.py
|
1
|
7102
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
import logging
import webapp2
from google.appengine.ext import webapp
from mcfw.properties import azzert
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.friend_helper import FriendHelper
from rogerthat.bizz.service.i18n import excel_export, excel_import
from rogerthat.dal.friend import get_friends_map
from rogerthat.dal.profile import get_service_profile
from rogerthat.dal.service import get_friend_serviceidentity_connection
from rogerthat.models import ProfileHashIndex
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.templates import render
from rogerthat.to.friends import FriendTO, FRIEND_TYPE_SERVICE
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils import safe_file_name, filename_friendly_time
from rogerthat.utils.channel import broadcast_via_iframe_result
from rogerthat.utils.crypto import md5_hex
from rogerthat.utils.service import add_slash_default
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class ServicePageHandler(webapp.RequestHandler):
def get(self):
service_email = self.request.GET.get('service')
azzert(service_email)
user = users.get_current_user()
service_identity_user = add_slash_default(users.User(service_email))
azzert(get_friend_serviceidentity_connection(user, service_identity_user),
"%s tried to get service page of service %s, but is not connected" % (user.email(), service_identity_user.email()))
params = {'service_email': service_email, 'container_id': 'servicePageContainer_%s' % md5_hex(service_email)}
self.response.out.write(render('service_page', [DEFAULT_LANGUAGE], params, 'web'))
class ServiceMenuItemBrandingHandler(webapp.RequestHandler):
def get(self):
service_email = self.request.GET.get('service')
azzert(service_email)
user = users.get_current_user()
service_identity_user = add_slash_default(users.User(service_email))
azzert(get_friend_serviceidentity_connection(user, service_identity_user),
"%s tried to get a menu item page of service %s, but is not connected" % (user.email(), service_identity_user.email()))
branding = self.request.GET.get('branding')
azzert(branding)
params = {'container_id': 'smi_branding_container_%s' %
branding, 'branding': branding, 'service_email': service_email}
self.response.out.write(render('smi_branding', [DEFAULT_LANGUAGE], params, 'web'))
class ServiceAboutPageHandler(webapp.RequestHandler):
def get(self):
service_email = self.request.GET.get('service')
azzert(service_email)
user = users.get_current_user()
service_identity_user = add_slash_default(users.User(service_email))
azzert(get_friend_serviceidentity_connection(user, service_identity_user),
"%s tried to get About page of service %s, but is not connected" % (user.email(), service_identity_user.email()))
helper = FriendHelper.from_data_store(service_identity_user, FRIEND_TYPE_SERVICE)
service = FriendTO.fromDBFriendMap(helper, get_friends_map(user), service_identity_user,
includeServiceDetails=True, targetUser=user)
azzert(service.type == FriendTO.TYPE_SERVICE)
params = {'service': service,
'service_name': service.name or service.email,
'container_id': 'serviceAboutPageContainer_%s' % md5_hex(service_email)}
self.response.out.write(render('service_about', [DEFAULT_LANGUAGE], params, 'web'))
class EditableTranslationSetExcelDownloadHandler(webapp2.RequestHandler):
def get(self):
browser_timezone_str = self.request.get('tz_offset', '0')
try:
browser_timezone = int(browser_timezone_str)
except ValueError:
logging.warning("Invalid browser timezone offset: [%s]" % browser_timezone_str)
browser_timezone = 0
if abs(browser_timezone) > 24 * 3600:
logging.warning("Invalid browser timezone offset: [%s]" % browser_timezone_str)
browser_timezone = 0
service_user = users.get_current_user()
book, latest_export_timestamp = excel_export(service_user, browser_timezone)
# Return
output = StringIO()
book.save(output)
output.seek(0)
filename = "Rogerthat_%s_%s.xls" % (filename_friendly_time(latest_export_timestamp), service_user.email())
self.response.headers['Content-Type'] = 'application/vnd.ms-excel'
self.response.headers['Content-Disposition'] = 'attachment; filename=%s' % safe_file_name(filename)
self.response.out.write(output.getvalue())
class PostEditableTranslationSetExcelHandler(webapp2.RequestHandler):
def post(self):
import xlrd
try:
service_user = users.get_current_user()
file_ = self.request.POST.get('file').file
book = xlrd.open_workbook(file_contents=file_.read())
excel_import(service_user, book)
except BusinessException as be:
self.response.out.write(broadcast_via_iframe_result(
u'rogerthat.service.translations.post_result', error=be.message))
return
except:
self.response.out.write(broadcast_via_iframe_result(
u'rogerthat.service.translations.post_result', error=u"Unknown error has occurred."))
logging.exception("Failure receiving translations!")
return
self.response.out.write(broadcast_via_iframe_result(u'rogerthat.service.translations.post_result'))
class GetServiceAppHandler(webapp2.RequestHandler):
def get_default_app_id(self, user_hash):
index = ProfileHashIndex.get(ProfileHashIndex.create_key(user_hash))
if not index:
logging.debug('No profile found with user_hash %s', user_hash)
return None
profile = get_service_profile(index.user)
        if not profile:
            logging.debug('Profile not found: %s', index.user)
            return None
        community = get_community(profile.community_id)
return community.default_app
def get(self):
user_hash = self.request.GET['user']
self.response.out.write(json.dumps({'app_id': self.get_default_app_id(user_hash)}))
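# Hedged wiring sketch (not part of the original module): how these handlers
# might be mounted in a WSGI application. The URL paths below are assumptions
# for illustration only; the real route mapping lives elsewhere in the code base.
def _example_routes():
    return webapp2.WSGIApplication([
        ('/mobi/service/page', ServicePageHandler),
        ('/mobi/service/menu/branding', ServiceMenuItemBrandingHandler),
        ('/mobi/service/about', ServiceAboutPageHandler),
        ('/mobi/service/translations/export', EditableTranslationSetExcelDownloadHandler),
        ('/mobi/service/translations/import', PostEditableTranslationSetExcelHandler),
        ('/mobi/service/app', GetServiceAppHandler),
    ], debug=False)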
|
apache-2.0
| 8,330,832,025,943,103,000
| 41.023669
| 134
| 0.689383
| false
| 3.861881
| false
| false
| false
|
edx/ecommerce
|
ecommerce/extensions/offer/migrations/0046_offerassignmentemailsentrecord.py
|
1
|
1471
|
# Generated by Django 2.2.16 on 2020-11-02 07:04
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('offer', '0045_codeassignmentnudgeemails'),
]
operations = [
migrations.CreateModel(
name='OfferAssignmentEmailSentRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('enterprise_customer', models.UUIDField(help_text='UUID for an EnterpriseCustomer from the Enterprise Service.')),
('email_type', models.CharField(choices=[('assign', 'Assign'), ('remind', 'Remind'), ('revoke', 'Revoke')], max_length=32)),
('template_id', models.PositiveIntegerField(null=True)),
('template_content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'get_latest_by': 'modified',
'abstract': False,
},
),
]
|
agpl-3.0
| -4,716,142,410,958,759,000
| 44.96875
| 148
| 0.622706
| false
| 4.326471
| false
| false
| false
|
kzcashteam/kzcash
|
share/qt/extract_strings_qt.py
|
1
|
1857
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/kzcashstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
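# Hedged illustration (not part of the original script): what parse_po is
# expected to return for a tiny xgettext-style fragment. The strings are
# made-up sample data.
def _example_parse_po():
    sample = ('msgid "Send coins"\n'
              'msgstr ""\n'
              'msgid ""\n'
              '"Multi-line "\n'
              '"entry"\n'
              'msgstr ""\n')
    # Returns [(['"Send coins"'], ['""']),
    #          (['""', '"Multi-line "', '"entry"'], ['""'])]
    return parse_po(sample)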
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *kzcash_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("kzcash-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
mit
| 8,326,102,238,316,943,000
| 22.807692
| 79
| 0.581583
| false
| 3.451673
| false
| false
| false
|
ebeshero/Pittsburgh_Frankenstein
|
collateXPrep/python/Part9-allWitnessIM_collation_to_xml.py
|
1
|
11044
|
from collatex import *
from xml.dom import pulldom
import re
import glob
from datetime import datetime, date
# import pytz
# from tzlocal import get_localzone
# today = date.today()
# utc_dt = datetime(today, tzinfo=pytz.utc)
# dateTime = utc_dt.astimezone(get_localzone())
# strDateTime = str(dateTime)
now = datetime.utcnow()
nowStr = str(now)
print('test: ', dir(Collation))
regexWhitespace = re.compile(r'\s+')
regexNonWhitespace = re.compile(r'\S+')
regexEmptyTag = re.compile(r'/>$')
regexBlankLine = re.compile(r'\n{2,}')
regexLeadingBlankLine = re.compile(r'^\n')
regexPageBreak = re.compile(r'<pb.+?/>')
RE_MARKUP = re.compile(r'<.+?>')
RE_PARA = re.compile(r'<p\s[^<]+?/>')
RE_INCLUDE = re.compile(r'<include[^<]*/>')
RE_MILESTONE = re.compile(r'<milestone[^<]*/>')
RE_HEAD = re.compile(r'<head[^<]*/>')
RE_AB = re.compile(r'<ab[^<]*/>')
# 2018-10-1 ebb: ampersands are apparently not treated in python regex as entities any more than angle brackets.
# RE_AMP_NSB = re.compile(r'\S&\s')
# RE_AMP_NSE = re.compile(r'\s&\S')
# RE_AMP_SQUISH = re.compile(r'\S&\S')
# RE_AMP = re.compile(r'\s&\s')
RE_AMP = re.compile(r'&')
# RE_MULTICAPS = re.compile(r'(?<=\W|\s|\>)[A-Z][A-Z]+[A-Z]*\s')
# RE_INNERCAPS = re.compile(r'(?<=hi\d"/>)[A-Z]+[A-Z]+[A-Z]+[A-Z]*')
# TITLE_MultiCaps = match(RE_MULTICAPS).lower()
RE_DELSTART = re.compile(r'<del[^<]*>')
RE_ADDSTART = re.compile(r'<add[^<]*>')
RE_MDEL = re.compile(r'<mdel[^<]*>.+?</mdel>')
RE_SHI = re.compile(r'<shi[^<]*>.+?</shi>')
RE_METAMARK = re.compile(r'<metamark[^<]*>.+?</metamark>')
RE_HI = re.compile(r'<hi\s[^<]*/>')
RE_PB = re.compile(r'<pb[^<]*/>')
RE_LB = re.compile(r'<lb[^<]*/>')
RE_LG = re.compile(r'<lg[^<]*/>')
RE_L = re.compile(r'<l\s[^<]*/>')
RE_CIT = re.compile(r'<cit\s[^<]*/>')
RE_QUOTE = re.compile(r'<quote\s[^<]*/>')
RE_OPENQT = re.compile(r'“')
RE_CLOSEQT = re.compile(r'”')
RE_GAP = re.compile(r'<gap\s[^<]*/>')
# <milestone unit="tei:p"/>
RE_sgaP = re.compile(r'<milestone\sunit="tei:p"[^<]*/>')
# ebb: RE_MDEL = those pesky deletions of two letters or less that we want to normalize out of the collation, but preserve in the output.
# Element types: xml, div, head, p, hi, pb, note, lg, l; comment()
# Tags to ignore, with content to keep: xml, comment, anchor
# Structural elements: div, p, lg, l
# Inline elements (empty) retained in normalization: pb, milestone, xi:include
# Inline and block elements (with content) retained in normalization: note, hi, head, ab
# GIs fall into one of three classes
# 2017-05-21 ebb: Due to trouble with pulldom parsing XML comments, I have converted these to comment elements,
# 2017-05-21 ebb: to be ignored during collation.
# 2017-05-30 ebb: Determined that comment elements cannot really be ignored when they have text nodes (the text is
# 2017-05-30 ebb: collated but the tags are not). Decision to make the comments into self-closing elements with text
# 2017-05-30 ebb: contents as attribute values, and content such as tags simplified to be legal attribute values.
# 2017-05-22 ebb: I've set anchor elements with @xml:ids to be the indicators of collation "chunks" to process together
ignore = ['sourceDoc', 'xml', 'comment', 'w', 'mod', 'anchor', 'include', 'delSpan', 'addSpan', 'add', 'handShift', 'damage', 'restore', 'zone', 'surface', 'graphic', 'unclear', 'retrace']
blockEmpty = ['pb', 'p', 'div', 'milestone', 'lg', 'l', 'note', 'cit', 'quote', 'bibl', 'ab', 'head']
inlineEmpty = ['lb', 'gap', 'del', 'hi']
# 2018-05-12 (mysteriously removed but reinstated 2018-09-27) ebb: I'm setting a white space on either side of the inlineEmpty elements in line 103
# 2018-07-20: ebb: CHECK: are there white spaces on either side of empty elements in the output?
inlineContent = ['metamark', 'mdel', 'shi']
#2018-07-17 ebb: I moved the following list up into inlineEmpty, since they are all now empty elements: blockElement = ['lg', 'l', 'note', 'cit', 'quote', 'bibl']
# ebb: Tried removing 'comment', from blockElement list above, because we don't want these to be collated.
# 10-23-2017 ebb rv:
def normalizeSpace(inText):
"""Replaces all whitespace spans with single space characters"""
if regexNonWhitespace.search(inText):
return regexWhitespace.sub('\n', inText)
else:
return ''
def extract(input_xml):
"""Process entire input XML document, firing on events"""
# Start pulling; it continues automatically
doc = pulldom.parse(input_xml)
output = ''
for event, node in doc:
# elements to ignore: xml
if event == pulldom.START_ELEMENT and node.localName in ignore:
continue
# copy comments intact
elif event == pulldom.COMMENT:
doc.expandNode(node)
output += node.toxml()
# ebb: Next (below): empty block elements: pb, milestone, lb, lg, l, p, ab, head, hi,
# We COULD set white spaces around these like this ' ' + node.toxml() + ' '
# but what seems to happen is that the white spaces get added to tokens; they aren't used to
# isolate the markup into separate tokens, which is really what we'd want.
# So, I'm removing the white spaces here.
# NOTE: Removing the white space seems to improve/expand app alignment
elif event == pulldom.START_ELEMENT and node.localName in blockEmpty:
output += node.toxml()
# ebb: empty inline elements that do not take surrounding white spaces:
elif event == pulldom.START_ELEMENT and node.localName in inlineEmpty:
output += node.toxml()
# non-empty inline elements: mdel, shi, metamark
elif event == pulldom.START_ELEMENT and node.localName in inlineContent:
output += regexEmptyTag.sub('>', node.toxml())
elif event == pulldom.END_ELEMENT and node.localName in inlineContent:
output += '</' + node.localName + '>'
# elif event == pulldom.START_ELEMENT and node.localName in blockElement:
# output += '\n<' + node.localName + '>\n'
#elif event == pulldom.END_ELEMENT and node.localName in blockElement:
# output += '\n</' + node.localName + '>'
elif event == pulldom.CHARACTERS:
output += normalizeSpace(node.data)
else:
continue
return output
def normalize(inputText):
    # 2018-09-23 ebb THIS WORKS, SOMETIMES, BUT NOT EVERYWHERE: RE_MULTICAPS.sub(format(re.findall(RE_MULTICAPS, inputText, flags=0)).title(), \
# RE_INNERCAPS.sub(format(re.findall(RE_INNERCAPS, inputText, flags=0)).lower(), \
return RE_MILESTONE.sub('', \
RE_INCLUDE.sub('', \
RE_AB.sub('', \
RE_HEAD.sub('', \
RE_AMP.sub('and', \
RE_MDEL.sub('', \
RE_SHI.sub('', \
RE_HI.sub('', \
RE_LB.sub('', \
RE_PB.sub('', \
RE_PARA.sub('<p/>', \
RE_sgaP.sub('<p/>', \
RE_LG.sub('<lg/>', \
RE_L.sub('<l/>', \
RE_CIT.sub('', \
RE_QUOTE.sub('', \
RE_OPENQT.sub('"', \
RE_CLOSEQT.sub('"', \
RE_GAP.sub('', \
RE_DELSTART.sub('<del>', \
RE_ADDSTART.sub('<add>', \
RE_METAMARK.sub('', inputText)))))))))))))))))))))).lower()
# to lowercase the normalized tokens, add .lower() to the end.
# return regexPageBreak('',inputText)
# ebb: The normalize function makes it possible to return normalized tokens that screen out some markup, but not all.
def processToken(inputText):
return {"t": inputText + ' ', "n": normalize(inputText)}
def processWitness(inputWitness, id):
return {'id': id, 'tokens': [processToken(token) for token in inputWitness]}
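# Hedged illustration (not part of the original script): the shape of the token
# dictionaries handed to collate(). The three-token witness below is made up.
def _example_tokens():
    sample = ['I <del>saw</del>', '<add>beheld</add> the', 'accomplishment']
    # processWitness() wraps each token as {"t": original + ' ', "n": normalize(original)},
    # so the original markup is preserved in "t" while "n" holds the lowercased,
    # markup-reduced form used for alignment.
    return processWitness(sample, 'demo')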
for name in glob.glob('../collChunks-Part9/1818_fullFlat_*'):
try:
matchString = name.split("fullFlat_", 1)[1]
# ebb: above gets C30.xml for example
matchStr = matchString.split(".", 1)[0]
# ebb: above strips off the file extension
with open(name, 'rb') as f1818file, \
open('../collChunks-Part9/1823_fullFlat_' + matchString, 'rb') as f1823file, \
open('../collChunks-Part9/Thomas_fullFlat_' + matchString, 'rb') as fThomasfile, \
open('../collChunks-Part9/1831_fullFlat_' + matchString, 'rb') as f1831file, \
open('../collChunks-Part9/msColl_' + matchString, 'rb') as fMSfile, \
open('../Full_Part9_xmlOutput/collation_' + matchStr + '.xml', 'w') as outputFile:
# open('collationChunks/msColl_c56_' + matchString, 'rb') as fMSc56file, \
# open('collationChunks/msColl_c58_' + matchString, 'rb') as fMSc58file, \
# open('collationChunks/msColl_c57Frag_' + matchString, 'rb') as fMSc57Fragfile, \
# open('collationChunks/msColl_c58Frag_' + matchString, 'rb') as fMSc58Fragfile, \
# fMSc56_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fMSc56file))).split('\n')
# fMSc58_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fMSc58file))).split('\n')
# fMSc57Frag_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fMSc57Fragfile))).split('\n')
# fMSc58Frag_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fMSc58Fragfile))).split('\n')
f1818_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(f1818file))).split('\n')
fThomas_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fThomasfile))).split('\n')
f1823_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(f1823file))).split('\n')
f1831_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(f1831file))).split('\n')
fMS_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(fMSfile))).split('\n')
f1818_tokenlist = processWitness(f1818_tokens, 'f1818')
fThomas_tokenlist = processWitness(fThomas_tokens, 'fThomas')
f1823_tokenlist = processWitness(f1823_tokens, 'f1823')
f1831_tokenlist = processWitness(f1831_tokens, 'f1831')
fMS_tokenlist = processWitness(fMS_tokens, 'fMS')
# fMSc56_tokenlist = processWitness(fMSc56_tokens, 'fMSc56')
# fMSc58_tokenlist = processWitness(fMSc58_tokens, 'fMSc58')
# fMSc57Frag_tokenlist = processWitness(fMSc57Frag_tokens, 'fMSc57Frag')
# fMSc58Frag_tokenlist = processWitness(fMSc58Frag_tokens, 'fMSc58Frag')
collation_input = {"witnesses": [f1818_tokenlist, f1823_tokenlist, fThomas_tokenlist, f1831_tokenlist, fMS_tokenlist]}
# table = collate(collation_input, output='tei', segmentation=True)
# table = collate(collation_input, segmentation=True, layout='vertical')
table = collate(collation_input, output='xml', segmentation=True)
print(table + '<!-- ' + nowStr + ' -->', file=outputFile)
# print(table, file=outputFile)
except IOError:
pass
|
agpl-3.0
| -5,091,329,732,725,098,000
| 50.830986
| 188
| 0.631431
| false
| 3.159702
| false
| false
| false
|
mganeva/mantid
|
scripts/Muon/GUI/Common/utilities/xml_utils.py
|
1
|
4492
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import os
import xml.etree.ElementTree as ET
import Muon.GUI.Common.utilities.run_string_utils as run_string_utils
from Muon.GUI.Common.muon_group import MuonGroup
from Muon.GUI.Common.muon_pair import MuonPair
def _create_XML_subElement_for_groups(root_node, groups):
group_nodes = []
for group in groups:
child = ET.SubElement(root_node, 'group', name=group.name)
id_string = run_string_utils.run_list_to_string(group.detectors)
        # SubElement already attaches the new node to child, so no extend() call is needed
        ET.SubElement(child, 'ids', val=id_string)
group_nodes += [child]
return group_nodes
def _create_XML_subElement_for_pairs(root_node, pairs):
pair_nodes = []
for pair in pairs:
child = ET.SubElement(root_node, 'pair', name=pair.name)
        # SubElement already attaches each new node to child, so no extend() calls are needed
        ET.SubElement(child, 'forward-group', val=pair.forward_group)
        ET.SubElement(child, 'backward-group', val=pair.backward_group)
        ET.SubElement(child, 'alpha', val=str(pair.alpha))
pair_nodes += [child]
return pair_nodes
def save_grouping_to_XML(groups, pairs, filename, save=True, description=''):
"""
Save a set of muon group and pair parameters to XML format file. Fewer checks are performed
than with the XML loading.
:param groups: A list of MuonGroup objects to save.
:param pairs: A list of MuonPair objects to save.
:param filename: The name of the XML file to save to.
:param save: Whether to actually save the file.
:return: the XML tree (used in testing).
"""
# some basic checks
if filename == "":
raise AttributeError("File must be specified for saving to XML")
if os.path.splitext(filename)[-1].lower() != ".xml":
raise AttributeError("File extension must be XML")
if sum([0 if isinstance(group, MuonGroup) else 1 for group in groups]) > 0:
raise AttributeError("groups must be MuonGroup type")
if sum([0 if isinstance(pair, MuonPair) else 1 for pair in pairs]) > 0:
raise AttributeError("pairs must be MuonPair type")
root = ET.Element("detector-grouping")
if description:
root.set('description', description)
# handle groups
_create_XML_subElement_for_groups(root, groups)
# handle pairs
_create_XML_subElement_for_pairs(root, pairs)
tree = ET.ElementTree(root)
if save:
tree.write(filename)
return tree
def load_grouping_from_XML(filename):
"""
Load group/pair data from an XML file (which can be produced using the save_grouping_to_XML() function
:param filename: Full filepath to an xml file.
:return: (groups, pairs), lists of MuonGroup, MuonPair objects respectively.
"""
tree = ET.parse(filename)
root = tree.getroot()
description = root.get('description')
group_names, group_ids = _get_groups_from_XML(root)
pair_names, pair_groups, pair_alphas = _get_pairs_from_XML(root)
groups, pairs = [], []
for i, group_name in enumerate(group_names):
groups += [MuonGroup(group_name=group_name,
detector_ids=group_ids[i])]
for i, pair_name in enumerate(pair_names):
pairs += [MuonPair(pair_name=pair_name,
forward_group_name=pair_groups[i][0],
backward_group_name=pair_groups[i][1],
alpha=pair_alphas[i])]
return groups, pairs, description
def _get_groups_from_XML(root):
names, ids = [], []
for child in root:
if child.tag == "group":
names += [child.attrib['name']]
ids += [run_string_utils.run_string_to_list(child.find('ids').attrib['val'])]
return names, ids
def _get_pairs_from_XML(root):
names, groups, alphas = [], [], []
for child in root:
if child.tag == "pair":
names += [child.attrib['name']]
groups += [[child.find('forward-group').attrib['val'], child.find('backward-group').attrib['val']]]
alphas += [child.find('alpha').attrib['val']]
return names, groups, alphas
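# Hedged round-trip sketch (not part of the original module): build a couple of
# in-memory groups/pairs, write them out and read them back. The file name, the
# detector ids and the constructor keyword names (taken from the loader above)
# are illustrative assumptions.
def _example_round_trip(filename='example_grouping.xml'):
    groups = [MuonGroup(group_name='fwd', detector_ids=[1, 2, 3]),
              MuonGroup(group_name='bwd', detector_ids=[4, 5, 6])]
    pairs = [MuonPair(pair_name='long', forward_group_name='fwd',
                      backward_group_name='bwd', alpha=1.0)]
    save_grouping_to_XML(groups, pairs, filename, description='demo grouping')
    return load_grouping_from_XML(filename)  # -> (groups, pairs, 'demo grouping')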
|
gpl-3.0
| 2,430,794,358,631,994,000
| 35.819672
| 111
| 0.642698
| false
| 3.637247
| false
| false
| false
|
ZobairAlijan/osf.io
|
api_tests/base/test_serializers.py
|
1
|
5406
|
# -*- coding: utf-8 -*-
import httplib as http
from nose.tools import * # flake8: noqa
from tests.base import ApiTestCase, DbTestCase
from tests import factories
from tests.utils import make_drf_request
from api.base.settings.defaults import API_BASE
from api.base.serializers import JSONAPISerializer
from api.nodes.serializers import NodeSerializer, RelationshipField
class TestApiBaseSerializers(ApiTestCase):
def setUp(self):
super(TestApiBaseSerializers, self).setUp()
self.node = factories.ProjectFactory(is_public=True)
for i in range(5):
factories.ProjectFactory(is_public=True, parent=self.node)
self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
def test_counts_not_included_in_link_fields_by_default(self):
res = self.app.get(self.url)
relationships = res.json['data']['relationships']
for relation in relationships.values():
link = relation['links'].values()[0]
assert_not_in('count', link['meta'])
def test_counts_included_in_link_fields_with_related_counts_query_param(self):
res = self.app.get(self.url, params={'related_counts': True})
relationships = res.json['data']['relationships']
for key, relation in relationships.iteritems():
field = NodeSerializer._declared_fields[key]
if (field.related_meta or {}).get('count'):
link = relation['links'].values()[0]
assert_in('count', link['meta'])
def test_related_counts_excluded_query_param_false(self):
res = self.app.get(self.url, params={'related_counts': False})
relationships = res.json['data']['relationships']
for relation in relationships.values():
link = relation['links'].values()[0]
assert_not_in('count', link['meta'])
def test_invalid_param_raises_bad_request(self):
res = self.app.get(self.url, params={'related_counts': 'fish'}, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
class TestRelationshipField(DbTestCase):
# We need a Serializer to test the Relationship field (needs context)
class BasicNodeSerializer(JSONAPISerializer):
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'}
)
parent_with_meta = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_count', 'extra': 'get_extra'},
)
self_and_related_field = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
self_view='nodes:node-contributors',
self_view_kwargs={'node_id': '<pk>'},
)
two_url_kwargs = RelationshipField(
# fake url, for testing purposes
related_view='nodes:node-pointer-detail',
related_view_kwargs={'node_id': '<pk>', 'node_link_id': '<pk>'},
)
not_attribute_on_target = RelationshipField(
# fake url, for testing purposes
related_view='nodes:node-children',
related_view_kwargs={'node_id': '12345'}
)
class Meta:
type_ = 'nodes'
def get_count(self, obj):
return 1
def get_extra(self, obj):
return 'foo'
# TODO: Expand tests
# Regression test for https://openscience.atlassian.net/browse/OSF-4832
def test_serializing_meta(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
meta = data['relationships']['parent_with_meta']['links']['related']['meta']
assert_not_in('count', meta)
assert_in('extra', meta)
assert_equal(meta['extra'], 'foo')
def test_self_and_related_fields(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
relationship_field = data['relationships']['self_and_related_field']['links']
assert_in('/v2/nodes/{}/contributors/'.format(node._id), relationship_field['self']['href'])
assert_in('/v2/nodes/{}/'.format(node._id), relationship_field['related']['href'])
def test_field_with_two_kwargs(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
field = data['relationships']['two_url_kwargs']['links']
assert_in('/v2/nodes/{}/node_links/{}/'.format(node._id, node._id), field['related']['href'])
def test_field_with_non_attribute(self):
req = make_drf_request()
project = factories.ProjectFactory()
node = factories.NodeFactory(parent=project)
data = self.BasicNodeSerializer(node, context={'request': req}).data['data']
field = data['relationships']['not_attribute_on_target']['links']
assert_in('/v2/nodes/{}/children/'.format('12345'), field['related']['href'])
|
apache-2.0
| -2,315,594,354,815,570,400
| 37.340426
| 101
| 0.620607
| false
| 3.937363
| true
| false
| false
|
fernandog/Medusa
|
ext/tornado/gen.py
|
1
|
47881
|
"""``tornado.gen`` implements generator-based coroutines.
.. note::
The "decorator and generator" approach in this module is a
precursor to native coroutines (using ``async def`` and ``await``)
which were introduced in Python 3.5. Applications that do not
require compatibility with older versions of Python should use
native coroutines instead. Some parts of this module are still
useful with native coroutines, notably `multi`, `sleep`,
`WaitIterator`, and `with_timeout`. Some of these functions have
counterparts in the `asyncio` module which may be used as well,
although the two may not necessarily be 100% compatible.
Coroutines provide an easier way to work in an asynchronous
environment than chaining callbacks. Code using coroutines is
technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler:
.. testcode::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
could be written with ``gen`` as:
.. testcode::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its ``Future.result``.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished:
.. testcode::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. testoutput::
:hide:
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function
import collections
import functools
import itertools
import os
import sys
import types
from tornado.concurrent import (Future, is_future, chain_future, future_set_exc_info,
future_add_done_callback, future_set_result_unless_cancelled)
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.util import PY3, raise_exc_info, TimeoutError
try:
try:
# py34+
from functools import singledispatch # type: ignore
except ImportError:
from singledispatch import singledispatch # backport
except ImportError:
# In most cases, singledispatch is required (to avoid
# difficult-to-diagnose problems in which the functionality
    # available differs depending on which invisible packages are
# installed). However, in Google App Engine third-party
# dependencies are more trouble so we allow this module to be
# imported without it.
if 'APPENGINE_RUNTIME' not in os.environ:
raise
singledispatch = None
try:
try:
# py35+
from collections.abc import Generator as GeneratorType # type: ignore
except ImportError:
from backports_abc import Generator as GeneratorType # type: ignore
try:
# py35+
from inspect import isawaitable # type: ignore
except ImportError:
from backports_abc import isawaitable
except ImportError:
if 'APPENGINE_RUNTIME' not in os.environ:
raise
from types import GeneratorType
def isawaitable(x): # type: ignore
return False
if PY3:
import builtins
else:
import __builtin__ as builtins
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
def _value_from_stopiteration(e):
try:
# StopIteration has a value attribute beginning in py33.
# So does our Return class.
return e.value
except AttributeError:
pass
try:
# Cython backports coroutine functionality by putting the value in
# e.args[0].
return e.args[0]
except (AttributeError, IndexError):
return None
def _create_future():
future = Future()
# Fixup asyncio debug info by removing extraneous stack entries
source_traceback = getattr(future, "_source_traceback", ())
while source_traceback:
# Each traceback entry is equivalent to a
# (filename, self.lineno, self.name, self.line) tuple
filename = source_traceback[-1][0]
if filename == __file__:
del source_traceback[-1]
else:
break
return future
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future_add_done_callback(future, stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
# On Python 3.5, set the coroutine flag on our generator, to allow it
# to be used with 'await'.
wrapped = func
if hasattr(types, 'coroutine'):
func = types.coroutine(func)
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
future = _create_future()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = _value_from_stopiteration(e)
except Exception:
future_set_exc_info(future, sys.exc_info())
try:
return future
finally:
# Avoid circular references
future = None
else:
if isinstance(result, GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = _create_future()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future_set_result_unless_cancelled(future, _value_from_stopiteration(e))
except Exception:
future_set_exc_info(future, sys.exc_info())
else:
# Provide strong references to Runner objects as long
# as their result future objects also have strong
# references (typically from the parent coroutine's
# Runner). This keeps the coroutine's Runner alive.
# We do this by exploiting the public API
# add_done_callback() instead of putting a private
# attribute on the Future.
# (Github issues #1769, #2229).
runner = Runner(result, future, yielded)
future.add_done_callback(lambda _: runner)
yielded = None
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future_set_result_unless_cancelled(future, result)
return future
wrapper.__wrapped__ = wrapped
wrapper.__tornado_coroutine__ = True
return wrapper
def is_coroutine_function(func):
"""Return whether *func* is a coroutine function, i.e. a function
wrapped with `~.gen.coroutine`.
.. versionadded:: 4.5
"""
return getattr(func, '__tornado_coroutine__', False)
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
# Cython recognizes subclasses of StopIteration with a .args tuple.
self.args = (value,)
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print("Error {} from {}".format(e, wait_iterator.current_future))
else:
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (if keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
On Python 3.5, `WaitIterator` implements the async iterator
protocol, so it can be used with the ``async for`` statement (note
that in this version the entire iteration is aborted if any value
raises an exception, while the previous example can continue past
individual errors)::
async for result in gen.WaitIterator(future1, future2):
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
.. versionadded:: 4.1
.. versionchanged:: 4.3
Added ``async for`` support in Python 3.5.
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
for future in futures:
future_add_done_callback(future, self._done_callback)
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = Future()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
def _done_callback(self, done):
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
def __aiter__(self):
return self
def __anext__(self):
if self.done():
# Lookup by name to silence pyflakes on older versions.
raise getattr(builtins, 'StopAsyncIteration')()
return self.next()
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = _create_future()
def handle_exception(typ, value, tb):
if future.done():
return False
future_set_exc_info(future, (typ, value, tb))
return True
def set_result(result):
if future.done():
return
future_set_result_unless_cancelled(future, result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
class YieldFuture(YieldPoint):
def __init__(self, future):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
self.future = future
self.io_loop = IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result_fn = self.future.result
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result_fn()
def _contains_yieldpoint(children):
"""Returns True if ``children`` contains any YieldPoints.
``children`` may be a dict or a list, as used by `MultiYieldPoint`
and `multi_future`.
"""
if isinstance(children, dict):
return any(isinstance(i, YieldPoint) for i in children.values())
if isinstance(children, list):
return any(isinstance(i, YieldPoint) for i in children)
return False
def multi(children, quiet_exceptions=()):
"""Runs multiple asynchronous operations in parallel.
``children`` may either be a list or a dict whose values are
yieldable objects. ``multi()`` returns a new yieldable
object that resolves to a parallel structure containing their
results. If ``children`` is a list, the result is a list of
results in the same order; if it is a dict, the result is a dict
with the same keys.
That is, ``results = yield multi(list_of_futures)`` is equivalent
to::
results = []
for future in list_of_futures:
results.append(yield future)
If any children raise exceptions, ``multi()`` will raise the first
one. All others will be logged, unless they are of types
contained in the ``quiet_exceptions`` argument.
If any of the inputs are `YieldPoints <YieldPoint>`, the returned
yieldable object is a `YieldPoint`. Otherwise, returns a `.Future`.
This means that the result of `multi` can be used in a native
coroutine if and only if all of its children can be.
In a ``yield``-based coroutine, it is not normally necessary to
call this function directly, since the coroutine runner will
do it automatically when a list or dict is yielded. However,
it is necessary in ``await``-based coroutines, or to pass
the ``quiet_exceptions`` argument.
This function is available under the names ``multi()`` and ``Multi()``
for historical reasons.
Cancelling a `.Future` returned by ``multi()`` does not cancel its
children. `asyncio.gather` is similar to ``multi()``, but it does
cancel its children.
.. versionchanged:: 4.2
If multiple yieldables fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. versionchanged:: 4.3
Replaced the class ``Multi`` and the function ``multi_future``
with a unified function ``multi``. Added support for yieldables
other than `YieldPoint` and `.Future`.
"""
if _contains_yieldpoint(children):
return MultiYieldPoint(children, quiet_exceptions=quiet_exceptions)
else:
return multi_future(children, quiet_exceptions=quiet_exceptions)
Multi = multi
class MultiYieldPoint(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
This class is similar to `multi`, but it always creates a stack
context even when no children require it. It is not compatible with
native coroutines.
.. versionchanged:: 4.2
If multiple ``YieldPoints`` fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. versionchanged:: 4.3
Renamed from ``Multi`` to ``MultiYieldPoint``. The name ``Multi``
remains as an alias for the equivalent `multi` function.
.. deprecated:: 4.3
Use `multi` instead.
"""
def __init__(self, children, quiet_exceptions=()):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if not isinstance(i, YieldPoint):
i = convert_yielded(i)
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
self.quiet_exceptions = quiet_exceptions
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result_list = []
exc_info = None
for f in self.children:
try:
result_list.append(f.get_result())
except Exception as e:
if exc_info is None:
exc_info = sys.exc_info()
else:
if not isinstance(e, self.quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
if exc_info is not None:
raise_exc_info(exc_info)
if self.keys is not None:
return dict(zip(self.keys, result_list))
else:
return list(result_list)
def multi_future(children, quiet_exceptions=()):
"""Wait for multiple asynchronous futures in parallel.
This function is similar to `multi`, but does not support
`YieldPoints <YieldPoint>`.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. deprecated:: 4.3
Use `multi` instead.
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
children = list(map(convert_yielded, children))
assert all(is_future(i) or isinstance(i, _NullFuture) for i in children)
unfinished_children = set(children)
future = _create_future()
if not children:
future_set_result_unless_cancelled(future,
{} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
result_list = []
for f in children:
try:
result_list.append(f.result())
except Exception as e:
if future.done():
if not isinstance(e, quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
else:
future_set_exc_info(future, sys.exc_info())
if not future.done():
if keys is not None:
future_set_result_unless_cancelled(future,
dict(zip(keys, result_list)))
else:
future_set_result_unless_cancelled(future, result_list)
listening = set()
for f in children:
if f not in listening:
listening.add(f)
future_add_done_callback(f, callback)
return future
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
.. deprecated:: 4.3
This function only handles ``Futures``, not other yieldable objects.
Instead of `maybe_future`, check for the non-future result types
you expect (often just ``None``), and ``yield`` anything unknown.
"""
if is_future(x):
return x
else:
fut = _create_future()
fut.set_result(x)
return fut
def with_timeout(timeout, future, quiet_exceptions=()):
"""Wraps a `.Future` (or other yieldable object) in a timeout.
Raises `tornado.util.TimeoutError` if the input future does not
complete before ``timeout``, which may be specified in any form
allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
    an absolute time relative to `.IOLoop.time`).
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Does not support `YieldPoint` subclasses.
The wrapped `.Future` is not canceled when the timeout expires,
permitting it to be reused. `asyncio.wait_for` is similar to this
function but it does cancel the wrapped `.Future` on timeout.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
.. versionchanged:: 4.4
Added support for yieldable objects other than `.Future`.
"""
# TODO: allow YieldPoints in addition to other yieldables?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
future = convert_yielded(future)
result = _create_future()
chain_future(future, result)
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
if not result.done():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future_add_done_callback(future, error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here).
future_add_done_callback(
future, lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
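# Editor's note: an illustrative sketch, not part of the original module, of
# putting a one-second deadline on a slow operation; `slow_operation` is a
# hypothetical coroutine.
#
#     import datetime
#
#     @coroutine
#     def fetch_with_deadline():
#         try:
#             result = yield with_timeout(datetime.timedelta(seconds=1),
#                                         slow_operation())
#         except TimeoutError:
#             result = None  # the wrapped future is not cancelled and keeps running
#         raise Return(result)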
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = _create_future()
IOLoop.current().call_later(duration,
lambda: future_set_result_unless_cancelled(f, None))
return f
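# Editor's note: an illustrative sketch, not part of the original module, of a
# periodic loop that pauses without blocking the IOLoop; `poll_once` is a
# hypothetical user function.
#
#     @coroutine
#     def poll_forever():
#         while True:
#             poll_once()
#             yield sleep(0.5)  # non-blocking analogue of time.sleep(0.5)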
class _NullFuture(object):
"""_NullFuture resembles a Future that finished with a result of None.
It's not actually a `Future` to avoid depending on a particular event loop.
Handled as a special case in the coroutine runner.
"""
def result(self):
return None
def done(self):
return True
# _null_future is used as a dummy value in the coroutine runner. It differs
# from moment in that moment always adds a delay of one IOLoop iteration
# while _null_future is processed as soon as possible.
_null_future = _NullFuture()
moment = _NullFuture()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
.. deprecated:: 4.5
``yield None`` (or ``yield`` with no argument) is now equivalent to
``yield gen.moment``.
"""
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.Future`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
gen = result_future = first_yielded = None
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
future_set_result_unless_cancelled(self.future,
self.yield_point.get_result())
except:
future_set_exc_info(self.future, sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
exc_info = None
try:
value = future.result()
except Exception:
self.had_exception = True
exc_info = sys.exc_info()
future = None
if exc_info is not None:
try:
yielded = self.gen.throw(*exc_info)
finally:
# Break up a reference to itself
# for faster GC on CPython.
exc_info = None
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
future_set_result_unless_cancelled(self.result_future,
_value_from_stopiteration(e))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
future_set_exc_info(self.result_future, sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
yielded = None
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled in convert_yielded.
if _contains_yieldpoint(yielded):
yielded = multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = Future()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
future_set_result_unless_cancelled(self.future, yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = Future()
future_set_exc_info(self.future, sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = Future()
future_set_exc_info(self.future, sys.exc_info())
if self.future is moment:
self.io_loop.add_callback(self.run)
return False
elif not self.future.done():
def inner(f):
# Break a reference cycle to speed GC.
f = None # noqa
self.run()
self.io_loop.add_future(
self.future, inner)
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = Future()
future_set_exc_info(self.future, (typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
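# Editor's note: an illustrative sketch, not part of the original module, of how
# the adapter packs arguments; `show` is a hypothetical callback.
#
#     def show(arg):
#         app_log.info("got %r", arg)
#
#     adapted = _argument_adapter(show)
#     adapted(1)            # calls show(1)
#     adapted(1, 2, k=3)    # calls show(Arguments(args=(1, 2), kwargs={'k': 3}))
#     adapted()             # calls show(None)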
# Convert Awaitables into Futures.
try:
import asyncio
except ImportError:
# Py2-compatible version for use with Cython.
# Copied from PEP 380.
@coroutine
def _wrap_awaitable(x):
if hasattr(x, '__await__'):
_i = x.__await__()
else:
_i = iter(x)
try:
_y = next(_i)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
else:
while 1:
try:
_s = yield _y
except GeneratorExit as _e:
try:
_m = _i.close
except AttributeError:
pass
else:
_m()
raise _e
except BaseException as _e:
_x = sys.exc_info()
try:
_m = _i.throw
except AttributeError:
raise _e
else:
try:
_y = _m(*_x)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
break
else:
try:
if _s is None:
_y = next(_i)
else:
_y = _i.send(_s)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
break
raise Return(_r)
else:
try:
_wrap_awaitable = asyncio.ensure_future
except AttributeError:
# asyncio.ensure_future was introduced in Python 3.4.4, but
# Debian jessie still ships with 3.4.2 so try the old name.
_wrap_awaitable = getattr(asyncio, 'async')
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled earlier.
if yielded is None or yielded is moment:
return moment
elif yielded is _null_future:
return _null_future
elif isinstance(yielded, (list, dict)):
return multi(yielded)
elif is_future(yielded):
return yielded
elif isawaitable(yielded):
return _wrap_awaitable(yielded)
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
|
gpl-3.0
| 5,848,268,275,463,006,000
| 34.91973
| 93
| 0.60454
| false
| 4.525187
| false
| false
| false
|
parroyo/python_menu
|
python_menu/menu.py
|
1
|
5783
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from dialog import Dialog, _common_args_syntax
import sys
import os
import inspect
MENU_KEY_TYPE = 'type'
MENU_KEY_COMMON = 'common'
MENU_KEY_ACTION = 'action'
MENU_KEY_GO = 'go '
MENU_KEY_BACK = 'back'
MENU_KEY_EXIT = 'exit'
MENU_KEY_CLEAR = 'clear'
MENU_KEY_CHOICES = 'choices'
class UndefinedScreen(Exception):
""" Screen is not present in the model
"""
def __init__(self, screen_name):
super(UndefinedScreen, self).__init__(
"Screen '{0}' is not present in the model".format(screen_name))
class InvalidMethod(Exception):
""" Invalid Method name
"""
def __init__(self, method_name):
super(InvalidMethod, self).__init__(
"Invalid Method name '{0}'".format(method_name))
class Menu(object):
""" Class Menu
"""
def __init__(self, menu_data, debug=False):
self._screens = []
self._screen_values = {}
self._dialog = Dialog()
self._dialog_methods = dict(inspect.getmembers(self._dialog))
self._custom_methods = dict(inspect.getmembers(self))
self._common_args = list(_common_args_syntax.keys())
self._debug_enable = debug
if sys.version_info.major == 2:
self.debug = self.debug_python2
self._menu = menu_data
self._load_common()
def show(self, screen_name):
""" Show the screen
Args:
screen_name(string): name of the screen to show
Raises:
UndefinedScreen
InvalidMethod
"""
self._screens.append(screen_name)
while (self._screens != []):
self._show_current_screen()
def debug_python2(self, msg):
if self._debug_enable:
raw_input(msg)
def debug(self, msg):
if self._debug_enable:
input(msg)
def clear(self):
""" Clear the screen
"""
os.system('cls' if os.name == 'nt' else 'clear')
def get_value(self, screen_name):
""" Get the value stored by the screen
Args:
screen_name(string): name of the screen to get the value
"""
value = None
if screen_name in self._screen_values:
value = self._screen_values[screen_name]
return value
def _load_common(self):
self._common = {}
for item in self._menu[MENU_KEY_COMMON]:
self._common[item] = self._menu[MENU_KEY_COMMON][item]
def _show_current_screen(self):
current_screen = self._screens[-1]
(dialog_exit, dialog_value) = self._show_dialog(current_screen)
self._screen_values[current_screen] = dialog_value
self._do_actions(current_screen, dialog_exit, dialog_value)
def _show_dialog(self, item):
try:
dialog_type = self._menu[item][MENU_KEY_TYPE]
except KeyError as e:
raise UndefinedScreen(str(e))
if dialog_type in self._dialog_methods:
screen = self._dialog_methods[dialog_type]
(allowed_args, varargs, keywords, locals) = inspect.getargspec(screen)
args = self._common.copy()
screen_args = dict([(i, self._menu[item][i]) for i in self._menu[item] if i in allowed_args or i in self._common_args])
args.update(screen_args)
self.debug("args: %s" % args)
dialog_exit = self._dialog_methods[dialog_type](**args)
dialog_value = [None]
if type(dialog_exit) is tuple:
dialog_exit, dialog_value = dialog_exit[0], dialog_exit[1:]
return (dialog_exit, dialog_value)
def _do_actions(self, item, dialog_exit, dialog_value):
""" Do actions
"""
action = self._menu[item].get(MENU_KEY_ACTION, {}).get(dialog_exit)
if action is None:
return
if type(action) is dict:
action = action.get(dialog_value[0])
if type(action) is str:
self._do_action(action)
if type(action) is list:
for action_item in action:
self._do_action(action_item)
def _do_action(self, action):
""" Do action
"""
if MENU_KEY_EXIT in action:
self._screens = []
elif MENU_KEY_GO in action:
new_screen = action.split(' ')[1]
if new_screen == MENU_KEY_BACK:
self._screens.pop()
else:
self._screens.append(new_screen)
else:
# Custom method
self._call_custom_method(action)
def _call_custom_method(self, action):
""" Call custom method
"""
method_name = action
parameters = {}
if type(action) is list:
if len(action) > 0:
method_name = action[0]
if len(action) > 1:
parameters = action[1]
if method_name in self._custom_methods:
self._custom_methods[method_name](**parameters)
else:
raise InvalidMethod(action)
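# Editor's note: an illustrative sketch, not part of the original module, of a
# minimal menu model.  The 'msgbox' dialog type is a pythondialog method; the
# 'ok' exit code used as the action key is an assumption about the installed
# pythondialog version and may need adjusting.
#
#     menu_data = {
#         'common': {'height': 15, 'width': 60},
#         'main': {
#             'type': 'msgbox',
#             'text': 'Hello from python_menu',
#             'action': {'ok': 'exit'},
#         },
#     }
#     Menu(menu_data, debug=False).show('main')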
|
lgpl-3.0
| -3,854,620,723,446,406,000
| 30.950276
| 131
| 0.583434
| false
| 3.915369
| false
| false
| false
|
jserver/pagila
|
pagila/settings.py
|
1
|
2178
|
"""
Django settings for pagila project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['PAGILA_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'films',
'store',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'pagila.urls'
WSGI_APPLICATION = 'pagila.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'pagila',
'USER': os.environ['PAGILA_DB_USER'],
'PASSWORD': os.environ['PAGILA_DB_PASS'],
'HOST': os.environ['PAGILA_DB'],
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Media
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
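# Editor's note (not part of the original settings): the module expects four
# environment variables to be set before Django starts.  A hypothetical sanity
# check could look like this:
#
#     import os
#     for var in ('PAGILA_SECRET_KEY', 'PAGILA_DB_USER', 'PAGILA_DB_PASS', 'PAGILA_DB'):
#         assert var in os.environ, '%s must be set before importing settings' % var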
|
bsd-3-clause
| -8,704,175,519,857,630,000
| 21.926316
| 71
| 0.705234
| false
| 3.275188
| false
| false
| false
|
adamnew123456/jobmon
|
jobmon/ticker.py
|
1
|
3108
|
"""
A ticker is responsible for calling into the supervisor periodically and
getting it to handle restarts.
"""
import logging
import os
import select
import threading
import time
from jobmon import util
LOGGER = logging.getLogger('jobmon.ticker')
class Ticker(threading.Thread, util.TerminableThreadMixin):
"""
A ticker is responsible for keeping track of a bunch of timeouts (each of
which is associated with a key), and then calling a function with
that key when the timeout expires.
"""
def __init__(self, callback):
threading.Thread.__init__(self)
util.TerminableThreadMixin.__init__(self)
# This is used to force ticks when new events are registered
reader, writer = os.pipe()
self.tick_reader = os.fdopen(reader, 'rb')
self.tick_writer = os.fdopen(writer, 'wb')
self.timeout_lock = threading.Lock()
self.timeouts = {}
self.callback = callback
def __contains__(self, key):
return key in self.timeouts
def register(self, key, abstime):
"""
Registers a new timeout, to be run at the given absolute time.
"""
LOGGER.info('Registering %s at %d', key, abstime)
with self.timeout_lock:
self.timeouts[key] = abstime
self.tick_writer.write(b' ')
self.tick_writer.flush()
def unregister(self, key):
"""
Removes a timeout from the ticker, if it already exists.
"""
LOGGER.info('Removing %s', key)
with self.timeout_lock:
if key in self.timeouts:
del self.timeouts[key]
def run_timeouts(self):
"""
Runs all the expired timeouts.
"""
expired = []
now = time.time()
with self.timeout_lock:
for key, timeout in self.timeouts.items():
if timeout <= now:
expired.append(key)
for key in expired:
LOGGER.info('Running callback on %s', key)
self.callback(key)
self.unregister(key)
@util.log_crashes(LOGGER, 'Error in ticker')
def run(self):
"""
Runs the timeout loop, calling the timeout function when appropriate.
"""
while True:
try:
min_wait_time = min(self.timeouts.values()) - time.time()
if min_wait_time < 0:
min_wait_time = 0
except ValueError:
min_wait_time = None
readers, _, _ = select.select(
[self.tick_reader, self.exit_reader], [], [],
min_wait_time)
self.run_timeouts()
if self.exit_reader in readers:
break
if self.tick_reader in readers:
# Flush the pipe, since we don't want it to get backed up
LOGGER.info('Woken up by registration')
self.tick_reader.read(1)
LOGGER.info('Closing...')
self.cleanup()
self.tick_reader.close()
self.tick_writer.close()
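# Editor's note: an illustrative sketch, not part of the original module, of
# driving a Ticker; `restart_job` is a hypothetical callback.
#
#     import time
#
#     def restart_job(key):
#         LOGGER.info('timeout expired for %s', key)
#
#     ticker = Ticker(restart_job)
#     ticker.start()                             # Ticker is a threading.Thread
#     ticker.register('job-1', time.time() + 5)  # run the callback ~5 seconds from now
#     # shutdown goes through util.TerminableThreadMixin (method name not shown here)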
|
bsd-2-clause
| 7,884,936,955,516,481,000
| 27.777778
| 78
| 0.559524
| false
| 4.316667
| false
| false
| false
|
lindemann09/pyForceDAQ
|
forceDAQ/data_handling/convert.py
|
1
|
8601
|
#!/usr/bin/env python
"""
Functions to convert force data
This module can also be executed.
"""
__author__ = 'Oliver Lindemann'
import os
import sys
import gzip
import numpy as np
from .read_force_data import read_raw_data, data_frame_to_text
PAUSE_CRITERION = 500
MSEC_PER_SAMPLES = 1
REF_SAMPLE_PROBE = 1000
MIN_DELAY_ENDSTREAM = 2
CONVERTED_SUFFIX = ".conv.csv.gz"
CONVERTED_SUBFOLDER = "converted"
def _periods_from_daq_events(daq_events):
periods = {}
started = None
sensor_id = None
evt = np.array(daq_events["value"])
times = np.array(daq_events["time"]).astype(int)
idx = np.argsort(times)
for t, v in zip(times[idx], evt[idx]):
try:
sensor_id = int(v.split(":")[1])
except:
sensor_id = None
if sensor_id not in periods:
periods[sensor_id] = []
if v.startswith("started"):
if started is None:
started = t
else:
periods[sensor_id].append((started, None))
started = None
elif v.startswith("pause"):
periods[sensor_id].append((started, t))
started = None
# sort remaining
if started is not None:
periods[sensor_id].append((started, None))
return periods
def _pauses_idx_from_timeline(time, pause_criterion):
pauses_idx = np.where(np.diff(time) > pause_criterion)[0]
last_pause = -1
rtn = []
for idx in np.append(pauses_idx, len(time)-1):
rtn.append((last_pause+1, idx))
last_pause = idx
return rtn
def _most_frequent_value(values):
(v, cnt) = np.unique(values, return_counts=True)
idx = np.argmax(cnt)
return v[idx]
def print_histogram(values):
(v, cnt) = np.unique(values, return_counts=True)
for a,b in zip(v,cnt):
print("{} -- {}".format(a,b))
def _end_stream_sample(timestamps, min_delay=MIN_DELAY_ENDSTREAM):
"""finds end of the data stream, that is, sample before next long waiting
sample or returns None if no end can be detected"""
next_t_diffs = np.diff(timestamps)
try:
return np.where(next_t_diffs >= min_delay)[0][0] #+1-1
except:
return None
def _linear_timeline_matched_by_single_reference_sample(irregular_timeline,
id_ref_sample, msec_per_sample):
"""match timeline that differences between the two is minimal
new times can not be after irregular times
"""
t_ref = irregular_timeline[id_ref_sample]
t_first = t_ref - (id_ref_sample*msec_per_sample)
t_last = t_first + ((len(irregular_timeline) - 1) * msec_per_sample)
return np.arange(t_first, t_last + msec_per_sample, step=msec_per_sample)
def _timeline_matched_by_delay_chunked_samples(times, msec_per_sample):
rtn = np.empty(len(times))*np.NaN
p = 0
while p<len(times):
next_ref_sample = _end_stream_sample(times[p:])
if next_ref_sample is not None:
ref_time = times[p+next_ref_sample]
rtn[p:(p+next_ref_sample+1)] = np.arange(
start = ref_time - (next_ref_sample*msec_per_sample),
stop = ref_time + msec_per_sample,
step = msec_per_sample)
p = p + next_ref_sample + 1
else:
            # no further reference samples
rtn[p:] = times[p:]
break
return rtn
class Method(object):
types = {1: "single reference sample (forced linearity)",
2: "multiple delayed chunked samples (no linearity assumed)"}
def __init__(self, id):
if id not in Method.types:
raise RuntimeError("Unkown resampling method")
self.id = id
@property
def description(self):
return Method.types[self.id]
@staticmethod
def get_method_from_description(description):
for id, desc in Method.types.items():
if desc == description:
return Method(id)
return None
def _adjusted_timestamps(timestamps, pauses_idx, evt_periods, method):
"""
method=Method(1): _linear_timeline_matched_by_single_reference_sample
method=Method(2): _timeline_matched_by_delay_chunked_samples
"""
# adapting timestamps
rtn = np.empty(len(timestamps))*np.NaN
period_counter = 0
for idx, evt_per in zip(pauses_idx, evt_periods):
# loop over periods
# logging
period_counter += 1
n_samples = idx[1] - idx[0] + 1
if evt_per[1]: # end time
sample_diff = n_samples - (1+(evt_per[1]-evt_per[0])//MSEC_PER_SAMPLES)
if sample_diff!=0:
print("Period {}: Sample difference of {}".format(
period_counter, sample_diff))
else:
print("Period {}: No pause sampling time.".format(period_counter))
#convert times
times = timestamps[idx[0]:idx[1] + 1]
if method.id==1:
            # match reference samples
next_ref = _end_stream_sample(times[REF_SAMPLE_PROBE:(REF_SAMPLE_PROBE + 1000)])
if next_ref is None:
next_ref = 0
newtimes = _linear_timeline_matched_by_single_reference_sample(
times, id_ref_sample=REF_SAMPLE_PROBE + next_ref,
msec_per_sample=MSEC_PER_SAMPLES)
elif method.id==2:
# using delays
newtimes = _timeline_matched_by_delay_chunked_samples(times,
msec_per_sample=MSEC_PER_SAMPLES)
else:
newtimes = times
rtn[idx[0]:idx[1] + 1] = newtimes
return rtn.astype(int)
def converted_filename(flname):
"""returns path and filename of the converted data file"""
if flname.endswith(".gz"):
tmp = flname[:-7]
else:
tmp = flname[:-4]
path, new_filename = os.path.split(tmp)
converted_path = os.path.join(path, CONVERTED_SUBFOLDER)
return converted_path, new_filename + CONVERTED_SUFFIX
def convert_raw_data(filepath, method, save_time_adjustments=False,
keep_delay_variable=False):
"""preprocessing raw pyForceData:
"""
# todo only one sensor
assert(isinstance(method, Method))
filepath = os.path.join(os.path.split(sys.argv[0])[0], filepath)
print("Converting {}".format(filepath))
print("Method: {}".format(method.description))
data, udp_event, daq_events, comments = read_raw_data(filepath)
print("{} samples".format(len(data["time"])))
sensor_id = 1
if not keep_delay_variable:
data.pop("delay", None)
timestamps = np.array(data["time"]).astype(int)
#pauses
pauses_idx = _pauses_idx_from_timeline(timestamps, pause_criterion=PAUSE_CRITERION)
evt_periods = _periods_from_daq_events(daq_events)
if len(pauses_idx) != len(evt_periods[sensor_id]):
raise RuntimeError("Pauses in DAQ events do not match recording pauses")
else:
data["time"] = _adjusted_timestamps(timestamps=timestamps,
pauses_idx=pauses_idx,
evt_periods=evt_periods[
sensor_id],
method=method)
if save_time_adjustments:
data["time_adjustment"] = timestamps-data["time"]
#print("Time difference historgram")
#print_histogram(data["time_adjustment"])
#save
folder, new_filename = converted_filename(filepath)
try:
os.makedirs(folder)
except:
pass
new_filename = os.path.join(folder, new_filename)
with gzip.open(new_filename, "wt") as fl:
fl.write(comments.strip() + "\n")
fl.write(data_frame_to_text(data))
def get_all_data_files(folder):
rtn = []
for flname in os.listdir(folder):
if (flname.endswith(".csv") or flname.endswith(".csv.gz")) and not \
flname.endswith(CONVERTED_SUFFIX):
flname = os.path.join(folder, flname)
rtn.append(flname)
return rtn
def get_all_unconverted_data_files(folder):
rtn = []
files = get_all_data_files(folder)
try: # make subfolder
c_path, _ = converted_filename(files[0])
converted_files = os.listdir(c_path)
except:
converted_files = []
for flname in files:
_, c_flname = converted_filename(flname)
if c_flname not in converted_files:
rtn.append(flname)
return rtn
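# Editor's note: an illustrative sketch, not part of the original module, of
# converting every unconverted raw file in a folder with the "single reference
# sample" method; the 'data' folder is a hypothetical path.
#
#     for flname in get_all_unconverted_data_files('data'):
#         convert_raw_data(flname, Method(1), save_time_adjustments=True)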
|
mit
| -2,560,907,721,749,583,000
| 30.276364
| 99
| 0.58063
| false
| 3.636786
| false
| false
| false
|
Donkyhotay/MoonPy
|
twisted/protocols/sip.py
|
1
|
41973
|
# -*- test-case-name: twisted.test.test_sip -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Session Initialization Protocol.
Documented in RFC 2543.
[Superceded by 3261]
This module contains a deprecated implementation of HTTP Digest authentication.
See L{twisted.cred.credentials} and L{twisted.cred._digest} for its new home.
"""
# system imports
import socket, time, sys, random, warnings
from zope.interface import implements, Interface
# twisted imports
from twisted.python import log, util
from twisted.python.deprecate import deprecated
from twisted.python.versions import Version
from twisted.python.hashlib import md5
from twisted.internet import protocol, defer, reactor
from twisted import cred
import twisted.cred.error
from twisted.cred.credentials import UsernameHashedPassword, UsernamePassword
# sibling imports
from twisted.protocols import basic
PORT = 5060
# SIP headers have short forms
shortHeaders = {"call-id": "i",
"contact": "m",
"content-encoding": "e",
"content-length": "l",
"content-type": "c",
"from": "f",
"subject": "s",
"to": "t",
"via": "v",
}
longHeaders = {}
for k, v in shortHeaders.items():
longHeaders[v] = k
del k, v
statusCodes = {
100: "Trying",
180: "Ringing",
181: "Call Is Being Forwarded",
182: "Queued",
183: "Session Progress",
200: "OK",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Moved Temporarily",
303: "See Other",
305: "Use Proxy",
380: "Alternative Service",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
406: "Not Acceptable",
407: "Proxy Authentication Required",
408: "Request Timeout",
409: "Conflict", # Not in RFC3261
410: "Gone",
411: "Length Required", # Not in RFC3261
413: "Request Entity Too Large",
414: "Request-URI Too Large",
415: "Unsupported Media Type",
416: "Unsupported URI Scheme",
420: "Bad Extension",
421: "Extension Required",
423: "Interval Too Brief",
480: "Temporarily Unavailable",
481: "Call/Transaction Does Not Exist",
482: "Loop Detected",
483: "Too Many Hops",
484: "Address Incomplete",
485: "Ambiguous",
486: "Busy Here",
487: "Request Terminated",
488: "Not Acceptable Here",
491: "Request Pending",
493: "Undecipherable",
500: "Internal Server Error",
501: "Not Implemented",
502: "Bad Gateway", # no donut
503: "Service Unavailable",
504: "Server Time-out",
505: "SIP Version not supported",
513: "Message Too Large",
600: "Busy Everywhere",
603: "Decline",
604: "Does not exist anywhere",
606: "Not Acceptable",
}
specialCases = {
'cseq': 'CSeq',
'call-id': 'Call-ID',
'www-authenticate': 'WWW-Authenticate',
}
def dashCapitalize(s):
    ''' Capitalize a string, making sure to treat - as a word separator '''
return '-'.join([ x.capitalize() for x in s.split('-')])
def unq(s):
if s[0] == s[-1] == '"':
return s[1:-1]
return s
def DigestCalcHA1(
pszAlg,
pszUserName,
pszRealm,
pszPassword,
pszNonce,
pszCNonce,
):
m = md5()
m.update(pszUserName)
m.update(":")
m.update(pszRealm)
m.update(":")
m.update(pszPassword)
HA1 = m.digest()
if pszAlg == "md5-sess":
m = md5()
m.update(HA1)
m.update(":")
m.update(pszNonce)
m.update(":")
m.update(pszCNonce)
HA1 = m.digest()
return HA1.encode('hex')
DigestCalcHA1 = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcHA1)
def DigestCalcResponse(
HA1,
pszNonce,
pszNonceCount,
pszCNonce,
pszQop,
pszMethod,
pszDigestUri,
pszHEntity,
):
m = md5()
m.update(pszMethod)
m.update(":")
m.update(pszDigestUri)
if pszQop == "auth-int":
m.update(":")
m.update(pszHEntity)
HA2 = m.digest().encode('hex')
m = md5()
m.update(HA1)
m.update(":")
m.update(pszNonce)
m.update(":")
if pszNonceCount and pszCNonce: # pszQop:
m.update(pszNonceCount)
m.update(":")
m.update(pszCNonce)
m.update(":")
m.update(pszQop)
m.update(":")
m.update(HA2)
hash = m.digest().encode('hex')
return hash
DigestCalcResponse = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcResponse)
_absent = object()
class Via(object):
"""
A L{Via} is a SIP Via header, representing a segment of the path taken by
the request.
See RFC 3261, sections 8.1.1.7, 18.2.2, and 20.42.
@ivar transport: Network protocol used for this leg. (Probably either "TCP"
or "UDP".)
@type transport: C{str}
@ivar branch: Unique identifier for this request.
@type branch: C{str}
@ivar host: Hostname or IP for this leg.
@type host: C{str}
@ivar port: Port used for this leg.
    @type port: C{int}, or None.
@ivar rportRequested: Whether to request RFC 3581 client processing or not.
@type rportRequested: C{bool}
@ivar rportValue: Servers wishing to honor requests for RFC 3581 processing
should set this parameter to the source port the request was received
from.
@type rportValue: C{int}, or None.
@ivar ttl: Time-to-live for requests on multicast paths.
@type ttl: C{int}, or None.
@ivar maddr: The destination multicast address, if any.
@type maddr: C{str}, or None.
@ivar hidden: Obsolete in SIP 2.0.
@type hidden: C{bool}
@ivar otherParams: Any other parameters in the header.
@type otherParams: C{dict}
"""
def __init__(self, host, port=PORT, transport="UDP", ttl=None,
hidden=False, received=None, rport=_absent, branch=None,
maddr=None, **kw):
"""
Set parameters of this Via header. All arguments correspond to
attributes of the same name.
To maintain compatibility with old SIP
code, the 'rport' argument is used to determine the values of
C{rportRequested} and C{rportValue}. If None, C{rportRequested} is set
to True. (The deprecated method for doing this is to pass True.) If an
integer, C{rportValue} is set to the given value.
Any arguments not explicitly named here are collected into the
C{otherParams} dict.
"""
self.transport = transport
self.host = host
self.port = port
self.ttl = ttl
self.hidden = hidden
self.received = received
if rport is True:
warnings.warn(
"rport=True is deprecated since Twisted 9.0.",
DeprecationWarning,
stacklevel=2)
self.rportValue = None
self.rportRequested = True
elif rport is None:
self.rportValue = None
self.rportRequested = True
elif rport is _absent:
self.rportValue = None
self.rportRequested = False
else:
self.rportValue = rport
self.rportRequested = False
self.branch = branch
self.maddr = maddr
self.otherParams = kw
def _getrport(self):
"""
Returns the rport value expected by the old SIP code.
"""
if self.rportRequested == True:
return True
elif self.rportValue is not None:
return self.rportValue
else:
return None
def _setrport(self, newRPort):
"""
L{Base._fixupNAT} sets C{rport} directly, so this method sets
C{rportValue} based on that.
@param newRPort: The new rport value.
@type newRPort: C{int}
"""
self.rportValue = newRPort
self.rportRequested = False
rport = property(_getrport, _setrport)
def toString(self):
"""
Serialize this header for use in a request or response.
"""
s = "SIP/2.0/%s %s:%s" % (self.transport, self.host, self.port)
if self.hidden:
s += ";hidden"
for n in "ttl", "branch", "maddr", "received":
value = getattr(self, n)
if value is not None:
s += ";%s=%s" % (n, value)
if self.rportRequested:
s += ";rport"
elif self.rportValue is not None:
s += ";rport=%s" % (self.rport,)
etc = self.otherParams.items()
etc.sort()
for k, v in etc:
if v is None:
s += ";" + k
else:
s += ";%s=%s" % (k, v)
return s
def parseViaHeader(value):
"""
Parse a Via header.
@return: The parsed version of this header.
@rtype: L{Via}
"""
parts = value.split(";")
sent, params = parts[0], parts[1:]
protocolinfo, by = sent.split(" ", 1)
by = by.strip()
result = {}
pname, pversion, transport = protocolinfo.split("/")
if pname != "SIP" or pversion != "2.0":
raise ValueError, "wrong protocol or version: %r" % value
result["transport"] = transport
if ":" in by:
host, port = by.split(":")
result["port"] = int(port)
result["host"] = host
else:
result["host"] = by
for p in params:
        # it's the comment-stripping dance!
p = p.strip().split(" ", 1)
if len(p) == 1:
p, comment = p[0], ""
else:
p, comment = p
if p == "hidden":
result["hidden"] = True
continue
parts = p.split("=", 1)
if len(parts) == 1:
name, value = parts[0], None
else:
name, value = parts
if name in ("rport", "ttl"):
value = int(value)
result[name] = value
return Via(**result)
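# Editor's note: an illustrative sketch, not part of the original module, of a
# Via header round trip:
#
#     via = parseViaHeader('SIP/2.0/UDP example.com:5060;branch=z9hG4bK776asdhds')
#     via.host, via.port, via.branch   # 'example.com', 5060, 'z9hG4bK776asdhds'
#     via.toString()                   # 'SIP/2.0/UDP example.com:5060;branch=z9hG4bK776asdhds'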
class URL:
"""A SIP URL."""
def __init__(self, host, username=None, password=None, port=None,
transport=None, usertype=None, method=None,
ttl=None, maddr=None, tag=None, other=None, headers=None):
self.username = username
self.host = host
self.password = password
self.port = port
self.transport = transport
self.usertype = usertype
self.method = method
self.tag = tag
self.ttl = ttl
self.maddr = maddr
if other == None:
self.other = []
else:
self.other = other
if headers == None:
self.headers = {}
else:
self.headers = headers
def toString(self):
l = []; w = l.append
w("sip:")
if self.username != None:
w(self.username)
if self.password != None:
w(":%s" % self.password)
w("@")
w(self.host)
if self.port != None:
w(":%d" % self.port)
if self.usertype != None:
w(";user=%s" % self.usertype)
for n in ("transport", "ttl", "maddr", "method", "tag"):
v = getattr(self, n)
if v != None:
w(";%s=%s" % (n, v))
for v in self.other:
w(";%s" % v)
if self.headers:
w("?")
w("&".join([("%s=%s" % (specialCases.get(h) or dashCapitalize(h), v)) for (h, v) in self.headers.items()]))
return "".join(l)
def __str__(self):
return self.toString()
def __repr__(self):
return '<URL %s:%s@%s:%r/%s>' % (self.username, self.password, self.host, self.port, self.transport)
def parseURL(url, host=None, port=None):
"""Return string into URL object.
URIs are of of form 'sip:user@example.com'.
"""
d = {}
if not url.startswith("sip:"):
raise ValueError("unsupported scheme: " + url[:4])
parts = url[4:].split(";")
userdomain, params = parts[0], parts[1:]
udparts = userdomain.split("@", 1)
if len(udparts) == 2:
userpass, hostport = udparts
upparts = userpass.split(":", 1)
if len(upparts) == 1:
d["username"] = upparts[0]
else:
d["username"] = upparts[0]
d["password"] = upparts[1]
else:
hostport = udparts[0]
hpparts = hostport.split(":", 1)
if len(hpparts) == 1:
d["host"] = hpparts[0]
else:
d["host"] = hpparts[0]
d["port"] = int(hpparts[1])
if host != None:
d["host"] = host
if port != None:
d["port"] = port
for p in params:
if p == params[-1] and "?" in p:
d["headers"] = h = {}
p, headers = p.split("?", 1)
for header in headers.split("&"):
k, v = header.split("=")
h[k] = v
nv = p.split("=", 1)
if len(nv) == 1:
d.setdefault("other", []).append(p)
continue
name, value = nv
if name == "user":
d["usertype"] = value
elif name in ("transport", "ttl", "maddr", "method", "tag"):
if name == "ttl":
value = int(value)
d[name] = value
else:
d.setdefault("other", []).append(p)
return URL(**d)
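# Editor's note: an illustrative sketch, not part of the original module:
#
#     url = parseURL('sip:bob@example.com:5060;transport=udp')
#     url.username, url.host, url.port   # 'bob', 'example.com', 5060
#     url.toString()                     # 'sip:bob@example.com:5060;transport=udp'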
def cleanRequestURL(url):
"""Clean a URL from a Request line."""
url.transport = None
url.maddr = None
url.ttl = None
url.headers = {}
def parseAddress(address, host=None, port=None, clean=0):
"""Return (name, uri, params) for From/To/Contact header.
@param clean: remove unnecessary info, usually for From and To headers.
"""
address = address.strip()
# simple 'sip:foo' case
if address.startswith("sip:"):
return "", parseURL(address, host=host, port=port), {}
params = {}
name, url = address.split("<", 1)
name = name.strip()
if name.startswith('"'):
name = name[1:]
if name.endswith('"'):
name = name[:-1]
url, paramstring = url.split(">", 1)
url = parseURL(url, host=host, port=port)
paramstring = paramstring.strip()
if paramstring:
for l in paramstring.split(";"):
if not l:
continue
k, v = l.split("=")
params[k] = v
if clean:
# rfc 2543 6.21
url.ttl = None
url.headers = {}
url.transport = None
url.maddr = None
return name, url, params
class SIPError(Exception):
def __init__(self, code, phrase=None):
if phrase is None:
phrase = statusCodes[code]
Exception.__init__(self, "SIP error (%d): %s" % (code, phrase))
self.code = code
self.phrase = phrase
class RegistrationError(SIPError):
"""Registration was not possible."""
class Message:
"""A SIP message."""
length = None
def __init__(self):
self.headers = util.OrderedDict() # map name to list of values
self.body = ""
self.finished = 0
def addHeader(self, name, value):
name = name.lower()
name = longHeaders.get(name, name)
if name == "content-length":
self.length = int(value)
self.headers.setdefault(name,[]).append(value)
def bodyDataReceived(self, data):
self.body += data
def creationFinished(self):
if (self.length != None) and (self.length != len(self.body)):
raise ValueError, "wrong body length"
self.finished = 1
def toString(self):
s = "%s\r\n" % self._getHeaderLine()
for n, vs in self.headers.items():
for v in vs:
s += "%s: %s\r\n" % (specialCases.get(n) or dashCapitalize(n), v)
s += "\r\n"
s += self.body
return s
def _getHeaderLine(self):
raise NotImplementedError
class Request(Message):
"""A Request for a URI"""
def __init__(self, method, uri, version="SIP/2.0"):
Message.__init__(self)
self.method = method
if isinstance(uri, URL):
self.uri = uri
else:
self.uri = parseURL(uri)
cleanRequestURL(self.uri)
def __repr__(self):
return "<SIP Request %d:%s %s>" % (id(self), self.method, self.uri.toString())
def _getHeaderLine(self):
return "%s %s SIP/2.0" % (self.method, self.uri.toString())
class Response(Message):
"""A Response to a URI Request"""
def __init__(self, code, phrase=None, version="SIP/2.0"):
Message.__init__(self)
self.code = code
if phrase == None:
phrase = statusCodes[code]
self.phrase = phrase
def __repr__(self):
return "<SIP Response %d:%s>" % (id(self), self.code)
def _getHeaderLine(self):
return "SIP/2.0 %s %s" % (self.code, self.phrase)
class MessagesParser(basic.LineReceiver):
"""A SIP messages parser.
Expects dataReceived, dataDone repeatedly,
in that order. Shouldn't be connected to actual transport.
"""
version = "SIP/2.0"
acceptResponses = 1
acceptRequests = 1
state = "firstline" # or "headers", "body" or "invalid"
debug = 0
def __init__(self, messageReceivedCallback):
self.messageReceived = messageReceivedCallback
self.reset()
def reset(self, remainingData=""):
self.state = "firstline"
self.length = None # body length
self.bodyReceived = 0 # how much of the body we received
self.message = None
self.setLineMode(remainingData)
def invalidMessage(self):
self.state = "invalid"
self.setRawMode()
def dataDone(self):
# clear out any buffered data that may be hanging around
self.clearLineBuffer()
if self.state == "firstline":
return
if self.state != "body":
self.reset()
return
if self.length == None:
# no content-length header, so end of data signals message done
self.messageDone()
elif self.length < self.bodyReceived:
# aborted in the middle
self.reset()
else:
# we have enough data and message wasn't finished? something is wrong
raise RuntimeError, "this should never happen"
def dataReceived(self, data):
try:
basic.LineReceiver.dataReceived(self, data)
except:
log.err()
self.invalidMessage()
def handleFirstLine(self, line):
"""Expected to create self.message."""
raise NotImplementedError
def lineLengthExceeded(self, line):
self.invalidMessage()
def lineReceived(self, line):
if self.state == "firstline":
while line.startswith("\n") or line.startswith("\r"):
line = line[1:]
if not line:
return
try:
a, b, c = line.split(" ", 2)
except ValueError:
self.invalidMessage()
return
if a == "SIP/2.0" and self.acceptResponses:
# response
try:
code = int(b)
except ValueError:
self.invalidMessage()
return
self.message = Response(code, c)
elif c == "SIP/2.0" and self.acceptRequests:
self.message = Request(a, b)
else:
self.invalidMessage()
return
self.state = "headers"
return
else:
assert self.state == "headers"
if line:
# XXX support multi-line headers
try:
name, value = line.split(":", 1)
except ValueError:
self.invalidMessage()
return
self.message.addHeader(name, value.lstrip())
if name.lower() == "content-length":
try:
self.length = int(value.lstrip())
except ValueError:
self.invalidMessage()
return
else:
# CRLF, we now have message body until self.length bytes,
# or if no length was given, until there is no more data
# from the connection sending us data.
self.state = "body"
if self.length == 0:
self.messageDone()
return
self.setRawMode()
def messageDone(self, remainingData=""):
assert self.state == "body"
self.message.creationFinished()
self.messageReceived(self.message)
self.reset(remainingData)
def rawDataReceived(self, data):
assert self.state in ("body", "invalid")
if self.state == "invalid":
return
if self.length == None:
self.message.bodyDataReceived(data)
else:
dataLen = len(data)
expectedLen = self.length - self.bodyReceived
if dataLen > expectedLen:
self.message.bodyDataReceived(data[:expectedLen])
self.messageDone(data[expectedLen:])
return
else:
self.bodyReceived += dataLen
self.message.bodyDataReceived(data)
if self.bodyReceived == self.length:
self.messageDone()
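# Editor's note: an illustrative sketch, not part of the original module, of
# feeding raw data through the parser; the callback simply collects messages.
#
#     received = []
#     parser = MessagesParser(received.append)
#     parser.dataReceived('OPTIONS sip:bob@example.com SIP/2.0\r\n'
#                         'Via: SIP/2.0/UDP pc.example.com\r\n'
#                         'Content-Length: 0\r\n'
#                         '\r\n')
#     parser.dataDone()
#     # received[0] is now a Request with method 'OPTIONS'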
class Base(protocol.DatagramProtocol):
"""Base class for SIP clients and servers."""
PORT = PORT
debug = False
def __init__(self):
self.messages = []
self.parser = MessagesParser(self.addMessage)
def addMessage(self, msg):
self.messages.append(msg)
def datagramReceived(self, data, addr):
self.parser.dataReceived(data)
self.parser.dataDone()
for m in self.messages:
self._fixupNAT(m, addr)
if self.debug:
log.msg("Received %r from %r" % (m.toString(), addr))
if isinstance(m, Request):
self.handle_request(m, addr)
else:
self.handle_response(m, addr)
self.messages[:] = []
def _fixupNAT(self, message, (srcHost, srcPort)):
# RFC 2543 6.40.2,
senderVia = parseViaHeader(message.headers["via"][0])
if senderVia.host != srcHost:
senderVia.received = srcHost
if senderVia.port != srcPort:
senderVia.rport = srcPort
message.headers["via"][0] = senderVia.toString()
elif senderVia.rport == True:
senderVia.received = srcHost
senderVia.rport = srcPort
message.headers["via"][0] = senderVia.toString()
def deliverResponse(self, responseMessage):
"""Deliver response.
Destination is based on topmost Via header."""
destVia = parseViaHeader(responseMessage.headers["via"][0])
# XXX we don't do multicast yet
host = destVia.received or destVia.host
port = destVia.rport or destVia.port or self.PORT
destAddr = URL(host=host, port=port)
self.sendMessage(destAddr, responseMessage)
def responseFromRequest(self, code, request):
"""Create a response to a request message."""
response = Response(code)
for name in ("via", "to", "from", "call-id", "cseq"):
response.headers[name] = request.headers.get(name, [])[:]
return response
def sendMessage(self, destURL, message):
"""Send a message.
@param destURL: C{URL}. This should be a *physical* URL, not a logical one.
@param message: The message to send.
"""
if destURL.transport not in ("udp", None):
raise RuntimeError, "only UDP currently supported"
if self.debug:
log.msg("Sending %r to %r" % (message.toString(), destURL))
self.transport.write(message.toString(), (destURL.host, destURL.port or self.PORT))
def handle_request(self, message, addr):
"""Override to define behavior for requests received
@type message: C{Message}
@type addr: C{tuple}
"""
raise NotImplementedError
def handle_response(self, message, addr):
"""Override to define behavior for responses received.
@type message: C{Message}
@type addr: C{tuple}
"""
raise NotImplementedError
class IContact(Interface):
"""A user of a registrar or proxy"""
class Registration:
def __init__(self, secondsToExpiry, contactURL):
self.secondsToExpiry = secondsToExpiry
self.contactURL = contactURL
class IRegistry(Interface):
"""Allows registration of logical->physical URL mapping."""
def registerAddress(domainURL, logicalURL, physicalURL):
"""Register the physical address of a logical URL.
@return: Deferred of C{Registration} or failure with RegistrationError.
"""
def unregisterAddress(domainURL, logicalURL, physicalURL):
"""Unregister the physical address of a logical URL.
@return: Deferred of C{Registration} or failure with RegistrationError.
"""
def getRegistrationInfo(logicalURL):
"""Get registration info for logical URL.
@return: Deferred of C{Registration} object or failure of LookupError.
"""
class ILocator(Interface):
"""Allow looking up physical address for logical URL."""
def getAddress(logicalURL):
"""Return physical URL of server for logical URL of user.
@param logicalURL: a logical C{URL}.
@return: Deferred which becomes URL or fails with LookupError.
"""
class Proxy(Base):
"""SIP proxy."""
PORT = PORT
locator = None # object implementing ILocator
def __init__(self, host=None, port=PORT):
"""Create new instance.
@param host: our hostname/IP as set in Via headers.
@param port: our port as set in Via headers.
"""
self.host = host or socket.getfqdn()
self.port = port
Base.__init__(self)
def getVia(self):
"""Return value of Via header for this proxy."""
return Via(host=self.host, port=self.port)
def handle_request(self, message, addr):
# send immediate 100/trying message before processing
#self.deliverResponse(self.responseFromRequest(100, message))
f = getattr(self, "handle_%s_request" % message.method, None)
if f is None:
f = self.handle_request_default
try:
d = f(message, addr)
except SIPError, e:
self.deliverResponse(self.responseFromRequest(e.code, message))
except:
log.err()
self.deliverResponse(self.responseFromRequest(500, message))
else:
if d is not None:
d.addErrback(lambda e:
self.deliverResponse(self.responseFromRequest(e.code, message))
)
def handle_request_default(self, message, (srcHost, srcPort)):
"""Default request handler.
Default behaviour for OPTIONS and unknown methods for proxies
        is to forward the message on to the client.
        Since at the moment we are a stateless proxy, that's basically
everything.
"""
def _mungContactHeader(uri, message):
message.headers['contact'][0] = uri.toString()
return self.sendMessage(uri, message)
viaHeader = self.getVia()
if viaHeader.toString() in message.headers["via"]:
# must be a loop, so drop message
log.msg("Dropping looped message.")
return
message.headers["via"].insert(0, viaHeader.toString())
name, uri, tags = parseAddress(message.headers["to"][0], clean=1)
# this is broken and needs refactoring to use cred
d = self.locator.getAddress(uri)
d.addCallback(self.sendMessage, message)
d.addErrback(self._cantForwardRequest, message)
def _cantForwardRequest(self, error, message):
error.trap(LookupError)
del message.headers["via"][0] # this'll be us
self.deliverResponse(self.responseFromRequest(404, message))
def deliverResponse(self, responseMessage):
"""Deliver response.
Destination is based on topmost Via header."""
destVia = parseViaHeader(responseMessage.headers["via"][0])
# XXX we don't do multicast yet
host = destVia.received or destVia.host
port = destVia.rport or destVia.port or self.PORT
destAddr = URL(host=host, port=port)
self.sendMessage(destAddr, responseMessage)
def responseFromRequest(self, code, request):
"""Create a response to a request message."""
response = Response(code)
for name in ("via", "to", "from", "call-id", "cseq"):
response.headers[name] = request.headers.get(name, [])[:]
return response
def handle_response(self, message, addr):
"""Default response handler."""
v = parseViaHeader(message.headers["via"][0])
if (v.host, v.port) != (self.host, self.port):
# we got a message not intended for us?
# XXX note this check breaks if we have multiple external IPs
# yay for suck protocols
log.msg("Dropping incorrectly addressed message")
return
del message.headers["via"][0]
if not message.headers["via"]:
# this message is addressed to us
self.gotResponse(message, addr)
return
self.deliverResponse(message)
def gotResponse(self, message, addr):
"""Called with responses that are addressed at this server."""
pass
class IAuthorizer(Interface):
def getChallenge(peer):
"""Generate a challenge the client may respond to.
@type peer: C{tuple}
@param peer: The client's address
@rtype: C{str}
@return: The challenge string
"""
def decode(response):
"""Create a credentials object from the given response.
@type response: C{str}
"""
class BasicAuthorizer:
"""Authorizer for insecure Basic (base64-encoded plaintext) authentication.
This form of authentication is broken and insecure. Do not use it.
"""
implements(IAuthorizer)
def __init__(self):
"""
This method exists solely to issue a deprecation warning.
"""
warnings.warn(
"twisted.protocols.sip.BasicAuthorizer was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
def getChallenge(self, peer):
return None
def decode(self, response):
# At least one SIP client improperly pads its Base64 encoded messages
for i in range(3):
try:
creds = (response + ('=' * i)).decode('base64')
except:
pass
else:
break
else:
# Totally bogus
raise SIPError(400)
p = creds.split(':', 1)
if len(p) == 2:
return UsernamePassword(*p)
raise SIPError(400)
class DigestedCredentials(UsernameHashedPassword):
"""Yet Another Simple Digest-MD5 authentication scheme"""
def __init__(self, username, fields, challenges):
warnings.warn(
"twisted.protocols.sip.DigestedCredentials was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
self.username = username
self.fields = fields
self.challenges = challenges
def checkPassword(self, password):
method = 'REGISTER'
response = self.fields.get('response')
uri = self.fields.get('uri')
nonce = self.fields.get('nonce')
cnonce = self.fields.get('cnonce')
nc = self.fields.get('nc')
algo = self.fields.get('algorithm', 'MD5')
qop = self.fields.get('qop-options', 'auth')
opaque = self.fields.get('opaque')
if opaque not in self.challenges:
return False
del self.challenges[opaque]
user, domain = self.username.split('@', 1)
if uri is None:
uri = 'sip:' + domain
expected = DigestCalcResponse(
DigestCalcHA1(algo, user, domain, password, nonce, cnonce),
nonce, nc, cnonce, qop, method, uri, None,
)
return expected == response
class DigestAuthorizer:
CHALLENGE_LIFETIME = 15
implements(IAuthorizer)
def __init__(self):
warnings.warn(
"twisted.protocols.sip.DigestAuthorizer was deprecated "
"in Twisted 9.0.0",
category=DeprecationWarning,
stacklevel=2)
self.outstanding = {}
def generateNonce(self):
c = tuple([random.randrange(sys.maxint) for _ in range(3)])
c = '%d%d%d' % c
return c
def generateOpaque(self):
return str(random.randrange(sys.maxint))
def getChallenge(self, peer):
c = self.generateNonce()
o = self.generateOpaque()
self.outstanding[o] = c
return ','.join((
'nonce="%s"' % c,
'opaque="%s"' % o,
'qop-options="auth"',
'algorithm="MD5"',
))
def decode(self, response):
response = ' '.join(response.splitlines())
parts = response.split(',')
auth = dict([(k.strip(), unq(v.strip())) for (k, v) in [p.split('=', 1) for p in parts]])
try:
username = auth['username']
except KeyError:
raise SIPError(401)
try:
return DigestedCredentials(username, auth, self.outstanding)
except:
raise SIPError(400)
class RegisterProxy(Proxy):
"""A proxy that allows registration for a specific domain.
Unregistered users won't be handled.
"""
portal = None
registry = None # should implement IRegistry
authorizers = {
'digest': DigestAuthorizer(),
}
def __init__(self, *args, **kw):
Proxy.__init__(self, *args, **kw)
self.liveChallenges = {}
def handle_ACK_request(self, message, (host, port)):
# XXX
# ACKs are a client's way of indicating they got the last message
# Responding to them is not a good idea.
# However, we should keep track of terminal messages and re-transmit
# if no ACK is received.
pass
def handle_REGISTER_request(self, message, (host, port)):
"""Handle a registration request.
Currently registration is not proxied.
"""
if self.portal is None:
# There is no portal. Let anyone in.
self.register(message, host, port)
else:
# There is a portal. Check for credentials.
if not message.headers.has_key("authorization"):
return self.unauthorized(message, host, port)
else:
return self.login(message, host, port)
def unauthorized(self, message, host, port):
m = self.responseFromRequest(401, message)
for (scheme, auth) in self.authorizers.iteritems():
chal = auth.getChallenge((host, port))
if chal is None:
value = '%s realm="%s"' % (scheme.title(), self.host)
else:
value = '%s %s,realm="%s"' % (scheme.title(), chal, self.host)
m.headers.setdefault('www-authenticate', []).append(value)
self.deliverResponse(m)
def login(self, message, host, port):
parts = message.headers['authorization'][0].split(None, 1)
a = self.authorizers.get(parts[0].lower())
if a:
try:
c = a.decode(parts[1])
except SIPError:
raise
except:
log.err()
self.deliverResponse(self.responseFromRequest(500, message))
else:
c.username += '@' + self.host
self.portal.login(c, None, IContact
).addCallback(self._cbLogin, message, host, port
).addErrback(self._ebLogin, message, host, port
).addErrback(log.err
)
else:
self.deliverResponse(self.responseFromRequest(501, message))
def _cbLogin(self, (i, a, l), message, host, port):
# It's stateless, matey. What a joke.
self.register(message, host, port)
def _ebLogin(self, failure, message, host, port):
failure.trap(cred.error.UnauthorizedLogin)
self.unauthorized(message, host, port)
def register(self, message, host, port):
"""Allow all users to register"""
name, toURL, params = parseAddress(message.headers["to"][0], clean=1)
contact = None
if message.headers.has_key("contact"):
contact = message.headers["contact"][0]
if message.headers.get("expires", [None])[0] == "0":
self.unregister(message, toURL, contact)
else:
# XXX Check expires on appropriate URL, and pass it to registry
# instead of having registry hardcode it.
if contact is not None:
name, contactURL, params = parseAddress(contact, host=host, port=port)
d = self.registry.registerAddress(message.uri, toURL, contactURL)
else:
d = self.registry.getRegistrationInfo(toURL)
d.addCallbacks(self._cbRegister, self._ebRegister,
callbackArgs=(message,),
errbackArgs=(message,)
)
def _cbRegister(self, registration, message):
response = self.responseFromRequest(200, message)
if registration.contactURL != None:
response.addHeader("contact", registration.contactURL.toString())
response.addHeader("expires", "%d" % registration.secondsToExpiry)
response.addHeader("content-length", "0")
self.deliverResponse(response)
def _ebRegister(self, error, message):
error.trap(RegistrationError, LookupError)
# XXX return error message, and alter tests to deal with
# this, currently tests assume no message sent on failure
def unregister(self, message, toURL, contact):
try:
expires = int(message.headers["expires"][0])
except ValueError:
self.deliverResponse(self.responseFromRequest(400, message))
else:
if expires == 0:
if contact == "*":
contactURL = "*"
else:
name, contactURL, params = parseAddress(contact)
d = self.registry.unregisterAddress(message.uri, toURL, contactURL)
d.addCallback(self._cbUnregister, message
).addErrback(self._ebUnregister, message
)
def _cbUnregister(self, registration, message):
msg = self.responseFromRequest(200, message)
msg.headers.setdefault('contact', []).append(registration.contactURL.toString())
msg.addHeader("expires", "0")
self.deliverResponse(msg)
def _ebUnregister(self, registration, message):
pass
class InMemoryRegistry:
"""A simplistic registry for a specific domain."""
implements(IRegistry, ILocator)
def __init__(self, domain):
self.domain = domain # the domain we handle registration for
self.users = {} # map username to (IDelayedCall for expiry, address URI)
def getAddress(self, userURI):
if userURI.host != self.domain:
return defer.fail(LookupError("unknown domain"))
if self.users.has_key(userURI.username):
dc, url = self.users[userURI.username]
return defer.succeed(url)
else:
return defer.fail(LookupError("no such user"))
def getRegistrationInfo(self, userURI):
if userURI.host != self.domain:
return defer.fail(LookupError("unknown domain"))
if self.users.has_key(userURI.username):
dc, url = self.users[userURI.username]
return defer.succeed(Registration(int(dc.getTime() - time.time()), url))
else:
return defer.fail(LookupError("no such user"))
def _expireRegistration(self, username):
try:
dc, url = self.users[username]
except KeyError:
return defer.fail(LookupError("no such user"))
else:
dc.cancel()
del self.users[username]
return defer.succeed(Registration(0, url))
def registerAddress(self, domainURL, logicalURL, physicalURL):
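        # Registrations last an hour; re-registering an existing user simply
        # resets the pending expiry timer instead of scheduling a new one.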
if domainURL.host != self.domain:
log.msg("Registration for domain we don't handle.")
return defer.fail(RegistrationError(404))
if logicalURL.host != self.domain:
log.msg("Registration for domain we don't handle.")
return defer.fail(RegistrationError(404))
if self.users.has_key(logicalURL.username):
dc, old = self.users[logicalURL.username]
dc.reset(3600)
else:
dc = reactor.callLater(3600, self._expireRegistration, logicalURL.username)
log.msg("Registered %s at %s" % (logicalURL.toString(), physicalURL.toString()))
self.users[logicalURL.username] = (dc, physicalURL)
return defer.succeed(Registration(int(dc.getTime() - time.time()), physicalURL))
def unregisterAddress(self, domainURL, logicalURL, physicalURL):
return self._expireRegistration(logicalURL.username)
|
gpl-3.0
| -9,162,486,065,959,605,000
| 30.464018
| 119
| 0.565435
| false
| 4.031988
| false
| false
| false
|
pakpoomton/CellmodellerShadow
|
Models/Biofilm_g40.py
|
1
|
2119
|
import random
from CellModeller.Regulation.ModuleRegulator import ModuleRegulator
from CellModeller.Biophysics.BacterialModels.CLBacterium import CLBacterium
from CellModeller.GUI import Renderers
import numpy
import math
max_cells = 400000
#cell_colors = {0:[0.0, 1.0, 0.0],
# 1:[0.0, 0.0, 1.0],
# 2:[1.0, 0.0, 0.0],
# 3:[0.0, 1.0, 1.0]}
cell_colors = numpy.random.uniform(0,1,(9,3))
def setup(sim):
# Set biophysics, signalling, and regulation models
biophys = CLBacterium(sim, max_substeps=8, max_cells=max_cells, max_contacts=32, max_sqs=192**2, jitter_z=False, reg_param=0.04, gamma=40)
#biophys.addPlane((0,0,-0.5), (0,0,1), 1.0)
#biophys.addPlane((0,0,0.5), (0,0,-1), math.sqrt(7.5e-4))
regul = ModuleRegulator(sim, __file__) # use this file for reg too
# Only biophys and regulation
sim.init(biophys, regul, None, None)
sim.addCell(cellType=0, pos=(0,0,0))
#sim.addCell(cellType=0, pos=(0,-10.0,0))
#sim.addCell(cellType=1, pos=(0,10.0,0))
#sim.addCell(cellType=0, pos=(16,16,0))
#sim.addCell(cellType=1, pos=(0,16,0))
#sim.addCell(cellType=2, pos=(-16,16,0))
#sim.addCell(cellType=3, pos=(16,0,0))
#sim.addCell(cellType=4, pos=(0,0,0))
#sim.addCell(cellType=5, pos=(-16,0,0))
#sim.addCell(cellType=6, pos=(16,-16,0))
#sim.addCell(cellType=7, pos=(0,-16,0))
#sim.addCell(cellType=8, pos=(-16,-16,0))
# Add some objects to draw the models
therenderer = Renderers.GLBacteriumRenderer(sim)
sim.addRenderer(therenderer)
sim.savePickle = True
sim.pickleSteps = 20
def init(cell):
cell.targetVol = 3.5 + random.uniform(0.0,0.5)
cell.growthRate = 1.0
def numSignals():
return 0
def numSpecies():
return 0
def update(cells):
for (id, cell) in cells.iteritems():
cell.color = cell_colors[cell.cellType]
if cell.volume > cell.targetVol:
cell.asymm = [1,1]
cell.divideFlag = True
def divide(parent, d1, d2):
d1.targetVol = 3.5 + random.uniform(0.0,0.5)
d2.targetVol = 3.5 + random.uniform(0.0,0.5)
|
bsd-3-clause
| -3,343,838,500,989,668,000
| 29.271429
| 142
| 0.632374
| false
| 2.584146
| false
| false
| false
|
markokr/sysca
|
sysca/keys.py
|
1
|
6591
|
"""Key handling
"""
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec, rsa, dsa
from cryptography.hazmat.primitives.hashes import SHA256, SHA384, SHA512
from cryptography.hazmat.primitives.serialization import (Encoding, PublicFormat)
from .exceptions import UnsupportedParameter
from .compat import (
ed25519, ed448, EDDSA_PRIVKEY_CLASSES,
EC_CURVES, PUBKEY_CLASSES, PRIVKEY_CLASSES,
)
__all__ = (
"get_curve_for_name", "get_ec_curves", "get_hash_algo", "get_key_name",
"is_safe_bits", "is_safe_curve",
"new_dsa_key", "new_ec_key", "new_key", "new_rsa_key",
"new_serial_number", "same_pubkey", "set_unsafe",
"valid_privkey", "valid_pubkey", "get_invalid_key_usage",
)
#
# Key parameters
#
UNSAFE = False
# safe choices
SAFE_BITS_RSA = (2048, 3072, 4096)
SAFE_BITS_DSA = (2048, 3072)
SAFE_CURVES = ("secp256r1", "secp384r1", "secp521r1", "ed25519", "ed448",
"brainpoolp256r1", "brainpoolp384r1", "brainpoolp512r1")
def get_curve_for_name(name):
"""Lookup curve by name.
"""
name2 = name.lower()
if name2 not in EC_CURVES:
raise UnsupportedParameter("Unknown curve: %s" % name)
if not is_safe_curve(name2):
raise UnsupportedParameter("Unsafe curve: %s" % name)
return EC_CURVES[name2]
def same_pubkey(o1, o2):
"""Compare public keys.
"""
k1, k2 = o1, o2
if not isinstance(k1, PUBKEY_CLASSES):
k1 = o1.public_key()
if k1 is None:
raise ValueError("object %r gave None .public_key()" % o1)
if not isinstance(k2, PUBKEY_CLASSES):
k2 = k2.public_key()
if k2 is None:
raise ValueError("object %r gave None .public_key()" % o2)
fmt = PublicFormat.SubjectPublicKeyInfo
p1 = k1.public_bytes(Encoding.PEM, fmt)
p2 = k2.public_bytes(Encoding.PEM, fmt)
return p1 == p2
def get_hash_algo(privkey, ctx):
"""Return signature hash algo based on privkey.
"""
if isinstance(privkey, EDDSA_PRIVKEY_CLASSES):
return None
if isinstance(privkey, ec.EllipticCurvePrivateKey):
if privkey.key_size > 500:
return SHA512()
if privkey.key_size > 300:
return SHA384()
return SHA256()
def get_invalid_key_usage(pubkey):
"""KeyUsage types not supported by key"""
bad = ("key_encipherment", "data_encipherment", "encipher_only", "decipher_only", "key_agreement")
if UNSAFE or isinstance(pubkey, rsa.RSAPublicKey) or pubkey is None:
return ()
return bad
def is_safe_bits(bits, bitlist):
"""Allow bits"""
return UNSAFE or bits in bitlist
def is_safe_curve(name):
"""Allow curve"""
return UNSAFE or name.lower() in SAFE_CURVES
def get_ec_curves():
"""Return supported curve names.
"""
lst = list(EC_CURVES.keys())
if ed25519 is not None:
lst.append("ed25519")
if ed448 is not None:
lst.append("ed448")
return [n for n in sorted(lst) if is_safe_curve(n)]
def new_ec_key(name="secp256r1"):
"""New Elliptic Curve key
"""
name = name.lower()
if name == "ed25519":
if ed25519 is not None:
return ed25519.Ed25519PrivateKey.generate()
raise UnsupportedParameter("ed25519 not supported")
if name == "ed448":
if ed448 is not None:
return ed448.Ed448PrivateKey.generate()
raise UnsupportedParameter("ed448 not supported")
curve = get_curve_for_name(name)
return ec.generate_private_key(curve=curve, backend=default_backend())
def new_rsa_key(bits=2048):
"""New RSA key.
"""
if not is_safe_bits(bits, SAFE_BITS_RSA):
raise UnsupportedParameter("Bad value for RSA bits: %d" % bits)
return rsa.generate_private_key(key_size=bits, public_exponent=65537, backend=default_backend())
def new_dsa_key(bits=2048):
"""New DSA key.
"""
if not is_safe_bits(bits, SAFE_BITS_DSA):
raise UnsupportedParameter("Bad value for DSA bits: %d" % bits)
return dsa.generate_private_key(key_size=bits, backend=default_backend())
def new_key(keydesc="ec"):
"""Create new key.
"""
short = {"ec": "ec:secp256r1", "rsa": "rsa:2048", "dsa": "dsa:2048"}
keydesc = short.get(keydesc, keydesc)
# create key
tmp = keydesc.lower().split(":")
if len(tmp) != 2:
raise UnsupportedParameter("Bad key spec: %s" % keydesc)
t, v = tmp
if t == "ec":
return new_ec_key(v)
elif t == "rsa":
return new_rsa_key(int(v))
elif t == "dsa":
return new_dsa_key(int(v))
raise UnsupportedParameter("Bad key type: %s" % keydesc)
def valid_pubkey(pubkey):
"""Return True if usable public key.
"""
if isinstance(pubkey, rsa.RSAPublicKey):
return is_safe_bits(pubkey.key_size, SAFE_BITS_RSA)
if isinstance(pubkey, dsa.DSAPublicKey):
return is_safe_bits(pubkey.key_size, SAFE_BITS_DSA)
if isinstance(pubkey, ec.EllipticCurvePublicKey):
return is_safe_curve(pubkey.curve.name)
return isinstance(pubkey, PUBKEY_CLASSES)
def valid_privkey(privkey):
"""Return True if usable private key.
"""
if isinstance(privkey, rsa.RSAPrivateKey):
return is_safe_bits(privkey.key_size, SAFE_BITS_RSA)
if isinstance(privkey, dsa.DSAPrivateKey):
return is_safe_bits(privkey.key_size, SAFE_BITS_DSA)
if isinstance(privkey, ec.EllipticCurvePrivateKey):
return is_safe_curve(privkey.curve.name)
return isinstance(privkey, PRIVKEY_CLASSES)
def get_key_name(key):
"""Return key type.
"""
if isinstance(key, (rsa.RSAPublicKey, rsa.RSAPrivateKey)):
return "rsa:%d" % key.key_size
if isinstance(key, (dsa.DSAPublicKey, dsa.DSAPrivateKey)):
return "dsa:%d" % key.key_size
if isinstance(key, (ec.EllipticCurvePublicKey, ec.EllipticCurvePrivateKey)):
return "ec:%s" % key.curve.name
if ed25519 is not None and isinstance(key, (ed25519.Ed25519PublicKey, ed25519.Ed25519PrivateKey)):
return "ec:ed25519"
if ed448 is not None and isinstance(key, (ed448.Ed448PublicKey, ed448.Ed448PrivateKey)):
return "ec:ed448"
return "<unknown key type>"
def set_unsafe(flag):
global UNSAFE
UNSAFE = flag
def new_serial_number():
"""Return serial number with max allowed entropy.
"""
# serial should have at least 20 bits of entropy and fit into 20 bytes
seed = int.from_bytes(os.urandom(20), "big", signed=False)
# avoid sign problems by setting highest bit
return (seed >> 1) | (1 << 158)
|
isc
| -4,393,379,388,745,196,000
| 29.655814
| 102
| 0.648308
| false
| 3.23723
| false
| false
| false
|
pendingchaos/WIP12
|
scripts/update_extensions.py
|
1
|
1594
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
extensions = os.listdir("../include/extensions")
try:
extensions.remove("extensions.h")
except ValueError:
pass
output = open("../src/extensions/extensions.cpp", "w")
output.write("""//Generated by update_extensions.py. Do not edit. Edit update_extensions.py instead
#include "extensions/extensions.h"
#include "memory.h"
""")
for extension in extensions:
output.write("extern void *init_%s();\nextern void deinit_%s(void *ext);\n" % (extension, extension))
output.write("\nstruct Extensions\n{\n")
for extension in extensions:
output.write(" void *%s;\n" % (extension))
output.write("};\n")
output.write("""
void *initExtensions()
{
Extensions *ext = NEW(Extensions);
""")
for extension in extensions:
output.write(" ((Extensions *)ext)->%s = init_%s();\n" % (extension, extension))
output.write(" return ext;\n}\n\nvoid deinitExtensions(void *ext)\n{\n")
for extension in extensions:
output.write(" deinit_%s(((Extensions *)ext)->%s);\n" % (extension, extension))
output.write(" DELETE((Extensions *)ext);\n}\n\n")
for extension in extensions:
output.write("void *get_%s(void *exts)\n{\n return ((Extensions *)exts)->%s;\n}\n\n" % (extension, extension))
output = open("../include/extensions/extensions.h", "w")
output.write("""#ifndef EXTENSIONS_H
#define EXTENSIONS_H
void *initExtensions();
void deinitExtensions(void *ext);
""")
for extension in extensions:
output.write("void *get_%s(void *exts);\n" % (extension))
output.write("#endif // EXTENSIONS_H\n")
|
gpl-3.0
| 6,310,849,901,766,840,000
| 24.709677
| 117
| 0.666876
| false
| 3.239837
| false
| false
| false
|
Adrimel/pdb-tools
|
pdb_fetch.py
|
1
|
3126
|
#!/usr/bin/env python
"""
Fetches a PDB file (optionally the biological unit) from the RCSB database.
usage: python pdb_fetch.py [-biounit] <pdb id>
example: python pdb_fetch.py 1CTF
Author: {0} ({1})
This program is part of the PDB tools distributed with HADDOCK
or with the HADDOCK tutorial. The utilities in this package
can be used to quickly manipulate PDB files, with the benefit
of 'piping' several different commands. This is a rewrite of old
FORTRAN77 code that was taking too much effort to compile. RIP.
"""
from __future__ import print_function
import gzip
import os
import re
import sys
import cStringIO
import urllib2
__author__ = "Joao Rodrigues"
__email__ = "j.p.g.l.m.rodrigues@gmail.com"
USAGE = __doc__.format(__author__, __email__)
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options."""
if len(args) == 1:
if not re.match('[0-9a-zA-Z]{4}$', args[0]):
sys.stderr.write('Invalid PDB code: ' + args[0] + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
pdb_id = args[0]
biounit = False
elif len(args) == 2:
# Chain & File
if not re.match('\-biounit$', args[0]):
sys.stderr.write('Invalid option: ' + args[0] + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
if not re.match('[0-9a-zA-Z]{4}$', args[1]):
sys.stderr.write('Invalid PDB code: ' + args[1] + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
biounit = True
pdb_id = args[1]
else:
sys.stderr.write(USAGE)
sys.exit(1)
return (pdb_id, biounit)
def _fetch_structure(pdbid, biounit=False):
"""Enclosing logic in a function"""
base_url = 'http://www.rcsb.org/pdb/files/'
pdb_type = '.pdb1' if biounit else '.pdb'
pdb_url = base_url + pdbid.lower() + pdb_type + '.gz'
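    # Download the gzipped entry and stream the decompressed lines back to
    # the caller.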
try:
request = urllib2.Request(pdb_url)
opener = urllib2.build_opener()
url_data = opener.open(request).read()
except urllib2.HTTPError as e:
print('[!] Error fetching structure: ({0}) {1}'.format(e.code, e.msg), file=sys.stderr)
return
else:
try:
buf = cStringIO.StringIO(url_data)
gz_handle = gzip.GzipFile(fileobj=buf, mode='rb')
for line in gz_handle:
yield line
except IOError as e:
            print('[!] Error fetching structure: {0}'.format(e), file=sys.stderr)
return
finally:
gz_handle.close()
if __name__ == '__main__':
# Check Input
pdb_id, biounit = check_input(sys.argv[1:])
# Do the job
pdb_structure = _fetch_structure(pdb_id, biounit)
if not pdb_structure:
sys.exit(1)
try:
sys.stdout.write(''.join(pdb_structure))
sys.stdout.flush()
except IOError:
# This is here to catch Broken Pipes
# for example to use 'head' or 'tail' without
# the error message showing up
pass
# last line of the script
# We can close it even if it is sys.stdin
sys.exit(0)
|
mit
| -5,713,528,853,672,365,000
| 28.214953
| 95
| 0.590531
| false
| 3.408942
| false
| false
| false
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/toon/DistributedNPCSpecialQuestGiverAI.py
|
1
|
8582
|
from direct.task.Task import Task
from panda3d.core import *
from panda3d.direct import *
from DistributedNPCToonBaseAI import *
from toontown.quest import Quests
class DistributedNPCSpecialQuestGiverAI(DistributedNPCToonBaseAI):
def __init__(self, air, npcId, questCallback = None, hq = 0):
DistributedNPCToonBaseAI.__init__(self, air, npcId, questCallback)
self.hq = hq
self.tutorial = 0
self.pendingAvId = None
return
def getTutorial(self):
return self.tutorial
def setTutorial(self, val):
self.tutorial = val
def getHq(self):
return self.hq
def avatarEnter(self):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('avatar enter ' + str(avId))
self.air.questManager.requestInteract(avId, self)
DistributedNPCToonBaseAI.avatarEnter(self)
def chooseQuest(self, questId, quest = None):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('chooseQuest: avatar %s choseQuest %s' % (avId, questId))
if not self.pendingAvId:
self.notify.warning('chooseQuest: not expecting an answer from any avatar: %s' % avId)
return
if self.pendingAvId != avId:
self.notify.warning('chooseQuest: not expecting an answer from this avatar: %s' % avId)
return
if questId == 0:
self.pendingAvId = None
self.pendingQuests = None
self.air.questManager.avatarCancelled(avId)
self.cancelChoseQuest(avId)
return
for quest in self.pendingQuests:
if questId == quest[0]:
self.pendingAvId = None
self.pendingQuests = None
self.air.questManager.avatarChoseQuest(avId, self, *quest)
return
self.notify.warning('chooseQuest: avatar: %s chose a quest not offered: %s' % (avId, questId))
self.pendingAvId = None
self.pendingQuests = None
return
def chooseTrack(self, trackId):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('chooseTrack: avatar %s choseTrack %s' % (avId, trackId))
if not self.pendingAvId:
self.notify.warning('chooseTrack: not expecting an answer from any avatar: %s' % avId)
return
if self.pendingAvId != avId:
self.notify.warning('chooseTrack: not expecting an answer from this avatar: %s' % avId)
return
if trackId == -1:
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.air.questManager.avatarCancelled(avId)
self.cancelChoseTrack(avId)
return
for track in self.pendingTracks:
if trackId == track:
self.air.questManager.avatarChoseTrack(avId, self, self.pendingTrackQuest, trackId)
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
return
self.notify.warning('chooseTrack: avatar: %s chose a track not offered: %s' % (avId, trackId))
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
return
def sendTimeoutMovie(self, task):
self.pendingAvId = None
self.pendingQuests = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIMEOUT,
self.npcId,
self.busy,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendClearMovie(None)
self.busy = 0
return Task.done
def sendClearMovie(self, task):
self.pendingAvId = None
self.pendingQuests = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.busy = 0
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_CLEAR,
self.npcId,
0,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
return Task.done
def rejectAvatar(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_REJECT,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))
def rejectAvatarTierNotDone(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIER_NOT_DONE,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))
def completeQuest(self, avId, questId, rewardId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_COMPLETE,
self.npcId,
avId,
[questId, rewardId, 0],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def incompleteQuest(self, avId, questId, completeStatus, toNpcId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_INCOMPLETE,
self.npcId,
avId,
[questId, completeStatus, toNpcId],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def assignQuest(self, avId, questId, rewardId, toNpcId):
self.busy = avId
if self.questCallback:
self.questCallback()
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_ASSIGN,
self.npcId,
avId,
[questId, rewardId, toNpcId],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def presentQuestChoice(self, avId, quests):
self.busy = avId
self.pendingAvId = avId
self.pendingQuests = quests
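        # setMovie takes a flat parameter list, so flatten the quest tuples
        # before sending them to the client.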
flatQuests = []
for quest in quests:
flatQuests.extend(quest)
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE,
self.npcId,
avId,
flatQuests,
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def presentTrackChoice(self, avId, questId, tracks):
self.busy = avId
self.pendingAvId = avId
self.pendingTracks = tracks
self.pendingTrackQuest = questId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE,
self.npcId,
avId,
tracks,
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def cancelChoseQuest(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE_CANCEL,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def cancelChoseTrack(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE_CANCEL,
self.npcId,
avId,
[],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def setMovieDone(self):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('setMovieDone busy: %s avId: %s' % (self.busy, avId))
if self.busy == avId:
taskMgr.remove(self.uniqueName('clearMovie'))
self.sendClearMovie(None)
elif self.busy:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCToonAI.setMovieDone busy with %s' % self.busy)
self.notify.warning('somebody called setMovieDone that I was not busy with! avId: %s' % avId)
return
|
apache-2.0
| -1,137,695,837,540,636,300
| 36.973451
| 119
| 0.62433
| false
| 3.624155
| false
| false
| false
|
munin/munin
|
utils/add_padding_script.py
|
1
|
4569
|
#!/usr/bin/python3
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
import sys
import psycopg2
import psycopg2.extras
class migrator:
def __init__(self, cursor):
self.cursor = cursor
def add_padding(self):
for i in range(1, 92):
prop = self.find_single_prop_by_id(i)
if not prop or prop["active"] or prop["padding"]:
continue
(voters, yes, no) = self.get_voters_for_prop(prop["id"])
(
winners,
losers,
winning_total,
losing_total,
) = self.get_winners_and_losers(voters, yes, no)
query = "UPDATE %s_proposal SET " % (prop["prop_type"],)
query += " vote_result=%s,compensation=%s"
query += " WHERE id=%s"
args = (["no", "yes"][yes > no], losing_total, prop["id"])
print(query % args)
self.cursor.execute(query, args)
if self.cursor.rowcount < 1:
print("argh!")
def find_single_prop_by_id(self, prop_id):
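        # Look the proposal up by id across both invite and kick proposals via
        # a UNION, so the caller gets one uniform row regardless of type.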
query = "SELECT id, prop_type, proposer, person, created, padding, comment_text, active, closed FROM ("
query += "SELECT t1.id AS id, 'invite' AS prop_type, t2.pnick AS proposer, t1.person AS person, t1.padding AS padding, t1.created AS created,"
query += (
" t1.comment_text AS comment_text, t1.active AS active, t1.closed AS closed"
)
query += " FROM invite_proposal AS t1 INNER JOIN user_list AS t2 ON t1.proposer_id=t2.id UNION ("
query += " SELECT t3.id AS id, 'kick' AS prop_type, t4.pnick AS proposer, t5.pnick AS person, t3.padding AS padding, t3.created AS created,"
query += (
" t3.comment_text AS comment_text, t3.active AS active, t3.closed AS closed"
)
query += " FROM kick_proposal AS t3"
query += " INNER JOIN user_list AS t4 ON t3.proposer_id=t4.id"
query += (
" INNER JOIN user_list AS t5 ON t3.person_id=t5.id)) AS t6 WHERE t6.id=%s"
)
self.cursor.execute(query, (prop_id,))
return self.cursor.fetchone()
def get_winners_and_losers(self, voters, yes, no):
if yes > no:
losers = voters["no"]
winners = voters["yes"]
winning_total = yes
losing_total = no
else:
winners = voters["no"]
losers = voters["yes"]
winning_total = no
losing_total = yes
return (winners, losers, winning_total, losing_total)
def get_voters_for_prop(self, prop_id):
query = "SELECT t1.vote AS vote,t1.carebears AS carebears"
query += ", t1.prop_id AS prop_idd,t1.voter_id AS voter_id,t2.pnick AS pnick"
query += " FROM prop_vote AS t1"
query += " INNER JOIN user_list AS t2 ON t1.voter_id=t2.id"
query += " WHERE prop_id=%s"
self.cursor.execute(query, (prop_id,))
voters = {}
voters["yes"] = []
voters["no"] = []
voters["abstain"] = []
yes = 0
no = 0
for r in self.cursor.fetchall():
if r["vote"] == "yes":
yes += r["carebears"]
voters["yes"].append(r)
elif r["vote"] == "no":
no += r["carebears"]
voters["no"].append(r)
elif r["vote"] == "abstain":
voters["abstain"].append(r)
return (voters, yes, no)
user = "munin"
db = "patools30"
conn = psycopg2.connect("user=%s dbname=%s" % (user, db))
conn.serialize()
conn.autocommit()
curs = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
m = migrator(curs)
m.add_padding()
|
gpl-2.0
| -1,024,861,984,553,755,300
| 36.760331
| 150
| 0.585905
| false
| 3.456127
| false
| false
| false
|
fedora-infra/gilmsg
|
setup.py
|
1
|
1729
|
# This file is part of gilmsg.
# Copyright (C) 2015 Red Hat, Inc.
#
# gilmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# gilmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with gilmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
f = open('README.rst')
long_description = f.read().strip()
long_description = long_description.split('split here', 1)[-1]
f.close()
install_requires = [
'fedmsg',
'fedmsg[crypto]',
'fedmsg[consumers]',
'fedmsg[commands]',
]
setup(
name='gilmsg',
version='0.1.2',
description="A reliability layer on top of fedmsg",
long_description=long_description,
author='Ralph Bean',
author_email='rbean@redhat.com',
url='https://github.com/fedora-infra/gilmsg/',
license='LGPLv2+',
install_requires=install_requires,
py_modules=['gilmsg'],
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
"gilmsg-logger=gilmsg:logger_cli",
],
},
)
|
lgpl-2.1
| -1,347,992,148,444,289,300
| 28.305085
| 78
| 0.693464
| false
| 3.57971
| false
| false
| false
|
pydsigner/taskit
|
taskit/common.py
|
1
|
4251
|
import time
import json
import pickle
import sys
from .log import null_logger, ERROR
__all__ = ['DEFAULT_PORT', 'STOP', 'KILL', 'STATUS', 'bytes', 'basestring',
'show_err', 'FirstBytesCorruptionError', 'FirstBytesProtocol',
'JSONCodec', 'PickleCodec']
DEFAULT_PORT = 54543
if bytes is str:
# Python 2
def bytes(s, enc):
return s
basestring = basestring
else:
# Python 3
bytes = bytes
basestring = str
STOP = '<stop>'
KILL = '<kill>'
STATUS = '<status>'
def show_err():
sys.excepthook(*sys.exc_info())
class FirstBytesCorruptionError(Exception):
"""
Exception raised when the first byte of a FB LMTP message is not a 0 or 1.
"""
class FirstBytesProtocol(object):
"""
A mixin class that has methods for sending and receiving information using
the First Bytes long message transfer protocol.
"""
first = 4
# '%0<first>x'
size_insert = '%04x'
def __init__(self, logger=null_logger, data_size=2048):
"""
data_size -- The maximum length of the data slices created. Will not be
exceeded, but in many cases will not ever be reached. This
value can be any positive "short", but the real-world
network concerns mentioned in the official documentation
for `socket.recv()` apply here -- be kind to the program
that your program is communicating with!
"""
self.set_size(data_size)
self.log = logger
def _size_bytes(self, size):
return bytes(self.size_insert % size, 'utf-8')
def _wire_recv(self, sock, size):
left = size
data = ''
while left:
chunk = sock.recv(left).decode()
if not chunk:
raise FirstBytesCorruptionError(
'Socket connection or remote codebase is broken!')
data += chunk
left -= len(chunk)
return data
def set_size(self, data_size):
"""
Set the data slice size.
"""
if len(str(data_size)) > self.first:
raise ValueError(
'Send size is too large for message size-field width!')
self.data_size = data_size
def recv(self, sock):
data = ''
# Cache the header size for speed
hsize = self.first + 1
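        # Each frame starts with "<flag><4 hex digits>": flag '1' means more
        # frames follow, '0' marks the last one; the hex field is the length
        # of this frame's payload.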
while 1:
header = self._wire_recv(sock, hsize)
bit = header[0]
if bit not in ('0', '1'):
self.log(ERROR, 'First char %r not one of "0" or "1"!' % bit)
raise FirstBytesCorruptionError(
'Protocol corruption detected -- '
'first char in message was not a 0 or a 1!'
)
# So, how big a piece do we need to grab?
size = int(header[1:], 16)
# Get it.
data += self._wire_recv(sock, size)
# If nothing else will be sent, then we are finished.
if bit == '0':
return data
def send(self, sock, data):
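        # Slice the payload into data_size sized chunks; every full chunk is
        # prefixed with '1' plus a four hex digit length, the final short
        # chunk with '0'.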
# Cache max data size for speed
ds = self.data_size
# Also cache the "max data size"-sized-data prefix
norm = b'1' + self._size_bytes(ds)
data = bytes(data, 'utf-8')
while data:
dlen = len(data)
if dlen < ds:
pre = b'0' + self._size_bytes(dlen)
else:
pre = norm
sock.sendall(pre + data[:ds])
data = data[ds:]
class JSONCodec(object):
"""
Standard codec using JSON. Good balance of scope and support.
"""
@staticmethod
def encode(obj):
return json.dumps(obj)
@staticmethod
def decode(enc):
return json.loads(enc)
class PickleCodec(object):
"""
Basic codec using pickle (default version) for encoding. Do not use if
cross-language support is desired.
"""
@staticmethod
def encode(obj):
return pickle.dumps(obj)
@staticmethod
def decode(enc):
return pickle.loads(enc)
|
lgpl-3.0
| -2,156,986,932,203,192,000
| 25.905063
| 80
| 0.530699
| false
| 4.22145
| false
| false
| false
|
xu2243051/easyui-menu
|
easyui/utils.py
|
1
|
1633
|
#coding:utf-8
'''
Generic custom-functionality helpers for this project.
'''
from django.conf.urls import patterns, url
def model_serialize(queryset, extra_fields=[], remove_fields = [], fields = []):
"""
@param queryset queryset
@return a list of dict [{}, {}]
    Custom JSON-style serialization function, closely tied to get_fields in ExtraMixin.
"""
return_list = []
for object in queryset:
value_dict = dict(object.get_fields(field_verbose=False, value_verbose=True,
fields=fields, remove_fields=remove_fields, extra_fields=extra_fields))
return_list.append(value_dict)
return return_list
def register_views(app_name, view_filename, urlpatterns=None):
"""
    app_name        name of the app
    view_filename   file in which the views live
    urlpatterns     urlpatterns that already exist in urls
    return urlpatterns
    Only class-based views whose names end in 'View' are imported.
"""
app_module = __import__(app_name)
view_module = getattr(app_module, view_filename)
views = dir(view_module)
for view_name in views:
if view_name.endswith('View'):
view = getattr(view_module, view_name)
if isinstance(view, object):
if urlpatterns:
urlpatterns += patterns('',
url(r'^(?i)%s/$' % view_name, view.as_view(), name=view_name),
)
else:
urlpatterns = patterns('',
url(r'^(?i)%s/$' % view_name, view.as_view(), name=view_name),
)
else:
pass
return urlpatterns
|
apache-2.0
| 5,187,833,718,034,250,000
| 30.854167
| 91
| 0.551341
| false
| 3.580796
| false
| false
| false
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/services/mail/signedmessage.py
|
1
|
6389
|
# Copyright 2009-2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Classes for simpler handling of PGP signed email messages."""
__metaclass__ = type
__all__ = [
'SignedMessage',
'signed_message_from_string',
'strip_pgp_signature',
]
import email
import re
from zope.interface import implements
from lp.services.mail.interfaces import ISignedMessage
clearsigned_re = re.compile(
r'-----BEGIN PGP SIGNED MESSAGE-----'
'.*?(?:\r\n|\n)(?:\r\n|\n)(.*)(?:\r\n|\n)'
'(-----BEGIN PGP SIGNATURE-----'
'.*'
'-----END PGP SIGNATURE-----)',
re.DOTALL)
# Regexp for matching the signed content in multipart messages.
multipart_signed_content = (
r'%(boundary)s\n(?P<signed_content>.*?)\n%(boundary)s\n.*?\n%(boundary)s')
# Lines that start with '-' are escaped with '- '.
dash_escaped = re.compile('^- ', re.MULTILINE)
def signed_message_from_string(string):
"""Parse the string and return a SignedMessage.
It makes sure that the SignedMessage instance has access to the
parsed string.
"""
msg = email.message_from_string(string, _class=SignedMessage)
msg.parsed_string = string
return msg
class SignedMessage(email.Message.Message):
"""Provides easy access to signed content and the signature"""
implements(ISignedMessage)
parsed_string = None
def _getSignatureAndSignedContent(self):
"""Returns the PGP signature and the content that's signed.
The signature is returned as a string, and the content is
returned as a string.
If the message isn't signed, both signature and the content is
None.
"""
assert self.parsed_string is not None, (
'Use signed_message_from_string() to create the message.')
signed_content = signature = None
# Check for MIME/PGP signed message first.
# See: RFC3156 - MIME Security with OpenPGP
        # RFC3156 says that in order to be a compliant signed message, there
# must be two and only two parts and that the second part must have
# content_type 'application/pgp-signature'.
if self.is_multipart():
payload = self.get_payload()
if len(payload) == 2:
content_part, signature_part = payload
sig_content_type = signature_part.get_content_type()
if sig_content_type == 'application/pgp-signature':
# We need to extract the signed content from the
# parsed string, since content_part.as_string()
                    # isn't guaranteed to return the exact string it was
# created from.
boundary = '--' + self.get_boundary()
match = re.search(
multipart_signed_content % {
'boundary': re.escape(boundary)},
self.parsed_string, re.DOTALL)
signed_content = match.group('signed_content')
signature = signature_part.get_payload()
return signature, signed_content
# If we still have no signature, then we have one of several cases:
# 1) We do not have a multipart message
# 2) We have a multipart message with two parts, but the second part
# isn't a signature. E.g.
# multipart/mixed
# text/plain <- clear signed review comment
# text/x-diff <- patch
# 3) We have a multipart message with more than two parts.
# multipart/mixed
# text/plain <- clear signed body text
        #         text/x-diff <- patch or merge directive
# application/pgp-signature <- detached signature
# Now we can handle one and two by walking the content and stopping at
# the first part that isn't multipart, and getting a signature out of
# that. We can partly handle number three by at least checking the
# clear text signed message, but we don't check the detached signature
# for the attachment.
for part in self.walk():
if part.is_multipart():
continue
match = clearsigned_re.search(part.get_payload())
if match is not None:
signed_content_unescaped = match.group(1)
signed_content = dash_escaped.sub(
'', signed_content_unescaped)
signature = match.group(2)
return signature, signed_content
# Stop processing after the first non-multipart part.
break
return signature, signed_content
@property
def signedMessage(self):
"""Returns the PGP signed content as a message.
Returns None if the message wasn't signed.
"""
signature, signed_content = self._getSignatureAndSignedContent()
if signed_content is None:
return None
else:
if (not self.is_multipart() and
clearsigned_re.search(self.get_payload())):
# Add a new line so that a message with no headers will
# be created.
signed_content = '\n' + signed_content
return signed_message_from_string(signed_content)
@property
def signedContent(self):
"""Returns the PGP signed content as a string.
Returns None if the message wasn't signed.
"""
signature, signed_content = self._getSignatureAndSignedContent()
return signed_content
@property
def signature(self):
"""Returns the PGP signature used to sign the message.
Returns None if the message wasn't signed.
"""
signature, signed_content = self._getSignatureAndSignedContent()
return signature
@property
def raw_length(self):
"""Return the length in bytes of the underlying raw form."""
return len(self.parsed_string)
def strip_pgp_signature(text):
"""Strip any PGP signature from the supplied text."""
signed_message = signed_message_from_string(text)
# For unsigned text the signedContent will be None.
if signed_message.signedContent is not None:
return signed_message.signedContent
else:
return text
|
agpl-3.0
| -9,070,205,200,195,638,000
| 36.804734
| 78
| 0.604633
| false
| 4.458479
| false
| false
| false
|
shichao-an/ctci
|
chapter9/question9.5.py
|
1
|
1696
|
from __future__ import print_function
"""
Write a method to compute all permutations of a string
"""
def get_permutations1(s):
"""
Append (or prepend) every character to each permutation of the
string which does not contain the current character
"""
if not s:
return ['']
else:
res = []
for i, c in enumerate(s):
rest_s = s[:i] + s[i + 1:]
rest_perms = get_permutations1(rest_s)
for perm in rest_perms:
res.append(perm + c)
return res
def insert_at(s, c, i):
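    # e.g. insert_at('bc', 'a', 1) -> 'bac'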
return s[:i] + c + s[i:]
def get_permutations2(s):
"""
Insert the first (or last) character to every spot of each permutation
of the remaining string after this character
"""
if not s:
return ['']
else:
res = []
c = s[0]
rest_s = s[1:]
rest_perms = get_permutations2(rest_s)
for perm in rest_perms:
for i in range(len(perm) + 1):
ns = insert_at(perm, c, i)
res.append(ns)
return res
def get_permutations3_aux(s, cand, res):
"""Backtrack"""
if not s:
res.append(cand)
else:
for i, c in enumerate(s):
get_permutations3_aux(s[:i] + s[i + 1:], cand + c, res)
def get_permutations3(s):
res = []
cand = ''
get_permutations3_aux(s, cand, res)
return res
def _test():
pass
def _print():
s1 = 'abc'
r1 = get_permutations1(s1)
r2 = get_permutations2(s1)
r3 = get_permutations3(s1)
r1.sort()
r2.sort()
r3.sort()
print(r1)
print(r2)
print(r3)
if __name__ == '__main__':
_test()
_print()
|
bsd-2-clause
| 3,653,016,667,504,779,000
| 19.190476
| 74
| 0.527123
| false
| 3.351779
| false
| false
| false
|
jzrake/mara-tools
|
mara_tools/lic/lic.py
|
1
|
1617
|
import numpy as np
def lic_flow(vectors,len_pix=10):
vectors = np.asarray(vectors)
m,n,two = vectors.shape
if two!=2:
raise ValueError
result = np.zeros((2*len_pix+1,m,n,2),dtype=np.int32) # FIXME: int16?
center = len_pix
result[center,:,:,0] = np.arange(m)[:,np.newaxis]
result[center,:,:,1] = np.arange(n)[np.newaxis,:]
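    # Advect len_pix steps forward along the vector field from every pixel,
    # recording the integer (y, x) cell reached at each step.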
for i in range(m):
for j in range(n):
y = i
x = j
fx = 0.5
fy = 0.5
for k in range(len_pix):
vx, vy = vectors[y,x]
print x, y, vx, vy
if vx>=0:
tx = (1-fx)/vx
else:
tx = -fx/vx
if vy>=0:
ty = (1-fy)/vy
else:
ty = -fy/vy
if tx<ty:
print "x step"
if vx>0:
x+=1
fy+=vy*tx
fx=0.
else:
x-=1
fy+=vy*tx
fx=1.
else:
print "y step"
if vy>0:
y+=1
fx+=vx*ty
fy=0.
else:
y-=1
fx+=vx*ty
fy=1.
if x<0: x=0
if y<0: y=0
if x>=n: x=n-1
if y>=m: y=m-1
result[center+k+1,i,j,:] = y, x
return result
|
gpl-2.0
| -3,010,048,274,979,160,600
| 25.95
| 73
| 0.30303
| false
| 3.963235
| false
| false
| false
|
SupayrPoney/RopeSkippingTimer
|
main.py
|
1
|
7577
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import PyQt5
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import time
import math
import winsound
EXERCISETIME = 60
class MainWidget(QWidget):
freqChangeSignal = pyqtSignal(object)
togglerSignal = pyqtSignal()
"""docstring for MainWidget"""
def __init__(self, parent=None):
super(MainWidget, self).__init__(parent)
self.mainLayout = QVBoxLayout()
self.buttonLayout = QHBoxLayout()
self.barsLayout = QHBoxLayout()
self.inputLayout = QHBoxLayout()
## self.frequencyLayout = QHBoxLayout()
self.input1Layout = QVBoxLayout()
self.input2Layout = QVBoxLayout()
self.input3Layout = QVBoxLayout()
self.startButton = QPushButton("Start")
self.startButton.clicked.connect(self.start)
self.buttonLayout.addWidget(self.startButton)
self.stopButton = QPushButton("Stop")
self.stopButton.clicked.connect(self.stop)
self.buttonLayout.addWidget(self.stopButton)
self.resetButton = QPushButton("Reset")
self.resetButton.clicked.connect(self.reset)
self.buttonLayout.addWidget(self.resetButton)
self.minFreqInput = QLineEdit()
self.minFreqInput.setValidator(QIntValidator(1, 65535, self))
self.minFreqText = QLabel("Minimum Frequency")
self.input1Layout.addWidget(self.minFreqText)
self.input1Layout.addWidget(self.minFreqInput)
self.maxFreqInput = QLineEdit()
self.maxFreqInput.setValidator(QIntValidator(1, 65535, self))
self.maxFreqText = QLabel("Maximum Frequency")
self.input2Layout.addWidget(self.maxFreqText)
self.input2Layout.addWidget(self.maxFreqInput)
self.intervalInput = QLineEdit()
self.intervalInput.setValidator(QIntValidator(1, 65535, self))
self.intervalText = QLabel("Interval")
self.input3Layout.addWidget(self.intervalText)
self.input3Layout.addWidget(self.intervalInput)
self.inputLayout.addLayout(self.input1Layout)
self.inputLayout.addLayout(self.input2Layout)
self.inputLayout.addLayout(self.input3Layout)
## self.frequency = QLabel("0")
## f = QFont("Arial", 40)
## self.frequency.setFont( f)
## self.frequencyLayout.addWidget(self.frequency)
## self.frequencyLayout.setAlignment(Qt.AlignHCenter)
self.progressBar = QProgressBar()
self.progressBar.setTextVisible(False)
self.barsLayout.addWidget(self.progressBar)
self.mainLayout.addLayout(self.inputLayout)
self.mainLayout.addLayout(self.buttonLayout)
## self.mainLayout.addLayout(self.frequencyLayout)
self.mainLayout.addLayout(self.barsLayout)
self.setLayout(self.mainLayout)
self.setWindowTitle('Rope skipping Metronoom')
def start(self):
interval = self.intervalInput.text()
minFreq = self.minFreqInput.text()
maxFreq = self.maxFreqInput.text()
if interval == "":
interval = "5"
if minFreq != "" and maxFreq!="":
self.interval = int(interval)
self.maxFreq = int(maxFreq)
self.minFreq = int(minFreq)
self.timerThread = TimerThread(self.interval, self.minFreq, self.maxFreq)
self.timerThread.tick.connect(self.update)
self.timerThread.stopSignal.connect(self.stop)
self.timerThread.start()
self.beeperThread = Beeper(self.minFreq, self.freqChangeSignal, self.togglerSignal, self.interval)
self.beeperThread.start()
else:
QMessageBox.warning(self, "Input missing", "No frequency.", QMessageBox.Ok)
def update(self, currentFreq, updateFreq, percentage):
## if updateFreq:
## self.frequency.setText(str(round(currentFreq)))
self.progressBar.setValue(100*percentage)
self.freqChangeSignal.emit(currentFreq)
def stop(self):
self.timerThread.stop()
self.togglerSignal.emit()
def reset(self):
self.stop()
## self.frequency.setText("0")
self.progressBar.setValue(0)
class TimerThread(QThread):
tick = pyqtSignal(object, object, object)
stopSignal = pyqtSignal()
def __init__(self, interval, minFreq, maxFreq):
QThread.__init__(self)
self._isStopped = False
self.interval = interval
self.minFreq = minFreq
self.maxFreq = maxFreq
self.deltaFreq = 2 * (self.interval * (self.maxFreq - self.minFreq))/ EXERCISETIME
def run(self):
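        # Ramp the frequency up over the first half of the exercise and back
        # down over the second half, emitting one tick per second.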
startTime = time.time()
currentTime = time.time()
currentFreq = self.minFreq
counter = 0
while counter <= EXERCISETIME/2:
counter += 1
if not self._isStopped:
currentFreq += self.deltaFreq/self.interval
updateFreq = counter%self.interval == 0
self.tick.emit(min(currentFreq,self.maxFreq), updateFreq, 2*counter/EXERCISETIME)
time.sleep(1)
while counter <= EXERCISETIME:
counter += 1
if not self._isStopped:
currentFreq -= self.deltaFreq/self.interval
updateFreq = counter%self.interval == 0
self.tick.emit(min(currentFreq,self.maxFreq), updateFreq, 2 - 2*counter/EXERCISETIME)
time.sleep(1)
self.stopSignal.emit()
def stop(self):
self._isStopped = True
class Beeper(QThread):
"""docstring for Beeper"""
def __init__(self, freq, freqChangesignal, togglerSignal, interval):
super(Beeper, self).__init__()
self.freq = freq
self.signal = freqChangesignal
self.signal.connect(self.setFreq)
self.timerToggle = togglerSignal
self.timerToggle.connect(self.toggle)
self.stop = False
self.timeToSleep = 1/(self.freq/60)
self.timeToSleepInc = self.timeToSleep/100
self.freqChange = False
self.interval = interval
def setFreq(self, newFreq):
self.freq = newFreq
self.newTimeToSleep = 1/(self.freq/60)
def run(self):
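        # Beep once per beat (60/freq seconds apart), sleeping in short slices
        # so frequency changes from the timer thread take effect promptly.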
while True:
if not self.stop:
acc = self.timeToSleep
timeSlept = 0
self.playSound()
while timeSlept < acc:
minimum = min(self.interval,self.timeToSleep)
time.sleep(minimum)
timeSlept += minimum
acc = min(self.timeToSleep-minimum,self.newTimeToSleep)
self.timeToSleep = self.newTimeToSleep
# acc = self.timeToSleep
# print(self.freq)
# print(self.timeToSleep)
# print()
# for i in range(10):
# if self.freqChange:
# self.freqChange = False
# break
# else:
# time.sleep(self.timeToSleep/10)
def playSound(self):
winsound.PlaySound('Ticking-clock-sound.wav', winsound.SND_FILENAME)
def toggle(self):
self.stop = True
def get_elapsed(start):
return time.time() - start
def main():
app = QApplication(sys.argv)
w = MainWidget()
w.resize(450, 150)
w.move(300, 300)
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
mit
| -8,393,405,739,307,940,000
| 30.702929
| 110
| 0.605649
| false
| 3.93406
| false
| false
| false
|
note286/lan-encryption-file-transfer
|
recvUI.py
|
1
|
4152
|
from tkinter.filedialog import *
import lan_ip
import tkinter as tk
from tkinter import ttk
from fileTransfer import *
from receiver import *
import _thread
from custom_function import *
from tkinter import messagebox
class RecvPage(tk.Frame):
def __init__(self, parent, root):
super().__init__(parent)
def pickfile():
self.key_file.set(askopenfilename(initialdir = 'C:/Users/mcc'))
def set_save_path():
self.file_path.set(askdirectory(initialdir = 'C:/Users/mcc'))
def prepare_recv():
"""
            Check the parameters and handle the transfer process.
"""
keys = get_key(self.key_input.get())
ip = self.recevIP.get()
port = self.port.get()
file_path = self.file_path.get()
file_name = self.file_name.get()
            # If the save path is empty, do not prepend a slash
file = file_name if file_path=="" else file_path +'\\'+ file_name
encrypFlag = self.encrypFlag.get()
file = file.replace('/','\\')
            # Validate the IP address and port
if prepare(ip,port):
                # If the IP and port are valid, run the socket transfer and report the result based on the return value
if receiver(ip,int(port),file,encrypFlag,keys):
messagebox.showinfo(message='接收成功')
else :
messagebox.showinfo(message='接收失败')
else:
return
def input_key():
if self.encrypFlag.get() == '1':
key_entry['state'] = 'valid'
else:
key_entry['state'] ="readonly"
root.title("文件传输 接收端")
self.port = StringVar()
self.port.set(9995)
self.recevIP = StringVar()
self.encrypFlag = StringVar()
self.encrypFlag.set(0)
self.file_path = StringVar()
self.key_input = StringVar()
self.file_name = StringVar()
self.recevIP.set(lan_ip.get_lan_ip())
self.recvFlag = StringVar()
mainframe = self
        # Input widgets
        # # Text label
ttk.Label(mainframe, text="接收方IP").grid( row=1,column=1, sticky=W)
        # # Parent frame, style, displayed text
        recevIP_entry = ttk.Entry(self, width=10, textvariable=self.recevIP)
recevIP_entry.grid( row=1, column=2,sticky=(W, E))
ttk.Label(mainframe, text="端口").grid( row=1,column=3, sticky=W)
port_entry = ttk.Entry(mainframe, width=8, textvariable=self.port)
port_entry.grid( row=1, column=4,sticky=(W, E))
        # Buttons
ttk.Button(mainframe, text="选择保存路径", command=set_save_path).grid( row=2,column=1, sticky=W)
filename_entry = ttk.Entry(mainframe, width=25, textvariable=self.file_path)
filename_entry.grid(row=2, column=2,columnspan=3, sticky=(W, E))
ttk.Label(mainframe, text="保存为 文件名").grid(row=3,column=1, sticky=W)
filename_entry = ttk.Entry(mainframe, textvariable=self.file_name)
filename_entry.grid(row=3, column=2,columnspan=3, sticky=(W, E))
        # Radio buttons
R1 = ttk.Radiobutton(mainframe, text="不加密", variable=self.encrypFlag, value=0,command=input_key)
R2 = ttk.Radiobutton(mainframe, text="加密", variable=self.encrypFlag, value=1,command=input_key)
R1.grid(row = 4,column=1,sticky=(W, E))
R2.grid(row = 4,column=2,sticky=(W, E))
ttk.Label(mainframe, text="输入密钥").grid( row=5,column=1, sticky=W)
key_entry = ttk.Entry(mainframe, width=15, textvariable=self.key_input,state='readonly')
key_entry.grid( row=5, column=2,columnspan=3,sticky=(W, E))
ttk.Button(mainframe, text="开始接收",command=prepare_recv,).grid( row=6,column=2,columnspan=2, sticky=W)
button1 = ttk.Button(self, text="转到发送", command=lambda: root.show_frame_send()).grid( row=6,column=1, sticky=W)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)
|
mit
| -3,757,951,539,771,199,500
| 39.553191
| 119
| 0.574027
| false
| 2.959818
| false
| false
| false
|
NRGI/resource-contracts-etl
|
ocr-tests/ABBYY/AbbyyOnlineSdk.py
|
1
|
3234
|
#!/usr/bin/python
# Usage: process.py <input file> <output file> [-language <Language>] [-pdf|-txt|-rtf|-docx|-xml]
import argparse
import base64
import getopt
import MultipartPostHandler
import os
import re
import sys
import time
import urllib2
import urllib
import xml.dom.minidom
class ProcessingSettings:
Language = "English"
OutputFormat = "docx"
class Task:
Status = "Unknown"
Id = None
DownloadUrl = None
def IsActive( self ):
if self.Status == "InProgress" or self.Status == "Queued":
return True
else:
return False
class AbbyyOnlineSdk:
ServerUrl = "http://cloud.ocrsdk.com/"
# To create an application and obtain a password,
# register at http://cloud.ocrsdk.com/Account/Register
# More info on getting your application id and password at
# http://ocrsdk.com/documentation/faq/#faq3
ApplicationId = "rcdotorg"
Password = os.environ.get('ABBYY_PASS')
Proxy = None
enableDebugging = 0
def ProcessImage( self, filePath, settings ):
urlParams = urllib.urlencode({
"language" : settings.Language,
"exportFormat" : settings.OutputFormat
})
requestUrl = self.ServerUrl + "processImage?" + urlParams
bodyParams = { "file" : open( filePath, "rb" ) }
request = urllib2.Request( requestUrl, None, self.buildAuthInfo() )
response = self.getOpener().open(request, bodyParams).read()
if response.find( '<Error>' ) != -1 :
return None
# Any response other than HTTP 200 means error - in this case exception will be thrown
# parse response xml and extract task ID
task = self.DecodeResponse( response )
return task
def GetTaskStatus( self, task ):
urlParams = urllib.urlencode( { "taskId" : task.Id } )
statusUrl = self.ServerUrl + "getTaskStatus?" + urlParams
request = urllib2.Request( statusUrl, None, self.buildAuthInfo() )
response = self.getOpener().open( request ).read()
task = self.DecodeResponse( response )
return task
def DownloadResult( self, task, outputPath ):
getResultParams = urllib.urlencode( { "taskId" : task.Id } )
getResultUrl = self.ServerUrl + "getResult?" + getResultParams
request = urllib2.Request( getResultUrl, None, self.buildAuthInfo() )
fileResponse = self.getOpener().open( request ).read()
resultFile = open( outputPath, "wb" )
resultFile.write( fileResponse )
def DecodeResponse( self, xmlResponse ):
""" Decode xml response of the server. Return Task object """
dom = xml.dom.minidom.parseString( xmlResponse )
taskNode = dom.getElementsByTagName( "task" )[0]
task = Task()
task.Id = taskNode.getAttribute( "id" )
task.Status = taskNode.getAttribute( "status" )
if task.Status == "Completed":
task.DownloadUrl = taskNode.getAttribute( "resultUrl" )
return task
def buildAuthInfo( self ):
return { "Authorization" : "Basic %s" % base64.encodestring( "%s:%s" % (self.ApplicationId, self.Password) ) }
def getOpener( self ):
if self.Proxy == None:
self.opener = urllib2.build_opener( MultipartPostHandler.MultipartPostHandler,
urllib2.HTTPHandler(debuglevel=self.enableDebugging))
else:
self.opener = urllib2.build_opener(
self.Proxy,
MultipartPostHandler.MultipartPostHandler,
urllib2.HTTPHandler(debuglevel=self.enableDebugging))
return self.opener
|
gpl-2.0
| -7,730,318,793,496,702,000
| 30.398058
| 112
| 0.717378
| false
| 3.313525
| false
| false
| false
|
google/tink
|
python/tink/hybrid/_hybrid_wrapper.py
|
1
|
4204
|
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HybridDecrypt wrapper."""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
from typing import Type
from absl import logging
from tink import core
from tink.hybrid import _hybrid_decrypt
from tink.hybrid import _hybrid_encrypt
class _WrappedHybridDecrypt(_hybrid_decrypt.HybridDecrypt):
"""Implements HybridDecrypt for a set of HybridDecrypt primitives."""
def __init__(self, pset: core.PrimitiveSet):
self._primitive_set = pset
def decrypt(self, ciphertext: bytes, context_info: bytes) -> bytes:
if len(ciphertext) > core.crypto_format.NON_RAW_PREFIX_SIZE:
prefix = ciphertext[:core.crypto_format.NON_RAW_PREFIX_SIZE]
ciphertext_no_prefix = ciphertext[core.crypto_format.NON_RAW_PREFIX_SIZE:]
for entry in self._primitive_set.primitive_from_identifier(prefix):
try:
return entry.primitive.decrypt(ciphertext_no_prefix,
context_info)
except core.TinkError as e:
logging.info(
'ciphertext prefix matches a key, but cannot decrypt: %s', e)
# Let's try all RAW keys.
for entry in self._primitive_set.raw_primitives():
try:
return entry.primitive.decrypt(ciphertext, context_info)
except core.TinkError as e:
pass
# nothing works.
raise core.TinkError('Decryption failed.')
class HybridDecryptWrapper(core.PrimitiveWrapper[_hybrid_decrypt.HybridDecrypt,
_hybrid_decrypt.HybridDecrypt]
):
"""HybridDecryptWrapper is the PrimitiveWrapper for HybridDecrypt.
The returned primitive works with a keyset (rather than a single key). To
decrypt, the primitive uses the prefix of the ciphertext to efficiently select
the right key in the set. If the keys associated with the prefix do not work,
the primitive tries all keys with OutputPrefixType RAW.
"""
def wrap(self,
pset: core.PrimitiveSet) -> _hybrid_decrypt.HybridDecrypt:
return _WrappedHybridDecrypt(pset)
def primitive_class(self) -> Type[_hybrid_decrypt.HybridDecrypt]:
return _hybrid_decrypt.HybridDecrypt
def input_primitive_class(self) -> Type[_hybrid_decrypt.HybridDecrypt]:
return _hybrid_decrypt.HybridDecrypt
class _WrappedHybridEncrypt(_hybrid_encrypt.HybridEncrypt):
"""Implements HybridEncrypt for a set of HybridEncrypt primitives."""
def __init__(self, pset: core.PrimitiveSet):
self._primitive_set = pset
def encrypt(self, plaintext: bytes, context_info: bytes) -> bytes:
primary = self._primitive_set.primary()
return primary.identifier + primary.primitive.encrypt(
plaintext, context_info)
class HybridEncryptWrapper(core.PrimitiveWrapper[_hybrid_encrypt.HybridEncrypt,
_hybrid_encrypt.HybridEncrypt]
):
"""HybridEncryptWrapper is the PrimitiveWrapper for HybridEncrypt.
The returned primitive works with a keyset (rather than a single key). To
encrypt a plaintext, it uses the primary key in the keyset, and prepends to
the ciphertext a certain prefix associated with the primary key.
"""
def wrap(self,
pset: core.PrimitiveSet) -> _hybrid_encrypt.HybridEncrypt:
return _WrappedHybridEncrypt(pset)
def primitive_class(self) -> Type[_hybrid_encrypt.HybridEncrypt]:
return _hybrid_encrypt.HybridEncrypt
def input_primitive_class(self) -> Type[_hybrid_encrypt.HybridEncrypt]:
return _hybrid_encrypt.HybridEncrypt
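# Illustrative usage sketch (not part of the original module). It assumes the
# public tink Python API (keyset handles, hybrid key templates and
# hybrid.register(), which installs the wrappers defined above):
#
#   import tink
#   from tink import hybrid
#   hybrid.register()
#   private_handle = tink.new_keyset_handle(
#       hybrid.hybrid_key_templates.ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM)
#   enc = private_handle.public_keyset_handle().primitive(hybrid.HybridEncrypt)
#   ct = enc.encrypt(b'plaintext', b'context')
#   dec = private_handle.primitive(hybrid.HybridDecrypt)
#   assert dec.decrypt(ct, b'context') == b'plaintext'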
|
apache-2.0
| -5,839,068,032,982,058,000
| 37.568807
| 80
| 0.705519
| false
| 3.992403
| false
| false
| false
|
fintanr/hcup-stats
|
parse-hcup-results.py
|
1
|
19302
|
#!/usr/bin/python
#
# Extract data from the Wikipedia Heineken Cup Pool Stages
# and create a tidy data set for use in R
#
# A lot of this code handles corner cases and deals with variants
# in the wikipedia pages, and it hasn't been refactored
#
import re
import sys
import unicodedata
from urllib2 import urlopen
from bs4 import BeautifulSoup
infile = "input-pages.txt"
urlbase = "http://en.wikipedia.org/w/index.php?title"
urls = {}
ourData = []
headers = "season,poolId,matchDate,matchTime,homeTeam,awayTeam,matchReferee"
headers = "%s,matchAttendance,matchScore,homeTries,homePenaltyTries" % headers
headers = "%s,homeTriesTimes,homeConversions,awayTries,awayPenaltyTries" % headers
headers = "%s,awayTriesTimes,awayConversons,homePenalties,homePeanaltiesTimes,homeDrops" % headers
headers = "%s,homeDropTimes,awayPenalties,awayPenaltiesTimes" % headers
headers = "%s,awayDrops,awayDropsTimes" % headers
ourData.append(headers)
def getTries(inString):
myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
tries = re.split("'", myString)
penaltyTryCount = 0
theseTries = []
for thistry in tries:
thisTime = re.match("(.*)\s(\d+)", thistry)
if ( thisTime ):
theseTries.append(thisTime.group(2))
penaltyMatch = re.match(".*penalty try.*", thistry, re.IGNORECASE)
if ( penaltyMatch ):
penaltyTryCount += 1
return (penaltyTryCount, theseTries)
def getConversions(inString):
myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
# this is a little risky, but seems to get every case
    # there are a number of commented out values, but our regex eliminates
# these
cons = re.split("\)", myString)
totalConversions = 0
for con in cons:
thisConCount = re.match(".*\[\[.*\|.*\]\]\s\((\d+)\/\d+", con)
if ( thisConCount ):
totalConversions += int(thisConCount.group(1))
return totalConversions
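    # Illustration (hypothetical wiki markup): a credit such as
    #   "[[Jonathan Sexton|Sexton]] (3/4)"
    # matches the pattern above with group(1) == "3", so it adds 3 successful
    # conversions to totalConversions.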
def getPenOrDrop(inString):
myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
pens = re.split("'", myString)
thesePenalties = []
for pen in pens:
penMatch = re.match(".*\s(\d+)(,|)", pen)
if ( penMatch ):
thesePenalties.append(penMatch.group(1))
return thesePenalties
def getMatchScore(inString):
myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
    matchScore = re.sub("–", "-", myString)
    return matchScore
def getReferee(inString):
myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
ref = "NA"
# 2012/ 13 match
refMatch = re.match(".*\[\[(.*)\]\](\s+|<).*", myString)
if ( refMatch ):
subTest = re.match(".*\|(.*)", refMatch.group(1))
if ( subTest ):
ref = subTest.group(1)
else:
ref = refMatch.group(1)
else:
# 2010 / 11 format, e.g.
# John Lacey ([[Irish Rugby Football Union|Ireland]])
refMatch = re.match("(.*)\s\(\[\[.*\]\]\)", myString)
if ( refMatch ):
ref = refMatch.group(1)
return ref
def getTeamName(inString):
myString = unicodedata.normalize('NFKD', inString).encode('ascii', 'ignore')
# teamMatch has a couple of possible formats, work through them
# all until we get the correct match and then extract the team name
team = "Not Found"
teamMatch = re.match(".*\[\[(.*)\]\].*", myString)
if ( teamMatch ):
filterMatch = re.match(".*\|(.*)", teamMatch.group(1))
if ( filterMatch ):
team = filterMatch.group(1)
else:
team = teamMatch.group(1)
else:
# 2010 / 11 formatting for team names
teamMatch = re.match("\s+{{.*}}\s+(.*)", myString)
if ( teamMatch ):
team = teamMatch.group(1)
else:
teamMatch = re.match("(.*)\s{{.*}}", myString)
if ( teamMatch ):
team = teamMatch.group(1)
# tidy up the whitespace around the name
team = re.sub("^\s+","", re.sub("\s+$", "", team))
return team
def buildTidyData(season, poolId, inData):
matchDate = re.sub("\s+$", "", inData.get('date'))
matchTime = re.sub("^\s+", "", re.sub("\s+$", "", inData.get('time')))
    matchScore = re.sub("–", "-", inData.get('score'))
#matchScore = unicodedata.normalize('NFKD', matchScore).encode('utf8', 'ignore')
#matchScore = getMatchScore(inData.get('score'))
matchAttendance = inData.get('attendance')
matchAttendance = re.sub(",", "", matchAttendance)
homeTeam = getTeamName(inData.get('home'))
awayTeam = getTeamName(inData.get('away'))
matchReferee = getReferee(inData.get('referee'))
# default scoring data
homePenaltyTries = 0
homeTries = []
homeTriesTimes = ""
homeConversions = 0
awayPenaltyTries = 0
awayTries = []
awayTriesTimes = ""
awayConversions = 0
homePenalties = []
homePenaltiesTimes = ""
awayPenalties = []
awayPenaltiesTimes = ""
homeDrops = []
homeDropsTimes = ""
awayDrops = []
awayDropsTimes = ""
if 'try1' in inData.keys():
(homePenaltyTries, homeTries) = getTries(inData.get('try1'))
homeTriesTimes = extractTimes(homeTries)
if 'con1' in inData.keys():
homeConversions = getConversions(inData.get('con1'))
if 'try2' in inData.keys():
(awayPenaltyTries, awayTries) = getTries(inData.get('try2'))
awayTriesTimes = extractTimes(awayTries)
if 'con2' in inData.keys():
awayConversions = getConversions(inData.get('con2'))
if 'pen1' in inData.keys():
homePenalties = getPenOrDrop(inData.get('pen1'))
homePenaltiesTimes = extractTimes(homePenalties)
if 'pen2' in inData.keys():
awayPenalties = getPenOrDrop(inData.get('pen2'))
awayPenaltiesTimes = extractTimes(awayPenalties)
if 'drop1' in inData.keys():
homeDrops = getPenOrDrop(inData.get('drop1'))
homeDropsTimes = extractTimes(homeDrops)
if 'drop2' in inData.keys():
awayDrops = getPenOrDrop(inData.get('drop2'))
awayDropsTimes = extractTimes(awayDrops)
part1 = "%s,%s,%s,%s" % (season.decode('utf-8'), poolId, matchDate, matchTime )
part2 = "%s,%s,%s,%s,%s" % ( homeTeam, awayTeam, matchReferee, matchAttendance, matchScore)
part3 = "%s,%s,%s,%s" % ( len(homeTries), homePenaltyTries, homeTriesTimes, homeConversions)
part4 = "%s,%s,%s,%s" % ( len(awayTries), awayPenaltyTries, awayTriesTimes, awayConversions)
part5 = "%s,%s,%s,%s" % ( len(homePenalties), homePenaltiesTimes, len(homeDrops), homeDropsTimes)
part6 = "%s,%s,%s,%s" % ( len(awayPenalties), awayPenaltiesTimes, len(awayDrops), awayDropsTimes)
outString = "%s,%s,%s,%s,%s,%s" % ( part1, part2, part3, part4, part5, part6)
ourData.append(outString)
def loadUrls(inFile):
for s in (line.strip() for line in open(inFile)):
thisUrl = s.split('/', 4)[4]
season = thisUrl.split('_',4)[0]
# okay this is a horrible hack, if we use urllib2 unquote
# we end up with a "long" str so just sub out the values
# instead
season = re.sub("%E2%80%93","-", season)
fullUrl = "%s=%s&action=edit" % (urlbase, thisUrl)
urls[season] = fullUrl
return urls
def extractTimes(timeList):
pipedTimes = ""
for j in timeList:
pipedTimes = "%s|%s" % ( pipedTimes, j)
pipedTimes = re.sub("^\|","", pipedTimes)
return(pipedTimes)
def extractSeasonData(urls):
for season, url in urls.iteritems():
print "Extracting Data for Season: %s:" % season
u = urlopen(url)
r = u.read()
soup = BeautifulSoup(r)
textarea = soup.find_all('textarea')[0].contents
splitarea = re.split("\n",textarea[0])
text = iter(splitarea)
# this is rather horrible, but depending on the season we need to parse
# the data in different ways... so...
if ( season in ['2006-07', '2007-08', '2008-09', '2009-10']):
parseZeroNineSeasonData(season, text)
else:
parseSeasonData(season, text)
# we need to add functions for 2006-07 and 2007-08
# 2005 - 06 is missing too much data to be useful
def parseZeroNineSeasonData(season, text):
gameCounter = 0
for line in text:
pool = re.match("==(=|)Pool\s+(\d+)(|=)==", line)
if ( pool ):
while ( gameCounter < 12 ):
poolId = pool.group(2)
line = next(text)
foundMatch = re.match("\{\{rugbybox(|\s+\|)", line)
localData = {}
while ( foundMatch ):
line = next(text)
# and another horrible hack, if a line starts with a <!-
# skip it and go to the next
if ( re.match("<!-", line ) ):
line = next(text)
# in the 09 - 10 season lines end with referee = <blah> }}
foundEnd = re.match("(\||)referee(\s+|)=(.*)(}}|)", line)
if ( foundEnd ):
foundMatch = None
refBasic = foundEnd.group(3)
localData['referee'] = refBasic
buildTidyData(season, poolId, localData)
gameCounter += 1
else:
# we have some blank referee values, we need to deal
# with these
# add these values into our structure
# we take the re.split as a list and do some processing
                        # here for corner cases
myList = re.split("=", line)
if ( len(myList) > 2 ):
                            # we have gotten one of these odd elements with
# extra strings after the date
myTmp = re.split("<", myList[1])
thisKey = myList[0]
thisVal = myTmp[0]
else:
thisKey = myList[0]
if ( len(myList) < 2 ):
thisVal = "NA"
else:
thisVal = myList[1]
thisValCheck = re.match("(.*)\s+\|", thisVal)
if ( thisValCheck ):
thisVal = thisValCheck.group(1)
# homescore and awayscore are all one bundle in some of the
# earlier pages, so we need to split them out
thisKey = re.match("(\||)(\s+|)(.*)(\s+|)(\||)", thisKey)
thisKey = re.sub("\s+", "", thisKey.group(3))
if ( ( thisKey == 'homescore' ) or ( thisKey == 'awayscore' ) ):
( keySuffix, tries, conversions, penalties,
dropgoals ) = parseZeroNineScores(thisKey, thisVal)
tryName = "try%s" % keySuffix
conName = "con%s" % keySuffix
penName = "pen%s" % keySuffix
dropName = "drop%s" % keySuffix
if ( tries is not None ):
localData[tryName] = tries
if ( conversions is not None ):
localData[conName] = conversions
if ( penalties is not None ):
localData[penName] = penalties
if ( dropgoals is not None ):
localData[dropName] = dropgoals
else:
if ( thisKey == "date" ):
thisDateCheck = re.match("(.*)<br />(.*)", thisVal)
if ( thisDateCheck ):
thisVal = thisDateCheck.group(1)
localData['time'] = thisDateCheck.group(2)
if ( thisKey == "score"):
thisVal = unicodedata.normalize('NFKD', thisVal).encode('utf8')
                            thisVal = re.sub("–", "-", thisVal)
thisScoreSplit = re.match("(\s+|)(\d+)(\s+|)(-|\\xe2\\x80\\x93)(\s+|)(\d+)(\s+|)",
thisVal)
thisVal = "%s-%s" % (thisScoreSplit.group(2), thisScoreSplit.group(6))
localData[thisKey] = thisVal
gameCounter = 0
pool = None
def parseZeroNineScores(key, val):
    # okay so these strings are a bit all over the place; we need to
    # first see if there are tries in the string and, if we do, extract them
if ( key == 'homescore' ):
keySuffix = "1"
else:
keySuffix = "2"
tryName = "try%s" % keySuffix
conName = "con%s" % keySuffix
penName = "pen%s" % keySuffix
dropName = "drop%s" % keySuffix
triesString = None
penaltiesString = None
conversionsString = None
dropGoalsString = None
# this is absolutely horrible, but it allows us to carve up
# the away and home scores details
# clear out the trailing | for the 07-08 season
val = re.sub("\|$", "", val)
tries = re.match("(\s+|)'''(Tries|Try):'''(.*)", val)
if ( tries ):
        # next see if there were any conversions, if so extract those
# of course there is another exception here, so lets try a few
# combinations
conversions = re.match("(.*)'''Con:'''(.*)", tries.group(3))
if ( conversions ):
# split out penalties, and then drop goals
triesString = conversions.group(1)
penalties = re.match("(.*)'''Pen:'''(.*)", conversions.group(2))
if ( penalties ):
# final check for drop goals
conversionsString = penalties.group(1)
dropgoals = re.match("(.*)'''Drop:'''(.*)", penalties.group(2))
if ( dropgoals ):
penaltiesString = dropgoals.group(1)
                    dropGoalsString = dropgoals.group(2)
else:
penaltiesString = penalties.group(2)
else:
penalties = re.match("(.*)'''Pen:'''(.*)", tries.group(3))
if ( penalties ):
triesString = penalties.group(1)
dropgoals = re.match("(.*)'''Drop:'''(.*)", penalties.group(2))
if ( dropgoals ):
penaltiesString = dropgoals.group(1)
dropGoalsString = dropgoals.group(2)
else:
penaltiesString = penalties.group(2)
else:
                    triesString = tries.group(3)
else:
# check for penalties, drop goals and so forth
penalties = re.match("(\s+|)'''Pen:'''(.*)", val)
if ( penalties ):
# check for drop goals
dropgoals = re.match("(.*)'''Drop:'''(.*)", penalties.group(2))
if ( dropgoals ):
penaltiesString = dropgoals.group(1)
dropGoalsString = dropgoals.group(2)
else:
                penaltiesString = penalties.group(2)
else:
            # check for drop goals (and then penalties, just in case)
dropgoals = re.match("(\s+|)'''Drop:'''(.*)", val)
if ( dropgoals ):
penalties = re.match("(.*)'''Pen:'''(.*)", val)
if ( penalties ):
dropGoalsString = penalties.group(1)
penaltiesString = penalties.group(2)
else:
                    dropGoalsString = dropgoals.group(2)
return(keySuffix, triesString, conversionsString, penaltiesString,
dropGoalsString)
def parseSeasonData(season, text):
gameCounter = 0
for line in text:
pool = re.match("===Pool\s+(\d+)===", line)
if ( pool ):
            # okay we have a pool; each pool has 12 games to
            # extract data about
while ( gameCounter < 12 ):
poolId = pool.group(1)
line = next(text)
foundMatch = re.match("\{\{rugbybox", line)
localData = {}
while ( foundMatch ):
line = next(text)
# okay we now need to extract out each line, until we hit a }}
foundEnd = re.match("\}\}", line)
if ( foundEnd ):
foundMatch = None
buildTidyData(season, poolId, localData)
gameCounter += 1
else:
# add these values into our structure
# we take the re.split as a list and do some processing
                        # here for corner cases
myList = re.split("=", line)
if ( len(myList) > 2 ):
                            # we have gotten one of these odd elements with
# extra strings after the date
myTmp = re.split("<", myList[1])
thisKey = myList[0]
thisVal = myTmp[0]
else:
thisKey = myList[0]
thisVal = myList[1]
thisKey = re.match("^(\||\s+\|)(.*)\s+", thisKey)
thisKey = re.sub("\s+", "", thisKey.group(2))
                        # some years don't have a time aspect, it's included
                        # in the date.
if ( thisKey == "date" ):
thisDateCheck = re.match("(.*)<br />(.*)", thisVal)
if ( thisDateCheck ):
thisVal = thisDateCheck.group(1)
localData['time'] = thisDateCheck.group(2)
# scores are in a few different formats, and they get
# really messed up in unicode and are unusable in R
                        # we do some processing here to avoid this
#
# to be clear this is a horrible hack...
#
if ( thisKey == "score"):
thisVal = unicodedata.normalize('NFKD', thisVal).encode('utf8')
                            thisVal = re.sub("–", "-", thisVal)
thisScoreSplit = re.match("(\s+|)(\d+)(\s+|)(-|\\xe2\\x80\\x93)(\s+|)(\d+)(\s+|)",
thisVal)
thisVal = "%s-%s" % (thisScoreSplit.group(2), thisScoreSplit.group(6))
localData[thisKey] = thisVal
gameCounter = 0
pool = None
urls = loadUrls(infile)
extractSeasonData(urls)
f = open("tidydata.csv", "w")
for line in ourData:
print >>f, line.encode('utf8')
f.close()
|
apache-2.0
| 4,643,953,281,704,777,000
| 37.604
| 111
| 0.510258
| false
| 3.910454
| false
| false
| false
|
Emmunaf/WarmaneAutovote
|
Mlogin.py
|
1
|
6146
|
#!/usr/bin/python
# Thanks: Glusk for the GREAT help
# karapidiola for the base script
from socket import *
import hashlib
try:
import _srp as srp
except ImportError:
print("Need py_srp")
exit(1)
def generate_K(S):
"""Generate K from S with SHA1 Interleaved"""
s_bytes = srp.long_to_bytes(S)
# Hash the odd bytes of S (session key)
hash_object = hashlib.sha1(s_bytes[::2])
odd_hashed = hash_object.digest()
# Hash the even bytes of S
hash_object = hashlib.sha1(s_bytes[1::2])
even_hashed = hash_object.digest()
K = ""
# Create K as alternate string concatenation
for o, e in zip(odd_hashed, even_hashed):
K += o + e # K = odd[0],even[0],odd[1],..
return K
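    # Illustration of the interleaving above (hypothetical digests): if
    # odd_hashed were "ABC" and even_hashed were "xyz", K would come out as
    # "AxByCz" - the two SHA1 digests woven together byte by byte.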
class Mpacket:
def hex_print(self, data):
b = ""
for i in range(0, len(data)):
b += "%02x" % ord(data[i])
return b
def alchallenge_packet(self, username):
packet = "\x00" # Opcode (Auth Logon Challenge)
packet += "\x08" # (Error) da wireshark
packet += chr(30 + len(username))
packet += "\x00\x57\x6f\x57\x00" # Game name: <WoW>
packet += "\x03\x03\x05" # Version[1,2,3]: <335>
packet += "\x34\x30" # Build: <12340>
packet += "\x36\x38\x78\x00" # Platform: <x86>
packet += "\x6e\x69\x57\x00" # O.S. : <Win>
packet += "\x53\x55\x6e\x65" # Country: <enUS>
packet += "\x3c\x00\x00\x00" # Timezone bias: <60>
packet += "\xc0\xa8\x01\x02" # IP address: <192.168.1.2> #?? Need real local one, or is it the same?
packet += chr(len(username)) # SRP I length
packet += username.upper() # SRP I value
return packet
def alproof_packet(self, M1, A):
packet = "\x01" # Opcode (Auth Logon Proof)
# For CRC don't need real value (annoying, sha1 files)
CRC = "\xa4\x1f\xd3\xe0\x1f\x72\x40\x46\xa7\xd2\xe7\x44\x9e\x1d\x36\xcf\xaf\x72\xa3\x3a"
NULL_PAD = "\x00\x00"
A = srp.long_to_bytes(long(A))
print "------------------------------------------------------------------------------"
for i in range(0, 32):
packet += A[i]
for i in range(0, 20):
packet += M1[i]
packet += CRC
packet += NULL_PAD
return packet
def decode_packet(self, data):
opcodes = [("AUTH_LOGON_CHALLENGE", "\x00"), ("AUTH_LOGON_PROOF", "\x01")]
srp_vals = []
opcode = data[0]
for p in opcodes:
if opcode == p[1]:
error = data[1]
srp_vals.append(data[3:35]) # B, skip 1 field (Length_g)
srp_vals.append(data[36:37]) # g, skip 1 field (Length_n)
srp_vals.append(data[38:38 + 32]) # n
srp_vals.append(data[38 + 32:38 + (32 * 2)]) # s [salt]
srp_vals.append(data[38 + (32 * 2):len(data) - 1]) # CRC
print p[0] + " with error :" + hex(ord(error))
print "SRP B :" + self.hex_print(srp_vals[0]) + " " + str(len(srp_vals[0]))
print "SRP g :" + self.hex_print(srp_vals[1]) + " " + str(len(srp_vals[1]))
print "SRP N :" + self.hex_print(srp_vals[2]) + " " + str(len(srp_vals[2]))
print "SRP s :" + self.hex_print(srp_vals[3]) + " " + str(len(srp_vals[3]))
print "CRC :" + self.hex_print(srp_vals[4]) + " " + str(len(srp_vals[4]))
print srp_vals
return srp_vals
            if opcode == opcodes[1][1]:  # AUTH_LOGON_PROOF
                print "We got it!"
                return srp_vals
X = Mpacket()
# Server data
host = "54.213.244.47"
port = 3724
# Login data (alexlorens, lolloasd) is a testing account
user = "alexlorens".upper()
password = "lolloasd".upper()
sck = socket(AF_INET, SOCK_STREAM)
sck.connect((host, port))
n_make = ""
b_make = ""
s_make = ""
sck.send(X.alchallenge_packet(user)) # Send Auth Logon Challenge
SRP_ARRAY = X.decode_packet(sck.recv(1024)) # Read SRP value for sending Logon Proof
############################################################################
g = srp.bytes_to_long(SRP_ARRAY[1])
N = srp.bytes_to_long(SRP_ARRAY[2])
hash_class = srp._hash_map[srp.SHA1] # Using sha1 hashing for everything except K (Sha1-Interleaved)
k = 3 # SRP-6
I = user
p = password
# Generate A
a = srp.get_random(32)
A = srp.reverse(pow(srp.reverse(g), srp.reverse(a), srp.reverse(N))) # Big endian
#
## PRINT TEST1
print("Calcolo A")
print ('a:', a)
print ('g:', SRP_ARRAY[1])
print ('N:', SRP_ARRAY[2])
print ('A:', A)
##END PRINT TEST 1
v = None
M = None
K = None
H_AMK = None
s = srp.bytes_to_long(SRP_ARRAY[3])
B = srp.bytes_to_long(SRP_ARRAY[0])
#print('B: ->', B)
#print('B: [bytes_to_long] ->',srp.bytes_to_long(SRP_ARRAY[0]))
#print('B: [reversed, used for calc] ->',srp.reverse(B))
if (B % N) == 0:
print "Error"
u = srp.H(hash_class, A, B)
x = srp.gen_x(hash_class, s, I, p) #
v = srp.reverse(pow(srp.reverse(g), srp.reverse(x), srp.reverse(N))) # Big endian
S = srp.reverse(pow((srp.reverse(B) - srp.reverse(k) * srp.reverse(v)),
srp.reverse(a) + srp.reverse(u) * srp.reverse(x), srp.reverse(N))) # Big endian
## PRINT TEST3
print "--------------####-----------------------"
print("Valori utili")
print ('N:', SRP_ARRAY[2])
print ('g:', SRP_ARRAY[1])
print ('I:', I)
print ('p:', p)
print ('s:', SRP_ARRAY[3])
print ('B:', SRP_ARRAY[0])
print ('[a]:', srp.long_to_bytes(a))
print "---------------####----------------------"
##END PRINT TEST 3
## PRINT TEST2
print "----------------------------------------"
print("Calcolo u, x, S")
print ('u:', u)
print ('x:', x)
print ('v:', v)
print ('S:', S)
print "----------------------------------------"
##END PRINT TEST 2
K = generate_K(S)
print ('K:', K)
M = srp.calculate_M(hash_class, N, g, I, s, A, B, K)
print ('M:', M)
############################################################################
sck.send(X.alproof_packet(M, A))
sck.recv(1024) # REALM_AUTH_NO_MATCH...:(
sck.send("\x10\x00\x00\x00\x00")
print sck.recv(1024)
# x.RecvedData(sck.recv(1024))
'''Note:
Use little endian for hashing,
Big endian while doing math:
(*,+,^,ModPow,...)
'''
|
mit
| -2,810,643,457,059,818,500
| 31.347368
| 109
| 0.522291
| false
| 2.808958
| true
| false
| false
|
DaMonkey/hankypanky
|
lazylibrarian/gr.py
|
1
|
2378
|
import time, threading, urllib, urllib2, sys
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
import lazylibrarian
from lazylibrarian import logger, formatter, database
class GoodReads:
# http://www.goodreads.com/api/
def __init__(self, name=None, type=None):
self.name = name.replace('.', '')
self.type = type
self.params = {"key": lazylibrarian.GR_API}
def find_author_id(self):
URL = 'http://www.goodreads.com/api/author_url/' + urllib.quote(self.name) + '.xml?' + urllib.urlencode(self.params)
logger.info("Searching for author with name: %s" % self.name)
try:
sourcexml = ElementTree.parse(urllib2.urlopen(URL, timeout=20))
except (urllib2.URLError, IOError, EOFError), e:
logger.error("Error fetching authorid: ", e)
rootxml = sourcexml.getroot()
resultxml = rootxml.getiterator('author')
authorlist = []
if not len(rootxml):
logger.info('No authors found with name: %s' % self.name)
return authorlist
else:
for author in resultxml:
authorid = author.attrib.get("id")
logger.info('Found author: %s with GoodReads-id: %s' % (author[0].text, authorid))
time.sleep(1)
authorlist = self.get_author_info(authorid)
return authorlist
def get_author_info(self, authorid=None):
URL = 'http://www.goodreads.com/author/show/' + authorid + '.xml?' + urllib.urlencode(self.params)
sourcexml = ElementTree.parse(urllib2.urlopen(URL, timeout=20))
rootxml = sourcexml.getroot()
resultxml = rootxml.find('author')
author_dict = {}
if not len(rootxml):
logger.info('No author found with ID: ' + authorid)
else:
logger.info("Processing info for authorID: %s" % authorid)
author_dict = {
'authorid': resultxml[0].text,
'authorlink': resultxml.find('link').text,
'authorimg': resultxml.find('image_url').text,
'authorborn': resultxml.find('born_at').text,
'authordeath': resultxml.find('died_at').text,
'totalbooks': resultxml.find('works_count').text
}
return author_dict
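# Illustrative usage (not part of the original module); it assumes
# lazylibrarian.GR_API holds a valid GoodReads developer key and the author
# name below is a placeholder:
#
#   gr = GoodReads(name="Iain Banks")
#   author = gr.find_author_id()  # author_dict from get_author_info, or [] if no match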
|
gpl-3.0
| 303,320,833,390,499,000
| 36.15625
| 124
| 0.589151
| false
| 3.817014
| false
| false
| false
|
merlinpatt/py-trello
|
trello/board.py
|
1
|
7269
|
from __future__ import absolute_import
from .card import Card
from .list import List
from .member import Member
class Board(object):
"""
Class representing a Trello board. Board attributes are stored as normal
Python attributes; access to all sub-objects, however, is always
an API call (Lists, Cards).
"""
@property
def lists(self):
"""
Lazily loads and returns the lists
"""
if self._lists is None:
self._lists = self.all_lists()
return self._lists
@property
def members(self):
"""
Lazily loads and returns the members
"""
if self._members is None:
self._members = self.all_members()
return self._members
def __init__(self, client=None, board_id=None, organization=None, name=''):
"""
:trello: Reference to a Trello object
:board_id: ID for the board
Alternative Constructor
:organization: reference to the parent organization
:board_id: ID for this board
"""
if organization is None:
self.client = client
else:
self.organization = organization
self.client = organization.client
self.id = board_id
self.name = name
self._lists = None
self._members = None
@classmethod
def from_json(cls, trello_client=None, organization=None, json_obj=None):
"""
Deserialize the board json object to a Board object
:trello_client: the trello client
:json_obj: the board json object
        Alternative constructor:
Deserialize the board json object to a board object
:organization: the organization object that the board belongs to
:json_obj: the json board object
"""
if organization is None:
board = Board(client=trello_client, board_id=json_obj['id'], name=json_obj['name'].encode('utf-8'))
else:
board = Board(organization=organization, board_id=json_obj['id'], name=json_obj['name'].encode('utf-8'))
board.description = json_obj.get('desc', '').encode('utf-8')
board.closed = json_obj['closed']
board.url = json_obj['url']
return board
def __repr__(self):
return '<Board %s>' % self.name
def search_lists(self, query):
lists = [tlist for tlist in self.lists if query.lower() in tlist.name.lower()]
return lists[0] if len(lists) == 1 else lists
def search_members(self, query):
members = [member for member in self.members if query.lower() in member.full_name.lower()]
return members[0] if len(members) == 1 else members
def fetch(self):
"""Fetch all attributes for this board"""
json_obj = self.client.fetch_json('/boards/' + self.id)
self.name = json_obj['name']
self.description = json_obj.get('desc', '')
self.closed = json_obj['closed']
self.url = json_obj['url']
def save(self):
pass
def close(self):
self.client.fetch_json(
'/boards/' + self.id + '/closed',
http_method='PUT',
post_args={'value': 'true', }, )
self.closed = True
def get_list(self, list_id):
obj = self.client.fetch_json('/lists/' + list_id)
return List.from_json(board=self, json_obj=obj)
def all_lists(self):
"""Returns all lists on this board"""
return self.get_lists('all')
def open_lists(self):
"""Returns all open lists on this board"""
return self.get_lists('open')
def closed_lists(self):
"""Returns all closed lists on this board"""
return self.get_lists('closed')
def get_lists(self, list_filter):
# error checking
json_obj = self.client.fetch_json(
'/boards/' + self.id + '/lists',
query_params={'cards': 'none', 'filter': list_filter})
return [List.from_json(board=self, json_obj=obj) for obj in json_obj]
def add_list(self, name):
"""Add a list to this board
:name: name for the list
:return: the list
"""
obj = self.client.fetch_json(
'/lists',
http_method='POST',
post_args={'name': name, 'idBoard': self.id}, )
return List.from_json(board=self, json_obj=obj)
def all_cards(self):
"""Returns all cards on this board"""
filters = {
'filter': 'all',
'fields': 'all'
}
return self.get_cards(filters)
def open_cards(self):
"""Returns all open cards on this board"""
filters = {
'filter': 'open',
'fields': 'all'
}
return self.get_cards(filters)
def closed_cards(self):
"""Returns all closed cards on this board"""
filters = {
'filter': 'closed',
'fields': 'all'
}
return self.get_cards(filters)
def get_cards(self, filters=None):
"""
:card_filter: filters on card status ('open', 'closed', 'all')
:query_params: dict containing query parameters. Eg. {'fields': 'all'}
More info on card queries:
https://trello.com/docs/api/board/index.html#get-1-boards-board-id-cards
"""
json_obj = self.client.fetch_json(
'/boards/' + self.id + '/cards',
query_params=filters
)
return list([Card.from_json(self, json) for json in json_obj])
def all_members(self):
"""Returns all members on this board"""
filters = {
'filter': 'all',
'fields': 'all'
}
return self.get_members(filters)
def normal_members(self):
"""Returns all normal members on this board"""
filters = {
'filter': 'normal',
'fields': 'all'
}
return self.get_members(filters)
def admin_members(self):
"""Returns all admin members on this board"""
filters = {
'filter': 'admins',
'fields': 'all'
}
return self.get_members(filters)
def owner_members(self):
"""Returns all owner members on this board"""
filters = {
'filter': 'owners',
'fields': 'all'
}
return self.get_members(filters)
def get_members(self, filters=None):
json_obj = self.client.fetch_json(
'/boards/' + self.id + '/members',
query_params=filters)
members = list()
for obj in json_obj:
m = Member(self.client, obj['id'])
m.status = obj['status'].encode('utf-8')
m.id = obj.get('id', '')
m.bio = obj.get('bio', '')
m.url = obj.get('url', '')
m.username = obj['username'].encode('utf-8')
m.full_name = obj['fullName'].encode('utf-8')
m.initials = obj['initials'].encode('utf-8')
members.append(m)
return members
def fetch_actions(self, action_filter):
json_obj = self.client.fetch_json(
'/boards/' + self.id + '/actions',
query_params={'filter': action_filter})
self.actions = json_obj
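# Illustrative usage (not part of this module). It assumes the companion
# TrelloClient class from trello.client and real API credentials; the key,
# token and board id below are placeholders.
#
#   from trello import TrelloClient
#   client = TrelloClient(api_key="<key>", token="<token>")
#   board = Board(client=client, board_id="<board id>")
#   board.fetch()
#   for tlist in board.open_lists():
#       print(tlist)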
|
bsd-3-clause
| -675,035,804,408,850,800
| 29.670886
| 116
| 0.551245
| false
| 3.991763
| false
| false
| false
|
waterblue13/tensor2tensor
|
tensor2tensor/data_generators/ptb.py
|
1
|
5009
|
# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for PTB data-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import tarfile
# Dependency imports
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import registry
import tensorflow as tf
EOS = text_encoder.EOS
PTB_URL = "http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz"
def _read_words(filename):
"""Reads words from a file."""
with tf.gfile.GFile(filename, "r") as f:
if sys.version_info[0] >= 3:
return f.read().replace("\n", " %s " % EOS).split()
else:
return f.read().decode("utf-8").replace("\n", " %s " % EOS).split()
def _build_vocab(filename, vocab_path, vocab_size):
"""Reads a file to build a vocabulary of `vocab_size` most common words.
The vocabulary is sorted by occurrence count and has one word per line.
Originally from:
https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/reader.py
Args:
filename: file to read list of words from.
vocab_path: path where to save the vocabulary.
    vocab_size: size of the vocabulary to generate.
"""
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
words = words[:vocab_size]
with open(vocab_path, "w") as f:
f.write("\n".join(words))
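  # Illustration (hypothetical counts): with counter == {"the": 3, "cat": 2,
  # "bat": 2} and vocab_size == 2, count_pairs sorts to
  # [("the", 3), ("bat", 2), ("cat", 2)] (descending count, ties alphabetical),
  # so the written vocab file contains "the" and "bat", one word per line.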
def _get_token_encoder(vocab_dir, vocab_name, filename):
"""Reads from file and returns a `TokenTextEncoder` for the vocabulary."""
vocab_path = os.path.join(vocab_dir, vocab_name)
if not tf.gfile.Exists(vocab_path):
_build_vocab(filename, vocab_path, 10000)
return text_encoder.TokenTextEncoder(vocab_path)
class PTBProblem(problem.Text2TextProblem):
"""A class for generating PTB data."""
@property
def has_inputs(self):
return False
@property
def target_space_id(self):
if self.is_character_level:
return problem.SpaceID.EN_CHR
return problem.SpaceID.EN_TOK
@property
def num_shards(self):
return 10
@property
def vocab_name(self):
return "vocab.lmptb_10k"
@property
def use_subword_tokenizer(self):
return False
@property
def targeted_vocab_size(self):
return 10000
def generator(self, data_dir, tmp_dir, train):
filename = os.path.basename(PTB_URL)
compressed_filepath = generator_utils.maybe_download(
tmp_dir, filename, PTB_URL)
ptb_files = []
ptb_char_files = []
with tarfile.open(compressed_filepath, "r:gz") as tgz:
files = []
# Selecting only relevant files.
for m in tgz.getmembers():
if "ptb" in m.name and ".txt" in m.name:
if "char" in m.name:
ptb_char_files += [m.name]
else:
ptb_files += [m.name]
files += [m]
tgz.extractall(tmp_dir, members=files)
if self.is_character_level:
files = ptb_char_files
else:
files = ptb_files
train_file, valid_file = None, None
for filename in files:
if "train" in filename:
train_file = os.path.join(tmp_dir, filename)
elif "valid" in filename:
valid_file = os.path.join(tmp_dir, filename)
assert train_file, "Training file not found"
assert valid_file, "Validation file not found"
if self.is_character_level:
encoder = text_encoder.ByteTextEncoder()
else:
encoder = _get_token_encoder(data_dir, self.vocab_file, train_file)
if train:
return self._generator(train_file, encoder)
return self._generator(valid_file, encoder)
def _generator(self, filename, encoder):
with tf.gfile.GFile(filename, "r") as f:
for line in f:
line = " ".join(line.replace("\n", " %s " % EOS).split())
tok = encoder.encode(line)
if tok:
yield {"inputs": [0], "targets": tok}
@registry.register_problem
class LanguagemodelPtb10k(PTBProblem):
"""A class for generating PTB data, 10k vocab."""
@property
def is_character_level(self):
return False
@registry.register_problem
class LanguagemodelPtbCharacters(PTBProblem):
"""A class for generating PTB data, character-level."""
@property
def is_character_level(self):
return True
|
apache-2.0
| 6,008,215,858,963,581,000
| 27.622857
| 79
| 0.678978
| false
| 3.495464
| false
| false
| false
|
trabucayre/gnuradio
|
grc/core/blocks/dummy.py
|
1
|
1190
|
# Copyright 2016 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
from . import Block, register_build_in
from ._build import build_params
@register_build_in
class DummyBlock(Block):
is_dummy_block = True
label = 'Missing Block'
key = '_dummy'
def __init__(self, parent, missing_block_id, parameters, **_):
self.key = missing_block_id
        self.parameters_data = build_params([], False, False, self.flags, self.key)
super(DummyBlock, self).__init__(parent=parent)
param_factory = self.parent_platform.make_param
for param_id in parameters:
self.params.setdefault(param_id, param_factory(parent=self, id=param_id, dtype='string'))
def is_valid(self):
return False
@property
def enabled(self):
return False
def add_missing_port(self, port_id, direction):
port = self.parent_platform.make_port(
parent=self, direction=direction, id=port_id, name='?', dtype='',
)
if port.is_source:
self.sources.append(port)
else:
self.sinks.append(port)
return port
|
gpl-3.0
| 8,029,031,467,999,280,000
| 26.045455
| 101
| 0.626891
| false
| 3.695652
| false
| false
| false
|
sagiss/sardana
|
test/test_ctrl/WaterPapCtrl_stat1.py
|
1
|
4673
|
import PyTango
import socket
import MotorController
class IcePapController(MotorController.MotorController):
MaxDevice = 6
def __init__(self,inst,props):
print "PYTHON -> IcePapController ctor for instance",inst
MotorController.MotorController.__init__(self,inst,props)
self.nb_call = 0;
self.socket_connected = False;
self.db = PyTango.Database()
self.ct_name = "IcePapController/" + self.inst_name
#
# Get controller properties
#
prop_list = ['host','port','timeout']
prop = self.db.get_property(self.ct_name,prop_list)
if len(prop["host"]) != 0:
self.host = prop["host"][0]
else:
print "Property host not defined for controller",self.ct_name
self.host = "nada"
if len(prop["port"]) != 0:
self.port = int(prop["port"][0])
else:
print "Property port not defined for controller",self.ct_name
self.port = 0
if len(prop["timeout"]) != 0:
self.timeout = int(prop["timeout"][0])
else:
print "Property timeout not defined for controller",self.ct_name
self.timeout = 3
#
# Connect to the icepap
#
print "PYTHON -> IcePap on",self.host," and port",self.port," with timeout = ",self.timeout
# self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self.sock.settimeout(self.timeout)
# self.sock.connect(("icepap", self.port))
# self.socket_connected = True
print "PYTHON -> Connected to", self.host, " on port", self.port
#
# Check that the Icepap is OK
#
# ans = self.IceWriteRead("?ID")
def AddDevice(self,axis):
print "PYTHON -> IcePapController/",self.inst_name,": In AddDevice method for axis",axis
# raise RuntimeError,"Hola la la"
def DeleteDevice(self,axis):
print "PYTHON -> IcePapController/",self.inst_name,": In DeleteDevice method for axis",axis
def StateOne(self,axis):
print "PYTHON -> IcePapController/",self.inst_name,": In StateOne method for axis",axis
tup = (PyTango.DevState.FAULT,0,"Hola tio")
return tup
def PreReadAll(self):
print "PYTHON -> IcePapController/",self.inst_name,": In PreReadAll method"
def PreReadOne(self,axis):
print "PYTHON -> IcePapController/",self.inst_name,": In PreReadOne method for axis",axis
def ReadAll(self):
print "PYTHON -> IcePapController/",self.inst_name,": In ReadAll method"
def ReadOne(self,axis):
print "PYTHON -> IcePapController/",self.inst_name,": In ReadOne method for axis",axis
return 123
def PreStartAll(self):
print "PYTHON -> IcePapController/",self.inst_name,": In PreStartAll method"
def PreStartOne(self,axis,pos):
print "PYTHON -> IcePapController/",self.inst_name,": In PreStartOne method for axis",axis," with pos",pos
return True
def StartOne(self,axis,pos):
print "PYTHON -> IcePapController/",self.inst_name,": In StartOne method for axis",axis," with pos",pos
def StartAll(self):
print "PYTHON -> IcePapController/",self.inst_name,": In StartAll method"
def SetPar(self,axis,name,value):
print "PYTHON -> IcePapController/",self.inst_name,": In SetPar method for axis",axis," name=",name," value=",value
def GetPar(self,axis,name):
print "PYTHON -> IcePapController/",self.inst_name,": In GetPar method for axis",axis," name=",name
return 12.34
def IceWrite(self,data):
data = data + "\n"
byteSent = self.sock.send(data)
print "PYTHON -> Sent", byteSent, "bytes to icepap"
def IceWriteRead(self,data):
self.IceWrite(data)
byteReceived = self.sock.recv(1024)
print "PYTHON -> Icepap answered:",byteReceived
return byteReceived
def IceResetFifo(self):
self.IceWrite("fiforst")
def IceCheckError(self,ice_answer):
if (ice_answer.find("ERROR") != -1):
new_ans = self.IceWriteRead("?ERR 1")
print "Error from IcePap =",new_ans
def __del__(self):
print "PYTHON -> IcePapController/",self.inst_name,": Aarrrrrg, I am dying"
#
# Reset IcePap FIFO
#
if (self.socket_connected == True):
print "PYTHON -> Closing connection"
self.IceResetFifo()
self.sock.close()
if __name__ == "__main__":
obj = IcePapController('test')
# obj.AddDevice(2)
# obj.DeleteDevice(2)
|
lgpl-3.0
| -6,052,067,277,264,365,000
| 31.908451
| 123
| 0.598973
| false
| 3.60571
| false
| false
| false
|
jmread/cerebro
|
cerebro/RTF.py
|
1
|
3665
|
from numpy import *
from functions import sigmoid
set_printoptions(precision=4)
class RTF():
'''
Recurrent Basis/Transformation Function
---------------------------------------
Turn x into \phi in a recurrent manner.
'''
W_hh = None
W_ih = None
z = None
def __init__(self, N_i, N_h, f=sigmoid, density=0.1):
'''
'''
self.f = f # non-linearity
self.N_i = N_i # inputs
# Generate nodes
self.z = zeros(N_h) # nodes
self.z[0] = 1. # output bias node
# Generate random weights
self.W_ih = random.randn(N_i,N_h-1) * 1.0 * (random.rand(N_i,N_h-1) <= density)
self.W_hh = random.randn(N_h-1,N_h-1) * 1.0 * (random.rand(N_h-1,N_h-1) <= density)
# Calculate the eigenvectors (V) of W_hh
V,U = linalg.eig(self.W_hh)
# Check that we won't be dividing by 0
if max(absolute(V)) <= 0.:
V = V + 0.01
# Scale the initial weights to a spectral radius of 1.
self.W_hh = self.W_hh / max(absolute(V))
#self.b_ih = random.randn(N_h-1) * 0.1
def store_y(self,y):
print "we can store y (the PREVIOUS output) so as to use it in the transformamtion"
def phi(self,x):
#print "+++++++++++"
#print self.W_hh.shape
#print self.W_ih.shape
##print self.b_ih.shape
#print x.shape
#print self.z.shape
#print "==========="
self.z[1:] = self.f( dot(self.W_hh, self.z[1:]) + dot(self.W_ih.T, x) ) #self.b_ih + <--- I don't think bias is needed for ESN??
return self.z
def reset(self):
self.z = self.z * 0.
self.z[0] = 1.
class RTFv2(RTF):
'''
Like RTF, but includes (@TODO)
- output feedback loop
- regularization (noise to the input)
            - efficient sparse solution (each node is connected to exactly N other nodes) -- similarly to Markov Chain code for Jaakko's seminar course.
'''
W_oh = None
y = None
v = None
def __init__(self, N_i, N_h, N_o, f=sigmoid, density=0.1, state_noise=0.01):
RTF.__init__(self,N_i,N_h,f,density)
self.N_o = N_o # outputs
self.W_oh = random.randn(N_o,N_h-1) * 1.0 * (random.rand(N_o,N_h-1) <= density) # NEW
self.v = state_noise
def store_y(self,y):
self.y = y
def phi(self,x):
self.z[0:-1] = self.f( dot(self.W_hh, self.z[0:-1]) + dot(self.W_ih.T, x) + dot(self.W_oh.T, self.y)) + random.randn(len(self.z)-1) * self.v
return self.z
def demo():
D = 2
H = 10
N = 100
rtf = RTF(D,H,f=sigmoid,density=0.2)
#print rtf.W
X = random.randn(N,D) #(random.rand(N,D) > 0.5) * 1.
X[:,0] = 1.
X[10:20,:] = 0.
X[40:60,:] = 0.
X[80:100,:] = 0.
Z = zeros((N,H))
for i in range(N):
Z[i] = rtf.phi(X[i])
import matplotlib
matplotlib.use('Qt4Agg')
from matplotlib.pyplot import *
fig = figure()
ax = fig.add_subplot(111)
ax.set_xlim([0,N])
ax.set_ylim([-0.1,1.1])
lines = [None for i in range(H+D)]
for j in range(D):
lines[j], = ax.plot([0,0],"k:",label=""+str(j),linewidth=2)
for j in range(D,H+D):
lines[j], = ax.plot([0,0],label=""+str(j),linewidth=2)
ion()
for lim in range(1,N):
for j in range(D):
lines[j].set_data(range(0,lim),X[0:lim,j])
for j in range(H):
            lines[D+j].set_data(range(0,lim),Z[0:lim,j])
pause(0.1)
grid(True)
legend()
show()
ioff()
if __name__ == '__main__':
demo()
|
gpl-3.0
| -5,572,325,840,859,599,000
| 27.192308
| 151
| 0.505048
| false
| 2.870008
| false
| false
| false
|
benauthor/pykafka
|
pykafka/test/utils.py
|
1
|
1395
|
import time
import os
from pykafka.test.kafka_instance import KafkaInstance, KafkaConnection
def get_cluster():
"""Gets a Kafka cluster for testing, using one already running is possible.
An already-running cluster is determined by environment variables:
BROKERS, ZOOKEEPER, KAFKA_BIN. This is used primarily to speed up tests
in our Travis-CI environment.
"""
if os.environ.get('BROKERS', None) and \
os.environ.get('ZOOKEEPER', None) and \
os.environ.get('KAFKA_BIN', None):
# Broker is already running. Use that.
return KafkaConnection(os.environ['KAFKA_BIN'],
os.environ['BROKERS'],
os.environ['ZOOKEEPER'])
else:
return KafkaInstance(num_instances=3)
def stop_cluster(cluster):
"""Stop a created cluster, or merely flush a pre-existing one."""
if isinstance(cluster, KafkaInstance):
cluster.terminate()
else:
cluster.flush()
def retry(assertion_callable, retry_time=10, wait_between_tries=0.1, exception_to_retry=AssertionError):
"""Retry assertion callable in a loop"""
start = time.time()
while True:
try:
return assertion_callable()
except exception_to_retry as e:
if time.time() - start >= retry_time:
raise e
time.sleep(wait_between_tries)
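if __name__ == "__main__":
    # Minimal, self-contained illustration (not used by the test suite):
    # retry() keeps calling the assertion until it stops raising
    # AssertionError or until retry_time elapses.
    state = {"calls": 0}
    def _eventually_succeeds():
        state["calls"] += 1
        assert state["calls"] >= 3, "not ready yet"
        return state["calls"]
    print(retry(_eventually_succeeds, retry_time=2, wait_between_tries=0.01))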
|
apache-2.0
| -317,677,155,204,829,200
| 32.214286
| 104
| 0.630824
| false
| 3.896648
| false
| false
| false
|
ESA-VirES/eoxserver-magnetism
|
eoxsmagnetism/ows/wms/capabilitiesrenderer.py
|
1
|
3134
|
#-------------------------------------------------------------------------------
# $Id$
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2011 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import logging
from eoxserver.core import Component, implements
from eoxserver.core.config import get_eoxserver_config
from eoxserver.contrib import mapserver as ms
from eoxserver.resources.coverages.crss import CRSsConfigReader
from eoxserver.services.result import (
result_set_from_raw_data, get_content_type
)
from eoxserver.services.exceptions import RenderException
from eoxserver.services.ows.wms.exceptions import InvalidCRS, InvalidFormat
from eoxserver.services.ows.wms.interfaces import (
WMSCapabilitiesRendererInterface
)
logger = logging.getLogger(__name__)
class MapServerCapabilitiesRenderer(Component):
""" Base class for various WMS render components using MapServer.
"""
implements(WMSCapabilitiesRendererInterface)
def render(self):
mapfile_path = get_eoxserver_config().get("wmm", "mapfile")
map_ = ms.mapObj(mapfile_path) #TODO: path to map
map_.setMetaData("ows_enable_request", "*")
map_.setProjection("EPSG:4326")
map_.imagecolor.setRGB(0, 0, 0)
# set supported CRSs
decoder = CRSsConfigReader(get_eoxserver_config())
crss_string = " ".join(
map(lambda crs: "EPSG:%d" % crs, decoder.supported_crss_wms)
)
map_.setMetaData("ows_srs", crss_string)
map_.setMetaData("wms_srs", crss_string)
ms_request = ms.create_request((
("service", "WMS"),
("version", "1.3.0"),
("request", "GetCapabilities"),
))
raw_result = map_.dispatch(ms_request)
result = result_set_from_raw_data(raw_result)
return result, get_content_type(result)
|
mit
| 4,156,629,531,796,532,700
| 38.670886
| 80
| 0.659221
| false
| 4.140026
| true
| false
| false
|
psiwczak/openstack
|
nova/db/sqlalchemy/models.py
|
1
|
38288
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for nova data.
"""
from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, BigInteger, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import ForeignKeyConstraint
from nova.db.sqlalchemy.session import get_session
from nova import exception
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
BASE = declarative_base()
class NovaBase(object):
"""Base class for Nova Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
__table_initialized__ = False
created_at = Column(DateTime, default=utils.utcnow)
updated_at = Column(DateTime, onupdate=utils.utcnow)
deleted_at = Column(DateTime)
deleted = Column(Boolean, default=False)
metadata = None
def save(self, session=None):
"""Save this object."""
if not session:
session = get_session()
session.add(self)
try:
session.flush()
except IntegrityError, e:
if str(e).endswith('is not unique'):
raise exception.Duplicate(str(e))
else:
raise
def delete(self, session=None):
"""Delete this object."""
self.deleted = True
self.deleted_at = utils.utcnow()
self.save(session=session)
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
def __iter__(self):
self._i = iter(object_mapper(self).columns)
return self
def next(self):
n = self._i.next().name
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict"""
for k, v in values.iteritems():
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins."""
local = dict(self)
joined = dict([(k, v) for k, v in self.__dict__.iteritems()
if not k[0] == '_'])
local.update(joined)
return local.iteritems()
class Service(BASE, NovaBase):
"""Represents a running service on a host."""
__tablename__ = 'services'
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
binary = Column(String(255))
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
availability_zone = Column(String(255), default='nova')
class ComputeNode(BASE, NovaBase):
"""Represents a running compute service on a host."""
__tablename__ = 'compute_nodes'
id = Column(Integer, primary_key=True)
service_id = Column(Integer, ForeignKey('services.id'), nullable=True)
service = relationship(Service,
backref=backref('compute_node'),
foreign_keys=service_id,
primaryjoin='and_('
'ComputeNode.service_id == Service.id,'
'ComputeNode.deleted == False)')
vcpus = Column(Integer)
memory_mb = Column(Integer)
local_gb = Column(Integer)
vcpus_used = Column(Integer)
memory_mb_used = Column(Integer)
local_gb_used = Column(Integer)
hypervisor_type = Column(Text)
hypervisor_version = Column(Integer)
hypervisor_hostname = Column(String(255))
# Free Ram, amount of activity (resize, migration, boot, etc) and
# the number of running VM's are a good starting point for what's
# important when making scheduling decisions.
#
# NOTE(sandy): We'll need to make this extensible for other schedulers.
free_ram_mb = Column(Integer)
free_disk_gb = Column(Integer)
current_workload = Column(Integer)
running_vms = Column(Integer)
# Note(masumotok): Expected Strings example:
#
# '{"arch":"x86_64",
# "model":"Nehalem",
# "topology":{"sockets":1, "threads":2, "cores":3},
# "features":["tdtscp", "xtpr"]}'
#
# Points are "json translatable" and it must have all dictionary keys
# above, since it is copied from <cpu> tag of getCapabilities()
# (See libvirt.virtConnection).
cpu_info = Column(Text, nullable=True)
disk_available_least = Column(Integer)
class Certificate(BASE, NovaBase):
"""Represents a an x509 certificate"""
__tablename__ = 'certificates'
id = Column(Integer, primary_key=True)
user_id = Column(String(255))
project_id = Column(String(255))
file_name = Column(String(255))
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
injected_files = []
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
try:
base_name = FLAGS.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
for key, value in self.iteritems():
# prevent recursion if someone specifies %(name)s
# %(name)s will not be valid.
if key == 'name':
continue
info[key] = value
try:
base_name = FLAGS.instance_name_template % info
except KeyError:
base_name = self.uuid
if getattr(self, '_rescue', False):
base_name += "-rescue"
return base_name
user_id = Column(String(255))
project_id = Column(String(255))
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
server_name = Column(String(255))
# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True)
# kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True)
# ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True)
# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id))
# kernel = relationship(Kernel, backref=backref('instances', order_by=id))
# project = relationship(Project, backref=backref('instances', order_by=id))
launch_index = Column(Integer)
key_name = Column(String(255))
key_data = Column(Text)
power_state = Column(Integer)
vm_state = Column(String(255))
task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
hostname = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
# *not* flavor_id
instance_type_id = Column(Integer)
user_data = Column(Text)
reservation_id = Column(String(255))
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
availability_zone = Column(String(255))
# User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
    # To remember on which host an instance booted.
    # An instance may have moved to another host by live migration.
launched_on = Column(Text)
locked = Column(Boolean)
os_type = Column(String(255))
architecture = Column(String(255))
vm_mode = Column(String(255))
uuid = Column(String(36))
root_device_name = Column(String(255))
default_ephemeral_device = Column(String(255), nullable=True)
default_swap_device = Column(String(255), nullable=True)
config_drive = Column(String(255))
# User editable field meant to represent what ip should be used
# to connect to the instance
access_ip_v4 = Column(String(255))
access_ip_v6 = Column(String(255))
auto_disk_config = Column(Boolean())
progress = Column(Integer)
    # EC2 instance_initiated_shutdown_terminate
# True: -> 'terminate'
# False: -> 'stop'
shutdown_terminate = Column(Boolean(), default=True, nullable=False)
# EC2 disable_api_termination
disable_terminate = Column(Boolean(), default=False, nullable=False)
# OpenStack compute cell name
cell_name = Column(String(255))
class InstanceInfoCache(BASE, NovaBase):
"""
Represents a cache of information about an instance
"""
__tablename__ = 'instance_info_caches'
id = Column(Integer, primary_key=True, autoincrement=True)
# text column used for storing a json object of network data for api
network_info = Column(Text)
instance_id = Column(String(36), ForeignKey('instances.uuid'),
nullable=False, unique=True)
instance = relationship(Instance,
backref=backref('info_cache', uselist=False),
foreign_keys=instance_id,
primaryjoin=instance_id == Instance.uuid)
class InstanceTypes(BASE, NovaBase):
"""Represent possible instance_types or flavor of VM offered"""
__tablename__ = "instance_types"
id = Column(Integer, primary_key=True)
name = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
flavorid = Column(String(255))
swap = Column(Integer, nullable=False, default=0)
rxtx_factor = Column(Float, nullable=False, default=1)
vcpu_weight = Column(Integer, nullable=True)
instances = relationship(Instance,
backref=backref('instance_type', uselist=False),
foreign_keys=id,
primaryjoin='and_('
'Instance.instance_type_id == '
'InstanceTypes.id, '
'InstanceTypes.deleted == False)')
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
id = Column(String(36), primary_key=True)
@property
def name(self):
return FLAGS.volume_name_template % self.id
ec2_id = Column(Integer)
user_id = Column(String(255))
project_id = Column(String(255))
snapshot_id = Column(String(36))
host = Column(String(255)) # , ForeignKey('hosts.id'))
size = Column(Integer)
availability_zone = Column(String(255)) # TODO(vish): foreign key?
instance_uuid = Column(String(36))
mountpoint = Column(String(255))
attach_time = Column(String(255)) # TODO(vish): datetime
status = Column(String(255)) # TODO(vish): enum?
attach_status = Column(String(255)) # TODO(vish): enum
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
provider_location = Column(String(255))
provider_auth = Column(String(255))
volume_type_id = Column(Integer)
class VolumeMetadata(BASE, NovaBase):
"""Represents a metadata key/value pair for a volume"""
__tablename__ = 'volume_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
volume = relationship(Volume, backref="volume_metadata",
foreign_keys=volume_id,
primaryjoin='and_('
'VolumeMetadata.volume_id == Volume.id,'
'VolumeMetadata.deleted == False)')
class VolumeTypes(BASE, NovaBase):
"""Represent possible volume_types of volumes offered"""
__tablename__ = "volume_types"
id = Column(Integer, primary_key=True)
name = Column(String(255))
volumes = relationship(Volume,
backref=backref('volume_type', uselist=False),
foreign_keys=id,
primaryjoin='and_('
'Volume.volume_type_id == VolumeTypes.id, '
'VolumeTypes.deleted == False)')
class VolumeTypeExtraSpecs(BASE, NovaBase):
"""Represents additional specs as key/value pairs for a volume_type"""
__tablename__ = 'volume_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
volume_type_id = Column(Integer, ForeignKey('volume_types.id'),
nullable=False)
volume_type = relationship(VolumeTypes, backref="extra_specs",
foreign_keys=volume_type_id,
primaryjoin='and_('
'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
'VolumeTypeExtraSpecs.deleted == False)')
class Quota(BASE, NovaBase):
"""Represents a single quota override for a project.
If there is no row for a given project id and resource, then the
default for the quota class is used. If there is no row for a
given quota class and resource, then the default for the
deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
id = Column(Integer, primary_key=True)
project_id = Column(String(255), index=True)
resource = Column(String(255))
hard_limit = Column(Integer, nullable=True)
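# Illustrative sketch (an assumption drawn only from the docstring above, not an
# authoritative description of the quota driver): a lookup for
# (project_id='p1', resource='instances') would fall back as
#
#   quotas row  ->  quota_classes row  ->  deployment default
#
# and a NULL hard_limit at whichever level matches means "unlimited".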
class QuotaClass(BASE, NovaBase):
"""Represents a single quota override for a quota class.
If there is no row for a given quota class and resource, then the
default for the deployment is used. If the row is present but the
hard limit is Null, then the resource is unlimited.
"""
__tablename__ = 'quota_classes'
id = Column(Integer, primary_key=True)
class_name = Column(String(255), index=True)
resource = Column(String(255))
hard_limit = Column(Integer, nullable=True)
class Snapshot(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'snapshots'
id = Column(String(36), primary_key=True)
@property
def name(self):
return FLAGS.snapshot_name_template % self.id
@property
def volume_name(self):
return FLAGS.volume_name_template % self.volume_id
user_id = Column(String(255))
project_id = Column(String(255))
volume_id = Column(String(36))
status = Column(String(255))
progress = Column(String(255))
volume_size = Column(Integer)
display_name = Column(String(255))
display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase):
"""Represents block device mapping that is defined by EC2"""
__tablename__ = "block_device_mapping"
id = Column(Integer, primary_key=True, autoincrement=True)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
instance = relationship(Instance,
                            backref=backref('block_device_mapping'),
foreign_keys=instance_uuid,
primaryjoin='and_(BlockDeviceMapping.'
'instance_uuid=='
'Instance.uuid,'
'BlockDeviceMapping.deleted=='
'False)')
device_name = Column(String(255), nullable=False)
    # default=False for compatibility with the existing code.
    # With the EC2 API:
    #   default True for an AMI-specified device,
    #   default False for a device created at any other time.
delete_on_termination = Column(Boolean, default=False)
# for ephemeral device
virtual_name = Column(String(255), nullable=True)
# for snapshot or volume
snapshot_id = Column(String(36), ForeignKey('snapshots.id'))
# outer join
snapshot = relationship(Snapshot,
foreign_keys=snapshot_id)
volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True)
volume = relationship(Volume,
foreign_keys=volume_id)
volume_size = Column(Integer, nullable=True)
# for no device to suppress devices.
no_device = Column(Boolean, nullable=True)
connection_info = Column(Text, nullable=True)
class IscsiTarget(BASE, NovaBase):
"""Represates an iscsi target for a given host"""
__tablename__ = 'iscsi_targets'
__table_args__ = (schema.UniqueConstraint("target_num", "host"),
{'mysql_engine': 'InnoDB'})
id = Column(Integer, primary_key=True)
target_num = Column(Integer)
host = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True)
volume = relationship(Volume,
backref=backref('iscsi_target', uselist=False),
foreign_keys=volume_id,
primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
'IscsiTarget.deleted==False)')
class SecurityGroupInstanceAssociation(BASE, NovaBase):
__tablename__ = 'security_group_instance_association'
id = Column(Integer, primary_key=True)
security_group_id = Column(Integer, ForeignKey('security_groups.id'))
instance_id = Column(Integer, ForeignKey('instances.id'))
class SecurityGroup(BASE, NovaBase):
"""Represents a security group."""
__tablename__ = 'security_groups'
id = Column(Integer, primary_key=True)
name = Column(String(255))
description = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
instances = relationship(Instance,
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
'SecurityGroupInstanceAssociation.security_group_id,'
'SecurityGroupInstanceAssociation.deleted == False,'
'SecurityGroup.deleted == False)',
secondaryjoin='and_('
'SecurityGroupInstanceAssociation.instance_id == Instance.id,'
# (anthony) the condition below shouldn't be necessary now that the
# association is being marked as deleted. However, removing this
# may cause existing deployments to choke, so I'm leaving it
'Instance.deleted == False)',
backref='security_groups')
class SecurityGroupIngressRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'security_group_rules'
id = Column(Integer, primary_key=True)
parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
parent_group = relationship("SecurityGroup", backref="rules",
foreign_keys=parent_group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == False)')
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(String(255))
    # Note: This is not the parent SecurityGroup. It's the SecurityGroup we're
    # granting access to.
group_id = Column(Integer, ForeignKey('security_groups.id'))
grantee_group = relationship("SecurityGroup",
foreign_keys=group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == False)')
class ProviderFirewallRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'provider_fw_rules'
id = Column(Integer, primary_key=True)
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(String(255))
class KeyPair(BASE, NovaBase):
"""Represents a public key pair for ssh."""
__tablename__ = 'key_pairs'
id = Column(Integer, primary_key=True)
name = Column(String(255))
user_id = Column(String(255))
fingerprint = Column(String(255))
public_key = Column(Text)
class Migration(BASE, NovaBase):
"""Represents a running host-to-host migration."""
__tablename__ = 'migrations'
id = Column(Integer, primary_key=True, nullable=False)
# NOTE(tr3buchet): the ____compute variables are instance['host']
source_compute = Column(String(255))
dest_compute = Column(String(255))
# NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
instance_uuid = Column(String(255), ForeignKey('instances.uuid'),
nullable=True)
#TODO(_cerberus_): enum
status = Column(String(255))
class Network(BASE, NovaBase):
"""Represents a network."""
__tablename__ = 'networks'
__table_args__ = (schema.UniqueConstraint("vpn_public_address",
"vpn_public_port"),
{'mysql_engine': 'InnoDB'})
id = Column(Integer, primary_key=True)
label = Column(String(255))
injected = Column(Boolean, default=False)
cidr = Column(String(255), unique=True)
cidr_v6 = Column(String(255), unique=True)
multi_host = Column(Boolean, default=False)
gateway_v6 = Column(String(255))
netmask_v6 = Column(String(255))
netmask = Column(String(255))
bridge = Column(String(255))
bridge_interface = Column(String(255))
gateway = Column(String(255))
broadcast = Column(String(255))
dns1 = Column(String(255))
dns2 = Column(String(255))
vlan = Column(Integer)
vpn_public_address = Column(String(255))
vpn_public_port = Column(Integer)
vpn_private_address = Column(String(255))
dhcp_start = Column(String(255))
rxtx_base = Column(Integer)
project_id = Column(String(255))
priority = Column(Integer)
host = Column(String(255)) # , ForeignKey('hosts.id'))
uuid = Column(String(36))
class VirtualInterface(BASE, NovaBase):
"""Represents a virtual interface on an instance."""
__tablename__ = 'virtual_interfaces'
id = Column(Integer, primary_key=True)
address = Column(String(255), unique=True)
network_id = Column(Integer, nullable=False)
instance_id = Column(Integer, nullable=False)
uuid = Column(String(36))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
"""Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
network_id = Column(Integer, nullable=True)
virtual_interface_id = Column(Integer, nullable=True)
instance_id = Column(Integer, nullable=True)
# associated means that a fixed_ip has its instance_id column set
    # allocated means that a fixed_ip has its virtual_interface_id column set
allocated = Column(Boolean, default=False)
# leased means dhcp bridge has leased the ip
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
host = Column(String(255))
class FloatingIp(BASE, NovaBase):
"""Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
id = Column(Integer, primary_key=True)
address = Column(String(255))
fixed_ip_id = Column(Integer, nullable=True)
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
auto_assigned = Column(Boolean, default=False, nullable=False)
pool = Column(String(255))
interface = Column(String(255))
class AuthToken(BASE, NovaBase):
"""Represents an authorization token for all API transactions.
Fields are a string representing the actual token and a user id for
mapping to the actual user
"""
__tablename__ = 'auth_tokens'
token_hash = Column(String(255), primary_key=True)
user_id = Column(String(255))
server_management_url = Column(String(255))
storage_url = Column(String(255))
cdn_management_url = Column(String(255))
class User(BASE, NovaBase):
"""Represents a user."""
__tablename__ = 'users'
id = Column(String(255), primary_key=True)
name = Column(String(255))
access_key = Column(String(255))
secret_key = Column(String(255))
is_admin = Column(Boolean)
class Project(BASE, NovaBase):
"""Represents a project."""
__tablename__ = 'projects'
id = Column(String(255), primary_key=True)
name = Column(String(255))
description = Column(String(255))
project_manager = Column(String(255), ForeignKey(User.id))
members = relationship(User,
secondary='user_project_association',
backref='projects')
class DNSDomain(BASE, NovaBase):
"""Represents a DNS domain with availability zone or project info."""
__tablename__ = 'dns_domains'
domain = Column(String(512), primary_key=True)
scope = Column(String(255))
availability_zone = Column(String(255))
project_id = Column(String(255))
project = relationship(Project,
primaryjoin=project_id == Project.id,
foreign_keys=[Project.id],
uselist=False)
class UserProjectRoleAssociation(BASE, NovaBase):
__tablename__ = 'user_project_role_association'
user_id = Column(String(255), primary_key=True)
user = relationship(User,
primaryjoin=user_id == User.id,
foreign_keys=[User.id],
uselist=False)
project_id = Column(String(255), primary_key=True)
project = relationship(Project,
primaryjoin=project_id == Project.id,
foreign_keys=[Project.id],
uselist=False)
role = Column(String(255), primary_key=True)
ForeignKeyConstraint(['user_id',
'project_id'],
['user_project_association.user_id',
'user_project_association.project_id'])
class UserRoleAssociation(BASE, NovaBase):
__tablename__ = 'user_role_association'
user_id = Column(String(255), ForeignKey('users.id'), primary_key=True)
user = relationship(User, backref='roles')
role = Column(String(255), primary_key=True)
class UserProjectAssociation(BASE, NovaBase):
__tablename__ = 'user_project_association'
user_id = Column(String(255), ForeignKey(User.id), primary_key=True)
project_id = Column(String(255), ForeignKey(Project.id), primary_key=True)
class ConsolePool(BASE, NovaBase):
"""Represents pool of consoles on the same physical node."""
__tablename__ = 'console_pools'
id = Column(Integer, primary_key=True)
address = Column(String(255))
username = Column(String(255))
password = Column(String(255))
console_type = Column(String(255))
public_hostname = Column(String(255))
host = Column(String(255))
compute_host = Column(String(255))
class Console(BASE, NovaBase):
"""Represents a console session for an instance."""
__tablename__ = 'consoles'
id = Column(Integer, primary_key=True)
instance_name = Column(String(255))
instance_id = Column(Integer)
password = Column(String(255))
port = Column(Integer, nullable=True)
pool_id = Column(Integer, ForeignKey('console_pools.id'))
pool = relationship(ConsolePool, backref=backref('consoles'))
class InstanceMetadata(BASE, NovaBase):
"""Represents a user-provided metadata key/value pair for an instance"""
__tablename__ = 'instance_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
instance = relationship(Instance, backref="metadata",
foreign_keys=instance_id,
primaryjoin='and_('
'InstanceMetadata.instance_id == Instance.id,'
'InstanceMetadata.deleted == False)')
class InstanceSystemMetadata(BASE, NovaBase):
"""Represents a system-owned metadata key/value pair for an instance"""
__tablename__ = 'instance_system_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'),
nullable=False)
primary_join = ('and_(InstanceSystemMetadata.instance_uuid == '
'Instance.uuid, InstanceSystemMetadata.deleted == False)')
instance = relationship(Instance, backref="system_metadata",
foreign_keys=instance_uuid,
primaryjoin=primary_join)
class InstanceTypeExtraSpecs(BASE, NovaBase):
"""Represents additional specs as key/value pairs for an instance_type"""
__tablename__ = 'instance_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
instance_type = relationship(InstanceTypes, backref="extra_specs",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
'InstanceTypeExtraSpecs.deleted == False)')
class Cell(BASE, NovaBase):
"""Represents parent and child cells of this cell."""
__tablename__ = 'cells'
id = Column(Integer, primary_key=True)
name = Column(String(255))
api_url = Column(String(255))
username = Column(String(255))
password = Column(String(255))
weight_offset = Column(Float(), default=0.0)
weight_scale = Column(Float(), default=1.0)
is_parent = Column(Boolean())
rpc_host = Column(String(255))
rpc_port = Column(Integer())
rpc_virtual_host = Column(String(255))
class AggregateHost(BASE, NovaBase):
"""Represents a host that is member of an aggregate."""
__tablename__ = 'aggregate_hosts'
id = Column(Integer, primary_key=True, autoincrement=True)
host = Column(String(255), unique=True)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(BASE, NovaBase):
"""Represents a metadata key/value pair for an aggregate."""
__tablename__ = 'aggregate_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(BASE, NovaBase):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255), unique=True)
operational_state = Column(String(255), nullable=False)
availability_zone = Column(String(255), nullable=False)
_hosts = relationship(AggregateHost,
secondary="aggregate_hosts",
primaryjoin='and_('
'Aggregate.id == AggregateHost.aggregate_id,'
'AggregateHost.deleted == False,'
'Aggregate.deleted == False)',
secondaryjoin='and_('
'AggregateHost.aggregate_id == Aggregate.id, '
'AggregateHost.deleted == False,'
'Aggregate.deleted == False)',
backref='aggregates')
_metadata = relationship(AggregateMetadata,
secondary="aggregate_metadata",
primaryjoin='and_('
'Aggregate.id == AggregateMetadata.aggregate_id,'
'AggregateMetadata.deleted == False,'
'Aggregate.deleted == False)',
secondaryjoin='and_('
'AggregateMetadata.aggregate_id == Aggregate.id, '
'AggregateMetadata.deleted == False,'
'Aggregate.deleted == False)',
backref='aggregates')
@property
def hosts(self):
return [h.host for h in self._hosts]
@property
def metadetails(self):
return dict([(m.key, m.value) for m in self._metadata])
class AgentBuild(BASE, NovaBase):
"""Represents an agent build."""
__tablename__ = 'agent_builds'
id = Column(Integer, primary_key=True)
hypervisor = Column(String(255))
os = Column(String(255))
architecture = Column(String(255))
version = Column(String(255))
url = Column(String(255))
md5hash = Column(String(255))
class BandwidthUsage(BASE, NovaBase):
"""Cache for instance bandwidth usage data pulled from the hypervisor"""
__tablename__ = 'bw_usage_cache'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
mac = Column(String(255), nullable=False)
start_period = Column(DateTime, nullable=False)
last_refreshed = Column(DateTime)
bw_in = Column(BigInteger)
bw_out = Column(BigInteger)
class S3Image(BASE, NovaBase):
"""Compatibility layer for the S3 image service talking to Glance"""
__tablename__ = 's3_images'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase):
"""Compatability layer for the EC2 volume service"""
__tablename__ = 'volume_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase):
"""Compatability layer for the EC2 snapshot service"""
__tablename__ = 'snapshot_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SMFlavors(BASE, NovaBase):
"""Represents a flavor for SM volumes."""
__tablename__ = 'sm_flavors'
id = Column(Integer(), primary_key=True)
label = Column(String(255))
description = Column(String(255))
class SMBackendConf(BASE, NovaBase):
"""Represents the connection to the backend for SM."""
__tablename__ = 'sm_backend_config'
id = Column(Integer(), primary_key=True)
flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False)
sr_uuid = Column(String(255))
sr_type = Column(String(255))
config_params = Column(String(2047))
class SMVolume(BASE, NovaBase):
__tablename__ = 'sm_volume'
id = Column(String(36), ForeignKey(Volume.id), primary_key=True)
backend_id = Column(Integer, ForeignKey('sm_backend_config.id'),
nullable=False)
vdi_uuid = Column(String(255))
class InstanceFault(BASE, NovaBase):
__tablename__ = 'instance_faults'
id = Column(Integer(), primary_key=True, autoincrement=True)
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'),
nullable=False)
code = Column(Integer(), nullable=False)
message = Column(String(255))
details = Column(Text)
def register_models():
"""Register Models and create metadata.
Called from nova.db.sqlalchemy.__init__ as part of loading the driver,
it will never need to be called explicitly elsewhere unless the
connection is lost and needs to be reestablished.
"""
from sqlalchemy import create_engine
models = (AgentBuild,
Aggregate,
AggregateHost,
AggregateMetadata,
AuthToken,
Certificate,
Cell,
Console,
ConsolePool,
FixedIp,
FloatingIp,
Instance,
InstanceFault,
InstanceMetadata,
InstanceTypeExtraSpecs,
InstanceTypes,
IscsiTarget,
Migration,
Network,
Project,
SecurityGroup,
SecurityGroupIngressRule,
SecurityGroupInstanceAssociation,
Service,
SMBackendConf,
SMFlavors,
SMVolume,
User,
Volume,
VolumeMetadata,
VolumeTypeExtraSpecs,
VolumeTypes,
VolumeIdMapping,
SnapshotIdMapping,
)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
|
apache-2.0
| -8,264,634,571,435,047,000
| 34.951174
| 79
| 0.619385
| false
| 4.154063
| false
| false
| false
|
kyamagu/psd2svg
|
src/psd2svg/__main__.py
|
1
|
1695
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import argparse
import logging
import os
from psd2svg import psd2svg
def main():
parser = argparse.ArgumentParser(description='Convert PSD file to SVG')
parser.add_argument(
'input', metavar='INPUT', type=str, help='Input PSD file path or URL')
parser.add_argument(
'output', metavar='PATH', type=str, nargs='?', default='.',
help='Output file or directory. When directory is specified, filename'
' is automatically inferred from input')
parser.add_argument(
'--resource-path', metavar='PATH', type=str, default=None,
help='Resource path relative to output.')
parser.add_argument(
'--rasterizer', metavar='METHOD', default='chromium', type=str,
help='Specify which rasterizer to use. default chromium.')
parser.add_argument(
'--loglevel', metavar='LEVEL', default='WARNING',
help='Logging level, default WARNING')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.loglevel.upper(),
'WARNING'))
prefix, ext = os.path.splitext(args.output)
if ext.lower() in (".png", ".jpg", ".jpeg", ".gif" ".tiff"):
from psd2svg.rasterizer import create_rasterizer
rasterizer = create_rasterizer(args.rasterizer)
svg_file = prefix + ".svg"
psd2svg(args.input, svg_file, resource_path=args.resource_path)
image = rasterizer.rasterize(svg_file)
image.save(args.output)
else:
psd2svg(args.input, args.output, resource_path=args.resource_path)
if __name__ == '__main__':
main()
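# Illustrative invocations (a sketch only, assuming the package is installed so
# that `python -m psd2svg` resolves to this entry point; the output filename
# used when a directory is given is inferred from the input, per the help text):
#
#   python -m psd2svg input.psd out/                              # SVG into out/
#   python -m psd2svg input.psd preview.png --rasterizer chromium # rasterized output
#   python -m psd2svg input.psd out.svg --loglevel INFO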
|
mit
| 7,406,820,582,032,174,000
| 38.418605
| 78
| 0.637168
| false
| 3.826185
| false
| false
| false
|
phockett/ePSproc
|
epsproc/vol/setOptions.py
|
1
|
3675
|
"""
ePSproc vol module setOptions
Functions to read & write default set of plotting options to file.
If run as main:
- Check existing file from passed arg, or in default location (epsproc/vol/plotOptions.json)
- Read file if exists.
- If file is missing, prompt to write defaults to file.
08/08/20 v1, dev. See also set_plot_options_json.ipynb
"""
import json
import pprint
pp = pprint.PrettyPrinter(indent=4)
import sys
import os
import inspect
from pathlib import Path
def setLocalOptions():
optionsLocal = {}
globalSettings = {"note":"Global plot settings, used as defaults. To change for session, overwrite in local dict. To change permanently, overwrite in file plotOptions.json. To reset, use `epsproc/vol/set_plot_options_json.ipynb` or .py.",
"pType":"Abs", "interactive":True, "inline":True, "animate":False,
"isoLevels":6, "isoValsAbs":None, "isoValsPC":None, "isoValsGlobal":True,
"opacity":0.5,
"subplot":False
# "plotter":"" # Set plotter dynamically based on options above...?
}
optionsLocal["global"] = globalSettings
BGplotterSettings = {"addAxis" : True,
"kwargs" : {} # Set empty kwargs dict for passing any other params at run time.
}
optionsLocal["BGplotter"] = BGplotterSettings
return optionsLocal
# def setOptionsFile(optionsFile = None):
def readOptionsFile(optionsFile, verbose = False):
# Set path wrapper in case str was passed.
optionsFile = Path(optionsFile)
if optionsFile.is_file():
# try:
with open(optionsFile) as json_file:
optionsFileJSON = json.load(json_file)
print(f"\n*** Read existing plot options from file {optionsFile} OK.")
if verbose:
print(json.dumps(optionsFileJSON, sort_keys=False, indent=4))
return optionsFileJSON
else:
print(f"\n*** Plot options file {optionsFile} not found, using defaults.")
return setLocalOptions()
def writeOptionsFile(optionsFile, optionsLocal, owFlag = False):
# Set path wrapper in case str was passed.
optionsFile = Path(optionsFile)
print(f"*** Writing plot options to file {optionsFile}")
if optionsFile.is_file():
owFlag = input(f"File {optionsFile} exists, overwrite (y/n)? ")
else:
owFlag = 'y'
with open(optionsFile, 'w') as json_file:
if owFlag == 'y':
json.dump(optionsLocal, json_file, indent=4, sort_keys=False) # Set indent + sort keys for nicer (HF) file output.
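# Illustrative usage sketch (assumptions: the module is importable as
# epsproc.vol.setOptions, inferred from its path, and plotOptions.json sits
# next to this file):
#
#   from epsproc.vol.setOptions import readOptionsFile, writeOptionsFile
#
#   opts = readOptionsFile('plotOptions.json')   # falls back to defaults if missing
#   opts['global']['opacity'] = 0.7
#   writeOptionsFile('plotOptions.json', opts)   # prompts before overwriting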
if __name__ == "__main__":
# Check passed args
    if len(sys.argv) > 1:
optionsFile = Path(sys.argv[1])
else:
optionsFile = None
# Set default path based on file location - may not be robust?
# From https://stackoverflow.com/a/12154601
if optionsFile is None:
optionsFile = Path((os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))),'plotOptions.json')
writeFlag = True
# Read file
optionsLocal = setLocalOptions()
# if optionsFile.is_file():
# with open(optionsFile) as json_file:
# optionsFileJSON = json.load(json_file)
#
# print(f"*** Read existing file {optionsFile} OK, contents:")
# print(json.dumps(optionsFileJSON, sort_keys=False, indent=4))
#
# else:
# print(f"*** File {optionsFile} not found.")
if writeFlag:
ow = input("Write defaults to {optionsFile} (y/n)? ")
if ow == 'y':
writeOptionsFile(optionsFile, setLocalOptions())
|
gpl-3.0
| -1,978,875,038,658,223,400
| 29.122951
| 242
| 0.626939
| false
| 3.79257
| false
| false
| false
|
jralls/gramps
|
gramps/gui/views/treemodels/flatbasemodel.py
|
1
|
32063
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This module provides the flat treemodel that is used for all flat treeviews.
For performance, Gramps does not use Gtk.TreeStore, as that would mean keeping
the entire database table of an object in memory.
Instead, it suffices to keep in memory the sortkey and the matching handle,
as well as a map of sortkey,handle to treeview path, and vice versa.
For a flat view, the index of sortkey,handle will be the path, so it suffices
to keep in memory a map that given a sortkey,handle returns the path.
We need to be able to insert/delete/update objects, and initially the handle is
all we know. Because (sortkey, handle) is uniquely determined by the handle, we
keep a map of handle to path instead of a map of (sortkey, handle) to path.
As a user selects another column to sort on, the sortkey must be rebuilt and
the map remade.
The class FlatNodeMap keeps a sortkeyhandle list with (sortkey, handle) entries,
and a handle2path dictionary. As the Map is flat, the index in sortkeyhandle
corresponds to the path.
The class FlatBaseModel, is the base class for all flat treeview models.
It keeps a FlatNodeMap, and obtains data from database as needed
"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
import logging
import bisect
import time
_LOG = logging.getLogger(".gui.basetreemodel")
#-------------------------------------------------------------------------
#
# GNOME/GTK modules
#
#-------------------------------------------------------------------------
from gi.repository import GObject
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.filters import SearchFilter, ExactSearchFilter
from gramps.gen.const import GRAMPS_LOCALE as glocale
from .basemodel import BaseModel
#-------------------------------------------------------------------------
#
# FlatNodeMap
#
#-------------------------------------------------------------------------
UEMPTY = ""
class FlatNodeMap:
"""
A NodeMap for a flat treeview. In such a TreeView, the paths possible are
0, 1, 2, ..., n-1, where n is the number of items to show. For the model
it is needed to keep the Path to Iter mappings of the TreeView in memory
The order of what is shown is based on the unique key: (sortkey, handle)
Naming:
* srtkey : key on which to sort
* hndl : handle of the object, makes it possible to retrieve the
object from the database. As handle is unique, it is used
in the iter for the TreeView
* index : the index in the internal lists. When a view is in reverse,
this is not kept physically, but instead via an offset
* path : integer path in the TreeView. This will be index if view is
ascending, but will begin at back of list if view shows
the entries in reverse.
* index2hndl : list of (srtkey, hndl) tuples. The index gives the
(srtkey, hndl) it belongs to.
This normally is only a part of all possible data
* hndl2index : dictionary of *hndl: index* values
The implementation provides a list of (srtkey, hndl) of which the index is
the path, and a dictionary mapping hndl to index.
To obtain index given a path, method real_index() is available
..Note: glocale.sort_key is applied to the underlying sort key,
so as to have localized sort
"""
def __init__(self):
"""
Create a new instance.
"""
self._index2hndl = []
self._fullhndl = self._index2hndl
self._identical = True
self._hndl2index = {}
self._reverse = False
self.__corr = (0, 1)
#We create a stamp to recognize invalid iterators. From the docs:
#Set the stamp to be equal to your model's stamp, to mark the
#iterator as valid. When your model's structure changes, you should
#increment your model's stamp to mark all older iterators as invalid.
#They will be recognised as invalid because they will then have an
#incorrect stamp.
self.stamp = 0
def destroy(self):
"""
Unset all elements that can prevent garbage collection
"""
self._index2hndl = None
self._fullhndl = None
self._hndl2index = None
def set_path_map(self, index2hndllist, fullhndllist, identical=True,
reverse=False):
"""
This is the core method to set up the FlatNodeMap
Input is a list of (srtkey, handle), of which the index is the path
Calling this method sets the index2hndllist, and creates the hndl2index
map.
fullhndllist is the entire list of (srtkey, handle) that is possible,
normally index2hndllist is only part of this list as determined by
filtering. To avoid memory, if both lists are the same, pass only one
list twice and set identical to True.
Reverse sets up how the path is determined from the index. If True the
first index is the last path
:param index2hndllist: the ascending sorted (sortkey, handle) values
as they will appear in the flat treeview. This often is
a subset of all possible data.
:type index2hndllist: a list of (sortkey, handle) tuples
        :param fullhndllist: the list of all possible ascending sorted
(sortkey, handle) values as they will appear in the flat
treeview if all data is shown.
        :type fullhndllist: a list of (sortkey, handle) tuples
:param identical: identify if index2hndllist and fullhndllist are the
same list, so only one is kept in memory.
:type identical: bool
"""
self.stamp += 1
self._index2hndl = index2hndllist
self._hndl2index = {}
self._identical = identical
self._fullhndl = self._index2hndl if identical else fullhndllist
self._reverse = reverse
self.reverse_order()
def full_srtkey_hndl_map(self):
"""
The list of all possible (sortkey, handle) tuples.
This is stored in FlatNodeMap so that it would not be needed to
reiterate over the database to obtain all posibilities.
"""
return self._fullhndl
def reverse_order(self):
"""
        This method keeps the index2hndl map, but sets up the index in
        reverse order. If the hndl2index map does not exist yet, it is created
        in the ascending order as given in index2hndl.
        The result is always a hndl2index map which is correct, whether in
        ascending or in reverse order.
"""
if self._hndl2index:
#if hndl2index is build already, invert order, otherwise keep
# requested order
self._reverse = not self._reverse
if self._reverse:
self.__corr = (len(self._index2hndl) - 1, -1)
else:
self.__corr = (0, 1)
if not self._hndl2index:
self._hndl2index = dict((key[1], index)
for index, key in enumerate(self._index2hndl))
def real_path(self, index):
"""
Given the index in the maps, return the real path.
If reverse = False, then index is path, otherwise however, the
path must be calculated so that the last index is the first path
"""
return self.__corr[0] + self.__corr[1] * index
def real_index(self, path):
"""
Given the path in the view, return the real index.
If reverse = False, then path is index, otherwise however, the
index must be calculated so that the last index is the first path
"""
return self.__corr[0] + self.__corr[1] * path
def clear_map(self):
"""
Clears out the index2hndl and the hndl2index
"""
self._index2hndl = []
self._hndl2index = {}
self._fullhndl = self._index2hndl
self._identical = True
def get_path(self, iter):
"""
Return the path from the passed iter.
:param handle: the key of the object for which the path in the treeview
is needed
:type handle: an object handle
:Returns: the path, or None if handle does not link to a path
"""
index = iter.user_data
##GTK3: user data may only be an integer, we store the index
##PROBLEM: pygobject 3.8 stores 0 as None, we need to correct
## when using user_data for that!
##upstream bug: https://bugzilla.gnome.org/show_bug.cgi?id=698366
if index is None:
index = 0
return Gtk.TreePath((self.real_path(index),))
def get_path_from_handle(self, handle):
"""
Return the path from the passed handle
:param handle: the key of the object for which the path in the treeview
is needed
:type handle: an object handle
:Returns: the path, or None if handle does not link to a path
"""
index = self._hndl2index.get(handle)
if index is None:
return None
return Gtk.TreePath((self.real_path(index),))
def get_sortkey(self, handle):
"""
Return the sortkey used for the passed handle.
:param handle: the key of the object for which the sortkey
is needed
:type handle: an object handle
:Returns: the sortkey, or None if handle is not present
"""
index = self._hndl2index.get(handle)
return None if index is None else self._index2hndl[index][0]
def new_iter(self, handle):
"""
Return a new iter containing the handle
"""
iter = Gtk.TreeIter()
iter.stamp = self.stamp
##GTK3: user data may only be an integer, we store the index
##PROBLEM: pygobject 3.8 stores 0 as None, we need to correct
## when using user_data for that!
##upstream bug: https://bugzilla.gnome.org/show_bug.cgi?id=698366
iter.user_data = self._hndl2index[handle]
return iter
def get_iter(self, path):
"""
Return an iter from the path. The path is assumed to be an integer.
This is accomplished by indexing into the index2hndl
iters are always created afresh
Will raise IndexError if the maps are not filled yet, or if it is empty.
Caller should take care of this if it allows calling with invalid path
:param path: path as it appears in the treeview
:type path: integer
"""
iter = self.new_iter(self._index2hndl[self.real_index(path)][1])
return iter
def get_handle(self, path):
"""
Return the handle from the path. The path is assumed to be an integer.
This is accomplished by indexing into the index2hndl
Will raise IndexError if the maps are not filled yet, or if it is empty.
Caller should take care of this if it allows calling with invalid path
:param path: path as it appears in the treeview
:type path: integer
:return handle: unicode form of the handle
"""
return self._index2hndl[self.real_index(path)][1]
def iter_next(self, iter):
"""
        Increments the iter by finding the index associated with the iter and
        adding or subtracting one.
False is returned if no next handle
:param iter: Gtk.TreeModel iterator
:param type: Gtk.TreeIter
"""
index = iter.user_data
if index is None:
##GTK3: user data may only be an integer, we store the index
##PROBLEM: pygobject 3.8 stores 0 as None, we need to correct
## when using user_data for that!
##upstream bug: https://bugzilla.gnome.org/show_bug.cgi?id=698366
index = 0
if self._reverse :
index -= 1
if index < 0:
# -1 does not raise IndexError, as -1 is last element. Catch.
return False
else:
index += 1
if index >= len(self._index2hndl):
return False
iter.user_data = index
return True
def get_first_iter(self):
"""
        Return the iter for the first entry to be shown (corresponding to path 0).
Will raise IndexError if the maps are not filled yet, or if it is empty.
Caller should take care of this if it allows calling with invalid path
"""
return self.get_iter(0)
def __len__(self):
"""
Return the number of entries in the map.
"""
return len(self._index2hndl)
def max_rows(self):
"""
Return maximum number of entries that might be present in the
map
"""
return len(self._fullhndl)
def insert(self, srtkey_hndl, allkeyonly=False):
"""
Insert a node. Given is a tuple (sortkey, handle), and this is added
in the correct place, while the hndl2index map is updated.
Returns the path of the inserted row
:param srtkey_hndl: the (sortkey, handle) tuple that must be inserted
:type srtkey_hndl: sortkey key already transformed by self.sort_func, object handle
:Returns: path of the row inserted in the treeview
:Returns type: Gtk.TreePath or None
"""
if srtkey_hndl[1] in self._hndl2index:
print(('WARNING: Attempt to add row twice to the model (%s)' %
srtkey_hndl[1]))
return
if not self._identical:
bisect.insort_left(self._fullhndl, srtkey_hndl)
if allkeyonly:
#key is not part of the view
return None
insert_pos = bisect.bisect_left(self._index2hndl, srtkey_hndl)
self._index2hndl.insert(insert_pos, srtkey_hndl)
#make sure the index map is updated
for srt_key,hndl in self._index2hndl[insert_pos+1:]:
self._hndl2index[hndl] += 1
self._hndl2index[srtkey_hndl[1]] = insert_pos
#update self.__corr so it remains correct
if self._reverse:
self.__corr = (len(self._index2hndl) - 1, -1)
return Gtk.TreePath((self.real_path(insert_pos),))
def delete(self, srtkey_hndl):
"""
Delete the row with the given (sortkey, handle).
This then rebuilds the hndl2index, subtracting one from each item
greater than the deleted index.
path of deleted row is returned
If handle is not present, None is returned
:param srtkey_hndl: the (sortkey, handle) tuple that must be inserted
:Returns: path of the row deleted from the treeview
:Returns type: Gtk.TreePath or None
"""
#remove it from the full list first
if not self._identical:
del_pos = bisect.bisect_left(self._fullhndl, srtkey_hndl)
#check that indeed this is correct:
if not self._fullhndl[del_pos][1] == srtkey_hndl[1]:
raise KeyError('Handle %s not in list of all handles' % \
srtkey_hndl[1])
del self._fullhndl[del_pos]
#now remove it from the index maps
handle = srtkey_hndl[1]
try:
index = self._hndl2index[handle]
except KeyError:
# key not present in the treeview
return None
del self._index2hndl[index]
del self._hndl2index[handle]
#update self.__corr so it remains correct
delpath = self.real_path(index)
if self._reverse:
self.__corr = (len(self._index2hndl) - 1, -1)
#update the handle2path map so it remains correct
for srt_key,hndl in self._index2hndl[index:]:
self._hndl2index[hndl] -= 1
return Gtk.TreePath((delpath,))
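# Minimal illustrative sketch of FlatNodeMap on its own (an assumption: it is
# normally driven by FlatBaseModel below; the handles and sort keys here are
# made up):
#
#   nmap = FlatNodeMap()
#   keys = [('adams', 'h1'), ('brown', 'h2'), ('clark', 'h3')]   # ascending
#   nmap.set_path_map(keys, keys, identical=True, reverse=False)
#   nmap.get_path_from_handle('h2')    # Gtk.TreePath pointing at row 1
#   nmap.get_handle(0)                 # 'h1'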
#-------------------------------------------------------------------------
#
# FlatBaseModel
#
#-------------------------------------------------------------------------
class FlatBaseModel(GObject.GObject, Gtk.TreeModel, BaseModel):
"""
The base class for all flat treeview models.
It keeps a FlatNodeMap, and obtains data from database as needed
..Note: glocale.sort_key is applied to the underlying sort key,
so as to have localized sort
"""
def __init__(self, db, uistate, scol=0, order=Gtk.SortType.ASCENDING,
search=None, skip=set(),
sort_map=None):
cput = time.clock()
GObject.GObject.__init__(self)
BaseModel.__init__(self)
#inheriting classes must set self.map to obtain the data
self.prev_handle = None
self.prev_data = None
#GTK3 We leak ref, yes??
#self.set_property("leak_references", False)
self.db = db
#normally sort on first column, so scol=0
if sort_map:
#sort_map is the stored order of the columns and if they are
#enabled or not. We need to store on scol of that map
self.sort_map = [ f for f in sort_map if f[0]]
#we need the model col, that corresponds with scol
col = self.sort_map[scol][1]
else:
col = scol
# get the function that maps data to sort_keys
self.sort_func = lambda x: glocale.sort_key(self.smap[col](x))
self.sort_col = scol
self.skip = skip
self._in_build = False
self.node_map = FlatNodeMap()
self.set_search(search)
self._reverse = (order == Gtk.SortType.DESCENDING)
self.rebuild_data()
_LOG.debug(self.__class__.__name__ + ' __init__ ' +
str(time.clock() - cput) + ' sec')
def destroy(self):
"""
Unset all elements that prevent garbage collection
"""
BaseModel.destroy(self)
self.db = None
self.sort_func = None
if self.node_map:
self.node_map.destroy()
self.node_map = None
self.rebuild_data = None
self.search = None
def set_search(self, search):
"""
Change the search function that filters the data in the model.
When this method is called, make sure:
# you call self.rebuild_data() to recalculate what should be seen
in the model
# you reattach the model to the treeview so that the treeview updates
with the new entries
"""
if search:
if search[0]:
#following is None if no data given in filter sidebar
self.search = search[1]
self.rebuild_data = self._rebuild_filter
else:
if search[1]: # Search from topbar in columns
# we have search[1] = (index, text_unicode, inversion)
col = search[1][0]
text = search[1][1]
inv = search[1][2]
func = lambda x: self._get_value(x, col) or UEMPTY
if search[2]:
self.search = ExactSearchFilter(func, text, inv)
else:
self.search = SearchFilter(func, text, inv)
else:
self.search = None
self.rebuild_data = self._rebuild_search
else:
self.search = None
self.rebuild_data = self._rebuild_search
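    # Illustrative shapes of the `search` argument, inferred only from the
    # branches above (how the sidebar/top bar actually construct these tuples
    # is an assumption):
    #
    #   search = (True, generic_filter)                 # filter from the sidebar
    #   search = (False, (2, 'smith', False), False)    # substring search, column 2
    #   search = (False, (2, 'smith', False), True)     # exact search, column 2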
def total(self):
"""
Total number of items that maximally can be shown
"""
return self.node_map.max_rows()
def displayed(self):
"""
Number of items that are currently displayed
"""
return len(self.node_map)
def reverse_order(self):
"""
reverse the sort order of the sort column
"""
self._reverse = not self._reverse
self.node_map.reverse_order()
def color_column(self):
"""
Return the color column.
"""
return None
def sort_keys(self):
"""
Return the (sort_key, handle) list of all data that can maximally
be shown.
This list is sorted ascending, via localized string sort.
"""
# use cursor as a context manager
with self.gen_cursor() as cursor:
#loop over database and store the sort field, and the handle
srt_keys=[(self.sort_func(data), key)
for key, data in cursor]
srt_keys.sort()
return srt_keys
def _rebuild_search(self, ignore=None):
""" function called when view must be build, given a search text
in the top search bar
"""
self.clear_cache()
self._in_build = True
if (self.db is not None) and self.db.is_open():
allkeys = self.node_map.full_srtkey_hndl_map()
if not allkeys:
allkeys = self.sort_keys()
if self.search and self.search.text:
dlist = [h for h in allkeys
if self.search.match(h[1], self.db) and
h[1] not in self.skip and h[1] != ignore]
ident = False
elif ignore is None and not self.skip:
#nothing to remove from the keys present
ident = True
dlist = allkeys
else:
ident = False
dlist = [h for h in allkeys
if h[1] not in self.skip and h[1] != ignore]
self.node_map.set_path_map(dlist, allkeys, identical=ident,
reverse=self._reverse)
else:
self.node_map.clear_map()
self._in_build = False
def _rebuild_filter(self, ignore=None):
""" function called when view must be build, given filter options
in the filter sidebar
"""
self.clear_cache()
self._in_build = True
if (self.db is not None) and self.db.is_open():
allkeys = self.node_map.full_srtkey_hndl_map()
if not allkeys:
allkeys = self.sort_keys()
if self.search:
ident = False
if ignore is None:
dlist = self.search.apply(self.db, allkeys, tupleind=1)
else:
dlist = self.search.apply(self.db,
[ k for k in allkeys if k[1] != ignore],
tupleind=1)
elif ignore is None :
ident = True
dlist = allkeys
else:
ident = False
dlist = [ k for k in allkeys if k[1] != ignore ]
self.node_map.set_path_map(dlist, allkeys, identical=ident,
reverse=self._reverse)
else:
self.node_map.clear_map()
self._in_build = False
def add_row_by_handle(self, handle):
"""
Add a row. This is called after object with handle is created.
Row is only added if search/filter data is such that it must be shown
"""
assert isinstance(handle, str)
if self.node_map.get_path_from_handle(handle) is not None:
return # row is already displayed
data = self.map(handle)
insert_val = (self.sort_func(data), handle)
if not self.search or \
(self.search and self.search.match(handle, self.db)):
#row needs to be added to the model
insert_path = self.node_map.insert(insert_val)
if insert_path is not None:
node = self.do_get_iter(insert_path)[1]
self.row_inserted(insert_path, node)
else:
self.node_map.insert(insert_val, allkeyonly=True)
def delete_row_by_handle(self, handle):
"""
Delete a row, called after the object with handle is deleted
"""
assert isinstance(handle, str)
if self.node_map.get_path_from_handle(handle) is None:
return # row is not currently displayed
self.clear_cache(handle)
delete_val = (self.node_map.get_sortkey(handle), handle)
delete_path = self.node_map.delete(delete_val)
#delete_path is an integer from 0 to n-1
if delete_path is not None:
self.row_deleted(delete_path)
def update_row_by_handle(self, handle):
"""
Update a row, called after the object with handle is changed
"""
if self.node_map.get_path_from_handle(handle) is None:
return # row is not currently displayed
self.clear_cache(handle)
oldsortkey = self.node_map.get_sortkey(handle)
newsortkey = self.sort_func(self.map(handle))
if oldsortkey is None or oldsortkey != newsortkey:
#or the changed object is not present in the view due to filtering
#or the order of the object must change.
self.delete_row_by_handle(handle)
self.add_row_by_handle(handle)
else:
#the row is visible in the view, is changed, but the order is fixed
path = self.node_map.get_path_from_handle(handle)
node = self.do_get_iter(path)[1]
self.row_changed(path, node)
def get_iter_from_handle(self, handle):
"""
Get the iter for a gramps handle.
"""
if self.node_map.get_path_from_handle(handle) is None:
return None
return self.node_map.new_iter(handle)
def get_handle_from_iter(self, iter):
"""
Get the gramps handle for an iter.
"""
index = iter.user_data
if index is None:
##GTK3: user data may only be an integer, we store the index
##PROBLEM: pygobject 3.8 stores 0 as None, we need to correct
## when using user_data for that!
##upstream bug: https://bugzilla.gnome.org/show_bug.cgi?id=698366
index = 0
path = self.node_map.real_path(index)
return self.node_map.get_handle(path)
# The following implement the public interface of Gtk.TreeModel
def do_get_flags(self):
"""
Returns the GtkTreeModelFlags for this particular type of model
See Gtk.TreeModel
"""
#print 'do_get_flags'
return Gtk.TreeModelFlags.LIST_ONLY #| Gtk.TreeModelFlags.ITERS_PERSIST
def do_get_n_columns(self):
"""Internal method. Don't inherit"""
return self.on_get_n_columns()
def on_get_n_columns(self):
"""
Return the number of columns. Must be implemented in the child objects
See Gtk.TreeModel. Inherit as needed
"""
#print 'do_get_n_col'
raise NotImplementedError
def do_get_path(self, iter):
"""
Return the tree path (a tuple of indices at the various
levels) for a particular iter. We use handles for unique key iters
See Gtk.TreeModel
"""
#print 'do_get_path', iter
return self.node_map.get_path(iter)
def do_get_column_type(self, index):
"""
See Gtk.TreeModel
"""
#print 'do_get_col_type'
return str
def do_get_iter_first(self):
#print 'get iter first'
raise NotImplementedError
def do_get_iter(self, path):
"""
See Gtk.TreeModel
"""
#print 'do_get_iter', path
for p in path:
break
try:
return True, self.node_map.get_iter(p)
except IndexError:
return False, Gtk.TreeIter()
def _get_value(self, handle, col):
"""
Given handle and column, return unicode value in the column
We need this to search in the column in the GUI
"""
if handle != self.prev_handle:
cached, data = self.get_cached_value(handle, col)
if not cached:
data = self.map(handle)
self.set_cached_value(handle, col, data)
if data is None:
#object is no longer present
return ''
self.prev_data = data
self.prev_handle = handle
return self.fmap[col](self.prev_data)
def do_get_value(self, iter, col):
"""
See Gtk.TreeModel.
col is the model column that is needed, not the visible column!
"""
#print ('do_get_val', iter, iter.user_data, col)
index = iter.user_data
if index is None:
##GTK3: user data may only be an integer, we store the index
##PROBLEM: pygobject 3.8 stores 0 as None, we need to correct
## when using user_data for that!
##upstream bug: https://bugzilla.gnome.org/show_bug.cgi?id=698366
index = 0
handle = self.node_map._index2hndl[index][1]
val = self._get_value(handle, col)
#print 'val is', val, type(val)
return val
def do_iter_previous(self, iter):
#print 'do_iter_previous'
raise NotImplementedError
def do_iter_next(self, iter):
"""
Sets iter to the next node at this level of the tree
See Gtk.TreeModel
"""
return self.node_map.iter_next(iter)
def do_iter_children(self, iterparent):
"""
Return the first child of the node
See Gtk.TreeModel
"""
#print 'do_iter_children'
print('ERROR: iter children, should not be called in flat base!!')
raise NotImplementedError
if handle is None and len(self.node_map):
return self.node_map.get_first_handle()
return None
def do_iter_has_child(self, iter):
"""
Returns true if this node has children
See Gtk.TreeModel
"""
#print 'do_iter_has_child'
print('ERROR: iter has_child', iter, 'should not be called in flat base')
return False
if handle is None:
return len(self.node_map) > 0
return False
def do_iter_n_children(self, iter):
"""
See Gtk.TreeModel
"""
#print 'do_iter_n_children'
print('ERROR: iter_n_children', iter, 'should not be called in flat base')
return 0
if handle is None:
return len(self.node_map)
return 0
def do_iter_nth_child(self, iter, nth):
"""
See Gtk.TreeModel
"""
#print 'do_iter_nth_child', iter, nth
if iter is None:
return True, self.node_map.get_iter(nth)
return False, None
def do_iter_parent(self, iter):
"""
Returns the parent of this node
See Gtk.TreeModel
"""
#print 'do_iter_parent'
return False, None
|
gpl-2.0
| 737,878,693,955,550,700
| 36.32596
| 91
| 0.571781
| false
| 4.108008
| false
| false
| false
|
tensorflow/models
|
official/vision/beta/ops/box_ops.py
|
1
|
24043
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Box related ops."""
# Import libraries
import numpy as np
import tensorflow as tf
EPSILON = 1e-8
BBOX_XFORM_CLIP = np.log(1000. / 16.)
def yxyx_to_xywh(boxes):
"""Converts boxes from ymin, xmin, ymax, xmax to xmin, ymin, width, height.
Args:
boxes: a numpy array whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
Returns:
boxes: a numpy array whose shape is the same as `boxes` in new format.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
boxes_ymin = boxes[..., 0]
boxes_xmin = boxes[..., 1]
boxes_width = boxes[..., 3] - boxes[..., 1]
boxes_height = boxes[..., 2] - boxes[..., 0]
new_boxes = np.stack(
[boxes_xmin, boxes_ymin, boxes_width, boxes_height], axis=-1)
return new_boxes
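# Illustrative example (a sketch, not part of the module's tests): a box with
# ymin=10, xmin=20, ymax=50, xmax=60 becomes [xmin, ymin, width, height].
#
#   >>> yxyx_to_xywh(np.array([10., 20., 50., 60.]))
#   array([20., 10., 40., 40.])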
def jitter_boxes(boxes, noise_scale=0.025):
"""Jitter the box coordinates by some noise distribution.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
noise_scale: a python float which specifies the magnitude of noise. The rule
of thumb is to set this between (0, 0.1]. The default value is found to
mimic the noisy detections best empirically.
Returns:
jittered_boxes: a tensor whose shape is the same as `boxes` representing
the jittered boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('jitter_boxes'):
bbox_jitters = tf.random.normal(tf.shape(boxes), stddev=noise_scale)
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
width = xmax - xmin
height = ymax - ymin
new_center_x = (xmin + xmax) / 2.0 + bbox_jitters[..., 0:1] * width
new_center_y = (ymin + ymax) / 2.0 + bbox_jitters[..., 1:2] * height
new_width = width * tf.math.exp(bbox_jitters[..., 2:3])
new_height = height * tf.math.exp(bbox_jitters[..., 3:4])
jittered_boxes = tf.concat(
[new_center_y - new_height * 0.5, new_center_x - new_width * 0.5,
new_center_y + new_height * 0.5, new_center_x + new_width * 0.5],
axis=-1)
return jittered_boxes
def normalize_boxes(boxes, image_shape):
"""Converts boxes to the normalized coordinates.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
normalized_boxes: a tensor whose shape is the same as `boxes` representing
the normalized boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('normalize_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0:1]
width = image_shape[..., 1:2]
ymin = boxes[..., 0:1] / height
xmin = boxes[..., 1:2] / width
ymax = boxes[..., 2:3] / height
xmax = boxes[..., 3:4] / width
normalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
return normalized_boxes
def denormalize_boxes(boxes, image_shape):
"""Converts boxes normalized by [height, width] to pixel coordinates.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
denormalized_boxes: a tensor whose shape is the same as `boxes` representing
the denormalized boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
with tf.name_scope('denormalize_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height, width = tf.split(image_shape, 2, axis=-1)
ymin, xmin, ymax, xmax = tf.split(boxes, 4, axis=-1)
ymin = ymin * height
xmin = xmin * width
ymax = ymax * height
xmax = xmax * width
denormalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
return denormalized_boxes
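# Illustrative sketch (not part of the original module): a round trip through
# normalize_boxes/denormalize_boxes on a made-up box and image shape, relying
# on the `tf` import at the top of this file. The restored boxes should match
# the input up to floating-point error.
def _example_normalize_roundtrip():
  boxes = tf.constant([[12.0, 20.0, 48.0, 60.0]])  # ymin, xmin, ymax, xmax
  image_shape = [64, 64]  # [height, width]
  normalized = normalize_boxes(boxes, image_shape)
  restored = denormalize_boxes(normalized, image_shape)
  return normalized, restored  # restored is approximately equal to boxes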
def clip_boxes(boxes, image_shape):
"""Clips boxes to image boundaries.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
clipped_boxes: a tensor whose shape is the same as `boxes` representing the
clipped boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('clip_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
max_length = [height, width, height, width]
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height, width = tf.unstack(image_shape, axis=-1)
max_length = tf.stack([height, width, height, width], axis=-1)
clipped_boxes = tf.math.maximum(tf.math.minimum(boxes, max_length), 0.0)
return clipped_boxes
def compute_outer_boxes(boxes, image_shape, scale=1.0):
"""Compute outer box encloses an object with a margin.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
scale: a float number specifying the scale of output outer boxes to input
`boxes`.
Returns:
outer_boxes: a tensor whose shape is the same as `boxes` representing the
outer boxes.
"""
if scale < 1.0:
raise ValueError(
        'scale is {}, but outer box scale must be at least 1.0.'.format(
scale))
centers_y = (boxes[..., 0] + boxes[..., 2]) / 2.0
centers_x = (boxes[..., 1] + boxes[..., 3]) / 2.0
box_height = (boxes[..., 2] - boxes[..., 0]) * scale
box_width = (boxes[..., 3] - boxes[..., 1]) * scale
outer_boxes = tf.stack(
[centers_y - box_height / 2.0, centers_x - box_width / 2.0,
centers_y + box_height / 2.0, centers_x + box_width / 2.0],
axis=1)
outer_boxes = clip_boxes(outer_boxes, image_shape)
return outer_boxes
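# Illustrative sketch (not part of the original module): growing a single
# made-up box by 50% around its center with compute_outer_boxes; the result
# stays inside the 100x100 image, so clipping is a no-op here.
def _example_compute_outer_boxes():
  boxes = tf.constant([[10.0, 10.0, 30.0, 30.0]])
  # Expected result: [[5., 5., 35., 35.]]
  return compute_outer_boxes(boxes, image_shape=[100, 100], scale=1.5)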
def encode_boxes(boxes, anchors, weights=None):
"""Encode boxes to targets.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`,
representing the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
encoded_boxes: a tensor whose shape is the same as `boxes` representing the
encoded box targets.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('encode_boxes'):
boxes = tf.cast(boxes, dtype=anchors.dtype)
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
box_h = ymax - ymin
box_w = xmax - xmin
box_yc = ymin + 0.5 * box_h
box_xc = xmin + 0.5 * box_w
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin
anchor_w = anchor_xmax - anchor_xmin
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
encoded_dy = (box_yc - anchor_yc) / anchor_h
encoded_dx = (box_xc - anchor_xc) / anchor_w
encoded_dh = tf.math.log(box_h / anchor_h)
encoded_dw = tf.math.log(box_w / anchor_w)
if weights:
encoded_dy *= weights[0]
encoded_dx *= weights[1]
encoded_dh *= weights[2]
encoded_dw *= weights[3]
encoded_boxes = tf.concat(
[encoded_dy, encoded_dx, encoded_dh, encoded_dw], axis=-1)
return encoded_boxes
def decode_boxes(encoded_boxes, anchors, weights=None):
"""Decode boxes.
Args:
encoded_boxes: a tensor whose last dimension is 4 representing the
coordinates of encoded boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`,
representing the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
    decoded_boxes: a tensor whose shape is the same as `encoded_boxes`
      representing the decoded box targets.
"""
if encoded_boxes.shape[-1] != 4:
raise ValueError(
'encoded_boxes.shape[-1] is {:d}, but must be 4.'
.format(encoded_boxes.shape[-1]))
with tf.name_scope('decode_boxes'):
encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype)
dy = encoded_boxes[..., 0:1]
dx = encoded_boxes[..., 1:2]
dh = encoded_boxes[..., 2:3]
dw = encoded_boxes[..., 3:4]
if weights:
dy /= weights[0]
dx /= weights[1]
dh /= weights[2]
dw /= weights[3]
dh = tf.math.minimum(dh, BBOX_XFORM_CLIP)
dw = tf.math.minimum(dw, BBOX_XFORM_CLIP)
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin
anchor_w = anchor_xmax - anchor_xmin
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
decoded_boxes_yc = dy * anchor_h + anchor_yc
decoded_boxes_xc = dx * anchor_w + anchor_xc
decoded_boxes_h = tf.math.exp(dh) * anchor_h
decoded_boxes_w = tf.math.exp(dw) * anchor_w
decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h
decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w
decoded_boxes_ymax = decoded_boxes_ymin + decoded_boxes_h
decoded_boxes_xmax = decoded_boxes_xmin + decoded_boxes_w
decoded_boxes = tf.concat(
[decoded_boxes_ymin, decoded_boxes_xmin,
decoded_boxes_ymax, decoded_boxes_xmax],
axis=-1)
return decoded_boxes
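# Illustrative sketch (not part of the original module): encode a made-up box
# against a single anchor and decode it back with the same Faster R-CNN style
# weights; decoding should recover the input box up to floating-point error.
def _example_encode_decode_roundtrip():
  boxes = tf.constant([[10.0, 10.0, 50.0, 40.0]])
  anchors = tf.constant([[0.0, 0.0, 60.0, 60.0]])
  weights = [10.0, 10.0, 5.0, 5.0]
  encoded = encode_boxes(boxes, anchors, weights=weights)
  decoded = decode_boxes(encoded, anchors, weights=weights)
  return decoded  # approximately equal to boxes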
def filter_boxes(boxes, scores, image_shape, min_size_threshold):
"""Filter and remove boxes that are too small or fall outside the image.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
scores: a tensor whose shape is the same as tf.shape(boxes)[:-1]
representing the original scores of the boxes.
image_shape: a tensor whose shape is the same as, or `broadcastable` to
`boxes` except the last dimension, which is 2, representing [height,
width] of the scaled image.
min_size_threshold: a float representing the minimal box size in each side
(w.r.t. the scaled image). Boxes whose sides are smaller than it will be
filtered out.
Returns:
    filtered_boxes: a tensor whose shape is the same as `boxes` but with
      the positions of the filtered boxes filled with 0.
    filtered_scores: a tensor whose shape is the same as 'scores' but with
      the positions of the filtered boxes filled with 0.
"""
if boxes.shape[-1] != 4:
raise ValueError(
        'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('filter_boxes'):
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0]
width = image_shape[..., 1]
ymin = boxes[..., 0]
xmin = boxes[..., 1]
ymax = boxes[..., 2]
xmax = boxes[..., 3]
h = ymax - ymin
w = xmax - xmin
yc = ymin + 0.5 * h
xc = xmin + 0.5 * w
min_size = tf.cast(
tf.math.maximum(min_size_threshold, 0.0), dtype=boxes.dtype)
filtered_size_mask = tf.math.logical_and(
tf.math.greater(h, min_size), tf.math.greater(w, min_size))
filtered_center_mask = tf.logical_and(
tf.math.logical_and(tf.math.greater(yc, 0.0), tf.math.less(yc, height)),
tf.math.logical_and(tf.math.greater(xc, 0.0), tf.math.less(xc, width)))
filtered_mask = tf.math.logical_and(
filtered_size_mask, filtered_center_mask)
filtered_scores = tf.where(filtered_mask, scores, tf.zeros_like(scores))
filtered_boxes = tf.cast(
tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
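# Illustrative sketch (not part of the original module): filter_boxes on two
# made-up boxes in a 100x100 image; the second box is smaller than the
# 5-pixel threshold, so its coordinates and score are zeroed out.
def _example_filter_boxes():
  boxes = tf.constant([[0.0, 0.0, 20.0, 20.0],
                       [0.0, 0.0, 1.0, 1.0]])
  scores = tf.constant([0.9, 0.8])
  return filter_boxes(boxes, scores, image_shape=[100, 100],
                      min_size_threshold=5.0)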
def filter_boxes_by_scores(boxes, scores, min_score_threshold):
"""Filter and remove boxes whose scores are smaller than the threshold.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates of
boxes in ymin, xmin, ymax, xmax order.
scores: a tensor whose shape is the same as tf.shape(boxes)[:-1]
representing the original scores of the boxes.
min_score_threshold: a float representing the minimal box score threshold.
Boxes whose score are smaller than it will be filtered out.
Returns:
    filtered_boxes: a tensor whose shape is the same as `boxes` but with
      the positions of the filtered boxes filled with 0.
    filtered_scores: a tensor whose shape is the same as 'scores' but with
      the positions of the filtered boxes filled with -1.
"""
if boxes.shape[-1] != 4:
    raise ValueError('boxes.shape[-1] is {:d}, but must be 4.'.format(
boxes.shape[-1]))
with tf.name_scope('filter_boxes_by_scores'):
filtered_mask = tf.math.greater(scores, min_score_threshold)
filtered_scores = tf.where(filtered_mask, scores, -tf.ones_like(scores))
filtered_boxes = tf.cast(
tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
def gather_instances(selected_indices, instances, *aux_instances):
"""Gather instances by indices.
Args:
selected_indices: a Tensor of shape [batch, K] which indicates the selected
indices in instance dimension (2nd dimension).
instances: a Tensor of shape [batch, N, ...] where the 2nd dimension is
the instance dimension to be selected from.
*aux_instances: the additional Tensors whose shapes are in [batch, N, ...]
which are the tensors to be selected from using the `selected_indices`.
Returns:
selected_instances: the tensor of shape [batch, K, ...] which corresponds to
the selected instances of the `instances` tensor.
selected_aux_instances: the additional tensors of shape [batch, K, ...]
      which correspond to the selected instances of the `aux_instances`
tensors.
"""
batch_size = instances.shape[0]
if batch_size == 1:
selected_instances = tf.squeeze(
tf.gather(instances, selected_indices, axis=1), axis=1)
if aux_instances:
selected_aux_instances = [
tf.squeeze(
tf.gather(a, selected_indices, axis=1), axis=1)
for a in aux_instances
]
return tuple([selected_instances] + selected_aux_instances)
else:
return selected_instances
else:
indices_shape = tf.shape(selected_indices)
batch_indices = (
tf.expand_dims(tf.range(indices_shape[0]), axis=-1) *
tf.ones([1, indices_shape[-1]], dtype=tf.int32))
gather_nd_indices = tf.stack(
[batch_indices, selected_indices], axis=-1)
selected_instances = tf.gather_nd(instances, gather_nd_indices)
if aux_instances:
selected_aux_instances = [
tf.gather_nd(a, gather_nd_indices) for a in aux_instances
]
return tuple([selected_instances] + selected_aux_instances)
else:
return selected_instances
def top_k_boxes(boxes, scores, k):
"""Sort and select top k boxes according to the scores.
Args:
boxes: a tensor of shape [batch_size, N, 4] representing the coordinate of
the boxes. N is the number of boxes per image.
    scores: a tensor of shape [batch_size, N] representing the score of the
boxes.
k: an integer or a tensor indicating the top k number.
Returns:
selected_boxes: a tensor of shape [batch_size, k, 4] representing the
selected top k box coordinates.
selected_scores: a tensor of shape [batch_size, k] representing the selected
top k box scores.
"""
with tf.name_scope('top_k_boxes'):
selected_scores, top_k_indices = tf.nn.top_k(scores, k=k, sorted=True)
selected_boxes = gather_instances(top_k_indices, boxes)
return selected_boxes, selected_scores
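# Illustrative sketch (not part of the original module): selecting the two
# highest-scoring of three made-up boxes for a single-image batch.
def _example_top_k_boxes():
  boxes = tf.constant([[[0.0, 0.0, 1.0, 1.0],
                        [0.0, 0.0, 2.0, 2.0],
                        [0.0, 0.0, 3.0, 3.0]]])
  scores = tf.constant([[0.1, 0.9, 0.5]])
  # Returns the boxes whose scores are 0.9 and 0.5, in that order.
  return top_k_boxes(boxes, scores, k=2)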
def get_non_empty_box_indices(boxes):
"""Get indices for non-empty boxes."""
  # Selects indices where both box height and width are greater than 0.
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
indices = tf.where(tf.logical_and(tf.greater(height, 0),
tf.greater(width, 0)))
return indices[:, 0]
def bbox_overlap(boxes, gt_boxes):
"""Calculates the overlap between proposal and ground truth boxes.
Some `boxes` or `gt_boxes` may have been padded. The returned `iou` tensor
for these boxes will be -1.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.
gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
tensor might have paddings with a negative value.
Returns:
iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES].
"""
with tf.name_scope('bbox_overlap'):
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
value=gt_boxes, num_or_size_splits=4, axis=2)
# Calculates the intersection area.
i_xmin = tf.math.maximum(bb_x_min, tf.transpose(gt_x_min, [0, 2, 1]))
i_xmax = tf.math.minimum(bb_x_max, tf.transpose(gt_x_max, [0, 2, 1]))
i_ymin = tf.math.maximum(bb_y_min, tf.transpose(gt_y_min, [0, 2, 1]))
i_ymax = tf.math.minimum(bb_y_max, tf.transpose(gt_y_max, [0, 2, 1]))
i_area = (
tf.math.maximum((i_xmax - i_xmin), 0) *
tf.math.maximum((i_ymax - i_ymin), 0))
# Calculates the union area.
bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)
gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min)
# Adds a small epsilon to avoid divide-by-zero.
u_area = bb_area + tf.transpose(gt_area, [0, 2, 1]) - i_area + 1e-8
# Calculates IoU.
iou = i_area / u_area
# Fills -1 for IoU entries between the padded ground truth boxes.
gt_invalid_mask = tf.less(
tf.reduce_max(gt_boxes, axis=-1, keepdims=True), 0.0)
padding_mask = tf.logical_or(
tf.zeros_like(bb_x_min, dtype=tf.bool),
tf.transpose(gt_invalid_mask, [0, 2, 1]))
iou = tf.where(padding_mask, -tf.ones_like(iou), iou)
    # Fills -1 for invalid (-1) boxes.
boxes_invalid_mask = tf.less(
tf.reduce_max(boxes, axis=-1, keepdims=True), 0.0)
iou = tf.where(boxes_invalid_mask, -tf.ones_like(iou), iou)
return iou
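# Illustrative sketch (not part of the original module): IoU between two
# made-up proposals and two groundtruth boxes, where the second groundtruth
# row is padding (all -1) and therefore comes back with IoU -1.
def _example_bbox_overlap():
  boxes = tf.constant([[[0.0, 0.0, 10.0, 10.0],
                        [0.0, 0.0, 5.0, 5.0]]])
  gt_boxes = tf.constant([[[0.0, 0.0, 10.0, 10.0],
                           [-1.0, -1.0, -1.0, -1.0]]])
  return bbox_overlap(boxes, gt_boxes)  # shape [1, 2, 2]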
def box_matching(boxes, gt_boxes, gt_classes):
"""Match boxes to groundtruth boxes.
Given the proposal boxes and the groundtruth boxes and classes, perform the
groundtruth matching by taking the argmax of the IoU between boxes and
groundtruth boxes.
Args:
boxes: a tensor of shape of [batch_size, N, 4] representing the box
      coordinates to be matched to groundtruth boxes.
gt_boxes: a tensor of shape of [batch_size, MAX_INSTANCES, 4] representing
the groundtruth box coordinates. It is padded with -1s to indicate the
invalid boxes.
gt_classes: [batch_size, MAX_INSTANCES] representing the groundtruth box
classes. It is padded with -1s to indicate the invalid classes.
Returns:
matched_gt_boxes: a tensor of shape of [batch_size, N, 4], representing
the matched groundtruth box coordinates for each input box. If the box
does not overlap with any groundtruth boxes, the matched boxes of it
will be set to all 0s.
matched_gt_classes: a tensor of shape of [batch_size, N], representing
the matched groundtruth classes for each input box. If the box does not
overlap with any groundtruth boxes, the matched box classes of it will
be set to 0, which corresponds to the background class.
matched_gt_indices: a tensor of shape of [batch_size, N], representing
the indices of the matched groundtruth boxes in the original gt_boxes
tensor. If the box does not overlap with any groundtruth boxes, the
index of the matched groundtruth will be set to -1.
matched_iou: a tensor of shape of [batch_size, N], representing the IoU
between the box and its matched groundtruth box. The matched IoU is the
maximum IoU of the box and all the groundtruth boxes.
iou: a tensor of shape of [batch_size, N, K], representing the IoU matrix
between boxes and the groundtruth boxes. The IoU between a box and the
invalid groundtruth boxes whose coordinates are [-1, -1, -1, -1] is -1.
"""
# Compute IoU between boxes and gt_boxes.
# iou <- [batch_size, N, K]
iou = bbox_overlap(boxes, gt_boxes)
# max_iou <- [batch_size, N]
# 0.0 -> no match to gt, or -1.0 match to no gt
matched_iou = tf.reduce_max(iou, axis=-1)
# background_box_mask <- bool, [batch_size, N]
background_box_mask = tf.less_equal(matched_iou, 0.0)
argmax_iou_indices = tf.argmax(iou, axis=-1, output_type=tf.int32)
matched_gt_boxes, matched_gt_classes = gather_instances(
argmax_iou_indices, gt_boxes, gt_classes)
matched_gt_boxes = tf.where(
tf.tile(tf.expand_dims(background_box_mask, axis=-1), [1, 1, 4]),
tf.zeros_like(matched_gt_boxes, dtype=matched_gt_boxes.dtype),
matched_gt_boxes)
matched_gt_classes = tf.where(
background_box_mask,
tf.zeros_like(matched_gt_classes),
matched_gt_classes)
matched_gt_indices = tf.where(
background_box_mask,
-tf.ones_like(argmax_iou_indices),
argmax_iou_indices)
return (matched_gt_boxes, matched_gt_classes, matched_gt_indices,
matched_iou, iou)
|
apache-2.0
| 3,766,865,018,543,478,000
| 36.625978
| 80
| 0.655284
| false
| 3.496655
| false
| false
| false
|
jo-tez/aima-python
|
nlp.py
|
1
|
21959
|
"""Natural Language Processing; Chart Parsing and PageRanking (Chapter 22-23)"""
from collections import defaultdict
from utils import weighted_choice
import urllib.request
import re
# ______________________________________________________________________________
# Grammars and Lexicons
def Rules(**rules):
"""Create a dictionary mapping symbols to alternative sequences.
>>> Rules(A = "B C | D E")
{'A': [['B', 'C'], ['D', 'E']]}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = [alt.strip().split() for alt in rhs.split('|')]
return rules
def Lexicon(**rules):
"""Create a dictionary mapping symbols to alternative words.
>>> Lexicon(Article = "the | a | an")
{'Article': ['the', 'a', 'an']}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = [word.strip() for word in rhs.split('|')]
return rules
class Grammar:
def __init__(self, name, rules, lexicon):
"""A grammar has a set of rules and a lexicon."""
self.name = name
self.rules = rules
self.lexicon = lexicon
self.categories = defaultdict(list)
for lhs in lexicon:
for word in lexicon[lhs]:
self.categories[word].append(lhs)
def rewrites_for(self, cat):
"""Return a sequence of possible rhs's that cat can be rewritten as."""
return self.rules.get(cat, ())
def isa(self, word, cat):
"""Return True iff word is of category cat"""
return cat in self.categories[word]
def cnf_rules(self):
"""Returns the tuple (X, Y, Z) for rules in the form:
X -> Y Z"""
cnf = []
for X, rules in self.rules.items():
for (Y, Z) in rules:
cnf.append((X, Y, Z))
return cnf
def generate_random(self, S='S'):
"""Replace each token in S by a random entry in grammar (recursively)."""
import random
def rewrite(tokens, into):
for token in tokens:
if token in self.rules:
rewrite(random.choice(self.rules[token]), into)
elif token in self.lexicon:
into.append(random.choice(self.lexicon[token]))
else:
into.append(token)
return into
return ' '.join(rewrite(S.split(), []))
def __repr__(self):
return '<Grammar {}>'.format(self.name)
def ProbRules(**rules):
"""Create a dictionary mapping symbols to alternative sequences,
with probabilities.
>>> ProbRules(A = "B C [0.3] | D E [0.7]")
{'A': [(['B', 'C'], 0.3), (['D', 'E'], 0.7)]}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = []
rhs_separate = [alt.strip().split() for alt in rhs.split('|')]
for r in rhs_separate:
prob = float(r[-1][1:-1]) # remove brackets, convert to float
rhs_rule = (r[:-1], prob)
rules[lhs].append(rhs_rule)
return rules
def ProbLexicon(**rules):
"""Create a dictionary mapping symbols to alternative words,
with probabilities.
>>> ProbLexicon(Article = "the [0.5] | a [0.25] | an [0.25]")
{'Article': [('the', 0.5), ('a', 0.25), ('an', 0.25)]}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = []
rhs_separate = [word.strip().split() for word in rhs.split('|')]
for r in rhs_separate:
prob = float(r[-1][1:-1]) # remove brackets, convert to float
word = r[:-1][0]
rhs_rule = (word, prob)
rules[lhs].append(rhs_rule)
return rules
class ProbGrammar:
def __init__(self, name, rules, lexicon):
"""A grammar has a set of rules and a lexicon.
Each rule has a probability."""
self.name = name
self.rules = rules
self.lexicon = lexicon
self.categories = defaultdict(list)
for lhs in lexicon:
for word, prob in lexicon[lhs]:
self.categories[word].append((lhs, prob))
def rewrites_for(self, cat):
"""Return a sequence of possible rhs's that cat can be rewritten as."""
return self.rules.get(cat, ())
def isa(self, word, cat):
"""Return True iff word is of category cat"""
return cat in [c for c, _ in self.categories[word]]
def cnf_rules(self):
"""Returns the tuple (X, Y, Z, p) for rules in the form:
X -> Y Z [p]"""
cnf = []
for X, rules in self.rules.items():
for (Y, Z), p in rules:
cnf.append((X, Y, Z, p))
return cnf
def generate_random(self, S='S'):
"""Replace each token in S by a random entry in grammar (recursively).
Returns a tuple of (sentence, probability)."""
import random
def rewrite(tokens, into):
for token in tokens:
if token in self.rules:
non_terminal, prob = weighted_choice(self.rules[token])
into[1] *= prob
rewrite(non_terminal, into)
elif token in self.lexicon:
terminal, prob = weighted_choice(self.lexicon[token])
into[0].append(terminal)
into[1] *= prob
else:
into[0].append(token)
return into
rewritten_as, prob = rewrite(S.split(), [[], 1])
return (' '.join(rewritten_as), prob)
def __repr__(self):
return '<Grammar {}>'.format(self.name)
E0 = Grammar('E0',
Rules( # Grammar for E_0 [Figure 22.4]
S='NP VP | S Conjunction S',
NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',
VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb',
PP='Preposition NP',
RelClause='That VP'),
Lexicon( # Lexicon for E_0 [Figure 22.3]
Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",
Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel", # noqa
Adjective="right | left | east | south | back | smelly",
Adverb="here | there | nearby | ahead | right | left | east | south | back",
Pronoun="me | you | I | it",
Name="John | Mary | Boston | Aristotle",
Article="the | a | an",
Preposition="to | in | on | near",
Conjunction="and | or | but",
Digit="0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
That="that"
))
E_ = Grammar('E_', # Trivial Grammar and lexicon for testing
Rules(
S='NP VP',
NP='Art N | Pronoun',
VP='V NP'),
Lexicon(
Art='the | a',
N='man | woman | table | shoelace | saw',
Pronoun='I | you | it',
V='saw | liked | feel'
))
E_NP_ = Grammar('E_NP_', # Another Trivial Grammar for testing
Rules(NP='Adj NP | N'),
Lexicon(Adj='happy | handsome | hairy',
N='man'))
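# Illustrative sketch (not in the original file): drawing a random phrase from
# the toy E_NP_ grammar above. Output varies per call, e.g. "hairy man".
def generate_random_example():
    return E_NP_.generate_random('NP')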
E_Prob = ProbGrammar('E_Prob', # The Probabilistic Grammar from the notebook
ProbRules(
S="NP VP [0.6] | S Conjunction S [0.4]",
NP="Pronoun [0.2] | Name [0.05] | Noun [0.2] | Article Noun [0.15] \
| Article Adjs Noun [0.1] | Digit [0.05] | NP PP [0.15] | NP RelClause [0.1]",
VP="Verb [0.3] | VP NP [0.2] | VP Adjective [0.25] | VP PP [0.15] | VP Adverb [0.1]",
Adjs="Adjective [0.5] | Adjective Adjs [0.5]",
PP="Preposition NP [1]",
RelClause="RelPro VP [1]"
),
ProbLexicon(
Verb="is [0.5] | say [0.3] | are [0.2]",
Noun="robot [0.4] | sheep [0.4] | fence [0.2]",
Adjective="good [0.5] | new [0.2] | sad [0.3]",
Adverb="here [0.6] | lightly [0.1] | now [0.3]",
Pronoun="me [0.3] | you [0.4] | he [0.3]",
RelPro="that [0.5] | who [0.3] | which [0.2]",
Name="john [0.4] | mary [0.4] | peter [0.2]",
Article="the [0.5] | a [0.25] | an [0.25]",
Preposition="to [0.4] | in [0.3] | at [0.3]",
Conjunction="and [0.5] | or [0.2] | but [0.3]",
Digit="0 [0.35] | 1 [0.35] | 2 [0.3]"
))
E_Chomsky = Grammar('E_Prob_Chomsky', # A Grammar in Chomsky Normal Form
Rules(
S='NP VP',
NP='Article Noun | Adjective Noun',
VP='Verb NP | Verb Adjective',
),
Lexicon(
Article='the | a | an',
Noun='robot | sheep | fence',
Adjective='good | new | sad',
Verb='is | say | are'
))
E_Prob_Chomsky = ProbGrammar('E_Prob_Chomsky', # A Probabilistic Grammar in CNF
ProbRules(
S='NP VP [1]',
NP='Article Noun [0.6] | Adjective Noun [0.4]',
VP='Verb NP [0.5] | Verb Adjective [0.5]',
),
ProbLexicon(
Article='the [0.5] | a [0.25] | an [0.25]',
Noun='robot [0.4] | sheep [0.4] | fence [0.2]',
Adjective='good [0.5] | new [0.2] | sad [0.3]',
Verb='is [0.5] | say [0.3] | are [0.2]'
))
E_Prob_Chomsky_ = ProbGrammar('E_Prob_Chomsky_',
ProbRules(
S='NP VP [1]',
NP='NP PP [0.4] | Noun Verb [0.6]',
PP='Preposition NP [1]',
VP='Verb NP [0.7] | VP PP [0.3]',
),
ProbLexicon(
Noun='astronomers [0.18] | eyes [0.32] | stars [0.32] | telescopes [0.18]',
Verb='saw [0.5] | \'\' [0.5]',
Preposition='with [1]'
))
# ______________________________________________________________________________
# Chart Parsing
class Chart:
"""Class for parsing sentences using a chart data structure.
>>> chart = Chart(E0)
>>> len(chart.parses('the stench is in 2 2'))
1
"""
def __init__(self, grammar, trace=False):
"""A datastructure for parsing a string; and methods to do the parse.
self.chart[i] holds the edges that end just before the i'th word.
Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
self.grammar = grammar
self.trace = trace
def parses(self, words, S='S'):
"""Return a list of parses; words can be a list or string."""
if isinstance(words, str):
words = words.split()
self.parse(words, S)
# Return all the parses that span the whole input
# 'span the whole input' => begin at 0, end at len(words)
return [[i, j, S, found, []]
for (i, j, lhs, found, expects) in self.chart[len(words)]
# assert j == len(words)
if i == 0 and lhs == S and expects == []]
def parse(self, words, S='S'):
"""Parse a list of words; according to the grammar.
Leave results in the chart."""
self.chart = [[] for i in range(len(words)+1)]
self.add_edge([0, 0, 'S_', [], [S]])
for i in range(len(words)):
self.scanner(i, words[i])
return self.chart
def add_edge(self, edge):
"""Add edge to chart, and see if it extends or predicts another edge."""
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print('Chart: added {}'.format(edge))
if not expects:
self.extender(edge)
else:
self.predictor(edge)
def scanner(self, j, word):
"""For each edge expecting a word of this category here, extend the edge."""
for (i, j, A, alpha, Bb) in self.chart[j]:
if Bb and self.grammar.isa(word, Bb[0]):
self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
def predictor(self, edge):
"""Add to chart any rules for B that could help extend this edge."""
(i, j, A, alpha, Bb) = edge
B = Bb[0]
if B in self.grammar.rules:
for rhs in self.grammar.rewrites_for(B):
self.add_edge([j, j, B, [], rhs])
def extender(self, edge):
"""See what edges can be extended by this edge."""
(j, k, B, _, _) = edge
for (i, j, A, alpha, B1b) in self.chart[j]:
if B1b and B == B1b[0]:
self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
# ______________________________________________________________________________
# CYK Parsing
def CYK_parse(words, grammar):
""" [Figure 23.5] """
# We use 0-based indexing instead of the book's 1-based.
N = len(words)
P = defaultdict(float)
# Insert lexical rules for each word.
for (i, word) in enumerate(words):
for (X, p) in grammar.categories[word]:
P[X, i, 1] = p
# Combine first and second parts of right-hand sides of rules,
# from short to long.
for length in range(2, N+1):
for start in range(N-length+1):
for len1 in range(1, length): # N.B. the book incorrectly has N instead of length
len2 = length - len1
for (X, Y, Z, p) in grammar.cnf_rules():
P[X, start, length] = max(P[X, start, length],
P[Y, start, len1] * P[Z, start+len1, len2] * p)
return P
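# Illustrative sketch (not in the original file): parsing a short sentence with
# the CNF probabilistic grammar E_Prob_Chomsky defined above. The sentence is
# only an example query, not part of the original module.
def CYK_parse_example():
    P = CYK_parse(['the', 'robot', 'is', 'good'], E_Prob_Chomsky)
    # P['S', 0, 4] is the probability of the best parse of the whole
    # four-word sentence as an S; with these rule and lexicon weights it
    # works out to 0.015.
    return P['S', 0, 4]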
# ______________________________________________________________________________
# Page Ranking
# First entry in list is the base URL, and then following are relative URL pages
examplePagesSet = ["https://en.wikipedia.org/wiki/", "Aesthetics", "Analytic_philosophy",
"Ancient_Greek", "Aristotle", "Astrology", "Atheism", "Baruch_Spinoza",
"Belief", "Betrand Russell", "Confucius", "Consciousness",
"Continental Philosophy", "Dialectic", "Eastern_Philosophy",
"Epistemology", "Ethics", "Existentialism", "Friedrich_Nietzsche",
"Idealism", "Immanuel_Kant", "List_of_political_philosophers", "Logic",
"Metaphysics", "Philosophers", "Philosophy", "Philosophy_of_mind", "Physics",
"Plato", "Political_philosophy", "Pythagoras", "Rationalism",
"Social_philosophy", "Socrates", "Subjectivity", "Theology",
"Truth", "Western_philosophy"]
def loadPageHTML(addressList):
"""Download HTML page content for every URL address passed as argument"""
contentDict = {}
for addr in addressList:
with urllib.request.urlopen(addr) as response:
raw_html = response.read().decode('utf-8')
            # Strip raw html of unnecessary content: basically everything that isn't a link or text
html = stripRawHTML(raw_html)
contentDict[addr] = html
return contentDict
def initPages(addressList):
"""Create a dictionary of pages from a list of URL addresses"""
pages = {}
for addr in addressList:
pages[addr] = Page(addr)
return pages
def stripRawHTML(raw_html):
"""Remove the <head> section of the HTML which contains links to stylesheets etc.,
    and remove all other unnecessary HTML"""
# TODO: Strip more out of the raw html
return re.sub("<head>.*?</head>", "", raw_html, flags=re.DOTALL) # remove <head> section
def determineInlinks(page):
"""Given a set of pages that have their outlinks determined, we can fill
    out a page's inlinks by looking through all other pages' outlinks"""
inlinks = []
for addr, indexPage in pagesIndex.items():
if page.address == indexPage.address:
continue
elif page.address in indexPage.outlinks:
inlinks.append(addr)
return inlinks
def findOutlinks(page, handleURLs=None):
"""Search a page's HTML content for URL links to other pages"""
urls = re.findall(r'href=[\'"]?([^\'" >]+)', pagesContent[page.address])
if handleURLs:
urls = handleURLs(urls)
return urls
def onlyWikipediaURLS(urls):
"""Some example HTML page data is from wikipedia. This function converts
relative wikipedia links to full wikipedia URLs"""
wikiURLs = [url for url in urls if url.startswith('/wiki/')]
return ["https://en.wikipedia.org"+url for url in wikiURLs]
# ______________________________________________________________________________
# HITS Helper Functions
def expand_pages(pages):
"""Adds in every page that links to or is linked from one of
the relevant pages."""
expanded = {}
for addr, page in pages.items():
if addr not in expanded:
expanded[addr] = page
for inlink in page.inlinks:
if inlink not in expanded:
expanded[inlink] = pagesIndex[inlink]
for outlink in page.outlinks:
if outlink not in expanded:
expanded[outlink] = pagesIndex[outlink]
return expanded
def relevant_pages(query):
"""Relevant pages are pages that contain all of the query words. They are obtained by
intersecting the hit lists of the query words."""
hit_intersection = {addr for addr in pagesIndex}
query_words = query.split()
for query_word in query_words:
hit_list = set()
for addr in pagesIndex:
if query_word.lower() in pagesContent[addr].lower():
hit_list.add(addr)
hit_intersection = hit_intersection.intersection(hit_list)
return {addr: pagesIndex[addr] for addr in hit_intersection}
def normalize(pages):
"""Normalize divides each page's score by the sum of the squares of all
pages' scores (separately for both the authority and hub scores).
"""
summed_hub = sum(page.hub**2 for _, page in pages.items())
summed_auth = sum(page.authority**2 for _, page in pages.items())
for _, page in pages.items():
page.hub /= summed_hub**0.5
page.authority /= summed_auth**0.5
class ConvergenceDetector(object):
"""If the hub and authority values of the pages are no longer changing, we have
reached a convergence and further iterations will have no effect. This detects convergence
so that we can stop the HITS algorithm as early as possible."""
def __init__(self):
self.hub_history = None
self.auth_history = None
def __call__(self):
return self.detect()
def detect(self):
curr_hubs = [page.hub for addr, page in pagesIndex.items()]
curr_auths = [page.authority for addr, page in pagesIndex.items()]
if self.hub_history is None:
self.hub_history, self.auth_history = [], []
else:
diffsHub = [abs(x-y) for x, y in zip(curr_hubs, self.hub_history[-1])]
diffsAuth = [abs(x-y) for x, y in zip(curr_auths, self.auth_history[-1])]
aveDeltaHub = sum(diffsHub)/float(len(pagesIndex))
aveDeltaAuth = sum(diffsAuth)/float(len(pagesIndex))
if aveDeltaHub < 0.01 and aveDeltaAuth < 0.01: # may need tweaking
return True
if len(self.hub_history) > 2: # prevent list from getting long
del self.hub_history[0]
del self.auth_history[0]
self.hub_history.append([x for x in curr_hubs])
self.auth_history.append([x for x in curr_auths])
return False
def getInlinks(page):
if not page.inlinks:
page.inlinks = determineInlinks(page)
return [addr for addr, p in pagesIndex.items() if addr in page.inlinks]
def getOutlinks(page):
if not page.outlinks:
page.outlinks = findOutlinks(page)
return [addr for addr, p in pagesIndex.items() if addr in page.outlinks]
# ______________________________________________________________________________
# HITS Algorithm
class Page(object):
def __init__(self, address, inlinks=None, outlinks=None, hub=0, authority=0):
self.address = address
self.hub = hub
self.authority = authority
self.inlinks = inlinks
self.outlinks = outlinks
pagesContent = {} # maps Page relative or absolute URL/location to page's HTML content
pagesIndex = {}
convergence = ConvergenceDetector() # assign function to variable to mimic pseudocode's syntax
def HITS(query):
"""The HITS algorithm for computing hubs and authorities with respect to a query."""
pages = expand_pages(relevant_pages(query))
for p in pages.values():
p.authority = 1
p.hub = 1
while not convergence():
authority = {p: pages[p].authority for p in pages}
hub = {p: pages[p].hub for p in pages}
for p in pages:
# p.authority ← ∑i Inlinki(p).Hub
pages[p].authority = sum(hub[x] for x in getInlinks(pages[p]))
# p.hub ← ∑i Outlinki(p).Authority
pages[p].hub = sum(authority[x] for x in getOutlinks(pages[p]))
normalize(pages)
return pages
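# Hedged usage sketch (not in the original file): instead of fetching live
# Wikipedia pages, populate the module-level pagesContent/pagesIndex globals
# with a tiny made-up three-page link graph and run HITS on it. All page
# names and texts below are illustrative only.
def HITS_example():
    global pagesContent, pagesIndex
    pagesContent = {
        'A': "philosophy of mind",
        'B': "philosophy of language",
        'C': "history of philosophy",
    }
    pagesIndex = {
        'A': Page('A', inlinks=['C'], outlinks=['B', 'C']),
        'B': Page('B', inlinks=['A'], outlinks=['C']),
        'C': Page('C', inlinks=['A', 'B'], outlinks=['A']),
    }
    # Every page mentions "philosophy", so all three are relevant; HITS then
    # iterates the hub/authority updates over this small graph to convergence.
    return HITS('philosophy')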
|
mit
| 3,861,374,006,818,428,000
| 37.578207
| 114
| 0.515056
| false
| 3.727458
| false
| false
| false
|
w495/python-video-shot-detector
|
etc/experiments/test_pyav.py
|
1
|
2469
|
# -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
import av
from av.video.frame import VideoFrame
from av.video.stream import VideoStream
# We will store the frames in this list as numpy arrays.
array_list = []
# Open the input container for reading.
input_container = av.open('input.mp4')
# Apply "inverse multiplexing" (demuxing) =)
# Get the packets from the stream.
input_packets = input_container.demux()
# Get all the video frames and put them into `array_list`.
for packet in input_packets:
if isinstance(packet.stream, VideoStream):
        # Get all the frames of the packet
frames = packet.decode()
for raw_frame in frames:
            # Reformat the frames to the required size and layout.
            # This is better done with pyav (libav) facilities
            # because it is faster.
frame = raw_frame.reformat(32, 32, 'rgb24')
            # Turn each frame into a numpy array (dtype=int).
array = frame.to_nd_array()
            # Append it to the list of numpy arrays.
array_list += [array]
# Open the output container for writing.
output_container = av.open('out.mp4', mode='w', format='mp4')
# Add a stream with the h264 codec to the container.
output_stream = output_container.add_stream('h264', rate=25)
# We will store the output stream packets in this list.
output_packets = []
# Walk through the list of arrays and pack them into output stream packets.
for array in array_list:
    # Build a video frame from the array.
frame = VideoFrame.from_ndarray(array, format='rgb24')
    # Encode the resulting frame into a packet.
packet = output_stream.encode(frame)
    # Append it to the packet list.
output_packets += [packet]
# Apply "direct multiplexing" (muxing) =)
# Call the muxer for each packet.
for packet in output_packets:
if packet:
output_container.mux(packet)
output_container.close()
|
bsd-3-clause
| -1,050,391,235,802,443,000
| 31.241379
| 69
| 0.688235
| false
| 1.956067
| false
| false
| false
|
mburakergenc/Malware-Detection-using-Machine-Learning
|
cuckoo/analyzer/windows/modules/packages/ppt.py
|
1
|
2066
|
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from _winreg import HKEY_CURRENT_USER
from lib.common.abstracts import Package
class PPT(Package):
"""PowerPoint analysis package."""
PATHS = [
("ProgramFiles", "Microsoft Office", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office10", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office11", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office12", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office14", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office15", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office", "Office16", "POWERPNT.EXE"),
("ProgramFiles", "Microsoft Office 15", "root", "office15", "POWERPNT.EXE"),
]
REGKEYS = [
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Office\\12.0\\Common\\General",
{
# "Welcome to the 2007 Microsoft Office system"
"ShownOptIn": 1,
},
],
[
HKEY_CURRENT_USER,
"Software\\Microsoft\\Office\\12.0\\Powerpoint\\Security",
{
# Enable VBA macros in Office 2007.
"VBAWarnings": 1,
"AccessVBOM": 1,
# "The file you are trying to open .xyz is in a different
# format than specified by the file extension. Verify the file
# is not corrupted and is from trusted source before opening
# the file. Do you want to open the file now?"
"ExtensionHardening": 0,
},
],
]
def start(self, path):
powerpoint = self.get_path("Microsoft Office PowerPoint")
return self.execute(
powerpoint, args=["/S", path], mode="office",
trigger="file:%s" % path
)
|
mit
| 172,675,020,858,053,400
| 37.259259
| 84
| 0.56728
| false
| 3.818854
| false
| false
| false
|
freifunk-darmstadt/ffda-jarvis
|
willie/willie/modules/seen.py
|
1
|
1825
|
# coding=utf8
"""
seen.py - Willie Seen Module
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
from __future__ import unicode_literals
import time
import datetime
from willie.tools import Identifier, get_timezone, format_time
from willie.module import commands, rule, priority, thread
@commands('seen')
def seen(bot, trigger):
"""Reports when and where the user was last seen."""
if not trigger.group(2):
bot.say(".seen <nick> - Reports when <nick> was last seen.")
return
nick = trigger.group(2).strip()
timestamp = bot.db.get_nick_value(nick, 'seen_timestamp')
if timestamp:
channel = bot.db.get_nick_value(nick, 'seen_channel')
message = bot.db.get_nick_value(nick, 'seen_message')
tz = get_timezone(bot.db, bot.config, None, trigger.nick,
trigger.sender)
saw = datetime.datetime.utcfromtimestamp(timestamp)
timestamp = format_time(bot.db, bot.config, tz, trigger.nick,
trigger.sender, saw)
msg = "I last saw {} at {}".format(nick, timestamp)
if Identifier(channel) == trigger.sender:
msg = msg + " in here, saying " + message
else:
msg += " in another channel."
bot.say(str(trigger.nick) + ': ' + msg)
else:
bot.say("Sorry, I haven't seen {} around.".format(nick))
@thread(False)
@rule('(.*)')
@priority('low')
def note(bot, trigger):
if not trigger.is_privmsg:
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
bot.db.set_nick_value(trigger.nick, 'seen_channel', trigger.sender)
bot.db.set_nick_value(trigger.nick, 'seen_message', trigger)
|
mit
| -5,590,594,171,366,750,000
| 33.415094
| 75
| 0.631579
| false
| 3.428571
| false
| false
| false
|
fedoraredteam/elem
|
setup.py
|
1
|
1996
|
from distutils.core import setup
from distutils.core import Command
import os
import sys
import unittest
import setuptools
class CleanPycCommand(Command):
user_options = []
def initialize_options(self):
"""Abstract method that is required to be overwritten"""
pass
def finalize_options(self):
"""Abstract method that is required to be overwritten"""
pass
def run(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
filenames = [os.path.join(d, x)
for d, _, files in os.walk(dir_path)
for x in files if os.path.splitext(x)[1] == '.pyc']
for filename in filenames:
os.remove(filename)
ELEM_CONF_ENV = 'ELEMCONFPATH'
if os.getenv(ELEM_CONF_ENV):
path = os.getenv(ELEM_CONF_ENV)
elif hasattr(sys, 'real_prefix'):
path = os.path.join(sys.prefix, '.elem')
else:
path = os.path.join(os.path.expanduser("~"), '.elem')
setup(name='elem',
packages=['elem', 'elem.host', 'elem.score', 'elem.vulnerability', 'elem.exploit'],
package_data={'elem': ['config/elem.conf']},
install_requires=['requests', 'python-dateutil', 'argparse', 'cpe', 'redteamcore'],
data_files=[(path, ['elem/config/elem.conf'])],
version='0.3.0',
description='Tool to correlate published CVE\'s against Enterprise Linux against known exploits.',
author='Kenneth Evensen',
author_email='kevensen@redhat.com',
license='GPLv3',
url='https://github.com/fedoraredteam/elem',
download_url='https://github.com/fedoraredteam/elem/archive/0.3.0.tar.gz',
keywords=['cve', 'exploit', 'linux'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 2.7',
],
scripts=['bin/elem'],
platforms=['Linux'],
test_suite='tests',
cmdclass={'tidy': CleanPycCommand})
|
gpl-3.0
| -1,796,376,005,776,839,000
| 33.413793
| 104
| 0.617735
| false
| 3.655678
| false
| false
| false
|
hipnusleo/laserjet
|
resource/pypi/cffi-1.9.1/cffi/verifier.py
|
1
|
11834
|
#
# DEPRECATED: implementation for ffi.verify()
#
import sys, os, binascii, shutil, io
from . import __version_verifier_modules__
from . import ffiplatform
if sys.version_info >= (3, 3):
import importlib.machinery
def _extension_suffixes():
return importlib.machinery.EXTENSION_SUFFIXES[:]
else:
import imp
def _extension_suffixes():
return [suffix for suffix, _, type in imp.get_suffixes()
if type == imp.C_EXTENSION]
if sys.version_info >= (3,):
NativeIO = io.StringIO
else:
class NativeIO(io.BytesIO):
def write(self, s):
if isinstance(s, unicode):
s = s.encode('ascii')
super(NativeIO, self).write(s)
def _hack_at_distutils():
# Windows-only workaround for some configurations: see
# https://bugs.python.org/issue23246 (Python 2.7 with
# a specific MS compiler suite download)
if sys.platform == "win32":
try:
import setuptools # for side-effects, patches distutils
except ImportError:
pass
class Verifier(object):
def __init__(self, ffi, preamble, tmpdir=None, modulename=None,
ext_package=None, tag='', force_generic_engine=False,
source_extension='.c', flags=None, relative_to=None, **kwds):
if ffi._parser._uses_new_feature:
raise ffiplatform.VerificationError(
"feature not supported with ffi.verify(), but only "
"with ffi.set_source(): %s" % (ffi._parser._uses_new_feature,))
self.ffi = ffi
self.preamble = preamble
if not modulename:
flattened_kwds = ffiplatform.flatten(kwds)
vengine_class = _locate_engine_class(ffi, force_generic_engine)
self._vengine = vengine_class(self)
self._vengine.patch_extension_kwds(kwds)
self.flags = flags
self.kwds = self.make_relative_to(kwds, relative_to)
#
if modulename:
if tag:
raise TypeError("can't specify both 'modulename' and 'tag'")
else:
key = '\x00'.join([sys.version[:3], __version_verifier_modules__,
preamble, flattened_kwds] +
ffi._cdefsources)
if sys.version_info >= (3,):
key = key.encode('utf-8')
k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('0').rstrip('L')
modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key,
k1, k2)
suffix = _get_so_suffixes()[0]
self.tmpdir = tmpdir or _caller_dir_pycache()
self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension)
self.modulefilename = os.path.join(self.tmpdir, modulename + suffix)
self.ext_package = ext_package
self._has_source = False
self._has_module = False
def write_source(self, file=None):
"""Write the C source code. It is produced in 'self.sourcefilename',
which can be tweaked beforehand."""
with self.ffi._lock:
if self._has_source and file is None:
raise ffiplatform.VerificationError(
"source code already written")
self._write_source(file)
def compile_module(self):
"""Write the C source code (if not done already) and compile it.
This produces a dynamic link library in 'self.modulefilename'."""
with self.ffi._lock:
if self._has_module:
raise ffiplatform.VerificationError("module already compiled")
if not self._has_source:
self._write_source()
self._compile_module()
def load_library(self):
"""Get a C module from this Verifier instance.
Returns an instance of a FFILibrary class that behaves like the
objects returned by ffi.dlopen(), but that delegates all
operations to the C module. If necessary, the C code is written
and compiled first.
"""
with self.ffi._lock:
if not self._has_module:
self._locate_module()
if not self._has_module:
if not self._has_source:
self._write_source()
self._compile_module()
return self._load_library()
def get_module_name(self):
basename = os.path.basename(self.modulefilename)
# kill both the .so extension and the other .'s, as introduced
# by Python 3: 'basename.cpython-33m.so'
basename = basename.split('.', 1)[0]
# and the _d added in Python 2 debug builds --- but try to be
# conservative and not kill a legitimate _d
if basename.endswith('_d') and hasattr(sys, 'gettotalrefcount'):
basename = basename[:-2]
return basename
def get_extension(self):
_hack_at_distutils() # backward compatibility hack
if not self._has_source:
with self.ffi._lock:
if not self._has_source:
self._write_source()
sourcename = ffiplatform.maybe_relative_path(self.sourcefilename)
modname = self.get_module_name()
return ffiplatform.get_extension(sourcename, modname, **self.kwds)
def generates_python_module(self):
return self._vengine._gen_python_module
def make_relative_to(self, kwds, relative_to):
if relative_to and os.path.dirname(relative_to):
dirname = os.path.dirname(relative_to)
kwds = kwds.copy()
for key in ffiplatform.LIST_OF_FILE_NAMES:
if key in kwds:
lst = kwds[key]
if not isinstance(lst, (list, tuple)):
raise TypeError("keyword '%s' should be a list or tuple"
% (key,))
lst = [os.path.join(dirname, fn) for fn in lst]
kwds[key] = lst
return kwds
# ----------
def _locate_module(self):
if not os.path.isfile(self.modulefilename):
if self.ext_package:
try:
pkg = __import__(self.ext_package, None, None, ['__doc__'])
except ImportError:
return # cannot import the package itself, give up
# (e.g. it might be called differently before installation)
path = pkg.__path__
else:
path = None
filename = self._vengine.find_module(self.get_module_name(), path,
_get_so_suffixes())
if filename is None:
return
self.modulefilename = filename
self._vengine.collect_types()
self._has_module = True
def _write_source_to(self, file):
self._vengine._f = file
try:
self._vengine.write_source_to_f()
finally:
del self._vengine._f
def _write_source(self, file=None):
if file is not None:
self._write_source_to(file)
else:
# Write our source file to an in memory file.
f = NativeIO()
self._write_source_to(f)
source_data = f.getvalue()
# Determine if this matches the current file
if os.path.exists(self.sourcefilename):
with open(self.sourcefilename, "r") as fp:
needs_written = not (fp.read() == source_data)
else:
needs_written = True
# Actually write the file out if it doesn't match
if needs_written:
_ensure_dir(self.sourcefilename)
with open(self.sourcefilename, "w") as fp:
fp.write(source_data)
# Set this flag
self._has_source = True
def _compile_module(self):
# compile this C source
tmpdir = os.path.dirname(self.sourcefilename)
outputfilename = ffiplatform.compile(tmpdir, self.get_extension())
try:
same = ffiplatform.samefile(outputfilename, self.modulefilename)
except OSError:
same = False
if not same:
_ensure_dir(self.modulefilename)
shutil.move(outputfilename, self.modulefilename)
self._has_module = True
def _load_library(self):
assert self._has_module
if self.flags is not None:
return self._vengine.load_library(self.flags)
else:
return self._vengine.load_library()
# ____________________________________________________________
_FORCE_GENERIC_ENGINE = False # for tests
def _locate_engine_class(ffi, force_generic_engine):
if _FORCE_GENERIC_ENGINE:
force_generic_engine = True
if not force_generic_engine:
if '__pypy__' in sys.builtin_module_names:
force_generic_engine = True
else:
try:
import _cffi_backend
except ImportError:
_cffi_backend = '?'
if ffi._backend is not _cffi_backend:
force_generic_engine = True
if force_generic_engine:
from . import vengine_gen
return vengine_gen.VGenericEngine
else:
from . import vengine_cpy
return vengine_cpy.VCPythonEngine
# ____________________________________________________________
_TMPDIR = None
def _caller_dir_pycache():
if _TMPDIR:
return _TMPDIR
result = os.environ.get('CFFI_TMPDIR')
if result:
return result
filename = sys._getframe(2).f_code.co_filename
return os.path.abspath(os.path.join(os.path.dirname(filename),
'__pycache__'))
def set_tmpdir(dirname):
"""Set the temporary directory to use instead of __pycache__."""
global _TMPDIR
_TMPDIR = dirname
def cleanup_tmpdir(tmpdir=None, keep_so=False):
"""Clean up the temporary directory by removing all files in it
called `_cffi_*.{c,so}` as well as the `build` subdirectory."""
tmpdir = tmpdir or _caller_dir_pycache()
try:
filelist = os.listdir(tmpdir)
except OSError:
return
if keep_so:
suffix = '.c' # only remove .c files
else:
suffix = _get_so_suffixes()[0].lower()
for fn in filelist:
if fn.lower().startswith('_cffi_') and (
fn.lower().endswith(suffix) or fn.lower().endswith('.c')):
try:
os.unlink(os.path.join(tmpdir, fn))
except OSError:
pass
clean_dir = [os.path.join(tmpdir, 'build')]
for dir in clean_dir:
try:
for fn in os.listdir(dir):
fn = os.path.join(dir, fn)
if os.path.isdir(fn):
clean_dir.append(fn)
else:
os.unlink(fn)
except OSError:
pass
def _get_so_suffixes():
suffixes = _extension_suffixes()
if not suffixes:
# bah, no C_EXTENSION available. Occurs on pypy without cpyext
if sys.platform == 'win32':
suffixes = [".pyd"]
else:
suffixes = [".so"]
return suffixes
def _ensure_dir(filename):
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
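# Hedged illustration (not part of this module): the classic, now-deprecated
# ffi.verify() flow that this Verifier class implements. The C snippet is a
# made-up example, and compiling it requires a working C toolchain.
def _example_verify():
    from cffi import FFI
    ffi = FFI()
    ffi.cdef("int add(int, int);")
    lib = ffi.verify("int add(int a, int b) { return a + b; }")
    return lib.add(2, 3)  # -> 5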
|
apache-2.0
| -3,877,184,796,602,867,700
| 35.449367
| 86
| 0.533885
| false
| 4.273745
| false
| false
| false
|
termoshtt/unite-bibtex
|
src/unite_bibtex.py
|
1
|
2073
|
# -*- coding: utf-8 -*-
import os.path
from pybtex.database.input import bibtex
class unite_bibtex(object):
"""
Name space for unite_bibtex.vim
(not to pollute global name space)
"""
@staticmethod
def _read_file(filename):
parser = bibtex.Parser()
return parser.parse_file(filename)
@staticmethod
def _check_path(path):
path = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(path):
raise RuntimeError("file:%s not found" % path)
return path
@staticmethod
def entry_to_str(entry):
try:
persons = entry.persons[u'author']
authors = [unicode(au) for au in persons]
except:
authors = [u'unknown']
title = entry.fields[u"title"] if u"title" in entry.fields else ""
journal = entry.fields[u"journal"] if u"journal" in entry.fields else ""
year = entry.fields[u"year"] if u"year" in entry.fields else ""
desc = u"%s %s %s(%s)" % (",".join(authors), title, journal, year)
return desc.replace("'", "").replace("\\", "")
@staticmethod
def get_entries(bibpath_list):
entries = {}
for bibpath in bibpath_list:
try:
path = unite_bibtex._check_path(bibpath)
bibdata = unite_bibtex._read_file(path)
except Exception as e:
print("Fail to read {}".format(bibpath))
print("Message: {}".format(str(e)))
continue
for key in bibdata.entries:
try:
k = key.encode("utf-8")
except:
print("Cannot encode bibtex key, skip: {}".format(k))
continue
entries[k] = unite_bibtex.entry_to_str(bibdata.entries[key]).encode("utf-8")
return entries
if __name__ == '__main__':
import sys
bibpath_list = sys.argv[1:]
entries = unite_bibtex.get_entries(bibpath_list)
for k, v in entries.items():
print("{}:{}".format(k, v))
|
mit
| -5,800,734,522,061,089,000
| 31.904762
| 92
| 0.544139
| false
| 3.831793
| false
| false
| false
|
salvoventura/pyunsplash
|
pyunsplash/tests/documentation_test.py
|
1
|
9927
|
###############################################################################
# Copyright (c) 2017 Salvatore Ventura <salvoventura@gmail.com>
#
# File: documentation_test.py
#
# Author: Salvatore Ventura <salvoventura@gmail.com>
# Date: 07 Sep 2017
# Purpose: Test examples in documentation
#
# Revision: 1
# Comment: What's new in revision 1
# Unlike the main unit-tests, this requires live connection.
# Given the rate limit of 50/hr, these can't be run in a single
# shot; although all issues are fixed, still valuable to keep
# around. Name is purposely not following unit test standard.
#
###############################################################################
from __future__ import print_function
import pyunsplash
import os
import logging
api_key = os.environ.get('APPLICATION_ID', None)
# Initialize app logging
logger = logging.getLogger()
logging.basicConfig(filename='app.log', level=logging.DEBUG)
# pyunsplash logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger("pyunsplash").setLevel(logging.DEBUG)
def funzione_1():
pu = pyunsplash.PyUnsplash(api_key=api_key)
return pu
def funzione_2(pu):
logger.info('Funzione_2')
this_user = pu.user('salvoventura', w=100, h=100)
def funzione_3(pu):
logger.info('Funzione_3')
# retrieve a page from the featured collections, with a maximum
# of 5 collections per-page
collections_page = pu.collections(type_='featured', per_page=5)
def funzione_4(pu):
logger.info('Funzione_4')
#
#
search = pu.search(type_='photos', query='red,car')
for entry in search.entries:
print(entry.link_html)
def funzione_5(pu):
logger.info('Funzione_5')
stats = pu.stats()
print(stats.total) # this is json
def funzione_6(pu):
logger.info('Funzione_6')
# use the PyUnsplash objects: all logs will be recorded to log file
# API: Class Collection
def funzione_7(pu):
logger.info('Funzione_7')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
collection.refresh()
print(collection.id)
def funzione_8(pu):
logger.info('Funzione_8')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.id)
def funzione_9(pu):
logger.info('Funzione_9')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.title)
def funzione_10(pu):
logger.info('Funzione_10')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.description)
def funzione_11(pu):
logger.info('Funzione_11')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.user)
def funzione_12(pu):
logger.info('Funzione_12')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.link_photos)
def funzione_13(pu):
logger.info('Funzione_13')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
print(collection.link_related)
def funzione_14(pu):
logger.info('Funzione_14')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
photos = collection.photos(order_by='popular', per_page=3)
for photo in photos.entries:
print(photo.id, photo.link_download)
def funzione_15(pu):
logger.info('Funzione_15')
collections_page = pu.collections(type_='featured', per_page=5)
for collection in collections_page.entries:
related_collections = collection.related
for rel_collection in related_collections.entries:
print(rel_collection.title, rel_collection.description)
# API: Class Collections
def funzione_16(pu):
logger.info('Funzione_16')
this_user = pu.user('salvoventura', w=100, h=100)
collections = this_user.collections(page=1, per_page=5)
for collection in collections.entries:
print(collection.id, collection.title)
# API: Class Photo
def funzione_17(pu):
logger.info('Funzione_17')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
photo.refresh()
print(photo.id, photo.link_download)
def funzione_18(pu):
logger.info('Funzione_18')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
photo.refresh()
print(photo.id, photo.link_download)
def funzione_19(pu):
logger.info('Funzione_19')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
print(photo.id, photo.link_html)
def funzione_20(pu):
logger.info('Funzione_20')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
print(photo.id, photo.link_download)
def funzione_21(pu):
logger.info('Funzione_21')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
print(photo.stats)
# API: Class Photos
def funzione_22(pu):
logger.info('Funzione_22')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos() # photos is an instance of class Photos
for photo in photos.entries:
print(photo.id, photo.link_download)
# API: Class Search
def funzione_23(pu):
logger.info('Funzione_23')
search = pu.search(type_='photos', query='red,car')
for photo in search.entries:
print(photo.id, photo.link_download)
# API: Class Stats
def funzione_24(pu):
logger.info('Funzione_24')
stats = pu.stats()
print(stats.total)
# API: Class User
def funzione_25(pu):
logger.info('Funzione_25')
this_user = pu.user('salvoventura', w=100, h=100)
this_user.refresh()
def funzione_26(pu):
logger.info('Funzione_26')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.id)
def funzione_27(pu):
logger.info('Funzione_27')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.links)
def funzione_28(pu):
logger.info('Funzione_28')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.link_html)
def funzione_29(pu):
logger.info('Funzione_29')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.link_portfolio)
def funzione_30(pu):
logger.info('Funzione_30')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.link_followers)
def funzione_31(pu):
logger.info('Funzione_31')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.link_following)
def funzione_32(pu):
logger.info('Funzione_32')
this_user = pu.user('salvoventura', w=100, h=100)
print(this_user.link_photos)
def funzione_33(pu):
logger.info('Funzione_33')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.photos(per_page=5)
for photo in photos.entries:
print(photo.id, photo.link_download)
def funzione_34(pu):
logger.info('Funzione_34')
this_user = pu.user('salvoventura', w=100, h=100)
followers = this_user.followers()
for user in followers.entries:
print(user.id, user.body.get('first_name'), user.body.get('last_name'))
def funzione_35(pu):
logger.info('Funzione_35')
this_user = pu.user('salvoventura', w=100, h=100)
following = this_user.following()
for user in following.entries:
print(user.id, user.body.get('first_name'), user.body.get('last_name'))
def funzione_36(pu):
logger.info('Funzione_36')
this_user = pu.user('salvoventura', w=100, h=100)
photos = this_user.likes(per_page=5)
for photo in photos.entries:
print(photo.id, photo.link_download)
def funzione_37(pu):
logger.info('Funzione_37')
this_user = pu.user('salvoventura', w=100, h=100)
collections = this_user.collections(page=1, per_page=5)
for collection in collections.entries:
print(collection.id, collection.title)
# API: Class Users
def funzione_38(pu):
logger.info('Funzione_38')
this_user = pu.user('salvoventura', w=100, h=100)
followers = this_user.followers() # followers is an instance of class Users
for user in followers.entries:
print(user.id, user.body.get('first_name'), user.body.get('last_name'))
def main():
pu = funzione_1()
# first chunk
funzione_2(pu)
funzione_3(pu)
funzione_4(pu)
funzione_5(pu)
funzione_6(pu)
funzione_7(pu)
funzione_8(pu)
funzione_9(pu)
funzione_10(pu)
funzione_11(pu)
funzione_12(pu)
funzione_13(pu)
funzione_14(pu)
funzione_15(pu)
funzione_16(pu)
funzione_17(pu)
funzione_18(pu)
# second chunk
funzione_19(pu)
funzione_20(pu)
funzione_21(pu)
funzione_22(pu)
funzione_23(pu)
funzione_24(pu)
funzione_25(pu)
funzione_26(pu)
funzione_27(pu)
funzione_28(pu)
funzione_29(pu)
funzione_30(pu)
funzione_31(pu)
funzione_32(pu)
funzione_33(pu)
funzione_34(pu)
funzione_35(pu)
funzione_36(pu)
funzione_37(pu)
funzione_38(pu)
if __name__ == '__main__':
main()
|
mit
| -1,336,868,179,278,288,600
| 25.975543
| 80
| 0.652664
| false
| 3.045092
| false
| false
| false
|
xalt/xalt
|
py_src/xalt_extract_linker.py
|
1
|
1691
|
#-----------------------------------------------------------------------
# XALT: A tool that tracks users jobs and environments on a cluster.
# Copyright (C) 2013-2015 University of Texas at Austin
# Copyright (C) 2013-2015 University of Tennessee
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
#
# Git Version: @git@
from __future__ import print_function
import os, sys, json
dirNm, execName = os.path.split(os.path.realpath(sys.argv[0]))
sys.path.insert(1,os.path.realpath(os.path.join(dirNm, "../libexec")))
sys.path.insert(1,os.path.realpath(os.path.join(dirNm, "../site")))
from xalt_util import extract_compiler
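# Descriptive note (added for clarity): the script prints a single JSON object
# to stdout with the keys 'compiler', 'full_path' and 'link_line', filled in
# by extract_compiler() in main() below.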
def main():
compiler, full_path_cmplr, link_line = extract_compiler()
resultT = { 'compiler' : compiler,
'full_path' : full_path_cmplr,
'link_line' : link_line
}
jsonStr = json.dumps(resultT)
print(jsonStr)
if ( __name__ == '__main__'): main()
|
lgpl-2.1
| 7,246,546,160,335,109,000
| 36.577778
| 72
| 0.641632
| false
| 3.79148
| false
| false
| false
|
airbnb/caravel
|
superset/migrations/versions/6c7537a6004a_models_for_email_reports.py
|
1
|
4101
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""models for email reports
Revision ID: 6c7537a6004a
Revises: e502db2af7be
Create Date: 2018-05-15 20:28:51.977572
"""
# revision identifiers, used by Alembic.
revision = '6c7537a6004a'
down_revision = 'a61b40f9f57f'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('dashboard_email_schedules',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('crontab', sa.String(length=50), nullable=True),
sa.Column('recipients', sa.Text(), nullable=True),
sa.Column('deliver_as_group', sa.Boolean(), nullable=True),
sa.Column('delivery_type', sa.Enum('attachment', 'inline', name='emaildeliverytype'), nullable=True),
sa.Column('dashboard_id', sa.Integer(), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['dashboard_id'], ['dashboards.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_dashboard_email_schedules_active'), 'dashboard_email_schedules', ['active'], unique=False)
op.create_table('slice_email_schedules',
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('crontab', sa.String(length=50), nullable=True),
sa.Column('recipients', sa.Text(), nullable=True),
sa.Column('deliver_as_group', sa.Boolean(), nullable=True),
sa.Column('delivery_type', sa.Enum('attachment', 'inline', name='emaildeliverytype'), nullable=True),
sa.Column('slice_id', sa.Integer(), nullable=True),
sa.Column('email_format', sa.Enum('visualization', 'data', name='sliceemailreportformat'), nullable=True),
sa.Column('created_by_fk', sa.Integer(), nullable=True),
sa.Column('changed_by_fk', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
sa.ForeignKeyConstraint(['slice_id'], ['slices.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_slice_email_schedules_active'), 'slice_email_schedules', ['active'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_slice_email_schedules_active'), table_name='slice_email_schedules')
op.drop_table('slice_email_schedules')
op.drop_index(op.f('ix_dashboard_email_schedules_active'), table_name='dashboard_email_schedules')
op.drop_table('dashboard_email_schedules')
# ### end Alembic commands ###
|
apache-2.0
| -3,753,114,214,703,253,500
| 47.247059
| 119
| 0.691539
| false
| 3.51113
| false
| false
| false
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/shtiker/OptionsPageGUI.py
|
1
|
3792
|
from direct.gui.DirectGui import DirectButton, DirectLabel
from panda3d.core import TextNode, Vec4
Preloaded = {}
def loadModels():
if Preloaded:
return
gui = loader.loadModel('phase_3.5/models/gui/fishingBook.bam')
Preloaded['tab1'] = gui.find('**/tabs/polySurface1')
Preloaded['tab2'] = gui.find('**/tabs/polySurface2')
gui.removeNode()
del gui
guiButton = loader.loadModel('phase_3/models/gui/quit_button')
Preloaded['button1'] = guiButton.find('**/QuitBtn_UP')
Preloaded['button2'] = guiButton.find('**/QuitBtn_DN')
Preloaded['button3'] = guiButton.find('**/QuitBtn_RLVR')
guiButton.removeNode()
del guiButton
normalColor = (1, 1, 1, 1)
clickColor = (0.8, 0.8, 0, 1)
rolloverColor = (0.15, 0.82, 1.0, 1)
disabledColor = (1.0, 0.98, 0.15, 1)
class OptionTab(DirectButton):
def __init__(self, tabType=2, parent=None, **kw):
loadModels()
if parent is None:
parent = aspect2d
if tabType == 1:
image = Preloaded['tab1']
elif tabType == 2:
image = Preloaded['tab2']
else:
image = None
optiondefs = (
('relief', None, None),
('text_align', TextNode.ALeft, None),
('text_fg', Vec4(0.2, 0.1, 0, 1), None),
('image', image, None),
('image_color', normalColor, None),
('image1_color', clickColor, None),
('image2_color', rolloverColor, None),
            ('image3_color', disabledColor, None),
('image_scale', (0.033, 0.033, 0.035), None),
('image_hpr', (0, 0, -90), None)
)
self.defineoptions(kw, optiondefs)
DirectButton.__init__(self, parent)
self.initialiseoptions(OptionTab)
buttonbase_xcoord = 0.35
buttonbase_ycoord = 0.45
class OptionButton(DirectButton):
def __init__(self, parent=None, wantLabel=False, z=buttonbase_ycoord, labelZ=None,
labelOrientation='left', labelPos=None, labelText='', image_scale=(0.7, 1, 1), text='', **kw):
loadModels()
if parent is None:
parent = aspect2d
pos = (buttonbase_xcoord, 0, z) if not kw.get('pos') else kw['pos']
optiondefs = (
('relief', None, None),
('image', (Preloaded['button1'], Preloaded['button2'], Preloaded['button3']), None),
('image_scale', image_scale, None),
('text', text, None),
('text_scale', 0.052, None),
('text_pos', (0, -0.02), None),
('pos', pos, None),
)
self.defineoptions(kw, optiondefs)
DirectButton.__init__(self, parent)
self.initialiseoptions(OptionButton)
if wantLabel:
self.label=OptionLabel(parent=self, z=labelZ, pos=labelPos, orientation=labelOrientation,
text=labelText)
titleHeight = 0.61
textStartHeight = 0.45
leftMargin = -0.72
class OptionLabel(DirectLabel):
def __init__(self, parent=None, z=textStartHeight, text_wordwrap=16, text='',
orientation='left', **kw):
loadModels()
if parent is None:
parent = aspect2d
if orientation == 'left':
pos = (leftMargin, 0, z)
text_align = TextNode.ALeft
else:
pos = kw['pos']
text_align = TextNode.ACenter
optiondefs = (
('relief', None, None),
('pos', pos, None),
('text_align', text_align, None),
('text_scale', 0.052, None),
('text_wordwrap', text_wordwrap, None),
('text', text, None)
)
self.defineoptions(kw, optiondefs)
DirectLabel.__init__(self, parent)
self.initialiseoptions(OptionLabel)
|
apache-2.0
| -5,793,325,956,545,810,000
| 31.144068
| 111
| 0.554325
| false
| 3.530726
| false
| false
| false
|
ISISComputingGroup/EPICS-inst_servers
|
CollisionAvoidanceMonitor/configurations/config_larmor.py
|
1
|
4210
|
from math import radians
from CollisionAvoidanceMonitor.transform import Transformation
import os
# Config happens here:
# Colors for each body
colors = [(0.6, 0.6, 0.6), (1, 0, 1), (1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0.5, 0), (0.2, 0.2, 1), (1, 1, 1)]
# PV prefix
pv_prefix = os.environ["MYPVPREFIX"]
# PV prefix for controlling the system
control_pv = "{}COLLIDE:".format(pv_prefix)
# Define the geometry of the system in mm
# Coordinate origin at arc centre, with nominal beam height
z_stage = dict(name="Z_Stage", size=(1000.0, 1000.0, 630.0), color=colors[0])
rot_stage = dict(name="Rotation", size=(600.0, 600.0, 165.0), color=colors[1])
bot_arc = dict(name="Bottom_Arc", size=(600.0, 600.0, 120.0), color=colors[2])
top_arc = dict(name="Top_Arc", size=(600.0, 600.0, 120.0), color=colors[3])
fine_z = dict(name="Fine_Z", size=(600.0, 600.0, 120.0), color=colors[4])
y_base = dict(name="Y_Stage", size=(900.0, 1200.0, 50.0), color=colors[4])
y_stage = dict(name="Y_Carriage", size=(600.0, 300.0, 20.0), color=colors[5])
x_stage = dict(name="X_Carriage", size=(520.0, 300.0, 20.0), color=colors[6])
sample = dict(name="Sample", size=(250.0, 250.0, 150.0), color=colors[6])
snout = dict(name="Snout", position=(-300, 0, 0), size=(500, 70, 70), color=colors[7])
slits = dict(name="Slits", position=(450, 0, 0), size=(100, 300, 300), color=colors[7])
# Define some variables to describe the geometry
centre_arc = 750.0
beam_ref = 1625.0
# Define some search parameters
coarse = 20.0
fine = 0.5
# Define the oversized-ness of each body - a global value in mm
oversize = coarse / 4
# List of pairs to ignore [0, 1]...[7, 8]
ignore = []
for i in range(0, 9):
for j in range(i, 9):
ignore.append([i, j])
def move_everything(axes):
# Z stage
t = Transformation()
size = axes[0] + z_stage['size'][2]
t.translate(z=-beam_ref + size / 2)
yield t, dict(z=size)
# Rotation
t = Transformation()
t.translate(z=-beam_ref + axes[0] + z_stage['size'][2] + rot_stage['size'][2] / 2)
t.rotate(rz=radians(axes[1]))
yield t
# Bottom arc
t = Transformation()
t.translate(z=-centre_arc - (bot_arc['size'][2] / 2 + top_arc['size'][2]))
t.rotate(ry=radians(axes[2]))
t.translate(z=centre_arc + (bot_arc['size'][2] / 2 + top_arc['size'][2]))
t.translate(z=-beam_ref + axes[0] + z_stage['size'][2] + rot_stage['size'][2] + bot_arc['size'][2] / 2)
t.rotate(rz=radians(axes[1]))
yield t
# Top arc
t = Transformation(t)
t.translate(z=+(centre_arc + top_arc['size'][2] / 2), forward=False)
t.rotate(rx=radians(axes[3]), forward=False)
t.translate(z=-(centre_arc + top_arc['size'][2] / 2), forward=False)
t.translate(z=top_arc['size'][2] / 2 + bot_arc['size'][2] / 2, forward=False)
yield t
# Fine Z
u = Transformation(t)
size = axes[4] + fine_z['size'][2]
u.translate(z=size / 2 + top_arc['size'][2] / 2, forward=False)
yield u, dict(z=size)
# Base of Y stage (top of fine Z)
t = Transformation(t)
size = axes[4] + fine_z['size'][2]
t.translate(z=size + top_arc['size'][2] / 2 + y_base['size'][2] / 2, forward=False)
yield t
# Y stage
t = Transformation(t)
t.translate(y=axes[5], z=y_base['size'][2] / 2 + y_stage['size'][2] / 2, forward=False)
yield t
# X stage
t = Transformation(t)
t.translate(x=axes[6], z=y_stage['size'][2] / 2 + x_stage['size'][2] / 2, forward=False)
yield t
# Sample
t = Transformation(t)
t.translate(z=x_stage['size'][2] / 2 + sample['size'][2] / 2, forward=False)
yield t
moves = move_everything
# Put them in a list
geometries = [z_stage, rot_stage, bot_arc, top_arc, fine_z, y_base, y_stage, x_stage, sample, snout, slits]
# Attach monitors to readbacks
pvs = ["{}MOT:MTR0101",
"{}MOT:MTR0102",
"{}MOT:MTR0103",
"{}MOT:MTR0104",
"{}MOT:MTR0105",
"{}MOT:MTR0106",
"{}MOT:MTR0107"]
pvs = [pv.format(pv_prefix) for pv in pvs]
hardlimits = [[-220, 100],
[-180.0, 180.0],
[-20, 20.0],
[-20.0, 20.0],
[0.0, 30.0],
[-300, 300],
[-37.5, 37.5]]
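# Hedged illustration (added here; not part of the original configuration).
# move_everything is a generator: given one value per axis it yields, for each
# moving body in order (z stage, rotation, bottom arc, top arc, fine z,
# y base, y carriage, x carriage, sample), either a Transformation or a
# (Transformation, size-override-dict) pair; the static snout and slits are
# not yielded. The zero pose below is an arbitrary choice for the sketch.
if __name__ == '__main__':
    for item in moves([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]):
        transform = item[0] if isinstance(item, tuple) else item
        print(type(transform).__name__)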
|
bsd-3-clause
| 8,660,980,917,994,422,000
| 27.255034
| 109
| 0.585273
| false
| 2.683238
| false
| false
| false
|
AndreasHeger/alignlib
|
python/tests/test_MultAlignment.py
|
1
|
9238
|
# alignlib - a library for aligning protein sequences
#
# $Id: test_Alignment.py,v 1.3 2004/01/23 17:34:58 aheger Exp $
#
# Copyright (C) 2004 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import unittest, sys, os
from alignlib import *
class MultAlignmentTestCase( unittest.TestCase ):
mReferenceSequence = "0123456789"
mNumSequences = 3
def setUp( self ):
self.mAlignandum = makeSequence( self.mReferenceSequence )
self.mContainer = makeAlignmentBlocks()
def constructMali(self):
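        # Descriptive note (added for clarity): addDiagonal(row_from, row_to,
        # offset) appears to align alignment rows [row_from, row_to) to
        # sequence positions row + offset, so the first alignment covers
        # sequence positions 2-4 and 7-9 ("234789"), matching testMatrix below.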
mali = makeMultAlignment()
ali = self.mContainer.getNew()
ali.addDiagonal( 0,3,+2 );
ali.addDiagonal( 3,6,+4 );
mali.add( ali );
ali = self.mContainer.getNew()
ali.addDiagonal( 0,1,+1 );
ali.addDiagonal( 1,6,+3 );
mali.add( ali );
mali.add( ali );
seqs = StringVector()
for x in range( self.mNumSequences):
seqs.append( self.mReferenceSequence )
return mali, seqs
def testBuild(self):
mali, seqs = self.constructMali()
self.assertEqual( mali.getNumSequences(), len(seqs) )
self.assertEqual( mali.getLength(), 6 )
def testExpandSimple(self):
"""expand mali without sequences."""
mali, seqs = self.constructMali()
mali.expand( AlignandumVector() )
format = MultAlignmentFormatPlain( mali, seqs )
result = [ x.split("\t") for x in str(format).split("\n") ]
self.assertEqual( result[0], ["2", "2----3456789", "10" ] )
self.assertEqual( result[1], ["1", "123--45--678", "9" ] )
self.assertEqual( result[2], ["1", "1--2345--678", "9" ] )
def testExpandFull(self):
"""expand mali with sequences."""
mali, seqs = self.constructMali()
v = AlignandumVector()
for x in seqs: v.append( makeSequence(x) )
mali.expand( v )
format = MultAlignmentFormatPlain( mali, seqs )
result = [ x.split("\t") for x in str(format).split("\n") ]
self.assertEqual( result[0], ["0", "01--2----3456789--", "10" ] )
self.assertEqual( result[1], ["0", "--0-123--45--6789-", "10" ] )
self.assertEqual( result[2], ["0", "---01--2345--678-9", "10" ] )
def testGetGapsSum(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
counts = mali.getGapCounts( AlignandumVector(), AggSum )
self.assertEqual( tuple(counts), (0,4,0,2,0,0,0) )
def testGetGapsCount(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
counts = mali.getGapCounts( AlignandumVector(), AggCount )
self.assertEqual( tuple(counts), (0,2,0,1,0,0,0) )
def testGetGapsMin(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
counts = mali.getGapCounts( AlignandumVector(), AggMin )
self.assertEqual( tuple(counts[1:-1]), (0,0,0,0,0) )
def testGetGapsMax(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
counts = mali.getGapCounts( AlignandumVector(), AggMax )
self.assertEqual( tuple(counts), (0,2,0,2,0,0,0) )
def testGetGapsSumFull(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
v = AlignandumVector()
for x in seqs: v.append( makeSequence(x) )
counts = mali.getGapCounts( v, AggSum )
self.assertEqual( tuple(counts), (4,4,0,2,0,0,2) )
def testGetGapsCountFull(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
v = AlignandumVector()
for x in seqs: v.append( makeSequence(x) )
counts = mali.getGapCounts( v, AggCount )
self.assertEqual( tuple(counts), (3,2,0,1,0,0,2) )
def testGetGapsMinFull(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
v = AlignandumVector()
for x in seqs: v.append( makeSequence(x) )
counts = mali.getGapCounts( v, AggMin )
self.assertEqual( tuple(counts), (1,0,0,0,0,0,0) )
def testGetGapsMaxFull(self):
"""test the gap count function."""
mali, seqs = self.constructMali()
v = AlignandumVector()
for x in seqs: v.append( makeSequence(x) )
counts = mali.getGapCounts( v, AggMax )
self.assertEqual( tuple(counts), (2,2,0,2,0,0,1) )
def testMatrix(self):
mali, seqs = self.constructMali()
test_matrix = ( "234789",
"145678",
"145678")
matrix = mali.getPositionMatrix()
self.assertEqual( matrix.getNumRows(), len(test_matrix) )
self.assertEqual( matrix.getNumCols(), len(test_matrix[0]) )
for x in range( len(test_matrix ) ):
for y in range( len(test_matrix[0] ) ):
self.assertEqual( matrix.getValue( x, y), int(test_matrix[x][y]) )
def testRealign(self):
"""test realignment."""
mali, seqs = self.constructMali()
v = AlignandumVector()
seqs = [ "IIACDIIEFG" ,
"IAILCDEFGI" ,
"KALKCDEFGK" ,
]
for x in seqs: v.append( makeSequence(x) )
counts = mali.getGapCounts( v, AggCount )
ma = makeMultipleAlignatorSimple( makeAlignatorDPFull( ALIGNMENT_LOCAL, 0, 0 ) )
map_old2new = makeAlignmentVector()
offset = 0
fragments = []
for col in range(len(counts)):
# realign columns with more than one sequence with
# unaligned preceding residues
if counts[col] > 1:
for s in range(len(seqs)):
ali = mali.getRow( s )
y = col - 1
while y >= 0 and ali.mapRowToCol( y ) < 0:
y -= 1
if y < 0: start = 0
else: start = ali.mapRowToCol( y ) + 1
if col == mali.getLength(): end = len(seqs[s])
else: end = ali.mapRowToCol( col )
v[s].useSegment( start, end )
result = makeMultAlignment()
ma.align( result, v )
# sort out where the fragment belongs and move
# into the right place
l = result.getLength()
result.move( col + offset )
fragments.append( result )
offset += l
map_old2new.addPair( col, col+offset )
# insert gaps into the original
mali.map( map_old2new, RC )
# merge the partial alignments inside
for fragment in fragments:
mali.merge( fragment )
format = MultAlignmentFormatPlain( mali, v )
result = [ x.split("\t") for x in str(format).split("\n") ]
self.assertEqual( result[0], ['0', 'II-A---CDEFG--', '10'] )
self.assertEqual( result[1], ['0', 'I--AIL-CDEFGI-', '10'] )
self.assertEqual( result[2], ['0', '--KA-LKCDEFG-K', '10'] )
class MultAlignmentBlocksTestCase( MultAlignmentTestCase ):
def setUp( self ):
MultAlignmentTestCase.setUp( self )
self.mContainer = makeAlignmentBlocks()
class MultAlignmentSetTestCase( MultAlignmentTestCase ):
def setUp( self ):
MultAlignmentTestCase.setUp( self )
self.mContainer = makeAlignmentSet()
class MultAlignmentHashTestCase( MultAlignmentTestCase ):
def setUp( self ):
MultAlignmentTestCase.setUp( self )
self.mContainer = makeAlignmentHash()
class MultAlignmentSetColTestCase( MultAlignmentTestCase ):
def setUp( self ):
MultAlignmentTestCase.setUp( self )
self.mContainer = makeAlignmentSetCol()
class MultAlignmentHashDiagonalTestCase( MultAlignmentTestCase ):
def setUp( self ):
MultAlignmentTestCase.setUp( self )
self.mContainer = makeAlignmentHashDiagonal()
def suite():
suite = unittest.TestSuite()
suite.addTest(MultAlignmentTestCase)
suite.addTest(MultAlignmentBlocksTestCase )
suite.addTest(MultAlignmentSetTestCase)
suite.addTest(MultAlignmentHashTestCase)
suite.addTest(MultAlignmentSetColTestCase)
suite.addTest(MultAlignmentHashDiagonalTestCase)
return suite
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
| -4,594,516,625,187,176,000
| 35.513834
| 88
| 0.582485
| false
| 3.662966
| true
| false
| false
|
sjTaylor/cmd_queue
|
client.py
|
1
|
2997
|
import socket
import select
import codes
import funs
import os
import subprocess
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('server_ip', type=str, help='address of the server (e.g. 198.123.1.3)')
parser.add_argument('--server_port', type=int, default=12345, required=False, help='port server is listening on')
args = parser.parse_args()
myid = 0
output_prefix = None
padding = None
try:
connection = socket.create_connection((args.server_ip, args.server_port))
except:
print('could not connect to server', flush=True)
raise SystemError
running = True
cmd_timeout = None
log = funs.get_logger(__name__)
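# Descriptive note (added for clarity): the loop below implements the client
# side of the queue protocol as written: the server first sends send_config
# (client id, working directory, output prefix, padding, command timeout),
# then send_cmd messages whose commands run through the shell with stdout and
# stderr captured to per-command files; each completion is reported back as
# (cmd_number, client_id, return_code), and an exiting message (or typing
# 'exit' locally) shuts the client down.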
while running:
readable, foo1, foo2 = select.select([connection], [], [], 2)
for qq in funs.getinput():
if 'exit' in qq:
running = False
funs.send(connection, funs.encode(codes.disconnecting))
for s in readable:
message = funs.recv(s)
code, data = funs.decode(message)
json_data = data
if code == codes.send_config:
assert 'client_id' in json_data and 'working_directory' in json_data and 'output_prefix' in json_data
assert 'padding' in json_data and 'timeout' in json_data
os.chdir(json_data['working_directory'])
myid = json_data['client_id']
output_prefix = json_data['output_prefix']
padding = json_data['padding']
cmd_timeout = json_data['timeout']
elif code == codes.send_cmd:
assert 'command' in json_data and 'cmd_number' in json_data
command = json_data['command']
cmdnumber = json_data['cmd_number']
            log.info('Received command number : %d' % cmdnumber)
log.info('--executing : %s' % command)
log.info('will write out to: |%s|' % funs.do_dir(output_prefix, padding, 'stdout', cmdnumber))
log.info('will write err to: |%s|' % funs.do_dir(output_prefix, padding, 'stderr', cmdnumber))
with open(funs.do_dir(output_prefix, padding, 'stdout', cmdnumber), 'w') as sstdout:
with open(funs.do_dir(output_prefix, padding, 'stderr', cmdnumber), 'w') as sstderr:
return_code = subprocess.call(command,
shell=True,
stdout=sstdout,
stderr=sstderr,
timeout=cmd_timeout)
if return_code is None:
log.info('--return_code is None')
return_code = 1
# cmd number, client id, return code
funs.send(connection, funs.encode(codes.finished, (cmdnumber, myid, return_code)))
if code == codes.exiting:
log.info('got signal to stop and shut down')
running = False
else:
funs.send(connection, funs.encode(codes.idle))
connection.close()
|
mit
| -2,199,648,740,256,074,200
| 36
| 113
| 0.575909
| false
| 3.912533
| false
| false
| false
|
jrconlin/server-key-exchange
|
keyexchange/util.py
|
1
|
3643
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Sync Server
#
# The Initial Developer of the Original Code is the Mozilla Foundation.
# Portions created by the Initial Developer are Copyright (C) 2010
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Tarek Ziade (tarek@mozilla.com)
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
""" Various helpers.
"""
import json
from webob import Response
from services.util import randchar
CID_CHARS = '23456789abcdefghijkmnpqrstuvwxyz'
def json_response(data, dump=True, **kw):
"""Returns Response containing a json string"""
if dump:
data = json.dumps(data)
return Response(data, content_type='application/json', **kw)
def generate_cid(size=4):
"""Returns a random channel id."""
return ''.join([randchar(CID_CHARS) for i in range(size)])
class MemoryClient(dict):
"""Fallback if a memcache client is not installed.
"""
def __init__(self, servers):
pass
def set(self, key, value, time=0):
self[key] = value
return True
cas = set
def add(self, key, value, time=0):
if key in self:
return False
self[key] = value
return True
def replace(self, key, value, time=0):
if key not in self:
return False
self[key] = value
return True
def delete(self, key):
        if key not in self:
return True # that's how memcache libs do...
del self[key]
return True
def incr(self, key):
val = self[key]
self[key] = str(int(val) + 1)
class PrefixedCache(object):
def __init__(self, cache, prefix=''):
self.cache = cache
        self.prefix = prefix
def incr(self, key):
return self.cache.incr(self.prefix + key)
def get(self, key):
return self.cache.get(self.prefix + key)
def set(self, key, value, **kw):
return self.cache.set(self.prefix + key, value, **kw)
def delete(self, key):
return self.cache.delete(self.prefix + key)
def add(self, key, value, **kw):
return self.cache.add(self.prefix + key, value, **kw)
def get_memcache_class(memory=False):
"""Returns the memcache class."""
if memory:
return MemoryClient
import memcache
return memcache.Client
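# Hedged usage sketch (added; not part of the original module), using only the
# helpers defined above: get_memcache_class(memory=True) falls back to
# MemoryClient, and PrefixedCache namespaces keys before delegating to it.
if __name__ == '__main__':
    cache_cls = get_memcache_class(memory=True)
    cache = PrefixedCache(cache_cls(servers=[]), prefix='ke:')
    cache.set('chan', generate_cid())
    print(cache.get('chan'))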
|
mpl-2.0
| -8,685,065,241,772,336,000
| 29.613445
| 77
| 0.664562
| false
| 3.830705
| false
| false
| false
|
jddixon/bindex
|
setup.py
|
1
|
1138
|
#!/usr/bin/python3
# bindex/setup.py
""" Setuptools project configuration for bindex. """
from os.path import exists
from setuptools import setup
LONG_DESC = None
if exists('README.md'):
with open('README.md', 'r') as file:
LONG_DESC = file.read()
setup(name='bindex',
version='0.0.24',
author='Jim Dixon',
author_email='jddixon@gmail.com',
long_description=LONG_DESC,
packages=['bindex'],
package_dir={'': 'src'},
py_modules=[],
include_package_data=False,
zip_safe=False,
scripts=[],
description='index content-keyed files',
url='https://jddixon.github.io/bindex',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],)
|
mit
| -3,196,861,824,986,049,500
| 29.756757
| 73
| 0.593146
| false
| 3.965157
| false
| false
| false
|
a2ialabelme/LabelMeAnnotationTool
|
toolBar.py
|
1
|
1837
|
#
# Copyright (C) 2011 Michael Pitidis, Hussein Abdulwahid.
#
# This file is part of Labelme.
#
# Labelme is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Labelme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labelme. If not, see <http://www.gnu.org/licenses/>.
#
#from PyQt4.QtGui import *
#from PyQt4.QtCore import *
from PySide.QtGui import *
from PySide.QtCore import *
class ToolBar(QToolBar):
def __init__(self, title):
super(ToolBar, self).__init__(title)
layout = self.layout()
m = (0, 0, 0, 0)
layout.setSpacing(0)
layout.setContentsMargins(*m)
self.setContentsMargins(*m)
self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
def addAction(self, action):
if isinstance(action, QWidgetAction):
return super(ToolBar, self).addAction(action)
btn = ToolButton()
btn.setDefaultAction(action)
btn.setToolButtonStyle(self.toolButtonStyle())
self.addWidget(btn)
class ToolButton(QToolButton):
"""ToolBar companion class which ensures all buttons have the same size."""
minSize = (60, 60)
def minimumSizeHint(self):
ms = super(ToolButton, self).minimumSizeHint()
w1, h1 = ms.width(), ms.height()
w2, h2 = self.minSize
ToolButton.minSize = max(w1, w2), max(h1, h2)
return QSize(*ToolButton.minSize)
|
gpl-3.0
| 8,428,212,745,408,746,000
| 33.660377
| 79
| 0.684268
| false
| 3.630435
| false
| false
| false
|
patochectp/navitia
|
source/jormungandr/jormungandr/__init__.py
|
1
|
3983
|
# encoding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import logging
import logging.config
import os
from flask import Flask, got_request_exception
from flask_restful import Api
from flask_caching import Cache
from flask_cors import CORS
import sys
import six
from jormungandr import init
app = Flask(__name__) # type: Flask
init.load_configuration(app)
init.logger(app)
# we want to patch gevent as early as possible
if app.config.get(str('PATCH_WITH_GEVENT_SOCKET'), False):
init.patch_http()
from jormungandr import new_relic
new_relic.init(app.config.get(str('NEWRELIC_CONFIG_PATH'), None))
from jormungandr.exceptions import log_exception
from jormungandr.helper import ReverseProxied, NavitiaRequest, NavitiaRule
from jormungandr import compat, utils
app.url_rule_class = NavitiaRule
app.request_class = NavitiaRequest
CORS(
app,
vary_headers=True,
allow_credentials=True,
send_wildcard=False,
headers=['Access-Control-Request-Headers', 'Authorization'],
)
app.config[str('CORS_HEADERS')] = 'Content-Type'
app.wsgi_app = ReverseProxied(app.wsgi_app) # type: ignore
got_request_exception.connect(log_exception, app)
# we want the old behavior for reqparse
compat.patch_reqparse()
rest_api = Api(app, catch_all_404s=True, serve_challenge_on_401=True)
from navitiacommon.models import db
db.init_app(app)
cache = Cache(app, config=app.config[str('CACHE_CONFIGURATION')]) # type: Cache
memory_cache = Cache(app, config=app.config[str('MEMORY_CACHE_CONFIGURATION')]) # type: Cache
if app.config[str('AUTOCOMPLETE_SYSTEMS')] is not None:
global_autocomplete = {k: utils.create_object(v) for k, v in app.config[str('AUTOCOMPLETE_SYSTEMS')].items()}
else:
from jormungandr.autocomplete.kraken import Kraken
global_autocomplete = {'kraken': Kraken()}
from jormungandr.equipments.equipment_provider_manager import EquipmentProviderManager
equipment_provider_manager = EquipmentProviderManager(app.config[str('EQUIPMENT_DETAILS_PROVIDERS')])
from jormungandr.instance_manager import InstanceManager
i_manager = InstanceManager(
instances_dir=app.config.get(str('INSTANCES_DIR'), None),
instance_filename_pattern=app.config.get(str('INSTANCES_FILENAME_PATTERN'), '*.json'),
start_ping=app.config.get(str('START_MONITORING_THREAD'), True),
)
i_manager.initialisation()
from jormungandr.stat_manager import StatManager
stat_manager = StatManager()
bss_provider_manager = init.bss_providers(app)
from jormungandr.parking_space_availability.car.car_park_provider_manager import CarParkingProviderManager
car_park_provider_manager = CarParkingProviderManager(app.config[str('CAR_PARK_PROVIDER')])
from jormungandr import api
def setup_package():
i_manager.stop()
|
agpl-3.0
| -1,356,667,356,695,377,400
| 30.611111
| 113
| 0.762491
| false
| 3.358347
| true
| false
| false
|
OSSESAC/odoopubarquiluz
|
addons/hr_timesheet_sheet/hr_timesheet_sheet.py
|
1
|
30074
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import netsvc
class hr_timesheet_sheet(osv.osv):
_name = "hr_timesheet_sheet.sheet"
_inherit = "mail.thread"
_table = 'hr_timesheet_sheet_sheet'
_order = "id desc"
_description="Timesheet"
def _total(self, cr, uid, ids, name, args, context=None):
""" Compute the attendances, analytic lines timesheets and differences between them
for all the days of a timesheet and the current day
"""
res = {}
for sheet in self.browse(cr, uid, ids, context=context or {}):
res.setdefault(sheet.id, {
'total_attendance': 0.0,
'total_timesheet': 0.0,
'total_difference': 0.0,
})
for period in sheet.period_ids:
res[sheet.id]['total_attendance'] += period.total_attendance
res[sheet.id]['total_timesheet'] += period.total_timesheet
res[sheet.id]['total_difference'] += period.total_attendance - period.total_timesheet
return res
def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')])
ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')])
if len(ids_signin) != len(ids_signout):
raise osv.except_osv(('Warning!'),_('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
return True
def copy(self, cr, uid, ids, *args, **argv):
raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.'))
def create(self, cr, uid, vals, context=None):
if 'employee_id' in vals:
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must assign it to a user.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'employee_id' in vals:
new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
if not new_user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must assign it to a user.'))
if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
# In addition to the date order, deleting attendances are done before inserting attendances
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
if vals.get('attendances_ids'):
for timesheet in self.browse(cr, uid, ids):
if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
raise osv.except_osv(_('Warning !'), _('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)'))
return res
def sort_attendances(self, cr, uid, attendance_tuples, context=None):
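        # Descriptive note (added for clarity): attendance_tuples follow the
        # OpenERP one2many command convention -- (0, _, vals) create,
        # (1, id, vals) update and (4, id) link all carry a usable date,
        # while (2, id) delete and (3, id) unlink sort first so that removals
        # are written before insertions.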
date_attendances = []
for att_tuple in attendance_tuples:
if att_tuple[0] in [0,1,4]:
if att_tuple[0] in [0,1]:
name = att_tuple[2]['name']
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
date_attendances.append((1, name, att_tuple))
elif att_tuple[0] in [2,3]:
date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
else:
date_attendances.append((0, False, att_tuple))
date_attendances.sort()
return [att[2] for att in date_attendances]
def button_confirm(self, cr, uid, ids, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
di = sheet.user_id.company_id.timesheet_max_difference
if (abs(sheet.total_difference) < di) or not di:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'hr_timesheet_sheet.sheet', sheet.id, 'confirm', cr)
else:
raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') %(di,))
return True
def attendance_action_change(self, cr, uid, ids, context=None):
hr_employee = self.pool.get('hr.employee')
employee_ids = []
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id)
return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)
_columns = {
'name': fields.char('Note', size=64, select=1,
states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'timesheet_ids' : fields.one2many('hr.analytic.timesheet', 'sheet_id',
'Timesheet lines',
readonly=True, states={
'draft': [('readonly', False)],
'new': [('readonly', False)]}
),
'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
'state' : fields.selection([
('new', 'New'),
('draft','Open'),
('confirm','Waiting Approval'),
('done','Approved')], 'Status', select=True, required=True, readonly=True,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
\n* The \'Confirmed\' status is used for to confirm the timesheet by user. \
\n* The \'Done\' status is used when users timesheet is accepted by his/her senior.'),
'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
'company_id': fields.many2one('res.company', 'Company'),
'department_id':fields.many2one('hr.department','Department'),
}
def _default_date_from(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return time.strftime('%Y-%m-01')
elif r=='week':
return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-01-01')
return time.strftime('%Y-%m-%d')
def _default_date_to(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')
elif r=='week':
return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-12-31')
return time.strftime('%Y-%m-%d')
def _default_employee(self, cr, uid, context=None):
emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
return emp_ids and emp_ids[0] or False
_defaults = {
'date_from' : _default_date_from,
'date_to' : _default_date_to,
'state': 'new',
'employee_id': _default_employee,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
}
def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
new_user_id = forced_user_id or sheet.user_id and sheet.user_id.id
if new_user_id:
cr.execute('SELECT id \
FROM hr_timesheet_sheet_sheet \
WHERE (date_from <= %s and %s <= date_to) \
AND user_id=%s \
AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id))
if cr.fetchall():
return False
return True
_constraints = [
(_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']),
]
def action_set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
wf_service = netsvc.LocalService('workflow')
for id in ids:
wf_service.trg_create(uid, self._name, id, cr)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
return [(r['id'], _('Week ')+datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U')) \
for r in self.read(cr, uid, ids, ['date_from'],
context=context, load='_classic_write')]
def unlink(self, cr, uid, ids, context=None):
sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context)
for sheet in sheets:
if sheet['state'] in ('confirm', 'done'):
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.'))
elif sheet['total_attendance'] <> 0.00:
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which have attendance entries.'))
return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
department_id = False
user_id = False
if employee_id:
empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
department_id = empl_id.department_id.id
user_id = empl_id.user_id.id
return {'value': {'department_id': department_id, 'user_id': user_id,}}
# ------------------------------------------------
# OpenChatter methods and notifications
# ------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
if not empids:
return False
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
return dom
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
#get the default date (should be: today)
res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
#if we got the dates from and to from the timesheet and if the default date is in between, we use the default
#but if the default isn't included in those dates, we use the date start of the timesheet as default
if context.get('timesheet_date_from') and context.get('timesheet_date_to'):
if context['timesheet_date_from'] <= res <= context['timesheet_date_to']:
return res
return context.get('timesheet_date_from')
#if we don't get the dates from the timesheet, we return the default value from super()
return res
class hr_timesheet_line(osv.osv):
_inherit = "hr.analytic.timesheet"
def _sheet(self, cursor, user, ids, name, args, context=None):
sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
res = {}.fromkeys(ids, False)
for ts_line in self.browse(cursor, user, ids, context=context):
sheet_ids = sheet_obj.search(cursor, user,
[('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date),
('employee_id.user_id', '=', ts_line.user_id.id)],
context=context)
if sheet_ids:
# [0] because only one sheet possible for an employee between 2 dates
res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
return res
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
ts_line_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT l.id
FROM hr_analytic_timesheet l
INNER JOIN account_analytic_line al
ON (l.line_id = al.id)
WHERE %(date_to)s >= al.date
AND %(date_from)s <= al.date
AND %(user_id)s = al.user_id
GROUP BY l.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
ts_line_ids.extend([row[0] for row in cr.fetchall()])
return ts_line_ids
def _get_account_analytic_line(self, cr, uid, ids, context=None):
ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)])
return ts_line_ids
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet', select="1",
type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade",
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10),
'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10),
},
),
}
def _check_sheet_state(self, cr, uid, ids, context=None):
if context is None:
context = {}
for timesheet_line in self.browse(cr, uid, ids, context=context):
if timesheet_line.sheet_id and timesheet_line.sheet_id.state not in ('draft', 'new'):
return False
return True
_constraints = [
(_check_sheet_state, 'You cannot modify an entry in a Confirmed/Done timesheet !', ['state']),
]
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs)
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
return True
def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None):
return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids])
hr_timesheet_line()
class hr_attendance(osv.osv):
_inherit = "hr.attendance"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
if 'name' in context:
return context['name'] + time.strftime(' %H:%M:%S')
return time.strftime('%Y-%m-%d %H:%M:%S')
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
attendance_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT a.id
FROM hr_attendance a
INNER JOIN hr_employee e
INNER JOIN resource_resource r
ON (e.resource_id = r.id)
ON (a.employee_id = e.id)
WHERE %(date_to)s >= date_trunc('day', a.name)
AND %(date_from)s <= a.name
AND %(user_id)s = r.user_id
GROUP BY a.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
attendance_ids.extend([row[0] for row in cr.fetchall()])
return attendance_ids
def _sheet(self, cursor, user, ids, name, args, context=None):
sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
res = {}.fromkeys(ids, False)
for attendance in self.browse(cursor, user, ids, context=context):
date_to = datetime.strftime(datetime.strptime(attendance.name[0:10], '%Y-%m-%d'), '%Y-%m-%d %H:%M:%S')
sheet_ids = sheet_obj.search(cursor, user,
[('date_to', '>=', date_to), ('date_from', '<=', attendance.name),
('employee_id', '=', attendance.employee_id.id)],
context=context)
if sheet_ids:
# [0] because only one sheet possible for an employee between 2 dates
res[attendance.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
return res
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet',
type='many2one', relation='hr_timesheet_sheet.sheet',
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10),
},
)
}
_defaults = {
'name': _get_default_date,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if 'sheet_id' in context:
ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, context['sheet_id'], context=context)
if ts.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
res = super(hr_attendance,self).create(cr, uid, vals, context=context)
if 'sheet_id' in context:
if context['sheet_id'] != self.browse(cr, uid, res, context=context).sheet_id.id:
raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
'date outside the current timesheet dates.'))
return res
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context)
if 'sheet_id' in context:
for attendance in self.browse(cr, uid, ids, context=context):
if context['sheet_id'] != attendance.sheet_id.id:
raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
'date outside the current timesheet dates.'))
return res
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet'))
return True
hr_attendance()
class hr_timesheet_sheet_sheet_day(osv.osv):
_name = "hr_timesheet_sheet.sheet.day"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.date('Date', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"),
'total_timesheet': fields.float('Total Timesheet', readonly=True),
'total_attendance': fields.float('Attendance', readonly=True),
'total_difference': fields.float('Difference', readonly=True),
}
def init(self, cr):
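        # The view below aggregates, per (date, sheet): timesheet hours booked in
        # account_analytic_line and attendance minutes derived from sign_in/sign_out
        # punches (sign_in counts negative, sign_out positive). A day with an odd
        # number of punches sums negative and is closed against the current UTC time
        # (or a full 1440 minutes for past days); total_difference is attendance
        # minus timesheet, rounded to two decimals.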
cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as
SELECT
id,
name,
sheet_id,
total_timesheet,
total_attendance,
cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference
FROM
((
SELECT
MAX(id) as id,
name,
sheet_id,
SUM(total_timesheet) as total_timesheet,
CASE WHEN SUM(total_attendance) < 0
THEN (SUM(total_attendance) +
CASE WHEN current_date <> name
THEN 1440
ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC') * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC')
END
)
ELSE SUM(total_attendance)
END /60 as total_attendance
FROM
((
select
min(hrt.id) as id,
l.date::date as name,
s.id as sheet_id,
sum(l.unit_amount) as total_timesheet,
0.0 as total_attendance
from
hr_analytic_timesheet hrt
JOIN account_analytic_line l ON l.id = hrt.line_id
LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id
group by l.date::date, s.id
) union (
select
-min(a.id) as id,
a.name::date as name,
s.id as sheet_id,
0.0 as total_timesheet,
SUM(((EXTRACT(hour FROM a.name) * 60) + EXTRACT(minute FROM a.name)) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance
from
hr_attendance a
LEFT JOIN hr_timesheet_sheet_sheet s
ON s.id = a.sheet_id
WHERE action in ('sign_in', 'sign_out')
group by a.name::date, s.id
)) AS foo
GROUP BY name, sheet_id
)) AS bar""")
hr_timesheet_sheet_sheet_day()
class hr_timesheet_sheet_sheet_account(osv.osv):
_name = "hr_timesheet_sheet.sheet.account"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True),
'total': fields.float('Total Time', digits=(16,2), readonly=True),
'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True),
}
def init(self, cr):
cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as (
select
min(hrt.id) as id,
l.account_id as name,
s.id as sheet_id,
sum(l.unit_amount) as total,
l.to_invoice as invoice_rate
from
hr_analytic_timesheet hrt
left join (account_analytic_line l
LEFT JOIN hr_timesheet_sheet_sheet s
ON (s.date_to >= l.date
AND s.date_from <= l.date
AND s.user_id = l.user_id))
on (l.id = hrt.line_id)
group by l.account_id, s.id, l.to_invoice
)""")
hr_timesheet_sheet_sheet_account()
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'timesheet_range': fields.selection(
[('day','Day'),('week','Week'),('month','Month')], 'Timesheet range',
help="Periodicity on which you validate your timesheets."),
'timesheet_max_difference': fields.float('Timesheet allowed difference(Hours)',
help="Allowed difference in hours between the sign in/out and the timesheet " \
"computation for one sheet. Set this to 0 if you do not want any control."),
}
_defaults = {
'timesheet_range': lambda *args: 'week',
'timesheet_max_difference': lambda *args: 0.0
}
res_company()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 3,142,552,518,188,816,400
| 50.320819
| 290
| 0.557093
| false
| 3.930214
| false
| false
| false
|
RuiNascimento/krepo
|
script.module.lambdascrapers/lib/lambdascrapers/sources_ lambdascrapers/pl/trt.py
|
1
|
3892
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.blamo
import re, urllib, urlparse, base64, json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
def __init__(self):
self.priority = 1
self.language = ['pl']
self.domains = ['trt.pl']
self.base_link = 'http://www.trt.pl/'
self.search_link = 'szukaj-filmy/%s'
def movie(self, imdb, title, localtitle, aliases, year):
return title + ' ' + year
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
return tvshowtitle;
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
return url + ' s' + season.zfill(2) + 'e' + episode.zfill(2)
def contains_word(self, str_to_check, word):
return re.search(r'\b' + word + r'\b', str_to_check, re.IGNORECASE)
    def contains_all_words(self, str_to_check, words):
for word in words:
if not self.contains_word(str_to_check, word):
return False
return True
def sources(self, url, hostDict, hostprDict):
try:
words = cleantitle.getsearch(url).split(' ')
search_url = urlparse.urljoin(self.base_link, self.search_link) % urllib.quote_plus(url);
result = client.request(search_url)
sources = []
result = client.parseDOM(result, 'div', attrs={'class':'tile-container'})
for el in result :
main = client.parseDOM(el, 'h3');
link = client.parseDOM(main, 'a', ret='href')[0];
found_title = client.parseDOM(main, 'a')[0];
                if not self.contains_all_words(found_title, words):
continue
quality = client.parseDOM(el, 'a', attrs={'class':'qualityLink'});
q = 'SD'
if quality:
if(quality[0] == '720p'):
q='HD'
if(quality[0]=='1080p'):
q='1080p'
lang, info = self.get_lang_by_type(found_title)
sources.append({'source': 'trt', 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def get_lang_by_type(self, lang_type):
if self.contains_word(lang_type, 'lektor') :
return 'pl', 'Lektor'
if self.contains_word(lang_type, 'Dubbing') :
return 'pl', 'Dubbing'
if self.contains_word(lang_type, 'Napisy') :
return 'pl', 'Napisy'
if self.contains_word(lang_type, 'Polski') :
return 'pl', None
return 'en', None
def resolve(self, url):
try:
return urlparse.urljoin(self.base_link, url);
except:
return
|
gpl-2.0
| 2,059,342,441,990,086,000
| 37.92
| 146
| 0.460689
| false
| 4.276923
| false
| false
| false
|
rboman/progs
|
apps/pdf2ppt/pdf2ppt.py
|
1
|
1429
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 Romain Boman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Split a PPT PDF laid out 4 pages per sheet (requires ImageMagick in the PATH)
#
# . name the pdf "cours.pdf"
# . export the pdf to PNG at 300 DPI
# . run the script
# . in Acrobat: Create PDF => From Multiple Files
#
# ref: http://www-etud.iro.umontreal.ca/~buisteri/info/pdfen.html
import os
import glob
fname = "cours_Page_*.pdf"
for f in glob.glob("Cours_Page_*.png"):
f2 = f.replace('.png', '-crop.png')
cmd = "convert -crop 95x95%%+0+0 %s %s" % (f, f2) # vire le numero
print(cmd)
os.system(cmd)
cmd = "convert -crop 50x50%% %s %s" % (f2, f)
print(cmd)
os.system(cmd)
os.remove(f2)
for g in glob.glob("%s-*.png" % f.replace('.png', '')):
cmd = "mogrify -trim %s" % g
print(cmd)
os.system(cmd)
os.remove(f)
|
apache-2.0
| 2,452,702,609,578,649,000
| 30.733333
| 76
| 0.654062
| false
| 3
| false
| false
| false
|
danja/elfquake
|
prednet_/ingv_train.py
|
1
|
3488
|
'''
Train PredNet on INGV sequences.
'''
import os
import numpy as np
np.random.seed(123)
from six.moves import cPickle
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense, Flatten
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.optimizers import Adam
from prednet import PredNet
from data_utils import SequenceGenerator
from ingv_settings import *
save_model = True # if weights will be saved
weights_file = os.path.join(WEIGHTS_DIR, 'prednet_ingv_weights.hdf5') # where weights will be saved
json_file = os.path.join(WEIGHTS_DIR, 'prednet_ingv_model.json')
# Data files
train_file = os.path.join(DATA_DIR, 'X_train.hkl')
train_sources = os.path.join(DATA_DIR, 'sources_train.hkl')
val_file = os.path.join(DATA_DIR, 'X_val.hkl')
val_sources = os.path.join(DATA_DIR, 'sources_val.hkl')
# Training parameters
nb_epoch = 50 # was 150
batch_size = 2 # was 4
samples_per_epoch = 250 # was 500
N_seq_val = 100 # number of sequences to use for validation
# Model parameters
n_channels, im_height, im_width = (3, 128, 160)
input_shape = (n_channels, im_height, im_width) if K.image_data_format() == 'channels_first' else (im_height, im_width, n_channels)
stack_sizes = (n_channels, 48, 96, 192)
R_stack_sizes = stack_sizes
A_filt_sizes = (3, 3, 3)
Ahat_filt_sizes = (3, 3, 3, 3)
R_filt_sizes = (3, 3, 3, 3)
layer_loss_weights = np.array([1., 0., 0., 0.]) # weighting for each layer in final loss; "L_0" model: [1, 0, 0, 0], "L_all": [1, 0.1, 0.1, 0.1]
layer_loss_weights = np.expand_dims(layer_loss_weights, 1)
nt = 10 # number of timesteps used for sequences in training
time_loss_weights = 1./ (nt - 1) * np.ones((nt,1)) # equally weight all timesteps except the first
time_loss_weights[0] = 0
prednet = PredNet(stack_sizes, R_stack_sizes,
A_filt_sizes, Ahat_filt_sizes, R_filt_sizes,
output_mode='error', return_sequences=True)
inputs = Input(shape=(nt,) + input_shape)
errors = prednet(inputs) # errors will be (batch_size, nt, nb_layers)
errors_by_time = TimeDistributed(Dense(1, weights=[layer_loss_weights, np.zeros(1)], trainable=False), trainable=False)(errors) # calculate weighted error by layer
errors_by_time = Flatten()(errors_by_time) # will be (batch_size, nt)
final_errors = Dense(1, weights=[time_loss_weights, np.zeros(1)], trainable=False)(errors_by_time) # weight errors by time
model = Model(inputs=inputs, outputs=final_errors)
model.compile(loss='mean_absolute_error', optimizer='adam')
train_generator = SequenceGenerator(train_file, train_sources, nt, batch_size=batch_size, shuffle=True)
val_generator = SequenceGenerator(val_file, val_sources, nt, batch_size=batch_size, N_seq=N_seq_val)
lr_schedule = lambda epoch: 0.001 if epoch < 75 else 0.0001 # start with lr of 0.001 and then drop to 0.0001 after 75 epochs
callbacks = [LearningRateScheduler(lr_schedule)]
if save_model:
if not os.path.exists(WEIGHTS_DIR): os.mkdir(WEIGHTS_DIR)
callbacks.append(ModelCheckpoint(filepath=weights_file, monitor='val_loss', save_best_only=True))
history = model.fit_generator(train_generator, samples_per_epoch / batch_size, nb_epoch, callbacks=callbacks,
validation_data=val_generator, validation_steps=N_seq_val / batch_size)
if save_model:
json_string = model.to_json()
with open(json_file, "w") as f:
f.write(json_string)
|
apache-2.0
| 4,245,735,531,598,125,000
| 42.061728
| 164
| 0.716456
| false
| 3.033043
| false
| false
| false
|
TariqEE/PrivEx
|
S2/S2-netified/exitListener.py
|
1
|
5097
|
from collections import defaultdict
from privexUtils import q, epoch, dc_start_delay, dc_reg_delay
from router import router
from tkgserver import tkgserver
from twisted.internet import reactor, protocol, task, ssl
from twisted.protocols import basic
import time
import json
import argparse
import pprint
parser = argparse.ArgumentParser(description='')
parser.add_argument('-i','--input', help='Input website list, one on each line',required=True)
parser.add_argument('-tkg','--tkgList', help='Input tkg list, IP and port, one on each line',required=True)
parser.add_argument('-thp','--tally', help='Input tally server IP and port.',required=True)
parser.add_argument('-p','--port', help='port to listen on',required=True)
parser.add_argument('-f','--fingerprint', help='fingerprint file of exit',required=True)
parser.add_argument('-c','--consensus', help='consensus file of exit',required=True)
args = parser.parse_args()
class exitListener(protocol.Protocol):
def dataReceived(self, data):
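        # Each message from the local Tor connection is expected to look like
        # "<action> <channelID> <circuitID> <website>"; only action "a" (add)
        # updates the per-channel/circuit/site counters below.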
action, channelID, circuitID, website = data.split(" ", 3)
action = action.strip()
channelID = int(channelID.strip())
circuitID = int(circuitID.strip())
website = website.strip()
if action == "a":
if channelID not in site_seen:
site_seen[channelID] = {}
if circuitID not in site_seen[channelID]:
site_seen[channelID][circuitID] = {}
if website not in site_seen[channelID][circuitID]:
site_seen[channelID][circuitID][website] = 1
if website != "Other" and website != "Censored":
if website in labels:
r.inc(website)
r.inc("Censored")
# print website + " incremented exitListener!\n"
else:
r.inc("Other")
# print "Other incremented exitListener!\n"
class exitRegister(basic.LineReceiver):
def __init__(self):
self.delimiter = '\n'
def connectionMade(self):
self.register_exit()
self.transport.loseConnection()
def register_exit(self):
global msg
print "DC: Registered with a TKG!"
#self.sendLine(msg[0])
#self.send_msg = json.dumps(msg[0])
#pprint.pprint(self.send_msg)
#self.sendLine(self.send_msg)
self.sendLine(repr(msg[0]))
msg.pop(0)
class exitStatSend(basic.LineReceiver):
def connectionMade(self):
self.send_stats()
self.transport.loseConnection()
def send_stats(self):
global r
global msg
global site_seen
self.send_data = json.dumps(r.publish())
print "DC: Sending TS our stats!"
self.sendLine(self.send_data)
#clean up objects and refresh
site_seen.clear()
r = None
msg = []
r = router(q, labels, tkgs, args.fingerprint, args.consensus)
for kid, a in zip(r.keys, tkgs):
msg.append(r.authority_msg(kid))
time.sleep(dc_reg_delay)
for host, port in tkg_info:
reactor.connectSSL(host, int(port), c_factory, ssl.ClientContextFactory())
if __name__ == "__main__":
labels = []
tkgs = []
site_seen = {}
r = None
tkg_info = []
msg = []
with open(args.input,'r') as f1:
for line in f1:
site = line.strip()
if site not in labels:
labels.append(site)
labels.append("Other")
labels.append("Censored")
with open(args.tally,'r') as f3:
for tallyline in f3:
tallyhost, tallyport = tallyline.strip().split()
with open(args.tkgList,'r') as f2:
for tkgline in f2:
tkgs.append(tkgserver(tkgline.strip()))
host, port = tkgline.strip().split()
tkg_info.append((host, port))
r = router(q, labels, tkgs, args.fingerprint, args.consensus)
for kid, a in zip(r.keys, tkgs):
msg.append(r.authority_msg(kid))
time.sleep((epoch - int(time.time())%epoch) + dc_start_delay)
print "DC starting up..."
last_epoch_start = int(time.time())/epoch
def epoch_change():
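        # Polled once per second by the LoopingCall below; when the integer epoch
        # index advances, connect to the tally server so exitStatSend can push the
        # accumulated counts and reset state for the new epoch.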
global last_epoch_start
global should_send
now = int(time.time())/epoch
if now > last_epoch_start:
last_epoch_start = now
print "Epoch Change!\n"
reactor.connectSSL(tallyhost, int(tallyport), sendtallyfactory, ssl.ClientContextFactory())
epoch_check = task.LoopingCall(epoch_change)
epoch_check.start(1)
sendtallyfactory = protocol.ClientFactory()
sendtallyfactory.protocol = exitStatSend
c_factory = protocol.ClientFactory()
c_factory.protocol = exitRegister
time.sleep(dc_reg_delay)
for host, port in tkg_info:
reactor.connectSSL(host, int(port), c_factory, ssl.ClientContextFactory())
s_factory = protocol.ServerFactory()
s_factory.protocol = exitListener
reactor.listenTCP(int(args.port), s_factory, interface='127.0.0.1') # Local Tor connection
print "DC ready!"
reactor.run()
|
bsd-3-clause
| 1,234,578,571,938,410,000
| 30.85625
| 107
| 0.613106
| false
| 3.698839
| false
| false
| false
|
exp-publishing/cloudbot-plugins
|
plugins/tell.py
|
1
|
5133
|
"""
tell.py
Created By:
- CloudBot IRC <https://github.com/ClodbotIRC>
Modified By:
- Josh Elsasser <https://github.com/jaelsasser>
License:
GNU General Public License (Version 3)
"""
import re
from datetime import datetime
from sqlalchemy import Table, Column, String, Boolean, DateTime
from sqlalchemy.sql import select
from cloudbot import hook
from cloudbot.util import timeformat, database
from cloudbot.event import EventType
table = Table(
'expp-tells',
database.metadata,
Column('connection', String(25)),
Column('channel', String(25, collation='NOCASE')),
Column('sender', String(25, collation='NOCASE')),
Column('target', String(25, collation='NOCASE')),
Column('message', String(500)),
Column('is_read', Boolean),
Column('time_sent', DateTime),
Column('time_read', DateTime),
extend_existing=True
)
@hook.on_start
def load_cache(db):
"""
:type db: sqlalchemy.orm.Session
"""
global tell_cache
tell_cache = []
for row in db.execute(table.select().where(table.c.is_read == 0)):
conn = row["connection"]
chan = row["channel"]
target = row["target"]
tell_cache.append((conn, chan, target))
def get_unread(db, server, target, channel='*'):
clauses = [table.c.channel == '*', table.c.channel == channel.lower()]
query = select([table.c.sender, table.c.channel, table.c.message, table.c.time_sent]) \
.where(table.c.connection == server.lower()) \
.where((table.c.channel == '*') | (table.c.channel == channel.lower())) \
.where(table.c.target == target.lower()) \
.where(table.c.is_read == 0) \
.order_by(table.c.time_sent)
return db.execute(query).fetchall()
def count_unread(db, server, target):
query = select([table]) \
.where(table.c.connection == server.lower()) \
.where(table.c.target == target.lower()) \
.where(table.c.is_read == 0) \
.alias("count") \
.count()
return db.execute(query).fetchone()[0]
def read_tell(db, server, channel, target, message):
query = table.update() \
.where(table.c.connection == server.lower()) \
.where(table.c.channel == channel.lower()) \
.where(table.c.target == target) \
.where(table.c.message == message) \
.values(is_read=1)
db.execute(query)
db.commit()
load_cache(db)
def add_tell(db, server, channel, sender, target, message):
query = table.insert().values(
connection=server.lower(),
channel=channel.lower(),
sender=sender,
target=target.lower(),
message=message,
is_read=False,
time_sent=datetime.today()
)
db.execute(query)
db.commit()
load_cache(db)
def tell_check(conn, nick):
for _conn, _chan, _target in tell_cache:
if (conn.lower(), nick.lower()) == (_conn.lower(), _target.lower()):
return True
@hook.event([EventType.message, EventType.action], singlethread=True)
def tell_watch(event, conn, db, chan, nick, ctcp, reply):
"""
:type event: cloudbot.event.Event
:type conn: cloudbot.client.Client
:type db: sqlalchemy.orm.Session
"""
if tell_check(conn.name, nick):
tells = get_unread(db, conn.name, nick, chan)
else:
return
sent = 0
ratelimit = 5
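    # The first `ratelimit` tells are delivered through reply(); any overflow is
    # sent privately via ctcp(), with a single notice of how many went private.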
for _from, _channel, _message, _sent in tells:
# format the send time
reltime = timeformat.time_since(_sent, simple=True, count=1)
if reltime == 0:
reltime = "just now"
else:
reltime += " ago"
out = "[{}, {}] {}".format(_from, reltime, _message)
read_tell(db, conn.name, _channel, nick, _message)
if sent < ratelimit:
reply(out)
else:
if sent == ratelimit + 1:
reply("{} more tells sent privately.".format(len(tells) - sent))
ctcp(out)
sent += 1
@hook.command("tell")
def tell_cmd(text, nick, db, notice, conn, chan):
"""tell <nick> <message> -- Relay <message> to <nick> when <nick> is around."""
query = text.split(' ', 1)
if len(query) != 2:
notice(conn.config("command_prefix") + tell_cmd.__doc__)
return
target = query[0]
message = query[1].strip()
sender = nick
if target.lower() == sender.lower():
notice("Bad user. Bad. Stop trying to .tell yourself")
return
# we can't send messages to ourselves
if target.lower() == conn.nick.lower():
notice("Invalid nick '{}'.".format(target))
return
if not re.match("^[a-z0-9_|.\-\]\[]*$", target.lower()):
notice("Invalid nick '{}'.".format(target))
return
# tells received via PM can be received anywhere
if chan.lower() == nick.lower():
chan = '*'
if count_unread(db, conn.name, target) >= 25:
notice("{} has too many messages queued already. Try again later"
.format(target))
return
add_tell(db, conn.name, chan, sender, target, message)
notice("I'll pass that on when {} is around.".format(target))
|
gpl-3.0
| -2,896,529,931,401,494,000
| 28.331429
| 91
| 0.591467
| false
| 3.53756
| false
| false
| false
|
talon-one/talon_one.py
|
test/test_set_discount_effect_props.py
|
1
|
2126
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.set_discount_effect_props import SetDiscountEffectProps # noqa: E501
from talon_one.rest import ApiException
class TestSetDiscountEffectProps(unittest.TestCase):
"""SetDiscountEffectProps unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test SetDiscountEffectProps
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = talon_one.models.set_discount_effect_props.SetDiscountEffectProps() # noqa: E501
if include_optional :
return SetDiscountEffectProps(
name = '0',
value = 1.337,
scope = '0'
)
else :
return SetDiscountEffectProps(
name = '0',
value = 1.337,
)
def testSetDiscountEffectProps(self):
"""Test SetDiscountEffectProps"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
mit
| 7,183,432,051,782,559,000
| 36.964286
| 647
| 0.670273
| false
| 4.011321
| true
| false
| false
|
CanonicalLtd/subiquity
|
subiquity/models/identity.py
|
1
|
1628
|
# Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import attr
log = logging.getLogger('subiquity.models.identity')
@attr.s
class User(object):
realname = attr.ib()
username = attr.ib()
password = attr.ib()
class IdentityModel(object):
""" Model representing user identity
"""
def __init__(self):
self._user = None
self._hostname = None
def add_user(self, identity_data):
self._hostname = identity_data.hostname
d = {}
d['realname'] = identity_data.realname
d['username'] = identity_data.username
d['password'] = identity_data.crypted_password
if not d['realname']:
d['realname'] = identity_data.username
self._user = User(**d)
@property
def hostname(self):
return self._hostname
@property
def user(self):
return self._user
def __repr__(self):
return "<LocalUser: {} {}>".format(self.user, self.hostname)
|
agpl-3.0
| 4,035,972,600,284,715,000
| 27.068966
| 74
| 0.665233
| false
| 4.049751
| false
| false
| false
|
open-craft/opencraft
|
instance/migrations/0002_auto_20150530_1255.py
|
1
|
1562
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('instance', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LogEntry',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('text', models.TextField()),
('level', models.CharField(max_length=5, default='info', choices=[('debug', 'Debug'), ('info', 'Info'), ('warn', 'Warning'), ('error', 'Error')], db_index=True)),
],
),
migrations.AlterField(
model_name='openedxinstance',
name='email',
field=models.EmailField(max_length=254, default='contact@example.com'),
),
migrations.AlterField(
model_name='openedxinstance',
name='github_organization_name',
field=models.CharField(max_length=50, default='open-craft', db_index=True),
),
migrations.AlterField(
model_name='openedxinstance',
name='github_repository_name',
field=models.CharField(max_length=50, default='opencraft', db_index=True),
),
migrations.AddField(
model_name='logentry',
name='instance',
field=models.ForeignKey(to='instance.OpenEdXInstance', on_delete=django.db.models.deletion.CASCADE),
),
]
|
agpl-3.0
| -7,473,166,844,276,405,000
| 35.325581
| 178
| 0.576184
| false
| 4.30303
| false
| false
| false
|
geotrellis/geotrellis-osm-elevation
|
ingest/src/main/python/geotrellis/osme/ingest/translate.py
|
1
|
9548
|
# 1. function create_object_links() gets a bucket path and returns a list of the link of each .img file
# 2. s3://azavea-datahub/emr/bootstrap.sh: install python2.7: sudo yum install -y python27;
# install gdal;
# install gdal_retile.py: sudo yum install -y gdal-python.x86_64;
# 3. change spark conf file in the master node:
# sudo sed -i '$ a export PYSPARK_PYTHON=/usr/bin/python2.7' /usr/lib/spark/conf/spark-env.sh
# usage: nohup /usr/lib/spark/bin/spark-submit translate.py /path/of/raw/tiles /path/of/workspace jobId &
# example: nohup /usr/lib/spark/bin/spark-submit translate.py s3://azavea-datahub/raw/ned-13arcsec/ s3://osm-elevation/chunk/geotiff emr-test-job-full &
#!/usr/bin/env python
import os
import sys
import json
import errno
import shutil
import zipfile
import tempfile
import traceback
from urlparse import urlparse
from collections import namedtuple
from subprocess import call, check_output
APP_NAME = "OSM Elevation Data Conversion"
def create_tmp_directory(prefix):
tmp = tempfile.mktemp(prefix=prefix, dir=os.path.join(os.environ['PWD'], "translate-temp"))
return makedirs_p(tmp)
def makedirs_p(d):
if not os.path.exists(d):
os.makedirs(d)
return d
def get_local_copy(uri, local_dir):
parsed = urlparse(uri)
local_path = tempfile.mktemp(dir=local_dir)
if parsed.scheme == "s3":
cmd = ["aws", "s3", "cp", uri, local_path]
elif parsed.scheme == "https":
cmd = ["wget", "-O", local_path, uri]
else:
cmd = ["cp", uri, local_path]
c = call(cmd)
return local_path
def create_object_links(bucket):
cmd = ["aws", "s3", "ls", bucket]
ls = check_output(cmd)
lines = ls.splitlines()
links = []
for line in lines:
obj = line.split()[-1]
if ".img" in obj:
links.append(bucket+obj)
return links
def unzip(source_path):
unzipped_dir = source_path + "-unzipped"
with zipfile.ZipFile(source_path) as zf:
zf.extractall(unzipped_dir)
names = zf.namelist()
extensions = ['.flt', '.hdr']
unzipped_paths = {}
for name in names:
for extension in extensions:
if extension in name:
unzipped_paths[extension] = unzipped_dir+'/'+name
return unzipped_paths
def upload_to_working(local_src, dest):
parsed = urlparse(dest)
if parsed.scheme == "s3":
cmd = ["aws", "s3", "cp",
local_src, dest]
else:
d = os.path.dirname(dest)
if not os.path.exists(d):
os.makedirs(d)
cmd = ["cp", local_src, dest]
call(cmd)
return dest
def get_filename(uri):
p = urlparse(uri)
return os.path.splitext(os.path.join(p.netloc, p.path[1:]))[0]
def mkdir_p(dir):
try:
os.makedirs(dir)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(dir):
pass
else: raise
UriSet = namedtuple('UriSet', 'source_uri workspace_target workspace_source_uri image_folder order')
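# UriSet fields: source_uri is the original raster URI, workspace_target the
# uploaded tile GeoTIFF in the workspace, workspace_source_uri its GDAL-readable
# /vsicurl form, image_folder the per-tile output prefix, and order the position
# of the source file in the input list.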
def vsi_curlify(uri):
"""
Creates a GDAL-readable path from the given URI
"""
parsed = urlparse(uri)
result_uri = ""
if not parsed.scheme:
result_uri = uri
else:
if parsed.scheme == "s3":
result_uri = "/vsicurl/http://%s.s3.amazonaws.com%s" % (parsed.netloc, parsed.path)
elif parsed.scheme.startswith("http"):
result_uri = "/vsicurl/%s" % uri
else:
raise Exception("Unsupported scheme: %s" % parsed.schem)
return result_uri
def process_flt(source_uri, order, workspace_uri):
# Download the file and retile
results = []
workspace_prefix = get_filename(source_uri)
local_dir = create_tmp_directory(workspace_prefix)
try :
MAX_HEIGHT = 1024 * 2
MAX_WIDTH = 1024 * 2
local_path = get_local_copy(source_uri, local_dir)
unzipped_paths = unzip(local_path)
# make sure gdal can recognize flt files
hdr = unzipped_paths['.hdr']
flt = unzipped_paths['.flt']
cmd1 = ["gdalinfo"] + [hdr]
cmd2 = ["gdalinfo"] + [flt]
call(cmd1)
call(cmd2)
local_path = flt
# translate
translated_path = local_path + "-translated.tif"
cmd = ["gdal_translate"] + ["-of", "GTiff",
"-co", "compress=deflate",
"-co", "predictor=3",
"-co", "tiled=yes",
"-co", "blockxsize=512",
"-co", "blockysize=512",
local_path,
translated_path]
call(cmd)
# retile
tiled_dir = local_path + "-tiled"
os.mkdir(tiled_dir)
cmd = ["gdal_retile.py"] + ["-co", "compress=deflate",
"-co", "predictor=3",
"-ps",
str(MAX_WIDTH),
str(MAX_HEIGHT),
"-targetDir",
tiled_dir,
translated_path]
call(cmd)
tile_filenames = os.listdir(tiled_dir)
workspace_basename = os.path.basename(workspace_prefix)
translated_path_name = os.path.splitext(os.path.basename(translated_path))[0]
# upload
for tile_filename in tile_filenames:
workspace_key = os.path.splitext(os.path.join(workspace_prefix, tile_filename.replace(translated_path_name, workspace_basename)))[0]
workspace_target = os.path.join(workspace_uri, workspace_key + "-working.tif")
upload_to_working(os.path.join(tiled_dir, tile_filename), workspace_target)
workspace_source_uri = vsi_curlify(workspace_target)
image_folder = os.path.join(workspace_uri, workspace_key)
uri_set = UriSet(source_uri = source_uri,
workspace_target = workspace_target,
workspace_source_uri = workspace_source_uri,
image_folder = image_folder,
order = order)
results.append(uri_set)
shutil.rmtree(local_dir)
finally:
if local_dir:
shutil.rmtree(local_dir, ignore_errors=True)
return results
def process_img(source_uri, order, workspace_uri):
# Download the file and retile
results = []
workspace_prefix = get_filename(source_uri)
local_dir = create_tmp_directory(workspace_prefix)
try :
MAX_HEIGHT = 1024 * 2
MAX_WIDTH = 1024 * 2
local_path = get_local_copy(source_uri, local_dir)
# translate
translated_path = local_path + "-translated.tif"
cmd = ["gdal_translate"] + ["-of", "GTiff",
"-co", "compress=deflate",
"-co", "predictor=3",
"-co", "tiled=yes",
"-co", "blockxsize=512",
"-co", "blockysize=512",
local_path,
translated_path]
call(cmd)
# retile
tiled_dir = local_path + "-tiled"
os.mkdir(tiled_dir)
cmd = ["gdal_retile.py"] + ["-co", "compress=deflate",
"-co", "predictor=3",
"-ps",
str(MAX_WIDTH),
str(MAX_HEIGHT),
"-targetDir",
tiled_dir,
translated_path]
call(cmd)
tile_filenames = os.listdir(tiled_dir)
workspace_basename = os.path.basename(workspace_prefix)
translated_path_name = os.path.splitext(os.path.basename(translated_path))[0]
# upload
for tile_filename in tile_filenames:
workspace_key = os.path.splitext(os.path.join(workspace_prefix.split("/")[-2], tile_filename.replace(translated_path_name, workspace_basename)))[0]
workspace_target = os.path.join(workspace_uri, workspace_key + ".tif")
upload_to_working(os.path.join(tiled_dir, tile_filename), workspace_target)
workspace_source_uri = vsi_curlify(workspace_target)
image_folder = os.path.join(workspace_uri, workspace_key)
uri_set = UriSet(source_uri = source_uri,
workspace_target = workspace_target,
workspace_source_uri = workspace_source_uri,
image_folder = image_folder,
order = order)
results.append(uri_set)
shutil.rmtree(local_dir)
finally:
if local_dir:
shutil.rmtree(local_dir, ignore_errors=True)
return results
if __name__ == '__main__':
from pyspark import SparkConf, SparkContext
bucket = sys.argv[1]
source_uris = create_object_links(bucket)
workspace = sys.argv[2]
jobId = sys.argv[3]
conf = SparkConf().setAppName(APP_NAME)
sc = SparkContext(conf=conf)
uri_sets = sc.parallelize(enumerate(source_uris)).flatMap(lambda (o, i): process_img(i, o, workspace))
source_tile_count = uri_sets.cache().count()
print "Done."
|
apache-2.0
| 8,990,720,175,938,981,000
| 33.348921
| 159
| 0.542417
| false
| 3.85622
| false
| false
| false
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/win_rm_listener.py
|
1
|
1814
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WinRMListener(Model):
"""Describes Protocol and thumbprint of Windows Remote Management listener.
:param protocol: Specifies the protocol of listener. <br><br> Possible
values are: <br>**http** <br><br> **https**. Possible values include:
'Http', 'Https'
:type protocol: str or
~azure.mgmt.compute.v2017_03_30.models.ProtocolTypes
:param certificate_url: This is the URL of a certificate that has been
uploaded to Key Vault as a secret. For adding a secret to the Key Vault,
see [Add a key or secret to the key
vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add).
    In this case, your certificate needs to be the Base64 encoding of
the following JSON Object which is encoded in UTF-8: <br><br> {<br>
"data":"<Base64-encoded-certificate>",<br> "dataType":"pfx",<br>
"password":"<pfx-file-password>"<br>}
:type certificate_url: str
"""
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'ProtocolTypes'},
'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
}
def __init__(self, protocol=None, certificate_url=None):
super(WinRMListener, self).__init__()
self.protocol = protocol
self.certificate_url = certificate_url
|
mit
| -2,108,547,658,817,203,000
| 42.190476
| 83
| 0.624587
| false
| 4.058166
| false
| false
| false
|
googleapis/googleapis-gen
|
google/cloud/ondemandscanning/v1/ondemandscanning-v1-py/google/cloud/ondemandscanning_v1/services/scanner_service/transports/base.py
|
1
|
7756
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.ondemandscanning_v1.types import scanner_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-ondemandscanning',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class ScannerServiceTransport(abc.ABC):
"""Abstract transport class for ScannerService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
)
DEFAULT_HOST: str = 'ondemandscanning.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials is service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.analyze_packages: gapic_v1.method.wrap_method(
self.analyze_packages,
default_timeout=None,
client_info=client_info,
),
self.list_vulnerabilities: gapic_v1.method.wrap_method(
self.list_vulnerabilities,
default_timeout=None,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def analyze_packages(self) -> Callable[
[scanner_service.AnalyzePackagesRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
@property
def list_vulnerabilities(self) -> Callable[
[scanner_service.ListVulnerabilitiesRequest],
Union[
scanner_service.ListVulnerabilitiesResponse,
Awaitable[scanner_service.ListVulnerabilitiesResponse]
]]:
raise NotImplementedError()
__all__ = (
'ScannerServiceTransport',
)
|
apache-2.0
| -98,890,217,601,211,180
| 40.037037
| 161
| 0.640923
| false
| 4.411832
| false
| false
| false
|
kevana/corpscores
|
dci_notify/database.py
|
1
|
2185
|
# -*- coding: utf-8 -*-
'''
Database module, including the SQLAlchemy database object and DB-related
utilities.
'''
from sqlalchemy.orm import relationship
from .extensions import db
# Alias common SQLAlchemy names
Column = db.Column
relationship = relationship
class CRUDMixin(object):
'''Mixin that adds convenience methods for CRUD operations.'''
@classmethod
def create(cls, **kwargs):
'''Create a new record and save it the database.'''
instance = cls(**kwargs)
return instance.save()
def update(self, commit=True, **kwargs):
'''Update specific fields of a record.'''
for attr, value in kwargs.iteritems():
setattr(self, attr, value)
return commit and self.save() or self
def save(self, commit=True):
'''Save the record.'''
db.session.add(self)
if commit:
db.session.commit()
return self
def delete(self, commit=True):
'''Remove the record from the database.'''
db.session.delete(self)
return commit and db.session.commit()
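# Illustrative use on a concrete model (a hypothetical ``Event`` subclass of Model):
#   event = Event.create(name='DCI Finals')
#   event.update(name='DCI World Championship Finals')
#   event.delete()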
class Model(CRUDMixin, db.Model):
'''Base model class that includes CRUD convenience methods.'''
__abstract__ = True
# From Mike Bayer's "Building the app" talk
# https://speakerdeck.com/zzzeek/building-the-app
class SurrogatePK(object):
'''A mixin that adds a surrogate integer 'primary key' column named
``id`` to any declarative-mapped class.
'''
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, id):
if any(
(isinstance(id, basestring) and id.isdigit(),
isinstance(id, (int, float))),
):
return cls.query.get(int(id))
return None
def ReferenceCol(tablename, nullable=False, pk_name='id', **kwargs):
'''Column that adds primary key foreign key reference.
Usage: ::
category_id = ReferenceCol('category')
category = relationship('Category', backref='categories')
'''
return db.Column(
db.ForeignKey("{0}.{1}".format(tablename, pk_name)),
nullable=nullable, **kwargs)
|
bsd-3-clause
| 5,581,491,092,407,811,000
| 26.658228
| 72
| 0.628375
| false
| 4.138258
| false
| false
| false
|
Symantec/py-statsd
|
pystatsd/pystatsagent.py
|
1
|
2187
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import json
try:
import pystats_config
except ImportError:
import pystatsd.pystats_config as pystats_config
class UDPClient(object):
def __init__(self, server_ip, server_port):
"""Initalize client"""
self.server_ip = server_ip
self.server_port = server_port
self.sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
def send_msg(self, msg):
"""Send message"""
self.sock.sendto(msg, (self.server_ip, self.server_port))
class PystatAgent(object):
def __init__(self):
self.cfg = pystats_config.PyStatConfig()
if self.cfg.parsedyaml is not None:
self.remote_addr = self.cfg.parsedyaml.get('bind_address',
'localhost')
self.remote_port = self.cfg.parsedyaml.get('bind_port', 5090)
else:
self.remote_addr = 'localhost'
self.remote_port = 5090
self.host = socket.gethostname()
self.udpclient = UDPClient(self.remote_addr, self.remote_port)
def trace(self, metric_name, trace_info):
data = self.format_msg_data(metric_name, 'trace', trace_info, None)
self.udpclient.send_msg(data)
def guage(self, metric_name, value, trace_info):
data = self.format_msg_data(metric_name, 'guage', trace_info, value)
self.udpclient.send_msg(data)
def format_msg_data(self, metric_name, metric_type, trace_info, value):
msg = trace_info
msg['metric_name'] = metric_name
msg['metric_type'] = metric_type
msg['host'] = self.host
# Attach additional user provided tags to the msg.
if self.cfg.parsedyaml is not None and \
self.cfg.parsedyaml.get('agent', None) is not None:
agent_tags = self.cfg.parsedyaml['agent'].get('tags', None)
if agent_tags is not None:
for tag in agent_tags:
msg[tag] = agent_tags[tag]
if metric_type == "guage":
msg['value'] = value
jdata = json.dumps(msg)
return jdata
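# Illustrative use (assumes a pystatsd server listening on the configured
# bind_address/bind_port):
#   agent = PystatAgent()
#   agent.trace('job_started', {'job_id': 'abc123'})
#   agent.guage('queue_depth', 42, {'service': 'ingest'})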
|
apache-2.0
| 7,166,294,631,678,672,000
| 32.646154
| 76
| 0.581619
| false
| 3.713073
| false
| false
| false
|
lukovkin/ufcnn-keras
|
models/create_signals_bid_ask.py
|
1
|
13710
|
from __future__ import absolute_import
from __future__ import print_function
import sys
from copy import copy, deepcopy
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 1000)
from signals import *
def find_all_signals(_df, comission=0.0, max_position_size=1, debug=False):
"""
    Finds and returns all signals that could result in profitable deals, taking the comission into account.
    E.g. it returns a Buy and a Sell signal if the ask price at the Buy is lower than the bid price at the
    Sell minus the comission. It then moves one step forward and considers the already seen Sell signal and
    the next Buy as a possible profitable short deal.
"""
df = deepcopy(_df)
df['Buy'] = np.zeros(df.shape[0])
df['Sell'] = np.zeros(df.shape[0])
df['Buy Mod'] = np.zeros(df.shape[0])
df['Sell Mod'] = np.zeros(df.shape[0])
inflection_points = pd.DataFrame({'Buy': df["askpx_"].diff().shift(-1) > 0, 'Sell': df["bidpx_"].diff().shift(-1) < 0})
iterator = inflection_points.iterrows()
max_count = 0
position_size = 0
try:
while True:
#for i in range(0, 100):
idx_open, next_idx, row_open, sig_type_open = next_signal(iterator, df)
iterator = inflection_points.loc[next_idx:].iterrows()
iterator.next()
df[sig_type_open][idx_open] = 1
except TypeError:
print("Iteration stopped")
print("Buy candidates: {} Sell candidates: {}".format(df[df['Buy'] != 0].count()['Buy'], df[df['Sell'] != 0].count()['Sell']))
candidates = df[(df['Buy'] != 0) | (df['Sell'] != 0)].iterrows()
idx_open, row_open = candidates.next()
for idx, row in candidates:
if row_open['Buy'] == 1 and (df["bidpx_"][idx] > (df["askpx_"][idx_open] + comission)):
df['Buy Mod'][idx_open] += 1
df['Sell Mod'][idx] += 1
elif row_open['Sell'] == 1 and (df["askpx_"][idx] < (df["bidpx_"][idx_open] - comission)):
df['Sell Mod'][idx_open] += 1
df['Buy Mod'][idx] += 1
idx_open = idx
row_open = row
    df = df.rename(columns={"Buy": "Buy Candidates", "Sell": "Sell Candidates"})
df['Buy'] = np.zeros(df.shape[0])
df['Sell'] = np.zeros(df.shape[0])
df['Buy'][df['Buy Mod'] != 0] = 1
df['Sell'][df['Sell Mod'] != 0] = 1
print("Buy: {} Sell: {}".format(df[df['Buy Mod'] != 0].count()['Buy Mod'], df[df['Sell Mod'] != 0].count()['Sell Mod']))
print("Buy: {} Sell: {}".format(df[df['Buy'] != 0].count()['Buy'], df[df['Sell'] != 0].count()['Sell']))
return df
def next_signal(iterator, df=None, sig_type=None, outer_idx=None, outer_row=None):
"""
    Recursive function to find the best signal (Buy or Sell) in a sequence of possible candidates (inflection points).
    It compares the current candidate with the following candidates of the same type and keeps the better one:
    e.g. if the current candidate is a Buy with ask price 20, the next candidate (1) is a Buy with ask price 10
    and candidate (2) is a Buy with ask price 15, the function returns candidate (1) with ask price 10
    as soon as it faces the first consecutive Sell candidate.
"""
prev_idx = outer_idx
best_idx = outer_idx
best_row = outer_row
for idx, row in iterator:
# print(idx, row)
if row['Buy'] or row['Sell']:
inner_sig_type = 'Buy' if row['Buy'] else 'Sell'
print("Inner signal: ", idx, inner_sig_type)
if sig_type:
print("Outer signal: ", outer_idx, sig_type)
if inner_sig_type == sig_type:
print("Compare {} bid: {} ask: {} with {} bid: {} ask: {}".
format(best_idx, df["bidpx_"][best_idx], df["askpx_"][best_idx], idx, df["bidpx_"][idx], df["askpx_"][idx]))
if sig_type == 'Buy' and df["askpx_"][idx] < df["askpx_"][best_idx]:
print("Better {} candidate at {} with price {}".format(sig_type, idx, df["askpx_"][idx]))
best_idx, best_row = idx, row
#return idx, idx, row, sig_type
if sig_type == 'Sell' and df["bidpx_"][idx] > df["bidpx_"][best_idx]:
print("Better {} candidate at {} with price {}".format(sig_type, idx, df["bidpx_"][idx]))
best_idx, best_row = idx, row
#return idx, idx, row, sig_type
prev_idx = idx
else:
print("Best {} candidate at {}, break...".format(sig_type, outer_idx))
return best_idx, prev_idx, best_row, sig_type
else:
print("Recursion")
return next_signal(iterator, df, inner_sig_type, idx, row)
def set_positions(_df):
df = deepcopy(_df)
df['Pos'] = np.zeros(df.shape[0])
last_position = 0
longs = 0
shorts = 0
iterator = df.iterrows()
last_idx, last_row = iterator.next()
for idx, row in iterator:
        df.loc[idx, 'Pos'] = row['Buy Mod'] - row['Sell Mod'] + df.loc[last_idx, 'Pos']
last_idx, last_row = idx, row
if df.loc[idx]['Pos'] != last_position and df.loc[idx]['Pos'] > 0:
longs += 1
elif df.loc[idx]['Pos'] != last_position and df.loc[idx]['Pos'] < 0:
shorts += 1
last_position = df.loc[idx]['Pos']
print("Long positions: {} Short positions: {}".format(longs, shorts))
return df
def find_signals(df, sig_type, comission=0.0, debug=False):
colnames = {"Buy": ("Buy", "Sell Close"),
"Sell": ("Sell", "Buy Close")}
inflection_points_buy = df["askpx_"].diff().shift(-1) > 0
inflection_points_sell = df["bidpx_"].diff().shift(-1) < 0
iterator = inflection_points_buy.iteritems() if sig_type == "Buy" else inflection_points_sell.iteritems()
inflection_points = inflection_points_buy if sig_type == "Buy" else inflection_points_sell
inner_inflection_points = inflection_points_sell if sig_type == "Buy" else inflection_points_buy
max_count = 0
(major_colname, minor_colname) = colnames[sig_type]
df[major_colname] = np.zeros(df.shape[0])
df[minor_colname] = np.zeros(df.shape[0])
for idx, val in iterator:
if max_count > 10000 and debug:
print("Max count reached, break...")
break
inner_iterator = inner_inflection_points.loc[idx:].iteritems()
if df[df[minor_colname]==1].empty:
can_open = True
else:
can_open = idx > df[df[minor_colname]==1].index[-1]
max_count += 1
if val and can_open:
print("{} candidate at {} with price {}".format(sig_type, idx, df["askpx_"][idx]))
for inner_idx, inner_val in inner_iterator:
if inner_idx > idx:
if sig_type == "Buy":
if df["askpx_"][inner_idx] < df["askpx_"][idx] and inflection_points[inner_idx]:
print("Better {} candidate at {} with price {}, break...".format(sig_type, inner_idx, df["askpx_"][inner_idx]))
break
if df["bidpx_"][inner_idx] > (df["askpx_"][idx] + comission) and inner_val:
df[major_colname][idx] = 1
df[minor_colname][inner_idx] = 1
print("Buy at {} with price {}".format(idx, df["askpx_"][idx]))
print("Sell at {} with price {}".format(inner_idx, df["bidpx_"][inner_idx]))
break
elif sig_type == "Sell":
if df["bidpx_"][inner_idx] > df["bidpx_"][idx] and inflection_points[inner_idx]:
print("Better {} candidate at {} with price {}, break...".format(sig_type, inner_idx, df["bidpx_"][inner_idx]))
break
if df["askpx_"][inner_idx] < (df["bidpx_"][idx] - comission) and inner_val:
df[major_colname][idx] = 1
df[minor_colname][inner_idx] = 1
print("Sell at {} with price {}".format(idx, df["bidpx_"][idx]))
print("Buy at {} with price {}".format(inner_idx, df["askpx_"][inner_idx]))
break
return df
def filter_signals(df):
buys = df["Buy"] + df["Buy Close"]
df["Buy Mod"] = np.zeros(df.shape[0])
df["Buy Mod"][buys == 2] = 1
sells = df["Sell"] + df["Sell Close"]
df["Sell Mod"] = np.zeros(df.shape[0])
df["Sell Mod"][sells == 2] = 1
iterator = df.iterrows()
current_signal = 0
for idx, row in iterator:
current_signal = row["Buy Mod"] - row["Sell Mod"]
if current_signal != 0:
print("Signal {} at {}".format(current_signal, idx))
inner_iterator = df.loc[idx:].iterrows()
inner_iterator.next()
for inner_idx, inner_row in inner_iterator:
next_signal = inner_row["Buy Mod"] - inner_row["Sell Mod"]
if next_signal == current_signal:
print("Consecutive similar signal {} at {}".format(next_signal, inner_idx))
if current_signal == 1:
df_slice = df.loc[idx:inner_idx]
candidates = df_slice[df_slice["Sell"] == 1]
best_candidate = candidates["bidpx_"].idxmax()
print(df.loc[best_candidate])
df["Sell Mod"].loc[best_candidate] = 1
break
elif current_signal == -1:
df_slice = df.loc[idx:inner_idx]
candidates = df_slice[df_slice["Buy"] == 1]
best_candidate = candidates["askpx_"].idxmin()
print(df.loc[best_candidate])
df["Buy Mod"].loc[best_candidate] = 1
break
elif next_signal != 0 and next_signal != current_signal:
break
df["Buy Open"] = df["Buy"]
df["Sell Open"] = df["Sell"]
df = df.drop(["Buy", "Sell"], axis=1)
print(df.columns)
df = df.rename(columns={"Buy Mod": "Buy", "Sell Mod": "Sell"})
print(df.columns)
# df = df.drop(["Buy Close", "Sell Close"], axis=1)
return df
def make_spans(df, sig_type):
span_colname = "Buys" if sig_type == "Buy" else "Sells"
reversed_df = df[::-1]
df[span_colname] = np.zeros(df.shape[0])
for idx in df[sig_type][df[sig_type] == 1].index:
signal_val = df.loc[idx]
iterator = reversed_df.loc[idx:].iterrows()
        price_col = "askpx_" if sig_type == "Buy" else "bidpx_"
        print("Outer loop:", idx, signal_val[price_col])
for i, val in iterator:
# _d = print("Inner loop:", i, val["askpx_"]) if sig_type == "Buy" else print("Inner loop:", i, val["bidpx_"])
if sig_type == "Buy":
if val["askpx_"] == signal_val["askpx_"]:
# print("Add to buys")
df[span_colname][i] = 1
else:
break
elif sig_type == "Sell":
if val["bidpx_"] == signal_val["bidpx_"]:
# print("Add to sells")
df[span_colname][i] = 1
else:
break
return df
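# Profit and loss: by default each signalled row contributes sell-at-bid minus buy-at-ask;
# in chained mode every signal closes the previous position and opens a new one, and the
# final unmatched opening deal is dropped before summing.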
def pnl(df, chained=False):
deals = []
pnl = 0
if not chained:
for idx, row in df[(df['Buy Mod'] != 0) | (df['Sell Mod'] != 0)].iterrows():
current_trade = row['Sell Mod'] * row["bidpx_"] - row['Buy Mod'] * row["askpx_"]
pnl = pnl + current_trade
deals.append(current_trade)
print("Running PnL: ", pnl)
print("Check PnL: {} vs {}".format(pnl, np.sum(deals)))
return pnl, len(deals)
else:
is_opened = False
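        # Once a position is open, each opposite signal books the price twice: once to close
        # the running position and once to open the new one in the other direction.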
for idx, row in df.iterrows():
if row["Buy"]:
if is_opened:
deals.append(-row["askpx_"])
deals.append(-row["askpx_"])
is_opened = True
elif row["Sell"]:
if is_opened:
deals.append(row["bidpx_"])
deals.append(row["bidpx_"])
is_opened = True
print(len(deals))
deals.pop()
print(len(deals))
return np.sum(deals), len(deals)
def __main__():
"""
    Trading simulator from the curriculumvite trading competition;
    see also the arXiv paper by Roni Mittelman, http://arxiv.org/pdf/1508.00317v1.
    Modified by Ernst.Tmp@gmx.at.
    Produces data to train a neural net.
"""
# Trades smaller than this will be omitted
min_trade_amount = None
comission = 0.0
    if len(sys.argv) < 2:
        print("Usage: {} day_trading_file [--spans] [--chained-deals] (NOT a target_price file)".format(sys.argv[0]))
        sys.exit()
day_file = sys.argv[1]
try:
write_spans = True if sys.argv[2] == "--spans" else False
except IndexError:
write_spans = False
try:
chained_deals = True if sys.argv[3] == "--chained-deals" else False
except IndexError:
chained_deals = False
generate_signals_for_file(day_file, comission, write_spans, chained_deals)
if __name__ == "__main__":
    __main__()
|
mit
| 75,075,838,899,933,100
| 40.41994
| 139
| 0.515536
| false
| 3.542636
| false
| false
| false
|
ingadhoc/odoo-infrastructure
|
infrastructure/wizard/instance_update_add_instances.py
|
1
|
1450
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
class instance_update_add_instances(models.TransientModel):
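    """Wizard to pick instances and add them as detail lines of an instance update."""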
_name = 'instance.update.add_instances'
@api.model
def get_update(self):
return self.env['infrastructure.instance.update'].browse(
self.env.context.get('active_id', False))
update_id = fields.Many2one(
'infrastructure.instance.update',
'Update',
default=get_update,
required=True,
ondelete='cascade',
)
actual_instance_ids = fields.Many2many(
'infrastructure.instance',
compute='get_actual_instances',
)
instance_ids = fields.Many2many(
'infrastructure.instance',
string='Instances',
)
@api.one
@api.depends('update_id')
def get_actual_instances(self):
self.actual_instance_ids = self.update_id.detail_ids.mapped(
'instance_id')
@api.multi
def confirm(self):
self.ensure_one()
for instance in self.instance_ids:
vals = {
'instance_id': instance.id,
'update_id': self.update_id.id,
}
self.update_id.detail_ids.create(vals)
|
agpl-3.0
| -7,664,425,907,704,967,000
| 29.851064
| 78
| 0.536552
| false
| 4.264706
| false
| false
| false
|