idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
54,900
def draw(self):
    """Return an ASCII representation of the sorting network.

    Each wire is drawn as a horizontal row of '-' characters, 7 columns per
    network level (``self.depth``).  Comparators are drawn as two 'x' marks
    joined by a vertical '|' line.  Iterating over ``self`` yields the levels;
    each level yields ``(wire1, wire2)`` comparator pairs.
    """
    # Row 0: wire line, labelled "0 o" at its left edge.
    str_wires = [["-"] * 7 * self.depth]
    str_wires[0][0] = "0"
    str_wires[0][1] = " o"
    # str_spaces holds the blank rows drawn *between* wires, where the
    # vertical connector of a comparator is rendered.
    str_spaces = []
    for i in range(1, self.dimension):
        str_wires.append(["-"] * 7 * self.depth)
        str_spaces.append([" "] * 7 * self.depth)
        str_wires[i][0] = str(i)
        str_wires[i][1] = " o"
    for index, level in enumerate(self):
        for wire1, wire2 in level:
            # Comparator endpoints at column (index + 1) * 6.
            str_wires[wire1][(index + 1) * 6] = "x"
            str_wires[wire2][(index + 1) * 6] = "x"
            # Vertical connector through the inter-wire spacer rows...
            for i in range(wire1, wire2):
                str_spaces[i][(index + 1) * 6 + 1] = "|"
            # ...and through any wire rows the comparator crosses.
            for i in range(wire1 + 1, wire2):
                str_wires[i][(index + 1) * 6] = "|"
    # Interleave wire rows and spacer rows into the final drawing.
    network_draw = "".join(str_wires[0])
    for line, space in zip(str_wires[1:], str_spaces):
        network_draw += "\n"
        network_draw += "".join(space)
        network_draw += "\n"
        network_draw += "".join(line)
    return network_draw
Return an ASCII representation of the network .
54,901
def getWorkersName(data):
    """Return the names of the workers sorted alphabetically.

    The special "broker" entry, if present, is excluded from the result.

    :param data: mapping whose keys are worker names.
    :returns: sorted list of worker names without "broker".
    """
    # sorted() over the mapping iterates its keys directly; no need for an
    # intermediate comprehension over data.keys().
    names = sorted(data)
    try:
        names.remove("broker")
    except ValueError:
        # No broker entry recorded: nothing to strip.
        pass
    return names
Returns the list of the names of the workers sorted alphabetically
54,902
def importData(directory):
    """Parse the pickled input files of *directory*.

    File names are expected to look like ``<name>-<TYPE>``; files whose type
    suffix is ``QUEUE`` go into the queue dictionary, everything else into the
    task dictionary.

    :param directory: path containing the pickled data files.
    :returns: ``(dataTask, dataQueue)`` — two OrderedDicts keyed by file name
        (without the type suffix), in sorted file-name order.
    """
    dataTask = OrderedDict()
    dataQueue = OrderedDict()
    for fichier in sorted(os.listdir(directory)):
        try:
            # os.path.join is portable; the old "{directory}/{fichier}"
            # format relied on '/' and on locals() introspection.
            with open(os.path.join(directory, fichier), 'rb') as f:
                fileName, fileType = fichier.rsplit('-', 1)
                if fileType == "QUEUE":
                    dataQueue[fileName] = pickle.load(f)
                else:
                    dataTask[fileName] = pickle.load(f)
        except Exception:
            # Best-effort parsing: skip unreadable, unsuffixed or corrupted
            # files.  (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            continue
    return dataTask, dataQueue
Parse the input files and return two dictionaries
54,903
def getTimes(dataTasks):
    """Get the start and end time of the data, in milliseconds.

    Also sets the module-level ``begin_time`` global to the start time (ms),
    which other plotting helpers read.

    :param dataTasks: mapping of worker name -> per-future dicts carrying
        ``start_time`` / ``end_time`` sequences (timestamp first element).
    :returns: ``(start_ms, end_ms)``; ``(inf, 0)`` when no data is present.
    """
    global begin_time
    start_time, end_time = float('inf'), 0
    # BUG FIX: iterate over the *parameter*; the original iterated the
    # module-level `dataTask`, silently ignoring the argument.
    for fichier, vals in dataTasks.items():
        try:
            # Broker entries are lists; only dict-like worker data counts.
            if hasattr(vals, 'values'):
                tmp_start_time = min([a['start_time'] for a in vals.values()])[0]
                if tmp_start_time < start_time:
                    start_time = tmp_start_time
                tmp_end_time = max([a['end_time'] for a in vals.values()])[0]
                if tmp_end_time > end_time:
                    end_time = tmp_end_time
        except ValueError:
            # min()/max() on an empty worker: skip it.
            continue
    begin_time = 1000 * start_time
    return 1000 * start_time, 1000 * end_time
Get the start time and the end time of data in milliseconds
54,904
def WorkersDensity(dataTasks):
    """Return the worker density data for the graph.

    For each worker, samples DENSITY_MAP_TIME_AXIS_LENGTH points across the
    run and counts how many futures were executing at each sample time.

    NOTE(review): relies on module-level `timeRange`,
    `DENSITY_MAP_TIME_AXIS_LENGTH` and the parsed `args` — confirm they are
    defined before this is called.
    """
    start_time, end_time = getTimes(dataTasks)
    graphdata = []
    for name in getWorkersName(dataTasks):
        vals = dataTasks[name]
        # Broker entries are lists; only dict-like worker data is plotted.
        if hasattr(vals, 'values'):
            workerdata = []
            print("Plotting density map for {}".format(name))
            try:
                for graphtime in timeRange(start_time, end_time,
                                           DENSITY_MAP_TIME_AXIS_LENGTH):
                    # Report malformed futures (missing timestamps).
                    for a in vals.values():
                        if not all((a['start_time'], a['end_time'])):
                            print("Invalid data:", a['start_time'], a['end_time'])
                    # Number of futures running at this sample instant.
                    # graphtime is in ms; stored timestamps are in seconds.
                    workerdata.append(sum(
                        [a['start_time'][0] <= float(graphtime) / 1000. <= a['end_time'][0]
                         for a in vals.values()
                         if a['start_time'] and a['end_time']]))
            except OverflowError:
                print("Error processing {0} or {1}".format(start_time, end_time))
            graphdata.append(workerdata)
            if args.binarydensity:
                # Collapse the counts to a busy/idle (0/1) signal.
                maxval = max(graphdata[-1])
                if maxval > 1:
                    maxval = maxval - 1
                graphdata[-1] = [x - maxval for x in graphdata[-1]]
    return graphdata
Return the worker density data for the graph .
54,905
def plotDensity(dataTask, filename):
    """Plot the worker density graph and save it to *filename*.

    Uses module-level matplotlib (`plt`, `ticker`, `ListedColormap`,
    `BoundaryNorm`), `DENSITY_MAP_TIME_AXIS_LENGTH` and the parsed `args`.
    """
    def format_time(x, pos=None):
        # Map a density-map x coordinate back to seconds since run start.
        # Reads the module global `begin_time` set by getTimes().
        start_time, end_time = [(a - begin_time) / 1000 for a in getTimes(dataTask)]
        return int(end_time * x / DENSITY_MAP_TIME_AXIS_LENGTH)
    graphdata = WorkersDensity(dataTask)
    if len(graphdata):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        # Shift the axes right to leave room for worker-name tick labels.
        box = ax.get_position()
        ax.set_position([box.x0 + 0.15 * box.width, box.y0, box.width, box.height])
        if args.binarydensity:
            # Two-colour busy/idle rendering.
            cmap = ListedColormap(['r', 'g'])
            norm = BoundaryNorm([0, 0.5, 1], cmap.N)
            cax = ax.imshow(graphdata, interpolation='nearest', aspect='auto',
                            cmap=cmap, norm=norm)
        else:
            cax = ax.imshow(graphdata, interpolation='nearest', aspect='auto')
        plt.xlabel('time (s)')
        plt.ylabel('Worker')
        ax.set_title('Work density')
        ax.yaxis.set_ticks(range(len(graphdata)))
        ax.tick_params(axis='both', which='major', labelsize=6)
        # Four labelled intervals along the time axis.
        interval_size = DENSITY_MAP_TIME_AXIS_LENGTH // 4
        ax.xaxis.set_ticks(range(0, DENSITY_MAP_TIME_AXIS_LENGTH + interval_size,
                                 interval_size))
        ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_time))
        if args.binarydensity:
            cax.set_clim(0, 1)
            cbar = fig.colorbar(cax, ticks=[0, 1])
        else:
            cbar = fig.colorbar(cax)
        fig.savefig(filename)
Plot the worker density graph
54,906
def plotBrokerQueue(dataTask, filename):
    """Generate the broker queue-length graphic and save it to *filename*.

    Broker entries are the list-valued items of *dataTask*: rows of
    (timestamp, ..., queue_length, pending_requests) tuples.
    """
    print("Plotting broker queue length for {0}.".format(filename))
    plt.figure()
    # Top subplot: queue length over time (column 2).
    plt.subplot(211)
    for fichier, vals in dataTask.items():
        if type(vals) == list:
            timestamps = list(map(datetime.fromtimestamp,
                                  map(int, list(zip(*vals))[0])))
            plt.plot_date(timestamps, list(zip(*vals))[2], linewidth=1.0,
                          marker='o', markersize=2, label=fichier)
    plt.title('Broker queue length')
    plt.ylabel('Tasks')
    # Bottom subplot: pending requests over time (column 3).
    plt.subplot(212)
    for fichier, vals in dataTask.items():
        if type(vals) == list:
            timestamps = list(map(datetime.fromtimestamp,
                                  map(int, list(zip(*vals))[0])))
            plt.plot_date(timestamps, list(zip(*vals))[3], linewidth=1.0,
                          marker='o', markersize=2, label=fichier)
    plt.title('Broker pending requests')
    plt.xlabel('time (s)')
    plt.ylabel('Requests')
    plt.savefig(filename)
Generates the broker queue length graphic .
54,907
def getWorkerInfo(dataTask):
    """Return the total execution time and task quantity by worker.

    :param dataTask: mapping of worker name -> per-future dicts with an
        ``executionTime`` entry (broker entries are lists and are skipped).
    :returns: ``(workertime, workertasks)`` — two parallel lists, one entry
        per dict-like worker, in iteration order.
    """
    workertime = []
    workertasks = []
    for fichier, vals in dataTask.items():
        if hasattr(vals, 'values'):
            # Generator expression avoids materialising a throwaway list.
            totaltime = sum(a['executionTime'] for a in vals.values())
            workertime.append(totaltime)
            # len() replaces the original sum([1 for a in vals.values()]).
            workertasks.append(len(vals))
    return workertime, workertasks
Returns the total execution time and task quantity by worker
54,908
def timelines(fig, y, xstart, xstop, color='b'):
    """Plot a timeline at height *y* from *xstart* to *xstop*.

    Draws a thick horizontal bar with a short vertical cap at each end,
    using the given matplotlib axes-like object *fig*.
    """
    # The body of the timeline.
    fig.hlines(y, xstart, xstop, color, lw=4)
    # End caps at both extremities.
    for cap_x in (xstart, xstop):
        fig.vlines(cap_x, y + 0.03, y - 0.03, color, lw=2)
Plot timelines at y from xstart to xstop with given color .
54,909
def plotTimeline(dataTask, filename):
    """Build a per-worker timeline of future executions and save it.

    Each non-broker worker gets a horizontal band; every future it executed
    is drawn with timelines() between its start and end times (relative to
    the earliest time reported by getMinimumTime()).
    """
    fig = plt.figure()
    ax = fig.gca()
    worker_names = [x for x in dataTask.keys() if "broker" not in x]
    min_time = getMinimumTime(dataTask)
    # Spread the workers evenly over the [0, 1] vertical range.
    ystep = 1. / (len(worker_names) + 1)
    y = 0
    for worker, vals in dataTask.items():
        if "broker" in worker:
            continue
        y += ystep
        if hasattr(vals, 'values'):
            for future in vals.values():
                start_time = [future['start_time'][0] - min_time]
                end_time = [future['end_time'][0] - min_time]
                timelines(ax, y, start_time, end_time)
    ax.set_yticks(np.arange(ystep, 1, ystep))
    ax.set_yticklabels(worker_names)
    ax.set_ylim(0, 1)
    ax.set_xlabel('Time')
    fig.savefig(filename)
Build a timeline
54,910
def setWorker(self, *args, **kwargs):
    """Set the worker launch arguments for this host.

    Arguments and their order are defined by ``self.LAUNCHING_ARGUMENTS``
    (a namedtuple); using keyword arguments is advised.

    :raises TypeError: when the arguments do not match the namedtuple
        signature (after logging a descriptive error).
    """
    try:
        la = self.LAUNCHING_ARGUMENTS(*args, **kwargs)
    except TypeError as e:
        scoop.logger.error(
            ("addWorker failed to convert args %s and kwargs %s "
             "to namedtuple (requires %s arguments (names %s)") % (
                args, kwargs,
                len(self.LAUNCHING_ARGUMENTS._fields),
                self.LAUNCHING_ARGUMENTS._fields))
        # BUG FIX: the original fell through and hit the assignment below
        # with `la` unbound, masking the real error with UnboundLocalError.
        raise
    self.workersArguments = la
Add a worker assignment . The arguments and their order are defined in LAUNCHING_ARGUMENTS . Using named arguments is advised .
54,911
def _WorkerCommand_environment(self):
    """Return the shell tokens preparing the environment for bootstrap.

    Optionally sources the prolog script and exports an augmented
    PYTHONPATH (expanded locally, or left to the remote shell otherwise).
    """
    wa = self.workersArguments
    tokens = []
    if wa.prolog:
        tokens += ["source", wa.prolog, "&&"]
    if wa.pythonPath:
        if self.isLocal():
            # Expand the current PYTHONPATH ourselves: no shell will do it.
            current = os.environ.get("PYTHONPATH", "")
            tokens += ["env", "PYTHONPATH={0}:{1}".format(wa.pythonPath, current)]
        else:
            # The remote shell expands $PYTHONPATH on its side.
            tokens += ["env", "PYTHONPATH={0}:$PYTHONPATH".format(wa.pythonPath)]
    return tokens
Return list of shell commands to prepare the environment for bootstrap .
54,912
def _WorkerCommand_launcher(self):
    """Return the command tokens that start the bootstrap process."""
    wa = self.workersArguments
    return [
        wa.pythonExecutable,
        '-m', 'scoop.launch.__main__',
        str(self.workerAmount),
        str(wa.verbose),
    ]
Return list commands to start the bootstrap process
54,913
def _WorkerCommand_options(self):
    """Return the list of command-line options for the bootstrap process."""
    worker = self.workersArguments
    c = []
    # A worker co-located with the broker talks to it over loopback.
    if self.hostname == worker.brokerHostname:
        broker = "127.0.0.1"
    else:
        broker = worker.brokerHostname
    if worker.nice is not None:
        c.extend(['--nice', str(worker.nice)])
    c.extend(['--size', str(worker.size)])
    if self.isLocal():
        c.extend(['--workingDirectory', str(worker.path)])
    else:
        # Quote the path so the remote shell keeps it as one token.
        c.extend(['--workingDirectory', '"{0}"'.format(str(worker.path))])
    c.extend(['--brokerHostname', broker])
    c.extend(['--externalBrokerHostname', worker.brokerHostname])
    c.extend(['--taskPort', str(worker.brokerPorts[0])])
    c.extend(['--metaPort', str(worker.brokerPorts[1])])
    # Only an executable-bearing worker can be the origin.
    if worker.origin and worker.executable:
        c.append('--origin')
    if worker.debug:
        c.append('--debug')
    if worker.profiling:
        c.append('--profile')
    if worker.backend:
        c.append('--backend={0}'.format(worker.backend))
    if worker.verbose >= 1:
        # e.g. verbose == 2 -> '-vv'.
        c.append('-' + 'v' * worker.verbose)
    return c
Return list of options for bootstrap
54,914
def _WorkerCommand_executable(self):
    """Return the user executable and its arguments for bootstrap.

    Remote arguments are double-quoted (with embedded quotes escaped) so
    they survive transport through the remote shell.
    """
    wa = self.workersArguments
    tokens = []
    if wa.executable:
        tokens.append(wa.executable)
    if wa.args:
        if self.isLocal():
            tokens.extend('{0}'.format(arg) for arg in wa.args)
        else:
            tokens.extend('"{0}"'.format(arg.replace('"', '\\\"'))
                          for arg in wa.args)
    return tokens
Return executable and any options to be executed by bootstrap
54,915
def _getWorkerCommandList(self):
    """Generate the full worker command as a flat token list."""
    sections = (
        self._WorkerCommand_environment(),
        self._WorkerCommand_launcher(),
        self._WorkerCommand_options(),
        self._WorkerCommand_executable(),
    )
    command = []
    for section in sections:
        command.extend(section)
    return command
Generate the workerCommand as list
54,916
def launch(self, tunnelPorts=None):
    """Launch every worker assigned on this host.

    Locally, the worker command is spawned directly; remotely, it is run
    through ssh/rsh, optionally with reverse tunnels for the two broker
    ports.

    :param tunnelPorts: optional pair of ports to reverse-tunnel.
    :returns: the accumulated list of spawned subprocesses.
    """
    if self.isLocal():
        command = self._getWorkerCommandList()
        self.subprocesses.append(subprocess.Popen(command))
    else:
        BASE_SSH[0] = self.ssh_executable
        # BUG FIX: copy the template list.  `sshCmd += [...]` below used to
        # extend the shared module-level BASE_SSH/BASE_RSH list in place,
        # so -R tunnel flags accumulated across successive launches.
        sshCmd = list(BASE_SSH if not self.rsh else BASE_RSH)
        if tunnelPorts is not None:
            sshCmd += [
                '-R {0}:127.0.0.1:{0}'.format(tunnelPorts[0]),
                '-R {0}:127.0.0.1:{0}'.format(tunnelPorts[1]),
            ]
        self.subprocesses.append(subprocess.Popen(
            sshCmd + [self.hostname, self.getCommand()],
            bufsize=-1,
            stdout=None,
            stderr=None,
            stdin=subprocess.PIPE,
        ))
    return self.subprocesses
Launch every worker assigned on this host .
54,917
def _switch(self, future):
    """Switch execution to this future's greenlet, passing *future* in.

    Records this future as the currently running one first.
    """
    scoop._control.current = self
    target = self.greenlet
    assert target is not None, (
        "No greenlet to switch to:"
        "\n{0}".format(self.__dict__))
    return target.switch(future)
Switch greenlet .
54,918
def cancel(self):
    """Attempt to cancel this future.

    A future already executing (or shipped for remote execution) cannot be
    cancelled; only futures still waiting in the movable queue can.

    :returns: True when the future was cancelled, False otherwise.
    """
    # Guard clause: anything no longer movable is past the point of no return.
    if self not in scoop._control.execQueue.movable:
        return False
    self.exceptionValue = CancelledError()
    scoop._control.futureDict[self.id]._delete()
    scoop._control.execQueue.remove(self)
    return True
If the call is currently being executed or has been sent for remote execution , it cannot be cancelled and the method will return False ; otherwise the call will be cancelled and the method will return True .
54,919
def done(self):
    """Return True when this future has finished running.

    As a side effect, pushes this future back to the broker if it is still
    sitting in the local execution queue, and drains the inbound
    communication buffer via updateQueue() so pending messages are applied
    before the final _ended() check.
    """
    try:
        # If we still hold the future locally, hand it to the broker.
        scoop._control.execQueue.remove(self)
        scoop._control.execQueue.socket.sendFuture(self)
    except ValueError as e:
        # Not in the local queue (already executing or already sent).
        pass
    scoop._control.execQueue.updateQueue()
    return self._ended()
Returns True if the call was successfully cancelled or finished running , False otherwise . This function updates the executionQueue so it receives all the awaiting messages .
54,920
def add_done_callback(self, callable_,
                      inCallbackType=CallbackType.standard,
                      inCallbackGroup=None):
    """Attach a callable invoked when the future is cancelled or finishes.

    The callable receives the future as its only argument.  If the future
    has already ended, the callback is invoked immediately.

    :param callable_: function to call with the finished future.
    :param inCallbackType: callback category (see CallbackType).
    :param inCallbackGroup: optional group tag for batched execution.
    """
    self.callback.append(callbackEntry(callable_, inCallbackType, inCallbackGroup))
    # Already-finished future: fire the newly added callback right away.
    if self._ended():
        self.callback[-1].func(self)
Attach a callable to the future that will be called when the future is cancelled or finishes running . Callable will be called with the future as its only argument .
54,921
def append(self, future):
    """Append a future to the appropriate local sub-queue.

    Finished futures go to `inprogress` (no index yet) or `ready` (indexed);
    futures with a live greenlet are in progress; everything else is movable.
    If the movable queue then exceeds the high watermark, excess futures are
    shipped to the broker until the queue drains below it.
    """
    if future._ended() and future.index is None:
        self.inprogress.add(future)
    elif future._ended() and future.index is not None:
        self.ready.append(future)
    elif future.greenlet is not None:
        self.inprogress.add(future)
    else:
        self.movable.append(future)
    # Offload work above the high watermark, always keeping at least one
    # movable future locally.
    over_hwm = self.timelen(self.movable) > self.highwatermark
    while over_hwm and len(self.movable) > 1:
        sending_future = self.movable.popleft()
        # Drop local references for futures owned by another worker.
        if sending_future.id[0] != scoop.worker:
            sending_future._delete()
        self.socket.sendFuture(sending_future)
        over_hwm = self.timelen(self.movable) > self.highwatermark
Append a future to the queue .
54,922
def askForPreviousFutures(self):
    """Request a status for every known future from the broker.

    Rate-limited: does nothing if called again within POLLING_TIME ms of
    the previous request round.
    """
    # Throttle: POLLING_TIME is in milliseconds, time.time() in seconds.
    if time.time() < self.lastStatus + POLLING_TIME / 1000:
        return
    self.lastStatus = time.time()
    for future in scoop._control.futureDict.values():
        # The origin's root future (id (worker, 0)) is never queried.
        if scoop.IS_ORIGIN and future.id == (scoop.worker, 0):
            continue
        if future not in self.inprogress:
            self.socket.sendStatusRequest(future)
Request a status for every future to the broker .
54,923
def pop(self):
    """Pop the next future from the queue.

    Ready (already started) futures have priority over movable ones.  When
    both sub-queues are empty, blocks polling the broker — re-requesting
    future statuses and draining inbound messages — until work arrives.
    """
    self.updateQueue()
    # Keep the pipeline full: ask the broker for more work when low.
    if self.timelen(self) < self.lowwatermark:
        self.requestFuture()
    if len(self.ready) != 0:
        return self.ready.popleft()
    elif len(self.movable) != 0:
        return self.movable.popleft()
    else:
        # Nothing local: poll the broker until something shows up.
        self.lastStatus = time.time()
        while len(self) == 0:
            self.askForPreviousFutures()
            self.socket._poll(POLLING_TIME)
            self.updateQueue()
        if len(self.ready) != 0:
            return self.ready.popleft()
        elif len(self.movable) != 0:
            return self.movable.popleft()
Pop the next future from the queue ; in progress futures have priority over those that have not yet started ; higher level futures have priority over lower level ones ;
54,924
def flush(self):
    """Empty the local queue, sending its futures for remote execution."""
    for queued_future in self:
        # Futures owned by another worker must drop local references first.
        if queued_future.id[0] != scoop.worker:
            queued_future._delete()
        self.socket.sendFuture(queued_future)
    self.ready.clear()
    self.movable.clear()
Empty the local queue and send its elements to be executed remotely .
54,925
def updateQueue(self):
    """Process the inbound communication buffer.

    Merges finished futures received from the broker into the local
    futureDict (copying result, exception, executor and completion state),
    fires their standard callbacks, and enqueues incoming new futures.
    """
    for future in self.socket.recvFuture():
        if future._ended():
            # A result for a future we should already know about.
            try:
                thisFuture = scoop._control.futureDict[future.id]
            except KeyError:
                scoop.logger.warn('{0}: Received an unexpected future: '
                                  '{1}'.format(scoop.worker, future.id))
                continue
            # Copy the outcome onto our canonical instance.
            thisFuture.resultValue = future.resultValue
            thisFuture.exceptionValue = future.exceptionValue
            thisFuture.executor = future.executor
            thisFuture.isDone = future.isDone
            thisFuture._execute_callbacks(CallbackType.standard)
            self.append(thisFuture)
            # The received duplicate is no longer needed.
            future._delete()
        elif future.id not in scoop._control.futureDict:
            # Brand new work: register it, then enqueue it.
            scoop._control.futureDict[future.id] = future
            self.append(scoop._control.futureDict[future.id])
        else:
            # Known future returned to us: re-enqueue the canonical instance.
            self.append(scoop._control.futureDict[future.id])
Process inbound communication buffer . Updates the local queue with elements from the broker .
54,926
def sendResult(self, future):
    """Send a finished future's result back for distribution to its parent."""
    # The greenlet is no longer needed and would not serialise anyway.
    future.greenlet = None
    assert future._ended(), "The results are not valid"
    self.socket.sendResult(future)
Send back results to broker for distribution to parent task .
54,927
def shutdown(self):
    """Shut down the resources used by the queue.

    Closes the communication socket and, when debugging is active, flushes
    the collected statistics to the debug directory.
    """
    self.socket.shutdown()
    # `scoop` may already be torn down during interpreter shutdown.
    if scoop:
        if scoop.DEBUG:
            from scoop import _debug
            _debug.writeWorkerDebug(
                scoop._control.debug_stats,
                scoop._control.QueueLength,
            )
Shutdown the resources used by the queue
54,928
def redirectSTDOUTtoDebugFile():
    """Redirect this process's stdout and stderr to per-worker debug files.

    Files are created in the debug directory as ``<identifier>.stdout`` and
    ``<identifier>.stderr``, line-buffered (buffering=1) so output appears
    promptly.
    """
    import sys
    kwargs = {}
    # Python 3's open() takes an encoding; Python 2's does not.
    if sys.version_info >= (3,):
        kwargs["encoding"] = "utf8"
    sys.stdout = open(
        os.path.join(
            getDebugDirectory(),
            "{0}.stdout".format(getDebugIdentifier()),
        ),
        "w",
        1,
        **kwargs
    )
    sys.stderr = open(
        os.path.join(
            getDebugDirectory(),
            "{0}.stderr".format(getDebugIdentifier()),
        ),
        "w",
        1,
        **kwargs
    )
Redirects the stdout and stderr of the current process to a file .
54,929
def writeWorkerDebug(debugStats, queueLength, path_suffix=""):
    """Pickle the execution statistics and queue lengths to the debug dir.

    Writes two files — ``...-STATS`` and ``...-QUEUE`` — named after this
    worker's debug identifier, prefixed with ``origin-`` on the origin.
    """
    createDirectory(path_suffix)
    prefix = "origin-" if scoop.IS_ORIGIN else ""
    base_dir = os.path.join(getDebugDirectory(), path_suffix)
    identifier = getDebugIdentifier()
    outputs = (
        ("{0}worker-{1}-STATS".format(prefix, identifier), debugStats),
        ("{0}worker-{1}-QUEUE".format(prefix, identifier), queueLength),
    )
    for name, payload in outputs:
        with open(os.path.join(base_dir, name), 'wb') as handle:
            pickle.dump(payload, handle)
Serialize the execution data using pickle and writes it into the debug directory .
54,930
def main():
    """Execute the SCOOP launcher module.

    Parses command-line arguments, determines the worker count, builds a
    ScoopApp and runs it, always cleaning up subprocesses via a dedicated
    thread (which also shields cleanup from KeyboardInterrupt).
    """
    parser = makeParser()
    args = parser.parse_args()
    hosts = utils.getHosts(args.hostfile, args.hosts)
    # Worker count: explicit -n flag wins, otherwise derived from hosts.
    if args.n:
        n = args.n
    else:
        n = utils.getWorkerQte(hosts)
    assert n >= 0, (
        "Scoop couldn't determine the number of worker to start.\n"
        "Use the '-n' flag to set it manually."
    )
    if not args.external_hostname:
        args.external_hostname = [utils.externalHostname(hosts)]
    # Launch the workers.
    thisScoopApp = ScoopApp(
        hosts, n, args.b,
        args.verbose if not args.quiet else 0,
        args.python_interpreter,
        args.external_hostname[0],
        args.executable, args.args, args.tunnel,
        args.path, args.debug, args.nice,
        utils.getEnv(), args.profile, args.pythonpath[0],
        args.prolog[0], args.backend, args.rsh, args.ssh_executable,
    )
    rootTaskExitCode = False
    # Running close() on a thread keeps cleanup going even if the main
    # thread receives an interrupt.
    interruptPreventer = Thread(target=thisScoopApp.close)
    try:
        rootTaskExitCode = thisScoopApp.run()
    except Exception as e:
        logging.error('Error while launching SCOOP subprocesses:')
        logging.error(traceback.format_exc())
        rootTaskExitCode = -1
    finally:
        interruptPreventer.start()
        interruptPreventer.join()
    # Propagate the root task's exit code when it is non-zero/true.
    if rootTaskExitCode:
        sys.exit(rootTaskExitCode)
Execution of the SCOOP module . Parses its command - line arguments and launch needed resources .
54,931
def initLogging(self):
    """Configure the logging subsystem.

    Maps ``self.verbose`` (0/1/2) to WARNING/INFO/DEBUG and returns a
    logger named after the concrete class.
    """
    level_by_verbosity = {
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.DEBUG,
    }
    logging.basicConfig(
        level=level_by_verbosity[self.verbose],
        format="[%(asctime)-15s] %(module)-9s %(levelname)-7s %(message)s",
    )
    return logging.getLogger(self.__class__.__name__)
Configures the logger .
54,932
def divideHosts(self, hosts, qty):
    """Divide *qty* worker processes among *hosts*.

    *hosts* is a list of ``(hostname, capacity)`` tuples, mutated in place:
    surplus demand is spread round-robin; excess capacity is trimmed from
    the tail.  Raises when a loopback external hostname cannot reach remote
    hosts without tunnelling.
    """
    maximumWorkers = sum(host[1] for host in hosts)
    if qty > maximumWorkers:
        # More workers requested than declared capacity: add one worker at
        # a time to each host, round-robin, until the demand is met.
        index = 0
        while qty > maximumWorkers:
            hosts[index] = (hosts[index][0], hosts[index][1] + 1)
            index = (index + 1) % len(hosts)
            maximumWorkers += 1
    elif qty < maximumWorkers:
        # Too much capacity: shrink (or drop) hosts from the end of the
        # list until capacity matches the request.
        while qty < maximumWorkers:
            maximumWorkers -= hosts[-1][1]
            if qty > maximumWorkers:
                # Last host is partially used: keep it with the remainder.
                hosts[-1] = (hosts[-1][0], qty - maximumWorkers)
                maximumWorkers += hosts[-1][1]
            else:
                del hosts[-1]
    # A loopback external hostname cannot be reached by remote workers
    # unless ssh tunnelling is requested.
    if self.externalHostname in utils.loopbackReferences and len(hosts) > 1 and not self.tunnel:
        raise Exception(
            "\n"
            "Could not find route from external worker to the "
            "broker: Unresolvable hostname or IP address.\n "
            "Please specify your externally routable hostname "
            "or IP using the --external-hostname parameter or "
            "use the --tunnel flag."
        )
    return hosts
Divide processes among hosts .
54,933
def showHostDivision(self, headless):
    """Log the worker distribution over the hosts.

    The first host (or every host in headless mode) has one worker counted
    separately as the origin.

    :param headless: True when running without a user executable.
    """
    # BUG FIX: log message typo — was 'Worker d--istribution: '.
    scoop.logger.info('Worker distribution: ')
    for worker, number in self.worker_hosts:
        first_worker = (worker == self.worker_hosts[0][0])
        scoop.logger.info(' {0}:\t{1} {2}'.format(
            worker,
            number - 1 if first_worker or headless else str(number),
            "+ origin" if first_worker or headless else "",
        ))
Show the worker distribution over the hosts .
54,934
def setWorkerInfo(self, hostname, workerAmount, origin):
    """Set the launch information for the most recently added worker host.

    :param hostname: host the worker will run on.
    :param workerAmount: number of worker processes for that host.
    :param origin: True when this worker hosts the origin (root) task.
    """
    scoop.logger.debug('Initialising {0}{1} worker {2} [{3}].'.format(
        "local" if hostname in utils.localHostnames else "remote",
        " origin" if origin else "",
        self.workersLeft,
        hostname,
    ))
    # Build the per-worker launch arguments and apply them to the last
    # registered host entry.
    add_args, add_kwargs = self._setWorker_args(origin)
    self.workers[-1].setWorker(*add_args, **add_kwargs)
    self.workers[-1].setWorkerAmount(workerAmount)
Sets the worker information for the current host .
54,935
def close(self):
    """Clean up all spawned subprocesses (workers then brokers)."""
    if self.debug:
        # Leave time for subprocesses to flush their debug output.
        time.sleep(10)
    for worker_host in self.workers:
        worker_host.close()
    for broker in self.brokers:
        try:
            broker.close()
        except AttributeError:
            # Broker was never fully initialised; nothing to close.
            pass
    scoop.logger.info('Finished cleaning spawned subprocesses.')
Subprocess cleanup .
54,936
def processConfig(self, worker_config):
    """Update the pool configuration with a worker's configuration.

    Once any worker reports headless mode, the pool stays headless and a
    discovery advertisement thread is started (once) so new workers can
    find this broker.
    """
    # Sticky flag: headless once any worker says so.
    self.config['headless'] |= worker_config.get("headless", False)
    if self.config['headless']:
        if not self.discovery_thread:
            self.discovery_thread = discovery.Advertise(
                port=",".join(str(a) for a in self.getPorts()),
            )
Update the pool configuration with a worker configuration .
54,937
def main(self):
    """Bootstrap an arbitrary script.

    If no broker hostname was passed, uses the discovery module to find and
    connect to an advertised broker, then configures SCOOP and runs.
    """
    if self.args is None:
        self.parse()
    self.log = utils.initLogging(self.verbose)
    if self.args.workingDirectory:
        os.chdir(self.args.workingDirectory)
    if not self.args.brokerHostname:
        # No broker given: find one broadcasting on the network.
        self.log.info("Discovering SCOOP Brokers on network...")
        pools = discovery.Seek()
        if not pools:
            self.log.error("Could not find a SCOOP Broker broadcast.")
            sys.exit(-1)
        self.log.info("Found a broker named {name} on {host} port "
                      "{ports}".format(
                          name=pools[0].name,
                          host=pools[0].host,
                          ports=pools[0].ports,
                      ))
        self.args.brokerHostname = pools[0].host
        self.args.taskPort = pools[0].ports[0]
        # NOTE(review): metaPort is also taken from ports[0]; ports[1]
        # looks intended here — confirm against discovery.Seek()'s layout.
        self.args.metaPort = pools[0].ports[0]
    self.log.debug("Using following addresses:\n{brokerAddress}\n"
                   "{metaAddress}".format(
                       brokerAddress=self.args.brokerAddress,
                       metaAddress=self.args.metaAddress,
                   ))
    # A directly bootstrapped worker acts as the origin.
    self.args.origin = True
    self.setScoop()
    self.run()
Bootstrap an arbitrary script . If no arguments were passed , use the discovery module to search for and connect to a broker .
54,938
def makeParser(self):
    """Generate the argparse parser with the bootloader's parameters.

    Stores the parser on ``self.parser``; returns nothing.
    """
    self.parser = argparse.ArgumentParser(
        description='Starts the executable.',
        prog=("{0} -m scoop.bootstrap").format(sys.executable))
    self.parser.add_argument('--origin',
                             help="To specify that the worker is the origin",
                             action='store_true')
    self.parser.add_argument('--brokerHostname',
                             help="The routable hostname of a broker",
                             default="")
    self.parser.add_argument('--externalBrokerHostname',
                             help="Externally routable hostname of local "
                                  "worker",
                             default="")
    self.parser.add_argument('--taskPort',
                             help="The port of the broker task socket",
                             type=int)
    self.parser.add_argument('--metaPort',
                             help="The port of the broker meta socket",
                             type=int)
    self.parser.add_argument('--size',
                             help="The size of the worker pool",
                             type=int,
                             default=1)
    self.parser.add_argument('--nice',
                             help="Adjust the niceness of the process",
                             type=int,
                             default=0)
    self.parser.add_argument('--debug',
                             help="Activate the debug",
                             action='store_true')
    self.parser.add_argument('--profile',
                             help="Activate the profiler",
                             action='store_true')
    self.parser.add_argument('--workingDirectory',
                             help="Set the working directory for the "
                                  "execution",
                             default=os.path.expanduser("~"))
    self.parser.add_argument('--backend',
                             help="Choice of communication backend",
                             choices=['ZMQ', 'TCP'],
                             default='ZMQ')
    self.parser.add_argument('executable',
                             nargs='?',
                             help='The executable to start with scoop')
    self.parser.add_argument('args',
                             nargs=argparse.REMAINDER,
                             help='The arguments to pass to the executable',
                             default=[])
    self.parser.add_argument('--verbose', '-v',
                             action='count',
                             help=("Verbosity level of this launch script"
                                   "(-vv for more)"),
                             default=0)
Generate the argparse parser object containing the bootloader accepted parameters
54,939
def parse(self):
    """Parse the command-line arguments, building the parser if needed."""
    if self.parser is None:
        self.makeParser()
    parsed = self.parser.parse_args()
    self.args = parsed
    self.verbose = parsed.verbose
Generate a argparse parser and parse the command - line arguments
54,940
def setScoop(self):
    """Set up the SCOOP module-level constants from the parsed arguments.

    Also renices the process when requested (requires psutil) and prepares
    the debug directory when debugging or profiling is active.
    """
    scoop.IS_RUNNING = True
    scoop.IS_ORIGIN = self.args.origin
    scoop.BROKER = BrokerInfo(
        self.args.brokerHostname,
        self.args.taskPort,
        self.args.metaPort,
        # Fall back to the broker hostname when no external one was given.
        self.args.externalBrokerHostname
        if self.args.externalBrokerHostname
        else self.args.brokerHostname,
    )
    scoop.SIZE = self.args.size
    scoop.DEBUG = self.args.debug
    scoop.MAIN_MODULE = self.args.executable
    scoop.CONFIGURATION = {
        # No executable means this worker runs headless.
        'headless': not bool(self.args.executable),
        'backend': self.args.backend,
    }
    scoop.WORKING_DIRECTORY = self.args.workingDirectory
    scoop.logger = self.log
    if self.args.nice:
        if not psutil:
            scoop.logger.error("psutil not installed.")
            raise ImportError("psutil is needed for nice functionnality.")
        p = psutil.Process(os.getpid())
        # NOTE(review): Process.set_nice() was removed in psutil >= 3.0
        # (replaced by Process.nice(value)) — confirm the pinned version.
        p.set_nice(self.args.nice)
    if scoop.DEBUG or self.args.profile:
        from scoop import _debug
        if scoop.DEBUG:
            _debug.createDirectory()
Setup the SCOOP constants .
54,941
def myFunc(parameter):
    """Demo function executed on a remote host.

    Works even if the host was unavailable at launch time; prints the
    executing worker and a shared constant, then returns parameter + 1.
    """
    greeting = 'Hello World from {0}!'.format(scoop.worker)
    print(greeting)
    shared_value = shared.getConst('myVar')
    print(shared_value[2])
    return parameter + 1
This function will be executed on the remote host even if it was not available at launch .
54,942
def sendResult(self, future):
    """Send a terminated future back to its parent worker."""
    # Work on a shallow copy so the caller's future keeps its attributes.
    outgoing = copy.copy(future)
    # Strip unpicklable / no-longer-needed attributes before serialising.
    outgoing.callable = outgoing.args = outgoing.kargs = outgoing.greenlet = None
    if not outgoing.sendResultBack:
        # The parent did not ask for the value itself, only for completion.
        outgoing.resultValue = None
    payload = pickle.dumps(outgoing, pickle.HIGHEST_PROTOCOL)
    self._sendReply(outgoing.id.worker, payload)
Send a terminated future back to its parent .
54,943
def getSize(string):
    """Open a web page and return its total size in bytes.

    Example code only — returns 0 on any network error or 1-second timeout.
    Do not use this technique in real code.
    """
    try:
        with urllib.request.urlopen(string, None, 1) as page:
            total = 0
            for chunk in page:
                total += len(chunk)
            return total
    except (urllib.error.URLError, socket.timeout):
        return 0
This function opens a web site and then calculates the total size of the page in bytes . This is for the sake of the example . Do not use this technique in real code , as it is not a very bright way to do this .
54,944
def getValue(words):
    """Compute the sum of the letter values of all *words*.

    Letter values come from the shared constant 'lettersValue'.
    """
    # getConst is looked up per letter, exactly as the original did.
    return sum(shared.getConst('lettersValue')[letter]
               for word in words
               for letter in word)
Computes the sum of the values of the words .
54,945
def _run_module_code(code, init_globals=None,
                     mod_name=None, mod_fname=None,
                     mod_loader=None, pkg_name=None):
    """Helper to run code in a new namespace with sys modified.

    Temporarily installs a fresh module under *mod_name* and sets
    sys.argv[0] to *mod_fname* while the code executes; returns a copy of
    the resulting globals taken after the temporary state is restored.
    """
    with _ModifiedArgv0(mod_fname):
        with _TempModule(mod_name) as temp_module:
            mod_globals = temp_module.module.__dict__
            _run_code(code, mod_globals, init_globals,
                      mod_name, mod_fname, mod_loader, pkg_name)
    # Copy outside the managers: the temp module is being torn down, so a
    # snapshot protects the caller from later mutation/clearing.
    return mod_globals.copy()
Helper to run code in new namespace with sys modified
54,946
def run_module(mod_name, init_globals=None,
               run_name=None, alter_sys=False):
    """Execute a module's code without importing it.

    :param mod_name: dotted module name to locate and run.
    :param init_globals: optional extra globals injected before execution.
    :param run_name: value for __name__ (defaults to the module name).
    :param alter_sys: when True, temporarily modify sys.argv[0]/sys.modules.
    :returns: the resulting module globals dictionary.
    """
    mod_name, loader, code, fname = _get_module_details(mod_name)
    if run_name is None:
        run_name = mod_name
    # Parent package name, '' for top-level modules.
    pkg_name = mod_name.rpartition('.')[0]
    if alter_sys:
        return _run_module_code(code, init_globals, run_name,
                                fname, loader, pkg_name)
    else:
        # Leave sys intact: run in a plain, throwaway namespace.
        return _run_code(code, {}, init_globals, run_name,
                         fname, loader, pkg_name)
Execute a module s code without importing it
54,947
def _get_importer(path_name):
    """Python version of the PyImport_GetImporter C API function.

    Returns the importer for *path_name*, consulting and populating
    sys.path_importer_cache; falls back to imp.NullImporter, and returns
    None when even that cannot handle the path.
    """
    cache = sys.path_importer_cache
    try:
        importer = cache[path_name]
    except KeyError:
        # Mark the path as pending in the cache while probing the hooks.
        cache[path_name] = None
        for hook in sys.path_hooks:
            try:
                importer = hook(path_name)
                break
            except ImportError:
                pass
        else:
            # No hook accepted the path: try the null importer.
            try:
                importer = imp.NullImporter(path_name)
            except ImportError:
                # Path is not importable at all; cache keeps the None mark.
                return None
        cache[path_name] = importer
    return importer
Python version of PyImport_GetImporter C API function
54,948
def run_path(path_name, init_globals=None, run_name=None):
    """Execute the code located at the given filesystem location.

    Plain files are compiled and run directly; importable paths (e.g. zip
    files or directories) are pushed onto sys.path and run via their
    __main__ module.  Returns a copy of the resulting globals.
    """
    if run_name is None:
        run_name = "<run_path>"
    importer = _get_importer(path_name)
    if isinstance(importer, imp.NullImporter):
        # Ordinary source/bytecode file: run it in place.
        code = _get_code_from_file(path_name)
        return _run_module_code(code, init_globals, run_name, path_name)
    else:
        # Importable location: make it the first sys.path entry and run
        # its __main__, shielding the real __main__ module meanwhile.
        sys.path.insert(0, path_name)
        try:
            main_name = "__main__"
            saved_main = sys.modules[main_name]
            del sys.modules[main_name]
            try:
                mod_name, loader, code, fname = _get_main_module_details()
            finally:
                sys.modules[main_name] = saved_main
            pkg_name = ""
            with _ModifiedArgv0(path_name):
                with _TempModule(run_name) as temp_module:
                    mod_globals = temp_module.module.__dict__
                    return _run_code(code, mod_globals, init_globals,
                                     run_name, fname, loader, pkg_name).copy()
        finally:
            try:
                sys.path.remove(path_name)
            except ValueError:
                pass
Execute code located at the specified filesystem location
54,949
def maxTreeDepthDivide(rootValue, currentDepth=0, parallelLevel=2):
    """Find the node holding *rootValue* and compute its branch's max depth.

    Emits new parallel futures for the two subtrees until *currentDepth*
    reaches *parallelLevel*, then finishes the computation sequentially.
    The tree is fetched from the shared constant 'myTree'.
    """
    thisRoot = shared.getConst('myTree').search(rootValue)
    if currentDepth >= parallelLevel:
        # Deep enough: stop spawning futures, recurse sequentially.
        return thisRoot.maxDepth(currentDepth)
    else:
        # Leaf node: depth is simply where we stand.
        if not any([thisRoot.left, thisRoot.right]):
            return currentDepth
        # One-child node: cannot split in two, compute sequentially.
        if not all([thisRoot.left, thisRoot.right]):
            return thisRoot.maxDepth(currentDepth)
        # Two children: evaluate both subtrees as parallel futures.
        return max(
            futures.map(
                maxTreeDepthDivide,
                [thisRoot.left.payload, thisRoot.right.payload],
                cycle([currentDepth + 1]),
                cycle([parallelLevel]),
            )
        )
Finds a tree node that represents rootValue and computes the max depth of this tree branch . This function will emit new futures until currentDepth = parallelLevel
54,950
def insert(self, value):
    """Insert *value* into the binary search tree rooted at this node.

    Values less than or equal to the payload go left, larger values go
    right; an empty or equal payload is simply (re)assigned.
    """
    if not self.payload or value == self.payload:
        self.payload = value
        return
    # Pick the side according to BST ordering, then descend or attach.
    side = 'left' if value <= self.payload else 'right'
    child = getattr(self, side)
    if child:
        child.insert(value)
    else:
        setattr(self, side, BinaryTreeNode(value))
Insert a value in the tree
54,951
def maxDepth(self, currentDepth=0):
    """Compute the depth of the longest branch below this node.

    :param currentDepth: depth already accumulated above this node.
    :returns: the maximum depth reached by any leaf of this subtree.
    """
    children = [child for child in (self.left, self.right) if child]
    if not children:
        # Leaf: the branch ends here.
        return currentDepth
    return max(child.maxDepth(currentDepth + 1) for child in children)
Compute the depth of the longest branch of the tree
54,952
def search(self, value):
    """Find and return the node holding *value*, or None if absent.

    Standard BST descent: smaller-or-equal values go left, larger go right.
    """
    if self.payload == value:
        return self
    branch = self.left if value <= self.payload else self.right
    if branch:
        return branch.search(value)
    return None
Find an element in the tree
54,953
def createZMQSocket(self, sock_type):
    """Create a socket of the given sock_type and deactivate message dropping.

    :param sock_type: a zmq socket type constant (e.g. zmq.ROUTER).
    :returns: the configured socket.
    """
    sock = self.ZMQcontext.socket(sock_type)
    sock.setsockopt(zmq.LINGER, LINGER_TIME)
    sock.setsockopt(zmq.IPV4ONLY, 0)
    # Unlimited high-water marks: never drop queued messages.
    sock.setsockopt(zmq.SNDHWM, 0)
    sock.setsockopt(zmq.RCVHWM, 0)
    try:
        sock.setsockopt(zmq.IMMEDIATE, 1)
    except (AttributeError, zmq.error.ZMQError):
        # zmq.IMMEDIATE requires a recent pyzmq/libzmq: older versions lack
        # the constant (AttributeError) or reject the option (ZMQError).
        # (Was a bare `except:`, which also swallowed SystemExit.)
        pass
    if sock_type == zmq.ROUTER:
        # Fail loudly on unroutable messages instead of dropping them.
        sock.setsockopt(zmq.ROUTER_MANDATORY, 1)
    return sock
Create a socket of the given sock_type and deactivate message dropping
54,954
def _reportFutures(self):
    """Periodically push the ids of this worker's queued futures to the
    broker; intended to be run by a separate thread."""
    try:
        while True:
            time.sleep(scoop.TIME_BETWEEN_STATUS_REPORTS)
            queue = scoop._control.execQueue
            fids = set(f.id for f in queue.movable)
            fids.update(f.id for f in queue.ready)
            fids.update(f.id for f in queue.inprogress)
            self.socket.send_multipart([
                STATUS_UPDATE,
                pickle.dumps(fids),
            ])
    except AttributeError:
        # The queue disappears during shutdown; just stop reporting.
        pass
Sends futures status updates to broker at intervals of scoop . TIME_BETWEEN_STATUS_REPORTS seconds . Is intended to be run by a separate thread .
54,955
def _sendReply(self, destination, fid, *args):
    """Send a REPLY straight to *destination*; if the direct channel fails,
    fall back to routing it through the broker."""
    self.addPeer(destination)
    payload = list(args)
    try:
        self.direct_socket.send_multipart(
            [destination, REPLY] + payload,
            flags=zmq.NOBLOCK,
        )
    except zmq.error.ZMQError:
        scoop.logger.debug(
            "{0}: Could not send result directly to peer {1}, routing "
            "through broker.".format(scoop.worker, destination)
        )
        # Broker route: destination travels as the trailing frame.
        self.socket.send_multipart([REPLY] + payload + [destination])
    # Either way, tell the broker this future is finished.
    self.socket.send_multipart([
        STATUS_DONE,
        fid,
    ])
Send a REPLY directly to its destination . If it doesn t work launch it back to the broker .
54,956
def _startup(rootFuture, *args, **kargs):
    """Initialize the SCOOP environment.

    Hands *rootFuture* to a fresh controller greenlet and returns its
    result; a requested Shutdown aborts with None.  Always shuts the
    execution queue down before returning.
    """
    import greenlet
    global _controller
    _controller = greenlet.greenlet(control.runController)
    try:
        result = _controller.switch(rootFuture, *args, **kargs)
    except scoop._comm.Shutdown:
        # Shutdown was requested: no result to return.
        result = None
    control.execQueue.shutdown()
    return result
Initializes the SCOOP environment .
54,957
def _recursiveReduce(mapFunc, reductionFunc, scan, *iterables):
    """Generate the binary reduction tree used by mapReduce.

    Each iterable is split in half; halves of more than one element recurse
    through SCOOP futures while single elements are mapped locally, then
    the two sides are combined with *reductionFunc*.  When *scan* is true,
    the running list of partial reductions is returned instead of the final
    value.
    """
    if iterables:
        # Split every iterable at the midpoint of the shortest one.
        half = min(len(x) // 2 for x in iterables)
        data_left = [list(x)[:half] for x in iterables]
        data_right = [list(x)[half:] for x in iterables]
    else:
        data_left = data_right = [[]]
    out_futures = [None, None]
    out_results = [None, None]
    for index, data in enumerate([data_left, data_right]):
        if any(len(x) <= 1 for x in data):
            # Too small to split further: map the single element locally.
            out_results[index] = mapFunc(*list(zip(*data))[0])
        else:
            out_futures[index] = submit(_recursiveReduce, mapFunc,
                                        reductionFunc, scan, *data)
    # Collect the results of the halves computed through futures.
    for index, future in enumerate(out_futures):
        if future:
            out_results[index] = future.result()
    if scan:
        # Stitch the partial scans together and append the reduction of
        # their last elements as the new running total.
        last_results = copy.copy(out_results)
        if type(out_results[0]) is not list:
            out_results[0] = [out_results[0]]
        else:
            last_results[0] = out_results[0][-1]
        if type(out_results[1]) is list:
            out_results[0].extend(out_results[1][:-1])
            last_results[1] = out_results[1][-1]
        out_results[0].append(reductionFunc(*last_results))
        return out_results[0]
    return reductionFunc(*out_results)
Generates the recursive reduction tree . Used by mapReduce .
54,958
def _createFuture(func, *args, **kwargs):
    """Wrap *func* and its arguments in a Future owned by the current task."""
    assert callable(func), ("The provided func parameter is not a callable.")
    if scoop.IS_ORIGIN and "SCOOP_WORKER" not in sys.modules:
        # Workers import the user program as SCOOP_WORKER; alias it on the
        # origin so both sides resolve the same module name.
        sys.modules["SCOOP_WORKER"] = sys.modules["__main__"]
    # Lambdas and bound methods do not pickle; ship them as shared elements.
    is_lambda = (isinstance(func, type(lambda: None))
                 and func.__name__ == '<lambda>')
    if is_lambda or ismethod(func):
        from .shared import SharedElementEncapsulation
        func = SharedElementEncapsulation(func)
    return Future(control.current.id, func, *args, **kwargs)
Helper function to create a future .
54,959
def _waitAny(*children):
    """Wait on any child Future created by the calling Future.

    Generator: yields each finished child (removing it from the
    bookkeeping) and re-raises any exception a child recorded.  Suspends
    the current greenlet between completions.
    """
    n = len(children)
    # First pass: yield children that already finished.
    for index, future in enumerate(children):
        if future.exceptionValue:
            raise future.exceptionValue
        if future._ended():
            future._delete()
            yield future
            n -= 1
        else:
            future.index = index
    future = control.current
    # Remaining children: hand control to the scheduler until each one
    # comes back finished.
    while n > 0:
        future.stopWatch.halt()
        childFuture = _controller.switch(future)
        future.stopWatch.resume()
        if childFuture.exceptionValue:
            raise childFuture.exceptionValue
        if childFuture in children:
            childFuture._delete()
            yield childFuture
            n -= 1
Waits on any child Future created by the calling Future .
54,960
def wait(fs, timeout=-1, return_when=ALL_COMPLETED):
    """Wait for the futures in *fs* to complete.

    Mirrors concurrent.futures.wait(): returns a named tuple of
    (done, not_done) sets.  A negative *timeout* blocks until the
    *return_when* condition holds, 0 polls once, and a positive value
    polls until the deadline.  Using this function may prevent a worker
    from executing.
    """
    DoneAndNotDoneFutures = namedtuple('DoneAndNotDoneFutures',
                                       'done not_done')
    if timeout < 0:
        # Block until the requested completion condition.
        if return_when == FIRST_COMPLETED:
            next(_waitAny(*fs))
        elif return_when in [ALL_COMPLETED, FIRST_EXCEPTION]:
            for _ in _waitAll(*fs):
                pass
        done = set(f for f in fs if f.done())
        not_done = set(fs) - done
        return DoneAndNotDoneFutures(done, not_done)
    elif timeout == 0:
        # Non-blocking: just flush and poll the communication queue once.
        control.execQueue.flush()
        control.execQueue.updateQueue()
        done = set(f for f in fs if f._ended())
        not_done = set(fs) - done
        return DoneAndNotDoneFutures(done, not_done)
    else:
        # Timed polling loop.
        done = set()
        start_time = time.time()
        while time.time() - start_time < timeout:
            control.execQueue.flush()
            # NOTE(review): the poll delay passed here is the *elapsed*
            # time, not the time remaining -- looks suspicious; confirm
            # against FutureQueue.socket._poll before changing.
            control.execQueue.socket._poll(time.time() - start_time)
            control.execQueue.updateQueue()
            for f in fs:
                if f._ended():
                    done.add(f)
            not_done = set(fs) - done
            if return_when == FIRST_COMPLETED and len(done) > 0:
                break
            if len(not_done) == 0:
                break
        return DoneAndNotDoneFutures(done, not_done)
Wait for the futures in the given sequence to complete . Using this function may prevent a worker from executing .
54,961
def advertiseBrokerWorkerDown(exctype, value, traceback):
    """Excepthook advertising the broker when a worker dies unexpectedly,
    then delegating to the default handler."""
    if not scoop.SHUTDOWN_REQUESTED:
        # Unplanned death: close the queue so the broker notices.
        execQueue.shutdown()
    sys.__excepthook__(exctype, value, traceback)
Hook advertising the broker if an impromptu shutdown is occurring.
54,962
def delFutureById(futureId, parentId):
    """Remove the future identified by *futureId* from the global registry
    and from its parent's children mapping; missing entries are ignored."""
    try:
        del futureDict[futureId]
    except KeyError:
        pass
    try:
        children = futureDict[parentId].children
        stale = [child for child in children if child.id == futureId]
        for child in stale:
            del children[child]
    except KeyError:
        pass
Delete future on id basis
54,963
def delFuture(afuture):
    """Remove *afuture* from the global registry and from its parent's
    children mapping; entries already gone are silently ignored."""
    try:
        del futureDict[afuture.id]
    except KeyError:
        pass
    try:
        del futureDict[afuture.parentId].children[afuture]
    except KeyError:
        pass
Delete future afuture
54,964
def runFuture(future):
    """Callable greenlet in charge of running tasks.

    Executes *future*'s callable, recording wait/execution times, the
    result or the raised exception, and (in DEBUG mode) per-future
    statistics.  Returns the finished future.
    """
    global debug_stats
    global QueueLength
    if scoop.DEBUG:
        init_debug()
        debug_stats[future.id]['start_time'].append(time.time())
    future.waitTime = future.stopWatch.get()
    future.stopWatch.reset()
    # Pick the group id of the first callback, if any (used as executor tag).
    try:
        uniqueReference = [cb.groupID for cb in future.callback][0]
    except IndexError:
        uniqueReference = None
    future.executor = (scoop.worker, uniqueReference)
    try:
        future.resultValue = future.callable(*future.args, **future.kargs)
    except BaseException as err:
        # Capture instead of propagating: the exception travels with the
        # future back to its parent.
        future.exceptionValue = err
        future.exceptionTraceback = str(traceback.format_exc())
        scoop.logger.debug(
            "The following error occured on a worker:\n%r\n%s",
            err,
            traceback.format_exc(),
        )
    future.executionTime = future.stopWatch.get()
    future.isDone = True
    # Feed the per-callable execution-time statistics.
    if future.executionTime != 0. and hasattr(future.callable, '__name__'):
        execStats[hash(future.callable)].appendleft(future.executionTime)
    if scoop.DEBUG:
        t = time.time()
        debug_stats[future.id]['end_time'].append(t)
        debug_stats[future.id].update({
            'executionTime': future.executionTime,
            'worker': scoop.worker,
            'creationTime': future.creationTime,
            'callable': str(future.callable.__name__)
                if hasattr(future.callable, '__name__')
                else 'No name',
            'parent': future.parentId
        })
        QueueLength.append((t, len(execQueue), execQueue.timelen(execQueue)))
    future._execute_callbacks(CallbackType.universal)
    future._delete()
    return future
Callable greenlet in charge of running tasks .
54,965
def runController(callable_, *args, **kargs):
    """Callable greenlet implementing controller logic.

    On first run, bootstraps the environment (queue, excepthook, headless
    main-module distribution).  Then runs the root future (origin) or pops
    futures from the queue, switching into task greenlets until the root
    future finishes.  Exits the process on an uncaught task exception.
    """
    global execQueue
    # Sentinel id of the (virtual) parent of the root future.
    rootId = (-1, 0)
    # One-time initialization, guarded by the queue's existence.
    if execQueue is None:
        execQueue = FutureQueue()
        sys.excepthook = advertiseBrokerWorkerDown
        if scoop.DEBUG:
            from scoop import _debug
            _debug.redirectSTDOUTtoDebugFile()
        headless = scoop.CONFIGURATION.get("headless", False)
        if not scoop.MAIN_MODULE:
            # Headless worker: fetch the user program from the origin and
            # re-import it from a scratch directory.
            main = scoop.shared.getConst('__MAIN_MODULE__',
                                         timeout=float('inf'))
            directory_name = tempfile.mkdtemp()
            os.chdir(directory_name)
            scoop.MAIN_MODULE = main.writeFile(directory_name)
            from .bootstrap.__main__ import Bootstrap as SCOOPBootstrap
            newModule = SCOOPBootstrap.setupEnvironment()
            sys.modules['__main__'] = newModule
        elif scoop.IS_ORIGIN and headless and scoop.MAIN_MODULE:
            # Origin in headless mode: publish the user program for workers.
            scoop.shared.setConst(
                __MAIN_MODULE__=scoop.encapsulation.ExternalEncapsulation(
                    scoop.MAIN_MODULE,
                )
            )
    # The origin seeds the computation; workers pull from the queue.
    if scoop.IS_ORIGIN:
        future = Future(rootId, callable_, *args, **kargs)
    else:
        future = execQueue.pop()
    future.greenlet = greenlet.greenlet(runFuture)
    future = future._switch(future)
    if scoop.DEBUG:
        lastDebugTs = time.time()
    # Main loop: run until the origin's root future has ended.
    while not scoop.IS_ORIGIN or future.parentId != rootId \
            or not future._ended():
        if scoop.DEBUG and \
                time.time() - lastDebugTs > scoop.TIME_BETWEEN_PARTIALDEBUG:
            _debug.writeWorkerDebug(
                debug_stats,
                QueueLength,
                "debug/partial-{0}".format(round(time.time(), -1))
            )
            lastDebugTs = time.time()
        if future._ended():
            if future.id[0] != scoop.worker:
                # Result belongs to another worker: ship it home.
                execQueue.sendResult(future)
                future = execQueue.pop()
            else:
                if future.index is not None:
                    # A parent is waiting on this child: resume it if it is
                    # still alive and healthy.
                    try:
                        parent = futureDict[future.parentId]
                    except KeyError:
                        future = execQueue.pop()
                    else:
                        if parent.exceptionValue is None:
                            future = parent._switch(future)
                        else:
                            future = execQueue.pop()
                else:
                    future = execQueue.pop()
        else:
            future = execQueue.pop()
        # Fresh future from the queue: give it a greenlet and run it.
        if not future._ended() and future.greenlet is None:
            future.greenlet = greenlet.greenlet(runFuture)
            future = future._switch(future)
    execQueue.shutdown()
    if future.exceptionValue:
        print(future.exceptionTraceback)
        sys.exit(1)
    return future.resultValue
Callable greenlet implementing controller logic .
54,966
def mode(self):
    """Mode of the log-normal distribution fitted to the stats data
    (exp(mu - sigma**2)); infinity when the computation degenerates."""
    mu, sigma = self.mean(), self.std()
    result = math.exp(mu - sigma ** 2)
    return float("inf") if math.isnan(result) else result
Computes the mode of a log - normal distribution built with the stats data .
54,967
def median(self):
    """Median of the log-normal distribution fitted to the stats data
    (exp(mu)); infinity when the computation degenerates."""
    result = math.exp(self.mean())
    return float("inf") if math.isnan(result) else result
Computes the median of a log - normal distribution built with the stats data .
54,968
def _decode_string(buf, pos):
    """Read a NUL-terminated string from *buf* starting at *pos*.

    Returns (decoded_string, index_of_next_byte).  Raises MinusconfError
    on a bad encoding or a missing terminator.
    """
    for end in range(pos, len(buf)):
        if buf[end:end + 1] != _compat_bytes('\x00'):
            continue
        try:
            return (buf[pos:end].decode(_CHARSET), end + 1)
        except UnicodeDecodeError:
            raise MinusconfError(
                'Not a valid ' + _CHARSET + ' string: ' + repr(buf[pos:end]))
    raise MinusconfError(
        "Premature end of string (Forgot trailing \\0?), buf=" + repr(buf))
Decodes a string in the buffer buf starting at position pos. Returns a tuple of the decoded string and the index of the next byte to read.
54,969
def _find_sock ( ) : if socket . has_ipv6 : try : return socket . socket ( socket . AF_INET6 , socket . SOCK_DGRAM ) except socket . gaierror : pass return socket . socket ( socket . AF_INET , socket . SOCK_DGRAM )
Create a UDP socket
54,970
def _compat_inet_pton(family, addr):
    """socket.inet_pton replacement for platforms that lack it.

    Supports AF_INET dotted quads and AF_INET6 addresses (including '::'
    compaction and embedded IPv4 tails such as ::ffff:1.2.3.4).  Returns
    the packed binary address; raises ValueError on malformed input or an
    unsupported family.
    """
    if family == socket.AF_INET:
        res = _compat_bytes('')
        parts = addr.split('.')
        if len(parts) != 4:
            raise ValueError('Expected 4 dot-separated numbers')
        for part in parts:
            intval = int(part, 10)
            if intval < 0 or intval > 0xff:
                raise ValueError(
                    "Invalid integer value in IPv4 address: " + str(intval))
            res = res + struct.pack('!B', intval)
        return res
    elif family == socket.AF_INET6:
        wordcount = 8
        res = _compat_bytes('')
        dotpos = addr.find('.')
        if dotpos >= 0:
            # Embedded IPv4 tail: pack it first, mark its place with '!'.
            v4start = addr.rfind(':', 0, dotpos)
            if v4start == -1:
                # BUGFIX: was `raise ValueException(...)`, an undefined
                # name that produced a NameError instead of a ValueError.
                raise ValueError("Missing colons in an IPv6 address")
            wordcount = 6
            res = socket.inet_aton(addr[v4start + 1:])
            addr = addr[:v4start] + '!'
        compact_pos = addr.find('::')
        if compact_pos >= 0:
            # Expand '::' into the right number of zero hextets.
            if compact_pos == 0:
                addr = '0' + addr
                compact_pos += 1
            if compact_pos == len(addr) - len('::'):
                addr = addr + '0'
            addr = (addr[:compact_pos] + ':' +
                    ('0:' * (wordcount - (addr.count(':') -
                                          '::'.count(':')) - 2)) +
                    addr[compact_pos + len('::'):])
        if addr.endswith('!'):
            addr = addr[:-len('!')]
        words = addr.split(':')
        if len(words) != wordcount:
            raise ValueError('Invalid number of IPv6 hextets, expected ' +
                             str(wordcount) + ', got ' + str(len(words)))
        # Build the packed address from the last hextet backwards so the
        # IPv4 tail (already in res) stays at the end.
        for w in reversed(words):
            if 'x' in w or '-' in w:
                # Reject forms int() would accept (hex prefix, negatives).
                raise ValueError("Invalid character in IPv6 address")
            intval = int(w, 16)
            if intval > 0xffff:
                raise ValueError("IPv6 address componenent too big")
            res = struct.pack('!H', intval) + res
        return res
    else:
        # BUGFIX: *family* is an int -- concatenating it to a str raised
        # TypeError instead of the intended ValueError.
        raise ValueError("Unknown protocol family " + str(family))
socket . inet_pton for platforms that don t have it
54,971
def start_blocking(self):
    """Start the advertiser in the background, returning only once it has
    signalled readiness through the _cav_started event."""
    ready = self._cav_started
    ready.clear()
    self.start()
    ready.wait()
Start the advertiser in the background but wait until it is ready
54,972
def _send_queries(self):
    """Send one query per resolved address and return the number that
    succeeded.  Errors re-raise unless self.ignore_senderrors is set."""
    sent = 0
    addrs = _resolve_addrs(self.addresses, self.port,
                           self.ignore_senderrors, [self._sock.family])
    for addr in addrs:
        try:
            self._send_query(addr[1])
        except:
            if not self.ignore_senderrors:
                raise
        else:
            sent += 1
    return sent
Sends queries to multiple addresses . Returns the number of successful queries .
54,973
def clean(self):
    """Unconditionally raise the default "invalid login" error -- whatever
    the user entered here is never accepted."""
    raise forms.ValidationError(
        self.error_messages['invalid_login'],
        code='invalid_login',
        params={'username': self.username_field.verbose_name})
Always raise the default error message because we don't care what was entered here.
54,974
def types(**requirements):
    """Precondition factory: each keyword names an argument and gives the
    type (or tuple of types) it must have."""
    def predicate(args):
        for name, kind in sorted(requirements.items()):
            assert hasattr(args, name), "missing required argument `%s`" % name
            # isinstance accepts a tuple of types natively.
            kinds = kind if isinstance(kind, tuple) else (kind,)
            if not isinstance(getattr(args, name), kinds):
                return False
        return True
    return condition("the types of arguments must be valid", predicate, True)
Specify a precondition based on the types of the function s arguments .
54,975
def ensure(arg1, arg2=None):
    """Precondition factory: accepts either (description, predicate) or a
    single predicate function whose source text serves as description."""
    assert (isinstance(arg1, str) and isfunction(arg2)) or \
           (isfunction(arg1) and arg2 is None)
    if isinstance(arg1, str):
        description, predicate = arg1, arg2
    else:
        description, predicate = get_function_source(arg1), arg1
    return condition(description, predicate, False, True)
Specify a precondition described by description and tested by predicate .
54,976
def invariant(arg1, arg2=None):
    """Class-decorator factory enforcing a class invariant.

    Accepts either (description, predicate) or a single predicate whose
    source text is used as the description.  The returned decorator wraps
    every relevant method of the class so the invariant is checked around
    calls (after only, for __init__).
    """
    desc = ""
    predicate = lambda x: x
    if isinstance(arg1, str):
        desc = arg1
        predicate = arg2
    else:
        desc = get_function_source(arg1)
        predicate = arg1
    def invariant(c):
        # Decide whether a class attribute should be wrapped.
        def check(name, func):
            # Skip dunders except a whitelist of value-semantics methods.
            exceptions = ("__getitem__", "__setitem__", "__lt__", "__le__",
                          "__eq__", "__ne__", "__gt__", "__ge__", "__init__")
            if name.startswith("__") and name.endswith("__") \
                    and name not in exceptions:
                return False
            if not ismethod(func) and not isfunction(func):
                return False
            # Skip classmethods bound to the class itself.
            if getattr(func, "__self__", None) is c:
                return False
            return True
        class InvariantContractor(c):
            pass
        for name, value in [(name, getattr(c, name)) for name in dir(c)]:
            if check(name, value):
                # __init__ is only checked after the call: the object is
                # not yet initialized before it runs.
                setattr(InvariantContractor, name,
                        condition(desc, predicate,
                                  name != "__init__", True, True)(value))
        return InvariantContractor
    return invariant
Specify a class invariant described by description and tested by predicate .
54,977
def mkpassword(length=16, chars=None, punctuation=None):
    """Generate a random ASCII password -- useful to generate authinfos.

    length -- total length of the result
    chars -- alphabet for the non-punctuation part (default letters+digits)
    punctuation -- if set, that many characters are drawn from PUNCTUATION
                   and shuffled in (total length stays *length*)
    """
    # SECURITY FIX: use a CSPRNG.  The module-level random functions use
    # the predictable Mersenne Twister, unsuitable for passwords;
    # SystemRandom draws from os.urandom with the same API.
    rng = random.SystemRandom()
    if chars is None:
        chars = string.ascii_letters + string.digits
    data = [rng.choice(chars) for _ in range(length)]
    if punctuation:
        # Trade trailing characters for punctuation to keep the length.
        data = data[:-punctuation]
        for _ in range(punctuation):
            data.append(rng.choice(PUNCTUATION))
    rng.shuffle(data)
    return ''.join(data)
Generates a random ascii string - useful to generate authinfos
54,978
def disk_check_size(ctx, param, value):
    """click callback validating that a disk size is a multiple of 1024."""
    if value:
        # A tuple carries the size in its second slot.
        size = value[1] if isinstance(value, tuple) else value
        if size % 1024:
            raise click.ClickException('Size must be a multiple of 1024.')
    return value
Validation callback for disk size parameter .
54,979
def create(cls, fqdn, flags, algorithm, public_key):
    """Create a DNSSEC key for *fqdn* (lower-cased) via the API."""
    params = {
        'flags': flags,
        'algorithm': algorithm,
        'public_key': public_key,
    }
    return cls.call('domain.dnssec.create', fqdn.lower(), params)
Create a dnssec key .
54,980
def from_name(cls, name):
    """Return the id of the snapshot profile named *name*.

    None when nothing matches; DuplicateResults when ambiguous.
    """
    matches = cls.list({'name': name})
    if not matches:
        return None
    if len(matches) == 1:
        return matches[0]['id']
    raise DuplicateResults('snapshot profile name %s is ambiguous.' % name)
Retrieve the snapshot profile associated with a name.
54,981
def list(cls, options=None, target=None):
    """List snapshot profiles, optionally restricted to 'paas' or 'vm',
    merged from both APIs and sorted by profile id."""
    options = options or {}
    merged = []
    for kind, method in (('paas', 'paas.snapshotprofile.list'),
                         ('vm', 'hosting.snapshotprofile.list')):
        if target and target != kind:
            continue
        for profile in cls.safe_call(method, options):
            profile['target'] = kind
            merged.append((profile['id'], profile))
    merged.sort(key=lambda item: item[0])
    return [profile for _, profile in merged]
List all snapshot profiles .
54,982
def records(cls, fqdn, sort_by=None, text=False):
    """Fetch the record set of *fqdn*, as JSON or (with *text*) plain text."""
    meta = cls.get_fqdn_info(fqdn)
    url = cls.get_sort_url(meta['domain_records_href'], sort_by)
    kwargs = {'headers': {'Accept': 'text/plain'}} if text else {}
    return cls.json_get(url, **kwargs)
Display records information about a domain .
54,983
def add_record(cls, fqdn, name, type, value, ttl):
    """POST a new record (name/type/value[, ttl]) for *fqdn*."""
    payload = {
        "rrset_name": name,
        "rrset_type": type,
        "rrset_values": value,
    }
    if ttl:
        payload['rrset_ttl'] = int(ttl)
    url = cls.get_fqdn_info(fqdn)['domain_records_href']
    return cls.json_post(url, data=json.dumps(payload))
Create record for a domain .
54,984
def update_record(cls, fqdn, name, type, value, ttl, content):
    """PUT record updates for *fqdn*.

    When *content* is given it replaces all records verbatim (text/plain);
    otherwise name/type/value(/ttl) update a single record set.
    """
    payload = {
        "rrset_name": name,
        "rrset_type": type,
        "rrset_values": value,
    }
    if ttl:
        payload['rrset_ttl'] = int(ttl)
    meta = cls.get_fqdn_info(fqdn)
    if content:
        # Bulk replacement of the whole zone content.
        return cls.json_put(meta['domain_records_href'],
                            headers={'Content-Type': 'text/plain'},
                            data=content)
    url = '%s/domains/%s/records/%s/%s' % (cls.api_url, fqdn, name, type)
    return cls.json_put(url, data=json.dumps(payload))
Update all records for a domain .
54,985
def del_record(cls, fqdn, name, type):
    """DELETE records of *fqdn*, optionally narrowed to *name* and *type*."""
    url = cls.get_fqdn_info(fqdn)['domain_records_href']
    for part in (name, type):
        if part:
            url = '%s/%s' % (url, part)
    return cls.json_delete(url)
Delete record for a domain .
54,986
def keys(cls, fqdn, sort_by=None):
    """Fetch DNSSEC key information for *fqdn*."""
    meta = cls.get_fqdn_info(fqdn)
    sorted_url = cls.get_sort_url(meta['domain_keys_href'], sort_by)
    return cls.json_get(sorted_url)
Display keys information about a domain .
54,987
def keys_info(cls, fqdn, key):
    """Fetch information about a single DNSSEC key of *fqdn*."""
    url = '%s/domains/%s/keys/%s' % (cls.api_url, fqdn, key)
    return cls.json_get(url)
Retrieve key information .
54,988
def keys_create(cls, fqdn, flag):
    """POST a new DNSSEC key for *fqdn*, then fetch and return the created
    resource found at the response's Location header."""
    url = cls.get_fqdn_info(fqdn)['domain_keys_href']
    payload = json.dumps({"flags": flag})
    ret, headers = cls.json_post(url, data=payload, return_header=True)
    return cls.json_get(headers['location'])
Create new key entry for a domain .
54,989
def list(gandi, datacenter, id, subnet, gateway):
    """Print every vlan (separated by lines) and return the list."""
    output_keys = ['name', 'state', 'dc']
    # Optional columns requested through CLI flags.
    for flag, key in ((id, 'id'), (subnet, 'subnet'), (gateway, 'gateway')):
        if flag:
            output_keys.append(key)
    datacenters = gandi.datacenter.list()
    vlans = gandi.vlan.list(datacenter)
    for num, vlan in enumerate(vlans):
        if num:
            gandi.separator_line()
        output_vlan(gandi, vlan, datacenters, output_keys)
    return vlans
List vlans .
54,990
def info(gandi, resource, ip):
    """Display information about a vlan.

    Without *ip*, prints only the vlan summary.  With *ip*, also prints
    each iface and its ips, and reports whether the configured gateway
    actually exists on one of the vlan's interfaces.
    """
    output_keys = ['name', 'state', 'dc', 'subnet', 'gateway']
    datacenters = gandi.datacenter.list()
    vlan = gandi.vlan.info(resource)
    gateway = vlan['gateway']
    if not ip:
        output_vlan(gandi, vlan, datacenters, output_keys, justify=11)
        return vlan
    gateway_exists = False
    vms = dict([(vm_['id'], vm_) for vm_ in gandi.iaas.list()])
    ifaces = gandi.vlan.ifaces(resource)
    # Does the declared gateway match any ip on the vlan's ifaces?
    for iface in ifaces:
        for ip in iface['ips']:
            if gateway == ip['ip']:
                gateway_exists = True
    if gateway_exists:
        # The gateway line is printed later, next to its ip.
        vlan.pop('gateway')
    else:
        vlan['gateway'] = ("%s don't exists" % gateway
                           if gateway else 'none')
    output_vlan(gandi, vlan, datacenters, output_keys, justify=11)
    output_keys = ['vm', 'bandwidth']
    for iface in ifaces:
        gandi.separator_line()
        output_iface(gandi, iface, datacenters, vms, output_keys, justify=11)
        for ip in iface['ips']:
            output_ip(gandi, ip, None, None, None, ['ip'])
            if gateway == ip['ip']:
                output_line(gandi, 'gateway', 'true', justify=11)
    return vlan
Display information about a vlan .
54,991
def create(gandi, name, datacenter, subnet, gateway, background):
    """Create a new vlan, warning first when the datacenter is closing."""
    try:
        gandi.datacenter.is_opened(datacenter, 'iaas')
    except DatacenterLimited as exc:
        gandi.echo('/!\\ Datacenter %s will be closed on %s, '
                   'please consider using another datacenter.' %
                   (datacenter, exc.date))
    result = gandi.vlan.create(name, datacenter, subnet, gateway, background)
    if background:
        # Background mode: show the operation handle instead of waiting.
        gandi.pretty_echo(result)
    return result
Create a new vlan
54,992
def update(gandi, resource, name, gateway, create, bandwidth):
    """Update a vlan's name and/or gateway.

    *gateway* may be an ip address or a vm name; for a vm, the vm's ipv4
    address in this vlan is used (optionally created when *create* is set).
    """
    params = {}
    if name:
        params['name'] = name
    vlan_id = gandi.vlan.usable_id(resource)
    try:
        if gateway:
            IP(gateway)  # raises ValueError when not a literal ip address
            params['gateway'] = gateway
    except ValueError:
        # *gateway* names a vm: find (or create) its ipv4 in this vlan.
        vm = gandi.iaas.info(gateway)
        ips = [ip for sublist in
               [[ip['ip'] for ip in iface['ips'] if ip['version'] == 4]
                for iface in vm['ifaces']
                if iface['vlan'] and iface['vlan'].get('id') == vlan_id]
               for ip in sublist]
        if len(ips) > 1:
            gandi.echo("This vm has two ips in the vlan, don't know which one"
                       ' to choose (%s)' % (', '.join(ips)))
            return
        if not ips and not create:
            gandi.echo("Can't find '%s' in '%s' vlan" % (gateway, resource))
            return
        if not ips and create:
            gandi.echo('Will create a new ip in this vlan for vm %s'
                       % gateway)
            oper = gandi.ip.create('4', vm['datacenter_id'], bandwidth,
                                   vm['hostname'], resource)
            iface_id = oper['iface_id']
            iface = gandi.iface.info(iface_id)
            ips = [ip['ip'] for ip in iface['ips'] if ip['version'] == 4]
        params['gateway'] = ips[0]
    result = gandi.vlan.update(resource, params)
    return result
Update a vlan
54,993
def list_migration_choice(cls, datacenter):
    """Return the datacenters a vm in *datacenter* may migrate to."""
    datacenter_id = cls.usable_id(datacenter)
    dc_list = cls.list()
    current = [dc for dc in dc_list if dc['id'] == datacenter_id][0]
    reachable = current['can_migrate_to']
    return [dc for dc in dc_list if dc['id'] in reachable]
List available datacenters for migration from given datacenter .
54,994
def is_opened(cls, dc_code, type_):
    """Check that datacenter *dc_code* is open for product *type_*.

    Raises DatacenterClosed when no matching open datacenter exists, and
    DatacenterLimited (carrying the closing date when known) when the
    datacenter no longer accepts new resources.
    """
    options = {'dc_code': dc_code, '%s_opened' % type_: True}
    datacenters = cls.safe_call('hosting.datacenter.list', options)
    if not datacenters:
        # Fallback: the datacenter may only be addressable by ISO code.
        options = {'iso': dc_code, '%s_opened' % type_: True}
        datacenters = cls.safe_call('hosting.datacenter.list', options)
        if not datacenters:
            raise DatacenterClosed(r'/!\ Datacenter %s is closed, please '
                                   'choose another datacenter.' % dc_code)
    datacenter = datacenters[0]
    if datacenter.get('%s_closed_for' % type_) == 'NEW':
        # Open for existing resources only.
        dc_close_date = datacenter.get('deactivate_at', '')
        if dc_close_date:
            dc_close_date = dc_close_date.strftime('%d/%m/%Y')
        raise DatacenterLimited(dc_close_date)
List opened datacenters for given type .
54,995
def filtered_list(cls, name=None, obj=None):
    """List datacenters, filtered by *name* and/or compatibility with *obj*
    (a mapping carrying a datacenter_id)."""
    options = {}
    if name:
        options['id'] = cls.usable_id(name)
    def matches(dc):
        # No obj means everything matches; otherwise the ids must agree.
        return not obj or obj['datacenter_id'] == dc['id']
    return [dc for dc in cls.list(options) if matches(dc)]
List datacenters matching name and compatible with obj
54,996
def from_iso(cls, iso):
    """Return the id of the first (lowest-id) datacenter matching *iso*,
    or None when no datacenter has that ISO code."""
    first_by_iso = {}
    for dc in cls.list({'sort_by': 'id ASC'}):
        # Keep only the first (lowest) id seen per ISO code.
        first_by_iso.setdefault(dc['iso'], dc['id'])
    return first_by_iso.get(iso)
Retrieve the first datacenter id associated to an ISO .
54,997
def from_name(cls, name):
    """Return the datacenter id whose name equals *name* (None if absent)."""
    by_name = {dc['name']: dc['id'] for dc in cls.list()}
    return by_name.get(name)
Retrieve datacenter id associated to a name .
54,998
def from_country(cls, country):
    """Return the id of the first (lowest-id) datacenter in *country*,
    or None when no datacenter is located there."""
    first_by_country = {}
    for dc in cls.list({'sort_by': 'id ASC'}):
        # Keep only the first (lowest) id seen per country.
        first_by_country.setdefault(dc['country'], dc['id'])
    return first_by_country.get(country)
Retrieve the first datacenter id associated to a country .
54,999
def from_dc_code(cls, dc_code):
    """Return the datacenter id for *dc_code*; entries without a dc_code
    are skipped, and None is returned when nothing matches."""
    by_code = {dc['dc_code']: dc['id']
               for dc in cls.list() if dc.get('dc_code')}
    return by_code.get(dc_code)
Retrieve the datacenter id associated to a dc_code