idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
3,300
def set_until(self, frame, lineno=None):
    """Stop when past `lineno` in `frame`, or when returning from `frame`.

    When `lineno` is omitted, stopping starts at the line after the
    frame's current one.
    """
    if lineno is None:
        lineno = frame.f_lineno + 1
    self._set_stopinfo(frame, lineno)
Stop when the current line number in frame is greater than lineno or when returning from frame .
3,301
# Start debugging from `frame` (default: the caller's frame).  Installs the
# trace dispatcher on every frame between `frame` and the bottom frame and
# re-enables tracing.  NOTE(review): the while/else walk that rediscovers
# botframe is order-dependent — left byte-identical.
def set_trace ( self , frame = None ) : self . settrace ( False ) if not frame : frame = sys . _getframe ( ) . f_back frame . f_trace = self . trace_dispatch self . reset ( ignore_first_call_event = False , botframe = self . botframe ) self . topframe = frame while frame : if frame is self . botframe : break botframe = frame frame = frame . f_back else : self . botframe = botframe if not self . botframe . f_trace : self . botframe . f_trace = self . trace_dispatch self . settrace ( True )
Start debugging from frame .
3,302
def listdir(directory):
    """Return the names of entries nested in a local directory.

    Directory entries are suffixed with the platform path separator so
    callers can tell files and directories apart.

    :param directory: path of the local directory to list.
    :return: list of file names and separator-suffixed directory names.
    """
    file_names = list()
    for filename in os.listdir(directory):
        file_path = os.path.join(directory, filename)
        if os.path.isdir(file_path):
            # Bug fix: a literal placeholder string replaced the entry's real
            # name here; keep the name and mark directories with a separator.
            filename = f'{filename}{os.path.sep}'
        file_names.append(filename)
    return file_names
Returns list of nested files and directories for local directory by path
3,303
def get_options(option_type, from_options):
    """Extract the options of `option_type` from all options.

    Keys may appear either bare or prefixed with ``option_type.prefix``;
    a bare key takes precedence, and a missing key maps to ''.
    """
    extracted = dict()
    for key in option_type.keys:
        prefixed = f'{option_type.prefix}{key}'
        if key in from_options:
            extracted[key] = from_options.get(key)
        elif prefixed in from_options:
            extracted[key] = from_options.get(prefixed)
        else:
            extracted[key] = ''
    return extracted
Extract options for specified option type from all options
3,304
def get_headers(self, action, headers_ext=None):
    """Return the HTTP headers for the specified WebDAV action.

    Combines the per-action defaults from ``Client.http_header`` with any
    extra headers and, when a token is configured, an OAuth header.
    :return: dict mapping header names to values.
    """
    if action in Client.http_header:
        try:
            headers = Client.http_header[action].copy()
        except AttributeError:
            # Fall back to slicing when the stored value has no copy().
            headers = Client.http_header[action][:]
    else:
        headers = list()
    if headers_ext:
        headers.extend(headers_ext)
    if self.webdav.token:
        headers.append(f'Authorization: OAuth {self.webdav.token}')
    return dict([map(lambda s: s.strip(), i.split(':')) for i in headers])
Returns HTTP headers of specified WebDAV actions .
3,305
def execute_request(self, action, path, data=None, headers_ext=None):
    """Build and execute a request for the given WebDAV action and path.

    :raises NotEnoughSpace: on HTTP 507.
    :raises ServerException: on any 5xx status.
    :raises ResponseErrorCode: on any other 4xx status.
    :return: the raw response object.
    """
    url = self.get_url(path)
    response = self.session.request(
        method=Client.requests[action],
        url=url,
        auth=self.webdav.auth,
        headers=self.get_headers(action, headers_ext),
        timeout=self.timeout,
        data=data,
    )
    if response.status_code == 507:
        raise NotEnoughSpace()
    if 499 < response.status_code < 600:
        raise ServerException(url=url, code=response.status_code,
                              message=response.content)
    if response.status_code >= 400:
        raise ResponseErrorCode(url=url, code=response.status_code,
                                message=response.content)
    return response
Generate request to WebDAV server for specified action and path and execute it .
3,306
def valid(self):
    """Validate the WebDAV and proxy settings.

    :return: True when both the WebDAV and the proxy settings are valid,
        False otherwise.
    """
    # `True if x else False` is redundant; bool() keeps the strict bool
    # return type while simplifying the expression.
    return bool(self.webdav.valid() and self.proxy.valid())
Validates of WebDAV and proxy settings .
3,307
def download_from(self, buff, remote_path):
    """Download a file from the WebDAV server and write it into `buff`.

    :raises OptionNotValid: when `remote_path` points to a directory.
    :raises RemoteResourceNotFound: when the resource does not exist.
    """
    urn = Urn(remote_path)
    if self.is_dir(urn.path()):
        raise OptionNotValid(name='remote_path', value=remote_path)
    if not self.check(urn.path()):
        raise RemoteResourceNotFound(urn.path())
    response = self.execute_request(action='download', path=urn.quote())
    buff.write(response.content)
Downloads file from WebDAV and writes it in buffer .
3,308
def download_directory(self, remote_path, local_path, progress=None):
    """Recursively download a remote WebDAV directory to `local_path`.

    Any existing content at `local_path` is deleted and recreated first.
    """
    urn = Urn(remote_path, directory=True)
    if not self.is_dir(urn.path()):
        raise OptionNotValid(name='remote_path', value=remote_path)
    if os.path.exists(local_path):
        shutil.rmtree(local_path)
    os.makedirs(local_path)
    for resource_name in self.list(urn.path()):
        self.download(local_path=os.path.join(local_path, resource_name),
                      remote_path=f'{urn.path()}{resource_name}',
                      progress=progress)
Downloads a directory and all of its nested files and directories from the remote WebDAV server to the local path. If anything already exists at the local path, it is deleted before new content is created.
3,309
def download_sync(self, remote_path, local_path, callback=None):
    """Download a remote resource synchronously.

    Invokes `callback` (when given) after the download completes.
    """
    self.download(local_path=local_path, remote_path=remote_path)
    if callback:
        callback()
Downloads remote resources from WebDAV server synchronously .
3,310
def download_async(self, remote_path, local_path, callback=None):
    """Download a remote resource asynchronously in a worker thread."""
    target = (lambda: self.download_sync(local_path=local_path,
                                         remote_path=remote_path,
                                         callback=callback))
    threading.Thread(target=target).start()
Downloads remote resources from WebDAV server asynchronously
3,311
def upload_directory(self, remote_path, local_path, progress=None):
    """Recursively upload a local directory to `remote_path`.

    An already existing remote directory is removed before the upload.
    """
    urn = Urn(remote_path, directory=True)
    if not urn.is_dir():
        raise OptionNotValid(name='remote_path', value=remote_path)
    if not os.path.isdir(local_path):
        raise OptionNotValid(name='local_path', value=local_path)
    if not os.path.exists(local_path):
        raise LocalResourceNotFound(local_path)
    if self.check(urn.path()):
        self.clean(urn.path())
    self.mkdir(remote_path)
    for resource_name in listdir(local_path):
        self.upload(local_path=os.path.join(local_path, resource_name),
                    remote_path=f'{urn.path()}{resource_name}',
                    progress=progress)
Uploads a directory to the remote path on the WebDAV server. If the directory already exists on the remote server, it is deleted and then the directory is uploaded with all nested files and directories.
3,312
def upload_sync(self, remote_path, local_path, callback=None):
    """Upload a resource to the remote path synchronously.

    Invokes `callback` (when given) after the upload completes.
    """
    self.upload(local_path=local_path, remote_path=remote_path)
    if callback:
        callback()
Uploads resource to remote path on WebDAV server synchronously . In case resource is directory it will upload all nested files and directories .
3,313
def upload_async(self, remote_path, local_path, callback=None):
    """Upload a resource to the remote path asynchronously in a thread."""
    target = (lambda: self.upload_sync(local_path=local_path,
                                       remote_path=remote_path,
                                       callback=callback))
    threading.Thread(target=target).start()
Uploads resource to remote path on WebDAV server asynchronously . In case resource is directory it will upload all nested files and directories .
3,314
def parse_get_list_response(content):
    """Parse listing response XML and extract resource names.

    :return: list of Urn objects, or an empty list on malformed XML.
    """
    try:
        tree = etree.fromstring(content)
        paths = [Urn.separate + unquote(urlsplit(node.text).path)
                 for node in tree.findall('.//{DAV:}href')]
        return [Urn(path) for path in paths]
    except etree.XMLSyntaxError:
        return list()
Parses the XML response content from the WebDAV server and extracts file and directory names.
3,315
def create_free_space_request_content():
    """Build the PROPFIND XML body requesting the free-space quota properties."""
    root = etree.Element('propfind', xmlns='DAV:')
    prop = etree.SubElement(root, 'prop')
    etree.SubElement(prop, 'quota-available-bytes')
    etree.SubElement(prop, 'quota-used-bytes')
    return WebDavXmlUtils.etree_to_string(etree.ElementTree(root))
Creates an XML for requesting of free space on remote WebDAV server .
3,316
def parse_free_space_response(content, hostname):
    """Parse response XML and extract the amount of free space.

    :raises MethodNotSupported: when the server reports no quota info.
    :return: available bytes as int, or '' on malformed XML.
    """
    try:
        tree = etree.fromstring(content)
        node = tree.find('.//{DAV:}quota-available-bytes')
        if node is None:
            raise MethodNotSupported(name='free', server=hostname)
        return int(node.text)
    except TypeError:
        raise MethodNotSupported(name='free', server=hostname)
    except etree.XMLSyntaxError:
        return str()
Parses the XML response content from the WebDAV server and extracts the amount of free space.
3,317
def create_get_property_request_content(option):
    """Build the PROPFIND XML body asking for one property of a resource.

    :param option: dict with optional 'name' and 'namespace' keys.
    """
    root = etree.Element('propfind', xmlns='DAV:')
    prop = etree.SubElement(root, 'prop')
    etree.SubElement(prop, option.get('name', ''),
                     xmlns=option.get('namespace', ''))
    return WebDavXmlUtils.etree_to_string(etree.ElementTree(root))
Creates an XML for requesting of getting a property value of remote WebDAV resource .
3,318
def parse_get_property_response(content, name):
    """Parse response XML and return the value of the named property."""
    tree = etree.fromstring(content)
    return tree.xpath('//*[local-name() = $name]', name=name)[0].text
Parses of response content XML from WebDAV server for getting metadata property value for some resource .
3,319
def create_set_property_batch_request_content(options):
    """Build the PROPPATCH XML body setting several property values at once.

    :param options: iterable of dicts with 'name' and optional
        'namespace'/'value' keys.
    """
    root = etree.Element('propertyupdate', xmlns='DAV:')
    prop = etree.SubElement(etree.SubElement(root, 'set'), 'prop')
    for option in options:
        node = etree.SubElement(prop, option['name'],
                                xmlns=option.get('namespace', ''))
        node.text = option.get('value', '')
    return WebDavXmlUtils.etree_to_string(etree.ElementTree(root))
Creates an XML for requesting of setting a property values for remote WebDAV resource in batch .
3,320
def etree_to_string(tree):
    """Serialize an ElementTree to bytes with an XML declaration, UTF-8."""
    buff = BytesIO()
    tree.write(buff, xml_declaration=True, encoding='UTF-8')
    return buff.getvalue()
Creates string from lxml . etree . ElementTree with XML declaration and UTF - 8 encoding .
3,321
def extract_response_for_path(content, path, hostname):
    """Extract the single response element matching the given resource path.

    :raises RemoteResourceNotFound: when no response matches `path`.
    :raises MethodNotSupported: when the content is not parseable XML.
    """
    try:
        tree = etree.fromstring(content)
        normalized = Urn.normalize_path(path)
        for response in tree.findall('{DAV:}response'):
            href = response.findtext('{DAV:}href')
            if Urn.compare_path(normalized, href) is True:
                return response
        raise RemoteResourceNotFound(path)
    except etree.XMLSyntaxError:
        raise MethodNotSupported(name='is_dir', server=hostname)
Extracts single response for specified remote resource .
3,322
def cleanup(config_dir):
    """Remove temporary stdout/stderr files and the daemon socket.

    :param config_dir: directory holding the daemon's runtime files.
    """
    stdout_path = os.path.join(config_dir, 'pueue.stdout')
    stderr_path = os.path.join(config_dir, 'pueue.stderr')
    # Bug fix: os._exists is a private CPython helper that tests whether a
    # *name* exists in the os module, not a filesystem path — the stale
    # stdout/stderr files were never removed.  Use os.path.exists.
    if os.path.exists(stdout_path):
        os.remove(stdout_path)
    if os.path.exists(stderr_path):
        os.remove(stderr_path)
    socket_path = os.path.join(config_dir, 'pueue.sock')
    if os.path.exists(socket_path):
        os.remove(socket_path)
Remove temporary stderr and stdout files as well as the daemon socket .
3,323
def get_descriptor_output(descriptor, key, handler=None):
    """Read all output from `descriptor`, tolerating bad UTF-8 in the stream.

    Decode errors are logged through `handler` (when given) and replaced by
    an error-marker line.  The collected text is indented for display.
    """
    lines = ''
    line = 'stub'
    while line != '':
        try:
            line = descriptor.readline()
            lines += line
        except UnicodeDecodeError:
            error_msg = "Error while decoding output of process {}".format(key)
            if handler:
                handler.logger.error("{} with command {}".format(
                    error_msg, handler.queue[key]['command']))
            lines += error_msg + '\n'
    return lines.replace('\n', '\n ')
Get the descriptor output and handle incorrect UTF - 8 encoding of subprocess logs .
3,324
def request(self, hash_, quickkey, doc_type, page=None, output=None,
            size_id=None, metadata=None, request_conversion_only=None):
    """Query the conversion server.

    Only the first four characters of `hash_` are sent.

    :raises ConversionServerError: when the server answers HTTP 204.
    :return: parsed JSON for JSON responses, the raw response otherwise.
    """
    if len(hash_) > 4:
        hash_ = hash_[:4]
    query = QueryParams({
        'quickkey': quickkey,
        'doc_type': doc_type,
        'page': page,
        'output': output,
        'size_id': size_id,
        'metadata': metadata,
        'request_conversion_only': request_conversion_only,
    })
    url = API_ENDPOINT + '?' + hash_ + '&' + urlencode(query)
    response = self.http.get(url, stream=True)
    if response.status_code == 204:
        raise ConversionServerError("Unable to fulfill request. "
                                    "The document will not be converted.",
                                    response.status_code)
    response.raise_for_status()
    if response.headers['content-type'] == 'application/json':
        return response.json()
    return response
Query conversion server
3,325
# Run every command list attached to the breakpoints hit at the current
# location.  Returns (doprompt, silent) when at least one breakpoint had
# commands, None otherwise; temporary breakpoints are cleared afterwards
# via do_clear.  NOTE(review): relies on the setup/onecmd/lastcmd ordering
# of the surrounding Pdb machinery — left byte-identical.
def bp_commands ( self , frame , breakpoint_hits ) : effective_bp_list , temporaries = breakpoint_hits silent = True doprompt = False atleast_one_cmd = False for bp in effective_bp_list : if bp in self . commands : if not atleast_one_cmd : atleast_one_cmd = True self . setup ( frame , None ) lastcmd_back = self . lastcmd for line in self . commands [ bp ] : self . onecmd ( line ) self . lastcmd = lastcmd_back if not self . commands_silent [ bp ] : silent = False if self . commands_doprompt [ bp ] : doprompt = True tmp_to_delete = ' ' . join ( str ( bp ) for bp in temporaries ) if tmp_to_delete : self . do_clear ( tmp_to_delete ) if atleast_one_cmd : return doprompt , silent return None
Call every command that was set for the current active breakpoints .
3,326
def precmd(self, line):
    """Handle alias expansion and the ';;' command separator."""
    if not line.strip():
        return line
    args = line.split()
    # Expand aliases repeatedly, substituting %1..%n and %* placeholders.
    while args[0] in self.aliases:
        line = self.aliases[args[0]]
        for position, arg in enumerate(args[1:], start=1):
            line = line.replace("%" + str(position), arg)
        line = line.replace("%*", ' '.join(args[1:]))
        args = line.split()
    # Split off anything after ';;' and queue it as the next command.
    if args[0] != 'alias':
        marker = line.find(';;')
        if marker >= 0:
            self.cmdqueue.append(line[marker + 2:].lstrip())
            line = line[:marker].rstrip()
    return line
Handle alias expansion and ;; separator .
3,327
def onecmd(self, line):
    """Interpret `line` as though it had been typed at the prompt.

    While a breakpoint command list is being defined, the line is routed
    to the command-definition handler instead of normal dispatch.
    """
    if self.commands_defining:
        return self.handle_command_def(line)
    return cmd.Cmd.onecmd(self, line)
Interpret the argument as though it had been typed in response to the prompt .
3,328
# Handle one line while a breakpoint command list is being defined.
# 'silent' and 'end' are meta-commands; a command listed in
# commands_resuming ends the definition (returns 1) and disables the
# prompt.  NOTE(review): the local `cmd` shadows the stdlib cmd module
# here — left byte-identical to avoid behavior drift.
def handle_command_def ( self , line ) : cmd , arg , line = self . parseline ( line ) if not cmd : return if cmd == 'silent' : self . commands_silent [ self . commands_bnum ] = True return elif cmd == 'end' : self . cmdqueue = [ ] return 1 cmdlist = self . commands [ self . commands_bnum ] if arg : cmdlist . append ( cmd + ' ' + arg ) else : cmdlist . append ( cmd ) try : func = getattr ( self , 'do_' + cmd ) except AttributeError : func = self . default if func . __name__ in self . commands_resuming : self . commands_doprompt [ self . commands_bnum ] = False self . cmdqueue = [ ] return 1 return
Handles one command line during command list definition .
3,329
def defaultFile(self):
    """Produce a reasonable default filename for list commands.

    Falls back to the main script when the current frame was compiled
    from a '<string>' source.
    """
    filename = self.curframe.f_code.co_filename
    if filename == '<string>' and self.mainpyfile:
        return self.mainpyfile
    return filename
Produce a reasonable default .
3,330
def do_retval(self, arg):
    """retval
    Print the return value for the last return of a function.
    """
    frame_locals = self.get_locals(self.curframe)
    if '__return__' not in frame_locals:
        self.error('Not yet returned!')
    else:
        self.message(bdb.safe_repr(frame_locals['__return__']))
retval Print the return value for the last return of a function .
3,331
def do_p(self, arg):
    """p expression
    Print the value of the expression.
    """
    try:
        self.message(bdb.safe_repr(self._getval(arg)))
    except Exception:
        # Evaluation/printing errors are deliberately swallowed.
        pass
p expression Print the value of the expression .
3,332
def do_pp(self, arg):
    """pp expression
    Pretty-print the value of the expression.
    """
    obj = self._getval(arg)
    try:
        repr(obj)
    except Exception:
        # repr() itself failed; fall back to a safe representation.
        self.message(bdb.safe_repr(obj))
    else:
        self.message(pprint.pformat(obj))
pp expression Pretty - print the value of the expression .
3,333
def do_longlist(self, arg):
    """longlist | ll
    List the whole source code for the current function or frame.
    """
    filename = self.curframe.f_code.co_filename
    breaklist = self.get_file_breaks(filename)
    try:
        lines, lineno = getsourcelines(self.curframe,
                                       self.get_locals(self.curframe))
    except IOError as err:
        self.error(err)
        return
    self._print_lines(lines, lineno, breaklist, self.curframe)
longlist | ll List the whole source code for the current function or frame .
3,334
def do_source(self, arg):
    """source expression
    Try to get source code for the given object and display it.
    """
    try:
        obj = self._getval(arg)
    except Exception:
        return
    try:
        lines, lineno = getsourcelines(obj, self.get_locals(self.curframe))
    except (IOError, TypeError) as err:
        self.error(err)
        return
    self._print_lines(lines, lineno)
source expression Try to get source code for the given object and display it .
3,335
def _print_lines ( self , lines , start , breaks = ( ) , frame = None ) : if frame : current_lineno = frame . f_lineno exc_lineno = self . tb_lineno . get ( frame , - 1 ) else : current_lineno = exc_lineno = - 1 for lineno , line in enumerate ( lines , start ) : s = str ( lineno ) . rjust ( 3 ) if len ( s ) < 4 : s += ' ' if lineno in breaks : s += 'B' else : s += ' ' if lineno == current_lineno : s += '->' elif lineno == exc_lineno : s += '>>' self . message ( s + '\t' + line . rstrip ( ) )
Print a range of lines .
3,336
def do_whatis(self, arg):
    """whatis arg
    Print the type of the argument.
    """
    try:
        value = self._getval(arg)
    except Exception:
        return
    # Plain function: has a code object directly.
    code = None
    try:
        code = value.__code__
    except Exception:
        pass
    if code:
        self.message('Function %s' % code.co_name)
        return
    # Bound/unbound method: code object hangs off __func__.
    try:
        code = value.__func__.__code__
    except Exception:
        pass
    if code:
        self.message('Method %s' % code.co_name)
        return
    if value.__class__ is type:
        self.message('Class %s.%s' % (value.__module__, value.__name__))
        return
    self.message(type(value))
whatis arg Print the type of the argument .
3,337
def do_unalias(self, arg):
    """unalias name
    Delete the specified alias.
    """
    args = arg.split()
    if not args:
        return
    if args[0] in self.aliases:
        del self.aliases[args[0]]
unalias name Delete the specified alias .
3,338
def read(self, limit=-1):
    """Read up to `limit` bytes, clamped to the chunk boundary.

    See file.read; a limit of -1 reads the remainder of the chunk.
    """
    remaining = self.len - self.parent_fd.tell() + self.offset
    if limit == -1 or limit > remaining:
        limit = remaining
    return self.parent_fd.read(limit)
Read content . See file . read
3,339
def seek(self, offset, whence=os.SEEK_SET):
    """Seek within the chunk (see file.seek).

    Positions are translated to the parent file descriptor; seeking
    outside the chunk area raises ValueError.
    """
    if whence == os.SEEK_SET:
        pos = self.offset + offset
    elif whence == os.SEEK_CUR:
        pos = self.tell() + offset
    elif whence == os.SEEK_END:
        pos = self.offset + self.len + offset
    else:
        raise ValueError("invalid whence {}".format(whence))
    if pos > self.offset + self.len or pos < self.offset:
        raise ValueError("seek position beyond chunk area")
    self.parent_fd.seek(pos, os.SEEK_SET)
Seek to position in stream see file . seek
3,340
def close(self):
    """Close the parent file descriptor (see file.close).

    A descriptor without a real fileno is shared/reused elsewhere, so it
    is intentionally left open.
    """
    try:
        self.parent_fd.fileno()
    except io.UnsupportedOperation:
        logger.debug("Not closing parent_fd - reusing existing")
    else:
        self.parent_fd.close()
Close file see file . close
3,341
def _build_query ( self , uri , params = None , action_token_type = None ) : if params is None : params = QueryParams ( ) params [ 'response_format' ] = 'json' session_token = None if action_token_type in self . _action_tokens : using_action_token = True session_token = self . _action_tokens [ action_token_type ] else : using_action_token = False if self . _session : session_token = self . _session [ 'session_token' ] if session_token : params [ 'session_token' ] = session_token keys = list ( params . keys ( ) ) keys . sort ( ) query = urlencode ( [ tuple ( [ key , params [ key ] ] ) for key in keys ] ) if not using_action_token and self . _session : secret_key_mod = int ( self . _session [ 'secret_key' ] ) % 256 signature_base = ( str ( secret_key_mod ) + self . _session [ 'time' ] + uri + '?' + query ) . encode ( 'ascii' ) query += '&signature=' + hashlib . md5 ( signature_base ) . hexdigest ( ) return query
Prepare query string
3,342
# Perform a request to the MediaFire API.  Non-upload requests POST the
# query as a form body; uploads move the query into the URL and stream the
# file (multipart when a filename is given, raw fd otherwise).  Wraps
# transport failures in MediaFireConnectionError.  NOTE(review): branching
# on six.text_type / MultipartEncoder is py2/py3 sensitive — byte-identical.
def request ( self , action , params = None , action_token_type = None , upload_info = None , headers = None ) : uri = self . _build_uri ( action ) if isinstance ( params , six . text_type ) : query = params else : query = self . _build_query ( uri , params , action_token_type ) if headers is None : headers = { } if upload_info is None : data = query headers [ 'Content-Type' ] = FORM_MIMETYPE else : uri += '?' + query if "filename" in upload_info : data = MultipartEncoder ( fields = { 'file' : ( upload_info [ "filename" ] , upload_info [ "fd" ] , UPLOAD_MIMETYPE ) } ) headers [ "Content-Type" ] = data . content_type else : data = upload_info [ "fd" ] headers [ "Content-Type" ] = UPLOAD_MIMETYPE logger . debug ( "uri=%s query=%s" , uri , query if not upload_info else None ) try : url = ( API_BASE + uri ) . encode ( 'utf-8' ) if isinstance ( data , six . text_type ) : data = data . encode ( 'utf-8' ) response = self . http . post ( url , data = data , headers = headers , stream = True ) except RequestException as ex : logger . exception ( "HTTP request failed" ) raise MediaFireConnectionError ( "RequestException: {}" . format ( ex ) ) return self . _process_response ( response )
Perform request to MediaFire API
3,343
def _regenerate_secret_key ( self ) : if self . _session and 'secret_key' in self . _session : self . _session [ 'secret_key' ] = ( int ( self . _session [ 'secret_key' ] ) * 16807 ) % 2147483647
Regenerate secret key
3,344
def session(self, value):
    """Set the session info.

    `value` must be a dict providing session_token, time and secret_key;
    optional ekey/pkey entries are preserved.  None clears the session.

    :raises ValueError: when `value` is not a dict or a key is missing.
    """
    if value is None:
        self._session = None
        return
    if not isinstance(value, dict):
        raise ValueError("session info is required")
    parsed = {}
    for key in ("session_token", "time", "secret_key"):
        if key not in value:
            raise ValueError("Missing parameter: {}".format(key))
        parsed[key] = value[key]
    for key in ("ekey", "pkey"):
        if key in value:
            parsed[key] = value[key]
    self._session = parsed
Set session token
3,345
def set_action_token(self, type_=None, action_token=None):
    """Store an action token by type; None removes the stored token."""
    if action_token is None:
        del self._action_tokens[type_]
    else:
        self._action_tokens[type_] = action_token
Set action tokens
3,346
def _reset ( self ) : self . _socket = None self . _pending = deque ( ) self . _out_buffer = '' self . _buffer = '' self . _identify_response = { } self . last_ready_sent = 0 self . ready = 0
Reset all of our stateful variables
3,347
# Establish a connection: create the TCP socket, send the protocol magic,
# run the IDENTIFY handshake and wait (bounded by the timeout) for its
# response.  Returns True on success, False on any failure (the bare
# except is deliberate best-effort cleanup).  NOTE(review): flush/identify
# ordering and the reconnection-counter bookkeeping are order-dependent —
# left byte-identical.
def connect ( self , force = False ) : if not force and self . alive ( ) : return True self . _reset ( ) with self . _socket_lock : try : logger . info ( 'Creating socket...' ) self . _socket = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) self . _socket . settimeout ( self . _timeout ) logger . info ( 'Connecting to %s, %s' , self . host , self . port ) self . _socket . connect ( ( self . host , self . port ) ) self . _socket . setblocking ( self . _blocking ) self . _pending . append ( constants . MAGIC_V2 ) while self . pending ( ) : self . flush ( ) self . identify ( self . _identify_options ) while self . pending ( ) : self . flush ( ) self . _reconnnection_counter . success ( ) limit = time . time ( ) + self . _timeout responses = self . _read ( 1 ) while ( not responses ) and ( time . time ( ) < limit ) : responses = self . _read ( 1 ) if not responses : raise ConnectionTimeoutException ( 'Read identify response timed out (%ss)' % self . _timeout ) self . identified ( responses [ 0 ] ) return True except : logger . exception ( 'Failed to connect' ) if self . _socket : self . _socket . close ( ) self . _reconnnection_counter . failed ( ) self . _reset ( ) return False
Establish a connection
3,348
def close(self):
    """Flush any outstanding messages and close our connection.

    The connection state is reset even when closing the socket fails.
    """
    try:
        while self.pending():
            self.flush()
    except socket.error:
        pass
    with self._socket_lock:
        try:
            if self._socket:
                self._socket.close()
        finally:
            self._reset()
Close our connection
3,349
def socket(self, blocking=True):
    """Yield the raw socket while holding the socket lock.

    Yields nothing when a non-blocking acquire fails; the lock is always
    released afterwards.
    """
    if self._socket_lock.acquire(blocking):
        try:
            yield self._socket
        finally:
            self._socket_lock.release()
Blockingly yield the socket
3,350
# Handle the response to our IDENTIFY command: parse the negotiated
# features (falling back to no negotiation on parse failure), pick up
# max_rdy_count, upgrade to TLS when requested, and perform AUTH when the
# server requires it.  Returns the (possibly mutated) response.
# NOTE(review): TLS wrap and auth ordering matter — left byte-identical.
def identified ( self , res ) : try : res . data = json . loads ( res . data ) self . _identify_response = res . data logger . info ( 'Got identify response: %s' , res . data ) except : logger . warn ( 'Server does not support feature negotiation' ) self . _identify_response = { } self . max_rdy_count = self . _identify_response . get ( 'max_rdy_count' , self . max_rdy_count ) if self . _identify_options . get ( 'tls_v1' , False ) : if not self . _identify_response . get ( 'tls_v1' , False ) : raise UnsupportedException ( 'NSQd instance does not support TLS' ) else : self . _socket = TLSSocket . wrap_socket ( self . _socket ) if self . _identify_response . get ( 'auth_required' , False ) : if not self . _auth_secret : raise UnsupportedException ( 'Auth required but not provided' ) else : self . auth ( self . _auth_secret ) if not self . _identify_response . get ( 'tls_v1' , False ) : logger . warn ( 'Using AUTH without TLS' ) elif self . _auth_secret : logger . warn ( 'Authentication secret provided but not required' ) return res
Handle a response to our identify command . Returns response
3,351
def setblocking(self, blocking):
    """Set whether socket operations on this connection block."""
    # Only record the new mode once the socket was actually updated.
    for sock in self.socket():
        sock.setblocking(blocking)
        self._blocking = blocking
Set whether or not this message is blocking
3,352
# Flush some of the waiting messages; returns the count of bytes written.
# Drains the pending deque (or a previously unsent remainder) into one
# send(); on EWOULDBLOCK-class errors the data is parked in _out_buffer,
# and any unsent tail is pushed back onto the pending deque.
# NOTE(review): py2-era code (xrange, str buffers) with order-dependent
# buffer bookkeeping — left byte-identical.
def flush ( self ) : total = 0 for sock in self . socket ( blocking = False ) : pending = self . _pending data = self . _out_buffer or '' . join ( pending . popleft ( ) for _ in xrange ( len ( pending ) ) ) try : total = sock . send ( data ) except socket . error as exc : if exc . args [ 0 ] not in self . WOULD_BLOCK_ERRS : raise self . _out_buffer = data else : self . _out_buffer = None finally : if total < len ( data ) : self . _pending . appendleft ( data [ total : ] ) return total
Flush some of the waiting messages returns count written
3,353
def send(self, command, message=None):
    """Send a command over the socket, packing any message after it.

    In blocking mode the frame is sent immediately; otherwise it is
    queued for a later flush().
    """
    if message:
        payload = command + constants.NL + util.pack(message)
    else:
        payload = command + constants.NL
    if self._blocking:
        for sock in self.socket():
            sock.sendall(payload)
    else:
        self._pending.append(payload)
Send a command over the socket with the message length encoded.
3,354
def identify(self, data):
    """Send an IDENTIFY command with the provided options as JSON."""
    return self.send(constants.IDENTIFY, json.dumps(data))
Send an identification message
3,355
def pub(self, topic, message):
    """Publish a single message to a topic."""
    return self.send(' '.join((constants.PUB, topic)), message)
Publish to a topic
3,356
def mpub(self, topic, *messages):
    """Publish multiple messages to a topic in one command."""
    return self.send(constants.MPUB + ' ' + topic, messages)
Publish multiple messages to a topic
3,357
def rdy(self, count):
    """Indicate readiness to receive `count` messages."""
    self.ready = count
    self.last_ready_sent = count
    return self.send(constants.RDY + ' ' + str(count))
Indicate that you are ready to receive messages.
3,358
# Read from the socket and return up to `limit` complete responses.
# Appends the received packet to the internal buffer, then repeatedly
# peels off size-prefixed (big-endian int32) frames, keeping any
# incomplete tail buffered.  Would-block and timeout conditions yield [].
# NOTE(review): py2-era str buffer arithmetic — left byte-identical.
def _read ( self , limit = 1000 ) : for sock in self . socket ( ) : if sock is None : return [ ] try : packet = sock . recv ( 4096 ) except socket . timeout : return [ ] except socket . error as exc : if exc . args [ 0 ] in self . WOULD_BLOCK_ERRS : return [ ] else : raise self . _buffer += packet responses = [ ] total = 0 buf = self . _buffer remaining = len ( buf ) while limit and ( remaining >= 4 ) : size = struct . unpack ( '>l' , buf [ total : ( total + 4 ) ] ) [ 0 ] if ( remaining - 4 ) >= size : responses . append ( Response . from_raw ( self , buf [ ( total + 4 ) : ( total + size + 4 ) ] ) ) total += ( size + 4 ) remaining -= ( size + 4 ) limit -= 1 else : break self . _buffer = self . _buffer [ total : ] return responses
Return all the responses read
3,359
def read(self):
    """Read responses from the established socket.

    Decrements the ready count by the number of message frames received.
    """
    responses = self._read()
    self.ready -= sum(map(int, (r.frame_type == Message.FRAME_TYPE
                                for r in responses)))
    return responses
Responses from an established socket
3,360
# Run the lookupd discovery mechanism for a topic: collect producer
# (host, port) pairs from every lookupd (logging and skipping failures),
# connect to unknown producers and reconnect dead ones.  Returns the list
# of newly-established live connections.
def discover ( self , topic ) : logger . info ( 'Discovering on topic %s' , topic ) producers = [ ] for lookupd in self . _lookupd : logger . info ( 'Discovering on %s' , lookupd ) try : for producer in lookupd . lookup ( topic ) [ 'producers' ] : logger . info ( 'Found producer %s on %s' , producer , lookupd ) producers . append ( ( producer [ 'broadcast_address' ] , producer [ 'tcp_port' ] ) ) except ClientException : logger . exception ( 'Failed to query %s' , lookupd ) new = [ ] for host , port in producers : conn = self . _connections . get ( ( host , port ) ) if not conn : logger . info ( 'Discovered %s:%s' , host , port ) new . append ( self . connect ( host , port ) ) elif not conn . alive ( ) : logger . info ( 'Reconnecting to %s:%s' , host , port ) if conn . connect ( ) : conn . setblocking ( 0 ) self . reconnected ( conn ) else : logger . debug ( 'Connection to %s:%s still alive' , host , port ) return [ conn for conn in new if conn ]
Run the discovery mechanism
3,361
# Connect to all appropriate nsqd instances: run lookupd discovery when
# configured, then for each static nsqd address connect if unknown,
# reconnect if dead (respecting the reconnection backoff), and otherwise
# reconnect stale-but-alive connections whose last receive exceeds twice
# the heartbeat interval.
def check_connections ( self ) : logger . info ( 'Checking connections' ) if self . _lookupd : self . discover ( self . _topic ) for hostspec in self . _nsqd_tcp_addresses : logger . debug ( 'Checking nsqd instance %s' , hostspec ) host , port = hostspec . split ( ':' ) port = int ( port ) conn = self . _connections . get ( ( host , port ) , None ) if not conn : logger . info ( 'Connecting to %s:%s' , host , port ) self . connect ( host , port ) elif not conn . alive ( ) : if conn . ready_to_reconnect ( ) : logger . info ( 'Reconnecting to %s:%s' , host , port ) if conn . connect ( ) : conn . setblocking ( 0 ) self . reconnected ( conn ) else : logger . debug ( 'Checking freshness' ) now = time . time ( ) time_check = math . ceil ( now - self . last_recv_timestamp ) if time_check >= ( ( self . heartbeat_interval * 2 ) / 1000.0 ) : if conn . ready_to_reconnect ( ) : logger . info ( 'Reconnecting to %s:%s' , host , port ) if conn . connect ( ) : conn . setblocking ( 0 ) self . reconnected ( conn )
Connect to all the appropriate instances
3,362
def connection_checker(self):
    """Run periodic reconnection checks in a background thread.

    Generator-based context manager: yields the running checker thread
    and stops/joins it on exit.
    """
    thread = ConnectionChecker(self)
    logger.info('Starting connection-checker thread')
    thread.start()
    try:
        yield thread
    finally:
        logger.info('Stopping connection-checker')
        thread.stop()
        logger.info('Joining connection-checker')
        thread.join()
Run periodic reconnection checks
3,363
# Connect to the provided host/port: build a Connection with this client's
# backoff, auth and identify options, and register it (non-blocking) when
# alive.  NOTE(review): the mangled one-line form makes the nesting of
# add/return relative to the alive() check ambiguous — left byte-identical.
def connect ( self , host , port ) : conn = connection . Connection ( host , port , reconnection_backoff = self . _reconnection_backoff , auth_secret = self . _auth_secret , timeout = self . _connect_timeout , ** self . _identify_options ) if conn . alive ( ) : conn . setblocking ( 0 ) self . add ( conn ) return conn
Connect to the provided host port
3,364
def add(self, connection):
    """Track a connection keyed by (host, port).

    :return: the connection when newly added, None when already tracked.
    """
    key = (connection.host, connection.port)
    with self._lock:
        if key in self._connections:
            return None
        self._connections[key] = connection
        self.added(connection)
        return connection
Add a connection
3,365
def remove(self, connection):
    """Stop tracking a connection, closing it on a best-effort basis.

    :return: the removed connection, or None when it was not tracked.
    """
    key = (connection.host, connection.port)
    with self._lock:
        found = self._connections.pop(key, None)
        try:
            self.close_connection(found)
        except Exception as exc:
            logger.warn('Failed to close %s: %s', connection, exc)
        return found
Remove a connection
3,366
# One select() pass over every live connection:
# - with no live connections, sleeps for _timeout and returns []
# - connections with buffered output (pending()) are polled writable
# - a heartbeat Response is answered with NOP, refreshes
#   last_recv_timestamp, and is not surfaced to the caller
# - an Error response closes its connection unless it is one of the
#   non-fatal FIN/REQ/TOUCH failures
# - read failures (NSQException, socket.error) and flush failures close
#   the offending connection; exceptional fds are closed outright
# Returns the list of non-heartbeat responses read this pass.
def read ( self ) : connections = [ c for c in self . connections ( ) if c . alive ( ) ] if not connections : time . sleep ( self . _timeout ) return [ ] writes = [ c for c in connections if c . pending ( ) ] try : readable , writable , exceptable = select . select ( connections , writes , connections , self . _timeout ) except exceptions . ConnectionClosedException : logger . exception ( 'Tried selecting on closed client' ) return [ ] except select . error : logger . exception ( 'Error running select' ) return [ ] if not ( readable or writable or exceptable ) : logger . debug ( 'Timed out...' ) return [ ] responses = [ ] for conn in readable : try : for res in conn . read ( ) : if ( isinstance ( res , Response ) and res . data == HEARTBEAT ) : logger . info ( 'Sending heartbeat to %s' , conn ) conn . nop ( ) logger . debug ( 'Setting last_recv_timestamp' ) self . last_recv_timestamp = time . time ( ) continue elif isinstance ( res , Error ) : nonfatal = ( exceptions . FinFailedException , exceptions . ReqFailedException , exceptions . TouchFailedException ) if not isinstance ( res . exception ( ) , nonfatal ) : logger . error ( 'Closing %s: %s' , conn , res . exception ( ) ) self . close_connection ( conn ) responses . append ( res ) logger . debug ( 'Setting last_recv_timestamp' ) self . last_recv_timestamp = time . time ( ) except exceptions . NSQException : logger . exception ( 'Failed to read from %s' , conn ) self . close_connection ( conn ) except socket . error : logger . exception ( 'Failed to read from %s' , conn ) self . close_connection ( conn ) for conn in writable : try : conn . flush ( ) except socket . error : logger . exception ( 'Failed to flush %s' , conn ) self . close_connection ( conn ) for conn in exceptable : self . close_connection ( conn ) return responses
Read from any of the connections that need it
3,367
def random_connection(self):
    """Yield one randomly-chosen connection that is currently alive."""
    candidates = [c for c in self.connections() if c.alive()]
    yield random.choice(candidates)
Pick a random living connection
3,368
def wait_response(self):
    """Block until at least one response is read, then return that batch."""
    while True:
        batch = self.read()
        if batch:
            return batch
Wait for a response
3,369
def pub(self, topic, message):
    """Publish one message to topic on a random live connection.

    Returns the responses gathered while waiting for the server's reply.
    """
    with self.random_connection() as conn:
        conn.pub(topic, message)
        return self.wait_response()
Publish the provided message to the provided topic
3,370
def mpub(self, topic, *messages):
    """Publish several messages to topic in one shot.

    Returns the responses gathered while waiting for the server's reply.
    """
    with self.random_connection() as conn:
        conn.mpub(topic, *messages)
        return self.wait_response()
Publish messages to a topic
3,371
def create_socket(self):
    """Create, bind, and listen on the daemon's unix command socket.

    The socket lives at <config_dir>/pueue.sock, is non-blocking, and is
    chmod'ed to owner-only access. On any failure the error is logged and
    the daemon exits with status 1.
    """
    socket_path = os.path.join(self.config_dir, 'pueue.sock')
    try:
        # Remove a stale socket file left behind by a previous run
        if os.path.exists(socket_path):
            os.remove(socket_path)
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(socket_path)
        self.socket.setblocking(0)
        self.socket.listen(0)
        # Owner-only: the socket accepts unauthenticated commands
        os.chmod(socket_path, stat.S_IRWXU)
    except Exception:
        self.logger.error("Daemon couldn't socket. Aborting")
        # BUG FIX: logging.Logger.exception() requires a message argument;
        # the original bare call raised TypeError and hid the real error.
        # NOTE(review): if self.logger is a project wrapper whose
        # exception() takes no args, adapt accordingly -- confirm.
        self.logger.exception('Failed to create daemon socket:')
        sys.exit(1)
    return self.socket
Create a socket for the daemon depending on the directory location .
3,372
def initialize_directories(self, root_dir):
    """Resolve and create the pueue config directory below root_dir.

    Falls back to the user's home directory when root_dir is falsy.
    """
    base = root_dir if root_dir else os.path.expanduser('~')
    self.config_dir = os.path.join(base, '.config/pueue')
    if not os.path.exists(self.config_dir):
        os.makedirs(self.config_dir)
Create all directories needed for logs and configs .
3,373
def respond_client(self, answer, socket):
    """Pickle answer, send it to the client socket, then drop the socket."""
    payload = pickle.dumps(answer, -1)
    socket.sendall(payload)
    # One response per request: stop polling this client and hang up.
    self.read_list.remove(socket)
    socket.close()
Send an answer to the client .
3,374
def read_config(self):
    """Load pueue.ini from the config dir, or (re)create it with defaults.

    A missing or unparsable config file is replaced by the default
    configuration, which is immediately written back to disk.
    """
    config_file = os.path.join(self.config_dir, 'pueue.ini')
    self.config = configparser.ConfigParser()
    if os.path.exists(config_file):
        try:
            self.config.read(config_file)
            return
        except Exception:
            self.logger.error('Error while parsing config file. Deleting old config')
            # BUG FIX: logging.Logger.exception() requires a message
            # argument; the original bare call raised TypeError and masked
            # the parse error it was supposed to report.
            self.logger.exception('Invalid config file:')
    # Fall through: build the default configuration from scratch.
    # configparser stringifies these values via read_dict.
    self.config['default'] = {
        'resumeAfterStart': False,
        'maxProcesses': 1,
        'customShell': 'default',
    }
    self.config['log'] = {
        # Keep logs for 14 days (in seconds)
        'logTime': 60 * 60 * 24 * 14,
    }
    self.write_config()
Read a previous configuration file or create a new with default values .
3,375
def write_config(self):
    """Persist the in-memory configuration to pueue.ini in the config dir."""
    path = os.path.join(self.config_dir, 'pueue.ini')
    with open(path, 'w') as handle:
        self.config.write(handle)
Write the current configuration to the config file .
3,376
def stop_daemon(self, payload=None):
    """Hard-kill every child process and mark the daemon loop for exit."""
    # SIGKILL everything, including the spawning shells (second arg True)
    self.process_handler.kill_all(signals['9'], True)
    self.running = False
    return {'message': 'Pueue daemon shutting down', 'status': 'success'}
Kill current processes and initiate daemon shutdown .
3,377
def set_config(self, payload):
    """Apply one config option from the payload and persist the config.

    Side effects: 'maxProcesses' adjusts the process handler's limit;
    'customShell' switches the shell after validating the executable.
    Returns an error answer (without saving) for an invalid shell path.
    """
    option = payload['option']
    value = payload['value']
    self.config['default'][option] = str(value)
    if option == 'maxProcesses':
        self.process_handler.set_max(value)
    if option == 'customShell':
        if os.path.isfile(value) and os.access(value, os.X_OK):
            self.process_handler.set_shell(value)
        elif value == 'default':
            self.process_handler.set_shell()
        else:
            return {'message': "File in path doesn't exist or is not executable.",
                    'status': 'error'}
    self.write_config()
    return {'message': 'Configuration successfully updated.', 'status': 'success'}
Update the current config depending on the payload and save it .
3,378
def pipe_to_process(self, payload):
    """Forward payload['input'] to the stdin of the process at payload['key']."""
    message = payload['input']
    key = payload['key']
    if not self.process_handler.is_running(key):
        return {'message': 'No running process for this key', 'status': 'error'}
    self.process_handler.send_to_process(message, key)
    return {'message': 'Message sent', 'status': 'success'}
Send something to stdin of a specific process .
3,379
def send_status(self, payload):
    """Build a status report: daemon state plus a copy of the queue.

    Process output (stdout/stderr) is stripped from the copy to keep the
    payload small; the daemon's own queue is never mutated.
    """
    state = 'paused' if self.paused else 'running'
    if len(self.queue) > 0:
        entries = deepcopy(self.queue.queue)
        for entry in entries.values():
            entry.pop('stderr', None)
            entry.pop('stdout', None)
    else:
        entries = 'Queue is empty'
    return {'status': state, 'data': entries}
Send the daemon status and the current queue for displaying .
3,380
def reset_everything(self, payload):
    """SIGKILL all processes, wait for them, and flag a full queue reset.

    The daemon's main loop performs the actual cleanup when it observes
    the `reset` flag.
    """
    self.process_handler.kill_all(signals['9'], True)
    self.process_handler.wait_for_finish()
    self.reset = True
    return {'message': 'Resetting current queue', 'status': 'success'}
Kill all processes, delete the queue, and clean everything up .
3,381
def clear(self, payload):
    """Rotate the log, drop finished/failed queue entries, rewrite the log."""
    self.logger.rotate(self.queue)
    self.queue.clear()
    self.logger.write(self.queue)
    return {'message': 'Finished entries have been removed.', 'status': 'success'}
Clear queue from any done or failed entries .
3,382
def edit_command(self, payload):
    """Replace the command of a queued or stashed entry.

    Returns an error answer when the entry is missing or is in any other
    state (running, done, failed, ...).
    """
    key = payload['key']
    command = payload['command']
    # BUG FIX: use .get() so a missing key yields the 'No entry' answer
    # instead of raising KeyError (the sibling `stash` already uses .get()).
    entry = self.queue.get(key)
    if entry:
        if entry['status'] in ['queued', 'stashed']:
            entry['command'] = command
            # BUG FIX: a successful update previously reported status
            # 'error' despite the 'Command updated' message.
            answer = {'message': 'Command updated', 'status': 'success'}
        else:
            answer = {'message': "Entry is not 'queued' or 'stashed'",
                      'status': 'error'}
    else:
        answer = {'message': 'No entry with this key', 'status': 'error'}
    return answer
Edit the command of a specific entry .
3,383
def stash(self, payload):
    """Mark the queued entries listed in payload['keys'] as stashed.

    Entries that don't exist or aren't 'queued' are reported as failures.
    """
    succeeded = []
    failed = []
    for key in payload['keys']:
        entry = self.queue.get(key)
        if entry is not None and entry['status'] == 'queued':
            entry['status'] = 'stashed'
            succeeded.append(str(key))
        else:
            failed.append(str(key))
    message = ''
    # BUG FIX: initialize status so an empty key list can't leave it
    # unbound (NameError in the original when both lists stayed empty).
    status = 'success'
    if succeeded:
        message += 'Stashed entries: {}.'.format(', '.join(succeeded))
    if failed:
        message += '\nNo queued entry for keys: {}'.format(', '.join(failed))
        status = 'error'
    return {'message': message.strip(), 'status': status}
Stash the specified processes .
3,384
# Send a signal (looked up in the module-level `signals` table by its
# lowercase name) either to the specific entries in payload['keys'] or,
# without keys, to every running process via kill_all. payload['all']
# additionally targets the spawning shell. A queue-wide INT/TERM/KILL
# also pauses the daemon.
# NOTE(review): 'Signal send to all processes.' has a typo ('sent') --
# left untouched here since clients may match on the exact message.
def kill_process ( self , payload ) : kill_signal = signals [ payload [ 'signal' ] . lower ( ) ] kill_shell = payload . get ( 'all' , False ) if payload . get ( 'keys' ) : succeeded = [ ] failed = [ ] for key in payload . get ( 'keys' ) : success = self . process_handler . kill_process ( key , kill_signal , kill_shell ) if success : succeeded . append ( str ( key ) ) else : failed . append ( str ( key ) ) message = '' if len ( succeeded ) > 0 : message += "Signal '{}' sent to processes: {}." . format ( payload [ 'signal' ] , ', ' . join ( succeeded ) ) status = 'success' if len ( failed ) > 0 : message += '\nNo running process for keys: {}' . format ( ', ' . join ( failed ) ) status = 'error' answer = { 'message' : message . strip ( ) , 'status' : status } else : self . process_handler . kill_all ( kill_signal , kill_shell ) if kill_signal == signal . SIGINT or kill_signal == signal . SIGTERM or kill_signal == signal . SIGKILL : self . paused = True answer = { 'message' : 'Signal send to all processes.' , 'status' : 'success' } return answer
Pause the daemon and kill all processes or kill a specific process .
3,385
def remove(self, payload):
    """Delete the non-running entries listed in payload['keys'] from the queue.

    Running entries and unknown keys are reported as failures.
    """
    succeeded = []
    failed = []
    for key in payload['keys']:
        # Only entries that are not currently running may be removed
        if not self.process_handler.is_running(key) and self.queue.remove(key):
            succeeded.append(str(key))
        else:
            failed.append(str(key))
    message = ''
    # BUG FIX: initialize status so an empty key list can't leave it
    # unbound (NameError in the original when both lists stayed empty).
    status = 'success'
    if succeeded:
        message += 'Removed entries: {}.'.format(', '.join(succeeded))
    if failed:
        message += '\nRunning or non-existing entry for keys: {}'.format(', '.join(failed))
        status = 'error'
    return {'message': message.strip(), 'status': status}
Remove specified entries from the queue .
3,386
def switch(self, payload):
    """Swap the queue positions of two non-running entries."""
    first = payload['first']
    second = payload['second']
    if self.process_handler.is_running(first) or self.process_handler.is_running(second):
        return {'message': "Can't switch running processes, "
                           "please stop the processes before switching them.",
                'status': 'error'}
    if self.queue.switch(first, second):
        return {'message': 'Entries #{} and #{} switched'.format(first, second),
                'status': 'success'}
    return {'message': "One or both entries do not exist or are not queued/stashed.",
            'status': 'error'}
Switch the two specified entry positions in the queue .
3,387
def restart(self, payload):
    """Re-queue the finished entries listed in payload['keys'].

    Keys whose entries can't be restarted are reported as failures.
    """
    succeeded = []
    failed = []
    for key in payload['keys']:
        if self.queue.restart(key):
            succeeded.append(str(key))
        else:
            failed.append(str(key))
    message = ''
    # BUG FIX: initialize status so an empty key list can't leave it
    # unbound (NameError in the original when both lists stayed empty).
    status = 'success'
    if succeeded:
        message += 'Restarted entries: {}.'.format(', '.join(succeeded))
    if failed:
        message += '\nNo finished entry for keys: {}'.format(', '.join(failed))
        status = 'error'
    return {'message': message.strip(), 'status': status}
Restart the specified entries .
3,388
def sendall(self, data, flags=0):
    """Keep calling send() until every byte of data has been transmitted.

    Mirrors socket.sendall for objects that only expose a partial send().
    """
    remaining = data
    while remaining:
        sent = self.send(remaining, flags)
        remaining = remaining[sent:]
Same as socket . sendall
3,389
def do_file_show(client, args):
    """Stream each requested file's contents to stdout; always returns True."""
    for src_uri in args.uris:
        # Write through the binary buffer so raw bytes pass through unmodified
        client.download_file(src_uri, sys.stdout.buffer)
    return True
Output file contents to stdout
3,390
def pub(self, topic, message):
    """POST a single message to nsqd's 'pub' endpoint for the given topic."""
    return self.post('pub', params={'topic': topic}, data=message)
Publish a message to a topic
3,391
def mpub(self, topic, messages, binary=True):
    """POST multiple messages to nsqd's 'mpub' endpoint.

    In binary mode the messages are length-prefixed (newline-safe); in
    text mode they are newline-joined, so messages containing newlines
    are rejected with a ClientException.
    """
    if binary:
        # pack() prepends a 4-byte total size which the HTTP API doesn't want
        return self.post('mpub', data=pack(messages)[4:],
                         params={'topic': topic, 'binary': True})
    elif any('\n' in m for m in messages):
        raise ClientException('Use `binary` flag in mpub for messages with newlines')
    else:
        # BUG FIX: use 'mpub' (no leading slash) so both branches hit the
        # same endpoint form as the sibling `pub` call.
        return self.post('mpub', params={'topic': topic}, data='\n'.join(messages))
Send multiple messages to a topic . Optionally pack the messages
3,392
def clean_stats(self):
    """Return stats() re-keyed: topics by topic_name, channels by channel_name."""
    stats = self.stats()
    if 'topics' not in stats:
        return stats
    topics = {}
    for topic in stats['topics']:
        name = topic.pop('topic_name')
        if 'channels' in topic:
            topic['channels'] = dict(
                (channel.pop('channel_name'), channel)
                for channel in topic['channels'])
        topics[name] = topic
    stats['topics'] = topics
    return stats
Stats with topics and channels keyed on topic and channel names
3,393
def execute_add(args, root_dir=None):
    """Send an 'add' instruction (command string + cwd) to the pueue daemon."""
    instruction = {
        'command': ' '.join(args['command']),
        'path': os.getcwd(),
    }
    print_command_factory('add')(instruction, root_dir)
Add a new command to the daemon queue .
3,394
# Client-side 'edit' command: fetch the daemon status, ensure the chosen
# entry exists and is 'queued' or 'stashed', open its command string in
# $EDITOR (default 'vim') via a temp file, then send the edited text back
# to the daemon as an 'edit' instruction. Exits with status 1 on any
# validation failure.
def execute_edit ( args , root_dir = None ) : EDITOR = os . environ . get ( 'EDITOR' , 'vim' ) key = args [ 'key' ] status = command_factory ( 'status' ) ( { } , root_dir = root_dir ) if not isinstance ( status [ 'data' ] , str ) and key in status [ 'data' ] : if status [ 'data' ] [ key ] [ 'status' ] in [ 'queued' , 'stashed' ] : command = status [ 'data' ] [ key ] [ 'command' ] else : print ( "Entry is not 'queued' or 'stashed'" ) sys . exit ( 1 ) else : print ( 'No entry with this key' ) sys . exit ( 1 ) with tempfile . NamedTemporaryFile ( suffix = ".tmp" ) as tf : tf . write ( command . encode ( 'utf-8' ) ) tf . flush ( ) call ( [ EDITOR , tf . name ] ) tf . seek ( 0 ) edited_command = tf . read ( ) . decode ( 'utf-8' ) print_command_factory ( 'edit' ) ( { 'key' : key , 'command' : edited_command , } , root_dir = root_dir )
Edit an existing queue command in the daemon .
3,395
def command_factory(command):
    """Return a function that sends `command` plus a payload to the daemon.

    The returned function connects to the daemon socket, sends the pickled
    payload (with its 'mode' set to `command`), and returns the unpickled
    response.
    """
    def communicate(body=None, root_dir=None):
        # BUG FIX: the original used a mutable default `body={}` that was
        # mutated (mode set, 'func' deleted) -- shared across calls and
        # mutating the caller's dict. Copy instead.
        body = dict(body) if body else {}
        client = connect_socket(root_dir)
        body['mode'] = command
        # The 'func' entry (argparse handler) can't be pickled.
        body.pop('func', None)
        data_string = pickle.dumps(body, -1)
        client.send(data_string)
        response = receive_data(client)
        return response
    return communicate
A factory which returns functions for direct daemon communication .
3,396
def get_descriptor(self, number):
    """Create fresh stdout/stderr capture files for process `number`.

    Stale files from a previous run are removed first. The open
    descriptors and their paths are cached in self.descriptors, and the
    (stdout, stderr) descriptor pair is returned.
    """
    entry = {}
    for stream in ('stdout', 'stderr'):
        path = os.path.join(
            self.config_dir, 'pueue_process_{}.{}'.format(number, stream))
        if os.path.exists(path):
            os.remove(path)
        entry[stream] = open(path, 'w+')
        entry['{}_path'.format(stream)] = path
    self.descriptors[number] = entry
    return entry['stdout'], entry['stderr']
Create file descriptors for process output .
3,397
def clean_descriptor(self, number):
    """Close process `number`'s output descriptors and delete their files."""
    entry = self.descriptors[number]
    entry['stdout'].close()
    entry['stderr'].close()
    for path_key in ('stdout_path', 'stderr_path'):
        path = entry[path_key]
        if os.path.exists(path):
            os.remove(path)
Close file descriptor and remove underlying files .
3,398
# Reap finished child processes:
# - normal exit: read stdout/stderr back from the per-process descriptor
#   files, record returncode plus status ('done' or 'failed') and the end
#   time, then persist the queue. Returns True in this case.
# - deliberately stopped (key in self.stopping): the entry is either
#   dropped (to_remove), stashed (to_stash), or re-queued with cleared
#   start/end times; captured output is discarded.
# Descriptors are cleaned and the process entry removed in both cases.
# NOTE(review): process.communicate() is called and its result is
# immediately overwritten by the descriptor reads -- it appears to serve
# only to drain/close the pipes; confirm before removing.
def check_finished ( self ) : changed = False for key in list ( self . processes . keys ( ) ) : process = self . processes [ key ] process . poll ( ) if process . returncode is not None : if key not in self . stopping : output , error_output = process . communicate ( ) descriptor = self . descriptors [ key ] descriptor [ 'stdout' ] . seek ( 0 ) descriptor [ 'stderr' ] . seek ( 0 ) output = get_descriptor_output ( descriptor [ 'stdout' ] , key , handler = self ) error_output = get_descriptor_output ( descriptor [ 'stderr' ] , key , handler = self ) self . queue [ key ] [ 'returncode' ] = process . returncode if process . returncode != 0 : self . queue [ key ] [ 'status' ] = 'failed' else : self . queue [ key ] [ 'status' ] = 'done' self . queue [ key ] [ 'stdout' ] = output self . queue [ key ] [ 'stderr' ] = error_output self . queue [ key ] [ 'end' ] = str ( datetime . now ( ) . strftime ( "%H:%M" ) ) self . queue . write ( ) changed = True else : self . stopping . remove ( key ) if key in self . to_remove : self . to_remove . remove ( key ) del self . queue [ key ] else : if key in self . to_stash : self . to_stash . remove ( key ) self . queue [ key ] [ 'status' ] = 'stashed' else : self . queue [ key ] [ 'status' ] = 'queued' self . queue [ key ] [ 'start' ] = '' self . queue [ key ] [ 'end' ] = '' self . queue . write ( ) self . clean_descriptor ( key ) del self . processes [ key ] return changed
Poll all processes and handle any finished processes .
3,399
def check_for_new(self):
    """Fill any free process slots with the next queued entries."""
    free_slots = self.max_processes - len(self.processes)
    for _ in range(free_slots):
        key = self.queue.next()
        if key is not None:
            self.spawn_new(key)
Check if we can start a new process .