idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
55,200 | def probe ( cls , resource , enable , disable , test , host , interval , http_method , http_response , threshold , timeout , url , window ) : params = { 'host' : host , 'interval' : interval , 'method' : http_method , 'response' : http_response , 'threshold' : threshold , 'timeout' : timeout , 'url' : url , 'window' : window } if enable : params [ 'enable' ] = True elif disable : params [ 'enable' ] = False if test : result = cls . call ( 'hosting.rproxy.probe.test' , cls . usable_id ( resource ) , params ) else : result = cls . call ( 'hosting.rproxy.probe.update' , cls . usable_id ( resource ) , params ) cls . display_progress ( result ) return result | Set a probe for a webaccelerator |
55,201 | def probe_enable ( cls , resource ) : oper = cls . call ( 'hosting.rproxy.probe.enable' , cls . usable_id ( resource ) ) cls . echo ( 'Activating probe on %s' % resource ) cls . display_progress ( oper ) cls . echo ( 'The probe have been activated' ) return oper | Activate a probe on a webaccelerator |
55,202 | def probe_disable ( cls , resource ) : oper = cls . call ( 'hosting.rproxy.probe.disable' , cls . usable_id ( resource ) ) cls . echo ( 'Desactivating probe on %s' % resource ) cls . display_progress ( oper ) cls . echo ( 'The probe have been desactivated' ) return oper | Disable a probe on a webaccelerator |
55,203 | def usable_id ( cls , id ) : try : qry_id = cls . from_name ( id ) if not qry_id : qry_id = cls . from_ip ( id ) if not qry_id : qry_id = cls . from_vhost ( id ) except Exception : qry_id = None if not qry_id : msg = 'unknown identifier %s' % id cls . error ( msg ) return qry_id | Retrieve id from input which can be hostname vhost id . |
55,204 | def from_name ( cls , name ) : result = cls . list ( { 'items_per_page' : 500 } ) webaccs = { } for webacc in result : webaccs [ webacc [ 'name' ] ] = webacc [ 'id' ] return webaccs . get ( name ) | Retrieve webacc id associated to a webacc name . |
55,205 | def from_ip ( cls , ip ) : result = cls . list ( { 'items_per_page' : 500 } ) webaccs = { } for webacc in result : for server in webacc [ 'servers' ] : webaccs [ server [ 'ip' ] ] = webacc [ 'id' ] return webaccs . get ( ip ) | Retrieve webacc id associated to a webacc ip |
55,206 | def from_vhost ( cls , vhost ) : result = cls . list ( { 'items_per_page' : 500 } ) webaccs = { } for webacc in result : for vhost in webacc [ 'vhosts' ] : webaccs [ vhost [ 'name' ] ] = webacc [ 'id' ] return webaccs . get ( vhost ) | Retrieve webbacc id associated to a webacc vhost |
55,207 | def descriptions ( cls ) : schema = cls . json_get ( '%s/status/schema' % cls . api_url , empty_key = True , send_key = False ) descs = { } for val in schema [ 'fields' ] [ 'status' ] [ 'value' ] : descs . update ( val ) return descs | Retrieve status descriptions from status . gandi . net . |
55,208 | def services ( cls ) : return cls . json_get ( '%s/services' % cls . api_url , empty_key = True , send_key = False ) | Retrieve services statuses from status . gandi . net . |
55,209 | def status ( cls ) : return cls . json_get ( '%s/status' % cls . api_url , empty_key = True , send_key = False ) | Retrieve global status from status . gandi . net . |
55,210 | def events ( cls , filters ) : current = filters . pop ( 'current' , False ) current_params = [ ] if current : current_params = [ ( 'current' , 'true' ) ] filter_url = uparse . urlencode ( sorted ( list ( filters . items ( ) ) ) + current_params ) events = cls . json_get ( '%s/events?%s' % ( cls . api_url , filter_url ) , empty_key = True , send_key = False ) return events | Retrieve events details from status . gandi . net . |
55,211 | def list ( gandi , state , id , vhosts , type , limit ) : options = { 'items_per_page' : limit , } if state : options [ 'state' ] = state output_keys = [ 'name' , 'state' ] if id : output_keys . append ( 'id' ) if vhosts : output_keys . append ( 'vhost' ) if type : output_keys . append ( 'type' ) paas_hosts = { } result = gandi . paas . list ( options ) for num , paas in enumerate ( result ) : paas_hosts [ paas [ 'id' ] ] = [ ] if vhosts : list_vhost = gandi . vhost . list ( { 'paas_id' : paas [ 'id' ] } ) for host in list_vhost : paas_hosts [ paas [ 'id' ] ] . append ( host [ 'name' ] ) if num : gandi . separator_line ( ) output_paas ( gandi , paas , [ ] , paas_hosts [ paas [ 'id' ] ] , output_keys ) return result | List PaaS instances . |
55,212 | def info ( gandi , resource , stat ) : output_keys = [ 'name' , 'type' , 'size' , 'memory' , 'console' , 'vhost' , 'dc' , 'sftp_server' , 'git_server' , 'snapshot' ] paas = gandi . paas . info ( resource ) paas_hosts = [ ] list_vhost = gandi . vhost . list ( { 'paas_id' : paas [ 'id' ] } ) df = gandi . paas . quota ( paas [ 'id' ] ) paas . update ( { 'df' : df } ) if stat : cache = gandi . paas . cache ( paas [ 'id' ] ) paas . update ( { 'cache' : cache } ) for host in list_vhost : paas_hosts . append ( host [ 'name' ] ) output_paas ( gandi , paas , [ ] , paas_hosts , output_keys ) return paas | Display information about a PaaS instance . |
55,213 | def clone ( gandi , name , vhost , directory , origin ) : if vhost != 'default' : directory = vhost else : directory = name if not directory else directory return gandi . paas . clone ( name , vhost , directory , origin ) | Clone a remote vhost in a local git repository . |
55,214 | def attach ( gandi , name , vhost , remote ) : return gandi . paas . attach ( name , vhost , remote ) | Add remote for an instance s default vhost to the local git repository . |
55,215 | def create ( gandi , name , size , type , quantity , duration , datacenter , vhosts , password , snapshotprofile , background , sshkey , ssl , private_key , poll_cert ) : try : gandi . datacenter . is_opened ( datacenter , 'paas' ) except DatacenterLimited as exc : gandi . echo ( '/!\ Datacenter %s will be closed on %s, ' 'please consider using another datacenter.' % ( datacenter , exc . date ) ) if not password : password = click . prompt ( 'password' , hide_input = True , confirmation_prompt = True ) if not name : name = randomstring ( 'paas' ) if vhosts and not gandi . hostedcert . activate_ssl ( vhosts , ssl , private_key , poll_cert ) : return result = gandi . paas . create ( name , size , type , quantity , duration , datacenter , vhosts , password , snapshotprofile , background , sshkey ) return result | Create a new PaaS instance and initialize associated git repository . |
55,216 | def restart ( gandi , resource , background , force ) : output_keys = [ 'id' , 'type' , 'step' ] possible_resources = gandi . paas . resource_list ( ) for item in resource : if item not in possible_resources : gandi . echo ( 'Sorry PaaS instance %s does not exist' % item ) gandi . echo ( 'Please use one of the following: %s' % possible_resources ) return if not force : instance_info = "'%s'" % ', ' . join ( resource ) proceed = click . confirm ( "Are you sure to restart PaaS instance %s?" % instance_info ) if not proceed : return opers = gandi . paas . restart ( resource , background ) if background : for oper in opers : output_generic ( gandi , oper , output_keys ) return opers | Restart a PaaS instance . |
55,217 | def types ( gandi ) : options = { } types = gandi . paas . type_list ( options ) for type_ in types : gandi . echo ( type_ [ 'name' ] ) return types | List types PaaS instances . |
55,218 | def info ( gandi , resource , id , value ) : output_keys = [ 'name' , 'fingerprint' ] if id : output_keys . append ( 'id' ) if value : output_keys . append ( 'value' ) ret = [ ] for item in resource : sshkey = gandi . sshkey . info ( item ) ret . append ( output_sshkey ( gandi , sshkey , output_keys ) ) return ret | Display information about an SSH key . |
55,219 | def create ( gandi , name , value = None , filename = None ) : if not value and not filename : raise UsageError ( 'You must set value OR filename.' ) if value and filename : raise UsageError ( 'You must not set value AND filename.' ) if filename : value = filename . read ( ) ret = gandi . sshkey . create ( name , value ) output_keys = [ 'id' , 'name' , 'fingerprint' ] return output_sshkey ( gandi , ret , output_keys ) | Create a new SSH key . |
55,220 | def creditusage ( cls ) : rating = cls . call ( 'hosting.rating.list' ) if not rating : return 0 rating = rating . pop ( ) usage = [ sum ( resource . values ( ) ) for resource in rating . values ( ) if isinstance ( resource , dict ) ] return sum ( usage ) | Get credit usage per hour |
55,221 | def all ( cls ) : account = cls . info ( ) creditusage = cls . creditusage ( ) if not creditusage : return account left = account [ 'credits' ] / creditusage years , hours = divmod ( left , 365 * 24 ) months , hours = divmod ( hours , 31 * 24 ) days , hours = divmod ( hours , 24 ) account . update ( { 'credit_usage' : creditusage , 'left' : ( years , months , days , hours ) } ) return account | Get all informations about this account |
55,222 | def list ( gandi , only_paas , only_vm ) : target = None if only_paas and not only_vm : target = 'paas' if only_vm and not only_paas : target = 'vm' output_keys = [ 'id' , 'name' , 'kept_total' , 'target' ] result = gandi . snapshotprofile . list ( { } , target = target ) for num , profile in enumerate ( result ) : if num : gandi . separator_line ( ) output_snapshot_profile ( gandi , profile , output_keys ) return result | List snapshot profiles . |
55,223 | def list ( gandi , id , altnames , csr , cert , all_status , status , dates , limit ) : options = { 'items_per_page' : limit } if not all_status : options [ 'status' ] = [ 'valid' , 'pending' ] output_keys = [ 'cn' , 'plan' ] if id : output_keys . append ( 'id' ) if status : output_keys . append ( 'status' ) if dates : output_keys . extend ( [ 'date_created' , 'date_end' ] ) if altnames : output_keys . append ( 'altnames' ) if csr : output_keys . append ( 'csr' ) if cert : output_keys . append ( 'cert' ) result = gandi . certificate . list ( options ) for num , cert in enumerate ( result ) : if num : gandi . separator_line ( ) cert [ 'plan' ] = package_desc ( gandi , cert [ 'package' ] ) output_cert ( gandi , cert , output_keys ) return result | List certificates . |
55,224 | def info ( gandi , resource , id , altnames , csr , cert , all_status ) : output_keys = [ 'cn' , 'date_created' , 'date_end' , 'plan' , 'status' ] if id : output_keys . append ( 'id' ) if altnames : output_keys . append ( 'altnames' ) if csr : output_keys . append ( 'csr' ) if cert : output_keys . append ( 'cert' ) ids = [ ] for res in resource : ids . extend ( gandi . certificate . usable_ids ( res ) ) result = [ ] for num , id_ in enumerate ( set ( ids ) ) : cert = gandi . certificate . info ( id_ ) if not all_status and cert [ 'status' ] not in [ 'valid' , 'pending' ] : continue if num : gandi . separator_line ( ) cert [ 'plan' ] = package_desc ( gandi , cert [ 'package' ] ) output_cert ( gandi , cert , output_keys ) result . append ( cert ) return result | Display information about a certificate . |
55,225 | def update ( gandi , resource , csr , private_key , country , state , city , organisation , branch , altnames , dcv_method ) : ids = gandi . certificate . usable_ids ( resource ) if len ( ids ) > 1 : gandi . echo ( 'Will not update, %s is not precise enough.' % resource ) gandi . echo ( ' * cert : ' + '\n * cert : ' . join ( [ str ( id_ ) for id_ in ids ] ) ) return id_ = ids [ 0 ] result = gandi . certificate . update ( id_ , csr , private_key , country , state , city , organisation , branch , altnames , dcv_method ) gandi . echo ( 'The certificate update operation is %s' % result [ 'id' ] ) gandi . echo ( 'You can follow it with:' ) gandi . echo ( '$ gandi certificate follow %s' % result [ 'id' ] ) gandi . echo ( 'When the operation is DONE, you can retrieve the .crt' ' with:' ) gandi . echo ( '$ gandi certificate export "%s"' % resource ) return result | Update a certificate CSR . |
55,226 | def follow ( gandi , resource ) : oper = gandi . oper . info ( int ( resource ) ) assert ( oper [ 'type' ] . startswith ( 'certificate_' ) ) output_cert_oper ( gandi , oper ) return oper | Get the operation status |
55,227 | def change_dcv ( gandi , resource , dcv_method ) : ids = gandi . certificate . usable_ids ( resource ) if len ( ids ) > 1 : gandi . echo ( 'Will not update, %s is not precise enough.' % resource ) gandi . echo ( ' * cert : ' + '\n * cert : ' . join ( [ str ( id_ ) for id_ in ids ] ) ) return id_ = ids [ 0 ] opers = gandi . oper . list ( { 'cert_id' : id_ } ) if not opers : gandi . echo ( 'Can not find any operation for this certificate.' ) return oper = opers [ 0 ] if ( oper [ 'step' ] != 'RUN' and oper [ 'params' ] [ 'inner_step' ] != 'comodo_oper_updated' ) : gandi . echo ( 'This certificate operation is not in the good step to ' 'update the DCV method.' ) return gandi . certificate . change_dcv ( oper [ 'id' ] , dcv_method ) cert = gandi . certificate . info ( id_ ) csr = oper [ 'params' ] [ 'csr' ] package = cert [ 'package' ] altnames = oper [ 'params' ] . get ( 'altnames' ) gandi . certificate . advice_dcv_method ( csr , package , altnames , dcv_method , cert_id = id_ ) | Change the DCV for a running certificate operation . |
55,228 | def resend_dcv ( gandi , resource ) : ids = gandi . certificate . usable_ids ( resource ) if len ( ids ) > 1 : gandi . echo ( 'Will not update, %s is not precise enough.' % resource ) gandi . echo ( ' * cert : ' + '\n * cert : ' . join ( [ str ( id_ ) for id_ in ids ] ) ) return id_ = ids [ 0 ] opers = gandi . oper . list ( { 'cert_id' : id_ } ) if not opers : gandi . echo ( 'Can not find any operation for this certificate.' ) return oper = opers [ 0 ] if ( oper [ 'step' ] != 'RUN' and oper [ 'params' ] [ 'inner_step' ] != 'comodo_oper_updated' ) : gandi . echo ( 'This certificate operation is not in the good step to ' 'resend the DCV.' ) return if oper [ 'params' ] [ 'dcv_method' ] != 'email' : gandi . echo ( 'This certificate operation is not in email DCV.' ) return gandi . certificate . resend_dcv ( oper [ 'id' ] ) | Resend the DCV mail . |
55,229 | def delete ( gandi , resource , background , force ) : ids = gandi . certificate . usable_ids ( resource ) if len ( ids ) > 1 : gandi . echo ( 'Will not delete, %s is not precise enough.' % resource ) gandi . echo ( ' * cert : ' + '\n * cert : ' . join ( [ str ( id_ ) for id_ in ids ] ) ) return if not force : proceed = click . confirm ( "Are you sure to delete the certificate %s?" % resource ) if not proceed : return result = gandi . certificate . delete ( ids [ 0 ] , background ) return result | Revoke the certificate . |
55,230 | def list ( gandi , id , vhosts , dates , fqdns , limit ) : justify = 10 options = { 'items_per_page' : limit , 'state' : 'created' } output_keys = [ ] if id : output_keys . append ( 'id' ) output_keys . append ( 'subject' ) if dates : output_keys . extend ( [ 'date_created' , 'date_expire' ] ) justify = 12 if fqdns : output_keys . append ( 'fqdns' ) if vhosts : output_keys . append ( 'vhosts' ) result = gandi . hostedcert . list ( options ) for num , hcert in enumerate ( result ) : if num : gandi . separator_line ( ) if fqdns or vhosts : hcert = gandi . hostedcert . info ( hcert [ 'id' ] ) output_hostedcert ( gandi , hcert , output_keys , justify ) return result | List hosted certificates . |
55,231 | def info ( gandi , resource ) : output_keys = [ 'id' , 'subject' , 'date_created' , 'date_expire' , 'fqdns' , 'vhosts' ] result = gandi . hostedcert . infos ( resource ) for num , hcert in enumerate ( result ) : if num : gandi . separator_line ( ) output_hostedcert ( gandi , hcert , output_keys ) return result | Display information about a hosted certificate . |
55,232 | def create ( gandi , private_key , certificate , certificate_id ) : if not certificate and not certificate_id : gandi . echo ( 'One of --certificate or --certificate-id is needed.' ) return if certificate and certificate_id : gandi . echo ( 'Only one of --certificate or --certificate-id is needed.' ) if os . path . isfile ( private_key ) : with open ( private_key ) as fhandle : private_key = fhandle . read ( ) if certificate : if os . path . isfile ( certificate ) : with open ( certificate ) as fhandle : certificate = fhandle . read ( ) else : cert = gandi . certificate . info ( certificate_id ) certificate = gandi . certificate . pretty_format_cert ( cert ) result = gandi . hostedcert . create ( private_key , certificate ) output_keys = [ 'id' , 'subject' , 'date_created' , 'date_expire' , 'fqdns' , 'vhosts' ] output_hostedcert ( gandi , result , output_keys ) return result | Create a new hosted certificate . |
55,233 | def delete ( gandi , resource , force ) : infos = gandi . hostedcert . infos ( resource ) if not infos : return if not force : proceed = click . confirm ( 'Are you sure to delete the following hosted ' 'certificates ?\n' + '\n' . join ( [ '%s: %s' % ( res [ 'id' ] , res [ 'subject' ] ) for res in infos ] ) + '\n' ) if not proceed : return for res in infos : gandi . hostedcert . delete ( res [ 'id' ] ) | Delete a hosted certificate . |
55,234 | def flatten ( l , types = ( list , float ) ) : l = [ item if isinstance ( item , types ) else [ item ] for item in l ] return [ item for sublist in l for item in sublist ] | Flat nested list of lists into a single list . |
55,235 | def get_width ( ) : ws = struct . pack ( "HHHH" , 0 , 0 , 0 , 0 ) ws = fcntl . ioctl ( sys . stdout . fileno ( ) , termios . TIOCGWINSZ , ws ) lines , columns , x , y = struct . unpack ( "HHHH" , ws ) width = min ( columns * 39 // 40 , columns - 2 ) return width | Get terminal width |
55,236 | def groff2man ( data ) : width = get_width ( ) cmd = 'groff -t -Tascii -m man -rLL=%dn -rLT=%dn' % ( width , width ) handle = subprocess . Popen ( cmd , shell = True , stdin = subprocess . PIPE , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) man_text , stderr = handle . communicate ( data ) return man_text | Read groff - formatted text and output man pages . |
55,237 | def extract_name ( self , data ) : name = re . search ( '<h1[^>]*>(.+?)</h1>' , data ) . group ( 1 ) name = re . sub ( r'<([^>]+)>' , r'' , name ) name = re . sub ( r'>' , r'>' , name ) name = re . sub ( r'<' , r'<' , name ) return name | Extract man page name from web page . |
55,238 | def cache_all ( self ) : respond = input ( 'By default, cppman fetches pages on-the-fly if corresponding ' 'page is not found in the cache. The "cache-all" option is only ' 'useful if you want to view man pages offline. ' 'Caching all contents will take several minutes, ' 'do you want to continue [y/N]? ' ) if not ( respond and 'yes' . startswith ( respond . lower ( ) ) ) : raise KeyboardInterrupt try : os . makedirs ( environ . cache_dir ) except : pass self . success_count = 0 self . failure_count = 0 if not os . path . exists ( environ . index_db ) : raise RuntimeError ( "can't find index.db" ) conn = sqlite3 . connect ( environ . index_db ) cursor = conn . cursor ( ) source = environ . config . source print ( 'Caching manpages from %s ...' % source ) data = cursor . execute ( 'SELECT * FROM "%s"' % source ) . fetchall ( ) for name , url , _ in data : print ( 'Caching %s ...' % name ) retries = 3 while retries > 0 : try : self . cache_man_page ( source , url , name ) except Exception : print ( 'Retrying ...' ) retries -= 1 else : self . success_count += 1 break else : print ( 'Error caching %s ...' % name ) self . failure_count += 1 conn . close ( ) print ( '\n%d manual pages cached successfully.' % self . success_count ) print ( '%d manual pages failed to cache.' % self . failure_count ) self . update_mandb ( False ) | Cache all available man pages |
55,239 | def cache_man_page ( self , source , url , name ) : outname = self . get_page_path ( source , name ) if os . path . exists ( outname ) and not self . forced : return try : os . makedirs ( os . path . join ( environ . cache_dir , source ) ) except OSError : pass data = util . fixupHTML ( urllib . request . urlopen ( url ) . read ( ) ) formatter = importlib . import_module ( 'cppman.formatter.%s' % source [ : - 4 ] ) groff_text = formatter . html2groff ( data , name ) with gzip . open ( outname , 'w' ) as f : f . write ( groff_text . encode ( 'utf-8' ) ) | callback to cache new man page |
55,240 | def man ( self , pattern ) : try : avail = os . listdir ( os . path . join ( environ . cache_dir , environ . source ) ) except OSError : avail = [ ] if not os . path . exists ( environ . index_db ) : raise RuntimeError ( "can't find index.db" ) conn = sqlite3 . connect ( environ . index_db ) cursor = conn . cursor ( ) try : page_name , url = cursor . execute ( 'SELECT name,url FROM "%s" ' 'WHERE name="%s" ORDER BY LENGTH(name)' % ( environ . source , pattern ) ) . fetchone ( ) except TypeError : try : page_name , url = cursor . execute ( 'SELECT name,url FROM "%s" ' 'WHERE name="std::%s" ORDER BY LENGTH(name)' % ( environ . source , pattern ) ) . fetchone ( ) except TypeError : try : page_name , url = cursor . execute ( 'SELECT name,url FROM "%s" ' 'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)' % ( environ . source , pattern ) ) . fetchone ( ) except TypeError : raise RuntimeError ( 'No manual entry for ' + pattern ) finally : conn . close ( ) page_filename = self . get_normalized_page_name ( page_name ) if self . forced or page_filename + '.3.gz' not in avail : self . cache_man_page ( environ . source , url , page_name ) pager_type = environ . pager if sys . stdout . isatty ( ) else 'pipe' columns = ( util . get_width ( ) if self . force_columns == - 1 else self . force_columns ) pid = os . fork ( ) if pid == 0 : os . execl ( '/bin/sh' , '/bin/sh' , environ . pager_script , pager_type , self . get_page_path ( environ . source , page_name ) , str ( columns ) , environ . pager_config , page_name ) return pid | Call viewer . sh to view man page |
55,241 | def find ( self , pattern ) : if not os . path . exists ( environ . index_db ) : raise RuntimeError ( "can't find index.db" ) conn = sqlite3 . connect ( environ . index_db ) cursor = conn . cursor ( ) selected = cursor . execute ( 'SELECT * FROM "%s" WHERE name ' 'LIKE "%%%s%%" ORDER BY LENGTH(name)' % ( environ . source , pattern ) ) . fetchall ( ) pat = re . compile ( '(%s)' % re . escape ( pattern ) , re . I ) if selected : for name , url , std in selected : if os . isatty ( sys . stdout . fileno ( ) ) : print ( pat . sub ( r'\033[1;31m\1\033[0m' , name ) + ( ' \033[1;33m[%s]\033[0m' % std if std else '' ) ) else : print ( name + ( ' [%s]' % std if std else '' ) ) else : raise RuntimeError ( '%s: nothing appropriate.' % pattern ) | Find pages in database . |
55,242 | def update_mandb ( self , quiet = True ) : if not environ . config . UpdateManPath : return print ( '\nrunning mandb...' ) cmd = 'mandb %s' % ( ' -q' if quiet else '' ) subprocess . Popen ( cmd , shell = True ) . wait ( ) | Update mandb . |
55,243 | def set_default ( self ) : try : os . makedirs ( os . path . dirname ( self . _configfile ) ) except : pass self . _config = configparser . RawConfigParser ( ) self . _config . add_section ( 'Settings' ) for key , val in self . DEFAULTS . items ( ) : self . _config . set ( 'Settings' , key , val ) with open ( self . _configfile , 'w' ) as f : self . _config . write ( f ) | Set config to default . |
55,244 | def save ( self ) : try : os . makedirs ( os . path . dirname ( self . _configfile ) ) except : pass with open ( self . _configfile , 'w' ) as f : self . _config . write ( f ) | Store config back to file . |
55,245 | def get_free_gpus ( max_procs = 0 ) : logger = logging . getLogger ( __name__ ) try : py3nvml . nvmlInit ( ) except : str_ = warnings . warn ( str_ , RuntimeWarning ) logger . warn ( str_ ) return [ ] num_gpus = py3nvml . nvmlDeviceGetCount ( ) gpu_free = [ False ] * num_gpus for i in range ( num_gpus ) : try : h = py3nvml . nvmlDeviceGetHandleByIndex ( i ) except : continue procs = try_get_info ( py3nvml . nvmlDeviceGetComputeRunningProcesses , h , [ 'something' ] ) if len ( procs ) <= max_procs : gpu_free [ i ] = True py3nvml . nvmlShutdown ( ) return gpu_free | Checks the number of processes running on your GPUs . |
55,246 | def get_num_procs ( ) : logger = logging . getLogger ( __name__ ) try : py3nvml . nvmlInit ( ) except : str_ = warnings . warn ( str_ , RuntimeWarning ) logger . warn ( str_ ) return [ ] num_gpus = py3nvml . nvmlDeviceGetCount ( ) gpu_procs = [ - 1 ] * num_gpus for i in range ( num_gpus ) : try : h = py3nvml . nvmlDeviceGetHandleByIndex ( i ) except : continue procs = try_get_info ( py3nvml . nvmlDeviceGetComputeRunningProcesses , h , [ 'something' ] ) gpu_procs [ i ] = len ( procs ) py3nvml . nvmlShutdown ( ) return gpu_procs | Gets the number of processes running on each gpu |
55,247 | def _extractNVMLErrorsAsClasses ( ) : this_module = sys . modules [ __name__ ] nvmlErrorsNames = [ x for x in dir ( this_module ) if x . startswith ( "NVML_ERROR_" ) ] for err_name in nvmlErrorsNames : class_name = "NVMLError_" + string . capwords ( err_name . replace ( "NVML_ERROR_" , "" ) , "_" ) . replace ( "_" , "" ) err_val = getattr ( this_module , err_name ) def gen_new ( val ) : def new ( typ ) : obj = NVMLError . __new__ ( typ , val ) return obj return new new_error_class = type ( class_name , ( NVMLError , ) , { '__new__' : gen_new ( err_val ) } ) new_error_class . __module__ = __name__ setattr ( this_module , class_name , new_error_class ) NVMLError . _valClassMapping [ err_val ] = new_error_class | Generates a hierarchy of classes on top of NVMLError class . |
55,248 | def _LoadNvmlLibrary ( ) : global nvmlLib if ( nvmlLib is None ) : libLoadLock . acquire ( ) try : if ( nvmlLib is None ) : try : if ( sys . platform [ : 3 ] == "win" ) : searchPaths = [ os . path . join ( os . getenv ( "ProgramFiles" , r"C:\Program Files" ) , r"NVIDIA Corporation\NVSMI\nvml.dll" ) , os . path . join ( os . getenv ( "WinDir" , r"C:\Windows" ) , r"System32\nvml.dll" ) , ] nvmlPath = next ( ( x for x in searchPaths if os . path . isfile ( x ) ) , None ) if ( nvmlPath == None ) : _nvmlCheckReturn ( NVML_ERROR_LIBRARY_NOT_FOUND ) else : nvmlLib = CDLL ( nvmlPath ) else : nvmlLib = CDLL ( "libnvidia-ml.so.1" ) except OSError as ose : _nvmlCheckReturn ( NVML_ERROR_LIBRARY_NOT_FOUND ) if ( nvmlLib == None ) : _nvmlCheckReturn ( NVML_ERROR_LIBRARY_NOT_FOUND ) finally : libLoadLock . release ( ) | Load the library if it isn t loaded already |
55,249 | def encode_notifications ( tokens , notifications ) : fmt = "!BH32sH%ds" structify = lambda t , p : struct . pack ( fmt % len ( p ) , 0 , 32 , t , len ( p ) , p ) binaryify = lambda t : t . decode ( 'hex' ) if type ( notifications ) is dict and type ( tokens ) in ( str , unicode ) : tokens , notifications = ( [ tokens ] , [ notifications ] ) if type ( notifications ) is list and type ( tokens ) is list : return '' . join ( map ( lambda y : structify ( * y ) , ( ( binaryify ( t ) , json . dumps ( p , separators = ( ',' , ':' ) , ensure_ascii = False ) . encode ( 'utf-8' ) ) for t , p in zip ( tokens , notifications ) ) ) ) | Returns the encoded bytes of tokens and notifications tokens a list of tokens or a string of only one token notifications a list of notifications or a dictionary of only one |
55,250 | def write ( self , notifications ) : "Connect to the APNS service and send notifications" if not self . factory : log . msg ( 'APNSService write (connecting)' ) server , port = ( ( APNS_SERVER_SANDBOX_HOSTNAME if self . environment == 'sandbox' else APNS_SERVER_HOSTNAME ) , APNS_SERVER_PORT ) self . factory = self . clientProtocolFactory ( ) context = self . getContextFactory ( ) reactor . connectSSL ( server , port , self . factory , context ) client = self . factory . clientProtocol if client : return client . sendMessage ( notifications ) else : d = self . factory . deferred timeout = reactor . callLater ( self . timeout , lambda : d . called or d . errback ( Exception ( 'Notification timed out after %i seconds' % self . timeout ) ) ) def cancel_timeout ( r ) : try : timeout . cancel ( ) except : pass return r d . addCallback ( lambda p : p . sendMessage ( notifications ) ) d . addErrback ( log_errback ( 'apns-service-write' ) ) d . addBoth ( cancel_timeout ) return d | Connect to the APNS service and send notifications |
55,251 | def read ( self ) : "Connect to the feedback service and read all data." log . msg ( 'APNSService read (connecting)' ) try : server , port = ( ( FEEDBACK_SERVER_SANDBOX_HOSTNAME if self . environment == 'sandbox' else FEEDBACK_SERVER_HOSTNAME ) , FEEDBACK_SERVER_PORT ) factory = self . feedbackProtocolFactory ( ) context = self . getContextFactory ( ) reactor . connectSSL ( server , port , factory , context ) factory . deferred . addErrback ( log_errback ( 'apns-feedback-read' ) ) timeout = reactor . callLater ( self . timeout , lambda : factory . deferred . called or factory . deferred . errback ( Exception ( 'Feedbcak fetch timed out after %i seconds' % self . timeout ) ) ) def cancel_timeout ( r ) : try : timeout . cancel ( ) except : pass return r factory . deferred . addBoth ( cancel_timeout ) except Exception , e : log . err ( 'APNService feedback error initializing: %s' % str ( e ) ) raise return factory . deferred | Connect to the feedback service and read all data . |
55,252 | def reprovision_and_retry ( func ) : @ functools . wraps ( func ) def wrapper ( * a , ** kw ) : errback = kw . get ( 'errback' , None ) if errback is None : def errback ( e ) : raise e def errback_wrapper ( e ) : if isinstance ( e , UnknownAppID ) and 'INITIAL' in OPTIONS : try : for initial in OPTIONS [ 'INITIAL' ] : provision ( * initial ) func ( * a , ** kw ) except Exception , new_exc : errback ( new_exc ) else : errback ( e ) kw [ 'errback' ] = errback_wrapper return func ( * a , ** kw ) return wrapper | Wraps the errback callback of the API functions automatically trying to re - provision if the app ID can not be found during the operation . If that s unsuccessful it will raise the UnknownAppID error . |
55,253 | def pop ( self ) : char = self . code [ self . index ] self . index += 1 return char | removes the current character then moves to the next one returning the current character |
55,254 | def characters ( self , numberOfCharacters ) : return self . code [ self . index : self . index + numberOfCharacters ] | Returns characters at index + number of characters |
55,255 | def next_content ( self , start , amount = 1 ) : while start < len ( self . code ) and self . code [ start ] in ( ' ' , '\t' , '\n' ) : start += 1 return self . code [ start : start + amount ] | Returns the next non - whitespace characters |
55,256 | def prev_content ( self , start , amount = 1 ) : while start > 0 and self . code [ start ] in ( ' ' , '\t' , '\n' ) : start -= 1 return self . code [ ( start or amount ) - amount : start ] | Returns the prev non - whitespace characters |
55,257 | def parse_mapping ( mapping_file : Optional [ str ] ) -> configparser . ConfigParser : LOGGER . debug ( 'Parsing mapping file. Command line: %s' , mapping_file ) def parse ( mapping_file ) : config = configparser . ConfigParser ( ) config . read_file ( mapping_file ) return config if mapping_file is not None : LOGGER . debug ( 'Parsing command line mapping file' ) return parse ( mapping_file ) xdg_config_dir = xdg . BaseDirectory . load_first_config ( 'pass-git-helper' ) if xdg_config_dir is None : raise RuntimeError ( 'No mapping configured so far at any XDG config location. ' 'Please create {config_file}' . format ( config_file = DEFAULT_CONFIG_FILE ) ) mapping_file = os . path . join ( xdg_config_dir , CONFIG_FILE_NAME ) LOGGER . debug ( 'Parsing mapping file %s' , mapping_file ) with open ( mapping_file , 'r' ) as file_handle : return parse ( file_handle ) | Parse the file containing the mappings from hosts to pass entries . |
55,258 | def parse_request ( ) -> Dict [ str , str ] : in_lines = sys . stdin . readlines ( ) LOGGER . debug ( 'Received request "%s"' , in_lines ) request = { } for line in in_lines : if not line . strip ( ) : continue parts = line . split ( '=' , 1 ) assert len ( parts ) == 2 request [ parts [ 0 ] . strip ( ) ] = parts [ 1 ] . strip ( ) return request | Parse the request of the git credential API from stdin . |
55,259 | def get_password ( request , mapping ) -> None : LOGGER . debug ( 'Received request "%s"' , request ) if 'host' not in request : LOGGER . error ( 'host= entry missing in request. ' 'Cannot query without a host' ) return host = request [ 'host' ] if 'path' in request : host = '/' . join ( [ host , request [ 'path' ] ] ) def skip ( line , skip ) : return line [ skip : ] LOGGER . debug ( 'Iterating mapping to match against host "%s"' , host ) for section in mapping . sections ( ) : if fnmatch . fnmatch ( host , section ) : LOGGER . debug ( 'Section "%s" matches requested host "%s"' , section , host ) pass_target = mapping . get ( section , 'target' ) . replace ( "${host}" , request [ 'host' ] ) password_extractor = SpecificLineExtractor ( 0 , 0 , option_suffix = '_password' ) password_extractor . configure ( mapping [ section ] ) username_extractor = _username_extractors [ mapping [ section ] . get ( 'username_extractor' , fallback = _line_extractor_name ) ] username_extractor . configure ( mapping [ section ] ) LOGGER . debug ( 'Requesting entry "%s" from pass' , pass_target ) output = subprocess . check_output ( [ 'pass' , 'show' , pass_target ] ) . decode ( 'utf-8' ) lines = output . splitlines ( ) password = password_extractor . get_value ( pass_target , lines ) username = username_extractor . get_value ( pass_target , lines ) if password : print ( 'password={password}' . format ( password = password ) ) if 'username' not in request and username : print ( 'username={username}' . format ( username = username ) ) return LOGGER . warning ( 'No mapping matched' ) sys . exit ( 1 ) | Resolve the given credential request in the provided mapping definition . |
55,260 | def main ( argv : Optional [ Sequence [ str ] ] = None ) -> None : args = parse_arguments ( argv = argv ) if args . logging : logging . basicConfig ( level = logging . DEBUG ) handle_skip ( ) action = args . action request = parse_request ( ) LOGGER . debug ( 'Received action %s with request:\n%s' , action , request ) try : mapping = parse_mapping ( args . mapping ) except Exception as error : LOGGER . critical ( 'Unable to parse mapping file' , exc_info = True ) print ( 'Unable to parse mapping file: {error}' . format ( error = error ) , file = sys . stderr ) sys . exit ( 1 ) if action == 'get' : get_password ( request , mapping ) else : LOGGER . info ( 'Action %s is currently not supported' , action ) sys . exit ( 1 ) | Start the pass - git - helper script . |
55,261 | def configure ( self , config ) : self . _prefix_length = config . getint ( 'skip{suffix}' . format ( suffix = self . _option_suffix ) , fallback = self . _prefix_length ) | Configure the amount of characters to skip . |
55,262 | def insert_metric_changes ( db , metrics , metric_mapping , commit ) : values = [ [ commit . sha , metric_mapping [ metric . name ] , metric . value ] for metric in metrics if metric . value != 0 ] db . executemany ( 'INSERT INTO metric_changes (sha, metric_id, value) VALUES (?, ?, ?)' , values , ) | Insert into the metric_changes tables . |
55,263 | def get_commits ( self , since_sha = None ) : assert self . tempdir cmd = [ 'git' , 'log' , '--first-parent' , '--reverse' , COMMIT_FORMAT ] if since_sha : commits = [ self . get_commit ( since_sha ) ] cmd . append ( '{}..HEAD' . format ( since_sha ) ) else : commits = [ ] cmd . append ( 'HEAD' ) output = cmd_output ( * cmd , cwd = self . tempdir ) for sha , date in chunk_iter ( output . splitlines ( ) , 2 ) : commits . append ( Commit ( sha , int ( date ) ) ) return commits | Returns a list of Commit objects . |
55,264 | def discover ( package , cls_match_func ) : matched_classes = set ( ) for _ , module_name , _ in pkgutil . walk_packages ( package . __path__ , prefix = package . __name__ + '.' , ) : module = __import__ ( module_name , fromlist = [ str ( '__trash' ) ] , level = 0 ) for _ , imported_class in inspect . getmembers ( module , inspect . isclass ) : if imported_class . __module__ != module . __name__ : continue if cls_match_func ( imported_class ) : matched_classes . add ( imported_class ) return matched_classes | Returns a set of classes in the directory matched by cls_match_func |
55,265 | def chunk_iter ( iterable , n ) : assert n > 0 iterable = iter ( iterable ) chunk = tuple ( itertools . islice ( iterable , n ) ) while chunk : yield chunk chunk = tuple ( itertools . islice ( iterable , n ) ) | Yields an iterator in chunks |
55,266 | def get_metric_parsers ( metric_packages = tuple ( ) , include_defaults = True ) : metric_parsers = set ( ) if include_defaults : import git_code_debt . metrics metric_parsers . update ( discover ( git_code_debt . metrics , is_metric_cls ) ) for metric_package in metric_packages : metric_parsers . update ( discover ( metric_package , is_metric_cls ) ) return metric_parsers | Gets all of the metric parsers . |
55,267 | def timeago_template ( locale , index , ago_in ) : try : LOCALE = __import__ ( 'timeago.locales.' + locale ) LOCALE = locale_module ( LOCALE , locale ) except : locale = setting . DEFAULT_LOCALE LOCALE = __import__ ( 'timeago.locales.' + locale ) LOCALE = locale_module ( LOCALE , locale ) if isinstance ( LOCALE , list ) : return LOCALE [ index ] [ ago_in ] else : return LOCALE ( index , ago_in ) | simple locale implement |
55,268 | def parse ( input ) : if isinstance ( input , datetime ) : return input if isinstance ( input , date ) : return date_to_datetime ( input ) if isinstance ( input , time ) : return time_to_datetime ( input ) if isinstance ( input , ( int , float ) ) : return timestamp_to_datetime ( input ) if isinstance ( input , ( str ) ) : return string_to_data_time ( input ) return None | parse input to datetime |
55,269 | def format ( date , now = None , locale = 'en' ) : if not isinstance ( date , timedelta ) : if now is None : now = datetime . now ( ) date = parser . parse ( date ) now = parser . parse ( now ) if date is None : raise ParameterUnvalid ( 'the parameter `date` should be datetime ' '/ timedelta, or datetime formated string.' ) if now is None : raise ParameterUnvalid ( 'the parameter `now` should be datetime, ' 'or datetime formated string.' ) date = now - date diff_seconds = int ( total_seconds ( date ) ) ago_in = 0 if diff_seconds < 0 : ago_in = 1 diff_seconds *= - 1 tmp = 0 i = 0 while i < SEC_ARRAY_LEN : tmp = SEC_ARRAY [ i ] if diff_seconds >= tmp : i += 1 diff_seconds /= tmp else : break diff_seconds = int ( diff_seconds ) i *= 2 if diff_seconds > ( i == 0 and 9 or 1 ) : i += 1 if locale is None : locale = DEFAULT_LOCALE tmp = timeago_template ( locale , i , ago_in ) if hasattr ( tmp , '__call__' ) : tmp = tmp ( diff_seconds ) return '%s' in tmp and tmp % diff_seconds or tmp | the entry method |
55,270 | def _is_parent_of ( parent , child ) : if child . is_partition : return child . partition_slave == parent if child . is_toplevel : return child . drive == parent and child != parent return False | Check whether the first device is the parent of the second device . |
55,271 | def prune_empty_node ( node , seen ) : if node . methods : return False if id ( node ) in seen : return True seen = seen | { id ( node ) } for branch in list ( node . branches ) : if prune_empty_node ( branch , seen ) : node . branches . remove ( branch ) else : return False return True | Recursively remove empty branches and return whether this makes the node itself empty . |
55,272 | async def browse ( self , device ) : device = self . _find_device ( device ) if not device . is_mounted : self . _log . error ( _ ( "not browsing {0}: not mounted" , device ) ) return False if not self . _browser : self . _log . error ( _ ( "not browsing {0}: no program" , device ) ) return False self . _log . debug ( _ ( 'opening {0} on {0.mount_paths[0]}' , device ) ) self . _browser ( device . mount_paths [ 0 ] ) self . _log . info ( _ ( 'opened {0} on {0.mount_paths[0]}' , device ) ) return True | Launch file manager on the mount path of the specified device . |
55,273 | async def terminal ( self , device ) : device = self . _find_device ( device ) if not device . is_mounted : self . _log . error ( _ ( "not opening terminal {0}: not mounted" , device ) ) return False if not self . _terminal : self . _log . error ( _ ( "not opening terminal {0}: no program" , device ) ) return False self . _log . debug ( _ ( 'opening {0} on {0.mount_paths[0]}' , device ) ) self . _terminal ( device . mount_paths [ 0 ] ) self . _log . info ( _ ( 'opened {0} on {0.mount_paths[0]}' , device ) ) return True | Launch terminal on the mount path of the specified device . |
55,274 | async def mount ( self , device ) : device = self . _find_device ( device ) if not self . is_handleable ( device ) or not device . is_filesystem : self . _log . warn ( _ ( 'not mounting {0}: unhandled device' , device ) ) return False if device . is_mounted : self . _log . info ( _ ( 'not mounting {0}: already mounted' , device ) ) return True options = match_config ( self . _config , device , 'options' , None ) kwargs = dict ( options = options ) self . _log . debug ( _ ( 'mounting {0} with {1}' , device , kwargs ) ) self . _check_device_before_mount ( device ) mount_path = await device . mount ( ** kwargs ) self . _log . info ( _ ( 'mounted {0} on {1}' , device , mount_path ) ) return True | Mount the device if not already mounted . |
55,275 | async def unmount ( self , device ) : device = self . _find_device ( device ) if not self . is_handleable ( device ) or not device . is_filesystem : self . _log . warn ( _ ( 'not unmounting {0}: unhandled device' , device ) ) return False if not device . is_mounted : self . _log . info ( _ ( 'not unmounting {0}: not mounted' , device ) ) return True self . _log . debug ( _ ( 'unmounting {0}' , device ) ) await device . unmount ( ) self . _log . info ( _ ( 'unmounted {0}' , device ) ) return True | Unmount a Device if mounted . |
55,276 | async def unlock ( self , device ) : device = self . _find_device ( device ) if not self . is_handleable ( device ) or not device . is_crypto : self . _log . warn ( _ ( 'not unlocking {0}: unhandled device' , device ) ) return False if device . is_unlocked : self . _log . info ( _ ( 'not unlocking {0}: already unlocked' , device ) ) return True if not self . _prompt : self . _log . error ( _ ( 'not unlocking {0}: no password prompt' , device ) ) return False unlocked = await self . _unlock_from_cache ( device ) if unlocked : return True unlocked = await self . _unlock_from_keyfile ( device ) if unlocked : return True options = dict ( allow_keyfile = self . udisks . keyfile_support , allow_cache = self . _cache is not None , cache_hint = self . _cache_hint ) password = await self . _prompt ( device , options ) cache_hint = getattr ( password , 'cache_hint' , self . _cache_hint ) password = getattr ( password , 'password' , password ) if password is None : self . _log . debug ( _ ( 'not unlocking {0}: cancelled by user' , device ) ) return False if isinstance ( password , bytes ) : self . _log . debug ( _ ( 'unlocking {0} using keyfile' , device ) ) await device . unlock_keyfile ( password ) else : self . _log . debug ( _ ( 'unlocking {0}' , device ) ) await device . unlock ( password ) self . _update_cache ( device , password , cache_hint ) self . _log . info ( _ ( 'unlocked {0}' , device ) ) return True | Unlock the device if not already unlocked . |
55,277 | async def lock ( self , device ) : device = self . _find_device ( device ) if not self . is_handleable ( device ) or not device . is_crypto : self . _log . warn ( _ ( 'not locking {0}: unhandled device' , device ) ) return False if not device . is_unlocked : self . _log . info ( _ ( 'not locking {0}: not unlocked' , device ) ) return True self . _log . debug ( _ ( 'locking {0}' , device ) ) await device . lock ( ) self . _log . info ( _ ( 'locked {0}' , device ) ) return True | Lock device if unlocked . |
55,278 | async def add ( self , device , recursive = None ) : device , created = await self . _find_device_losetup ( device ) if created and recursive is False : return device if device . is_filesystem : success = await self . mount ( device ) elif device . is_crypto : success = await self . unlock ( device ) if success and recursive : await self . udisks . _sync ( ) device = self . udisks [ device . object_path ] success = await self . add ( device . luks_cleartext_holder , recursive = True ) elif ( recursive and device . is_partition_table and self . is_handleable ( device ) ) : tasks = [ self . add ( dev , recursive = True ) for dev in self . get_all_handleable ( ) if dev . is_partition and dev . partition_slave == device ] results = await gather ( * tasks ) success = all ( results ) else : self . _log . info ( _ ( 'not adding {0}: unhandled device' , device ) ) return False return success | Mount or unlock the device depending on its type . |
55,279 | async def auto_add ( self , device , recursive = None , automount = True ) : device , created = await self . _find_device_losetup ( device ) if created and recursive is False : return device if device . is_luks_cleartext and self . udisks . version_info >= ( 2 , 7 , 0 ) : await sleep ( 1.5 ) success = True if not self . is_automount ( device , automount ) : pass elif device . is_filesystem : if not device . is_mounted : success = await self . mount ( device ) elif device . is_crypto : if self . _prompt and not device . is_unlocked : success = await self . unlock ( device ) if success and recursive : await self . udisks . _sync ( ) device = self . udisks [ device . object_path ] success = await self . auto_add ( device . luks_cleartext_holder , recursive = True ) elif recursive and device . is_partition_table : tasks = [ self . auto_add ( dev , recursive = True ) for dev in self . get_all_handleable ( ) if dev . is_partition and dev . partition_slave == device ] results = await gather ( * tasks ) success = all ( results ) else : self . _log . debug ( _ ( 'not adding {0}: unhandled device' , device ) ) return success | Automatically attempt to mount or unlock a device but be quiet if the device is not supported . |
55,280 | async def remove ( self , device , force = False , detach = False , eject = False , lock = False ) : device = self . _find_device ( device ) if device . is_filesystem : if device . is_mounted or not device . is_loop or detach is False : success = await self . unmount ( device ) elif device . is_crypto : if force and device . is_unlocked : await self . auto_remove ( device . luks_cleartext_holder , force = True ) success = await self . lock ( device ) elif ( force and ( device . is_partition_table or device . is_drive ) and self . is_handleable ( device ) ) : kw = dict ( force = True , detach = detach , eject = eject , lock = lock ) tasks = [ self . auto_remove ( child , ** kw ) for child in self . get_all_handleable ( ) if _is_parent_of ( device , child ) ] results = await gather ( * tasks ) success = all ( results ) else : self . _log . info ( _ ( 'not removing {0}: unhandled device' , device ) ) success = False if lock and device . is_luks_cleartext : device = device . luks_cleartext_slave if self . is_handleable ( device ) : success = await self . lock ( device ) if eject : success = await self . eject ( device ) if ( detach or detach is None ) and device . is_loop : success = await self . delete ( device , remove = False ) elif detach : success = await self . detach ( device ) return success | Unmount or lock the device depending on device type . |
55,281 | async def eject ( self , device , force = False ) : device = self . _find_device ( device ) if not self . is_handleable ( device ) : self . _log . warn ( _ ( 'not ejecting {0}: unhandled device' ) ) return False drive = device . drive if not ( drive . is_drive and drive . is_ejectable ) : self . _log . warn ( _ ( 'not ejecting {0}: drive not ejectable' , drive ) ) return False if force : await self . auto_remove ( device . root , force = True ) self . _log . debug ( _ ( 'ejecting {0}' , device ) ) await drive . eject ( ) self . _log . info ( _ ( 'ejected {0}' , device ) ) return True | Eject a device after unmounting all its mounted filesystems . |
55,282 | async def detach ( self , device , force = False ) : device = self . _find_device ( device ) if not self . is_handleable ( device ) : self . _log . warn ( _ ( 'not detaching {0}: unhandled device' , device ) ) return False drive = device . root if not drive . is_detachable : self . _log . warn ( _ ( 'not detaching {0}: drive not detachable' , drive ) ) return False if force : await self . auto_remove ( drive , force = True ) self . _log . debug ( _ ( 'detaching {0}' , device ) ) await drive . detach ( ) self . _log . info ( _ ( 'detached {0}' , device ) ) return True | Detach a device after unmounting all its mounted filesystems . |
55,283 | async def add_all ( self , recursive = False ) : tasks = [ self . auto_add ( device , recursive = recursive ) for device in self . get_all_handleable_leaves ( ) ] results = await gather ( * tasks ) success = all ( results ) return success | Add all handleable devices that available at start . |
55,284 | async def remove_all ( self , detach = False , eject = False , lock = False ) : kw = dict ( force = True , detach = detach , eject = eject , lock = lock ) tasks = [ self . auto_remove ( device , ** kw ) for device in self . get_all_handleable_roots ( ) ] results = await gather ( * tasks ) success = all ( results ) return success | Remove all filesystems handleable by udiskie . |
55,285 | async def losetup ( self , image , read_only = True , offset = None , size = None , no_part_scan = None ) : try : device = self . udisks . find ( image ) except FileNotFoundError : pass else : self . _log . info ( _ ( 'not setting up {0}: already up' , device ) ) return device if not os . path . isfile ( image ) : self . _log . error ( _ ( 'not setting up {0}: not a file' , image ) ) return None self . _log . debug ( _ ( 'setting up {0}' , image ) ) fd = os . open ( image , os . O_RDONLY ) device = await self . udisks . loop_setup ( fd , { 'offset' : offset , 'size' : size , 'read-only' : read_only , 'no-part-scan' : no_part_scan , } ) self . _log . info ( _ ( 'set up {0} as {1}' , image , device . device_presentation ) ) return device | Setup a loop device . |
55,286 | async def delete ( self , device , remove = True ) : device = self . _find_device ( device ) if not self . is_handleable ( device ) or not device . is_loop : self . _log . warn ( _ ( 'not deleting {0}: unhandled device' , device ) ) return False if remove : await self . auto_remove ( device , force = True ) self . _log . debug ( _ ( 'deleting {0}' , device ) ) await device . delete ( ) self . _log . info ( _ ( 'deleted {0}' , device ) ) return True | Detach the loop device . |
55,287 | def is_handleable ( self , device ) : ignored = self . _ignore_device ( device ) if ignored is None and device is not None : return self . is_handleable ( _get_parent ( device ) ) return not ignored | Check whether this device should be handled by udiskie . |
55,288 | def is_addable ( self , device , automount = True ) : if not self . is_automount ( device , automount ) : return False if device . is_filesystem : return not device . is_mounted if device . is_crypto : return self . _prompt and not device . is_unlocked if device . is_partition_table : return any ( self . is_addable ( dev ) for dev in self . get_all_handleable ( ) if dev . partition_slave == device ) return False | Check if device can be added with auto_add . |
55,289 | def is_removable ( self , device ) : if not self . is_handleable ( device ) : return False if device . is_filesystem : return device . is_mounted if device . is_crypto : return device . is_unlocked if device . is_partition_table or device . is_drive : return any ( self . is_removable ( dev ) for dev in self . get_all_handleable ( ) if _is_parent_of ( device , dev ) ) return False | Check if device can be removed with auto_remove . |
55,290 | def get_all_handleable ( self ) : nodes = self . get_device_tree ( ) return [ node . device for node in sorted ( nodes . values ( ) , key = DevNode . _sort_key ) if not node . ignored and node . device ] | Get list of all known handleable devices . |
55,291 | def get_all_handleable_roots ( self ) : nodes = self . get_device_tree ( ) return [ node . device for node in sorted ( nodes . values ( ) , key = DevNode . _sort_key ) if not node . ignored and node . device and ( node . root == '/' or nodes [ node . root ] . ignored ) ] | Get list of all handleable devices return only those that represent root nodes within the filtered device tree . |
55,292 | def get_all_handleable_leaves ( self ) : nodes = self . get_device_tree ( ) return [ node . device for node in sorted ( nodes . values ( ) , key = DevNode . _sort_key ) if not node . ignored and node . device and all ( child . ignored for child in node . children ) ] | Get list of all handleable devices return only those that represent leaf nodes within the filtered device tree . |
55,293 | def get_device_tree ( self ) : root = DevNode ( None , None , [ ] , None ) device_nodes = { dev . object_path : DevNode ( dev , dev . parent_object_path , [ ] , self . _ignore_device ( dev ) ) for dev in self . udisks } for node in device_nodes . values ( ) : device_nodes . get ( node . root , root ) . children . append ( node ) device_nodes [ '/' ] = root for node in device_nodes . values ( ) : node . children . sort ( key = DevNode . _sort_key ) def propagate_ignored ( node ) : for child in node . children : if child . ignored is None : child . ignored = node . ignored propagate_ignored ( child ) propagate_ignored ( root ) return device_nodes | Get a tree of all devices . |
55,294 | def detect ( self , root_device = '/' ) : root = Device ( None , [ ] , None , "" , [ ] ) device_nodes = dict ( map ( self . _device_node , self . _mounter . get_all_handleable ( ) ) ) for node in device_nodes . values ( ) : device_nodes . get ( node . root , root ) . branches . append ( node ) device_nodes [ '/' ] = root for node in device_nodes . values ( ) : node . branches . sort ( key = lambda node : node . label ) return device_nodes [ root_device ] | Detect all currently known devices . |
55,295 | def _get_device_methods ( self , device ) : if device . is_filesystem : if device . is_mounted : if self . _mounter . _browser : yield 'browse' if self . _mounter . _terminal : yield 'terminal' yield 'unmount' else : yield 'mount' elif device . is_crypto : if device . is_unlocked : yield 'lock' else : yield 'unlock' cache = self . _mounter . _cache if cache and device in cache : yield 'forget_password' if device . is_ejectable and device . has_media : yield 'eject' if device . is_detachable : yield 'detach' if device . is_loop : yield 'delete' | Return an iterable over all available methods the device has . |
55,296 | def _device_node ( self , device ) : label = device . ui_label dev_label = device . ui_device_label methods = [ Action ( method , device , self . _labels [ method ] . format ( label , dev_label ) , partial ( self . _actions [ method ] , device ) ) for method in self . _get_device_methods ( device ) ] root = device . parent_object_path return device . object_path , Device ( root , [ ] , device , dev_label , methods ) | Create an empty menu node for the specified device . |
55,297 | def samefile ( a : str , b : str ) -> bool : try : return os . path . samefile ( a , b ) except OSError : return os . path . normpath ( a ) == os . path . normpath ( b ) | Check if two pathes represent the same file . |
55,298 | def sameuuid ( a : str , b : str ) -> bool : return a and b and a . lower ( ) == b . lower ( ) | Compare two UUIDs . |
55,299 | def extend ( a : dict , b : dict ) -> dict : res = a . copy ( ) res . update ( b ) return res | Merge two dicts and return a new dict . Much like subclassing works . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.