idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
6,400
def chunks(iterable, chunksize, cast=tuple):
    """Yield items from *iterable* in chunks of at most *chunksize*.

    Each chunk is materialized with *cast* (tuple by default).  The last
    chunk may be shorter than *chunksize* when the iterable runs out.

    :param iterable: any iterable to split into chunks.
    :param chunksize: maximum number of items per chunk.
    :param cast: callable applied to each chunk iterator.
    """
    it = iter(iterable)
    while True:
        try:
            # Pull the first item of the chunk eagerly so exhaustion is
            # detected here; letting next() raise StopIteration inside a
            # generator becomes RuntimeError under PEP 479 (Python 3.7+).
            first = next(it)
        except StopIteration:
            return
        yield cast(itertools.chain([first],
                                   itertools.islice(it, chunksize - 1)))
Yield items from an iterable in chunks of at most the given size.
6,401
def window(iterable, size=2, cast=tuple):
    """Yield rolling windows of *size* consecutive items from *iterable*.

    The window advances one item at a time.  Each window is converted
    with *cast*; when *cast* is falsy the same internal deque object is
    yielded on every step instead.
    """
    it = iter(iterable)
    buf = deque(itertools.islice(it, size), size)
    if not cast:
        yield buf
        for item in it:
            buf.append(item)
            yield buf
        return
    yield cast(buf)
    for item in it:
        buf.append(item)
        yield cast(buf)
Yields items in bunches of a given size, rolling only one item in and out at a time when iterating.
6,402
def at_index(iterable, index):
    """Return the item at *index* in *iterable* or raise IndexError.

    Negative indexes are supported by buffering only the last
    ``abs(index)`` items in a deque; positive indexes consume the
    iterable lazily via islice.

    :raises IndexError: when the iterable is too short.
    """
    try:
        if index < 0:
            # The leftmost of the last abs(index) items is exactly the
            # item at the requested negative index.
            return deque(iterable, maxlen=abs(index)).popleft()
        return next(itertools.islice(iterable, index, index + 1))
    except (StopIteration, IndexError) as e:
        # Native exception chaining replaces the legacy raise_from() shim.
        raise IndexError('Index "%d" out of range' % index) from e
Return the item at the index of this iterable or raises IndexError .
6,403
def iterslice(iterable, start=0, stop=None, step=1):
    """Like itertools.islice, but *start* and *stop* also accept callables.

    A non-int *start* is treated as a "starts_when" marker and a non-int
    truthy *stop* as a "stops_when" marker (both helpers are defined
    elsewhere in this module).

    :raises ValueError: if *step* is negative.
    """
    if step < 0:
        raise ValueError("The step can not be negative: '%s' given" % step)
    if not isinstance(start, int):
        # start is a predicate/marker rather than a numeric index.
        if not isinstance(stop, int) and stop:
            return stops_when(starts_when(iterable, start), stop)
        return starts_when(itertools.islice(iterable, None, stop, step), start)
    if not isinstance(stop, int) and stop:
        return stops_when(itertools.islice(iterable, start, None, step), stop)
    return itertools.islice(iterable, start, stop, step)
Like itertools.islice, but accepts ints and callables for start and stop.
6,404
def firsts(iterable, items=1, default=None):
    """Lazily yield the first *items* items from *iterable*, padding the
    output with *default* when the iterable is too short.

    :raises ValueError: if *items* is negative or not usable as an int.
    """
    try:
        items = int(items)
    except (ValueError, TypeError):
        raise ValueError("items should be usable as an int but is currently "
                         "'{}' of type '{}'".format(items, type(items)))
    if items < 0:
        raise ValueError(ww.f("items is {items} but should "
                              "be greater than 0. If you wish to get the last "
                              "items, use the lasts() function."))
    yielded = 0
    for item in itertools.islice(iterable, items):
        yield item
        yielded += 1
    # Pad with `default` for the missing items.  The original
    # `range(items - (i + 1))` padded one item short when the iterable
    # was completely empty (the loop index never advanced past 0).
    for _ in range(items - yielded):
        yield default
Lazily return the first x items from this iterable or default .
6,405
def lasts(iterable, items=1, default=None):
    """Lazily yield the last *items* items of *iterable*, left-padding
    with *default* when the iterable holds fewer than *items* items.
    """
    tail = deque(iterable, maxlen=items)
    missing = items - len(tail)
    while missing > 0:
        yield default
        missing -= 1
    yield from tail
Lazily return the last x items from this iterable or default .
6,406
def is_legacy_server():
    """Determine the execution mode by querying the server version.

    Returns True when the server reports version v4.20181215 or older,
    meaning the legacy API behaviour should be used.
    """
    with Session() as session:
        ret = session.Kernel.hello()
        bai_version = ret['version']
        # Lexicographic string comparison -- assumes the server keeps the
        # zero-padded vX.YYYYMMDD version format (TODO confirm).
        legacy = True if bai_version <= 'v4.20181215' else False
        return legacy
Determine execution mode .
6,407
def clear(self):
    """Empty the whole cache and reset bookkeeping and statistics.

    Marks every node of the internal doubly linked list as empty,
    rebuilds the list as a single self-referencing sentinel node, clears
    the hash table, and zeroes the hit/miss/remove counters.
    """
    for node in self.dli():
        node.empty = True
        node.key = None
        node.value = None
    # Fresh circular list containing only the sentinel head node.
    self.head = _dlnode()
    self.head.next = self.head
    self.head.prev = self.head
    self.listSize = 1
    self.table.clear()
    self.hit_cnt = 0
    self.miss_cnt = 0
    self.remove_cnt = 0
Clear all the cache and release the memory.
6,408
def pop(self, key, default=None):
    """Remove *key* from the cache and return its stored value.

    Returns *default* when the key is missing.  Deletion is best-effort:
    an entry that disappears between the lookup and the delete is
    tolerated.
    """
    node = self.get(key, None)
    # `is None` rather than `== None`: identity check, and it avoids
    # invoking a stored value's custom __eq__.
    if node is None:
        value = default
    else:
        value = node
    try:
        del self[key]
    except KeyError:
        # Narrowed from a bare `except:` -- only "already removed" is an
        # expected failure here; anything else should propagate.
        return value
    return value
Delete the item
6,409
async def complete(self, code: str, opts: dict = None) -> Iterable[str]:
    """Get auto-completion candidates for *code*, as if the user pressed
    Tab right after the code in an IDE.

    :param code: the source text to complete.
    :param opts: optional cursor context with 'row', 'col', 'line' and
        'post' keys.
    :return: the JSON-decoded response from the kernel.
    """
    opts = {} if opts is None else opts
    params = {}
    if self.owner_access_key:
        params['owner_access_key'] = self.owner_access_key
    rqst = Request(self.session,
                   'POST', '/kernel/{}/complete'.format(self.kernel_id),
                   params=params)
    rqst.set_json({
        'code': code,
        'options': {
            'row': int(opts.get('row', 0)),
            'col': int(opts.get('col', 0)),
            'line': opts.get('line', ''),
            'post': opts.get('post', ''),
        },
    })
    async with rqst.fetch() as resp:
        return await resp.json()
Gets the auto - completion candidates from the given code string as if a user has pressed the tab key just after the code in IDEs .
6,410
async def get_info(self):
    """Retrieve brief information about the compute session.

    :return: the JSON-decoded kernel description from the server.
    """
    params = {}
    if self.owner_access_key:
        params['owner_access_key'] = self.owner_access_key
    rqst = Request(self.session,
                   'GET', '/kernel/{}'.format(self.kernel_id),
                   params=params)
    async with rqst.fetch() as resp:
        return await resp.json()
Retrieves a brief information about the compute session .
6,411
async def upload(self, files: Sequence[Union[str, Path]],
                 basedir: Union[str, Path] = None,
                 show_progress: bool = False):
    """Upload the given files to the compute session.

    Paths are resolved relative to *basedir* (current working directory
    when None); files outside the base directory are rejected.  Upload
    progress is reported via tqdm when *show_progress* is set.

    :raises ValueError: if a file lies outside the base directory.
    :return: the raw fetch response object.
    """
    params = {}
    if self.owner_access_key:
        params['owner_access_key'] = self.owner_access_key
    base_path = (Path.cwd() if basedir is None
                 else Path(basedir).resolve())
    files = [Path(file).resolve() for file in files]
    total_size = 0
    for file_path in files:
        total_size += file_path.stat().st_size
    tqdm_obj = tqdm(desc='Uploading files',
                    unit='bytes', unit_scale=True,
                    total=total_size,
                    disable=not show_progress)
    with tqdm_obj:
        attachments = []
        for file_path in files:
            try:
                attachments.append(AttachedFile(
                    str(file_path.relative_to(base_path)),
                    ProgressReportingReader(str(file_path),
                                            tqdm_instance=tqdm_obj),
                    'application/octet-stream',
                ))
            except ValueError:
                # relative_to() raised: the file escapes the base dir.
                msg = 'File "{0}" is outside of the base directory "{1}".' \
                      .format(file_path, base_path)
                raise ValueError(msg) from None
        rqst = Request(self.session,
                       'POST', '/kernel/{}/upload'.format(self.kernel_id),
                       params=params)
        rqst.attach_files(attachments)
        async with rqst.fetch() as resp:
            return resp
Uploads the given list of files to the compute session . You may refer them in the batch - mode execution or from the code executed in the server afterwards .
6,412
async def download(self, files: Sequence[Union[str, Path]],
                   dest: Union[str, Path] = '.',
                   show_progress: bool = False):
    """Download the given files from the compute session into *dest*.

    The server streams a multipart body whose parts are tar archives;
    each archive is spooled to a temp file, extracted into *dest*, then
    deleted.  Progress is reported via tqdm when *show_progress* is set.

    :return: dict with 'file_names', the names extracted from the last
        completed archive.
    """
    params = {}
    if self.owner_access_key:
        params['owner_access_key'] = self.owner_access_key
    rqst = Request(self.session,
                   'GET', '/kernel/{}/download'.format(self.kernel_id),
                   params=params)
    rqst.set_json({
        'files': [*map(str, files)],
    })
    async with rqst.fetch() as resp:
        chunk_size = 1 * 1024
        file_names = None
        tqdm_obj = tqdm(desc='Downloading files',
                        unit='bytes', unit_scale=True,
                        total=resp.content.total_bytes,
                        disable=not show_progress)
        with tqdm_obj as pbar:
            fp = None
            while True:
                chunk = await resp.aread(chunk_size)
                if not chunk:
                    break
                pbar.update(len(chunk))
                # NOTE(review): splitting raw chunks on b'\r\n' assumes
                # multipart boundaries never straddle a chunk -- confirm.
                for part in chunk.split(b'\r\n'):
                    if part.startswith(b'--'):
                        # Multipart boundary: finish the previous archive.
                        if fp:
                            fp.close()
                            with tarfile.open(fp.name) as tarf:
                                tarf.extractall(path=dest)
                                file_names = tarf.getnames()
                            os.unlink(fp.name)
                        fp = tempfile.NamedTemporaryFile(suffix='.tar',
                                                         delete=False)
                    elif part.startswith(b'Content-') or part == b'':
                        continue
                    else:
                        fp.write(part)
            if fp:
                fp.close()
                os.unlink(fp.name)
    result = {'file_names': file_names}
    return result
Downloads the given list of files from the compute session .
6,413
async def list_files(self, path: Union[str, Path] = '.'):
    """List the files in *path* inside the compute session container.

    :return: the JSON-decoded file listing from the server.
    """
    params = {}
    if self.owner_access_key:
        params['owner_access_key'] = self.owner_access_key
    rqst = Request(self.session,
                   'GET', '/kernel/{}/files'.format(self.kernel_id),
                   params=params)
    rqst.set_json({
        'path': path,
    })
    async with rqst.fetch() as resp:
        return await resp.json()
Gets the list of files in the given path inside the compute session container .
6,414
def _message(self, mqttc, userdata, msg):
    """paho-mqtt on_message callback for PUBLISH messages from Polyglot.

    Decodes the JSON payload, drops messages not addressed to
    'polyglot', then routes each top-level key to its handler:
    config/connected/stop directly, known input commands via
    self.input(), anything else is logged as an error.
    """
    try:
        inputCmds = ['query', 'command', 'result', 'status',
                     'shortPoll', 'longPoll', 'delete']
        parsed_msg = json.loads(msg.payload.decode('utf-8'))
        if 'node' in parsed_msg:
            if parsed_msg['node'] != 'polyglot':
                return
            del parsed_msg['node']
        for key in parsed_msg:
            if key == 'config':
                self.inConfig(parsed_msg[key])
            elif key == 'connected':
                self.polyglotConnected = parsed_msg[key]
            elif key == 'stop':
                LOGGER.debug('Received stop from Polyglot... Shutting Down.')
                self.stop()
            elif key in inputCmds:
                # Pass the whole message through: handlers need the payload.
                self.input(parsed_msg)
            else:
                LOGGER.error('Invalid command received in message from Polyglot: {}'.format(key))
    except (ValueError) as err:
        # json.loads failures land here.
        LOGGER.error('MQTT Received Payload Error: {}'.format(err),
                     exc_info=True)
The callback for when a PUBLISH message is received from the server .
6,415
def _disconnect(self, mqttc, userdata, rc):
    """paho-mqtt on_disconnect callback.

    On an unexpected disconnect (rc != 0) an immediate reconnect is
    attempted; a graceful disconnect (rc == 0) is only logged.
    """
    self.connected = False
    if rc != 0:
        LOGGER.info("MQTT Unexpected disconnection. Trying reconnect.")
        try:
            self._mqttc.reconnect()
        except Exception as ex:
            # Fixed typo in the logged message ("occured" -> "occurred"),
            # matching the wording used by _startMqtt().
            template = "An exception of type {0} occurred. Arguments:\n{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            LOGGER.error("MQTT Connection error: " + message)
    else:
        LOGGER.info("MQTT Graceful disconnection.")
The callback for when a DISCONNECT occurs .
6,416
def _startMqtt(self):
    """Connect to the MQTT broker and run the client's network loop.

    Blocks in loop_forever(); connection failures are logged, not
    re-raised.
    """
    LOGGER.info('Connecting to MQTT... {}:{}'.format(self._server, self._port))
    try:
        # 10-second keepalive; connect_async pairs with loop_forever.
        self._mqttc.connect_async('{}'.format(self._server),
                                  int(self._port), 10)
        self._mqttc.loop_forever()
    except Exception as ex:
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        LOGGER.error("MQTT Connection error: {}".format(message),
                     exc_info=True)
The client start method . Starts the thread for the MQTT Client and publishes the connected message .
6,417
def stop(self):
    """Stop the MQTT client.

    When connected, publishes a retained "disconnected" message, stops
    the network loop and disconnects.  Registered stop observers are
    then notified.
    """
    if self.connected:
        LOGGER.info('Disconnecting from MQTT... {}:{}'.format(self._server, self._port))
        self._mqttc.publish(self.topicSelfConnection,
                            json.dumps({'node': self.profileNum,
                                        'connected': False}),
                            retain=True)
        self._mqttc.loop_stop()
        self._mqttc.disconnect()
    try:
        for watcher in self.__stopObservers:
            watcher()
    except KeyError as e:
        # NOTE(review): message says "gotConfig" -- looks copy-pasted
        # from inConfig(); confirm intended wording.
        LOGGER.exception('KeyError in gotConfig: {}'.format(e),
                         exc_info=True)
The client stop method . If the client is currently connected stop the thread and disconnect . Publish the disconnected message if clean shutdown .
6,418
def addNode(self, node):
    """Ask Polyglot to add *node* to the NodeServer.

    Serializes the node's address, name, definition id, primary,
    drivers and hint into an 'addnode' message and sends it.
    """
    LOGGER.info('Adding node {}({})'.format(node.name, node.address))
    message = {
        'addnode': {
            'nodes': [{
                'address': node.address,
                'name': node.name,
                'node_def_id': node.id,
                'primary': node.primary,
                'drivers': node.drivers,
                'hint': node.hint
            }]
        }
    }
    self.send(message)
Add a node to the NodeServer
6,419
def delNode(self, address):
    """Ask Polyglot to delete the node at *address* from the NodeServer."""
    LOGGER.info('Removing node {}'.format(address))
    message = {
        'removenode': {
            'address': address
        }
    }
    self.send(message)
Delete a node from the NodeServer
6,420
def getNode(self, address):
    """Return the config dict of the node at *address*, or False.

    False is also returned (after logging) when the config has not been
    received yet, i.e. any expected key is missing.
    """
    try:
        for candidate in self.config['nodes']:
            if candidate['address'] == address:
                return candidate
        return False
    except KeyError:
        LOGGER.error('Usually means we have not received the config yet.',
                     exc_info=True)
        return False
Get Node by Address of existing nodes .
6,421
def inConfig(self, config):
    """Store the config received from Polyglot and notify observers.

    Saves *config* on the interface, caches the ISY version, calls every
    registered config observer and re-sends custom config docs.
    """
    self.config = config
    self.isyVersion = config['isyVersion']
    try:
        for watcher in self.__configObservers:
            watcher(config)
        self.send_custom_config_docs()
    except KeyError as e:
        LOGGER.error('KeyError in gotConfig: {}'.format(e), exc_info=True)
Save incoming config received from Polyglot to Interface . config and then do any functions that are waiting on the config to be received .
6,422
def delNode(self, address):
    """Delete the node at *address*.

    Removes it from the local node map when present, then forwards the
    delete to Polyglot regardless, so nodes unknown to our config can
    still be removed (used for normalization).
    """
    if address in self.nodes:
        del self.nodes[address]
    self.poly.delNode(address)
Just send it along if requested; should be able to delete the node even if it isn't in our config anywhere. Usually used for normalization.
6,423
async def query(cls,
                query: str,
                variables: Optional[Mapping[str, Any]] = None,
                ) -> Any:
    """Send a GraphQL query to the admin endpoint and return the
    JSON-decoded response.

    :param query: the GraphQL query string.
    :param variables: optional mapping of query variables.
    """
    gql_query = {
        'query': query,
        'variables': variables if variables else {},
    }
    rqst = Request(cls.session, 'POST', '/admin/graphql')
    rqst.set_json(gql_query)
    async with rqst.fetch() as resp:
        return await resp.json()
Sends the GraphQL query and returns the response .
6,424
def get_readable_time_string(seconds):
    """Return a human readable duration such as "1 Hour 5 Minutes" for a
    number of seconds; the empty string for zero."""
    seconds = int(seconds)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = []
    for amount, singular in ((days, "Day"), (hours, "Hour"),
                             (minutes, "Minute"), (seconds, "Second")):
        if amount > 0:
            label = singular if amount == 1 else singular + "s"
            parts.append("%d %s" % (amount, label))
    return " ".join(parts)
Returns human readable string from number of seconds
6,425
def get_rate_limits(response):
    """Build a list of rate-limit dicts from a response's
    X-RateLimit-* headers.

    Each comma-separated period produces one dict with the readable
    period, raw period seconds, request limit, remaining requests, the
    reset datetime and the seconds / readable time until reset.
    """
    periods = response.headers['X-RateLimit-Period']
    if not periods:
        return []
    rate_limits = []
    periods = periods.split(',')
    limits = response.headers['X-RateLimit-Limit'].split(',')
    remaining = response.headers['X-RateLimit-Remaining'].split(',')
    reset = response.headers['X-RateLimit-Reset'].split(',')
    for idx, period in enumerate(periods):
        rate_limit = {}
        limit_period = get_readable_time_string(period)
        rate_limit["period"] = limit_period
        rate_limit["period_seconds"] = period
        rate_limit["request_limit"] = limits[idx]
        rate_limit["requests_remaining"] = remaining[idx]
        reset_datetime = get_datetime_from_timestamp(reset[idx])
        rate_limit["reset"] = reset_datetime
        right_now = datetime.now()
        if (reset_datetime is not None) and (right_now < reset_datetime):
            # +1 rounds the sub-second remainder up to a whole second.
            seconds_remaining = (reset_datetime - right_now).seconds + 1
        else:
            seconds_remaining = 0
        rate_limit["reset_in_seconds"] = seconds_remaining
        rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
        rate_limits.append(rate_limit)
    return rate_limits
Returns a list of rate limit information from a given response's headers.
6,426
def swap(self):
    """Return a new mapping of the same class with keys and values
    exchanged."""
    inverted = ((value, key) for key, value in self.items())
    return self.__class__(inverted)
Swap key and value
6,427
def fromkeys(cls, iterable, value=None):
    """Build a new *cls* mapping from the keys in *iterable*.

    When *value* is callable it is invoked with each key to produce that
    key's value; otherwise it is used verbatim for every key.
    """
    if callable(value):
        return cls((key, value(key)) for key in iterable)
    return cls(dict.fromkeys(iterable, value))
Create a new dict from the given keys, using the value (or, if callable, the result of calling it on each key) as each key's value.
6,428
def get_excel_workbook(api_data, result_info_key, identifier_keys):
    """Generate an openpyxl Workbook from Analytics API data.

    Pops the result-info and optional 'meta' entries off each item,
    unwraps every endpoint's 'result' value, then delegates sheet
    creation to write_worksheets().
    """
    cleaned_data = []
    for item_data in api_data:
        result_info = item_data.pop(result_info_key, {})
        cleaned_item_data = {}
        if 'meta' in item_data:
            meta = item_data.pop('meta')
            cleaned_item_data['meta'] = meta
        for key in item_data:
            cleaned_item_data[key] = item_data[key]['result']
        cleaned_item_data[result_info_key] = result_info
        cleaned_data.append(cleaned_item_data)
    # Deep copy so the worksheet writers may mutate rows freely.
    data_list = copy.deepcopy(cleaned_data)
    workbook = openpyxl.Workbook()
    write_worksheets(workbook, data_list, result_info_key, identifier_keys)
    return workbook
Generates an Excel workbook object given api_data returned by the Analytics API
6,429
def write_worksheets(workbook, data_list, result_info_key, identifier_keys):
    """Write one worksheet per endpoint key into *workbook*.

    'property/nod' gets dedicated multi-sheet handling; every other key
    becomes a single sheet.  The default sheet openpyxl creates is
    removed at the end.
    """
    worksheet_keys = get_worksheet_keys(data_list[0], result_info_key)
    for key in worksheet_keys:
        title = key.split('/')[1]
        title = utilities.convert_snake_to_title_case(title)
        title = KEY_TO_WORKSHEET_MAP.get(title, title)
        if key == 'property/nod':
            create_property_nod_worksheets(workbook, data_list,
                                           result_info_key, identifier_keys)
        else:
            # Excel limits sheet titles to 31 characters.
            worksheet = workbook.create_sheet(title=title[:31])
            processed_data = process_data(key, data_list,
                                          result_info_key, identifier_keys)
            write_data(worksheet, processed_data)
    # Drop the default empty sheet created with the workbook.
    workbook.remove_sheet(workbook.active)
Writes rest of the worksheets to workbook .
6,430
def get_keys(data_list, leading_columns=LEADING_COLUMNS):
    """Return every key appearing across the dicts in *data_list*, with
    the *leading_columns* keys first (in their given order) followed by
    the remaining keys sorted alphabetically."""
    every_key = set().union(*(list(d.keys()) for d in data_list))
    front = [column for column in leading_columns if column in every_key]
    remainder = every_key.difference(front)
    return front + sorted(remainder)
Gets all possible keys from a list of dicts sorting by leading_columns first
6,431
def write_data(worksheet, data):
    """Write *data* into *worksheet*.

    Accepts a single dict, a list of dicts (header row + value rows), a
    list of lists (plain rows) or a list of scalars (one value per row).
    Does nothing for empty data.
    """
    if not data:
        return
    if isinstance(data, list):
        rows = data
    else:
        rows = [data]
    if isinstance(rows[0], dict):
        keys = get_keys(rows)
        worksheet.append([utilities.convert_snake_to_title_case(key)
                          for key in keys])
        for row in rows:
            values = [get_value_from_row(row, key) for key in keys]
            worksheet.append(values)
    elif isinstance(rows[0], list):
        for row in rows:
            values = [utilities.normalize_cell_value(value) for value in row]
            worksheet.append(values)
    else:
        # Scalar rows: one value per worksheet row.
        for row in rows:
            worksheet.append([utilities.normalize_cell_value(row)])
Writes data into worksheet .
6,432
def process_data(key, data_list, result_info_key, identifier_keys):
    """Pull the endpoint *key*'s data out of each item in *data_list*
    and flatten it into an Excel-friendly list of row dicts.

    Each endpoint has its own flattening rule; identifier fields from
    the item are merged into every produced row.
    """
    master_data = []
    for item_data in data_list:
        data = item_data[key]
        if data is None:
            current_item_data = {}
        else:
            if key == 'property/value':
                current_item_data = data['value']
            elif key == 'property/details':
                top_level_keys = ['property', 'assessment']
                current_item_data = flatten_top_level_keys(data, top_level_keys)
            elif key == 'property/school':
                # One row per school, tagged with its type and address.
                current_item_data = data['school']
                school_list = []
                for school_type_key in current_item_data:
                    schools = current_item_data[school_type_key]
                    for school in schools:
                        school['school_type'] = school_type_key
                        school['school_address'] = school['address']
                        school['school_zipcode'] = school['zipcode']
                        school_list.append(school)
                current_item_data = school_list
            elif key == 'property/value_forecast':
                current_item_data = {}
                for month_key in data:
                    current_item_data[month_key] = data[month_key]['value']
            elif key in ['property/value_within_block',
                         'property/rental_value_within_block']:
                current_item_data = flatten_top_level_keys(data, [
                    'housecanary_value_percentile_range',
                    'housecanary_value_sqft_percentile_range',
                    'client_value_percentile_range',
                    'client_value_sqft_percentile_range'
                ])
            elif key in ['property/zip_details', 'zip/details']:
                top_level_keys = ['multi_family', 'single_family']
                current_item_data = flatten_top_level_keys(data, top_level_keys)
            else:
                current_item_data = data
        if isinstance(current_item_data, dict):
            _set_identifier_fields(current_item_data, item_data,
                                   result_info_key, identifier_keys)
            master_data.append(current_item_data)
        else:
            # A list of rows (e.g. schools): tag each one individually.
            for item in current_item_data:
                _set_identifier_fields(item, item_data,
                                       result_info_key, identifier_keys)
            master_data.extend(current_item_data)
    return master_data
Given a key as the endpoint name pulls the data for that endpoint out of the data_list for each address processes the data into a more excel - friendly format and returns that data .
6,433
def check_database_connected(app_configs, **kwargs):
    """A Django system check verifying that the default database backend
    is reachable and usable.

    :return: list of ``checks.Error`` objects (empty when healthy).
    """
    errors = []
    try:
        connection.ensure_connection()
    except OperationalError as e:
        msg = 'Could not connect to database: {!s}'.format(e)
        errors.append(checks.Error(msg,
                                   id=health.ERROR_CANNOT_CONNECT_DATABASE))
    except ImproperlyConfigured as e:
        # Fixed typo in the user-visible message ("Datbase" -> "Database").
        msg = 'Database misconfigured: "{!s}"'.format(e)
        errors.append(checks.Error(msg,
                                   id=health.ERROR_MISCONFIGURED_DATABASE))
    else:
        if not connection.is_usable():
            errors.append(checks.Error('Database connection is not usable',
                                       id=health.ERROR_UNUSABLE_DATABASE))
    return errors
A Django check to see if connecting to the configured default database backend succeeds .
6,434
def check_migrations_applied(app_configs, **kwargs):
    """A Django system check reporting unapplied migrations.

    Returns a single Info result when the database is unreachable, and
    one Warning per unapplied migration otherwise.
    """
    from django.db.migrations.loader import MigrationLoader
    errors = []
    try:
        loader = MigrationLoader(connection, ignore_no_migrations=True)
    except (ImproperlyConfigured, ProgrammingError, OperationalError):
        msg = "Can't connect to database to check migrations"
        return [checks.Info(msg, id=health.INFO_CANT_CHECK_MIGRATIONS)]
    if app_configs:
        app_labels = [app.label for app in app_configs]
    else:
        app_labels = loader.migrated_apps
    for node, migration in loader.graph.nodes.items():
        if migration.app_label not in app_labels:
            continue
        if node not in loader.applied_migrations:
            msg = 'Unapplied migration {}'.format(migration)
            errors.append(checks.Warning(msg,
                                         id=health.WARNING_UNAPPLIED_MIGRATION))
    return errors
A Django check to see if all migrations have been applied correctly .
6,435
def check_redis_connected(app_configs, **kwargs):
    """A Django check that opens the default django_redis connection and
    verifies Redis responds to PING.

    :return: list of ``checks.Error`` objects (empty when healthy).
    """
    import redis
    from django_redis import get_redis_connection
    errors = []
    try:
        connection = get_redis_connection('default')
    except redis.ConnectionError as e:
        msg = 'Could not connect to redis: {!s}'.format(e)
        errors.append(checks.Error(msg,
                                   id=health.ERROR_CANNOT_CONNECT_REDIS))
    except NotImplementedError as e:
        msg = 'Redis client not available: {!s}'.format(e)
        errors.append(checks.Error(msg,
                                   id=health.ERROR_MISSING_REDIS_CLIENT))
    except ImproperlyConfigured as e:
        msg = 'Redis misconfigured: "{!s}"'.format(e)
        errors.append(checks.Error(msg,
                                   id=health.ERROR_MISCONFIGURED_REDIS))
    else:
        result = connection.ping()
        if not result:
            msg = 'Redis ping failed'
            errors.append(checks.Error(msg,
                                       id=health.ERROR_REDIS_PING_FAILED))
    return errors
A Django check to connect to the default redis connection using django_redis . get_redis_connection and see if Redis responds to a PING command .
6,436
def get_dev_alarms(auth, url, devid=None, devip=None):
    """Issue a RESTful call to IMC for the current alarms of one device.

    :param auth: requests auth object for the IMC server.
    :param url: base URL of the IMC server.
    :param devid: device id; ignored when *devip* is given.
    :param devip: device IP, resolved to an id via get_dev_details().
    :return: list of alarm dicts, or a status/error string.
    """
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    f_url = url + "/imcrs/fault/alarm?operatorName=admin&deviceId=" + str(devid) + "&desc=false"
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            dev_alarm = (json.loads(response.text))
            if 'alarm' in dev_alarm:
                return dev_alarm['alarm']
            else:
                return "Device has no alarms"
        # NOTE(review): non-200 responses fall through and return None
        # implicitly -- confirm whether callers rely on that.
    except requests.exceptions.RequestException as error:
        # Fixed typo in the returned message ("occured" -> "occurred").
        return "Error:\n" + str(error) + ' get_dev_alarms: An Error has occurred'
function takes the devId of a specific device and issues a RESTFUL call to get the current alarms for the target device .
6,437
def list():
    """CLI command: list the virtual folders owned by or shared with the
    current user, as a table of name/id/owner/permission."""
    fields = [
        ('Name', 'name'),
        ('ID', 'id'),
        ('Owner', 'is_owner'),
        ('Permission', 'permission'),
    ]
    with Session() as session:
        try:
            resp = session.VFolder.list()
            if not resp:
                print('There is no virtual folders created yet.')
                return
            rows = (tuple(vf[key] for _, key in fields) for vf in resp)
            hdrs = (display_name for display_name, _ in fields)
            print(tabulate(rows, hdrs))
        except Exception as e:
            print_error(e)
            sys.exit(1)
List virtual folders that belong to the current user.
6,438
def list_hosts():
    """CLI command: print the default and allowed vfolder hosts
    accessible to the current user."""
    with Session() as session:
        try:
            resp = session.VFolder.list_hosts()
            print(f"Default vfolder host: {resp['default']}")
            print(f"Usable hosts: {', '.join(resp['allowed'])}")
        except Exception as e:
            print_error(e)
            sys.exit(1)
List the hosts of virtual folders that are accessible to the current user.
6,439
def create(name, host):
    """CLI command: create a new virtual folder *name* on *host*."""
    with Session() as session:
        try:
            result = session.VFolder.create(name, host)
            print('Virtual folder "{0}" is created.'.format(result['name']))
        except Exception as e:
            print_error(e)
            sys.exit(1)
Create a new virtual folder .
6,440
def delete(name):
    """CLI command: delete the virtual folder *name*.  Irreversible."""
    with Session() as session:
        try:
            session.VFolder(name).delete()
            print_done('Deleted.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
Delete the given virtual folder . This operation is irreversible!
6,441
def rename(old_name, new_name):
    """CLI command: rename the virtual folder *old_name* to *new_name*.

    The new name must be unique among all vfolders accessible to the
    user; vfolders shared by other users cannot be renamed.
    """
    with Session() as session:
        try:
            session.VFolder(old_name).rename(new_name)
            print_done('Renamed.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
Rename the given virtual folder . This operation is irreversible! You cannot change the vfolders that are shared by other users and the new name must be unique among all your accessible vfolders including the shared ones .
6,442
def info(name):
    """CLI command: print id, ownership, permission and file count of
    the virtual folder *name*."""
    with Session() as session:
        try:
            result = session.VFolder(name).info()
            print('Virtual folder "{0}" (ID: {1})'
                  .format(result['name'], result['id']))
            print('- Owner:', result['is_owner'])
            print('- Permission:', result['permission'])
            print('- Number of files: {0}'.format(result['numFiles']))
        except Exception as e:
            print_error(e)
            sys.exit(1)
Show the information of the given virtual folder .
6,443
def download(name, filenames):
    """CLI command: download *filenames* from the virtual folder *name*
    into the current working directory, overwriting same-named files."""
    with Session() as session:
        try:
            session.VFolder(name).download(filenames, show_progress=True)
            print_done('Done.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
Download a file from the virtual folder to the current working directory. The files with the same names will be overwritten.
6,444
def mkdir(name, path):
    """CLI command: create an empty directory *path* inside the virtual
    folder *name*."""
    with Session() as session:
        try:
            session.VFolder(name).mkdir(path)
            print_done('Done.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
Create an empty directory in the virtual folder .
6,445
def rm(name, filenames, recursive):
    """CLI command: delete *filenames* in the virtual folder *name*,
    after an interactive y/n confirmation.

    With *recursive*, directories and their contents are deleted too.
    """
    with Session() as session:
        try:
            if input("> Are you sure? (y/n): ").lower().strip()[:1] == 'y':
                session.VFolder(name).delete_files(filenames,
                                                   recursive=recursive)
                print_done('Done.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
Delete files in a virtual folder . If one of the given paths is a directory and the recursive option is enabled all its content and the directory itself are recursively deleted .
6,446
def ls(name, path):
    """CLI command: list the files at *path* inside the virtual folder
    *name*, in a table of name/size/mtime/mode."""
    with Session() as session:
        try:
            print_wait('Retrieving list of files in "{}"...'.format(path))
            result = session.VFolder(name).list_files(path)
            if 'error_msg' in result and result['error_msg']:
                print_fail(result['error_msg'])
                return
            files = json.loads(result['files'])
            table = []
            headers = ['file name', 'size', 'modified', 'mode']
            for file in files:
                mdt = datetime.fromtimestamp(file['mtime'])
                mtime = mdt.strftime('%b %d %Y %H:%M:%S')
                row = [file['filename'], file['size'], mtime, file['mode']]
                table.append(row)
            # Fixed typo in the user-visible message ("Retrived").
            print_done('Retrieved.')
            print(tabulate(table, headers=headers))
        except Exception as e:
            print_error(e)
List files in a path of a virtual folder .
6,447
def invite(name, emails, perm):
    """CLI command: invite users (by email) to the virtual folder *name*
    with permission *perm* ('rw' or 'ro')."""
    with Session() as session:
        try:
            # NOTE(review): assert is stripped under -O; the surrounding
            # except Exception still catches it here, but an explicit
            # raise would be more robust.
            assert perm in ['rw', 'ro'], 'Invalid permission: {}'.format(perm)
            result = session.VFolder(name).invite(perm, emails)
            invited_ids = result.get('invited_ids', [])
            if len(invited_ids) > 0:
                print('Invitation sent to:')
                for invitee in invited_ids:
                    print('\t- ' + invitee)
            else:
                print('No users found. Invitation was not sent.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
Invite other users to access the virtual folder .
6,448
def invitations():
    """CLI command: list received vfolder invitations and interactively
    accept, reject, or skip one of them."""
    with Session() as session:
        try:
            result = session.VFolder.invitations()
            invitations = result.get('invitations', [])
            if len(invitations) < 1:
                print('No invitations.')
                return
            print('List of invitations (inviter, vfolder id, permission):')
            for cnt, inv in enumerate(invitations):
                if inv['perm'] == 'rw':
                    perm = 'read-write'
                elif inv['perm'] == 'ro':
                    perm = 'read-only'
                else:
                    perm = inv['perm']
                print('[{}] {}, {}, {}'.format(cnt + 1, inv['inviter'],
                                               inv['vfolder_id'], perm))
            selection = input('Choose invitation number to manage: ')
            if selection.isdigit():
                # Displayed 1-based, stored 0-based.
                selection = int(selection) - 1
            else:
                return
            if 0 <= selection < len(invitations):
                while True:
                    action = input('Choose action. (a)ccept, (r)eject, (c)ancel: ')
                    if action.lower() == 'a':
                        config = get_config()
                        result = session.VFolder.accept_invitation(
                            invitations[selection]['id'], config.access_key)
                        print(result['msg'])
                        break
                    elif action.lower() == 'r':
                        result = session.VFolder.delete_invitation(
                            invitations[selection]['id'])
                        print(result['msg'])
                        break
                    elif action.lower() == 'c':
                        break
        except Exception as e:
            print_error(e)
            sys.exit(1)
List and manage received invitations .
6,449
def init_check(self, check, obj):
    """Register *check* bound to *obj* as an extension health check.

    The callback is partially applied with *obj* (keeping its metadata
    via functools.wraps) and added to the check registry.
    """
    self.logger.info('Adding extension check %s' % check.__name__)
    check = functools.wraps(check)(functools.partial(check, obj))
    self.check(func=check)
Adds a given check callback with the provided object to the list of checks . Useful for built - ins but also advanced custom checks .
6,450
def init_app(self, app):
    """Initialize the extension on a Flask *app*.

    Registers the Dockerflow views (__version__, __heartbeat__,
    __lbheartbeat__) on an internal blueprint, hooks the request
    lifecycle and exception signals, and records the extension in
    ``app.extensions``.
    """
    if self.version_path is None:
        # Default: the directory above the application package.
        self.version_path = os.path.dirname(app.root_path)
    for view in (
        ('/__version__', 'version', self._version_view),
        ('/__heartbeat__', 'heartbeat', self._heartbeat_view),
        ('/__lbheartbeat__', 'lbheartbeat', self._lbheartbeat_view),
    ):
        self._blueprint.add_url_rule(*view)
    self._blueprint.before_app_request(self._before_request)
    self._blueprint.after_app_request(self._after_request)
    self._blueprint.app_errorhandler(HeartbeatFailure)(self._heartbeat_exception_handler)
    app.register_blueprint(self._blueprint)
    got_request_exception.connect(self._got_request_exception, sender=app)
    if not hasattr(app, 'extensions'):
        app.extensions = {}
    app.extensions['dockerflow'] = self
Initializes the extension with the given app registers the built - in views with an own blueprint and hooks up our signal callbacks .
6,451
def _before_request(self):
    """Flask before_request hook: stamp the request context with a
    unique request id and a start timestamp for summary logging."""
    g._request_id = str(uuid.uuid4())
    g._start_timestamp = time.time()
The before_request callback .
6,452
def _after_request(self, response):
    """Flask after_request hook: emit the request summary log line,
    unless an exception handler already logged this request."""
    if not getattr(g, '_has_exception', False):
        extra = self.summary_extra()
        self.summary_logger.info('', extra=extra)
    return response
The signal handler for the request_finished signal .
6,453
def _got_request_exception(self, sender, exception, **extra):
    """Handler for Flask's got_request_exception signal: log the error
    with summary fields and flag the request so _after_request() does
    not log it a second time."""
    extra = self.summary_extra()
    extra['errno'] = 500
    self.summary_logger.error(str(exception), extra=extra)
    g._has_exception = True
The signal handler for the got_request_exception signal .
6,454
def user_id(self):
    """Return the ID of the current request's authenticated user, or
    None when Flask-Login is absent, unconfigured, or the user is
    anonymous."""
    if not has_flask_login:
        return
    if not hasattr(current_app, 'login_manager'):
        return
    try:
        is_authenticated = current_user.is_authenticated
    except AttributeError:
        return
    # Older Flask-Login versions expose is_authenticated as a method.
    if callable(is_authenticated):
        is_authenticated = is_authenticated()
    if not is_authenticated:
        return
    return current_user.get_id()
Return the ID of the current request's user.
6,455
def summary_extra(self):
    """Build the `extra` dict for the summary logger: errno, agent,
    language, method, path, user id, request id and elapsed time."""
    out = {
        'errno': 0,
        'agent': request.headers.get('User-Agent', ''),
        'lang': request.headers.get('Accept-Language', ''),
        'method': request.method,
        'path': request.path,
    }
    user_id = self.user_id()
    if user_id is None:
        user_id = ''
    out['uid'] = user_id
    request_id = g.get('_request_id', None)
    if request_id is not None:
        out['rid'] = request_id
    start_timestamp = g.get('_start_timestamp', None)
    if start_timestamp is not None:
        # Elapsed request time in whole milliseconds.
        out['t'] = int(1000 * (time.time() - start_timestamp))
    return out
Build the extra data for the summary logger .
6,456
def _version_view(self):
    """View returning the contents of version.json as JSON, or a 404."""
    version_json = self._version_callback(self.version_path)
    if version_json is None:
        return 'version.json not found', 404
    else:
        return jsonify(version_json)
View that returns the contents of version . json or a 404 .
6,457
def _heartbeat_view(self):
    """Run all registered checks and report the aggregate status as JSON.

    Responds 200 when the worst check level is below WARNING, 500
    otherwise (the 500 is raised as HeartbeatFailure carrying the
    prepared response so the error handler can return it). Fires the
    heartbeat_passed / heartbeat_failed signals accordingly.
    """
    details = {}
    statuses = {}
    level = 0
    for name, check in self.checks.items():
        detail = self._heartbeat_check_detail(check)
        statuses[name] = detail['status']
        level = max(level, detail['level'])
        # Only include full details for checks that reported problems.
        if detail['level'] > 0:
            details[name] = detail
    payload = {
        'status': checks.level_to_text(level),
        'checks': statuses,
        'details': details,
    }

    def render(status_code):
        return make_response(jsonify(payload), status_code)

    if level < checks.WARNING:
        status_code = 200
        heartbeat_passed.send(self, level=level)
        return render(status_code)
    else:
        status_code = 500
        heartbeat_failed.send(self, level=level)
        raise HeartbeatFailure(response=render(status_code))
Runs all the registered checks and returns a JSON response with either a status code of 200 or 500 depending on the results of the checks .
6,458
def drange(start: Decimal, stop: Decimal, num: int):
    """Yield ``num`` evenly spaced Decimal values from *start* to *stop* inclusive.

    A simplified, Decimal-based analogue of ``numpy.linspace`` with
    default options.
    """
    step = (stop - start) / (num - 1)
    for tick in range(num):
        yield start + step * Decimal(tick)
A simplified version of numpy . linspace with default options
6,459
def range_expr(arg):
    """Parse a range expression of the form ``key=<spec>``.

    Supported specs:
      * ``case:a,b,c``     -> explicit list of string values
      * ``linspace:s,e,n`` -> n evenly spaced Decimals from s to e
      * ``range:args``     -> tuple(range(*int_args))

    Returns a ``(key, values)`` tuple; raises ArgumentTypeError on a
    malformed value or unknown spec type.
    """
    key, value = arg.split('=', maxsplit=1)
    assert _rx_range_key.match(key), 'The key must be a valid slug string.'
    try:
        if value.startswith('case:'):
            return key, value[5:].split(',')
        if value.startswith('linspace:'):
            start, stop, num = value[9:].split(',')
            return key, tuple(drange(Decimal(start), Decimal(stop), int(num)))
        if value.startswith('range:'):
            return key, tuple(range(*map(int, value[6:].split(','))))
        raise ArgumentTypeError('Unrecognized range expression type')
    except ValueError as e:
        raise ArgumentTypeError(str(e))
Accepts a range expression which generates a range of values for a variable .
6,460
async def exec_loop(stdout, stderr, kernel, mode, code, *,
                    opts=None, vprint_done=print_done, is_multi=False):
    """Fully streamed asynchronous version of the execute loop.

    Streams execution results over a websocket, echoing console records
    to *stdout*/*stderr*, listing generated files, answering interactive
    input requests, and stopping when the kernel reports 'finished'.
    """
    async with kernel.stream_execute(code, mode=mode, opts=opts) as stream:
        async for result in stream:
            if result.type == aiohttp.WSMsgType.TEXT:
                result = json.loads(result.data)
            else:
                # Ignore non-text websocket frames.
                continue
            for rec in result.get('console', []):
                if rec[0] == 'stdout':
                    print(rec[1], end='', file=stdout)
                elif rec[0] == 'stderr':
                    print(rec[1], end='', file=stderr)
                else:
                    print('----- output record (type: {0}) -----'.format(rec[0]),
                          file=stdout)
                    print(rec[1], file=stdout)
                    print('----- end of record -----', file=stdout)
            stdout.flush()
            files = result.get('files', [])
            if files:
                print('--- generated files ---', file=stdout)
                for item in files:
                    print('{0}: {1}'.format(item['name'], item['url']), file=stdout)
                print('--- end of generated files ---', file=stdout)
            if result['status'] == 'clean-finished':
                exitCode = result.get('exitCode')
                msg = 'Clean finished. (exit code = {0})'.format(exitCode)
                if is_multi:
                    print(msg, file=stderr)
                vprint_done(msg)
            elif result['status'] == 'build-finished':
                exitCode = result.get('exitCode')
                msg = 'Build finished. (exit code = {0})'.format(exitCode)
                if is_multi:
                    print(msg, file=stderr)
                vprint_done(msg)
            elif result['status'] == 'finished':
                exitCode = result.get('exitCode')
                msg = 'Execution finished. (exit code = {0})'.format(exitCode)
                if is_multi:
                    print(msg, file=stderr)
                vprint_done(msg)
                break
            elif result['status'] == 'waiting-input':
                # Kernel asked for user input; suppress echo for passwords.
                if result['options'].get('is_password', False):
                    code = getpass.getpass()
                else:
                    code = input()
                await stream.send_str(code)
            elif result['status'] == 'continued':
                pass
Fully streamed asynchronous version of the execute loop .
6,461
def exec_loop_sync(stdout, stderr, kernel, mode, code, *,
                   opts=None, vprint_done=print_done):
    """Old synchronous polling version of the execute loop.

    Repeatedly calls ``kernel.execute()``, echoing console records to
    *stdout*/*stderr*, listing generated files, and answering
    interactive input requests until the kernel reports 'finished'.

    Fix: the 'Clean finished.' message was missing its closing
    parenthesis (inconsistent with the build/finished messages).
    """
    opts = opts if opts else {}
    # The first response supplies the runId used by subsequent calls.
    run_id = None
    while True:
        result = kernel.execute(run_id, code, mode=mode, opts=opts)
        run_id = result['runId']
        opts.clear()
        for rec in result['console']:
            if rec[0] == 'stdout':
                print(rec[1], end='', file=stdout)
            elif rec[0] == 'stderr':
                print(rec[1], end='', file=stderr)
            else:
                print('----- output record (type: {0}) -----'.format(rec[0]),
                      file=stdout)
                print(rec[1], file=stdout)
                print('----- end of record -----', file=stdout)
        stdout.flush()
        files = result.get('files', [])
        if files:
            print('--- generated files ---', file=stdout)
            for item in files:
                print('{0}: {1}'.format(item['name'], item['url']), file=stdout)
            print('--- end of generated files ---', file=stdout)
        if result['status'] == 'clean-finished':
            exitCode = result.get('exitCode')
            vprint_done('Clean finished. (exit code = {0})'.format(exitCode),
                        file=stdout)
            mode = 'continue'
            code = ''
        elif result['status'] == 'build-finished':
            exitCode = result.get('exitCode')
            vprint_done('Build finished. (exit code = {0})'.format(exitCode),
                        file=stdout)
            mode = 'continue'
            code = ''
        elif result['status'] == 'finished':
            exitCode = result.get('exitCode')
            vprint_done('Execution finished. (exit code = {0})'.format(exitCode),
                        file=stdout)
            break
        elif result['status'] == 'waiting-input':
            mode = 'input'
            if result['options'].get('is_password', False):
                code = getpass.getpass()
            else:
                code = input()
        elif result['status'] == 'continued':
            mode = 'continue'
            code = ''
Old synchronous polling version of the execute loop .
6,462
def start(lang, session_id, owner, env, mount, tag, resources, cluster_size):
    """Prepare and start a single compute session without executing code.

    The created session can later be used to execute code with the
    ``run`` command or connected to with the ``app`` command.

    Fix: removed the dead ``else: session_id = session_id`` branch.
    """
    if session_id is None:
        # Generate a short random client token when none is given.
        session_id = token_hex(5)
    envs = _prepare_env_arg(env)
    resources = _prepare_resource_arg(resources)
    mount = _prepare_mount_arg(mount)
    with Session() as session:
        try:
            kernel = session.Kernel.get_or_create(
                lang,
                client_token=session_id,
                cluster_size=cluster_size,
                mounts=mount,
                envs=envs,
                resources=resources,
                owner_access_key=owner,
                tag=tag)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        else:
            if kernel.created:
                print_info('Session ID {0} is created and ready.'.format(session_id))
            else:
                print_info('Session ID {0} is already running and ready.'.format(session_id))
            if kernel.service_ports:
                print_info('This session provides the following app services: ' +
                           ', '.join(sport['name'] for sport in kernel.service_ports))
Prepare and start a single compute session without executing codes . You may use the created session to execute codes using the run command or connect to an application service provided by the session using the app command .
6,463
def terminate(sess_id_or_alias, owner, stats):
    """Terminate the given session(s).

    Exits with status 1 if any session failed to terminate. With
    *stats*, prints the statistics returned for the last terminated
    session (when available).

    Fix: ``ret`` was referenced after the loop but never initialized,
    raising NameError when the session list is empty and --stats is
    requested.
    """
    print_wait('Terminating the session(s)...')
    with Session() as session:
        has_failure = False
        ret = None  # response of the last successful destroy()
        for sess in sess_id_or_alias:
            try:
                kernel = session.Kernel(sess, owner)
                ret = kernel.destroy()
            except BackendAPIError as e:
                print_error(e)
                if e.status == 404:
                    print_info('If you are an admin, use "-o" / "--owner" option '
                               'to terminate other user\'s session.')
                has_failure = True
            except Exception as e:
                print_error(e)
                has_failure = True
        if has_failure:
            sys.exit(1)
        else:
            print_done('Done.')
            if stats:
                stats = ret.get('stats', None) if ret else None
                if stats:
                    print(_format_stats(stats))
                else:
                    print('Statistics is not available.')
Terminate the given session .
6,464
def add_devs_custom_views(custom_view_name, dev_list, auth, url):
    """Add devices to an existing HPE IMC custom view.

    :param custom_view_name: name of an existing custom view
    :param dev_list: list of device dicts (each containing an "id" key)
    :param auth: requests auth object from AuthClass
    :param url: base URL of the IMC RS interface
    :return: 204 on success, or an error string on request failure

    Fix: the original payload expression
    (``payload = + json.dumps(dev_list) +``) was syntactically broken —
    its string literals were lost.
    """
    view_id = get_custom_views(auth, url, name=custom_view_name)[0]['symbolId']
    add_devs_custom_views_url = '/imcrs/plat/res/view/custom/' + str(view_id)
    # NOTE(review): reconstructed to wrap the device list in a "device"
    # attribute as the IMC custom-view API expects — confirm against the
    # eAPI documentation.
    payload = '{"device":' + json.dumps(dev_list) + '}'
    f_url = url + add_devs_custom_views_url
    r = requests.put(f_url, data=payload, auth=auth, headers=HEADERS)
    try:
        if r.status_code == 204:
            print('View ' + custom_view_name + ' : Devices Successfully Added')
            return r.status_code
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + ' get_custom_views: An Error has occured'
function takes a list of devIDs from devices discovered in the HPE IMC platform and issues a RESTFUL call to add the list of devices to a specific custom view in HPE IMC .
6,465
def status():
    """Show the manager's current status and active session count."""
    with Session() as session:
        resp = session.Manager.status()
        rows = [
            ('Status', 'Active Sessions'),
            (resp['status'], resp['active_sessions']),
        ]
        print(tabulate(rows, headers='firstrow'))
Show the manager s current status .
6,466
def freeze(wait, force_kill):
    """Freeze the manager (stop accepting new sessions).

    With --wait, blocks until all active sessions have terminated on
    their own first. With --force-kill, kills all running sessions.
    The two options are mutually exclusive.
    """
    if wait and force_kill:
        print('You cannot use both --wait and --force-kill options '
              'at the same time.', file=sys.stderr)
        return
    with Session() as session:
        if wait:
            # Poll until the manager reports zero active sessions.
            while True:
                resp = session.Manager.status()
                active_sessions_num = resp['active_sessions']
                if active_sessions_num == 0:
                    break
                print_wait('Waiting for all sessions terminated... ({0} left)'
                           .format(active_sessions_num))
                time.sleep(3)
            print_done('All sessions are terminated.')
        if force_kill:
            print_wait('Killing all sessions...')
        session.Manager.freeze(force_kill=force_kill)
        if force_kill:
            print_done('All sessions are killed.')
        print('Manager is successfully frozen.')
Freeze manager .
6,467
def get_dev_vlans(auth, url, devid=None, devip=None):
    """List the VLANs configured on a device in HPE IMC.

    :param auth: requests auth object from AuthClass
    :param url: base URL of the IMC RS interface
    :param devid: IMC internal device id
    :param devip: device IP address (resolved to a devid when given)
    :return: list of vlan dicts; {'vlan': 'no vlans'} on HTTP 409;
        error string on request failure
    """
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    get_dev_vlans_url = "/imcrs/vlan?devId=" + str(devid) + "&start=0&size=5000&total=false"
    f_url = url + get_dev_vlans_url
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            dev_vlans = (json.loads(response.text))
            return dev_vlans['vlan']
        elif response.status_code == 409:
            # 409 from this endpoint is treated as "no VLANs configured".
            return {'vlan': 'no vlans'}
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_dev_vlans: An Error has occured'
Function takes input of devID to issue a RESTFUL call to HP IMC
6,468
def get_trunk_interfaces(auth, url, devid=None, devip=None):
    """List a device's trunk interfaces from HPE IMC.

    :param devid: IMC internal device id
    :param devip: device IP address (resolved to a devid when given)
    :return: list of trunk interface dicts, or ["No trunk inteface"]
        when the device has none; error string on request failure
    """
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    get_trunk_interfaces_url = "/imcrs/vlan/trunk?devId=" + str(devid) + "&start=1&size=5000&total=false"
    f_url = url + get_trunk_interfaces_url
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            dev_trunk_interfaces = (json.loads(response.text))
            # presumably a 2-key payload means trunk data is present — verify
            if len(dev_trunk_interfaces) == 2:
                # Normalize a single dict result to a one-element list.
                if isinstance(dev_trunk_interfaces['trunkIf'], list):
                    return dev_trunk_interfaces['trunkIf']
                elif isinstance(dev_trunk_interfaces['trunkIf'], dict):
                    return [dev_trunk_interfaces['trunkIf']]
            else:
                dev_trunk_interfaces['trunkIf'] = ["No trunk inteface"]
                return dev_trunk_interfaces['trunkIf']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_trunk_interfaces: An Error has occured'
Function takes devId as input to a RESTFUL call to the HP IMC platform
6,469
def get_device_access_interfaces(auth, url, devid=None, devip=None):
    """List a device's access-mode interfaces from HPE IMC.

    :param devid: IMC internal device id
    :param devip: device IP address (resolved to a devid when given)
    :return: list of access interface dicts, or ["No access inteface"]
        when the device has none; error string on request failure
    """
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    get_access_interface_vlan_url = "/imcrs/vlan/access?devId=" + str(devid) + "&start=1&size=500&total=false"
    f_url = url + get_access_interface_vlan_url
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            dev_access_interfaces = (json.loads(response.text))
            # Normalize a single dict result to a one-element list.
            if type(dev_access_interfaces['accessIf']) is dict:
                return [dev_access_interfaces['accessIf']]
            if len(dev_access_interfaces) == 2:
                return dev_access_interfaces['accessIf']
            else:
                dev_access_interfaces['accessIf'] = ["No access inteface"]
                return dev_access_interfaces['accessIf']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_device_access_interfaces: An Error has occured"
Function takes devid or devip as input to a RESTFUL call to the HP IMC platform
6,470
def get_device_hybrid_interfaces(auth, url, devid=None, devip=None):
    """List a device's hybrid-mode interfaces from HPE IMC.

    :param devid: IMC internal device id
    :param devip: device IP address (resolved to a devid when given)
    :return: list of hybrid interface dicts, or ["No hybrid inteface"]
        when the device has none; error string on request failure
    """
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    get_hybrid_interface_vlan_url = "/imcrs/vlan/hybrid?devId=" + str(devid) + "&start=1&size=500&total=false"
    f_url = url + get_hybrid_interface_vlan_url
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            dev_hybrid_interfaces = (json.loads(response.text))
            # presumably a 2-key payload means hybrid data is present — verify
            if len(dev_hybrid_interfaces) == 2:
                dev_hybrid = dev_hybrid_interfaces['hybridIf']
                # Normalize a single dict result to a one-element list.
                if isinstance(dev_hybrid, dict):
                    dev_hybrid = [dev_hybrid]
                return dev_hybrid
            else:
                dev_hybrid_interfaces['hybridIf'] = ["No hybrid inteface"]
                return dev_hybrid_interfaces['hybridIf']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_device_hybrid_interfaces: An Error has occured"
Function takes devId as input to RESTFUL call to HP IMC platform
6,471
def _get_results_from_api(identifiers, endpoints, api_key, api_secret):
    """Fetch results from the HouseCanary API via the Python client.

    Credentials fall back to the client's own defaults when api_key and
    api_secret are not both provided.
    """
    if api_key is not None and api_secret is not None:
        client = housecanary.ApiClient(api_key, api_secret)
    else:
        client = housecanary.ApiClient()
    # The API wrapper attribute is chosen from the first endpoint's
    # prefix, e.g. "property/value" -> client.property.
    wrapper = getattr(client, endpoints[0].split('/')[0])
    if len(endpoints) > 1:
        # Multiple endpoints: batch them with component_mget.
        return wrapper.component_mget(identifiers, endpoints)
    else:
        return wrapper.fetch_identifier_component(endpoints[0], identifiers)
Use the HouseCanary API Python Client to access the API
6,472
def images():
    """Show the list of registered images in this cluster."""
    # (header, API field name) pairs for the output table.
    fields = [
        ('Name', 'name'),
        ('Registry', 'registry'),
        ('Tag', 'tag'),
        ('Digest', 'digest'),
        ('Size', 'size_bytes'),
        ('Aliases', 'aliases'),
    ]
    with Session() as session:
        try:
            items = session.Image.list(fields=(item[1] for item in fields))
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if len(items) == 0:
            print('There are no registered images.')
            return
        print(tabulate((item.values() for item in items),
                       headers=(item[0] for item in fields),
                       floatfmt=',.0f'))
Show the list of registered images in this cluster .
6,473
def rescan_images(registry):
    """Update the kernel image metadata from all configured docker registries."""
    with Session() as session:
        try:
            result = session.Image.rescanImages(registry)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if result['ok']:
            print("kernel image metadata updated")
        else:
            print("rescanning failed: {0}".format(result['msg']))
Update the kernel image metadata from all configured docker registries .
6,474
def alias_image(alias, target):
    """Add an image alias pointing at the given target image."""
    with Session() as session:
        try:
            result = session.Image.aliasImage(alias, target)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if result['ok']:
            print("alias {0} created for target {1}".format(alias, target))
        else:
            print(result['msg'])
Add an image alias .
6,475
def dealias_image(alias):
    """Remove an image alias."""
    with Session() as session:
        try:
            result = session.Image.dealiasImage(alias)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if result['ok']:
            print("alias {0} removed.".format(alias))
        else:
            print(result['msg'])
Remove an image alias .
6,476
def run_alias():
    """Quick aliases for the ``run`` command.

    Invoked via the ``lcc``/``lpython`` console scripts; rewrites
    ``sys.argv`` into the equivalent ``backend.ai run ...`` invocation
    before dispatching to the main CLI entry point.
    """
    mode = Path(sys.argv[0]).stem
    show_help = len(sys.argv) <= 1
    if mode == 'lcc':
        sys.argv.insert(1, 'c')
    elif mode == 'lpython':
        sys.argv.insert(1, 'python')
    sys.argv.insert(1, 'run')
    if show_help:
        sys.argv.append('--help')
    main.main(prog_name='backend.ai')
Quick aliases for run command .
6,477
def rate_limits(self):
    """Return the rate limit info parsed from the response (lazily cached)."""
    if not self._rate_limits:
        self._rate_limits = utilities.get_rate_limits(self._response)
    return self._rate_limits
Returns list of rate limit information from the response
6,478
def create_custom_views(auth, url, name=None, upperview=None):
    """Create a custom view on the HPE IMC platform.

    :param auth: requests auth object from AuthClass
    :param url: base URL of the IMC RS interface
    :param name: name of the custom view to create
    :param upperview: optional name of an existing parent view to nest under
    :return: HTTP status code (201 created, 409 already exists), or an
        error string on request failure

    Fix: both original ``payload = + name +`` expressions were
    syntactically broken — their string literals were lost.
    """
    create_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false' '&total=false'
    f_url = url + create_custom_views_url
    # NOTE(review): payloads reconstructed as the JSON bodies the IMC
    # custom-view API expects — confirm field names against the eAPI docs.
    if upperview is None:
        payload = '{"name": "' + name + '", "upLevelSymbolId": ""}'
    else:
        parentviewid = get_custom_views(auth, url, upperview)[0]['symbolId']
        payload = ('{"name": "' + name + '", "upLevelSymbolId": "' +
                   str(parentviewid) + '"}')
    response = requests.post(f_url, data=payload, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 201:
            print('View ' + name + ' created successfully')
            return response.status_code
        elif response.status_code == 409:
            print("View " + name + " already exists")
            return response.status_code
        else:
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_custom_views: An Error has occured'
function takes no input and issues a RESTFUL call to get a list of custom views from HPE IMC . Optional Name input will return only the specified view .
6,479
def add_devs_custom_views(custom_view_name, dev_list, auth, url):
    """Add discovered devices to an existing HPE IMC custom view.

    :param custom_view_name: name of the target custom view
    :param dev_list: list of device ids discovered in IMC
    :param auth: requests auth object from AuthClass
    :param url: base URL of the IMC RS interface
    :return: 204 on success; None if the view does not exist; error
        string on request failure

    Fixes: the payload expression was syntactically broken (string
    literals lost), a stray debug ``print(payload)`` was removed, and
    the redundant second ``get_custom_views`` HTTP call was dropped.
    """
    view_lookup = get_custom_views(auth, url, name=custom_view_name)
    if view_lookup is None:
        print("View " + custom_view_name + " doesn't exist")
        return view_lookup
    view_id = view_lookup[0]['symbolId']
    add_devs_custom_views_url = '/imcrs/plat/res/view/custom/' + str(view_id)
    device_list = [{"id": dev} for dev in dev_list]
    # NOTE(review): reconstructed to wrap the device list in a "device"
    # attribute as the IMC custom-view API expects — confirm against the
    # eAPI documentation.
    payload = '{"device":' + json.dumps(device_list) + '}'
    f_url = url + add_devs_custom_views_url
    response = requests.put(f_url, data=payload, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 204:
            print('View ' + custom_view_name + ' : Devices Successfully Added')
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_custom_views: An Error has occured'
function takes a list of devIDs from devices discovered in the HPE IMC platform and issues a RESTFUL call to add the list of devices to a specific custom view in HPE IMC .
6,480
def _set_format_oauth(self):
    """URL-encode the OAuth credential fields for the token request body."""
    format_oauth = urllib.parse.urlencode({
        'client_id': self._client_id,
        'client_secret': self._client_secret,
        'scope': self._url_request,
        'grant_type': self._grant_type,
    }).encode("utf-8")
    return format_oauth
Format and encode dict for make authentication on microsoft servers .
6,481
def _make_request(self, params, translation_url, headers):
    """Perform the GET request and return the decoded JSON payload."""
    resp = requests.get(translation_url, params=params, headers=headers)
    # Use a BOM-tolerant codec when decoding the response body.
    resp.encoding = "UTF-8-sig"
    result = resp.json()
    return result
This is the final step where the request is made the data is retrieved and returned .
6,482
async def status(cls):
    """Return the current status of the configured API server."""
    rqst = Request(cls.session, 'GET', '/manager/status')
    rqst.set_json({
        'status': 'running',
    })
    async with rqst.fetch() as resp:
        return await resp.json()
Returns the current status of the configured API server .
6,483
def upload(sess_id_or_alias, files):
    """Upload files to the session's home folder.

    No-op when the file list is empty; exits with status 1 on failure.
    """
    if len(files) < 1:
        return
    with Session() as session:
        try:
            print_wait('Uploading files...')
            kernel = session.Kernel(sess_id_or_alias)
            kernel.upload(files, show_progress=True)
            print_done('Uploaded.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
Upload files to user s home folder .
6,484
def download(sess_id_or_alias, files, dest):
    """Download files from a running container into *dest*.

    No-op when the file list is empty; exits with status 1 on failure.
    """
    if len(files) < 1:
        return
    with Session() as session:
        try:
            print_wait('Downloading file(s) from {}...'.format(sess_id_or_alias))
            kernel = session.Kernel(sess_id_or_alias)
            kernel.download(files, dest, show_progress=True)
            print_done('Downloaded to {}.'.format(dest.resolve()))
        except Exception as e:
            print_error(e)
            sys.exit(1)
Download files from a running container .
6,485
def ls(sess_id_or_alias, path):
    """List files in a path of a running container.

    Prints a table of file name/size/mtime/mode; exits with status 1
    on failure.

    Fix: user-facing typo 'Retrived.' corrected to 'Retrieved.'.
    """
    with Session() as session:
        try:
            print_wait('Retrieving list of files in "{}"...'.format(path))
            kernel = session.Kernel(sess_id_or_alias)
            result = kernel.list_files(path)
            if 'errors' in result and result['errors']:
                print_fail(result['errors'])
                sys.exit(1)
            files = json.loads(result['files'])
            table = []
            headers = ['file name', 'size', 'modified', 'mode']
            for file in files:
                mdt = datetime.fromtimestamp(file['mtime'])
                mtime = mdt.strftime('%b %d %Y %H:%M:%S')
                row = [file['filename'], file['size'], mtime, file['mode']]
                table.append(row)
            print_done('Retrieved.')
            print('Path in container:', result['abspath'], end='')
            print(tabulate(table, headers=headers))
        except Exception as e:
            print_error(e)
            sys.exit(1)
List files in a path of a running container .
6,486
def set_operator_password(operator, password, auth, url):
    """Set the password of an existing HPE IMC operator.

    :param operator: operator username; read interactively when None
    :param password: new password; read interactively when None
    :param auth: requests auth object from AuthClass
    :param url: base URL of the IMC RS interface
    :return: 204 on success, "User does not exist" when the operator is
        unknown, or an error string on request failure
    """
    if operator is None:
        # NOTE(review): input() is called without a prompt string here;
        # the prompt text appears to have been lost — confirm upstream.
        operator = input()
    oper_id = ''
    authtype = None
    # Resolve the operator's id and auth type by name.
    plat_oper_list = get_plat_operator(auth, url)
    for i in plat_oper_list:
        if i['name'] == operator:
            oper_id = i['id']
            authtype = i['authType']
    if oper_id == '':
        return "User does not exist"
    change_pw_url = "/imcrs/plat/operator/"
    f_url = url + change_pw_url + oper_id
    if password is None:
        # NOTE(review): prompt text appears to have been lost here too.
        password = input()
    payload = json.dumps({'password': password, 'authType': authtype})
    response = requests.put(f_url, data=payload, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 204:
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' set_operator_password: An Error has occured'
Function to set the password of an existing operator
6,487
def get_plat_operator(auth, url):
    """Return all operators configured on the HPE IMC system.

    :param auth: requests auth object from AuthClass
    :param url: base URL of the IMC RS interface
    :return: list of operator dicts (a single operator is normalized to
        a one-element list), or an error string on request failure
    """
    f_url = url + '/imcrs/plat/operator?start=0&size=1000&orderBy=id&desc=false&total=false'
    try:
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        plat_oper_list = json.loads(response.text)['operator']
        # Normalize a single dict result to a one-element list.
        if isinstance(plat_oper_list, dict):
            oper_list = [plat_oper_list]
            return oper_list
        return plat_oper_list
    except requests.exceptions.RequestException as error:
        print("Error:\n" + str(error) + ' get_plat_operator: An Error has occured')
        return "Error:\n" + str(error) + ' get_plat_operator: An Error has occured'
Function takes no inputs and returns a list of dictionaries of all of the operators currently configured on the HPE IMC system
6,488
def execute_request(self, url, http_method, query_params, post_data):
    """Make a request to *url* and post-process the response.

    The response is routed through the configured output generator:
    the string "json" returns ``response.json()``; any other non-None
    generator object gets ``process_response(response)``; None returns
    the raw response.
    """
    response = requests.request(http_method, url, params=query_params,
                                auth=self._auth, json=post_data,
                                headers={'User-Agent': USER_AGENT})
    if isinstance(self._output_generator, str) and self._output_generator.lower() == "json":
        return response.json()
    elif self._output_generator is not None:
        return self._output_generator.process_response(response)
    else:
        return response
Makes a request to the specified url endpoint with the specified http method params and post data .
6,489
def post(self, url, post_data, query_params=None):
    """Make a POST request to the given url endpoint.

    :param url: the url to the API endpoint, without query params
    :param post_data: JSON-serializable body to send with the request
    :param query_params: optional dict of query parameters
    :return: whatever ``execute_request`` produces for the response
    """
    params = {} if query_params is None else query_params
    return self.execute_request(url, "POST", params, post_data)
Makes a POST request to the specified url endpoint .
6,490
def get_ip_scope(auth, url, scopeid=None, ):
    """Return IP address scopes configured on the HPE IMC server.

    :param auth: requests auth object from AuthClass
    :param url: base URL of the IMC RS interface
    :param scopeid: optional scope id to fetch a single scope's details
    :return: list of scope dicts (a single dict is normalized to a
        one-element list), or an error string on request failure
    """
    if scopeid is None:
        get_ip_scope_url = "/imcrs/res/access/assignedIpScope"
    else:
        get_ip_scope_url = "/imcrs/res/access/assignedIpScope/ip?ipScopeId=" + str(scopeid)
    f_url = url + get_ip_scope_url
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            ipscopelist = (json.loads(response.text))['assignedIpScope']
            # Normalize a single dict result to a one-element list.
            if isinstance(ipscopelist, list):
                return ipscopelist
            elif isinstance(ipscopelist, dict):
                return [ipscopelist]
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_ip_scope: An Error has occured"
function requires no inputs and returns all IP address scopes currently configured on the HPE IMC server . If the optional scopeid parameter is included this will automatically return only the desired scope id .
6,491
def check_database_connected(db):
    """Built-in check: can we connect to the configured default database?

    Executes ``SELECT 1;`` and maps DB-API / SQLAlchemy failures to
    Error results; returns an empty list on success.
    """
    # Imported lazily so SQLAlchemy is only required when this check is used.
    from sqlalchemy.exc import DBAPIError, SQLAlchemyError
    errors = []
    try:
        with db.engine.connect() as connection:
            connection.execute('SELECT 1;')
    except DBAPIError as e:
        msg = 'DB-API error: {!s}'.format(e)
        errors.append(Error(msg, id=health.ERROR_DB_API_EXCEPTION))
    except SQLAlchemyError as e:
        msg = 'Database misconfigured: "{!s}"'.format(e)
        errors.append(Error(msg, id=health.ERROR_SQLALCHEMY_EXCEPTION))
    return errors
A built - in check to see if connecting to the configured default database backend succeeds .
6,492
def check_migrations_applied(migrate):
    """Built-in check: have all Alembic migrations been applied?

    Compares the database's current revision heads against the script
    directory's heads; emits a Warning when they differ, or an Info
    when the database cannot be reached at all.
    """
    errors = []
    # Imported lazily so Alembic/SQLAlchemy are only required when used.
    from alembic.migration import MigrationContext
    from alembic.script import ScriptDirectory
    from sqlalchemy.exc import DBAPIError, SQLAlchemyError
    config = migrate.get_config(directory=migrate.directory)
    script = ScriptDirectory.from_config(config)
    try:
        with migrate.db.engine.connect() as connection:
            context = MigrationContext.configure(connection)
            db_heads = set(context.get_current_heads())
            script_heads = set(script.get_heads())
    except (DBAPIError, SQLAlchemyError) as e:
        msg = "Can't connect to database to check migrations: {!s}".format(e)
        return [Info(msg, id=health.INFO_CANT_CHECK_MIGRATIONS)]
    if db_heads != script_heads:
        # NOTE(review): the message lists every script head, not only the
        # unapplied difference.
        msg = "Unapplied migrations found: {}".format(', '.join(script_heads))
        errors.append(Warning(msg, id=health.WARNING_UNAPPLIED_MIGRATION))
    return errors
A built - in check to see if all migrations have been applied correctly .
6,493
def check_redis_connected(client):
    """Built-in check: does Redis respond to PING via the given client?

    Maps connection and protocol failures to Error results; also errors
    when the ping returns a falsy result.
    """
    # Imported lazily so redis is only required when this check is used.
    import redis
    errors = []
    try:
        result = client.ping()
    except redis.ConnectionError as e:
        msg = 'Could not connect to redis: {!s}'.format(e)
        errors.append(Error(msg, id=health.ERROR_CANNOT_CONNECT_REDIS))
    except redis.RedisError as e:
        errors.append(Error('Redis error: "{!s}"'.format(e),
                            id=health.ERROR_REDIS_EXCEPTION))
    else:
        if not result:
            errors.append(Error('Redis ping failed',
                                id=health.ERROR_REDIS_PING_FAILED))
    return errors
A built - in check to connect to Redis using the given client and see if it responds to the PING command .
6,494
def vfolders(access_key):
    """List virtual folders, optionally those owned by another access key."""
    # (header, GraphQL field name) pairs for the output table.
    fields = [
        ('Name', 'name'),
        ('Created At', 'created_at'),
        ('Last Used', 'last_used'),
        ('Max Files', 'max_files'),
        ('Max Size', 'max_size'),
    ]
    if access_key is None:
        q = 'query { vfolders { $fields } }'
    else:
        q = 'query($ak:String) { vfolders(access_key:$ak) { $fields } }'
    q = q.replace('$fields', ' '.join(item[1] for item in fields))
    v = {'ak': access_key}
    with Session() as session:
        try:
            resp = session.Admin.query(q, v)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        print(tabulate((item.values() for item in resp['vfolders']),
                       headers=(item[0] for item in fields)))
List and manage virtual folders .
6,495
def config():
    """Show the current client configuration."""
    config = get_config()
    print('Client version: {0}'.format(click.style(__version__, bold=True)))
    print('API endpoint: {0}'.format(click.style(str(config.endpoint), bold=True)))
    print('API version: {0}'.format(click.style(config.version, bold=True)))
    print('Access key: "{0}"'.format(click.style(config.access_key, bold=True)))
    # Mask the middle of the secret key, keeping only the edges visible.
    masked_skey = config.secret_key[:6] + ('*' * 24) + config.secret_key[-10:]
    print('Secret key: "{0}"'.format(click.style(masked_skey, bold=True)))
    print('Signature hash type: {0}'.format(click.style(config.hash_type, bold=True)))
    print('Skip SSL certificate validation? {0}'.format(
        click.style(str(config.skip_sslcert_validation), bold=True)))
Shows the current configuration .
6,496
async def create(cls, name: str,
                 default_for_unspecified: int,
                 total_resource_slots: int,
                 max_concurrent_sessions: int,
                 max_containers_per_session: int,
                 max_vfolder_count: int,
                 max_vfolder_size: int,
                 idle_timeout: int,
                 allowed_vfolder_hosts: Sequence[str],
                 fields: Iterable[str] = None) -> dict:
    """Create a new keypair resource policy with the given options.

    You need an admin privilege for this operation. Returns the
    mutation result dict (ok/msg/resource_policy).
    """
    if fields is None:
        fields = ('name',)
    q = 'mutation($name: String!, $input: CreateKeyPairResourcePolicyInput!) {' + \
        ' create_keypair_resource_policy(name: $name, props: $input) {' \
        ' ok msg resource_policy { $fields }' \
        ' }' \
        '}'
    q = q.replace('$fields', ' '.join(fields))
    variables = {
        'name': name,
        'input': {
            'default_for_unspecified': default_for_unspecified,
            'total_resource_slots': total_resource_slots,
            'max_concurrent_sessions': max_concurrent_sessions,
            'max_containers_per_session': max_containers_per_session,
            'max_vfolder_count': max_vfolder_count,
            'max_vfolder_size': max_vfolder_size,
            'idle_timeout': idle_timeout,
            'allowed_vfolder_hosts': allowed_vfolder_hosts,
        },
    }
    rqst = Request(cls.session, 'POST', '/admin/graphql')
    rqst.set_json({
        'query': q,
        'variables': variables,
    })
    async with rqst.fetch() as resp:
        data = await resp.json()
        return data['create_keypair_resource_policy']
Creates a new keypair resource policy with the given options . You need an admin privilege for this operation .
6,497
async def list(cls, fields: Iterable[str] = None) -> Sequence[dict]:
    """List the keypair resource policies.

    You need an admin privilege for this operation.
    """
    if fields is None:
        fields = ('name', 'created_at',
                  'total_resource_slots',
                  'max_concurrent_sessions',
                  'max_vfolder_count',
                  'max_vfolder_size',
                  'idle_timeout',)
    q = 'query {' \
        ' keypair_resource_policies {' \
        ' $fields' \
        ' }' \
        '}'
    q = q.replace('$fields', ' '.join(fields))
    rqst = Request(cls.session, 'POST', '/admin/graphql')
    rqst.set_json({
        'query': q,
    })
    async with rqst.fetch() as resp:
        data = await resp.json()
        return data['keypair_resource_policies']
Lists the keypair resource policies . You need an admin privilege for this operation .
6,498
def keypair():
    """Show the server-side information of the currently configured access key."""
    # (label, API field name) pairs for the output table.
    fields = [
        ('User ID', 'user_id'),
        ('Access Key', 'access_key'),
        ('Secret Key', 'secret_key'),
        ('Active?', 'is_active'),
        ('Admin?', 'is_admin'),
        ('Created At', 'created_at'),
        ('Last Used', 'last_used'),
        ('Res.Policy', 'resource_policy'),
        ('Rate Limit', 'rate_limit'),
        ('Concur.Limit', 'concurrency_limit'),
        ('Concur.Used', 'concurrency_used'),
    ]
    with Session() as session:
        try:
            kp = session.KeyPair(session.config.access_key)
            info = kp.info(fields=(item[1] for item in fields))
        except Exception as e:
            print_error(e)
            sys.exit(1)
        rows = []
        for name, key in fields:
            rows.append((name, info[key]))
        print(tabulate(rows, headers=('Field', 'Value')))
Show the server - side information of the currently configured access key .
6,499
def add(user_id, resource_policy, admin, inactive, rate_limit):
    """Add a new keypair and print its access/secret key pair."""
    try:
        # Numeric user ids are passed as ints; non-numeric stay strings.
        user_id = int(user_id)
    except ValueError:
        pass
    with Session() as session:
        try:
            data = session.KeyPair.create(
                user_id,
                is_active=not inactive,
                is_admin=admin,
                resource_policy=resource_policy,
                rate_limit=rate_limit)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if not data['ok']:
            print_fail('KeyPair creation has failed: {0}'.format(data['msg']))
            sys.exit(1)
        item = data['keypair']
        print('Access Key: {0}'.format(item['access_key']))
        print('Secret Key: {0}'.format(item['secret_key']))
Add a new keypair .