idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
54,200 | def execute_python_script ( self , script ) : file_handle , filename = tempfile . mkstemp ( ) temp_file = os . fdopen ( file_handle , "wt" ) temp_file . write ( script ) temp_file . close ( ) self . put ( filename , "python_execute.py" ) command = [ "python" , "python_execute.py" ] output = self . execute ( command , False ) self . remove ( "python_execute.py" ) os . unlink ( filename ) return output | Execute a python script of the remote server |
54,201 | def __put_dir ( self , ftp , local_path , remote_path = None ) : if remote_path is None : remote_path = os . path . basename ( local_path ) remote_path += self . separator for current_path , directories , files in os . walk ( local_path ) : try : ftp . listdir ( remote_path ) except IOError : ftp . mkdir ( remote_path ) for filename in files : ftp . put ( os . path . join ( current_path , filename ) , remote_path + filename ) for directory in directories : self . __put_dir ( ftp , os . path . join ( current_path , directory ) , remote_path + directory ) | Helper function to perform copy operation to remote server |
54,202 | def remove ( self , remote_path ) : ftp = self . ssh . open_sftp ( ) if stat . S_ISDIR ( ftp . stat ( remote_path ) . st_mode ) : self . __remove_dir ( ftp , remote_path ) else : ftp . remove ( remote_path ) ftp . close ( ) | Delete a file or directory recursively on the remote server |
54,203 | def __remove_dir ( self , ftp , remote_path ) : files = ftp . listdir ( remote_path ) for filename in files : path = remote_path + self . separator + filename try : ftp . remove ( path ) except IOError : self . __remove_dir ( ftp , path ) ftp . rmdir ( remote_path ) | Helper function to perform delete operation on the remote server |
54,204 | def usage ( self ) : usage = self . parser . format_help ( ) . split ( "optional arguments:" ) [ 1 ] return "Remote Options:" + os . linesep + os . linesep . join ( [ s for s in usage . splitlines ( ) if s ] ) | Get the usage for the remote exectuion options |
54,205 | def jwt_required ( realm = None ) : def wrapper ( fn ) : @ wraps ( fn ) def decorator ( * args , ** kwargs ) : _jwt_required ( realm or current_app . config [ 'JWT_DEFAULT_REALM' ] ) return fn ( * args , ** kwargs ) return decorator return wrapper | View decorator that requires a valid JWT token to be present in the request |
54,206 | def auth_request_handler ( self , callback ) : warnings . warn ( "This handler is deprecated. The recommended approach to have control over " "the authentication resource is to disable the built-in resource by " "setting JWT_AUTH_URL_RULE=None and registering your own authentication " "resource directly on your application." , DeprecationWarning , stacklevel = 2 ) self . auth_request_callback = callback return callback | Specifies the authentication response handler function . |
54,207 | def _svg_path ( self , pathcodes , data ) : def gen_path_elements ( pathcodes , data ) : counts = { 'M' : 1 , 'L' : 1 , 'C' : 3 , 'Z' : 0 } it = iter ( data ) for code in pathcodes : yield code for _ in range ( counts [ code ] ) : p = next ( it ) yield str ( p [ 0 ] ) yield str ( p [ 1 ] ) return ' ' . join ( gen_path_elements ( pathcodes , data ) ) | Return the SVG path s d element . |
54,208 | def fig_to_html ( fig = None , template = 'base.html' , tiles = None , crs = None , epsg = None , embed_links = False , float_precision = 6 ) : if tiles is None : tiles = maptiles . osm elif isinstance ( tiles , six . string_types ) : if tiles not in maptiles . tiles : raise ValueError ( 'Unknown tile source "{}"' . format ( tiles ) ) else : tiles = maptiles . tiles [ tiles ] template = env . get_template ( template ) if fig is None : fig = plt . gcf ( ) dpi = fig . get_dpi ( ) renderer = LeafletRenderer ( crs = crs , epsg = epsg ) exporter = Exporter ( renderer ) exporter . run ( fig ) attribution = _attribution + ' | ' + tiles [ 1 ] mapid = str ( uuid . uuid4 ( ) ) . replace ( '-' , '' ) FloatEncoder . _formatter = ".{}f" . format ( float_precision ) gjdata = json . dumps ( renderer . geojson ( ) , cls = FloatEncoder ) params = { 'geojson' : gjdata , 'width' : fig . get_figwidth ( ) * dpi , 'height' : fig . get_figheight ( ) * dpi , 'mapid' : mapid , 'tile_url' : tiles [ 0 ] , 'attribution' : attribution , 'links' : [ _leaflet_js , _leaflet_css ] , 'embed_links' : embed_links , } html = template . render ( params ) return html | Convert a Matplotlib Figure to a Leaflet map |
54,209 | def fig_to_geojson ( fig = None , ** kwargs ) : if fig is None : fig = plt . gcf ( ) renderer = LeafletRenderer ( ** kwargs ) exporter = Exporter ( renderer ) exporter . run ( fig ) return renderer . geojson ( ) | Returns a figure s GeoJSON representation as a dictionary |
54,210 | def display ( fig = None , closefig = True , ** kwargs ) : from IPython . display import HTML if fig is None : fig = plt . gcf ( ) if closefig : plt . close ( fig ) html = fig_to_html ( fig , ** kwargs ) iframe_html = '<iframe src="data:text/html;base64,{html}" width="{width}" height="{height}"></iframe>' . format ( html = base64 . b64encode ( html . encode ( 'utf8' ) ) . decode ( 'utf8' ) , width = '100%' , height = int ( 60. * fig . get_figheight ( ) ) , ) return HTML ( iframe_html ) | Convert a Matplotlib Figure to a Leaflet map . Embed in IPython notebook . |
54,211 | def show ( fig = None , path = '_map.html' , ** kwargs ) : import webbrowser fullpath = os . path . abspath ( path ) with open ( fullpath , 'w' ) as f : save_html ( fig , fileobj = f , ** kwargs ) webbrowser . open ( 'file://' + fullpath ) | Convert a Matplotlib Figure to a Leaflet map . Open in a browser |
54,212 | def create_incident ( ** kwargs ) : incidents = cachet . Incidents ( endpoint = ENDPOINT , api_token = API_TOKEN ) if 'component_id' in kwargs : return incidents . post ( name = kwargs [ 'name' ] , message = kwargs [ 'message' ] , status = kwargs [ 'status' ] , component_id = kwargs [ 'component_id' ] , component_status = kwargs [ 'component_status' ] ) else : return incidents . post ( name = kwargs [ 'name' ] , message = kwargs [ 'message' ] , status = kwargs [ 'status' ] ) | Creates an incident |
54,213 | def incident_exists ( name , message , status ) : incidents = cachet . Incidents ( endpoint = ENDPOINT ) all_incidents = json . loads ( incidents . get ( ) ) for incident in all_incidents [ 'data' ] : if name == incident [ 'name' ] and status == incident [ 'status' ] and message . strip ( ) == incident [ 'message' ] . strip ( ) : return True return False | Check if an incident with these attributes already exists |
54,214 | def get_component ( id ) : components = cachet . Components ( endpoint = ENDPOINT ) component = json . loads ( components . get ( id = id ) ) return component [ 'data' ] | Gets a Cachet component by id |
54,215 | def api_token_required ( f , * args , ** kwargs ) : try : if args [ 0 ] . api_token is None : raise AttributeError ( 'Parameter api_token is required.' ) except AttributeError : raise AttributeError ( 'Parameter api_token is required.' ) return f ( * args , ** kwargs ) | Decorator helper function to ensure some methods aren t needlessly called without an api_token configured . |
54,216 | def is_true ( self , item = None ) : if item : values = [ item ] else : values = [ ] self . _get_item_and_att_names ( * values ) return self . _passes_all | If you are filtering on object values you need to pass that object here . |
54,217 | def new_from_url ( cls , url , verify = True ) : response = requests . get ( url , verify = verify , timeout = 2.5 ) return cls . new_from_response ( response ) | Constructs a new WebPage object for the URL using the requests module to fetch the HTML . |
54,218 | def new_from_response ( cls , response ) : return cls ( response . url , html = response . text , headers = response . headers ) | Constructs a new WebPage object for the response using the BeautifulSoup module to parse the HTML . |
54,219 | def _prepare_app ( self , app ) : for key in [ 'url' , 'html' , 'script' , 'implies' ] : try : value = app [ key ] except KeyError : app [ key ] = [ ] else : if not isinstance ( value , list ) : app [ key ] = [ value ] for key in [ 'headers' , 'meta' ] : try : value = app [ key ] except KeyError : app [ key ] = { } obj = app [ 'meta' ] if not isinstance ( obj , dict ) : app [ 'meta' ] = { 'generator' : obj } for key in [ 'headers' , 'meta' ] : obj = app [ key ] app [ key ] = { k . lower ( ) : v for k , v in obj . items ( ) } for key in [ 'url' , 'html' , 'script' ] : app [ key ] = [ self . _prepare_pattern ( pattern ) for pattern in app [ key ] ] for key in [ 'headers' , 'meta' ] : obj = app [ key ] for name , pattern in obj . items ( ) : obj [ name ] = self . _prepare_pattern ( obj [ name ] ) | Normalize app data preparing it for the detection phase . |
54,220 | def _has_app ( self , app , webpage ) : for regex in app [ 'url' ] : if regex . search ( webpage . url ) : return True for name , regex in app [ 'headers' ] . items ( ) : if name in webpage . headers : content = webpage . headers [ name ] if regex . search ( content ) : return True for regex in app [ 'script' ] : for script in webpage . scripts : if regex . search ( script ) : return True for name , regex in app [ 'meta' ] . items ( ) : if name in webpage . meta : content = webpage . meta [ name ] if regex . search ( content ) : return True for regex in app [ 'html' ] : if regex . search ( webpage . html ) : return True | Determine whether the web page matches the app signature . |
54,221 | def _get_implied_apps ( self , detected_apps ) : def __get_implied_apps ( apps ) : _implied_apps = set ( ) for app in apps : try : _implied_apps . update ( set ( self . apps [ app ] [ 'implies' ] ) ) except KeyError : pass return _implied_apps implied_apps = __get_implied_apps ( detected_apps ) all_implied_apps = set ( ) while not all_implied_apps . issuperset ( implied_apps ) : all_implied_apps . update ( implied_apps ) implied_apps = __get_implied_apps ( all_implied_apps ) return all_implied_apps | Get the set of apps implied by detected_apps . |
54,222 | def get_categories ( self , app_name ) : cat_nums = self . apps . get ( app_name , { } ) . get ( "cats" , [ ] ) cat_names = [ self . categories . get ( "%s" % cat_num , "" ) for cat_num in cat_nums ] return cat_names | Returns a list of the categories for an app name . |
54,223 | def analyze ( self , webpage ) : detected_apps = set ( ) for app_name , app in self . apps . items ( ) : if self . _has_app ( app , webpage ) : detected_apps . add ( app_name ) detected_apps |= self . _get_implied_apps ( detected_apps ) return detected_apps | Return a list of applications that can be detected on the web page . |
54,224 | def analyze_with_categories ( self , webpage ) : detected_apps = self . analyze ( webpage ) categorised_apps = { } for app_name in detected_apps : cat_names = self . get_categories ( app_name ) categorised_apps [ app_name ] = { "categories" : cat_names } return categorised_apps | Return a list of applications and categories that can be detected on the web page . |
54,225 | def clean ( self ) : if self . _initialized : logger . info ( "brace yourselves, removing %r" , self . path ) shutil . rmtree ( self . path ) | remove the directory we operated on |
54,226 | def initialize ( self ) : if not self . _initialized : logger . info ( "initializing %r" , self ) if not os . path . exists ( self . path ) : if self . mode is not None : os . makedirs ( self . path , mode = self . mode ) else : os . makedirs ( self . path ) self . _set_mode ( ) self . _add_facl_rules ( ) self . _set_selinux_context ( ) self . _set_ownership ( ) self . _initialized = True logger . info ( "initialized" ) return logger . info ( "%r was already initialized" , self ) | create the directory if needed and configure it |
54,227 | def _set_selinux_context ( self ) : chcon_command_exists ( ) if self . selinux_context : logger . debug ( "setting SELinux context of %s to %s" , self . path , self . selinux_context ) run_cmd ( [ "chcon" , self . selinux_context , self . path ] ) if any ( [ self . selinux_user , self . selinux_role , self . selinux_type , self . selinux_range ] ) : logger . debug ( "setting SELinux fields of %s" , self . path , self . selinux_context ) pairs = [ ( "-u" , self . selinux_user ) , ( "-r" , self . selinux_role ) , ( "-l" , self . selinux_range ) , ( "-t" , self . selinux_type ) ] c = [ "chcon" ] for p in pairs : if p [ 1 ] : c += p c += [ self . path ] run_cmd ( c ) | Set SELinux context or fields using chcon program . Raises CommandDoesNotExistException if the command is not present on the system . |
54,228 | def _set_mode ( self ) : if self . mode is not None : logger . debug ( "changing permission bits of %s to %s" , self . path , oct ( self . mode ) ) os . chmod ( self . path , self . mode ) | set permission bits if needed using python API os . chmod |
54,229 | def _add_facl_rules ( self ) : setfacl_command_exists ( ) if self . facl_rules : logger . debug ( "adding ACLs %s to %s" , self . facl_rules , self . path ) r = "," . join ( self . facl_rules ) run_cmd ( [ "setfacl" , "-m" , r , self . path ] ) | Apply ACL rules on the directory using setfacl program . Raises CommandDoesNotExistException if the command is not present on the system . |
54,230 | def get_volume_options ( volumes ) : if not isinstance ( volumes , list ) : volumes = [ volumes ] volumes = [ Volume . create_from_tuple ( v ) for v in volumes ] result = [ ] for v in volumes : result += [ "-v" , str ( v ) ] return result | Generates volume options to run methods . |
54,231 | def layers ( self , rev = True ) : image_layers = [ PodmanImage ( None , identifier = x , pull_policy = PodmanImagePullPolicy . NEVER ) for x in self . get_layer_ids ( ) ] if not rev : image_layers . reverse ( ) return image_layers | Get list of PodmanImage for every layer in image |
54,232 | def get_metadata ( self ) : if self . _metadata is None : self . _metadata = ImageMetadata ( ) inspect_to_metadata ( self . _metadata , self . inspect ( refresh = True ) ) return self . _metadata | Provide metadata about this image . |
54,233 | def is_running ( self ) : try : return graceful_get ( self . inspect ( refresh = True ) , "State" , "Running" ) except subprocess . CalledProcessError : return False | returns True if the container is running |
54,234 | def is_port_open ( self , port , timeout = 2 ) : addresses = self . get_IPv4s ( ) if not addresses : return False return check_port ( port , host = addresses [ 0 ] , timeout = timeout ) | check if given port is open and receiving connections on container ip_address |
54,235 | def wait_for_port ( self , port , timeout = 10 , ** probe_kwargs ) : Probe ( timeout = timeout , fnc = functools . partial ( self . is_port_open , port ) , ** probe_kwargs ) . run ( ) | block until specified port starts accepting connections raises an exc ProbeTimeout if timeout is reached |
54,236 | def mount ( self , mount_point = None ) : cmd = [ "podman" , "mount" , self . _id or self . get_id ( ) ] output = run_cmd ( cmd , return_output = True ) . rstrip ( "\n\r" ) return output | mount container filesystem |
54,237 | def wait ( self , timeout = None ) : timeout = [ "--interval=%s" % timeout ] if timeout else [ ] cmdline = [ "podman" , "wait" ] + timeout + [ self . _id or self . get_id ( ) ] return run_cmd ( cmdline , return_output = True ) | Block until the container stops then return its exit code . Similar to the podman wait command . |
54,238 | def read_file ( self , file_path ) : try : with open ( self . p ( file_path ) ) as fd : return fd . read ( ) except IOError as ex : logger . error ( "error while accessing file %s: %r" , file_path , ex ) raise ConuException ( "There was an error while accessing file %s: %r" , file_path , ex ) | read file specified via file_path and return its content - raises an ConuException if there is an issue accessing the file |
54,239 | def get_file ( self , file_path , mode = "r" ) : return open ( self . p ( file_path ) , mode = mode ) | provide File object specified via file_path |
54,240 | def file_is_present ( self , file_path ) : p = self . p ( file_path ) if not os . path . exists ( p ) : return False if not os . path . isfile ( p ) : raise IOError ( "%s is not a file" % file_path ) return True | check if file file_path is present raises IOError if file_path is not a file |
54,241 | def directory_is_present ( self , directory_path ) : p = self . p ( directory_path ) if not os . path . exists ( p ) : return False if not os . path . isdir ( p ) : raise IOError ( "%s is not a directory" % directory_path ) return True | check if directory directory_path is present raise IOError if it s not a directory |
54,242 | def get_selinux_context ( self , file_path ) : p = self . p ( file_path ) if not HAS_XATTR : raise RuntimeError ( "'xattr' python module is not available, hence we cannot " "determine the SELinux context for this file. " "In Fedora this module is available as python3-pyxattr -- " "other distributions may follow similar naming scheme." ) return xattr . get ( p , "security.selinux" ) | Get SELinux file context of the selected file . |
54,243 | def _wrapper ( self , q , start ) : try : func_name = self . fnc . __name__ except AttributeError : func_name = str ( self . fnc ) logger . debug ( "Running \"%s\" with parameters: \"%s\":\t%s/%s" % ( func_name , str ( self . kwargs ) , round ( time . time ( ) - start ) , self . timeout ) ) try : result = self . fnc ( ** self . kwargs ) logger . debug ( "callback result = %s" , str ( result ) [ : 50 ] ) q . put ( result ) except self . expected_exceptions as ex : logger . debug ( "expected exception was caught: %s" , ex ) q . put ( False ) except Exception as ex : logger . debug ( "adding exception %s to queue" , ex ) q . put ( ex ) | _wrapper checks return status of Probe . fnc and provides the result for process managing |
54,244 | def transport_param ( image ) : transports = { SkopeoTransport . CONTAINERS_STORAGE : "containers-storage:" , SkopeoTransport . DIRECTORY : "dir:" , SkopeoTransport . DOCKER : "docker://" , SkopeoTransport . DOCKER_ARCHIVE : "docker-archive" , SkopeoTransport . DOCKER_DAEMON : "docker-daemon:" , SkopeoTransport . OCI : "oci:" , SkopeoTransport . OSTREE : "ostree:" } transport = image . transport tag = image . tag repository = image . name path = image . path if not transport : transport = SkopeoTransport . DOCKER command = transports [ transport ] path_required = [ SkopeoTransport . DIRECTORY , SkopeoTransport . DOCKER_ARCHIVE , SkopeoTransport . OCI ] if transport in path_required and path is None : raise ValueError ( transports [ transport ] + " path is required to be specified" ) if transport == SkopeoTransport . DIRECTORY : return command + path if transport == SkopeoTransport . DOCKER_ARCHIVE : command += path if repository is None : return command command += ":" if transport in [ SkopeoTransport . CONTAINERS_STORAGE , SkopeoTransport . DOCKER , SkopeoTransport . DOCKER_ARCHIVE , transport . DOCKER_DAEMON ] : return command + repository + ":" + tag if transport == SkopeoTransport . OCI : return command + path + ":" + tag if transport == SkopeoTransport . OSTREE : return command + repository + ( "@" + path if path else "" ) raise ConuException ( "This transport is not supported" ) | Parse DockerImage info into skopeo parameter |
54,245 | def is_running ( self ) : cmd = [ "machinectl" , "--no-pager" , "status" , self . name ] try : subprocess . check_call ( cmd ) return True except subprocess . CalledProcessError as ex : logger . info ( "nspawn container %s is not running probably: %s" , self . name , ex . output ) return False | return True when container is running otherwise return False |
54,246 | def copy_from ( self , src , dest ) : logger . debug ( "copying %s from host to container at %s" , src , dest ) cmd = [ "machinectl" , "--no-pager" , "copy-from" , self . name , src , dest ] run_cmd ( cmd ) | copy a file or a directory from container or image to host system . |
54,247 | def delete ( self , force = False , volumes = False ) : try : self . image . rmi ( ) except ConuException as ime : if not force : raise ime else : pass | delete underlying image |
54,248 | def cleanup ( self , force = False , delete = False ) : try : self . stop ( ) except subprocess . CalledProcessError as stop : logger . debug ( "unable to stop container via stop" , stop ) if not force : raise stop try : self . kill ( ) except subprocess . CalledProcessError as kill : logger . debug ( "unable to stop container via kill" , kill ) pass if delete : self . delete ( force = force ) | Stop container and delete image if given param delete |
54,249 | def run_systemdrun ( self , command , internal_background = False , return_full_dict = False , ** kwargs ) : internalkw = deepcopy ( kwargs ) or { } original_ignore_st = internalkw . get ( "ignore_status" , False ) original_return_st = internalkw . get ( "return_output" , False ) internalkw [ "ignore_status" ] = True internalkw [ "return_output" ] = False unit_name = constants . CONU_ARTIFACT_TAG + "unit_" + random_str ( ) opts = [ "-M" , self . name , "--unit" , unit_name ] lpath = "/var/tmp/{}" . format ( unit_name ) comout = { } if self . _run_systemdrun_decide ( ) : add_wait_var = "--wait" else : add_wait_var = "-r" if internal_background : add_wait_var = "" if add_wait_var : opts . append ( add_wait_var ) bashworkaround = [ "/bin/bash" , "-c" , "({comm})>{path}.stdout 2>{path}.stderr" . format ( comm = " " . join ( command ) , path = lpath ) ] whole_cmd = [ "systemd-run" ] + opts + bashworkaround comout [ 'command' ] = command comout [ 'return_code' ] = run_cmd ( whole_cmd , ** internalkw ) or 0 if not internal_background : if not self . _run_systemdrun_decide ( ) : comout [ 'return_code' ] = self . _systemctl_wait_until_finish ( self . name , unit_name ) if self . is_running ( ) : self . copy_from ( "{pin}.stdout" . format ( pin = lpath ) , "{pin}.stdout" . format ( pin = lpath ) ) with open ( "{pin}.stdout" . format ( pin = lpath ) ) as f : comout [ 'stdout' ] = f . read ( ) self . copy_from ( "{pin}.stderr" . format ( pin = lpath ) , "{pin}.stderr" . format ( pin = lpath ) ) with open ( "{pin}.stderr" . format ( pin = lpath ) ) as f : comout [ 'stderr' ] = f . read ( ) logger . debug ( comout ) if not original_ignore_st and comout [ 'return_code' ] != 0 : raise subprocess . CalledProcessError ( comout [ 'command' ] , comout ) if return_full_dict : return comout if original_return_st : return comout [ 'stdout' ] else : return comout [ 'return_code' ] | execute command via systemd - run inside container |
54,250 | def _wait_for_machine_booted ( name , suffictinet_texts = None ) : suffictinet_texts = suffictinet_texts or [ "systemd-logind" ] for foo in range ( constants . DEFAULT_RETRYTIMEOUT ) : time . sleep ( constants . DEFAULT_SLEEP ) out = run_cmd ( [ "machinectl" , "--no-pager" , "status" , name ] , ignore_status = True , return_output = True ) for restr in suffictinet_texts : if restr in out : time . sleep ( constants . DEFAULT_SLEEP ) return True raise ConuException ( "Unable to start machine %s within %d (machinectl status command dos not contain %s)" % ( name , constants . DEFAULT_RETRYTIMEOUT , suffictinet_texts ) ) | Internal method wait until machine is ready in common case means there is running systemd - logind |
54,251 | def _internal_reschedule ( callback , retry = 3 , sleep_time = constants . DEFAULT_SLEEP ) : for foo in range ( retry ) : container_process = callback [ 0 ] ( callback [ 1 ] , * callback [ 2 ] , ** callback [ 3 ] ) time . sleep ( sleep_time ) container_process . poll ( ) rcode = container_process . returncode if rcode is None : return container_process raise ConuException ( "Unable to start nspawn container - process failed for {}-times" . format ( retry ) ) | workaround method for internal_run_container method It sometimes fails because of Dbus or whatever so try to start it moretimes |
54,252 | def internal_run_container ( name , callback_method , foreground = False ) : if not foreground : logger . info ( "Stating machine (boot nspawn container) {}" . format ( name ) ) nspawn_process = NspawnContainer . _internal_reschedule ( callback_method ) NspawnContainer . _wait_for_machine_booted ( name ) logger . info ( "machine: %s starting finished" % name ) return nspawn_process else : logger . info ( "Stating machine (return process) {}" . format ( name ) ) return callback_method [ 0 ] ( callback_method [ 1 ] , * callback_method [ 2 ] , ** callback_method [ 3 ] ) | Internal method what runs container process |
54,253 | def get_container_output ( backend , image_name , command , image_tag = "latest" , additional_opts = None ) : image = backend . ImageClass ( image_name , tag = image_tag ) c = image . run_via_binary ( DockerRunBuilder ( command = command , additional_opts = additional_opts ) ) try : c . wait ( ) return c . logs_unicode ( ) finally : c . stop ( ) c . wait ( ) c . delete ( ) | Create a throw - away container based on provided image and tag run the supplied command in it and return output . The container is stopped and removed after it exits . |
54,254 | def pull ( self ) : for json_e in self . d . pull ( repository = self . name , tag = self . tag , stream = True , decode = True ) : logger . debug ( json_e ) status = graceful_get ( json_e , "status" ) if status : logger . info ( status ) else : error = graceful_get ( json_e , "error" ) logger . error ( status ) raise ConuException ( "There was an error while pulling the image %s: %s" , self . name , error ) self . using_transport ( SkopeoTransport . DOCKER_DAEMON ) | Pull this image from registry . Raises an exception if the image is not found in the registry . |
54,255 | def using_transport ( self , transport = None , path = None , logs = True ) : if not transport : return self if self . transport == transport and self . path == path : return self path_required = [ SkopeoTransport . DIRECTORY , SkopeoTransport . DOCKER_ARCHIVE , SkopeoTransport . OCI ] if transport in path_required : if not path and logs : logging . debug ( "path not provided, temporary path was used" ) self . path = self . mount ( path ) . mount_point elif transport == SkopeoTransport . OSTREE : if path and not os . path . isabs ( path ) : raise ConuException ( "Path '" , path , "' for OSTree transport is not absolute" ) if not path and logs : logging . debug ( "path not provided, default /ostree/repo path was used" ) self . path = path else : if path and logs : logging . warning ( "path %s was ignored!" , path ) self . path = None self . transport = transport return self | change used transport |
54,256 | def save_to ( self , image ) : if not isinstance ( image , self . __class__ ) : raise ConuException ( "Invalid target image type" , type ( image ) ) self . copy ( image . name , image . tag , target_transport = image . transport , target_path = image . path , logs = False ) | Save this image to another DockerImage |
54,257 | def load_from ( self , image ) : if not isinstance ( image , self . __class__ ) : raise ConuException ( "Invalid source image type" , type ( image ) ) image . save_to ( self ) | Load from another DockerImage to this one |
54,258 | def skopeo_pull ( self ) : return self . copy ( self . name , self . tag , SkopeoTransport . DOCKER , SkopeoTransport . DOCKER_DAEMON ) . using_transport ( SkopeoTransport . DOCKER_DAEMON ) | Pull image from Docker to local Docker daemon using skopeo |
54,259 | def skopeo_push ( self , repository = None , tag = None ) : return self . copy ( repository , tag , SkopeoTransport . DOCKER_DAEMON , SkopeoTransport . DOCKER ) . using_transport ( SkopeoTransport . DOCKER ) | Push image from Docker daemon to Docker using skopeo |
54,260 | def copy ( self , repository = None , tag = None , source_transport = None , target_transport = SkopeoTransport . DOCKER , source_path = None , target_path = None , logs = True ) : if not repository : repository = self . name if not tag : tag = self . tag if self . tag else "latest" if target_transport == SkopeoTransport . OSTREE and tag and logs : logging . warning ( "tag was ignored" ) target = ( DockerImage ( repository , tag , pull_policy = DockerImagePullPolicy . NEVER ) . using_transport ( target_transport , target_path ) ) self . using_transport ( source_transport , source_path ) try : run_cmd ( [ "skopeo" , "copy" , transport_param ( self ) , transport_param ( target ) ] ) except subprocess . CalledProcessError : raise ConuException ( "There was an error while copying repository" , self . name ) return target | Copy this image |
54,261 | def tag_image ( self , repository = None , tag = None ) : if not ( repository or tag ) : raise ValueError ( "You need to specify either repository or tag." ) r = repository or self . name t = "latest" if not tag else tag self . d . tag ( image = self . get_full_name ( ) , repository = r , tag = t ) return DockerImage ( r , tag = t ) | Apply additional tags to the image or even add a new name |
54,262 | def inspect ( self , refresh = True ) : if refresh or not self . _inspect_data : identifier = self . _id or self . get_full_name ( ) if not identifier : raise ConuException ( "This image does not have a valid identifier." ) self . _inspect_data = self . d . inspect_image ( identifier ) return self . _inspect_data | provide metadata about the image ; flip refresh = True if cached metadata are enough |
54,263 | def has_pkgs_signed_with ( self , allowed_keys ) : if not allowed_keys or not isinstance ( allowed_keys , list ) : raise ConuException ( "allowed_keys must be a list" ) command = [ 'rpm' , '-qa' , '--qf' , '%{name} %{SIGPGP:pgpsig}\n' ] cont = self . run_via_binary ( command = command ) try : out = cont . logs_unicode ( ) [ : - 1 ] . split ( '\n' ) check_signatures ( out , allowed_keys ) finally : cont . stop ( ) cont . delete ( ) return True | Check signature of packages installed in image . Raises exception when |
54,264 | def build ( cls , path , tag = None , dockerfile = None ) : if not path : raise ConuException ( 'Please specify path to the directory containing the Dockerfile' ) client = get_client ( ) response = [ line for line in client . build ( path , rm = True , tag = tag , dockerfile = dockerfile , quiet = True ) ] if not response : raise ConuException ( 'Failed to get ID of image' ) if len ( response ) > 1 : raise ConuException ( 'Build failed: ' + str ( response ) ) response_utf = response [ 0 ] . decode ( 'utf-8' ) if response_utf [ : 11 ] != '{"stream":"' or response_utf [ - 6 : ] != '\\n"}\r\n' : raise ConuException ( 'Failed to parse ID from ' + response_utf ) image_id = response_utf [ 11 : - 6 ] return cls ( None , identifier = image_id ) | Build the image from the provided dockerfile in path |
54,265 | def layers ( self , rev = True ) : image_layers = [ DockerImage ( None , identifier = x , pull_policy = DockerImagePullPolicy . NEVER ) for x in self . get_layer_ids ( ) ] if not rev : image_layers . reverse ( ) return image_layers | Get list of DockerImage for every layer in image |
54,266 | def extend ( self , source , new_image_name , s2i_args = None ) : s2i_args = s2i_args or [ ] c = self . _s2i_command ( [ "build" ] + s2i_args + [ source , self . get_full_name ( ) ] ) if new_image_name : c . append ( new_image_name ) try : run_cmd ( c ) except subprocess . CalledProcessError as ex : raise ConuException ( "s2i build failed: %s" % ex ) return S2IDockerImage ( new_image_name ) | extend this s2i - enabled image using provided source raises ConuException if s2i build fails |
54,267 | def usage ( self ) : c = self . _s2i_command ( [ "usage" , self . get_full_name ( ) ] ) with open ( os . devnull , "w" ) as fd : process = subprocess . Popen ( c , stdout = fd , stderr = subprocess . PIPE ) _ , output = process . communicate ( ) retcode = process . poll ( ) if retcode : raise ConuException ( "`s2i usage` failed: %s" % output ) return output . decode ( "utf-8" ) . strip ( ) | Provide output of s2i usage |
54,268 | def http_request ( self , path = "/" , method = "GET" , host = None , port = None , json = False , data = None ) : host = host or '127.0.0.1' port = port or 8080 url = get_url ( host = host , port = port , path = path ) return self . http_session . request ( method , url , json = json , data = data ) | perform a HTTP request |
54,269 | def system_requirements ( ) : command_exists ( "systemd-nspawn" , [ "systemd-nspawn" , "--version" ] , "Command systemd-nspawn does not seems to be present on your system" "Do you have system with systemd" ) command_exists ( "machinectl" , [ "machinectl" , "--no-pager" , "--help" ] , "Command machinectl does not seems to be present on your system" "Do you have system with systemd" ) if "Enforcing" in run_cmd ( [ "getenforce" ] , return_output = True , ignore_status = True ) : logger . error ( "Please disable selinux (setenforce 0), selinux blocks some nspawn operations" "This may lead to strange behaviour" ) | Check if all necessary packages are installed on system |
54,270 | def _generate_id ( self ) : name = self . name . replace ( self . special_separator , "-" ) . replace ( "." , "-" ) loc = "\/" if self . location : loc = self . location _id = "{PREFIX}{SEP}{NAME}{HASH}{SEP}" . format ( PREFIX = constants . CONU_ARTIFACT_TAG , NAME = name , HASH = hashlib . sha512 ( loc ) . hexdigest ( ) [ : 10 ] , SEP = self . special_separator ) return _id | create new unique identifier |
54,271 | def pull ( self ) : if not os . path . exists ( CONU_IMAGES_STORE ) : os . makedirs ( CONU_IMAGES_STORE ) logger . debug ( "Try to pull: {} -> {}" . format ( self . location , self . local_location ) ) if not self . _is_local ( ) : compressed_location = self . local_location + ".xz" run_cmd ( [ "curl" , "-f" , "-L" , "-o" , compressed_location , self . location ] ) run_cmd ( [ "xz" , "-d" , compressed_location ] ) else : if self . location . endswith ( "xz" ) : compressed_location = self . local_location + ".xz" run_cmd ( [ "cp" , self . location , compressed_location ] ) run_cmd ( [ "xz" , "-d" , compressed_location ] ) else : run_cmd ( [ "cp" , self . location , self . local_location ] ) | Pull this image from URL . |
54,272 | def run_via_binary ( self , command = None , foreground = False , volumes = None , additional_opts = None , default_options = None , name = None , * args , ** kwargs ) : command = deepcopy ( command ) or [ ] volumes = deepcopy ( volumes ) or [ ] additional_opts = deepcopy ( additional_opts ) or [ ] internalkw = deepcopy ( kwargs ) or { } inernalargs = deepcopy ( args ) or [ ] if default_options is None : default_options = [ "-b" ] logger . info ( "run container via binary in background" ) machine_name = constants . CONU_ARTIFACT_TAG if name : machine_name += name else : machine_name += random_str ( ) if not foreground : internalkw [ "stdout" ] = subprocess . PIPE internalkw [ "stderr" ] = subprocess . PIPE additional_opts += default_options if volumes : additional_opts += self . get_volume_options ( volumes = volumes ) logger . debug ( "starting NSPAWN" ) systemd_command = [ "systemd-nspawn" , "--machine" , machine_name , "-i" , self . local_location ] + additional_opts + command logger . debug ( "Start command: %s" % " " . join ( systemd_command ) ) callback_method = ( subprocess . Popen , systemd_command , inernalargs , internalkw ) self . container_process = NspawnContainer . internal_run_container ( name = machine_name , callback_method = callback_method , foreground = foreground ) if foreground : return self . container_process else : return NspawnContainer ( self , None , name = machine_name , start_process = self . container_process , start_action = callback_method ) | Create new instance NspawnContianer in case of not running at foreground in case foreground run return process object |
54,273 | def process_rpm_ql_line ( line_str , allowed_keys ) : try : name , key_str = line_str . split ( ' ' , 1 ) except ValueError : logger . error ( "Failed to split line '{0}" . format ( repr ( line_str ) ) ) return False if name in no_key_pkgs : return True if key_str == NONE_KEY : logger . error ( "Unsigned package {0}" . format ( name ) ) return False key_match = re . match ( KEY , key_str ) if not key_match : logger . error ( 'Could not process line "{0}"' . format ( line_str ) ) return False used_key = key_match . group ( 1 ) if used_key in allowed_keys : return True logger . error ( "Wrong key for '{0}' ({1})" . format ( name , used_key ) ) return False | Checks single line of rpm - ql for correct keys |
54,274 | def check_signatures ( pkg_list , allowed_keys ) : all_passed = True for line_str in pkg_list : all_passed &= process_rpm_ql_line ( line_str . strip ( ) , allowed_keys ) if not all_passed : raise PackageSignatureException ( 'Error while checking rpm signatures, see logs for more info' ) | Go through list of packages with signatures and check if all are properly signed |
54,275 | def get_ports ( self ) : ports = [ ] container_ports = self . inspect ( refresh = True ) [ "NetworkSettings" ] [ "Ports" ] if not container_ports : return ports for p in container_ports : ports . append ( p . split ( "/" ) [ 0 ] ) return ports | get ports specified in container metadata |
54,276 | def _clean_tmp_dirs ( self ) : def onerror ( fnc , path , excinfo ) : self . logger . info ( "we were not able to remove temporary file %s: %s" , path , excinfo [ 1 ] ) shutil . rmtree ( self . tmpdir , onerror = onerror ) self . tmpdir = None global _backend_tmpdir _backend_tmpdir = None | Remove temporary dir associated with this backend instance . |
54,277 | def _clean ( self ) : if CleanupPolicy . EVERYTHING in self . cleanup : self . cleanup_containers ( ) self . cleanup_volumes ( ) self . cleanup_images ( ) self . _clean_tmp_dirs ( ) else : if CleanupPolicy . CONTAINERS in self . cleanup : self . cleanup_containers ( ) if CleanupPolicy . VOLUMES in self . cleanup : self . cleanup_volumes ( ) if CleanupPolicy . IMAGES in self . cleanup : self . cleanup_images ( ) if CleanupPolicy . TMP_DIRS in self . cleanup : self . _clean_tmp_dirs ( ) | Method for cleaning according to object cleanup policy value |
54,278 | def list_containers ( self ) : data = run_cmd ( [ "machinectl" , "list" , "--no-legend" , "--no-pager" ] , return_output = True ) output = [ ] reg = re . compile ( r"\s+" ) for line in data . split ( "\n" ) : stripped = line . strip ( ) if stripped : parts = reg . split ( stripped ) name = parts [ 0 ] output . append ( self . ContainerClass ( None , None , name = name ) ) return output | list all available nspawn containers |
54,279 | def list_images ( self ) : data = os . listdir ( CONU_IMAGES_STORE ) output = [ ] for name in data : output . append ( self . ImageClass ( name , pull_policy = ImagePullPolicy . NEVER ) ) return output | list all available nspawn images |
54,280 | def cleanup_containers ( self ) : for cont in self . list_containers ( ) : if CONU_ARTIFACT_TAG in cont . name : try : logger . debug ( "removing container %s created by conu" , cont ) run_cmd ( [ "machinectl" , "terminate" , cont . name ] ) except subprocess . CalledProcessError as e : logger . error ( "unable to remove container %s: %r" , cont , e ) | stop all container created by conu |
54,281 | def check_port ( port , host , timeout = 10 ) : logger . info ( "trying to open connection to %s:%s" , host , port ) sock = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) try : sock . settimeout ( timeout ) result = sock . connect_ex ( ( host , port ) ) logger . info ( "was connection successful? errno: %s" , result ) if result == 0 : logger . debug ( 'port is opened: %s:%s' % ( host , port ) ) return True else : logger . debug ( 'port is closed: %s:%s' % ( host , port ) ) return False finally : sock . close ( ) | connect to port on host and return True on success |
54,282 | def get_selinux_status ( ) : getenforce_command_exists ( ) o = run_cmd ( [ "getenforce" ] , return_output = True ) . strip ( ) logger . debug ( "SELinux is %r" , o ) return o | get SELinux status of host |
54,283 | def random_str ( size = 10 ) : return '' . join ( random . choice ( string . ascii_lowercase ) for _ in range ( size ) ) | create random string of selected size |
54,284 | def run_cmd ( cmd , return_output = False , ignore_status = False , log_output = True , ** kwargs ) : logger . debug ( 'command: "%s"' % ' ' . join ( cmd ) ) process = subprocess . Popen ( cmd , stdout = subprocess . PIPE , stderr = subprocess . STDOUT , universal_newlines = True , ** kwargs ) output = process . communicate ( ) [ 0 ] if log_output : logger . debug ( output ) if process . returncode > 0 : if ignore_status : if return_output : return output else : return process . returncode else : raise subprocess . CalledProcessError ( cmd = cmd , returncode = process . returncode ) if return_output : return output | run provided command on host system using the same user as you invoked this code raises subprocess . CalledProcessError if it fails |
54,285 | def command_exists ( command , noop_invocation , exc_msg ) : try : found = bool ( shutil . which ( command ) ) except AttributeError : try : p = subprocess . Popen ( noop_invocation , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) except OSError : found = False else : stdout , stderr = p . communicate ( ) found = p . returncode == 0 if not found : logger . error ( "`%s` exited with a non-zero return code (%s)" , noop_invocation , p . returncode ) logger . error ( "command stdout = %s" , stdout ) logger . error ( "command stderr = %s" , stderr ) if not found : raise CommandDoesNotExistException ( exc_msg ) return True | Verify that the provided command exists . Raise CommandDoesNotExistException in case of an error or if the command does not exist . |
54,286 | def check_docker_command_works ( ) : try : out = subprocess . check_output ( [ "docker" , "version" ] , stderr = subprocess . STDOUT , universal_newlines = True ) except OSError : logger . info ( "docker binary is not available" ) raise CommandDoesNotExistException ( "docker command doesn't seem to be available on your system. " "Please install and configure docker." ) except subprocess . CalledProcessError as ex : logger . error ( "exception: %s" , ex ) logger . error ( "rc: %s, output: %r" , ex . returncode , ex . output ) raise ConuException ( "`docker version` call failed, it seems that your docker daemon is misconfigured or " "this user can't communicate with dockerd." ) else : logger . info ( "docker environment info: %r" , out ) return True | Verify that dockerd and docker binary works fine . This is performed by calling docker version which also checks server API version . |
54,287 | def export_docker_container_to_directory ( client , container , path ) : check_docker_command_works ( ) export_p = subprocess . Popen ( [ "docker" , "export" , container . get_id ( ) ] , stderr = subprocess . PIPE , stdout = subprocess . PIPE ) try : os . mkdir ( path , 0o0700 ) except OSError as ex : if ex . errno == errno . EEXIST : logger . debug ( "mount point %s exists already" , path ) else : logger . error ( "mount point %s can't be created: %s" , path , ex ) raise logger . debug ( "about to untar the image" ) p = subprocess . Popen ( [ "tar" , "--no-same-owner" , "-C" , path , "-x" ] , stdin = subprocess . PIPE , stderr = subprocess . PIPE , ) while True : data = export_p . stdout . read ( 1048576 ) if not data : break p . stdin . write ( data ) p . stdin . close ( ) p . wait ( ) export_p . wait ( ) if export_p . returncode : logger . error ( export_p . stderr . read ( ) ) raise ConuException ( "Failed to get rootfs of %s from docker." % container ) if p . returncode : logger . error ( p . stderr . read ( ) ) raise ConuException ( "Failed to unpack the archive." ) logger . debug ( "image is unpacked" ) | take selected docker container create an archive out of it and unpack it to a selected location |
54,288 | def get_version ( self ) : raw_version = run_cmd ( [ "podman" , "version" ] , return_output = True ) regex = re . compile ( r"Version:\s*(\d+)\.(\d+)\.(\d+)" ) match = regex . findall ( raw_version ) try : return match [ 0 ] except IndexError : logger . error ( "unable to parse version from `podman version`" ) return | return 3 - tuple of version info or None |
54,289 | def list_containers ( self ) : containers = [ ] for container in self . _list_podman_containers ( ) : identifier = container [ "ID" ] name = container [ "Names" ] image_name = container [ "Image" ] try : image_name , image_tag = parse_reference ( image_name ) except ( IndexError , TypeError ) : image_name , image_tag = None , None image = PodmanImage ( image_name , tag = image_tag , identifier = None ) container = PodmanContainer ( image , identifier , name = name ) containers . append ( container ) return containers | List all available podman containers . |
54,290 | def list_images ( self ) : images = [ ] for image in self . _list_all_podman_images ( ) : try : i_name , tag = parse_reference ( image [ "names" ] [ 0 ] ) except ( IndexError , TypeError ) : i_name , tag = None , None d_im = PodmanImage ( i_name , tag = tag , identifier = image [ "id" ] , pull_policy = PodmanImagePullPolicy . NEVER ) images . append ( d_im ) return images | List all available podman images . |
54,291 | def inspect_to_metadata ( metadata_object , inspect_data ) : identifier = graceful_get ( inspect_data , 'Id' ) if identifier : if ":" in identifier : metadata_object . identifier = identifier . split ( ':' ) [ 1 ] else : metadata_object . identifier = identifier raw_env_vars = graceful_get ( inspect_data , "Config" , "Env" ) or [ ] if raw_env_vars : metadata_object . env_variables = { } for env_variable in raw_env_vars : splits = env_variable . split ( "=" , 1 ) name = splits [ 0 ] value = splits [ 1 ] if len ( splits ) > 1 else None if value is not None : metadata_object . env_variables . update ( { name : value } ) raw_exposed_ports = graceful_get ( inspect_data , "Config" , "ExposedPorts" ) if raw_exposed_ports : metadata_object . exposed_ports = list ( raw_exposed_ports . keys ( ) ) raw_repo_tags = graceful_get ( inspect_data , 'RepoTags' ) if raw_repo_tags : metadata_object . name = raw_repo_tags [ 0 ] metadata_object . labels = graceful_get ( inspect_data , 'Config' , 'Labels' ) metadata_object . command = graceful_get ( inspect_data , 'Config' , 'Cmd' ) metadata_object . creation_timestamp = inspect_data . get ( 'Created' , None ) metadata_object . image_names = inspect_data . get ( 'RepoTags' , None ) digests = inspect_data . get ( "RepoDigests" , None ) if digests : metadata_object . repo_digests = digests metadata_object . digest = digests [ 0 ] return metadata_object | process data from docker inspect and update provided metadata object |
54,292 | def inspect_to_container_metadata ( c_metadata_object , inspect_data , image_instance ) : inspect_to_metadata ( c_metadata_object , inspect_data ) status = ContainerStatus . get_from_docker ( graceful_get ( inspect_data , "State" , "Status" ) , graceful_get ( inspect_data , "State" , "ExitCode" ) , ) image_id = graceful_get ( inspect_data , "Image" ) if image_id : if ":" in image_id : image_instance . identifier = image_id . split ( ':' ) [ 1 ] else : image_instance . identifier = image_id port_mappings = dict ( ) raw_port_mappings = graceful_get ( inspect_data , 'HostConfig' , 'PortBindings' ) or { } for key , value in raw_port_mappings . items ( ) : for item in value : logger . debug ( "parsing ports: key = %s, item = %s" , key , item ) li = port_mappings . get ( key , [ ] ) raw_host_port = item [ 'HostPort' ] if raw_host_port == "" : int_port = None else : try : int_port = int ( raw_host_port ) except ValueError as ex : logger . error ( "could not parse port: %s" , ex ) continue li . append ( int_port ) port_mappings . update ( { key : li } ) c_metadata_object . status = status c_metadata_object . port_mappings = port_mappings c_metadata_object . hostname = graceful_get ( inspect_data , 'Config' , 'Hostname' ) raw_networks = graceful_get ( inspect_data , "NetworkSettings" , "Networks" ) . values ( ) if raw_networks : c_metadata_object . ipv4_addresses = [ graceful_get ( x , "IPAddress" ) for x in raw_networks if graceful_get ( x , "IPAddress" ) ] c_metadata_object . ipv6_addresses = [ graceful_get ( x , "GlobalIPv6Address" ) for x in raw_networks if graceful_get ( x , "GlobalIPv6Address" ) ] c_metadata_object . image = image_instance name = graceful_get ( inspect_data , "Name" ) if name : name = name [ 1 : ] if name . startswith ( "/" ) else name c_metadata_object . name = name return c_metadata_object | process data from docker container inspect and update provided container metadata object |
54,293 | def list_pods ( self , namespace = None ) : if namespace : return [ Pod ( name = p . metadata . name , namespace = namespace , spec = p . spec ) for p in self . core_api . list_namespaced_pod ( namespace , watch = False ) . items ] return [ Pod ( name = p . metadata . name , namespace = p . metadata . namespace , spec = p . spec ) for p in self . core_api . list_pod_for_all_namespaces ( watch = False ) . items ] | List all available pods . |
54,294 | def list_services ( self , namespace = None ) : if namespace : return [ Service ( name = s . metadata . name , ports = k8s_ports_to_metadata_ports ( s . spec . ports ) , namespace = s . metadata . namespace , labels = s . metadata . labels , selector = s . spec . selector , spec = s . spec ) for s in self . core_api . list_namespaced_service ( namespace , watch = False ) . items ] return [ Service ( name = s . metadata . name , ports = k8s_ports_to_metadata_ports ( s . spec . ports ) , namespace = s . metadata . namespace , labels = s . metadata . labels , selector = s . spec . selector , spec = s . spec ) for s in self . core_api . list_service_for_all_namespaces ( watch = False ) . items ] | List all available services . |
54,295 | def list_deployments ( self , namespace = None ) : if namespace : return [ Deployment ( name = d . metadata . name , namespace = d . metadata . namespace , labels = d . metadata . labels , selector = d . spec . selector , image_metadata = ImageMetadata ( name = d . spec . template . spec . containers [ 0 ] . name . split ( "-" , 1 ) [ 0 ] ) ) for d in self . apps_api . list_namespaced_deployment ( namespace , watch = False ) . items ] return [ Deployment ( name = d . metadata . name , namespace = d . metadata . namespace , labels = d . metadata . labels , selector = d . spec . selector , image_metadata = ImageMetadata ( name = d . spec . template . spec . containers [ 0 ] . name . split ( "-" , 1 ) [ 0 ] ) ) for d in self . apps_api . list_deployment_for_all_namespaces ( watch = False ) . items ] | List all available deployments . |
54,296 | def get_url ( path , host , port , method = "http" ) : return urlunsplit ( ( method , "%s:%s" % ( host , port ) , path , "" , "" ) ) | make url from path host and port |
54,297 | def list_containers ( self ) : result = [ ] for c in self . d . containers ( all = True ) : name = None names = c . get ( "Names" , None ) if names : name = names [ 0 ] i = DockerImage ( None , identifier = c [ "ImageID" ] ) cont = DockerContainer ( i , c [ "Id" ] , name = name ) inspect_to_container_metadata ( cont . metadata , c , i ) result . append ( cont ) return result | List all available docker containers . |
54,298 | def list_images ( self ) : response = [ ] for im in self . d . images ( ) : try : i_name , tag = parse_reference ( im [ "RepoTags" ] [ 0 ] ) except ( IndexError , TypeError ) : i_name , tag = None , None d_im = DockerImage ( i_name , tag = tag , identifier = im [ "Id" ] , pull_policy = DockerImagePullPolicy . NEVER ) inspect_to_metadata ( d_im . metadata , im ) response . append ( d_im ) return response | List all available docker images . |
54,299 | def match ( ctx , features , profile , gps_precision ) : access_token = ( ctx . obj and ctx . obj . get ( 'access_token' ) ) or None features = list ( features ) if len ( features ) != 1 : raise click . BadParameter ( "Mapmatching requires a single LineString feature" ) service = mapbox . MapMatcher ( access_token = access_token ) try : res = service . match ( features [ 0 ] , profile = profile , gps_precision = gps_precision ) except mapbox . errors . ValidationError as exc : raise click . BadParameter ( str ( exc ) ) if res . status_code == 200 : stdout = click . open_file ( '-' , 'w' ) click . echo ( res . text , file = stdout ) else : raise MapboxCLIException ( res . text . strip ( ) ) | Mapbox Map Matching API lets you use snap your GPS traces to the OpenStreetMap road and path network . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.