Columns: idx (int64, 0 to 63k), question (string, lengths 61 to 4.03k), target (string, lengths 6 to 1.23k)
6,200
def placeFavicon ( context ) : fav = Favicon . objects . filter ( isFavicon = True ) . first ( ) if not fav : return mark_safe ( '<!-- no favicon -->' ) html = '' for rel in config : for size in sorted ( config [ rel ] , reverse = True ) : n = fav . get_favicon ( size = size , rel = rel ) html += '<link rel="%s" sizes="%sx%s" href="%s"/>' % ( n . rel , n . size , n . size , n . faviconImage . url ) default_fav = fav . get_favicon ( size = 32 , rel = 'shortcut icon' ) html += '<link rel="%s" sizes="%sx%s" href="%s"/>' % ( default_fav . rel , default_fav . size , default_fav . size , default_fav . faviconImage . url ) return mark_safe ( html )
Gets the Favicon URL for the Model .
6,201
def set_default_theme ( theme ) : pref_init ( ) parser = cp . ConfigParser ( ) parser . read ( PREFS_FILE ) if not parser . has_section ( "theme" ) : parser . add_section ( "theme" ) parser . set ( "theme" , "default" , theme ) with open ( "%s.2" % PREFS_FILE , "w" ) as fp : parser . write ( fp ) copy ( "%s.2" % PREFS_FILE , PREFS_FILE ) unlink ( "%s.2" % PREFS_FILE , )
Set the default theme name in the config file .
6,202
def pick_theme ( manual ) : if manual : return manual pref_init ( ) parser = cp . ConfigParser ( ) parser . read ( PREFS_FILE ) try : theme = parser . get ( "theme" , "default" ) except ( cp . NoSectionError , cp . NoOptionError ) : theme = "plain" return theme
Return the theme name based on manual input, the prefs file, or default to plain .
6,203
def install_theme ( path_to_theme ) : pref_init ( ) filename = basename ( path_to_theme ) dest = join ( THEMES_DIR , filename ) copy ( path_to_theme , dest ) zf = zipfile . ZipFile ( dest ) zf . extractall ( THEMES_DIR ) unlink ( dest )
Pass a path to a theme file which will be extracted to the themes directory .
6,204
def main ( ) : args = docopt ( __doc__ , version = "cdk" ) if args [ 'FILE' ] : out = output_file ( args [ 'FILE' ] ) theme = pick_theme ( args [ '--theme' ] ) if theme not in listdir ( THEMES_DIR ) : exit ( 'Selected theme "%s" not found. Check ~/.cdk/prefs' % theme ) cmd = create_command ( theme , args [ '--bare' ] , args [ '--toc' ] , args [ '--notransition' ] , args [ '--logo' ] ) run_command ( cmd , args ) if args [ '--toc' ] : add_css ( out , '.deck-container .deck-toc li a span{color: #888;display:inline;}' ) if args [ '--custom-css' ] : add_css_file ( out , args [ '--custom-css' ] ) if args [ '--open' ] : webbrowser . open ( "file://" + abspath ( out ) ) elif args [ '--generate' ] : if isfile ( args [ '--generate' ] ) : exit ( "%s already exists!" % args [ '--generate' ] ) with open ( args [ '--generate' ] , "w" ) as fp : sample = join ( LOCATION , "custom" , "sample.asc" ) fp . write ( open ( sample ) . read ( ) ) print ( "Created sample slide deck in %s..." % args [ '--generate' ] ) exit ( ) elif args [ '--install-theme' ] : path = args [ '--install-theme' ] if not isfile ( path ) : exit ( "Theme file not found." ) if not path . endswith ( ".zip" ) : exit ( "Theme installation currently only supports theme install from " ".zip files." ) install_theme ( path ) elif args [ '--default-theme' ] : set_default_theme ( args [ '--default-theme' ] )
Entry point for choosing what subcommand to run . Really should be using asciidocapi
6,205
def separate_resources ( self ) : self . _separate_hdxobjects ( self . resources , 'resources' , 'name' , hdx . data . resource . Resource )
Move the contents of the resources key in the internal dictionary into self.resources
6,206
def add_update_resources ( self , resources , ignore_datasetid = False ) : if not isinstance ( resources , list ) : raise HDXError ( 'Resources should be a list!' ) for resource in resources : self . add_update_resource ( resource , ignore_datasetid )
Add new or update existing resources with new metadata to the dataset
6,207
def delete_resource ( self , resource , delete = True ) : if isinstance ( resource , str ) : if is_valid_uuid ( resource ) is False : raise HDXError ( '%s is not a valid resource id!' % resource ) return self . _remove_hdxobject ( self . resources , resource , delete = delete )
Delete a resource from the dataset and also from HDX by default
6,208
def reorder_resources ( self , resource_ids , hxl_update = True ) : dataset_id = self . data . get ( 'id' ) if not dataset_id : raise HDXError ( 'Dataset has no id! It must be read, created or updated first.' ) data = { 'id' : dataset_id , 'order' : resource_ids } self . _write_to_hdx ( 'reorder' , data , 'package_id' ) if hxl_update : self . hxl_update ( )
Reorder resources in dataset according to provided list . If only some resource ids are supplied then these are assumed to be first and the other resources will stay in their original order .
6,209
def update_from_yaml ( self , path = join ( 'config' , 'hdx_dataset_static.yml' ) ) : super ( Dataset , self ) . update_from_yaml ( path ) self . separate_resources ( )
Update dataset metadata with static metadata from YAML file
6,210
def update_from_json ( self , path = join ( 'config' , 'hdx_dataset_static.json' ) ) : super ( Dataset , self ) . update_from_json ( path ) self . separate_resources ( )
Update dataset metadata with static metadata from JSON file
6,211
def read_from_hdx ( identifier , configuration = None ) : dataset = Dataset ( configuration = configuration ) result = dataset . _dataset_load_from_hdx ( identifier ) if result : return dataset return None
Reads the dataset given by identifier from HDX and returns Dataset object
6,212
def _dataset_create_resources ( self ) : if 'resources' in self . data : self . old_data [ 'resources' ] = self . _copy_hdxobjects ( self . resources , hdx . data . resource . Resource , 'file_to_upload' ) self . init_resources ( ) self . separate_resources ( )
Creates resource objects in dataset
6,213
def _dataset_load_from_hdx ( self , id_or_name ) : if not self . _load_from_hdx ( 'dataset' , id_or_name ) : return False self . _dataset_create_resources ( ) return True
Loads the dataset given by either id or name from HDX
6,214
def check_required_fields ( self , ignore_fields = list ( ) , allow_no_resources = False ) : if self . is_requestable ( ) : self . _check_required_fields ( 'dataset-requestable' , ignore_fields ) else : self . _check_required_fields ( 'dataset' , ignore_fields ) if len ( self . resources ) == 0 and not allow_no_resources : raise HDXError ( 'There are no resources! Please add at least one resource!' ) for resource in self . resources : ignore_fields = [ 'package_id' ] resource . check_required_fields ( ignore_fields = ignore_fields )
Check that metadata for dataset and its resources is complete . The parameter ignore_fields should be set if required to any fields that should be ignored for the particular operation .
6,215
def _dataset_merge_filestore_resource ( self , resource , updated_resource , filestore_resources , ignore_fields ) : if updated_resource . get_file_to_upload ( ) : resource . set_file_to_upload ( updated_resource . get_file_to_upload ( ) ) filestore_resources . append ( resource ) merge_two_dictionaries ( resource , updated_resource ) resource . check_required_fields ( ignore_fields = ignore_fields ) if resource . get_file_to_upload ( ) : resource [ 'url' ] = Dataset . temporary_url
Helper method to merge an updated resource from the dataset into the corresponding resource read from HDX, including filestore handling .
6,216
def _dataset_merge_filestore_newresource ( self , new_resource , ignore_fields , filestore_resources ) : new_resource . check_required_fields ( ignore_fields = ignore_fields ) self . resources . append ( new_resource ) if new_resource . get_file_to_upload ( ) : filestore_resources . append ( new_resource ) new_resource [ 'url' ] = Dataset . temporary_url
Helper method to add a new resource from the dataset, including filestore handling .
6,217
def _add_filestore_resources ( self , filestore_resources , create_default_views , hxl_update ) : for resource in filestore_resources : for created_resource in self . data [ 'resources' ] : if resource [ 'name' ] == created_resource [ 'name' ] : merge_two_dictionaries ( resource . data , created_resource ) del resource [ 'url' ] resource . update_in_hdx ( ) merge_two_dictionaries ( created_resource , resource . data ) break self . init_resources ( ) self . separate_resources ( ) if create_default_views : self . create_default_views ( ) if hxl_update : self . hxl_update ( )
Helper method to create files in filestore by updating resources .
6,218
def _dataset_merge_hdx_update ( self , update_resources , update_resources_by_name , remove_additional_resources , create_default_views , hxl_update ) : merge_two_dictionaries ( self . data , self . old_data ) if 'resources' in self . data : del self . data [ 'resources' ] updated_resources = self . old_data . get ( 'resources' , None ) filestore_resources = list ( ) if update_resources and updated_resources : ignore_fields = [ 'package_id' ] if update_resources_by_name : resource_names = set ( ) for resource in self . resources : resource_name = resource [ 'name' ] resource_names . add ( resource_name ) for updated_resource in updated_resources : if resource_name == updated_resource [ 'name' ] : logger . warning ( 'Resource exists. Updating %s' % resource_name ) self . _dataset_merge_filestore_resource ( resource , updated_resource , filestore_resources , ignore_fields ) break updated_resource_names = set ( ) for updated_resource in updated_resources : updated_resource_name = updated_resource [ 'name' ] updated_resource_names . add ( updated_resource_name ) if not updated_resource_name in resource_names : self . _dataset_merge_filestore_newresource ( updated_resource , ignore_fields , filestore_resources ) if remove_additional_resources : resources_to_delete = list ( ) for i , resource in enumerate ( self . resources ) : resource_name = resource [ 'name' ] if resource_name not in updated_resource_names : logger . warning ( 'Removing additional resource %s!' % resource_name ) resources_to_delete . append ( i ) for i in sorted ( resources_to_delete , reverse = True ) : del self . resources [ i ] else : for i , updated_resource in enumerate ( updated_resources ) : if len ( self . resources ) > i : updated_resource_name = updated_resource [ 'name' ] resource = self . resources [ i ] resource_name = resource [ 'name' ] logger . warning ( 'Resource exists. Updating %s' % resource_name ) if resource_name != updated_resource_name : logger . warning ( 'Changing resource name to: %s' % updated_resource_name ) self . _dataset_merge_filestore_resource ( resource , updated_resource , filestore_resources , ignore_fields ) else : self . _dataset_merge_filestore_newresource ( updated_resource , ignore_fields , filestore_resources ) if remove_additional_resources : resources_to_delete = list ( ) for i , resource in enumerate ( self . resources ) : if len ( updated_resources ) <= i : logger . warning ( 'Removing additional resource %s!' % resource [ 'name' ] ) resources_to_delete . append ( i ) for i in sorted ( resources_to_delete , reverse = True ) : del self . resources [ i ] if self . resources : self . data [ 'resources' ] = self . _convert_hdxobjects ( self . resources ) ignore_field = self . configuration [ 'dataset' ] . get ( 'ignore_on_update' ) self . check_required_fields ( ignore_fields = [ ignore_field ] ) self . _save_to_hdx ( 'update' , 'id' ) self . _add_filestore_resources ( filestore_resources , create_default_views , hxl_update )
Helper method to check if dataset or its resources exist and update them
6,219
def update_in_hdx ( self , update_resources = True , update_resources_by_name = True , remove_additional_resources = False , create_default_views = True , hxl_update = True ) : loaded = False if 'id' in self . data : self . _check_existing_object ( 'dataset' , 'id' ) if self . _dataset_load_from_hdx ( self . data [ 'id' ] ) : loaded = True else : logger . warning ( 'Failed to load dataset with id %s' % self . data [ 'id' ] ) if not loaded : self . _check_existing_object ( 'dataset' , 'name' ) if not self . _dataset_load_from_hdx ( self . data [ 'name' ] ) : raise HDXError ( 'No existing dataset to update!' ) self . _dataset_merge_hdx_update ( update_resources = update_resources , update_resources_by_name = update_resources_by_name , remove_additional_resources = remove_additional_resources , create_default_views = create_default_views , hxl_update = hxl_update )
Check if dataset exists in HDX and if so update it
6,220
def create_in_hdx ( self , allow_no_resources = False , update_resources = True , update_resources_by_name = True , remove_additional_resources = False , create_default_views = True , hxl_update = True ) : self . check_required_fields ( allow_no_resources = allow_no_resources ) loadedid = None if 'id' in self . data : if self . _dataset_load_from_hdx ( self . data [ 'id' ] ) : loadedid = self . data [ 'id' ] else : logger . warning ( 'Failed to load dataset with id %s' % self . data [ 'id' ] ) if not loadedid : if self . _dataset_load_from_hdx ( self . data [ 'name' ] ) : loadedid = self . data [ 'name' ] if loadedid : logger . warning ( 'Dataset exists. Updating %s' % loadedid ) self . _dataset_merge_hdx_update ( update_resources = update_resources , update_resources_by_name = update_resources_by_name , remove_additional_resources = remove_additional_resources , create_default_views = create_default_views , hxl_update = hxl_update ) return filestore_resources = list ( ) if self . resources : ignore_fields = [ 'package_id' ] for resource in self . resources : resource . check_required_fields ( ignore_fields = ignore_fields ) if resource . get_file_to_upload ( ) : filestore_resources . append ( resource ) resource [ 'url' ] = Dataset . temporary_url self . data [ 'resources' ] = self . _convert_hdxobjects ( self . resources ) self . _save_to_hdx ( 'create' , 'name' ) self . _add_filestore_resources ( filestore_resources , False , hxl_update )
Check if dataset exists in HDX and if so update it otherwise create it
6,221
def search_in_hdx ( cls , query = '*:*' , configuration = None , page_size = 1000 , ** kwargs ) : dataset = Dataset ( configuration = configuration ) total_rows = kwargs . get ( 'rows' , cls . max_int ) start = kwargs . get ( 'start' , 0 ) all_datasets = None attempts = 0 while attempts < cls . max_attempts and all_datasets is None : all_datasets = list ( ) counts = set ( ) for page in range ( total_rows // page_size + 1 ) : pagetimespagesize = page * page_size kwargs [ 'start' ] = start + pagetimespagesize rows_left = total_rows - pagetimespagesize rows = min ( rows_left , page_size ) kwargs [ 'rows' ] = rows _ , result = dataset . _read_from_hdx ( 'dataset' , query , 'q' , Dataset . actions ( ) [ 'search' ] , ** kwargs ) datasets = list ( ) if result : count = result . get ( 'count' , None ) if count : counts . add ( count ) no_results = len ( result [ 'results' ] ) for datasetdict in result [ 'results' ] : dataset = Dataset ( configuration = configuration ) dataset . old_data = dict ( ) dataset . data = datasetdict dataset . _dataset_create_resources ( ) datasets . append ( dataset ) all_datasets += datasets if no_results < rows : break else : break else : logger . debug ( result ) if all_datasets and len ( counts ) != 1 : all_datasets = None attempts += 1 else : ids = [ dataset [ 'id' ] for dataset in all_datasets ] if len ( ids ) != len ( set ( ids ) ) : all_datasets = None attempts += 1 if attempts == cls . max_attempts and all_datasets is None : raise HDXError ( 'Maximum attempts reached for searching for datasets!' ) return all_datasets
Searches for datasets in HDX
6,222
def get_all_dataset_names ( configuration = None , ** kwargs ) : dataset = Dataset ( configuration = configuration ) dataset [ 'id' ] = 'all dataset names' return dataset . _write_to_hdx ( 'list' , kwargs , 'id' )
Get all dataset names in HDX
6,223
def get_all_datasets ( cls , configuration = None , page_size = 1000 , check_duplicates = True , ** kwargs ) : dataset = Dataset ( configuration = configuration ) dataset [ 'id' ] = 'all datasets' total_rows = kwargs . get ( 'limit' , cls . max_int ) start = kwargs . get ( 'offset' , 0 ) all_datasets = None attempts = 0 while attempts < cls . max_attempts and all_datasets is None : all_datasets = list ( ) for page in range ( total_rows // page_size + 1 ) : pagetimespagesize = page * page_size kwargs [ 'offset' ] = start + pagetimespagesize rows_left = total_rows - pagetimespagesize rows = min ( rows_left , page_size ) kwargs [ 'limit' ] = rows result = dataset . _write_to_hdx ( 'all' , kwargs , 'id' ) datasets = list ( ) if isinstance ( result , list ) : no_results = len ( result ) if no_results == 0 and page == 0 : all_datasets = None break for datasetdict in result : dataset = Dataset ( configuration = configuration ) dataset . old_data = dict ( ) dataset . data = datasetdict dataset . _dataset_create_resources ( ) datasets . append ( dataset ) all_datasets += datasets if no_results < rows : break else : logger . debug ( result ) if all_datasets is None : attempts += 1 elif check_duplicates : names_list = [ dataset [ 'name' ] for dataset in all_datasets ] names = set ( names_list ) if len ( names_list ) != len ( names ) : all_datasets = None attempts += 1 if attempts == cls . max_attempts and all_datasets is None : raise HDXError ( 'Maximum attempts reached for getting all datasets!' ) return all_datasets
Get all datasets in HDX
6,224
def get_dataset_date_as_datetime ( self ) : dataset_date = self . data . get ( 'dataset_date' , None ) if dataset_date : if '-' in dataset_date : dataset_date = dataset_date . split ( '-' ) [ 0 ] return datetime . strptime ( dataset_date , '%m/%d/%Y' ) else : return None
Get dataset date as a datetime.datetime object . For a range, returns the start date .
6,225
def get_dataset_end_date_as_datetime ( self ) : dataset_date = self . data . get ( 'dataset_date' , None ) if dataset_date : if '-' in dataset_date : dataset_date = dataset_date . split ( '-' ) [ 1 ] return datetime . strptime ( dataset_date , '%m/%d/%Y' ) return None
Get dataset end date as a datetime.datetime object .
6,226
def _get_formatted_date ( dataset_date , date_format = None ) : if dataset_date : if date_format : return dataset_date . strftime ( date_format ) else : return dataset_date . date ( ) . isoformat ( ) else : return None
Get supplied dataset date as string in specified format . If no format is supplied an ISO 8601 string is returned .
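A small worked example of the formatting helper above. It is a private static method of the Dataset class shown in these entries; calling it directly here is purely for illustration, and the import path follows the hdx library layout referenced elsewhere in this dump:

# Illustration only: _get_formatted_date is a private static helper of Dataset.
from datetime import datetime
from hdx.data.dataset import Dataset

d = datetime(2019, 3, 5)
print(Dataset._get_formatted_date(d))               # 2019-03-05 (ISO 8601 date, no format supplied)
print(Dataset._get_formatted_date(d, '%d/%m/%Y'))   # 05/03/2019
print(Dataset._get_formatted_date(None))            # None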
6,227
def set_dataset_date_from_datetime ( self , dataset_date , dataset_end_date = None ) : start_date = dataset_date . strftime ( '%m/%d/%Y' ) if dataset_end_date is None : self . data [ 'dataset_date' ] = start_date else : end_date = dataset_end_date . strftime ( '%m/%d/%Y' ) self . data [ 'dataset_date' ] = '%s-%s' % ( start_date , end_date )
Set dataset date from datetime . datetime object
6,228
def _parse_date ( dataset_date , date_format ) : if date_format is None : try : return parser . parse ( dataset_date ) except ( ValueError , OverflowError ) as e : raisefrom ( HDXError , 'Invalid dataset date!' , e ) else : try : return datetime . strptime ( dataset_date , date_format ) except ValueError as e : raisefrom ( HDXError , 'Invalid dataset date!' , e )
Parse dataset date from string using specified format . If no format is supplied the function will guess . For unambiguous formats this should be fine .
6,229
def set_dataset_date ( self , dataset_date , dataset_end_date = None , date_format = None ) : parsed_date = self . _parse_date ( dataset_date , date_format ) if dataset_end_date is None : self . set_dataset_date_from_datetime ( parsed_date ) else : parsed_end_date = self . _parse_date ( dataset_end_date , date_format ) self . set_dataset_date_from_datetime ( parsed_date , parsed_end_date )
Set dataset date from string using specified format . If no format is supplied the function will guess . For unambiguous formats this should be fine .
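A hedged usage sketch of the date setters above, assuming an HDX Configuration has already been created (e.g. via Configuration.create()) and using a hypothetical dataset name; the stored strings follow the MM/DD/YYYY form built by set_dataset_date_from_datetime:

from hdx.data.dataset import Dataset

dataset = Dataset({'name': 'example-dataset'})       # hypothetical dataset
dataset.set_dataset_date('2013-02-20')               # unambiguous string, format is guessed
print(dataset['dataset_date'])                       # 02/20/2013
dataset.set_dataset_date('20/02/2013', dataset_end_date='21/03/2013', date_format='%d/%m/%Y')
print(dataset['dataset_date'])                       # 02/20/2013-03/21/2013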
6,230
def set_dataset_year_range ( self , dataset_year , dataset_end_year = None ) : if isinstance ( dataset_year , int ) : dataset_date = '01/01/%d' % dataset_year elif isinstance ( dataset_year , str ) : dataset_date = '01/01/%s' % dataset_year else : raise hdx . data . hdxobject . HDXError ( 'dataset_year has type %s which is not supported!' % type ( dataset_year ) . __name__ ) if dataset_end_year is None : dataset_end_year = dataset_year if isinstance ( dataset_end_year , int ) : dataset_end_date = '31/12/%d' % dataset_end_year elif isinstance ( dataset_end_year , str ) : dataset_end_date = '31/12/%s' % dataset_end_year else : raise hdx . data . hdxobject . HDXError ( 'dataset_end_year has type %s which is not supported!' % type ( dataset_end_year ) . __name__ ) self . set_dataset_date ( dataset_date , dataset_end_date )
Set dataset date as a range from year or start and end year .
6,231
def set_expected_update_frequency ( self , update_frequency ) : try : int ( update_frequency ) except ValueError : update_frequency = Dataset . transform_update_frequency ( update_frequency ) if not update_frequency : raise HDXError ( 'Invalid update frequency supplied!' ) self . data [ 'data_update_frequency' ] = update_frequency
Set expected update frequency
6,232
def remove_tag ( self , tag ) : return self . _remove_hdxobject ( self . data . get ( 'tags' ) , tag , matchon = 'name' )
Remove a tag
6,233
def get_location ( self , locations = None ) : countries = self . data . get ( 'groups' , None ) if not countries : return list ( ) return [ Locations . get_location_from_HDX_code ( x [ 'name' ] , locations = locations , configuration = self . configuration ) for x in countries ]
Return the dataset's location
6,234
def add_country_location ( self , country , exact = True , locations = None , use_live = True ) : iso3 , match = Country . get_iso3_country_code_fuzzy ( country , use_live = use_live ) if iso3 is None : raise HDXError ( 'Country: %s - cannot find iso3 code!' % country ) return self . add_other_location ( iso3 , exact = exact , alterror = 'Country: %s with iso3: %s could not be found in HDX list!' % ( country , iso3 ) , locations = locations )
Add a country . If an ISO 3 code is not provided, the value is parsed and, if it is a valid country name, converted to an ISO 3 code . If the country is already added it is ignored .
6,235
def add_country_locations ( self , countries , locations = None , use_live = True ) : allcountriesadded = True for country in countries : if not self . add_country_location ( country , locations = locations , use_live = use_live ) : allcountriesadded = False return allcountriesadded
Add a list of countries . If ISO 3 codes are not provided, the values are parsed and, where they are valid country names, converted to ISO 3 codes . If any country is already added it is ignored .
6,236
def add_region_location ( self , region , locations = None , use_live = True ) : return self . add_country_locations ( Country . get_countries_in_region ( region , exception = HDXError , use_live = use_live ) , locations = locations )
Add all countries in a region . If a 3-digit UNStats M49 region code is not provided, the value is parsed as a region name . If any country is already added it is ignored .
6,237
def add_other_location ( self , location , exact = True , alterror = None , locations = None ) : hdx_code , match = Locations . get_HDX_code_from_location_partial ( location , locations = locations , configuration = self . configuration ) if hdx_code is None or ( exact is True and match is False ) : if alterror is None : raise HDXError ( 'Location: %s - cannot find in HDX!' % location ) else : raise HDXError ( alterror ) groups = self . data . get ( 'groups' , None ) hdx_code = hdx_code . lower ( ) if groups : if hdx_code in [ x [ 'name' ] for x in groups ] : return False else : groups = list ( ) groups . append ( { 'name' : hdx_code } ) self . data [ 'groups' ] = groups return True
Add a location which is not a country or region . Value is parsed and compared to existing locations in HDX . If the location is already added it is ignored .
6,238
def remove_location ( self , location ) : res = self . _remove_hdxobject ( self . data . get ( 'groups' ) , location , matchon = 'name' ) if not res : res = self . _remove_hdxobject ( self . data . get ( 'groups' ) , location . upper ( ) , matchon = 'name' ) if not res : res = self . _remove_hdxobject ( self . data . get ( 'groups' ) , location . lower ( ) , matchon = 'name' ) return res
Remove a location . If the location is already added it is ignored .
6,239
def get_maintainer ( self ) : return hdx . data . user . User . read_from_hdx ( self . data [ 'maintainer' ] , configuration = self . configuration )
Get the dataset's maintainer .
6,240
def set_maintainer ( self , maintainer ) : if isinstance ( maintainer , hdx . data . user . User ) or isinstance ( maintainer , dict ) : if 'id' not in maintainer : maintainer = hdx . data . user . User . read_from_hdx ( maintainer [ 'name' ] , configuration = self . configuration ) maintainer = maintainer [ 'id' ] elif not isinstance ( maintainer , str ) : raise HDXError ( 'Type %s cannot be added as a maintainer!' % type ( maintainer ) . __name__ ) if is_valid_uuid ( maintainer ) is False : raise HDXError ( '%s is not a valid user id for a maintainer!' % maintainer ) self . data [ 'maintainer' ] = maintainer
Set the dataset's maintainer .
6,241
def get_organization ( self ) : return hdx . data . organization . Organization . read_from_hdx ( self . data [ 'owner_org' ] , configuration = self . configuration )
Get the dataset's organization .
6,242
def set_organization ( self , organization ) : if isinstance ( organization , hdx . data . organization . Organization ) or isinstance ( organization , dict ) : if 'id' not in organization : organization = hdx . data . organization . Organization . read_from_hdx ( organization [ 'name' ] , configuration = self . configuration ) organization = organization [ 'id' ] elif not isinstance ( organization , str ) : raise HDXError ( 'Type %s cannot be added as a organization!' % type ( organization ) . __name__ ) if is_valid_uuid ( organization ) is False and organization != 'hdx' : raise HDXError ( '%s is not a valid organization id!' % organization ) self . data [ 'owner_org' ] = organization
Set the dataset's organization .
6,243
def get_showcases ( self ) : assoc_result , showcases_dicts = self . _read_from_hdx ( 'showcase' , self . data [ 'id' ] , fieldname = 'package_id' , action = hdx . data . showcase . Showcase . actions ( ) [ 'list_showcases' ] ) showcases = list ( ) if assoc_result : for showcase_dict in showcases_dicts : showcase = hdx . data . showcase . Showcase ( showcase_dict , configuration = self . configuration ) showcases . append ( showcase ) return showcases
Get any showcases the dataset is in
6,244
def _get_dataset_showcase_dict ( self , showcase ) : if isinstance ( showcase , hdx . data . showcase . Showcase ) or isinstance ( showcase , dict ) : if 'id' not in showcase : showcase = hdx . data . showcase . Showcase . read_from_hdx ( showcase [ 'name' ] ) showcase = showcase [ 'id' ] elif not isinstance ( showcase , str ) : raise HDXError ( 'Type %s cannot be added as a showcase!' % type ( showcase ) . __name__ ) if is_valid_uuid ( showcase ) is False : raise HDXError ( '%s is not a valid showcase id!' % showcase ) return { 'package_id' : self . data [ 'id' ] , 'showcase_id' : showcase }
Get dataset showcase dict
6,245
def add_showcase ( self , showcase , showcases_to_check = None ) : dataset_showcase = self . _get_dataset_showcase_dict ( showcase ) if showcases_to_check is None : showcases_to_check = self . get_showcases ( ) for showcase in showcases_to_check : if dataset_showcase [ 'showcase_id' ] == showcase [ 'id' ] : return False showcase = hdx . data . showcase . Showcase ( { 'id' : dataset_showcase [ 'showcase_id' ] } , configuration = self . configuration ) showcase . _write_to_hdx ( 'associate' , dataset_showcase , 'package_id' ) return True
Add dataset to showcase
6,246
def add_showcases ( self , showcases , showcases_to_check = None ) : if showcases_to_check is None : showcases_to_check = self . get_showcases ( ) allshowcasesadded = True for showcase in showcases : if not self . add_showcase ( showcase , showcases_to_check = showcases_to_check ) : allshowcasesadded = False return allshowcasesadded
Add dataset to multiple showcases
6,247
def remove_showcase ( self , showcase ) : dataset_showcase = self . _get_dataset_showcase_dict ( showcase ) showcase = hdx . data . showcase . Showcase ( { 'id' : dataset_showcase [ 'showcase_id' ] } , configuration = self . configuration ) showcase . _write_to_hdx ( 'disassociate' , dataset_showcase , 'package_id' )
Remove dataset from showcase
6,248
def set_requestable ( self , requestable = True ) : self . data [ 'is_requestdata_type' ] = requestable if requestable : self . data [ 'private' ] = False
Set the dataset to be of type requestable or not
6,249
def get_filetypes ( self ) : if not self . is_requestable ( ) : return [ resource . get_file_type ( ) for resource in self . get_resources ( ) ] return self . _get_stringlist_from_commastring ( 'file_types' )
Return list of filetypes in your data
6,250
def clean_dataset_tags ( self ) : tags_dict , wildcard_tags = Tags . tagscleanupdicts ( ) def delete_tag ( tag ) : logger . info ( '%s - Deleting tag %s!' % ( self . data [ 'name' ] , tag ) ) return self . remove_tag ( tag ) , False def update_tag ( tag , final_tags , wording , remove_existing = True ) : text = '%s - %s: %s -> ' % ( self . data [ 'name' ] , wording , tag ) if not final_tags : logger . error ( '%snothing!' % text ) return False , True tags_lower_five = final_tags [ : 5 ] . lower ( ) if tags_lower_five == 'merge' or tags_lower_five == 'split' or ( ';' not in final_tags and len ( final_tags ) > 50 ) : logger . error ( '%s%s - Invalid final tag!' % ( text , final_tags ) ) return False , True if remove_existing : self . remove_tag ( tag ) tags = ', ' . join ( self . get_tags ( ) ) if self . add_tags ( final_tags . split ( ';' ) ) : logger . info ( '%s%s! Dataset tags: %s' % ( text , final_tags , tags ) ) else : logger . warning ( '%s%s - At least one of the tags already exists! Dataset tags: %s' % ( text , final_tags , tags ) ) return True , False def do_action ( tag , tags_dict_key ) : whattodo = tags_dict [ tags_dict_key ] action = whattodo [ u'action' ] final_tags = whattodo [ u'final tags (semicolon separated)' ] if action == u'Delete' : changed , error = delete_tag ( tag ) elif action == u'Merge' : changed , error = update_tag ( tag , final_tags , 'Merging' ) elif action == u'Fix spelling' : changed , error = update_tag ( tag , final_tags , 'Fixing spelling' ) elif action == u'Non English' : changed , error = update_tag ( tag , final_tags , 'Anglicising' , remove_existing = False ) else : changed = False error = False return changed , error def process_tag ( tag ) : changed = False error = False if tag in tags_dict . keys ( ) : changed , error = do_action ( tag , tag ) else : for wildcard_tag in wildcard_tags : if fnmatch . fnmatch ( tag , wildcard_tag ) : changed , error = do_action ( tag , wildcard_tag ) break return changed , error anychange = False anyerror = False for tag in self . get_tags ( ) : changed , error = process_tag ( tag ) if changed : anychange = True if error : anyerror = True return anychange , anyerror
Clean dataset tags according to the tags cleanup spreadsheet and return whether any changes occurred
6,251
def set_quickchart_resource ( self , resource ) : if isinstance ( resource , int ) and not isinstance ( resource , bool ) : resource = self . get_resources ( ) [ resource ] if isinstance ( resource , hdx . data . resource . Resource ) or isinstance ( resource , dict ) : res = resource . get ( 'id' ) if res is None : resource = resource [ 'name' ] else : resource = res elif not isinstance ( resource , str ) : raise hdx . data . hdxobject . HDXError ( 'Resource id cannot be found in type %s!' % type ( resource ) . __name__ ) if is_valid_uuid ( resource ) is True : search = 'id' else : search = 'name' changed = False for dataset_resource in self . resources : if dataset_resource [ search ] == resource : dataset_resource . enable_dataset_preview ( ) self . preview_resource ( ) changed = True else : dataset_resource . disable_dataset_preview ( ) return changed
Set the resource that will be used for displaying QuickCharts in dataset preview
6,252
def create_default_views ( self , create_datastore_views = False ) : package = deepcopy ( self . data ) if self . resources : package [ 'resources' ] = self . _convert_hdxobjects ( self . resources ) data = { 'package' : package , 'create_datastore_views' : create_datastore_views } self . _write_to_hdx ( 'create_default_views' , data , 'package' )
Create default resource views for all resources in dataset
6,253
def _get_credentials ( self ) : site = self . data [ self . hdx_site ] username = site . get ( 'username' ) if username : return b64decode ( username ) . decode ( 'utf-8' ) , b64decode ( site [ 'password' ] ) . decode ( 'utf-8' ) else : return None
Return HDX site username and password
6,254
def call_remoteckan ( self , * args , ** kwargs ) : requests_kwargs = kwargs . get ( 'requests_kwargs' , dict ( ) ) credentials = self . _get_credentials ( ) if credentials : requests_kwargs [ 'auth' ] = credentials kwargs [ 'requests_kwargs' ] = requests_kwargs apikey = kwargs . get ( 'apikey' , self . get_api_key ( ) ) kwargs [ 'apikey' ] = apikey return self . remoteckan ( ) . call_action ( * args , ** kwargs )
Calls the remote CKAN
6,255
def create_remoteckan ( cls , site_url , user_agent = None , user_agent_config_yaml = None , user_agent_lookup = None , session = None , ** kwargs ) : if not session : session = get_session ( user_agent , user_agent_config_yaml , user_agent_lookup , prefix = Configuration . prefix , method_whitelist = frozenset ( [ 'HEAD' , 'TRACE' , 'GET' , 'POST' , 'PUT' , 'OPTIONS' , 'DELETE' ] ) , ** kwargs ) ua = session . headers [ 'User-Agent' ] else : ua = kwargs . get ( 'full_agent' ) if not ua : ua = UserAgent . get ( user_agent , user_agent_config_yaml , user_agent_lookup , prefix = Configuration . prefix , ** kwargs ) return ckanapi . RemoteCKAN ( site_url , user_agent = ua , session = session )
Create remote CKAN instance from configuration
6,256
def setup_remoteckan ( self , remoteckan = None , ** kwargs ) : if remoteckan is None : self . _remoteckan = self . create_remoteckan ( self . get_hdx_site_url ( ) , full_agent = self . get_user_agent ( ) , ** kwargs ) else : self . _remoteckan = remoteckan
Set up remote CKAN from provided CKAN or by creating from configuration
6,257
def setup ( cls , configuration = None , ** kwargs ) : if configuration is None : cls . _configuration = Configuration ( ** kwargs ) else : cls . _configuration = configuration
Set up the HDX configuration
6,258
def _create ( cls , configuration = None , remoteckan = None , ** kwargs ) : kwargs = cls . _environment_variables ( ** kwargs ) cls . setup ( configuration , ** kwargs ) cls . _configuration . setup_remoteckan ( remoteckan , ** kwargs ) return cls . _configuration . get_hdx_site_url ( )
Create HDX configuration
6,259
def kwargs_to_variable_assignment ( kwargs : dict , value_representation = repr , assignment_operator : str = ' = ' , statement_separator : str = '\n' , statement_per_line : bool = False ) -> str : code = [ ] join_str = '\n' if statement_per_line else '' for key , value in kwargs . items ( ) : code . append ( key + assignment_operator + value_representation ( value ) + statement_separator ) return join_str . join ( code )
Convert a dictionary into a string with assignments
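A minimal sketch of the helper above, assuming kwargs_to_variable_assignment (as defined in this entry) is in scope; it simply shows the string built from a keyword dictionary with the default settings:

code = kwargs_to_variable_assignment({'epochs': 10, 'name': 'run1'})
print(code)
# With the defaults, each value is rendered with repr(), giving:
# epochs = 10
# name = 'run1'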
6,260
def decode_json ( json_input : Union [ str , None ] = None ) : if json_input is None : return { } else : if isinstance ( json_input , str ) is False : raise TypeError ( ) elif json_input [ - 5 : ] == ".json" : with open ( json_input ) as f : decoded_json = json . load ( f ) else : decoded_json = json . loads ( json_input ) return decoded_json
Simple wrapper around json.load and json.loads .
6,261
def is_jsonable ( obj ) -> bool : try : return obj == json . loads ( json . dumps ( obj ) ) except TypeError : return False except : raise
Check if an object is jsonable .
6,262
def is_literal_eval ( node_or_string ) -> tuple : try : obj = ast . literal_eval ( node_or_string ) return ( True , obj ) except : return ( False , None )
Check if an expression can be evaluated with literal_eval .
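A short sketch of the two checks above, assuming is_jsonable and is_literal_eval (as defined in these entries) are in scope:

print(is_jsonable({'a': [1, 2]}))    # True  - round-trips through json unchanged
print(is_jsonable({1, 2, 3}))        # False - sets are not JSON serialisable
print(is_literal_eval('[1, 2, 3]'))  # (True, [1, 2, 3])
print(is_literal_eval('foo()'))      # (False, None) - a call is not a literal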
6,263
def find_duplicates ( l : list ) -> set : return set ( [ x for x in l if l . count ( x ) > 1 ] )
Return the duplicates in a list .
6,264
def sort_dict ( d : dict , by : str = 'key' , allow_duplicates : bool = True ) -> collections . OrderedDict : if by == 'key' : i = 0 elif by == 'value' : values = list ( d . values ( ) ) if len ( values ) != len ( set ( values ) ) and not allow_duplicates : duplicates = find_duplicates ( values ) raise ValueError ( "There are duplicates in the values: {}" . format ( duplicates ) ) i = 1 else : raise ValueError ( "by can be 'key' or 'value'." ) return collections . OrderedDict ( sorted ( d . items ( ) , key = lambda t : t [ i ] ) )
Sort a dictionary by key or value .
6,265
def group_dict_by_value ( d : dict ) -> dict : d_out = { } for k , v in d . items ( ) : if v in d_out : d_out [ v ] . append ( k ) else : d_out [ v ] = [ k ] return d_out
Group a dictionary by values .
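A brief sketch of how the three dictionary helpers above behave, assuming find_duplicates, sort_dict and group_dict_by_value (as defined in these entries) are in scope:

d = {'b': 1, 'a': 2, 'c': 1}
print(find_duplicates(list(d.values())))   # {1} - the value 1 appears twice
print(sort_dict(d, by='key'))              # OrderedDict([('a', 2), ('b', 1), ('c', 1)])
print(group_dict_by_value(d))              # {1: ['b', 'c'], 2: ['a']}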
6,266
def variable_status ( code : str , exclude_variable : Union [ set , None ] = None , jsonable_parameter : bool = True ) -> tuple : if exclude_variable is None : exclude_variable = set ( ) else : exclude_variable = copy . deepcopy ( exclude_variable ) root = ast . parse ( code ) store_variable_name = set ( ) assign_only = True dict_parameter = { } for node in ast . iter_child_nodes ( root ) : if isinstance ( node , ast . Assign ) : for assign_node in ast . walk ( node ) : if isinstance ( assign_node , ast . Name ) : if isinstance ( assign_node . ctx , ast . Store ) : if jsonable_parameter is False : store_variable_name |= { assign_node . id } else : exclude_variable |= { assign_node . id } _is_literal_eval , _value = is_literal_eval ( node . value ) if jsonable_parameter is True : for assign_node in ast . iter_child_nodes ( node ) : if isinstance ( assign_node , ast . Tuple ) : i = 0 for assign_tuple_node in ast . iter_child_nodes ( assign_node ) : if isinstance ( assign_tuple_node , ast . Name ) : if isinstance ( _value , ( collections . Iterable ) ) and is_jsonable ( _value [ i ] ) and _is_literal_eval : dict_parameter [ assign_tuple_node . id ] = _value [ i ] store_variable_name |= { assign_tuple_node . id } else : exclude_variable |= { assign_tuple_node . id } i += 1 else : if isinstance ( assign_node , ast . Name ) : if is_jsonable ( _value ) and _is_literal_eval : dict_parameter [ assign_node . id ] = _value store_variable_name |= { assign_node . id } else : exclude_variable |= { assign_node . id } elif isinstance ( node , ast . AugAssign ) : for assign_node in ast . walk ( node ) : if isinstance ( assign_node , ast . Name ) : exclude_variable |= { assign_node . id } elif isinstance ( node , ( ast . FunctionDef , ast . ClassDef ) ) : assign_only = False exclude_variable |= { node . name } elif isinstance ( node , ast . Import ) : assign_only = False for node1 in ast . iter_child_nodes ( node ) : if node1 . asname is not None : exclude_variable |= { node1 . asname } else : exclude_variable |= { node1 . name } elif isinstance ( node , ast . ImportFrom ) : assign_only = False for node1 in ast . iter_child_nodes ( node ) : if node1 . asname is not None : exclude_variable |= { node1 . asname } else : exclude_variable |= { node1 . name } else : assign_only = False if assign_only is True : possible_parameter = store_variable_name - exclude_variable if jsonable_parameter is True : dict_parameter = { k : dict_parameter [ k ] for k in possible_parameter } return ( possible_parameter , store_variable_name | exclude_variable , dict_parameter ) return set ( ) , store_variable_name | exclude_variable , { }
Find the possible parameters and global variables in Python code .
6,267
def increment_name ( name : str , start_marker : str = " (" , end_marker : str = ")" ) -> str : if start_marker == '' : raise ValueError ( "start_marker can not be the empty string." ) a = name start = len ( a ) - a [ : : - 1 ] . find ( start_marker [ : : - 1 ] ) if ( a [ len ( a ) - len ( end_marker ) : len ( a ) ] == end_marker and start < ( len ( a ) - len ( end_marker ) ) and a [ start - len ( start_marker ) : start ] == start_marker and a [ start : len ( a ) - len ( end_marker ) ] . isdigit ( ) ) : old_int = int ( a [ start : len ( a ) - len ( end_marker ) ] ) new_int = old_int + 1 new_name = a [ : start ] + str ( new_int ) + end_marker else : new_name = a + start_marker + '1' + end_marker return new_name
Increment the name where the incremental part is given by parameters .
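A brief usage sketch for increment_name, assuming the definition above is in scope; it shows how the default " (N)" suffix is added or bumped:

print(increment_name('report'))       # report (1) - no numeric suffix yet, so one is added
print(increment_name('report (1)'))   # report (2)
print(increment_name('report (9)'))   # report (10)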
6,268
def read_from_hdx ( identifier , configuration = None ) : resourceview = ResourceView ( configuration = configuration ) result = resourceview . _load_from_hdx ( 'resource view' , identifier ) if result : return resourceview return None
Reads the resource view given by identifier from HDX and returns ResourceView object
6,269
def get_all_for_resource ( identifier , configuration = None ) : resourceview = ResourceView ( configuration = configuration ) success , result = resourceview . _read_from_hdx ( 'resource view' , identifier , 'id' , ResourceView . actions ( ) [ 'list' ] ) resourceviews = list ( ) if success : for resourceviewdict in result : resourceview = ResourceView ( resourceviewdict , configuration = configuration ) resourceviews . append ( resourceview ) return resourceviews
Reads all resource views for a resource given by identifier from HDX and returns a list of ResourceView objects
6,270
def _update_resource_view ( self , log = False ) : update = False if 'id' in self . data and self . _load_from_hdx ( 'resource view' , self . data [ 'id' ] ) : update = True else : if 'resource_id' in self . data : resource_views = self . get_all_for_resource ( self . data [ 'resource_id' ] ) for resource_view in resource_views : if self . data [ 'title' ] == resource_view [ 'title' ] : self . old_data = self . data self . data = resource_view . data update = True break if update : if log : logger . warning ( 'resource view exists. Updating %s' % self . data [ 'id' ] ) self . _merge_hdx_update ( 'resource view' , 'id' ) return update
Check if resource view exists in HDX and if so update resource view
6,271
def create_in_hdx ( self ) : self . check_required_fields ( ) if not self . _update_resource_view ( log = True ) : self . _save_to_hdx ( 'create' , 'title' )
Check if resource view exists in HDX and if so update it otherwise create resource view
6,272
def copy ( self , resource_view ) : if isinstance ( resource_view , str ) : if is_valid_uuid ( resource_view ) is False : raise HDXError ( '%s is not a valid resource view id!' % resource_view ) resource_view = ResourceView . read_from_hdx ( resource_view ) if not isinstance ( resource_view , dict ) and not isinstance ( resource_view , ResourceView ) : raise HDXError ( '%s is not a valid resource view!' % resource_view ) for key in resource_view : if key not in ( 'id' , 'resource_id' , 'package_id' ) : self . data [ key ] = resource_view [ key ]
Copies all fields except id, resource_id and package_id from another resource view .
6,273
def tagscleanupdicts ( configuration = None , url = None , keycolumn = 5 , failchained = True ) : if not Tags . _tags_dict : if configuration is None : configuration = Configuration . read ( ) with Download ( full_agent = configuration . get_user_agent ( ) ) as downloader : if url is None : url = configuration [ 'tags_cleanup_url' ] Tags . _tags_dict = downloader . download_tabular_rows_as_dicts ( url , keycolumn = keycolumn ) keys = Tags . _tags_dict . keys ( ) chainerror = False for i , tag in enumerate ( keys ) : whattodo = Tags . _tags_dict [ tag ] action = whattodo [ u'action' ] final_tags = whattodo [ u'final tags (semicolon separated)' ] for final_tag in final_tags . split ( ';' ) : if final_tag in keys : index = list ( keys ) . index ( final_tag ) if index != i : whattodo2 = Tags . _tags_dict [ final_tag ] action2 = whattodo2 [ u'action' ] if action2 != 'OK' and action2 != 'Other' : final_tags2 = whattodo2 [ u'final tags (semicolon separated)' ] if final_tag not in final_tags2 . split ( ';' ) : chainerror = True if failchained : logger . error ( 'Chained rules: %s (%s -> %s) | %s (%s -> %s)' % ( action , tag , final_tags , action2 , final_tag , final_tags2 ) ) if failchained and chainerror : raise ChainRuleError ( 'Chained rules for tags detected!' ) Tags . _wildcard_tags = list ( ) for tag in Tags . _tags_dict : if '*' in tag : Tags . _wildcard_tags . append ( tag ) return Tags . _tags_dict , Tags . _wildcard_tags
Get tags cleanup dictionaries
6,274
def read_from_hdx ( identifier , configuration = None ) : user = User ( configuration = configuration ) result = user . _load_from_hdx ( 'user' , identifier ) if result : return user return None
Reads the user given by identifier from HDX and returns User object
6,275
def update_in_hdx ( self ) : capacity = self . data . get ( 'capacity' ) if capacity is not None : del self . data [ 'capacity' ] self . _update_in_hdx ( 'user' , 'id' ) if capacity is not None : self . data [ 'capacity' ] = capacity
Check if user exists in HDX and if so update user
6,276
def create_in_hdx ( self ) : capacity = self . data . get ( 'capacity' ) if capacity is not None : del self . data [ 'capacity' ] self . _create_in_hdx ( 'user' , 'id' , 'name' ) if capacity is not None : self . data [ 'capacity' ] = capacity
Check if user exists in HDX and if so update it otherwise create user
6,277
def email ( self , subject , text_body , html_body = None , sender = None , ** kwargs ) : self . configuration . emailer ( ) . send ( [ self . data [ 'email' ] ] , subject , text_body , html_body = html_body , sender = sender , ** kwargs )
Emails a user .
6,278
def get_all_users ( configuration = None , ** kwargs ) : user = User ( configuration = configuration ) user [ 'id' ] = 'all users' result = user . _write_to_hdx ( 'list' , kwargs , 'id' ) users = list ( ) if result : for userdict in result : user = User ( userdict , configuration = configuration ) users . append ( user ) else : logger . debug ( result ) return users
Get all users in HDX
6,279
def email_users ( users , subject , text_body , html_body = None , sender = None , configuration = None , ** kwargs ) : if not users : raise ValueError ( 'No users supplied' ) recipients = list ( ) for user in users : recipients . append ( user . data [ 'email' ] ) if configuration is None : configuration = users [ 0 ] . configuration configuration . emailer ( ) . send ( recipients , subject , text_body , html_body = html_body , sender = sender , ** kwargs )
Email a list of users
6,280
def get_organizations ( self , permission = 'read' ) : success , result = self . _read_from_hdx ( 'user' , self . data [ 'name' ] , 'id' , self . actions ( ) [ 'listorgs' ] , permission = permission ) organizations = list ( ) if success : for organizationdict in result : organization = hdx . data . organization . Organization . read_from_hdx ( organizationdict [ 'id' ] ) organizations . append ( organization ) return organizations
Get organizations in HDX that this user is a member of .
6,281
def facade ( projectmainfn , ** kwargs ) : site_url = Configuration . _create ( ** kwargs ) logger . info ( '--------------------------------------------------' ) logger . info ( '> Using HDX Python API Library %s' % Configuration . apiversion ) logger . info ( '> HDX Site: %s' % site_url ) UserAgent . user_agent = Configuration . read ( ) . user_agent projectmainfn ( )
Facade to simplify project setup that calls project main function
6,282
def get_lint_config ( config_path = None ) : if config_path : config = LintConfig . load_from_file ( config_path ) click . echo ( "Using config from {0}" . format ( config_path ) ) elif os . path . exists ( DEFAULT_CONFIG_FILE ) : config = LintConfig . load_from_file ( DEFAULT_CONFIG_FILE ) click . echo ( "Using config from {0}" . format ( DEFAULT_CONFIG_FILE ) ) else : config = LintConfig ( ) return config
Tries loading the config from the given path . If no path is specified, the default config path is tried, and if that does not exist the default config is returned .
6,283
def cli ( list_files , config , ignore , path ) : files = MarkdownFileFinder . find_files ( path ) if list_files : echo_files ( files ) lint_config = get_lint_config ( config ) lint_config . apply_on_csv_string ( ignore , lint_config . disable_rule ) linter = MarkdownLinter ( lint_config ) error_count = linter . lint_files ( files ) exit ( error_count )
Markdown lint tool checks your markdown for styling issues
6,284
def run ( self , check_interval = 300 ) : while True : if args . config : config = config_file_parser . get_configuration ( args . config ) access_key_id = config [ 'access-key-id' ] secret_access_key = config [ 'secret-access-key' ] region = config [ 'region' ] else : access_key_id = args . access_key_id secret_access_key = args . secret_access_key region = args . region connection = connection_manager . connect_to_ec2 ( region , access_key_id , secret_access_key ) snapshot_manager . run ( connection ) logger . info ( 'Sleeping {} seconds until next check' . format ( check_interval ) ) time . sleep ( check_interval )
Run the daemon
6,285
def _apply_line_rules ( self , markdown_string ) : all_violations = [ ] lines = markdown_string . split ( "\n" ) line_rules = self . line_rules line_nr = 1 ignoring = False for line in lines : if ignoring : if line . strip ( ) == '<!-- markdownlint:enable -->' : ignoring = False else : if line . strip ( ) == '<!-- markdownlint:disable -->' : ignoring = True continue for rule in line_rules : violation = rule . validate ( line ) if violation : violation . line_nr = line_nr all_violations . append ( violation ) line_nr += 1 return all_violations
Iterates over the lines in a given markdown string and applies all the enabled line rules to each line
6,286
def ReadFrom ( self , byte_stream ) : try : return self . _struct . unpack_from ( byte_stream ) except ( TypeError , struct . error ) as exception : raise IOError ( 'Unable to read byte stream with error: {0!s}' . format ( exception ) )
Read values from a byte stream .
6,287
def WriteTo ( self , values ) : try : return self . _struct . pack ( * values ) except ( TypeError , struct . error ) as exception : raise IOError ( 'Unable to write stream with error: {0!s}' . format ( exception ) )
Writes values to a byte stream .
6,288
def run ( connection ) : volumes = volume_manager . get_watched_volumes ( connection ) for volume in volumes : _ensure_snapshot ( connection , volume ) _remove_old_snapshots ( connection , volume )
Ensure that we have snapshots for a given volume
6,289
def _create_snapshot ( volume ) : logger . info ( 'Creating new snapshot for {}' . format ( volume . id ) ) snapshot = volume . create_snapshot ( description = "Automatic snapshot by Automated EBS Snapshots" ) logger . info ( 'Created snapshot {} for volume {}' . format ( snapshot . id , volume . id ) ) return snapshot
Create a new snapshot
6,290
def _ensure_snapshot ( connection , volume ) : if 'AutomatedEBSSnapshots' not in volume . tags : logger . warning ( 'Missing tag AutomatedEBSSnapshots for volume {}' . format ( volume . id ) ) return interval = volume . tags [ 'AutomatedEBSSnapshots' ] if volume . tags [ 'AutomatedEBSSnapshots' ] not in VALID_INTERVALS : logger . warning ( '"{}" is not a valid snapshotting interval for volume {}' . format ( interval , volume . id ) ) return snapshots = connection . get_all_snapshots ( filters = { 'volume-id' : volume . id } ) if not snapshots : _create_snapshot ( volume ) return min_delta = 3600 * 24 * 365 * 10 for snapshot in snapshots : timestamp = datetime . datetime . strptime ( snapshot . start_time , '%Y-%m-%dT%H:%M:%S.000Z' ) delta_seconds = int ( ( datetime . datetime . utcnow ( ) - timestamp ) . total_seconds ( ) ) if delta_seconds < min_delta : min_delta = delta_seconds logger . info ( 'The newest snapshot for {} is {} seconds old' . format ( volume . id , min_delta ) ) if interval == 'hourly' and min_delta > 3600 : _create_snapshot ( volume ) elif interval == 'daily' and min_delta > 3600 * 24 : _create_snapshot ( volume ) elif interval == 'weekly' and min_delta > 3600 * 24 * 7 : _create_snapshot ( volume ) elif interval == 'monthly' and min_delta > 3600 * 24 * 30 : _create_snapshot ( volume ) elif interval == 'yearly' and min_delta > 3600 * 24 * 365 : _create_snapshot ( volume ) else : logger . info ( 'No need for a new snapshot of {}' . format ( volume . id ) )
Ensure that a given volume has an appropriate snapshot
6,291
def _remove_old_snapshots ( connection , volume ) : if 'AutomatedEBSSnapshotsRetention' not in volume . tags : logger . warning ( 'Missing tag AutomatedEBSSnapshotsRetention for volume {}' . format ( volume . id ) ) return retention = int ( volume . tags [ 'AutomatedEBSSnapshotsRetention' ] ) snapshots = connection . get_all_snapshots ( filters = { 'volume-id' : volume . id } ) snapshots . sort ( key = lambda x : x . start_time ) snapshots = snapshots [ : - int ( retention ) ] if not snapshots : logger . info ( 'No old snapshots to remove' ) return for snapshot in snapshots : logger . info ( 'Deleting snapshot {}' . format ( snapshot . id ) ) try : snapshot . delete ( ) except EC2ResponseError as error : logger . warning ( 'Could not remove snapshot: {}' . format ( error . message ) ) logger . info ( 'Done deleting snapshots' )
Remove old snapshots
6,292
def list ( connection ) : volumes = get_watched_volumes ( connection ) if not volumes : logger . info ( 'No watched volumes found' ) return logger . info ( '+-----------------------' '+----------------------' '+--------------' '+------------+' ) logger . info ( '| {volume:<21} ' '| {volume_name:<20.20} ' '| {interval:<12} ' '| {retention:<10} |' . format ( volume = 'Volume ID' , volume_name = 'Volume name' , interval = 'Interval' , retention = 'Retention' ) ) logger . info ( '+-----------------------' '+----------------------' '+--------------' '+------------+' ) for volume in volumes : if 'AutomatedEBSSnapshots' not in volume . tags : interval = 'Interval tag not found' elif volume . tags [ 'AutomatedEBSSnapshots' ] not in VALID_INTERVALS : interval = 'Invalid interval' else : interval = volume . tags [ 'AutomatedEBSSnapshots' ] if 'AutomatedEBSSnapshotsRetention' not in volume . tags : retention = 0 else : retention = volume . tags [ 'AutomatedEBSSnapshotsRetention' ] try : volume_name = volume . tags [ 'Name' ] except KeyError : volume_name = '' logger . info ( '| {volume_id:<14} ' '| {volume_name:<20.20} ' '| {interval:<12} ' '| {retention:<10} |' . format ( volume_id = volume . id , volume_name = volume_name , interval = interval , retention = retention ) ) logger . info ( '+-----------------------' '+----------------------' '+--------------' '+------------+' )
List watched EBS volumes
6,293
def unwatch ( connection , volume_id ) : try : volume = connection . get_all_volumes ( volume_ids = [ volume_id ] ) [ 0 ] volume . remove_tag ( 'AutomatedEBSSnapshots' ) except EC2ResponseError : pass logger . info ( 'Removed {} from the watchlist' . format ( volume_id ) ) return True
Remove watching of a volume
6,294
def get_volume_id ( connection , volume ) : volume_id_pattern = re . compile ( 'vol-\w{8}' ) if volume_id_pattern . match ( volume ) : try : connection . get_all_volumes ( volume_ids = [ volume ] ) volume_id = volume except EC2ResponseError : logger . warning ( 'Volume {} not found' . format ( volume ) ) return None else : name_filter = { 'tag-key' : 'Name' , 'tag-value' : volume } volumes = connection . get_all_volumes ( filters = name_filter ) if not volumes : logger . warning ( 'Volume {} not found' . format ( volume ) ) return None if len ( volumes ) > 1 : logger . warning ( 'Volume {} not unique' . format ( volume ) ) volume_id = volumes [ 0 ] . id return volume_id
Get Volume ID from the given volume . Input can be volume id or its Name tag .
6,295
def list_snapshots ( connection , volume ) : logger . info ( '+----------------' '+----------------------' '+---------------------------+' ) logger . info ( '| {snapshot:<14} ' '| {snapshot_name:<20.20} ' '| {created:<25} |' . format ( snapshot = 'Snapshot ID' , snapshot_name = 'Snapshot name' , created = 'Created' ) ) logger . info ( '+----------------' '+----------------------' '+---------------------------+' ) vid = get_volume_id ( connection , volume ) if vid : vol = connection . get_all_volumes ( volume_ids = [ vid ] ) [ 0 ] for snap in vol . snapshots ( ) : logger . info ( '| {snapshot:<14} ' '| {snapshot_name:<20.20} ' '| {created:<25} |' . format ( snapshot = snap . id , snapshot_name = snap . tags . get ( 'Name' , '' ) , created = snap . start_time ) ) logger . info ( '+----------------' '+----------------------' '+---------------------------+' )
List all snapshots for the volume
6,296
def stem ( self , words , parser , ** kwargs ) : output = self . _run_morfologik ( words ) return parser . parse ( output , ** kwargs )
Get stems for the words using a given parser
6,297
def _run_morfologik ( self , words ) : p = subprocess . Popen ( [ 'java' , '-jar' , self . jar_path , 'plstem' , '-ie' , 'UTF-8' , '-oe' , 'UTF-8' ] , bufsize = - 1 , stdin = subprocess . PIPE , stdout = subprocess . PIPE , stderr = subprocess . STDOUT ) out , _ = p . communicate ( input = bytes ( "\n" . join ( words ) , "utf-8" ) ) return decode ( out , 'utf-8' )
Runs the Morfologik Java jar and assumes that input and output are UTF-8 encoded .
6,298
def read_from_hdx ( identifier , configuration = None ) : showcase = Showcase ( configuration = configuration ) result = showcase . _load_from_hdx ( 'showcase' , identifier ) if result : return showcase return None
Reads the showcase given by identifier from HDX and returns Showcase object
6,299
def get_datasets ( self ) : assoc_result , datasets_dicts = self . _read_from_hdx ( 'showcase' , self . data [ 'id' ] , fieldname = 'showcase_id' , action = self . actions ( ) [ 'list_datasets' ] ) datasets = list ( ) if assoc_result : for dataset_dict in datasets_dicts : dataset = hdx . data . dataset . Dataset ( dataset_dict , configuration = self . configuration ) datasets . append ( dataset ) return datasets
Get any datasets in the showcase