def placeFavicon(context):
    """
    Gets Favicon-URL for the Model.

    Template Syntax:
    {% placeFavicon %}
    """
    fav = Favicon.objects.filter(isFavicon=True).first()
    if not fav:
        return mark_safe('<!-- no favicon -->')
    html = ''
    for rel in config:
        for size in sorted(config[rel], reverse=True):
            n = fav.get_favicon(size=size, rel=rel)
            html += '<link rel="%s" sizes="%sx%s" href="%s"/>' % (
                n.rel, n.size, n.size, n.faviconImage.url)
    default_fav = fav.get_favicon(size=32, rel='shortcut icon')
    html += '<link rel="%s" sizes="%sx%s" href="%s"/>' % (
        default_fav.rel, default_fav.size, default_fav.size,
        default_fav.faviconImage.url)
    return mark_safe(html)
Gets Favicon-URL for the Model. Template Syntax: {% placeFavicon %}
entailment
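For context, a minimal sketch of exercising this tag from Python. It assumes Django is already configured and that the template-tag module registering placeFavicon is loaded under the name favicon (an assumption; the registration code is not shown above):

from django.template import Context, Template

# Hypothetical tag-library name 'favicon'; placeFavicon renders one <link>
# tag per configured rel/size, or '<!-- no favicon -->' if none is set.
template = Template('{% load favicon %}{% placeFavicon %}')
html = template.render(Context({}))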
def set_default_theme(theme):
    """ Set default theme name based on config file. """
    pref_init()  # make sure config files exist
    parser = cp.ConfigParser()
    parser.read(PREFS_FILE)
    # Do we need to create a section?
    if not parser.has_section("theme"):
        parser.add_section("theme")
    parser.set("theme", "default", theme)
    # best way to make sure no file truncation?
    with open("%s.2" % PREFS_FILE, "w") as fp:
        parser.write(fp)
    copy("%s.2" % PREFS_FILE, PREFS_FILE)
    unlink("%s.2" % PREFS_FILE)
Set default theme name based on config file.
entailment
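A small round-trip sketch of how the prefs helpers compose; pick_theme is defined next, and PREFS_FILE comes from the surrounding module:

set_default_theme('swiss')           # writes [theme] default = swiss to the prefs file
assert pick_theme(None) == 'swiss'   # no manual override, so the prefs value wins
assert pick_theme('dark') == 'dark'  # manual input always takes precedence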
def pick_theme(manual):
    """ Return theme name based on manual input, prefs file, or default to "plain". """
    if manual:
        return manual
    pref_init()
    parser = cp.ConfigParser()
    parser.read(PREFS_FILE)
    try:
        theme = parser.get("theme", "default")
    except (cp.NoSectionError, cp.NoOptionError):
        theme = "plain"
    return theme
Return theme name based on manual input, prefs file, or default to "plain".
entailment
def pref_init():
    """Can be called without penalty. Create ~/.cdk dir if it doesn't exist.
    Copy the default pref file if it doesn't exist."""
    # make sure we have a ~/.cdk dir
    if not isdir(PREFS_DIR):
        mkdir(PREFS_DIR)
    # make sure we have a default prefs file
    if not isfile(PREFS_FILE):
        copy(join(LOCATION, "custom", "prefs"), PREFS_DIR)
Can be called without penalty. Create ~/.cdk dir if it doesn't exist. Copy the default pref file if it doesn't exist.
entailment
def install_theme(path_to_theme):
    """ Pass a path to a theme file which will be extracted to the themes directory. """
    pref_init()
    # cp the file
    filename = basename(path_to_theme)
    dest = join(THEMES_DIR, filename)
    copy(path_to_theme, dest)
    # unzip
    zf = zipfile.ZipFile(dest)
    # should make sure zipfile contains only themename folder which doesn't conflict
    # with existing themename. Or some kind of sanity check
    zf.extractall(THEMES_DIR)  # plus this is a potential security flaw pre 2.7.4
    # remove the copied zipfile
    unlink(dest)
Pass a path to a theme file which will be extracted to the themes directory.
entailment
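The comments above ask for a sanity check on archive contents and note that extractall has a path-traversal flaw before Python 2.7.4; one way such a check could look (a sketch, not part of the original code):

import os
import zipfile

def safe_extract(zip_path, dest_dir):
    # Reject members that would land outside dest_dir (e.g. '../evil').
    root = os.path.realpath(dest_dir)
    with zipfile.ZipFile(zip_path) as zf:
        for member in zf.namelist():
            target = os.path.realpath(os.path.join(dest_dir, member))
            if not target.startswith(root + os.sep):
                raise ValueError('Unsafe path in theme zip: %s' % member)
        zf.extractall(dest_dir)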
def main():
    """ Entry point for choosing what subcommand to run. Really should be using asciidocapi """
    # Try parsing command line args and flags with docopt
    args = docopt(__doc__, version="cdk")
    # Am I going to need validation? No Schema for the moment...
    if args['FILE']:
        out = output_file(args['FILE'])
        # Great! Run asciidoc with appropriate flags
        theme = pick_theme(args['--theme'])
        if theme not in listdir(THEMES_DIR):
            exit('Selected theme "%s" not found. Check ~/.cdk/prefs' % theme)
        cmd = create_command(theme, args['--bare'], args['--toc'],
                             args['--notransition'], args['--logo'])
        run_command(cmd, args)
        if args['--toc']:
            add_css(out, '.deck-container .deck-toc li a span{color: #888;display:inline;}')
        if args['--custom-css']:
            add_css_file(out, args['--custom-css'])
        if args['--open']:
            webbrowser.open("file://" + abspath(out))
    # other commands
    elif args['--generate']:
        if isfile(args['--generate']):
            exit("%s already exists!" % args['--generate'])
        with open(args['--generate'], "w") as fp:
            sample = join(LOCATION, "custom", "sample.asc")
            fp.write(open(sample).read())
        print("Created sample slide deck in %s..." % args['--generate'])
        exit()
    elif args['--install-theme']:
        path = args['--install-theme']
        if not isfile(path):
            exit("Theme file not found.")
        if not path.endswith(".zip"):
            exit("Theme installation currently only supports theme install from "
                 ".zip files.")
        install_theme(path)
    elif args['--default-theme']:
        set_default_theme(args['--default-theme'])
Entry point for choosing what subcommand to run. Really should be using asciidocapi
entailment
def separate_resources(self):
    # type: () -> None
    """Move contents of resources key in internal dictionary into self.resources

    Returns:
        None
    """
    self._separate_hdxobjects(self.resources, 'resources', 'name',
                              hdx.data.resource.Resource)
Move contents of resources key in internal dictionary into self.resources Returns: None
entailment
def _get_resource_from_obj(self, resource):
    # type: (Union[hdx.data.resource.Resource,Dict,str]) -> hdx.data.resource.Resource
    """Get a Resource object from a resource id, dictionary or Resource object

    Args:
        resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary

    Returns:
        hdx.data.resource.Resource: Resource object
    """
    if isinstance(resource, str):
        if is_valid_uuid(resource) is False:
            raise HDXError('%s is not a valid resource id!' % resource)
        resource = hdx.data.resource.Resource.read_from_hdx(resource, configuration=self.configuration)
    elif isinstance(resource, dict):
        resource = hdx.data.resource.Resource(resource, configuration=self.configuration)
    if not isinstance(resource, hdx.data.resource.Resource):
        raise HDXError('Type %s cannot be added as a resource!' % type(resource).__name__)
    return resource
Get a Resource object from a resource id, dictionary or Resource object Args: resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary Returns: hdx.data.resource.Resource: Resource object
entailment
def add_update_resource(self, resource, ignore_datasetid=False):
    # type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> None
    """Add new or update existing resource in dataset with new metadata

    Args:
        resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
        ignore_datasetid (bool): Whether to ignore dataset id in the resource

    Returns:
        None
    """
    resource = self._get_resource_from_obj(resource)
    if 'package_id' in resource:
        if not ignore_datasetid:
            raise HDXError('Resource %s being added already has a dataset id!' % (resource['name']))
    resource.check_url_filetoupload()
    resource_updated = self._addupdate_hdxobject(self.resources, 'name', resource)
    if resource.get_file_to_upload():
        resource_updated.set_file_to_upload(resource.get_file_to_upload())
Add new or update existing resource in dataset with new metadata Args: resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary ignore_datasetid (bool): Whether to ignore dataset id in the resource Returns: None
entailment
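A minimal usage sketch for add_update_resource; the dataset name, resource metadata and file path are hypothetical, and it assumes the HDX configuration has already been created:

import hdx.data.resource

dataset = Dataset.read_from_hdx('my-dataset')  # hypothetical dataset name
resource = hdx.data.resource.Resource({'name': 'data.csv', 'format': 'csv',
                                       'description': 'example resource'})
resource.set_file_to_upload('/path/to/data.csv')  # uploaded to the filestore on save
dataset.add_update_resource(resource)  # matched on name: updates if present, else adds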
def add_update_resources(self, resources, ignore_datasetid=False):
    # type: (List[Union[hdx.data.resource.Resource,Dict,str]], bool) -> None
    """Add new or update existing resources with new metadata to the dataset

    Args:
        resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
        ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.

    Returns:
        None
    """
    if not isinstance(resources, list):
        raise HDXError('Resources should be a list!')
    for resource in resources:
        self.add_update_resource(resource, ignore_datasetid)
Add new or update existing resources with new metadata to the dataset Args: resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False. Returns: None
entailment
def delete_resource(self, resource, delete=True):
    # type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> bool
    """Delete a resource from the dataset and also from HDX by default

    Args:
        resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
        delete (bool): Whether to delete the resource from HDX (not just the dataset). Defaults to True.

    Returns:
        bool: True if resource removed or False if not
    """
    if isinstance(resource, str):
        if is_valid_uuid(resource) is False:
            raise HDXError('%s is not a valid resource id!' % resource)
    return self._remove_hdxobject(self.resources, resource, delete=delete)
Delete a resource from the dataset and also from HDX by default Args: resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary delete (bool): Whether to delete the resource from HDX (not just the dataset). Defaults to True. Returns: bool: True if resource removed or False if not
entailment
def reorder_resources(self, resource_ids, hxl_update=True):
    # type: (List[str], bool) -> None
    """Reorder resources in dataset according to provided list. If only some resource ids are
    supplied then these are assumed to be first and the other resources will stay in their
    original order.

    Args:
        resource_ids (List[str]): List of resource ids
        hxl_update (bool): Whether to call package_hxl_update. Defaults to True.

    Returns:
        None
    """
    dataset_id = self.data.get('id')
    if not dataset_id:
        raise HDXError('Dataset has no id! It must be read, created or updated first.')
    data = {'id': dataset_id, 'order': resource_ids}
    self._write_to_hdx('reorder', data, 'package_id')
    if hxl_update:
        self.hxl_update()
Reorder resources in dataset according to provided list. If only some resource ids are supplied then these are assumed to be first and the other resources will stay in their original order. Args: resource_ids (List[str]): List of resource ids hxl_update (bool): Whether to call package_hxl_update. Defaults to True. Returns: None
entailment
def update_from_yaml(self, path=join('config', 'hdx_dataset_static.yml')):
    # type: (str) -> None
    """Update dataset metadata with static metadata from YAML file

    Args:
        path (str): Path to YAML dataset metadata. Defaults to config/hdx_dataset_static.yml.

    Returns:
        None
    """
    super(Dataset, self).update_from_yaml(path)
    self.separate_resources()
Update dataset metadata with static metadata from YAML file Args: path (str): Path to YAML dataset metadata. Defaults to config/hdx_dataset_static.yml. Returns: None
entailment
def update_from_json(self, path=join('config', 'hdx_dataset_static.json')):
    # type: (str) -> None
    """Update dataset metadata with static metadata from JSON file

    Args:
        path (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json.

    Returns:
        None
    """
    super(Dataset, self).update_from_json(path)
    self.separate_resources()
Update dataset metadata with static metadata from JSON file Args: path (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json. Returns: None
entailment
def read_from_hdx(identifier, configuration=None):
    # type: (str, Optional[Configuration]) -> Optional['Dataset']
    """Reads the dataset given by identifier from HDX and returns Dataset object

    Args:
        identifier (str): Identifier of dataset
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

    Returns:
        Optional[Dataset]: Dataset object if successful read, None if not
    """
    dataset = Dataset(configuration=configuration)
    result = dataset._dataset_load_from_hdx(identifier)
    if result:
        return dataset
    return None
Reads the dataset given by identifier from HDX and returns Dataset object Args: identifier (str): Identifier of dataset configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[Dataset]: Dataset object if successful read, None if not
entailment
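A minimal read sketch; the identifier is hypothetical:

dataset = Dataset.read_from_hdx('some-dataset-name')
if dataset is None:
    print('No dataset with that id or name')
else:
    print([resource['name'] for resource in dataset.get_resources()])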
def _dataset_create_resources(self):
    # type: () -> None
    """Creates resource objects in dataset
    """
    if 'resources' in self.data:
        self.old_data['resources'] = self._copy_hdxobjects(self.resources, hdx.data.resource.Resource,
                                                           'file_to_upload')
        self.init_resources()
        self.separate_resources()
Creates resource objects in dataset
entailment
def _dataset_load_from_hdx(self, id_or_name):
    # type: (str) -> bool
    """Loads the dataset given by either id or name from HDX

    Args:
        id_or_name (str): Either id or name of dataset

    Returns:
        bool: True if loaded, False if not
    """
    if not self._load_from_hdx('dataset', id_or_name):
        return False
    self._dataset_create_resources()
    return True
Loads the dataset given by either id or name from HDX Args: id_or_name (str): Either id or name of dataset Returns: bool: True if loaded, False if not
entailment
def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):
    # type: (List[str], bool) -> None
    """Check that metadata for dataset and its resources is complete. The parameter ignore_fields
    should be set if required to any fields that should be ignored for the particular operation.

    Args:
        ignore_fields (List[str]): Fields to ignore. Default is [].
        allow_no_resources (bool): Whether to allow no resources. Defaults to False.

    Returns:
        None
    """
    if self.is_requestable():
        self._check_required_fields('dataset-requestable', ignore_fields)
    else:
        self._check_required_fields('dataset', ignore_fields)
        if len(self.resources) == 0 and not allow_no_resources:
            raise HDXError('There are no resources! Please add at least one resource!')
        for resource in self.resources:
            ignore_fields = ['package_id']
            resource.check_required_fields(ignore_fields=ignore_fields)
Check that metadata for dataset and its resources is complete. The parameter ignore_fields should be set if required to any fields that should be ignored for the particular operation. Args: ignore_fields (List[str]): Fields to ignore. Default is []. allow_no_resources (bool): Whether to allow no resources. Defaults to False. Returns: None
entailment
def _dataset_merge_filestore_resource(self, resource, updated_resource, filestore_resources, ignore_fields):
    # type: (hdx.data.Resource, hdx.data.Resource, List[hdx.data.Resource], List[str]) -> None
    """Helper method to merge updated resource from dataset into HDX resource read from HDX including filestore.

    Args:
        resource (hdx.data.Resource): Resource read from HDX
        updated_resource (hdx.data.Resource): Updated resource from dataset
        filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)
        ignore_fields (List[str]): List of fields to ignore when checking resource

    Returns:
        None
    """
    if updated_resource.get_file_to_upload():
        resource.set_file_to_upload(updated_resource.get_file_to_upload())
        filestore_resources.append(resource)
    merge_two_dictionaries(resource, updated_resource)
    resource.check_required_fields(ignore_fields=ignore_fields)
    if resource.get_file_to_upload():
        resource['url'] = Dataset.temporary_url
Helper method to merge updated resource from dataset into HDX resource read from HDX including filestore. Args: resource (hdx.data.Resource): Resource read from HDX updated_resource (hdx.data.Resource): Updated resource from dataset filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to) ignore_fields (List[str]): List of fields to ignore when checking resource Returns: None
entailment
def _dataset_merge_filestore_newresource(self, new_resource, ignore_fields, filestore_resources):
    # type: (hdx.data.Resource, List[str], List[hdx.data.Resource]) -> None
    """Helper method to add new resource from dataset including filestore.

    Args:
        new_resource (hdx.data.Resource): New resource from dataset
        ignore_fields (List[str]): List of fields to ignore when checking resource
        filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)

    Returns:
        None
    """
    new_resource.check_required_fields(ignore_fields=ignore_fields)
    self.resources.append(new_resource)
    if new_resource.get_file_to_upload():
        filestore_resources.append(new_resource)
        new_resource['url'] = Dataset.temporary_url
Helper method to add new resource from dataset including filestore. Args: new_resource (hdx.data.Resource): New resource from dataset ignore_fields (List[str]): List of fields to ignore when checking resource filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to) Returns: None
entailment
def _add_filestore_resources(self, filestore_resources, create_default_views, hxl_update):
    # type: (List[hdx.data.Resource], bool, bool) -> None
    """Helper method to create files in filestore by updating resources.

    Args:
        filestore_resources (List[hdx.data.Resource]): List of resources that use filestore
        create_default_views (bool): Whether to call package_create_default_resource_views.
        hxl_update (bool): Whether to call package_hxl_update.

    Returns:
        None
    """
    for resource in filestore_resources:
        for created_resource in self.data['resources']:
            if resource['name'] == created_resource['name']:
                merge_two_dictionaries(resource.data, created_resource)
                del resource['url']
                resource.update_in_hdx()
                merge_two_dictionaries(created_resource, resource.data)
                break
    self.init_resources()
    self.separate_resources()
    if create_default_views:
        self.create_default_views()
    if hxl_update:
        self.hxl_update()
Helper method to create files in filestore by updating resources. Args: filestore_resources (List[hdx.data.Resource]): List of resources that use filestore create_default_views (bool): Whether to call package_create_default_resource_views. hxl_update (bool): Whether to call package_hxl_update. Returns: None
entailment
def _dataset_merge_hdx_update(self, update_resources, update_resources_by_name,
                              remove_additional_resources, create_default_views, hxl_update):
    # type: (bool, bool, bool, bool, bool) -> None
    """Helper method to check if dataset or its resources exist and update them

    Args:
        update_resources (bool): Whether to update resources
        update_resources_by_name (bool): Compare resource names rather than position in list
        remove_additional_resources (bool): Remove additional resources found in dataset (if updating)
        create_default_views (bool): Whether to call package_create_default_resource_views.
        hxl_update (bool): Whether to call package_hxl_update.

    Returns:
        None
    """
    # 'old_data' here is the data we want to use for updating while 'data' is the data read from HDX
    merge_two_dictionaries(self.data, self.old_data)
    if 'resources' in self.data:
        del self.data['resources']
    updated_resources = self.old_data.get('resources', None)
    filestore_resources = list()
    if update_resources and updated_resources:
        ignore_fields = ['package_id']
        if update_resources_by_name:
            resource_names = set()
            for resource in self.resources:
                resource_name = resource['name']
                resource_names.add(resource_name)
                for updated_resource in updated_resources:
                    if resource_name == updated_resource['name']:
                        logger.warning('Resource exists. Updating %s' % resource_name)
                        self._dataset_merge_filestore_resource(resource, updated_resource,
                                                               filestore_resources, ignore_fields)
                        break
            updated_resource_names = set()
            for updated_resource in updated_resources:
                updated_resource_name = updated_resource['name']
                updated_resource_names.add(updated_resource_name)
                if updated_resource_name not in resource_names:
                    self._dataset_merge_filestore_newresource(updated_resource, ignore_fields,
                                                              filestore_resources)
            if remove_additional_resources:
                resources_to_delete = list()
                for i, resource in enumerate(self.resources):
                    resource_name = resource['name']
                    if resource_name not in updated_resource_names:
                        logger.warning('Removing additional resource %s!' % resource_name)
                        resources_to_delete.append(i)
                for i in sorted(resources_to_delete, reverse=True):
                    del self.resources[i]
        else:  # update resources by position
            for i, updated_resource in enumerate(updated_resources):
                if len(self.resources) > i:
                    updated_resource_name = updated_resource['name']
                    resource = self.resources[i]
                    resource_name = resource['name']
                    logger.warning('Resource exists. Updating %s' % resource_name)
                    if resource_name != updated_resource_name:
                        logger.warning('Changing resource name to: %s' % updated_resource_name)
                    self._dataset_merge_filestore_resource(resource, updated_resource,
                                                           filestore_resources, ignore_fields)
                else:
                    self._dataset_merge_filestore_newresource(updated_resource, ignore_fields,
                                                              filestore_resources)
            if remove_additional_resources:
                resources_to_delete = list()
                for i, resource in enumerate(self.resources):
                    if len(updated_resources) <= i:
                        logger.warning('Removing additional resource %s!' % resource['name'])
                        resources_to_delete.append(i)
                for i in sorted(resources_to_delete, reverse=True):
                    del self.resources[i]
    if self.resources:
        self.data['resources'] = self._convert_hdxobjects(self.resources)
    ignore_field = self.configuration['dataset'].get('ignore_on_update')
    self.check_required_fields(ignore_fields=[ignore_field])
    self._save_to_hdx('update', 'id')
    self._add_filestore_resources(filestore_resources, create_default_views, hxl_update)
Helper method to check if dataset or its resources exist and update them Args: update_resources (bool): Whether to update resources update_resources_by_name (bool): Compare resource names rather than position in list remove_additional_resources (bool): Remove additional resources found in dataset (if updating) create_default_views (bool): Whether to call package_create_default_resource_views. hxl_update (bool): Whether to call package_hxl_update. Returns: None
entailment
def update_in_hdx(self, update_resources=True, update_resources_by_name=True,
                  remove_additional_resources=False, create_default_views=True, hxl_update=True):
    # type: (bool, bool, bool, bool, bool) -> None
    """Check if dataset exists in HDX and if so, update it

    Args:
        update_resources (bool): Whether to update resources. Defaults to True.
        update_resources_by_name (bool): Compare resource names rather than position in list. Defaults to True.
        remove_additional_resources (bool): Remove additional resources found in dataset. Defaults to False.
        create_default_views (bool): Whether to call package_create_default_resource_views. Defaults to True.
        hxl_update (bool): Whether to call package_hxl_update. Defaults to True.

    Returns:
        None
    """
    loaded = False
    if 'id' in self.data:
        self._check_existing_object('dataset', 'id')
        if self._dataset_load_from_hdx(self.data['id']):
            loaded = True
        else:
            logger.warning('Failed to load dataset with id %s' % self.data['id'])
    if not loaded:
        self._check_existing_object('dataset', 'name')
        if not self._dataset_load_from_hdx(self.data['name']):
            raise HDXError('No existing dataset to update!')
    self._dataset_merge_hdx_update(update_resources=update_resources,
                                   update_resources_by_name=update_resources_by_name,
                                   remove_additional_resources=remove_additional_resources,
                                   create_default_views=create_default_views,
                                   hxl_update=hxl_update)
Check if dataset exists in HDX and if so, update it Args: update_resources (bool): Whether to update resources. Defaults to True. update_resources_by_name (bool): Compare resource names rather than position in list. Defaults to True. remove_additional_resources (bool): Remove additional resources found in dataset. Defaults to False. create_default_views (bool): Whether to call package_create_default_resource_views. Defaults to True. hxl_update (bool): Whether to call package_hxl_update. Defaults to True. Returns: None
entailment
def create_in_hdx(self, allow_no_resources=False, update_resources=True,
                  update_resources_by_name=True, remove_additional_resources=False,
                  create_default_views=True, hxl_update=True):
    # type: (bool, bool, bool, bool, bool, bool) -> None
    """Check if dataset exists in HDX and if so, update it, otherwise create it

    Args:
        allow_no_resources (bool): Whether to allow no resources. Defaults to False.
        update_resources (bool): Whether to update resources (if updating). Defaults to True.
        update_resources_by_name (bool): Compare resource names rather than position in list. Defaults to True.
        remove_additional_resources (bool): Remove additional resources found in dataset (if updating). Defaults to False.
        create_default_views (bool): Whether to call package_create_default_resource_views (if updating). Defaults to True.
        hxl_update (bool): Whether to call package_hxl_update. Defaults to True.

    Returns:
        None
    """
    self.check_required_fields(allow_no_resources=allow_no_resources)
    loadedid = None
    if 'id' in self.data:
        if self._dataset_load_from_hdx(self.data['id']):
            loadedid = self.data['id']
        else:
            logger.warning('Failed to load dataset with id %s' % self.data['id'])
    if not loadedid:
        if self._dataset_load_from_hdx(self.data['name']):
            loadedid = self.data['name']
    if loadedid:
        logger.warning('Dataset exists. Updating %s' % loadedid)
        self._dataset_merge_hdx_update(update_resources=update_resources,
                                       update_resources_by_name=update_resources_by_name,
                                       remove_additional_resources=remove_additional_resources,
                                       create_default_views=create_default_views,
                                       hxl_update=hxl_update)
        return
    filestore_resources = list()
    if self.resources:
        ignore_fields = ['package_id']
        for resource in self.resources:
            resource.check_required_fields(ignore_fields=ignore_fields)
            if resource.get_file_to_upload():
                filestore_resources.append(resource)
                resource['url'] = Dataset.temporary_url
        self.data['resources'] = self._convert_hdxobjects(self.resources)
    self._save_to_hdx('create', 'name')
    self._add_filestore_resources(filestore_resources, False, hxl_update)
Check if dataset exists in HDX and if so, update it, otherwise create it Args: allow_no_resources (bool): Whether to allow no resources. Defaults to False. update_resources (bool): Whether to update resources (if updating). Defaults to True. update_resources_by_name (bool): Compare resource names rather than position in list. Defaults to True. remove_additional_resources (bool): Remove additional resources found in dataset (if updating). Defaults to False. create_default_views (bool): Whether to call package_create_default_resource_views (if updating). Defaults to True. hxl_update (bool): Whether to call package_hxl_update. Defaults to True. Returns: None
entailment
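A minimal create-or-update sketch tying the above together; all field values are hypothetical:

dataset = Dataset({'name': 'my-dataset', 'title': 'My Dataset', 'owner_org': 'hdx'})
dataset.update_from_yaml()  # merge in static metadata from config/hdx_dataset_static.yml
dataset.create_in_hdx(allow_no_resources=True)  # updates in place if it already exists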
def search_in_hdx(cls, query='*:*', configuration=None, page_size=1000, **kwargs):
    # type: (Optional[str], Optional[Configuration], int, Any) -> List['Dataset']
    """Searches for datasets in HDX

    Args:
        query (Optional[str]): Query (in Solr format). Defaults to '*:*'.
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
        page_size (int): Size of page to return. Defaults to 1000.
        **kwargs: See below
        fq (string): Any filter queries to apply
        sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'.
        rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize).
        start (int): Offset in the complete result for where the set of returned datasets should begin
        facet (string): Whether to enable faceted results. Defaults to True.
        facet.mincount (int): Minimum counts for facet fields should be included in the results
        facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50.
        facet.field (List[str]): Fields to facet upon. Default is empty.
        use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False.

    Returns:
        List[Dataset]: list of datasets resulting from query
    """
    dataset = Dataset(configuration=configuration)
    total_rows = kwargs.get('rows', cls.max_int)
    start = kwargs.get('start', 0)
    all_datasets = None
    attempts = 0
    while attempts < cls.max_attempts and all_datasets is None:
        # if the count values vary for multiple calls, then must redo query
        all_datasets = list()
        counts = set()
        for page in range(total_rows // page_size + 1):
            pagetimespagesize = page * page_size
            kwargs['start'] = start + pagetimespagesize
            rows_left = total_rows - pagetimespagesize
            rows = min(rows_left, page_size)
            kwargs['rows'] = rows
            _, result = dataset._read_from_hdx('dataset', query, 'q', Dataset.actions()['search'], **kwargs)
            datasets = list()
            if result:
                count = result.get('count', None)
                if count:
                    counts.add(count)
                    no_results = len(result['results'])
                    for datasetdict in result['results']:
                        dataset = Dataset(configuration=configuration)
                        dataset.old_data = dict()
                        dataset.data = datasetdict
                        dataset._dataset_create_resources()
                        datasets.append(dataset)
                    all_datasets += datasets
                    if no_results < rows:
                        break
                else:
                    break
            else:
                logger.debug(result)
        if all_datasets and len(counts) != 1:  # Make sure counts are all same for multiple calls to HDX
            all_datasets = None
            attempts += 1
        else:
            ids = [dataset['id'] for dataset in all_datasets]  # check for duplicates (shouldn't happen)
            if len(ids) != len(set(ids)):
                all_datasets = None
                attempts += 1
    if attempts == cls.max_attempts and all_datasets is None:
        raise HDXError('Maximum attempts reached for searching for datasets!')
    return all_datasets
Searches for datasets in HDX Args: query (Optional[str]): Query (in Solr format). Defaults to '*:*'. configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. page_size (int): Size of page to return. Defaults to 1000. **kwargs: See below fq (string): Any filter queries to apply sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'. rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize). start (int): Offset in the complete result for where the set of returned datasets should begin facet (string): Whether to enable faceted results. Defaults to True. facet.mincount (int): Minimum counts for facet fields should be included in the results facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50. facet.field (List[str]): Fields to facet upon. Default is empty. use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False. Returns: List[Dataset]: list of datasets resulting from query
entailment
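A minimal search sketch; the query and filter values are hypothetical:

datasets = Dataset.search_in_hdx('education', rows=10, fq='groups:afg',
                                 sort='metadata_modified desc')
for dataset in datasets:
    print(dataset['name'])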
def get_all_dataset_names(configuration=None, **kwargs):
    # type: (Optional[Configuration], Any) -> List[str]
    """Get all dataset names in HDX

    Args:
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
        **kwargs: See below
        limit (int): Number of rows to return. Defaults to all dataset names.
        offset (int): Offset in the complete result for where the set of returned dataset names should begin

    Returns:
        List[str]: list of all dataset names in HDX
    """
    dataset = Dataset(configuration=configuration)
    dataset['id'] = 'all dataset names'  # only for error message if produced
    return dataset._write_to_hdx('list', kwargs, 'id')
Get all dataset names in HDX Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. **kwargs: See below limit (int): Number of rows to return. Defaults to all dataset names. offset (int): Offset in the complete result for where the set of returned dataset names should begin Returns: List[str]: list of all dataset names in HDX
entailment
def get_all_datasets(cls, configuration=None, page_size=1000, check_duplicates=True, **kwargs):
    # type: (Optional[Configuration], int, bool, Any) -> List['Dataset']
    """Get all datasets in HDX

    Args:
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
        page_size (int): Size of page to return. Defaults to 1000.
        check_duplicates (bool): Whether to check for duplicate datasets. Defaults to True.
        **kwargs: See below
        limit (int): Number of rows to return. Defaults to all datasets (sys.maxsize)
        offset (int): Offset in the complete result for where the set of returned datasets should begin

    Returns:
        List[Dataset]: list of all datasets in HDX
    """
    dataset = Dataset(configuration=configuration)
    dataset['id'] = 'all datasets'  # only for error message if produced
    total_rows = kwargs.get('limit', cls.max_int)
    start = kwargs.get('offset', 0)
    all_datasets = None
    attempts = 0
    while attempts < cls.max_attempts and all_datasets is None:
        # if the dataset names vary for multiple calls, then must redo query
        all_datasets = list()
        for page in range(total_rows // page_size + 1):
            pagetimespagesize = page * page_size
            kwargs['offset'] = start + pagetimespagesize
            rows_left = total_rows - pagetimespagesize
            rows = min(rows_left, page_size)
            kwargs['limit'] = rows
            result = dataset._write_to_hdx('all', kwargs, 'id')
            datasets = list()
            if isinstance(result, list):
                no_results = len(result)
                if no_results == 0 and page == 0:
                    all_datasets = None
                    break
                for datasetdict in result:
                    dataset = Dataset(configuration=configuration)
                    dataset.old_data = dict()
                    dataset.data = datasetdict
                    dataset._dataset_create_resources()
                    datasets.append(dataset)
                all_datasets += datasets
                if no_results < rows:
                    break
            else:
                logger.debug(result)
        if all_datasets is None:
            attempts += 1
        elif check_duplicates:
            names_list = [dataset['name'] for dataset in all_datasets]
            names = set(names_list)
            if len(names_list) != len(names):  # check for duplicates (shouldn't happen)
                all_datasets = None
                attempts += 1
        # This check is no longer valid because of showcases being returned by package_list!
        # elif total_rows == max_int:
        #     all_names = set(Dataset.get_all_dataset_names())  # check dataset names match package_list
        #     if names != all_names:
        #         all_datasets = None
        #         attempts += 1
    if attempts == cls.max_attempts and all_datasets is None:
        raise HDXError('Maximum attempts reached for getting all datasets!')
    return all_datasets
Get all datasets in HDX Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. page_size (int): Size of page to return. Defaults to 1000. check_duplicates (bool): Whether to check for duplicate datasets. Defaults to True. **kwargs: See below limit (int): Number of rows to return. Defaults to all datasets (sys.maxsize) offset (int): Offset in the complete result for where the set of returned datasets should begin Returns: List[Dataset]: list of all datasets in HDX
entailment
def get_all_resources(datasets):
    # type: (List['Dataset']) -> List[hdx.data.resource.Resource]
    """Get all resources from a list of datasets (such as returned by search)

    Args:
        datasets (List[Dataset]): list of datasets

    Returns:
        List[hdx.data.resource.Resource]: list of resources within those datasets
    """
    resources = []
    for dataset in datasets:
        for resource in dataset.get_resources():
            resources.append(resource)
    return resources
Get all resources from a list of datasets (such as returned by search) Args: datasets (List[Dataset]): list of datasets Returns: List[hdx.data.resource.Resource]: list of resources within those datasets
entailment
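Combining search with the flattening helper above (the query is hypothetical):

datasets = Dataset.search_in_hdx('health', rows=25)
resources = Dataset.get_all_resources(datasets)
print('%d resources across %d datasets' % (len(resources), len(datasets)))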
def get_dataset_date_as_datetime(self):
    # type: () -> Optional[datetime]
    """Get dataset date as datetime.datetime object. For range returns start date.

    Returns:
        Optional[datetime.datetime]: Dataset date in datetime object or None if no date is set
    """
    dataset_date = self.data.get('dataset_date', None)
    if dataset_date:
        if '-' in dataset_date:
            dataset_date = dataset_date.split('-')[0]
        return datetime.strptime(dataset_date, '%m/%d/%Y')
    else:
        return None
Get dataset date as datetime.datetime object. For range returns start date. Returns: Optional[datetime.datetime]: Dataset date in datetime object or None if no date is set
entailment
def get_dataset_end_date_as_datetime(self):
    # type: () -> Optional[datetime]
    """Get dataset end date as datetime.datetime object.

    Returns:
        Optional[datetime.datetime]: Dataset date in datetime object or None if no date is set
    """
    dataset_date = self.data.get('dataset_date', None)
    if dataset_date:
        if '-' in dataset_date:
            dataset_date = dataset_date.split('-')[1]
            return datetime.strptime(dataset_date, '%m/%d/%Y')
    return None
Get dataset end date as datetime.datetime object. Returns: Optional[datetime.datetime]: Dataset date in datetime object or None if no date is set
entailment
def _get_formatted_date(dataset_date, date_format=None):
    # type: (Optional[datetime], Optional[str]) -> Optional[str]
    """Get supplied dataset date as string in specified format.
    If no format is supplied, an ISO 8601 string is returned.

    Args:
        dataset_date (Optional[datetime.datetime]): dataset date in datetime.datetime format
        date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.

    Returns:
        Optional[str]: Dataset date string or None if no date is set
    """
    if dataset_date:
        if date_format:
            return dataset_date.strftime(date_format)
        else:
            return dataset_date.date().isoformat()
    else:
        return None
Get supplied dataset date as string in specified format. If no format is supplied, an ISO 8601 string is returned. Args: dataset_date (Optional[datetime.datetime]): dataset date in datetime.datetime format date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None. Returns: Optional[str]: Dataset date string or None if no date is set
entailment
def get_dataset_date(self, date_format=None):
    # type: (Optional[str]) -> Optional[str]
    """Get dataset date as string in specified format. For range returns start date.
    If no format is supplied, an ISO 8601 string is returned.

    Args:
        date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.

    Returns:
        Optional[str]: Dataset date string or None if no date is set
    """
    dataset_date = self.get_dataset_date_as_datetime()
    return self._get_formatted_date(dataset_date, date_format)
Get dataset date as string in specified format. For range returns start date. If no format is supplied, an ISO 8601 string is returned. Args: date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None. Returns: Optional[str]: Dataset date string or None if no date is set
entailment
def get_dataset_end_date(self, date_format=None):
    # type: (Optional[str]) -> Optional[str]
    """Get dataset end date as string in specified format.
    If no format is supplied, an ISO 8601 string is returned.

    Args:
        date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.

    Returns:
        Optional[str]: Dataset end date string or None if no date is set
    """
    dataset_date = self.get_dataset_end_date_as_datetime()
    return self._get_formatted_date(dataset_date, date_format)
Get dataset end date as string in specified format. If no format is supplied, an ISO 8601 string is returned. Args: date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None. Returns: Optional[str]: Dataset end date string or None if no date is set
entailment
def set_dataset_date_from_datetime(self, dataset_date, dataset_end_date=None):
    # type: (datetime, Optional[datetime]) -> None
    """Set dataset date from datetime.datetime object

    Args:
        dataset_date (datetime.datetime): Dataset date
        dataset_end_date (Optional[datetime.datetime]): Dataset end date

    Returns:
        None
    """
    start_date = dataset_date.strftime('%m/%d/%Y')
    if dataset_end_date is None:
        self.data['dataset_date'] = start_date
    else:
        end_date = dataset_end_date.strftime('%m/%d/%Y')
        self.data['dataset_date'] = '%s-%s' % (start_date, end_date)
Set dataset date from datetime.datetime object Args: dataset_date (datetime.datetime): Dataset date dataset_end_date (Optional[datetime.datetime]): Dataset end date Returns: None
entailment
def _parse_date(dataset_date, date_format):
    # type: (str, Optional[str]) -> datetime
    """Parse dataset date from string using specified format. If no format is supplied,
    the function will guess. For unambiguous formats, this should be fine.

    Args:
        dataset_date (str): Dataset date string
        date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.

    Returns:
        datetime.datetime
    """
    if date_format is None:
        try:
            return parser.parse(dataset_date)
        except (ValueError, OverflowError) as e:
            raisefrom(HDXError, 'Invalid dataset date!', e)
    else:
        try:
            return datetime.strptime(dataset_date, date_format)
        except ValueError as e:
            raisefrom(HDXError, 'Invalid dataset date!', e)
Parse dataset date from string using specified format. If no format is supplied, the function will guess. For unambiguous formats, this should be fine. Args: dataset_date (str): Dataset date string date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None. Returns: datetime.datetime
entailment
def set_dataset_date(self, dataset_date, dataset_end_date=None, date_format=None):
    # type: (str, Optional[str], Optional[str]) -> None
    """Set dataset date from string using specified format. If no format is supplied,
    the function will guess. For unambiguous formats, this should be fine.

    Args:
        dataset_date (str): Dataset date string
        dataset_end_date (Optional[str]): Dataset end date string
        date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.

    Returns:
        None
    """
    parsed_date = self._parse_date(dataset_date, date_format)
    if dataset_end_date is None:
        self.set_dataset_date_from_datetime(parsed_date)
    else:
        parsed_end_date = self._parse_date(dataset_end_date, date_format)
        self.set_dataset_date_from_datetime(parsed_date, parsed_end_date)
Set dataset date from string using specified format. If no format is supplied, the function will guess. For unambiguous formats, this should be fine. Args: dataset_date (str): Dataset date string dataset_end_date (Optional[str]): Dataset end date string date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None. Returns: None
entailment
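A date round trip grounded in the methods above: the setter stores MM/DD/YYYY internally and the getters default to ISO 8601 output:

dataset.set_dataset_date('2019-06-01', dataset_end_date='2019-06-30',
                         date_format='%Y-%m-%d')
assert dataset.get_dataset_date() == '2019-06-01'
assert dataset.get_dataset_end_date(date_format='%d.%m.%Y') == '30.06.2019'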
def set_dataset_year_range(self, dataset_year, dataset_end_year=None):
    # type: (Union[str, int], Optional[Union[str, int]]) -> None
    """Set dataset date as a range from year or start and end year.

    Args:
        dataset_year (Union[str, int]): Dataset year given as string or int
        dataset_end_year (Optional[Union[str, int]]): Dataset end year given as string or int

    Returns:
        None
    """
    if isinstance(dataset_year, int):
        dataset_date = '01/01/%d' % dataset_year
    elif isinstance(dataset_year, str):
        dataset_date = '01/01/%s' % dataset_year
    else:
        raise hdx.data.hdxobject.HDXError('dataset_year has type %s which is not supported!' % type(dataset_year).__name__)
    if dataset_end_year is None:
        dataset_end_year = dataset_year
    if isinstance(dataset_end_year, int):
        dataset_end_date = '31/12/%d' % dataset_end_year
    elif isinstance(dataset_end_year, str):
        dataset_end_date = '31/12/%s' % dataset_end_year
    else:
        raise hdx.data.hdxobject.HDXError('dataset_end_year has type %s which is not supported!' % type(dataset_end_year).__name__)
    self.set_dataset_date(dataset_date, dataset_end_date)
Set dataset date as a range from year or start and end year. Args: dataset_year (Union[str, int]): Dataset year given as string or int dataset_end_year (Optional[Union[str, int]]): Dataset end year given as string or int Returns: None
entailment
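A year-range sketch: a single year expands to a full-year range:

dataset.set_dataset_year_range(2018)
assert dataset.get_dataset_date() == '2018-01-01'
assert dataset.get_dataset_end_date() == '2018-12-31'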
def get_expected_update_frequency(self):
    # type: () -> Optional[str]
    """Get expected update frequency (in textual rather than numeric form)

    Returns:
        Optional[str]: Update frequency in textual form or None if the update frequency doesn't exist or is blank.
    """
    days = self.data.get('data_update_frequency', None)
    if days:
        return Dataset.transform_update_frequency(days)
    else:
        return None
Get expected update frequency (in textual rather than numeric form) Returns: Optional[str]: Update frequency in textual form or None if the update frequency doesn't exist or is blank.
entailment
def set_expected_update_frequency(self, update_frequency):
    # type: (str) -> None
    """Set expected update frequency

    Args:
        update_frequency (str): Update frequency

    Returns:
        None
    """
    try:
        int(update_frequency)
    except ValueError:
        update_frequency = Dataset.transform_update_frequency(update_frequency)
    if not update_frequency:
        raise HDXError('Invalid update frequency supplied!')
    self.data['data_update_frequency'] = update_frequency
Set expected update frequency Args: update_frequency (str): Update frequency Returns: None
entailment
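A frequency round-trip sketch; it assumes 'Every month' is among the textual forms that Dataset.transform_update_frequency recognizes (that mapping is not shown here):

dataset.set_expected_update_frequency('Every month')  # stored in numeric form
print(dataset.get_expected_update_frequency())        # mapped back to textual form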
def remove_tag(self, tag):
    # type: (str) -> bool
    """Remove a tag

    Args:
        tag (str): Tag to remove

    Returns:
        bool: True if tag removed or False if not
    """
    return self._remove_hdxobject(self.data.get('tags'), tag, matchon='name')
Remove a tag Args: tag (str): Tag to remove Returns: bool: True if tag removed or False if not
entailment
def get_location(self, locations=None):
    # type: (Optional[List[str]]) -> List[str]
    """Return the dataset's location

    Args:
        locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.

    Returns:
        List[str]: list of locations or [] if there are none
    """
    countries = self.data.get('groups', None)
    if not countries:
        return list()
    return [Locations.get_location_from_HDX_code(x['name'], locations=locations,
                                                 configuration=self.configuration) for x in countries]
Return the dataset's location Args: locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. Returns: List[str]: list of locations or [] if there are none
entailment
def add_country_location(self, country, exact=True, locations=None, use_live=True):
    # type: (str, bool, Optional[List[str]], bool) -> bool
    """Add a country. If an iso 3 code is not provided, value is parsed and if it is a valid
    country name, converted to an iso 3 code. If the country is already added, it is ignored.

    Args:
        country (str): Country to add
        exact (bool): True for exact matching or False to allow fuzzy matching. Defaults to True.
        locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.
        use_live (bool): Try to use latest country data from web rather than file in package. Defaults to True.

    Returns:
        bool: True if country added or False if country already present
    """
    iso3, match = Country.get_iso3_country_code_fuzzy(country, use_live=use_live)
    if iso3 is None:
        raise HDXError('Country: %s - cannot find iso3 code!' % country)
    return self.add_other_location(iso3, exact=exact,
                                   alterror='Country: %s with iso3: %s could not be found in HDX list!' % (country, iso3),
                                   locations=locations)
Add a country. If an iso 3 code is not provided, value is parsed and if it is a valid country name, converted to an iso 3 code. If the country is already added, it is ignored. Args: country (str): Country to add exact (bool): True for exact matching or False to allow fuzzy matching. Defaults to True. locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. use_live (bool): Try to use latest country data from web rather than file in package. Defaults to True. Returns: bool: True if country added or False if country already present
entailment
def add_country_locations(self, countries, locations=None, use_live=True):
    # type: (List[str], Optional[List[str]], bool) -> bool
    """Add a list of countries. If iso 3 codes are not provided, values are parsed and where
    they are valid country names, converted to iso 3 codes. If any country is already added,
    it is ignored.

    Args:
        countries (List[str]): list of countries to add
        locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.
        use_live (bool): Try to use latest country data from web rather than file in package. Defaults to True.

    Returns:
        bool: True if all countries added or False if any already present.
    """
    allcountriesadded = True
    for country in countries:
        if not self.add_country_location(country, locations=locations, use_live=use_live):
            allcountriesadded = False
    return allcountriesadded
Add a list of countries. If iso 3 codes are not provided, values are parsed and where they are valid country names, converted to iso 3 codes. If any country is already added, it is ignored. Args: countries (List[str]): list of countries to add locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. use_live (bool): Try to use latest country data from web rather than file in package. Defaults to True. Returns: bool: True if all countries added or False if any already present.
entailment
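A location sketch grounded in the two methods above; the country values are examples:

dataset.add_country_location('Kenya')          # name resolved to an iso3 code
dataset.add_country_locations(['AFG', 'PAK'])  # iso3 codes resolve directly
print(dataset.get_location())                  # human-readable location names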
def add_region_location(self, region, locations=None, use_live=True):
    # type: (str, Optional[List[str]], bool) -> bool
    """Add all countries in a region. If a 3 digit UNStats M49 region code is not provided,
    value is parsed as a region name. If any country is already added, it is ignored.

    Args:
        region (str): M49 region, intermediate region or subregion to add
        locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.
        use_live (bool): Try to use latest country data from web rather than file in package. Defaults to True.

    Returns:
        bool: True if all countries in region added or False if any already present.
    """
    return self.add_country_locations(Country.get_countries_in_region(region, exception=HDXError,
                                                                      use_live=use_live),
                                      locations=locations)
Add all countries in a region. If a 3 digit UNStats M49 region code is not provided, value is parsed as a region name. If any country is already added, it is ignored. Args: region (str): M49 region, intermediate region or subregion to add locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. use_live (bool): Try to use latest country data from web rather than file in package. Defaults to True. Returns: bool: True if all countries in region added or False if any already present.
entailment
def add_other_location(self, location, exact=True, alterror=None, locations=None):
    # type: (str, bool, Optional[str], Optional[List[str]]) -> bool
    """Add a location which is not a country or region. Value is parsed and compared to
    existing locations in HDX. If the location is already added, it is ignored.

    Args:
        location (str): Location to add
        exact (bool): True for exact matching or False to allow fuzzy matching. Defaults to True.
        alterror (Optional[str]): Alternative error message to builtin if location not found. Defaults to None.
        locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX.

    Returns:
        bool: True if location added or False if location already present
    """
    hdx_code, match = Locations.get_HDX_code_from_location_partial(location, locations=locations,
                                                                   configuration=self.configuration)
    if hdx_code is None or (exact is True and match is False):
        if alterror is None:
            raise HDXError('Location: %s - cannot find in HDX!' % location)
        else:
            raise HDXError(alterror)
    groups = self.data.get('groups', None)
    hdx_code = hdx_code.lower()
    if groups:
        if hdx_code in [x['name'] for x in groups]:
            return False
    else:
        groups = list()
    groups.append({'name': hdx_code})
    self.data['groups'] = groups
    return True
Add a location which is not a country or region. Value is parsed and compared to existing locations in HDX. If the location is already added, it is ignored. Args: location (str): Location to add exact (bool): True for exact matching or False to allow fuzzy matching. Defaults to True. alterror (Optional[str]): Alternative error message to builtin if location not found. Defaults to None. locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. Returns: bool: True if location added or False if location already present
entailment
def remove_location(self, location):
    # type: (str) -> bool
    """Remove a location. If the location is not present, it is ignored.

    Args:
        location (str): Location to remove

    Returns:
        bool: True if location removed or False if not
    """
    res = self._remove_hdxobject(self.data.get('groups'), location, matchon='name')
    if not res:
        res = self._remove_hdxobject(self.data.get('groups'), location.upper(), matchon='name')
    if not res:
        res = self._remove_hdxobject(self.data.get('groups'), location.lower(), matchon='name')
    return res
Remove a location. If the location is not present, it is ignored. Args: location (str): Location to remove Returns: bool: True if location removed or False if not
entailment
def get_maintainer(self):
    # type: () -> hdx.data.user.User
    """Get the dataset's maintainer.

    Returns:
        User: Dataset's maintainer
    """
    return hdx.data.user.User.read_from_hdx(self.data['maintainer'], configuration=self.configuration)
Get the dataset's maintainer. Returns: User: Dataset's maintainer
entailment
def set_maintainer(self, maintainer):
    # type: (Union[hdx.data.user.User,Dict,str]) -> None
    """Set the dataset's maintainer.

    Args:
        maintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary.

    Returns:
        None
    """
    if isinstance(maintainer, hdx.data.user.User) or isinstance(maintainer, dict):
        if 'id' not in maintainer:
            maintainer = hdx.data.user.User.read_from_hdx(maintainer['name'], configuration=self.configuration)
        maintainer = maintainer['id']
    elif not isinstance(maintainer, str):
        raise HDXError('Type %s cannot be added as a maintainer!' % type(maintainer).__name__)
    if is_valid_uuid(maintainer) is False:
        raise HDXError('%s is not a valid user id for a maintainer!' % maintainer)
    self.data['maintainer'] = maintainer
Set the dataset's maintainer. Args: maintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary. Returns: None
entailment
def get_organization(self):
    # type: () -> hdx.data.organization.Organization
    """Get the dataset's organization.

    Returns:
        Organization: Dataset's organization
    """
    return hdx.data.organization.Organization.read_from_hdx(self.data['owner_org'], configuration=self.configuration)
Get the dataset's organization. Returns: Organization: Dataset's organization
entailment
def set_organization(self, organization):
    # type: (Union[hdx.data.organization.Organization,Dict,str]) -> None
    """Set the dataset's organization.

    Args:
        organization (Union[Organization,Dict,str]): Either an Organization id or Organization metadata from an Organization object or dictionary.

    Returns:
        None
    """
    if isinstance(organization, hdx.data.organization.Organization) or isinstance(organization, dict):
        if 'id' not in organization:
            organization = hdx.data.organization.Organization.read_from_hdx(organization['name'],
                                                                            configuration=self.configuration)
        organization = organization['id']
    elif not isinstance(organization, str):
        raise HDXError('Type %s cannot be added as an organization!' % type(organization).__name__)
    if is_valid_uuid(organization) is False and organization != 'hdx':
        raise HDXError('%s is not a valid organization id!' % organization)
    self.data['owner_org'] = organization
Set the dataset's organization. Args: organization (Union[Organization,Dict,str]): Either an Organization id or Organization metadata from an Organization object or dictionary. Returns: None
entailment
def get_showcases(self):
    # type: () -> List[hdx.data.showcase.Showcase]
    """Get any showcases the dataset is in

    Returns:
        List[Showcase]: list of showcases
    """
    assoc_result, showcases_dicts = self._read_from_hdx('showcase', self.data['id'], fieldname='package_id',
                                                        action=hdx.data.showcase.Showcase.actions()['list_showcases'])
    showcases = list()
    if assoc_result:
        for showcase_dict in showcases_dicts:
            showcase = hdx.data.showcase.Showcase(showcase_dict, configuration=self.configuration)
            showcases.append(showcase)
    return showcases
Get any showcases the dataset is in Returns: List[Showcase]: list of showcases
entailment
def _get_dataset_showcase_dict(self, showcase):
    # type: (Union[hdx.data.showcase.Showcase, Dict,str]) -> Dict
    """Get dataset showcase dict

    Args:
        showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary

    Returns:
        dict: dataset showcase dict
    """
    if isinstance(showcase, hdx.data.showcase.Showcase) or isinstance(showcase, dict):
        if 'id' not in showcase:
            showcase = hdx.data.showcase.Showcase.read_from_hdx(showcase['name'])
        showcase = showcase['id']
    elif not isinstance(showcase, str):
        raise HDXError('Type %s cannot be added as a showcase!' % type(showcase).__name__)
    if is_valid_uuid(showcase) is False:
        raise HDXError('%s is not a valid showcase id!' % showcase)
    return {'package_id': self.data['id'], 'showcase_id': showcase}
Get dataset showcase dict Args: showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary Returns: dict: dataset showcase dict
entailment
def add_showcase(self, showcase, showcases_to_check=None):
    # type: (Union[hdx.data.showcase.Showcase,Dict,str], List[hdx.data.showcase.Showcase]) -> bool
    """Add dataset to showcase

    Args:
        showcase (Union[Showcase,Dict,str]): Either a showcase id or showcase metadata from a Showcase object or dictionary
        showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset.

    Returns:
        bool: True if the showcase was added, False if already present
    """
    dataset_showcase = self._get_dataset_showcase_dict(showcase)
    if showcases_to_check is None:
        showcases_to_check = self.get_showcases()
    for showcase in showcases_to_check:
        if dataset_showcase['showcase_id'] == showcase['id']:
            return False
    showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']},
                                          configuration=self.configuration)
    showcase._write_to_hdx('associate', dataset_showcase, 'package_id')
    return True
Add dataset to showcase Args: showcase (Union[Showcase,Dict,str]): Either a showcase id or showcase metadata from a Showcase object or dictionary showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset. Returns: bool: True if the showcase was added, False if already present
entailment
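A hedged sketch of attaching a dataset to a showcase, assuming Dataset.read_from_hdx behaves like the read_from_hdx methods shown for other HDX objects in this section; both identifiers are invented.

from hdx.data.dataset import Dataset

dataset = Dataset.read_from_hdx('hypothetical-dataset-name')
# Returns False when the dataset is already in the showcase
added = dataset.add_showcase('5a0f1c2d-3e4b-4a6c-9d8e-7f6a5b4c3d2e')
print('added' if added else 'already present')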
def add_showcases(self, showcases, showcases_to_check=None): # type: (List[Union[hdx.data.showcase.Showcase,Dict,str]], List[hdx.data.showcase.Showcase]) -> bool """Add dataset to multiple showcases Args: showcases (List[Union[Showcase,Dict,str]]): A list of either showcase ids or showcase metadata from Showcase objects or dictionaries showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset. Returns: bool: True if all showcases added or False if any already present """ if showcases_to_check is None: showcases_to_check = self.get_showcases() allshowcasesadded = True for showcase in showcases: if not self.add_showcase(showcase, showcases_to_check=showcases_to_check): allshowcasesadded = False return allshowcasesadded
Add dataset to multiple showcases Args: showcases (List[Union[Showcase,Dict,str]]): A list of either showcase ids or showcase metadata from Showcase objects or dictionaries showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset. Returns: bool: True if all showcases added or False if any already present
entailment
def remove_showcase(self, showcase): # type: (Union[hdx.data.showcase.Showcase,Dict,str]) -> None """Remove dataset from showcase Args: showcase (Union[Showcase,Dict,str]): Either a showcase id string or showcase metadata from a Showcase object or dictionary Returns: None """ dataset_showcase = self._get_dataset_showcase_dict(showcase) showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration) showcase._write_to_hdx('disassociate', dataset_showcase, 'package_id')
Remove dataset from showcase Args: showcase (Union[Showcase,Dict,str]): Either a showcase id string or showcase metadata from a Showcase object or dictionary Returns: None
entailment
def set_requestable(self, requestable=True): # type: (bool) -> None """Set the dataset to be of type requestable or not Args: requestable (bool): Set whether dataset is requestable. Defaults to True. Returns: None """ self.data['is_requestdata_type'] = requestable if requestable: self.data['private'] = False
Set the dataset to be of type requestable or not Args: requestable (bool): Set whether dataset is requestable. Defaults to True. Returns: None
entailment
def get_filetypes(self): # type: () -> List[str] """Return list of filetypes in your data Returns: List[str]: List of filetypes """ if not self.is_requestable(): return [resource.get_file_type() for resource in self.get_resources()] return self._get_stringlist_from_commastring('file_types')
Return list of filetypes in your data Returns: List[str]: List of filetypes
entailment
def clean_dataset_tags(self): # type: () -> Tuple[bool, bool] """Clean dataset tags according to tags cleanup spreadsheet and return if any changes occurred Returns: Tuple[bool, bool]: Returns (True if tags changed or False if not, True if error or False if not) """ tags_dict, wildcard_tags = Tags.tagscleanupdicts() def delete_tag(tag): logger.info('%s - Deleting tag %s!' % (self.data['name'], tag)) return self.remove_tag(tag), False def update_tag(tag, final_tags, wording, remove_existing=True): text = '%s - %s: %s -> ' % (self.data['name'], wording, tag) if not final_tags: logger.error('%snothing!' % text) return False, True tags_lower_five = final_tags[:5].lower() if tags_lower_five == 'merge' or tags_lower_five == 'split' or ( ';' not in final_tags and len(final_tags) > 50): logger.error('%s%s - Invalid final tag!' % (text, final_tags)) return False, True if remove_existing: self.remove_tag(tag) tags = ', '.join(self.get_tags()) if self.add_tags(final_tags.split(';')): logger.info('%s%s! Dataset tags: %s' % (text, final_tags, tags)) else: logger.warning( '%s%s - At least one of the tags already exists! Dataset tags: %s' % (text, final_tags, tags)) return True, False def do_action(tag, tags_dict_key): whattodo = tags_dict[tags_dict_key] action = whattodo[u'action'] final_tags = whattodo[u'final tags (semicolon separated)'] if action == u'Delete': changed, error = delete_tag(tag) elif action == u'Merge': changed, error = update_tag(tag, final_tags, 'Merging') elif action == u'Fix spelling': changed, error = update_tag(tag, final_tags, 'Fixing spelling') elif action == u'Non English': changed, error = update_tag(tag, final_tags, 'Anglicising', remove_existing=False) else: changed = False error = False return changed, error def process_tag(tag): changed = False error = False if tag in tags_dict.keys(): changed, error = do_action(tag, tag) else: for wildcard_tag in wildcard_tags: if fnmatch.fnmatch(tag, wildcard_tag): changed, error = do_action(tag, wildcard_tag) break return changed, error anychange = False anyerror = False for tag in self.get_tags(): changed, error = process_tag(tag) if changed: anychange = True if error: anyerror = True return anychange, anyerror
Clean dataset tags according to tags cleanup spreadsheet and return if any changes occurred Returns: Tuple[bool, bool]: Returns (True if tags changed or False if not, True if error or False if not)
entailment
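A short sketch of how clean_dataset_tags might be driven; it assumes a dataset object and that Dataset exposes the usual update_in_hdx method, which is not shown in this section.

changed, error = dataset.clean_dataset_tags()
if changed and not error:
    dataset.update_in_hdx()  # persist the cleaned tags (method assumed)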
def set_quickchart_resource(self, resource): # type: (Union[hdx.data.resource.Resource,Dict,str,int]) -> bool """Set the resource that will be used for displaying QuickCharts in dataset preview Args: resource (Union[hdx.data.resource.Resource,Dict,str,int]): Either resource id or name, resource metadata from a Resource object or a dictionary or position Returns: bool: Returns True if resource for QuickCharts in dataset preview set or False if not """ if isinstance(resource, int) and not isinstance(resource, bool): resource = self.get_resources()[resource] if isinstance(resource, hdx.data.resource.Resource) or isinstance(resource, dict): res = resource.get('id') if res is None: resource = resource['name'] else: resource = res elif not isinstance(resource, str): raise hdx.data.hdxobject.HDXError('Resource id cannot be found in type %s!' % type(resource).__name__) if is_valid_uuid(resource) is True: search = 'id' else: search = 'name' changed = False for dataset_resource in self.resources: if dataset_resource[search] == resource: dataset_resource.enable_dataset_preview() self.preview_resource() changed = True else: dataset_resource.disable_dataset_preview() return changed
Set the resource that will be used for displaying QuickCharts in dataset preview Args: resource (Union[hdx.data.resource.Resource,Dict,str,int]): Either resource id or name, resource metadata from a Resource object or a dictionary or position Returns: bool: Returns True if resource for QuickCharts in dataset preview set or False if not
entailment
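set_quickchart_resource accepts a position, a name, an id or resource metadata; a sketch with hypothetical values:

# By position: enable preview on the first resource and disable it on the rest
dataset.set_quickchart_resource(0)
# By name: any string that is not a valid UUID is matched against 'name'
dataset.set_quickchart_resource('admin1-boundaries.csv')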
def create_default_views(self, create_datastore_views=False): # type: (bool) -> None """Create default resource views for all resources in dataset Args: create_datastore_views (bool): Whether to try to create resource views that point to the datastore Returns: None """ package = deepcopy(self.data) if self.resources: package['resources'] = self._convert_hdxobjects(self.resources) data = {'package': package, 'create_datastore_views': create_datastore_views} self._write_to_hdx('create_default_views', data, 'package')
Create default resource views for all resources in dataset Args: create_datastore_views (bool): Whether to try to create resource views that point to the datastore Returns: None
entailment
def _get_credentials(self): # type: () -> Optional[Tuple[str, str]] """ Return HDX site username and password Returns: Optional[Tuple[str, str]]: HDX site username and password or None """ site = self.data[self.hdx_site] username = site.get('username') if username: return b64decode(username).decode('utf-8'), b64decode(site['password']).decode('utf-8') else: return None
Return HDX site username and password Returns: Optional[Tuple[str, str]]: HDX site username and password or None
entailment
def call_remoteckan(self, *args, **kwargs): # type: (Any, Any) -> Dict """ Calls the remote CKAN Args: *args: Arguments to pass to remote CKAN call_action method **kwargs: Keyword arguments to pass to remote CKAN call_action method Returns: Dict: The response from the remote CKAN call_action method """ requests_kwargs = kwargs.get('requests_kwargs', dict()) credentials = self._get_credentials() if credentials: requests_kwargs['auth'] = credentials kwargs['requests_kwargs'] = requests_kwargs apikey = kwargs.get('apikey', self.get_api_key()) kwargs['apikey'] = apikey return self.remoteckan().call_action(*args, **kwargs)
Calls the remote CKAN Args: *args: Arguments to pass to remote CKAN call_action method **kwargs: Keyword arguments to pass to remote CKAN call_action method Returns: Dict: The response from the remote CKAN call_action method
entailment
def _environment_variables(**kwargs): # type: (Any) -> Any """ Overwrite keyword arguments with environment variables Args: **kwargs: See below hdx_url (str): HDX url to use. Overrides hdx_site. hdx_site (str): HDX site to use eg. prod, test. Defaults to test. hdx_key (str): Your HDX key. Ignored if hdx_read_only = True. Returns: kwargs: Changed keyword arguments """ hdx_key = os.getenv('HDX_KEY') if hdx_key is not None: kwargs['hdx_key'] = hdx_key hdx_url = os.getenv('HDX_URL') if hdx_url is not None: kwargs['hdx_url'] = hdx_url else: hdx_site = os.getenv('HDX_SITE') if hdx_site is not None: kwargs['hdx_site'] = hdx_site return kwargs
Overwrite keyword arguments with environment variables Args: **kwargs: See below hdx_url (str): HDX url to use. Overrides hdx_site. hdx_site (str): HDX site to use eg. prod, test. Defaults to test. hdx_key (str): Your HDX key. Ignored if hdx_read_only = True. Returns: kwargs: Changed keyword arguments
entailment
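The override order is easiest to see with a quick sketch; _environment_variables is private, so this is illustrative only. HDX_URL wins over HDX_SITE, and either beats the keyword argument.

import os

os.environ['HDX_SITE'] = 'prod'  # hypothetical environment
kwargs = Configuration._environment_variables(hdx_site='test')
# kwargs['hdx_site'] is now 'prod'; had HDX_URL been set instead,
# kwargs would gain 'hdx_url' and hdx_site would keep its passed value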
def create_remoteckan(cls, site_url, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, session=None, **kwargs): # type: (str, Optional[str], Optional[str], Optional[str], requests.Session, Any) -> ckanapi.RemoteCKAN """ Create remote CKAN instance from configuration Args: site_url (str): Site url. user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. session (requests.Session): requests Session object to use. Defaults to calling hdx.utilities.session.get_session() Returns: ckanapi.RemoteCKAN: Remote CKAN instance """ if not session: session = get_session(user_agent, user_agent_config_yaml, user_agent_lookup, prefix=Configuration.prefix, method_whitelist=frozenset(['HEAD', 'TRACE', 'GET', 'POST', 'PUT', 'OPTIONS', 'DELETE']), **kwargs) ua = session.headers['User-Agent'] else: ua = kwargs.get('full_agent') if not ua: ua = UserAgent.get(user_agent, user_agent_config_yaml, user_agent_lookup, prefix=Configuration.prefix, **kwargs) return ckanapi.RemoteCKAN(site_url, user_agent=ua, session=session)
Create remote CKAN instance from configuration Args: site_url (str): Site url. user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. session (requests.Session): requests Session object to use. Defaults to calling hdx.utilities.session.get_session() Returns: ckanapi.RemoteCKAN: Remote CKAN instance
entailment
def setup_remoteckan(self, remoteckan=None, **kwargs): # type: (Optional[ckanapi.RemoteCKAN], Any) -> None """ Set up remote CKAN from provided CKAN or by creating from configuration Args: remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration. Returns: None """ if remoteckan is None: self._remoteckan = self.create_remoteckan(self.get_hdx_site_url(), full_agent=self.get_user_agent(), **kwargs) else: self._remoteckan = remoteckan
Set up remote CKAN from provided CKAN or by creating from configuration Args: remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration. Returns: None
entailment
def setup(cls, configuration=None, **kwargs): # type: (Optional['Configuration'], Any) -> None """ Set up the HDX configuration Args: configuration (Optional[Configuration]): Configuration instance. Defaults to setting one up from passed arguments. **kwargs: See below user_agent (str): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not. user_agent_config_yaml (str): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (str): Lookup key for YAML. Ignored if user_agent supplied. hdx_url (str): HDX url to use. Overrides hdx_site. hdx_site (str): HDX site to use eg. prod, test. hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False. hdx_key (str): Your HDX key. Ignored if hdx_read_only = True. hdx_config_dict (dict): HDX configuration dictionary to use instead of above 3 parameters OR hdx_config_json (str): Path to JSON HDX configuration OR hdx_config_yaml (str): Path to YAML HDX configuration project_config_dict (dict): Project configuration dictionary OR project_config_json (str): Path to JSON Project configuration OR project_config_yaml (str): Path to YAML Project configuration hdx_base_config_dict (dict): HDX base configuration dictionary OR hdx_base_config_json (str): Path to JSON HDX base configuration OR hdx_base_config_yaml (str): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml. Returns: None """ if configuration is None: cls._configuration = Configuration(**kwargs) else: cls._configuration = configuration
Set up the HDX configuration Args: configuration (Optional[Configuration]): Configuration instance. Defaults to setting one up from passed arguments. **kwargs: See below user_agent (str): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not. user_agent_config_yaml (str): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (str): Lookup key for YAML. Ignored if user_agent supplied. hdx_url (str): HDX url to use. Overrides hdx_site. hdx_site (str): HDX site to use eg. prod, test. hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False. hdx_key (str): Your HDX key. Ignored if hdx_read_only = True. hdx_config_dict (dict): HDX configuration dictionary to use instead of above 3 parameters OR hdx_config_json (str): Path to JSON HDX configuration OR hdx_config_yaml (str): Path to YAML HDX configuration project_config_dict (dict): Project configuration dictionary OR project_config_json (str): Path to JSON Project configuration OR project_config_yaml (str): Path to YAML Project configuration hdx_base_config_dict (dict): HDX base configuration dictionary OR hdx_base_config_json (str): Path to JSON HDX base configuration OR hdx_base_config_yaml (str): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml. Returns: None
entailment
def _create(cls, configuration=None, remoteckan=None, **kwargs): # type: (Optional['Configuration'], Optional[ckanapi.RemoteCKAN], Any) -> str """ Create HDX configuration Args: configuration (Optional[Configuration]): Configuration instance. Defaults to setting one up from passed arguments. remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration. **kwargs: See below user_agent (str): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not. user_agent_config_yaml (str): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (str): Lookup key for YAML. Ignored if user_agent supplied. hdx_url (str): HDX url to use. Overrides hdx_site. hdx_site (str): HDX site to use eg. prod, test. hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False. hdx_key (str): Your HDX key. Ignored if hdx_read_only = True. hdx_config_dict (dict): HDX configuration dictionary to use instead of above 3 parameters OR hdx_config_json (str): Path to JSON HDX configuration OR hdx_config_yaml (str): Path to YAML HDX configuration project_config_dict (dict): Project configuration dictionary OR project_config_json (str): Path to JSON Project configuration OR project_config_yaml (str): Path to YAML Project configuration hdx_base_config_dict (dict): HDX base configuration dictionary OR hdx_base_config_json (str): Path to JSON HDX base configuration OR hdx_base_config_yaml (str): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml. Returns: str: HDX site url """ kwargs = cls._environment_variables(**kwargs) cls.setup(configuration, **kwargs) cls._configuration.setup_remoteckan(remoteckan, **kwargs) return cls._configuration.get_hdx_site_url()
Create HDX configuration Args: configuration (Optional[Configuration]): Configuration instance. Defaults to setting one up from passed arguments. remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration. **kwargs: See below user_agent (str): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not. user_agent_config_yaml (str): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (str): Lookup key for YAML. Ignored if user_agent supplied. hdx_url (str): HDX url to use. Overrides hdx_site. hdx_site (str): HDX site to use eg. prod, test. hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False. hdx_key (str): Your HDX key. Ignored if hdx_read_only = True. hdx_config_dict (dict): HDX configuration dictionary to use instead of above 3 parameters OR hdx_config_json (str): Path to JSON HDX configuration OR hdx_config_yaml (str): Path to YAML HDX configuration project_config_dict (dict): Project configuration dictionary OR project_config_json (str): Path to JSON Project configuration OR project_config_yaml (str): Path to YAML Project configuration hdx_base_config_dict (dict): HDX base configuration dictionary OR hdx_base_config_json (str): Path to JSON HDX base configuration OR hdx_base_config_yaml (str): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml. Returns: str: HDX site url
entailment
def create(cls, configuration=None, remoteckan=None, **kwargs): # type: (Optional['Configuration'], Optional[ckanapi.RemoteCKAN], Any) -> str """ Create HDX configuration. Can only be called once (will raise an error if called more than once). Args: configuration (Optional[Configuration]): Configuration instance. Defaults to setting one up from passed arguments. remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration. **kwargs: See below user_agent (str): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not. user_agent_config_yaml (str): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (str): Lookup key for YAML. Ignored if user_agent supplied. hdx_url (str): HDX url to use. Overrides hdx_site. hdx_site (str): HDX site to use eg. prod, test. hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False. hdx_key (str): Your HDX key. Ignored if hdx_read_only = True. hdx_config_dict (dict): HDX configuration dictionary to use instead of above 3 parameters OR hdx_config_json (str): Path to JSON HDX configuration OR hdx_config_yaml (str): Path to YAML HDX configuration project_config_dict (dict): Project configuration dictionary OR project_config_json (str): Path to JSON Project configuration OR project_config_yaml (str): Path to YAML Project configuration hdx_base_config_dict (dict): HDX base configuration dictionary OR hdx_base_config_json (str): Path to JSON HDX base configuration OR hdx_base_config_yaml (str): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml. Returns: str: HDX site url """ if cls._configuration is not None: raise ConfigurationError('Configuration already created!') return cls._create(configuration=configuration, remoteckan=remoteckan, **kwargs)
Create HDX configuration. Can only be called once (will raise an error if called more than once). Args: configuration (Optional[Configuration]): Configuration instance. Defaults to setting one up from passed arguments. remoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration. **kwargs: See below user_agent (str): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not. user_agent_config_yaml (str): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (str): Lookup key for YAML. Ignored if user_agent supplied. hdx_url (str): HDX url to use. Overrides hdx_site. hdx_site (str): HDX site to use eg. prod, test. hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False. hdx_key (str): Your HDX key. Ignored if hdx_read_only = True. hdx_config_dict (dict): HDX configuration dictionary to use instead of above 3 parameters OR hdx_config_json (str): Path to JSON HDX configuration OR hdx_config_yaml (str): Path to YAML HDX configuration project_config_dict (dict): Project configuration dictionary OR project_config_json (str): Path to JSON Project configuration OR project_config_yaml (str): Path to YAML Project configuration hdx_base_config_dict (dict): HDX base configuration dictionary OR hdx_base_config_json (str): Path to JSON HDX base configuration OR hdx_base_config_yaml (str): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml. Returns: str: HDX site url
entailment
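The typical one-time setup looks like the following; the import path is assumed from the library's usual layout and the user agent string is a placeholder.

from hdx.hdx_configuration import Configuration  # module path assumed

Configuration.create(hdx_site='test', user_agent='MyOrg_MyProject', hdx_read_only=True)
# Calling create a second time raises ConfigurationError('Configuration already created!')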
def kwargs_to_variable_assignment(kwargs: dict, value_representation=repr, assignment_operator: str = ' = ', statement_separator: str = '\n', statement_per_line: bool = False) -> str: """ Convert a dictionary into a string with assignments Each assignment is constructed based on: key assignment_operator value_representation(value) statement_separator, where key and value are the key and value of the dictionary. Moreover one can separate the assignment statements by new lines. Parameters ---------- kwargs : dict assignment_operator: str, optional: Assignment operator (" = " in python) value_representation: callable, optional How to represent the value in the assignments (repr function in python) statement_separator : str, optional: Statement separator (new line in python) statement_per_line: bool, optional Insert each statement on a different line Returns ------- str All the assignments. >>> kwargs_to_variable_assignment({'a': 2, 'b': "abc"}) "a = 2\\nb = 'abc'\\n" >>> kwargs_to_variable_assignment({'a':2 ,'b': "abc"}, statement_per_line=True) "a = 2\\n\\nb = 'abc'\\n" >>> kwargs_to_variable_assignment({'a': 2}) 'a = 2\\n' >>> kwargs_to_variable_assignment({'a': 2}, statement_per_line=True) 'a = 2\\n' """ code = [] join_str = '\n' if statement_per_line else '' for key, value in kwargs.items(): code.append(key + assignment_operator + value_representation(value)+statement_separator) return join_str.join(code)
Convert a dictionary into a string with assignments Each assignment is constructed based on: key assignment_operator value_representation(value) statement_separator, where key and value are the key and value of the dictionary. Moreover one can separate the assignment statements by new lines. Parameters ---------- kwargs : dict assignment_operator: str, optional: Assignment operator (" = " in python) value_representation: callable, optional How to represent the value in the assignments (repr function in python) statement_separator : str, optional: Statement separator (new line in python) statement_per_line: bool, optional Insert each statement on a different line Returns ------- str All the assignments. >>> kwargs_to_variable_assignment({'a': 2, 'b': "abc"}) "a = 2\\nb = 'abc'\\n" >>> kwargs_to_variable_assignment({'a':2 ,'b': "abc"}, statement_per_line=True) "a = 2\\n\\nb = 'abc'\\n" >>> kwargs_to_variable_assignment({'a': 2}) 'a = 2\\n' >>> kwargs_to_variable_assignment({'a': 2}, statement_per_line=True) 'a = 2\\n'
entailment
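Beyond the doctests, the separator arguments make other assignment styles easy to emit; for example, R-style assignments:

code = kwargs_to_variable_assignment({'n': 10, 'label': 'run'}, assignment_operator=' <- ')
print(code)
# n <- 10
# label <- 'run'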
def decode_json(json_input: Union[str, None] = None): """ Simple wrapper of json.load and json.loads. If json_input is None the output is an empty dictionary. If the input is a string that ends in .json it is decoded using json.load. Otherwise it is decoded using json.loads. Parameters ---------- json_input : str, None, optional input json object Returns ------- Decoded json object >>> decode_json() {} >>> decode_json('{"flag":true}') {'flag': True} >>> decode_json('{"value":null}') {'value': None} """ if json_input is None: return {} else: if isinstance(json_input, str) is False: raise TypeError() elif json_input[-5:] == ".json": with open(json_input) as f: decoded_json = json.load(f) else: decoded_json = json.loads(json_input) return decoded_json
Simple wrapper of json.load and json.loads. If json_input is None the output is an empty dictionary. If the input is a string that ends in .json it is decoded using json.load. Otherwise it is decoded using json.loads. Parameters ---------- json_input : str, None, optional input json object Returns ------- Decoded json object >>> decode_json() {} >>> decode_json('{"flag":true}') {'flag': True} >>> decode_json('{"value":null}') {'value': None}
entailment
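A short sketch of the two decoding paths; config.json is a hypothetical file name:

settings = decode_json('{"retries": 3, "verbose": false}')
# settings == {'retries': 3, 'verbose': False}
# A string ending in .json is opened and decoded from disk instead:
# settings = decode_json('config.json')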
def is_jsonable(obj) -> bool: """ Check if an object is jsonable. An object is jsonable if it is json serialisable and by loading its json representation the same object is recovered. Parameters ---------- obj : Python object Returns ------- bool >>> is_jsonable([1,2,3]) True >>> is_jsonable((1,2,3)) False >>> is_jsonable({'a':True,'b':1,'c':None}) True """ try: return obj==json.loads(json.dumps(obj)) except TypeError: return False except: raise
Check if an object is jsonable. An object is jsonable if it is json serialisable and by loading its json representation the same object is recovered. Parameters ---------- obj : Python object Returns ------- bool >>> is_jsonable([1,2,3]) True >>> is_jsonable((1,2,3)) False >>> is_jsonable({'a':True,'b':1,'c':None}) True
entailment
def is_literal_eval(node_or_string) -> tuple: """ Check if an expression can be literal_eval. Parameters ---------- node_or_string : Input Returns ------- tuple (bool,python object) If it can be literal_eval the python object is returned. Otherwise None is returned. >>> is_literal_eval('[1,2,3]') (True, [1, 2, 3]) >>> is_literal_eval('a') (False, None) """ try: obj=ast.literal_eval(node_or_string) return (True, obj) except: return (False, None)
Check if an expression can be literal_eval. Parameters ---------- node_or_string : Input Returns ------- tuple (bool,python object) If it can be literal_eval the python object is returned. Otherwise None is returned. >>> is_literal_eval('[1,2,3]') (True, [1, 2, 3]) >>> is_literal_eval('a') (False, None)
entailment
def find_duplicates(l: list) -> set: """ Return the duplicates in a list. The function relies on https://stackoverflow.com/questions/9835762/find-and-list-duplicates-in-a-list . Parameters ---------- l : list Name Returns ------- set Duplicated values >>> find_duplicates([1,2,3]) set() >>> find_duplicates([1,2,1]) {1} """ return set([x for x in l if l.count(x) > 1])
Return the duplicates in a list. The function relies on https://stackoverflow.com/questions/9835762/find-and-list-duplicates-in-a-list . Parameters ---------- l : list Name Returns ------- set Duplicated values >>> find_duplicates([1,2,3]) set() >>> find_duplicates([1,2,1]) {1}
entailment
def sort_dict(d: dict, by: str = 'key', allow_duplicates: bool = True) -> collections.OrderedDict: """ Sort a dictionary by key or value. The function relies on https://docs.python.org/3/library/collections.html#collections.OrderedDict . The duplicates are determined based on https://stackoverflow.com/questions/9835762/find-and-list-duplicates-in-a-list . Parameters ---------- d : dict Input dictionary by : ['key','value'], optional By what to sort the input dictionary allow_duplicates : bool, optional Flag to indicate if the duplicates are allowed. Returns ------- collections.OrderedDict Sorted dictionary. >>> sort_dict({2: 3, 1: 2, 3: 1}) OrderedDict([(1, 2), (2, 3), (3, 1)]) >>> sort_dict({2: 3, 1: 2, 3: 1}, by='value') OrderedDict([(3, 1), (1, 2), (2, 3)]) >>> sort_dict({'2': 3, '1': 2}, by='value') OrderedDict([('1', 2), ('2', 3)]) >>> sort_dict({2: 1, 1: 2, 3: 1}, by='value', allow_duplicates=False) Traceback (most recent call last): ... ValueError: There are duplicates in the values: {1} >>> sort_dict({1:1,2:3},by=True) Traceback (most recent call last): ... ValueError: by can be 'key' or 'value'. """ if by == 'key': i = 0 elif by == 'value': values = list(d.values()) if len(values) != len(set(values)) and not allow_duplicates: duplicates = find_duplicates(values) raise ValueError("There are duplicates in the values: {}".format(duplicates)) i = 1 else: raise ValueError("by can be 'key' or 'value'.") return collections.OrderedDict(sorted(d.items(), key=lambda t: t[i]))
Sort a dictionary by key or value. The function relies on https://docs.python.org/3/library/collections.html#collections.OrderedDict . The duplicates are determined based on https://stackoverflow.com/questions/9835762/find-and-list-duplicates-in-a-list . Parameters ---------- d : dict Input dictionary by : ['key','value'], optional By what to sort the input dictionary allow_duplicates : bool, optional Flag to indicate if the duplicates are allowed. Returns ------- collections.OrderedDict Sorted dictionary. >>> sort_dict({2: 3, 1: 2, 3: 1}) OrderedDict([(1, 2), (2, 3), (3, 1)]) >>> sort_dict({2: 3, 1: 2, 3: 1}, by='value') OrderedDict([(3, 1), (1, 2), (2, 3)]) >>> sort_dict({'2': 3, '1': 2}, by='value') OrderedDict([('1', 2), ('2', 3)]) >>> sort_dict({2: 1, 1: 2, 3: 1}, by='value', allow_duplicates=False) Traceback (most recent call last): ... ValueError: There are duplicates in the values: {1} >>> sort_dict({1:1,2:3},by=True) Traceback (most recent call last): ... ValueError: by can be 'key' or 'value'.
entailment
def group_dict_by_value(d: dict) -> dict: """ Group a dictionary by values. Parameters ---------- d : dict Input dictionary Returns ------- dict Output dictionary. The keys are the values of the initial dictionary and the values are given by a list of keys corresponding to the value. >>> group_dict_by_value({2: 3, 1: 2, 3: 1}) {3: [2], 2: [1], 1: [3]} >>> group_dict_by_value({2: 3, 1: 2, 3: 1, 10:1, 12: 3}) {3: [2, 12], 2: [1], 1: [3, 10]} """ d_out = {} for k, v in d.items(): if v in d_out: d_out[v].append(k) else: d_out[v] = [k] return d_out
Group a dictionary by values. Parameters ---------- d : dict Input dictionary Returns ------- dict Output dictionary. The keys are the values of the initial dictionary and the values are given by a list of keys corresponding to the value. >>> group_dict_by_value({2: 3, 1: 2, 3: 1}) {3: [2], 2: [1], 1: [3]} >>> group_dict_by_value({2: 3, 1: 2, 3: 1, 10:1, 12: 3}) {3: [2, 12], 2: [1], 1: [3, 10]}
entailment
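group_dict_by_value combines naturally with sort_dict above; a sketch with made-up data:

counts = {'a.csv': 200, 'b.csv': 404, 'c.csv': 200}
by_status = group_dict_by_value(counts)  # {200: ['a.csv', 'c.csv'], 404: ['b.csv']}
ordered = sort_dict(by_status)           # OrderedDict([(200, ...), (404, ...)])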
def variable_status(code: str, exclude_variable: Union[set, None] = None, jsonable_parameter: bool = True) -> tuple: """ Find the possible parameters and "global" variables from a python code. This is achieved by parsing the abstract syntax tree. Parameters ---------- code : str Input code as string. exclude_variable : set, None, optional Variable to exclude. jsonable_parameter: bool, True, optional Consider only jsonable parameters Returns ------- tuple (a set of possible parameters, a set of parameters to exclude, a dictionary of possible parameters) A variable is a possible parameter if 1) it is not in the input exclude_variable, 2) the code contains only assignments, and 3) it is used only to bind objects. The set of parameters to exclude is the union of the input exclude_variable and all names that look like a global variable. The dictionary of possible parameters {parameter name, parameter value} is available only if jsonable_parameter is True. >>> variable_status("a=3") ({'a'}, {'a'}, {'a': 3}) >>> variable_status("a=3",jsonable_parameter=False) ({'a'}, {'a'}, {}) >>> variable_status("a += 1") (set(), {'a'}, {}) >>> variable_status("def f(x,y=3):\\n\\t pass") (set(), {'f'}, {}) >>> variable_status("class C(A):\\n\\t pass") (set(), {'C'}, {}) >>> variable_status("import f") (set(), {'f'}, {}) >>> variable_status("import f as g") (set(), {'g'}, {}) >>> variable_status("from X import f") (set(), {'f'}, {}) >>> variable_status("from X import f as g") (set(), {'g'}, {}) """ if exclude_variable is None: exclude_variable = set() else: exclude_variable = copy.deepcopy(exclude_variable) root = ast.parse(code) store_variable_name = set() assign_only = True dict_parameter={} for node in ast.iter_child_nodes(root): if isinstance(node, ast.Assign): for assign_node in ast.walk(node): if isinstance(assign_node, ast.Name): if isinstance(assign_node.ctx, ast.Store): if jsonable_parameter is False: store_variable_name |= {assign_node.id} else: exclude_variable |= {assign_node.id} _is_literal_eval,_value=is_literal_eval(node.value) if jsonable_parameter is True: for assign_node in ast.iter_child_nodes(node): if isinstance(assign_node, ast.Tuple): i=0 for assign_tuple_node in ast.iter_child_nodes(assign_node): if isinstance(assign_tuple_node, ast.Name): if isinstance(_value,(collections.Iterable)) and is_jsonable(_value[i]) and _is_literal_eval: dict_parameter[assign_tuple_node.id]=_value[i] store_variable_name |= {assign_tuple_node.id} else: exclude_variable |= {assign_tuple_node.id} i += 1 else: if isinstance(assign_node, ast.Name): if is_jsonable(_value) and _is_literal_eval: dict_parameter[assign_node.id]=_value store_variable_name |= {assign_node.id} else: exclude_variable |= {assign_node.id} elif isinstance(node, ast.AugAssign): for assign_node in ast.walk(node): if isinstance(assign_node, ast.Name): exclude_variable |= {assign_node.id} # class and function elif isinstance(node, (ast.FunctionDef, ast.ClassDef)): assign_only = False exclude_variable |= {node.name} # import elif isinstance(node, ast.Import): assign_only = False for node1 in ast.iter_child_nodes(node): if node1.asname is not None: exclude_variable |= {node1.asname} else: exclude_variable |= {node1.name} # import from elif isinstance(node, ast.ImportFrom): assign_only = False for node1 in ast.iter_child_nodes(node): if node1.asname is not None: exclude_variable |= {node1.asname} else: exclude_variable |= {node1.name} else: assign_only = False if assign_only is True: possible_parameter = store_variable_name-exclude_variable if jsonable_parameter is True: dict_parameter = {k:dict_parameter[k] for k in possible_parameter} return (possible_parameter, store_variable_name | exclude_variable, dict_parameter) return set(), store_variable_name | exclude_variable, {}
Find the possible parameters and "global" variables from a python code. This is achieved by parsing the abstract syntax tree. Parameters ---------- code : str Input code as string. exclude_variable : set, None, optional Variable to exclude. jsonable_parameter: bool, True, optional Consider only jsonable parameters Returns ------- tuple (a set of possible parameters, a set of parameters to exclude, a dictionary of possible parameters) A variable is a possible parameter if 1) it is not in the input exclude_variable, 2) the code contains only assignments, and 3) it is used only to bind objects. The set of parameters to exclude is the union of the input exclude_variable and all names that look like a global variable. The dictionary of possible parameters {parameter name, parameter value} is available only if jsonable_parameter is True. >>> variable_status("a=3") ({'a'}, {'a'}, {'a': 3}) >>> variable_status("a=3",jsonable_parameter=False) ({'a'}, {'a'}, {}) >>> variable_status("a += 1") (set(), {'a'}, {}) >>> variable_status("def f(x,y=3):\\n\\t pass") (set(), {'f'}, {}) >>> variable_status("class C(A):\\n\\t pass") (set(), {'C'}, {}) >>> variable_status("import f") (set(), {'f'}, {}) >>> variable_status("import f as g") (set(), {'g'}, {}) >>> variable_status("from X import f") (set(), {'f'}, {}) >>> variable_status("from X import f as g") (set(), {'g'}, {})
entailment
def increment_name(name: str, start_marker: str = " (", end_marker: str = ")") -> str: """ Increment the name where the incremental part is given by parameters. Parameters ---------- name : str Name start_marker : str The marker used before the incremental part end_marker : str The marker after the incremental part Returns ------- str Incremented name. >>> increment_name('abc') 'abc (1)' >>> increment_name('abc(1)') 'abc(1) (1)' >>> increment_name('abc (123)') 'abc (124)' >>> increment_name('abc-1',start_marker='-',end_marker='') 'abc-2' >>> increment_name('abc[2]',start_marker='[',end_marker=']') 'abc[3]' >>> increment_name('abc1',start_marker='',end_marker='') Traceback (most recent call last): ... ValueError: start_marker can not be the empty string. """ if start_marker == '': raise ValueError("start_marker can not be the empty string.") a = name start = len(a)-a[::-1].find(start_marker[::-1]) if (a[len(a)-len(end_marker):len(a)] == end_marker and start < (len(a)-len(end_marker)) and a[start-len(start_marker):start] == start_marker and a[start:len(a)-len(end_marker)].isdigit()): old_int = int(a[start:len(a)-len(end_marker)]) new_int = old_int+1 new_name = a[:start]+str(new_int)+end_marker else: new_name = a+start_marker+'1'+end_marker return new_name
Increment the name where the incremental part is given by parameters. Parameters ---------- name : str Name start_marker : str The marker used before the incremental part end_marker : str The marker after the incremental part Returns ------- str Incremented name. >>> increment_name('abc') 'abc (1)' >>> increment_name('abc(1)') 'abc(1) (1)' >>> increment_name('abc (123)') 'abc (124)' >>> increment_name('abc-1',start_marker='-',end_marker='') 'abc-2' >>> increment_name('abc[2]',start_marker='[',end_marker=']') 'abc[3]' >>> increment_name('abc1',start_marker='',end_marker='') Traceback (most recent call last): ... ValueError: start_marker can not be the empty string.
entailment
def read_from_hdx(identifier, configuration=None): # type: (str, Optional[Configuration]) -> Optional['ResourceView'] """Reads the resource view given by identifier from HDX and returns ResourceView object Args: identifier (str): Identifier of resource view configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[ResourceView]: ResourceView object if successful read, None if not """ resourceview = ResourceView(configuration=configuration) result = resourceview._load_from_hdx('resource view', identifier) if result: return resourceview return None
Reads the resource view given by identifier from HDX and returns ResourceView object Args: identifier (str): Identifier of resource view configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[ResourceView]: ResourceView object if successful read, None if not
entailment
def get_all_for_resource(identifier, configuration=None): # type: (str, Optional[Configuration]) -> List['ResourceView'] """Read all resource views for a resource given by identifier from HDX and returns list of ResourceView objects Args: identifier (str): Identifier of resource configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: List[ResourceView]: List of ResourceView objects """ resourceview = ResourceView(configuration=configuration) success, result = resourceview._read_from_hdx('resource view', identifier, 'id', ResourceView.actions()['list']) resourceviews = list() if success: for resourceviewdict in result: resourceview = ResourceView(resourceviewdict, configuration=configuration) resourceviews.append(resourceview) return resourceviews
Read all resource views for a resource given by identifier from HDX and returns list of ResourceView objects Args: identifier (str): Identifier of resource configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: List[ResourceView]: List of ResourceView objects
entailment
def _update_resource_view(self, log=False): # type: (bool) -> bool """Check if resource view exists in HDX and if so, update resource view Returns: bool: True if updated and False if not """ update = False if 'id' in self.data and self._load_from_hdx('resource view', self.data['id']): update = True else: if 'resource_id' in self.data: resource_views = self.get_all_for_resource(self.data['resource_id']) for resource_view in resource_views: if self.data['title'] == resource_view['title']: self.old_data = self.data self.data = resource_view.data update = True break if update: if log: logger.warning('resource view exists. Updating %s' % self.data['id']) self._merge_hdx_update('resource view', 'id') return update
Check if resource view exists in HDX and if so, update resource view Returns: bool: True if updated and False if not
entailment
def create_in_hdx(self): # type: () -> None """Check if resource view exists in HDX and if so, update it, otherwise create resource view Returns: None """ self.check_required_fields() if not self._update_resource_view(log=True): self._save_to_hdx('create', 'title')
Check if resource view exists in HDX and if so, update it, otherwise create resource view Returns: None
entailment
def copy(self, resource_view): # type: (Union[ResourceView,Dict,str]) -> None """Copies all fields except id, resource_id and package_id from another resource view. Args: resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary Returns: None """ if isinstance(resource_view, str): if is_valid_uuid(resource_view) is False: raise HDXError('%s is not a valid resource view id!' % resource_view) resource_view = ResourceView.read_from_hdx(resource_view) if not isinstance(resource_view, dict) and not isinstance(resource_view, ResourceView): raise HDXError('%s is not a valid resource view!' % resource_view) for key in resource_view: if key not in ('id', 'resource_id', 'package_id'): self.data[key] = resource_view[key]
Copies all fields except id, resource_id and package_id from another resource view. Args: resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary Returns: None
entailment
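A hedged sketch of cloning a view: read an existing view, copy its fields onto a view bound to another resource, then create it. Both UUIDs are invented and the template view is assumed to exist.

template = ResourceView.read_from_hdx('11111111-2222-4333-8444-555555555555')
view = ResourceView({'resource_id': 'aaaaaaaa-bbbb-4ccc-8ddd-eeeeeeeeeeee'})
view.copy(template)  # copies everything except id, resource_id and package_id
view.create_in_hdx()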
def tagscleanupdicts(configuration=None, url=None, keycolumn=5, failchained=True): # type: (Optional[Configuration], Optional[str], int, bool) -> Tuple[Dict,List] """ Get tags cleanup dictionaries Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. url (Optional[str]): Url of tags cleanup spreadsheet. Defaults to None (internal configuration parameter). keycolumn (int): Column number of tag column in spreadsheet. Defaults to 5. failchained (bool): Fail if chained rules found. Defaults to True. Returns: Tuple[Dict,List]: Returns (Tags dictionary, Wildcard tags list) """ if not Tags._tags_dict: if configuration is None: configuration = Configuration.read() with Download(full_agent=configuration.get_user_agent()) as downloader: if url is None: url = configuration['tags_cleanup_url'] Tags._tags_dict = downloader.download_tabular_rows_as_dicts(url, keycolumn=keycolumn) keys = Tags._tags_dict.keys() chainerror = False for i, tag in enumerate(keys): whattodo = Tags._tags_dict[tag] action = whattodo[u'action'] final_tags = whattodo[u'final tags (semicolon separated)'] for final_tag in final_tags.split(';'): if final_tag in keys: index = list(keys).index(final_tag) if index != i: whattodo2 = Tags._tags_dict[final_tag] action2 = whattodo2[u'action'] if action2 != 'OK' and action2 != 'Other': final_tags2 = whattodo2[u'final tags (semicolon separated)'] if final_tag not in final_tags2.split(';'): chainerror = True if failchained: logger.error('Chained rules: %s (%s -> %s) | %s (%s -> %s)' % (action, tag, final_tags, action2, final_tag, final_tags2)) if failchained and chainerror: raise ChainRuleError('Chained rules for tags detected!') Tags._wildcard_tags = list() for tag in Tags._tags_dict: if '*' in tag: Tags._wildcard_tags.append(tag) return Tags._tags_dict, Tags._wildcard_tags
Get tags cleanup dictionaries Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. url (Optional[str]): Url of tags cleanup spreadsheet. Defaults to None (internal configuration parameter). keycolumn (int): Column number of tag column in spreadsheet. Defaults to 5. failchained (bool): Fail if chained rules found. Defaults to True. Returns: Tuple[Dict,List]: Returns (Tags dictionary, Wildcard tags list)
entailment
def read_from_hdx(identifier, configuration=None): # type: (str, Optional[Configuration]) -> Optional['User'] """Reads the user given by identifier from HDX and returns User object Args: identifier (str): Identifier of user configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[User]: User object if successful read, None if not """ user = User(configuration=configuration) result = user._load_from_hdx('user', identifier) if result: return user return None
Reads the user given by identifier from HDX and returns User object Args: identifier (str): Identifier of user configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[User]: User object if successful read, None if not
entailment
def update_in_hdx(self): # type: () -> None """Check if user exists in HDX and if so, update user Returns: None """ capacity = self.data.get('capacity') if capacity is not None: del self.data['capacity'] # remove capacity (which comes from users from Organization) self._update_in_hdx('user', 'id') if capacity is not None: self.data['capacity'] = capacity
Check if user exists in HDX and if so, update user Returns: None
entailment
def create_in_hdx(self): # type: () -> None """Check if user exists in HDX and if so, update it, otherwise create user Returns: None """ capacity = self.data.get('capacity') if capacity is not None: del self.data['capacity'] self._create_in_hdx('user', 'id', 'name') if capacity is not None: self.data['capacity'] = capacity
Check if user exists in HDX and if so, update it, otherwise create user Returns: None
entailment
def email(self, subject, text_body, html_body=None, sender=None, **kwargs): # type: (str, str, Optional[str], Optional[str], Any) -> None """Emails a user. Args: subject (str): Email subject text_body (str): Plain text email body html_body (str): HTML email body sender (Optional[str]): Email sender. Defaults to SMTP username. **kwargs: See below mail_options (List): Mail options (see smtplib documentation) rcpt_options (List): Recipient options (see smtplib documentation) Returns: None """ self.configuration.emailer().send([self.data['email']], subject, text_body, html_body=html_body, sender=sender, **kwargs)
Emails a user. Args: subject (str): Email subject text_body (str): Plain text email body html_body (str): HTML email body sender (Optional[str]): Email sender. Defaults to SMTP username. **kwargs: See below mail_options (List): Mail options (see smtplib documentation) rcpt_options (List): Recipient options (see smtplib documentation) Returns: None
entailment
def get_all_users(configuration=None, **kwargs): # type: (Optional[Configuration], Any) -> List['User'] """Get all users in HDX Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. **kwargs: See below q (str): Restrict to names containing a string. Defaults to all users. order_by (str): Field by which to sort - any user field or edits (number_of_edits). Defaults to 'name'. Returns: List[User]: List of all users in HDX """ user = User(configuration=configuration) user['id'] = 'all users' # only for error message if produced result = user._write_to_hdx('list', kwargs, 'id') users = list() if result: for userdict in result: user = User(userdict, configuration=configuration) users.append(user) else: logger.debug(result) return users
Get all users in HDX Args: configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. **kwargs: See below q (str): Restrict to names containing a string. Defaults to all users. order_by (str): Field by which to sort - any user field or edits (number_of_edits). Defaults to 'name'. Returns: List[User]: List of all users in HDX
entailment
def email_users(users, subject, text_body, html_body=None, sender=None, configuration=None, **kwargs): # type: (List['User'], str, str, Optional[str], Optional[str], Optional[Configuration], Any) -> None """Email a list of users Args: users (List[User]): List of users subject (str): Email subject text_body (str): Plain text email body html_body (str): HTML email body sender (Optional[str]): Email sender. Defaults to SMTP username. configuration (Optional[Configuration]): HDX configuration. Defaults to configuration of first user in list. **kwargs: See below mail_options (List): Mail options (see smtplib documentation) rcpt_options (List): Recipient options (see smtplib documentation) Returns: None """ if not users: raise ValueError('No users supplied') recipients = list() for user in users: recipients.append(user.data['email']) if configuration is None: configuration = users[0].configuration configuration.emailer().send(recipients, subject, text_body, html_body=html_body, sender=sender, **kwargs)
Email a list of users Args: users (List[User]): List of users subject (str): Email subject text_body (str): Plain text email body html_body (str): HTML email body sender (Optional[str]): Email sender. Defaults to SMTP username. configuration (Optional[Configuration]): HDX configuration. Defaults to configuration of first user in list. **kwargs: See below mail_options (List): Mail options (see smtplib documentation) rcpt_options (List): Recipient options (see smtplib documentation) Returns: None
entailment
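Pulling users and mailing them in one go, assuming SMTP has been configured on the Configuration's emailer; the query string is illustrative.

users = User.get_all_users(q='example')  # names containing 'example'
if users:  # email_users raises ValueError on an empty list
    User.email_users(users, 'Maintenance window', 'HDX will be read-only on Saturday.')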
def get_organizations(self, permission='read'): # type: (str) -> List['Organization'] """Get organizations in HDX that this user is a member of. Args: permission (str): Permission to check for. Defaults to 'read'. Returns: List[Organization]: List of organizations in HDX that this user is a member of """ success, result = self._read_from_hdx('user', self.data['name'], 'id', self.actions()['listorgs'], permission=permission) organizations = list() if success: for organizationdict in result: organization = hdx.data.organization.Organization.read_from_hdx(organizationdict['id']) organizations.append(organization) return organizations
Get organizations in HDX that this user is a member of. Args: permission (str): Permission to check for. Defaults to 'read'. Returns: List[Organization]: List of organizations in HDX that this user is a member of
entailment
def facade(projectmainfn, **kwargs): # (Callable[[None], None], Any) -> None """Facade to simplify project setup that calls project main function Args: projectmainfn ((None) -> None): main function of project **kwargs: configuration parameters to pass to HDX Configuration class Returns: None """ # # Setting up configuration # site_url = Configuration._create(**kwargs) logger.info('--------------------------------------------------') logger.info('> Using HDX Python API Library %s' % Configuration.apiversion) logger.info('> HDX Site: %s' % site_url) UserAgent.user_agent = Configuration.read().user_agent projectmainfn()
Facade to simplify project setup that calls project main function Args: projectmainfn ((None) -> None): main function of project **kwargs: configuration parameters to pass to HDX Configuration class Returns: None
entailment
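A sketch of wiring a script through the facade; the import path follows the library's usual facades package and is an assumption here.

from hdx.facades.simple import facade  # module path assumed

def main():
    pass  # project code runs here with configuration already set up

if __name__ == '__main__':
    facade(main, hdx_site='test', user_agent='MyProject')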
def get_lint_config(config_path=None): """ Tries loading the config from the given path. If no path is specified, the default config path is tried, and if that does not exist, the default config is returned. """ # config path specified if config_path: config = LintConfig.load_from_file(config_path) click.echo("Using config from {0}".format(config_path)) # default config path elif os.path.exists(DEFAULT_CONFIG_FILE): config = LintConfig.load_from_file(DEFAULT_CONFIG_FILE) click.echo("Using config from {0}".format(DEFAULT_CONFIG_FILE)) # no config file else: config = LintConfig() return config
Tries loading the config from the given path. If no path is specified, the default config path is tried, and if that does not exist, the default config is returned.
entailment
def cli(list_files, config, ignore, path): """ Markdown lint tool, checks your markdown for styling issues """ files = MarkdownFileFinder.find_files(path) if list_files: echo_files(files) lint_config = get_lint_config(config) lint_config.apply_on_csv_string(ignore, lint_config.disable_rule) linter = MarkdownLinter(lint_config) error_count = linter.lint_files(files) exit(error_count)
Markdown lint tool, checks your markdown for styling issues
entailment
def main(): """ Main function """ # Read configuration from the config file if present, else fall back to # command line options if args.config: config = config_file_parser.get_configuration(args.config) access_key_id = config['access-key-id'] secret_access_key = config['secret-access-key'] region = config['region'] else: access_key_id = args.access_key_id secret_access_key = args.secret_access_key region = args.region if args.daemon: pid_file = '/tmp/automatic-ebs-snapshots.pid' daemon = AutoEBSDaemon(pid_file) if args.daemon == 'start': daemon.start() elif args.daemon == 'stop': daemon.stop() sys.exit(0) elif args.daemon == 'restart': daemon.restart() elif args.daemon in ['foreground', 'fg']: daemon.run() else: print('Valid options for --daemon are start, stop and restart') sys.exit(1) # Connect to AWS connection = connection_manager.connect_to_ec2( region, access_key_id, secret_access_key) if args.watch: volume_manager.watch( connection, args.watch, args.interval, args.retention) if args.unwatch: volume_manager.unwatch(connection, args.unwatch) if args.watch_file: volume_manager.watch_from_file(connection, args.watch_file) if args.unwatch_file: volume_manager.unwatch_from_file(connection, args.unwatch_file) if args.snapshots: volume_manager.list_snapshots(connection, args.snapshots) if args.list: volume_manager.list(connection) if args.run: snapshot_manager.run(connection)
Main function
entailment
def run(self, check_interval=300): """ Run the daemon :type check_interval: int :param check_interval: Delay in seconds between checks """ while True: # Read configuration from the config file if present, else fall # back to command line options if args.config: config = config_file_parser.get_configuration(args.config) access_key_id = config['access-key-id'] secret_access_key = config['secret-access-key'] region = config['region'] else: access_key_id = args.access_key_id secret_access_key = args.secret_access_key region = args.region # Connect to AWS connection = connection_manager.connect_to_ec2( region, access_key_id, secret_access_key) snapshot_manager.run(connection) logger.info('Sleeping {} seconds until next check'.format( check_interval)) time.sleep(check_interval)
Run the daemon :type check_interval: int :param check_interval: Delay in seconds between checks
entailment
def _apply_line_rules(self, markdown_string): """ Iterates over the lines in a given markdown string and applies all the enabled line rules to each line """ all_violations = [] lines = markdown_string.split("\n") line_rules = self.line_rules line_nr = 1 ignoring = False for line in lines: if ignoring: if line.strip() == '<!-- markdownlint:enable -->': ignoring = False elif line.strip() == '<!-- markdownlint:disable -->': ignoring = True else: for rule in line_rules: violation = rule.validate(line) if violation: violation.line_nr = line_nr all_violations.append(violation) line_nr += 1 return all_violations
Iterates over the lines in a given markdown string and applies all the enabled line rules to each line
entailment
def lint_files(self, files): """ Lints a list of files. :param files: list of files to lint :return: a list of violations found in the files """ all_violations = [] for filename in files: with open(filename, 'r') as f: content = f.read() violations = self.lint(content) all_violations.extend(violations) for e in violations: print("{0}:{1}: {2} {3}".format(filename, e.line_nr, e.rule_id, e.message)) return len(all_violations)
Lints a list of files. :param files: list of files to lint :return: a list of violations found in the files
entailment
def ReadFrom(self, byte_stream): """Read values from a byte stream. Args: byte_stream (bytes): byte stream. Returns: tuple[object, ...]: values copied from the byte stream. Raises: IOError: if byte stream cannot be read. OSError: if byte stream cannot be read. """ try: return self._struct.unpack_from(byte_stream) except (TypeError, struct.error) as exception: raise IOError('Unable to read byte stream with error: {0!s}'.format( exception))
Read values from a byte stream. Args: byte_stream (bytes): byte stream. Returns: tuple[object, ...]: values copied from the byte stream. Raises: IOError: if byte stream cannot be read. OSError: if byte stream cannot be read.
entailment
def WriteTo(self, values): """Writes values to a byte stream. Args: values (tuple[object, ...]): values to copy to the byte stream. Returns: bytes: byte stream. Raises: IOError: if byte stream cannot be written. OSError: if byte stream cannot be written. """ try: return self._struct.pack(*values) except (TypeError, struct.error) as exception: raise IOError('Unable to write stream with error: {0!s}'.format( exception))
Writes values to a byte stream. Args: values (tuple[object, ...]): values to copy to the byte stream. Returns: bytes: byte stream. Raises: IOError: if byte stream cannot be written. OSError: if byte stream cannot be written.
entailment
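ReadFrom and WriteTo are thin wrappers around a precompiled struct.Struct; the round trip they implement looks like this in plain standard-library terms:

import struct

packer = struct.Struct('<I')               # little-endian unsigned 32-bit int
byte_stream = packer.pack(1234)            # what WriteTo does with its values
values = packer.unpack_from(byte_stream)   # what ReadFrom does
assert values == (1234,)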
def run(connection): """ Ensure that we have snapshots for all watched volumes :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :returns: None """ volumes = volume_manager.get_watched_volumes(connection) for volume in volumes: _ensure_snapshot(connection, volume) _remove_old_snapshots(connection, volume)
Ensure that we have snapshots for all watched volumes :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :returns: None
entailment