def _create_snapshot(volume):
    """ Create a new snapshot

    :type volume: boto.ec2.volume.Volume
    :param volume: Volume to snapshot
    :returns: boto.ec2.snapshot.Snapshot -- The new snapshot
    """
    logger.info('Creating new snapshot for {}'.format(volume.id))
    snapshot = volume.create_snapshot(
        description="Automatic snapshot by Automated EBS Snapshots")
    logger.info('Created snapshot {} for volume {}'.format(
        snapshot.id, volume.id))

    return snapshot
def _ensure_snapshot(connection, volume):
    """ Ensure that a given volume has an appropriate snapshot

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :type volume: boto.ec2.volume.Volume
    :param volume: Volume to check
    :returns: None
    """
    if 'AutomatedEBSSnapshots' not in volume.tags:
        logger.warning(
            'Missing tag AutomatedEBSSnapshots for volume {}'.format(
                volume.id))
        return

    interval = volume.tags['AutomatedEBSSnapshots']
    if interval not in VALID_INTERVALS:
        logger.warning(
            '"{}" is not a valid snapshotting interval for volume {}'.format(
                interval, volume.id))
        return

    snapshots = connection.get_all_snapshots(filters={'volume-id': volume.id})

    # Create a snapshot if we don't have any
    if not snapshots:
        _create_snapshot(volume)
        return

    min_delta = 3600 * 24 * 365 * 10  # 10 years :)
    for snapshot in snapshots:
        timestamp = datetime.datetime.strptime(
            snapshot.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
        delta_seconds = int(
            (datetime.datetime.utcnow() - timestamp).total_seconds())
        if delta_seconds < min_delta:
            min_delta = delta_seconds

    logger.info('The newest snapshot for {} is {} seconds old'.format(
        volume.id, min_delta))

    if interval == 'hourly' and min_delta > 3600:
        _create_snapshot(volume)
    elif interval == 'daily' and min_delta > 3600 * 24:
        _create_snapshot(volume)
    elif interval == 'weekly' and min_delta > 3600 * 24 * 7:
        _create_snapshot(volume)
    elif interval == 'monthly' and min_delta > 3600 * 24 * 30:
        _create_snapshot(volume)
    elif interval == 'yearly' and min_delta > 3600 * 24 * 365:
        _create_snapshot(volume)
    else:
        logger.info('No need for a new snapshot of {}'.format(volume.id))
def _remove_old_snapshots(connection, volume):
    """ Remove old snapshots

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :type volume: boto.ec2.volume.Volume
    :param volume: Volume to check
    :returns: None
    """
    if 'AutomatedEBSSnapshotsRetention' not in volume.tags:
        logger.warning(
            'Missing tag AutomatedEBSSnapshotsRetention for volume {}'.format(
                volume.id))
        return

    retention = int(volume.tags['AutomatedEBSSnapshotsRetention'])
    snapshots = connection.get_all_snapshots(filters={'volume-id': volume.id})

    # Sort the list based on the start time
    snapshots.sort(key=lambda x: x.start_time)

    # Keep the `retention` newest snapshots. Note that a retention of 0
    # yields snapshots[:0], i.e. an empty list, so nothing gets deleted
    # (0 == keep all).
    snapshots = snapshots[:-retention]

    if not snapshots:
        logger.info('No old snapshots to remove')
        return

    for snapshot in snapshots:
        logger.info('Deleting snapshot {}'.format(snapshot.id))
        try:
            snapshot.delete()
        except EC2ResponseError as error:
            logger.warning('Could not remove snapshot: {}'.format(
                error.message))

    logger.info('Done deleting snapshots')
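These two helpers are normally driven from a loop over the watched volumes. A minimal sketch of that loop, assuming a get_watched_volumes() helper (the list() function below references one) and a standard boto region connection; the region name is a placeholder:

import boto.ec2

def snapshot_watched_volumes(region='us-east-1'):
    # Connect to EC2 in the given region (placeholder region)
    connection = boto.ec2.connect_to_region(region)
    for volume in get_watched_volumes(connection):
        _ensure_snapshot(connection, volume)       # take a snapshot if one is due
        _remove_old_snapshots(connection, volume)  # prune beyond the retention count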
def list(connection):
    """ List watched EBS volumes

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :returns: None
    """
    volumes = get_watched_volumes(connection)

    if not volumes:
        logger.info('No watched volumes found')
        return

    logger.info(
        '+-----------------------'
        '+----------------------'
        '+--------------'
        '+------------+')
    logger.info(
        '| {volume:<21} '
        '| {volume_name:<20.20} '
        '| {interval:<12} '
        '| {retention:<10} |'.format(
            volume='Volume ID',
            volume_name='Volume name',
            interval='Interval',
            retention='Retention'))
    logger.info(
        '+-----------------------'
        '+----------------------'
        '+--------------'
        '+------------+')

    for volume in volumes:
        if 'AutomatedEBSSnapshots' not in volume.tags:
            interval = 'Interval tag not found'
        elif volume.tags['AutomatedEBSSnapshots'] not in VALID_INTERVALS:
            interval = 'Invalid interval'
        else:
            interval = volume.tags['AutomatedEBSSnapshots']

        if 'AutomatedEBSSnapshotsRetention' not in volume.tags:
            retention = 0
        else:
            retention = volume.tags['AutomatedEBSSnapshotsRetention']

        # Get the volume name
        try:
            volume_name = volume.tags['Name']
        except KeyError:
            volume_name = ''

        # Width 21 matches the header row so the columns stay aligned
        logger.info(
            '| {volume_id:<21} '
            '| {volume_name:<20.20} '
            '| {interval:<12} '
            '| {retention:<10} |'.format(
                volume_id=volume.id,
                volume_name=volume_name,
                interval=interval,
                retention=retention))

    logger.info(
        '+-----------------------'
        '+----------------------'
        '+--------------'
        '+------------+')
def unwatch(connection, volume_id):
    """ Remove watching of a volume

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :type volume_id: str
    :param volume_id: VolumeID to remove from the watchlist
    :returns: bool - True if the unwatch was successful
    """
    try:
        volume = connection.get_all_volumes(volume_ids=[volume_id])[0]
        volume.remove_tag('AutomatedEBSSnapshots')
    except EC2ResponseError:
        pass

    logger.info('Removed {} from the watchlist'.format(volume_id))

    return True
def watch(connection, volume_id, interval='daily', retention=0):
    """ Start watching a new volume

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :type volume_id: str
    :param volume_id: VolumeID to add to the watchlist
    :type interval: str
    :param interval: Backup interval [hourly|daily|weekly|monthly|yearly]
    :type retention: int
    :param retention: Number of snapshots to keep. 0 == keep all
    :returns: bool - True if the watch was successful
    """
    try:
        volume = connection.get_all_volumes(volume_ids=[volume_id])[0]
    except EC2ResponseError:
        logger.warning('Volume {} not found'.format(volume_id))
        return False

    if interval not in VALID_INTERVALS:
        logger.warning(
            '{} is not a valid interval. Valid intervals are {}'.format(
                interval, ', '.join(VALID_INTERVALS)))
        # Bail out rather than tagging the volume with an invalid interval
        return False

    # Remove the tag first
    volume.remove_tag('AutomatedEBSSnapshots')

    # Re-add the tag
    volume.add_tag('AutomatedEBSSnapshots', value=interval)

    # Remove the tag first
    volume.remove_tag('AutomatedEBSSnapshotsRetention')

    # Re-add the tag
    volume.add_tag('AutomatedEBSSnapshotsRetention', value=int(retention))

    logger.info('Updated the rotation interval to {} for {}'.format(
        interval, volume_id))

    return True
def get_volume_id(connection, volume):
    """ Get Volume ID from the given volume. Input can be volume id
    or its Name tag.

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :type volume: str
    :param volume: Volume ID or Volume Name
    :returns: Volume ID or None if the given volume does not exist
    """
    # Regular expression to check whether input is a volume id
    volume_id_pattern = re.compile(r'vol-\w{8}')

    if volume_id_pattern.match(volume):
        # input is volume id
        try:
            # Check whether it exists
            connection.get_all_volumes(volume_ids=[volume])
            volume_id = volume
        except EC2ResponseError:
            logger.warning('Volume {} not found'.format(volume))
            return None
    else:
        # input is volume name
        name_filter = {'tag-key': 'Name', 'tag-value': volume}
        volumes = connection.get_all_volumes(filters=name_filter)
        if not volumes:
            logger.warning('Volume {} not found'.format(volume))
            return None
        if len(volumes) > 1:
            logger.warning('Volume {} not unique'.format(volume))
        volume_id = volumes[0].id

    return volume_id
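Both lookup forms go through the same code path; a quick hedged example with placeholder identifiers:

vid = get_volume_id(connection, 'vol-12345678')     # lookup by volume id
vid = get_volume_id(connection, 'web-server-data')  # lookup by the Name tag
if vid is None:
    print('Volume not found')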
def watch_from_file(connection, file_name):
    """ Start watching volumes listed in a config file

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :type file_name: str
    :param file_name: path to config file
    :returns: None
    """
    with open(file_name, 'r') as filehandle:
        # Iterate over the file directly; xreadlines() is Python 2 only
        for line in filehandle:
            volume, interval, retention = line.rstrip().split(',')
            watch(
                connection,
                get_volume_id(connection, volume),
                interval,
                retention)
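The docstring does not spell out the file layout; from the split(',') above it is one comma-separated entry per line. A hedged sketch with made-up values:

# Hypothetical watchlist file: "<volume id or Name tag>,<interval>,<retention>"
with open('watchlist.csv', 'w') as f:
    f.write('vol-12345678,daily,7\n')
    f.write('web-server-data,weekly,4\n')

watch_from_file(connection, 'watchlist.csv')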
def unwatch_from_file(connection, file_name):
    """ Stop watching volumes listed in a config file

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :type file_name: str
    :param file_name: path to config file
    :returns: None
    """
    with open(file_name, 'r') as filehandle:
        # Iterate over the file directly; xreadlines() is Python 2 only
        for line in filehandle:
            # Only the volume column is needed here
            volume, _interval, _retention = line.rstrip().split(',')
            unwatch(connection, get_volume_id(connection, volume))
def list_snapshots(connection, volume):
    """ List all snapshots for the volume

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :type volume: str
    :param volume: Volume ID or Volume Name
    :returns: None
    """
    logger.info(
        '+----------------'
        '+----------------------'
        '+---------------------------+')
    logger.info(
        '| {snapshot:<14} '
        '| {snapshot_name:<20.20} '
        '| {created:<25} |'.format(
            snapshot='Snapshot ID',
            snapshot_name='Snapshot name',
            created='Created'))
    logger.info(
        '+----------------'
        '+----------------------'
        '+---------------------------+')

    vid = get_volume_id(connection, volume)
    if vid:
        vol = connection.get_all_volumes(volume_ids=[vid])[0]
        for snap in vol.snapshots():
            logger.info(
                '| {snapshot:<14} '
                '| {snapshot_name:<20.20} '
                '| {created:<25} |'.format(
                    snapshot=snap.id,
                    snapshot_name=snap.tags.get('Name', ''),
                    created=snap.start_time))

    logger.info(
        '+----------------'
        '+----------------------'
        '+---------------------------+')
def stem(self, words, parser, **kwargs):
    """ Get stems for the words using a given parser

    Example:
        from .parsing import ListParser
        parser = ListParser()
        stemmer = Morfologik()
        stemmer.stem(['ja tańczę a ona śpi'], parser)
        [
            ('ja', ['ja']),
            ('tańczę', ['tańczyć']),
            ('a', ['a']),
            ('ona', ['on']),
            ('śpi', ['spać'])
        ]
    """
    output = self._run_morfologik(words)
    return parser.parse(output, **kwargs)
def _run_morfologik(self, words):
    """ Runs morfologik java jar and assumes that input and output
    is UTF-8 encoded.
    """
    p = subprocess.Popen(
        ['java', '-jar', self.jar_path, 'plstem',
         '-ie', 'UTF-8', '-oe', 'UTF-8'],
        bufsize=-1,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)

    out, _ = p.communicate(input=bytes("\n".join(words), "utf-8"))
    return out.decode('utf-8')
def read_from_hdx(identifier, configuration=None):
    # type: (str, Optional[Configuration]) -> Optional['Showcase']
    """Reads the showcase given by identifier from HDX and returns Showcase object

    Args:
        identifier (str): Identifier of showcase
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.

    Returns:
        Optional[Showcase]: Showcase object if successful read, None if not
    """
    showcase = Showcase(configuration=configuration)
    result = showcase._load_from_hdx('showcase', identifier)
    if result:
        return showcase
    return None
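A hedged usage sketch, assuming this is exposed as a static-level method on Showcase as its body suggests; Configuration.create() is the usual entry point for the HDX library, though the import path varies between hdx-python-api versions, and the identifier is a placeholder:

from hdx.hdx_configuration import Configuration

Configuration.create(hdx_site='prod', user_agent='example', hdx_read_only=True)
showcase = Showcase.read_from_hdx('my-showcase-id')  # placeholder identifier
if showcase is not None:
    print(showcase['title'])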
def get_datasets(self):
    # type: () -> List[hdx.data.dataset.Dataset]
    """Get any datasets in the showcase

    Returns:
        List[Dataset]: List of datasets
    """
    assoc_result, datasets_dicts = self._read_from_hdx(
        'showcase', self.data['id'], fieldname='showcase_id',
        action=self.actions()['list_datasets'])
    datasets = list()
    if assoc_result:
        for dataset_dict in datasets_dicts:
            dataset = hdx.data.dataset.Dataset(
                dataset_dict, configuration=self.configuration)
            datasets.append(dataset)
    return datasets
def _get_showcase_dataset_dict(self, dataset):
    # type: (Union[hdx.data.dataset.Dataset,Dict,str]) -> Dict
    """Get showcase dataset dict

    Args:
        dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata from a Dataset object or dictionary

    Returns:
        Dict: showcase dataset dict
    """
    if isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict):
        if 'id' not in dataset:
            dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['name'])
        dataset = dataset['id']
    elif not isinstance(dataset, str):
        raise hdx.data.hdxobject.HDXError(
            'Type %s cannot be added as a dataset!' % type(dataset).__name__)
    if is_valid_uuid(dataset) is False:
        raise hdx.data.hdxobject.HDXError('%s is not a valid dataset id!' % dataset)
    return {'showcase_id': self.data['id'], 'package_id': dataset}
def add_dataset(self, dataset, datasets_to_check=None):
    # type: (Union[hdx.data.dataset.Dataset,Dict,str], List[hdx.data.dataset.Dataset]) -> bool
    """Add a dataset

    Args:
        dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata either from a Dataset object or a dictionary
        datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.

    Returns:
        bool: True if the dataset was added, False if already present
    """
    showcase_dataset = self._get_showcase_dataset_dict(dataset)
    if datasets_to_check is None:
        datasets_to_check = self.get_datasets()
    for dataset_to_check in datasets_to_check:
        if showcase_dataset['package_id'] == dataset_to_check['id']:
            return False
    self._write_to_hdx('associate', showcase_dataset, 'package_id')
    return True
def add_datasets(self, datasets, datasets_to_check=None):
    # type: (List[Union[hdx.data.dataset.Dataset,Dict,str]], List[hdx.data.dataset.Dataset]) -> bool
    """Add multiple datasets

    Args:
        datasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries
        datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.

    Returns:
        bool: True if all datasets added or False if any already present
    """
    if datasets_to_check is None:
        datasets_to_check = self.get_datasets()
    alldatasetsadded = True
    for dataset in datasets:
        if not self.add_dataset(dataset, datasets_to_check=datasets_to_check):
            alldatasetsadded = False
    return alldatasetsadded
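A short sketch tying the three methods together; the identifiers are placeholders:

showcase = Showcase.read_from_hdx('my-showcase-id')
all_added = showcase.add_datasets(['dataset-id-1', 'dataset-id-2'])
if not all_added:
    print('At least one dataset was already in the showcase')
for dataset in showcase.get_datasets():
    print(dataset['name'])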
def possible_parameter(nb, jsonable_parameter=True, end_cell_index=None):
    """
    Find the possible parameters from a jupyter notebook (python3 only).

    The possible parameters are obtained by parsing the abstract syntax tree
    of the python code generated from the jupyter notebook.

    For a jupyter notebook, a variable can be a possible parameter if:

    - it is defined in a cell that contains only comments or assignments,
    - its name is not used in the current cell beside the assignment
      nor previously.

    Parameters
    ----------
    nb : str, nbformat.notebooknode.NotebookNode
        Jupyter notebook path or its content as a NotebookNode object.
    jsonable_parameter : bool, optional
        Consider only jsonable parameters.
    end_cell_index : int, optional
        End cell index used to slice the notebook in finding the possible
        parameters.

    Returns
    -------
    list[collections.namedtuple]
        If jsonable_parameter is true the fields are
        ('name', 'value', 'cell_index'), otherwise ('name', 'cell_index').
        The list is ordered by the name of the parameters.
    """
    jh = _JupyterNotebookHelper(nb, jsonable_parameter, end_cell_index)

    if jsonable_parameter is True:
        PossibleParameter = collections.namedtuple(
            'PossibleParameter', ['name', 'value', 'cell_index'])
    else:
        PossibleParameter = collections.namedtuple(
            'PossibleParameter', ['name', 'cell_index'])

    res = []
    for name, cell_index in jh.param_cell_index.items():
        if jsonable_parameter is True:
            res.append(PossibleParameter(
                name=name, value=jh.param_value[name], cell_index=cell_index))
        else:
            res.append(PossibleParameter(name=name, cell_index=cell_index))

    return sorted(res, key=lambda x: x.name)
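A hedged usage sketch; the notebook path is a placeholder:

for p in possible_parameter('analysis.ipynb'):
    # Each item is a PossibleParameter namedtuple
    print(p.name, p.value, p.cell_index)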
def run_jnb(input_path, output_path=r"///_run_jnb/*-output",
            execution_path=r'///input', return_mode='except',
            overwrite=False,
            timeout=ExecutePreprocessor.timeout.default_value,
            kernel_name=ExecutePreprocessor.kernel_name.default_value,
            ep_kwargs=None, jsonable_parameter=True, end_cell_index=None,
            arg=None, **kwargs):
    """
    Run an input jupyter notebook file and optionally (python3 only)
    parametrise it.

    One can pass arguments as keyword arguments or in a json format (file or
    string). For safety reasons, in order to avoid any code injection, only
    json serialisable keyword arguments are available. The keyword arguments
    are firstly encoded in json format using the standard json encoder. The
    json content is decoded into python objects using the standard json
    decoder and it is mapped to a variable assignment by unpacking it. The
    assignments are appended at the end of the cell where they are initially
    defined.

    Parameters
    ----------
    input_path : str
        Path of the input jupyter notebook.
    output_path : str, optional
        Path of the output jupyter notebook. One can use the input_path
        location as relative path by starting with "///". * can be used once
        in the beginning or end (excluding the ".ipynb" extension) as a
        wildcard of the input_path filename. "///_run_jnb/*-output" is the
        default value and states that the output is in the _run_jnb folder
        with respect to the input_path directory and "-output" is appended
        to the input name.
    execution_path : str, optional
        The path of the folder where to execute the notebook. r'///input' or
        r'///output' can be used to denote the input / output folder.
    return_mode : ['parametrised_only', 'except', True, False], optional
        Flag to write the generated notebook to the output_path:
        "parametrised_only" writes the generated notebook without executing
        it, "except" writes in case of an exception, True writes always,
        False writes never.
    overwrite : bool, optional
        Flag to overwrite or not the output_path. If the parameter is False
        the used output_path will be incremented until a valid one is found.
    timeout : int, optional
        ExecutePreprocessor.timeout
    kernel_name : str, optional
        ExecutePreprocessor.kernel_name
    ep_kwargs : dict, optional
        Other kwargs accepted by nbconvert.preprocessors.ExecutePreprocessor
    jsonable_parameter : bool, optional
        Parametrise only jsonable parameters
    end_cell_index : int, optional
        End cell index used to slice the notebook in finding the possible
        parameters.
    arg : str
        Path of a json file (it should end in ".json") or json formatted
        string used to parametrise the jupyter notebook. It should contain
        json objects. It is decoded into python objects following
        https://docs.python.org/3.6/library/json.html#json-to-py-table .
    kwargs:
        json serialisable keyword arguments used to parametrise the jupyter
        notebook.

    Returns
    -------
    collections.namedtuple
        The fields are ('output_nb_path', 'error_prompt_number',
        'error_type', 'error_value', 'error_traceback'). If the generated
        file is written the output path is returned otherwise None. If an
        error is caught the details are returned otherwise None.
    """
    if os.path.splitext(input_path)[1] != '.ipynb':
        raise ValueError(
            "The extension of input_path = '{}' is not '.ipynb'".format(input_path))
    if os.path.basename(input_path) == '*':
        raise ValueError("The filename = {} can not start with *".format(input_path))

    input_path_dir, input_path_base = os.path.split(input_path)

    if output_path.startswith(r'///'):
        input_rel = True
        output_path = output_path[3:]
    else:
        input_rel = False
    output_path_dir, output_path_base = os.path.split(output_path)
    if input_rel:
        output_path_dir = os.path.join(input_path_dir, output_path_dir)
    output_path_dir = os.path.normpath(output_path_dir)
    if os.path.exists(output_path_dir) is False:
        os.makedirs(output_path_dir)

    if output_path_base.endswith('.ipynb'):
        pass
    elif output_path_base.startswith("*"):
        output_path_base = input_path_base[:-6] + output_path_base[1:] + '.ipynb'
    elif output_path_base.endswith("*"):
        output_path_base = output_path_base[:-1] + input_path_base[:-6] + '.ipynb'
    else:
        raise ValueError("Invalid output_path")
    output_path = os.path.abspath(os.path.join(output_path_dir, output_path_base))
    if output_path is not None and os.path.splitext(output_path)[1] != '.ipynb':
        raise ValueError(
            "The extension of output_path = '{}' is not '.ipynb'".format(output_path))

    if execution_path.startswith(r'///input'):
        execution_path = os.path.join(input_path_dir, execution_path[8:])
    elif execution_path.startswith(r'///output'):
        # Resolve relative to the output folder
        execution_path = os.path.join(output_path_dir, execution_path[9:])
    execution_path = os.path.normpath(execution_path)
    if os.path.exists(execution_path) is False:
        os.makedirs(execution_path)

    if ep_kwargs is None:
        ep_kwargs = {}
    if return_mode not in ['parametrised_only', 'except', True, False]:
        raise TypeError("return mode is not valid!")

    kwarg_to_json = json.dumps(kwargs)
    kwarg_as_kwarg = decode_json(kwarg_to_json)
    arg_as_kwarg = decode_json(arg)
    multiple_kwarg = set(arg_as_kwarg.keys()) & set(kwarg_as_kwarg.keys())
    if multiple_kwarg != set():
        raise ValueError(
            'Multiple values for keyword argument {}'.format(multiple_kwarg))
    jupyter_kwargs = {**arg_as_kwarg, **kwarg_as_kwarg}

    nb = _read_nb(input_path)

    # clean notebook
    for i, cell in enumerate(nb['cells']):
        if cell['cell_type'] == 'code':
            nb['cells'][i]['outputs'] = []
            nb['cells'][i]['execution_count'] = None

    if jupyter_kwargs != {}:
        params_of_interest = {}
        jnh = _JupyterNotebookHelper(nb, jsonable_parameter, end_cell_index)
        for el in jupyter_kwargs.keys():
            if el not in jnh.param_cell_index.keys():
                raise ValueError(
                    repr(el) + ' is not a possible parameter {}.'.format(
                        list(jnh.param_cell_index.keys())))
            else:
                params_of_interest[el] = jnh.param_cell_index[el]

        params_of_interest = sort_dict(params_of_interest, by='value')
        cell_index_param = group_dict_by_value(params_of_interest)
        for key, value in cell_index_param.items():
            cell_param = {k: jupyter_kwargs[k] for k in value}
            cell_code = kwargs_to_variable_assignment(cell_param)
            marked_code = _mark_auto_generated_code(cell_code)
            nb['cells'][key]['source'] += marked_code

    if return_mode != 'parametrised_only':
        ep = ExecutePreprocessor(timeout=timeout, kernel_name=kernel_name,
                                 **ep_kwargs)

    catch_except = False
    error = (None, None, None, None)
    try:
        if return_mode != 'parametrised_only':
            ep.preprocess(nb, {'metadata': {'path': execution_path}})
    except CellExecutionError:
        catch_except = True
        # Walk the cells in reverse to find the one that raised
        for cell in nb['cells'][::-1]:
            if cell['cell_type'] == 'code' and cell.get('outputs') != []:
                for output in cell['outputs']:
                    if output.get('output_type') == 'error':
                        error = (cell['execution_count'],
                                 output.get('ename'),
                                 output.get('evalue'),
                                 output.get('traceback'))
                        break
                if error[0] is not None:
                    break
        else:
            raise ValueError('Cell expected to have an error.')
    except:
        raise

    if return_mode == 'except':
        if catch_except is True:
            nb_return = True
        else:
            nb_return = None
    elif return_mode is True or return_mode == 'parametrised_only':
        nb_return = True
    elif return_mode is False:
        nb_return = None

    if nb_return is not None:
        if overwrite is False:
            while os.path.exists(output_path):
                dirname, basename = os.path.split(output_path)
                root, ext = os.path.splitext(basename)
                new_root = increment_name(root)
                output_path = os.path.join(dirname, new_root + ext)
        nb_return = output_path  # update the output_path
        _write_nb(nb, output_path)

    Output = collections.namedtuple(
        'Output', ['output_nb_path', 'error_prompt_number', 'error_type',
                   'error_value', 'error_traceback'])
    res = Output(output_nb_path=nb_return, error_prompt_number=error[0],
                 error_type=error[1], error_value=error[2],
                 error_traceback=error[3])
    return res
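A hedged example of parametrising and running a notebook; the notebook path and the parameter names (alpha, n_iter) are placeholders that would have to exist as possible parameters in the notebook:

result = run_jnb('analysis.ipynb', return_mode=True, alpha=0.5, n_iter=100)
print(result.output_nb_path)
if result.error_type is not None:
    print(result.error_type, result.error_value)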
def join(self, joiner, formatter=lambda s, t: t.format(s), template="{}"): """Join values and convert to string Example: >>> from ww import l >>> lst = l('012') >>> lst.join(',') u'0,1,2' >>> lst.join(',', template="{}#") u'0#,1#,2#' >>> string = lst.join(',',\ formatter = lambda x, y: str(int(x) ** 2)) >>> string u'0,1,4' """ return ww.s(joiner).join(self, formatter, template)
def append(self, *values):
    """Append values at the end of the list

    Allow chaining.

    Args:
        values: values to be appended at the end.

    Example:

        >>> from ww import l
        >>> lst = l([])
        >>> lst.append(1)
        [1]
        >>> lst
        [1]
        >>> lst.append(2, 3).append(4, 5)
        [1, 2, 3, 4, 5]
        >>> lst
        [1, 2, 3, 4, 5]
    """
    for value in values:
        list.append(self, value)
    return self
def extend(self, *iterables):
    """Add all values of all iterables at the end of the list

    Args:
        iterables: iterables whose contents are added at the end.

    Example:

        >>> from ww import l
        >>> lst = l([])
        >>> lst.extend([1, 2])
        [1, 2]
        >>> lst
        [1, 2]
        >>> lst.extend([3, 4]).extend([5, 6])
        [1, 2, 3, 4, 5, 6]
        >>> lst
        [1, 2, 3, 4, 5, 6]
    """
    for iterable in iterables:
        list.extend(self, iterable)
    return self
def normalize_cell_value(value):
    """Process value for writing into a cell.

    Args:
        value: any type of variable

    Returns:
        json serialized value if value is list or dict, else value
    """
    if isinstance(value, (dict, list)):
        return json.dumps(value)
    return value
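For illustration, the two branches in doctest form:

>>> normalize_cell_value({'a': 1})
'{"a": 1}'
>>> normalize_cell_value(42)
42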
def get_addresses_from_input_file(input_file_name):
    """Read addresses from input file into list of tuples.

    This only supports address and zipcode headers.
    """
    mode = 'r'
    if sys.version_info[0] < 3:
        mode = 'rb'
    with io.open(input_file_name, mode) as input_file:
        reader = csv.reader(input_file, delimiter=',', quotechar='"')
        addresses = list(map(tuple, reader))
        if len(addresses) == 0:
            raise Exception('No addresses found in input file')
        header_columns = list(column.lower() for column in addresses.pop(0))
        try:
            address_index = header_columns.index('address')
            zipcode_index = header_columns.index('zipcode')
        except ValueError:
            raise Exception("""The first row of the input CSV must be a header that contains \
a column labeled 'address' and a column labeled 'zipcode'.""")
        return list((row[address_index], row[zipcode_index]) for row in addresses)
def get_identifiers_from_input_file(input_file_name):
    """Read identifiers from input file into list of dicts with the header
    row values as keys, and the rest of the rows as values.
    """
    valid_identifiers = ['address', 'zipcode', 'unit', 'city', 'state',
                         'slug', 'block_id', 'msa', 'num_bins',
                         'property_type', 'client_value',
                         'client_value_sqft', 'meta']
    mode = 'r'
    if sys.version_info[0] < 3:
        mode = 'rb'
    with io.open(input_file_name, mode) as input_file:
        result = [{identifier: val for identifier, val in list(row.items())
                   if identifier in valid_identifiers}
                  for row in csv.DictReader(input_file, skipinitialspace=True)]
    return result
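A hedged sketch of the expected input with made-up values; note that columns outside valid_identifiers are silently dropped:

with open('addresses.csv', 'w') as f:
    f.write('address,zipcode,favorite_color\n')
    f.write('"43 Fake St",90274,blue\n')

rows = get_identifiers_from_input_file('addresses.csv')
# -> [{'address': '43 Fake St', 'zipcode': '90274'}]  (favorite_color dropped)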
def get_real_time_locate(ipAddress, auth, url):
    """
    function takes the ipAddress of a specific host and issues a RESTFUL call
    to get the device and interface that the target host is currently
    connected to. Note: Although intended to return a single location,
    multiple locations may be returned for a single host due to a partially
    discovered network or misconfigured environment.

    :param ipAddress: str value valid IPv4 IP address
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :return: list of dictionaries where each element of the list represents
             the location of the target host
    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> found_device = get_real_time_locate('10.101.0.51', auth.creds, auth.url)
    >>> assert type(found_device) is list
    >>> assert 'deviceId' in found_device[0]
    >>> no_device = get_real_time_locate('192.168.254.254', auth.creds, auth.url)
    >>> assert type(no_device) is dict
    >>> assert len(no_device) == 0
    """
    real_time_locate_url = ("/imcrs/res/access/realtimeLocate?type=2&value=" +
                            str(ipAddress) + "&total=false")
    f_url = url + real_time_locate_url
    r = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if r.status_code == 200:
            response = json.loads(r.text)
            if 'realtimeLocation' in response:
                real_time_locate = response['realtimeLocation']
                # A single location comes back as a dict; normalise to a list
                if type(real_time_locate) is dict:
                    real_time_locate = [real_time_locate]
                return real_time_locate
            else:
                return response
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " get_real_time_locate: An Error has occurred"
def get_ip_mac_arp_list(devId, auth, url):
    """
    function takes devid of specific device and issues a RESTFUL call to get
    the IP/MAC/ARP list from the target device.

    :param devId: int or str value of the target device.
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :return: list of dictionaries containing the IP/MAC/ARP list of the
             target device.
    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> ip_mac_list = get_ip_mac_arp_list('10', auth.creds, auth.url)
    >>> assert type(ip_mac_list) is list
    >>> assert 'deviceId' in ip_mac_list[0]
    """
    # Check whether the IMC credentials are already available
    if auth is None or url is None:
        set_imc_creds()
    ip_mac_arp_list_url = "/imcrs/res/access/ipMacArp/" + str(devId)
    f_url = url + ip_mac_arp_list_url
    r = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if r.status_code == 200:
            macarplist = json.loads(r.text)
            if len(macarplist) > 1:
                return macarplist['ipMacArp']
            else:
                return ['this function is unsupported']
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " get_ip_mac_arp_list: An Error has occurred"
def get_ip_scope_detail(scopeId, auth, url):
    """
    function takes the scopeId of a specific IP address scope and returns the
    details of that scope as currently configured on the HPE IMC server.

    :param scopeId: integer of the desired scope id
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :return: dictionary, may contain multiple entries if sub-scopes have
             been created
    :rtype: dict

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> ip_scope_detail = get_ip_scope_detail('45', auth.creds, auth.url)
    >>> assert type(ip_scope_detail) is dict
    >>> assert 'startIp' in ip_scope_detail
    """
    get_ip_scope_url = "/imcrs/res/access/assignedIpScope/" + str(scopeId)
    f_url = url + get_ip_scope_url
    r = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if r.status_code == 200:
            ipscopelist = json.loads(r.text)
            return ipscopelist
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " get_ip_scope: An Error has occurred"
def delete_ip_scope(network_address, auth, url):
    '''Function to delete an entire IP segment from the IMC IP Address
    management under terminal access

    :param network_address: network address of the target scope in CIDR format
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
    >>> delete_scope = delete_ip_scope('10.50.0.0/24', auth.creds, auth.url)
    '''
    scope_id = get_scope_id(network_address, auth, url)
    delete_ip_address_url = '/imcrs/res/access/assignedIpScope/' + str(scope_id)
    f_url = url + delete_ip_address_url
    r = requests.delete(f_url, auth=auth, headers=HEADERS)
    try:
        # Check the status before returning so a successful delete reports 204
        if r.status_code == 204:
            return r.status_code
        return r
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " delete_ip_scope: An Error has occurred"
def add_scope_ip(ipaddress, name, description, scopeid, auth, url):
    """
    Function to add new host IP address allocation to existing scope ID

    :param ipaddress: IP address to allocate
    :param name: name of the owner of this host
    :param description: Description of the host
    :param scopeid: integer of the target scope id
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :return: HTTP status code (200 if created, 409 if it already exists)
    :rtype: int

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> add_scope_ip('10.50.0.5', 'cyoung', 'New Test Host', '175', auth.creds, auth.url)
    """
    new_ip = {"ip": ipaddress,
              "name": name,
              "description": description}
    add_scope_ip_url = '/imcrs/res/access/assignedIpScope/ip?ipScopeId=' + str(scopeid)
    f_url = url + add_scope_ip_url
    payload = json.dumps(new_ip)
    r = requests.post(f_url, auth=auth, headers=HEADERS, data=payload)
    try:
        if r.status_code == 200:
            # IP scope successfully created
            return r.status_code
        elif r.status_code == 409:
            # IP scope already exists
            return r.status_code
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " add_ip_scope: An Error has occurred"
def remove_scope_ip(hostid, auth, url):
    """
    Function to remove a host IP address allocation

    :param hostid: Host id of the host to be deleted
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :return: HTTP response code. Should be 204 if successful
    :rtype: int

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
    >>> add_host_to_segment('10.50.0.5', 'cyoung', 'New Test Host', '10.50.0.0/24', auth.creds, auth.url)
    >>> host_id = get_host_id('10.50.0.5', '10.50.0.0/24', auth.creds, auth.url)
    >>> rem_host = remove_scope_ip(host_id, auth.creds, auth.url)
    >>> assert type(rem_host) is int
    >>> assert rem_host == 204
    """
    remove_scope_ip_url = '/imcrs/res/access/assignedIpScope/ip/' + str(hostid)
    f_url = url + remove_scope_ip_url
    r = requests.delete(f_url, auth=auth, headers=HEADERS)
    try:
        if r.status_code == 204:
            # Host successfully deleted
            return r.status_code
        elif r.status_code == 409:
            return r.status_code
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " remove_scope_ip: An Error has occurred"
def get_ip_scope_hosts(scopeId, auth, url):
    """
    Function requires input of scope ID and returns list of allocated IP
    addresses for the specified scope

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param scopeId: Integer of the desired scope id

    :return: list of dictionary objects where each element of the list
             represents a single host assigned to the IP scope
    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> scope_id = get_scope_id('10.50.0.0/24', auth.creds, auth.url)
    >>> ip_scope_hosts = get_ip_scope_hosts(scope_id, auth.creds, auth.url)
    >>> assert type(ip_scope_hosts) is list
    >>> assert 'name' in ip_scope_hosts[0]
    >>> assert 'description' in ip_scope_hosts[0]
    >>> assert 'ip' in ip_scope_hosts[0]
    >>> assert 'id' in ip_scope_hosts[0]
    """
    get_ip_scope_url = ("/imcrs/res/access/assignedIpScope/ip?size=10000&ipScopeId=" +
                        str(scopeId))
    f_url = url + get_ip_scope_url
    r = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if r.status_code == 200:
            ipscopelist = json.loads(r.text)
            if ipscopelist == {}:
                return ipscopelist
            ipscopelist = ipscopelist['assignedIpInfo']
            # A single host comes back as a dict; normalise to a list
            if type(ipscopelist) is dict:
                return [ipscopelist]
            return ipscopelist
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " get_ip_scope: An Error has occurred"
def add_host_to_segment(ipaddress, name, description, network_address, auth, url):
    '''
    Function to abstract the existing add_scope_ip function. Allows for use
    of the network address rather than forcing the caller to look up the
    scope_id

    :param ipaddress: IP address to allocate
    :param name: name of the owner of this host
    :param description: Description of the host
    :param network_address: network address of the target scope in format
        x.x.x.x/yy where x.x.x.x represents the network address and yy
        represents the length of the subnet mask. Example: 10.50.0.0
        255.255.255.0 would be written as 10.50.0.0/24
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :returns: None
    '''
    scope_id = get_scope_id(network_address, auth, url)
    add_scope_ip(ipaddress, name, description, scope_id, auth, url)
def delete_host_from_segment(ipaddress, networkaddress, auth, url):
    '''
    Function to abstract the existing remove_scope_ip function. Allows for
    use of the host IP address and network address rather than forcing the
    caller to look up the host id

    :param ipaddress: IP address of the host to remove
    :param networkaddress: network address of the target scope in CIDR format
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    '''
    host_id = get_host_id(ipaddress, networkaddress, auth, url)
    # Pass auth and url straight through, matching the signatures above
    remove_scope_ip(host_id, auth, url)
def generate_signature(method, version, endpoint,
                       date, rel_url, content_type, content,
                       access_key, secret_key, hash_type):
    '''
    Generates the API request signature from the given parameters.
    '''
    hostname = endpoint._val.netloc  # FIXME: migrate to public API
    if version >= 'v4.20181215':
        content = b''
    elif content_type.startswith('multipart/'):
        content = b''
    body_hash = hashlib.new(hash_type, content).hexdigest()

    sign_str = '{}\n{}\n{}\nhost:{}\ncontent-type:{}\nx-backendai-version:{}\n{}'.format(  # noqa
        method.upper(),
        rel_url,
        date.isoformat(),
        hostname,
        content_type.lower(),
        version,
        body_hash)
    sign_bytes = sign_str.encode()

    sign_key = hmac.new(secret_key.encode(),
                        date.strftime('%Y%m%d').encode(), hash_type).digest()
    sign_key = hmac.new(sign_key, hostname.encode(), hash_type).digest()
    signature = hmac.new(sign_key, sign_bytes, hash_type).hexdigest()

    headers = {
        'Authorization': 'BackendAI signMethod=HMAC-{}, credential={}:{}'.format(
            hash_type.upper(),
            access_key,
            signature),
    }
    return headers, signature
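The signing key is derived by chaining HMACs (secret, then date, then hostname) before signing the canonical string. A standalone sketch of just that derivation, with placeholder credentials and host:

import hmac
from datetime import datetime, timezone

secret_key = 'dummy-secret'   # placeholder credential
hostname = 'api.backend.ai'   # placeholder endpoint host
date = datetime.now(timezone.utc)

k1 = hmac.new(secret_key.encode(), date.strftime('%Y%m%d').encode(), 'sha256').digest()
sign_key = hmac.new(k1, hostname.encode(), 'sha256').digest()
# sign_key is then used to HMAC the canonical request string as above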
async def list_with_limit(cls, limit, offset, status: str = 'ALIVE',
                          fields: Iterable[str] = None) -> Sequence[dict]:
    '''
    Fetches the list of agents with the given status with limit and offset
    for pagination.

    :param limit: number of agents to get
    :param offset: offset index of agents to get
    :param status: An upper-cased string constant representing agent status
        (one of ``'ALIVE'``, ``'TERMINATED'``, ``'LOST'``, etc.)
    :param fields: Additional per-agent query fields to fetch.
    '''
    if fields is None:
        fields = (
            'id', 'addr', 'status', 'first_contact',
            'mem_slots', 'cpu_slots', 'gpu_slots',
        )
    q = 'query($limit: Int!, $offset: Int!, $status: String) {' \
        '  agent_list(limit: $limit, offset: $offset, status: $status) {' \
        '    items { $fields }' \
        '    total_count' \
        '  }' \
        '}'
    q = q.replace('$fields', ' '.join(fields))
    variables = {
        'limit': limit,
        'offset': offset,
        'status': status,
    }
    rqst = Request(cls.session, 'POST', '/admin/graphql')
    rqst.set_json({
        'query': q,
        'variables': variables,
    })
    async with rqst.fetch() as resp:
        data = await resp.json()
        return data['agent_list']
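A hedged async usage sketch, assuming the method is exposed on a class bound to an AsyncSession (as the cls.session reference suggests); the field names follow the defaults above:

from ai.backend.client.session import AsyncSession

async def show_alive_agents():
    async with AsyncSession() as sess:
        page = await sess.Agent.list_with_limit(20, 0, status='ALIVE')
        print('total:', page['total_count'])
        for agent in page['items']:
            print(agent['id'], agent['addr'], agent['status'])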
def set_content(self, value: RequestContent, *,
                content_type: str = None):
    '''
    Sets the content of the request.
    '''
    assert self._attached_files is None, \
        'cannot set content because you already attached files.'
    if value is None:
        guessed_content_type = 'text/plain'
        self._content = b''
    elif isinstance(value, str):
        guessed_content_type = 'text/plain'
        self._content = value.encode('utf-8')
    else:
        guessed_content_type = 'application/octet-stream'
        self._content = value
    self.content_type = (content_type
                         if content_type is not None
                         else guessed_content_type)
def set_json(self, value: object):
    '''
    A shortcut for set_content() with JSON objects.
    '''
    self.set_content(modjson.dumps(value, cls=ExtendedJSONEncoder),
                     content_type='application/json')
def attach_files(self, files: Sequence[AttachedFile]):
    '''
    Attach a list of files represented as AttachedFile.
    '''
    assert not self._content, 'content must be empty to attach files.'
    self.content_type = 'multipart/form-data'
    self._attached_files = files
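A hedged upload sketch; AttachedFile is assumed to be a (filename, stream, content_type) tuple-like as its use here suggests, and the endpoint path is a placeholder:

rqst = Request(session, 'POST', '/folders/mydata/upload')  # placeholder path
rqst.attach_files([
    AttachedFile('data.csv', open('data.csv', 'rb'), 'text/csv'),
])
with rqst.fetch() as resp:
    print(resp.status)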
def _sign(self, rel_url, access_key=None, secret_key=None, hash_type=None):
    '''
    Calculates the signature of the given request and adds the
    Authorization HTTP header. It should be called at the very end of
    request preparation and before sending the request to the server.
    '''
    if access_key is None:
        access_key = self.config.access_key
    if secret_key is None:
        secret_key = self.config.secret_key
    if hash_type is None:
        hash_type = self.config.hash_type
    hdrs, _ = generate_signature(
        self.method, self.config.version, self.config.endpoint,
        self.date, str(rel_url), self.content_type, self._content,
        access_key, secret_key, hash_type)
    self.headers.update(hdrs)
def fetch(self, **kwargs) -> 'FetchContextManager':
    '''
    Sends the request to the server and reads the response.

    You may use this method either with plain synchronous Session or
    AsyncSession. Both of the following patterns are valid:

    .. code-block:: python3

       from ai.backend.client.request import Request
       from ai.backend.client.session import Session

       with Session() as sess:
           rqst = Request(sess, 'GET', ...)
           with rqst.fetch() as resp:
               print(resp.text())

    .. code-block:: python3

       from ai.backend.client.request import Request
       from ai.backend.client.session import AsyncSession

       async with AsyncSession() as sess:
           rqst = Request(sess, 'GET', ...)
           async with rqst.fetch() as resp:
               print(await resp.text())
    '''
    assert self.method in self._allowed_methods, \
        'Disallowed HTTP method: {}'.format(self.method)
    self.date = datetime.now(tzutc())
    self.headers['Date'] = self.date.isoformat()
    if self.content_type is not None:
        self.headers['Content-Type'] = self.content_type
    full_url = self._build_url()
    self._sign(full_url.relative())
    rqst_ctx = self.session.aiohttp_session.request(
        self.method,
        str(full_url),
        data=self._pack_content(),
        timeout=_default_request_timeout,
        headers=self.headers)
    return FetchContextManager(self.session, rqst_ctx, **kwargs)
Sends the request to the server and reads the response. You may use this method either with plain synchronous Session or AsyncSession. Both of the following patterns are valid: .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.session import Session with Session() as sess: rqst = Request(sess, 'GET', ...) with rqst.fetch() as resp: print(resp.text()) .. code-block:: python3 from ai.backend.client.request import Request from ai.backend.client.session import AsyncSession async with AsyncSession() as sess: rqst = Request(sess, 'GET', ...) async with rqst.fetch() as resp: print(await resp.text())
entailment
def connect_websocket(self, **kwargs) -> 'WebSocketContextManager': ''' Creates a WebSocket connection. .. warning:: This method only works with :class:`~ai.backend.client.session.AsyncSession`. ''' assert isinstance(self.session, AsyncSession), \ 'Cannot use websockets with sessions in the synchronous mode' assert self.method == 'GET', 'Invalid websocket method' self.date = datetime.now(tzutc()) self.headers['Date'] = self.date.isoformat() # websocket is always a "binary" stream. self.content_type = 'application/octet-stream' full_url = self._build_url() self._sign(full_url.relative()) ws_ctx = self.session.aiohttp_session.ws_connect( str(full_url), autoping=True, heartbeat=30.0, headers=self.headers) return WebSocketContextManager(self.session, ws_ctx, **kwargs)
Creates a WebSocket connection. .. warning:: This method only works with :class:`~ai.backend.client.session.AsyncSession`.
entailment
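A hedged usage sketch for connect_websocket(), mirroring the async pattern from the fetch() docstring; the '/stream/echo' path and the send_str() call on the yielded object are assumptions based on aiohttp's WebSocket API, not details confirmed by this document.

import asyncio
from ai.backend.client.request import Request
from ai.backend.client.session import AsyncSession

async def main():
    async with AsyncSession() as sess:
        rqst = Request(sess, 'GET', '/stream/echo')  # hypothetical path
        async with rqst.connect_websocket() as ws:
            await ws.send_str('ping')  # assumes an aiohttp-style ws object

asyncio.run(main())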
def main(): """ Main function. :return: None. """ try: # Get the `src` directory's absolute path src_path = os.path.dirname( # `aoiklivereload` directory's absolute path os.path.dirname( # `demo` directory's absolute path os.path.dirname( # This file's absolute path os.path.abspath(__file__) ) ) ) # If the `src` directory path is not in `sys.path` if src_path not in sys.path: # Add to `sys.path`. # # This aims to save user setting PYTHONPATH when running this demo. # sys.path.append(src_path) # Import reloader class from aoiklivereload import LiveReloader # Create reloader reloader = LiveReloader( # Reload mode. # # In windows, have to use `spawn_exit` reload mode and force the # current process to exit immediately, otherwise will get the # error: # ``` # OSError: [WinError 10048] Only one usage of each socket address # (protocol/network address/port) is normally permitted # ``` # # Notice in `spawn_exit` reload mode, the user will not be able # to kill the new process using Ctrl-c. # reload_mode=('spawn_exit' if sys.platform == 'win32' else 'exec'), force_exit=True, ) # Start watcher thread reloader.start_watcher_thread() # Server host server_host = '0.0.0.0' # Server port server_port = 8000 # Get message msg = '# ----- Run server -----\nHost: {}\nPort: {}'.format( server_host, server_port ) # Print message print(msg) # Create request handler class HelloHandler(tornado.web.RequestHandler): """ Request handler class. """ def get(self): """ Request handler. :return: None. """ # Write response body self.write('hello') # List of tuples that maps URL pattern to handler handler_tuples = [ ('/', HelloHandler), ] # Create Tornado app tornado_app = tornado.web.Application( handler_tuples, # Disable Tornado's reloader debug=False, ) # Start listening tornado_app.listen(server_port, address=server_host) # Get event loop io_loop = tornado.ioloop.IOLoop.current() # Run event loop io_loop.start() # If have `KeyboardInterrupt` except KeyboardInterrupt: # Not treat as error pass
Main function. :return: None.
entailment
def add_perf_task(task, auth, url):
    """
    Function takes a Python dict containing all necessary fields for a
    performance task, transforms the dict into JSON, and issues a RESTful
    call to create the performance task on the target device.
    :param task: dictionary containing all required fields for performance tasks
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: 204 if successful
    :rtype: int

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.perf import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> new_task = {'indexDesc': '1.3.6.1.4.1.9.9.13.1.3.1.3', 'indexType': '[index1[0]:ciscoEnvMonTemperatureStatusValue:1:0]', 'itemFunction': '1.3.6.1.4.1.9.9.13.1.3.1.3', 'itemName': 'Cisco_Temperature', 'selectDefaultUnit': '400', 'unit': 'Celsius'}
    >>> new_perf_task = add_perf_task(new_task, auth.creds, auth.url)
    """
    add_perf_task_url = "/imcrs/perf/task"
    f_url = url + add_perf_task_url
    payload = json.dumps(task)
    try:
        # The request is issued inside the try block so that a
        # RequestException can actually be caught here.
        response = requests.post(f_url, data=payload, auth=auth, headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' add_perf_task: An error has occurred'
Function takes a Python dict containing all necessary fields for a performance task, transforms the dict into JSON, and issues a RESTful call to create the performance task on the target device. :param task: dictionary containing all required fields for performance tasks :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: 204 if successful :rtype: int >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.perf import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> new_task = {'indexDesc': '1.3.6.1.4.1.9.9.13.1.3.1.3', 'indexType': '[index1[0]:ciscoEnvMonTemperatureStatusValue:1:0]', 'itemFunction': '1.3.6.1.4.1.9.9.13.1.3.1.3', 'itemName': 'Cisco_Temperature', 'selectDefaultUnit': '400', 'unit': 'Celsius'} >>> new_perf_task = add_perf_task(new_task, auth.creds, auth.url)
entailment
def get_perf_task(task_name, auth, url):
    """
    Function takes a str object containing the name of an existing
    performance task and issues a RESTful call to the IMC REST service.
    Returns a dict containing the attributes of the task.
    :param task_name: str containing the name of the performance task
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: dict containing the attributes of the performance task
    :rtype: dict

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.perf import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> selected_task = get_perf_task('Cisco_Temperature', auth.creds, auth.url)
    >>> assert type(selected_task) is dict
    >>> assert 'taskName' in selected_task
    """
    get_perf_task_url = "/imcrs/perf/task?name=" + task_name + "&orderBy=taskId&desc=false"
    f_url = url + get_perf_task_url
    try:
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            perf_task_info = json.loads(response.text)
            if 'task' in perf_task_info:
                perf_task_info = perf_task_info['task']
            else:
                perf_task_info = "Task Doesn't Exist"
            return perf_task_info
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_perf_task: An error has occurred'
Function takes a str object containing the name of an existing performance task and issues a RESTful call to the IMC REST service. Returns a dict containing the attributes of the task. :param task_name: str containing the name of the performance task :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: dict containing the attributes of the performance task :rtype: dict >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.perf import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> selected_task = get_perf_task('Cisco_Temperature', auth.creds, auth.url) >>> assert type(selected_task) is dict >>> assert 'taskName' in selected_task
entailment
def delete_perf_task(task_name, auth, url):
    """
    Function takes a str of the target task_name to be deleted and retrieves
    the task_id using the get_perf_task function. Once the task_id has been
    successfully retrieved, a DELETE call is made against the HPE IMC REST
    interface to delete the target task.
    :param task_name: str of task name
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: int of 204 if successful, int of 403 if the task doesn't exist
    :rtype: int
    """
    task_id = get_perf_task(task_name, auth, url)
    if isinstance(task_id, str):
        print("Perf task doesn't exist")
        return 403
    task_id = task_id['taskId']
    delete_perf_task_url = "/imcrs/perf/task/delete/" + str(task_id)
    f_url = url + delete_perf_task_url
    try:
        response = requests.delete(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 204:
            print("Perf task successfully deleted")
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' delete_perf_task: An error has occurred'
Function takes a str of the target task_name to be deleted and retrieves the task_id using the get_perf_task function. Once the task_id has been successfully retrieved, a DELETE call is made against the HPE IMC REST interface to delete the target task. :param task_name: str of task name :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: int of 204 if successful, int of 403 if the task doesn't exist :rtype: int
entailment
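A usage sketch in the same doctest style as the sibling functions above; the credentials and task name are illustrative only, and the function also prints a status message before returning.

>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.perf import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> result = delete_perf_task('Cisco_Temperature', auth.creds, auth.url)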
def process_json_response(self, response):
    """For a json response, check if there was any error and raise an
    exception. Otherwise, create a housecanary.response.Response."""
    response_json = response.json()
    # handle errors
    code_key = "code"
    if code_key in response_json and response_json[code_key] != constants.HTTP_CODE_OK:
        code = response_json[code_key]
        message = response_json
        if "message" in response_json:
            message = response_json["message"]
        elif "code_description" in response_json:
            message = response_json["code_description"]
        if code == constants.HTTP_FORBIDDEN:
            raise housecanary.exceptions.UnauthorizedException(code, message)
        if code == constants.HTTP_TOO_MANY_REQUESTS:
            raise housecanary.exceptions.RateLimitException(code, message, response)
        else:
            raise housecanary.exceptions.RequestException(code, message)
    request_url = response.request.url
    endpoint_name = self._parse_endpoint_name_from_url(request_url)
    return Response.create(endpoint_name, response_json, response)
For a json response, check if there was any error and raise an exception. Otherwise, create a housecanary.response.Response.
entailment
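An illustrative payload shape that process_json_response() treats as an error; the numeric codes live in the `constants` module, which this document does not show, so 403 and 429 here are assumptions.

# Assumed: constants.HTTP_FORBIDDEN == 403, constants.HTTP_TOO_MANY_REQUESTS == 429.
rate_limited_body = {'code': 429, 'message': 'rate limit exceeded'}
forbidden_body = {'code': 403, 'code_description': 'invalid credentials'}
# For bodies like these, process_json_response() raises RateLimitException
# and UnauthorizedException respectively; a body without a non-OK 'code'
# is wrapped into a Response via Response.create().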
def start_watcher_thread(self):
    """
    Start watcher thread.

    :return: Watcher thread object.
    """
    # Create watcher thread
    watcher_thread = threading.Thread(target=self.run_watcher)

    # If the reload mode is `spawn_wait`
    if self._reload_mode == self.RELOAD_MODE_V_SPAWN_WAIT:
        # Use non-daemon thread
        daemon = False

    # If the reload mode is not `spawn_wait`
    else:
        # Use daemon thread
        daemon = True

    # Set whether the thread is daemon.
    # The `daemon` attribute replaces the deprecated `setDaemon` call.
    watcher_thread.daemon = daemon

    # Start watcher thread
    watcher_thread.start()

    # Return watcher thread
    return watcher_thread
Start watcher thread. :return: Watcher thread object.
entailment
def run_watcher(self):
    """
    Watcher thread's function.

    :return: None.
    """
    # Create observer
    observer = Observer()

    # Start observer
    observer.start()

    # Dict that maps file path to `watch object`
    watch_obj_map = {}

    # Run change check in a loop
    while not self._watcher_to_stop:
        # Get current watch paths
        old_watch_path_s = set(watch_obj_map)

        # Get new watch paths
        new_watch_path_s = self._find_watch_paths()

        # For each new watch path
        for new_watch_path in new_watch_path_s:
            # Remove from the old watch paths if exists
            old_watch_path_s.discard(new_watch_path)

            # If the new watch path was not watched
            if new_watch_path not in watch_obj_map:
                try:
                    # Schedule a watch
                    watch_obj = observer.schedule(
                        # 2KGRW
                        # `FileSystemEventHandler` instance
                        self,

                        # File path to watch
                        new_watch_path,

                        # Whether recursive
                        recursive=True,
                    )

                    # Store the watch obj
                    watch_obj_map[new_watch_path] = watch_obj

                # If have error
                except OSError:
                    # Set the watch object be None
                    watch_obj_map[new_watch_path] = None

        # For each old watch path that is not in the new watch paths
        for old_watch_path in old_watch_path_s:
            # Get watch object
            watch_obj = watch_obj_map.pop(old_watch_path, None)

            # If have watch object
            if watch_obj is not None:
                # Unschedule the watch
                observer.unschedule(watch_obj)

        # Store new watch paths
        self._watch_paths = new_watch_path_s

        # Sleep before next check
        time.sleep(self._interval)
Watcher thread's function. :return: None.
entailment
def _find_watch_paths(self): """ Find paths to watch. :return: Paths to watch. """ # Add directory paths in `sys.path` to watch paths watch_path_s = set(os.path.abspath(x) for x in sys.path) # For each extra path for extra_path in self._extra_paths or (): # Get the extra path's directory path extra_dir_path = os.path.dirname(os.path.abspath(extra_path)) # Add to watch paths watch_path_s.add(extra_dir_path) # For each module in `sys.modules` for module in list(sys.modules.values()): # Get module file path module_path = getattr(module, '__file__', None) # If have module file path if module_path is not None: # Get module directory path module_dir_path = os.path.dirname(os.path.abspath(module_path)) # Add to watch paths watch_path_s.add(module_dir_path) # Find short paths of these watch paths. # E.g. if both `/home` and `/home/aoik` exist, only keep `/home`. watch_path_s = self._find_short_paths(watch_path_s) # Return the watch paths return watch_path_s
Find paths to watch. :return: Paths to watch.
entailment
def _find_short_paths(self, paths): """ Find short paths of given paths. E.g. if both `/home` and `/home/aoik` exist, only keep `/home`. :param paths: Paths. :return: Set of short paths. """ # Split each path to parts. # E.g. '/home/aoik' to ['', 'home', 'aoik'] path_parts_s = [path.split(os.path.sep) for path in paths] # Root node root_node = {} # Sort these path parts by length, with the longest being the first. # # Longer paths appear first so that their extra parts are discarded # when a shorter path is found at 5TQ8L. # # Then for each path's parts. for parts in sorted(path_parts_s, key=len, reverse=True): # Start from the root node node = root_node # For each part of the path for part in parts: # Create node of the path node = node.setdefault(part, {}) # 5TQ8L # Clear the last path part's node's child nodes. # # This aims to keep only the shortest path that needs be watched. # node.clear() # Short paths short_path_s = set() # Collect leaf paths self._collect_leaf_paths( node=root_node, path_parts=(), leaf_paths=short_path_s, ) # Return short paths return short_path_s
Find short paths of given paths. E.g. if both `/home` and `/home/aoik` exist, only keep `/home`. :param paths: Paths. :return: Set of short paths.
entailment
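A standalone sketch of the trie idea behind _find_short_paths() and _collect_leaf_paths(), written as a plain function for illustration; POSIX-style paths are assumed.

import os

def shortest_prefix_paths(paths):
    """Keep only paths that are not nested under another given path."""
    root = {}
    # Insert longest paths first so a shorter path can discard their tails.
    for parts in sorted((p.split(os.sep) for p in paths), key=len, reverse=True):
        node = root
        for part in parts:
            node = node.setdefault(part, {})
        node.clear()  # a shorter path subsumes any longer ones beneath it
    # Collect leaf paths iteratively.
    result = set()
    stack = [(root, ())]
    while stack:
        node, parts = stack.pop()
        if not node:
            result.add(os.sep.join(parts))
            continue
        for part, child in node.items():
            stack.append((child, parts + (part,)))
    return result

assert shortest_prefix_paths({'/home', '/home/aoik', '/var/log'}) == {'/home', '/var/log'}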
def _collect_leaf_paths(self, node, path_parts, leaf_paths):
    """
    Collect paths of leaf nodes.

    :param node: Starting node. Type is dict. Key is child node's path part.
        Value is child node.

    :param path_parts: The starting node's path parts. Type is tuple.

    :param leaf_paths: Set to collect leaf paths into.

    :return: None.
    """
    # If the node is leaf node
    if not node:
        # Get node path.
        # Join with `os.path.sep` to match the split in `_find_short_paths`.
        node_path = os.path.sep.join(path_parts)

        # Add to set
        leaf_paths.add(node_path)

    # If the node is not leaf node
    else:
        # For each child node
        for child_path_part, child_node in node.items():
            # Get the child node's path parts
            child_path_part_s = path_parts + (child_path_part,)

            # Visit the child node
            self._collect_leaf_paths(
                node=child_node,
                path_parts=child_path_part_s,
                leaf_paths=leaf_paths,
            )
Collect paths of leaf nodes. :param node: Starting node. Type is dict. Key is child node's path part. Value is child node. :param path_parts: The starting node's path parts. Type is tuple. :param leaf_paths: Set to collect leaf paths into. :return: None.
entailment
def dispatch(self, event): """ Dispatch file system event. Callback called when there is a file system event. Hooked at 2KGRW. This function overrides `FileSystemEventHandler.dispatch`. :param event: File system event object. :return: None. """ # Get file path file_path = event.src_path # If the file path is in extra paths if file_path in self._extra_paths: # Call `reload` self.reload() # If the file path ends with `.pyc` or `.pyo` if file_path.endswith(('.pyc', '.pyo')): # Get `.py` file path file_path = file_path[:-1] # If the file path ends with `.py` if file_path.endswith('.py'): # Get the file's directory path file_dir = os.path.dirname(file_path) # If the file's directory path starts with any of the watch paths if file_dir.startswith(tuple(self._watch_paths)): # Call `reload` self.reload()
Dispatch file system event. Callback called when there is a file system event. Hooked at 2KGRW. This function overrides `FileSystemEventHandler.dispatch`. :param event: File system event object. :return: None.
entailment
def reload(self): """ Reload the program. :return: None. """ # Get reload mode reload_mode = self._reload_mode # If reload mode is `exec` if self._reload_mode == self.RELOAD_MODE_V_EXEC: # Call `reload_using_exec` self.reload_using_exec() # If reload mode is `spawn_exit` elif self._reload_mode == self.RELOAD_MODE_V_SPAWN_EXIT: # Call `reload_using_spawn_exit` self.reload_using_spawn_exit() # If reload mode is `spawn_wait` elif self._reload_mode == self.RELOAD_MODE_V_SPAWN_WAIT: # Call `reload_using_spawn_wait` self.reload_using_spawn_wait() # If reload mode is none of above else: # Get error message error_msg = 'Invalid reload mode: {}.'.format(repr(reload_mode)) # Raise error raise ValueError(error_msg)
Reload the program. :return: None.
entailment
def reload_using_exec(self): """ Reload the program process. :return: None. """ # Create command parts cmd_parts = [sys.executable] + sys.argv # Get env dict copy env_copy = os.environ.copy() # Reload the program process os.execvpe( # Program file path sys.executable, # Command parts cmd_parts, # Env dict env_copy, )
Reload the program process. :return: None.
entailment
def reload_using_spawn_exit(self): """ Spawn a subprocess and exit the current process. :return: None. """ # Create command parts cmd_parts = [sys.executable] + sys.argv # Get env dict copy env_copy = os.environ.copy() # Spawn subprocess subprocess.Popen(cmd_parts, env=env_copy, close_fds=True) # If need force exit if self._force_exit: # Force exit os._exit(0) # pylint: disable=protected-access # If not need force exit else: # Send interrupt to main thread interrupt_main() # Set the flag self._watcher_to_stop = True # Exit the watcher thread sys.exit(0)
Spawn a subprocess and exit the current process. :return: None.
entailment
def reload_using_spawn_wait(self): """ Spawn a subprocess and wait until it finishes. :return: None. """ # Create command parts cmd_parts = [sys.executable] + sys.argv # Get env dict copy env_copy = os.environ.copy() # Send interrupt to main thread interrupt_main() # Spawn subprocess and wait until it finishes subprocess.call(cmd_parts, env=env_copy, close_fds=True) # Exit the watcher thread sys.exit(0)
Spawn a subprocess and wait until it finishes. :return: None.
entailment
def agent(agent_id):
    '''
    Show the information about the given agent.
    '''
    fields = [
        ('ID', 'id'),
        ('Status', 'status'),
        ('Region', 'region'),
        ('First Contact', 'first_contact'),
        ('CPU Usage (%)', 'cpu_cur_pct'),
        ('Used Memory (MiB)', 'mem_cur_bytes'),
        ('Total slots', 'available_slots'),
        ('Occupied slots', 'occupied_slots'),
    ]
    if is_legacy_server():
        # Drop the slot fields, which legacy servers do not report.
        # (The original positional deletes pointed past the end of this list.)
        fields = [field for field in fields
                  if field[1] not in ('available_slots', 'occupied_slots')]
    q = 'query($agent_id:String!) {' \
        '  agent(agent_id:$agent_id) { $fields }' \
        '}'
    q = q.replace('$fields', ' '.join(item[1] for item in fields))
    v = {'agent_id': agent_id}
    with Session() as session:
        try:
            resp = session.Admin.query(q, v)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        info = resp['agent']
        rows = []
        for name, key in fields:
            if key == 'mem_cur_bytes' and info[key] is not None:
                info[key] = round(info[key] / 2 ** 20, 1)
            if key in info:
                rows.append((name, info[key]))
        print(tabulate(rows, headers=('Field', 'Value')))
Show the information about the given agent.
entailment
def agents(status, all):
    '''
    List and manage agents.
    (admin privilege required)
    '''
    fields = [
        ('ID', 'id'),
        ('Status', 'status'),
        ('Region', 'region'),
        ('First Contact', 'first_contact'),
        ('CPU Usage (%)', 'cpu_cur_pct'),
        ('Used Memory (MiB)', 'mem_cur_bytes'),
        ('Total slots', 'available_slots'),
        ('Occupied slots', 'occupied_slots'),
    ]
    if is_legacy_server():
        # Drop the slot fields, which legacy servers do not report.
        # (The original positional deletes pointed past the end of this list.)
        fields = [field for field in fields
                  if field[1] not in ('available_slots', 'occupied_slots')]

    def execute_paginated_query(limit, offset):
        try:
            resp_agents = session.Agent.list_with_limit(
                limit, offset, status, fields=(item[1] for item in fields))
        except Exception as e:
            print_error(e)
            sys.exit(1)
        return resp_agents

    def round_mem(results):
        for item in results:
            if 'mem_cur_bytes' in item and item['mem_cur_bytes'] is not None:
                item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1)
        return results

    def _generate_paginated_results(interval):
        offset = 0
        is_first = True
        total_count = -1
        while True:
            limit = (interval if is_first else
                     min(interval, total_count - offset))
            try:
                result = execute_paginated_query(limit, offset)
            except Exception as e:
                print_error(e)
                sys.exit(1)
            offset += interval
            total_count = result['total_count']
            items = result['items']
            items = round_mem(items)
            table = tabulate((item.values() for item in items),
                             headers=(item[0] for item in fields))
            if is_first:
                is_first = False
            else:
                table_rows = table.split('\n')
                table = '\n'.join(table_rows[2:])
            yield table + '\n'
            if not offset < total_count:
                break

    with Session() as session:
        paginating_interval = 10
        if all:
            click.echo_via_pager(_generate_paginated_results(paginating_interval))
        else:
            result = execute_paginated_query(paginating_interval, offset=0)
            total_count = result['total_count']
            if total_count == 0:
                print('There are no matching agents.')
                return
            items = result['items']
            items = round_mem(items)
            fields = [field for field in fields if field[1] in items[0]]
            print(tabulate((item.values() for item in items),
                           headers=(item[0] for item in fields)))
            if total_count > paginating_interval:
                print("More agents can be displayed by using --all option.")
List and manage agents. (admin privilege required)
entailment
def resource_policy(name):
    """
    Show details about a keypair resource policy. When `name` option is
    omitted, the resource policy for the current access_key will be returned.
    """
    fields = [
        ('Name', 'name'),
        ('Created At', 'created_at'),
        ('Default for Unspecified', 'default_for_unspecified'),
        ('Total Resource Slot', 'total_resource_slots'),
        ('Max Concurrent Sessions', 'max_concurrent_sessions'),
        ('Max Containers per Session', 'max_containers_per_session'),
        ('Max vFolder Count', 'max_vfolder_count'),
        ('Max vFolder Size', 'max_vfolder_size'),
        ('Idle Timeout', 'idle_timeout'),
        ('Allowed vFolder Hosts', 'allowed_vfolder_hosts'),
    ]
    with Session() as session:
        try:
            rp = session.ResourcePolicy(session.config.access_key)
            info = rp.info(name, fields=(item[1] for item in fields))
        except Exception as e:
            print_error(e)
            sys.exit(1)
        rows = []
        if info is None:
            print('No such resource policy.')
            sys.exit(1)
        # Use a distinct loop variable so the `name` parameter is not shadowed.
        for field_name, key in fields:
            rows.append((field_name, info[key]))
        print(tabulate(rows, headers=('Field', 'Value')))
Show details about a keypair resource policy. When `name` option is omitted, the resource policy for the current access_key will be returned.
entailment
def resource_policies(ctx):
    '''
    List and manage resource policies.
    (admin privilege required)
    '''
    if ctx.invoked_subcommand is not None:
        return
    fields = [
        ('Name', 'name'),
        ('Created At', 'created_at'),
        ('Default for Unspecified', 'default_for_unspecified'),
        ('Total Resource Slot', 'total_resource_slots'),
        ('Max Concurrent Sessions', 'max_concurrent_sessions'),
        ('Max Containers per Session', 'max_containers_per_session'),
        ('Max vFolder Count', 'max_vfolder_count'),
        ('Max vFolder Size', 'max_vfolder_size'),
        ('Idle Timeout', 'idle_timeout'),
        ('Allowed vFolder Hosts', 'allowed_vfolder_hosts'),
    ]
    with Session() as session:
        try:
            items = session.ResourcePolicy.list(fields=(item[1] for item in fields))
        except Exception as e:
            print_error(e)
            sys.exit(1)
        if len(items) == 0:
            print('There are no keypair resource policies.')
            return
        print(tabulate((item.values() for item in items),
                       headers=(item[0] for item in fields)))
List and manage resource policies. (admin privilege required)
entailment
def add(name, default_for_unspecified, total_resource_slots, max_concurrent_sessions, max_containers_per_session, max_vfolder_count, max_vfolder_size, idle_timeout, allowed_vfolder_hosts): ''' Add a new keypair resource policy. NAME: NAME of a new keypair resource policy. ''' with Session() as session: try: data = session.ResourcePolicy.create( name, default_for_unspecified=default_for_unspecified, total_resource_slots=total_resource_slots, max_concurrent_sessions=max_concurrent_sessions, max_containers_per_session=max_containers_per_session, max_vfolder_count=max_vfolder_count, max_vfolder_size=max_vfolder_size, idle_timeout=idle_timeout, allowed_vfolder_hosts=allowed_vfolder_hosts, ) except Exception as e: print_error(e) sys.exit(1) if not data['ok']: print_fail('KeyPair Resource Policy creation has failed: {0}' .format(data['msg'])) sys.exit(1) item = data['resource_policy'] print('Keypair resource policy ' + item['name'] + ' is created.')
Add a new keypair resource policy. NAME: NAME of a new keypair resource policy.
entailment
def delete(name): """ Delete a keypair resource policy. NAME: NAME of a keypair resource policy to delete. """ with Session() as session: if input('Are you sure? (y/n): ').lower().strip()[:1] != 'y': print('Canceled.') sys.exit(1) try: data = session.ResourcePolicy.delete(name) except Exception as e: print_error(e) sys.exit(1) if not data['ok']: print_fail('KeyPair Resource Policy deletion has failed: {0}' .format(data['msg'])) sys.exit(1) print('Resource policy ' + name + ' is deleted.')
Delete a keypair resource policy. NAME: NAME of a keypair resource policy to delete.
entailment
def app(session_id, app, bind, port): """ Run a local proxy to a service provided by Backend.AI compute sessions. The type of proxy depends on the app definition: plain TCP or HTTP. \b SESSID: The compute session ID. APP: The name of service provided by the given session. """ api_session = None runner = None async def app_setup(): nonlocal api_session, runner loop = current_loop() api_session = AsyncSession() # TODO: generalize protocol using service ports metadata protocol = 'http' runner = ProxyRunner(api_session, session_id, app, protocol, bind, port, loop=loop) await runner.ready() print_info( "A local proxy to the application \"{0}\" ".format(app) + "provided by the session \"{0}\" ".format(session_id) + "is available at: {0}://{1}:{2}" .format(protocol, bind, port) ) async def app_shutdown(): nonlocal api_session, runner print_info("Shutting down....") await runner.close() await api_session.close() print_info("The local proxy to \"{}\" has terminated." .format(app)) asyncio_run_forever(app_setup(), app_shutdown(), stop_signals={signal.SIGINT, signal.SIGTERM})
Run a local proxy to a service provided by Backend.AI compute sessions. The type of proxy depends on the app definition: plain TCP or HTTP. \b SESSID: The compute session ID. APP: The name of service provided by the given session.
entailment
def get_env(key: str, default: Any = None, clean: Callable[[str], Any] = lambda v: v): ''' Retrieves a configuration value from the environment variables. The given *key* is uppercased and prefixed by ``"BACKEND_"`` and then ``"SORNA_"`` if the former does not exist. :param key: The key name. :param default: The default value returned when there is no corresponding environment variable. :param clean: A single-argument function that is applied to the result of lookup (in both successes and the default value for failures). The default is returning the value as-is. :returns: The value processed by the *clean* function. ''' key = key.upper() v = os.environ.get('BACKEND_' + key) if v is None: v = os.environ.get('SORNA_' + key) if v is None: if default is None: raise KeyError(key) v = default return clean(v)
Retrieves a configuration value from the environment variables. The given *key* is uppercased and prefixed by ``"BACKEND_"`` and then ``"SORNA_"`` if the former does not exist. :param key: The key name. :param default: The default value returned when there is no corresponding environment variable. :param clean: A single-argument function that is applied to the result of lookup (in both successes and the default value for failures). The default is returning the value as-is. :returns: The value processed by the *clean* function.
entailment
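A usage sketch for get_env(); the environment variable is set inline here purely for demonstration.

import os

os.environ['BACKEND_TIMEOUT'] = '30'
timeout = get_env('timeout', default='10', clean=int)
assert timeout == 30  # found via the BACKEND_ prefix, converted by `clean`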
def logs(sess_id_or_alias):
    '''
    Shows the output logs of a running container.

    \b
    SESSID: Session ID or its alias given when creating the session.
    '''
    with Session() as session:
        try:
            print_wait('Retrieving container logs...')
            kernel = session.Kernel(sess_id_or_alias)
            # Default to empty containers so a missing key does not crash.
            result = kernel.get_logs().get('result', {})
            logs = result.get('logs', '')
            print(logs)
            print_done('End of logs.')
        except Exception as e:
            print_error(e)
            sys.exit(1)
Shows the output logs of a running container. \b SESSID: Session ID or its alias given when creating the session.
entailment
def _wrap_key(function, args, kws): ''' get the key from the function input. ''' return hashlib.md5(pickle.dumps((_from_file(function) + function.__name__, args, kws))).hexdigest()
get the key from the function input.
entailment
def get(key, adapter = MemoryAdapter): ''' get the cache value ''' try: return pickle.loads(adapter().get(key)) except CacheExpiredException: return None
get the cache value
entailment
def set(key, value, timeout = -1, adapter = MemoryAdapter):
    '''
    set a cache value by key; a timeout length must be set
    '''
    if adapter(timeout = timeout).set(key, pickle.dumps(value)):
        return value
    else:
        return None
set a cache value by key; a timeout length must be set
entailment
def wrapcache(timeout = -1, adapter = MemoryAdapter):
    '''
    the decorator to cache a function's return value
    '''
    def _wrapcache(function):
        @wraps(function)
        def __wrapcache(*args, **kws):
            hash_key = _wrap_key(function, args, kws)
            try:
                adapter_instance = adapter()
                return pickle.loads(adapter_instance.get(hash_key))
            except CacheExpiredException:
                # cache missing or expired: recompute and store
                value = function(*args, **kws)
                set(hash_key, value, timeout, adapter)
                return value
        return __wrapcache
    return _wrapcache
the decorator to cache a function's return value
entailment
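A usage sketch for the decorator, assuming the functions above are importable and MemoryAdapter is the default in-memory backend.

import time

@wrapcache(timeout=3)
def slow_square(n):
    time.sleep(1)
    return n * n

print(slow_square(4))  # computed (slow); result cached for 3 seconds
print(slow_square(4))  # served from the cache (fast)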
def export_analytics_data_to_excel(data, output_file_name, result_info_key, identifier_keys): """Creates an Excel file containing data returned by the Analytics API Args: data: Analytics API data as a list of dicts output_file_name: File name for output Excel file (use .xlsx extension). """ workbook = create_excel_workbook(data, result_info_key, identifier_keys) workbook.save(output_file_name) print('Saved Excel file to {}'.format(output_file_name))
Creates an Excel file containing data returned by the Analytics API Args: data: Analytics API data as a list of dicts output_file_name: File name for output Excel file (use .xlsx extension).
entailment
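A hedged invocation sketch; the key names below are illustrative assumptions about the Analytics API data shape, not values confirmed by this document.

analytics_results = [...]  # placeholder: list of dicts from the Analytics API

export_analytics_data_to_excel(
    data=analytics_results,
    output_file_name='analytics.xlsx',
    result_info_key='address_info',           # assumed per-item info key
    identifier_keys=['address', 'zipcode'],   # assumed identifying fields
)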
def export_analytics_data_to_csv(data, output_folder, result_info_key, identifier_keys): """Creates CSV files containing data returned by the Analytics API. Creates one file per requested endpoint and saves it into the specified output_folder Args: data: Analytics API data as a list of dicts output_folder: Path to a folder to save the CSV files into """ workbook = create_excel_workbook(data, result_info_key, identifier_keys) suffix = '.csv' if not os.path.exists(output_folder): os.makedirs(output_folder) for worksheet in workbook.worksheets: file_name = utilities.convert_title_to_snake_case(worksheet.title) file_path = os.path.join(output_folder, file_name + suffix) mode = 'w' if sys.version_info[0] < 3: mode = 'wb' with io.open(file_path, mode) as output_file: csv_writer = csv.writer(output_file) for row in worksheet.rows: csv_writer.writerow([cell.value for cell in row]) print('Saved CSV files to {}'.format(output_folder))
Creates CSV files containing data returned by the Analytics API. Creates one file per requested endpoint and saves it into the specified output_folder Args: data: Analytics API data as a list of dicts output_folder: Path to a folder to save the CSV files into
entailment
def concat_excel_reports(addresses, output_file_name, endpoint, report_type,
                         retry, api_key, api_secret, files_path):
    """Creates an Excel file made by combining the Value Report or Rental Report
       Excel output for the provided addresses.

    Args:
        addresses: A list of (address, zipcode) tuples
        output_file_name: A file name for the Excel output
        endpoint: One of 'value_report' or 'rental_report'
        report_type: One of 'full' or 'summary'
        retry: optional boolean to retry if rate limit is reached
        api_key: optional API Key
        api_secret: optional API Secret
        files_path: Path to save individual files. If None, don't save files
    """
    # create the master workbook to output
    master_workbook = openpyxl.Workbook()

    if api_key is not None and api_secret is not None:
        client = ApiClient(api_key, api_secret)
    else:
        client = ApiClient()

    errors = []

    # for each address, call the API and load the xlsx content in a workbook.
    for index, addr in enumerate(addresses):
        print('Processing {}'.format(addr[0]))

        result = _get_excel_report(
            client, endpoint, addr[0], addr[1], report_type, retry)

        if not result['success']:
            print('Error retrieving report for {}'.format(addr[0]))
            print(result['content'])
            errors.append({'address': addr[0], 'message': result['content']})
            continue

        orig_wb = openpyxl.load_workbook(filename=io.BytesIO(result['content']))

        _save_individual_file(orig_wb, files_path, addr[0])

        # for each worksheet for this address
        # (`sheetnames` replaces the deprecated `get_sheet_names`)
        for sheet_name in orig_wb.sheetnames:
            # if worksheet doesn't exist in master workbook, create it
            if sheet_name in master_workbook.sheetnames:
                master_ws = master_workbook[sheet_name]
            else:
                master_ws = master_workbook.create_sheet(sheet_name)

            # get all the rows in the address worksheet
            orig_rows = orig_wb[sheet_name].rows

            if sheet_name == 'Summary' or sheet_name == 'Chart Data':
                _process_non_standard_sheet(master_ws, orig_rows, addr, index)
                continue

            _process_standard_sheet(master_ws, orig_rows, addr, index)

    # remove the first sheet which will be empty
    master_workbook.remove(master_workbook.worksheets[0])

    # if any errors occurred, write them to an "Errors" worksheet
    if len(errors) > 0:
        errors_sheet = master_workbook.create_sheet('Errors')
        for error_idx, error in enumerate(errors):
            errors_sheet.cell(row=error_idx+1, column=1, value=error['address'])
            errors_sheet.cell(row=error_idx+1, column=2, value=error['message'])

    # save the master workbook to output_file_name.
    # `files_path` may be None, in which case save to the current directory.
    adjust_column_width_workbook(master_workbook)
    if files_path:
        output_file_path = os.path.join(files_path, output_file_name)
    else:
        output_file_path = output_file_name
    master_workbook.save(output_file_path)
    print('Saved output to {}'.format(output_file_path))
Creates an Excel file made by combining the Value Report or Rental Report Excel output for the provided addresses. Args: addresses: A list of (address, zipcode) tuples output_file_name: A file name for the Excel output endpoint: One of 'value_report' or 'rental_report' report_type: One of 'full' or 'summary' retry: optional boolean to retry if rate limit is reached api_key: optional API Key api_secret: optional API Secret files_path: Path to save individual files. If None, don't save files
entailment
def create_excel_workbook(data, result_info_key, identifier_keys): """Calls the analytics_data_excel module to create the Workbook""" workbook = analytics_data_excel.get_excel_workbook(data, result_info_key, identifier_keys) adjust_column_width_workbook(workbook) return workbook
Calls the analytics_data_excel module to create the Workbook
entailment
def adjust_column_width(worksheet):
    """Adjust column width in worksheet.

    Args:
        worksheet: worksheet to be adjusted
    """
    dims = {}
    padding = 1
    for row in worksheet.rows:
        for cell in row:
            if not cell.value:
                continue
            # Key by column letter so the keys match `column_dimensions`
            # (on openpyxl >= 2.6, `cell.column` is an integer index).
            dims[cell.column_letter] = max(
                dims.get(cell.column_letter, 0),
                len(str(cell.value))
            )
    for col, value in list(dims.items()):
        worksheet.column_dimensions[col].width = value + padding
Adjust column width in worksheet. Args: worksheet: worksheet to be adjusted
entailment
def get_ap_info(ipaddress, auth, url):
    """
    Function takes input of ipaddress and issues a RESTful call to HP IMC
    :param ipaddress: The current IP address of the Access Point at time of query.
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: Dictionary object with the details of the target access point
    :rtype: dict

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.wsm.apinfo import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> ap_info = get_ap_info('10.101.0.170',auth.creds, auth.url)
    >>> assert type(ap_info) is dict
    >>> assert len(ap_info) == 20
    >>> assert 'acDevId' in ap_info
    >>> assert 'acIpAddress' in ap_info
    >>> assert 'acLabel' in ap_info
    >>> assert 'apAlias' in ap_info
    >>> assert 'connectType' in ap_info
    >>> assert 'hardwareVersion' in ap_info
    >>> assert 'ipAddress' in ap_info
    >>> assert 'isFit' in ap_info
    >>> assert 'label' in ap_info
    >>> assert 'location' in ap_info
    >>> assert 'locationList' in ap_info
    >>> assert 'macAddress' in ap_info
    >>> assert 'onlineClientCount' in ap_info
    >>> assert 'serialId' in ap_info
    >>> assert 'softwareVersion' in ap_info
    >>> assert 'ssids' in ap_info
    >>> assert 'status' in ap_info
    >>> assert 'sysName' in ap_info
    >>> assert 'type' in ap_info
    """
    get_ap_info_url = ("/imcrs/wlan/apInfo/queryApBasicInfoByCondition?ipAddress=" +
                       str(ipaddress))
    f_url = url + get_ap_info_url
    try:
        # The request is issued inside the try block so that a
        # RequestException can actually be caught here.
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            if len(response.text) > 0:
                return json.loads(response.text)['apBasicInfo']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_ap_info: An error has occurred"
Function takes input of ipaddress and issues a RESTful call to HP IMC :param ipaddress: The current IP address of the Access Point at time of query. :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: Dictionary object with the details of the target access point :rtype: dict >>> from pyhpeimc.auth import * >>> from pyhpeimc.wsm.apinfo import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> ap_info = get_ap_info('10.101.0.170',auth.creds, auth.url) >>> assert type(ap_info) is dict >>> assert len(ap_info) == 20 >>> assert 'acDevId' in ap_info >>> assert 'acIpAddress' in ap_info >>> assert 'acLabel' in ap_info >>> assert 'apAlias' in ap_info >>> assert 'connectType' in ap_info >>> assert 'hardwareVersion' in ap_info >>> assert 'ipAddress' in ap_info >>> assert 'isFit' in ap_info >>> assert 'label' in ap_info >>> assert 'location' in ap_info >>> assert 'locationList' in ap_info >>> assert 'macAddress' in ap_info >>> assert 'onlineClientCount' in ap_info >>> assert 'serialId' in ap_info >>> assert 'softwareVersion' in ap_info >>> assert 'ssids' in ap_info >>> assert 'status' in ap_info >>> assert 'sysName' in ap_info >>> assert 'type' in ap_info
entailment
def sessions(status, access_key, id_only, all):
    '''
    List and manage compute sessions.
    '''
    fields = [
        ('Session ID', 'sess_id'),
    ]
    with Session() as session:
        if is_admin(session):
            fields.append(('Owner', 'access_key'))
    if not id_only:
        fields.extend([
            ('Image', 'image'),
            ('Tag', 'tag'),
            ('Created At', 'created_at',),
            ('Terminated At', 'terminated_at'),
            ('Status', 'status'),
            ('Occupied Resource', 'occupied_slots'),
            ('Used Memory (MiB)', 'mem_cur_bytes'),
            ('Max Used Memory (MiB)', 'mem_max_bytes'),
            ('CPU Using (%)', 'cpu_using'),
        ])
    if is_legacy_server():
        # Drop the 'tag' field, which legacy servers do not provide.
        # (A positional delete is fragile here because the list length
        # depends on the `is_admin` and `id_only` options above.)
        fields = [field for field in fields if field[1] != 'tag']

    def execute_paginated_query(limit, offset):
        q = '''
        query($limit:Int!, $offset:Int!, $ak:String, $status:String) {
          compute_session_list(
              limit:$limit, offset:$offset, access_key:$ak, status:$status) {
            items { $fields }
            total_count
          }
        }'''
        q = textwrap.dedent(q).strip()
        q = q.replace('$fields', ' '.join(item[1] for item in fields))
        v = {
            'limit': limit,
            'offset': offset,
            'status': status if status != 'ALL' else None,
            'ak': access_key,
        }
        try:
            resp = session.Admin.query(q, v)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        return resp['compute_session_list']

    def round_mem(items):
        for item in items:
            if 'mem_cur_bytes' in item:
                item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1)
            if 'mem_max_bytes' in item:
                item['mem_max_bytes'] = round(item['mem_max_bytes'] / 2 ** 20, 1)
        return items

    def _generate_paginated_results(interval):
        offset = 0
        is_first = True
        total_count = -1
        while True:
            limit = (interval if is_first else
                     min(interval, total_count - offset))
            try:
                result = execute_paginated_query(limit, offset)
            except Exception as e:
                print_error(e)
                sys.exit(1)
            offset += interval
            total_count = result['total_count']
            items = result['items']
            items = round_mem(items)
            if id_only:
                yield '\n'.join([item['sess_id'] for item in items]) + '\n'
            else:
                table = tabulate([item.values() for item in items],
                                 headers=(item[0] for item in fields))
                if not is_first:
                    table_rows = table.split('\n')
                    table = '\n'.join(table_rows[2:])
                yield table + '\n'
            if is_first:
                is_first = False
            if not offset < total_count:
                break

    with Session() as session:
        paginating_interval = 10
        if all:
            click.echo_via_pager(_generate_paginated_results(paginating_interval))
        else:
            result = execute_paginated_query(paginating_interval, offset=0)
            total_count = result['total_count']
            if total_count == 0:
                print('There are no compute sessions currently {0}.'
                      .format(status.lower()))
                return
            items = result['items']
            items = round_mem(items)
            if id_only:
                for item in items:
                    print(item['sess_id'])
            else:
                print(tabulate([item.values() for item in items],
                               headers=(item[0] for item in fields)))
            if total_count > paginating_interval:
                print("More sessions can be displayed by using --all option.")
List and manage compute sessions.
entailment
def session(sess_id_or_alias): ''' Show detailed information for a running compute session. SESSID: Session id or its alias. ''' fields = [ ('Session ID', 'sess_id'), ('Role', 'role'), ('Image', 'image'), ('Tag', 'tag'), ('Created At', 'created_at'), ('Terminated At', 'terminated_at'), ('Agent', 'agent'), ('Status', 'status'), ('Status Info', 'status_info'), ('Occupied Resources', 'occupied_slots'), ('CPU Used (ms)', 'cpu_used'), ('Used Memory (MiB)', 'mem_cur_bytes'), ('Max Used Memory (MiB)', 'mem_max_bytes'), ('Number of Queries', 'num_queries'), ('Network RX Bytes', 'net_rx_bytes'), ('Network TX Bytes', 'net_tx_bytes'), ('IO Read Bytes', 'io_read_bytes'), ('IO Write Bytes', 'io_write_bytes'), ('IO Max Scratch Size', 'io_max_scratch_size'), ('IO Current Scratch Size', 'io_cur_scratch_size'), ('CPU Using (%)', 'cpu_using'), ] if is_legacy_server(): del fields[3] q = 'query($sess_id:String) {' \ ' compute_session(sess_id:$sess_id) { $fields }' \ '}' q = q.replace('$fields', ' '.join(item[1] for item in fields)) v = {'sess_id': sess_id_or_alias} with Session() as session: try: resp = session.Admin.query(q, v) except Exception as e: print_error(e) sys.exit(1) if resp['compute_session']['sess_id'] is None: print('There is no such running compute session.') return print('Session detail:\n---------------') for i, value in enumerate(resp['compute_session'].values()): if fields[i][1] in ['mem_cur_bytes', 'mem_max_bytes']: value = round(value / 2 ** 20, 1) print(fields[i][0] + ': ' + str(value))
Show detailed information for a running compute session. SESSID: Session id or its alias.
entailment
def create(cls, endpoint_name, json_body, original_response): """Factory for creating the correct type of Response based on the data. Args: endpoint_name (str) - The endpoint of the request, such as "property/value" json_body - The response body in json format. original_response (response object) - server response returned from an http request. """ if endpoint_name == "property/value_report": return ValueReportResponse(endpoint_name, json_body, original_response) if endpoint_name == "property/rental_report": return RentalReportResponse(endpoint_name, json_body, original_response) prefix = endpoint_name.split("/")[0] if prefix == "block": return BlockResponse(endpoint_name, json_body, original_response) if prefix == "zip": return ZipCodeResponse(endpoint_name, json_body, original_response) if prefix == "msa": return MsaResponse(endpoint_name, json_body, original_response) return PropertyResponse(endpoint_name, json_body, original_response)
Factory for creating the correct type of Response based on the data. Args: endpoint_name (str) - The endpoint of the request, such as "property/value" json_body - The response body in json format. original_response (response object) - server response returned from an http request.
entailment
def get_object_errors(self):
    """Gets the business errors for each of the requested objects that had
    a business error, as a list of dicts mapping the object's string
    representation to its error messages. If there was no error, returns
    an empty list

    Returns:
        List of dicts
    """
    if self._object_errors is None:
        self._object_errors = [{str(o): o.get_errors()}
                               for o in self.objects()
                               if o.has_error()]
    return self._object_errors
Gets the business errors for each of the requested objects that had a business error, as a list of dicts mapping the object's string representation to its error messages. If there was no error, returns an empty list Returns: List of dicts
entailment
def has_object_error(self): """Returns true if any requested object had a business logic error, otherwise returns false Returns: boolean """ if self._has_object_error is None: # scan the objects for any business error codes self._has_object_error = next( (True for o in self.objects() if o.has_error()), False) return self._has_object_error
Returns true if any requested object had a business logic error, otherwise returns false Returns: boolean
entailment
def rate_limits(self): """Returns a list of rate limit details.""" if not self._rate_limits: self._rate_limits = utilities.get_rate_limits(self.response) return self._rate_limits
Returns a list of rate limit details.
entailment
def check_imc_creds(auth, url):
    """Function takes input of an auth object and URL and returns a BOOL of
    TRUE if the authentication was successful.

    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")

    >>> check_imc_creds(auth.creds, auth.url)
    True
    """
    test_url = '/imcrs'
    f_url = url + test_url
    try:
        response = requests.get(f_url, auth=auth, headers=HEADERS,
                                verify=False)
        return bool(response.status_code == 200)
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " check_imc_creds: An error has occurred"
Function takes input of an auth object and URL and returns a BOOL of TRUE if the authentication was successful. >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> check_imc_creds(auth.creds, auth.url) True
entailment
def print_to_file(object_name):
    """
    Function takes in object of type str, list, or dict and prints out to
    current working directory as pyoutput.txt
    :param object_name: object of type str, list, or dict
    :return: No return. Just prints out to file handler and saves to current
    working directory as pyoutput.txt
    """
    with open('pyoutput.txt', 'w') as filehandler:
        # Dump the original `object_name` argument (the original code
        # mistakenly dumped the `object` builtin).
        if isinstance(object_name, (list, dict)):
            output = json.dumps(object_name, indent=4)
        else:
            output = str(object_name)
        filehandler.write(output)
Function takes in object of type str, list, or dict and prints out to current working directory as pyoutput.txt :param object_name: object of type str, list, or dict :return: No return. Just prints out to file handler and saves to current working directory as pyoutput.txt
entailment
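A usage sketch, assuming the (corrected) function above is in scope; it writes pretty-printed JSON to ./pyoutput.txt.

print_to_file({'device': 'switch1', 'status': 'up'})
with open('pyoutput.txt') as fh:
    print(fh.read())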
def get_auth(self):
    """
    This method requests an authentication object from the HPE IMC NMS
    and returns an HTTPDigest Auth Object
    :return: HTTP status code of the authentication test request
    """
    url = self.h_url + self.server + ":" + self.port
    auth = requests.auth.HTTPDigestAuth(self.username, self.password)
    f_url = url + "/imcrs"
    try:
        response = requests.get(f_url, auth=auth, headers=HEADERS, verify=False)
        if response.status_code != 200:  # checks for valid IMC credentials
            print("Error: Your credentials are invalid. Please try again\n")
            set_imc_creds()
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' The IMC server address is invalid. Please try again'
This method requests an authentication object from the HPE IMC NMS and returns an HTTPDigest Auth Object :return: HTTP status code of the authentication test request
entailment
def print_title(title, is_end=False): """ Print title like ``----- {title} -----`` or ``===== {title} =====``. :param title: Title. :param is_end: Whether is end title. End title use ``=`` instead of ``-``. :return: None. """ # If is end title if is_end: # Use `=` sep = '=====' # If is not end title else: # Use `-` sep = '-----' # If is not end title if not is_end: # Print an empty line for visual comfort print_text() # Print the title, e.g. `----- {title} -----` print_text('# {sep} {title} {sep}'.format(title=title, sep=sep))
Print title like ``----- {title} -----`` or ``===== {title} =====``. :param title: Title. :param is_end: Whether is end title. End title use ``=`` instead of ``-``. :return: None.
entailment
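A usage sketch, assuming print_text() simply writes its argument (empty string by default) to stdout.

print_title('Create virtualenv')
# Prints a blank line followed by:
# # ----- Create virtualenv -----
print_title('Create virtualenv', is_end=True)
# Prints:
# # ===== Create virtualenv =====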
def get_full_python_version(): """ Get full Python version. E.g. - `2.7.11.final.0.32bit` - `3.5.1.final.0.64bit` :return: Full Python version. """ # Get version part, e.g. `3.5.1.final.0` version_part = '.'.join(str(x) for x in sys.version_info) # Get integer width, e.g. 32 or 64 int_width = struct.calcsize('P') * 8 # Get integer width part, e.g. `64bit` or `32bit` int_width_part = str(int_width) + 'bit' # Return full Python version return version_part + '.' + int_width_part
Get full Python version. E.g. - `2.7.11.final.0.32bit` - `3.5.1.final.0.64bit` :return: Full Python version.
entailment
def get_python_path(venv_path): """ Get given virtual environment's `python` program path. :param venv_path: Virtual environment directory path. :return: `python` program path. """ # Get `bin` directory path bin_path = get_bin_path(venv_path) # Get `python` program path program_path = os.path.join(bin_path, 'python') # If the platform is Windows if sys.platform.startswith('win'): # Add `.exe` suffix to the `python` program path program_path = program_path + '.exe' # Return the `python` program path return program_path
Get given virtual environment's `python` program path. :param venv_path: Virtual environment directory path. :return: `python` program path.
entailment
def add_options(ctx): """ Add command line options. :return: None. """ # Add option ctx.add_option( '--always', action='store_true', default=False, dest='always', help='whether always run tasks.', ) # Add option ctx.add_option( '--check-import', action='store_true', default=False, dest='check_import', help='whether import module for dirty checking.', ) # Add option ctx.add_option( '--venv', dest='venv', help=( 'virtual environment directory relative path relative to top' ' directory.' ), ) # Add option ctx.add_option( '--venv-add-version', default='1', dest='venv_add_version', # Convert to int so that the value can be used as boolean type=int, metavar='0|1', help=( 'whether add full Python version to virtual environment directory' ' name. E.g. `.py3.5.1.final.0.64bit`. Default is add.' ), ) # Add option ctx.add_option( '--req', default=None, dest='req_path', help='requirements file relative path relative to top directory.', )
Add command line options. :return: None.
entailment
def add_pythonpath(path): """ Prepend given path to environment variable PYTHONPATH. :param path: Path to add to PYTHONPATH. :return: New PYTHONPATH value. """ # Get PYTHONPATH value. Default is empty string. pythonpath = os.environ.setdefault('PYTHONPATH', '') # If given path is not in PYTHONPATH if path not in pythonpath.split(os.pathsep): # Prepend given path to PYTHONPATH pythonpath = os.environ['PYTHONPATH'] = \ (path + os.pathsep + pythonpath) if pythonpath else path # Return new PYTHONPATH value return pythonpath
Prepend given path to environment variable PYTHONPATH. :param path: Path to add to PYTHONPATH. :return: New PYTHONPATH value.
entailment
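A usage sketch: prepending a project's src directory so that child Python processes can import from it.

import os

add_pythonpath(os.path.abspath('src'))
print(os.environ['PYTHONPATH'])  # starts with the absolute path of ./src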
def mark_path(path): """ Wrap given path as relative path relative to top directory. Wrapper object will be handled specially in \ :paramref:`create_cmd_task.parts`. :param path: Relative path relative to top directory. :return: Wrapper object. """ # If given path is not string, # or given path is absolute path. if not isinstance(path, str) or os.path.isabs(path): # Get error message msg = 'Error (2D9ZA): Given path is not relative path: {0}.'.format( path ) # Raise error raise ValueError(msg) # If given path is string, # and given path is not absolute path. # Wrap given path return _ItemWrapper(type='path', item=path)
Wrap given path as relative path relative to top directory. Wrapper object will be handled specially in \ :paramref:`create_cmd_task.parts`. :param path: Relative path relative to top directory. :return: Wrapper object.
entailment
def _mark_target(type, item): """ Wrap given item as input or output target that should be added to task. Wrapper object will be handled specially in \ :paramref:`create_cmd_task.parts`. :param type: Target type. Allowed values: - 'input' - 'output' :param item: Item to mark as input or output target. Allowed values: - Relative path relative to top directory. - Node object. - List of these. :return: Wrapper object. """ # If given type is not valid if type not in ('input', 'output'): # Get error message msg = 'Error (7D74X): Type is not valid: {0}'.format(type) # Raise error raise ValueError(msg) # If given type is valid. # Store given item orig_item = item # If given path is list if isinstance(item, list): # Use it as items list item_s = item # If given path is not list else: # Create items list containing given path item_s = [item] # For the items list's each item for item in item_s: # If the item is string, # and the item is absolute path. if isinstance(item, str) and os.path.isabs(item): # Get error message msg = ( 'Error (5VWOZ): Given path is not relative path: {0}.' ).format(item) # Raise error raise ValueError(msg) # Wrap given item return _ItemWrapper(type=type, item=orig_item)
Wrap given item as input or output target that should be added to task. Wrapper object will be handled specially in \ :paramref:`create_cmd_task.parts`. :param type: Target type. Allowed values: - 'input' - 'output' :param item: Item to mark as input or output target. Allowed values: - Relative path relative to top directory. - Node object. - List of these. :return: Wrapper object.
entailment
def create_node(ctx, path): """ Create node for given relative path. :param ctx: BuildContext object. :param path: Relative path relative to top directory. :return: Created Node. """ # Ensure given context object is BuildContext object _ensure_build_context(ctx) # Get top directory's relative path relative to `wscript` directory top_dir_relpath = os.path.relpath( # Top directory's absolute path ctx.top_dir, # `wscript` directory's absolute path ctx.run_dir, ) # Convert given relative path to be relative to `wscript` directory node_path = os.path.join(top_dir_relpath, path) # Create node using the relative path relative to `wscript` directory node = ctx.path.make_node(node_path) # Return the created node return node
Create node for given relative path. :param ctx: BuildContext object. :param path: Relative path relative to top directory. :return: Created Node.
entailment
def _normalize_items( ctx, items, str_to_node=False, node_to_str=False, allow_task=False, ): """ Normalize given items. Do several things: - Ignore None. - Flatten list. - Unwrap wrapped item in `_ItemWrapper`. :param ctx: BuildContext object. :param items: Items list to normalize. :param str_to_node: Convert string to node. :param node_to_str: Convert node to absolute path. :param allow_task: Whether allow task item. :return: Normalized tuples list. Tuple format is: :: ( normalized_item, # Normalized item. wrapper_type, # Original `_ItemWrapper` type. ) """ # Ensure given context object is BuildContext object _ensure_build_context(ctx) # Normalized tuples list norm_tuple_s = [] # If given items list is empty if not items: # Return empty list return norm_tuple_s # If given items list is not empty. # For given items list's each item for item in items: # If the item is item wrapper if isinstance(item, _ItemWrapper): # Get wrapper type wrapper_type = item.type() # Get real item item = item.item() # If the item is not item wrapper else: # Set wrapper type be None wrapper_type = None # Use the item as real item item = item # If the real item is list if isinstance(item, list): # Use the real item as real items list real_item_s = item # If the real item is not list else: # Create real items list containing the real item real_item_s = [item] # For each real item for real_item in real_item_s: # If the real item is None if real_item is None: # Ignore None continue # If the real item is not None. # If the real item is string elif isinstance(real_item, str): # If need convert string to node if (wrapper_type is not None) or str_to_node: # If the path string is absolute path if os.path.isabs(real_item): # Get error message msg = ( 'Error (7MWU9): Given path is not relative path:' ' {0}.' ).format(real_item) # Raise error raise ValueError(msg) # If the path string is not absolute path. # Create node as normalized item norm_item = create_node(ctx, real_item) # If need convert node to absolute path if node_to_str: # Convert the node to absolute path norm_item = norm_item.abspath() # If not need convert string to node else: # Use the string as normalized item norm_item = real_item # Create normalized tuple norm_tuple = (norm_item, wrapper_type) # If the real item is not string. # If the real item is node elif isinstance(real_item, Node): # If need convert node to absolute path if node_to_str: # Convert the node to absolute path real_item = real_item.abspath() # Create normalized tuple norm_tuple = (real_item, wrapper_type) # If the real item is not node. # If the real item is task elif isinstance(real_item, Task): # If allow task item if allow_task: # Create normalized tuple norm_tuple = (real_item, wrapper_type) # If not allow task item else: # Get error message msg = 'Error (6PVMG): Item type is not valid: {0}.'.format( real_item ) # Raise error raise ValueError(msg) # If the real item is not task. # If the real item is not None, string, node, or task else: # Get error message msg = 'Error (63KUG): Item type is not valid: {0}.'.format( real_item ) # Raise error raise ValueError(msg) # Add the normalized tuple to list norm_tuple_s.append(norm_tuple) # Return the normalized tuples list return norm_tuple_s
Normalize given items. Do several things: - Ignore None. - Flatten list. - Unwrap wrapped item in `_ItemWrapper`. :param ctx: BuildContext object. :param items: Items list to normalize. :param str_to_node: Convert string to node. :param node_to_str: Convert node to absolute path. :param allow_task: Whether allow task item. :return: Normalized tuples list. Tuple format is: :: ( normalized_item, # Normalized item. wrapper_type, # Original `_ItemWrapper` type. )
entailment
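A sketch of what `_normalize_items` yields for mixed input, traced from the code above (hypothetical call; the file names are assumptions, and calling a private helper like this is for illustration only):

def build(ctx):
    # None entries are dropped, the nested list is flattened, and strings
    # become nodes because `str_to_node=True`. `wrapper_type` is None for
    # items that were not wrapped in an `_ItemWrapper`.
    norm_tuple_s = _normalize_items(
        ctx=ctx,
        items=[None, 'setup.py', ['README.rst', None]],
        str_to_node=True,
    )

    # Prints two lines, one for `setup.py` and one for `README.rst`.
    for norm_item, wrapper_type in norm_tuple_s:
        print(norm_item.abspath(), wrapper_type)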
def update_touch_file(
    ctx,
    path,
    check_import=False,
    check_import_module=None,
    check_import_python=None,
    always=False,
):
    """
    Update touch file at given path. Do two things:

    - Create touch file if it does not exist.

    - Update touch file if import checking fails.

    The returned touch file node is used as task's output target for dirty
    checking. Task will run if the touch file changes.

    :param ctx: BuildContext instance.

    :param path: Touch file relative path relative to top directory.

    :param check_import: Whether import module for dirty checking.

    :param check_import_module: Module name to import for dirty checking.

    :param check_import_python: Python program to use for dirty checking.

    :param always: Whether always run.

    :return: A two-item tuple. Tuple format is:
        ::

            (
                touch_file_node,  # Touch file node.
                task_needs_run,   # Whether the task needs to run.
            )
    """
    # Ensure given context object is BuildContext object
    _ensure_build_context(ctx)

    # Print title
    print_title('Update touch file: {}'.format(path))

    # Create touch node
    touch_node = create_node(ctx, path)

    # Whether the task needs to run
    need_run = False

    # If the touch file does not exist,
    # or the `always` flag is on.
    if not touch_node.exists() or always:
        # Set `need_run` flag on
        need_run = True

    # If the touch file exists,
    # and the `always` flag is off.
    else:
        # If need import module for dirty checking,
        # and module name to import is given.
        if check_import and check_import_module:
            # Get import statement.
            # Notice `from` import ensures the imported module is not imported
            # as `__main__` module. And `__name__` exists in any module.
            import_stmt = 'from {} import __name__'.format(check_import_module)

            # Print info
            print_text('Check import: {}'.format(import_stmt))

            # If Python program to check import is not given
            if check_import_python is None:
                # Get error message
                msg = (
                    'Error (3BKFW): Python program to check import is not'
                    ' given.'
                )

                # Raise error
                raise ValueError(msg)

            # If Python program to check import is given.

            # Normalize given Python program path
            check_import_python, _ = _normalize_items(
                ctx=ctx,
                items=[check_import_python],
                # Convert node to absolute path
                node_to_str=True,
            )[0]

            # If the Python program path is not string
            if not isinstance(check_import_python, str):
                # Get error message
                msg = (
                    'Error (39FQE): Given Python program to check import is'
                    ' not string or node: {0}.'
                ).format(check_import_python)

                # Raise error
                raise ValueError(msg)

            # If the Python program path is string.

            # If the Python program path is not absolute path
            if not os.path.isabs(check_import_python):
                # Convert the Python program path to absolute path
                check_import_python = \
                    create_node(ctx, check_import_python).abspath()

            # The Python program path is absolute path now.

            # Get command parts
            cmd_part_s = [
                # Python program absolute path
                check_import_python,

                # Run code
                '-c',

                # Code to run
                import_stmt
            ]

            # Print the command in multi-line format
            print_text(_format_multi_line_command(cmd_part_s))

            #
            try:
                # Run the command
                subprocess.check_output(cmd_part_s)

                # If no error was raised,
                # it means the module can be imported.

                # Set `need_run` flag off.
                need_run = False

            # If an error was raised,
            # it means the module cannot be imported.
            #
            # Notice the program may not exist. So catch general exception.
            except Exception:  # pylint: disable=W0703
                # Set `need_run` flag on
                need_run = True

    # If the task needs to run
    if need_run:
        # If the touch file's parent directory does not exist
        if not touch_node.parent.exists():
            # Create the touch file's parent directory
            touch_node.parent.mkdir()

        # Write current time to the touch file to force content change.
        # This will fail dirty-checking and cause task to run.
        touch_node.write('{0}\n'.format(datetime.utcnow()))

        # Print info
        print_text('Updated.')

    # If the task does not need to run
    else:
        # Print info
        print_text('Skipped.')

    # Print end title
    print_title('Update touch file: {}'.format(path), is_end=True)

    # Return a two-item tuple
    return touch_node, need_run
Update touch file at given path. Do two things:

- Create touch file if it does not exist.

- Update touch file if import checking fails.

The returned touch file node is used as task's output target for dirty
checking. Task will run if the touch file changes.

:param ctx: BuildContext instance.

:param path: Touch file relative path relative to top directory.

:param check_import: Whether import module for dirty checking.

:param check_import_module: Module name to import for dirty checking.

:param check_import_python: Python program to use for dirty checking.

:param always: Whether always run.

:return: A two-item tuple. Tuple format is:
    ::

        (
            touch_file_node,  # Touch file node.
            task_needs_run,   # Whether the task needs to run.
        )
entailment
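A usage sketch for `update_touch_file` (hypothetical: the touch file path and the `pytest` module check are assumptions):

import sys

def build(ctx):
    # The touch file is rewritten whenever importing `pytest` fails, which
    # fails dirty-checking for any task that lists the node as an output
    # target, causing that task to run again.
    touch_node, need_run = update_touch_file(
        ctx=ctx,
        path='.waf_touch/pytest_setup.touch',
        check_import=True,
        check_import_module='pytest',
        check_import_python=sys.executable,  # Absolute path, used as-is.
    )

    if need_run:
        print('`pytest` is missing; the setup task will run.')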
def chain_tasks(tasks): """ Chain given tasks. Set each task to run after its previous task. :param tasks: Tasks list. :return: Given tasks list. """ # If given tasks list is not empty if tasks: # Previous task previous_task = None # For given tasks list's each task for task in tasks: # If the task is not None. # Task can be None to allow code like ``task if _PY2 else None``. if task is not None: # If previous task is not None if previous_task is not None: # Set the task to run after the previous task task.set_run_after(previous_task) # Set the task as previous task for the next task previous_task = task # Return given tasks list. return tasks
Chain given tasks. Set each task to run after its previous task. :param tasks: Tasks list. :return: Given tasks list.
entailment
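A sketch chaining two command tasks so the second runs after the first (hypothetical; assumes `ctx.env.PYTHON` was set at configure time, e.g. by `find_program`):

def build(ctx):
    task_a = create_cmd_task(
        ctx=ctx,
        parts=[ctx.env.PYTHON, '-c', 'print("first")'],
        task_name='task_a',
    )

    task_b = create_cmd_task(
        ctx=ctx,
        parts=[ctx.env.PYTHON, '-c', 'print("second")'],
        task_name='task_b',
    )

    # `None` entries are tolerated, e.g. `task if _PY2 else None`.
    chain_tasks([task_a, None, task_b])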
def create_cmd_task(
    ctx,
    parts,
    inputs=None,
    outputs=None,
    env=None,
    cwd=None,
    task_name=None,
    cache_key=None,
    always=False,
    add_to_group=True,
):
    """
    Create task that runs given command.

    :param ctx: BuildContext object.

    :param parts: Command parts list. Each part can be:

        - **None**: Will be ignored.

        - **String**: Will be used as-is.

        - **Node object**: Will be converted to absolute path.

        - **List of these**: Will be flattened.

        - **Item wrapper of these**: Will be unwrapped. Item marked as input
          or output target will be added to created task.

    :param inputs: Input items list to add to created task. Each item can be:

        - **None**: Will be ignored.

        - **Path**: Relative path relative to top directory. Will be converted
          to node.

        - **Node object**: Will be used as-is.

        - **Task object**: Will set created task to run after the task.

        - **List of these**: Will be flattened.

        - **Item wrapper of these**: Will be unwrapped.

    :param outputs: Output items list to add to created task. Each item can
        be:

        - **None**: Will be ignored.

        - **Path**: Relative path relative to top directory. Will be converted
          to node.

        - **Node object**: Will be used as-is.

        - **Task object**: Will set the task to run after created task.

        - **List of these**: Will be flattened.

        - **Item wrapper of these**: Will be unwrapped.

    :param env: Environment dict. Default is to use the given context object's
        environment dict.

    :param cwd: Working directory in which to run given command. Default is
        top directory.

    :param task_name: Task name for display purpose. Default is to use the
        caller function's name.

    :param cache_key: Task cache key.

    :param always: Whether mark created task as always run.

    :param add_to_group: Whether add created task to given context.

    :return: Created task.

    :notes: It is recommended to refer to files inside project using relative
        paths. This is even required if the files are used as task's input or
        output targets.

        It is not ideal to make relative paths relative to current working
        directory, or relative to `wscript` file, because neither of the two
        choices guarantees a fixed start directory.

        It is not ideal to let `Waf` resolve relative paths either, because
        the start directory used for resolving relative paths varies with
        different types of command context (i.e. OptionsContext,
        ConfigurationContext, BuildContext, and Context).

        It is best to make relative paths relative to project's top directory.
        To do so, first use `Waf`'s pre-defined path variable `top` to locate
        project's top directory (Notice path variable `top`'s value is
        relative to `wscript` file.). Then `aoikwafutil`'s functions like
        `create_cmd_task` will use path variable `top`'s value to resolve
        other relative paths.

        `create_cmd_task` handles relative paths using rules below:

        - Relative paths marked by `mark_path`, `mark_input`, or `mark_output`
          are relative to top directory.

        - Relative paths passed to argument `parts` are used as-is. They are
          relative to the working directory in which to run the command.
          However, they can be marked to be relative to top directory instead.

        - Relative paths passed to argument `inputs` or `outputs` are relative
          to top directory. They need not be marked.
""" # Ensure given context object is BuildContext object _ensure_build_context(ctx) # If task name is not given if not task_name: # Get the caller function's name task_name = inspect.stack()[1][3] # Print title print_title('Create task: {}'.format(task_name)) # If cache key is given if cache_key: # Find cached task cached_task = _TASKS_CACHE.get(cache_key, None) # If have found cached task if cached_task is not None: # Print info print_text('Use cached task `{}`.'.format(cache_key)) # Print end title print_title('Create task: {}'.format(task_name), is_end=True) # Return the cached task return cached_task # If have not found cached task. # Continue to create task. # If cache key is not given. # Continue to create task. # Normalized command parts list norm_part_s = [] # Input nodes list input_node_s = [] # Output nodes list output_node_s = [] # Get the first command part that is program path first_part = parts[0] # If the first command part is list. # Notice the first command part can be list if the value is from `ctx.env`, # e.g `ctx.env.PYTHON`. if isinstance(first_part, list): # Use the first item in the list as the first command part parts[0] = first_part[0] # For given command parts list's each command part for part, wrapper_type in _normalize_items(ctx=ctx, items=parts): # If the command part is string if isinstance(part, str): # Add the string to the normalized list norm_part_s.append(part) # If the command part is node elif isinstance(part, Node): # Add the node's absolute path to the normalized list norm_part_s.append(part.abspath()) # If the wrapper type is `input` if wrapper_type == 'input': # Add the node to the input nodes list input_node_s.append(part) # If the wrapper type is `output` elif wrapper_type == 'output': # Add the node to the output nodes list output_node_s.append(part) # If the wrapper type is not `input` or `output` else: # Do nothing pass # If the command part is not string or node else: # Get error message msg = 'Error (2W9YD): Command part is not valid: {0}.'.format(part) # Raise error raise ValueError(msg) # If environment dict is not given if env is None: # Use given context object's environment dict env = ctx.env # If working directory path is not given if cwd is None: # Set working directory path be top directory absolute path cwd = ctx.top_dir # If the working directory path is not absolute path. if not os.path.isabs(cwd): # Convert the working directory path to absolute path cwd = create_node(ctx, cwd).abspath() # Create task that runs command task = CmdTask( # Context object ctx=ctx, # Command parts list parts=norm_part_s, # Environment dict env=env, # Working directory cwd=cwd, # Task name task_name=task_name, ) # For each input or output item. # Notice the code structure for handling input and output items are the # same so use the same code with different variables to avoid duplicate # code. 
for wrapper_type, item_s, node_s, nodes_set_func in [ ('input', inputs, input_node_s, task.set_inputs), ('output', outputs, output_node_s, task.set_outputs), ]: # For each normalized item for item, _ in _normalize_items( # Context ctx=ctx, # Items to normalize items=item_s, # Convert string to node str_to_node=True, # Allow task item allow_task=True, ): # If the item is node if isinstance(item, Node): # Add the node to the nodes list node_s.append(item) # If the item is task elif isinstance(item, Task): # If the wrapper type is `input` if wrapper_type == 'input': # Set the created task to run after the task task.set_run_after(item) # If the wrapper type is `output` elif wrapper_type == 'output': # Set the task to run after the created task item.set_run_after(task) # If the wrapper type is not `input` or `output` else: # Get error message msg = ( 'Error (3ZLGJ): Wrapper type is not valid: {0}.' ).format(wrapper_type) # Raise error raise ValueError(msg) # If the item is not node or task else: # Get error message msg = 'Error (5H4GC): Item type is not valid: {0}.'.format( item ) # Raise error raise ValueError(msg) # Add these nodes to the created task as input or output targets nodes_set_func(node_s) # If need mark the created task as always run if always: # Mark the created task as always run task.always_run = True # pylint: disable=W0201 # If need add the created task to given context if add_to_group: # Add the created task to given context ctx.add_to_group(task) # If cache key is given if cache_key: # Add the created task to the cache _TASKS_CACHE[cache_key] = task # Print end title print_title('Create task: {}'.format(task_name), is_end=True) # Return the task return task
Create task that runs given command.

:param ctx: BuildContext object.

:param parts: Command parts list. Each part can be:

    - **None**: Will be ignored.

    - **String**: Will be used as-is.

    - **Node object**: Will be converted to absolute path.

    - **List of these**: Will be flattened.

    - **Item wrapper of these**: Will be unwrapped. Item marked as input
      or output target will be added to created task.

:param inputs: Input items list to add to created task. Each item can be:

    - **None**: Will be ignored.

    - **Path**: Relative path relative to top directory. Will be converted
      to node.

    - **Node object**: Will be used as-is.

    - **Task object**: Will set created task to run after the task.

    - **List of these**: Will be flattened.

    - **Item wrapper of these**: Will be unwrapped.

:param outputs: Output items list to add to created task. Each item can be:

    - **None**: Will be ignored.

    - **Path**: Relative path relative to top directory. Will be converted
      to node.

    - **Node object**: Will be used as-is.

    - **Task object**: Will set the task to run after created task.

    - **List of these**: Will be flattened.

    - **Item wrapper of these**: Will be unwrapped.

:param env: Environment dict. Default is to use the given context object's
    environment dict.

:param cwd: Working directory in which to run given command. Default is top
    directory.

:param task_name: Task name for display purpose. Default is to use the caller
    function's name.

:param cache_key: Task cache key.

:param always: Whether mark created task as always run.

:param add_to_group: Whether add created task to given context.

:return: Created task.

:notes: It is recommended to refer to files inside project using relative
    paths. This is even required if the files are used as task's input or
    output targets.

    It is not ideal to make relative paths relative to current working
    directory, or relative to `wscript` file, because neither of the two
    choices guarantees a fixed start directory.

    It is not ideal to let `Waf` resolve relative paths either, because the
    start directory used for resolving relative paths varies with different
    types of command context (i.e. OptionsContext, ConfigurationContext,
    BuildContext, and Context).

    It is best to make relative paths relative to project's top directory. To
    do so, first use `Waf`'s pre-defined path variable `top` to locate
    project's top directory (Notice path variable `top`'s value is relative
    to `wscript` file.). Then `aoikwafutil`'s functions like `create_cmd_task`
    will use path variable `top`'s value to resolve other relative paths.

    `create_cmd_task` handles relative paths using rules below:

    - Relative paths marked by `mark_path`, `mark_input`, or `mark_output`
      are relative to top directory.

    - Relative paths passed to argument `parts` are used as-is. They are
      relative to the working directory in which to run the command. However,
      they can be marked to be relative to top directory instead.

    - Relative paths passed to argument `inputs` or `outputs` are relative to
      top directory. They need not be marked.
entailment
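A usage sketch for `create_cmd_task` (hypothetical flake8 run; `mark_input` is the marker helper referred to in the notes above, and the `setup.cfg` and `src` paths are assumptions):

def build(ctx):
    # Run flake8 over `src`. Marking `setup.cfg` as an input target makes
    # the task rerun whenever that config file changes.
    create_cmd_task(
        ctx=ctx,
        parts=[
            ctx.env.PYTHON,              # May be a list; handled above.
            '-m',
            'flake8',
            '--config',
            mark_input('setup.cfg'),     # Relative to the top directory.
            'src',                       # Relative to the working directory.
        ],
        task_name='check_flake8',
    )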
def build_ctx(pythonpath=None): """ Decorator that makes decorated function use BuildContext instead of \ Context instance. BuildContext instance has more methods. :param pythonpath: Path or list of paths to add to environment variable PYTHONPATH. Each path can be absolute path, or relative path relative to top directory. Notice if this decorator is used without arguments, argument `pythonpath` is the decorated function. :return: Two situations: - If decorator arguments are given, return no-argument decorator. - If decorator arguments are not given, return wrapper function. """ # If argument `pythonpath` is string if isinstance(pythonpath, str): # Create paths list containing the string path_s = [pythonpath] # If argument `pythonpath` is list elif isinstance(pythonpath, list): # Use the list as paths list path_s = pythonpath # If argument `pythonpath` is not string or list, # it means the decorator is used without arguments. else: # Set paths list be None path_s = None # Create no-argument decorator def _noarg_decorator(func): """ No-argument decorator. :param func: Decorated function. :return: Wrapper function. """ # Create BuildContext subclass class _BuildContext(BuildContext): # Set command name for the context class cmd = func.__name__ # Set function name for the context class fun = func.__name__ # Create wrapper function @wraps(func) def _new_func(ctx, *args, **kwargs): """ Wrapper function. :param ctx: BuildContext object. :param \\*args: Other arguments passed to decorated function. :param \\*\\*kwargs: Other keyword arguments passed to decorated function. :return: Decorated function's call result. """ # If paths list is not empty if path_s: # For each path for path in path_s: # If the path is absolute path if os.path.isabs(path): # Use the path as absolute path abs_path = path # If the path is not absolute path, # it means relative path relative to top directory. else: # Create path node path_node = create_node(ctx, path) # Get absolute path abs_path = path_node.abspath() # Add the absolute path to environment variable PYTHONPATH add_pythonpath(abs_path) # Call the decorated function result = func(ctx, *args, **kwargs) # Return the call result return result # Store the created context class with the wrapper function _new_func._context_class = _BuildContext # pylint: disable=W0212 # Return the wrapper function return _new_func # If decorator arguments are given if path_s is not None: # Return no-argument decorator return _noarg_decorator # If decorator arguments are not given else: # Argument `pythonpath` is the decorated function _func = pythonpath # Call the no-argument decorator to create wrapper function wrapper_func = _noarg_decorator(_func) # Return the wrapper function return wrapper_func
Decorator that makes decorated function use BuildContext instead of \ Context instance. BuildContext instance has more methods. :param pythonpath: Path or list of paths to add to environment variable PYTHONPATH. Each path can be absolute path, or relative path relative to top directory. Notice if this decorator is used without arguments, argument `pythonpath` is the decorated function. :return: Two situations: - If decorator arguments are given, return no-argument decorator. - If decorator arguments are not given, return wrapper function.
entailment
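Both decorator forms of `build_ctx`, sketched for a hypothetical `wscript` (the `src` path and the command bodies are assumptions):

# With arguments: `src` is added to PYTHONPATH before the body runs, and
# `waf run_tests` becomes a runnable command.
@build_ctx(pythonpath='src')
def run_tests(ctx):
    create_cmd_task(
        ctx=ctx,
        parts=[ctx.env.PYTHON, '-m', 'pytest', 'tests'],
        task_name='run_tests',
    )

# Without arguments: the decorated function itself is passed in as
# `pythonpath`, and no PYTHONPATH entries are added.
@build_ctx
def show_ctx(ctx):
    print_ctx(ctx)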
def config_ctx(func): """ Decorator that makes decorated function use ConfigurationContext instead \ of Context instance. :param func: Decorated function. :return: Decorated function. """ # Create ConfigurationContext subclass class _ConfigurationContext(ConfigurationContext): # Set command name for the context class cmd = func.__name__ # Set function name for the context class fun = func.__name__ # Store the created context class with the decorated function func._context_class = _ConfigurationContext # pylint: disable=W0212 # Return the decorated function return func
Decorator that makes decorated function use ConfigurationContext instead \ of Context instance. :param func: Decorated function. :return: Decorated function.
entailment
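A sketch of `config_ctx` turning a function into a configuration command (`find_program` is standard Waf ConfigurationContext API; the command name is hypothetical):

@config_ctx
def configure_python(ctx):
    # Runs with a ConfigurationContext, so configuration helpers such as
    # `find_program` are available. The result is stored in `ctx.env.PYTHON`.
    ctx.find_program('python', var='PYTHON')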
def print_ctx(ctx):
    """
    Print given context's info.

    :param ctx: Context object.

    :return: None.
    """
    # Print title
    print_title('ctx attributes')

    # Print context object's attributes
    print_text(dir(ctx))

    # Print end title
    print_title('ctx attributes', is_end=True)

    # Print title
    print_title('ctx.options')

    # Print context options dict
    print_text(pformat(vars(ctx.options), indent=4, width=1))

    # Print end title
    print_title('ctx.options', is_end=True)

    # If the context object has `env` attribute.
    # Notice a plain context object does not have an `env` attribute.
    if hasattr(ctx, 'env'):
        # Print title
        print_title('ctx.env')

        # Print context environment variables dict
        print_text(pformat(dict(ctx.env), indent=4, width=1))

        # Print end title
        print_title('ctx.env', is_end=True)
Print given context's info. :param ctx: Context object. :return: None.
entailment
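A sketch wiring `print_ctx` into a debug command (hypothetical command name; `waf dump_ctx` would then print the three sections):

@build_ctx
def dump_ctx(ctx):
    # Dump the context's attributes, parsed options, and, for context types
    # that have one, the environment dict.
    print_ctx(ctx)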