def map_vals(func, dict_): """ applies a function to each of the values in a dictionary Args: func (callable): a function or indexable object dict_ (dict): a dictionary Returns: newdict: transformed dictionary CommandLine: python -m ubelt.util_dict map_vals Example: >>> import ubelt as ub >>> dict_ = {'a': [1, 2, 3], 'b': []} >>> func = len >>> newdict = ub.map_vals(func, dict_) >>> assert newdict == {'a': 3, 'b': 0} >>> print(newdict) >>> # Can also use indexables as `func` >>> dict_ = {'a': 0, 'b': 1} >>> func = [42, 21] >>> newdict = ub.map_vals(func, dict_) >>> assert newdict == {'a': 42, 'b': 21} >>> print(newdict) """ if not hasattr(func, '__call__'): func = func.__getitem__ keyval_list = [(key, func(val)) for key, val in six.iteritems(dict_)] dictclass = OrderedDict if isinstance(dict_, OrderedDict) else dict newdict = dictclass(keyval_list) # newdict = type(dict_)(keyval_list) return newdict
def invert_dict(dict_, unique_vals=True): r""" Swaps the keys and values in a dictionary. Args: dict_ (dict): dictionary to invert unique_vals (bool): if False, inverted keys are returned in a set. The default is True. Returns: dict: inverted Notes: The values must be hashable. If the original dictionary contains duplicate values, then only one of the corresponding keys will be returned and the others will be discarded. This can be prevented by setting `unique_vals=False`, causing the inverted keys to be returned in a set. CommandLine: python -m ubelt.util_dict invert_dict Example: >>> import ubelt as ub >>> dict_ = {'a': 1, 'b': 2} >>> inverted = ub.invert_dict(dict_) >>> assert inverted == {1: 'a', 2: 'b'} Example: >>> import ubelt as ub >>> dict_ = ub.odict([(2, 'a'), (1, 'b'), (0, 'c'), (None, 'd')]) >>> inverted = ub.invert_dict(dict_) >>> assert list(inverted.keys())[0] == 'a' Example: >>> import ubelt as ub >>> dict_ = {'a': 1, 'b': 0, 'c': 0, 'd': 0, 'f': 2} >>> inverted = ub.invert_dict(dict_, unique_vals=False) >>> assert inverted == {0: {'b', 'c', 'd'}, 1: {'a'}, 2: {'f'}} """ if unique_vals: if isinstance(dict_, OrderedDict): inverted = OrderedDict((val, key) for key, val in dict_.items()) else: inverted = {val: key for key, val in dict_.items()} else: # Handle non-unique keys using groups inverted = defaultdict(set) for key, value in dict_.items(): inverted[value].add(key) inverted = dict(inverted) return inverted
def to_dict(self): """ Recursively casts an AutoDict into a regular dictionary. All nested AutoDict values are also converted. Returns: dict: a copy of this dict without autovivification Example: >>> from ubelt.util_dict import AutoDict >>> auto = AutoDict() >>> auto[1] = 1 >>> auto['n1'] = AutoDict() >>> static = auto.to_dict() >>> assert not isinstance(static, AutoDict) >>> assert not isinstance(static['n1'], AutoDict) """ return self._base( (key, (value.to_dict() if isinstance(value, AutoDict) else value)) for key, value in self.items())
def _win32_can_symlink(verbose=0, force=0, testing=0): """ CommandLine: python -m ubelt._win32_links _win32_can_symlink Example: >>> # xdoc: +REQUIRES(WIN32) >>> import ubelt as ub >>> _win32_can_symlink(verbose=1, force=1, testing=1) """ global __win32_can_symlink__ if verbose: print('__win32_can_symlink__ = {!r}'.format(__win32_can_symlink__)) if __win32_can_symlink__ is not None and not force: return __win32_can_symlink__ from ubelt import util_platform tempdir = util_platform.ensure_app_cache_dir('ubelt', '_win32_can_symlink') util_io.delete(tempdir) util_path.ensuredir(tempdir) dpath = join(tempdir, 'dpath') fpath = join(tempdir, 'fpath.txt') dlink = join(tempdir, 'dlink') flink = join(tempdir, 'flink.txt') util_path.ensuredir(dpath) util_io.touch(fpath) # Add broken variants of the links for testing purposes # Its ugly, but so is all this windows code. if testing: broken_dpath = join(tempdir, 'broken_dpath') broken_fpath = join(tempdir, 'broken_fpath.txt') # Create files that we will delete after we link to them util_path.ensuredir(broken_dpath) util_io.touch(broken_fpath) try: _win32_symlink(dpath, dlink) if testing: _win32_symlink(broken_dpath, join(tempdir, 'broken_dlink')) can_symlink_directories = os.path.islink(dlink) except OSError: can_symlink_directories = False if verbose: print('can_symlink_directories = {!r}'.format(can_symlink_directories)) try: _win32_symlink(fpath, flink) if testing: _win32_symlink(broken_fpath, join(tempdir, 'broken_flink')) can_symlink_files = os.path.islink(flink) # os.path.islink(flink) except OSError: can_symlink_files = False if verbose: print('can_symlink_files = {!r}'.format(can_symlink_files)) assert int(can_symlink_directories) + int(can_symlink_files) != 1, ( 'can do one but not both. Unexpected {} {}'.format( can_symlink_directories, can_symlink_files)) try: # test that we can create junctions, even if symlinks are disabled djunc = _win32_junction(dpath, join(tempdir, 'djunc')) fjunc = _win32_junction(fpath, join(tempdir, 'fjunc.txt')) if testing: _win32_junction(broken_dpath, join(tempdir, 'broken_djunc')) _win32_junction(broken_fpath, join(tempdir, 'broken_fjunc.txt')) assert _win32_is_junction(djunc) assert _win32_is_hardlinked(fpath, fjunc) except Exception: warnings.warn('We cannot create junctions either!') raise if testing: # break the links util_io.delete(broken_dpath) util_io.delete(broken_fpath) if verbose: from ubelt import util_links util_links._dirstats(tempdir) try: # Cleanup the test directory util_io.delete(tempdir) except Exception: print('ERROR IN DELETE') from ubelt import util_links util_links._dirstats(tempdir) raise can_symlink = can_symlink_directories and can_symlink_files __win32_can_symlink__ = can_symlink if not can_symlink: warnings.warn('Cannot make real symlink. Falling back to junction') if verbose: print('can_symlink = {!r}'.format(can_symlink)) print('__win32_can_symlink__ = {!r}'.format(__win32_can_symlink__)) return can_symlink
def _symlink(path, link, overwrite=0, verbose=0): """ Windows helper for ub.symlink """ if exists(link) and not os.path.islink(link): # On windows a broken link might still exist as a hard link or a # junction. Overwrite it if it is a file and we cannot symlink. # However, if it is a non-junction directory then do not overwrite if verbose: print('link location already exists') is_junc = _win32_is_junction(link) # NOTE: # in python2 broken junctions are directories and exist # in python3 broken junctions are directories and do not exist if os.path.isdir(link): if is_junc: pointed = _win32_read_junction(link) if path == pointed: if verbose: print('...and is a junction that points to the same place') return link else: if verbose: if not exists(pointed): print('...and is a broken junction that points somewhere else') else: print('...and is a junction that points somewhere else') else: if verbose: print('...and is an existing real directory!') raise IOError('Cannot overwrite a real directory') elif os.path.isfile(link): if _win32_is_hardlinked(link, path): if verbose: print('...and is a hard link that points to the same place') return link else: if verbose: print('...and is a hard link that points somewhere else') if _win32_can_symlink(): raise IOError('Cannot overwrite potentially real file if we can symlink') if overwrite: if verbose: print('...overwriting') util_io.delete(link, verbose > 1) else: if exists(link): raise IOError('Link already exists') _win32_symlink2(path, link, verbose=verbose)
def _win32_symlink2(path, link, allow_fallback=True, verbose=0): """ Perform a real symbolic link if possible. However, on most versions of windows you need special privileges to create a real symlink. Therefore, we try to create a symlink, but if that fails we fall back to using a junction. AFAIK, the main difference between symlinks and junctions is that symlinks can reference relative or absolute paths, whereas junctions always reference absolute paths. Not 100% on this though. Windows is weird. Note that junctions will not register as links via `islink`, but I believe real symlinks will. """ if _win32_can_symlink(): return _win32_symlink(path, link, verbose) else: return _win32_junction(path, link, verbose)
def _win32_symlink(path, link, verbose=0): """ Creates real symlink. This will only work in versions greater than Windows Vista. Creating real symlinks requires admin permissions or at least specially enabled symlink permissions. On Windows 10 enabling developer mode should give you these permissions. """ from ubelt import util_cmd if os.path.isdir(path): # directory symbolic link if verbose: print('... as directory symlink') command = 'mklink /D "{}" "{}"'.format(link, path) # Using the win32 API seems to result in privilege errors # but using shell commands does not have this problem. Weird. # jwfs.symlink(path, link, target_is_directory=True) # TODO: what do we need to do to use the windows api instead of shell? else: # file symbolic link if verbose: print('... as file symlink') command = 'mklink "{}" "{}"'.format(link, path) if command is not None: info = util_cmd.cmd(command, shell=True) if info['ret'] != 0: from ubelt import util_format permission_msg = 'You do not have sufficient privledges' if permission_msg not in info['err']: print('Failed command:') print(info['command']) print(util_format.repr2(info, nl=1)) raise OSError(str(info)) return link
def _win32_junction(path, link, verbose=0): """ On older (pre 10) versions of windows we need admin privledges to make symlinks, however junctions seem to work. For paths we do a junction (softlink) and for files we use a hard link CommandLine: python -m ubelt._win32_links _win32_junction Example: >>> # xdoc: +REQUIRES(WIN32) >>> import ubelt as ub >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction') >>> ub.delete(root) >>> ub.ensuredir(root) >>> fpath = join(root, 'fpath.txt') >>> dpath = join(root, 'dpath') >>> fjunc = join(root, 'fjunc.txt') >>> djunc = join(root, 'djunc') >>> ub.touch(fpath) >>> ub.ensuredir(dpath) >>> ub.ensuredir(join(root, 'djunc_fake')) >>> ub.ensuredir(join(root, 'djunc_fake with space')) >>> ub.touch(join(root, 'djunc_fake with space file')) >>> _win32_junction(fpath, fjunc) >>> _win32_junction(dpath, djunc) >>> # thank god colons are not allowed >>> djunc2 = join(root, 'djunc2 [with pathological attrs]') >>> _win32_junction(dpath, djunc2) >>> _win32_is_junction(djunc) >>> ub.writeto(join(djunc, 'afile.txt'), 'foo') >>> assert ub.readfrom(join(dpath, 'afile.txt')) == 'foo' >>> ub.writeto(fjunc, 'foo') """ # junctions store absolute paths path = os.path.abspath(path) link = os.path.abspath(link) from ubelt import util_cmd if os.path.isdir(path): # try using a junction (soft link) if verbose: print('... as soft link') # TODO: what is the windows api for this? command = 'mklink /J "{}" "{}"'.format(link, path) else: # try using a hard link if verbose: print('... as hard link') # command = 'mklink /H "{}" "{}"'.format(link, path) try: jwfs.link(path, link) # this seems to be allowed except Exception: print('Failed to hardlink link={} to path={}'.format(link, path)) raise command = None if command is not None: info = util_cmd.cmd(command, shell=True) if info['ret'] != 0: from ubelt import util_format print('Failed command:') print(info['command']) print(util_format.repr2(info, nl=1)) raise OSError(str(info)) return link
def _win32_is_junction(path): """ Determines if a path is a win32 junction CommandLine: python -m ubelt._win32_links _win32_is_junction Example: >>> # xdoc: +REQUIRES(WIN32) >>> import ubelt as ub >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction') >>> ub.delete(root) >>> ub.ensuredir(root) >>> dpath = join(root, 'dpath') >>> djunc = join(root, 'djunc') >>> ub.ensuredir(dpath) >>> _win32_junction(dpath, djunc) >>> assert _win32_is_junction(djunc) is True >>> assert _win32_is_junction(dpath) is False >>> assert _win32_is_junction('notafile') is False """ if not exists(path): if os.path.isdir(path): if not os.path.islink(path): return True return False return jwfs.is_reparse_point(path) and not os.path.islink(path)
def _win32_read_junction(path): """ Returns the location that the junction points, raises ValueError if path is not a junction. CommandLine: python -m ubelt._win32_links _win32_read_junction Example: >>> # xdoc: +REQUIRES(WIN32) >>> import ubelt as ub >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction') >>> ub.delete(root) >>> ub.ensuredir(root) >>> dpath = join(root, 'dpath') >>> djunc = join(root, 'djunc') >>> ub.ensuredir(dpath) >>> _win32_junction(dpath, djunc) >>> path = djunc >>> pointed = _win32_read_junction(path) >>> print('pointed = {!r}'.format(pointed)) """ if not jwfs.is_reparse_point(path): raise ValueError('not a junction') # --- Older version based on using shell commands --- # if not exists(path): # if six.PY2: # raise OSError('Cannot find path={}'.format(path)) # else: # raise FileNotFoundError('Cannot find path={}'.format(path)) # target_name = os.path.basename(path) # for type_or_size, name, pointed in _win32_dir(path, '*'): # if type_or_size == '<JUNCTION>' and name == target_name: # return pointed # raise ValueError('not a junction') # new version using the windows api handle = jwfs.api.CreateFile( path, 0, 0, None, jwfs.api.OPEN_EXISTING, jwfs.api.FILE_FLAG_OPEN_REPARSE_POINT | jwfs.api.FILE_FLAG_BACKUP_SEMANTICS, None) if handle == jwfs.api.INVALID_HANDLE_VALUE: raise WindowsError() res = jwfs.reparse.DeviceIoControl( handle, jwfs.api.FSCTL_GET_REPARSE_POINT, None, 10240) bytes = jwfs.create_string_buffer(res) p_rdb = jwfs.cast(bytes, jwfs.POINTER(jwfs.api.REPARSE_DATA_BUFFER)) rdb = p_rdb.contents if rdb.tag not in [2684354563, jwfs.api.IO_REPARSE_TAG_SYMLINK]: raise RuntimeError( "Expected <2684354563 or 2684354572>, but got %d" % rdb.tag) jwfs.handle_nonzero_success(jwfs.api.CloseHandle(handle)) subname = rdb.get_substitute_name() # probably has something to do with long paths, not sure if subname.startswith('?\\'): subname = subname[2:] return subname
def _win32_rmtree(path, verbose=0): """ rmtree for win32 that treats junctions like directory symlinks. The junction removal portion may not be safe on race conditions. There is a known issue that prevents shutil.rmtree from deleting directories with junctions. https://bugs.python.org/issue31226 """ # --- old version using the shell --- # def _rmjunctions(root): # subdirs = [] # for type_or_size, name, pointed in _win32_dir(root): # if type_or_size == '<DIR>': # subdirs.append(name) # elif type_or_size == '<JUNCTION>': # # remove any junctions as we encounter them # # os.unlink(join(root, name)) # os.rmdir(join(root, name)) # # recurse in all real directories # for name in subdirs: # _rmjunctions(join(root, name)) def _rmjunctions(root): subdirs = [] for name in os.listdir(root): current = join(root, name) if os.path.isdir(current): if _win32_is_junction(current): # remove any junctions as we encounter them os.rmdir(current) elif not os.path.islink(current): subdirs.append(current) # recurse in all real directories for subdir in subdirs: _rmjunctions(subdir) if _win32_is_junction(path): if verbose: print('Deleting <JUNCTION> directory="{}"'.format(path)) os.rmdir(path) else: if verbose: print('Deleting directory="{}"'.format(path)) # first remove all junctions _rmjunctions(path) # now we can rmtree as normal import shutil shutil.rmtree(path)
def _win32_is_hardlinked(fpath1, fpath2): """ Test if two hard links point to the same location CommandLine: python -m ubelt._win32_links _win32_is_hardlinked Example: >>> # xdoc: +REQUIRES(WIN32) >>> import ubelt as ub >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_hardlink') >>> ub.delete(root) >>> ub.ensuredir(root) >>> fpath1 = join(root, 'fpath1') >>> fpath2 = join(root, 'fpath2') >>> ub.touch(fpath1) >>> ub.touch(fpath2) >>> fjunc1 = _win32_junction(fpath1, join(root, 'fjunc1')) >>> fjunc2 = _win32_junction(fpath2, join(root, 'fjunc2')) >>> assert _win32_is_hardlinked(fjunc1, fpath1) >>> assert _win32_is_hardlinked(fjunc2, fpath2) >>> assert not _win32_is_hardlinked(fjunc2, fpath1) >>> assert not _win32_is_hardlinked(fjunc1, fpath2) """ # NOTE: jwf.samefile(fpath1, fpath2) seems to behave differently def get_read_handle(fpath): if os.path.isdir(fpath): dwFlagsAndAttributes = jwfs.api.FILE_FLAG_BACKUP_SEMANTICS else: dwFlagsAndAttributes = 0 hFile = jwfs.api.CreateFile(fpath, jwfs.api.GENERIC_READ, jwfs.api.FILE_SHARE_READ, None, jwfs.api.OPEN_EXISTING, dwFlagsAndAttributes, None) return hFile def get_unique_id(hFile): info = jwfs.api.BY_HANDLE_FILE_INFORMATION() res = jwfs.api.GetFileInformationByHandle(hFile, info) jwfs.handle_nonzero_success(res) unique_id = (info.volume_serial_number, info.file_index_high, info.file_index_low) return unique_id hFile1 = get_read_handle(fpath1) hFile2 = get_read_handle(fpath2) try: are_equal = (get_unique_id(hFile1) == get_unique_id(hFile2)) except Exception: raise finally: jwfs.api.CloseHandle(hFile1) jwfs.api.CloseHandle(hFile2) return are_equal
def _win32_dir(path, star=''): """ Using the windows cmd shell to get information about a directory """ from ubelt import util_cmd import re wrapper = 'cmd /S /C "{}"' # the /S will preserve all inner quotes command = 'dir /-C "{}"{}'.format(path, star) wrapped = wrapper.format(command) info = util_cmd.cmd(wrapped, shell=True) if info['ret'] != 0: from ubelt import util_format print('Failed command:') print(info['command']) print(util_format.repr2(info, nl=1)) raise OSError(str(info)) # parse the output of dir to get some info # Remove header and footer lines = info['out'].split('\n')[5:-3] splitter = re.compile('( +)') for line in lines: parts = splitter.split(line) date, sep, time, sep, ampm, sep, type_or_size, sep = parts[:8] name = ''.join(parts[8:]) # if type is a junction then name will also contain the linked loc if name == '.' or name == '..': continue if type_or_size in ['<JUNCTION>', '<SYMLINKD>', '<SYMLINK>']: # colons cannot be in path names, so use that to find where # the name ends pos = name.find(':') bpos = name[:pos].rfind('[') name = name[:bpos - 1] pointed = name[bpos + 1:-1] yield type_or_size, name, pointed else: yield type_or_size, name, None
def parse_generator_doubling(config): """ Returns generators that double with each value returned Config includes optional start value """ start = 1 if 'start' in config: start = int(config['start']) # We cannot simply use start as the variable, because of scoping # limitations def generator(): val = start while(True): yield val val = val * 2 return generator()
def parse(config): """ Parse a contains validator, which takes as the config a simple string to find """ if not isinstance(config, basestring): raise TypeError("Contains input must be a simple string") validator = ContainsValidator() validator.contains_string = config return validator
def retrieve_adjacency_matrix(graph, order_nodes=None, weight=False): """Retrieve the adjacency matrix from the nx.DiGraph or numpy array.""" if isinstance(graph, np.ndarray): return graph elif isinstance(graph, nx.DiGraph): if order_nodes is None: order_nodes = graph.nodes() if not weight: return np.array(nx.adjacency_matrix(graph, order_nodes, weight=None).todense()) else: return np.array(nx.adjacency_matrix(graph, order_nodes).todense()) else: raise TypeError("Only networkx.DiGraph and np.ndarray (adjacency matrixes) are supported.")
def precision_recall(target, prediction, low_confidence_undirected=False): r"""Compute precision-recall statistics for directed graphs. Precision recall statistics are useful to compare algorithms that make predictions with a confidence score. Using these statistics, the performance of an algorithm at a given threshold (confidence score) can be approximated. Area under the precision-recall curve, as well as the coordinates of the precision recall curve are computed, using the scikit-learn library tools. Note that unlike the AUROC metric, this metric does not account for class imbalance. Precision is defined by: :math:`Pr=tp/(tp+fp)` and denotes the fraction of predicted edges that are correct at a given confidence threshold. On the other hand, Recall is defined by: :math:`Re=tp/(tp+fn)` and denotes the fraction of true edges that are recovered at that threshold. Args: target (numpy.ndarray or networkx.DiGraph): Target graph, must be of ones and zeros. prediction (numpy.ndarray or networkx.DiGraph): Prediction made by the algorithm to evaluate. low_confidence_undirected: Put the lowest confidence possible to undirected edges (edges that are symmetric in the confidence score). Default: False Returns: tuple: tuple containing: + Area under the precision recall curve (float) + Tuple of data points of the precision-recall curve used in the computation of the score (tuple). Examples: >>> import numpy as np >>> tar, pred = np.random.randint(2, size=(10, 10)), np.random.randn(10, 10) >>> # adjacency matrices of size 10x10 >>> aupr, curve = precision_recall(tar, pred) >>> # leave low_confidence_undirected at False as the predictions are continuous """ true_labels = retrieve_adjacency_matrix(target) pred = retrieve_adjacency_matrix(prediction, target.nodes() if isinstance(target, nx.DiGraph) else None, weight=True) if low_confidence_undirected: # Take account of undirected edges by putting them with low confidence pred[pred==pred.transpose()] *= min(min(pred[np.nonzero(pred)])*.5, .1) precision, recall, _ = precision_recall_curve( true_labels.ravel(), pred.ravel()) aupr = auc(recall, precision, reorder=True) return aupr, list(zip(precision, recall))
def SHD(target, pred, double_for_anticausal=True): r"""Compute the Structural Hamming Distance. The Structural Hamming Distance (SHD) is a standard distance to compare graphs by their adjacency matrix. It consists in computing the difference between the two (binary) adjacency matrices: every edge that is missing or superfluous with respect to the target graph is counted as a mistake. Note that for directed graphs, a wrongly oriented edge can be counted as two mistakes, as the edge in the wrong direction is spurious and the edge in the correct direction is missing; the `double_for_anticausal` argument accounts for this. Setting it to `False` will count this as a single mistake. Args: target (numpy.ndarray or networkx.DiGraph): Target graph, must be of ones and zeros. pred (numpy.ndarray or networkx.DiGraph): Prediction made by the algorithm to evaluate. double_for_anticausal (bool): Count the badly oriented edges as two mistakes. Default: True Returns: int: Structural Hamming Distance (int). The value tends to zero as the graphs tend to be identical. Examples: >>> from numpy.random import randint >>> tar, pred = randint(2, size=(10, 10)), randint(2, size=(10, 10)) >>> SHD(tar, pred, double_for_anticausal=False) """ true_labels = retrieve_adjacency_matrix(target) predictions = retrieve_adjacency_matrix(pred, target.nodes() if isinstance(target, nx.DiGraph) else None) diff = np.abs(true_labels - predictions) if double_for_anticausal: return np.sum(diff) else: diff = diff + diff.transpose() diff[diff > 1] = 1 # Ignoring the double edges. return np.sum(diff)/2
def SID(target, pred): """Compute the Structural Intervention Distance. [R wrapper] The Structural Intervention Distance (SID) is a new distance for graphs introduced by Peters and Bühlmann (2013). This distance was created to account for the shortcomings of the SHD metric in a causal sense. It consists in computing the paths between all pairs of variables and checking whether the causal relationships between the variables are respected. The given graphs have to be DAGs for the SID metric to make sense. Args: target (numpy.ndarray or networkx.DiGraph): Target graph, must be of ones and zeros, and instance of either numpy.ndarray or networkx.DiGraph. Must be a DAG. pred (numpy.ndarray or networkx.DiGraph): Prediction made by the algorithm to evaluate. Must be a DAG. Returns: int: Structural Intervention Distance. The value tends to zero as the graphs tend to be identical. .. note:: Ref: Structural Intervention Distance (SID) for Evaluating Causal Graphs, Jonas Peters, Peter Bühlmann: https://arxiv.org/abs/1306.1043 Examples: >>> import numpy as np >>> from numpy.random import randint >>> tar = np.triu(randint(2, size=(10, 10))) >>> pred = np.triu(randint(2, size=(10, 10))) >>> SID(tar, pred) """ if not RPackages.SID: raise ImportError("SID R package is not available. Please check your installation.") true_labels = retrieve_adjacency_matrix(target) predictions = retrieve_adjacency_matrix(pred, target.nodes() if isinstance(target, nx.DiGraph) else None) os.makedirs('/tmp/cdt_SID/') def retrieve_result(): return np.loadtxt('/tmp/cdt_SID/result.csv') try: np.savetxt('/tmp/cdt_SID/target.csv', true_labels, delimiter=',') np.savetxt('/tmp/cdt_SID/pred.csv', predictions, delimiter=',') sid_score = launch_R_script("{}/R_templates/sid.R".format(os.path.dirname(os.path.realpath(__file__))), {"{target}": '/tmp/cdt_SID/target.csv', "{prediction}": '/tmp/cdt_SID/pred.csv', "{result}": '/tmp/cdt_SID/result.csv'}, output_function=retrieve_result) # Cleanup except Exception as e: rmtree('/tmp/cdt_SID') raise e except KeyboardInterrupt: rmtree('/tmp/cdt_SID/') raise KeyboardInterrupt rmtree('/tmp/cdt_SID') return sid_score
def create_graph_from_data(self, data, **kwargs): """Apply causal discovery on observational data using CCDr. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the CCDR algorithm. """ # Building setup w/ arguments. self.arguments['{VERBOSE}'] = str(self.verbose).upper() results = self._run_ccdr(data, verbose=self.verbose) return nx.relabel_nodes(nx.DiGraph(results), {idx: i for idx, i in enumerate(data.columns)})
def init_variables(self, verbose=False): """Redefine the causes of the graph.""" for j in range(1, self.nodes): nb_parents = np.random.randint(0, min([self.parents_max, j])+1) for i in np.random.choice(range(0, j), nb_parents, replace=False): self.adjacency_matrix[i, j] = 1 try: self.g = nx.DiGraph(self.adjacency_matrix) assert not list(nx.simple_cycles(self.g)) except AssertionError: if verbose: print("Regenerating, graph non valid...") self.init_variables() # Mechanisms self.cfunctions = [self.mechanism(int(sum(self.adjacency_matrix[:, i])), self.points, self.noise, noise_coeff=self.noise_coeff) if sum(self.adjacency_matrix[:, i]) else self.initial_generator for i in range(self.nodes)]
def generate(self, rescale=True): """Generate data from an FCM containing cycles.""" if self.cfunctions is None: self.init_variables() for i in nx.topological_sort(self.g): # Root cause if not sum(self.adjacency_matrix[:, i]): self.data['V{}'.format(i)] = self.cfunctions[i](self.points) # Generating causes else: self.data['V{}'.format(i)] = self.cfunctions[i](self.data.iloc[:, self.adjacency_matrix[:, i].nonzero()[0]].values) if rescale: self.data['V{}'.format(i)] = scale(self.data['V{}'.format(i)].values) return self.g, self.data
def to_csv(self, fname_radical, **kwargs): """ Save data to the csv format by default, in two separate files. Optional keyword arguments can be passed to pandas. """ if self.data is not None: self.data.to_csv(fname_radical+'_data.csv', index=False, **kwargs) pd.DataFrame(self.adjacency_matrix).to_csv(fname_radical \ + '_target.csv', index=False, **kwargs) else: raise ValueError("Graph has not yet been generated. \ Use self.generate() to do so.")
def launch_R_script(template, arguments, output_function=None, verbose=True, debug=False): """Launch an R script, starting from a template and replacing text in file before execution. Args: template (str): path to the template of the R script arguments (dict): Arguments that modify the template's placeholders with arguments output_function (function): Function to execute **after** the execution of the R script, and its output is returned by this function. Used traditionally as a function to retrieve the results of the execution. verbose (bool): Sets the verbosity of the R subprocess. debug (bool): If True, the generated scripts are not deleted. Return: Returns the output of the ``output_function`` if not `None` else `True` or `False` depending on whether the execution was successful. """ id = str(uuid.uuid4()) os.makedirs('/tmp/cdt_R_script_' + id + '/') try: scriptpath = '/tmp/cdt_R_script_' + id + '/instance_{}'.format(os.path.basename(template)) copy(template, scriptpath) with fileinput.FileInput(scriptpath, inplace=True) as file: for line in file: mline = line for elt in arguments: mline = mline.replace(elt, arguments[elt]) print(mline, end='') if output_function is None: output = subprocess.call("Rscript --vanilla {}".format(scriptpath), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) else: if verbose: process = subprocess.Popen("Rscript --vanilla {}".format(scriptpath), shell=True) else: process = subprocess.Popen("Rscript --vanilla {}".format(scriptpath), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) process.wait() output = output_function() # Cleaning up except Exception as e: if not debug: rmtree('/tmp/cdt_R_script_' + id + '/') raise e except KeyboardInterrupt: if not debug: rmtree('/tmp/cdt_R_script_' + id + '/') raise KeyboardInterrupt if not debug: rmtree('/tmp/cdt_R_script_' + id + '/') return output
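The template/argument mechanism above is easiest to see with a toy script. Below is a minimal, hedged sketch (the template content, placeholder names and file paths are hypothetical, and it assumes `Rscript` is available on the PATH):

import os
import tempfile

# Write a throwaway R template whose placeholders mirror the keys of `arguments`.
tmpdir = tempfile.mkdtemp()
template = os.path.join(tmpdir, 'toy.R')
with open(template, 'w') as f:
    f.write('m <- matrix(0, {N}, {N})\n')
    f.write('write.csv(m, "{OUT}", row.names=FALSE)\n')

out_csv = os.path.join(tmpdir, 'result.csv')
result = launch_R_script(template,
                         {'{N}': '3', '{OUT}': out_csv},
                         output_function=lambda: open(out_csv).read(),
                         verbose=False)
print(result)  # contents of the CSV written by the R script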
def check_R_package(self, package): """Execute a subprocess to check the package's availability. Args: package (str): Name of the package to be tested. Returns: bool: `True` if the package is available, `False` otherwise """ test_package = not bool(launch_R_script("{}/R_templates/test_import.R".format(os.path.dirname(os.path.realpath(__file__))), {"{package}": package}, verbose=True)) return test_package
def bin_variable(var, bins='fd'): # bin with normalization """Bin variables w/ normalization.""" var = np.array(var).astype(np.float) var = (var - np.mean(var)) / np.std(var) var = np.digitize(var, np.histogram(var, bins=bins)[1]) return var
def predict(self, a, b, **kwargs): """Perform the independence test. :param a: input data :param b: input data :type a: array-like, numerical data :type b: array-like, numerical data :return: dependency statistic (1=Highly dependent, 0=Not dependent) :rtype: float """ binning_alg = kwargs.get('bins', 'fd') return metrics.adjusted_mutual_info_score(bin_variable(a, bins=binning_alg), bin_variable(b, bins=binning_alg))
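Taken together, `bin_variable` and this `predict` amount to: standardize, bin, and score dependence with adjusted mutual information. A self-contained sketch of the same computation (the function name is hypothetical; only numpy and scikit-learn are assumed):

import numpy as np
from sklearn import metrics

def binned_ami(a, b, bins='fd'):
    """Adjusted mutual information between two binned 1D variables."""
    def _bin(var):
        var = np.asarray(var, dtype=float)
        var = (var - var.mean()) / var.std()  # standardize
        return np.digitize(var, np.histogram(var, bins=bins)[1])
    return metrics.adjusted_mutual_info_score(_bin(a), _bin(b))

rng = np.random.RandomState(0)
x = rng.randn(500)
print(binned_ami(x, 2 * x + 0.1 * rng.randn(500)))  # high: dependent
print(binned_ami(x, rng.randn(500)))                # near 0: independent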
def predict(self, df_data, graph=None, **kwargs): """Orient a graph using the method defined by the arguments. Depending on the type of `graph`, this function proceeds to execute different functions: 1. If ``graph`` is a ``networkx.DiGraph``, then ``self.orient_directed_graph`` is executed. 2. If ``graph`` is a ``networkx.Graph``, then ``self.orient_undirected_graph`` is executed. 3. If ``graph`` is ``None``, then ``self.create_graph_from_data`` is executed. Args: df_data (pandas.DataFrame): DataFrame containing the observational data. graph (networkx.DiGraph or networkx.Graph or None): Prior knowledge on the causal graph. .. warning:: Requirement : Name of the nodes in the graph must correspond to the name of the variables in df_data """ if graph is None: return self.create_graph_from_data(df_data, **kwargs) elif isinstance(graph, nx.DiGraph): return self.orient_directed_graph(df_data, graph, **kwargs) elif isinstance(graph, nx.Graph): return self.orient_undirected_graph(df_data, graph, **kwargs) else: print('Unknown Graph type') raise ValueError
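A hedged usage sketch of this dispatch, using cdt's GES as a concrete subclass (any graph method exposing this `predict` behaves the same way; GES itself additionally needs the `pcalg` R package, and `load_dataset('sachs')` is assumed to be cdt's bundled example data):

import networkx as nx
from cdt.causality.graph import GES
from cdt.data import load_dataset

data, true_graph = load_dataset('sachs')
model = GES()
g_from_data = model.predict(data)                        # graph is None -> create_graph_from_data
g_from_skel = model.predict(data, nx.Graph(true_graph))  # undirected prior -> orient_undirected_graph
g_refined = model.predict(data, true_graph)              # directed prior -> orient_directed_graph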
def graph_evaluation(data, adj_matrix, gpu=None, gpu_id=0, **kwargs): """Evaluate a graph taking account of the hardware.""" gpu = SETTINGS.get_default(gpu=gpu) device = 'cuda:{}'.format(gpu_id) if gpu else 'cpu' obs = th.FloatTensor(data).to(device) cgnn = CGNN_model(adj_matrix, data.shape[0], gpu_id=gpu_id, **kwargs).to(device) cgnn.reset_parameters() return cgnn.run(obs, **kwargs)
def parallel_graph_evaluation(data, adj_matrix, nb_runs=16, nb_jobs=None, **kwargs): """Parallelize the various runs of CGNN to evaluate a graph.""" nb_jobs = SETTINGS.get_default(nb_jobs=nb_jobs) if nb_runs == 1: return graph_evaluation(data, adj_matrix, **kwargs) else: output = Parallel(n_jobs=nb_jobs)(delayed(graph_evaluation)(data, adj_matrix, idx=run, gpu_id=run % SETTINGS.GPU, **kwargs) for run in range(nb_runs)) return np.mean(output)
def hill_climbing(data, graph, **kwargs): """Hill Climbing optimization: a greedy exploration algorithm.""" nodelist = list(data.columns) data = scale(data.values).astype('float32') tested_candidates = [nx.adj_matrix(graph, nodelist=nodelist, weight=None)] best_score = parallel_graph_evaluation(data, tested_candidates[0].todense(), ** kwargs) best_candidate = graph can_improve = True while can_improve: can_improve = False for (i, j) in best_candidate.edges(): test_graph = deepcopy(best_candidate) test_graph.add_edge(j, i, weight=test_graph[i][j]['weight']) test_graph.remove_edge(i, j) tadjmat = nx.adj_matrix(test_graph, nodelist=nodelist, weight=None) if (nx.is_directed_acyclic_graph(test_graph) and not any([(tadjmat != cand).nnz == 0 for cand in tested_candidates])): tested_candidates.append(tadjmat) score = parallel_graph_evaluation(data, tadjmat.todense(), **kwargs) if score < best_score: can_improve = True best_candidate = test_graph best_score = score break return best_candidate
def forward(self): """Generate according to the topological order of the graph.""" self.noise.data.normal_() if not self.confounding: for i in self.topological_order: self.generated[i] = self.blocks[i](th.cat([v for c in [ [self.generated[j] for j in np.nonzero(self.adjacency_matrix[:, i])[0]], [self.noise[:, [i]]]] for v in c], 1)) else: for i in self.topological_order: self.generated[i] = self.blocks[i](th.cat([v for c in [ [self.generated[j] for j in np.nonzero(self.adjacency_matrix[:, i])[0]], [self.corr_noise[min(i, j), max(i, j)] for j in np.nonzero(self.i_adj_matrix[:, i])[0]], [self.noise[:, [i]]]] for v in c], 1)) return th.cat(self.generated, 1)
def run(self, data, train_epochs=1000, test_epochs=1000, verbose=None, idx=0, lr=0.01, **kwargs): """Run the CGNN on a given graph.""" verbose = SETTINGS.get_default(verbose=verbose) optim = th.optim.Adam(self.parameters(), lr=lr) self.score.zero_() with trange(train_epochs + test_epochs, disable=not verbose) as t: for epoch in t: optim.zero_grad() generated_data = self.forward() mmd = self.criterion(generated_data, data) if not epoch % 200: t.set_postfix(idx=idx, epoch=epoch, loss=mmd.item()) mmd.backward() optim.step() if epoch >= test_epochs: self.score.add_(mmd.data) return self.score.cpu().numpy() / test_epochs
def create_graph_from_data(self, data): """Use CGNN to create a graph from scratch. All the possible structures are tested, which leads to a super exponential complexity. It would be preferable to start from a graph skeleton for large graphs. Args: data (pandas.DataFrame): Observational data on which causal discovery has to be performed. Returns: networkx.DiGraph: Solution given by CGNN. """ warnings.warn("An exhaustive search of the causal structure of CGNN without" " skeleton is super-exponential in the number of variables.") # Building all possible candidates: nb_vars = len(list(data.columns)) data = scale(data.values).astype('float32') candidates = [np.reshape(np.array(i), (nb_vars, nb_vars)) for i in itertools.product([0, 1], repeat=nb_vars*nb_vars) if (np.trace(np.reshape(np.array(i), (nb_vars, nb_vars))) == 0 and nx.is_directed_acyclic_graph(nx.DiGraph(np.reshape(np.array(i), (nb_vars, nb_vars)))))] warnings.warn("A total of {} graphs will be evaluated.".format(len(candidates))) scores = [parallel_graph_evaluation(data, i, nh=self.nh, nb_runs=self.nb_runs, gpu=self.gpu, nb_jobs=self.nb_jobs, lr=self.lr, train_epochs=self.train_epochs, test_epochs=self.test_epochs, verbose=self.verbose) for i in candidates] final_candidate = candidates[scores.index(min(scores))] output = np.zeros(final_candidate.shape) # Retrieve the confidence score on each edge. for (i, j), x in np.ndenumerate(final_candidate): if x > 0: cand = final_candidate cand[i, j] = 0 output[i, j] = min(scores) - scores[candidates.index(cand)] return nx.DiGraph(candidates[output], {idx: i for idx, i in enumerate(data.columns)})
def orient_directed_graph(self, data, dag, alg='HC'): """Modify and improve a directed acyclic graph solution using CGNN. Args: data (pandas.DataFrame): Observational data on which causal discovery has to be performed. dag (nx.DiGraph): Graph that provides the initial solution, on which the CGNN algorithm will be applied. alg (str): Exploration heuristic to use, among ["HC", "HCr", "tabu", "EHC"] Returns: networkx.DiGraph: Solution given by CGNN. """ alg_dic = {'HC': hill_climbing, 'HCr': hill_climbing_with_removal, 'tabu': tabu_search, 'EHC': exploratory_hill_climbing} return alg_dic[alg](data, dag, nh=self.nh, nb_runs=self.nb_runs, gpu=self.gpu, nb_jobs=self.nb_jobs, lr=self.lr, train_epochs=self.train_epochs, test_epochs=self.test_epochs, verbose=self.verbose)
def orient_undirected_graph(self, data, umg, alg='HC'): """Orient the undirected graph using GNN and apply CGNN to improve the graph. Args: data (pandas.DataFrame): Observational data on which causal discovery has to be performed. umg (nx.Graph): Graph that provides the skeleton, on which the GNN then the CGNN algorithm will be applied. alg (str): Exploration heuristic to use, among ["HC", "HCr", "tabu", "EHC"] Returns: networkx.DiGraph: Solution given by CGNN. .. note:: GNN (``cdt.causality.pairwise.GNN``) is first used to orient the undirected graph and output a DAG before applying CGNN. """ warnings.warn("The pairwise GNN model is computed on each edge of the UMG " "to initialize the model and start CGNN with a DAG") gnn = GNN(nh=self.nh, lr=self.lr) og = gnn.orient_graph(data, umg, nb_runs=self.nb_runs, nb_max_runs=self.nb_runs, nb_jobs=self.nb_jobs, train_epochs=self.train_epochs, test_epochs=self.test_epochs, verbose=self.verbose, gpu=self.gpu) # Pairwise method # print(nx.adj_matrix(og).todense().shape) # print(list(og.edges())) dag = dagify_min_edge(og) # print(nx.adj_matrix(dag).todense().shape) return self.orient_directed_graph(data, dag, alg=alg)
def eval_entropy(x): """Evaluate the entropy of the input variable. :param x: input variable 1D :return: entropy of x """ hx = 0. sx = sorted(x) for i, j in zip(sx[:-1], sx[1:]): delta = j-i if bool(delta): hx += np.log(np.abs(delta)) hx = hx / (len(x) - 1) + psi(len(x)) - psi(1) return hx
def integral_approx_estimator(x, y): """Integral approximation estimator for causal inference. :param x: input variable x 1D :param y: input variable y 1D :return: Value of the IGCI model; >0 suggests x->y, <0 suggests y->x """ a, b = (0., 0.) x = np.array(x) y = np.array(y) idx, idy = (np.argsort(x), np.argsort(y)) for x1, x2, y1, y2 in zip(x[idx][:-1], x[idx][1:], y[idx][:-1], y[idx][1:]): if x1 != x2 and y1 != y2: a = a + np.log(np.abs((y2 - y1) / (x2 - x1))) for x1, x2, y1, y2 in zip(x[idy][:-1], x[idy][1:], y[idy][:-1], y[idy][1:]): if x1 != x2 and y1 != y2: b = b + np.log(np.abs((x2 - x1) / (y2 - y1))) return (a - b)/len(x)
def predict_proba(self, a, b, **kwargs): """Evaluate a pair using the IGCI model. :param a: Input variable 1D :param b: Input variable 1D :param kwargs: {refMeasure: Scaling method (gaussian, uniform or None), estimator: method used to evaluate the pairs (entropy or integral)} :return: Value of the IGCI model; >0 suggests a->b, <0 suggests b->a """ estimators = {'entropy': lambda x, y: eval_entropy(y) - eval_entropy(x), 'integral': integral_approx_estimator} ref_measures = {'gaussian': lambda x: standard_scale.fit_transform(x.reshape((-1, 1))), 'uniform': lambda x: min_max_scale.fit_transform(x.reshape((-1, 1))), 'None': lambda x: x} ref_measure = ref_measures[kwargs.get('refMeasure', 'gaussian')] estimator = estimators[kwargs.get('estimator', 'entropy')] a = ref_measure(a) b = ref_measure(b) return estimator(a, b)
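A minimal usage sketch for the method above, assuming the class is exposed as `cdt.causality.pairwise.IGCI` (as in the CausalDiscoveryToolbox) and called with the signature shown; the synthetic pair is illustrative only:

import numpy as np
from cdt.causality.pairwise import IGCI

rng = np.random.RandomState(0)
a = rng.uniform(size=1000)
b = np.exp(a) + 0.05 * rng.randn(1000)   # a -> b through a nonlinear mechanism

score = IGCI().predict_proba(a, b, refMeasure='uniform', estimator='entropy')
print(score)  # > 0 suggests a -> b, < 0 suggests b -> a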
def featurize_row(self, x, y): """ Projects the causal pair to the RKHS using the sampled kernel approximation. Args: x (np.ndarray): Variable 1 y (np.ndarray): Variable 2 Returns: np.ndarray: projected empirical distributions into a single fixed-size vector. """ x = x.ravel() y = y.ravel() b = np.ones(x.shape) dx = np.cos(np.dot(self.W2, np.vstack((x, b)))).mean(1) dy = np.cos(np.dot(self.W2, np.vstack((y, b)))).mean(1) if(sum(dx) > sum(dy)): return np.hstack((dx, dy, np.cos(np.dot(self.W, np.vstack((x, y, b)))).mean(1))) else: return np.hstack((dx, dy, np.cos(np.dot(self.W, np.vstack((y, x, b)))).mean(1)))
def fit(self, x, y): """Train the model. Args: x (pd.DataFrame): CEPC format dataframe containing the pairs y (pd.DataFrame or np.ndarray): labels associated to the pairs """ train = np.vstack((np.array([self.featurize_row(row.iloc[0], row.iloc[1]) for idx, row in x.iterrows()]), np.array([self.featurize_row(row.iloc[1], row.iloc[0]) for idx, row in x.iterrows()]))) labels = np.vstack((y, -y)).ravel() verbose = 1 if self.verbose else 0 self.clf = CLF(verbose=verbose, min_samples_leaf=self.L, n_estimators=self.E, max_depth=self.max_depth, n_jobs=self.n_jobs).fit(train, labels)
def predict_proba(self, x, y=None, **kwargs): """ Predict the causal score using a trained RCC model Args: x (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset. y (numpy.array): second variable (optional depending on the 1st argument). Returns: float: Causation score (Value : 1 if a->b and -1 if b->a) """ if self.clf is None: raise ValueError("Model has to be trained before making predictions.") if isinstance(x, pandas.Series): input_ = self.featurize_row(x.iloc[0], x.iloc[1]).reshape((1, -1)) elif isinstance(x, pandas.DataFrame): input_ = np.array([self.featurize_row(row.iloc[0], row.iloc[1]) for idx, row in x.iterrows()]) elif y is not None: input_ = self.featurize_row(x, y).reshape((1, -1)) else: raise TypeError("DataType not understood.") return self.clf.predict(input_)
def predict_features(self, df_features, df_target, nh=20, idx=0, dropout=0., activation_function=th.nn.ReLU, lr=0.01, l1=0.1, batch_size=-1, train_epochs=1000, test_epochs=1000, device=None, verbose=None, nb_runs=3): """For one variable, predict its neighbours. Args: df_features (pandas.DataFrame): Candidate explanatory variables. df_target (pandas.Series): Target variable. nh (int): number of hidden units idx (int): (optional) for printing purposes dropout (float): probability of dropout (between 0 and 1) activation_function (torch.nn.Module): activation function of the NN lr (float): learning rate of Adam l1 (float): L1 penalization coefficient batch_size (int): batch size, defaults to full-batch train_epochs (int): number of train epochs test_epochs (int): number of test epochs device (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``) verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``) nb_runs (int): number of bootstrap runs Returns: list: scores of each feature relatively to the target """ device, verbose = SETTINGS.get_default(('device', device), ('verbose', verbose)) x = th.FloatTensor(scale(df_features.values)).to(device) y = th.FloatTensor(scale(df_target.values)).to(device) out = [] for i in range(nb_runs): model = FSGNN_model([x.size()[1] + 1, nh, 1], dropout=dropout, activation_function=activation_function).to(device) out.append(model.train(x, y, lr=lr, l1=l1, batch_size=batch_size, train_epochs=train_epochs, test_epochs=test_epochs, device=device, verbose=verbose)) return list(np.mean(np.array(out), axis=0))
def predict_undirected_graph(self, data): """Build a skeleton using a pairwise independence criterion. Args: data (pandas.DataFrame): Raw data table Returns: networkx.Graph: Undirected graph representing the skeleton. """ graph = Graph() for idx_i, i in enumerate(data.columns): for idx_j, j in enumerate(data.columns[idx_i+1:]): score = self.predict(data[i].values, data[j].values) if abs(score) > 0.001: graph.add_edge(i, j, weight=score) return graph
def run_feature_selection(self, df_data, target, idx=0, **kwargs): """Run feature selection for one node: wrapper around ``self.predict_features``. Args: df_data (pandas.DataFrame): All the observational data target (str): Name of the target variable idx (int): (optional) For printing purposes Returns: list: scores of each feature relatively to the target """ list_features = list(df_data.columns.values) list_features.remove(target) df_target = pd.DataFrame(df_data[target], columns=[target]) df_features = df_data[list_features] return self.predict_features(df_features, df_target, idx=idx, **kwargs)
def predict(self, df_data, threshold=0.05, **kwargs): """Predict the skeleton of the graph from raw data. Returns iteratively the feature selection algorithm on each node. Args: df_data (pandas.DataFrame): data to construct a graph from threshold (float): cutoff value for feature selection scores kwargs (dict): additional arguments for algorithms Returns: networkx.Graph: predicted skeleton of the graph. """ nb_jobs = kwargs.get("nb_jobs", SETTINGS.NB_JOBS) list_nodes = list(df_data.columns.values) if nb_jobs != 1: result_feature_selection = Parallel(n_jobs=nb_jobs)(delayed(self.run_feature_selection) (df_data, node, idx, **kwargs) for idx, node in enumerate(list_nodes)) else: result_feature_selection = [self.run_feature_selection(df_data, node, idx, **kwargs) for idx, node in enumerate(list_nodes)] for idx, i in enumerate(result_feature_selection): try: i.insert(idx, 0) except AttributeError: # if results are numpy arrays result_feature_selection[idx] = np.insert(i, idx, 0) matrix_results = np.array(result_feature_selection) matrix_results *= matrix_results.transpose() np.fill_diagonal(matrix_results, 0) matrix_results /= 2 graph = nx.Graph() for (i, j), x in np.ndenumerate(matrix_results): if matrix_results[i, j] > threshold: graph.add_edge(list_nodes[i], list_nodes[j], weight=matrix_results[i, j]) for node in list_nodes: if node not in graph.nodes(): graph.add_node(node) return graph
def orient_undirected_graph(self, data, graph): """Run GIES on an undirected graph. Args: data (pandas.DataFrame): DataFrame containing the data graph (networkx.Graph): Skeleton of the graph to orient Returns: networkx.DiGraph: Solution given by the GIES algorithm. """ # Building setup w/ arguments. self.arguments['{VERBOSE}'] = str(self.verbose).upper() self.arguments['{SCORE}'] = self.scores[self.score] fe = DataFrame(nx.adj_matrix(graph, weight=None).todense()) fg = DataFrame(1 - fe.values) results = self._run_gies(data, fixedGaps=fg, verbose=self.verbose) return nx.relabel_nodes(nx.DiGraph(results), {idx: i for idx, i in enumerate(data.columns)})
def create_graph_from_data(self, data): """Run the GIES algorithm. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the GIES algorithm. """ # Building setup w/ arguments. self.arguments['{SCORE}'] = self.scores[self.score] self.arguments['{VERBOSE}'] = str(self.verbose).upper() results = self._run_gies(data, verbose=self.verbose) return nx.relabel_nodes(nx.DiGraph(results), {idx: i for idx, i in enumerate(data.columns)})
def _run_gies(self, data, fixedGaps=None, verbose=True): """Setting up and running GIES with all arguments.""" # Run gies id = str(uuid.uuid4()) os.makedirs('/tmp/cdt_gies' + id + '/') self.arguments['{FOLDER}'] = '/tmp/cdt_gies' + id + '/' def retrieve_result(): return read_csv('/tmp/cdt_gies' + id + '/result.csv', delimiter=',').values try: data.to_csv('/tmp/cdt_gies' + id + '/data.csv', header=False, index=False) if fixedGaps is not None: fixedGaps.to_csv('/tmp/cdt_gies' + id + '/fixedgaps.csv', index=False, header=False) self.arguments['{SKELETON}'] = 'TRUE' else: self.arguments['{SKELETON}'] = 'FALSE' gies_result = launch_R_script("{}/R_templates/gies.R".format(os.path.dirname(os.path.realpath(__file__))), self.arguments, output_function=retrieve_result, verbose=verbose) # Cleanup except Exception as e: rmtree('/tmp/cdt_gies' + id + '') raise e except KeyboardInterrupt: rmtree('/tmp/cdt_gies' + id + '/') raise KeyboardInterrupt rmtree('/tmp/cdt_gies' + id + '') return gies_result
def plot_curves(i_batch, adv_loss, gen_loss, l1_reg, cols): """Plot SAM's various losses.""" from matplotlib import pyplot as plt if i_batch == 0: try: ax.clear() ax.plot(range(len(adv_plt)), adv_plt, "r-", linewidth=1.5, markersize=4, label="Discriminator") ax.plot(range(len(adv_plt)), gen_plt, "g-", linewidth=1.5, markersize=4, label="Generators") ax.plot(range(len(adv_plt)), l1_plt, "b-", linewidth=1.5, markersize=4, label="L1-Regularization") plt.legend() adv_plt.append(adv_loss.cpu().data[0]) gen_plt.append(gen_loss.cpu().data[0] / cols) l1_plt.append(l1_reg.cpu().data[0]) plt.pause(0.0001) except NameError: plt.ion() fig, ax = plt.subplots() plt.xlabel("Epoch") plt.ylabel("Losses") plt.pause(0.0001) adv_plt = [adv_loss.cpu().data[0]] gen_plt = [gen_loss.cpu().data[0] / cols] l1_plt = [l1_reg.cpu().data[0]] else: adv_plt.append(adv_loss.cpu().data[0]) gen_plt.append(gen_loss.cpu().data[0] / cols) l1_plt.append(l1_reg.cpu().data[0])
def plot_gen(epoch, batch, generated_variables, pairs_to_plot=[[0, 1]]): """Plot generated pairs of variables.""" from matplotlib import pyplot as plt if epoch == 0: plt.ion() plt.clf() for (i, j) in pairs_to_plot: plt.scatter(generated_variables[i].data.cpu().numpy( ), batch.data.cpu().numpy()[:, j], label="Y -> X") plt.scatter(batch.data.cpu().numpy()[ :, i], generated_variables[j].data.cpu().numpy(), label="X -> Y") plt.scatter(batch.data.cpu().numpy()[:, i], batch.data.cpu().numpy()[ :, j], label="original data") plt.legend() plt.pause(0.01)
def run_SAM(df_data, skeleton=None, **kwargs): """Execute the SAM model. :param df_data: Input data; either np.array or pd.DataFrame """ gpu = kwargs.get('gpu', False) gpu_no = kwargs.get('gpu_no', 0) train_epochs = kwargs.get('train_epochs', 1000) test_epochs = kwargs.get('test_epochs', 1000) batch_size = kwargs.get('batch_size', -1) lr_gen = kwargs.get('lr_gen', 0.1) lr_disc = kwargs.get('lr_disc', lr_gen) verbose = kwargs.get('verbose', True) regul_param = kwargs.get('regul_param', 0.1) dnh = kwargs.get('dnh', None) plot = kwargs.get("plot", False) plot_generated_pair = kwargs.get("plot_generated_pair", False) d_str = "Epoch: {} -- Disc: {} -- Gen: {} -- L1: {}" try: list_nodes = list(df_data.columns) df_data = (df_data[list_nodes]).values except AttributeError: list_nodes = list(range(df_data.shape[1])) data = df_data.astype('float32') data = th.from_numpy(data) if batch_size == -1: batch_size = data.shape[0] rows, cols = data.size() # Get the list of indexes to ignore if skeleton is not None: zero_components = [[] for i in range(cols)] skel = nx.adj_matrix(skeleton, weight=None) for i, j in zip(*((1-skel).nonzero())): zero_components[j].append(i) else: zero_components = [[i] for i in range(cols)] sam = SAM_generators((rows, cols), zero_components, batch_norm=True, **kwargs) # Begin UGLY activation_function = kwargs.get('activation_function', th.nn.Tanh) try: del kwargs["activation_function"] except KeyError: pass discriminator_sam = SAM_discriminator( [cols, dnh, dnh, 1], batch_norm=True, activation_function=th.nn.LeakyReLU, activation_argument=0.2, **kwargs) kwargs["activation_function"] = activation_function # End of UGLY if gpu: sam = sam.cuda(gpu_no) discriminator_sam = discriminator_sam.cuda(gpu_no) data = data.cuda(gpu_no) # Select parameters to optimize : ignore the non connected nodes criterion = th.nn.BCEWithLogitsLoss() g_optimizer = th.optim.Adam(sam.parameters(), lr=lr_gen) d_optimizer = th.optim.Adam( discriminator_sam.parameters(), lr=lr_disc) true_variable = Variable( th.ones(batch_size, 1), requires_grad=False) false_variable = Variable( th.zeros(batch_size, 1), requires_grad=False) causal_filters = th.zeros(data.shape[1], data.shape[1]) if gpu: true_variable = true_variable.cuda(gpu_no) false_variable = false_variable.cuda(gpu_no) causal_filters = causal_filters.cuda(gpu_no) data_iterator = DataLoader(data, batch_size=batch_size, shuffle=True) # TRAIN for epoch in range(train_epochs + test_epochs): for i_batch, batch in enumerate(data_iterator): batch = Variable(batch) batch_vectors = [batch[:, [i]] for i in range(cols)] g_optimizer.zero_grad() d_optimizer.zero_grad() # Train the discriminator generated_variables = sam(batch) disc_losses = [] gen_losses = [] for i in range(cols): generator_output = th.cat([v for c in [batch_vectors[: i], [ generated_variables[i]], batch_vectors[i + 1:]] for v in c], 1) # 1. Train discriminator on fake disc_output_detached = discriminator_sam( generator_output.detach()) disc_output = discriminator_sam(generator_output) disc_losses.append( criterion(disc_output_detached, false_variable)) # 2. Train the generator : gen_losses.append(criterion(disc_output, true_variable)) true_output = discriminator_sam(batch) adv_loss = sum(disc_losses)/cols + \ criterion(true_output, true_variable) gen_loss = sum(gen_losses) adv_loss.backward() d_optimizer.step() # 3. 
Compute filter regularization filters = th.stack( [i.fs_filter[0, :-1].abs() for i in sam.blocks], 1) l1_reg = regul_param * filters.sum() loss = gen_loss + l1_reg if verbose and epoch % 200 == 0 and i_batch == 0: print(str(i) + " " + d_str.format(epoch, adv_loss.item(), gen_loss.item() / cols, l1_reg.item())) loss.backward() # STORE ASSYMETRY values for output if epoch > train_epochs: causal_filters.add_(filters.data) g_optimizer.step() if plot: plot_curves(i_batch, adv_loss, gen_loss, l1_reg, cols) if plot_generated_pair and epoch % 200 == 0: plot_gen(epoch, batch, generated_variables) return causal_filters.div_(test_epochs).cpu().numpy()
def reset_parameters(self): """Reset the parameters.""" stdv = 1. / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv)
def forward(self, input): """Feed-forward through the network.""" return th.nn.functional.linear(input, self.weight.div(self.weight.pow(2).sum(0).sqrt()))
def forward(self, x): """Feed-forward the model.""" return self.layers(x * (self._filter * self.fs_filter).expand_as(x))
def forward(self, x): """Feed-forward the model.""" for i in self.noise: i.data.normal_() self.generated_variables = [self.blocks[i]( th.cat([x, self.noise[i]], 1)) for i in range(self.cols)] return self.generated_variables
def predict(self, data, graph=None, nruns=6, njobs=None, gpus=0, verbose=None, plot=False, plot_generated_pair=False, return_list_results=False): """Execute SAM on a dataset given a skeleton or not. Args: data (pandas.DataFrame): Observational data for estimation of causal relationships by SAM skeleton (numpy.ndarray): A priori knowledge about the causal relationships as an adjacency matrix. Can be fed either directed or undirected links. nruns (int): Number of runs to be made for causal estimation. Recommended: >=12 for optimal performance. njobs (int): Numbers of jobs to be run in Parallel. Recommended: 1 if no GPU available, 2*number of GPUs else. gpus (int): Number of available GPUs for the algorithm. verbose (bool): verbose mode plot (bool): Plot losses interactively. Not recommended if nruns>1 plot_generated_pair (bool): plots a generated pair interactively. Not recommended if nruns>1 Returns: networkx.DiGraph: Graph estimated by SAM, where A[i,j] is the term of the ith variable for the jth generator. """ verbose, njobs = SETTINGS.get_default(('verbose', verbose), ('nb_jobs', njobs)) if njobs != 1: list_out = Parallel(n_jobs=njobs)(delayed(run_SAM)(data, skeleton=graph, lr_gen=self.lr, lr_disc=self.dlr, regul_param=self.l1, nh=self.nh, dnh=self.dnh, gpu=bool(gpus), train_epochs=self.train, test_epochs=self.test, batch_size=self.batchsize, plot=plot, verbose=verbose, gpu_no=idx % max(gpus, 1)) for idx in range(nruns)) else: list_out = [run_SAM(data, skeleton=graph, lr_gen=self.lr, lr_disc=self.dlr, regul_param=self.l1, nh=self.nh, dnh=self.dnh, gpu=bool(gpus), train_epochs=self.train, test_epochs=self.test, batch_size=self.batchsize, plot=plot, verbose=verbose, gpu_no=0) for idx in range(nruns)] if return_list_results: return list_out else: W = list_out[0] for w in list_out[1:]: W += w W /= nruns return nx.relabel_nodes(nx.DiGraph(W), {idx: i for idx, i in enumerate(data.columns)})
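A hedged usage sketch for this `predict`, assuming the class is exposed as `cdt.causality.graph.SAM` and constructed with its defaults ('data.csv' is a placeholder for any purely numerical observational table; SAM training is compute intensive, so the run count below is deliberately small):

import networkx as nx
import pandas as pd
from cdt.causality.graph import SAM

df = pd.read_csv('data.csv')                # placeholder path
model = SAM()
graph = model.predict(df, nruns=2, gpus=0)  # networkx.DiGraph with averaged causal filters as weights
print(nx.adjacency_matrix(graph, weight='weight').todense())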
def predict_proba(self, a, b, **kwargs): """ Infer causal relationships between 2 variables using the RECI statistic :param a: Input variable 1 :param b: Input variable 2 :return: Causation coefficient (Value : 1 if a->b and -1 if b->a) :rtype: float """ return self.b_fit_score(b, a) - self.b_fit_score(a, b)
def b_fit_score(self, x, y): """ Compute the RECI fit score Args: x (numpy.ndarray): Variable 1 y (numpy.ndarray): Variable 2 Returns: float: RECI fit score """ x = np.reshape(minmax_scale(x), (-1, 1)) y = np.reshape(minmax_scale(y), (-1, 1)) poly = PolynomialFeatures(degree=self.degree) poly_x = poly.fit_transform(x) poly_x[:,1] = 0 poly_x[:,2] = 0 regressor = LinearRegression() regressor.fit(poly_x, y) y_predict = regressor.predict(poly_x) error = mean_squared_error(y_predict, y) return error
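A minimal usage sketch for RECI, assuming the class is exposed as `cdt.causality.pairwise.RECI` and scored with the `predict_proba`/`b_fit_score` pair defined above; the data is synthetic:

import numpy as np
from cdt.causality.pairwise import RECI

rng = np.random.RandomState(0)
x = rng.uniform(size=500)
y = np.sin(2 * np.pi * x) + 0.05 * rng.randn(500)   # x -> y

print(RECI().predict_proba(x, y))  # positive value suggests x -> y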
def predict_proba(self, a, b, **kwargs): """ Infer causal relationships between 2 variables using the CDS statistic Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 Returns: float: Causation score (Value : 1 if a->b and -1 if b->a) """ return self.cds_score(b, a) - self.cds_score(a, b)
def cds_score(self, x_te, y_te): """ Computes the cds statistic from variable 1 to variable 2 Args: x_te (numpy.ndarray): Variable 1 y_te (numpy.ndarray): Variable 2 Returns: float: CDS fit score """ if type(x_te) == np.ndarray: x_te, y_te = pd.Series(x_te.reshape(-1)), pd.Series(y_te.reshape(-1)) xd, yd = discretized_sequences(x_te, y_te, self.ffactor, self.maxdev) cx = Counter(xd) cy = Counter(yd) yrange = sorted(cy.keys()) ny = len(yrange) py = np.array([cy[i] for i in yrange], dtype=float) py = py / py.sum() pyx = [] for a in cx: if cx[a] > self.minc: yx = y_te[xd == a] # if not numerical(ty): # cyx = Counter(yx) # pyxa = np.array([cyx[i] for i in yrange], dtype=float) # pyxa.sort() if count_unique(y_te) > len_discretized_values(y_te, "Numerical", self.ffactor, self.maxdev): yx = (yx - np.mean(yx)) / np.std(y_te) yx = discretized_sequence(yx, "Numerical", self.ffactor, self.maxdev, norm=False) cyx = Counter(yx.astype(int)) pyxa = np.array([cyx[i] for i in discretized_values(y_te, "Numerical", self.ffactor, self.maxdev)], dtype=float) else: cyx = Counter(yx) pyxa = [cyx[i] for i in yrange] pyxax = np.array([0] * (ny - 1) + pyxa + [0] * (ny - 1), dtype=float) xcorr = [sum(py * pyxax[i:i + ny]) for i in range(2 * ny - 1)] imax = xcorr.index(max(xcorr)) pyxa = np.array([0] * (2 * ny - 2 - imax) + pyxa + [0] * imax, dtype=float) assert pyxa.sum() == cx[a] pyxa = pyxa / pyxa.sum() pyx.append(pyxa) if len(pyx) == 0: return 0 pyx = np.array(pyx) pyx = pyx - pyx.mean(axis=0) return np.std(pyx)
def predict_proba(self, a, b, **kwargs): """Prediction method for pairwise causal inference using the ANM model. Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 Returns: float: Causation score (Value : 1 if a->b and -1 if b->a) """ a = scale(a).reshape((-1, 1)) b = scale(b).reshape((-1, 1)) return self.anm_score(b, a) - self.anm_score(a, b)
def anm_score(self, x, y):
    """Compute the fitness score of the ANM model in the x -> y direction.

    Args:
        x (numpy.ndarray): Variable seen as cause
        y (numpy.ndarray): Variable seen as effect

    Returns:
        float: ANM fit score
    """
    gp = GaussianProcessRegressor().fit(x, y)
    y_predict = gp.predict(x)
    indepscore = normalized_hsic(y_predict - y, x)
    return indepscore
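# Compact sketch of the ANM comparison above. normalized_hsic is not a standard
# scikit-learn function, so this sketch substitutes a crude residual/input
# correlation as a (much weaker) independence proxy; data and names are made up.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.preprocessing import scale


def anm_residual_dependence(x, y):
    """Fit y = f(x) + e with a GP and measure dependence of residuals on x."""
    x = scale(x).reshape(-1, 1)
    y = scale(y).reshape(-1, 1)
    gp = GaussianProcessRegressor().fit(x, y)
    residuals = (y - gp.predict(x).reshape(-1, 1)).ravel()
    return abs(np.corrcoef(residuals, x.ravel())[0, 1])


rng = np.random.RandomState(0)
a = rng.uniform(-2, 2, 300)
b = np.tanh(a) + 0.1 * rng.randn(300)
# Lower residual dependence in the a -> b direction suggests a causes b.
print(anm_residual_dependence(a, b), anm_residual_dependence(b, a))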
def orient_undirected_graph(self, data, graph, **kwargs): """Run PC on an undirected graph. Args: data (pandas.DataFrame): DataFrame containing the data graph (networkx.Graph): Skeleton of the graph to orient Returns: networkx.DiGraph: Solution given by PC on the given skeleton. """ # Building setup w/ arguments. self.arguments['{CITEST}'] = self.dir_CI_test[self.CI_test] self.arguments['{METHOD_INDEP}'] = self.dir_method_indep[self.method_indep] self.arguments['{DIRECTED}'] = 'TRUE' self.arguments['{ALPHA}'] = str(self.alpha) self.arguments['{NJOBS}'] = str(self.nb_jobs) self.arguments['{VERBOSE}'] = str(self.verbose).upper() fe = DataFrame(nx.adj_matrix(graph, weight=None).todense()) fg = DataFrame(1 - fe.values) results = self._run_pc(data, fixedEdges=fe, fixedGaps=fg, verbose=self.verbose) return nx.relabel_nodes(nx.DiGraph(results), {idx: i for idx, i in enumerate(data.columns)})
def create_graph_from_data(self, data, **kwargs): """Run the PC algorithm. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by PC on the given data. """ # Building setup w/ arguments. self.arguments['{CITEST}'] = self.dir_CI_test[self.CI_test] self.arguments['{METHOD_INDEP}'] = self.dir_method_indep[self.method_indep] self.arguments['{DIRECTED}'] = 'TRUE' self.arguments['{ALPHA}'] = str(self.alpha) self.arguments['{NJOBS}'] = str(self.nb_jobs) self.arguments['{VERBOSE}'] = str(self.verbose).upper() results = self._run_pc(data, verbose=self.verbose) return nx.relabel_nodes(nx.DiGraph(results), {idx: i for idx, i in enumerate(data.columns)})
def _run_pc(self, data, fixedEdges=None, fixedGaps=None, verbose=True):
    """Setting up and running pc with all arguments."""
    # Checking coherence of arguments
    if (self.arguments['{CITEST}'] == self.dir_CI_test['hsic']
            and self.arguments['{METHOD_INDEP}'] == self.dir_method_indep['corr']):
        warnings.warn('Selected method for indep is unfit for the hsic test,'
                      ' setting the hsic.gamma method.')
        self.arguments['{METHOD_INDEP}'] = self.dir_method_indep['hsic_gamma']

    elif (self.arguments['{CITEST}'] == self.dir_CI_test['gaussian']
          and self.arguments['{METHOD_INDEP}'] != self.dir_method_indep['corr']):
        warnings.warn('Selected method for indep is unfit for the selected test,'
                      ' setting the classic correlation-based method.')
        self.arguments['{METHOD_INDEP}'] = self.dir_method_indep['corr']

    # Run PC in a unique temporary folder
    run_dir = '/tmp/cdt_pc' + str(uuid.uuid4()) + '/'
    os.makedirs(run_dir)
    self.arguments['{FOLDER}'] = run_dir

    def retrieve_result():
        return read_csv(run_dir + 'result.csv', delimiter=',').values

    try:
        data.to_csv(run_dir + 'data.csv', header=False, index=False)
        if fixedGaps is not None and fixedEdges is not None:
            fixedGaps.to_csv(run_dir + 'fixedgaps.csv', index=False, header=False)
            fixedEdges.to_csv(run_dir + 'fixededges.csv', index=False, header=False)
            self.arguments['{SKELETON}'] = 'TRUE'
        else:
            self.arguments['{SKELETON}'] = 'FALSE'

        pc_result = launch_R_script("{}/R_templates/pc.R".format(os.path.dirname(os.path.realpath(__file__))),
                                    self.arguments, output_function=retrieve_result,
                                    verbose=verbose)
    except KeyboardInterrupt:
        rmtree(run_dir)
        raise
    except Exception as e:
        rmtree(run_dir)
        raise e
    # Cleanup
    rmtree(run_dir)
    return pc_result
def b_fit_score(self, x, y):
    """Compute the bivariate fit score from variable x to variable y.

    Args:
        x (numpy.ndarray): Variable seen as cause
        y (numpy.ndarray): Variable seen as effect

    Returns:
        float: BF fit score
    """
    x = np.reshape(scale(x), (-1, 1))
    y = np.reshape(scale(y), (-1, 1))
    gp = GaussianProcessRegressor().fit(x, y)
    y_predict = gp.predict(x)
    error = mean_squared_error(y_predict, y)

    return error
def predict(self, data, alpha=0.01, max_iter=2000, **kwargs): """ Predict the graph skeleton. Args: data (pandas.DataFrame): observational data alpha (float): regularization parameter max_iter (int): maximum number of iterations Returns: networkx.Graph: Graph skeleton """ edge_model = GraphLasso(alpha=alpha, max_iter=max_iter) edge_model.fit(data.values) return nx.relabel_nodes(nx.DiGraph(edge_model.get_precision()), {idx: i for idx, i in enumerate(data.columns)})
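# Sketch of the same skeleton-recovery idea with the current scikit-learn class
# name: GraphLasso was renamed GraphicalLasso in recent releases. Data is toy.
import numpy as np
import pandas as pd
import networkx as nx
from sklearn.covariance import GraphicalLasso

rng = np.random.RandomState(0)
x = rng.randn(500)
data = pd.DataFrame({"a": x,
                     "b": x + 0.5 * rng.randn(500),
                     "c": rng.randn(500)})

model = GraphicalLasso(alpha=0.01, max_iter=2000).fit(data.values)
skeleton = nx.relabel_nodes(nx.Graph(model.get_precision()),
                            {i: c for i, c in enumerate(data.columns)})
print(nx.to_pandas_adjacency(skeleton))  # near-zero off-diagonal entries mean no edge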
def predict_features(self, df_features, df_target, idx=0, **kwargs):
    """For one variable, predict its neighbouring nodes with HSIC Lasso.

    Args:
        df_features (pandas.DataFrame): Candidate features
        df_target (pandas.Series): Target variable
        idx (int): (optional) index of the target, for printing purposes
        kwargs (dict): additional options for the algorithm

    Returns:
        list: scores of each feature relatively to the target
    """
    y = np.transpose(df_target.values)
    X = np.transpose(df_features.values)
    path, beta, A, lam = hsiclasso(X, y)
    return beta
def autoset_settings(set_var): """Autoset GPU parameters using CUDA_VISIBLE_DEVICES variables. Return default config if variable not set. :param set_var: Variable to set. Must be of type ConfigSettings """ try: devices = ast.literal_eval(os.environ["CUDA_VISIBLE_DEVICES"]) if type(devices) != list and type(devices) != tuple: devices = [devices] if len(devices) != 0: set_var.GPU = len(devices) set_var.NB_JOBS = len(devices) warnings.warn("Detecting CUDA devices : {}".format(devices)) except KeyError: set_var.GPU = check_cuda_devices() set_var.NB_JOBS = set_var.GPU warnings.warn("Detecting {} CUDA devices.".format(set_var.GPU)) if not set_var.GPU: warnings.warn("No GPU automatically detected. Setting SETTINGS.GPU to 0, " + "and SETTINGS.NB_JOBS to cpu_count.") set_var.GPU = 0 set_var.NB_JOBS = multiprocessing.cpu_count() return set_var
def check_cuda_devices(): """Output some information on CUDA-enabled devices on your computer, including current memory usage. Modified to only get number of devices. It's a port of https://gist.github.com/f0k/0d6431e3faa60bffc788f8b4daa029b1 from C to Python with ctypes, so it can run without compiling anything. Note that this is a direct translation with no attempt to make the code Pythonic. It's meant as a general demonstration on how to obtain CUDA device information from Python without resorting to nvidia-smi or a compiled Python extension. .. note:: Author: Jan Schlüter, https://gist.github.com/63a664160d016a491b2cbea15913d549.git """ import ctypes # Some constants taken from cuda.h CUDA_SUCCESS = 0 libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll') for libname in libnames: try: cuda = ctypes.CDLL(libname) except OSError: continue else: break else: # raise OSError("could not load any of: " + ' '.join(libnames)) return 0 nGpus = ctypes.c_int() error_str = ctypes.c_char_p() result = cuda.cuInit(0) if result != CUDA_SUCCESS: cuda.cuGetErrorString(result, ctypes.byref(error_str)) # print("cuInit failed with error code %d: %s" % (result, error_str.value.decode())) return 0 result = cuda.cuDeviceGetCount(ctypes.byref(nGpus)) if result != CUDA_SUCCESS: cuda.cuGetErrorString(result, ctypes.byref(error_str)) # print("cuDeviceGetCount failed with error code %d: %s" % (result, error_str.value.decode())) return 0 # print("Found %d device(s)." % nGpus.value) return nGpus.value
def get_default(self, *args, **kwargs):
    """Get the default parameters as defined in the Settings instance.

    This function seamlessly retrieves the argument to pass through,
    depending on whether it was overridden or not: if no argument was
    overridden in a function of the toolbox, the default argument is
    ``None``, and this function then retrieves the default parameter as
    defined by the ``cdt.SETTINGS`` attributes.

    It has two modes of processing:

    1. **kwargs for retrieving a single argument:
       ``get_default(argument_name=value)``.
    2. *args through a list of tuples of the shape ``('argument_name',
       value)`` to retrieve multiple values at once.
    """
    def retrieve_param(i):
        try:
            return self.__getattribute__(i)
        except AttributeError:
            if i == "device":
                return self.default_device
            else:
                return self.__getattribute__(i.upper())
    if len(args) == 0:
        if len(kwargs) == 1 and kwargs[list(kwargs.keys())[0]] is not None:
            return kwargs[list(kwargs.keys())[0]]
        elif len(kwargs) == 1:
            return retrieve_param(list(kwargs.keys())[0])
        else:
            raise TypeError("As dict is unordered, it is impossible to give "
                            "the parameters in the correct order.")
    else:
        out = []
        for i in args:
            if i[1] is None:
                out.append(retrieve_param(i[0]))
            else:
                out.append(i[1])
        return out
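# Toy illustration of the two calling modes documented above, using a minimal
# hypothetical stand-in object rather than the real cdt.SETTINGS instance.
class DummySettings:
    NB_JOBS = 4
    VERBOSE = False

    def get_default(self, *args, **kwargs):
        def retrieve(name):
            return getattr(self, name.upper())
        if args:
            return [retrieve(k) if v is None else v for k, v in args]
        (key, value), = kwargs.items()
        return value if value is not None else retrieve(key)


SETTINGS = DummySettings()
print(SETTINGS.get_default(verbose=None))                          # False: falls back to the stored default
print(SETTINGS.get_default(('verbose', True), ('nb_jobs', None)))  # [True, 4]: overrides win, None falls back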
def read_causal_pairs(filename, scale=True, **kwargs): """Convert a ChaLearn Cause effect pairs challenge format into numpy.ndarray. :param filename: path of the file to read or DataFrame containing the data :type filename: str or pandas.DataFrame :param scale: Scale the data :type scale: bool :param kwargs: parameters to be passed to pandas.read_csv :return: Dataframe composed of (SampleID, a (numpy.ndarray) , b (numpy.ndarray)) :rtype: pandas.DataFrame """ def convert_row(row, scale): """Convert a CCEPC row into numpy.ndarrays. :param row: :type row: pandas.Series :return: tuple of sample ID and the converted data into numpy.ndarrays :rtype: tuple """ a = row["A"].split(" ") b = row["B"].split(" ") if a[0] == "": a.pop(0) b.pop(0) if a[-1] == "": a.pop(-1) b.pop(-1) a = array([float(i) for i in a]) b = array([float(i) for i in b]) if scale: a = scaler(a) b = scaler(b) return row['SampleID'], a, b if isinstance(filename, str): data = read_csv(filename, **kwargs) elif isinstance(filename, DataFrame): data = filename else: raise TypeError("Type not supported.") conv_data = [] for idx, row in data.iterrows(): conv_data.append(convert_row(row, scale)) df = DataFrame(conv_data, columns=['SampleID', 'A', 'B']) df = df.set_index("SampleID") return df
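# Small round-trip example for read_causal_pairs with an in-memory DataFrame in
# the ChaLearn pair format (space-separated value strings). The import path is
# an assumption; in recent cdt releases the function lives in cdt.utils.io.
import pandas as pd
from cdt.utils.io import read_causal_pairs  # assumed import path

raw = pd.DataFrame({"SampleID": ["pair1"],
                    "A": ["0.1 0.4 0.3 0.9"],
                    "B": ["1.2 0.8 1.1 0.2"]})
pairs = read_causal_pairs(raw, scale=False)
print(type(pairs.loc["pair1", "A"]), pairs.loc["pair1", "A"])  # numpy.ndarray parsed from the string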
def read_adjacency_matrix(filename, directed=True, **kwargs): """Read a file (containing an adjacency matrix) and convert it into a directed or undirected networkx graph. :param filename: file to read or DataFrame containing the data :type filename: str or pandas.DataFrame :param directed: Return directed graph :type directed: bool :param kwargs: extra parameters to be passed to pandas.read_csv :return: networkx graph containing the graph. :rtype: **networkx.DiGraph** or **networkx.Graph** depending on the ``directed`` parameter. """ if isinstance(filename, str): data = read_csv(filename, **kwargs) elif isinstance(filename, DataFrame): data = filename else: raise TypeError("Type not supported.") if directed: return nx.relabel_nodes(nx.DiGraph(data.values), {idx: i for idx, i in enumerate(data.columns)}) else: return nx.relabel_nodes(nx.Graph(data.values), {idx: i for idx, i in enumerate(data.columns)})
def read_list_edges(filename, directed=True, **kwargs): """Read a file (containing list of edges) and convert it into a directed or undirected networkx graph. :param filename: file to read or DataFrame containing the data :type filename: str or pandas.DataFrame :param directed: Return directed graph :type directed: bool :param kwargs: extra parameters to be passed to pandas.read_csv :return: networkx graph containing the graph. :rtype: **networkx.DiGraph** or **networkx.Graph** depending on the ``directed`` parameter. """ if isinstance(filename, str): data = read_csv(filename, **kwargs) elif isinstance(filename, DataFrame): data = filename else: raise TypeError("Type not supported.") if directed: graph = nx.DiGraph() else: graph = nx.Graph() for idx, row in data.iterrows(): try: score = row["Score"] except KeyError: score = 1 graph.add_edge(row['Cause'], row["Effect"], weight=score) return graph
def forward(self, pred, target): """Compute the loss model. :param pred: predicted Variable :param target: Target Variable :return: Loss """ loss = th.FloatTensor([0]) for i in range(1, self.moments): mk_pred = th.mean(th.pow(pred, i), 0) mk_tar = th.mean(th.pow(target, i), 0) loss.add_(th.mean((mk_pred - mk_tar) ** 2)) # L2 return loss
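# Standalone sketch of the same moment-matching loss as a PyTorch module; the
# `moments` constructor argument is hypothetical, and gradients are preserved
# by avoiding the in-place accumulation used above.
import torch as th


class MomentMatchingLoss(th.nn.Module):
    """Penalise the squared gap between the first k raw moments of two batches."""

    def __init__(self, moments=4):
        super().__init__()
        self.moments = moments

    def forward(self, pred, target):
        loss = pred.new_zeros(1)
        for k in range(1, self.moments):
            mk_pred = th.mean(th.pow(pred, k), 0)
            mk_tar = th.mean(th.pow(target, k), 0)
            loss = loss + th.mean((mk_pred - mk_tar) ** 2)
        return loss


criterion = MomentMatchingLoss(moments=4)
print(criterion(th.randn(128, 3), th.randn(128, 3)))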
def predict(self, a, b): """ Compute the test statistic Args: a (array-like): Variable 1 b (array-like): Variable 2 Returns: float: test statistic """ a = np.array(a).reshape((-1, 1)) b = np.array(b).reshape((-1, 1)) return (mutual_info_regression(a, b.reshape((-1,))) + mutual_info_regression(b, a.reshape((-1,))))/2
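# Quick call of the symmetric mutual-information statistic computed above on a
# clearly dependent toy pair; mutual_info_regression returns an array with one
# entry per feature, here a single one.
import numpy as np
from sklearn.feature_selection import mutual_info_regression

rng = np.random.RandomState(0)
a = rng.randn(1000)
b = a ** 2 + 0.1 * rng.randn(1000)
stat = (mutual_info_regression(a.reshape(-1, 1), b)
        + mutual_info_regression(b.reshape(-1, 1), a)) / 2
print(stat[0])  # well above the near-zero value obtained for independent samples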
def predict(self, a, b): """ Compute the test statistic Args: a (array-like): Variable 1 b (array-like): Variable 2 Returns: float: test statistic """ a = np.array(a).reshape((-1, 1)) b = np.array(b).reshape((-1, 1)) return sp.kendalltau(a, b)[0]
def predict(self, a, b, sig=[-1, -1], maxpnt=500):
    """Compute the test statistic.

    Args:
        a (array-like): Variable 1
        b (array-like): Variable 2
        sig (list): kernel sizes for a and b respectively
            (set to the median pairwise distance if -1)
        maxpnt (int): maximum number of points used, to bound computation time

    Returns:
        float: test statistic
    """
    a = (a - np.mean(a)) / np.std(a)
    b = (b - np.mean(b)) / np.std(b)

    return FastHsicTestGamma(a, b, sig, maxpnt)
def predict(self, x, *args, **kwargs):
    """Generic predict method, which dispatches to the most suitable subfunction.

    Depending on the types of `x` and `*args`, this method executes one of the
    following, in priority order:

    1. If ``args[0]`` is a ``networkx.(Di)Graph``, then ``self.orient_graph`` is executed.
    2. If ``args[0]`` exists, then ``self.predict_proba`` is executed.
    3. If ``x`` is a ``pandas.DataFrame``, then ``self.predict_dataset`` is executed.
    4. If ``x`` is a ``pandas.Series``, then ``self.predict_proba`` is executed.

    Args:
        x (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset.
        args (numpy.array or networkx.Graph): graph or second variable.

    Returns:
        pandas.Dataframe or networkx.Digraph: predictions output
    """
    if len(args) > 0:
        if type(args[0]) == nx.Graph or type(args[0]) == nx.DiGraph:
            return self.orient_graph(x, *args, **kwargs)
        else:
            return self.predict_proba(x, *args, **kwargs)
    elif type(x) == DataFrame:
        return self.predict_dataset(x, *args, **kwargs)
    elif type(x) == Series:
        return self.predict_proba(x.iloc[0], x.iloc[1], *args, **kwargs)
def predict_dataset(self, x, **kwargs): """Generic dataset prediction function. Runs the score independently on all pairs. Args: x (pandas.DataFrame): a CEPC format Dataframe. kwargs (dict): additional arguments for the algorithms Returns: pandas.DataFrame: a Dataframe with the predictions. """ printout = kwargs.get("printout", None) pred = [] res = [] x.columns = ["A", "B"] for idx, row in x.iterrows(): a = scale(row['A'].reshape((len(row['A']), 1))) b = scale(row['B'].reshape((len(row['B']), 1))) pred.append(self.predict_proba(a, b, idx=idx)) if printout is not None: res.append([row['SampleID'], pred[-1]]) DataFrame(res, columns=['SampleID', 'Predictions']).to_csv( printout, index=False) return pred
def orient_graph(self, df_data, graph, nb_runs=6, printout=None, **kwargs):
    """Orient an undirected graph using the pairwise method defined by the subclass.

    The pairwise method is run on every undirected edge.

    Args:
        df_data (pandas.DataFrame): Data
        graph (networkx.Graph): Graph to orient
        nb_runs (int): number of times to rerun for each pair (bootstrap)
        printout (str): (optional) Path to file where to save temporary results

    Returns:
        networkx.DiGraph: a directed graph, which might contain cycles

    .. warning::
       Requirement: the names of the nodes in the graph must correspond to the
       names of the variables in df_data
    """
    if type(graph) == nx.DiGraph:
        edges = [a for a in list(graph.edges()) if (a[1], a[0]) in list(graph.edges())]
        oriented_edges = [a for a in list(graph.edges()) if (a[1], a[0]) not in list(graph.edges())]
        for a in edges:
            if (a[1], a[0]) in list(graph.edges()):
                edges.remove(a)
        output = nx.DiGraph()
        for i in oriented_edges:
            output.add_edge(*i)
    elif type(graph) == nx.Graph:
        edges = list(graph.edges())
        output = nx.DiGraph()
    else:
        raise TypeError("Data type not understood.")

    res = []

    for idx, (a, b) in enumerate(edges):
        weight = self.predict_proba(
            df_data[a].values.reshape((-1, 1)),
            df_data[b].values.reshape((-1, 1)), idx=idx,
            nb_runs=nb_runs, **kwargs)
        if weight > 0:  # a causes b
            output.add_edge(a, b, weight=weight)
        else:
            output.add_edge(b, a, weight=abs(weight))
        if printout is not None:
            res.append([str(a) + '-' + str(b), weight])
            DataFrame(res, columns=['SampleID', 'Predictions']).to_csv(
                printout, index=False)

    for node in list(df_data.columns.values):
        if node not in output.nodes():
            output.add_node(node)

    return output
def orient_undirected_graph(self, data, graph):
    """Run the algorithm on an undirected graph.

    Args:
        data (pandas.DataFrame): DataFrame containing the data
        graph (networkx.Graph): Skeleton of the graph to orient

    Returns:
        networkx.DiGraph: Solution on the given skeleton.
    """
    # Building setup w/ arguments.
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    self.arguments['{SCORE}'] = self.score
    self.arguments['{BETA}'] = str(self.beta)
    self.arguments['{OPTIM}'] = str(self.optim).upper()
    self.arguments['{ALPHA}'] = str(self.alpha)

    whitelist = DataFrame(list(nx.edges(graph)), columns=["from", "to"])
    blacklist = DataFrame(list(nx.edges(nx.DiGraph(DataFrame(-nx.adj_matrix(graph, weight=None).todense() + 1,
                                                             columns=list(graph.nodes()),
                                                             index=list(graph.nodes()))))),
                          columns=["from", "to"])
    results = self._run_bnlearn(data, whitelist=whitelist,
                                blacklist=blacklist, verbose=self.verbose)
    return nx.relabel_nodes(nx.DiGraph(results),
                            {idx: i for idx, i in enumerate(data.columns)})
def orient_directed_graph(self, data, graph):
    """Run the algorithm on a directed graph.

    Args:
        data (pandas.DataFrame): DataFrame containing the data
        graph (networkx.DiGraph): Skeleton of the graph to orient

    Returns:
        networkx.DiGraph: Solution on the given skeleton.

    .. warning::
       The algorithm is run on the skeleton of the given graph.
    """
    warnings.warn("The algorithm is run on the skeleton of the given graph.")
    return self.orient_undirected_graph(data, nx.Graph(graph))
def create_graph_from_data(self, data): """Run the algorithm on data. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the algorithm. """ # Building setup w/ arguments. self.arguments['{SCORE}'] = self.score self.arguments['{VERBOSE}'] = str(self.verbose).upper() self.arguments['{BETA}'] = str(self.beta) self.arguments['{OPTIM}'] = str(self.optim).upper() self.arguments['{ALPHA}'] = str(self.alpha) results = self._run_bnlearn(data, verbose=self.verbose) graph = nx.DiGraph() graph.add_edges_from(results) return graph
def computeGaussKernel(x):
    """Compute the Gaussian kernel (unit bandwidth) on a 1D vector."""
    xnorm = np.power(euclidean_distances(x, x), 2)
    return np.exp(-xnorm / (2.0))
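# Quick sanity check of the kernel above, calling the function as defined there
# (assumes its module-level imports, numpy as np and euclidean_distances, are
# available). With the fixed denominator of 2.0 the bandwidth is one and the
# diagonal entries are exp(0) = 1.
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances  # used by computeGaussKernel

x = np.random.randn(5, 1)
K = computeGaussKernel(x)
print(K.shape, np.allclose(np.diag(K), 1.0))  # (5, 5) True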
def gmm_cause(points, k=4, p1=2, p2=2): """Init a root cause with a Gaussian Mixture Model w/ a spherical covariance type.""" g = GMM(k, covariance_type="spherical") g.fit(np.random.randn(300, 1)) g.means_ = p1 * np.random.randn(k, 1) g.covars_ = np.power(abs(p2 * np.random.randn(k, 1) + 1), 2) g.weights_ = abs(np.random.rand(k)) g.weights_ = g.weights_ / sum(g.weights_) return g.sample(points)[0].reshape(-1)
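# The GMM class above comes from an older scikit-learn; a rough equivalent
# sketch with the modern GaussianMixture API (attribute names differ:
# covariances_ instead of covars_, and spherical covariances are 1-D).
import numpy as np
from sklearn.mixture import GaussianMixture


def gmm_cause_modern(points, k=4, p1=2, p2=2):
    """Root cause drawn from a randomly parameterised spherical Gaussian mixture."""
    g = GaussianMixture(n_components=k, covariance_type="spherical")
    g.fit(np.random.randn(300, 1))                    # initialise the fitted state
    g.means_ = p1 * np.random.randn(k, 1)
    g.covariances_ = np.abs(p2 * np.random.randn(k) + 1) ** 2
    g.weights_ = np.abs(np.random.rand(k))
    g.weights_ = g.weights_ / g.weights_.sum()
    return g.sample(points)[0].reshape(-1)


print(gmm_cause_modern(10).shape)  # (10,)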
def normal_noise(points): """Init a noise variable.""" return np.random.rand(1) * np.random.randn(points, 1) \ + random.sample([2, -2], 1)
def uniform_noise(points):
    """Init a uniform noise variable."""
    return np.random.rand(1) * np.random.uniform(size=(points, 1)) \
        + random.sample([2, -2], 1)
def mechanism(self, x): """Mechanism function.""" result = np.\ zeros((self.points, 1)) for i in range(self.points): result[i, 0] = self.a * self.b * (x[i] + self.c) / (1 + abs(self.b * (x[i] + self.c))) return result + self.noise
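# Vectorised sketch of the elementwise loop above, assuming x and noise are
# (points, 1) arrays and a, b, c are the scalar mechanism parameters.
import numpy as np


def sigmoid_mechanism(x, noise, a, b, c):
    """Vectorised equivalent of the per-sample computation in mechanism()."""
    x = np.asarray(x).reshape(-1, 1)
    return a * b * (x + c) / (1 + np.abs(b * (x + c))) + noise


rng = np.random.RandomState(0)
x = rng.randn(100, 1)
print(sigmoid_mechanism(x, 0.1 * rng.randn(100, 1), a=1.0, b=2.0, c=0.5).shape)  # (100, 1)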
def mechanism(self, causes): """Mechanism function.""" result = np.zeros((self.points, 1)) for i in range(self.points): pre_add_effect = 0 for c in range(causes.shape[1]): pre_add_effect += causes[i, c] pre_add_effect += self.noise[i] result[i, 0] = self.a * self.b * \ (pre_add_effect + self.c)/(1 + abs(self.b*(pre_add_effect + self.c))) return result
def mechanism(self, x, par): """Mechanism function.""" list_coeff = self.polycause[par] result = np.zeros((self.points, 1)) for i in range(self.points): for j in range(self.d+1): result[i, 0] += list_coeff[j]*np.power(x[i], j) result[i, 0] = min(result[i, 0], 1) result[i, 0] = max(result[i, 0], -1) return result
def mechanism(self, x): """Mechanism function.""" self.nb_step += 1 x = np.reshape(x, (x.shape[0], 1)) if(self.nb_step < 5): cov = computeGaussKernel(x) mean = np.zeros((1, self.points))[0, :] y = np.random.multivariate_normal(mean, cov) elif(self.nb_step == 5): cov = computeGaussKernel(x) mean = np.zeros((1, self.points))[0, :] y = np.random.multivariate_normal(mean, cov) self.gpr = GaussianProcessRegressor() self.gpr.fit(x, y) y = self.gpr.predict(x) else: y = self.gpr.predict(x) return y
def mechanism(self, x): """Mechanism function.""" layers = [] layers.append(th.nn.modules.Linear(self.n_causes+1, self.nh)) layers.append(th.nn.Tanh()) layers.append(th.nn.modules.Linear(self.nh, 1)) self.layers = th.nn.Sequential(*layers) data = x.astype('float32') data = th.from_numpy(data) return np.reshape(self.layers(data).data, (x.shape[0],))
def predict_dataset(self, df):
    """Run the trained Jarfo model on all pairs of a dataset.

    Args:
        df (pandas.DataFrame): a CEPC format Dataframe.

    Returns:
        pandas.DataFrame: a Dataframe with the predictions.
    """
    if len(list(df.columns)) == 2:
        df.columns = ["A", "B"]
    if self.model is None:
        raise AssertionError("Model has not been trained before predictions")
    df2 = DataFrame()

    for idx, row in df.iterrows():
        df2 = df2.append(row, ignore_index=True)
        df2 = df2.append({'A': row["B"], 'B': row["A"]}, ignore_index=True)
    return predict.predict(deepcopy(df2), deepcopy(self.model))[::2]
def predict_proba(self, a, b, idx=0, **kwargs): """ Use Jarfo to predict the causal direction of a pair of vars. Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 idx (int): (optional) index number for printing purposes Returns: float: Causation score (Value : 1 if a->b and -1 if b->a) """ return self.predict_dataset(DataFrame([[a, b]], columns=['A', 'B']))
def network_deconvolution(mat, **kwargs):
    """Python implementation/translation of network deconvolution by MIT-KELLIS LAB.

    .. note::
       code author: gidonro [Github username](https://github.com/gidonro/Network-Deconvolution)

       LICENSE: MIT-KELLIS LAB

       AUTHORS:
          Algorithm was programmed by Soheil Feizi.
          Paper authors are S. Feizi, D. Marbach, M. Médard and M. Kellis
          Python implementation: Gideon Rosenthal

       For more details, see the following paper:
          Network Deconvolution as a General Method to Distinguish
          Direct Dependencies over Networks

          By: Soheil Feizi, Daniel Marbach, Muriel Médard and Manolis Kellis
          Nature Biotechnology

    Args:
        mat (numpy.ndarray): matrix, if it is a square matrix, the program assumes
            it is a relevance matrix where mat(i,j) represents the similarity content
            between nodes i and j. Elements of matrix should be non-negative.
        beta (float): Scaling parameter, the program maps the largest absolute eigenvalue
            of the direct dependency matrix to beta. It should be between 0 and 1.
        alpha (float): fraction of edges of the observed dependency matrix to be kept in
            the deconvolution process.
        control (int): if 0, display direct weights for observed interactions;
            if 1, display direct weights for both observed and non-observed interactions.

    Returns:
        mat_nd (numpy.ndarray): Output deconvolved matrix (direct dependency matrix).
            Its components represent direct edge weights of observed interactions.
            Choosing top direct interactions (a cut-off) depends on the application and
            is not implemented in this code.

    .. note::
       To apply ND on regulatory networks, follow steps explained in Supplementary notes
       1.4.1 and 2.1 and 2.3 of the paper.
       In this implementation, input matrices are made symmetric.
    """
    alpha = kwargs.get('alpha', 1)
    beta = kwargs.get('beta', 0.99)
    control = kwargs.get('control', 0)

    try:
        assert 0 < beta < 1
        assert 0 < alpha <= 1
    except AssertionError:
        raise ValueError("alpha must be in ]0, 1] and beta in ]0, 1[")

    #  Processing the input matrix, diagonal values are filtered
    np.fill_diagonal(mat, 0)

    # Thresholding the input matrix
    y = stat.mquantiles(mat[:], prob=[1 - alpha])
    th = mat >= y
    mat_th = mat * th

    # Making the matrix symetric if already not
    mat_th = (mat_th + mat_th.T) / 2

    # Eigen decomposition
    Dv, U = LA.eigh(mat_th)
    D = np.diag(Dv)
    lam_n = np.abs(min(np.min(np.diag(D)), 0))
    lam_p = np.abs(max(np.max(np.diag(D)), 0))

    m1 = lam_p * (1 - beta) / beta
    m2 = lam_n * (1 + beta) / beta
    m = max(m1, m2)

    # network deconvolution
    for i in range(D.shape[0]):
        D[i, i] = (D[i, i]) / (m + D[i, i])

    mat_new1 = np.dot(U, np.dot(D, LA.inv(U)))

    # Displaying direct weights
    if control == 0:
        ind_edges = (mat_th > 0) * 1.0
        ind_nonedges = (mat_th == 0) * 1.0
        m1 = np.max(mat * ind_nonedges)
        m2 = np.min(mat_new1)
        mat_new2 = (mat_new1 + max(m1 - m2, 0)) * ind_edges + (mat * ind_nonedges)
    else:
        m2 = np.min(mat_new1)
        mat_new2 = (mat_new1 + max(-m2, 0))

    # linearly mapping the deconvolved matrix to be between 0 and 1
    m1 = np.min(mat_new2)
    m2 = np.max(mat_new2)
    mat_nd = (mat_new2 - m1) / (m2 - m1)

    return mat_nd
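# Usage sketch for network_deconvolution on a random symmetric similarity
# matrix. The aliases below mirror what the function body appears to rely on
# (an assumption); in cdt the function is exposed via cdt.utils.graph.
import numpy as np
from numpy import linalg as LA
from scipy.stats import mstats as stat

rng = np.random.RandomState(0)
sim = np.abs(rng.randn(6, 6))
sim = (sim + sim.T) / 2                      # symmetric, non-negative similarity matrix

direct = network_deconvolution(sim.copy(), beta=0.9, alpha=1)  # copy: the input is modified in place
print(direct.shape, float(direct.min()), float(direct.max()))  # (6, 6), values mapped to [0, 1]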
def clr(M, **kwargs):
    """Implementation of the Context Likelihood or Relatedness Network algorithm.

    Args:
        M (numpy.ndarray): matrix, if it is a square matrix, the program assumes
            it is a relevance matrix where M(i,j) represents the similarity content
            between nodes i and j. Elements of matrix should be non-negative.

    Returns:
        numpy.ndarray: Output matrix of background-corrected CLR scores; each entry
            combines the z-scores of the similarity with respect to both rows.

    .. note::
       Ref: Jeremiah J. Faith, Boris Hayete, Joshua T. Thaden, Ilaria Mogno, Jamey
       Wierzbowski, Guillaume Cottarel, Simon Kasif, James J. Collins, and Timothy S.
       Gardner. Large-scale mapping and validation of escherichia coli transcriptional
       regulation from a compendium of expression profiles. PLoS Biology, 2007
    """
    R = np.zeros(M.shape)
    Id = [[0, 0] for i in range(M.shape[0])]
    for i in range(M.shape[0]):
        mu_i = np.mean(M[i, :])
        sigma_i = np.std(M[i, :])
        Id[i] = [mu_i, sigma_i]

    for i in range(M.shape[0]):
        for j in range(i + 1, M.shape[0]):
            # z-scores of M[i, j] with respect to the row-wise mean and standard deviation
            z_i = np.max([0, (M[i, j] - Id[i][0]) / Id[i][1]])
            z_j = np.max([0, (M[i, j] - Id[j][0]) / Id[j][1]])
            R[i, j] = np.sqrt(z_i**2 + z_j**2)
            R[j, i] = R[i, j]  # Symmetric

    return R
def aracne(m, **kwargs):
    """Implementation of the ARACNE algorithm.

    Args:
        m (numpy.ndarray): matrix, if it is a square matrix, the program assumes
            it is a relevance matrix where m(i,j) represents the similarity content
            between nodes i and j. Elements of matrix should be non-negative.

    Returns:
        numpy.ndarray: Output matrix where, within every triplet of nodes, the
            weakest edge is set to zero if it falls below the threshold W0
            (data-processing-inequality pruning of presumed indirect links).

    .. note::
       Ref: ARACNE: An Algorithm for the Reconstruction of Gene Regulatory Networks in
       a Mammalian Cellular Context
       Adam A Margolin, Ilya Nemenman, Katia Basso,
       Chris Wiggins, Gustavo Stolovitzky, Riccardo Dalla Favera and Andrea Califano
       DOI: https://doi.org/10.1186/1471-2105-7-S1-S7
    """
    I0 = kwargs.get('I0', 0.0)  # No default thresholding
    W0 = kwargs.get('W0', 0.05)

    # thresholding
    m = np.where(m > I0, m, 0)

    # Finding triplets and filtering them
    for i in range(m.shape[0]-2):
        for j in range(i+1, m.shape[0]-1):
            for k in range(j+1, m.shape[0]):
                triplet = [m[i, j], m[j, k], m[i, k]]
                min_index, min_value = min(enumerate(triplet), key=operator.itemgetter(1))
                if 0 < min_value < W0:
                    if min_index == 0:
                        m[i, j] = m[j, i] = 0.
                    elif min_index == 1:
                        m[j, k] = m[k, j] = 0.
                    else:
                        m[i, k] = m[k, i] = 0.
    return m
def remove_indirect_links(g, alg="aracne", **kwargs):
    """Apply deconvolution to a networkx graph.

    Args:
        g (networkx.Graph): Graph to apply deconvolution to
        alg (str): Algorithm to use ('aracne', 'clr', 'nd')
        kwargs (dict): extra options for algorithms

    Returns:
        networkx.Graph: graph with presumed indirect links removed.
    """
    alg = {"aracne": aracne,
           "nd": network_deconvolution,
           "clr": clr}[alg]
    mat = np.array(nx.adjacency_matrix(g).todense())
    return nx.relabel_nodes(nx.DiGraph(alg(mat, **kwargs)),
                            {idx: i for idx, i in enumerate(list(g.nodes()))})
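# Usage sketch on a small weighted undirected graph; assumes the helpers above
# are importable (e.g. from cdt.utils.graph in recent cdt releases).
import networkx as nx
from cdt.utils.graph import remove_indirect_links  # assumed import path

g = nx.Graph()
g.add_edge("a", "b", weight=0.9)
g.add_edge("b", "c", weight=0.8)
g.add_edge("a", "c", weight=0.3)   # weaker, presumably indirect link

pruned = remove_indirect_links(g, alg="aracne", I0=0.0, W0=0.5)
print(nx.to_pandas_adjacency(pruned))  # the a-c weight is zeroed out by the triplet rule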