partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
valid
dagify_min_edge
Input a graph and output a DAG. The heuristic is to reverse the edge with the lowest score of the cycle if possible, else remove it. Args: g (networkx.DiGraph): Graph to modify to output a DAG Returns: networkx.DiGraph: DAG made out of the input graph.
cdt/utils/graph.py
def dagify_min_edge(g):
    """Input a graph and output a DAG.

    The heuristic is to reverse the edge with the lowest score of the
    cycle if possible, else remove it.

    Args:
        g (networkx.DiGraph): Graph to modify to output a DAG

    Returns:
        networkx.DiGraph: DAG made out of the input graph.
    """
    while not nx.is_directed_acyclic_graph(g):
        cycle = next(nx.simple_cycles(g))
        scores = []
        edges = []
        # BUGFIX: pair consecutive nodes of the cycle, including the
        # closing edge from the last node back to the first. The
        # previous ``zip(cycle[:1], cycle[:1])`` produced only the
        # degenerate pair (cycle[0], cycle[0]) and never inspected a
        # real edge of the cycle.
        for i, j in zip(cycle, cycle[1:] + cycle[:1]):
            edges.append((i, j))
            scores.append(g[i][j]['weight'])
        # Candidate edge: the lowest-weight edge of the cycle.
        i, j = edges[scores.index(min(scores))]
        gc = deepcopy(g)
        gc.remove_edge(i, j)
        gc.add_edge(j, i)
        # Reverse the edge only if the reversal strictly reduces the
        # number of cycles; otherwise just drop it.
        if len(list(nx.simple_cycles(gc))) < len(list(nx.simple_cycles(g))):
            g.add_edge(j, i, weight=min(scores))
        g.remove_edge(i, j)
    return g
[ "Input", "a", "graph", "and", "output", "a", "DAG", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/utils/graph.py#L235-L263
[ "def", "dagify_min_edge", "(", "g", ")", ":", "while", "not", "nx", ".", "is_directed_acyclic_graph", "(", "g", ")", ":", "cycle", "=", "next", "(", "nx", ".", "simple_cycles", "(", "g", ")", ")", "scores", "=", "[", "]", "edges", "=", "[", "]", "f...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
weighted_mean_and_std
Returns the weighted average and standard deviation. values, weights -- numpy ndarrays with the same shape.
cdt/causality/pairwise/Jarfo_model/features.py
def weighted_mean_and_std(values, weights):
    """Compute the weighted average and weighted standard deviation.

    values, weights -- numpy ndarrays with the same shape.
    """
    mean = np.average(values, weights=weights, axis=0)
    squared_dev = (values - mean) ** 2
    # np.dot sums the weighted squared deviations in a single
    # C-level pass, which is fast and numerically precise.
    variance = np.dot(weights, squared_dev) / np.sum(weights)
    return (mean, np.sqrt(variance))
[ "Returns", "the", "weighted", "average", "and", "standard", "deviation", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/pairwise/Jarfo_model/features.py#L43-L51
[ "def", "weighted_mean_and_std", "(", "values", ",", "weights", ")", ":", "average", "=", "np", ".", "average", "(", "values", ",", "weights", "=", "weights", ",", "axis", "=", "0", ")", "variance", "=", "np", ".", "dot", "(", "weights", ",", "(", "va...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
GNN_instance
Run an instance of GNN, testing causal direction. :param m: data corresponding to the config : (N, 2) data, [:, 0] cause and [:, 1] effect :param pair_idx: print purposes :param run: number of the run (for GPU dispatch) :param device: device on which the algorithm is going to be run. :return:
cdt/causality/pairwise/GNN.py
def GNN_instance(x, idx=0, device=None, nh=20, **kwargs):
    """Run an instance of GNN, testing causal direction.

    :param x: (N, 2) data, [:, 0] cause and [:, 1] effect
    :param idx: index of the pair, for printing purposes
    :param device: device on which the algorithm is going to be run
    :param nh: presumably the number of hidden units of each
        generative net — confirm against GNN_model
    :return: list [score X->Y, score Y->X]
    """
    device = SETTINGS.get_default(device=device)
    data = scale(x).astype('float32')
    cause = th.FloatTensor(data[:, [0]]).to(device)
    effect = th.FloatTensor(data[:, [1]]).to(device)
    # One generative network per candidate causal direction.
    model_xy = GNN_model(x.shape[0], device=device, nh=nh).to(device)
    model_yx = GNN_model(x.shape[0], device=device, nh=nh).to(device)
    model_xy.reset_parameters()
    model_yx.reset_parameters()
    score_xy = model_xy.run(cause, effect, **kwargs)
    score_yx = model_yx.run(effect, cause, **kwargs)
    return [score_xy, score_yx]
[ "Run", "an", "instance", "of", "GNN", "testing", "causal", "direction", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/pairwise/GNN.py#L105-L125
[ "def", "GNN_instance", "(", "x", ",", "idx", "=", "0", ",", "device", "=", "None", ",", "nh", "=", "20", ",", "*", "*", "kwargs", ")", ":", "device", "=", "SETTINGS", ".", "get_default", "(", "device", "=", "device", ")", "xy", "=", "scale", "(",...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
GNN_model.forward
Pass data through the net structure. :param x: input data: shape (:,1) :type x: torch.Variable :return: output of the shallow net :rtype: torch.Variable
cdt/causality/pairwise/GNN.py
def forward(self, x):
    """Pass data through the net structure.

    :param x: input data: shape (:,1)
    :type x: torch.Variable
    :return: output of the shallow net
    :rtype: torch.Variable
    """
    # Resample the noise buffer in place on every call, then feed the
    # concatenation of input and noise through the layers.
    self.noise.normal_()
    augmented = th.cat([x, self.noise], 1)
    return self.layers(augmented)
[ "Pass", "data", "through", "the", "net", "structure", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/pairwise/GNN.py#L60-L70
[ "def", "forward", "(", "self", ",", "x", ")", ":", "self", ".", "noise", ".", "normal_", "(", ")", "return", "self", ".", "layers", "(", "th", ".", "cat", "(", "[", "x", ",", "self", ".", "noise", "]", ",", "1", ")", ")" ]
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
GNN_model.run
Run the GNN on a pair x,y of FloatTensor data.
cdt/causality/pairwise/GNN.py
def run(self, x, y, lr=0.01, train_epochs=1000, test_epochs=1000, idx=0,
        verbose=None, **kwargs):
    """Run the GNN on a pair x,y of FloatTensor data.

    Trains for ``train_epochs`` epochs with Adam, then accumulates the
    loss over ``test_epochs`` further epochs (no parameter updates) and
    returns its average.

    :param x: input variable (FloatTensor)
    :param y: target variable (FloatTensor)
    :param lr: learning rate of the Adam optimizer
    :param train_epochs: number of training epochs
    :param test_epochs: number of evaluation epochs
    :param idx: index of the pair, for printing purposes
    :param verbose: verbosity (defaults to ``cdt.SETTINGS.verbose``)
    :return: average loss over the evaluation epochs
    """
    verbose = SETTINGS.get_default(verbose=verbose)
    optim = th.optim.Adam(self.parameters(), lr=lr)
    running_loss = 0
    teloss = 0
    for i in range(train_epochs + test_epochs):
        optim.zero_grad()
        pred = self.forward(x)
        loss = self.criterion(pred, y)
        running_loss += loss.item()
        if i < train_epochs:
            loss.backward()
            optim.step()
        else:
            # BUGFIX: accumulate the current epoch's loss, not the
            # running total. The running total was previously only
            # reset inside the ``if verbose`` branch, so with verbose
            # off the returned score was a cumulative sum that grew
            # with every epoch and depended on the verbosity setting.
            teloss += loss.item()
        # print statistics
        if not i % 300:
            if verbose:
                print('Idx:{}; epoch:{}; score:{}'.
                      format(idx, i, running_loss / 300))
            # Reset the 300-epoch window unconditionally so the printed
            # average is correct regardless of verbosity.
            running_loss = 0.0
    return teloss / test_epochs
[ "Run", "the", "GNN", "on", "a", "pair", "x", "y", "of", "FloatTensor", "data", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/pairwise/GNN.py#L72-L97
[ "def", "run", "(", "self", ",", "x", ",", "y", ",", "lr", "=", "0.01", ",", "train_epochs", "=", "1000", ",", "test_epochs", "=", "1000", ",", "idx", "=", "0", ",", "verbose", "=", "None", ",", "*", "*", "kwargs", ")", ":", "verbose", "=", "SET...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
GNN.predict_proba
Run multiple times GNN to estimate the causal direction. Args: a (np.ndarray): Variable 1 b (np.ndarray): Variable 2 nb_runs (int): number of runs to execute per batch (before testing for significance with t-test). nb_jobs (int): number of runs to execute in parallel. (Initialized with ``cdt.SETTINGS.NB_JOBS``) gpu (bool): use gpu (Initialized with ``cdt.SETTINGS.GPU``) idx (int): (optional) index of the pair, for printing purposes verbose (bool): verbosity (Initialized with ``cdt.SETTINGS.verbose``) ttest_threshold (float): threshold to stop the bootstraps before ``nb_max_runs`` if the difference is significant nb_max_runs (int): Max number of bootstraps train_epochs (int): Number of epochs during which the model is going to be trained test_epochs (int): Number of epochs during which the model is going to be tested Returns: float: Causal score of the pair (Value : 1 if a->b and -1 if b->a)
cdt/causality/pairwise/GNN.py
def predict_proba(self, a, b, nb_runs=6, nb_jobs=None, gpu=None,
                  idx=0, verbose=None, ttest_threshold=0.01,
                  nb_max_runs=16, train_epochs=1000, test_epochs=1000):
    """Run multiple times GNN to estimate the causal direction.

    Args:
        a (np.ndarray): Variable 1
        b (np.ndarray): Variable 2
        nb_runs (int): number of runs to execute per batch (before
            testing for significance with t-test).
        nb_jobs (int): number of runs to execute in parallel.
            (Initialized with ``cdt.SETTINGS.NB_JOBS``)
        gpu (bool): use gpu (Initialized with ``cdt.SETTINGS.GPU``)
        idx (int): (optional) index of the pair, for printing purposes
        verbose (bool): verbosity (Initialized with
            ``cdt.SETTINGS.verbose``)
        ttest_threshold (float): threshold to stop the bootstraps
            before ``nb_max_runs`` if the difference is significant
        nb_max_runs (int): Max number of bootstraps
        train_epochs (int): Number of epochs during which the model is
            going to be trained
        test_epochs (int): Number of epochs during which the model is
            going to be tested

    Returns:
        float: Causal score of the pair (Value : 1 if a->b and -1 if b->a)
    """
    # BUGFIX: the defaults were previously bound to ``Nb_jobs`` (note
    # the capital N) while the rest of the method reads ``nb_jobs`` —
    # the SETTINGS default was silently discarded and, when the caller
    # passed nb_jobs=None, the parallel branch was always taken with
    # n_jobs=None.
    nb_jobs, verbose, gpu = SETTINGS.get_default(('nb_jobs', nb_jobs),
                                                 ('verbose', verbose),
                                                 ('gpu', gpu))
    x = np.stack([a.ravel(), b.ravel()], 1)
    ttest_criterion = TTestCriterion(
        max_iter=nb_max_runs, runs_per_iter=nb_runs,
        threshold=ttest_threshold)
    AB = []
    BA = []
    # Bootstrap batches of runs until the t-test is significant or the
    # max number of runs is reached.
    while ttest_criterion.loop(AB, BA):
        if nb_jobs != 1:
            result_pair = Parallel(n_jobs=nb_jobs)(delayed(GNN_instance)(
                x, idx=idx,
                device='cuda:{}'.format(run % gpu) if gpu else 'cpu',
                verbose=verbose, train_epochs=train_epochs,
                test_epochs=test_epochs)
                for run in range(ttest_criterion.iter,
                                 ttest_criterion.iter + nb_runs))
        else:
            result_pair = [GNN_instance(x, idx=idx,
                                        device='cuda:0' if gpu else 'cpu',
                                        verbose=verbose,
                                        train_epochs=train_epochs,
                                        test_epochs=test_epochs)
                           for run in range(ttest_criterion.iter,
                                            ttest_criterion.iter + nb_runs)]
        AB.extend([runpair[0] for runpair in result_pair])
        BA.extend([runpair[1] for runpair in result_pair])
    if verbose:
        print("P-value after {} runs : {}".format(ttest_criterion.iter,
                                                  ttest_criterion.p_value))
    score_AB = np.mean(AB)
    score_BA = np.mean(BA)
    # Normalized difference of the generative losses of both directions.
    return (score_BA - score_AB) / (score_BA + score_AB)
[ "Run", "multiple", "times", "GNN", "to", "estimate", "the", "causal", "direction", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/pairwise/GNN.py#L151-L202
[ "def", "predict_proba", "(", "self", ",", "a", ",", "b", ",", "nb_runs", "=", "6", ",", "nb_jobs", "=", "None", ",", "gpu", "=", "None", ",", "idx", "=", "0", ",", "verbose", "=", "None", ",", "ttest_threshold", "=", "0.01", ",", "nb_max_runs", "="...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
CyclicGraphGenerator.init_variables
Redefine the causes of the graph.
cdt/generators/cyclic_graph_generator.py
def init_variables(self, verbose=False): """Redefine the causes of the graph.""" # Resetting adjacency matrix for i in range(self.nodes): for j in np.random.choice(range(self.nodes), np.random.randint( 0, self.parents_max + 1), replace=False): if i != j: self.adjacency_matrix[j, i] = 1 try: assert any([sum(self.adjacency_matrix[:, i]) == self.parents_max for i in range(self.nodes)]) self.g = nx.DiGraph(self.adjacency_matrix) assert list(nx.simple_cycles(self.g)) assert any(len(i) == 2 for i in nx.simple_cycles(self.g)) except AssertionError: if verbose: print("Regenerating, graph non valid...") self.init_variables() if verbose: print("Matrix generated ! \ Number of cycles: {}".format(len(list(nx.simple_cycles(self.g))))) for i in range(self.nodes): self.data.iloc[:, i] = scale(self.initial_generator(self.points)) # Mechanisms self.cfunctions = [self.mechanism(int(sum(self.adjacency_matrix[:, i])), self.points, self.noise, noise_coeff=self.noise_coeff) for i in range(self.nodes)]
def init_variables(self, verbose=False): """Redefine the causes of the graph.""" # Resetting adjacency matrix for i in range(self.nodes): for j in np.random.choice(range(self.nodes), np.random.randint( 0, self.parents_max + 1), replace=False): if i != j: self.adjacency_matrix[j, i] = 1 try: assert any([sum(self.adjacency_matrix[:, i]) == self.parents_max for i in range(self.nodes)]) self.g = nx.DiGraph(self.adjacency_matrix) assert list(nx.simple_cycles(self.g)) assert any(len(i) == 2 for i in nx.simple_cycles(self.g)) except AssertionError: if verbose: print("Regenerating, graph non valid...") self.init_variables() if verbose: print("Matrix generated ! \ Number of cycles: {}".format(len(list(nx.simple_cycles(self.g))))) for i in range(self.nodes): self.data.iloc[:, i] = scale(self.initial_generator(self.points)) # Mechanisms self.cfunctions = [self.mechanism(int(sum(self.adjacency_matrix[:, i])), self.points, self.noise, noise_coeff=self.noise_coeff) for i in range(self.nodes)]
[ "Redefine", "the", "causes", "of", "the", "graph", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/generators/cyclic_graph_generator.py#L78-L110
[ "def", "init_variables", "(", "self", ",", "verbose", "=", "False", ")", ":", "# Resetting adjacency matrix", "for", "i", "in", "range", "(", "self", ".", "nodes", ")", ":", "for", "j", "in", "np", ".", "random", ".", "choice", "(", "range", "(", "self...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
CyclicGraphGenerator.generate
Generate data from an FCM containing cycles.
cdt/generators/cyclic_graph_generator.py
def generate(self, nb_steps=100, averaging=50, rescale=True):
    """Generate data from an FCM containing cycles.

    Iterates the structural equations ``nb_steps`` times and returns
    the graph together with data averaged over (roughly) the last
    ``averaging`` iterations.
    """
    # Lazily build graph, data and mechanisms on first use.
    if self.cfunctions is None:
        self.init_variables()
    new_df = pd.DataFrame()
    # causes[j]: indices of the parents of node j (nonzero entries of
    # column j of the adjacency matrix).
    causes = [[c for c in np.nonzero(self.adjacency_matrix[:, j])[0]]
              for j in range(self.nodes)]
    # values[j] collects node j's series over the trailing iterations,
    # to be averaged at the end.
    values = [[] for i in range(self.nodes)]
    for i in range(nb_steps):
        for j in range(self.nodes):
            # Recompute node j from its parents' current values.
            new_df["V" + str(j)] = self.cfunctions[j](
                self.data.iloc[:, causes[j]].values)[:, 0]
            if rescale:
                new_df["V" + str(j)] = scale(new_df["V" + str(j)])
            # Keep the last `averaging` steps for the output average.
            if i > nb_steps-averaging:
                values[j].append(new_df["V" + str(j)])
        # Feed this step's values into the next iteration.
        self.data = new_df
    # Final data: per-node mean over the collected trailing steps.
    self.data = pd.DataFrame(np.array([np.mean(values[i], axis=0)
                                       for i in range(self.nodes)]).transpose(),
                             columns=["V{}".format(j)
                                      for j in range(self.nodes)])
    return self.g, self.data
[ "Generate", "data", "from", "an", "FCM", "containing", "cycles", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/generators/cyclic_graph_generator.py#L112-L133
[ "def", "generate", "(", "self", ",", "nb_steps", "=", "100", ",", "averaging", "=", "50", ",", "rescale", "=", "True", ")", ":", "if", "self", ".", "cfunctions", "is", "None", ":", "self", ".", "init_variables", "(", ")", "new_df", "=", "pd", ".", ...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
CAM.create_graph_from_data
Apply causal discovery on observational data using CAM. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the CAM algorithm.
cdt/causality/graph/CAM.py
def create_graph_from_data(self, data, **kwargs):
    """Apply causal discovery on observational data using CAM.

    Args:
        data (pandas.DataFrame): DataFrame containing the data

    Returns:
        networkx.DiGraph: Solution given by the CAM algorithm.
    """
    # Building setup w/ arguments: fill in the template placeholders
    # consumed by the R script runner.
    setup = {
        '{SCORE}': self.scores[self.score],
        '{CUTOFF}': str(self.cutoff),
        '{VARSEL}': str(self.variablesel).upper(),
        '{SELMETHOD}': self.var_selection[self.selmethod],
        '{PRUNING}': str(self.pruning).upper(),
        '{PRUNMETHOD}': self.var_selection[self.prunmethod],
        '{NJOBS}': str(self.nb_jobs),
        '{VERBOSE}': str(self.verbose).upper(),
    }
    self.arguments.update(setup)
    results = self._run_cam(data, verbose=self.verbose)
    # Map integer node ids back to the DataFrame's column names.
    relabeling = {idx: i for idx, i in enumerate(data.columns)}
    return nx.relabel_nodes(nx.DiGraph(results), relabeling)
[ "Apply", "causal", "discovery", "on", "observational", "data", "using", "CAM", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/graph/CAM.py#L139-L160
[ "def", "create_graph_from_data", "(", "self", ",", "data", ",", "*", "*", "kwargs", ")", ":", "# Building setup w/ arguments.", "self", ".", "arguments", "[", "'{SCORE}'", "]", "=", "self", ".", "scores", "[", "self", ".", "score", "]", "self", ".", "argum...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
RFECVLinearSVR.predict_features
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms Returns: list: scores of each feature relatively to the target
cdt/independence/graph/FSRegression.py
def predict_features(self, df_features, df_target, idx=0, **kwargs):
    """For one variable, predict its neighbouring nodes.

    Args:
        df_features (pandas.DataFrame):
        df_target (pandas.Series):
        idx (int): (optional) for printing purposes
        kwargs (dict): additional options for algorithms

    Returns:
        list: scores of each feature relatively to the target
    """
    # Recursive feature elimination with cross-validation around a
    # linear SVR, eliminating one feature per step.
    svr = SVR(kernel='linear')
    rfecv = RFECV(svr, step=1).fit(df_features.values,
                                   df_target.values[:, 0])
    return rfecv.grid_scores_
[ "For", "one", "variable", "predict", "its", "neighbouring", "nodes", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/independence/graph/FSRegression.py#L51-L67
[ "def", "predict_features", "(", "self", ",", "df_features", ",", "df_target", ",", "idx", "=", "0", ",", "*", "*", "kwargs", ")", ":", "estimator", "=", "SVR", "(", "kernel", "=", "'linear'", ")", "selector", "=", "RFECV", "(", "estimator", ",", "step"...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
LinearSVRL2.predict_features
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms C (float): Penalty parameter of the error term Returns: list: scores of each feature relatively to the target
cdt/independence/graph/FSRegression.py
def predict_features(self, df_features, df_target, idx=0, C=.1, **kwargs):
    """For one variable, predict its neighbouring nodes.

    Args:
        df_features (pandas.DataFrame):
        df_target (pandas.Series):
        idx (int): (optional) for printing purposes
        kwargs (dict): additional options for algorithms
        C (float): Penalty parameter of the error term

    Returns:
        list: scores of each feature relatively to the target
    """
    # Absolute coefficients of a linear SVR serve as feature scores.
    model = LinearSVR(C=C)
    model.fit(df_features.values, df_target.values)
    return np.abs(model.coef_)
[ "For", "one", "variable", "predict", "its", "neighbouring", "nodes", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/independence/graph/FSRegression.py#L76-L91
[ "def", "predict_features", "(", "self", ",", "df_features", ",", "df_target", ",", "idx", "=", "0", ",", "C", "=", ".1", ",", "*", "*", "kwargs", ")", ":", "lsvc", "=", "LinearSVR", "(", "C", "=", "C", ")", ".", "fit", "(", "df_features", ".", "v...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
DecisionTreeRegression.predict_features
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms Returns: list: scores of each feature relatively to the target
cdt/independence/graph/FSRegression.py
def predict_features(self, df_features, df_target, idx=0, **kwargs):
    """For one variable, predict its neighbouring nodes.

    Args:
        df_features (pandas.DataFrame):
        df_target (pandas.Series):
        idx (int): (optional) for printing purposes
        kwargs (dict): additional options for algorithms

    Returns:
        list: scores of each feature relatively to the target
    """
    # Tree-based feature importances serve as the neighbourhood scores.
    tree = DecisionTreeRegressor()
    tree.fit(df_features.values, df_target.values)
    return tree.feature_importances_
[ "For", "one", "variable", "predict", "its", "neighbouring", "nodes", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/independence/graph/FSRegression.py#L100-L117
[ "def", "predict_features", "(", "self", ",", "df_features", ",", "df_target", ",", "idx", "=", "0", ",", "*", "*", "kwargs", ")", ":", "X", "=", "df_features", ".", "values", "y", "=", "df_target", ".", "values", "regressor", "=", "DecisionTreeRegressor", ...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
ARD.predict_features
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms Returns: list: scores of each feature relatively to the target
cdt/independence/graph/FSRegression.py
def predict_features(self, df_features, df_target, idx=0, **kwargs):
    """For one variable, predict its neighbouring nodes.

    Args:
        df_features (pandas.DataFrame):
        df_target (pandas.Series):
        idx (int): (optional) for printing purposes
        kwargs (dict): additional options for algorithms

    Returns:
        list: scores of each feature relatively to the target
    """
    # Absolute ARD-regression coefficients serve as feature scores.
    model = ard(compute_score=True)
    model.fit(df_features.values, df_target.values.ravel())
    return np.abs(model.coef_)
[ "For", "one", "variable", "predict", "its", "neighbouring", "nodes", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/independence/graph/FSRegression.py#L125-L142
[ "def", "predict_features", "(", "self", ",", "df_features", ",", "df_target", ",", "idx", "=", "0", ",", "*", "*", "kwargs", ")", ":", "X", "=", "df_features", ".", "values", "y", "=", "df_target", ".", "values", "clf", "=", "ard", "(", "compute_score"...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
RRelief.predict_features
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms Returns: list: scores of each feature relatively to the target
cdt/independence/graph/FSRegression.py
def predict_features(self, df_features, df_target, idx=0, **kwargs):
    """For one variable, predict its neighbouring nodes.

    Args:
        df_features (pandas.DataFrame):
        df_target (pandas.Series):
        idx (int): (optional) for printing purposes
        kwargs (dict): additional options for algorithms

    Returns:
        list: scores of each feature relatively to the target
    """
    # ReliefF feature importances serve as the neighbourhood scores.
    relief = ReliefF()
    relief.fit(df_features.values, df_target.values[:, 0])
    return relief.feature_importances_
[ "For", "one", "variable", "predict", "its", "neighbouring", "nodes", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/independence/graph/FSRegression.py#L150-L167
[ "def", "predict_features", "(", "self", ",", "df_features", ",", "df_target", ",", "idx", "=", "0", ",", "*", "*", "kwargs", ")", ":", "X", "=", "df_features", ".", "values", "y", "=", "df_target", ".", "values", "[", ":", ",", "0", "]", "rr", "=",...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
NCC_model.forward
Passing data through the network. :param x: 2d tensor containing both (x,y) Variables :return: output of the net
cdt/causality/pairwise/NCC.py
def forward(self, x): """Passing data through the network. :param x: 2d tensor containing both (x,y) Variables :return: output of the net """ features = self.conv(x).mean(dim=2) return self.dense(features)
def forward(self, x): """Passing data through the network. :param x: 2d tensor containing both (x,y) Variables :return: output of the net """ features = self.conv(x).mean(dim=2) return self.dense(features)
[ "Passing", "data", "through", "the", "network", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/pairwise/NCC.py#L82-L90
[ "def", "forward", "(", "self", ",", "x", ")", ":", "features", "=", "self", ".", "conv", "(", "x", ")", ".", "mean", "(", "dim", "=", "2", ")", "return", "self", ".", "dense", "(", "features", ")" ]
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
NCC.fit
Fit the NCC model. Args: x_tr (pd.DataFrame): CEPC format dataframe containing the pairs y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs epochs (int): number of train epochs learning_rate (float): learning rate of Adam verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``) device (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``)
cdt/causality/pairwise/NCC.py
def fit(self, x_tr, y_tr, epochs=50, batchsize=32, learning_rate=0.01, verbose=None, device=None): """Fit the NCC model. Args: x_tr (pd.DataFrame): CEPC format dataframe containing the pairs y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs epochs (int): number of train epochs learning_rate (float): learning rate of Adam verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``) device (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``) """ if batchsize > len(x_tr): batchsize = len(x_tr) verbose, device = SETTINGS.get_default(('verbose', verbose), ('device', device)) self.model = NCC_model() opt = th.optim.Adam(self.model.parameters(), lr=learning_rate) criterion = th.nn.BCEWithLogitsLoss() y = y_tr.values if isinstance(y_tr, pd.DataFrame) else y_tr y = th.Tensor(y)/2 + .5 # print(y) self.model = self.model.to(device) y = y.to(device) dataset = [] for i, (idx, row) in enumerate(x_tr.iterrows()): a = row['A'].reshape((len(row['A']), 1)) b = row['B'].reshape((len(row['B']), 1)) m = np.hstack((a, b)) m = m.astype('float32') m = th.from_numpy(m).t().unsqueeze(0) dataset.append(m) dataset = [m.to(device) for m in dataset] acc = [0] da = th.utils.data.DataLoader(Dataset(dataset, y), batch_size=batchsize, shuffle=True) data_per_epoch = (len(dataset) // batchsize) with trange(epochs, desc="Epochs", disable=not verbose) as te: for epoch in te: with trange(data_per_epoch, desc="Batches of {}".format(batchsize), disable=not (verbose and batchsize == len(dataset))) as t: output = [] labels = [] for (batch, label), i in zip(da, t): opt.zero_grad() # print(batch.shape, labels.shape) out = th.stack([self.model(m) for m in batch], 0).squeeze(2) loss = criterion(out, label) loss.backward() t.set_postfix(loss=loss.item()) opt.step() output.append(out) labels.append(label) acc = th.where(th.cat(output, 0) > .5, th.ones(len(output)), th.zeros(len(output))) - th.cat(labels, 0) te.set_postfix(Acc=1-acc.abs().mean().item())
def fit(self, x_tr, y_tr, epochs=50, batchsize=32, learning_rate=0.01, verbose=None, device=None): """Fit the NCC model. Args: x_tr (pd.DataFrame): CEPC format dataframe containing the pairs y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs epochs (int): number of train epochs learning_rate (float): learning rate of Adam verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``) device (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``) """ if batchsize > len(x_tr): batchsize = len(x_tr) verbose, device = SETTINGS.get_default(('verbose', verbose), ('device', device)) self.model = NCC_model() opt = th.optim.Adam(self.model.parameters(), lr=learning_rate) criterion = th.nn.BCEWithLogitsLoss() y = y_tr.values if isinstance(y_tr, pd.DataFrame) else y_tr y = th.Tensor(y)/2 + .5 # print(y) self.model = self.model.to(device) y = y.to(device) dataset = [] for i, (idx, row) in enumerate(x_tr.iterrows()): a = row['A'].reshape((len(row['A']), 1)) b = row['B'].reshape((len(row['B']), 1)) m = np.hstack((a, b)) m = m.astype('float32') m = th.from_numpy(m).t().unsqueeze(0) dataset.append(m) dataset = [m.to(device) for m in dataset] acc = [0] da = th.utils.data.DataLoader(Dataset(dataset, y), batch_size=batchsize, shuffle=True) data_per_epoch = (len(dataset) // batchsize) with trange(epochs, desc="Epochs", disable=not verbose) as te: for epoch in te: with trange(data_per_epoch, desc="Batches of {}".format(batchsize), disable=not (verbose and batchsize == len(dataset))) as t: output = [] labels = [] for (batch, label), i in zip(da, t): opt.zero_grad() # print(batch.shape, labels.shape) out = th.stack([self.model(m) for m in batch], 0).squeeze(2) loss = criterion(out, label) loss.backward() t.set_postfix(loss=loss.item()) opt.step() output.append(out) labels.append(label) acc = th.where(th.cat(output, 0) > .5, th.ones(len(output)), th.zeros(len(output))) - th.cat(labels, 0) te.set_postfix(Acc=1-acc.abs().mean().item())
[ "Fit", "the", "NCC", "model", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/pairwise/NCC.py#L108-L165
[ "def", "fit", "(", "self", ",", "x_tr", ",", "y_tr", ",", "epochs", "=", "50", ",", "batchsize", "=", "32", ",", "learning_rate", "=", "0.01", ",", "verbose", "=", "None", ",", "device", "=", "None", ")", ":", "if", "batchsize", ">", "len", "(", ...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
NCC.predict_proba
Infer causal directions using the trained NCC pairwise model. Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 device (str): Device to run the algorithm on (defaults to ``cdt.SETTINGS.default_device``) Returns: float: Causation score (Value : 1 if a->b and -1 if b->a)
cdt/causality/pairwise/NCC.py
def predict_proba(self, a, b, device=None): """Infer causal directions using the trained NCC pairwise model. Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 device (str): Device to run the algorithm on (defaults to ``cdt.SETTINGS.default_device``) Returns: float: Causation score (Value : 1 if a->b and -1 if b->a) """ device = SETTINGS.get_default(device=device) if self.model is None: print('Model has to be trained before doing any predictions') raise ValueError if len(np.array(a).shape) == 1: a = np.array(a).reshape((-1, 1)) b = np.array(b).reshape((-1, 1)) m = np.hstack((a, b)) m = scale(m) m = m.astype('float32') m = th.from_numpy(m).t().unsqueeze(0) if th.cuda.is_available(): m = m.cuda() return (self.model(m).data.cpu().numpy()-.5) * 2
def predict_proba(self, a, b, device=None): """Infer causal directions using the trained NCC pairwise model. Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 device (str): Device to run the algorithm on (defaults to ``cdt.SETTINGS.default_device``) Returns: float: Causation score (Value : 1 if a->b and -1 if b->a) """ device = SETTINGS.get_default(device=device) if self.model is None: print('Model has to be trained before doing any predictions') raise ValueError if len(np.array(a).shape) == 1: a = np.array(a).reshape((-1, 1)) b = np.array(b).reshape((-1, 1)) m = np.hstack((a, b)) m = scale(m) m = m.astype('float32') m = th.from_numpy(m).t().unsqueeze(0) if th.cuda.is_available(): m = m.cuda() return (self.model(m).data.cpu().numpy()-.5) * 2
[ "Infer", "causal", "directions", "using", "the", "trained", "NCC", "pairwise", "model", "." ]
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/pairwise/NCC.py#L167-L193
[ "def", "predict_proba", "(", "self", ",", "a", ",", "b", ",", "device", "=", "None", ")", ":", "device", "=", "SETTINGS", ".", "get_default", "(", "device", "=", "device", ")", "if", "self", ".", "model", "is", "None", ":", "print", "(", "'Model has ...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
NCC.predict_dataset
Args: x_tr (pd.DataFrame): CEPC format dataframe containing the pairs y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs epochs (int): number of train epochs learning rate (float): learning rate of Adam verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``) device (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``) Returns: pandas.DataFrame: dataframe containing the predicted causation coefficients
cdt/causality/pairwise/NCC.py
def predict_dataset(self, df, device=None, verbose=None): """ Args: x_tr (pd.DataFrame): CEPC format dataframe containing the pairs y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs epochs (int): number of train epochs learning rate (float): learning rate of Adam verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``) device (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``) Returns: pandas.DataFrame: dataframe containing the predicted causation coefficients """ verbose, device = SETTINGS.get_default(('verbose', verbose), ('device', device)) dataset = [] for i, (idx, row) in enumerate(df.iterrows()): a = row['A'].reshape((len(row['A']), 1)) b = row['B'].reshape((len(row['B']), 1)) m = np.hstack((a, b)) m = m.astype('float32') m = th.from_numpy(m).t().unsqueeze(0) dataset.append(m) dataset = [m.to(device) for m in dataset] return pd.DataFrame((th.cat([self.model(m) for m, t in zip(dataset, trange(len(dataset)), disable=not verbose)]\ , 0).data.cpu().numpy() -.5) * 2)
def predict_dataset(self, df, device=None, verbose=None): """ Args: x_tr (pd.DataFrame): CEPC format dataframe containing the pairs y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs epochs (int): number of train epochs learning rate (float): learning rate of Adam verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``) device (str): cuda or cpu device (defaults to ``cdt.SETTINGS.default_device``) Returns: pandas.DataFrame: dataframe containing the predicted causation coefficients """ verbose, device = SETTINGS.get_default(('verbose', verbose), ('device', device)) dataset = [] for i, (idx, row) in enumerate(df.iterrows()): a = row['A'].reshape((len(row['A']), 1)) b = row['B'].reshape((len(row['B']), 1)) m = np.hstack((a, b)) m = m.astype('float32') m = th.from_numpy(m).t().unsqueeze(0) dataset.append(m) dataset = [m.to(device) for m in dataset] return pd.DataFrame((th.cat([self.model(m) for m, t in zip(dataset, trange(len(dataset)), disable=not verbose)]\ , 0).data.cpu().numpy() -.5) * 2)
[ "Args", ":", "x_tr", "(", "pd", ".", "DataFrame", ")", ":", "CEPC", "format", "dataframe", "containing", "the", "pairs", "y_tr", "(", "pd", ".", "DataFrame", "or", "np", ".", "ndarray", ")", ":", "labels", "associated", "to", "the", "pairs", "epochs", ...
Diviyan-Kalainathan/CausalDiscoveryToolbox
python
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/pairwise/NCC.py#L195-L222
[ "def", "predict_dataset", "(", "self", ",", "df", ",", "device", "=", "None", ",", "verbose", "=", "None", ")", ":", "verbose", ",", "device", "=", "SETTINGS", ".", "get_default", "(", "(", "'verbose'", ",", "verbose", ")", ",", "(", "'device'", ",", ...
be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1
valid
VisualDirective.phrase_to_filename
Convert phrase to normilized file name.
doc/source/custom_directives.py
def phrase_to_filename(self, phrase): """Convert phrase to normilized file name.""" # remove non-word characters name = re.sub(r"[^\w\s\.]", '', phrase.strip().lower()) # replace whitespace with underscores name = re.sub(r"\s+", '_', name) return name + '.png'
def phrase_to_filename(self, phrase): """Convert phrase to normilized file name.""" # remove non-word characters name = re.sub(r"[^\w\s\.]", '', phrase.strip().lower()) # replace whitespace with underscores name = re.sub(r"\s+", '_', name) return name + '.png'
[ "Convert", "phrase", "to", "normilized", "file", "name", "." ]
swistakm/pyimgui
python
https://github.com/swistakm/pyimgui/blob/04dd78053900bf69e0ce7638d1b7036bf2181982/doc/source/custom_directives.py#L152-L159
[ "def", "phrase_to_filename", "(", "self", ",", "phrase", ")", ":", "# remove non-word characters", "name", "=", "re", ".", "sub", "(", "r\"[^\\w\\s\\.]\"", ",", "''", ",", "phrase", ".", "strip", "(", ")", ".", "lower", "(", ")", ")", "# replace whitespace w...
04dd78053900bf69e0ce7638d1b7036bf2181982
valid
Page.seed_url
A URL that can be used to open the page. The URL is formatted from :py:attr:`URL_TEMPLATE`, which is then appended to :py:attr:`base_url` unless the template results in an absolute URL. :return: URL that can be used to open the page. :rtype: str
src/pypom/page.py
def seed_url(self): """A URL that can be used to open the page. The URL is formatted from :py:attr:`URL_TEMPLATE`, which is then appended to :py:attr:`base_url` unless the template results in an absolute URL. :return: URL that can be used to open the page. :rtype: str """ url = self.base_url if self.URL_TEMPLATE is not None: url = urlparse.urljoin( self.base_url, self.URL_TEMPLATE.format(**self.url_kwargs) ) if not url: return None url_parts = list(urlparse.urlparse(url)) query = urlparse.parse_qsl(url_parts[4]) for k, v in self.url_kwargs.items(): if v is None: continue if "{{{}}}".format(k) not in str(self.URL_TEMPLATE): for i in iterable(v): query.append((k, i)) url_parts[4] = urlencode(query) return urlparse.urlunparse(url_parts)
def seed_url(self): """A URL that can be used to open the page. The URL is formatted from :py:attr:`URL_TEMPLATE`, which is then appended to :py:attr:`base_url` unless the template results in an absolute URL. :return: URL that can be used to open the page. :rtype: str """ url = self.base_url if self.URL_TEMPLATE is not None: url = urlparse.urljoin( self.base_url, self.URL_TEMPLATE.format(**self.url_kwargs) ) if not url: return None url_parts = list(urlparse.urlparse(url)) query = urlparse.parse_qsl(url_parts[4]) for k, v in self.url_kwargs.items(): if v is None: continue if "{{{}}}".format(k) not in str(self.URL_TEMPLATE): for i in iterable(v): query.append((k, i)) url_parts[4] = urlencode(query) return urlparse.urlunparse(url_parts)
[ "A", "URL", "that", "can", "be", "used", "to", "open", "the", "page", "." ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/page.py#L86-L117
[ "def", "seed_url", "(", "self", ")", ":", "url", "=", "self", ".", "base_url", "if", "self", ".", "URL_TEMPLATE", "is", "not", "None", ":", "url", "=", "urlparse", ".", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "URL_TEMPLATE", ".", "...
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
Page.open
Open the page. Navigates to :py:attr:`seed_url` and calls :py:func:`wait_for_page_to_load`. :return: The current page object. :rtype: :py:class:`Page` :raises: UsageError
src/pypom/page.py
def open(self): """Open the page. Navigates to :py:attr:`seed_url` and calls :py:func:`wait_for_page_to_load`. :return: The current page object. :rtype: :py:class:`Page` :raises: UsageError """ if self.seed_url: self.driver_adapter.open(self.seed_url) self.wait_for_page_to_load() return self raise UsageError("Set a base URL or URL_TEMPLATE to open this page.")
def open(self): """Open the page. Navigates to :py:attr:`seed_url` and calls :py:func:`wait_for_page_to_load`. :return: The current page object. :rtype: :py:class:`Page` :raises: UsageError """ if self.seed_url: self.driver_adapter.open(self.seed_url) self.wait_for_page_to_load() return self raise UsageError("Set a base URL or URL_TEMPLATE to open this page.")
[ "Open", "the", "page", "." ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/page.py#L119-L133
[ "def", "open", "(", "self", ")", ":", "if", "self", ".", "seed_url", ":", "self", ".", "driver_adapter", ".", "open", "(", "self", ".", "seed_url", ")", "self", ".", "wait_for_page_to_load", "(", ")", "return", "self", "raise", "UsageError", "(", "\"Set ...
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
Page.wait_for_page_to_load
Wait for the page to load.
src/pypom/page.py
def wait_for_page_to_load(self): """Wait for the page to load.""" self.wait.until(lambda _: self.loaded) self.pm.hook.pypom_after_wait_for_page_to_load(page=self) return self
def wait_for_page_to_load(self): """Wait for the page to load.""" self.wait.until(lambda _: self.loaded) self.pm.hook.pypom_after_wait_for_page_to_load(page=self) return self
[ "Wait", "for", "the", "page", "to", "load", "." ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/page.py#L135-L139
[ "def", "wait_for_page_to_load", "(", "self", ")", ":", "self", ".", "wait", ".", "until", "(", "lambda", "_", ":", "self", ".", "loaded", ")", "self", ".", "pm", ".", "hook", ".", "pypom_after_wait_for_page_to_load", "(", "page", "=", "self", ")", "retur...
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
register
Register the Selenium specific driver implementation. This register call is performed by the init module if selenium is available.
src/pypom/selenium_driver.py
def register(): """ Register the Selenium specific driver implementation. This register call is performed by the init module if selenium is available. """ registerDriver( ISelenium, Selenium, class_implements=[ Firefox, Chrome, Ie, Edge, Opera, Safari, BlackBerry, PhantomJS, Android, Remote, EventFiringWebDriver, ], )
def register(): """ Register the Selenium specific driver implementation. This register call is performed by the init module if selenium is available. """ registerDriver( ISelenium, Selenium, class_implements=[ Firefox, Chrome, Ie, Edge, Opera, Safari, BlackBerry, PhantomJS, Android, Remote, EventFiringWebDriver, ], )
[ "Register", "the", "Selenium", "specific", "driver", "implementation", "." ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/selenium_driver.py#L121-L143
[ "def", "register", "(", ")", ":", "registerDriver", "(", "ISelenium", ",", "Selenium", ",", "class_implements", "=", "[", "Firefox", ",", "Chrome", ",", "Ie", ",", "Edge", ",", "Opera", ",", "Safari", ",", "BlackBerry", ",", "PhantomJS", ",", "Android", ...
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
Region.root
Root element for the page region. Page regions should define a root element either by passing this on instantiation or by defining a :py:attr:`_root_locator` attribute. To reduce the chances of hitting :py:class:`~selenium.common.exceptions.StaleElementReferenceException` or similar you should use :py:attr:`_root_locator`, as this is looked up every time the :py:attr:`root` property is accessed.
src/pypom/region.py
def root(self): """Root element for the page region. Page regions should define a root element either by passing this on instantiation or by defining a :py:attr:`_root_locator` attribute. To reduce the chances of hitting :py:class:`~selenium.common.exceptions.StaleElementReferenceException` or similar you should use :py:attr:`_root_locator`, as this is looked up every time the :py:attr:`root` property is accessed. """ if self._root is None and self._root_locator is not None: return self.page.find_element(*self._root_locator) return self._root
def root(self): """Root element for the page region. Page regions should define a root element either by passing this on instantiation or by defining a :py:attr:`_root_locator` attribute. To reduce the chances of hitting :py:class:`~selenium.common.exceptions.StaleElementReferenceException` or similar you should use :py:attr:`_root_locator`, as this is looked up every time the :py:attr:`root` property is accessed. """ if self._root is None and self._root_locator is not None: return self.page.find_element(*self._root_locator) return self._root
[ "Root", "element", "for", "the", "page", "region", "." ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/region.py#L76-L87
[ "def", "root", "(", "self", ")", ":", "if", "self", ".", "_root", "is", "None", "and", "self", ".", "_root_locator", "is", "not", "None", ":", "return", "self", ".", "page", ".", "find_element", "(", "*", "self", ".", "_root_locator", ")", "return", ...
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
Region.wait_for_region_to_load
Wait for the page region to load.
src/pypom/region.py
def wait_for_region_to_load(self): """Wait for the page region to load.""" self.wait.until(lambda _: self.loaded) self.pm.hook.pypom_after_wait_for_region_to_load(region=self) return self
def wait_for_region_to_load(self): """Wait for the page region to load.""" self.wait.until(lambda _: self.loaded) self.pm.hook.pypom_after_wait_for_region_to_load(region=self) return self
[ "Wait", "for", "the", "page", "region", "to", "load", "." ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/region.py#L89-L93
[ "def", "wait_for_region_to_load", "(", "self", ")", ":", "self", ".", "wait", ".", "until", "(", "lambda", "_", ":", "self", ".", "loaded", ")", "self", ".", "pm", ".", "hook", ".", "pypom_after_wait_for_region_to_load", "(", "region", "=", "self", ")", ...
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
Region.find_element
Finds an element on the page. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target element. :type strategy: str :type locator: str :return: An element. :rytpe: :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.driver.webdriver.WebDriverElement`
src/pypom/region.py
def find_element(self, strategy, locator): """Finds an element on the page. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target element. :type strategy: str :type locator: str :return: An element. :rytpe: :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.driver.webdriver.WebDriverElement` """ return self.driver_adapter.find_element(strategy, locator, root=self.root)
def find_element(self, strategy, locator): """Finds an element on the page. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target element. :type strategy: str :type locator: str :return: An element. :rytpe: :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.driver.webdriver.WebDriverElement` """ return self.driver_adapter.find_element(strategy, locator, root=self.root)
[ "Finds", "an", "element", "on", "the", "page", "." ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/region.py#L95-L106
[ "def", "find_element", "(", "self", ",", "strategy", ",", "locator", ")", ":", "return", "self", ".", "driver_adapter", ".", "find_element", "(", "strategy", ",", "locator", ",", "root", "=", "self", ".", "root", ")" ]
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
Region.find_elements
Finds elements on the page. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target elements. :type strategy: str :type locator: str :return: List of :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.element_list.ElementList` :rtype: list
src/pypom/region.py
def find_elements(self, strategy, locator): """Finds elements on the page. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target elements. :type strategy: str :type locator: str :return: List of :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.element_list.ElementList` :rtype: list """ return self.driver_adapter.find_elements(strategy, locator, root=self.root)
def find_elements(self, strategy, locator): """Finds elements on the page. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target elements. :type strategy: str :type locator: str :return: List of :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.element_list.ElementList` :rtype: list """ return self.driver_adapter.find_elements(strategy, locator, root=self.root)
[ "Finds", "elements", "on", "the", "page", "." ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/region.py#L108-L119
[ "def", "find_elements", "(", "self", ",", "strategy", ",", "locator", ")", ":", "return", "self", ".", "driver_adapter", ".", "find_elements", "(", "strategy", ",", "locator", ",", "root", "=", "self", ".", "root", ")" ]
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
Region.is_element_present
Checks whether an element is present. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target element. :type strategy: str :type locator: str :return: ``True`` if element is present, else ``False``. :rtype: bool
src/pypom/region.py
def is_element_present(self, strategy, locator): """Checks whether an element is present. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target element. :type strategy: str :type locator: str :return: ``True`` if element is present, else ``False``. :rtype: bool """ return self.driver_adapter.is_element_present(strategy, locator, root=self.root)
def is_element_present(self, strategy, locator): """Checks whether an element is present. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target element. :type strategy: str :type locator: str :return: ``True`` if element is present, else ``False``. :rtype: bool """ return self.driver_adapter.is_element_present(strategy, locator, root=self.root)
[ "Checks", "whether", "an", "element", "is", "present", "." ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/region.py#L121-L132
[ "def", "is_element_present", "(", "self", ",", "strategy", ",", "locator", ")", ":", "return", "self", ".", "driver_adapter", ".", "is_element_present", "(", "strategy", ",", "locator", ",", "root", "=", "self", ".", "root", ")" ]
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
Region.is_element_displayed
Checks whether an element is displayed. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target element. :type strategy: str :type locator: str :return: ``True`` if element is displayed, else ``False``. :rtype: bool
src/pypom/region.py
def is_element_displayed(self, strategy, locator): """Checks whether an element is displayed. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target element. :type strategy: str :type locator: str :return: ``True`` if element is displayed, else ``False``. :rtype: bool """ return self.driver_adapter.is_element_displayed( strategy, locator, root=self.root )
def is_element_displayed(self, strategy, locator): """Checks whether an element is displayed. :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`. :param locator: Location of target element. :type strategy: str :type locator: str :return: ``True`` if element is displayed, else ``False``. :rtype: bool """ return self.driver_adapter.is_element_displayed( strategy, locator, root=self.root )
[ "Checks", "whether", "an", "element", "is", "displayed", "." ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/region.py#L134-L147
[ "def", "is_element_displayed", "(", "self", ",", "strategy", ",", "locator", ")", ":", "return", "self", ".", "driver_adapter", ".", "is_element_displayed", "(", "strategy", ",", "locator", ",", "root", "=", "self", ".", "root", ")" ]
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
registerDriver
Register driver adapter used by page object
src/pypom/driver.py
def registerDriver(iface, driver, class_implements=[]): """ Register driver adapter used by page object""" for class_item in class_implements: classImplements(class_item, iface) component.provideAdapter(factory=driver, adapts=[iface], provides=IDriver)
def registerDriver(iface, driver, class_implements=[]): """ Register driver adapter used by page object""" for class_item in class_implements: classImplements(class_item, iface) component.provideAdapter(factory=driver, adapts=[iface], provides=IDriver)
[ "Register", "driver", "adapter", "used", "by", "page", "object" ]
mozilla/PyPOM
python
https://github.com/mozilla/PyPOM/blob/1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8/src/pypom/driver.py#L11-L16
[ "def", "registerDriver", "(", "iface", ",", "driver", ",", "class_implements", "=", "[", "]", ")", ":", "for", "class_item", "in", "class_implements", ":", "classImplements", "(", "class_item", ",", "iface", ")", "component", ".", "provideAdapter", "(", "facto...
1e7d7ac6e19ec2dac0ea04bad5f3daadbe0c43b8
valid
isHcl
Detects whether a string is JSON or HCL :param s: String that may contain HCL or JSON :returns: True if HCL, False if JSON, raises ValueError if neither
src/hcl/api.py
def isHcl(s): ''' Detects whether a string is JSON or HCL :param s: String that may contain HCL or JSON :returns: True if HCL, False if JSON, raises ValueError if neither ''' for c in s: if c.isspace(): continue if c == '{': return False else: return True raise ValueError("No HCL object could be decoded")
def isHcl(s): ''' Detects whether a string is JSON or HCL :param s: String that may contain HCL or JSON :returns: True if HCL, False if JSON, raises ValueError if neither ''' for c in s: if c.isspace(): continue if c == '{': return False else: return True raise ValueError("No HCL object could be decoded")
[ "Detects", "whether", "a", "string", "is", "JSON", "or", "HCL", ":", "param", "s", ":", "String", "that", "may", "contain", "HCL", "or", "JSON", ":", "returns", ":", "True", "if", "HCL", "False", "if", "JSON", "raises", "ValueError", "if", "neither" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/api.py#L24-L42
[ "def", "isHcl", "(", "s", ")", ":", "for", "c", "in", "s", ":", "if", "c", ".", "isspace", "(", ")", ":", "continue", "if", "c", "==", "'{'", ":", "return", "False", "else", ":", "return", "True", "raise", "ValueError", "(", "\"No HCL object could be...
e6e27742215692974f0ef503a91a81ec4adc171c
valid
loads
Deserializes a string and converts it to a dictionary. The contents of the string must either be JSON or HCL. :returns: Dictionary
src/hcl/api.py
def loads(s): ''' Deserializes a string and converts it to a dictionary. The contents of the string must either be JSON or HCL. :returns: Dictionary ''' s = u(s) if isHcl(s): return HclParser().parse(s) else: return json.loads(s)
def loads(s): ''' Deserializes a string and converts it to a dictionary. The contents of the string must either be JSON or HCL. :returns: Dictionary ''' s = u(s) if isHcl(s): return HclParser().parse(s) else: return json.loads(s)
[ "Deserializes", "a", "string", "and", "converts", "it", "to", "a", "dictionary", ".", "The", "contents", "of", "the", "string", "must", "either", "be", "JSON", "or", "HCL", ".", ":", "returns", ":", "Dictionary" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/api.py#L57-L68
[ "def", "loads", "(", "s", ")", ":", "s", "=", "u", "(", "s", ")", "if", "isHcl", "(", "s", ")", ":", "return", "HclParser", "(", ")", ".", "parse", "(", "s", ")", "else", ":", "return", "json", ".", "loads", "(", "s", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
Lexer.t_hexnumber
r'-?0[xX][0-9a-fA-F]+
src/hcl/lexer.py
def t_hexnumber(self, t): r'-?0[xX][0-9a-fA-F]+' t.value = int(t.value, base=16) t.type = 'NUMBER' return t
def t_hexnumber(self, t): r'-?0[xX][0-9a-fA-F]+' t.value = int(t.value, base=16) t.type = 'NUMBER' return t
[ "r", "-", "?0", "[", "xX", "]", "[", "0", "-", "9a", "-", "fA", "-", "F", "]", "+" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/lexer.py#L74-L78
[ "def", "t_hexnumber", "(", "self", ",", "t", ")", ":", "t", ".", "value", "=", "int", "(", "t", ".", "value", ",", "base", "=", "16", ")", "t", ".", "type", "=", "'NUMBER'", "return", "t" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
Lexer.t_intnumber
r'-?\d+
src/hcl/lexer.py
def t_intnumber(self, t): r'-?\d+' t.value = int(t.value) t.type = 'NUMBER' return t
def t_intnumber(self, t): r'-?\d+' t.value = int(t.value) t.type = 'NUMBER' return t
[ "r", "-", "?", "\\", "d", "+" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/lexer.py#L80-L84
[ "def", "t_intnumber", "(", "self", ",", "t", ")", ":", "t", ".", "value", "=", "int", "(", "t", ".", "value", ")", "t", ".", "type", "=", "'NUMBER'", "return", "t" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
Lexer.t_string
r'\"
src/hcl/lexer.py
def t_string(self, t): # Start of a string r'\"' # abs_start is the absolute start of the string. We use this at the end # to know how many new lines we've consumed t.lexer.abs_start = t.lexer.lexpos # rel_pos is the begining of the unconsumed part of the string. It will # get modified when consuming escaped characters t.lexer.rel_pos = t.lexer.lexpos # The value of the consumed part of the string t.lexer.string_value = u'' t.lexer.begin('string')
def t_string(self, t): # Start of a string r'\"' # abs_start is the absolute start of the string. We use this at the end # to know how many new lines we've consumed t.lexer.abs_start = t.lexer.lexpos # rel_pos is the begining of the unconsumed part of the string. It will # get modified when consuming escaped characters t.lexer.rel_pos = t.lexer.lexpos # The value of the consumed part of the string t.lexer.string_value = u'' t.lexer.begin('string')
[ "r", "\\" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/lexer.py#L100-L111
[ "def", "t_string", "(", "self", ",", "t", ")", ":", "# Start of a string", "# abs_start is the absolute start of the string. We use this at the end", "# to know how many new lines we've consumed", "t", ".", "lexer", ".", "abs_start", "=", "t", ".", "lexer", ".", "lexpos", ...
e6e27742215692974f0ef503a91a81ec4adc171c
valid
Lexer.t_string_escapedchar
r'(?<=\\)(\"|\\)
src/hcl/lexer.py
def t_string_escapedchar(self, t): # If a quote or backslash is escaped, build up the string by ignoring # the escape character. Should this be done for other characters? r'(?<=\\)(\"|\\)' t.lexer.string_value += ( t.lexer.lexdata[t.lexer.rel_pos : t.lexer.lexpos - 2] + t.value ) t.lexer.rel_pos = t.lexer.lexpos pass
def t_string_escapedchar(self, t): # If a quote or backslash is escaped, build up the string by ignoring # the escape character. Should this be done for other characters? r'(?<=\\)(\"|\\)' t.lexer.string_value += ( t.lexer.lexdata[t.lexer.rel_pos : t.lexer.lexpos - 2] + t.value ) t.lexer.rel_pos = t.lexer.lexpos pass
[ "r", "(", "?<", "=", "\\\\", ")", "(", "\\", "|", "\\\\", ")" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/lexer.py#L113-L121
[ "def", "t_string_escapedchar", "(", "self", ",", "t", ")", ":", "# If a quote or backslash is escaped, build up the string by ignoring", "# the escape character. Should this be done for other characters?", "t", ".", "lexer", ".", "string_value", "+=", "(", "t", ".", "lexer", ...
e6e27742215692974f0ef503a91a81ec4adc171c
valid
Lexer.t_string_STRING
r'\"
src/hcl/lexer.py
def t_string_STRING(self, t): # End of the string r'\"' t.value = ( t.lexer.string_value + t.lexer.lexdata[t.lexer.rel_pos : t.lexer.lexpos - 1] ) t.lexer.lineno += t.lexer.lexdata[t.lexer.abs_start : t.lexer.lexpos - 1].count( '\n' ) t.lexer.begin('INITIAL') return t
def t_string_STRING(self, t): # End of the string r'\"' t.value = ( t.lexer.string_value + t.lexer.lexdata[t.lexer.rel_pos : t.lexer.lexpos - 1] ) t.lexer.lineno += t.lexer.lexdata[t.lexer.abs_start : t.lexer.lexpos - 1].count( '\n' ) t.lexer.begin('INITIAL') return t
[ "r", "\\" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/lexer.py#L134-L144
[ "def", "t_string_STRING", "(", "self", ",", "t", ")", ":", "# End of the string", "t", ".", "value", "=", "(", "t", ".", "lexer", ".", "string_value", "+", "t", ".", "lexer", ".", "lexdata", "[", "t", ".", "lexer", ".", "rel_pos", ":", "t", ".", "l...
e6e27742215692974f0ef503a91a81ec4adc171c
valid
Lexer.t_stringdollar_rbrace
r'\}
src/hcl/lexer.py
def t_stringdollar_rbrace(self, t): r'\}' t.lexer.braces -= 1 if t.lexer.braces == 0: # End of the dollar brace, back to the rest of the string t.lexer.begin('string')
def t_stringdollar_rbrace(self, t): r'\}' t.lexer.braces -= 1 if t.lexer.braces == 0: # End of the dollar brace, back to the rest of the string t.lexer.begin('string')
[ "r", "\\", "}" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/lexer.py#L161-L167
[ "def", "t_stringdollar_rbrace", "(", "self", ",", "t", ")", ":", "t", ".", "lexer", ".", "braces", "-=", "1", "if", "t", ".", "lexer", ".", "braces", "==", "0", ":", "# End of the dollar brace, back to the rest of the string", "t", ".", "lexer", ".", "begin"...
e6e27742215692974f0ef503a91a81ec4adc171c
valid
Lexer.t_tabbedheredoc
r'<<-\S+\r?\n
src/hcl/lexer.py
def t_tabbedheredoc(self, t): r'<<-\S+\r?\n' t.lexer.is_tabbed = True self._init_heredoc(t) t.lexer.begin('tabbedheredoc')
def t_tabbedheredoc(self, t): r'<<-\S+\r?\n' t.lexer.is_tabbed = True self._init_heredoc(t) t.lexer.begin('tabbedheredoc')
[ "r", "<<", "-", "\\", "S", "+", "\\", "r?", "\\", "n" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/lexer.py#L194-L198
[ "def", "t_tabbedheredoc", "(", "self", ",", "t", ")", ":", "t", ".", "lexer", ".", "is_tabbed", "=", "True", "self", ".", "_init_heredoc", "(", "t", ")", "t", ".", "lexer", ".", "begin", "(", "'tabbedheredoc'", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
Lexer.t_heredoc
r'<<\S+\r?\n
src/hcl/lexer.py
def t_heredoc(self, t): r'<<\S+\r?\n' t.lexer.is_tabbed = False self._init_heredoc(t) t.lexer.begin('heredoc')
def t_heredoc(self, t): r'<<\S+\r?\n' t.lexer.is_tabbed = False self._init_heredoc(t) t.lexer.begin('heredoc')
[ "r", "<<", "\\", "S", "+", "\\", "r?", "\\", "n" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/lexer.py#L200-L204
[ "def", "t_heredoc", "(", "self", ",", "t", ")", ":", "t", ".", "lexer", ".", "is_tabbed", "=", "False", "self", ".", "_init_heredoc", "(", "t", ")", "t", ".", "lexer", ".", "begin", "(", "'heredoc'", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.objectlist_flat
Similar to the dict constructor, but handles dups HCL is unclear on what one should do when duplicate keys are encountered. These comments aren't clear either: from decoder.go: if we're at the root or we're directly within a list, decode into dicts, otherwise lists from object.go: there's a flattened list structure
src/hcl/parser.py
def objectlist_flat(self, lt, replace): ''' Similar to the dict constructor, but handles dups HCL is unclear on what one should do when duplicate keys are encountered. These comments aren't clear either: from decoder.go: if we're at the root or we're directly within a list, decode into dicts, otherwise lists from object.go: there's a flattened list structure ''' d = {} for k, v in lt: if k in d.keys() and not replace: if type(d[k]) is list: d[k].append(v) else: d[k] = [d[k], v] else: if isinstance(v, dict): dd = d.setdefault(k, {}) for kk, vv in iteritems(v): if type(dd) == list: dd.append({kk: vv}) elif kk in dd.keys(): if hasattr(vv, 'items'): for k2, v2 in iteritems(vv): dd[kk][k2] = v2 else: d[k] = [dd, {kk: vv}] else: dd[kk] = vv else: d[k] = v return d
def objectlist_flat(self, lt, replace): ''' Similar to the dict constructor, but handles dups HCL is unclear on what one should do when duplicate keys are encountered. These comments aren't clear either: from decoder.go: if we're at the root or we're directly within a list, decode into dicts, otherwise lists from object.go: there's a flattened list structure ''' d = {} for k, v in lt: if k in d.keys() and not replace: if type(d[k]) is list: d[k].append(v) else: d[k] = [d[k], v] else: if isinstance(v, dict): dd = d.setdefault(k, {}) for kk, vv in iteritems(v): if type(dd) == list: dd.append({kk: vv}) elif kk in dd.keys(): if hasattr(vv, 'items'): for k2, v2 in iteritems(vv): dd[kk][k2] = v2 else: d[k] = [dd, {kk: vv}] else: dd[kk] = vv else: d[k] = v return d
[ "Similar", "to", "the", "dict", "constructor", "but", "handles", "dups", "HCL", "is", "unclear", "on", "what", "one", "should", "do", "when", "duplicate", "keys", "are", "encountered", ".", "These", "comments", "aren", "t", "clear", "either", ":", "from", ...
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L65-L102
[ "def", "objectlist_flat", "(", "self", ",", "lt", ",", "replace", ")", ":", "d", "=", "{", "}", "for", "k", ",", "v", "in", "lt", ":", "if", "k", "in", "d", ".", "keys", "(", ")", "and", "not", "replace", ":", "if", "type", "(", "d", "[", "...
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_top
top : objectlist
src/hcl/parser.py
def p_top(self, p): "top : objectlist" if DEBUG: self.print_p(p) p[0] = self.objectlist_flat(p[1], True)
def p_top(self, p): "top : objectlist" if DEBUG: self.print_p(p) p[0] = self.objectlist_flat(p[1], True)
[ "top", ":", "objectlist" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L104-L108
[ "def", "p_top", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "self", ".", "objectlist_flat", "(", "p", "[", "1", "]", ",", "True", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_objectlist_1
objectlist : objectlist objectitem
src/hcl/parser.py
def p_objectlist_1(self, p): "objectlist : objectlist objectitem" if DEBUG: self.print_p(p) p[0] = p[1] + [p[2]]
def p_objectlist_1(self, p): "objectlist : objectlist objectitem" if DEBUG: self.print_p(p) p[0] = p[1] + [p[2]]
[ "objectlist", ":", "objectlist", "objectitem" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L116-L120
[ "def", "p_objectlist_1", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "2", "]", "]" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_objectlist_2
objectlist : objectlist COMMA objectitem
src/hcl/parser.py
def p_objectlist_2(self, p): "objectlist : objectlist COMMA objectitem" if DEBUG: self.print_p(p) p[0] = p[1] + [p[3]]
def p_objectlist_2(self, p): "objectlist : objectlist COMMA objectitem" if DEBUG: self.print_p(p) p[0] = p[1] + [p[3]]
[ "objectlist", ":", "objectlist", "COMMA", "objectitem" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L122-L126
[ "def", "p_objectlist_2", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "3", "]", "]" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_object_0
object : LEFTBRACE objectlist RIGHTBRACE
src/hcl/parser.py
def p_object_0(self, p): "object : LEFTBRACE objectlist RIGHTBRACE" if DEBUG: self.print_p(p) p[0] = self.objectlist_flat(p[2], False)
def p_object_0(self, p): "object : LEFTBRACE objectlist RIGHTBRACE" if DEBUG: self.print_p(p) p[0] = self.objectlist_flat(p[2], False)
[ "object", ":", "LEFTBRACE", "objectlist", "RIGHTBRACE" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L128-L132
[ "def", "p_object_0", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "self", ".", "objectlist_flat", "(", "p", "[", "2", "]", ",", "False", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_object_1
object : LEFTBRACE objectlist COMMA RIGHTBRACE
src/hcl/parser.py
def p_object_1(self, p): "object : LEFTBRACE objectlist COMMA RIGHTBRACE" if DEBUG: self.print_p(p) p[0] = self.objectlist_flat(p[2], False)
def p_object_1(self, p): "object : LEFTBRACE objectlist COMMA RIGHTBRACE" if DEBUG: self.print_p(p) p[0] = self.objectlist_flat(p[2], False)
[ "object", ":", "LEFTBRACE", "objectlist", "COMMA", "RIGHTBRACE" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L134-L138
[ "def", "p_object_1", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "self", ".", "objectlist_flat", "(", "p", "[", "2", "]", ",", "False", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_objectitem_0
objectitem : objectkey EQUAL number | objectkey EQUAL BOOL | objectkey EQUAL STRING | objectkey EQUAL object | objectkey EQUAL list
src/hcl/parser.py
def p_objectitem_0(self, p): ''' objectitem : objectkey EQUAL number | objectkey EQUAL BOOL | objectkey EQUAL STRING | objectkey EQUAL object | objectkey EQUAL list ''' if DEBUG: self.print_p(p) p[0] = (p[1], p[3])
def p_objectitem_0(self, p): ''' objectitem : objectkey EQUAL number | objectkey EQUAL BOOL | objectkey EQUAL STRING | objectkey EQUAL object | objectkey EQUAL list ''' if DEBUG: self.print_p(p) p[0] = (p[1], p[3])
[ "objectitem", ":", "objectkey", "EQUAL", "number", "|", "objectkey", "EQUAL", "BOOL", "|", "objectkey", "EQUAL", "STRING", "|", "objectkey", "EQUAL", "object", "|", "objectkey", "EQUAL", "list" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L155-L165
[ "def", "p_objectitem_0", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_block_0
block : blockId object
src/hcl/parser.py
def p_block_0(self, p): "block : blockId object" if DEBUG: self.print_p(p) p[0] = (p[1], p[2])
def p_block_0(self, p): "block : blockId object" if DEBUG: self.print_p(p) p[0] = (p[1], p[2])
[ "block", ":", "blockId", "object" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L173-L177
[ "def", "p_block_0", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "(", "p", "[", "1", "]", ",", "p", "[", "2", "]", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_block_1
block : blockId block
src/hcl/parser.py
def p_block_1(self, p): "block : blockId block" if DEBUG: self.print_p(p) p[0] = (p[1], {p[2][0]: p[2][1]})
def p_block_1(self, p): "block : blockId block" if DEBUG: self.print_p(p) p[0] = (p[1], {p[2][0]: p[2][1]})
[ "block", ":", "blockId", "block" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L179-L183
[ "def", "p_block_1", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "(", "p", "[", "1", "]", ",", "{", "p", "[", "2", "]", "[", "0", "]", ":", "p", "[", "2", "]", "[...
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_listitems_1
listitems : listitems COMMA listitem
src/hcl/parser.py
def p_listitems_1(self, p): "listitems : listitems COMMA listitem" if DEBUG: self.print_p(p) p[0] = p[1] + [p[3]]
def p_listitems_1(self, p): "listitems : listitems COMMA listitem" if DEBUG: self.print_p(p) p[0] = p[1] + [p[3]]
[ "listitems", ":", "listitems", "COMMA", "listitem" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L215-L219
[ "def", "p_listitems_1", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "3", "]", "]" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_number_1
number : float
src/hcl/parser.py
def p_number_1(self, p): "number : float" if DEBUG: self.print_p(p) p[0] = float(p[1])
def p_number_1(self, p): "number : float" if DEBUG: self.print_p(p) p[0] = float(p[1])
[ "number", ":", "float" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L237-L241
[ "def", "p_number_1", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "float", "(", "p", "[", "1", "]", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_number_2
number : int exp
src/hcl/parser.py
def p_number_2(self, p): "number : int exp" if DEBUG: self.print_p(p) p[0] = float("{0}{1}".format(p[1], p[2]))
def p_number_2(self, p): "number : int exp" if DEBUG: self.print_p(p) p[0] = float("{0}{1}".format(p[1], p[2]))
[ "number", ":", "int", "exp" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L243-L247
[ "def", "p_number_2", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "float", "(", "\"{0}{1}\"", ".", "format", "(", "p", "[", "1", "]", ",", "p", "[", "2", "]", ")", ")" ...
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_number_3
number : float exp
src/hcl/parser.py
def p_number_3(self, p): "number : float exp" if DEBUG: self.print_p(p) p[0] = float("{0}{1}".format(p[1], p[2]))
def p_number_3(self, p): "number : float exp" if DEBUG: self.print_p(p) p[0] = float("{0}{1}".format(p[1], p[2]))
[ "number", ":", "float", "exp" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L249-L253
[ "def", "p_number_3", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "float", "(", "\"{0}{1}\"", ".", "format", "(", "p", "[", "1", "]", ",", "p", "[", "2", "]", ")", ")" ...
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_exp_0
exp : EPLUS NUMBER
src/hcl/parser.py
def p_exp_0(self, p): "exp : EPLUS NUMBER" if DEBUG: self.print_p(p) p[0] = "e{0}".format(p[2])
def p_exp_0(self, p): "exp : EPLUS NUMBER" if DEBUG: self.print_p(p) p[0] = "e{0}".format(p[2])
[ "exp", ":", "EPLUS", "NUMBER" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L275-L279
[ "def", "p_exp_0", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "\"e{0}\"", ".", "format", "(", "p", "[", "2", "]", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
HclParser.p_exp_1
exp : EMINUS NUMBER
src/hcl/parser.py
def p_exp_1(self, p): "exp : EMINUS NUMBER" if DEBUG: self.print_p(p) p[0] = "e-{0}".format(p[2])
def p_exp_1(self, p): "exp : EMINUS NUMBER" if DEBUG: self.print_p(p) p[0] = "e-{0}".format(p[2])
[ "exp", ":", "EMINUS", "NUMBER" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/src/hcl/parser.py#L281-L285
[ "def", "p_exp_1", "(", "self", ",", "p", ")", ":", "if", "DEBUG", ":", "self", ".", "print_p", "(", "p", ")", "p", "[", "0", "]", "=", "\"e-{0}\"", ".", "format", "(", "p", "[", "2", "]", ")" ]
e6e27742215692974f0ef503a91a81ec4adc171c
valid
_pre_install
Initialize the parse table at install time
setup.py
def _pre_install(): '''Initialize the parse table at install time''' # Generate the parsetab.dat file at setup time dat = join(setup_dir, 'src', 'hcl', 'parsetab.dat') if exists(dat): os.unlink(dat) sys.path.insert(0, join(setup_dir, 'src')) import hcl from hcl.parser import HclParser parser = HclParser()
def _pre_install(): '''Initialize the parse table at install time''' # Generate the parsetab.dat file at setup time dat = join(setup_dir, 'src', 'hcl', 'parsetab.dat') if exists(dat): os.unlink(dat) sys.path.insert(0, join(setup_dir, 'src')) import hcl from hcl.parser import HclParser parser = HclParser()
[ "Initialize", "the", "parse", "table", "at", "install", "time" ]
virtuald/pyhcl
python
https://github.com/virtuald/pyhcl/blob/e6e27742215692974f0ef503a91a81ec4adc171c/setup.py#L21-L34
[ "def", "_pre_install", "(", ")", ":", "# Generate the parsetab.dat file at setup time", "dat", "=", "join", "(", "setup_dir", ",", "'src'", ",", "'hcl'", ",", "'parsetab.dat'", ")", "if", "exists", "(", "dat", ")", ":", "os", ".", "unlink", "(", "dat", ")", ...
e6e27742215692974f0ef503a91a81ec4adc171c
valid
RobotStatements.append
Add another row of data from a test suite
rflint/parser/common.py
def append(self, linenumber, raw_text, cells): """Add another row of data from a test suite""" self.rows.append(Row(linenumber, raw_text, cells))
def append(self, linenumber, raw_text, cells): """Add another row of data from a test suite""" self.rows.append(Row(linenumber, raw_text, cells))
[ "Add", "another", "row", "of", "data", "from", "a", "test", "suite" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/common.py#L5-L7
[ "def", "append", "(", "self", ",", "linenumber", ",", "raw_text", ",", "cells", ")", ":", "self", ".", "rows", ".", "append", "(", "Row", "(", "linenumber", ",", "raw_text", ",", "cells", ")", ")" ]
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RobotStatements.steps
Return a list of steps (statements that are not settings or comments)
rflint/parser/common.py
def steps(self): """Return a list of steps (statements that are not settings or comments)""" steps = [] for statement in self.statements: if ((not statement.is_comment()) and (not statement.is_setting())): steps.append(statement) return steps
def steps(self): """Return a list of steps (statements that are not settings or comments)""" steps = [] for statement in self.statements: if ((not statement.is_comment()) and (not statement.is_setting())): steps.append(statement) return steps
[ "Return", "a", "list", "of", "steps", "(", "statements", "that", "are", "not", "settings", "or", "comments", ")" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/common.py#L16-L23
[ "def", "steps", "(", "self", ")", ":", "steps", "=", "[", "]", "for", "statement", "in", "self", ".", "statements", ":", "if", "(", "(", "not", "statement", ".", "is_comment", "(", ")", ")", "and", "(", "not", "statement", ".", "is_setting", "(", "...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
Statement.is_comment
Return True if the first non-empty cell starts with "#"
rflint/parser/common.py
def is_comment(self): '''Return True if the first non-empty cell starts with "#"''' for cell in self[:]: if cell == "": continue # this is the first non-empty cell. Check whether it is # a comment or not. if cell.lstrip().startswith("#"): return True else: return False return False
def is_comment(self): '''Return True if the first non-empty cell starts with "#"''' for cell in self[:]: if cell == "": continue # this is the first non-empty cell. Check whether it is # a comment or not. if cell.lstrip().startswith("#"): return True else: return False return False
[ "Return", "True", "if", "the", "first", "non", "-", "empty", "cell", "starts", "with", "#" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/common.py#L112-L125
[ "def", "is_comment", "(", "self", ")", ":", "for", "cell", "in", "self", "[", ":", "]", ":", "if", "cell", "==", "\"\"", ":", "continue", "# this is the first non-empty cell. Check whether it is", "# a comment or not.", "if", "cell", ".", "lstrip", "(", ")", "...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RobotFactory
Return an instance of SuiteFile, ResourceFile, SuiteFolder Exactly which is returned depends on whether it's a file or folder, and if a file, the contents of the file. If there is a testcase table, this will return an instance of SuiteFile, otherwise it will return an instance of ResourceFile.
rflint/parser/parser.py
def RobotFactory(path, parent=None): '''Return an instance of SuiteFile, ResourceFile, SuiteFolder Exactly which is returned depends on whether it's a file or folder, and if a file, the contents of the file. If there is a testcase table, this will return an instance of SuiteFile, otherwise it will return an instance of ResourceFile. ''' if os.path.isdir(path): return SuiteFolder(path, parent) else: rf = RobotFile(path, parent) for table in rf.tables: if isinstance(table, TestcaseTable): rf.__class__ = SuiteFile return rf rf.__class__ = ResourceFile return rf
def RobotFactory(path, parent=None): '''Return an instance of SuiteFile, ResourceFile, SuiteFolder Exactly which is returned depends on whether it's a file or folder, and if a file, the contents of the file. If there is a testcase table, this will return an instance of SuiteFile, otherwise it will return an instance of ResourceFile. ''' if os.path.isdir(path): return SuiteFolder(path, parent) else: rf = RobotFile(path, parent) for table in rf.tables: if isinstance(table, TestcaseTable): rf.__class__ = SuiteFile return rf rf.__class__ = ResourceFile return rf
[ "Return", "an", "instance", "of", "SuiteFile", "ResourceFile", "SuiteFolder" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L34-L55
[ "def", "RobotFactory", "(", "path", ",", "parent", "=", "None", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "return", "SuiteFolder", "(", "path", ",", "parent", ")", "else", ":", "rf", "=", "RobotFile", "(", "path", ",",...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
SuiteFolder.walk
Iterator which visits all suites and suite files, yielding test cases and keywords
rflint/parser/parser.py
def walk(self, *types): ''' Iterator which visits all suites and suite files, yielding test cases and keywords ''' requested = types if len(types) > 0 else [SuiteFile, ResourceFile, SuiteFolder, Testcase, Keyword] for thing in self.robot_files: if thing.__class__ in requested: yield thing if isinstance(thing, SuiteFolder): for child in thing.walk(): if child.__class__ in requested: yield child else: for child in thing.walk(*types): yield child
def walk(self, *types): ''' Iterator which visits all suites and suite files, yielding test cases and keywords ''' requested = types if len(types) > 0 else [SuiteFile, ResourceFile, SuiteFolder, Testcase, Keyword] for thing in self.robot_files: if thing.__class__ in requested: yield thing if isinstance(thing, SuiteFolder): for child in thing.walk(): if child.__class__ in requested: yield child else: for child in thing.walk(*types): yield child
[ "Iterator", "which", "visits", "all", "suites", "and", "suite", "files", "yielding", "test", "cases", "and", "keywords" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L73-L89
[ "def", "walk", "(", "self", ",", "*", "types", ")", ":", "requested", "=", "types", "if", "len", "(", "types", ")", ">", "0", "else", "[", "SuiteFile", ",", "ResourceFile", ",", "SuiteFolder", ",", "Testcase", ",", "Keyword", "]", "for", "thing", "in...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
SuiteFolder.robot_files
Return a list of all folders, and test suite files (.txt, .robot)
rflint/parser/parser.py
def robot_files(self): '''Return a list of all folders, and test suite files (.txt, .robot) ''' result = [] for name in os.listdir(self.path): fullpath = os.path.join(self.path, name) if os.path.isdir(fullpath): result.append(RobotFactory(fullpath, parent=self)) else: if ((name.endswith(".txt") or name.endswith(".robot")) and (name not in ("__init__.txt", "__init__.robot"))): result.append(RobotFactory(fullpath, parent=self)) return result
def robot_files(self): '''Return a list of all folders, and test suite files (.txt, .robot) ''' result = [] for name in os.listdir(self.path): fullpath = os.path.join(self.path, name) if os.path.isdir(fullpath): result.append(RobotFactory(fullpath, parent=self)) else: if ((name.endswith(".txt") or name.endswith(".robot")) and (name not in ("__init__.txt", "__init__.robot"))): result.append(RobotFactory(fullpath, parent=self)) return result
[ "Return", "a", "list", "of", "all", "folders", "and", "test", "suite", "files", "(", ".", "txt", ".", "robot", ")" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L92-L105
[ "def", "robot_files", "(", "self", ")", ":", "result", "=", "[", "]", "for", "name", "in", "os", ".", "listdir", "(", "self", ".", "path", ")", ":", "fullpath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "name", ")", "i...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RobotFile.walk
Iterator which can return all test cases and/or keywords You can specify with objects to return as parameters; if no parameters are given, both tests and keywords will be returned. For example, to get only test cases, you could call it like this: robot_file = RobotFactory(...) for testcase in robot_file.walk(Testcase): ...
rflint/parser/parser.py
def walk(self, *types): ''' Iterator which can return all test cases and/or keywords You can specify with objects to return as parameters; if no parameters are given, both tests and keywords will be returned. For example, to get only test cases, you could call it like this: robot_file = RobotFactory(...) for testcase in robot_file.walk(Testcase): ... ''' requested = types if len(types) > 0 else [Testcase, Keyword] if Testcase in requested: for testcase in self.testcases: yield testcase if Keyword in requested: for keyword in self.keywords: yield keyword
def walk(self, *types): ''' Iterator which can return all test cases and/or keywords You can specify with objects to return as parameters; if no parameters are given, both tests and keywords will be returned. For example, to get only test cases, you could call it like this: robot_file = RobotFactory(...) for testcase in robot_file.walk(Testcase): ... ''' requested = types if len(types) > 0 else [Testcase, Keyword] if Testcase in requested: for testcase in self.testcases: yield testcase if Keyword in requested: for keyword in self.keywords: yield keyword
[ "Iterator", "which", "can", "return", "all", "test", "cases", "and", "/", "or", "keywords" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L134-L157
[ "def", "walk", "(", "self", ",", "*", "types", ")", ":", "requested", "=", "types", "if", "len", "(", "types", ")", ">", "0", "else", "[", "Testcase", ",", "Keyword", "]", "if", "Testcase", "in", "requested", ":", "for", "testcase", "in", "self", "...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RobotFile._load
The general idea is to do a quick parse, creating a list of tables. Each table is nothing more than a list of rows, with each row being a list of cells. Additional parsing such as combining rows into statements is done on demand. This first pass is solely to read in the plain text and organize it by table.
rflint/parser/parser.py
def _load(self, path): ''' The general idea is to do a quick parse, creating a list of tables. Each table is nothing more than a list of rows, with each row being a list of cells. Additional parsing such as combining rows into statements is done on demand. This first pass is solely to read in the plain text and organize it by table. ''' self.tables = [] current_table = DefaultTable(self) with Utf8Reader(path) as f: # N.B. the caller should be catching errors self.raw_text = f.read() f._file.seek(0) # bleh; wish this wasn't a private property matcher = Matcher(re.IGNORECASE) for linenumber, raw_text in enumerate(f.readlines()): linenumber += 1; # start counting at 1 rather than zero # this mimics what the robot TSV reader does -- # it replaces non-breaking spaces with regular spaces, # and then strips trailing whitespace raw_text = raw_text.replace(u'\xA0', ' ') raw_text = raw_text.rstrip() # FIXME: I'm keeping line numbers but throwing away # where each cell starts. I should be preserving that # (though to be fair, robot is throwing that away so # I'll have to write my own splitter if I want to save # the character position) cells = TxtReader.split_row(raw_text) _heading_regex = r'^\s*\*+\s*(.*?)[ *]*$' if matcher(_heading_regex, cells[0]): # we've found the start of a new table table_name = matcher.group(1) current_table = tableFactory(self, linenumber, table_name, raw_text) self.tables.append(current_table) else: current_table.append(Row(linenumber, raw_text, cells))
def _load(self, path): ''' The general idea is to do a quick parse, creating a list of tables. Each table is nothing more than a list of rows, with each row being a list of cells. Additional parsing such as combining rows into statements is done on demand. This first pass is solely to read in the plain text and organize it by table. ''' self.tables = [] current_table = DefaultTable(self) with Utf8Reader(path) as f: # N.B. the caller should be catching errors self.raw_text = f.read() f._file.seek(0) # bleh; wish this wasn't a private property matcher = Matcher(re.IGNORECASE) for linenumber, raw_text in enumerate(f.readlines()): linenumber += 1; # start counting at 1 rather than zero # this mimics what the robot TSV reader does -- # it replaces non-breaking spaces with regular spaces, # and then strips trailing whitespace raw_text = raw_text.replace(u'\xA0', ' ') raw_text = raw_text.rstrip() # FIXME: I'm keeping line numbers but throwing away # where each cell starts. I should be preserving that # (though to be fair, robot is throwing that away so # I'll have to write my own splitter if I want to save # the character position) cells = TxtReader.split_row(raw_text) _heading_regex = r'^\s*\*+\s*(.*?)[ *]*$' if matcher(_heading_regex, cells[0]): # we've found the start of a new table table_name = matcher.group(1) current_table = tableFactory(self, linenumber, table_name, raw_text) self.tables.append(current_table) else: current_table.append(Row(linenumber, raw_text, cells))
[ "The", "general", "idea", "is", "to", "do", "a", "quick", "parse", "creating", "a", "list", "of", "tables", ".", "Each", "table", "is", "nothing", "more", "than", "a", "list", "of", "rows", "with", "each", "row", "being", "a", "list", "of", "cells", ...
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L159-L201
[ "def", "_load", "(", "self", ",", "path", ")", ":", "self", ".", "tables", "=", "[", "]", "current_table", "=", "DefaultTable", "(", "self", ")", "with", "Utf8Reader", "(", "path", ")", "as", "f", ":", "# N.B. the caller should be catching errors", "self", ...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RobotFile.type
Return 'suite' or 'resource' or None This will return 'suite' if a testcase table is found; It will return 'resource' if at least one robot table is found. If no tables are found it will return None
rflint/parser/parser.py
def type(self): '''Return 'suite' or 'resource' or None This will return 'suite' if a testcase table is found; It will return 'resource' if at least one robot table is found. If no tables are found it will return None ''' robot_tables = [table for table in self.tables if not isinstance(table, UnknownTable)] if len(robot_tables) == 0: return None for table in self.tables: if isinstance(table, TestcaseTable): return "suite" return "resource"
def type(self): '''Return 'suite' or 'resource' or None This will return 'suite' if a testcase table is found; It will return 'resource' if at least one robot table is found. If no tables are found it will return None ''' robot_tables = [table for table in self.tables if not isinstance(table, UnknownTable)] if len(robot_tables) == 0: return None for table in self.tables: if isinstance(table, TestcaseTable): return "suite" return "resource"
[ "Return", "suite", "or", "resource", "or", "None" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L208-L224
[ "def", "type", "(", "self", ")", ":", "robot_tables", "=", "[", "table", "for", "table", "in", "self", ".", "tables", "if", "not", "isinstance", "(", "table", ",", "UnknownTable", ")", "]", "if", "len", "(", "robot_tables", ")", "==", "0", ":", "retu...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RobotFile.keywords
Generator which returns all keywords in the suite
rflint/parser/parser.py
def keywords(self): '''Generator which returns all keywords in the suite''' for table in self.tables: if isinstance(table, KeywordTable): for keyword in table.keywords: yield keyword
def keywords(self): '''Generator which returns all keywords in the suite''' for table in self.tables: if isinstance(table, KeywordTable): for keyword in table.keywords: yield keyword
[ "Generator", "which", "returns", "all", "keywords", "in", "the", "suite" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L227-L232
[ "def", "keywords", "(", "self", ")", ":", "for", "table", "in", "self", ".", "tables", ":", "if", "isinstance", "(", "table", ",", "KeywordTable", ")", ":", "for", "keyword", "in", "table", ".", "keywords", ":", "yield", "keyword" ]
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RobotFile.dump
Regurgitate the tables and rows
rflint/parser/parser.py
def dump(self): '''Regurgitate the tables and rows''' for table in self.tables: print("*** %s ***" % table.name) table.dump()
def dump(self): '''Regurgitate the tables and rows''' for table in self.tables: print("*** %s ***" % table.name) table.dump()
[ "Regurgitate", "the", "tables", "and", "rows" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L242-L246
[ "def", "dump", "(", "self", ")", ":", "for", "table", "in", "self", ".", "tables", ":", "print", "(", "\"*** %s ***\"", "%", "table", ".", "name", ")", "table", ".", "dump", "(", ")" ]
3e3578f4e39af9af9961aa0a715f146b74474091
valid
SuiteFile.settings
Generator which returns all of the statements in all of the settings tables
rflint/parser/parser.py
def settings(self): '''Generator which returns all of the statements in all of the settings tables''' for table in self.tables: if isinstance(table, SettingTable): for statement in table.statements: yield statement
def settings(self): '''Generator which returns all of the statements in all of the settings tables''' for table in self.tables: if isinstance(table, SettingTable): for statement in table.statements: yield statement
[ "Generator", "which", "returns", "all", "of", "the", "statements", "in", "all", "of", "the", "settings", "tables" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L272-L277
[ "def", "settings", "(", "self", ")", ":", "for", "table", "in", "self", ".", "tables", ":", "if", "isinstance", "(", "table", ",", "SettingTable", ")", ":", "for", "statement", "in", "table", ".", "statements", ":", "yield", "statement" ]
3e3578f4e39af9af9961aa0a715f146b74474091
valid
SuiteFile.variables
Generator which returns all of the statements in all of the variables tables
rflint/parser/parser.py
def variables(self): '''Generator which returns all of the statements in all of the variables tables''' for table in self.tables: if isinstance(table, VariableTable): # FIXME: settings have statements, variables have rows WTF? :-( for statement in table.rows: if statement[0] != "": yield statement
def variables(self): '''Generator which returns all of the statements in all of the variables tables''' for table in self.tables: if isinstance(table, VariableTable): # FIXME: settings have statements, variables have rows WTF? :-( for statement in table.rows: if statement[0] != "": yield statement
[ "Generator", "which", "returns", "all", "of", "the", "statements", "in", "all", "of", "the", "variables", "tables" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/parser.py#L280-L287
[ "def", "variables", "(", "self", ")", ":", "for", "table", "in", "self", ".", "tables", ":", "if", "isinstance", "(", "table", ",", "VariableTable", ")", ":", "# FIXME: settings have statements, variables have rows WTF? :-(", "for", "statement", "in", "table", "."...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
SimpleTableMixin.statements
Return a list of statements This is done by joining together any rows that have continuations
rflint/parser/tables.py
def statements(self): '''Return a list of statements This is done by joining together any rows that have continuations ''' # FIXME: no need to do this every time; we should cache the # result if len(self.rows) == 0: return [] current_statement = Statement(self.rows[0]) current_statement.startline = self.rows[0].linenumber current_statement.endline = self.rows[0].linenumber statements = [] for row in self.rows[1:]: if len(row) > 0 and row[0] == "...": # we found a continuation current_statement += row[1:] current_statement.endline = row.linenumber else: if len(current_statement) > 0: # append current statement to the list of statements... statements.append(current_statement) # start a new statement current_statement = Statement(row) current_statement.startline = row.linenumber current_statement.endline = row.linenumber if len(current_statement) > 0: statements.append(current_statement) # trim trailing blank statements while (len(statements[-1]) == 0 or ((len(statements[-1]) == 1) and len(statements[-1][0]) == 0)): statements.pop() return statements
def statements(self): '''Return a list of statements This is done by joining together any rows that have continuations ''' # FIXME: no need to do this every time; we should cache the # result if len(self.rows) == 0: return [] current_statement = Statement(self.rows[0]) current_statement.startline = self.rows[0].linenumber current_statement.endline = self.rows[0].linenumber statements = [] for row in self.rows[1:]: if len(row) > 0 and row[0] == "...": # we found a continuation current_statement += row[1:] current_statement.endline = row.linenumber else: if len(current_statement) > 0: # append current statement to the list of statements... statements.append(current_statement) # start a new statement current_statement = Statement(row) current_statement.startline = row.linenumber current_statement.endline = row.linenumber if len(current_statement) > 0: statements.append(current_statement) # trim trailing blank statements while (len(statements[-1]) == 0 or ((len(statements[-1]) == 1) and len(statements[-1][0]) == 0)): statements.pop() return statements
[ "Return", "a", "list", "of", "statements" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/tables.py#L35-L71
[ "def", "statements", "(", "self", ")", ":", "# FIXME: no need to do this every time; we should cache the", "# result", "if", "len", "(", "self", ".", "rows", ")", "==", "0", ":", "return", "[", "]", "current_statement", "=", "Statement", "(", "self", ".", "rows"...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
AbstractContainerTable.append
The idea is, we recognize when we have a new testcase by checking the first cell. If it's not empty and not a comment, we have a new test case.
rflint/parser/tables.py
def append(self, row): ''' The idea is, we recognize when we have a new testcase by checking the first cell. If it's not empty and not a comment, we have a new test case. ''' if len(row) == 0: # blank line. Should we throw it away, or append a BlankLine object? return if (row[0] != "" and (not row[0].lstrip().startswith("#"))): # we have a new child table self._children.append(self._childClass(self.parent, row.linenumber, row[0])) if len(row.cells) > 1: # It appears the first row -- which contains the test case or # keyword name -- also has the first logical row of cells. # We'll create a Row, but we'll make the first cell empty instead # of leaving the name in it, since other code always assumes the # first cell is empty. # # To be honest, I'm not sure this is the Right Thing To Do, but # I'm too lazy to audit the code to see if it matters if we keep # the first cell intact. Sorry if this ends up causing you grief # some day... row[0] = "" self._children[-1].append(row.linenumber, row.raw_text, row.cells) elif len(self._children) == 0: # something before the first test case # For now, append it to self.comments; eventually we should flag # an error if it's NOT a comment self.comments.append(row) else: # another row for the testcase if len(row.cells) > 0: self._children[-1].append(row.linenumber, row.raw_text, row.cells)
def append(self, row): ''' The idea is, we recognize when we have a new testcase by checking the first cell. If it's not empty and not a comment, we have a new test case. ''' if len(row) == 0: # blank line. Should we throw it away, or append a BlankLine object? return if (row[0] != "" and (not row[0].lstrip().startswith("#"))): # we have a new child table self._children.append(self._childClass(self.parent, row.linenumber, row[0])) if len(row.cells) > 1: # It appears the first row -- which contains the test case or # keyword name -- also has the first logical row of cells. # We'll create a Row, but we'll make the first cell empty instead # of leaving the name in it, since other code always assumes the # first cell is empty. # # To be honest, I'm not sure this is the Right Thing To Do, but # I'm too lazy to audit the code to see if it matters if we keep # the first cell intact. Sorry if this ends up causing you grief # some day... row[0] = "" self._children[-1].append(row.linenumber, row.raw_text, row.cells) elif len(self._children) == 0: # something before the first test case # For now, append it to self.comments; eventually we should flag # an error if it's NOT a comment self.comments.append(row) else: # another row for the testcase if len(row.cells) > 0: self._children[-1].append(row.linenumber, row.raw_text, row.cells)
[ "The", "idea", "is", "we", "recognize", "when", "we", "have", "a", "new", "testcase", "by", "checking", "the", "first", "cell", ".", "If", "it", "s", "not", "empty", "and", "not", "a", "comment", "we", "have", "a", "new", "test", "case", "." ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/parser/tables.py#L96-L134
[ "def", "append", "(", "self", ",", "row", ")", ":", "if", "len", "(", "row", ")", "==", "0", ":", "# blank line. Should we throw it away, or append a BlankLine object?", "return", "if", "(", "row", "[", "0", "]", "!=", "\"\"", "and", "(", "not", "row", "["...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
Rule.report
Report an error or warning
rflint/common.py
def report(self, obj, message, linenum, char_offset=0): """Report an error or warning""" self.controller.report(linenumber=linenum, filename=obj.path, severity=self.severity, message=message, rulename = self.__class__.__name__, char=char_offset)
def report(self, obj, message, linenum, char_offset=0): """Report an error or warning""" self.controller.report(linenumber=linenum, filename=obj.path, severity=self.severity, message=message, rulename = self.__class__.__name__, char=char_offset)
[ "Report", "an", "error", "or", "warning" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/common.py#L26-L31
[ "def", "report", "(", "self", ",", "obj", ",", "message", ",", "linenum", ",", "char_offset", "=", "0", ")", ":", "self", ".", "controller", ".", "report", "(", "linenumber", "=", "linenum", ",", "filename", "=", "obj", ".", "path", ",", "severity", ...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
Rule.doc
Algorithm from https://www.python.org/dev/peps/pep-0257/
rflint/common.py
def doc(self): '''Algorithm from https://www.python.org/dev/peps/pep-0257/''' if not self.__doc__: return "" lines = self.__doc__.expandtabs().splitlines() # Determine minimum indentation (first line doesn't count): indent = sys.maxsize for line in lines[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) # Remove indentation (first line is special): trimmed = [lines[0].strip()] if indent < sys.maxsize: for line in lines[1:]: trimmed.append(line[indent:].rstrip()) # Strip off trailing and leading blank lines: while trimmed and not trimmed[-1]: trimmed.pop() while trimmed and not trimmed[0]: trimmed.pop(0) # Return a single string: return '\n'.join(trimmed)
def doc(self): '''Algorithm from https://www.python.org/dev/peps/pep-0257/''' if not self.__doc__: return "" lines = self.__doc__.expandtabs().splitlines() # Determine minimum indentation (first line doesn't count): indent = sys.maxsize for line in lines[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) # Remove indentation (first line is special): trimmed = [lines[0].strip()] if indent < sys.maxsize: for line in lines[1:]: trimmed.append(line[indent:].rstrip()) # Strip off trailing and leading blank lines: while trimmed and not trimmed[-1]: trimmed.pop() while trimmed and not trimmed[0]: trimmed.pop(0) # Return a single string: return '\n'.join(trimmed)
[ "Algorithm", "from", "https", ":", "//", "www", ".", "python", ".", "org", "/", "dev", "/", "peps", "/", "pep", "-", "0257", "/" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/common.py#L34-L61
[ "def", "doc", "(", "self", ")", ":", "if", "not", "self", ".", "__doc__", ":", "return", "\"\"", "lines", "=", "self", ".", "__doc__", ".", "expandtabs", "(", ")", ".", "splitlines", "(", ")", "# Determine minimum indentation (first line doesn't count):", "ind...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RfLint.run
Parse command line arguments, and run rflint
rflint/rflint.py
def run(self, args): """Parse command line arguments, and run rflint""" self.args = self.parse_and_process_args(args) if self.args.version: print(__version__) return 0 if self.args.rulefile: for filename in self.args.rulefile: self._load_rule_file(filename) if self.args.list: self.list_rules() return 0 if self.args.describe: self._describe_rules(self.args.args) return 0 self.counts = { ERROR: 0, WARNING: 0, "other": 0} for filename in self.args.args: if not (os.path.exists(filename)): sys.stderr.write("rflint: %s: No such file or directory\n" % filename) continue if os.path.isdir(filename): self._process_folder(filename) else: self._process_file(filename) if self.counts[ERROR] > 0: return self.counts[ERROR] if self.counts[ERROR] < 254 else 255 return 0
def run(self, args): """Parse command line arguments, and run rflint""" self.args = self.parse_and_process_args(args) if self.args.version: print(__version__) return 0 if self.args.rulefile: for filename in self.args.rulefile: self._load_rule_file(filename) if self.args.list: self.list_rules() return 0 if self.args.describe: self._describe_rules(self.args.args) return 0 self.counts = { ERROR: 0, WARNING: 0, "other": 0} for filename in self.args.args: if not (os.path.exists(filename)): sys.stderr.write("rflint: %s: No such file or directory\n" % filename) continue if os.path.isdir(filename): self._process_folder(filename) else: self._process_file(filename) if self.counts[ERROR] > 0: return self.counts[ERROR] if self.counts[ERROR] < 254 else 255 return 0
[ "Parse", "command", "line", "arguments", "and", "run", "rflint" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/rflint.py#L79-L114
[ "def", "run", "(", "self", ",", "args", ")", ":", "self", ".", "args", "=", "self", ".", "parse_and_process_args", "(", "args", ")", "if", "self", ".", "args", ".", "version", ":", "print", "(", "__version__", ")", "return", "0", "if", "self", ".", ...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RfLint.list_rules
Print a list of all rules
rflint/rflint.py
def list_rules(self): """Print a list of all rules""" for rule in sorted(self.all_rules, key=lambda rule: rule.name): print(rule) if self.args.verbose: for line in rule.doc.split("\n"): print(" ", line)
def list_rules(self): """Print a list of all rules""" for rule in sorted(self.all_rules, key=lambda rule: rule.name): print(rule) if self.args.verbose: for line in rule.doc.split("\n"): print(" ", line)
[ "Print", "a", "list", "of", "all", "rules" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/rflint.py#L178-L184
[ "def", "list_rules", "(", "self", ")", ":", "for", "rule", "in", "sorted", "(", "self", ".", "all_rules", ",", "key", "=", "lambda", "rule", ":", "rule", ".", "name", ")", ":", "print", "(", "rule", ")", "if", "self", ".", "args", ".", "verbose", ...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RfLint.report
Report a rule violation
rflint/rflint.py
def report(self, linenumber, filename, severity, message, rulename, char): """Report a rule violation""" if self._print_filename is not None: # we print the filename only once. self._print_filename # will get reset each time a new file is processed. print("+ " + self._print_filename) self._print_filename = None if severity in (WARNING, ERROR): self.counts[severity] += 1 else: self.counts["other"] += 1 print(self.args.format.format(linenumber=linenumber, filename=filename, severity=severity, message=message.encode('utf-8'), rulename=rulename, char=char))
def report(self, linenumber, filename, severity, message, rulename, char): """Report a rule violation""" if self._print_filename is not None: # we print the filename only once. self._print_filename # will get reset each time a new file is processed. print("+ " + self._print_filename) self._print_filename = None if severity in (WARNING, ERROR): self.counts[severity] += 1 else: self.counts["other"] += 1 print(self.args.format.format(linenumber=linenumber, filename=filename, severity=severity, message=message.encode('utf-8'), rulename=rulename, char=char))
[ "Report", "a", "rule", "violation" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/rflint.py#L186-L202
[ "def", "report", "(", "self", ",", "linenumber", ",", "filename", ",", "severity", ",", "message", ",", "rulename", ",", "char", ")", ":", "if", "self", ".", "_print_filename", "is", "not", "None", ":", "# we print the filename only once. self._print_filename", ...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RfLint._get_rules
Returns a list of rules of a given class Rules are treated as singletons - we only instantiate each rule once.
rflint/rflint.py
def _get_rules(self, cls): """Returns a list of rules of a given class Rules are treated as singletons - we only instantiate each rule once. """ result = [] for rule_class in cls.__subclasses__(): rule_name = rule_class.__name__.lower() if rule_name not in self._rules: rule = rule_class(self) self._rules[rule_name] = rule result.append(self._rules[rule_name]) return result
def _get_rules(self, cls): """Returns a list of rules of a given class Rules are treated as singletons - we only instantiate each rule once. """ result = [] for rule_class in cls.__subclasses__(): rule_name = rule_class.__name__.lower() if rule_name not in self._rules: rule = rule_class(self) self._rules[rule_name] = rule result.append(self._rules[rule_name]) return result
[ "Returns", "a", "list", "of", "rules", "of", "a", "given", "class", "Rules", "are", "treated", "as", "singletons", "-", "we", "only", "instantiate", "each", "rule", "once", "." ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/rflint.py#L204-L218
[ "def", "_get_rules", "(", "self", ",", "cls", ")", ":", "result", "=", "[", "]", "for", "rule_class", "in", "cls", ".", "__subclasses__", "(", ")", ":", "rule_name", "=", "rule_class", ".", "__name__", ".", "lower", "(", ")", "if", "rule_name", "not", ...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RfLint._load_rule_file
Import the given rule file
rflint/rflint.py
def _load_rule_file(self, filename): """Import the given rule file""" if not (os.path.exists(filename)): sys.stderr.write("rflint: %s: No such file or directory\n" % filename) return try: basename = os.path.basename(filename) (name, ext) = os.path.splitext(basename) imp.load_source(name, filename) except Exception as e: sys.stderr.write("rflint: %s: exception while loading: %s\n" % (filename, str(e)))
def _load_rule_file(self, filename): """Import the given rule file""" if not (os.path.exists(filename)): sys.stderr.write("rflint: %s: No such file or directory\n" % filename) return try: basename = os.path.basename(filename) (name, ext) = os.path.splitext(basename) imp.load_source(name, filename) except Exception as e: sys.stderr.write("rflint: %s: exception while loading: %s\n" % (filename, str(e)))
[ "Import", "the", "given", "rule", "file" ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/rflint.py#L220-L230
[ "def", "_load_rule_file", "(", "self", ",", "filename", ")", ":", "if", "not", "(", "os", ".", "path", ".", "exists", "(", "filename", ")", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"rflint: %s: No such file or directory\\n\"", "%", "filename", ...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
RfLint.parse_and_process_args
Handle the parsing of command line arguments.
rflint/rflint.py
def parse_and_process_args(self, args): """Handle the parsing of command line arguments.""" parser = argparse.ArgumentParser( prog="python -m rflint", description="A style checker for robot framework plain text files.", formatter_class=argparse.RawDescriptionHelpFormatter, epilog = ( "You can use 'all' in place of RULENAME to refer to all rules. \n" "\n" "For example: '--ignore all --warn DuplicateTestNames' will ignore all\n" "rules except DuplicateTestNames.\n" "\n" "FORMAT is a string that performs a substitution on the following \n" "patterns: {severity}, {linenumber}, {char}, {message}, and {rulename}.\n" "\n" "For example: --format 'line: {linenumber}: message: {message}'. \n" "\n" "ARGUMENTFILE is a filename with contents that match the format of \n" "standard robot framework argument files\n" "\n" "If you give a directory as an argument, all files in the directory\n" "with the suffix .txt, .robot or .tsv will be processed. With the \n" "--recursive option, subfolders within the directory will also be\n" "processed." 
) ) parser.add_argument("--error", "-e", metavar="RULENAME", action=SetErrorAction, help="Assign a severity of ERROR to the given RULENAME") parser.add_argument("--ignore", "-i", metavar="RULENAME", action=SetIgnoreAction, help="Ignore the given RULENAME") parser.add_argument("--warning", "-w", metavar="RULENAME", action=SetWarningAction, help="Assign a severity of WARNING for the given RULENAME") parser.add_argument("--list", "-l", action="store_true", help="show a list of known rules and exit") parser.add_argument("--describe", "-d", action="store_true", help="describe the given rules") parser.add_argument("--no-filenames", action="store_false", dest="print_filenames", default=True, help="suppress the printing of filenames") parser.add_argument("--format", "-f", help="Define the output format", default='{severity}: {linenumber}, {char}: {message} ({rulename})') parser.add_argument("--version", action="store_true", default=False, help="Display version number and exit") parser.add_argument("--verbose", "-v", action="store_true", default=False, help="Give verbose output") parser.add_argument("--configure", "-c", action=ConfigureAction, help="Configure a rule") parser.add_argument("--recursive", "-r", action="store_true", default=False, help="Recursively scan subfolders in a directory") parser.add_argument("--rulefile", "-R", action=RulefileAction, help="import additional rules from the given RULEFILE") parser.add_argument("--argumentfile", "-A", action=ArgfileLoader, help="read arguments from the given file") parser.add_argument('args', metavar="file", nargs=argparse.REMAINDER) # create a custom namespace, in which we can store a reference to # our rules. This lets the custom argument actions access the list # of rules ns = argparse.Namespace() setattr(ns, "app", self) args = parser.parse_args(args, ns) Rule.output_format = args.format return args
def parse_and_process_args(self, args): """Handle the parsing of command line arguments.""" parser = argparse.ArgumentParser( prog="python -m rflint", description="A style checker for robot framework plain text files.", formatter_class=argparse.RawDescriptionHelpFormatter, epilog = ( "You can use 'all' in place of RULENAME to refer to all rules. \n" "\n" "For example: '--ignore all --warn DuplicateTestNames' will ignore all\n" "rules except DuplicateTestNames.\n" "\n" "FORMAT is a string that performs a substitution on the following \n" "patterns: {severity}, {linenumber}, {char}, {message}, and {rulename}.\n" "\n" "For example: --format 'line: {linenumber}: message: {message}'. \n" "\n" "ARGUMENTFILE is a filename with contents that match the format of \n" "standard robot framework argument files\n" "\n" "If you give a directory as an argument, all files in the directory\n" "with the suffix .txt, .robot or .tsv will be processed. With the \n" "--recursive option, subfolders within the directory will also be\n" "processed." 
) ) parser.add_argument("--error", "-e", metavar="RULENAME", action=SetErrorAction, help="Assign a severity of ERROR to the given RULENAME") parser.add_argument("--ignore", "-i", metavar="RULENAME", action=SetIgnoreAction, help="Ignore the given RULENAME") parser.add_argument("--warning", "-w", metavar="RULENAME", action=SetWarningAction, help="Assign a severity of WARNING for the given RULENAME") parser.add_argument("--list", "-l", action="store_true", help="show a list of known rules and exit") parser.add_argument("--describe", "-d", action="store_true", help="describe the given rules") parser.add_argument("--no-filenames", action="store_false", dest="print_filenames", default=True, help="suppress the printing of filenames") parser.add_argument("--format", "-f", help="Define the output format", default='{severity}: {linenumber}, {char}: {message} ({rulename})') parser.add_argument("--version", action="store_true", default=False, help="Display version number and exit") parser.add_argument("--verbose", "-v", action="store_true", default=False, help="Give verbose output") parser.add_argument("--configure", "-c", action=ConfigureAction, help="Configure a rule") parser.add_argument("--recursive", "-r", action="store_true", default=False, help="Recursively scan subfolders in a directory") parser.add_argument("--rulefile", "-R", action=RulefileAction, help="import additional rules from the given RULEFILE") parser.add_argument("--argumentfile", "-A", action=ArgfileLoader, help="read arguments from the given file") parser.add_argument('args', metavar="file", nargs=argparse.REMAINDER) # create a custom namespace, in which we can store a reference to # our rules. This lets the custom argument actions access the list # of rules ns = argparse.Namespace() setattr(ns, "app", self) args = parser.parse_args(args, ns) Rule.output_format = args.format return args
[ "Handle", "the", "parsing", "of", "command", "line", "arguments", "." ]
boakley/robotframework-lint
python
https://github.com/boakley/robotframework-lint/blob/3e3578f4e39af9af9961aa0a715f146b74474091/rflint/rflint.py#L232-L298
[ "def", "parse_and_process_args", "(", "self", ",", "args", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "\"python -m rflint\"", ",", "description", "=", "\"A style checker for robot framework plain text files.\"", ",", "formatter_class", ...
3e3578f4e39af9af9961aa0a715f146b74474091
valid
Draft4ExtendedValidatorFactory.from_resolver
Creates a customized Draft4ExtendedValidator. :param spec_resolver: resolver for the spec :type resolver: :class:`jsonschema.RefResolver`
openapi_spec_validator/factories.py
def from_resolver(cls, spec_resolver): """Creates a customized Draft4ExtendedValidator. :param spec_resolver: resolver for the spec :type resolver: :class:`jsonschema.RefResolver` """ spec_validators = cls._get_spec_validators(spec_resolver) return validators.extend(Draft4Validator, spec_validators)
def from_resolver(cls, spec_resolver): """Creates a customized Draft4ExtendedValidator. :param spec_resolver: resolver for the spec :type resolver: :class:`jsonschema.RefResolver` """ spec_validators = cls._get_spec_validators(spec_resolver) return validators.extend(Draft4Validator, spec_validators)
[ "Creates", "a", "customized", "Draft4ExtendedValidator", "." ]
p1c2u/openapi-spec-validator
python
https://github.com/p1c2u/openapi-spec-validator/blob/7fef38ab2962ab4866ffa27e2e9fd92f6a3ff67f/openapi_spec_validator/factories.py#L15-L22
[ "def", "from_resolver", "(", "cls", ",", "spec_resolver", ")", ":", "spec_validators", "=", "cls", ".", "_get_spec_validators", "(", "spec_resolver", ")", "return", "validators", ".", "extend", "(", "Draft4Validator", ",", "spec_validators", ")" ]
7fef38ab2962ab4866ffa27e2e9fd92f6a3ff67f
valid
JSONSpecValidatorFactory.create
Creates json documents validator from spec resolver. :param spec_resolver: reference resolver. :return: RefResolver for spec with cached remote $refs used during validation. :rtype: :class:`jsonschema.RefResolver`
openapi_spec_validator/factories.py
def create(self, spec_resolver): """Creates json documents validator from spec resolver. :param spec_resolver: reference resolver. :return: RefResolver for spec with cached remote $refs used during validation. :rtype: :class:`jsonschema.RefResolver` """ validator_cls = self.spec_validator_factory.from_resolver( spec_resolver) return validator_cls( self.schema, resolver=self.schema_resolver)
def create(self, spec_resolver): """Creates json documents validator from spec resolver. :param spec_resolver: reference resolver. :return: RefResolver for spec with cached remote $refs used during validation. :rtype: :class:`jsonschema.RefResolver` """ validator_cls = self.spec_validator_factory.from_resolver( spec_resolver) return validator_cls( self.schema, resolver=self.schema_resolver)
[ "Creates", "json", "documents", "validator", "from", "spec", "resolver", ".", ":", "param", "spec_resolver", ":", "reference", "resolver", "." ]
p1c2u/openapi-spec-validator
python
https://github.com/p1c2u/openapi-spec-validator/blob/7fef38ab2962ab4866ffa27e2e9fd92f6a3ff67f/openapi_spec_validator/factories.py#L53-L65
[ "def", "create", "(", "self", ",", "spec_resolver", ")", ":", "validator_cls", "=", "self", ".", "spec_validator_factory", ".", "from_resolver", "(", "spec_resolver", ")", "return", "validator_cls", "(", "self", ".", "schema", ",", "resolver", "=", "self", "."...
7fef38ab2962ab4866ffa27e2e9fd92f6a3ff67f
valid
ExtendedSafeConstructor.construct_mapping
While yaml supports integer keys, these are not valid in json, and will break jsonschema. This method coerces all keys to strings.
openapi_spec_validator/constructors.py
def construct_mapping(self, node, deep=False): """While yaml supports integer keys, these are not valid in json, and will break jsonschema. This method coerces all keys to strings. """ mapping = super(ExtendedSafeConstructor, self).construct_mapping( node, deep) return { (str(key) if isinstance(key, int) else key): mapping[key] for key in mapping }
def construct_mapping(self, node, deep=False): """While yaml supports integer keys, these are not valid in json, and will break jsonschema. This method coerces all keys to strings. """ mapping = super(ExtendedSafeConstructor, self).construct_mapping( node, deep) return { (str(key) if isinstance(key, int) else key): mapping[key] for key in mapping }
[ "While", "yaml", "supports", "integer", "keys", "these", "are", "not", "valid", "in", "json", "and", "will", "break", "jsonschema", ".", "This", "method", "coerces", "all", "keys", "to", "strings", "." ]
p1c2u/openapi-spec-validator
python
https://github.com/p1c2u/openapi-spec-validator/blob/7fef38ab2962ab4866ffa27e2e9fd92f6a3ff67f/openapi_spec_validator/constructors.py#L6-L17
[ "def", "construct_mapping", "(", "self", ",", "node", ",", "deep", "=", "False", ")", ":", "mapping", "=", "super", "(", "ExtendedSafeConstructor", ",", "self", ")", ".", "construct_mapping", "(", "node", ",", "deep", ")", "return", "{", "(", "str", "(",...
7fef38ab2962ab4866ffa27e2e9fd92f6a3ff67f
valid
read_yaml_file
Open a file, read it and return its contents.
openapi_spec_validator/schemas.py
def read_yaml_file(path, loader=ExtendedSafeLoader): """Open a file, read it and return its contents.""" with open(path) as fh: return load(fh, loader)
def read_yaml_file(path, loader=ExtendedSafeLoader): """Open a file, read it and return its contents.""" with open(path) as fh: return load(fh, loader)
[ "Open", "a", "file", "read", "it", "and", "return", "its", "contents", "." ]
p1c2u/openapi-spec-validator
python
https://github.com/p1c2u/openapi-spec-validator/blob/7fef38ab2962ab4866ffa27e2e9fd92f6a3ff67f/openapi_spec_validator/schemas.py#L20-L23
[ "def", "read_yaml_file", "(", "path", ",", "loader", "=", "ExtendedSafeLoader", ")", ":", "with", "open", "(", "path", ")", "as", "fh", ":", "return", "load", "(", "fh", ",", "loader", ")" ]
7fef38ab2962ab4866ffa27e2e9fd92f6a3ff67f
valid
SpecValidatorsGeneratorFactory.from_spec_resolver
Creates validators generator for the spec resolver. :param spec_resolver: resolver for the spec :type instance_resolver: :class:`jsonschema.RefResolver`
openapi_spec_validator/generators.py
def from_spec_resolver(cls, spec_resolver): """Creates validators generator for the spec resolver. :param spec_resolver: resolver for the spec :type instance_resolver: :class:`jsonschema.RefResolver` """ deref = DerefValidatorDecorator(spec_resolver) for key, validator_callable in iteritems(cls.validators): yield key, deref(validator_callable)
def from_spec_resolver(cls, spec_resolver): """Creates validators generator for the spec resolver. :param spec_resolver: resolver for the spec :type instance_resolver: :class:`jsonschema.RefResolver` """ deref = DerefValidatorDecorator(spec_resolver) for key, validator_callable in iteritems(cls.validators): yield key, deref(validator_callable)
[ "Creates", "validators", "generator", "for", "the", "spec", "resolver", "." ]
p1c2u/openapi-spec-validator
python
https://github.com/p1c2u/openapi-spec-validator/blob/7fef38ab2962ab4866ffa27e2e9fd92f6a3ff67f/openapi_spec_validator/generators.py#L34-L42
[ "def", "from_spec_resolver", "(", "cls", ",", "spec_resolver", ")", ":", "deref", "=", "DerefValidatorDecorator", "(", "spec_resolver", ")", "for", "key", ",", "validator_callable", "in", "iteritems", "(", "cls", ".", "validators", ")", ":", "yield", "key", ",...
7fef38ab2962ab4866ffa27e2e9fd92f6a3ff67f
valid
grade
Grades a specified submission using specified models grader_data - A dictionary: { 'model' : trained model, 'extractor' : trained feature extractor, 'prompt' : prompt for the question, 'algorithm' : algorithm for the question, } submission - The student submission (string)
ease/grade.py
def grade(grader_data,submission): """ Grades a specified submission using specified models grader_data - A dictionary: { 'model' : trained model, 'extractor' : trained feature extractor, 'prompt' : prompt for the question, 'algorithm' : algorithm for the question, } submission - The student submission (string) """ #Initialize result dictionary results = {'errors': [],'tests': [],'score': 0, 'feedback' : "", 'success' : False, 'confidence' : 0} has_error=False grader_set=EssaySet(essaytype="test") feedback = {} model, extractor = get_classifier_and_ext(grader_data) #This is to preserve legacy functionality if 'algorithm' not in grader_data: grader_data['algorithm'] = util_functions.AlgorithmTypes.classification try: #Try to add essay to essay set object grader_set.add_essay(str(submission),0) grader_set.update_prompt(str(grader_data['prompt'])) except Exception: error_message = "Essay could not be added to essay set:{0}".format(submission) log.exception(error_message) results['errors'].append(error_message) has_error=True #Try to extract features from submission and assign score via the model try: grader_feats=extractor.gen_feats(grader_set) feedback=extractor.gen_feedback(grader_set,grader_feats)[0] results['score']=int(model.predict(grader_feats)[0]) except Exception: error_message = "Could not extract features and score essay." 
log.exception(error_message) results['errors'].append(error_message) has_error=True #Try to determine confidence level try: results['confidence'] = get_confidence_value(grader_data['algorithm'], model, grader_feats, results['score'], grader_data['score']) except Exception: #If there is an error getting confidence, it is not a show-stopper, so just log log.exception("Problem generating confidence value") if not has_error: #If the essay is just a copy of the prompt, return a 0 as the score if( 'too_similar_to_prompt' in feedback and feedback['too_similar_to_prompt']): results['score']=0 results['correct']=False results['success']=True #Generate short form output--number of problem areas identified in feedback #Add feedback to results if available results['feedback'] = {} if 'topicality' in feedback and 'prompt_overlap' in feedback: results['feedback'].update({ 'topicality' : feedback['topicality'], 'prompt-overlap' : feedback['prompt_overlap'], }) results['feedback'].update( { 'spelling' : feedback['spelling'], 'grammar' : feedback['grammar'], 'markup-text' : feedback['markup_text'], } ) else: #If error, success is False. results['success']=False return results
def grade(grader_data,submission): """ Grades a specified submission using specified models grader_data - A dictionary: { 'model' : trained model, 'extractor' : trained feature extractor, 'prompt' : prompt for the question, 'algorithm' : algorithm for the question, } submission - The student submission (string) """ #Initialize result dictionary results = {'errors': [],'tests': [],'score': 0, 'feedback' : "", 'success' : False, 'confidence' : 0} has_error=False grader_set=EssaySet(essaytype="test") feedback = {} model, extractor = get_classifier_and_ext(grader_data) #This is to preserve legacy functionality if 'algorithm' not in grader_data: grader_data['algorithm'] = util_functions.AlgorithmTypes.classification try: #Try to add essay to essay set object grader_set.add_essay(str(submission),0) grader_set.update_prompt(str(grader_data['prompt'])) except Exception: error_message = "Essay could not be added to essay set:{0}".format(submission) log.exception(error_message) results['errors'].append(error_message) has_error=True #Try to extract features from submission and assign score via the model try: grader_feats=extractor.gen_feats(grader_set) feedback=extractor.gen_feedback(grader_set,grader_feats)[0] results['score']=int(model.predict(grader_feats)[0]) except Exception: error_message = "Could not extract features and score essay." 
log.exception(error_message) results['errors'].append(error_message) has_error=True #Try to determine confidence level try: results['confidence'] = get_confidence_value(grader_data['algorithm'], model, grader_feats, results['score'], grader_data['score']) except Exception: #If there is an error getting confidence, it is not a show-stopper, so just log log.exception("Problem generating confidence value") if not has_error: #If the essay is just a copy of the prompt, return a 0 as the score if( 'too_similar_to_prompt' in feedback and feedback['too_similar_to_prompt']): results['score']=0 results['correct']=False results['success']=True #Generate short form output--number of problem areas identified in feedback #Add feedback to results if available results['feedback'] = {} if 'topicality' in feedback and 'prompt_overlap' in feedback: results['feedback'].update({ 'topicality' : feedback['topicality'], 'prompt-overlap' : feedback['prompt_overlap'], }) results['feedback'].update( { 'spelling' : feedback['spelling'], 'grammar' : feedback['grammar'], 'markup-text' : feedback['markup_text'], } ) else: #If error, success is False. results['success']=False return results
[ "Grades", "a", "specified", "submission", "using", "specified", "models", "grader_data", "-", "A", "dictionary", ":", "{", "model", ":", "trained", "model", "extractor", ":", "trained", "feature", "extractor", "prompt", ":", "prompt", "for", "the", "question", ...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/grade.py#L28-L113
[ "def", "grade", "(", "grader_data", ",", "submission", ")", ":", "#Initialize result dictionary", "results", "=", "{", "'errors'", ":", "[", "]", ",", "'tests'", ":", "[", "]", ",", "'score'", ":", "0", ",", "'feedback'", ":", "\"\"", ",", "'success'", "...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
grade_generic
Grades a set of numeric and textual features using a generic model grader_data -- dictionary containing: { 'algorithm' - Type of algorithm to use to score } numeric_features - list of numeric features to predict on textual_features - list of textual feature to predict on
ease/grade.py
def grade_generic(grader_data, numeric_features, textual_features): """ Grades a set of numeric and textual features using a generic model grader_data -- dictionary containing: { 'algorithm' - Type of algorithm to use to score } numeric_features - list of numeric features to predict on textual_features - list of textual feature to predict on """ results = {'errors': [],'tests': [],'score': 0, 'success' : False, 'confidence' : 0} has_error=False #Try to find and load the model file grader_set=predictor_set.PredictorSet(essaytype="test") model, extractor = get_classifier_and_ext(grader_data) #Try to add essays to essay set object try: grader_set.add_row(numeric_features, textual_features,0) except Exception: error_msg = "Row could not be added to predictor set:{0} {1}".format(numeric_features, textual_features) log.exception(error_msg) results['errors'].append(error_msg) has_error=True #Try to extract features from submission and assign score via the model try: grader_feats=extractor.gen_feats(grader_set) results['score']=model.predict(grader_feats)[0] except Exception: error_msg = "Could not extract features and score essay." log.exception(error_msg) results['errors'].append(error_msg) has_error=True #Try to determine confidence level try: results['confidence'] = get_confidence_value(grader_data['algorithm'],model, grader_feats, results['score']) except Exception: #If there is an error getting confidence, it is not a show-stopper, so just log log.exception("Problem generating confidence value") if not has_error: results['success'] = True return results
def grade_generic(grader_data, numeric_features, textual_features): """ Grades a set of numeric and textual features using a generic model grader_data -- dictionary containing: { 'algorithm' - Type of algorithm to use to score } numeric_features - list of numeric features to predict on textual_features - list of textual feature to predict on """ results = {'errors': [],'tests': [],'score': 0, 'success' : False, 'confidence' : 0} has_error=False #Try to find and load the model file grader_set=predictor_set.PredictorSet(essaytype="test") model, extractor = get_classifier_and_ext(grader_data) #Try to add essays to essay set object try: grader_set.add_row(numeric_features, textual_features,0) except Exception: error_msg = "Row could not be added to predictor set:{0} {1}".format(numeric_features, textual_features) log.exception(error_msg) results['errors'].append(error_msg) has_error=True #Try to extract features from submission and assign score via the model try: grader_feats=extractor.gen_feats(grader_set) results['score']=model.predict(grader_feats)[0] except Exception: error_msg = "Could not extract features and score essay." log.exception(error_msg) results['errors'].append(error_msg) has_error=True #Try to determine confidence level try: results['confidence'] = get_confidence_value(grader_data['algorithm'],model, grader_feats, results['score']) except Exception: #If there is an error getting confidence, it is not a show-stopper, so just log log.exception("Problem generating confidence value") if not has_error: results['success'] = True return results
[ "Grades", "a", "set", "of", "numeric", "and", "textual", "features", "using", "a", "generic", "model", "grader_data", "--", "dictionary", "containing", ":", "{", "algorithm", "-", "Type", "of", "algorithm", "to", "use", "to", "score", "}", "numeric_features", ...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/grade.py#L115-L165
[ "def", "grade_generic", "(", "grader_data", ",", "numeric_features", ",", "textual_features", ")", ":", "results", "=", "{", "'errors'", ":", "[", "]", ",", "'tests'", ":", "[", "]", ",", "'score'", ":", "0", ",", "'success'", ":", "False", ",", "'confid...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
get_confidence_value
Determines a confidence in a certain score, given proper input parameters algorithm- from util_functions.AlgorithmTypes model - a trained model grader_feats - a row of features used by the model for classification/regression score - The score assigned to the submission by a prior model
ease/grade.py
def get_confidence_value(algorithm,model,grader_feats,score, scores): """ Determines a confidence in a certain score, given proper input parameters algorithm- from util_functions.AlgorithmTypes model - a trained model grader_feats - a row of features used by the model for classification/regression score - The score assigned to the submission by a prior model """ min_score=min(numpy.asarray(scores)) max_score=max(numpy.asarray(scores)) if algorithm == util_functions.AlgorithmTypes.classification and hasattr(model, "predict_proba"): #If classification, predict with probability, which gives you a matrix of confidences per score point raw_confidence=model.predict_proba(grader_feats)[0,(float(score)-float(min_score))] #TODO: Normalize confidence somehow here confidence=raw_confidence elif hasattr(model, "predict"): raw_confidence = model.predict(grader_feats)[0] confidence = max(float(raw_confidence) - math.floor(float(raw_confidence)), math.ceil(float(raw_confidence)) - float(raw_confidence)) else: confidence = 0 return confidence
def get_confidence_value(algorithm,model,grader_feats,score, scores): """ Determines a confidence in a certain score, given proper input parameters algorithm- from util_functions.AlgorithmTypes model - a trained model grader_feats - a row of features used by the model for classification/regression score - The score assigned to the submission by a prior model """ min_score=min(numpy.asarray(scores)) max_score=max(numpy.asarray(scores)) if algorithm == util_functions.AlgorithmTypes.classification and hasattr(model, "predict_proba"): #If classification, predict with probability, which gives you a matrix of confidences per score point raw_confidence=model.predict_proba(grader_feats)[0,(float(score)-float(min_score))] #TODO: Normalize confidence somehow here confidence=raw_confidence elif hasattr(model, "predict"): raw_confidence = model.predict(grader_feats)[0] confidence = max(float(raw_confidence) - math.floor(float(raw_confidence)), math.ceil(float(raw_confidence)) - float(raw_confidence)) else: confidence = 0 return confidence
[ "Determines", "a", "confidence", "in", "a", "certain", "score", "given", "proper", "input", "parameters", "algorithm", "-", "from", "util_functions", ".", "AlgorithmTypes", "model", "-", "a", "trained", "model", "grader_feats", "-", "a", "row", "of", "features",...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/grade.py#L167-L188
[ "def", "get_confidence_value", "(", "algorithm", ",", "model", ",", "grader_feats", ",", "score", ",", "scores", ")", ":", "min_score", "=", "min", "(", "numpy", ".", "asarray", "(", "scores", ")", ")", "max_score", "=", "max", "(", "numpy", ".", "asarra...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
create_model_path
Creates a path to model files model_path - string
ease/util_functions.py
def create_model_path(model_path): """ Creates a path to model files model_path - string """ if not model_path.startswith("/") and not model_path.startswith("models/"): model_path="/" + model_path if not model_path.startswith("models"): model_path = "models" + model_path if not model_path.endswith(".p"): model_path+=".p" return model_path
def create_model_path(model_path): """ Creates a path to model files model_path - string """ if not model_path.startswith("/") and not model_path.startswith("models/"): model_path="/" + model_path if not model_path.startswith("models"): model_path = "models" + model_path if not model_path.endswith(".p"): model_path+=".p" return model_path
[ "Creates", "a", "path", "to", "model", "files", "model_path", "-", "string" ]
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L36-L48
[ "def", "create_model_path", "(", "model_path", ")", ":", "if", "not", "model_path", ".", "startswith", "(", "\"/\"", ")", "and", "not", "model_path", ".", "startswith", "(", "\"models/\"", ")", ":", "model_path", "=", "\"/\"", "+", "model_path", "if", "not",...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
sub_chars
Strips illegal characters from a string. Used to sanitize input essays. Removes all non-punctuation, digit, or letter characters. Returns sanitized string. string - string
ease/util_functions.py
def sub_chars(string): """ Strips illegal characters from a string. Used to sanitize input essays. Removes all non-punctuation, digit, or letter characters. Returns sanitized string. string - string """ #Define replacement patterns sub_pat = r"[^A-Za-z\.\?!,';:]" char_pat = r"\." com_pat = r"," ques_pat = r"\?" excl_pat = r"!" sem_pat = r";" col_pat = r":" whitespace_pat = r"\s{1,}" #Replace text. Ordering is very important! nstring = re.sub(sub_pat, " ", string) nstring = re.sub(char_pat," .", nstring) nstring = re.sub(com_pat, " ,", nstring) nstring = re.sub(ques_pat, " ?", nstring) nstring = re.sub(excl_pat, " !", nstring) nstring = re.sub(sem_pat, " ;", nstring) nstring = re.sub(col_pat, " :", nstring) nstring = re.sub(whitespace_pat, " ", nstring) return nstring
def sub_chars(string): """ Strips illegal characters from a string. Used to sanitize input essays. Removes all non-punctuation, digit, or letter characters. Returns sanitized string. string - string """ #Define replacement patterns sub_pat = r"[^A-Za-z\.\?!,';:]" char_pat = r"\." com_pat = r"," ques_pat = r"\?" excl_pat = r"!" sem_pat = r";" col_pat = r":" whitespace_pat = r"\s{1,}" #Replace text. Ordering is very important! nstring = re.sub(sub_pat, " ", string) nstring = re.sub(char_pat," .", nstring) nstring = re.sub(com_pat, " ,", nstring) nstring = re.sub(ques_pat, " ?", nstring) nstring = re.sub(excl_pat, " !", nstring) nstring = re.sub(sem_pat, " ;", nstring) nstring = re.sub(col_pat, " :", nstring) nstring = re.sub(whitespace_pat, " ", nstring) return nstring
[ "Strips", "illegal", "characters", "from", "a", "string", ".", "Used", "to", "sanitize", "input", "essays", ".", "Removes", "all", "non", "-", "punctuation", "digit", "or", "letter", "characters", ".", "Returns", "sanitized", "string", ".", "string", "-", "s...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L50-L77
[ "def", "sub_chars", "(", "string", ")", ":", "#Define replacement patterns", "sub_pat", "=", "r\"[^A-Za-z\\.\\?!,';:]\"", "char_pat", "=", "r\"\\.\"", "com_pat", "=", "r\",\"", "ques_pat", "=", "r\"\\?\"", "excl_pat", "=", "r\"!\"", "sem_pat", "=", "r\";\"", "col_pa...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
spell_correct
Uses aspell to spell correct an input string. Requires aspell to be installed and added to the path. Returns the spell corrected string if aspell is found, original string if not. string - string
ease/util_functions.py
def spell_correct(string): """ Uses aspell to spell correct an input string. Requires aspell to be installed and added to the path. Returns the spell corrected string if aspell is found, original string if not. string - string """ # Create a temp file so that aspell could be used # By default, tempfile will delete this file when the file handle is closed. f = tempfile.NamedTemporaryFile(mode='w') f.write(string) f.flush() f_path = os.path.abspath(f.name) try: p = os.popen(aspell_path + " -a < " + f_path + " --sug-mode=ultra") # Aspell returns a list of incorrect words with the above flags incorrect = p.readlines() p.close() except Exception: log.exception("aspell process failed; could not spell check") # Return original string if aspell fails return string,0, string finally: f.close() incorrect_words = list() correct_spelling = list() for i in range(1, len(incorrect)): if(len(incorrect[i]) > 10): #Reformat aspell output to make sense match = re.search(":", incorrect[i]) if hasattr(match, "start"): begstring = incorrect[i][2:match.start()] begmatch = re.search(" ", begstring) begword = begstring[0:begmatch.start()] sugstring = incorrect[i][match.start() + 2:] sugmatch = re.search(",", sugstring) if hasattr(sugmatch, "start"): sug = sugstring[0:sugmatch.start()] incorrect_words.append(begword) correct_spelling.append(sug) #Create markup based on spelling errors newstring = string markup_string = string already_subbed=[] for i in range(0, len(incorrect_words)): sub_pat = r"\b" + incorrect_words[i] + r"\b" sub_comp = re.compile(sub_pat) newstring = re.sub(sub_comp, correct_spelling[i], newstring) if incorrect_words[i] not in already_subbed: markup_string=re.sub(sub_comp,'<bs>' + incorrect_words[i] + "</bs>", markup_string) already_subbed.append(incorrect_words[i]) return newstring,len(incorrect_words),markup_string
def spell_correct(string): """ Uses aspell to spell correct an input string. Requires aspell to be installed and added to the path. Returns the spell corrected string if aspell is found, original string if not. string - string """ # Create a temp file so that aspell could be used # By default, tempfile will delete this file when the file handle is closed. f = tempfile.NamedTemporaryFile(mode='w') f.write(string) f.flush() f_path = os.path.abspath(f.name) try: p = os.popen(aspell_path + " -a < " + f_path + " --sug-mode=ultra") # Aspell returns a list of incorrect words with the above flags incorrect = p.readlines() p.close() except Exception: log.exception("aspell process failed; could not spell check") # Return original string if aspell fails return string,0, string finally: f.close() incorrect_words = list() correct_spelling = list() for i in range(1, len(incorrect)): if(len(incorrect[i]) > 10): #Reformat aspell output to make sense match = re.search(":", incorrect[i]) if hasattr(match, "start"): begstring = incorrect[i][2:match.start()] begmatch = re.search(" ", begstring) begword = begstring[0:begmatch.start()] sugstring = incorrect[i][match.start() + 2:] sugmatch = re.search(",", sugstring) if hasattr(sugmatch, "start"): sug = sugstring[0:sugmatch.start()] incorrect_words.append(begword) correct_spelling.append(sug) #Create markup based on spelling errors newstring = string markup_string = string already_subbed=[] for i in range(0, len(incorrect_words)): sub_pat = r"\b" + incorrect_words[i] + r"\b" sub_comp = re.compile(sub_pat) newstring = re.sub(sub_comp, correct_spelling[i], newstring) if incorrect_words[i] not in already_subbed: markup_string=re.sub(sub_comp,'<bs>' + incorrect_words[i] + "</bs>", markup_string) already_subbed.append(incorrect_words[i]) return newstring,len(incorrect_words),markup_string
[ "Uses", "aspell", "to", "spell", "correct", "an", "input", "string", ".", "Requires", "aspell", "to", "be", "installed", "and", "added", "to", "the", "path", ".", "Returns", "the", "spell", "corrected", "string", "if", "aspell", "is", "found", "original", ...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L80-L140
[ "def", "spell_correct", "(", "string", ")", ":", "# Create a temp file so that aspell could be used", "# By default, tempfile will delete this file when the file handle is closed.", "f", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'w'", ")", "f", ".", "writ...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
ngrams
Generates ngrams(word sequences of fixed length) from an input token sequence. tokens is a list of words. min_n is the minimum length of an ngram to return. max_n is the maximum length of an ngram to return. returns a list of ngrams (words separated by a space)
ease/util_functions.py
def ngrams(tokens, min_n, max_n): """ Generates ngrams(word sequences of fixed length) from an input token sequence. tokens is a list of words. min_n is the minimum length of an ngram to return. max_n is the maximum length of an ngram to return. returns a list of ngrams (words separated by a space) """ all_ngrams = list() n_tokens = len(tokens) for i in xrange(n_tokens): for j in xrange(i + min_n, min(n_tokens, i + max_n) + 1): all_ngrams.append(" ".join(tokens[i:j])) return all_ngrams
def ngrams(tokens, min_n, max_n): """ Generates ngrams(word sequences of fixed length) from an input token sequence. tokens is a list of words. min_n is the minimum length of an ngram to return. max_n is the maximum length of an ngram to return. returns a list of ngrams (words separated by a space) """ all_ngrams = list() n_tokens = len(tokens) for i in xrange(n_tokens): for j in xrange(i + min_n, min(n_tokens, i + max_n) + 1): all_ngrams.append(" ".join(tokens[i:j])) return all_ngrams
[ "Generates", "ngrams", "(", "word", "sequences", "of", "fixed", "length", ")", "from", "an", "input", "token", "sequence", ".", "tokens", "is", "a", "list", "of", "words", ".", "min_n", "is", "the", "minimum", "length", "of", "an", "ngram", "to", "return...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L143-L156
[ "def", "ngrams", "(", "tokens", ",", "min_n", ",", "max_n", ")", ":", "all_ngrams", "=", "list", "(", ")", "n_tokens", "=", "len", "(", "tokens", ")", "for", "i", "in", "xrange", "(", "n_tokens", ")", ":", "for", "j", "in", "xrange", "(", "i", "+...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
f7
Makes a list unique
ease/util_functions.py
def f7(seq): """ Makes a list unique """ seen = set() seen_add = seen.add return [x for x in seq if x not in seen and not seen_add(x)]
def f7(seq): """ Makes a list unique """ seen = set() seen_add = seen.add return [x for x in seq if x not in seen and not seen_add(x)]
[ "Makes", "a", "list", "unique" ]
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L159-L165
[ "def", "f7", "(", "seq", ")", ":", "seen", "=", "set", "(", ")", "seen_add", "=", "seen", ".", "add", "return", "[", "x", "for", "x", "in", "seq", "if", "x", "not", "in", "seen", "and", "not", "seen_add", "(", "x", ")", "]" ]
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
count_list
Generates a count of the number of times each unique item appears in a list
ease/util_functions.py
def count_list(the_list): """ Generates a count of the number of times each unique item appears in a list """ count = the_list.count result = [(item, count(item)) for item in set(the_list)] result.sort() return result
def count_list(the_list): """ Generates a count of the number of times each unique item appears in a list """ count = the_list.count result = [(item, count(item)) for item in set(the_list)] result.sort() return result
[ "Generates", "a", "count", "of", "the", "number", "of", "times", "each", "unique", "item", "appears", "in", "a", "list" ]
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L168-L175
[ "def", "count_list", "(", "the_list", ")", ":", "count", "=", "the_list", ".", "count", "result", "=", "[", "(", "item", ",", "count", "(", "item", ")", ")", "for", "item", "in", "set", "(", "the_list", ")", "]", "result", ".", "sort", "(", ")", ...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
regenerate_good_tokens
Given an input string, part of speech tags the string, then generates a list of ngrams that appear in the string. Used to define grammatically correct part of speech tag sequences. Returns a list of part of speech tag sequences.
ease/util_functions.py
def regenerate_good_tokens(string): """ Given an input string, part of speech tags the string, then generates a list of ngrams that appear in the string. Used to define grammatically correct part of speech tag sequences. Returns a list of part of speech tag sequences. """ toks = nltk.word_tokenize(string) pos_string = nltk.pos_tag(toks) pos_seq = [tag[1] for tag in pos_string] pos_ngrams = ngrams(pos_seq, 2, 4) sel_pos_ngrams = f7(pos_ngrams) return sel_pos_ngrams
def regenerate_good_tokens(string): """ Given an input string, part of speech tags the string, then generates a list of ngrams that appear in the string. Used to define grammatically correct part of speech tag sequences. Returns a list of part of speech tag sequences. """ toks = nltk.word_tokenize(string) pos_string = nltk.pos_tag(toks) pos_seq = [tag[1] for tag in pos_string] pos_ngrams = ngrams(pos_seq, 2, 4) sel_pos_ngrams = f7(pos_ngrams) return sel_pos_ngrams
[ "Given", "an", "input", "string", "part", "of", "speech", "tags", "the", "string", "then", "generates", "a", "list", "of", "ngrams", "that", "appear", "in", "the", "string", ".", "Used", "to", "define", "grammatically", "correct", "part", "of", "speech", "...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L178-L190
[ "def", "regenerate_good_tokens", "(", "string", ")", ":", "toks", "=", "nltk", ".", "word_tokenize", "(", "string", ")", "pos_string", "=", "nltk", ".", "pos_tag", "(", "toks", ")", "pos_seq", "=", "[", "tag", "[", "1", "]", "for", "tag", "in", "pos_st...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
get_vocab
Uses a fisher test to find words that are significant in that they separate high scoring essays from low scoring essays. text is a list of input essays. score is a list of scores, with score[n] corresponding to text[n] max_feats is the maximum number of features to consider in the first pass max_feats2 is the maximum number of features to consider in the second (final) pass Returns a list of words that constitute the significant vocabulary
ease/util_functions.py
def get_vocab(text, score, max_feats=750, max_feats2=200): """ Uses a fisher test to find words that are significant in that they separate high scoring essays from low scoring essays. text is a list of input essays. score is a list of scores, with score[n] corresponding to text[n] max_feats is the maximum number of features to consider in the first pass max_feats2 is the maximum number of features to consider in the second (final) pass Returns a list of words that constitute the significant vocabulary """ dict = CountVectorizer(ngram_range=(1,2), max_features=max_feats) dict_mat = dict.fit_transform(text) set_score = numpy.asarray(score, dtype=numpy.int) med_score = numpy.median(set_score) new_score = set_score if(med_score == 0): med_score = 1 new_score[set_score < med_score] = 0 new_score[set_score >= med_score] = 1 fish_vals = [] for col_num in range(0, dict_mat.shape[1]): loop_vec = dict_mat.getcol(col_num).toarray() good_loop_vec = loop_vec[new_score == 1] bad_loop_vec = loop_vec[new_score == 0] good_loop_present = len(good_loop_vec[good_loop_vec > 0]) good_loop_missing = len(good_loop_vec[good_loop_vec == 0]) bad_loop_present = len(bad_loop_vec[bad_loop_vec > 0]) bad_loop_missing = len(bad_loop_vec[bad_loop_vec == 0]) fish_val = pvalue(good_loop_present, bad_loop_present, good_loop_missing, bad_loop_missing).two_tail fish_vals.append(fish_val) cutoff = 1 if(len(fish_vals) > max_feats2): cutoff = sorted(fish_vals)[max_feats2] good_cols = numpy.asarray([num for num in range(0, dict_mat.shape[1]) if fish_vals[num] <= cutoff]) getVar = lambda searchList, ind: [searchList[i] for i in ind] vocab = getVar(dict.get_feature_names(), good_cols) return vocab
def get_vocab(text, score, max_feats=750, max_feats2=200): """ Uses a fisher test to find words that are significant in that they separate high scoring essays from low scoring essays. text is a list of input essays. score is a list of scores, with score[n] corresponding to text[n] max_feats is the maximum number of features to consider in the first pass max_feats2 is the maximum number of features to consider in the second (final) pass Returns a list of words that constitute the significant vocabulary """ dict = CountVectorizer(ngram_range=(1,2), max_features=max_feats) dict_mat = dict.fit_transform(text) set_score = numpy.asarray(score, dtype=numpy.int) med_score = numpy.median(set_score) new_score = set_score if(med_score == 0): med_score = 1 new_score[set_score < med_score] = 0 new_score[set_score >= med_score] = 1 fish_vals = [] for col_num in range(0, dict_mat.shape[1]): loop_vec = dict_mat.getcol(col_num).toarray() good_loop_vec = loop_vec[new_score == 1] bad_loop_vec = loop_vec[new_score == 0] good_loop_present = len(good_loop_vec[good_loop_vec > 0]) good_loop_missing = len(good_loop_vec[good_loop_vec == 0]) bad_loop_present = len(bad_loop_vec[bad_loop_vec > 0]) bad_loop_missing = len(bad_loop_vec[bad_loop_vec == 0]) fish_val = pvalue(good_loop_present, bad_loop_present, good_loop_missing, bad_loop_missing).two_tail fish_vals.append(fish_val) cutoff = 1 if(len(fish_vals) > max_feats2): cutoff = sorted(fish_vals)[max_feats2] good_cols = numpy.asarray([num for num in range(0, dict_mat.shape[1]) if fish_vals[num] <= cutoff]) getVar = lambda searchList, ind: [searchList[i] for i in ind] vocab = getVar(dict.get_feature_names(), good_cols) return vocab
[ "Uses", "a", "fisher", "test", "to", "find", "words", "that", "are", "significant", "in", "that", "they", "separate", "high", "scoring", "essays", "from", "low", "scoring", "essays", ".", "text", "is", "a", "list", "of", "input", "essays", ".", "score", ...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L193-L233
[ "def", "get_vocab", "(", "text", ",", "score", ",", "max_feats", "=", "750", ",", "max_feats2", "=", "200", ")", ":", "dict", "=", "CountVectorizer", "(", "ngram_range", "=", "(", "1", ",", "2", ")", ",", "max_features", "=", "max_feats", ")", "dict_ma...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
edit_distance
Calculates string edit distance between string 1 and string 2. Deletion, insertion, substitution, and transposition all increase edit distance.
ease/util_functions.py
def edit_distance(s1, s2): """ Calculates string edit distance between string 1 and string 2. Deletion, insertion, substitution, and transposition all increase edit distance. """ d = {} lenstr1 = len(s1) lenstr2 = len(s2) for i in xrange(-1, lenstr1 + 1): d[(i, -1)] = i + 1 for j in xrange(-1, lenstr2 + 1): d[(-1, j)] = j + 1 for i in xrange(lenstr1): for j in xrange(lenstr2): if s1[i] == s2[j]: cost = 0 else: cost = 1 d[(i, j)] = min( d[(i - 1, j)] + 1, # deletion d[(i, j - 1)] + 1, # insertion d[(i - 1, j - 1)] + cost, # substitution ) if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]: d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost) # transposition return d[lenstr1 - 1, lenstr2 - 1]
def edit_distance(s1, s2): """ Calculates string edit distance between string 1 and string 2. Deletion, insertion, substitution, and transposition all increase edit distance. """ d = {} lenstr1 = len(s1) lenstr2 = len(s2) for i in xrange(-1, lenstr1 + 1): d[(i, -1)] = i + 1 for j in xrange(-1, lenstr2 + 1): d[(-1, j)] = j + 1 for i in xrange(lenstr1): for j in xrange(lenstr2): if s1[i] == s2[j]: cost = 0 else: cost = 1 d[(i, j)] = min( d[(i - 1, j)] + 1, # deletion d[(i, j - 1)] + 1, # insertion d[(i - 1, j - 1)] + cost, # substitution ) if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]: d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost) # transposition return d[lenstr1 - 1, lenstr2 - 1]
[ "Calculates", "string", "edit", "distance", "between", "string", "1", "and", "string", "2", ".", "Deletion", "insertion", "substitution", "and", "transposition", "all", "increase", "edit", "distance", "." ]
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L236-L263
[ "def", "edit_distance", "(", "s1", ",", "s2", ")", ":", "d", "=", "{", "}", "lenstr1", "=", "len", "(", "s1", ")", "lenstr2", "=", "len", "(", "s2", ")", "for", "i", "in", "xrange", "(", "-", "1", ",", "lenstr1", "+", "1", ")", ":", "d", "[...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
gen_cv_preds
Generates cross validated predictions using an input classifier and data. clf is a classifier that implements that implements the fit and predict methods. arr is the input data array (X) sel_score is the target list (y). y[n] corresponds to X[n,:] num_chunks is the number of cross validation folds to use Returns an array of the predictions where prediction[n] corresponds to X[n,:]
ease/util_functions.py
def gen_cv_preds(clf, arr, sel_score, num_chunks=3): """ Generates cross validated predictions using an input classifier and data. clf is a classifier that implements that implements the fit and predict methods. arr is the input data array (X) sel_score is the target list (y). y[n] corresponds to X[n,:] num_chunks is the number of cross validation folds to use Returns an array of the predictions where prediction[n] corresponds to X[n,:] """ cv_len = int(math.floor(len(sel_score) / num_chunks)) chunks = [] for i in range(0, num_chunks): range_min = i * cv_len range_max = ((i + 1) * cv_len) if i == num_chunks - 1: range_max = len(sel_score) chunks.append(range(range_min, range_max)) preds = [] set_score = numpy.asarray(sel_score, dtype=numpy.int) chunk_vec = numpy.asarray(range(0, len(chunks))) for i in xrange(0, len(chunks)): loop_inds = list( chain.from_iterable([chunks[int(z)] for z, m in enumerate(range(0, len(chunks))) if int(z) != i])) sim_fit = clf.fit(arr[loop_inds], set_score[loop_inds]) preds.append(list(sim_fit.predict(arr[chunks[i]]))) all_preds = list(chain(*preds)) return(all_preds)
def gen_cv_preds(clf, arr, sel_score, num_chunks=3): """ Generates cross validated predictions using an input classifier and data. clf is a classifier that implements that implements the fit and predict methods. arr is the input data array (X) sel_score is the target list (y). y[n] corresponds to X[n,:] num_chunks is the number of cross validation folds to use Returns an array of the predictions where prediction[n] corresponds to X[n,:] """ cv_len = int(math.floor(len(sel_score) / num_chunks)) chunks = [] for i in range(0, num_chunks): range_min = i * cv_len range_max = ((i + 1) * cv_len) if i == num_chunks - 1: range_max = len(sel_score) chunks.append(range(range_min, range_max)) preds = [] set_score = numpy.asarray(sel_score, dtype=numpy.int) chunk_vec = numpy.asarray(range(0, len(chunks))) for i in xrange(0, len(chunks)): loop_inds = list( chain.from_iterable([chunks[int(z)] for z, m in enumerate(range(0, len(chunks))) if int(z) != i])) sim_fit = clf.fit(arr[loop_inds], set_score[loop_inds]) preds.append(list(sim_fit.predict(arr[chunks[i]]))) all_preds = list(chain(*preds)) return(all_preds)
[ "Generates", "cross", "validated", "predictions", "using", "an", "input", "classifier", "and", "data", ".", "clf", "is", "a", "classifier", "that", "implements", "that", "implements", "the", "fit", "and", "predict", "methods", ".", "arr", "is", "the", "input",...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L276-L302
[ "def", "gen_cv_preds", "(", "clf", ",", "arr", ",", "sel_score", ",", "num_chunks", "=", "3", ")", ":", "cv_len", "=", "int", "(", "math", ".", "floor", "(", "len", "(", "sel_score", ")", "/", "num_chunks", ")", ")", "chunks", "=", "[", "]", "for",...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
gen_model
Fits a classifier to data and a target score clf is an input classifier that implements the fit method. arr is a data array(X) sel_score is the target list (y) where y[n] corresponds to X[n,:] sim_fit is not a useful return value. Instead the clf is the useful output.
ease/util_functions.py
def gen_model(clf, arr, sel_score): """ Fits a classifier to data and a target score clf is an input classifier that implements the fit method. arr is a data array(X) sel_score is the target list (y) where y[n] corresponds to X[n,:] sim_fit is not a useful return value. Instead the clf is the useful output. """ set_score = numpy.asarray(sel_score, dtype=numpy.int) sim_fit = clf.fit(arr, set_score) return(sim_fit)
def gen_model(clf, arr, sel_score): """ Fits a classifier to data and a target score clf is an input classifier that implements the fit method. arr is a data array(X) sel_score is the target list (y) where y[n] corresponds to X[n,:] sim_fit is not a useful return value. Instead the clf is the useful output. """ set_score = numpy.asarray(sel_score, dtype=numpy.int) sim_fit = clf.fit(arr, set_score) return(sim_fit)
[ "Fits", "a", "classifier", "to", "data", "and", "a", "target", "score", "clf", "is", "an", "input", "classifier", "that", "implements", "the", "fit", "method", ".", "arr", "is", "a", "data", "array", "(", "X", ")", "sel_score", "is", "the", "target", "...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L305-L315
[ "def", "gen_model", "(", "clf", ",", "arr", ",", "sel_score", ")", ":", "set_score", "=", "numpy", ".", "asarray", "(", "sel_score", ",", "dtype", "=", "numpy", ".", "int", ")", "sim_fit", "=", "clf", ".", "fit", "(", "arr", ",", "set_score", ")", ...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
gen_preds
Generates predictions on a novel data array using a fit classifier clf is a classifier that has already been fit arr is a data array identical in dimension to the array clf was trained on Returns the array of predictions.
ease/util_functions.py
def gen_preds(clf, arr): """ Generates predictions on a novel data array using a fit classifier clf is a classifier that has already been fit arr is a data array identical in dimension to the array clf was trained on Returns the array of predictions. """ if(hasattr(clf, "predict_proba")): ret = clf.predict(arr) # pred_score=preds.argmax(1)+min(x._score) else: ret = clf.predict(arr) return ret
def gen_preds(clf, arr): """ Generates predictions on a novel data array using a fit classifier clf is a classifier that has already been fit arr is a data array identical in dimension to the array clf was trained on Returns the array of predictions. """ if(hasattr(clf, "predict_proba")): ret = clf.predict(arr) # pred_score=preds.argmax(1)+min(x._score) else: ret = clf.predict(arr) return ret
[ "Generates", "predictions", "on", "a", "novel", "data", "array", "using", "a", "fit", "classifier", "clf", "is", "a", "classifier", "that", "has", "already", "been", "fit", "arr", "is", "a", "data", "array", "identical", "in", "dimension", "to", "the", "ar...
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L318-L330
[ "def", "gen_preds", "(", "clf", ",", "arr", ")", ":", "if", "(", "hasattr", "(", "clf", ",", "\"predict_proba\"", ")", ")", ":", "ret", "=", "clf", ".", "predict", "(", "arr", ")", "# pred_score=preds.argmax(1)+min(x._score)", "else", ":", "ret", "=", "c...
a7890ed403da94d03726b0639cd8ebda45af6bbb
valid
calc_list_average
Calculates the average value of a list of numbers Returns a float
ease/util_functions.py
def calc_list_average(l): """ Calculates the average value of a list of numbers Returns a float """ total = 0.0 for value in l: total += value return total / len(l)
def calc_list_average(l): """ Calculates the average value of a list of numbers Returns a float """ total = 0.0 for value in l: total += value return total / len(l)
[ "Calculates", "the", "average", "value", "of", "a", "list", "of", "numbers", "Returns", "a", "float" ]
edx/ease
python
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/util_functions.py#L333-L341
[ "def", "calc_list_average", "(", "l", ")", ":", "total", "=", "0.0", "for", "value", "in", "l", ":", "total", "+=", "value", "return", "total", "/", "len", "(", "l", ")" ]
a7890ed403da94d03726b0639cd8ebda45af6bbb