Dataset columns (with value statistics):

id                int32    values 0 to 252k
repo              string   lengths 7 to 55
path              string   lengths 4 to 127
func_name         string   lengths 1 to 88
original_string   string   lengths 75 to 19.8k
language          string   1 distinct value
code              string   lengths 75 to 19.8k
code_tokens       list
docstring         string   lengths 3 to 17.3k
docstring_tokens  list
sha               string   length 40 (fixed)
url               string   lengths 87 to 242
17,100
skggm/skggm
examples/estimator_suite_spark.py
sk_ledoit_wolf
def sk_ledoit_wolf(X):
    """Estimate inverse covariance via scikit-learn ledoit_wolf function.
    """
    print("Ledoit-Wolf (sklearn)")
    lw_cov_, _ = ledoit_wolf(X)
    lw_prec_ = np.linalg.inv(lw_cov_)
    return lw_cov_, lw_prec_
python
def sk_ledoit_wolf(X):
    """Estimate inverse covariance via scikit-learn ledoit_wolf function.
    """
    print("Ledoit-Wolf (sklearn)")
    lw_cov_, _ = ledoit_wolf(X)
    lw_prec_ = np.linalg.inv(lw_cov_)
    return lw_cov_, lw_prec_
[ "def", "sk_ledoit_wolf", "(", "X", ")", ":", "print", "(", "\"Ledoit-Wolf (sklearn)\"", ")", "lw_cov_", ",", "_", "=", "ledoit_wolf", "(", "X", ")", "lw_prec_", "=", "np", ".", "linalg", ".", "inv", "(", "lw_cov_", ")", "return", "lw_cov_", ",", "lw_prec_" ]
Estimate inverse covariance via scikit-learn ledoit_wolf function.
[ "Estimate", "inverse", "covariance", "via", "scikit", "-", "learn", "ledoit_wolf", "function", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite_spark.py#L240-L246
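A minimal usage sketch for this record's helper on synthetic data, assuming numpy, scikit-learn, and the sk_ledoit_wolf definition above are in scope; the data and sizes here are hypothetical:

import numpy as np
from sklearn.covariance import ledoit_wolf  # the estimator sk_ledoit_wolf wraps

X = np.random.RandomState(0).randn(200, 10)  # hypothetical: 200 samples, 10 features
lw_cov, lw_prec = sk_ledoit_wolf(X)
# lw_prec is the plain matrix inverse of the shrunk covariance estimate.
assert np.allclose(lw_cov @ lw_prec, np.eye(10), atol=1e-6)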
17,101
skggm/skggm
inverse_covariance/profiling/metrics.py
_nonzero_intersection
def _nonzero_intersection(m, m_hat):
    """Count the number of nonzeros in and between m and m_hat.

    Returns
    ----------
    m_nnz : number of nonzeros in m (w/o diagonal)

    m_hat_nnz : number of nonzeros in m_hat (w/o diagonal)

    intersection_nnz : number of nonzeros in intersection of m/m_hat
        (w/o diagonal)
    """
    n_features, _ = m.shape

    m_no_diag = m.copy()
    m_no_diag[np.diag_indices(n_features)] = 0
    m_hat_no_diag = m_hat.copy()
    m_hat_no_diag[np.diag_indices(n_features)] = 0

    m_hat_nnz = len(np.nonzero(m_hat_no_diag.flat)[0])
    m_nnz = len(np.nonzero(m_no_diag.flat)[0])

    intersection_nnz = len(
        np.intersect1d(np.nonzero(m_no_diag.flat)[0], np.nonzero(m_hat_no_diag.flat)[0])
    )

    return m_nnz, m_hat_nnz, intersection_nnz
python
def _nonzero_intersection(m, m_hat):
    """Count the number of nonzeros in and between m and m_hat.

    Returns
    ----------
    m_nnz : number of nonzeros in m (w/o diagonal)

    m_hat_nnz : number of nonzeros in m_hat (w/o diagonal)

    intersection_nnz : number of nonzeros in intersection of m/m_hat
        (w/o diagonal)
    """
    n_features, _ = m.shape

    m_no_diag = m.copy()
    m_no_diag[np.diag_indices(n_features)] = 0
    m_hat_no_diag = m_hat.copy()
    m_hat_no_diag[np.diag_indices(n_features)] = 0

    m_hat_nnz = len(np.nonzero(m_hat_no_diag.flat)[0])
    m_nnz = len(np.nonzero(m_no_diag.flat)[0])

    intersection_nnz = len(
        np.intersect1d(np.nonzero(m_no_diag.flat)[0], np.nonzero(m_hat_no_diag.flat)[0])
    )

    return m_nnz, m_hat_nnz, intersection_nnz
[ "def", "_nonzero_intersection", "(", "m", ",", "m_hat", ")", ":", "n_features", ",", "_", "=", "m", ".", "shape", "m_no_diag", "=", "m", ".", "copy", "(", ")", "m_no_diag", "[", "np", ".", "diag_indices", "(", "n_features", ")", "]", "=", "0", "m_hat_no_diag", "=", "m_hat", ".", "copy", "(", ")", "m_hat_no_diag", "[", "np", ".", "diag_indices", "(", "n_features", ")", "]", "=", "0", "m_hat_nnz", "=", "len", "(", "np", ".", "nonzero", "(", "m_hat_no_diag", ".", "flat", ")", "[", "0", "]", ")", "m_nnz", "=", "len", "(", "np", ".", "nonzero", "(", "m_no_diag", ".", "flat", ")", "[", "0", "]", ")", "intersection_nnz", "=", "len", "(", "np", ".", "intersect1d", "(", "np", ".", "nonzero", "(", "m_no_diag", ".", "flat", ")", "[", "0", "]", ",", "np", ".", "nonzero", "(", "m_hat_no_diag", ".", "flat", ")", "[", "0", "]", ")", ")", "return", "m_nnz", ",", "m_hat_nnz", ",", "intersection_nnz" ]
Count the number of nonzeros in and between m and m_hat.

Returns
----------
m_nnz : number of nonzeros in m (w/o diagonal)

m_hat_nnz : number of nonzeros in m_hat (w/o diagonal)

intersection_nnz : number of nonzeros in intersection of m/m_hat
    (w/o diagonal)
[ "Count", "the", "number", "of", "nonzeros", "in", "and", "between", "m", "and", "m_hat", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L4-L30
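A small worked example of the counting convention, assuming numpy and the _nonzero_intersection definition above; the matrices are hypothetical:

import numpy as np

m = np.array([[1.0, 0.5, 0.0],
              [0.5, 1.0, 0.0],
              [0.0, 0.0, 1.0]])
m_hat = np.array([[1.0, 0.5, 0.2],
                  [0.5, 1.0, 0.0],
                  [0.2, 0.0, 1.0]])
# m has 2 off-diagonal nonzeros, m_hat has 4, and 2 of them coincide.
print(_nonzero_intersection(m, m_hat))  # (2, 4, 2)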
17,102
skggm/skggm
inverse_covariance/profiling/metrics.py
support_false_positive_count
def support_false_positive_count(m, m_hat):
    """Count the number of false positive support elements in
    m_hat in one triangle, not including the diagonal.
    """
    m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
    return int((m_hat_nnz - intersection_nnz) / 2.0)
python
def support_false_positive_count(m, m_hat):
    """Count the number of false positive support elements in
    m_hat in one triangle, not including the diagonal.
    """
    m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
    return int((m_hat_nnz - intersection_nnz) / 2.0)
[ "def", "support_false_positive_count", "(", "m", ",", "m_hat", ")", ":", "m_nnz", ",", "m_hat_nnz", ",", "intersection_nnz", "=", "_nonzero_intersection", "(", "m", ",", "m_hat", ")", "return", "int", "(", "(", "m_hat_nnz", "-", "intersection_nnz", ")", "/", "2.0", ")" ]
Count the number of false positive support elements in m_hat in one triangle, not including the diagonal.
[ "Count", "the", "number", "of", "false", "positive", "support", "elements", "in", "m_hat", "in", "one", "triangle", "not", "including", "the", "diagonal", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L33-L38
17,103
skggm/skggm
inverse_covariance/profiling/metrics.py
support_false_negative_count
def support_false_negative_count(m, m_hat):
    """Count the number of false negative support elements in
    m_hat in one triangle, not including the diagonal.
    """
    m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
    return int((m_nnz - intersection_nnz) / 2.0)
python
def support_false_negative_count(m, m_hat):
    """Count the number of false negative support elements in
    m_hat in one triangle, not including the diagonal.
    """
    m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
    return int((m_nnz - intersection_nnz) / 2.0)
[ "def", "support_false_negative_count", "(", "m", ",", "m_hat", ")", ":", "m_nnz", ",", "m_hat_nnz", ",", "intersection_nnz", "=", "_nonzero_intersection", "(", "m", ",", "m_hat", ")", "return", "int", "(", "(", "m_nnz", "-", "intersection_nnz", ")", "/", "2.0", ")" ]
Count the number of false negative support elements in m_hat in one triangle, not including the diagonal.
[ "Count", "the", "number", "of", "false", "negative", "support", "elements", "in", "m_hat", "in", "one", "triangle", "not", "including", "the", "diagonal", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L41-L46
17,104
skggm/skggm
inverse_covariance/profiling/metrics.py
support_difference_count
def support_difference_count(m, m_hat):
    """Count the number of different elements in the support in one triangle,
    not including the diagonal.
    """
    m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
    return int((m_nnz + m_hat_nnz - (2 * intersection_nnz)) / 2.0)
python
def support_difference_count(m, m_hat):
    """Count the number of different elements in the support in one triangle,
    not including the diagonal.
    """
    m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
    return int((m_nnz + m_hat_nnz - (2 * intersection_nnz)) / 2.0)
[ "def", "support_difference_count", "(", "m", ",", "m_hat", ")", ":", "m_nnz", ",", "m_hat_nnz", ",", "intersection_nnz", "=", "_nonzero_intersection", "(", "m", ",", "m_hat", ")", "return", "int", "(", "(", "m_nnz", "+", "m_hat_nnz", "-", "(", "2", "*", "intersection_nnz", ")", ")", "/", "2.0", ")" ]
Count the number of different elements in the support in one triangle, not including the diagonal.
[ "Count", "the", "number", "of", "different", "elements", "in", "the", "support", "in", "one", "triangle", "not", "including", "the", "diagonal", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L49-L54
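Continuing the hypothetical m/m_hat example above: the three counts are consistent, since halving converts full-matrix counts to one triangle and the symmetric difference splits into false positives plus false negatives:

fp = support_false_positive_count(m, m_hat)   # (4 - 2) / 2 = 1
fn = support_false_negative_count(m, m_hat)   # (2 - 2) / 2 = 0
diff = support_difference_count(m, m_hat)     # (2 + 4 - 2*2) / 2 = 1
assert diff == fp + fn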
17,105
skggm/skggm
inverse_covariance/profiling/metrics.py
has_exact_support
def has_exact_support(m, m_hat):
    """Returns 1 if support_difference_count is zero, 0 else.
    """
    m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
    return int((m_nnz + m_hat_nnz - (2 * intersection_nnz)) == 0)
python
def has_exact_support(m, m_hat):
    """Returns 1 if support_difference_count is zero, 0 else.
    """
    m_nnz, m_hat_nnz, intersection_nnz = _nonzero_intersection(m, m_hat)
    return int((m_nnz + m_hat_nnz - (2 * intersection_nnz)) == 0)
[ "def", "has_exact_support", "(", "m", ",", "m_hat", ")", ":", "m_nnz", ",", "m_hat_nnz", ",", "intersection_nnz", "=", "_nonzero_intersection", "(", "m", ",", "m_hat", ")", "return", "int", "(", "(", "m_nnz", "+", "m_hat_nnz", "-", "(", "2", "*", "intersection_nnz", ")", ")", "==", "0", ")" ]
Returns 1 if support_difference_count is zero, 0 else.
[ "Returns", "1", "if", "support_difference_count", "is", "zero", "0", "else", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L57-L61
17,106
skggm/skggm
inverse_covariance/profiling/metrics.py
has_approx_support
def has_approx_support(m, m_hat, prob=0.01):
    """Returns 1 if model selection error is less than or equal to prob rate,
    0 else.

    NOTE: why does np.nonzero/np.flatnonzero create so much problems?
    """
    m_nz = np.flatnonzero(np.triu(m, 1))
    m_hat_nz = np.flatnonzero(np.triu(m_hat, 1))

    upper_diagonal_mask = np.flatnonzero(np.triu(np.ones(m.shape), 1))
    not_m_nz = np.setdiff1d(upper_diagonal_mask, m_nz)

    intersection = np.in1d(m_hat_nz, m_nz)  # true positives
    not_intersection = np.in1d(m_hat_nz, not_m_nz)  # false positives

    true_positive_rate = 0.0
    if len(m_nz):
        true_positive_rate = 1. * np.sum(intersection) / len(m_nz)
        true_negative_rate = 1. - true_positive_rate

    false_positive_rate = 0.0
    if len(not_m_nz):
        false_positive_rate = 1. * np.sum(not_intersection) / len(not_m_nz)

    return int(np.less_equal(true_negative_rate + false_positive_rate, prob))
python
def has_approx_support(m, m_hat, prob=0.01):
    """Returns 1 if model selection error is less than or equal to prob rate,
    0 else.

    NOTE: why does np.nonzero/np.flatnonzero create so much problems?
    """
    m_nz = np.flatnonzero(np.triu(m, 1))
    m_hat_nz = np.flatnonzero(np.triu(m_hat, 1))

    upper_diagonal_mask = np.flatnonzero(np.triu(np.ones(m.shape), 1))
    not_m_nz = np.setdiff1d(upper_diagonal_mask, m_nz)

    intersection = np.in1d(m_hat_nz, m_nz)  # true positives
    not_intersection = np.in1d(m_hat_nz, not_m_nz)  # false positives

    true_positive_rate = 0.0
    if len(m_nz):
        true_positive_rate = 1. * np.sum(intersection) / len(m_nz)
        true_negative_rate = 1. - true_positive_rate

    false_positive_rate = 0.0
    if len(not_m_nz):
        false_positive_rate = 1. * np.sum(not_intersection) / len(not_m_nz)

    return int(np.less_equal(true_negative_rate + false_positive_rate, prob))
[ "def", "has_approx_support", "(", "m", ",", "m_hat", ",", "prob", "=", "0.01", ")", ":", "m_nz", "=", "np", ".", "flatnonzero", "(", "np", ".", "triu", "(", "m", ",", "1", ")", ")", "m_hat_nz", "=", "np", ".", "flatnonzero", "(", "np", ".", "triu", "(", "m_hat", ",", "1", ")", ")", "upper_diagonal_mask", "=", "np", ".", "flatnonzero", "(", "np", ".", "triu", "(", "np", ".", "ones", "(", "m", ".", "shape", ")", ",", "1", ")", ")", "not_m_nz", "=", "np", ".", "setdiff1d", "(", "upper_diagonal_mask", ",", "m_nz", ")", "intersection", "=", "np", ".", "in1d", "(", "m_hat_nz", ",", "m_nz", ")", "# true positives", "not_intersection", "=", "np", ".", "in1d", "(", "m_hat_nz", ",", "not_m_nz", ")", "# false positives", "true_positive_rate", "=", "0.0", "if", "len", "(", "m_nz", ")", ":", "true_positive_rate", "=", "1.", "*", "np", ".", "sum", "(", "intersection", ")", "/", "len", "(", "m_nz", ")", "true_negative_rate", "=", "1.", "-", "true_positive_rate", "false_positive_rate", "=", "0.0", "if", "len", "(", "not_m_nz", ")", ":", "false_positive_rate", "=", "1.", "*", "np", ".", "sum", "(", "not_intersection", ")", "/", "len", "(", "not_m_nz", ")", "return", "int", "(", "np", ".", "less_equal", "(", "true_negative_rate", "+", "false_positive_rate", ",", "prob", ")", ")" ]
Returns 1 if model selection error is less than or equal to prob rate,
0 else.

NOTE: why does np.nonzero/np.flatnonzero create so much problems?
[ "Returns", "1", "if", "model", "selection", "error", "is", "less", "than", "or", "equal", "to", "prob", "rate", "0", "else", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/profiling/metrics.py#L64-L88
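With the same hypothetical matrices, the single extra edge in m_hat gives a false-positive rate of 1/2, so both the exact check and the default approximate check fail; identical supports pass:

print(has_exact_support(m, m_hat))   # 0: supports differ by one edge
print(has_approx_support(m, m_hat))  # 0: error rate 0.5 > prob=0.01
print(has_approx_support(m, m))      # 1: identical supports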
17,107
skggm/skggm
inverse_covariance/inverse_covariance.py
_validate_path
def _validate_path(path):
    """Sorts path values from largest to smallest.

    Will warn if path parameter was not already sorted.
    """
    if path is None:
        return None

    new_path = np.array(sorted(set(path), reverse=True))
    if new_path[0] != path[0]:
        print("Warning: Path must be sorted largest to smallest.")

    return new_path
python
def _validate_path(path):
    """Sorts path values from largest to smallest.

    Will warn if path parameter was not already sorted.
    """
    if path is None:
        return None

    new_path = np.array(sorted(set(path), reverse=True))
    if new_path[0] != path[0]:
        print("Warning: Path must be sorted largest to smallest.")

    return new_path
[ "def", "_validate_path", "(", "path", ")", ":", "if", "path", "is", "None", ":", "return", "None", "new_path", "=", "np", ".", "array", "(", "sorted", "(", "set", "(", "path", ")", ",", "reverse", "=", "True", ")", ")", "if", "new_path", "[", "0", "]", "!=", "path", "[", "0", "]", ":", "print", "(", "\"Warning: Path must be sorted largest to smallest.\"", ")", "return", "new_path" ]
Sorts path values from largest to smallest. Will warn if path parameter was not already sorted.
[ "Sorts", "path", "values", "from", "largest", "to", "smallest", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/inverse_covariance.py#L77-L89
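A quick sketch of the behavior, assuming numpy is imported: duplicates are dropped, values are sorted descending, and the warning fires because the input did not start at its maximum:

path = _validate_path([0.1, 1.0, 0.5, 0.5])
# prints "Warning: Path must be sorted largest to smallest."
print(path)  # [1.   0.5  0.1]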
17,108
skggm/skggm
inverse_covariance/inverse_covariance.py
InverseCovarianceEstimator.ebic
def ebic(self, gamma=0):
    """Compute EBIC scores for each model. If model is not "path" then
    returns a scalar score value.

    May require self.path_

    See:
    Extended Bayesian Information Criteria for Gaussian Graphical Models
    R. Foygel and M. Drton
    NIPS 2010

    Parameters
    ----------
    gamma : (float) \in (0, 1)
        Choice of gamma=0 leads to classical BIC
        Positive gamma leads to stronger penalization of large graphs.

    Returns
    -------
    Scalar ebic score or list of ebic scores.
    """
    if not self.is_fitted_:
        return

    if not isinstance(self.precision_, list):
        return metrics.ebic(
            self.sample_covariance_,
            self.precision_,
            self.n_samples_,
            self.n_features_,
            gamma=gamma,
        )

    ebic_scores = []
    for lidx, lam in enumerate(self.path_):
        ebic_scores.append(
            metrics.ebic(
                self.sample_covariance_,
                self.precision_[lidx],
                self.n_samples_,
                self.n_features_,
                gamma=gamma,
            )
        )

    return np.array(ebic_scores)
python
def ebic(self, gamma=0):
    """Compute EBIC scores for each model. If model is not "path" then
    returns a scalar score value.

    May require self.path_

    See:
    Extended Bayesian Information Criteria for Gaussian Graphical Models
    R. Foygel and M. Drton
    NIPS 2010

    Parameters
    ----------
    gamma : (float) \in (0, 1)
        Choice of gamma=0 leads to classical BIC
        Positive gamma leads to stronger penalization of large graphs.

    Returns
    -------
    Scalar ebic score or list of ebic scores.
    """
    if not self.is_fitted_:
        return

    if not isinstance(self.precision_, list):
        return metrics.ebic(
            self.sample_covariance_,
            self.precision_,
            self.n_samples_,
            self.n_features_,
            gamma=gamma,
        )

    ebic_scores = []
    for lidx, lam in enumerate(self.path_):
        ebic_scores.append(
            metrics.ebic(
                self.sample_covariance_,
                self.precision_[lidx],
                self.n_samples_,
                self.n_features_,
                gamma=gamma,
            )
        )

    return np.array(ebic_scores)
[ "def", "ebic", "(", "self", ",", "gamma", "=", "0", ")", ":", "if", "not", "self", ".", "is_fitted_", ":", "return", "if", "not", "isinstance", "(", "self", ".", "precision_", ",", "list", ")", ":", "return", "metrics", ".", "ebic", "(", "self", ".", "sample_covariance_", ",", "self", ".", "precision_", ",", "self", ".", "n_samples_", ",", "self", ".", "n_features_", ",", "gamma", "=", "gamma", ",", ")", "ebic_scores", "=", "[", "]", "for", "lidx", ",", "lam", "in", "enumerate", "(", "self", ".", "path_", ")", ":", "ebic_scores", ".", "append", "(", "metrics", ".", "ebic", "(", "self", ".", "sample_covariance_", ",", "self", ".", "precision_", "[", "lidx", "]", ",", "self", ".", "n_samples_", ",", "self", ".", "n_features_", ",", "gamma", "=", "gamma", ",", ")", ")", "return", "np", ".", "array", "(", "ebic_scores", ")" ]
Compute EBIC scores for each model. If model is not "path" then
returns a scalar score value.

May require self.path_

See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010

Parameters
----------
gamma : (float) \in (0, 1)
    Choice of gamma=0 leads to classical BIC
    Positive gamma leads to stronger penalization of large graphs.

Returns
-------
Scalar ebic score or list of ebic scores.
[ "Compute", "EBIC", "scores", "for", "each", "model", ".", "If", "model", "is", "not", "path", "then", "returns", "a", "scalar", "score", "value", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/inverse_covariance.py#L268-L313
17,109
skggm/skggm
inverse_covariance/inverse_covariance.py
InverseCovarianceEstimator.ebic_select
def ebic_select(self, gamma=0):
    """Uses Extended Bayesian Information Criteria for model selection.

    Can only be used in path mode (doesn't really make sense otherwise).

    See:
    Extended Bayesian Information Criteria for Gaussian Graphical Models
    R. Foygel and M. Drton
    NIPS 2010

    Parameters
    ----------
    gamma : (float) \in (0, 1)
        Choice of gamma=0 leads to classical BIC
        Positive gamma leads to stronger penalization of large graphs.

    Returns
    -------
    Lambda index with best ebic score. When multiple ebic scores are the
    same, returns the smallest lambda (largest index) with minimum score.
    """
    if not isinstance(self.precision_, list):
        raise ValueError("EBIC requires multiple models to select from.")
        return

    if not self.is_fitted_:
        return

    ebic_scores = self.ebic(gamma=gamma)
    min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
    return np.max(min_indices)
python
def ebic_select(self, gamma=0):
    """Uses Extended Bayesian Information Criteria for model selection.

    Can only be used in path mode (doesn't really make sense otherwise).

    See:
    Extended Bayesian Information Criteria for Gaussian Graphical Models
    R. Foygel and M. Drton
    NIPS 2010

    Parameters
    ----------
    gamma : (float) \in (0, 1)
        Choice of gamma=0 leads to classical BIC
        Positive gamma leads to stronger penalization of large graphs.

    Returns
    -------
    Lambda index with best ebic score. When multiple ebic scores are the
    same, returns the smallest lambda (largest index) with minimum score.
    """
    if not isinstance(self.precision_, list):
        raise ValueError("EBIC requires multiple models to select from.")
        return

    if not self.is_fitted_:
        return

    ebic_scores = self.ebic(gamma=gamma)
    min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
    return np.max(min_indices)
[ "def", "ebic_select", "(", "self", ",", "gamma", "=", "0", ")", ":", "if", "not", "isinstance", "(", "self", ".", "precision_", ",", "list", ")", ":", "raise", "ValueError", "(", "\"EBIC requires multiple models to select from.\"", ")", "return", "if", "not", "self", ".", "is_fitted_", ":", "return", "ebic_scores", "=", "self", ".", "ebic", "(", "gamma", "=", "gamma", ")", "min_indices", "=", "np", ".", "where", "(", "np", ".", "abs", "(", "ebic_scores", "-", "ebic_scores", ".", "min", "(", ")", ")", "<", "1e-10", ")", "return", "np", ".", "max", "(", "min_indices", ")" ]
Uses Extended Bayesian Information Criteria for model selection.

Can only be used in path mode (doesn't really make sense otherwise).

See:
Extended Bayesian Information Criteria for Gaussian Graphical Models
R. Foygel and M. Drton
NIPS 2010

Parameters
----------
gamma : (float) \in (0, 1)
    Choice of gamma=0 leads to classical BIC
    Positive gamma leads to stronger penalization of large graphs.

Returns
-------
Lambda index with best ebic score. When multiple ebic scores are the
same, returns the smallest lambda (largest index) with minimum score.
[ "Uses", "Extended", "Bayesian", "Information", "Criteria", "for", "model", "selection", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/inverse_covariance.py#L315-L345
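The tie-breaking rule in ebic_select, shown in isolation with hypothetical scores: among equal minima it returns the largest index, which on a descending path corresponds to the smallest lambda:

import numpy as np

ebic_scores = np.array([3.2, 1.1, 1.1, 2.0])
min_indices = np.where(np.abs(ebic_scores - ebic_scores.min()) < 1e-10)
print(np.max(min_indices))  # 2, not 1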
17,110
skggm/skggm
examples/estimator_suite.py
quic_graph_lasso
def quic_graph_lasso(X, num_folds, metric):
    """Run QuicGraphicalLasso with mode='default' and use standard
    scikit GridSearchCV to find the best lambda.

    Primarily demonstrates compatibility with existing scikit tooling.
    """
    print("QuicGraphicalLasso + GridSearchCV with:")
    print(" metric: {}".format(metric))
    search_grid = {
        "lam": np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True),
        "init_method": ["cov"],
        "score_metric": [metric],
    }
    model = GridSearchCV(QuicGraphicalLasso(), search_grid, cv=num_folds, refit=True)
    model.fit(X)
    bmodel = model.best_estimator_
    print(" len(cv_lams): {}".format(len(search_grid["lam"])))
    print(" cv-lam: {}".format(model.best_params_["lam"]))
    print(" lam_scale_: {}".format(bmodel.lam_scale_))
    print(" lam_: {}".format(bmodel.lam_))
    return bmodel.covariance_, bmodel.precision_, bmodel.lam_
python
def quic_graph_lasso(X, num_folds, metric):
    """Run QuicGraphicalLasso with mode='default' and use standard
    scikit GridSearchCV to find the best lambda.

    Primarily demonstrates compatibility with existing scikit tooling.
    """
    print("QuicGraphicalLasso + GridSearchCV with:")
    print(" metric: {}".format(metric))
    search_grid = {
        "lam": np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True),
        "init_method": ["cov"],
        "score_metric": [metric],
    }
    model = GridSearchCV(QuicGraphicalLasso(), search_grid, cv=num_folds, refit=True)
    model.fit(X)
    bmodel = model.best_estimator_
    print(" len(cv_lams): {}".format(len(search_grid["lam"])))
    print(" cv-lam: {}".format(model.best_params_["lam"]))
    print(" lam_scale_: {}".format(bmodel.lam_scale_))
    print(" lam_: {}".format(bmodel.lam_))
    return bmodel.covariance_, bmodel.precision_, bmodel.lam_
[ "def", "quic_graph_lasso", "(", "X", ",", "num_folds", ",", "metric", ")", ":", "print", "(", "\"QuicGraphicalLasso + GridSearchCV with:\"", ")", "print", "(", "\" metric: {}\"", ".", "format", "(", "metric", ")", ")", "search_grid", "=", "{", "\"lam\"", ":", "np", ".", "logspace", "(", "np", ".", "log10", "(", "0.01", ")", ",", "np", ".", "log10", "(", "1.0", ")", ",", "num", "=", "100", ",", "endpoint", "=", "True", ")", ",", "\"init_method\"", ":", "[", "\"cov\"", "]", ",", "\"score_metric\"", ":", "[", "metric", "]", ",", "}", "model", "=", "GridSearchCV", "(", "QuicGraphicalLasso", "(", ")", ",", "search_grid", ",", "cv", "=", "num_folds", ",", "refit", "=", "True", ")", "model", ".", "fit", "(", "X", ")", "bmodel", "=", "model", ".", "best_estimator_", "print", "(", "\" len(cv_lams): {}\"", ".", "format", "(", "len", "(", "search_grid", "[", "\"lam\"", "]", ")", ")", ")", "print", "(", "\" cv-lam: {}\"", ".", "format", "(", "model", ".", "best_params_", "[", "\"lam\"", "]", ")", ")", "print", "(", "\" lam_scale_: {}\"", ".", "format", "(", "bmodel", ".", "lam_scale_", ")", ")", "print", "(", "\" lam_: {}\"", ".", "format", "(", "bmodel", ".", "lam_", ")", ")", "return", "bmodel", ".", "covariance_", ",", "bmodel", ".", "precision_", ",", "bmodel", ".", "lam_" ]
Run QuicGraphicalLasso with mode='default' and use standard scikit GridSearchCV to find the best lambda. Primarily demonstrates compatibility with existing scikit tooling.
[ "Run", "QuicGraphicalLasso", "with", "mode", "=", "default", "and", "use", "standard", "scikit", "GridSearchCV", "to", "find", "the", "best", "lambda", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite.py#L97-L117
17,111
skggm/skggm
examples/estimator_suite.py
quic_graph_lasso_cv
def quic_graph_lasso_cv(X, metric):
    """Run QuicGraphicalLassoCV on data with metric of choice.

    Compare results with GridSearchCV + quic_graph_lasso. The number of
    lambdas tested should be much lower with similar final lam_ selected.
    """
    print("QuicGraphicalLassoCV with:")
    print(" metric: {}".format(metric))
    model = QuicGraphicalLassoCV(
        cv=2,  # cant deal w more folds at small size
        n_refinements=6,
        n_jobs=1,
        init_method="cov",
        score_metric=metric,
    )
    model.fit(X)
    print(" len(cv_lams): {}".format(len(model.cv_lams_)))
    print(" lam_scale_: {}".format(model.lam_scale_))
    print(" lam_: {}".format(model.lam_))
    return model.covariance_, model.precision_, model.lam_
python
def quic_graph_lasso_cv(X, metric):
    """Run QuicGraphicalLassoCV on data with metric of choice.

    Compare results with GridSearchCV + quic_graph_lasso. The number of
    lambdas tested should be much lower with similar final lam_ selected.
    """
    print("QuicGraphicalLassoCV with:")
    print(" metric: {}".format(metric))
    model = QuicGraphicalLassoCV(
        cv=2,  # cant deal w more folds at small size
        n_refinements=6,
        n_jobs=1,
        init_method="cov",
        score_metric=metric,
    )
    model.fit(X)
    print(" len(cv_lams): {}".format(len(model.cv_lams_)))
    print(" lam_scale_: {}".format(model.lam_scale_))
    print(" lam_: {}".format(model.lam_))
    return model.covariance_, model.precision_, model.lam_
[ "def", "quic_graph_lasso_cv", "(", "X", ",", "metric", ")", ":", "print", "(", "\"QuicGraphicalLassoCV with:\"", ")", "print", "(", "\" metric: {}\"", ".", "format", "(", "metric", ")", ")", "model", "=", "QuicGraphicalLassoCV", "(", "cv", "=", "2", ",", "# cant deal w more folds at small size", "n_refinements", "=", "6", ",", "n_jobs", "=", "1", ",", "init_method", "=", "\"cov\"", ",", "score_metric", "=", "metric", ",", ")", "model", ".", "fit", "(", "X", ")", "print", "(", "\" len(cv_lams): {}\"", ".", "format", "(", "len", "(", "model", ".", "cv_lams_", ")", ")", ")", "print", "(", "\" lam_scale_: {}\"", ".", "format", "(", "model", ".", "lam_scale_", ")", ")", "print", "(", "\" lam_: {}\"", ".", "format", "(", "model", ".", "lam_", ")", ")", "return", "model", ".", "covariance_", ",", "model", ".", "precision_", ",", "model", ".", "lam_" ]
Run QuicGraphicalLassoCV on data with metric of choice. Compare results with GridSearchCV + quic_graph_lasso. The number of lambdas tested should be much lower with similar final lam_ selected.
[ "Run", "QuicGraphicalLassoCV", "on", "data", "with", "metric", "of", "choice", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite.py#L120-L139
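A hedged sketch of driving both example helpers on synthetic data; QuicGraphicalLasso and QuicGraphicalLassoCV are skggm estimators, and the sample sizes here are hypothetical:

import numpy as np

X = np.random.RandomState(1).randn(60, 10)  # hypothetical small dataset
cov_a, prec_a, lam_a = quic_graph_lasso(X, num_folds=2, metric="log_likelihood")
cov_b, prec_b, lam_b = quic_graph_lasso_cv(X, metric="log_likelihood")
# The CV class refines its grid, so it should try far fewer lambdas than
# the 100-point grid search while selecting a similar penalty.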
17,112
skggm/skggm
examples/estimator_suite.py
graph_lasso
def graph_lasso(X, num_folds):
    """Estimate inverse covariance via scikit-learn GraphLassoCV class.
    """
    print("GraphLasso (sklearn)")
    model = GraphLassoCV(cv=num_folds)
    model.fit(X)
    print(" lam_: {}".format(model.alpha_))
    return model.covariance_, model.precision_, model.alpha_
python
def graph_lasso(X, num_folds):
    """Estimate inverse covariance via scikit-learn GraphLassoCV class.
    """
    print("GraphLasso (sklearn)")
    model = GraphLassoCV(cv=num_folds)
    model.fit(X)
    print(" lam_: {}".format(model.alpha_))
    return model.covariance_, model.precision_, model.alpha_
[ "def", "graph_lasso", "(", "X", ",", "num_folds", ")", ":", "print", "(", "\"GraphLasso (sklearn)\"", ")", "model", "=", "GraphLassoCV", "(", "cv", "=", "num_folds", ")", "model", ".", "fit", "(", "X", ")", "print", "(", "\" lam_: {}\"", ".", "format", "(", "model", ".", "alpha_", ")", ")", "return", "model", ".", "covariance_", ",", "model", ".", "precision_", ",", "model", ".", "alpha_" ]
Estimate inverse covariance via scikit-learn GraphLassoCV class.
[ "Estimate", "inverse", "covariance", "via", "scikit", "-", "learn", "GraphLassoCV", "class", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/examples/estimator_suite.py#L295-L302
17,113
skggm/skggm
inverse_covariance/quic_graph_lasso.py
_quic_path
def _quic_path(
    X,
    path,
    X_test=None,
    lam=0.5,
    tol=1e-6,
    max_iter=1000,
    Theta0=None,
    Sigma0=None,
    method="quic",
    verbose=0,
    score_metric="log_likelihood",
    init_method="corrcoef",
):
    """Wrapper to compute path for example X.
    """
    S, lam_scale_ = _init_coefs(X, method=init_method)

    path = path.copy(order="C")

    if method == "quic":
        (precisions_, covariances_, opt_, cputime_, iters_, duality_gap_) = quic(
            S,
            lam,
            mode="path",
            tol=tol,
            max_iter=max_iter,
            Theta0=Theta0,
            Sigma0=Sigma0,
            path=path,
            msg=verbose,
        )
    else:
        raise NotImplementedError("Only method='quic' has been implemented.")

    if X_test is not None:
        S_test, lam_scale_test = _init_coefs(X_test, method=init_method)

        path_errors = []
        for lidx, lam in enumerate(path):
            path_errors.append(
                _compute_error(
                    S_test,
                    covariances_[lidx],
                    precisions_[lidx],
                    score_metric=score_metric,
                )
            )
        scores_ = [-e for e in path_errors]

        return covariances_, precisions_, scores_

    return covariances_, precisions_
python
def _quic_path(
    X,
    path,
    X_test=None,
    lam=0.5,
    tol=1e-6,
    max_iter=1000,
    Theta0=None,
    Sigma0=None,
    method="quic",
    verbose=0,
    score_metric="log_likelihood",
    init_method="corrcoef",
):
    """Wrapper to compute path for example X.
    """
    S, lam_scale_ = _init_coefs(X, method=init_method)

    path = path.copy(order="C")

    if method == "quic":
        (precisions_, covariances_, opt_, cputime_, iters_, duality_gap_) = quic(
            S,
            lam,
            mode="path",
            tol=tol,
            max_iter=max_iter,
            Theta0=Theta0,
            Sigma0=Sigma0,
            path=path,
            msg=verbose,
        )
    else:
        raise NotImplementedError("Only method='quic' has been implemented.")

    if X_test is not None:
        S_test, lam_scale_test = _init_coefs(X_test, method=init_method)

        path_errors = []
        for lidx, lam in enumerate(path):
            path_errors.append(
                _compute_error(
                    S_test,
                    covariances_[lidx],
                    precisions_[lidx],
                    score_metric=score_metric,
                )
            )
        scores_ = [-e for e in path_errors]

        return covariances_, precisions_, scores_

    return covariances_, precisions_
[ "def", "_quic_path", "(", "X", ",", "path", ",", "X_test", "=", "None", ",", "lam", "=", "0.5", ",", "tol", "=", "1e-6", ",", "max_iter", "=", "1000", ",", "Theta0", "=", "None", ",", "Sigma0", "=", "None", ",", "method", "=", "\"quic\"", ",", "verbose", "=", "0", ",", "score_metric", "=", "\"log_likelihood\"", ",", "init_method", "=", "\"corrcoef\"", ",", ")", ":", "S", ",", "lam_scale_", "=", "_init_coefs", "(", "X", ",", "method", "=", "init_method", ")", "path", "=", "path", ".", "copy", "(", "order", "=", "\"C\"", ")", "if", "method", "==", "\"quic\"", ":", "(", "precisions_", ",", "covariances_", ",", "opt_", ",", "cputime_", ",", "iters_", ",", "duality_gap_", ")", "=", "quic", "(", "S", ",", "lam", ",", "mode", "=", "\"path\"", ",", "tol", "=", "tol", ",", "max_iter", "=", "max_iter", ",", "Theta0", "=", "Theta0", ",", "Sigma0", "=", "Sigma0", ",", "path", "=", "path", ",", "msg", "=", "verbose", ",", ")", "else", ":", "raise", "NotImplementedError", "(", "\"Only method='quic' has been implemented.\"", ")", "if", "X_test", "is", "not", "None", ":", "S_test", ",", "lam_scale_test", "=", "_init_coefs", "(", "X_test", ",", "method", "=", "init_method", ")", "path_errors", "=", "[", "]", "for", "lidx", ",", "lam", "in", "enumerate", "(", "path", ")", ":", "path_errors", ".", "append", "(", "_compute_error", "(", "S_test", ",", "covariances_", "[", "lidx", "]", ",", "precisions_", "[", "lidx", "]", ",", "score_metric", "=", "score_metric", ",", ")", ")", "scores_", "=", "[", "-", "e", "for", "e", "in", "path_errors", "]", "return", "covariances_", ",", "precisions_", ",", "scores_", "return", "covariances_", ",", "precisions_" ]
Wrapper to compute path for example X.
[ "Wrapper", "to", "compute", "path", "for", "example", "X", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/quic_graph_lasso.py#L383-L435
17,114
skggm/skggm
inverse_covariance/quic_graph_lasso.py
QuicGraphicalLasso.lam_at_index
def lam_at_index(self, lidx):
    """Compute the scaled lambda used at index lidx.
    """
    if self.path_ is None:
        return self.lam * self.lam_scale_

    return self.lam * self.lam_scale_ * self.path_[lidx]
python
def lam_at_index(self, lidx):
    """Compute the scaled lambda used at index lidx.
    """
    if self.path_ is None:
        return self.lam * self.lam_scale_

    return self.lam * self.lam_scale_ * self.path_[lidx]
[ "def", "lam_at_index", "(", "self", ",", "lidx", ")", ":", "if", "self", ".", "path_", "is", "None", ":", "return", "self", ".", "lam", "*", "self", ".", "lam_scale_", "return", "self", ".", "lam", "*", "self", ".", "lam_scale_", "*", "self", ".", "path_", "[", "lidx", "]" ]
Compute the scaled lambda used at index lidx.
[ "Compute", "the", "scaled", "lambda", "used", "at", "index", "lidx", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/quic_graph_lasso.py#L361-L367
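The scaling rule in lam_at_index, evaluated with hypothetical attribute values:

lam, lam_scale_, path_ = 0.5, 0.8, [1.0, 0.5, 0.1]
# With a path: lam * lam_scale_ * path_[lidx]
print(lam * lam_scale_ * path_[1])  # 0.2
# Without a path the result would be lam * lam_scale_ = 0.4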
17,115
skggm/skggm
inverse_covariance/rank_correlation.py
_compute_ranks
def _compute_ranks(X, winsorize=False, truncation=None, verbose=True):
    """
    Transform each column into ranked data. Tied ranks are averaged.
    Ranks can optionally be winsorized as described in Liu 2009 otherwise
    this returns Tsukahara's scaled rank based Z-estimator.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        The data matrix where each column is a feature. Row observations
        for each column will be replaced by correponding rank.

    winsorize: bool
        Choose whether ranks should be winsorized (trimmed) or not. If True,
        then ranks will be winsorized using the truncation parameter.

    truncation: (float)
        The default value is given by 1/(4 n^(1/4) * sqrt(pi log n)),
        where n is the number of samples.

    Returns
    -------
    Xrank

    References
    ----------
    Liu, Han, John Lafferty, and Larry Wasserman. "The nonparanormal:
    Semiparametric estimation of high dimensional undirected graphs."
    Journal of Machine Learning Research 10.Oct (2009): 2295-2328.
    """
    n_samples, n_features = X.shape
    Xrank = np.zeros(shape=X.shape)

    if winsorize:
        if truncation is None:
            truncation = 1 / (
                4 * np.power(n_samples, 0.25) * np.sqrt(np.pi * np.log(n_samples))
            )
        elif truncation > 1:
            truncation = np.min(1.0, truncation)

    for col in np.arange(n_features):
        Xrank[:, col] = rankdata(X[:, col], method="average")
        Xrank[:, col] /= n_samples
        if winsorize:
            if n_samples > 100 * n_features:
                Xrank[:, col] = n_samples * Xrank[:, col] / (n_samples + 1)
            else:
                lower_truncate = Xrank[:, col] <= truncation
                upper_truncate = Xrank[:, col] > 1 - truncation
                Xrank[lower_truncate, col] = truncation
                Xrank[upper_truncate, col] = 1 - truncation

    return Xrank
python
def _compute_ranks(X, winsorize=False, truncation=None, verbose=True):
    """
    Transform each column into ranked data. Tied ranks are averaged.
    Ranks can optionally be winsorized as described in Liu 2009 otherwise
    this returns Tsukahara's scaled rank based Z-estimator.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        The data matrix where each column is a feature. Row observations
        for each column will be replaced by correponding rank.

    winsorize: bool
        Choose whether ranks should be winsorized (trimmed) or not. If True,
        then ranks will be winsorized using the truncation parameter.

    truncation: (float)
        The default value is given by 1/(4 n^(1/4) * sqrt(pi log n)),
        where n is the number of samples.

    Returns
    -------
    Xrank

    References
    ----------
    Liu, Han, John Lafferty, and Larry Wasserman. "The nonparanormal:
    Semiparametric estimation of high dimensional undirected graphs."
    Journal of Machine Learning Research 10.Oct (2009): 2295-2328.
    """
    n_samples, n_features = X.shape
    Xrank = np.zeros(shape=X.shape)

    if winsorize:
        if truncation is None:
            truncation = 1 / (
                4 * np.power(n_samples, 0.25) * np.sqrt(np.pi * np.log(n_samples))
            )
        elif truncation > 1:
            truncation = np.min(1.0, truncation)

    for col in np.arange(n_features):
        Xrank[:, col] = rankdata(X[:, col], method="average")
        Xrank[:, col] /= n_samples
        if winsorize:
            if n_samples > 100 * n_features:
                Xrank[:, col] = n_samples * Xrank[:, col] / (n_samples + 1)
            else:
                lower_truncate = Xrank[:, col] <= truncation
                upper_truncate = Xrank[:, col] > 1 - truncation
                Xrank[lower_truncate, col] = truncation
                Xrank[upper_truncate, col] = 1 - truncation

    return Xrank
[ "def", "_compute_ranks", "(", "X", ",", "winsorize", "=", "False", ",", "truncation", "=", "None", ",", "verbose", "=", "True", ")", ":", "n_samples", ",", "n_features", "=", "X", ".", "shape", "Xrank", "=", "np", ".", "zeros", "(", "shape", "=", "X", ".", "shape", ")", "if", "winsorize", ":", "if", "truncation", "is", "None", ":", "truncation", "=", "1", "/", "(", "4", "*", "np", ".", "power", "(", "n_samples", ",", "0.25", ")", "*", "np", ".", "sqrt", "(", "np", ".", "pi", "*", "np", ".", "log", "(", "n_samples", ")", ")", ")", "elif", "truncation", ">", "1", ":", "truncation", "=", "np", ".", "min", "(", "1.0", ",", "truncation", ")", "for", "col", "in", "np", ".", "arange", "(", "n_features", ")", ":", "Xrank", "[", ":", ",", "col", "]", "=", "rankdata", "(", "X", "[", ":", ",", "col", "]", ",", "method", "=", "\"average\"", ")", "Xrank", "[", ":", ",", "col", "]", "/=", "n_samples", "if", "winsorize", ":", "if", "n_samples", ">", "100", "*", "n_features", ":", "Xrank", "[", ":", ",", "col", "]", "=", "n_samples", "*", "Xrank", "[", ":", ",", "col", "]", "/", "(", "n_samples", "+", "1", ")", "else", ":", "lower_truncate", "=", "Xrank", "[", ":", ",", "col", "]", "<=", "truncation", "upper_truncate", "=", "Xrank", "[", ":", ",", "col", "]", ">", "1", "-", "truncation", "Xrank", "[", "lower_truncate", ",", "col", "]", "=", "truncation", "Xrank", "[", "upper_truncate", ",", "col", "]", "=", "1", "-", "truncation", "return", "Xrank" ]
Transform each column into ranked data. Tied ranks are averaged.
Ranks can optionally be winsorized as described in Liu 2009 otherwise
this returns Tsukahara's scaled rank based Z-estimator.

Parameters
----------
X : array-like, shape = (n_samples, n_features)
    The data matrix where each column is a feature. Row observations
    for each column will be replaced by correponding rank.

winsorize: bool
    Choose whether ranks should be winsorized (trimmed) or not. If True,
    then ranks will be winsorized using the truncation parameter.

truncation: (float)
    The default value is given by 1/(4 n^(1/4) * sqrt(pi log n)),
    where n is the number of samples.

Returns
-------
Xrank

References
----------
Liu, Han, John Lafferty, and Larry Wasserman. "The nonparanormal:
Semiparametric estimation of high dimensional undirected graphs."
Journal of Machine Learning Research 10.Oct (2009): 2295-2328.
[ "Transform", "each", "column", "into", "ranked", "data", ".", "Tied", "ranks", "are", "averaged", ".", "Ranks", "can", "optionally", "be", "winsorized", "as", "described", "in", "Liu", "2009", "otherwise", "this", "returns", "Tsukahara", "s", "scaled", "rank", "based", "Z", "-", "estimator", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/rank_correlation.py#L9-L66
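The default truncation level quoted in the docstring, 1/(4 n^(1/4) sqrt(pi log n)), evaluated for a hypothetical sample size:

import numpy as np

n_samples = 1000
truncation = 1 / (4 * np.power(n_samples, 0.25) * np.sqrt(np.pi * np.log(n_samples)))
print(truncation)  # ~0.0095, so scaled ranks are clipped into [0.0095, 0.9905]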
17,116
skggm/skggm
inverse_covariance/rank_correlation.py
spearman_correlation
def spearman_correlation(X, rowvar=False):
    """
    Computes the spearman correlation estimate.
    This is effectively a bias corrected pearson correlation
    between rank transformed columns of X.

    Parameters
    ----------
    X: array-like, shape = [n_samples, n_features]
        Data matrix using which we compute the empirical correlation

    Returns
    -------
    rank_correlation

    References
    ----------
    Xue, Lingzhou; Zou, Hui. "Regularized rank-based estimation of
    high-dimensional nonparanormal graphical models." Ann. Statist. 40
    (2012), no. 5, 2541--2571. doi:10.1214/12-AOS1041.

    Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
    "High-dimensional semiparametric Gaussian copula graphical models."
    Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037
    """
    Xrank = _compute_ranks(X)
    rank_correlation = np.corrcoef(Xrank, rowvar=rowvar)
    return 2 * np.sin(rank_correlation * np.pi / 6)
python
def spearman_correlation(X, rowvar=False):
    """
    Computes the spearman correlation estimate.
    This is effectively a bias corrected pearson correlation
    between rank transformed columns of X.

    Parameters
    ----------
    X: array-like, shape = [n_samples, n_features]
        Data matrix using which we compute the empirical correlation

    Returns
    -------
    rank_correlation

    References
    ----------
    Xue, Lingzhou; Zou, Hui. "Regularized rank-based estimation of
    high-dimensional nonparanormal graphical models." Ann. Statist. 40
    (2012), no. 5, 2541--2571. doi:10.1214/12-AOS1041.

    Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
    "High-dimensional semiparametric Gaussian copula graphical models."
    Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037
    """
    Xrank = _compute_ranks(X)
    rank_correlation = np.corrcoef(Xrank, rowvar=rowvar)
    return 2 * np.sin(rank_correlation * np.pi / 6)
[ "def", "spearman_correlation", "(", "X", ",", "rowvar", "=", "False", ")", ":", "Xrank", "=", "_compute_ranks", "(", "X", ")", "rank_correlation", "=", "np", ".", "corrcoef", "(", "Xrank", ",", "rowvar", "=", "rowvar", ")", "return", "2", "*", "np", ".", "sin", "(", "rank_correlation", "*", "np", ".", "pi", "/", "6", ")" ]
Computes the spearman correlation estimate.
This is effectively a bias corrected pearson correlation
between rank transformed columns of X.

Parameters
----------
X: array-like, shape = [n_samples, n_features]
    Data matrix using which we compute the empirical correlation

Returns
-------
rank_correlation

References
----------
Xue, Lingzhou; Zou, Hui. "Regularized rank-based estimation of
high-dimensional nonparanormal graphical models." Ann. Statist. 40
(2012), no. 5, 2541--2571. doi:10.1214/12-AOS1041.

Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
"High-dimensional semiparametric Gaussian copula graphical models."
Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037
[ "Computes", "the", "spearman", "correlation", "estimate", ".", "This", "is", "effectively", "a", "bias", "corrected", "pearson", "correlation", "between", "rank", "transformed", "columns", "of", "X", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/rank_correlation.py#L69-L101
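A sanity check of the 2 sin(pi r / 6) correction, assuming numpy, scipy.stats.rankdata, and the two functions above are in scope: for Gaussian data the corrected rank correlation should track the Pearson estimate:

import numpy as np

X = np.random.RandomState(2).randn(2000, 3)
est = spearman_correlation(X)         # 2 * sin(pi/6 * rank correlation)
pearson = np.corrcoef(X, rowvar=False)
print(np.max(np.abs(est - pearson)))  # small, roughly 1e-2 at this sample size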
17,117
skggm/skggm
inverse_covariance/rank_correlation.py
kendalltau_correlation
def kendalltau_correlation(X, rowvar=False, weighted=False):
    """
    Computes kendall's tau correlation estimate.
    The option to use scipy.stats.weightedtau is not recommended
    as the implementation does not appear to handle ties correctly.

    Parameters
    ----------
    X: array-like, shape = [n_samples, n_features]
        Data matrix using which we compute the empirical correlation

    Returns
    -------
    rank_correlation

    References
    ----------
    Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
    "High-dimensional semiparametric Gaussian copula graphical models."
    Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037

    Barber, Rina Foygel; Kolar, Mladen. "ROCKET: Robust Confidence
    Intervals via Kendall's Tau for Transelliptical Graphical Models."
    arXiv:1502.07641
    """
    if rowvar:
        X = X.T

    _, n_features = X.shape
    rank_correlation = np.eye(n_features)
    for row in np.arange(n_features):
        for col in np.arange(1 + row, n_features):
            if weighted:
                rank_correlation[row, col], _ = weightedtau(
                    X[:, row], X[:, col], rank=False
                )
            else:
                rank_correlation[row, col], _ = kendalltau(X[:, row], X[:, col])
    rank_correlation = np.triu(rank_correlation, 1) + rank_correlation.T

    return np.sin(rank_correlation * np.pi / 2)
python
def kendalltau_correlation(X, rowvar=False, weighted=False):
    """
    Computes kendall's tau correlation estimate.
    The option to use scipy.stats.weightedtau is not recommended
    as the implementation does not appear to handle ties correctly.

    Parameters
    ----------
    X: array-like, shape = [n_samples, n_features]
        Data matrix using which we compute the empirical correlation

    Returns
    -------
    rank_correlation

    References
    ----------
    Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
    "High-dimensional semiparametric Gaussian copula graphical models."
    Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037

    Barber, Rina Foygel; Kolar, Mladen. "ROCKET: Robust Confidence
    Intervals via Kendall's Tau for Transelliptical Graphical Models."
    arXiv:1502.07641
    """
    if rowvar:
        X = X.T

    _, n_features = X.shape
    rank_correlation = np.eye(n_features)
    for row in np.arange(n_features):
        for col in np.arange(1 + row, n_features):
            if weighted:
                rank_correlation[row, col], _ = weightedtau(
                    X[:, row], X[:, col], rank=False
                )
            else:
                rank_correlation[row, col], _ = kendalltau(X[:, row], X[:, col])
    rank_correlation = np.triu(rank_correlation, 1) + rank_correlation.T

    return np.sin(rank_correlation * np.pi / 2)
[ "def", "kendalltau_correlation", "(", "X", ",", "rowvar", "=", "False", ",", "weighted", "=", "False", ")", ":", "if", "rowvar", ":", "X", "=", "X", ".", "T", "_", ",", "n_features", "=", "X", ".", "shape", "rank_correlation", "=", "np", ".", "eye", "(", "n_features", ")", "for", "row", "in", "np", ".", "arange", "(", "n_features", ")", ":", "for", "col", "in", "np", ".", "arange", "(", "1", "+", "row", ",", "n_features", ")", ":", "if", "weighted", ":", "rank_correlation", "[", "row", ",", "col", "]", ",", "_", "=", "weightedtau", "(", "X", "[", ":", ",", "row", "]", ",", "X", "[", ":", ",", "col", "]", ",", "rank", "=", "False", ")", "else", ":", "rank_correlation", "[", "row", ",", "col", "]", ",", "_", "=", "kendalltau", "(", "X", "[", ":", ",", "row", "]", ",", "X", "[", ":", ",", "col", "]", ")", "rank_correlation", "=", "np", ".", "triu", "(", "rank_correlation", ",", "1", ")", "+", "rank_correlation", ".", "T", "return", "np", ".", "sin", "(", "rank_correlation", "*", "np", ".", "pi", "/", "2", ")" ]
Computes kendall's tau correlation estimate.
The option to use scipy.stats.weightedtau is not recommended
as the implementation does not appear to handle ties correctly.

Parameters
----------
X: array-like, shape = [n_samples, n_features]
    Data matrix using which we compute the empirical correlation

Returns
-------
rank_correlation

References
----------
Liu, Han, Fang; Yuan, Ming; Lafferty, John; Wasserman, Larry.
"High-dimensional semiparametric Gaussian copula graphical models."
Ann. Statist. 40.4 (2012): 2293-2326. doi:10.1214/12-AOS1037

Barber, Rina Foygel; Kolar, Mladen. "ROCKET: Robust Confidence Intervals
via Kendall's Tau for Transelliptical Graphical Models." arXiv:1502.07641
[ "Computes", "kendall", "s", "tau", "correlation", "estimate", ".", "The", "option", "to", "use", "scipy", ".", "stats", ".", "weightedtau", "is", "not", "recommended", "as", "the", "implementation", "does", "not", "appear", "to", "handle", "ties", "correctly", "." ]
a0ed406586c4364ea3297a658f415e13b5cbdaf8
https://github.com/skggm/skggm/blob/a0ed406586c4364ea3297a658f415e13b5cbdaf8/inverse_covariance/rank_correlation.py#L104-L148
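The analogous Gaussian-copula identity for Kendall's tau, sin(pi tau / 2) approximating the Pearson correlation, checked on one hypothetical correlated pair with scipy:

import numpy as np
from scipy.stats import kendalltau

rs = np.random.RandomState(3)
x = rs.randn(1000)
y = 0.6 * x + 0.8 * rs.randn(1000)  # true correlation 0.6
tau, _ = kendalltau(x, y)
print(np.sin(np.pi * tau / 2))      # close to 0.6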
17,118
fabiobatalha/crossrefapi
crossref/restful.py
Endpoint.version
def version(self):
    """
    This attribute retrieve the API version.

    >>> Works().version
    '1.0.0'
    """
    request_params = dict(self.request_params)
    request_url = str(self.request_url)

    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    ).json()

    return result['message-version']
python
def version(self):
    """
    This attribute retrieve the API version.

    >>> Works().version
    '1.0.0'
    """
    request_params = dict(self.request_params)
    request_url = str(self.request_url)

    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    ).json()

    return result['message-version']
[ "def", "version", "(", "self", ")", ":", "request_params", "=", "dict", "(", "self", ".", "request_params", ")", "request_url", "=", "str", "(", "self", ".", "request_url", ")", "result", "=", "self", ".", "do_http_request", "(", "'get'", ",", "request_url", ",", "data", "=", "request_params", ",", "custom_header", "=", "str", "(", "self", ".", "etiquette", ")", ")", ".", "json", "(", ")", "return", "result", "[", "'message-version'", "]" ]
This attribute retrieve the API version.

>>> Works().version
'1.0.0'
[ "This", "attribute", "retrieve", "the", "API", "version", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L157-L174
17,119
fabiobatalha/crossrefapi
crossref/restful.py
Endpoint.count
def count(self):
    """
    This method retrieve the total of records resulting from a given query.

    This attribute can be used compounded with query, filter,
    sort, order and facet methods.

    Examples:
        >>> from crossref.restful import Works
        >>> Works().query('zika').count()
        3597
        >>> Works().query('zika').filter(prefix='10.1590').count()
        61
        >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').count()
        14
        >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').count()
        1
    """
    request_params = dict(self.request_params)
    request_url = str(self.request_url)
    request_params['rows'] = 0

    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    ).json()

    return int(result['message']['total-results'])
python
def count(self):
    """
    This method retrieve the total of records resulting from a given query.

    This attribute can be used compounded with query, filter,
    sort, order and facet methods.

    Examples:
        >>> from crossref.restful import Works
        >>> Works().query('zika').count()
        3597
        >>> Works().query('zika').filter(prefix='10.1590').count()
        61
        >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').count()
        14
        >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').count()
        1
    """
    request_params = dict(self.request_params)
    request_url = str(self.request_url)
    request_params['rows'] = 0

    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    ).json()

    return int(result['message']['total-results'])
[ "def", "count", "(", "self", ")", ":", "request_params", "=", "dict", "(", "self", ".", "request_params", ")", "request_url", "=", "str", "(", "self", ".", "request_url", ")", "request_params", "[", "'rows'", "]", "=", "0", "result", "=", "self", ".", "do_http_request", "(", "'get'", ",", "request_url", ",", "data", "=", "request_params", ",", "custom_header", "=", "str", "(", "self", ".", "etiquette", ")", ")", ".", "json", "(", ")", "return", "int", "(", "result", "[", "'message'", "]", "[", "'total-results'", "]", ")" ]
This method retrieve the total of records resulting from a given query.

This attribute can be used compounded with query, filter,
sort, order and facet methods.

Examples:
    >>> from crossref.restful import Works
    >>> Works().query('zika').count()
    3597
    >>> Works().query('zika').filter(prefix='10.1590').count()
    61
    >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').count()
    14
    >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').count()
    1
[ "This", "method", "retrieve", "the", "total", "of", "records", "resulting", "from", "a", "given", "query", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L186-L215
17,120
fabiobatalha/crossrefapi
crossref/restful.py
Endpoint.url
def url(self):
    """
    This attribute retrieve the url that will be used as a HTTP request to
    the Crossref API.

    This attribute can be used compounded with query, filter,
    sort, order and facet methods.

    Examples:
        >>> from crossref.restful import Works
        >>> Works().query('zika').url
        'https://api.crossref.org/works?query=zika'
        >>> Works().query('zika').filter(prefix='10.1590').url
        'https://api.crossref.org/works?query=zika&filter=prefix%3A10.1590'
        >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').url
        'https://api.crossref.org/works?sort=published&order=desc&query=zika&filter=prefix%3A10.1590'
        >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').url
        'https://api.crossref.org/works?sort=published&filter=prefix%3A10.1590%2Chas-abstract%3Atrue&query=zika&order=desc&query.author=Marli'
    """
    request_params = self._escaped_pagging()
    sorted_request_params = sorted([(k, v) for k, v in request_params.items()])
    req = requests.Request(
        'get', self.request_url, params=sorted_request_params).prepare()

    return req.url
python
def url(self):
    """
    This attribute retrieve the url that will be used as a HTTP request to
    the Crossref API.

    This attribute can be used compounded with query, filter,
    sort, order and facet methods.

    Examples:
        >>> from crossref.restful import Works
        >>> Works().query('zika').url
        'https://api.crossref.org/works?query=zika'
        >>> Works().query('zika').filter(prefix='10.1590').url
        'https://api.crossref.org/works?query=zika&filter=prefix%3A10.1590'
        >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').url
        'https://api.crossref.org/works?sort=published&order=desc&query=zika&filter=prefix%3A10.1590'
        >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').url
        'https://api.crossref.org/works?sort=published&filter=prefix%3A10.1590%2Chas-abstract%3Atrue&query=zika&order=desc&query.author=Marli'
    """
    request_params = self._escaped_pagging()
    sorted_request_params = sorted([(k, v) for k, v in request_params.items()])
    req = requests.Request(
        'get', self.request_url, params=sorted_request_params).prepare()

    return req.url
[ "def", "url", "(", "self", ")", ":", "request_params", "=", "self", ".", "_escaped_pagging", "(", ")", "sorted_request_params", "=", "sorted", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "request_params", ".", "items", "(", ")", "]", ")", "req", "=", "requests", ".", "Request", "(", "'get'", ",", "self", ".", "request_url", ",", "params", "=", "sorted_request_params", ")", ".", "prepare", "(", ")", "return", "req", ".", "url" ]
This attribute retrieve the url that will be used as a HTTP request to
the Crossref API.

This attribute can be used compounded with query, filter,
sort, order and facet methods.

Examples:
    >>> from crossref.restful import Works
    >>> Works().query('zika').url
    'https://api.crossref.org/works?query=zika'
    >>> Works().query('zika').filter(prefix='10.1590').url
    'https://api.crossref.org/works?query=zika&filter=prefix%3A10.1590'
    >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').url
    'https://api.crossref.org/works?sort=published&order=desc&query=zika&filter=prefix%3A10.1590'
    >>> Works().query('zika').filter(prefix='10.1590').sort('published').order('desc').filter(has_abstract='true').query(author='Marli').url
    'https://api.crossref.org/works?sort=published&filter=prefix%3A10.1590%2Chas-abstract%3Atrue&query=zika&order=desc&query.author=Marli'
[ "This", "attribute", "retrieve", "the", "url", "that", "will", "be", "used", "as", "a", "HTTP", "request", "to", "the", "Crossref", "API", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L218-L243
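The URL-building trick used by Endpoint.url, in isolation: requests can render a prepared GET request's URL without sending it. The endpoint and parameters here mirror the doctest but are otherwise illustrative:

import requests

params = sorted({"query": "zika", "filter": "prefix:10.1590"}.items())
req = requests.Request(
    "get", "https://api.crossref.org/works", params=params).prepare()
print(req.url)
# https://api.crossref.org/works?filter=prefix%3A10.1590&query=zika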
17,121
fabiobatalha/crossrefapi
crossref/restful.py
Works.doi
def doi(self, doi, only_message=True):
    """
    This method retrieve the DOI metadata related to a given DOI number.

    args: Crossref DOI id (String)

    return: JSON

    Example:
        >>> from crossref.restful import Works
        >>> works = Works()
        >>> works.doi('10.1590/S0004-28032013005000001')
        {'is-referenced-by-count': 6, 'reference-count': 216,
        'DOI': '10.1590/s0004-28032013005000001', 'subtitle': [],
        'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref',
        'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216,
        'short-title': [], 'deposited': {'timestamp': 1495911725000,
        'date-time': '2017-05-27T19:02:05Z', 'date-parts': [[2017, 5, 27]]},
        'ISSN': ['0004-2803'], 'type': 'journal-article',
        'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001',
        'indexed': {'timestamp': 1496034748592,
        'date-time': '2017-05-29T05:12:28Z', 'date-parts': [[2017, 5, 29]]},
        'content-domain': {'crossmark-restriction': False, 'domain': []},
        'created': {'timestamp': 1374613284000,
        'date-time': '2013-07-23T21:01:24Z', 'date-parts': [[2013, 7, 23]]},
        'issn-type': [{'value': '0004-2803', 'type': 'electronic'}],
        'page': '81-96', 'volume': '50', 'original-title': [],
        'subject': ['Gastroenterology'], 'relation': {},
        'container-title': ['Arquivos de Gastroenterologia'], 'member': '530',
        'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]},
        'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'],
        'publisher': 'FapUNIFESP (SciELO)',
        'alternative-id': ['S0004-28032013000200081'],
        'abstract': '<jats:p>Significant abstract data..... .</jats:p>',
        'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Coelho', 'given': 'Luiz Gonzaga'},
        {'affiliation': [{'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk', 'given': 'Ismael'},
        {'affiliation': [{'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}], 'family': 'Zaterka', 'given': 'Schlioma'},
        {'affiliation': [{'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'},
        {'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Passos', 'given': 'Maria do Carmo Friche'},
        {'affiliation': [{'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho', 'given': 'Joaquim Prado P.'}],
        'score': 1.0, 'issue': '2'}
    """
    request_url = build_url_endpoint(
        '/'.join([self.ENDPOINT, doi])
    )
    request_params = {}

    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    )

    if result.status_code == 404:
        return

    result = result.json()

    return result['message'] if only_message is True else result
python
def doi(self, doi, only_message=True): """ This method retrieves the DOI metadata related to a given DOI number. args: Crossref DOI id (String) return: JSON Example: >>> from crossref.restful import Works >>> works = Works() >>> works.doi('10.1590/S0004-28032013005000001') {'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001', 'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref', 'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [], 'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z', 'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article', 'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001', 'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z', 'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []}, 'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z', 'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}], 'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'], 'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530', 'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]}, 'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'], 'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'], 'abstract': '<jats:p>Significant abstract data..... .</jats:p>', 'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [ {'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk', 'given': 'Ismael'}, {'affiliation': [ {'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}], 'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [ {'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'}, {'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [ {'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho', 'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'} """ request_url = build_url_endpoint( '/'.join([self.ENDPOINT, doi]) ) request_params = {} result = self.do_http_request( 'get', request_url, data=request_params, custom_header=str(self.etiquette) ) if result.status_code == 404: return result = result.json() return result['message'] if only_message is True else result
[ "def", "doi", "(", "self", ",", "doi", ",", "only_message", "=", "True", ")", ":", "request_url", "=", "build_url_endpoint", "(", "'/'", ".", "join", "(", "[", "self", ".", "ENDPOINT", ",", "doi", "]", ")", ")", "request_params", "=", "{", "}", "result", "=", "self", ".", "do_http_request", "(", "'get'", ",", "request_url", ",", "data", "=", "request_params", ",", "custom_header", "=", "str", "(", "self", ".", "etiquette", ")", ")", "if", "result", ".", "status_code", "==", "404", ":", "return", "result", "=", "result", ".", "json", "(", ")", "return", "result", "[", "'message'", "]", "if", "only_message", "is", "True", "else", "result" ]
This method retrieves the DOI metadata related to a given DOI number. args: Crossref DOI id (String) return: JSON Example: >>> from crossref.restful import Works >>> works = Works() >>> works.doi('10.1590/S0004-28032013005000001') {'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001', 'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref', 'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [], 'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z', 'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article', 'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001', 'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z', 'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []}, 'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z', 'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}], 'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'], 'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530', 'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]}, 'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'], 'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'], 'abstract': '<jats:p>Significant abstract data..... .</jats:p>', 'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [ {'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk', 'given': 'Ismael'}, {'affiliation': [ {'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}], 'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [ {'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'}, {'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [ {'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho', 'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
[ "This", "method", "retrieve", "the", "DOI", "metadata", "related", "to", "a", "given", "DOI", "number", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L901-L959
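For readers who want to reproduce the doi() behaviour outside the library, a rough standalone equivalent looks like the sketch below. The third-party requests package is an assumption here; the real method goes through do_http_request and sends an etiquette header.

import requests

def fetch_doi(doi, only_message=True):
    # Same 404-returns-None contract as Works.doi above.
    response = requests.get('https://api.crossref.org/works/' + doi)
    if response.status_code == 404:
        return None
    payload = response.json()
    return payload['message'] if only_message else payload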
17,122
fabiobatalha/crossrefapi
crossref/restful.py
Works.doi_exists
def doi_exists(self, doi): """ This method retrieves a boolean according to the existence of a Crossref DOI number. It returns False if the API returns a 404 status code. args: Crossref DOI id (String) return: Boolean Example 1: >>> from crossref.restful import Works >>> works = Works() >>> works.doi_exists('10.1590/S0004-28032013005000001') True Example 2: >>> from crossref.restful import Works >>> works = Works() >>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi') False """ request_url = build_url_endpoint( '/'.join([self.ENDPOINT, doi]) ) request_params = {} result = self.do_http_request( 'get', request_url, data=request_params, only_headers=True, custom_header=str(self.etiquette) ) if result.status_code == 404: return False return True
python
def doi_exists(self, doi): """ This method retrieves a boolean according to the existence of a Crossref DOI number. It returns False if the API returns a 404 status code. args: Crossref DOI id (String) return: Boolean Example 1: >>> from crossref.restful import Works >>> works = Works() >>> works.doi_exists('10.1590/S0004-28032013005000001') True Example 2: >>> from crossref.restful import Works >>> works = Works() >>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi') False """ request_url = build_url_endpoint( '/'.join([self.ENDPOINT, doi]) ) request_params = {} result = self.do_http_request( 'get', request_url, data=request_params, only_headers=True, custom_header=str(self.etiquette) ) if result.status_code == 404: return False return True
[ "def", "doi_exists", "(", "self", ",", "doi", ")", ":", "request_url", "=", "build_url_endpoint", "(", "'/'", ".", "join", "(", "[", "self", ".", "ENDPOINT", ",", "doi", "]", ")", ")", "request_params", "=", "{", "}", "result", "=", "self", ".", "do_http_request", "(", "'get'", ",", "request_url", ",", "data", "=", "request_params", ",", "only_headers", "=", "True", ",", "custom_header", "=", "str", "(", "self", ".", "etiquette", ")", ")", "if", "result", ".", "status_code", "==", "404", ":", "return", "False", "return", "True" ]
This method retrieves a boolean according to the existence of a Crossref DOI number. It returns False if the API returns a 404 status code. args: Crossref DOI id (String) return: Boolean Example 1: >>> from crossref.restful import Works >>> works = Works() >>> works.doi_exists('10.1590/S0004-28032013005000001') True Example 2: >>> from crossref.restful import Works >>> works = Works() >>> works.doi_exists('10.1590/S0004-28032013005000001_invalid_doi') False
[ "This", "method", "retrieve", "a", "boolean", "according", "to", "the", "existence", "of", "a", "crossref", "DOI", "number", ".", "It", "returns", "False", "if", "the", "API", "results", "a", "404", "status", "code", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L995-L1032
17,123
fabiobatalha/crossrefapi
crossref/restful.py
Funders.works
def works(self, funder_id): """ This method retrieves an iterable of Works of the given funder. args: Funder ID (String) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(funder_id)) return Works(context=context)
python
def works(self, funder_id): """ This method retrieves an iterable of Works of the given funder. args: Funder ID (String) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(funder_id)) return Works(context=context)
[ "def", "works", "(", "self", ",", "funder_id", ")", ":", "context", "=", "'%s/%s'", "%", "(", "self", ".", "ENDPOINT", ",", "str", "(", "funder_id", ")", ")", "return", "Works", "(", "context", "=", "context", ")" ]
This method retrieves an iterable of Works of the given funder. args: Funder ID (String) return: Works()
[ "This", "method", "retrieve", "a", "iterable", "of", "Works", "of", "the", "given", "funder", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1199-L1208
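Funders.works and the sibling methods below (Members.works, Types.works, Prefixes.works, Journals.works) all share one trick: they build a context path and hand it to Works, which prefixes its endpoint with it. A minimal sketch of how such a context would shape the final URL; build_url and the sample funder id are hypothetical stand-ins for the library's build_url_endpoint helper.

def build_url(endpoint, context=None):
    # With a context like 'funders/501100006374', 'works' becomes a scoped path.
    path = '%s/%s' % (context, endpoint) if context else endpoint
    return 'https://api.crossref.org/' + path

print(build_url('works'))
# https://api.crossref.org/works
print(build_url('works', context='funders/501100006374'))
# https://api.crossref.org/funders/501100006374/works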
17,124
fabiobatalha/crossrefapi
crossref/restful.py
Members.works
def works(self, member_id): """ This method retrieves an iterable of Works of the given member. args: Member ID (Integer) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(member_id)) return Works(context=context)
python
def works(self, member_id): """ This method retrieves an iterable of Works of the given member. args: Member ID (Integer) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(member_id)) return Works(context=context)
[ "def", "works", "(", "self", ",", "member_id", ")", ":", "context", "=", "'%s/%s'", "%", "(", "self", ".", "ENDPOINT", ",", "str", "(", "member_id", ")", ")", "return", "Works", "(", "context", "=", "context", ")" ]
This method retrieves an iterable of Works of the given member. args: Member ID (Integer) return: Works()
[ "This", "method", "retrieve", "a", "iterable", "of", "Works", "of", "the", "given", "member", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1418-L1427
17,125
fabiobatalha/crossrefapi
crossref/restful.py
Types.all
def all(self): """ This method retrieves an iterator with all the available types. return: iterator of crossref document types Example: >>> from crossref.restful import Types >>> types = Types() >>> [i for i in types.all()] [{'label': 'Book Section', 'id': 'book-section'}, {'label': 'Monograph', 'id': 'monograph'}, {'label': 'Report', 'id': 'report'}, {'label': 'Book Track', 'id': 'book-track'}, {'label': 'Journal Article', 'id': 'journal-article'}, {'label': 'Part', 'id': 'book-part'}, ... }] """ request_url = build_url_endpoint(self.ENDPOINT, self.context) request_params = dict(self.request_params) result = self.do_http_request( 'get', request_url, data=request_params, custom_header=str(self.etiquette) ) if result.status_code == 404: return result = result.json() for item in result['message']['items']: yield item
python
def all(self): """ This method retrieves an iterator with all the available types. return: iterator of crossref document types Example: >>> from crossref.restful import Types >>> types = Types() >>> [i for i in types.all()] [{'label': 'Book Section', 'id': 'book-section'}, {'label': 'Monograph', 'id': 'monograph'}, {'label': 'Report', 'id': 'report'}, {'label': 'Book Track', 'id': 'book-track'}, {'label': 'Journal Article', 'id': 'journal-article'}, {'label': 'Part', 'id': 'book-part'}, ... }] """ request_url = build_url_endpoint(self.ENDPOINT, self.context) request_params = dict(self.request_params) result = self.do_http_request( 'get', request_url, data=request_params, custom_header=str(self.etiquette) ) if result.status_code == 404: return result = result.json() for item in result['message']['items']: yield item
[ "def", "all", "(", "self", ")", ":", "request_url", "=", "build_url_endpoint", "(", "self", ".", "ENDPOINT", ",", "self", ".", "context", ")", "request_params", "=", "dict", "(", "self", ".", "request_params", ")", "result", "=", "self", ".", "do_http_request", "(", "'get'", ",", "request_url", ",", "data", "=", "request_params", ",", "custom_header", "=", "str", "(", "self", ".", "etiquette", ")", ")", "if", "result", ".", "status_code", "==", "404", ":", "raise", "StopIteration", "(", ")", "result", "=", "result", ".", "json", "(", ")", "for", "item", "in", "result", "[", "'message'", "]", "[", "'items'", "]", ":", "yield", "item" ]
This method retrieves an iterator with all the available types. return: iterator of crossref document types Example: >>> from crossref.restful import Types >>> types = Types() >>> [i for i in types.all()] [{'label': 'Book Section', 'id': 'book-section'}, {'label': 'Monograph', 'id': 'monograph'}, {'label': 'Report', 'id': 'report'}, {'label': 'Book Track', 'id': 'book-track'}, {'label': 'Journal Article', 'id': 'journal-article'}, {'label': 'Part', 'id': 'book-part'}, ... }]
[ "This", "method", "retrieve", "an", "iterator", "with", "all", "the", "available", "types", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1466-L1501
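A note on how a generator like all() should end: raising StopIteration() inside a generator has been converted to RuntimeError since PEP 479 (Python 3.7+), so a bare return is the safe way to stop on a 404. A self-contained illustration of the difference:

def items_or_nothing(status_code, items):
    # A bare 'return' ends the generator cleanly; 'raise StopIteration()' here
    # would surface as RuntimeError on Python 3.7+ (PEP 479).
    if status_code == 404:
        return
    for item in items:
        yield item

print(list(items_or_nothing(404, [1, 2])))  # []
print(list(items_or_nothing(200, [1, 2])))  # [1, 2]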
17,126
fabiobatalha/crossrefapi
crossref/restful.py
Types.works
def works(self, type_id): """ This method retrieves an iterable of Works of the given type. args: Crossref allowed document Types (String) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(type_id)) return Works(context=context)
python
def works(self, type_id): """ This method retrieves an iterable of Works of the given type. args: Crossref allowed document Types (String) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(type_id)) return Works(context=context)
[ "def", "works", "(", "self", ",", "type_id", ")", ":", "context", "=", "'%s/%s'", "%", "(", "self", ".", "ENDPOINT", ",", "str", "(", "type_id", ")", ")", "return", "Works", "(", "context", "=", "context", ")" ]
This method retrieves an iterable of Works of the given type. args: Crossref allowed document Types (String) return: Works()
[ "This", "method", "retrieve", "a", "iterable", "of", "Works", "of", "the", "given", "type", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1542-L1551
17,127
fabiobatalha/crossrefapi
crossref/restful.py
Prefixes.works
def works(self, prefix_id): """ This method retrieves an iterable of Works of the given prefix. args: Crossref Prefix (String) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(prefix_id)) return Works(context=context)
python
def works(self, prefix_id): """ This method retrieves an iterable of Works of the given prefix. args: Crossref Prefix (String) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(prefix_id)) return Works(context=context)
[ "def", "works", "(", "self", ",", "prefix_id", ")", ":", "context", "=", "'%s/%s'", "%", "(", "self", ".", "ENDPOINT", ",", "str", "(", "prefix_id", ")", ")", "return", "Works", "(", "context", "=", "context", ")" ]
This method retrieves an iterable of Works of the given prefix. args: Crossref Prefix (String) return: Works()
[ "This", "method", "retrieve", "a", "iterable", "of", "Works", "of", "the", "given", "prefix", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1594-L1603
17,128
fabiobatalha/crossrefapi
crossref/restful.py
Journals.works
def works(self, issn): """ This method retrieves an iterable of Works of the given journal. args: Journal ISSN (String) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(issn)) return Works(context=context)
python
def works(self, issn): """ This method retrieves an iterable of Works of the given journal. args: Journal ISSN (String) return: Works() """ context = '%s/%s' % (self.ENDPOINT, str(issn)) return Works(context=context)
[ "def", "works", "(", "self", ",", "issn", ")", ":", "context", "=", "'%s/%s'", "%", "(", "self", ".", "ENDPOINT", ",", "str", "(", "issn", ")", ")", "return", "Works", "(", "context", "=", "context", ")" ]
This method retrieves an iterable of Works of the given journal. args: Journal ISSN (String) return: Works()
[ "This", "method", "retrieve", "a", "iterable", "of", "Works", "of", "the", "given", "journal", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1718-L1728
17,129
fabiobatalha/crossrefapi
crossref/restful.py
Depositor.register_doi
def register_doi(self, submission_id, request_xml): """ This method registers a new DOI number in Crossref or updates some DOI metadata. submission_id: Will be used as the submission file name. The file name could be used in future requests to retrieve the submission status. request_xml: The XML with the document metadata. It must be under compliance with the Crossref Submission Schema. """ endpoint = self.get_endpoint('deposit') files = { 'mdFile': ('%s.xml' % submission_id, request_xml) } params = { 'operation': 'doMDUpload', 'login_id': self.api_user, 'login_passwd': self.api_key } result = self.do_http_request( 'post', endpoint, data=params, files=files, timeout=10, custom_header=str(self.etiquette) ) return result
python
def register_doi(self, submission_id, request_xml): """ This method registers a new DOI number in Crossref or updates some DOI metadata. submission_id: Will be used as the submission file name. The file name could be used in future requests to retrieve the submission status. request_xml: The XML with the document metadata. It must be under compliance with the Crossref Submission Schema. """ endpoint = self.get_endpoint('deposit') files = { 'mdFile': ('%s.xml' % submission_id, request_xml) } params = { 'operation': 'doMDUpload', 'login_id': self.api_user, 'login_passwd': self.api_key } result = self.do_http_request( 'post', endpoint, data=params, files=files, timeout=10, custom_header=str(self.etiquette) ) return result
[ "def", "register_doi", "(", "self", ",", "submission_id", ",", "request_xml", ")", ":", "endpoint", "=", "self", ".", "get_endpoint", "(", "'deposit'", ")", "files", "=", "{", "'mdFile'", ":", "(", "'%s.xml'", "%", "submission_id", ",", "request_xml", ")", "}", "params", "=", "{", "'operation'", ":", "'doMDUpload'", ",", "'login_id'", ":", "self", ".", "api_user", ",", "'login_passwd'", ":", "self", ".", "api_key", "}", "result", "=", "self", ".", "do_http_request", "(", "'post'", ",", "endpoint", ",", "data", "=", "params", ",", "files", "=", "files", ",", "timeout", "=", "10", ",", "custom_header", "=", "str", "(", "self", ".", "etiquette", ")", ")", "return", "result" ]
This method registers a new DOI number in Crossref or updates some DOI metadata. submission_id: Will be used as the submission file name. The file name could be used in future requests to retrieve the submission status. request_xml: The XML with the document metadata. It must be under compliance with the Crossref Submission Schema.
[ "This", "method", "registry", "a", "new", "DOI", "number", "in", "Crossref", "or", "update", "some", "DOI", "metadata", "." ]
53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7
https://github.com/fabiobatalha/crossrefapi/blob/53f84ee0d8a8fc6ad9b2493f51c5151e66d2faf7/crossref/restful.py#L1746-L1779
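The register_doi() upload is an ordinary multipart POST. A rough standalone equivalent using the requests package, which is an assumption of this sketch, as is the deposit URL shown; the real code resolves its endpoint via get_endpoint('deposit').

import requests

def deposit(submission_id, request_xml, user, key):
    # The XML travels as a file part named 'mdFile'; credentials go as form data.
    files = {'mdFile': ('%s.xml' % submission_id, request_xml)}
    params = {'operation': 'doMDUpload', 'login_id': user, 'login_passwd': key}
    return requests.post('https://doi.crossref.org/servlet/deposit',
                         data=params, files=files, timeout=10)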
17,130
buildinspace/peru
peru/plugin.py
_find_plugin_dir
def _find_plugin_dir(module_type): '''Find the directory containing the plugin definition for the given type. Do this by searching all the paths where plugins can live for a dir that matches the type name.''' for install_dir in _get_plugin_install_dirs(): candidate = os.path.join(install_dir, module_type) if os.path.isdir(candidate): return candidate else: raise PluginCandidateError( 'No plugin found for `{}` module in paths:\n{}'.format( module_type, '\n'.join(_get_plugin_install_dirs())))
python
def _find_plugin_dir(module_type): '''Find the directory containing the plugin definition for the given type. Do this by searching all the paths where plugins can live for a dir that matches the type name.''' for install_dir in _get_plugin_install_dirs(): candidate = os.path.join(install_dir, module_type) if os.path.isdir(candidate): return candidate else: raise PluginCandidateError( 'No plugin found for `{}` module in paths:\n{}'.format( module_type, '\n'.join(_get_plugin_install_dirs())))
[ "def", "_find_plugin_dir", "(", "module_type", ")", ":", "for", "install_dir", "in", "_get_plugin_install_dirs", "(", ")", ":", "candidate", "=", "os", ".", "path", ".", "join", "(", "install_dir", ",", "module_type", ")", "if", "os", ".", "path", ".", "isdir", "(", "candidate", ")", ":", "return", "candidate", "else", ":", "raise", "PluginCandidateError", "(", "'No plugin found for `{}` module in paths:\\n{}'", ".", "format", "(", "module_type", ",", "'\\n'", ".", "join", "(", "_get_plugin_install_dirs", "(", ")", ")", ")", ")" ]
Find the directory containing the plugin definition for the given type. Do this by searching all the paths where plugins can live for a dir that matches the type name.
[ "Find", "the", "directory", "containing", "the", "plugin", "definition", "for", "the", "given", "type", ".", "Do", "this", "by", "searching", "all", "the", "paths", "where", "plugins", "can", "live", "for", "a", "dir", "that", "matches", "the", "type", "name", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/plugin.py#L264-L276
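_find_plugin_dir leans on Python's for/else idiom: the else clause runs only when the loop finishes without returning (or breaking) out, which is exactly the "nothing matched" case. A tiny standalone demonstration:

def find_match(candidates, wanted):
    for candidate in candidates:
        if candidate.endswith(wanted):
            return candidate
    else:
        # Reached only when no candidate returned above.
        raise LookupError('no candidate matches %r' % wanted)

print(find_match(['/plugins/git', '/plugins/curl'], 'curl'))  # /plugins/curl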
17,131
buildinspace/peru
peru/main.py
merged_args_dicts
def merged_args_dicts(global_args, subcommand_args): '''We deal with docopt args from the toplevel peru parse and the subcommand parse. We don't want False values for a flag in the subcommand to override True values if that flag was given at the top level. This function specifically handles that case.''' merged = global_args.copy() for key, val in subcommand_args.items(): if key not in merged: merged[key] = val elif type(merged[key]) is type(val) is bool: merged[key] = merged[key] or val else: raise RuntimeError("Unmergable args.") return merged
python
def merged_args_dicts(global_args, subcommand_args): '''We deal with docopt args from the toplevel peru parse and the subcommand parse. We don't want False values for a flag in the subcommand to override True values if that flag was given at the top level. This function specifically handles that case.''' merged = global_args.copy() for key, val in subcommand_args.items(): if key not in merged: merged[key] = val elif type(merged[key]) is type(val) is bool: merged[key] = merged[key] or val else: raise RuntimeError("Unmergable args.") return merged
[ "def", "merged_args_dicts", "(", "global_args", ",", "subcommand_args", ")", ":", "merged", "=", "global_args", ".", "copy", "(", ")", "for", "key", ",", "val", "in", "subcommand_args", ".", "items", "(", ")", ":", "if", "key", "not", "in", "merged", ":", "merged", "[", "key", "]", "=", "val", "elif", "type", "(", "merged", "[", "key", "]", ")", "is", "type", "(", "val", ")", "is", "bool", ":", "merged", "[", "key", "]", "=", "merged", "[", "key", "]", "or", "val", "else", ":", "raise", "RuntimeError", "(", "\"Unmergable args.\"", ")", "return", "merged" ]
We deal with docopt args from the toplevel peru parse and the subcommand parse. We don't want False values for a flag in the subcommand to override True values if that flag was given at the top level. This function specifically handles that case.
[ "We", "deal", "with", "docopt", "args", "from", "the", "toplevel", "peru", "parse", "and", "the", "subcommand", "parse", ".", "We", "don", "t", "want", "False", "values", "for", "a", "flag", "in", "the", "subcommand", "to", "override", "True", "values", "if", "that", "flag", "was", "given", "at", "the", "top", "level", ".", "This", "function", "specifically", "handles", "that", "case", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/main.py#L299-L312
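To make the boolean-OR merge concrete, here is the function from the record above together with a worked example (repeated so the snippet runs on its own; the sample docopt-style flags are invented for illustration):

def merged_args_dicts(global_args, subcommand_args):
    merged = global_args.copy()
    for key, val in subcommand_args.items():
        if key not in merged:
            merged[key] = val
        elif type(merged[key]) is type(val) is bool:
            # A True at either level wins.
            merged[key] = merged[key] or val
        else:
            raise RuntimeError("Unmergable args.")
    return merged

print(merged_args_dicts({'--verbose': True, 'sync': False},
                        {'--verbose': False, '--force': True}))
# {'--verbose': True, 'sync': False, '--force': True}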
17,132
buildinspace/peru
peru/main.py
force_utf8_in_ascii_mode_hack
def force_utf8_in_ascii_mode_hack(): '''In systems without a UTF8 locale configured, Python will default to ASCII mode for stdout and stderr. This causes our fancy display to fail with encoding errors. In particular, you run into this if you try to run peru inside of Docker. This is a hack to force emitting UTF8 in that case. Hopefully it doesn't break anything important.''' if sys.stdout.encoding == 'ANSI_X3.4-1968': sys.stdout = open( sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1) sys.stderr = open( sys.stderr.fileno(), mode='w', encoding='utf8', buffering=1)
python
def force_utf8_in_ascii_mode_hack(): '''In systems without a UTF8 locale configured, Python will default to ASCII mode for stdout and stderr. This causes our fancy display to fail with encoding errors. In particular, you run into this if you try to run peru inside of Docker. This is a hack to force emitting UTF8 in that case. Hopefully it doesn't break anything important.''' if sys.stdout.encoding == 'ANSI_X3.4-1968': sys.stdout = open( sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1) sys.stderr = open( sys.stderr.fileno(), mode='w', encoding='utf8', buffering=1)
[ "def", "force_utf8_in_ascii_mode_hack", "(", ")", ":", "if", "sys", ".", "stdout", ".", "encoding", "==", "'ANSI_X3.4-1968'", ":", "sys", ".", "stdout", "=", "open", "(", "sys", ".", "stdout", ".", "fileno", "(", ")", ",", "mode", "=", "'w'", ",", "encoding", "=", "'utf8'", ",", "buffering", "=", "1", ")", "sys", ".", "stderr", "=", "open", "(", "sys", ".", "stderr", ".", "fileno", "(", ")", ",", "mode", "=", "'w'", ",", "encoding", "=", "'utf8'", ",", "buffering", "=", "1", ")" ]
In systems without a UTF8 locale configured, Python will default to ASCII mode for stdout and stderr. This causes our fancy display to fail with encoding errors. In particular, you run into this if you try to run peru inside of Docker. This is a hack to force emitting UTF8 in that case. Hopefully it doesn't break anything important.
[ "In", "systems", "without", "a", "UTF8", "locale", "configured", "Python", "will", "default", "to", "ASCII", "mode", "for", "stdout", "and", "stderr", ".", "This", "causes", "our", "fancy", "display", "to", "fail", "with", "encoding", "errors", ".", "In", "particular", "you", "run", "into", "this", "if", "you", "try", "to", "run", "peru", "inside", "of", "Docker", ".", "This", "is", "a", "hack", "to", "force", "emitting", "UTF8", "in", "that", "case", ".", "Hopefully", "it", "doesn", "t", "break", "anything", "important", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/main.py#L334-L344
17,133
buildinspace/peru
peru/scope.py
Scope.parse_target
async def parse_target(self, runtime, target_str): '''A target is a pipeline of a module into zero or more rules, and each module and rule can itself be scoped with zero or more module names.''' pipeline_parts = target_str.split(RULE_SEPARATOR) module = await self.resolve_module(runtime, pipeline_parts[0], target_str) rules = [] for part in pipeline_parts[1:]: rule = await self.resolve_rule(runtime, part) rules.append(rule) return module, tuple(rules)
python
async def parse_target(self, runtime, target_str): '''A target is a pipeline of a module into zero or more rules, and each module and rule can itself be scoped with zero or more module names.''' pipeline_parts = target_str.split(RULE_SEPARATOR) module = await self.resolve_module(runtime, pipeline_parts[0], target_str) rules = [] for part in pipeline_parts[1:]: rule = await self.resolve_rule(runtime, part) rules.append(rule) return module, tuple(rules)
[ "async", "def", "parse_target", "(", "self", ",", "runtime", ",", "target_str", ")", ":", "pipeline_parts", "=", "target_str", ".", "split", "(", "RULE_SEPARATOR", ")", "module", "=", "await", "self", ".", "resolve_module", "(", "runtime", ",", "pipeline_parts", "[", "0", "]", ",", "target_str", ")", "rules", "=", "[", "]", "for", "part", "in", "pipeline_parts", "[", "1", ":", "]", ":", "rule", "=", "await", "self", ".", "resolve_rule", "(", "runtime", ",", "part", ")", "rules", ".", "append", "(", "rule", ")", "return", "module", ",", "tuple", "(", "rules", ")" ]
A target is a pipeline of a module into zero or more rules, and each module and rule can itself be scoped with zero or more module names.
[ "A", "target", "is", "a", "pipeline", "of", "a", "module", "into", "zero", "or", "more", "rules", "and", "each", "module", "and", "rule", "can", "itself", "be", "scoped", "with", "zero", "or", "more", "module", "names", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/scope.py#L17-L27
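The synchronous core of parse_target is just a split on the rule separator, with the first piece naming the module and the rest forming the rule pipeline. A sketch, assuming RULE_SEPARATOR is '|' (the actual value lives elsewhere in peru, so treat it as an assumption):

RULE_SEPARATOR = '|'  # assumed value for this sketch

def split_target(target_str):
    # First piece names the module; the rest form the rule pipeline.
    module_str, *rule_strs = target_str.split(RULE_SEPARATOR)
    return module_str, tuple(rule_strs)

print(split_target('dep|strip_docs|flatten'))  # ('dep', ('strip_docs', 'flatten'))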
17,134
buildinspace/peru
peru/edit_yaml.py
_maybe_quote
def _maybe_quote(val): '''All of our values should be strings. Usually those can be passed in as bare words, but if they're parseable as an int or float we need to quote them.''' assert isinstance(val, str), 'We should never set non-string values.' needs_quoting = False try: int(val) needs_quoting = True except Exception: pass try: float(val) needs_quoting = True except Exception: pass if needs_quoting: return '"{}"'.format(val) else: return val
python
def _maybe_quote(val): '''All of our values should be strings. Usually those can be passed in as bare words, but if they're parseable as an int or float we need to quote them.''' assert isinstance(val, str), 'We should never set non-string values.' needs_quoting = False try: int(val) needs_quoting = True except Exception: pass try: float(val) needs_quoting = True except Exception: pass if needs_quoting: return '"{}"'.format(val) else: return val
[ "def", "_maybe_quote", "(", "val", ")", ":", "assert", "isinstance", "(", "val", ",", "str", ")", ",", "'We should never set non-string values.'", "needs_quoting", "=", "False", "try", ":", "int", "(", "val", ")", "needs_quoting", "=", "True", "except", "Exception", ":", "pass", "try", ":", "float", "(", "val", ")", "needs_quoting", "=", "True", "except", "Exception", ":", "pass", "if", "needs_quoting", ":", "return", "'\"{}\"'", ".", "format", "(", "val", ")", "else", ":", "return", "val" ]
All of our values should be strings. Usually those can be passed in as bare words, but if they're parseable as an int or float we need to quote them.
[ "All", "of", "our", "values", "should", "be", "strings", ".", "Usually", "those", "can", "be", "passed", "in", "as", "bare", "words", "but", "if", "they", "re", "parseable", "as", "an", "int", "or", "float", "we", "need", "to", "quote", "them", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/edit_yaml.py#L26-L45
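The quoting test boils down to "would YAML read this string as a number?". A compact, standalone restatement of that check with a few sample values; the original catches bare Exception, but ValueError is enough for string inputs.

def needs_quoting(val):
    # True when int() or float() can parse the string, i.e. YAML would
    # otherwise load it as a number instead of a string.
    for parse in (int, float):
        try:
            parse(val)
            return True
        except ValueError:
            pass
    return False

for value in ['master', '42', '3.14', '1e6']:
    print(value, needs_quoting(value))
# master False / 42 True / 3.14 True / 1e6 True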
17,135
buildinspace/peru
peru/async_helpers.py
gather_coalescing_exceptions
async def gather_coalescing_exceptions(coros, display, *, verbose): '''The tricky thing about running multiple coroutines in parallel is what we're supposed to do when one of them raises an exception. The approach we're using here is to catch exceptions and keep waiting for other tasks to finish. At the end, we reraise a GatheredExceptions error, if any exceptions were caught. Another minor detail: We also want to make sure to start coroutines in the order given, so that they end up appearing to the user alphabetically in the fancy display. Note that asyncio.gather() puts coroutines in a set internally, so we schedule coroutines *before* we give them to gather(). ''' exceptions = [] reprs = [] async def catching_wrapper(coro): try: return (await coro) except Exception as e: exceptions.append(e) if isinstance(e, PrintableError) and not verbose: reprs.append(e.message) else: reprs.append(traceback.format_exc()) return None # Suppress a deprecation warning in Python 3.5, while continuing to support # 3.3 and early 3.4 releases. if hasattr(asyncio, 'ensure_future'): schedule = getattr(asyncio, 'ensure_future') else: schedule = getattr(asyncio, 'async') futures = [schedule(catching_wrapper(coro)) for coro in coros] results = await asyncio.gather(*futures) if exceptions: raise GatheredExceptions(exceptions, reprs) else: return results
python
async def gather_coalescing_exceptions(coros, display, *, verbose): '''The tricky thing about running multiple coroutines in parallel is what we're supposed to do when one of them raises an exception. The approach we're using here is to catch exceptions and keep waiting for other tasks to finish. At the end, we reraise a GatheredExceptions error, if any exceptions were caught. Another minor detail: We also want to make sure to start coroutines in the order given, so that they end up appearing to the user alphabetically in the fancy display. Note that asyncio.gather() puts coroutines in a set internally, so we schedule coroutines *before* we give them to gather(). ''' exceptions = [] reprs = [] async def catching_wrapper(coro): try: return (await coro) except Exception as e: exceptions.append(e) if isinstance(e, PrintableError) and not verbose: reprs.append(e.message) else: reprs.append(traceback.format_exc()) return None # Suppress a deprecation warning in Python 3.5, while continuing to support # 3.3 and early 3.4 releases. if hasattr(asyncio, 'ensure_future'): schedule = getattr(asyncio, 'ensure_future') else: schedule = getattr(asyncio, 'async') futures = [schedule(catching_wrapper(coro)) for coro in coros] results = await asyncio.gather(*futures) if exceptions: raise GatheredExceptions(exceptions, reprs) else: return results
[ "async", "def", "gather_coalescing_exceptions", "(", "coros", ",", "display", ",", "*", ",", "verbose", ")", ":", "exceptions", "=", "[", "]", "reprs", "=", "[", "]", "async", "def", "catching_wrapper", "(", "coro", ")", ":", "try", ":", "return", "(", "await", "coro", ")", "except", "Exception", "as", "e", ":", "exceptions", ".", "append", "(", "e", ")", "if", "isinstance", "(", "e", ",", "PrintableError", ")", "and", "not", "verbose", ":", "reprs", ".", "append", "(", "e", ".", "message", ")", "else", ":", "reprs", ".", "append", "(", "traceback", ".", "format_exc", "(", ")", ")", "return", "None", "# Suppress a deprecation warning in Python 3.5, while continuing to support", "# 3.3 and early 3.4 releases.", "if", "hasattr", "(", "asyncio", ",", "'ensure_future'", ")", ":", "schedule", "=", "getattr", "(", "asyncio", ",", "'ensure_future'", ")", "else", ":", "schedule", "=", "getattr", "(", "asyncio", ",", "'async'", ")", "futures", "=", "[", "schedule", "(", "catching_wrapper", "(", "coro", ")", ")", "for", "coro", "in", "coros", "]", "results", "=", "await", "asyncio", ".", "gather", "(", "*", "futures", ")", "if", "exceptions", ":", "raise", "GatheredExceptions", "(", "exceptions", ",", "reprs", ")", "else", ":", "return", "results" ]
The tricky thing about running multiple coroutines in parallel is what we're supposed to do when one of them raises an exception. The approach we're using here is to catch exceptions and keep waiting for other tasks to finish. At the end, we reraise a GatheredExceptions error, if any exceptions were caught. Another minor detail: We also want to make sure to start coroutines in the order given, so that they end up appearing to the user alphabetically in the fancy display. Note that asyncio.gather() puts coroutines in a set internally, so we schedule coroutines *before* we give them to gather().
[ "The", "tricky", "thing", "about", "running", "multiple", "coroutines", "in", "parallel", "is", "what", "we", "re", "supposed", "to", "do", "when", "one", "of", "them", "raises", "an", "exception", ".", "The", "approach", "we", "re", "using", "here", "is", "to", "catch", "exceptions", "and", "keep", "waiting", "for", "other", "tasks", "to", "finish", ".", "At", "the", "end", "we", "reraise", "a", "GatheredExceptions", "error", "if", "any", "exceptions", "were", "caught", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_helpers.py#L53-L94
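The standard library offers a close cousin of this coalescing behaviour: asyncio.gather(..., return_exceptions=True) also lets every task run to completion and hands the exceptions back as values. A minimal sketch of those semantics, not of peru's GatheredExceptions wrapper itself:

import asyncio

async def ok():
    return 'ok'

async def boom(msg):
    raise RuntimeError(msg)

async def main():
    # All three coroutines finish; failures come back as values, not raises.
    results = await asyncio.gather(ok(), boom('a'), boom('b'),
                                   return_exceptions=True)
    failures = [r for r in results if isinstance(r, Exception)]
    print(results[0], len(failures))  # ok 2

asyncio.run(main())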
17,136
buildinspace/peru
peru/async_helpers.py
create_subprocess_with_handle
async def create_subprocess_with_handle(command, display_handle, *, shell=False, cwd, **kwargs): '''Writes subprocess output to a display handle as it comes in, and also returns a copy of it as a string. Throws if the subprocess returns an error. Note that cwd is a required keyword-only argument, on the theory that peru should never start child processes "wherever I happen to be running right now."''' # We're going to get chunks of bytes from the subprocess, and it's possible # that one of those chunks ends in the middle of a unicode character. An # incremental decoder keeps those dangling bytes around until the next # chunk arrives, so that split characters get decoded properly. Use # stdout's encoding, but provide a default for the case where stdout has # been redirected to a StringIO. (This happens in tests.) encoding = sys.stdout.encoding or 'utf8' decoder_factory = codecs.getincrementaldecoder(encoding) decoder = decoder_factory(errors='replace') output_copy = io.StringIO() # Display handles are context managers. Entering and exiting the display # handle lets the display know when the job starts and stops. with display_handle: stdin = asyncio.subprocess.DEVNULL stdout = asyncio.subprocess.PIPE stderr = asyncio.subprocess.STDOUT if shell: proc = await asyncio.create_subprocess_shell( command, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) else: proc = await asyncio.create_subprocess_exec( *command, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) # Read all the output from the subprocess as it comes in. while True: outputbytes = await proc.stdout.read(4096) if not outputbytes: break outputstr = decoder.decode(outputbytes) outputstr_unified = _unify_newlines(outputstr) display_handle.write(outputstr_unified) output_copy.write(outputstr_unified) returncode = await proc.wait() if returncode != 0: raise subprocess.CalledProcessError(returncode, command, output_copy.getvalue()) if hasattr(decoder, 'buffer'): # The utf8 decoder has this attribute, but some others don't. assert not decoder.buffer, 'decoder nonempty: ' + repr(decoder.buffer) return output_copy.getvalue()
python
async def create_subprocess_with_handle(command, display_handle, *, shell=False, cwd, **kwargs): '''Writes subprocess output to a display handle as it comes in, and also returns a copy of it as a string. Throws if the subprocess returns an error. Note that cwd is a required keyword-only argument, on the theory that peru should never start child processes "wherever I happen to be running right now."''' # We're going to get chunks of bytes from the subprocess, and it's possible # that one of those chunks ends in the middle of a unicode character. An # incremental decoder keeps those dangling bytes around until the next # chunk arrives, so that split characters get decoded properly. Use # stdout's encoding, but provide a default for the case where stdout has # been redirected to a StringIO. (This happens in tests.) encoding = sys.stdout.encoding or 'utf8' decoder_factory = codecs.getincrementaldecoder(encoding) decoder = decoder_factory(errors='replace') output_copy = io.StringIO() # Display handles are context managers. Entering and exiting the display # handle lets the display know when the job starts and stops. with display_handle: stdin = asyncio.subprocess.DEVNULL stdout = asyncio.subprocess.PIPE stderr = asyncio.subprocess.STDOUT if shell: proc = await asyncio.create_subprocess_shell( command, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) else: proc = await asyncio.create_subprocess_exec( *command, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) # Read all the output from the subprocess as it comes in. while True: outputbytes = await proc.stdout.read(4096) if not outputbytes: break outputstr = decoder.decode(outputbytes) outputstr_unified = _unify_newlines(outputstr) display_handle.write(outputstr_unified) output_copy.write(outputstr_unified) returncode = await proc.wait() if returncode != 0: raise subprocess.CalledProcessError(returncode, command, output_copy.getvalue()) if hasattr(decoder, 'buffer'): # The utf8 decoder has this attribute, but some others don't. assert not decoder.buffer, 'decoder nonempty: ' + repr(decoder.buffer) return output_copy.getvalue()
[ "async", "def", "create_subprocess_with_handle", "(", "command", ",", "display_handle", ",", "*", ",", "shell", "=", "False", ",", "cwd", ",", "*", "*", "kwargs", ")", ":", "# We're going to get chunks of bytes from the subprocess, and it's possible", "# that one of those chunks ends in the middle of a unicode character. An", "# incremental decoder keeps those dangling bytes around until the next", "# chunk arrives, so that split characters get decoded properly. Use", "# stdout's encoding, but provide a default for the case where stdout has", "# been redirected to a StringIO. (This happens in tests.)", "encoding", "=", "sys", ".", "stdout", ".", "encoding", "or", "'utf8'", "decoder_factory", "=", "codecs", ".", "getincrementaldecoder", "(", "encoding", ")", "decoder", "=", "decoder_factory", "(", "errors", "=", "'replace'", ")", "output_copy", "=", "io", ".", "StringIO", "(", ")", "# Display handles are context managers. Entering and exiting the display", "# handle lets the display know when the job starts and stops.", "with", "display_handle", ":", "stdin", "=", "asyncio", ".", "subprocess", ".", "DEVNULL", "stdout", "=", "asyncio", ".", "subprocess", ".", "PIPE", "stderr", "=", "asyncio", ".", "subprocess", ".", "STDOUT", "if", "shell", ":", "proc", "=", "await", "asyncio", ".", "create_subprocess_shell", "(", "command", ",", "stdin", "=", "stdin", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "cwd", "=", "cwd", ",", "*", "*", "kwargs", ")", "else", ":", "proc", "=", "await", "asyncio", ".", "create_subprocess_exec", "(", "*", "command", ",", "stdin", "=", "stdin", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "cwd", "=", "cwd", ",", "*", "*", "kwargs", ")", "# Read all the output from the subprocess as its comes in.", "while", "True", ":", "outputbytes", "=", "await", "proc", ".", "stdout", ".", "read", "(", "4096", ")", "if", "not", "outputbytes", ":", "break", "outputstr", "=", "decoder", ".", "decode", "(", "outputbytes", ")", "outputstr_unified", "=", "_unify_newlines", "(", "outputstr", ")", "display_handle", ".", "write", "(", "outputstr_unified", ")", "output_copy", ".", "write", "(", "outputstr_unified", ")", "returncode", "=", "await", "proc", ".", "wait", "(", ")", "if", "returncode", "!=", "0", ":", "raise", "subprocess", ".", "CalledProcessError", "(", "returncode", ",", "command", ",", "output_copy", ".", "getvalue", "(", ")", ")", "if", "hasattr", "(", "decoder", ",", "'buffer'", ")", ":", "# The utf8 decoder has this attribute, but some others don't.", "assert", "not", "decoder", ".", "buffer", ",", "'decoder nonempty: '", "+", "repr", "(", "decoder", ".", "buffer", ")", "return", "output_copy", ".", "getvalue", "(", ")" ]
Writes subprocess output to a display handle as it comes in, and also returns a copy of it as a string. Throws if the subprocess returns an error. Note that cwd is a required keyword-only argument, on the theory that peru should never start child processes "wherever I happen to be running right now."
[ "Writes", "subprocess", "output", "to", "a", "display", "handle", "as", "it", "comes", "in", "and", "also", "returns", "a", "copy", "of", "it", "as", "a", "string", ".", "Throws", "if", "the", "subprocess", "returns", "an", "error", ".", "Note", "that", "cwd", "is", "a", "required", "keyword", "-", "only", "argument", "on", "theory", "that", "peru", "should", "never", "start", "child", "processes", "wherever", "I", "happen", "to", "be", "running", "right", "now", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_helpers.py#L97-L164
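The incremental decoder is the load-bearing detail here: a multi-byte character split across two reads would otherwise decode into replacement characters. A standalone demonstration:

import codecs

decoder = codecs.getincrementaldecoder('utf8')(errors='replace')
data = 'héllo'.encode('utf8')   # b'h\xc3\xa9llo'
chunks = [data[:2], data[2:]]   # the é is split across the two chunks
print(''.join(decoder.decode(chunk) for chunk in chunks))  # héllo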
17,137
buildinspace/peru
peru/async_helpers.py
raises_gathered
def raises_gathered(error_type): '''For use in tests. Many tests expect a single error to be thrown, and want it to be of a specific type. This is a helper method for when that type is inside a gathered exception.''' container = RaisesGatheredContainer() try: yield container except GatheredExceptions as e: # Make sure there is exactly one exception. if len(e.exceptions) != 1: raise inner = e.exceptions[0] # Make sure the exception is the right type. if not isinstance(inner, error_type): raise # Success. container.exception = inner
python
def raises_gathered(error_type): '''For use in tests. Many tests expect a single error to be thrown, and want it to be of a specific type. This is a helper method for when that type is inside a gathered exception.''' container = RaisesGatheredContainer() try: yield container except GatheredExceptions as e: # Make sure there is exactly one exception. if len(e.exceptions) != 1: raise inner = e.exceptions[0] # Make sure the exception is the right type. if not isinstance(inner, error_type): raise # Success. container.exception = inner
[ "def", "raises_gathered", "(", "error_type", ")", ":", "container", "=", "RaisesGatheredContainer", "(", ")", "try", ":", "yield", "container", "except", "GatheredExceptions", "as", "e", ":", "# Make sure there is exactly one exception.", "if", "len", "(", "e", ".", "exceptions", ")", "!=", "1", ":", "raise", "inner", "=", "e", ".", "exceptions", "[", "0", "]", "# Make sure the exception is the right type.", "if", "not", "isinstance", "(", "inner", ",", "error_type", ")", ":", "raise", "# Success.", "container", ".", "exception", "=", "inner" ]
For use in tests. Many tests expect a single error to be thrown, and want it to be of a specific type. This is a helper method for when that type is inside a gathered exception.
[ "For", "use", "in", "tests", ".", "Many", "tests", "expect", "a", "single", "error", "to", "be", "thrown", "and", "want", "it", "to", "be", "of", "a", "specific", "type", ".", "This", "is", "a", "helper", "method", "for", "when", "that", "type", "is", "inside", "a", "gathered", "exception", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_helpers.py#L201-L217
17,138
buildinspace/peru
peru/resources/plugins/curl/curl_plugin.py
get_request_filename
def get_request_filename(request): '''Figure out the filename for an HTTP download.''' # Check to see if a filename is specified in the HTTP headers. if 'Content-Disposition' in request.info(): disposition = request.info()['Content-Disposition'] pieces = re.split(r'\s*;\s*', disposition) for piece in pieces: if piece.startswith('filename='): filename = piece[len('filename='):] # Strip exactly one " from each end. if filename.startswith('"'): filename = filename[1:] if filename.endswith('"'): filename = filename[:-1] # Interpret backslashed quotes. filename = filename.replace('\\"', '"') return filename # If no filename was specified, pick a reasonable default. return os.path.basename(urlsplit(request.url).path) or 'index.html'
python
def get_request_filename(request): '''Figure out the filename for an HTTP download.''' # Check to see if a filename is specified in the HTTP headers. if 'Content-Disposition' in request.info(): disposition = request.info()['Content-Disposition'] pieces = re.split(r'\s*;\s*', disposition) for piece in pieces: if piece.startswith('filename='): filename = piece[len('filename='):] # Strip exactly one " from each end. if filename.startswith('"'): filename = filename[1:] if filename.endswith('"'): filename = filename[:-1] # Interpret backslashed quotes. filename = filename.replace('\\"', '"') return filename # If no filename was specified, pick a reasonable default. return os.path.basename(urlsplit(request.url).path) or 'index.html'
[ "def", "get_request_filename", "(", "request", ")", ":", "# Check to see if a filename is specified in the HTTP headers.", "if", "'Content-Disposition'", "in", "request", ".", "info", "(", ")", ":", "disposition", "=", "request", ".", "info", "(", ")", "[", "'Content-Disposition'", "]", "pieces", "=", "re", ".", "split", "(", "r'\\s*;\\s*'", ",", "disposition", ")", "for", "piece", "in", "pieces", ":", "if", "piece", ".", "startswith", "(", "'filename='", ")", ":", "filename", "=", "piece", "[", "len", "(", "'filename='", ")", ":", "]", "# Strip exactly one \" from each end.", "if", "filename", ".", "startswith", "(", "'\"'", ")", ":", "filename", "=", "filename", "[", "1", ":", "]", "if", "filename", ".", "endswith", "(", "'\"'", ")", ":", "filename", "=", "filename", "[", ":", "-", "1", "]", "# Interpret backslashed quotes.", "filename", "=", "filename", ".", "replace", "(", "'\\\\\"'", ",", "'\"'", ")", "return", "filename", "# If no filename was specified, pick a reasonable default.", "return", "os", ".", "path", ".", "basename", "(", "urlsplit", "(", "request", ".", "url", ")", ".", "path", ")", "or", "'index.html'" ]
Figure out the filename for an HTTP download.
[ "Figure", "out", "the", "filename", "for", "an", "HTTP", "download", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/resources/plugins/curl/curl_plugin.py#L16-L34
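For comparison, the standard library can do this header parsing too; email.message handles the quoting rules that the loop above unpicks by hand. A small sketch:

from email.message import Message

msg = Message()
msg['Content-Disposition'] = 'attachment; filename="data set.csv"'
print(msg.get_filename())  # data set.csv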
17,139
buildinspace/peru
peru/parser.py
_extract_optional_list_field
def _extract_optional_list_field(blob, name): '''Handle optional fields that can be either a string or a list of strings.''' value = _optional_list(typesafe_pop(blob, name, [])) if value is None: raise ParserError( '"{}" field must be a string or a list.'.format(name)) return value
python
def _extract_optional_list_field(blob, name): '''Handle optional fields that can be either a string or a list of strings.''' value = _optional_list(typesafe_pop(blob, name, [])) if value is None: raise ParserError( '"{}" field must be a string or a list.'.format(name)) return value
[ "def", "_extract_optional_list_field", "(", "blob", ",", "name", ")", ":", "value", "=", "_optional_list", "(", "typesafe_pop", "(", "blob", ",", "name", ",", "[", "]", ")", ")", "if", "value", "is", "None", ":", "raise", "ParserError", "(", "'\"{}\" field must be a string or a list.'", ".", "format", "(", "name", ")", ")", "return", "value" ]
Handle optional fields that can be either a string or a list of strings.
[ "Handle", "optional", "fields", "that", "can", "be", "either", "a", "string", "or", "a", "list", "of", "strings", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/parser.py#L135-L142
17,140
buildinspace/peru
peru/async_exit_stack.py
AsyncExitStack.pop_all
def pop_all(self): """Preserve the context stack by transferring it to a new instance.""" new_stack = type(self)() new_stack._exit_callbacks = self._exit_callbacks self._exit_callbacks = deque() return new_stack
python
def pop_all(self): """Preserve the context stack by transferring it to a new instance.""" new_stack = type(self)() new_stack._exit_callbacks = self._exit_callbacks self._exit_callbacks = deque() return new_stack
[ "def", "pop_all", "(", "self", ")", ":", "new_stack", "=", "type", "(", "self", ")", "(", ")", "new_stack", ".", "_exit_callbacks", "=", "self", ".", "_exit_callbacks", "self", ".", "_exit_callbacks", "=", "deque", "(", ")", "return", "new_stack" ]
Preserve the context stack by transferring it to a new instance.
[ "Preserve", "the", "context", "stack", "by", "transferring", "it", "to", "a", "new", "instance", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_exit_stack.py#L55-L60
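contextlib.ExitStack in the standard library has the same pop_all(), and the classic use is handing cleanup duties to the caller once setup has succeeded. A runnable sketch of that pattern:

from contextlib import ExitStack

def open_all(paths):
    with ExitStack() as stack:
        files = [stack.enter_context(open(p)) for p in paths]
        # Setup succeeded: transfer the close callbacks out of this 'with'.
        return stack.pop_all(), files

stack, files = open_all([__file__])
print([f.closed for f in files])  # [False] -- survived the with block
stack.close()
print([f.closed for f in files])  # [True]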
17,141
buildinspace/peru
peru/async_exit_stack.py
AsyncExitStack.callback
def callback(self, callback, *args, **kwds): """Registers an arbitrary callback and arguments. Cannot suppress exceptions. """ _exit_wrapper = self._create_cb_wrapper(callback, *args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection. _exit_wrapper.__wrapped__ = callback self._push_exit_callback(_exit_wrapper) return callback
python
def callback(self, callback, *args, **kwds): """Registers an arbitrary callback and arguments. Cannot suppress exceptions. """ _exit_wrapper = self._create_cb_wrapper(callback, *args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection. _exit_wrapper.__wrapped__ = callback self._push_exit_callback(_exit_wrapper) return callback
[ "def", "callback", "(", "self", ",", "callback", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "_exit_wrapper", "=", "self", ".", "_create_cb_wrapper", "(", "callback", ",", "*", "args", ",", "*", "*", "kwds", ")", "# We changed the signature, so using @wraps is not appropriate, but", "# setting __wrapped__ may still help with introspection.", "_exit_wrapper", ".", "__wrapped__", "=", "callback", "self", ".", "_push_exit_callback", "(", "_exit_wrapper", ")", "return", "callback" ]
Registers an arbitrary callback and arguments. Cannot suppress exceptions.
[ "Registers", "an", "arbitrary", "callback", "and", "arguments", ".", "Cannot", "suppress", "exceptions", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_exit_stack.py#L94-L104
17,142
buildinspace/peru
peru/async_exit_stack.py
AsyncExitStack.push_async_callback
def push_async_callback(self, callback, *args, **kwds): """Registers an arbitrary coroutine function and arguments. Cannot suppress exceptions. """ _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection. _exit_wrapper.__wrapped__ = callback self._push_exit_callback(_exit_wrapper, False) return callback
python
def push_async_callback(self, callback, *args, **kwds): """Registers an arbitrary coroutine function and arguments. Cannot suppress exceptions. """ _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection. _exit_wrapper.__wrapped__ = callback self._push_exit_callback(_exit_wrapper, False) return callback
[ "def", "push_async_callback", "(", "self", ",", "callback", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "_exit_wrapper", "=", "self", ".", "_create_async_cb_wrapper", "(", "callback", ",", "*", "args", ",", "*", "*", "kwds", ")", "# We changed the signature, so using @wraps is not appropriate, but", "# setting __wrapped__ may still help with introspection.", "_exit_wrapper", ".", "__wrapped__", "=", "callback", "self", ".", "_push_exit_callback", "(", "_exit_wrapper", ",", "False", ")", "return", "callback" ]
Registers an arbitrary coroutine function and arguments. Cannot suppress exceptions.
[ "Registers", "an", "arbitrary", "coroutine", "function", "and", "arguments", ".", "Cannot", "suppress", "exceptions", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/async_exit_stack.py#L144-L154
17,143
buildinspace/peru
peru/runtime.py
Runtime
async def Runtime(args, env): 'This is the async constructor for the _Runtime class.' r = _Runtime(args, env) await r._init_cache() return r
python
async def Runtime(args, env): 'This is the async constructor for the _Runtime class.' r = _Runtime(args, env) await r._init_cache() return r
[ "async", "def", "Runtime", "(", "args", ",", "env", ")", ":", "r", "=", "_Runtime", "(", "args", ",", "env", ")", "await", "r", ".", "_init_cache", "(", ")", "return", "r" ]
This is the async constructor for the _Runtime class.
[ "This", "is", "the", "async", "constructor", "for", "the", "_Runtime", "class", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/runtime.py#L16-L20
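This factory pattern exists because __init__ cannot await; a generic sketch with hypothetical names, not peru's actual setup:

import asyncio

class _Client:
    def __init__(self):
        self.ready = False

    async def _init(self):
        await asyncio.sleep(0)  # stand-in for real async setup such as cache warm-up
        self.ready = True

async def Client():
    "Async 'constructor': build the private class, then await its initializer."
    c = _Client()
    await c._init()
    return c

client = asyncio.run(Client())
assert client.ready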
17,144
buildinspace/peru
peru/runtime.py
find_project_file
def find_project_file(start_dir, basename): '''Walk up the directory tree until we find a file of the given name.''' prefix = os.path.abspath(start_dir) while True: candidate = os.path.join(prefix, basename) if os.path.isfile(candidate): return candidate if os.path.exists(candidate): raise PrintableError( "Found {}, but it's not a file.".format(candidate)) if os.path.dirname(prefix) == prefix: # We've walked all the way to the top. Bail. raise PrintableError("Can't find " + basename) # Not found at this level. We must go...shallower. prefix = os.path.dirname(prefix)
python
def find_project_file(start_dir, basename): '''Walk up the directory tree until we find a file of the given name.''' prefix = os.path.abspath(start_dir) while True: candidate = os.path.join(prefix, basename) if os.path.isfile(candidate): return candidate if os.path.exists(candidate): raise PrintableError( "Found {}, but it's not a file.".format(candidate)) if os.path.dirname(prefix) == prefix: # We've walked all the way to the top. Bail. raise PrintableError("Can't find " + basename) # Not found at this level. We must go...shallower. prefix = os.path.dirname(prefix)
[ "def", "find_project_file", "(", "start_dir", ",", "basename", ")", ":", "prefix", "=", "os", ".", "path", ".", "abspath", "(", "start_dir", ")", "while", "True", ":", "candidate", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "basename", ")", "if", "os", ".", "path", ".", "isfile", "(", "candidate", ")", ":", "return", "candidate", "if", "os", ".", "path", ".", "exists", "(", "candidate", ")", ":", "raise", "PrintableError", "(", "\"Found {}, but it's not a file.\"", ".", "format", "(", "candidate", ")", ")", "if", "os", ".", "path", ".", "dirname", "(", "prefix", ")", "==", "prefix", ":", "# We've walked all the way to the top. Bail.", "raise", "PrintableError", "(", "\"Can't find \"", "+", "basename", ")", "# Not found at this level. We must go...shallower.", "prefix", "=", "os", ".", "path", ".", "dirname", "(", "prefix", ")" ]
Walk up the directory tree until we find a file of the given name.
[ "Walk", "up", "the", "directory", "tree", "until", "we", "find", "a", "file", "of", "the", "given", "name", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/runtime.py#L150-L164
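A usage sketch, assuming find_project_file and PrintableError from above are in scope; the filename is illustrative:

import os

try:
    path = find_project_file(os.getcwd(), 'peru.yaml')
    print('project file:', path)
except PrintableError as error:
    print(error)  # raised once the walk reaches the filesystem root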
17,145
buildinspace/peru
peru/cache.py
delete_if_error
def delete_if_error(path): '''If any exception is raised inside the context, delete the file at the given path, and allow the exception to continue.''' try: yield except Exception: if os.path.exists(path): os.remove(path) raise
python
def delete_if_error(path): '''If any exception is raised inside the context, delete the file at the given path, and allow the exception to continue.''' try: yield except Exception: if os.path.exists(path): os.remove(path) raise
[ "def", "delete_if_error", "(", "path", ")", ":", "try", ":", "yield", "except", "Exception", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "remove", "(", "path", ")", "raise" ]
If any exception is raised inside the context, delete the file at the given path, and allow the exception to continue.
[ "If", "any", "exception", "is", "raised", "inside", "the", "context", "delete", "the", "file", "at", "the", "given", "path", "and", "allow", "the", "exception", "to", "continue", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/cache.py#L516-L524
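Since the function body yields, it is presumably decorated with contextlib.contextmanager in the real module; a self-contained sketch of the same idea:

import os
from contextlib import contextmanager

@contextmanager
def delete_if_error(path):
    try:
        yield
    except Exception:
        if os.path.exists(path):
            os.remove(path)
        raise

with open('partial.tmp', 'w') as f, delete_if_error('partial.tmp'):
    f.write('header')
    # an exception raised here would remove partial.tmp before propagating
os.remove('partial.tmp')  # demo cleanup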
17,146
buildinspace/peru
peru/cache.py
_format_file_lines
def _format_file_lines(files): '''Given a list of filenames that we're about to print, limit it to a reasonable number of lines.''' LINES_TO_SHOW = 10 if len(files) <= LINES_TO_SHOW: lines = '\n'.join(files) else: lines = ('\n'.join(files[:LINES_TO_SHOW - 1]) + '\n...{} total'.format( len(files))) return lines
python
def _format_file_lines(files): '''Given a list of filenames that we're about to print, limit it to a reasonable number of lines.''' LINES_TO_SHOW = 10 if len(files) <= LINES_TO_SHOW: lines = '\n'.join(files) else: lines = ('\n'.join(files[:LINES_TO_SHOW - 1]) + '\n...{} total'.format( len(files))) return lines
[ "def", "_format_file_lines", "(", "files", ")", ":", "LINES_TO_SHOW", "=", "10", "if", "len", "(", "files", ")", "<=", "LINES_TO_SHOW", ":", "lines", "=", "'\\n'", ".", "join", "(", "files", ")", "else", ":", "lines", "=", "(", "'\\n'", ".", "join", "(", "files", "[", ":", "LINES_TO_SHOW", "-", "1", "]", ")", "+", "'\\n...{} total'", ".", "format", "(", "len", "(", "files", ")", ")", ")", "return", "lines" ]
Given a list of filenames that we're about to print, limit it to a reasonable number of lines.
[ "Given", "a", "list", "of", "filenames", "that", "we", "re", "about", "to", "print", "limit", "it", "to", "a", "reasonable", "number", "of", "lines", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/cache.py#L527-L536
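Behavior sketch, assuming _format_file_lines from above is in scope: with the cap at 10, a 12-item list prints its first nine names plus a count line.

files = ['file{}.txt'.format(i) for i in range(12)]
print(_format_file_lines(files))
# file0.txt through file8.txt, then a final line reading "...12 total"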
17,147
buildinspace/peru
peru/cache.py
GitSession.git_env
def git_env(self): 'Set the index file and prevent git from reading global configs.' env = dict(os.environ) for var in ["HOME", "XDG_CONFIG_HOME"]: env.pop(var, None) env["GIT_CONFIG_NOSYSTEM"] = "true" # Weirdly, GIT_INDEX_FILE is interpreted relative to the work tree. As # a workaround, we absoluteify the path. env["GIT_INDEX_FILE"] = os.path.abspath(self.index_file) return env
python
def git_env(self): 'Set the index file and prevent git from reading global configs.' env = dict(os.environ) for var in ["HOME", "XDG_CONFIG_HOME"]: env.pop(var, None) env["GIT_CONFIG_NOSYSTEM"] = "true" # Weirdly, GIT_INDEX_FILE is interpreted relative to the work tree. As # a workaround, we absoluteify the path. env["GIT_INDEX_FILE"] = os.path.abspath(self.index_file) return env
[ "def", "git_env", "(", "self", ")", ":", "env", "=", "dict", "(", "os", ".", "environ", ")", "for", "var", "in", "[", "\"HOME\"", ",", "\"XDG_CONFIG_HOME\"", "]", ":", "env", ".", "pop", "(", "var", ",", "None", ")", "env", "[", "\"GIT_CONFIG_NOSYSTEM\"", "]", "=", "\"true\"", "# Weirdly, GIT_INDEX_FILE is interpreted relative to the work tree. As", "# a workaround, we absoluteify the path.", "env", "[", "\"GIT_INDEX_FILE\"", "]", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "index_file", ")", "return", "env" ]
Set the index file and prevent git from reading global configs.
[ "Set", "the", "index", "file", "and", "prevent", "git", "from", "reading", "global", "configs", "." ]
76e4012c6c34e85fb53a4c6d85f4ac3633d93f77
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/cache.py#L79-L88
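The same isolation trick works for any one-off git invocation; a sketch that assumes git is on PATH:

import os
import subprocess

env = dict(os.environ)
for var in ('HOME', 'XDG_CONFIG_HOME'):
    env.pop(var, None)               # hide user-level configuration
env['GIT_CONFIG_NOSYSTEM'] = 'true'  # hide system-level configuration
subprocess.run(['git', 'version'], env=env, check=True)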
17,148
unitedstates/python-us
us/states.py
load_states
def load_states(): """ Load state data from pickle file distributed with this package. Creates lists of states, territories, and combined states and territories. Also adds state abbreviation attribute access to the package: us.states.MD """ from pkg_resources import resource_stream # load state data from pickle file with resource_stream(__name__, 'states.pkl') as pklfile: for s in pickle.load(pklfile): state = State(**s) # create state object # create separate lists for obsolete, states, and territories if state.is_obsolete: OBSOLETE.append(state) elif state.is_territory: TERRITORIES.append(state) else: STATES.append(state) if state.is_contiguous: STATES_CONTIGUOUS.append(state) if state.is_continental: STATES_CONTINENTAL.append(state) # also create list of all states and territories STATES_AND_TERRITORIES.append(state) # provide package-level abbreviation access: us.states.MD globals()[state.abbr] = state
python
def load_states(): """ Load state data from pickle file distributed with this package. Creates lists of states, territories, and combined states and territories. Also adds state abbreviation attribute access to the package: us.states.MD """ from pkg_resources import resource_stream # load state data from pickle file with resource_stream(__name__, 'states.pkl') as pklfile: for s in pickle.load(pklfile): state = State(**s) # create state object # create separate lists for obsolete, states, and territories if state.is_obsolete: OBSOLETE.append(state) elif state.is_territory: TERRITORIES.append(state) else: STATES.append(state) if state.is_contiguous: STATES_CONTIGUOUS.append(state) if state.is_continental: STATES_CONTINENTAL.append(state) # also create list of all states and territories STATES_AND_TERRITORIES.append(state) # provide package-level abbreviation access: us.states.MD globals()[state.abbr] = state
[ "def", "load_states", "(", ")", ":", "from", "pkg_resources", "import", "resource_stream", "# load state data from pickle file", "with", "resource_stream", "(", "__name__", ",", "'states.pkl'", ")", "as", "pklfile", ":", "for", "s", "in", "pickle", ".", "load", "(", "pklfile", ")", ":", "state", "=", "State", "(", "*", "*", "s", ")", "# create state object", "# create separate lists for obsolete, states, and territories", "if", "state", ".", "is_obsolete", ":", "OBSOLETE", ".", "append", "(", "state", ")", "elif", "state", ".", "is_territory", ":", "TERRITORIES", ".", "append", "(", "state", ")", "else", ":", "STATES", ".", "append", "(", "state", ")", "if", "state", ".", "is_contiguous", ":", "STATES_CONTIGUOUS", ".", "append", "(", "state", ")", "if", "state", ".", "is_continental", ":", "STATES_CONTINENTAL", ".", "append", "(", "state", ")", "# also create list of all states and territories", "STATES_AND_TERRITORIES", ".", "append", "(", "state", ")", "# provide package-level abbreviation access: us.states.MD", "globals", "(", ")", "[", "state", ".", "abbr", "]", "=", "state" ]
Load state data from pickle file distributed with this package. Creates lists of states, territories, and combined states and territories. Also adds state abbreviation attribute access to the package: us.states.MD
[ "Load", "state", "data", "from", "pickle", "file", "distributed", "with", "this", "package", "." ]
15165f47a0508bef3737d07d033eaf4b782fb039
https://github.com/unitedstates/python-us/blob/15165f47a0508bef3737d07d033eaf4b782fb039/us/states.py#L59-L92
17,149
unitedstates/python-us
us/states.py
lookup
def lookup(val, field=None, use_cache=True): """ Semi-fuzzy state lookup. This method will make a best effort attempt at finding the state based on the lookup value provided. * two digits will search for FIPS code * two letters will search for state abbreviation * anything else will try to match the metaphone of state names Metaphone is used to allow for incorrect, but phonetically accurate, spelling of state names. Exact matches can be done on any attribute on State objects by passing the `field` argument. This skips the fuzzy-ish matching and does an exact, case-sensitive comparison against the specified field. This method caches non-None results, but the cache can be bypassed with the `use_cache=False` argument. """ import jellyfish if field is None: if FIPS_RE.match(val): field = 'fips' elif ABBR_RE.match(val): val = val.upper() field = 'abbr' else: val = jellyfish.metaphone(val) field = 'name_metaphone' # see if result is in cache cache_key = "%s:%s" % (field, val) if use_cache and cache_key in _lookup_cache: return _lookup_cache[cache_key] for state in STATES_AND_TERRITORIES: if val == getattr(state, field): _lookup_cache[cache_key] = state return state
python
def lookup(val, field=None, use_cache=True): """ Semi-fuzzy state lookup. This method will make a best effort attempt at finding the state based on the lookup value provided. * two digits will search for FIPS code * two letters will search for state abbreviation * anything else will try to match the metaphone of state names Metaphone is used to allow for incorrect, but phonetically accurate, spelling of state names. Exact matches can be done on any attribute on State objects by passing the `field` argument. This skips the fuzzy-ish matching and does an exact, case-sensitive comparison against the specified field. This method caches non-None results, but the cache can be bypassed with the `use_cache=False` argument. """ import jellyfish if field is None: if FIPS_RE.match(val): field = 'fips' elif ABBR_RE.match(val): val = val.upper() field = 'abbr' else: val = jellyfish.metaphone(val) field = 'name_metaphone' # see if result is in cache cache_key = "%s:%s" % (field, val) if use_cache and cache_key in _lookup_cache: return _lookup_cache[cache_key] for state in STATES_AND_TERRITORIES: if val == getattr(state, field): _lookup_cache[cache_key] = state return state
[ "def", "lookup", "(", "val", ",", "field", "=", "None", ",", "use_cache", "=", "True", ")", ":", "import", "jellyfish", "if", "field", "is", "None", ":", "if", "FIPS_RE", ".", "match", "(", "val", ")", ":", "field", "=", "'fips'", "elif", "ABBR_RE", ".", "match", "(", "val", ")", ":", "val", "=", "val", ".", "upper", "(", ")", "field", "=", "'abbr'", "else", ":", "val", "=", "jellyfish", ".", "metaphone", "(", "val", ")", "field", "=", "'name_metaphone'", "# see if result is in cache", "cache_key", "=", "\"%s:%s\"", "%", "(", "field", ",", "val", ")", "if", "use_cache", "and", "cache_key", "in", "_lookup_cache", ":", "return", "_lookup_cache", "[", "cache_key", "]", "for", "state", "in", "STATES_AND_TERRITORIES", ":", "if", "val", "==", "getattr", "(", "state", ",", "field", ")", ":", "_lookup_cache", "[", "cache_key", "]", "=", "state", "return", "state" ]
Semi-fuzzy state lookup. This method will make a best effort attempt at finding the state based on the lookup value provided. * two digits will search for FIPS code * two letters will search for state abbreviation * anything else will try to match the metaphone of state names Metaphone is used to allow for incorrect, but phonetically accurate, spelling of state names. Exact matches can be done on any attribute on State objects by passing the `field` argument. This skips the fuzzy-ish matching and does an exact, case-sensitive comparison against the specified field. This method caches non-None results, but the cache can be bypassed with the `use_cache=False` argument.
[ "Semi", "-", "fuzzy", "state", "lookup", ".", "This", "method", "will", "make", "a", "best", "effort", "attempt", "at", "finding", "the", "state", "based", "on", "the", "lookup", "value", "provided", "." ]
15165f47a0508bef3737d07d033eaf4b782fb039
https://github.com/unitedstates/python-us/blob/15165f47a0508bef3737d07d033eaf4b782fb039/us/states.py#L95-L134
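The three dispatch branches above in action; a sketch using the public python-us API (the exact State objects returned may differ by library version, and the misspelling match relies on metaphone behaving as advertised):

import us

us.states.lookup('24')        # two digits: FIPS code lookup (Maryland)
us.states.lookup('md')        # two letters: abbreviation lookup, upper-cased first
us.states.lookup('marylnd')   # anything else: metaphone match, tolerant of misspelling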
17,150
venthur/gscholar
gscholar/gscholar.py
query
def query(searchstr, outformat=FORMAT_BIBTEX, allresults=False): """Query google scholar. This method queries google scholar and returns a list of citations. Parameters ---------- searchstr : str the query outformat : int, optional the output format of the citations. Default is bibtex. allresults : bool, optional return all results or only the first (i.e. best one) Returns ------- result : list of strings the list with citations """ logger.debug("Query: {sstring}".format(sstring=searchstr)) searchstr = '/scholar?q='+quote(searchstr) url = GOOGLE_SCHOLAR_URL + searchstr header = HEADERS header['Cookie'] = "GSP=CF=%d" % outformat request = Request(url, headers=header) response = urlopen(request) html = response.read() html = html.decode('utf8') # grab the links tmp = get_links(html, outformat) # follow the bibtex links to get the bibtex entries result = list() if not allresults: tmp = tmp[:1] for link in tmp: url = GOOGLE_SCHOLAR_URL+link request = Request(url, headers=header) response = urlopen(request) bib = response.read() bib = bib.decode('utf8') result.append(bib) return result
python
def query(searchstr, outformat=FORMAT_BIBTEX, allresults=False): """Query google scholar. This method queries google scholar and returns a list of citations. Parameters ---------- searchstr : str the query outformat : int, optional the output format of the citations. Default is bibtex. allresults : bool, optional return all results or only the first (i.e. best one) Returns ------- result : list of strings the list with citations """ logger.debug("Query: {sstring}".format(sstring=searchstr)) searchstr = '/scholar?q='+quote(searchstr) url = GOOGLE_SCHOLAR_URL + searchstr header = HEADERS header['Cookie'] = "GSP=CF=%d" % outformat request = Request(url, headers=header) response = urlopen(request) html = response.read() html = html.decode('utf8') # grab the links tmp = get_links(html, outformat) # follow the bibtex links to get the bibtex entries result = list() if not allresults: tmp = tmp[:1] for link in tmp: url = GOOGLE_SCHOLAR_URL+link request = Request(url, headers=header) response = urlopen(request) bib = response.read() bib = bib.decode('utf8') result.append(bib) return result
[ "def", "query", "(", "searchstr", ",", "outformat", "=", "FORMAT_BIBTEX", ",", "allresults", "=", "False", ")", ":", "logger", ".", "debug", "(", "\"Query: {sstring}\"", ".", "format", "(", "sstring", "=", "searchstr", ")", ")", "searchstr", "=", "'/scholar?q='", "+", "quote", "(", "searchstr", ")", "url", "=", "GOOGLE_SCHOLAR_URL", "+", "searchstr", "header", "=", "HEADERS", "header", "[", "'Cookie'", "]", "=", "\"GSP=CF=%d\"", "%", "outformat", "request", "=", "Request", "(", "url", ",", "headers", "=", "header", ")", "response", "=", "urlopen", "(", "request", ")", "html", "=", "response", ".", "read", "(", ")", "html", "=", "html", ".", "decode", "(", "'utf8'", ")", "# grab the links", "tmp", "=", "get_links", "(", "html", ",", "outformat", ")", "# follow the bibtex links to get the bibtex entries", "result", "=", "list", "(", ")", "if", "not", "allresults", ":", "tmp", "=", "tmp", "[", ":", "1", "]", "for", "link", "in", "tmp", ":", "url", "=", "GOOGLE_SCHOLAR_URL", "+", "link", "request", "=", "Request", "(", "url", ",", "headers", "=", "header", ")", "response", "=", "urlopen", "(", "request", ")", "bib", "=", "response", ".", "read", "(", ")", "bib", "=", "bib", ".", "decode", "(", "'utf8'", ")", "result", ".", "append", "(", "bib", ")", "return", "result" ]
Query google scholar. This method queries google scholar and returns a list of citations. Parameters ---------- searchstr : str the query outformat : int, optional the output format of the citations. Default is bibtex. allresults : bool, optional return all results or only the first (i.e. best one) Returns ------- result : list of strings the list with citations
[ "Query", "google", "scholar", "." ]
f2f830c4009a4e9a393a81675d5ad88b9b2d87b9
https://github.com/venthur/gscholar/blob/f2f830c4009a4e9a393a81675d5ad88b9b2d87b9/gscholar/gscholar.py#L43-L86
17,151
venthur/gscholar
gscholar/gscholar.py
get_links
def get_links(html, outformat): """Return a list of reference links from the html. Parameters ---------- html : str outformat : int the output format of the citations Returns ------- List[str] the links to the references """ if outformat == FORMAT_BIBTEX: refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.bib\?[^"]*)') elif outformat == FORMAT_ENDNOTE: refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.enw\?[^"]*)"') elif outformat == FORMAT_REFMAN: refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.ris\?[^"]*)"') elif outformat == FORMAT_WENXIANWANG: refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.ral\?[^"]*)"') reflist = refre.findall(html) # escape html entities reflist = [re.sub('&(%s);' % '|'.join(name2codepoint), lambda m: chr(name2codepoint[m.group(1)]), s) for s in reflist] return reflist
python
def get_links(html, outformat): """Return a list of reference links from the html. Parameters ---------- html : str outformat : int the output format of the citations Returns ------- List[str] the links to the references """ if outformat == FORMAT_BIBTEX: refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.bib\?[^"]*)') elif outformat == FORMAT_ENDNOTE: refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.enw\?[^"]*)"') elif outformat == FORMAT_REFMAN: refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.ris\?[^"]*)"') elif outformat == FORMAT_WENXIANWANG: refre = re.compile(r'<a href="https://scholar.googleusercontent.com(/scholar\.ral\?[^"]*)"') reflist = refre.findall(html) # escape html entities reflist = [re.sub('&(%s);' % '|'.join(name2codepoint), lambda m: chr(name2codepoint[m.group(1)]), s) for s in reflist] return reflist
[ "def", "get_links", "(", "html", ",", "outformat", ")", ":", "if", "outformat", "==", "FORMAT_BIBTEX", ":", "refre", "=", "re", ".", "compile", "(", "r'<a href=\"https://scholar.googleusercontent.com(/scholar\\.bib\\?[^\"]*)'", ")", "elif", "outformat", "==", "FORMAT_ENDNOTE", ":", "refre", "=", "re", ".", "compile", "(", "r'<a href=\"https://scholar.googleusercontent.com(/scholar\\.enw\\?[^\"]*)\"'", ")", "elif", "outformat", "==", "FORMAT_REFMAN", ":", "refre", "=", "re", ".", "compile", "(", "r'<a href=\"https://scholar.googleusercontent.com(/scholar\\.ris\\?[^\"]*)\"'", ")", "elif", "outformat", "==", "FORMAT_WENXIANWANG", ":", "refre", "=", "re", ".", "compile", "(", "r'<a href=\"https://scholar.googleusercontent.com(/scholar\\.ral\\?[^\"]*)\"'", ")", "reflist", "=", "refre", ".", "findall", "(", "html", ")", "# escape html entities", "reflist", "=", "[", "re", ".", "sub", "(", "'&(%s);'", "%", "'|'", ".", "join", "(", "name2codepoint", ")", ",", "lambda", "m", ":", "chr", "(", "name2codepoint", "[", "m", ".", "group", "(", "1", ")", "]", ")", ",", "s", ")", "for", "s", "in", "reflist", "]", "return", "reflist" ]
Return a list of reference links from the html. Parameters ---------- html : str outformat : int the output format of the citations Returns ------- List[str] the links to the references
[ "Return", "a", "list", "of", "reference", "links", "from", "the", "html", "." ]
f2f830c4009a4e9a393a81675d5ad88b9b2d87b9
https://github.com/venthur/gscholar/blob/f2f830c4009a4e9a393a81675d5ad88b9b2d87b9/gscholar/gscholar.py#L89-L116
17,152
venthur/gscholar
gscholar/gscholar.py
convert_pdf_to_txt
def convert_pdf_to_txt(pdf, startpage=None): """Convert a pdf file to text and return the text. This method requires pdftotext to be installed. Parameters ---------- pdf : str path to pdf file startpage : int, optional the first page we try to convert Returns ------- str the converted text """ if startpage is not None: startpageargs = ['-f', str(startpage)] else: startpageargs = [] stdout = subprocess.Popen(["pdftotext", "-q"] + startpageargs + [pdf, "-"], stdout=subprocess.PIPE).communicate()[0] # python2 and 3 if not isinstance(stdout, str): stdout = stdout.decode() return stdout
python
def convert_pdf_to_txt(pdf, startpage=None): """Convert a pdf file to text and return the text. This method requires pdftotext to be installed. Parameters ---------- pdf : str path to pdf file startpage : int, optional the first page we try to convert Returns ------- str the converted text """ if startpage is not None: startpageargs = ['-f', str(startpage)] else: startpageargs = [] stdout = subprocess.Popen(["pdftotext", "-q"] + startpageargs + [pdf, "-"], stdout=subprocess.PIPE).communicate()[0] # python2 and 3 if not isinstance(stdout, str): stdout = stdout.decode() return stdout
[ "def", "convert_pdf_to_txt", "(", "pdf", ",", "startpage", "=", "None", ")", ":", "if", "startpage", "is", "not", "None", ":", "startpageargs", "=", "[", "'-f'", ",", "str", "(", "startpage", ")", "]", "else", ":", "startpageargs", "=", "[", "]", "stdout", "=", "subprocess", ".", "Popen", "(", "[", "\"pdftotext\"", ",", "\"-q\"", "]", "+", "startpageargs", "+", "[", "pdf", ",", "\"-\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", "# python2 and 3", "if", "not", "isinstance", "(", "stdout", ",", "str", ")", ":", "stdout", "=", "stdout", ".", "decode", "(", ")", "return", "stdout" ]
Convert a pdf file to text and return the text. This method requires pdftotext to be installed. Parameters ---------- pdf : str path to pdf file startpage : int, optional the first page we try to convert Returns ------- str the converted text
[ "Convert", "a", "pdf", "file", "to", "text", "and", "return", "the", "text", "." ]
f2f830c4009a4e9a393a81675d5ad88b9b2d87b9
https://github.com/venthur/gscholar/blob/f2f830c4009a4e9a393a81675d5ad88b9b2d87b9/gscholar/gscholar.py#L119-L146
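A usage sketch, assuming convert_pdf_to_txt from above is in scope, the pdftotext binary is installed, and the filename is hypothetical:

text = convert_pdf_to_txt('paper.pdf', startpage=2)  # skip a cover page
print(text[:200])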
17,153
venthur/gscholar
gscholar/gscholar.py
pdflookup
def pdflookup(pdf, allresults, outformat, startpage=None): """Look a pdf up on google scholar and return bibtex items. Parameters ---------- pdf : str path to the pdf file allresults : bool return all results or only the first (i.e. best one) outformat : int the output format of the citations startpage : int first page to start reading from Returns ------- List[str] the list with citations """ txt = convert_pdf_to_txt(pdf, startpage) # remove all non alphanumeric characters txt = re.sub("\W", " ", txt) words = txt.strip().split()[:20] gsquery = " ".join(words) bibtexlist = query(gsquery, outformat, allresults) return bibtexlist
python
def pdflookup(pdf, allresults, outformat, startpage=None): """Look a pdf up on google scholar and return bibtex items. Parameters ---------- pdf : str path to the pdf file allresults : bool return all results or only the first (i.e. best one) outformat : int the output format of the citations startpage : int first page to start reading from Returns ------- List[str] the list with citations """ txt = convert_pdf_to_txt(pdf, startpage) # remove all non alphanumeric characters txt = re.sub("\W", " ", txt) words = txt.strip().split()[:20] gsquery = " ".join(words) bibtexlist = query(gsquery, outformat, allresults) return bibtexlist
[ "def", "pdflookup", "(", "pdf", ",", "allresults", ",", "outformat", ",", "startpage", "=", "None", ")", ":", "txt", "=", "convert_pdf_to_txt", "(", "pdf", ",", "startpage", ")", "# remove all non alphanumeric characters", "txt", "=", "re", ".", "sub", "(", "\"\\W\"", ",", "\" \"", ",", "txt", ")", "words", "=", "txt", ".", "strip", "(", ")", ".", "split", "(", ")", "[", ":", "20", "]", "gsquery", "=", "\" \"", ".", "join", "(", "words", ")", "bibtexlist", "=", "query", "(", "gsquery", ",", "outformat", ",", "allresults", ")", "return", "bibtexlist" ]
Look a pdf up on google scholar and return bibtex items. Parameters ---------- pdf : str path to the pdf file allresults : bool return all results or only the first (i.e. best one) outformat : int the output format of the citations startpage : int first page to start reading from Returns ------- List[str] the list with citations
[ "Look", "a", "pdf", "up", "on", "google", "scholar", "and", "return", "bibtex", "items", "." ]
f2f830c4009a4e9a393a81675d5ad88b9b2d87b9
https://github.com/venthur/gscholar/blob/f2f830c4009a4e9a393a81675d5ad88b9b2d87b9/gscholar/gscholar.py#L149-L175
17,154
venthur/gscholar
gscholar/gscholar.py
_get_bib_element
def _get_bib_element(bibitem, element): """Return element from bibitem or None. Parameters ---------- bibitem : str a bibtex entry element : str the name of the bibtex field to extract Returns ------- str or None the value of the field, or None if the field is not found """ lst = [i.strip() for i in bibitem.split("\n")] for i in lst: if i.startswith(element): value = i.split("=", 1)[-1] value = value.strip() while value.endswith(','): value = value[:-1] while value.startswith('{') or value.startswith('"'): value = value[1:-1] return value return None
python
def _get_bib_element(bibitem, element): """Return element from bibitem or None. Parameters ---------- bibitem : str a bibtex entry element : str the name of the bibtex field to extract Returns ------- str or None the value of the field, or None if the field is not found """ lst = [i.strip() for i in bibitem.split("\n")] for i in lst: if i.startswith(element): value = i.split("=", 1)[-1] value = value.strip() while value.endswith(','): value = value[:-1] while value.startswith('{') or value.startswith('"'): value = value[1:-1] return value return None
[ "def", "_get_bib_element", "(", "bibitem", ",", "element", ")", ":", "lst", "=", "[", "i", ".", "strip", "(", ")", "for", "i", "in", "bibitem", ".", "split", "(", "\"\\n\"", ")", "]", "for", "i", "in", "lst", ":", "if", "i", ".", "startswith", "(", "element", ")", ":", "value", "=", "i", ".", "split", "(", "\"=\"", ",", "1", ")", "[", "-", "1", "]", "value", "=", "value", ".", "strip", "(", ")", "while", "value", ".", "endswith", "(", "','", ")", ":", "value", "=", "value", "[", ":", "-", "1", "]", "while", "value", ".", "startswith", "(", "'{'", ")", "or", "value", ".", "startswith", "(", "'\"'", ")", ":", "value", "=", "value", "[", "1", ":", "-", "1", "]", "return", "value", "return", "None" ]
Return element from bibitem or None. Parameters ---------- bibitem : str a bibtex entry element : str the name of the bibtex field to extract Returns ------- str or None the value of the field, or None if the field is not found
[ "Return", "element", "from", "bibitem", "or", "None", "." ]
f2f830c4009a4e9a393a81675d5ad88b9b2d87b9
https://github.com/venthur/gscholar/blob/f2f830c4009a4e9a393a81675d5ad88b9b2d87b9/gscholar/gscholar.py#L178-L200
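A worked example of the field extraction, using a made-up bibtex entry and assuming _get_bib_element from above is in scope:

bib = """@article{doe2020,
  author = {Doe, John},
  title = "An Example",
  year = {2020},
}"""

print(_get_bib_element(bib, 'year'))    # '2020' (braces and trailing comma stripped)
print(_get_bib_element(bib, 'author'))  # 'Doe, John'
print(_get_bib_element(bib, 'month'))   # None, since no line starts with 'month'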
17,155
venthur/gscholar
gscholar/gscholar.py
rename_file
def rename_file(pdf, bibitem): """Attempt to rename pdf according to bibitem. """ year = _get_bib_element(bibitem, "year") author = _get_bib_element(bibitem, "author") if author: author = author.split(",")[0] title = _get_bib_element(bibitem, "title") l = [i for i in (year, author, title) if i] filename = "-".join(l) + ".pdf" newfile = pdf.replace(os.path.basename(pdf), filename) logger.info('Renaming {in_} to {out}'.format(in_=pdf, out=newfile)) os.rename(pdf, newfile)
python
def rename_file(pdf, bibitem): """Attempt to rename pdf according to bibitem. """ year = _get_bib_element(bibitem, "year") author = _get_bib_element(bibitem, "author") if author: author = author.split(",")[0] title = _get_bib_element(bibitem, "title") l = [i for i in (year, author, title) if i] filename = "-".join(l) + ".pdf" newfile = pdf.replace(os.path.basename(pdf), filename) logger.info('Renaming {in_} to {out}'.format(in_=pdf, out=newfile)) os.rename(pdf, newfile)
[ "def", "rename_file", "(", "pdf", ",", "bibitem", ")", ":", "year", "=", "_get_bib_element", "(", "bibitem", ",", "\"year\"", ")", "author", "=", "_get_bib_element", "(", "bibitem", ",", "\"author\"", ")", "if", "author", ":", "author", "=", "author", ".", "split", "(", "\",\"", ")", "[", "0", "]", "title", "=", "_get_bib_element", "(", "bibitem", ",", "\"title\"", ")", "l", "=", "[", "i", "for", "i", "in", "(", "year", ",", "author", ",", "title", ")", "if", "i", "]", "filename", "=", "\"-\"", ".", "join", "(", "l", ")", "+", "\".pdf\"", "newfile", "=", "pdf", ".", "replace", "(", "os", ".", "path", ".", "basename", "(", "pdf", ")", ",", "filename", ")", "logger", ".", "info", "(", "'Renaming {in_} to {out}'", ".", "format", "(", "in_", "=", "pdf", ",", "out", "=", "newfile", ")", ")", "os", ".", "rename", "(", "pdf", ",", "newfile", ")" ]
Attempt to rename pdf according to bibitem.
[ "Attempt", "to", "rename", "pdf", "according", "to", "bibitem", "." ]
f2f830c4009a4e9a393a81675d5ad88b9b2d87b9
https://github.com/venthur/gscholar/blob/f2f830c4009a4e9a393a81675d5ad88b9b2d87b9/gscholar/gscholar.py#L203-L216
17,156
greedo/python-xbrl
xbrl/xbrl.py
soup_maker
def soup_maker(fh): """ Takes a file handle and returns BeautifulSoup""" try: from bs4 import BeautifulSoup soup = BeautifulSoup(fh, "lxml") for tag in soup.find_all(): tag.name = tag.name.lower() except ImportError: from BeautifulSoup import BeautifulStoneSoup soup = BeautifulStoneSoup(fh) return soup
python
def soup_maker(fh): """ Takes a file handle and returns BeautifulSoup""" try: from bs4 import BeautifulSoup soup = BeautifulSoup(fh, "lxml") for tag in soup.find_all(): tag.name = tag.name.lower() except ImportError: from BeautifulSoup import BeautifulStoneSoup soup = BeautifulStoneSoup(fh) return soup
[ "def", "soup_maker", "(", "fh", ")", ":", "try", ":", "from", "bs4", "import", "BeautifulSoup", "soup", "=", "BeautifulSoup", "(", "fh", ",", "\"lxml\"", ")", "for", "tag", "in", "soup", ".", "find_all", "(", ")", ":", "tag", ".", "name", "=", "tag", ".", "name", ".", "lower", "(", ")", "except", "ImportError", ":", "from", "BeautifulSoup", "import", "BeautifulStoneSoup", "soup", "=", "BeautifulStoneSoup", "(", "fh", ")", "return", "soup" ]
Takes a file handle and returns BeautifulSoup
[ "Takes", "a", "file", "handler", "returns", "BeautifulSoup" ]
e6baa4de61333f7fcead758a1072c21943563b49
https://github.com/greedo/python-xbrl/blob/e6baa4de61333f7fcead758a1072c21943563b49/xbrl/xbrl.py#L22-L32
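A usage sketch, assuming soup_maker from above is in scope and bs4 plus lxml are installed; lowercasing the tag names is what lets later lookups match uniformly:

from io import StringIO

fh = StringIO('<XBRL><Context id="D1"/></XBRL>')
soup = soup_maker(fh)
print(soup.find('context'))  # lowercase lookup now matches the original <Context> tag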
17,157
greedo/python-xbrl
xbrl/xbrl.py
XBRLParser.parse
def parse(self, file_handle): """ parse is the main entry point for an XBRLParser. It takes a file handle. """ xbrl_obj = XBRL() # if no file handle was given create our own if not hasattr(file_handle, 'read'): file_handler = open(file_handle) else: file_handler = file_handle # Store the headers xbrl_file = XBRLPreprocessedFile(file_handler) xbrl = soup_maker(xbrl_file.fh) file_handler.close() xbrl_base = xbrl.find(name=re.compile("xbrl*:*")) if xbrl.find('xbrl') is None and xbrl_base is None: raise XBRLParserException('The xbrl file is empty!') # lookahead to see if we need a custom leading element lookahead = xbrl.find(name=re.compile("context", re.IGNORECASE | re.MULTILINE)).name if ":" in lookahead: self.xbrl_base = lookahead.split(":")[0] + ":" else: self.xbrl_base = "" return xbrl
python
def parse(self, file_handle): """ parse is the main entry point for an XBRLParser. It takes a file handle. """ xbrl_obj = XBRL() # if no file handle was given create our own if not hasattr(file_handle, 'read'): file_handler = open(file_handle) else: file_handler = file_handle # Store the headers xbrl_file = XBRLPreprocessedFile(file_handler) xbrl = soup_maker(xbrl_file.fh) file_handler.close() xbrl_base = xbrl.find(name=re.compile("xbrl*:*")) if xbrl.find('xbrl') is None and xbrl_base is None: raise XBRLParserException('The xbrl file is empty!') # lookahead to see if we need a custom leading element lookahead = xbrl.find(name=re.compile("context", re.IGNORECASE | re.MULTILINE)).name if ":" in lookahead: self.xbrl_base = lookahead.split(":")[0] + ":" else: self.xbrl_base = "" return xbrl
[ "def", "parse", "(", "self", ",", "file_handle", ")", ":", "xbrl_obj", "=", "XBRL", "(", ")", "# if no file handle was given create our own", "if", "not", "hasattr", "(", "file_handle", ",", "'read'", ")", ":", "file_handler", "=", "open", "(", "file_handle", ")", "else", ":", "file_handler", "=", "file_handle", "# Store the headers", "xbrl_file", "=", "XBRLPreprocessedFile", "(", "file_handler", ")", "xbrl", "=", "soup_maker", "(", "xbrl_file", ".", "fh", ")", "file_handler", ".", "close", "(", ")", "xbrl_base", "=", "xbrl", ".", "find", "(", "name", "=", "re", ".", "compile", "(", "\"xbrl*:*\"", ")", ")", "if", "xbrl", ".", "find", "(", "'xbrl'", ")", "is", "None", "and", "xbrl_base", "is", "None", ":", "raise", "XBRLParserException", "(", "'The xbrl file is empty!'", ")", "# lookahead to see if we need a custom leading element", "lookahead", "=", "xbrl", ".", "find", "(", "name", "=", "re", ".", "compile", "(", "\"context\"", ",", "re", ".", "IGNORECASE", "|", "re", ".", "MULTILINE", ")", ")", ".", "name", "if", "\":\"", "in", "lookahead", ":", "self", ".", "xbrl_base", "=", "lookahead", ".", "split", "(", "\":\"", ")", "[", "0", "]", "+", "\":\"", "else", ":", "self", ".", "xbrl_base", "=", "\"\"", "return", "xbrl" ]
parse is the main entry point for an XBRLParser. It takes a file handle.
[ "parse", "is", "the", "main", "entry", "point", "for", "an", "XBRLParser", ".", "It", "takes", "a", "file", "handle", "." ]
e6baa4de61333f7fcead758a1072c21943563b49
https://github.com/greedo/python-xbrl/blob/e6baa4de61333f7fcead758a1072c21943563b49/xbrl/xbrl.py#L54-L86
17,158
greedo/python-xbrl
xbrl/xbrl.py
XBRLParser.parseDEI
def parseDEI(self, xbrl, ignore_errors=0): """ Parse DEI from our XBRL soup and return a DEI object. """ dei_obj = DEI() if ignore_errors == 2: logging.basicConfig(filename='/tmp/xbrl.log', level=logging.ERROR, format='%(asctime)s %(levelname)s %(name)s %(message)s') logger = logging.getLogger(__name__) else: logger = None trading_symbol = xbrl.find_all(name=re.compile("(dei:tradingsymbol)", re.IGNORECASE | re.MULTILINE)) dei_obj.trading_symbol = \ self.data_processing(trading_symbol, xbrl, ignore_errors, logger, options={'type': 'String', 'no_context': True}) company_name = xbrl.find_all(name=re.compile("(dei:entityregistrantname)", re.IGNORECASE | re.MULTILINE)) dei_obj.company_name = \ self.data_processing(company_name, xbrl, ignore_errors, logger, options={'type': 'String', 'no_context': True}) shares_outstanding = xbrl.find_all(name=re.compile("(dei:entitycommonstocksharesoutstanding)", re.IGNORECASE | re.MULTILINE)) dei_obj.shares_outstanding = \ self.data_processing(shares_outstanding, xbrl, ignore_errors, logger, options={'type': 'Number', 'no_context': True}) public_float = xbrl.find_all(name=re.compile("(dei:entitypublicfloat)", re.IGNORECASE | re.MULTILINE)) dei_obj.public_float = \ self.data_processing(public_float, xbrl, ignore_errors, logger, options={'type': 'Number', 'no_context': True}) return dei_obj
python
def parseDEI(self, xbrl, ignore_errors=0): """ Parse DEI from our XBRL soup and return a DEI object. """ dei_obj = DEI() if ignore_errors == 2: logging.basicConfig(filename='/tmp/xbrl.log', level=logging.ERROR, format='%(asctime)s %(levelname)s %(name)s %(message)s') logger = logging.getLogger(__name__) else: logger = None trading_symbol = xbrl.find_all(name=re.compile("(dei:tradingsymbol)", re.IGNORECASE | re.MULTILINE)) dei_obj.trading_symbol = \ self.data_processing(trading_symbol, xbrl, ignore_errors, logger, options={'type': 'String', 'no_context': True}) company_name = xbrl.find_all(name=re.compile("(dei:entityregistrantname)", re.IGNORECASE | re.MULTILINE)) dei_obj.company_name = \ self.data_processing(company_name, xbrl, ignore_errors, logger, options={'type': 'String', 'no_context': True}) shares_outstanding = xbrl.find_all(name=re.compile("(dei:entitycommonstocksharesoutstanding)", re.IGNORECASE | re.MULTILINE)) dei_obj.shares_outstanding = \ self.data_processing(shares_outstanding, xbrl, ignore_errors, logger, options={'type': 'Number', 'no_context': True}) public_float = xbrl.find_all(name=re.compile("(dei:entitypublicfloat)", re.IGNORECASE | re.MULTILINE)) dei_obj.public_float = \ self.data_processing(public_float, xbrl, ignore_errors, logger, options={'type': 'Number', 'no_context': True}) return dei_obj
[ "def", "parseDEI", "(", "self", ",", "xbrl", ",", "ignore_errors", "=", "0", ")", ":", "dei_obj", "=", "DEI", "(", ")", "if", "ignore_errors", "==", "2", ":", "logging", ".", "basicConfig", "(", "filename", "=", "'/tmp/xbrl.log'", ",", "level", "=", "logging", ".", "ERROR", ",", "format", "=", "'%(asctime)s %(levelname)s %(name)s %(message)s'", ")", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "else", ":", "logger", "=", "None", "trading_symbol", "=", "xbrl", ".", "find_all", "(", "name", "=", "re", ".", "compile", "(", "\"(dei:tradingsymbol)\"", ",", "re", ".", "IGNORECASE", "|", "re", ".", "MULTILINE", ")", ")", "dei_obj", ".", "trading_symbol", "=", "self", ".", "data_processing", "(", "trading_symbol", ",", "xbrl", ",", "ignore_errors", ",", "logger", ",", "options", "=", "{", "'type'", ":", "'String'", ",", "'no_context'", ":", "True", "}", ")", "company_name", "=", "xbrl", ".", "find_all", "(", "name", "=", "re", ".", "compile", "(", "\"(dei:entityregistrantname)\"", ",", "re", ".", "IGNORECASE", "|", "re", ".", "MULTILINE", ")", ")", "dei_obj", ".", "company_name", "=", "self", ".", "data_processing", "(", "company_name", ",", "xbrl", ",", "ignore_errors", ",", "logger", ",", "options", "=", "{", "'type'", ":", "'String'", ",", "'no_context'", ":", "True", "}", ")", "shares_outstanding", "=", "xbrl", ".", "find_all", "(", "name", "=", "re", ".", "compile", "(", "\"(dei:entitycommonstocksharesoutstanding)\"", ",", "re", ".", "IGNORECASE", "|", "re", ".", "MULTILINE", ")", ")", "dei_obj", ".", "shares_outstanding", "=", "self", ".", "data_processing", "(", "shares_outstanding", ",", "xbrl", ",", "ignore_errors", ",", "logger", ",", "options", "=", "{", "'type'", ":", "'Number'", ",", "'no_context'", ":", "True", "}", ")", "public_float", "=", "xbrl", ".", "find_all", "(", "name", "=", "re", ".", "compile", "(", "\"(dei:entitypublicfloat)\"", ",", "re", ".", "IGNORECASE", "|", "re", ".", "MULTILINE", ")", ")", "dei_obj", ".", "public_float", "=", "self", ".", "data_processing", "(", "public_float", ",", "xbrl", ",", "ignore_errors", ",", "logger", ",", "options", "=", "{", "'type'", ":", "'Number'", ",", "'no_context'", ":", "True", "}", ")", "return", "dei_obj" ]
Parse DEI from our XBRL soup and return a DEI object.
[ "Parse", "DEI", "from", "our", "XBRL", "soup", "and", "return", "a", "DEI", "object", "." ]
e6baa4de61333f7fcead758a1072c21943563b49
https://github.com/greedo/python-xbrl/blob/e6baa4de61333f7fcead758a1072c21943563b49/xbrl/xbrl.py#L585-L633
17,159
greedo/python-xbrl
xbrl/xbrl.py
XBRLParser.parseCustom
def parseCustom(self, xbrl, ignore_errors=0): """ Parse company custom entities from XBRL and return a Custom object. """ custom_obj = Custom() custom_data = xbrl.find_all(re.compile('^((?!(us-gaap|dei|xbrll|xbrldi)).)*:\s*', re.IGNORECASE | re.MULTILINE)) elements = {} for data in custom_data: if XBRLParser().is_number(data.text): setattr(custom_obj, data.name.split(':')[1], data.text) return custom_obj
python
def parseCustom(self, xbrl, ignore_errors=0): """ Parse company custom entities from XBRL and return a Custom object. """ custom_obj = Custom() custom_data = xbrl.find_all(re.compile('^((?!(us-gaap|dei|xbrll|xbrldi)).)*:\s*', re.IGNORECASE | re.MULTILINE)) elements = {} for data in custom_data: if XBRLParser().is_number(data.text): setattr(custom_obj, data.name.split(':')[1], data.text) return custom_obj
[ "def", "parseCustom", "(", "self", ",", "xbrl", ",", "ignore_errors", "=", "0", ")", ":", "custom_obj", "=", "Custom", "(", ")", "custom_data", "=", "xbrl", ".", "find_all", "(", "re", ".", "compile", "(", "'^((?!(us-gaap|dei|xbrll|xbrldi)).)*:\\s*'", ",", "re", ".", "IGNORECASE", "|", "re", ".", "MULTILINE", ")", ")", "elements", "=", "{", "}", "for", "data", "in", "custom_data", ":", "if", "XBRLParser", "(", ")", ".", "is_number", "(", "data", ".", "text", ")", ":", "setattr", "(", "custom_obj", ",", "data", ".", "name", ".", "split", "(", "':'", ")", "[", "1", "]", ",", "data", ".", "text", ")", "return", "custom_obj" ]
Parse company custom entities from XBRL and return a Custom object.
[ "Parse", "company", "custom", "entities", "from", "XBRL", "and", "return", "an", "Custom", "object", "." ]
e6baa4de61333f7fcead758a1072c21943563b49
https://github.com/greedo/python-xbrl/blob/e6baa4de61333f7fcead758a1072c21943563b49/xbrl/xbrl.py#L636-L652
17,160
greedo/python-xbrl
xbrl/xbrl.py
XBRLParser.trim_decimals
def trim_decimals(s, precision=-3): """ Convert from scientific notation using precision """ encoded = s.encode('ascii', 'ignore') str_val = "" if six.PY3: str_val = str(encoded, encoding='ascii', errors='ignore')[:precision] else: # If precision is 0, this must be handled separately if precision == 0: str_val = str(encoded) else: str_val = str(encoded)[:precision] if len(str_val) > 0: return float(str_val) else: return 0
python
def trim_decimals(s, precision=-3): """ Convert from scientific notation using precision """ encoded = s.encode('ascii', 'ignore') str_val = "" if six.PY3: str_val = str(encoded, encoding='ascii', errors='ignore')[:precision] else: # If precision is 0, this must be handled separately if precision == 0: str_val = str(encoded) else: str_val = str(encoded)[:precision] if len(str_val) > 0: return float(str_val) else: return 0
[ "def", "trim_decimals", "(", "s", ",", "precision", "=", "-", "3", ")", ":", "encoded", "=", "s", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", "str_val", "=", "\"\"", "if", "six", ".", "PY3", ":", "str_val", "=", "str", "(", "encoded", ",", "encoding", "=", "'ascii'", ",", "errors", "=", "'ignore'", ")", "[", ":", "precision", "]", "else", ":", "# If precision is 0, this must be handled seperately", "if", "precision", "==", "0", ":", "str_val", "=", "str", "(", "encoded", ")", "else", ":", "str_val", "=", "str", "(", "encoded", ")", "[", ":", "precision", "]", "if", "len", "(", "str_val", ")", ">", "0", ":", "return", "float", "(", "str_val", ")", "else", ":", "return", "0" ]
Convert from scientific notation using precision
[ "Convert", "from", "scientific", "notation", "using", "precision" ]
e6baa4de61333f7fcead758a1072c21943563b49
https://github.com/greedo/python-xbrl/blob/e6baa4de61333f7fcead758a1072c21943563b49/xbrl/xbrl.py#L655-L672
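What the precision slice actually does on Python 3, assuming trim_decimals from above is in scope (it needs the six package):

print(trim_decimals('123456789', -3))  # 123456.0, the last three characters are cut
print(trim_decimals('42', 0))          # 0 on Python 3, since a [:0] slice empties the string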
17,161
greedo/python-xbrl
xbrl/xbrl.py
XBRLParser.data_processing
def data_processing(self, elements, xbrl, ignore_errors, logger, context_ids=[], **kwargs): """ Process an XBRL tag object and extract the correct value as stated by the context. """ options = kwargs.get('options', {'type': 'Number', 'no_context': False}) if options['type'] == 'String': if len(elements) > 0: return elements[0].text if options['no_context'] == True: if len(elements) > 0 and XBRLParser().is_number(elements[0].text): return elements[0].text try: # Extract the correct values by context correct_elements = [] for element in elements: std = element.attrs['contextref'] if std in context_ids: correct_elements.append(element) elements = correct_elements if len(elements) > 0 and XBRLParser().is_number(elements[0].text): decimals = elements[0].attrs['decimals'] if decimals is not None: attr_precision = decimals if xbrl.precision != 0 \ and xbrl.precision != attr_precision: xbrl.precision = attr_precision if elements: return XBRLParser().trim_decimals(elements[0].text, int(xbrl.precision)) else: return 0 else: return 0 except Exception as e: if ignore_errors == 0: raise XBRLParserException('value extraction error') elif ignore_errors == 1: return 0 elif ignore_errors == 2: logger.error(str(e) + " error at " + ''.join(elements[0].text))
python
def data_processing(self, elements, xbrl, ignore_errors, logger, context_ids=[], **kwargs): """ Process an XBRL tag object and extract the correct value as stated by the context. """ options = kwargs.get('options', {'type': 'Number', 'no_context': False}) if options['type'] == 'String': if len(elements) > 0: return elements[0].text if options['no_context'] == True: if len(elements) > 0 and XBRLParser().is_number(elements[0].text): return elements[0].text try: # Extract the correct values by context correct_elements = [] for element in elements: std = element.attrs['contextref'] if std in context_ids: correct_elements.append(element) elements = correct_elements if len(elements) > 0 and XBRLParser().is_number(elements[0].text): decimals = elements[0].attrs['decimals'] if decimals is not None: attr_precision = decimals if xbrl.precision != 0 \ and xbrl.precision != attr_precision: xbrl.precision = attr_precision if elements: return XBRLParser().trim_decimals(elements[0].text, int(xbrl.precision)) else: return 0 else: return 0 except Exception as e: if ignore_errors == 0: raise XBRLParserException('value extraction error') elif ignore_errors == 1: return 0 elif ignore_errors == 2: logger.error(str(e) + " error at " + ''.join(elements[0].text))
[ "def", "data_processing", "(", "self", ",", "elements", ",", "xbrl", ",", "ignore_errors", ",", "logger", ",", "context_ids", "=", "[", "]", ",", "*", "*", "kwargs", ")", ":", "options", "=", "kwargs", ".", "get", "(", "'options'", ",", "{", "'type'", ":", "'Number'", ",", "'no_context'", ":", "False", "}", ")", "if", "options", "[", "'type'", "]", "==", "'String'", ":", "if", "len", "(", "elements", ")", ">", "0", ":", "return", "elements", "[", "0", "]", ".", "text", "if", "options", "[", "'no_context'", "]", "==", "True", ":", "if", "len", "(", "elements", ")", ">", "0", "and", "XBRLParser", "(", ")", ".", "is_number", "(", "elements", "[", "0", "]", ".", "text", ")", ":", "return", "elements", "[", "0", "]", ".", "text", "try", ":", "# Extract the correct values by context", "correct_elements", "=", "[", "]", "for", "element", "in", "elements", ":", "std", "=", "element", ".", "attrs", "[", "'contextref'", "]", "if", "std", "in", "context_ids", ":", "correct_elements", ".", "append", "(", "element", ")", "elements", "=", "correct_elements", "if", "len", "(", "elements", ")", ">", "0", "and", "XBRLParser", "(", ")", ".", "is_number", "(", "elements", "[", "0", "]", ".", "text", ")", ":", "decimals", "=", "elements", "[", "0", "]", ".", "attrs", "[", "'decimals'", "]", "if", "decimals", "is", "not", "None", ":", "attr_precision", "=", "decimals", "if", "xbrl", ".", "precision", "!=", "0", "and", "xbrl", ".", "precison", "!=", "attr_precision", ":", "xbrl", ".", "precision", "=", "attr_precision", "if", "elements", ":", "return", "XBRLParser", "(", ")", ".", "trim_decimals", "(", "elements", "[", "0", "]", ".", "text", ",", "int", "(", "xbrl", ".", "precision", ")", ")", "else", ":", "return", "0", "else", ":", "return", "0", "except", "Exception", "as", "e", ":", "if", "ignore_errors", "==", "0", ":", "raise", "XBRLParserException", "(", "'value extraction error'", ")", "elif", "ignore_errors", "==", "1", ":", "return", "0", "elif", "ignore_errors", "==", "2", ":", "logger", ".", "error", "(", "str", "(", "e", ")", "+", "\" error at \"", "+", "''", ".", "join", "(", "elements", "[", "0", "]", ".", "text", ")", ")" ]
Process an XBRL tag object and extract the correct value as stated by the context.
[ "Process", "a", "XBRL", "tag", "object", "and", "extract", "the", "correct", "value", "as", "stated", "by", "the", "context", "." ]
e6baa4de61333f7fcead758a1072c21943563b49
https://github.com/greedo/python-xbrl/blob/e6baa4de61333f7fcead758a1072c21943563b49/xbrl/xbrl.py#L686-L739
17,162
EliotBerriot/django-dynamic-preferences
dynamic_preferences/managers.py
PreferencesManager.by_name
def by_name(self): """Return a dictionary with preference identifiers and values, but without the section name in the identifier""" return {key.split(preferences_settings.SECTION_KEY_SEPARATOR)[-1]: value for key, value in self.all().items()}
python
def by_name(self): """Return a dictionary with preference identifiers and values, but without the section name in the identifier""" return {key.split(preferences_settings.SECTION_KEY_SEPARATOR)[-1]: value for key, value in self.all().items()}
[ "def", "by_name", "(", "self", ")", ":", "return", "{", "key", ".", "split", "(", "preferences_settings", ".", "SECTION_KEY_SEPARATOR", ")", "[", "-", "1", "]", ":", "value", "for", "key", ",", "value", "in", "self", ".", "all", "(", ")", ".", "items", "(", ")", "}" ]
Return a dictionary with preference identifiers and values, but without the section name in the identifier
[ "Return", "a", "dictionary", "with", "preferences", "identifiers", "and", "values", "but", "without", "the", "section", "name", "in", "the", "identifier" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/managers.py#L47-L49
17,163
EliotBerriot/django-dynamic-preferences
dynamic_preferences/managers.py
PreferencesManager.get_cache_key
def get_cache_key(self, section, name): """Return the cache key corresponding to a given preference""" if not self.instance: return 'dynamic_preferences_{0}_{1}_{2}'.format(self.model.__name__, section, name) return 'dynamic_preferences_{0}_{1}_{2}_{3}'.format(self.model.__name__, self.instance.pk, section, name)
python
def get_cache_key(self, section, name): """Return the cache key corresponding to a given preference""" if not self.instance: return 'dynamic_preferences_{0}_{1}_{2}'.format(self.model.__name__, section, name) return 'dynamic_preferences_{0}_{1}_{2}_{3}'.format(self.model.__name__, self.instance.pk, section, name)
[ "def", "get_cache_key", "(", "self", ",", "section", ",", "name", ")", ":", "if", "not", "self", ".", "instance", ":", "return", "'dynamic_preferences_{0}_{1}_{2}'", ".", "format", "(", "self", ".", "model", ".", "__name__", ",", "section", ",", "name", ")", "return", "'dynamic_preferences_{0}_{1}_{2}_{3}'", ".", "format", "(", "self", ".", "model", ".", "__name__", ",", "self", ".", "instance", ".", "pk", ",", "section", ",", "name", ",", "self", ".", "instance", ".", "pk", ")" ]
Return the cache key corresponding to a given preference
[ "Return", "the", "cache", "key", "corresponding", "to", "a", "given", "preference" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/managers.py#L54-L58
17,164
EliotBerriot/django-dynamic-preferences
dynamic_preferences/managers.py
PreferencesManager.from_cache
def from_cache(self, section, name): """Return a preference raw_value from cache""" cached_value = self.cache.get( self.get_cache_key(section, name), CachedValueNotFound) if cached_value is CachedValueNotFound: raise CachedValueNotFound if cached_value == preferences_settings.CACHE_NONE_VALUE: cached_value = None return self.registry.get( section=section, name=name).serializer.deserialize(cached_value)
python
def from_cache(self, section, name): """Return a preference raw_value from cache""" cached_value = self.cache.get( self.get_cache_key(section, name), CachedValueNotFound) if cached_value is CachedValueNotFound: raise CachedValueNotFound if cached_value == preferences_settings.CACHE_NONE_VALUE: cached_value = None return self.registry.get( section=section, name=name).serializer.deserialize(cached_value)
[ "def", "from_cache", "(", "self", ",", "section", ",", "name", ")", ":", "cached_value", "=", "self", ".", "cache", ".", "get", "(", "self", ".", "get_cache_key", "(", "section", ",", "name", ")", ",", "CachedValueNotFound", ")", "if", "cached_value", "is", "CachedValueNotFound", ":", "raise", "CachedValueNotFound", "if", "cached_value", "==", "preferences_settings", ".", "CACHE_NONE_VALUE", ":", "cached_value", "=", "None", "return", "self", ".", "registry", ".", "get", "(", "section", "=", "section", ",", "name", "=", "name", ")", ".", "serializer", ".", "deserialize", "(", "cached_value", ")" ]
Return a preference raw_value from cache
[ "Return", "a", "preference", "raw_value", "from", "cache" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/managers.py#L60-L71
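The CachedValueNotFound sentinel above distinguishes "key missing" from a legitimately cached None; the same pattern in miniature, with the dummy placeholder value assumed:

class CachedValueNotFound(Exception):
    pass

CACHE_NONE_VALUE = '__none__'  # placeholder stored instead of None (actual value assumed)
cache = {'general__title': CACHE_NONE_VALUE}

def from_cache(key):
    value = cache.get(key, CachedValueNotFound)
    if value is CachedValueNotFound:
        raise CachedValueNotFound  # cold cache: the caller falls back to the database
    return None if value == CACHE_NONE_VALUE else value

print(from_cache('general__title'))  # None, served from cache without a database hit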
17,165
EliotBerriot/django-dynamic-preferences
dynamic_preferences/managers.py
PreferencesManager.many_from_cache
def many_from_cache(self, preferences): """ Return cached value for given preferences missing preferences will be skipped """ keys = { p: self.get_cache_key(p.section.name, p.name) for p in preferences } cached = self.cache.get_many(list(keys.values())) for k, v in cached.items(): # we replace dummy cached values by None here, if needed if v == preferences_settings.CACHE_NONE_VALUE: cached[k] = None # we have to remap returned value since the underlying cached keys # are not usable for an end user return { p.identifier(): p.serializer.deserialize(cached[k]) for p, k in keys.items() if k in cached }
python
def many_from_cache(self, preferences): """ Return cached value for given preferences missing preferences will be skipped """ keys = { p: self.get_cache_key(p.section.name, p.name) for p in preferences } cached = self.cache.get_many(list(keys.values())) for k, v in cached.items(): # we replace dummy cached values by None here, if needed if v == preferences_settings.CACHE_NONE_VALUE: cached[k] = None # we have to remap returned value since the underlying cached keys # are not usable for an end user return { p.identifier(): p.serializer.deserialize(cached[k]) for p, k in keys.items() if k in cached }
[ "def", "many_from_cache", "(", "self", ",", "preferences", ")", ":", "keys", "=", "{", "p", ":", "self", ".", "get_cache_key", "(", "p", ".", "section", ".", "name", ",", "p", ".", "name", ")", "for", "p", "in", "preferences", "}", "cached", "=", "self", ".", "cache", ".", "get_many", "(", "list", "(", "keys", ".", "values", "(", ")", ")", ")", "for", "k", ",", "v", "in", "cached", ".", "items", "(", ")", ":", "# we replace dummy cached values by None here, if needed", "if", "v", "==", "preferences_settings", ".", "CACHE_NONE_VALUE", ":", "cached", "[", "k", "]", "=", "None", "# we have to remap returned value since the underlying cached keys", "# are not usable for an end user", "return", "{", "p", ".", "identifier", "(", ")", ":", "p", ".", "serializer", ".", "deserialize", "(", "cached", "[", "k", "]", ")", "for", "p", ",", "k", "in", "keys", ".", "items", "(", ")", "if", "k", "in", "cached", "}" ]
Return cached value for given preferences
missing preferences will be skipped
[ "Return", "cached", "value", "for", "given", "preferences", "missing", "preferences", "will", "be", "skipped" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/managers.py#L73-L95
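A note on the `CACHE_NONE_VALUE` trick used above: a cached `None` is indistinguishable from a cache miss, so the manager stores a dummy sentinel string instead and decodes it on the way out. A minimal, self-contained sketch of the same pattern — the dict-backed cache, sentinel value, and key names are illustrative, not the library's API:

```python
CACHE_NONE_VALUE = "__none__"  # hypothetical sentinel standing in for None

cache = {"pref:a": "1", "pref:b": CACHE_NONE_VALUE}

def many_from_cache(keys):
    # fetch everything that exists in one pass, then decode the sentinel
    found = {k: cache[k] for k in keys if k in cache}
    return {k: (None if v == CACHE_NONE_VALUE else v) for k, v in found.items()}

print(many_from_cache(["pref:a", "pref:b", "pref:missing"]))
# {'pref:a': '1', 'pref:b': None} -- the missing key is silently skipped
```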
17,166
EliotBerriot/django-dynamic-preferences
dynamic_preferences/managers.py
PreferencesManager.all
def all(self):
    """Return a dictionary containing all preferences by section
    Loaded from cache or from db in case of cold cache
    """
    if not preferences_settings.ENABLE_CACHE:
        return self.load_from_db()

    preferences = self.registry.preferences()

    # first we hit the cache once for all existing preferences
    a = self.many_from_cache(preferences)
    if len(a) == len(preferences):
        return a  # avoid database hit if not necessary

    # then we fill those that miss, but exist in the database
    # (just hit the database for all of them, filtering is complicated, and
    # in most cases you'd need to grab the majority of them anyway)
    a.update(self.load_from_db(cache=True))

    return a
python
[ "def", "all", "(", "self", ")", ":", "if", "not", "preferences_settings", ".", "ENABLE_CACHE", ":", "return", "self", ".", "load_from_db", "(", ")", "preferences", "=", "self", ".", "registry", ".", "preferences", "(", ")", "# first we hit the cache once for all existing preferences", "a", "=", "self", ".", "many_from_cache", "(", "preferences", ")", "if", "len", "(", "a", ")", "==", "len", "(", "preferences", ")", ":", "return", "a", "# avoid database hit if not necessary", "# then we fill those that miss, but exist in the database", "# (just hit the database for all of them, filtering is complicated, and", "# in most cases you'd need to grab the majority of them anyway)", "a", ".", "update", "(", "self", ".", "load_from_db", "(", "cache", "=", "True", ")", ")", "return", "a" ]
Return a dictionary containing all preferences by section
Loaded from cache or from db in case of cold cache
[ "Return", "a", "dictionary", "containing", "all", "preferences", "by", "section", "Loaded", "from", "cache", "or", "from", "db", "in", "case", "of", "cold", "cache" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/managers.py#L182-L200
17,167
EliotBerriot/django-dynamic-preferences
dynamic_preferences/managers.py
PreferencesManager.load_from_db
def load_from_db(self, cache=False):
    """Return a dictionary of preferences by section directly from DB"""
    a = {}
    db_prefs = {p.preference.identifier(): p for p in self.queryset}
    for preference in self.registry.preferences():
        try:
            db_pref = db_prefs[preference.identifier()]
        except KeyError:
            db_pref = self.create_db_pref(
                section=preference.section.name,
                name=preference.name,
                value=preference.get('default'))
        else:
            # cache if create_db_pref() hasn't already done so
            if cache:
                self.to_cache(db_pref)

        a[preference.identifier()] = db_pref.value

    return a
python
[ "def", "load_from_db", "(", "self", ",", "cache", "=", "False", ")", ":", "a", "=", "{", "}", "db_prefs", "=", "{", "p", ".", "preference", ".", "identifier", "(", ")", ":", "p", "for", "p", "in", "self", ".", "queryset", "}", "for", "preference", "in", "self", ".", "registry", ".", "preferences", "(", ")", ":", "try", ":", "db_pref", "=", "db_prefs", "[", "preference", ".", "identifier", "(", ")", "]", "except", "KeyError", ":", "db_pref", "=", "self", ".", "create_db_pref", "(", "section", "=", "preference", ".", "section", ".", "name", ",", "name", "=", "preference", ".", "name", ",", "value", "=", "preference", ".", "get", "(", "'default'", ")", ")", "else", ":", "# cache if create_db_pref() hasn't already done so", "if", "cache", ":", "self", ".", "to_cache", "(", "db_pref", ")", "a", "[", "preference", ".", "identifier", "(", ")", "]", "=", "db_pref", ".", "value", "return", "a" ]
Return a dictionary of preferences by section directly from DB
[ "Return", "a", "dictionary", "of", "preferences", "by", "section", "directly", "from", "DB" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/managers.py#L202-L221
17,168
EliotBerriot/django-dynamic-preferences
dynamic_preferences/api/serializers.py
PreferenceSerializer.validate_value
def validate_value(self, value):
    """
    We call validation from the underlying form field
    """
    field = self.instance.preference.setup_field()
    value = field.to_python(value)
    field.validate(value)
    field.run_validators(value)
    return value
python
[ "def", "validate_value", "(", "self", ",", "value", ")", ":", "field", "=", "self", ".", "instance", ".", "preference", ".", "setup_field", "(", ")", "value", "=", "field", ".", "to_python", "(", "value", ")", "field", ".", "validate", "(", "value", ")", "field", ".", "run_validators", "(", "value", ")", "return", "value" ]
We call validation from the underlying form field
[ "We", "call", "validation", "from", "the", "underlying", "form", "field" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/api/serializers.py#L58-L66
17,169
EliotBerriot/django-dynamic-preferences
dynamic_preferences/serializers.py
StringSerializer.to_python
def to_python(cls, value, **kwargs):
    """String deserialisation just return the value as a string"""
    if not value:
        return ''
    try:
        return str(value)
    except:
        pass

    try:
        return value.encode('utf-8')
    except:
        pass

    raise cls.exception("Cannot deserialize value {0} tostring".format(value))
python
[ "def", "to_python", "(", "cls", ",", "value", ",", "*", "*", "kwargs", ")", ":", "if", "not", "value", ":", "return", "''", "try", ":", "return", "str", "(", "value", ")", "except", ":", "pass", "try", ":", "return", "value", ".", "encode", "(", "'utf-8'", ")", "except", ":", "pass", "raise", "cls", ".", "exception", "(", "\"Cannot deserialize value {0} tostring\"", ".", "format", "(", "value", ")", ")" ]
String deserialisation just return the value as a string
[ "String", "deserialisation", "just", "return", "the", "value", "as", "a", "string" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/serializers.py#L191-L203
17,170
EliotBerriot/django-dynamic-preferences
dynamic_preferences/registries.py
PreferenceModelsRegistry.get_by_instance
def get_by_instance(self, instance):
    """Return a preference registry using a model instance"""
    # we iterate throught registered preference models in order to get the instance class
    # and check if instance is and instance of this class
    for model, registry in self.items():
        try:
            instance_class = model._meta.get_field('instance').remote_field.model
            if isinstance(instance, instance_class):
                return registry
        except FieldDoesNotExist:
            # global preferences
            pass
    return None
python
[ "def", "get_by_instance", "(", "self", ",", "instance", ")", ":", "# we iterate throught registered preference models in order to get the instance class", "# and check if instance is and instance of this class", "for", "model", ",", "registry", "in", "self", ".", "items", "(", ")", ":", "try", ":", "instance_class", "=", "model", ".", "_meta", ".", "get_field", "(", "'instance'", ")", ".", "remote_field", ".", "model", "if", "isinstance", "(", "instance", ",", "instance_class", ")", ":", "return", "registry", "except", "FieldDoesNotExist", ":", "# global preferences", "pass", "return", "None" ]
Return a preference registry using a model instance
[ "Return", "a", "preference", "registry", "using", "a", "model", "instance" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/registries.py#L60-L72
17,171
EliotBerriot/django-dynamic-preferences
dynamic_preferences/registries.py
PreferenceRegistry.register
def register(self, preference_class):
    """
    Store the given preference class in the registry.

    :param preference_class: a :py:class:`prefs.Preference` subclass
    """
    preference = preference_class(registry=self)
    self.section_objects[preference.section.name] = preference.section

    try:
        self[preference.section.name][preference.name] = preference
    except KeyError:
        self[preference.section.name] = collections.OrderedDict()
        self[preference.section.name][preference.name] = preference

    return preference_class
python
[ "def", "register", "(", "self", ",", "preference_class", ")", ":", "preference", "=", "preference_class", "(", "registry", "=", "self", ")", "self", ".", "section_objects", "[", "preference", ".", "section", ".", "name", "]", "=", "preference", ".", "section", "try", ":", "self", "[", "preference", ".", "section", ".", "name", "]", "[", "preference", ".", "name", "]", "=", "preference", "except", "KeyError", ":", "self", "[", "preference", ".", "section", ".", "name", "]", "=", "collections", ".", "OrderedDict", "(", ")", "self", "[", "preference", ".", "section", ".", "name", "]", "[", "preference", ".", "name", "]", "=", "preference", "return", "preference_class" ]
Store the given preference class in the registry.

:param preference_class: a :py:class:`prefs.Preference` subclass
[ "Store", "the", "given", "preference", "class", "in", "the", "registry", "." ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/registries.py#L105-L121
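The try/except KeyError in `register()` lazily creates the per-section OrderedDict on first use. A minimal sketch of that pattern, with a plain dict standing in for the registry class (names are illustrative):

```python
import collections

registry = {}  # hypothetical stand-in: sections mapped to ordered preferences

def register(section, name, preference):
    # the first preference registered in a section creates the section dict
    try:
        registry[section][name] = preference
    except KeyError:
        registry[section] = collections.OrderedDict()
        registry[section][name] = preference

register("general", "site_title", "My site")
register("general", "maintenance", False)
print(registry)
# {'general': OrderedDict([('site_title', 'My site'), ('maintenance', False)])}
```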
17,172
EliotBerriot/django-dynamic-preferences
dynamic_preferences/registries.py
PreferenceRegistry.get
def get(self, name, section=None, fallback=False):
    """
    Returns a previously registered preference

    :param section: The section name under which the preference is registered
    :type section: str.
    :param name: The name of the preference. You can use dotted notation
        'section.name' if you want to avoid providing section param
    :type name: str.
    :param fallback: Should we return a dummy preference object instead
        of raising an error if no preference is found?
    :type name: bool.

    :return: a :py:class:`prefs.BasePreference` instance
    """
    # try dotted notation
    try:
        _section, name = name.split(
            preferences_settings.SECTION_KEY_SEPARATOR)
        return self[_section][name]
    except ValueError:
        pass

    # use standard params
    try:
        return self[section][name]
    except KeyError:
        if fallback:
            return self._fallback(section_name=section, pref_name=name)

    raise NotFoundInRegistry("No such preference in {0} with section={1} and name={2}".format(
        self.__class__.__name__, section, name))
python
[ "def", "get", "(", "self", ",", "name", ",", "section", "=", "None", ",", "fallback", "=", "False", ")", ":", "# try dotted notation", "try", ":", "_section", ",", "name", "=", "name", ".", "split", "(", "preferences_settings", ".", "SECTION_KEY_SEPARATOR", ")", "return", "self", "[", "_section", "]", "[", "name", "]", "except", "ValueError", ":", "pass", "# use standard params", "try", ":", "return", "self", "[", "section", "]", "[", "name", "]", "except", "KeyError", ":", "if", "fallback", ":", "return", "self", ".", "_fallback", "(", "section_name", "=", "section", ",", "pref_name", "=", "name", ")", "raise", "NotFoundInRegistry", "(", "\"No such preference in {0} with section={1} and name={2}\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "section", ",", "name", ")", ")" ]
Returns a previously registered preference

:param section: The section name under which the preference is registered
:type section: str.
:param name: The name of the preference. You can use dotted notation
    'section.name' if you want to avoid providing section param
:type name: str.
:param fallback: Should we return a dummy preference object instead
    of raising an error if no preference is found?
:type name: bool.

:return: a :py:class:`prefs.BasePreference` instance
[ "Returns", "a", "previously", "registered", "preference" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/registries.py#L146-L175
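`get()` first tries to split the name on the section separator and only falls back to the explicit `section` parameter when tuple unpacking raises ValueError. A self-contained sketch of that dispatch, assuming the library's usual `'__'` separator (the real value comes from `preferences_settings.SECTION_KEY_SEPARATOR`):

```python
SECTION_KEY_SEPARATOR = "__"  # assumed default; check the settings module

def split_identifier(name, section=None):
    # separated notation wins; a plain name raises ValueError on
    # unpacking and falls through to the explicit parameters
    try:
        section, name = name.split(SECTION_KEY_SEPARATOR)
    except ValueError:
        pass
    return section, name

print(split_identifier("general__site_title"))    # ('general', 'site_title')
print(split_identifier("site_title", "general"))  # ('general', 'site_title')
```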
17,173
EliotBerriot/django-dynamic-preferences
dynamic_preferences/registries.py
PreferenceRegistry.manager
def manager(self, **kwargs):
    """Return a preference manager that can be used to retrieve preference values"""
    return PreferencesManager(registry=self, model=self.preference_model, **kwargs)
python
[ "def", "manager", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "PreferencesManager", "(", "registry", "=", "self", ",", "model", "=", "self", ".", "preference_model", ",", "*", "*", "kwargs", ")" ]
Return a preference manager that can be used to retrieve preference values
[ "Return", "a", "preference", "manager", "that", "can", "be", "used", "to", "retrieve", "preference", "values" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/registries.py#L186-L188
17,174
EliotBerriot/django-dynamic-preferences
dynamic_preferences/registries.py
PreferenceRegistry.preferences
def preferences(self, section=None):
    """
    Return a list of all registered preferences
    or a list of preferences registered for a given section

    :param section: The section name under which the preference is registered
    :type section: str.
    :return: a list of :py:class:`prefs.BasePreference` instances
    """
    if section is None:
        return [self[section][name] for section in self for name in self[section]]
    else:
        return [self[section][name] for name in self[section]]
python
[ "def", "preferences", "(", "self", ",", "section", "=", "None", ")", ":", "if", "section", "is", "None", ":", "return", "[", "self", "[", "section", "]", "[", "name", "]", "for", "section", "in", "self", "for", "name", "in", "self", "[", "section", "]", "]", "else", ":", "return", "[", "self", "[", "section", "]", "[", "name", "]", "for", "name", "in", "self", "[", "section", "]", "]" ]
Return a list of all registered preferences
or a list of preferences registered for a given section

:param section: The section name under which the preference is registered
:type section: str.
:return: a list of :py:class:`prefs.BasePreference` instances
[ "Return", "a", "list", "of", "all", "registered", "preferences", "or", "a", "list", "of", "preferences", "registered", "for", "a", "given", "section" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/registries.py#L198-L211
17,175
EliotBerriot/django-dynamic-preferences
dynamic_preferences/api/viewsets.py
PreferenceViewSet.get_queryset
def get_queryset(self):
    """
    We just ensure preferences are actually populated before fetching
    from db
    """
    self.init_preferences()

    queryset = super(PreferenceViewSet, self).get_queryset()
    section = self.request.query_params.get('section')
    if section:
        queryset = queryset.filter(section=section)
    return queryset
python
[ "def", "get_queryset", "(", "self", ")", ":", "self", ".", "init_preferences", "(", ")", "queryset", "=", "super", "(", "PreferenceViewSet", ",", "self", ")", ".", "get_queryset", "(", ")", "section", "=", "self", ".", "request", ".", "query_params", ".", "get", "(", "'section'", ")", "if", "section", ":", "queryset", "=", "queryset", ".", "filter", "(", "section", "=", "section", ")", "return", "queryset" ]
We just ensure preferences are actually populated before fetching from db
[ "We", "just", "ensure", "preferences", "are", "actually", "populated", "before", "fetching", "from", "db" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/api/viewsets.py#L30-L42
17,176
EliotBerriot/django-dynamic-preferences
dynamic_preferences/api/viewsets.py
PreferenceViewSet.bulk
def bulk(self, request, *args, **kwargs):
    """
    Update multiple preferences at once

    this is a long method because we ensure everything is valid
    before actually persisting the changes
    """
    manager = self.get_manager()
    errors = {}
    preferences = []
    payload = request.data

    # first, we check updated preferences actually exists in the registry
    try:
        for identifier, value in payload.items():
            try:
                preferences.append(
                    self.queryset.model.registry.get(identifier))
            except exceptions.NotFoundInRegistry:
                errors[identifier] = 'invalid preference'
    except (TypeError, AttributeError):
        return Response('invalid payload', status=400)

    if errors:
        return Response(errors, status=400)

    # now, we generate an optimized Q objects to retrieve all matching
    # preferences at once from database
    queries = [
        Q(section=p.section.name, name=p.name)
        for p in preferences
    ]
    query = queries[0]
    for q in queries[1:]:
        query |= q
    preferences_qs = self.get_queryset().filter(query)

    # next, we generate a serializer for each database preference
    serializer_objects = []
    for p in preferences_qs:
        s = self.get_serializer_class()(
            p, data={'value': payload[p.preference.identifier()]})
        serializer_objects.append(s)

    validation_errors = {}

    # we check if any serializer is invalid
    for s in serializer_objects:
        if s.is_valid():
            continue
        validation_errors[s.instance.preference.identifier()] = s.errors

    if validation_errors:
        return Response(validation_errors, status=400)

    for s in serializer_objects:
        s.save()

    return Response(
        [s.data for s in serializer_objects],
        status=200,
    )
python
[ "def", "bulk", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "manager", "=", "self", ".", "get_manager", "(", ")", "errors", "=", "{", "}", "preferences", "=", "[", "]", "payload", "=", "request", ".", "data", "# first, we check updated preferences actually exists in the registry", "try", ":", "for", "identifier", ",", "value", "in", "payload", ".", "items", "(", ")", ":", "try", ":", "preferences", ".", "append", "(", "self", ".", "queryset", ".", "model", ".", "registry", ".", "get", "(", "identifier", ")", ")", "except", "exceptions", ".", "NotFoundInRegistry", ":", "errors", "[", "identifier", "]", "=", "'invalid preference'", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "return", "Response", "(", "'invalid payload'", ",", "status", "=", "400", ")", "if", "errors", ":", "return", "Response", "(", "errors", ",", "status", "=", "400", ")", "# now, we generate an optimized Q objects to retrieve all matching", "# preferences at once from database", "queries", "=", "[", "Q", "(", "section", "=", "p", ".", "section", ".", "name", ",", "name", "=", "p", ".", "name", ")", "for", "p", "in", "preferences", "]", "query", "=", "queries", "[", "0", "]", "for", "q", "in", "queries", "[", "1", ":", "]", ":", "query", "|=", "q", "preferences_qs", "=", "self", ".", "get_queryset", "(", ")", ".", "filter", "(", "query", ")", "# next, we generate a serializer for each database preference", "serializer_objects", "=", "[", "]", "for", "p", "in", "preferences_qs", ":", "s", "=", "self", ".", "get_serializer_class", "(", ")", "(", "p", ",", "data", "=", "{", "'value'", ":", "payload", "[", "p", ".", "preference", ".", "identifier", "(", ")", "]", "}", ")", "serializer_objects", ".", "append", "(", "s", ")", "validation_errors", "=", "{", "}", "# we check if any serializer is invalid", "for", "s", "in", "serializer_objects", ":", "if", "s", ".", "is_valid", "(", ")", ":", "continue", "validation_errors", "[", "s", ".", "instance", ".", "preference", ".", "identifier", "(", ")", "]", "=", "s", ".", "errors", "if", "validation_errors", ":", "return", "Response", "(", "validation_errors", ",", "status", "=", "400", ")", "for", "s", "in", "serializer_objects", ":", "s", ".", "save", "(", ")", "return", "Response", "(", "[", "s", ".", "data", "for", "s", "in", "serializer_objects", "]", ",", "status", "=", "200", ",", ")" ]
Update multiple preferences at once

this is a long method because we ensure everything is valid
before actually persisting the changes
[ "Update", "multiple", "preferences", "at", "once" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/api/viewsets.py#L82-L144
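The hand-rolled loop that ORs the Q objects together is a classic reduce over `|`. The sketch below shows the equivalence using sets, which also support `|`, so it runs without Django; with Django installed the same `reduce` call works on real `Q` objects:

```python
from functools import reduce
from operator import or_

# sets stand in for Django Q objects here; '|' means union either way
queries = [{1, 2}, {3}, {4}]
combined = reduce(or_, queries)
print(combined)  # the union of all three: {1, 2, 3, 4}
```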
17,177
EliotBerriot/django-dynamic-preferences
dynamic_preferences/models.py
BasePreferenceModel.set_value
def set_value(self, value):
    """
    Save serialized self.value to self.raw_value
    """
    self.raw_value = self.preference.serializer.serialize(value)
python
[ "def", "set_value", "(", "self", ",", "value", ")", ":", "self", ".", "raw_value", "=", "self", ".", "preference", ".", "serializer", ".", "serialize", "(", "value", ")" ]
Save serialized self.value to self.raw_value
[ "Save", "serialized", "self", ".", "value", "to", "self", ".", "raw_value" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/models.py#L49-L53
17,178
EliotBerriot/django-dynamic-preferences
dynamic_preferences/management/commands/checkpreferences.py
delete_preferences
def delete_preferences(queryset):
    """
    Delete preferences objects if they are not present in registry.
    Return a list of deleted objects
    """
    deleted = []

    # Iterate through preferences. If an error is raised when accessing preference object, just delete it
    for p in queryset:
        try:
            pref = p.registry.get(section=p.section, name=p.name, fallback=False)
        except NotFoundInRegistry:
            p.delete()
            deleted.append(p)

    return deleted
python
[ "def", "delete_preferences", "(", "queryset", ")", ":", "deleted", "=", "[", "]", "# Iterate through preferences. If an error is raised when accessing preference object, just delete it", "for", "p", "in", "queryset", ":", "try", ":", "pref", "=", "p", ".", "registry", ".", "get", "(", "section", "=", "p", ".", "section", ",", "name", "=", "p", ".", "name", ",", "fallback", "=", "False", ")", "except", "NotFoundInRegistry", ":", "p", ".", "delete", "(", ")", "deleted", ".", "append", "(", "p", ")", "return", "deleted" ]
Delete preferences objects if they are not present in registry.
Return a list of deleted objects
[ "Delete", "preferences", "objects", "if", "they", "are", "not", "present", "in", "registry", ".", "Return", "a", "list", "of", "deleted", "objects" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/management/commands/checkpreferences.py#L10-L24
17,179
EliotBerriot/django-dynamic-preferences
dynamic_preferences/types.py
create_deletion_handler
def create_deletion_handler(preference):
    """
    Will generate a dynamic handler to purge related preference
    on instance deletion
    """
    def delete_related_preferences(sender, instance, *args, **kwargs):
        queryset = preference.registry.preference_model.objects\
            .filter(name=preference.name, section=preference.section)
        related_preferences = queryset.filter(
            raw_value=preference.serializer.serialize(instance))
        related_preferences.delete()

    return delete_related_preferences
python
[ "def", "create_deletion_handler", "(", "preference", ")", ":", "def", "delete_related_preferences", "(", "sender", ",", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "queryset", "=", "preference", ".", "registry", ".", "preference_model", ".", "objects", ".", "filter", "(", "name", "=", "preference", ".", "name", ",", "section", "=", "preference", ".", "section", ")", "related_preferences", "=", "queryset", ".", "filter", "(", "raw_value", "=", "preference", ".", "serializer", ".", "serialize", "(", "instance", ")", ")", "related_preferences", ".", "delete", "(", ")", "return", "delete_related_preferences" ]
Will generate a dynamic handler to purge related preference on instance deletion
[ "Will", "generate", "a", "dynamic", "handler", "to", "purge", "related", "preference", "on", "instance", "deletion" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/types.py#L250-L262
17,180
EliotBerriot/django-dynamic-preferences
dynamic_preferences/types.py
BasePreferenceType.get_field_kwargs
def get_field_kwargs(self):
    """
    Return a dict of arguments to use as parameters for the field
    class instantiation.

    This will use :py:attr:`field_kwargs` as a starter, and use sensible
    defaults for a few attributes:

    - :py:attr:`instance.verbose_name` for the field label
    - :py:attr:`instance.help_text` for the field help text
    - :py:attr:`instance.widget` for the field widget
    - :py:attr:`instance.required` defines whether the value is required
    - :py:attr:`instance.initial` defines the initial value
    """
    kwargs = self.field_kwargs.copy()
    kwargs.setdefault('label', self.get('verbose_name'))
    kwargs.setdefault('help_text', self.get('help_text'))
    kwargs.setdefault('widget', self.get('widget'))
    kwargs.setdefault('required', self.get('required'))
    kwargs.setdefault('initial', self.initial)
    kwargs.setdefault('validators', [])
    kwargs['validators'].append(self.validate)
    return kwargs
python
[ "def", "get_field_kwargs", "(", "self", ")", ":", "kwargs", "=", "self", ".", "field_kwargs", ".", "copy", "(", ")", "kwargs", ".", "setdefault", "(", "'label'", ",", "self", ".", "get", "(", "'verbose_name'", ")", ")", "kwargs", ".", "setdefault", "(", "'help_text'", ",", "self", ".", "get", "(", "'help_text'", ")", ")", "kwargs", ".", "setdefault", "(", "'widget'", ",", "self", ".", "get", "(", "'widget'", ")", ")", "kwargs", ".", "setdefault", "(", "'required'", ",", "self", ".", "get", "(", "'required'", ")", ")", "kwargs", ".", "setdefault", "(", "'initial'", ",", "self", ".", "initial", ")", "kwargs", ".", "setdefault", "(", "'validators'", ",", "[", "]", ")", "kwargs", "[", "'validators'", "]", ".", "append", "(", "self", ".", "validate", ")", "return", "kwargs" ]
Return a dict of arguments to use as parameters for the field
class instantiation.

This will use :py:attr:`field_kwargs` as a starter, and use sensible
defaults for a few attributes:

- :py:attr:`instance.verbose_name` for the field label
- :py:attr:`instance.help_text` for the field help text
- :py:attr:`instance.widget` for the field widget
- :py:attr:`instance.required` defines whether the value is required
- :py:attr:`instance.initial` defines the initial value
[ "Return", "a", "dict", "of", "arguments", "to", "use", "as", "parameters", "for", "the", "field", "class", "instianciation", "." ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/types.py#L84-L106
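The repeated `setdefault()` calls mean caller-supplied `field_kwargs` always take precedence over the preference's own attributes; a short, runnable demonstration of that precedence rule:

```python
# setdefault only fills keys the caller did not supply
kwargs = {"label": "Custom label"}           # caller-provided
kwargs.setdefault("label", "Default label")  # ignored: key already exists
kwargs.setdefault("required", True)          # filled in
print(kwargs)  # {'label': 'Custom label', 'required': True}
```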
17,181
EliotBerriot/django-dynamic-preferences
dynamic_preferences/types.py
BasePreferenceType.get_api_field_data
def get_api_field_data(self):
    """
    Field data to serialize for use on front-end side,
    for example will include choices available for a choice field
    """
    field = self.setup_field()
    d = {
        'class': field.__class__.__name__,
        'widget': {
            'class': field.widget.__class__.__name__
        }
    }
    try:
        d['input_type'] = field.widget.input_type
    except AttributeError:
        # some widgets, such as Select do not have an input type
        # in django < 1.11
        d['input_type'] = None

    return d
python
[ "def", "get_api_field_data", "(", "self", ")", ":", "field", "=", "self", ".", "setup_field", "(", ")", "d", "=", "{", "'class'", ":", "field", ".", "__class__", ".", "__name__", ",", "'widget'", ":", "{", "'class'", ":", "field", ".", "widget", ".", "__class__", ".", "__name__", "}", "}", "try", ":", "d", "[", "'input_type'", "]", "=", "field", ".", "widget", ".", "input_type", "except", "AttributeError", ":", "# some widgets, such as Select do not have an input type", "# in django < 1.11", "d", "[", "'input_type'", "]", "=", "None", "return", "d" ]
Field data to serialize for use on front-end side, for example will include choices available for a choice field
[ "Field", "data", "to", "serialize", "for", "use", "on", "front", "-", "end", "side", "for", "example", "will", "include", "choices", "available", "for", "a", "choice", "field" ]
12eab4f17b960290525b215d954d1b5fb91199df
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/types.py#L120-L140
17,182
Woile/commitizen
commitizen/factory.py
commiter_factory
def commiter_factory(config: dict) -> BaseCommitizen:
    """Return the correct commitizen existing in the registry."""
    name: str = config["name"]
    try:
        _cz = registry[name](config)
    except KeyError:
        msg_error = (
            "The commiter has not been found in the system.\n\n"
            f"Try running 'pip install {name}'\n"
        )
        out.error(msg_error)
        raise SystemExit(NO_COMMITIZEN_FOUND)
    else:
        return _cz
python
[ "def", "commiter_factory", "(", "config", ":", "dict", ")", "->", "BaseCommitizen", ":", "name", ":", "str", "=", "config", "[", "\"name\"", "]", "try", ":", "_cz", "=", "registry", "[", "name", "]", "(", "config", ")", "except", "KeyError", ":", "msg_error", "=", "(", "\"The commiter has not been found in the system.\\n\\n\"", "f\"Try running 'pip install {name}'\\n\"", ")", "out", ".", "error", "(", "msg_error", ")", "raise", "SystemExit", "(", "NO_COMMITIZEN_FOUND", ")", "else", ":", "return", "_cz" ]
Return the correct commitizen existing in the registry.
[ "Return", "the", "correct", "commitizen", "existing", "in", "the", "registry", "." ]
bc54b9a4b6ad281620179a1ed417c01addde55f6
https://github.com/Woile/commitizen/blob/bc54b9a4b6ad281620179a1ed417c01addde55f6/commitizen/factory.py#L8-L21
17,183
Woile/commitizen
commitizen/bump.py
generate_version
def generate_version(
    current_version: str, increment: str, prerelease: Optional[str] = None
) -> Version:
    """Based on the given increment a proper semver will be generated.

    For now the rules and versioning scheme is based on
    python's PEP 0440.
    More info: https://www.python.org/dev/peps/pep-0440/

    Example:
        PATCH 1.0.0 -> 1.0.1
        MINOR 1.0.0 -> 1.1.0
        MAJOR 1.0.0 -> 2.0.0
    """
    pre_version = prerelease_generator(current_version, prerelease=prerelease)
    semver = semver_generator(current_version, increment=increment)
    # TODO: post version
    # TODO: dev version

    return Version(f"{semver}{pre_version}")
python
[ "def", "generate_version", "(", "current_version", ":", "str", ",", "increment", ":", "str", ",", "prerelease", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Version", ":", "pre_version", "=", "prerelease_generator", "(", "current_version", ",", "prerelease", "=", "prerelease", ")", "semver", "=", "semver_generator", "(", "current_version", ",", "increment", "=", "increment", ")", "# TODO: post version", "# TODO: dev version", "return", "Version", "(", "f\"{semver}{pre_version}\"", ")" ]
Based on the given increment a proper semver will be generated.

For now the rules and versioning scheme is based on
python's PEP 0440.
More info: https://www.python.org/dev/peps/pep-0440/

Example:
    PATCH 1.0.0 -> 1.0.1
    MINOR 1.0.0 -> 1.1.0
    MAJOR 1.0.0 -> 2.0.0
[ "Based", "on", "the", "given", "increment", "a", "proper", "semver", "will", "be", "generated", "." ]
bc54b9a4b6ad281620179a1ed417c01addde55f6
https://github.com/Woile/commitizen/blob/bc54b9a4b6ad281620179a1ed417c01addde55f6/commitizen/bump.py#L94-L112
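`prerelease_generator` and `semver_generator` are internal helpers not shown in this record; the sketch below only approximates the semver part of the bump for the three increments, using packaging's `Version` (the real implementation also handles prerelease tags):

```python
from packaging.version import Version

def bump(current: str, increment: str) -> Version:
    # release gives the (major, minor, patch) tuple of a PEP 440 version
    major, minor, patch = Version(current).release
    if increment == "MAJOR":
        return Version(f"{major + 1}.0.0")
    if increment == "MINOR":
        return Version(f"{major}.{minor + 1}.0")
    return Version(f"{major}.{minor}.{patch + 1}")  # PATCH

print(bump("1.0.0", "PATCH"))  # 1.0.1
print(bump("1.0.0", "MINOR"))  # 1.1.0
print(bump("1.0.0", "MAJOR"))  # 2.0.0
```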
17,184
Woile/commitizen
commitizen/bump.py
update_version_in_files
def update_version_in_files(current_version: str, new_version: str, files: list):
    """Change old version to the new one in every file given.

    Note that this version is not the tag formatted one.
    So for example, your tag could look like `v1.0.0` while your version in
    the package like `1.0.0`.
    """
    for filepath in files:
        # Read in the file
        with open(filepath, "r") as file:
            filedata = file.read()

        # Replace the target string
        filedata = filedata.replace(current_version, new_version)

        # Write the file out again
        with open(filepath, "w") as file:
            file.write(filedata)
python
[ "def", "update_version_in_files", "(", "current_version", ":", "str", ",", "new_version", ":", "str", ",", "files", ":", "list", ")", ":", "for", "filepath", "in", "files", ":", "# Read in the file", "with", "open", "(", "filepath", ",", "\"r\"", ")", "as", "file", ":", "filedata", "=", "file", ".", "read", "(", ")", "# Replace the target string", "filedata", "=", "filedata", ".", "replace", "(", "current_version", ",", "new_version", ")", "# Write the file out again", "with", "open", "(", "filepath", ",", "\"w\"", ")", "as", "file", ":", "file", ".", "write", "(", "filedata", ")" ]
Change old version to the new one in every file given.

Note that this version is not the tag formatted one.
So for example, your tag could look like `v1.0.0` while your version in
the package like `1.0.0`.
[ "Change", "old", "version", "to", "the", "new", "one", "in", "every", "file", "given", "." ]
bc54b9a4b6ad281620179a1ed417c01addde55f6
https://github.com/Woile/commitizen/blob/bc54b9a4b6ad281620179a1ed417c01addde55f6/commitizen/bump.py#L115-L132
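Because the function does a plain substring replace, any other occurrence of the old version string in a listed file is rewritten too, which is worth knowing before pointing it at large files. A self-contained run of the same read/replace/write cycle on a throwaway file:

```python
import os
import tempfile

# create a scratch file containing the old version string
with tempfile.NamedTemporaryFile("w", suffix=".toml", delete=False) as tmp:
    tmp.write('version = "1.0.0"\n')
    path = tmp.name

# read, replace the target string, write back -- same cycle as above
with open(path, "r") as f:
    data = f.read()
with open(path, "w") as f:
    f.write(data.replace("1.0.0", "1.0.1"))

print(open(path).read())  # version = "1.0.1"
os.remove(path)
```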
17,185
Woile/commitizen
commitizen/bump.py
create_tag
def create_tag(version: Union[Version, str], tag_format: Optional[str] = None):
    """The tag and the software version might be different.

    That's why this function exists.

    Example:

    | tag | version (PEP 0440) |
    | --- | ------- |
    | v0.9.0 | 0.9.0 |
    | ver1.0.0 | 1.0.0 |
    | ver1.0.0.a0 | 1.0.0a0 |
    """
    if isinstance(version, str):
        version = Version(version)

    if not tag_format:
        return version.public

    major, minor, patch = version.release
    prerelease = ""
    if version.is_prerelease:
        prerelease = f"{version.pre[0]}{version.pre[1]}"

    t = Template(tag_format)
    return t.safe_substitute(
        version=version, major=major, minor=minor, patch=patch, prerelease=prerelease
    )
python
[ "def", "create_tag", "(", "version", ":", "Union", "[", "Version", ",", "str", "]", ",", "tag_format", ":", "Optional", "[", "str", "]", "=", "None", ")", ":", "if", "isinstance", "(", "version", ",", "str", ")", ":", "version", "=", "Version", "(", "version", ")", "if", "not", "tag_format", ":", "return", "version", ".", "public", "major", ",", "minor", ",", "patch", "=", "version", ".", "release", "prerelease", "=", "\"\"", "if", "version", ".", "is_prerelease", ":", "prerelease", "=", "f\"{version.pre[0]}{version.pre[1]}\"", "t", "=", "Template", "(", "tag_format", ")", "return", "t", ".", "safe_substitute", "(", "version", "=", "version", ",", "major", "=", "major", ",", "minor", "=", "minor", ",", "patch", "=", "patch", ",", "prerelease", "=", "prerelease", ")" ]
The tag and the software version might be different.

That's why this function exists.

Example:

| tag | version (PEP 0440) |
| --- | ------- |
| v0.9.0 | 0.9.0 |
| ver1.0.0 | 1.0.0 |
| ver1.0.0.a0 | 1.0.0a0 |
[ "The", "tag", "and", "the", "software", "version", "might", "be", "different", "." ]
bc54b9a4b6ad281620179a1ed417c01addde55f6
https://github.com/Woile/commitizen/blob/bc54b9a4b6ad281620179a1ed417c01addde55f6/commitizen/bump.py#L135-L163
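`safe_substitute()` (unlike `Template.substitute`) leaves unknown placeholders intact rather than raising KeyError, which makes it forgiving of user-supplied tag formats. A runnable example of the substitution itself:

```python
from string import Template

t = Template("v$major.$minor.$patch$prerelease")
print(t.safe_substitute(major=1, minor=0, patch=0, prerelease=""))    # v1.0.0
print(t.safe_substitute(major=1, minor=0, patch=0, prerelease="a0"))  # v1.0.0a0
```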
17,186
Woile/commitizen
commitizen/config.py
read_pyproject_conf
def read_pyproject_conf(data: str) -> dict:
    """We expect to have a section in pyproject looking like

    ```
    [tool.commitizen]
    name = "cz_conventional_commits"
    ```
    """
    doc = parse(data)
    try:
        return doc["tool"]["commitizen"]
    except exceptions.NonExistentKey:
        return {}
python
[ "def", "read_pyproject_conf", "(", "data", ":", "str", ")", "->", "dict", ":", "doc", "=", "parse", "(", "data", ")", "try", ":", "return", "doc", "[", "\"tool\"", "]", "[", "\"commitizen\"", "]", "except", "exceptions", ".", "NonExistentKey", ":", "return", "{", "}" ]
We expect to have a section in pyproject looking like

```
[tool.commitizen]
name = "cz_conventional_commits"
```
[ "We", "expect", "to", "have", "a", "section", "in", "pyproject", "looking", "like" ]
bc54b9a4b6ad281620179a1ed417c01addde55f6
https://github.com/Woile/commitizen/blob/bc54b9a4b6ad281620179a1ed417c01addde55f6/commitizen/config.py#L39-L51
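A quick round trip of the parse shown above, assuming `parse` is tomlkit's parser (which is what the `NonExistentKey` exception suggests this module imports):

```python
from tomlkit import parse

data = """
[tool.commitizen]
name = "cz_conventional_commits"
"""
doc = parse(data)
print(dict(doc["tool"]["commitizen"]))  # {'name': 'cz_conventional_commits'}
```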
17,187
Woile/commitizen
commitizen/config.py
read_raw_parser_conf
def read_raw_parser_conf(data: str) -> dict:
    """We expect to have a section like this

    ```
    [commitizen]
    name = cz_jira
    files = [
        "commitizen/__version__.py",
        "pyproject.toml"
        ]  # this tab at the end is important
    ```
    """
    config = configparser.ConfigParser(allow_no_value=True)
    config.read_string(data)
    try:
        _data: dict = dict(config["commitizen"])
        if "files" in _data:
            files = _data["files"]
            _f = json.loads(files)
            _data.update({"files": _f})

        return _data
    except KeyError:
        return {}
python
[ "def", "read_raw_parser_conf", "(", "data", ":", "str", ")", "->", "dict", ":", "config", "=", "configparser", ".", "ConfigParser", "(", "allow_no_value", "=", "True", ")", "config", ".", "read_string", "(", "data", ")", "try", ":", "_data", ":", "dict", "=", "dict", "(", "config", "[", "\"commitizen\"", "]", ")", "if", "\"files\"", "in", "_data", ":", "files", "=", "_data", "[", "\"files\"", "]", "_f", "=", "json", ".", "loads", "(", "files", ")", "_data", ".", "update", "(", "{", "\"files\"", ":", "_f", "}", ")", "return", "_data", "except", "KeyError", ":", "return", "{", "}" ]
We expect to have a section like this

```
[commitizen]
name = cz_jira
files = [
    "commitizen/__version__.py",
    "pyproject.toml"
    ]  # this tab at the end is important
```
[ "We", "expect", "to", "have", "a", "section", "like", "this" ]
bc54b9a4b6ad281620179a1ed417c01addde55f6
https://github.com/Woile/commitizen/blob/bc54b9a4b6ad281620179a1ed417c01addde55f6/commitizen/config.py#L54-L78
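The docstring's warning about indentation matters because configparser treats indented lines as continuations of the previous value, so the closing bracket must stay indented; the joined multi-line string is then valid JSON. A runnable check of that behaviour:

```python
import configparser
import json

data = """
[commitizen]
name = cz_jira
files = [
    "commitizen/__version__.py",
    "pyproject.toml"
    ]
"""
config = configparser.ConfigParser(allow_no_value=True)
config.read_string(data)

conf = dict(config["commitizen"])
# the continuation lines are joined with newlines into one JSON string
conf["files"] = json.loads(conf["files"])
print(conf["files"])  # ['commitizen/__version__.py', 'pyproject.toml']
```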
17,188
Woile/commitizen
commitizen/config.py
set_key
def set_key(key: str, value: str) -> dict:
    """Set or update a key in the conf.

    For now only strings are supported.
    We use it to update the version number.
    """
    if not _conf.path:
        return {}

    if "toml" in _conf.path:
        with open(_conf.path, "r") as f:
            parser = parse(f.read())

        parser["tool"]["commitizen"][key] = value

        with open(_conf.path, "w") as f:
            f.write(parser.as_string())
    else:
        parser = configparser.ConfigParser()
        parser.read(_conf.path)

        parser["commitizen"][key] = value

        with open(_conf.path, "w") as f:
            parser.write(f)

    return _conf.config
python
[ "def", "set_key", "(", "key", ":", "str", ",", "value", ":", "str", ")", "->", "dict", ":", "if", "not", "_conf", ".", "path", ":", "return", "{", "}", "if", "\"toml\"", "in", "_conf", ".", "path", ":", "with", "open", "(", "_conf", ".", "path", ",", "\"r\"", ")", "as", "f", ":", "parser", "=", "parse", "(", "f", ".", "read", "(", ")", ")", "parser", "[", "\"tool\"", "]", "[", "\"commitizen\"", "]", "[", "key", "]", "=", "value", "with", "open", "(", "_conf", ".", "path", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "parser", ".", "as_string", "(", ")", ")", "else", ":", "parser", "=", "configparser", ".", "ConfigParser", "(", ")", "parser", ".", "read", "(", "_conf", ".", "path", ")", "parser", "[", "\"commitizen\"", "]", "[", "key", "]", "=", "value", "with", "open", "(", "_conf", ".", "path", ",", "\"w\"", ")", "as", "f", ":", "parser", ".", "write", "(", "f", ")", "return", "_conf", ".", "config" ]
Set or update a key in the conf.

For now only strings are supported.
We use it to update the version number.
[ "Set", "or", "update", "a", "key", "in", "the", "conf", "." ]
bc54b9a4b6ad281620179a1ed417c01addde55f6
https://github.com/Woile/commitizen/blob/bc54b9a4b6ad281620179a1ed417c01addde55f6/commitizen/config.py#L130-L152
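For the TOML branch, the parser used above appears to be style-preserving (tomlkit-like), so rewriting one key keeps the rest of the file's formatting intact. A small demo under that assumption:

```python
from tomlkit import parse

doc = parse('[tool.commitizen]\nname = "cz_jira"\nversion = "0.1.0"\n')
doc["tool"]["commitizen"]["version"] = "0.2.0"
# as_string() round-trips the document, preserving untouched lines
print(doc.as_string())
```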
17,189
pysathq/pysat
pysat/_fileio.py
FileObject.close
def close(self):
    """
    Close a file pointer.
    """
    if self.fp:
        self.fp.close()
        self.fp = None

    if self.fp_extra:
        self.fp_extra.close()
        self.fp_extra = None

    self.ctype = None
python
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "fp", ":", "self", ".", "fp", ".", "close", "(", ")", "self", ".", "fp", "=", "None", "if", "self", ".", "fp_extra", ":", "self", ".", "fp_extra", ".", "close", "(", ")", "self", ".", "fp_extra", "=", "None", "self", ".", "ctype", "=", "None" ]
Close a file pointer.
[ "Close", "a", "file", "pointer", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/_fileio.py#L147-L160
17,190
pysathq/pysat
pysat/_fileio.py
FileObject.get_compression_type
def get_compression_type(self, file_name):
    """
    Determine compression type for a given file using its extension.

    :param file_name: a given file name
    :type file_name: str
    """
    ext = os.path.splitext(file_name)[1]
    if ext == '.gz':
        self.ctype = 'gzip'
    elif ext == '.bz2':
        self.ctype = 'bzip2'
    elif ext in ('.xz', '.lzma'):
        self.ctype = 'lzma'
    else:
        self.ctype = None
python
[ "def", "get_compression_type", "(", "self", ",", "file_name", ")", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "[", "1", "]", "if", "ext", "==", "'.gz'", ":", "self", ".", "ctype", "=", "'gzip'", "elif", "ext", "==", "'.bz2'", ":", "self", ".", "ctype", "=", "'bzip2'", "elif", "ext", "in", "(", "'.xz'", ",", "'.lzma'", ")", ":", "self", ".", "ctype", "=", "'lzma'", "else", ":", "self", ".", "ctype", "=", "None" ]
Determine compression type for a given file using its extension.

:param file_name: a given file name
:type file_name: str
[ "Determine", "compression", "type", "for", "a", "given", "file", "using", "its", "extension", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/_fileio.py#L162-L179
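The if/elif chain above is a fixed extension-to-codec mapping; the same table written as a dict lookup, runnable standalone:

```python
import os

# same mapping as the if/elif chain; .get() returns None for unknown extensions
CTYPES = {'.gz': 'gzip', '.bz2': 'bzip2', '.xz': 'lzma', '.lzma': 'lzma'}

for fname in ('cnf.gz', 'cnf.bz2', 'cnf.xz', 'cnf'):
    ext = os.path.splitext(fname)[1]
    print(fname, '->', CTYPES.get(ext))
# cnf.gz -> gzip, cnf.bz2 -> bzip2, cnf.xz -> lzma, cnf -> None
```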
17,191
pysathq/pysat
solvers/prepare.py
do
def do(to_install):
    """
    Prepare all solvers specified in the command line.
    """
    for solver in to_install:
        print('preparing {0}'.format(solver))

        download_archive(sources[solver])
        extract_archive(sources[solver][-1], solver)
        adapt_files(solver)
        patch_solver(solver)
        compile_solver(solver)
python
[ "def", "do", "(", "to_install", ")", ":", "for", "solver", "in", "to_install", ":", "print", "(", "'preparing {0}'", ".", "format", "(", "solver", ")", ")", "download_archive", "(", "sources", "[", "solver", "]", ")", "extract_archive", "(", "sources", "[", "solver", "]", "[", "-", "1", "]", ",", "solver", ")", "adapt_files", "(", "solver", ")", "patch_solver", "(", "solver", ")", "compile_solver", "(", "solver", ")" ]
Prepare all solvers specified in the command line.
[ "Prepare", "all", "solvers", "specified", "in", "the", "command", "line", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/solvers/prepare.py#L272-L284
17,192
pysathq/pysat
solvers/prepare.py
adapt_files
def adapt_files(solver):
    """
    Rename and remove files whenever necessary.
    """
    print("adapting {0}'s files".format(solver))
    root = os.path.join('solvers', solver)

    for arch in to_extract[solver]:
        arch = os.path.join(root, arch)
        extract_archive(arch, solver, put_inside=True)

    for fnames in to_move[solver]:
        old = os.path.join(root, fnames[0])
        new = os.path.join(root, fnames[1])
        os.rename(old, new)

    for f in to_remove[solver]:
        f = os.path.join(root, f)
        if os.path.isdir(f):
            shutil.rmtree(f)
        else:
            os.remove(f)
python
[ "def", "adapt_files", "(", "solver", ")", ":", "print", "(", "\"adapting {0}'s files\"", ".", "format", "(", "solver", ")", ")", "root", "=", "os", ".", "path", ".", "join", "(", "'solvers'", ",", "solver", ")", "for", "arch", "in", "to_extract", "[", "solver", "]", ":", "arch", "=", "os", ".", "path", ".", "join", "(", "root", ",", "arch", ")", "extract_archive", "(", "arch", ",", "solver", ",", "put_inside", "=", "True", ")", "for", "fnames", "in", "to_move", "[", "solver", "]", ":", "old", "=", "os", ".", "path", ".", "join", "(", "root", ",", "fnames", "[", "0", "]", ")", "new", "=", "os", ".", "path", ".", "join", "(", "root", ",", "fnames", "[", "1", "]", ")", "os", ".", "rename", "(", "old", ",", "new", ")", "for", "f", "in", "to_remove", "[", "solver", "]", ":", "f", "=", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", "if", "os", ".", "path", ".", "isdir", "(", "f", ")", ":", "shutil", ".", "rmtree", "(", "f", ")", "else", ":", "os", ".", "remove", "(", "f", ")" ]
Rename and remove files whenever necessary.
[ "Rename", "and", "remove", "files", "whenever", "necessary", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/solvers/prepare.py#L380-L402
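The three bookkeeping dictionaries consumed above are defined elsewhere in solvers/prepare.py; their shapes can be inferred from how adapt_files() indexes them. The entries below are hypothetical placeholders, not the real contents:

# hypothetical placeholder entries; real values live in solvers/prepare.py
to_extract = {'minisat22': []}                     # nested archives to unpack
to_move    = {'minisat22': [('minisat', 'core')]}  # (old, new) rename pairs
to_remove  = {'minisat22': ['doc', 'README']}      # files or dirs to delete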
17,193
pysathq/pysat
examples/mcsls.py
MCSls._map_extlit
def _map_extlit(self, l):
    """
        Map an external variable to an internal one if necessary.

        This method is used when new clauses are added to the formula
        incrementally, which may result in introducing new variables
        clashing with the previously used *clause selectors*. The method
        makes sure no clash occurs, i.e. it maps the original variables
        used in the new problem clauses to the newly introduced auxiliary
        variables (see :func:`add_clause`).

        Given an integer literal, a fresh literal is returned. The
        returned integer has the same sign as the input literal.

        :param l: literal to map
        :type l: int

        :rtype: int
    """

    v = abs(l)

    if v in self.vmap.e2i:
        return int(copysign(self.vmap.e2i[v], l))
    else:
        self.topv += 1

        self.vmap.e2i[v] = self.topv
        self.vmap.i2e[self.topv] = v

        return int(copysign(self.topv, l))
python
def _map_extlit(self, l):
    """
        Map an external variable to an internal one if necessary.

        This method is used when new clauses are added to the formula
        incrementally, which may result in introducing new variables
        clashing with the previously used *clause selectors*. The method
        makes sure no clash occurs, i.e. it maps the original variables
        used in the new problem clauses to the newly introduced auxiliary
        variables (see :func:`add_clause`).

        Given an integer literal, a fresh literal is returned. The
        returned integer has the same sign as the input literal.

        :param l: literal to map
        :type l: int

        :rtype: int
    """

    v = abs(l)

    if v in self.vmap.e2i:
        return int(copysign(self.vmap.e2i[v], l))
    else:
        self.topv += 1

        self.vmap.e2i[v] = self.topv
        self.vmap.i2e[self.topv] = v

        return int(copysign(self.topv, l))
[ "def", "_map_extlit", "(", "self", ",", "l", ")", ":", "v", "=", "abs", "(", "l", ")", "if", "v", "in", "self", ".", "vmap", ".", "e2i", ":", "return", "int", "(", "copysign", "(", "self", ".", "vmap", ".", "e2i", "[", "v", "]", ",", "l", ")", ")", "else", ":", "self", ".", "topv", "+=", "1", "self", ".", "vmap", ".", "e2i", "[", "v", "]", "=", "self", ".", "topv", "self", ".", "vmap", ".", "i2e", "[", "self", ".", "topv", "]", "=", "v", "return", "int", "(", "copysign", "(", "self", ".", "topv", ",", "l", ")", ")" ]
Map an external variable to an internal one if necessary.

This method is used when new clauses are added to the formula incrementally, which may result in introducing new variables clashing with the previously used *clause selectors*. The method makes sure no clash occurs, i.e. it maps the original variables used in the new problem clauses to the newly introduced auxiliary variables (see :func:`add_clause`).

Given an integer literal, a fresh literal is returned. The returned integer has the same sign as the input literal.

:param l: literal to map
:type l: int

:rtype: int
[ "Map", "an", "external", "variable", "to", "an", "internal", "one", "if", "necessary", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/mcsls.py#L409-L439
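The sign-preserving behaviour is easy to see in a standalone toy rendering of the same logic; here the vmap namespace is replaced by plain dicts, and the starting value of topv is an arbitrary assumption:

# toy standalone rendering of the sign-preserving mapping above;
# plain dicts stand in for the vmap namespace
from math import copysign

e2i, i2e = {}, {}
topv = 10  # assume 10 internal variables are already in use

def map_extlit(l):
    global topv
    v = abs(l)
    if v not in e2i:
        topv += 1
        e2i[v], i2e[topv] = topv, v
    return int(copysign(e2i[v], l))

print(map_extlit(3), map_extlit(-3))  # -> 11 -11: same fresh variable, signs kept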
17,194
pysathq/pysat
examples/hitman.py
Hitman.init
def init(self, bootstrap_with):
    """
        This method serves for initializing the hitting set solver with a
        given list of sets to hit. Concretely, the hitting set problem is
        encoded into partial MaxSAT as outlined above, which is then fed
        either to a MaxSAT solver or an MCS enumerator.

        :param bootstrap_with: input set of sets to hit
        :type bootstrap_with: iterable(iterable(obj))
    """

    # formula encoding the sets to hit
    formula = WCNF()

    # hard clauses
    for to_hit in bootstrap_with:
        to_hit = list(map(lambda obj: self.idpool.id(obj), to_hit))

        formula.append(to_hit)

    # soft clauses
    for obj_id in six.iterkeys(self.idpool.id2obj):
        formula.append([-obj_id], weight=1)

    if self.htype == 'rc2':
        # using the RC2-A options from MaxSAT evaluation 2018
        self.oracle = RC2(formula, solver=self.solver, adapt=False,
                exhaust=True, trim=5)
    elif self.htype == 'lbx':
        self.oracle = LBX(formula, solver_name=self.solver, use_cld=True)
    else:
        self.oracle = MCSls(formula, solver_name=self.solver, use_cld=True)
python
def init(self, bootstrap_with):
    """
        This method serves for initializing the hitting set solver with a
        given list of sets to hit. Concretely, the hitting set problem is
        encoded into partial MaxSAT as outlined above, which is then fed
        either to a MaxSAT solver or an MCS enumerator.

        :param bootstrap_with: input set of sets to hit
        :type bootstrap_with: iterable(iterable(obj))
    """

    # formula encoding the sets to hit
    formula = WCNF()

    # hard clauses
    for to_hit in bootstrap_with:
        to_hit = list(map(lambda obj: self.idpool.id(obj), to_hit))

        formula.append(to_hit)

    # soft clauses
    for obj_id in six.iterkeys(self.idpool.id2obj):
        formula.append([-obj_id], weight=1)

    if self.htype == 'rc2':
        # using the RC2-A options from MaxSAT evaluation 2018
        self.oracle = RC2(formula, solver=self.solver, adapt=False,
                exhaust=True, trim=5)
    elif self.htype == 'lbx':
        self.oracle = LBX(formula, solver_name=self.solver, use_cld=True)
    else:
        self.oracle = MCSls(formula, solver_name=self.solver, use_cld=True)
[ "def", "init", "(", "self", ",", "bootstrap_with", ")", ":", "# formula encoding the sets to hit", "formula", "=", "WCNF", "(", ")", "# hard clauses", "for", "to_hit", "in", "bootstrap_with", ":", "to_hit", "=", "list", "(", "map", "(", "lambda", "obj", ":", "self", ".", "idpool", ".", "id", "(", "obj", ")", ",", "to_hit", ")", ")", "formula", ".", "append", "(", "to_hit", ")", "# soft clauses", "for", "obj_id", "in", "six", ".", "iterkeys", "(", "self", ".", "idpool", ".", "id2obj", ")", ":", "formula", ".", "append", "(", "[", "-", "obj_id", "]", ",", "weight", "=", "1", ")", "if", "self", ".", "htype", "==", "'rc2'", ":", "# using the RC2-A options from MaxSAT evaluation 2018", "self", ".", "oracle", "=", "RC2", "(", "formula", ",", "solver", "=", "self", ".", "solver", ",", "adapt", "=", "False", ",", "exhaust", "=", "True", ",", "trim", "=", "5", ")", "elif", "self", ".", "htype", "==", "'lbx'", ":", "self", ".", "oracle", "=", "LBX", "(", "formula", ",", "solver_name", "=", "self", ".", "solver", ",", "use_cld", "=", "True", ")", "else", ":", "self", ".", "oracle", "=", "MCSls", "(", "formula", ",", "solver_name", "=", "self", ".", "solver", ",", "use_cld", "=", "True", ")" ]
This method serves for initializing the hitting set solver with a given list of sets to hit. Concretely, the hitting set problem is encoded into partial MaxSAT as outlined above, which is then fed either to a MaxSAT solver or an MCS enumerator.

:param bootstrap_with: input set of sets to hit
:type bootstrap_with: iterable(iterable(obj))
[ "This", "method", "serves", "for", "initializing", "the", "hitting", "set", "solver", "with", "a", "given", "list", "of", "sets", "to", "hit", ".", "Concretely", "the", "hitting", "set", "problem", "is", "encoded", "into", "partial", "MaxSAT", "as", "outlined", "above", "which", "is", "then", "fed", "either", "to", "a", "MaxSAT", "solver", "or", "an", "MCS", "enumerator", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/hitman.py#L254-L285
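For concreteness, the following hand-built WCNF reproduces the encoding init() constructs for the two sets {a, b} and {b, c}, assuming the IDPool assigns ids 1..3 to the objects a, b, c:

# hand-built version of the encoding produced by init(); object ids 1..3
# for a, b, c are an assumption
from pysat.formula import WCNF

wcnf = WCNF()
wcnf.append([1, 2])              # hard: the set {a, b} must be hit
wcnf.append([2, 3])              # hard: the set {b, c} must be hit
for v in (1, 2, 3):
    wcnf.append([-v], weight=1)  # soft: prefer leaving each object out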
17,195
pysathq/pysat
examples/hitman.py
Hitman.get
def get(self):
    """
        This method computes and returns a hitting set. The hitting set is
        obtained using the underlying oracle operating the MaxSAT problem
        formulation. The computed solution is mapped back to objects of
        the problem domain.

        :rtype: list(obj)
    """

    model = self.oracle.compute()

    if model:
        if self.htype == 'rc2':
            # extracting a hitting set
            self.hset = filter(lambda v: v > 0, model)
        else:
            self.hset = model

        return list(map(lambda vid: self.idpool.id2obj[vid], self.hset))
python
def get(self):
    """
        This method computes and returns a hitting set. The hitting set is
        obtained using the underlying oracle operating the MaxSAT problem
        formulation. The computed solution is mapped back to objects of
        the problem domain.

        :rtype: list(obj)
    """

    model = self.oracle.compute()

    if model:
        if self.htype == 'rc2':
            # extracting a hitting set
            self.hset = filter(lambda v: v > 0, model)
        else:
            self.hset = model

        return list(map(lambda vid: self.idpool.id2obj[vid], self.hset))
[ "def", "get", "(", "self", ")", ":", "model", "=", "self", ".", "oracle", ".", "compute", "(", ")", "if", "model", ":", "if", "self", ".", "htype", "==", "'rc2'", ":", "# extracting a hitting set", "self", ".", "hset", "=", "filter", "(", "lambda", "v", ":", "v", ">", "0", ",", "model", ")", "else", ":", "self", ".", "hset", "=", "model", "return", "list", "(", "map", "(", "lambda", "vid", ":", "self", ".", "idpool", ".", "id2obj", "[", "vid", "]", ",", "self", ".", "hset", ")", ")" ]
This method computes and returns a hitting set. The hitting set is obtained using the underlying oracle operating the MaxSAT problem formulation. The computed solution is mapped back to objects of the problem domain.

:rtype: list(obj)
[ "This", "method", "computes", "and", "returns", "a", "hitting", "set", ".", "The", "hitting", "set", "is", "obtained", "using", "the", "underlying", "oracle", "operating", "the", "MaxSAT", "problem", "formulation", ".", "The", "computed", "solution", "is", "mapped", "back", "to", "objects", "of", "the", "problem", "domain", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/hitman.py#L296-L315
17,196
pysathq/pysat
examples/hitman.py
Hitman.hit
def hit(self, to_hit):
    """
        This method adds a new set to hit to the hitting set solver. This
        is done by translating the input iterable of objects into a list
        of Boolean variables in the MaxSAT problem formulation.

        :param to_hit: a new set to hit
        :type to_hit: iterable(obj)
    """

    # translating objects to variables
    to_hit = list(map(lambda obj: self.idpool.id(obj), to_hit))

    # a soft clause should be added for each new object
    new_obj = list(filter(lambda vid: vid not in self.oracle.vmap.e2i, to_hit))

    # new hard clause
    self.oracle.add_clause(to_hit)

    # new soft clauses
    for vid in new_obj:
        self.oracle.add_clause([-vid], 1)
python
def hit(self, to_hit):
    """
        This method adds a new set to hit to the hitting set solver. This
        is done by translating the input iterable of objects into a list
        of Boolean variables in the MaxSAT problem formulation.

        :param to_hit: a new set to hit
        :type to_hit: iterable(obj)
    """

    # translating objects to variables
    to_hit = list(map(lambda obj: self.idpool.id(obj), to_hit))

    # a soft clause should be added for each new object
    new_obj = list(filter(lambda vid: vid not in self.oracle.vmap.e2i, to_hit))

    # new hard clause
    self.oracle.add_clause(to_hit)

    # new soft clauses
    for vid in new_obj:
        self.oracle.add_clause([-vid], 1)
[ "def", "hit", "(", "self", ",", "to_hit", ")", ":", "# translating objects to variables", "to_hit", "=", "list", "(", "map", "(", "lambda", "obj", ":", "self", ".", "idpool", ".", "id", "(", "obj", ")", ",", "to_hit", ")", ")", "# a soft clause should be added for each new object", "new_obj", "=", "list", "(", "filter", "(", "lambda", "vid", ":", "vid", "not", "in", "self", ".", "oracle", ".", "vmap", ".", "e2i", ",", "to_hit", ")", ")", "# new hard clause", "self", ".", "oracle", ".", "add_clause", "(", "to_hit", ")", "# new soft clauses", "for", "vid", "in", "new_obj", ":", "self", ".", "oracle", ".", "add_clause", "(", "[", "-", "vid", "]", ",", "1", ")" ]
This method adds a new set to hit to the hitting set solver. This is done by translating the input iterable of objects into a list of Boolean variables in the MaxSAT problem formulation.

:param to_hit: a new set to hit
:type to_hit: iterable(obj)
[ "This", "method", "adds", "a", "new", "set", "to", "hit", "to", "the", "hitting", "set", "solver", ".", "This", "is", "done", "by", "translating", "the", "input", "iterable", "of", "objects", "into", "a", "list", "of", "Boolean", "variables", "in", "the", "MaxSAT", "problem", "formulation", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/hitman.py#L317-L338
17,197
pysathq/pysat
examples/hitman.py
Hitman.block
def block(self, to_block):
    """
        The method serves for imposing a constraint forbidding the hitting
        set solver to compute a given hitting set. Each set to block is
        encoded as a hard clause in the MaxSAT problem formulation, which
        is then added to the underlying oracle.

        :param to_block: a set to block
        :type to_block: iterable(obj)
    """

    # translating objects to variables
    to_block = list(map(lambda obj: self.idpool.id(obj), to_block))

    # a soft clause should be added for each new object
    new_obj = list(filter(lambda vid: vid not in self.oracle.vmap.e2i, to_block))

    # new hard clause
    self.oracle.add_clause([-vid for vid in to_block])

    # new soft clauses
    for vid in new_obj:
        self.oracle.add_clause([-vid], 1)
python
def block(self, to_block):
    """
        The method serves for imposing a constraint forbidding the hitting
        set solver to compute a given hitting set. Each set to block is
        encoded as a hard clause in the MaxSAT problem formulation, which
        is then added to the underlying oracle.

        :param to_block: a set to block
        :type to_block: iterable(obj)
    """

    # translating objects to variables
    to_block = list(map(lambda obj: self.idpool.id(obj), to_block))

    # a soft clause should be added for each new object
    new_obj = list(filter(lambda vid: vid not in self.oracle.vmap.e2i, to_block))

    # new hard clause
    self.oracle.add_clause([-vid for vid in to_block])

    # new soft clauses
    for vid in new_obj:
        self.oracle.add_clause([-vid], 1)
[ "def", "block", "(", "self", ",", "to_block", ")", ":", "# translating objects to variables", "to_block", "=", "list", "(", "map", "(", "lambda", "obj", ":", "self", ".", "idpool", ".", "id", "(", "obj", ")", ",", "to_block", ")", ")", "# a soft clause should be added for each new object", "new_obj", "=", "list", "(", "filter", "(", "lambda", "vid", ":", "vid", "not", "in", "self", ".", "oracle", ".", "vmap", ".", "e2i", ",", "to_block", ")", ")", "# new hard clause", "self", ".", "oracle", ".", "add_clause", "(", "[", "-", "vid", "for", "vid", "in", "to_block", "]", ")", "# new soft clauses", "for", "vid", "in", "new_obj", ":", "self", ".", "oracle", ".", "add_clause", "(", "[", "-", "vid", "]", ",", "1", ")" ]
The method serves for imposing a constraint forbidding the hitting set solver to compute a given hitting set. Each set to block is encoded as a hard clause in the MaxSAT problem formulation, which is then added to the underlying oracle.

:param to_block: a set to block
:type to_block: iterable(obj)
[ "The", "method", "serves", "for", "imposing", "a", "constraint", "forbidding", "the", "hitting", "set", "solver", "to", "compute", "a", "given", "hitting", "set", ".", "Each", "set", "to", "block", "is", "encoded", "as", "a", "hard", "clause", "in", "the", "MaxSAT", "problem", "formulation", "which", "is", "then", "added", "to", "the", "underlying", "oracle", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/hitman.py#L340-L362
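Taken together, init(), get(), hit() and block() support a straightforward enumeration loop. The sketch below is illustrative only: the constructor arguments are an assumption inferred from the self.solver and self.htype attributes the methods above rely on.

# illustrative enumeration of minimal hitting sets over sets of integers;
# the constructor signature Hitman(solver=..., htype=...) is an assumption
h = Hitman(solver='m22', htype='rc2')
h.init([[1, 2, 3], [1, 4]])  # bootstrap with the sets to hit

h.hit([2, 5])                # further sets can be added on the fly

while True:
    hs = h.get()             # next smallest hitting set, e.g. [1, 2]
    if hs is None:           # get() falls through to None once no model remains
        break
    print(hs)
    h.block(hs)              # forbid this exact hitting set and repeat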
17,198
pysathq/pysat
examples/musx.py
MUSX._compute
def _compute(self, approx):
    """
        Deletion-based MUS extraction. Given an over-approximation of an
        MUS, i.e. an unsatisfiable core previously returned by a SAT
        oracle, the method represents a loop, which at each iteration
        removes a clause from the core and checks whether the remaining
        clauses of the approximation are unsatisfiable together with the
        hard clauses.

        Soft clauses are (de)activated using the standard MiniSat-like
        assumptions interface [2]_. Each soft clause :math:`c` is
        augmented with a selector literal :math:`s`, e.g. :math:`(c)
        \gets (c \\vee \\neg{s})`. As a result, clause :math:`c` can be
        activated by assuming literal :math:`s`. The over-approximation
        provided as an input is specified as a list of selector literals
        for clauses in the unsatisfiable core.

        .. [2] Niklas Eén, Niklas Sörensson. *Temporal induction by
            incremental SAT solving*. Electr. Notes Theor. Comput. Sci.
            89(4). 2003. pp. 543-560

        :param approx: an over-approximation of an MUS
        :type approx: list(int)

        Note that the method does not return. Instead, after its
        execution, the input over-approximation is refined and contains
        an MUS.
    """

    i = 0

    while i < len(approx):
        to_test = approx[:i] + approx[(i + 1):]
        sel, clid = approx[i], self.vmap[approx[i]]

        if self.verbose > 1:
            print('c testing clid: {0}'.format(clid), end='')

        if self.oracle.solve(assumptions=to_test):
            if self.verbose > 1:
                print(' -> sat (keeping {0})'.format(clid))

            i += 1
        else:
            if self.verbose > 1:
                print(' -> unsat (removing {0})'.format(clid))

            approx = to_test
python
def _compute(self, approx):
    """
        Deletion-based MUS extraction. Given an over-approximation of an
        MUS, i.e. an unsatisfiable core previously returned by a SAT
        oracle, the method represents a loop, which at each iteration
        removes a clause from the core and checks whether the remaining
        clauses of the approximation are unsatisfiable together with the
        hard clauses.

        Soft clauses are (de)activated using the standard MiniSat-like
        assumptions interface [2]_. Each soft clause :math:`c` is
        augmented with a selector literal :math:`s`, e.g. :math:`(c)
        \gets (c \\vee \\neg{s})`. As a result, clause :math:`c` can be
        activated by assuming literal :math:`s`. The over-approximation
        provided as an input is specified as a list of selector literals
        for clauses in the unsatisfiable core.

        .. [2] Niklas Eén, Niklas Sörensson. *Temporal induction by
            incremental SAT solving*. Electr. Notes Theor. Comput. Sci.
            89(4). 2003. pp. 543-560

        :param approx: an over-approximation of an MUS
        :type approx: list(int)

        Note that the method does not return. Instead, after its
        execution, the input over-approximation is refined and contains
        an MUS.
    """

    i = 0

    while i < len(approx):
        to_test = approx[:i] + approx[(i + 1):]
        sel, clid = approx[i], self.vmap[approx[i]]

        if self.verbose > 1:
            print('c testing clid: {0}'.format(clid), end='')

        if self.oracle.solve(assumptions=to_test):
            if self.verbose > 1:
                print(' -> sat (keeping {0})'.format(clid))

            i += 1
        else:
            if self.verbose > 1:
                print(' -> unsat (removing {0})'.format(clid))

            approx = to_test
[ "def", "_compute", "(", "self", ",", "approx", ")", ":", "i", "=", "0", "while", "i", "<", "len", "(", "approx", ")", ":", "to_test", "=", "approx", "[", ":", "i", "]", "+", "approx", "[", "(", "i", "+", "1", ")", ":", "]", "sel", ",", "clid", "=", "approx", "[", "i", "]", ",", "self", ".", "vmap", "[", "approx", "[", "i", "]", "]", "if", "self", ".", "verbose", ">", "1", ":", "print", "(", "'c testing clid: {0}'", ".", "format", "(", "clid", ")", ",", "end", "=", "''", ")", "if", "self", ".", "oracle", ".", "solve", "(", "assumptions", "=", "to_test", ")", ":", "if", "self", ".", "verbose", ">", "1", ":", "print", "(", "' -> sat (keeping {0})'", ".", "format", "(", "clid", ")", ")", "i", "+=", "1", "else", ":", "if", "self", ".", "verbose", ">", "1", ":", "print", "(", "' -> unsat (removing {0})'", ".", "format", "(", "clid", ")", ")", "approx", "=", "to_test" ]
Deletion-based MUS extraction. Given an over-approximation of an MUS, i.e. an unsatisfiable core previously returned by a SAT oracle, the method represents a loop, which at each iteration removes a clause from the core and checks whether the remaining clauses of the approximation are unsatisfiable together with the hard clauses.

Soft clauses are (de)activated using the standard MiniSat-like assumptions interface [2]_. Each soft clause :math:`c` is augmented with a selector literal :math:`s`, e.g. :math:`(c) \gets (c \\vee \\neg{s})`. As a result, clause :math:`c` can be activated by assuming literal :math:`s`. The over-approximation provided as an input is specified as a list of selector literals for clauses in the unsatisfiable core.

.. [2] Niklas Eén, Niklas Sörensson. *Temporal induction by incremental SAT solving*. Electr. Notes Theor. Comput. Sci. 89(4). 2003. pp. 543-560

:param approx: an over-approximation of an MUS
:type approx: list(int)

Note that the method does not return. Instead, after its execution, the input over-approximation is refined and contains an MUS.
[ "Deletion", "-", "based", "MUS", "extraction", ".", "Given", "an", "over", "-", "approximation", "of", "an", "MUS", "i", ".", "e", ".", "an", "unsatisfiable", "core", "previously", "returned", "by", "a", "SAT", "oracle", "the", "method", "represents", "a", "loop", "which", "at", "each", "iteration", "removes", "a", "clause", "from", "the", "core", "and", "checks", "whether", "the", "remaining", "clauses", "of", "the", "approximation", "are", "unsatisfiable", "together", "with", "the", "hard", "clauses", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/musx.py#L204-L250
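The same deletion loop can be reproduced against a plain SAT oracle. The following self-contained sketch uses the selector encoding the docstring describes; it is an illustration of the technique, not the MUSX internals, and the example clauses are made up:

# self-contained sketch of deletion-based MUS extraction with selector
# literals (c \/ -s); illustrative, not the MUSX class itself
from pysat.solvers import Solver

clauses = [[1], [-1, 2], [-2], [3]]  # clauses 0..2 form the MUS; [3] is redundant
topv = 3
oracle = Solver(name='m22')
sels = []
for cl in clauses:
    topv += 1
    oracle.add_clause(cl + [-topv])  # assuming selector topv activates cl
    sels.append(topv)

approx = sels[:]                     # over-approximation: every selector
i = 0
while i < len(approx):
    to_test = approx[:i] + approx[(i + 1):]
    if oracle.solve(assumptions=to_test):
        i += 1                       # sat without it: the clause is necessary
    else:
        approx = to_test             # still unsat: the clause can be dropped
print([sels.index(s) for s in approx])  # -> [0, 1, 2]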
17,199
pysathq/pysat
setup.py
build.run
def run(self):
    """
        Download, patch and compile SAT solvers before building.
    """

    # download and compile solvers
    prepare.do(to_install)

    # now, do standard build
    distutils.command.build.build.run(self)
python
def run(self):
    """
        Download, patch and compile SAT solvers before building.
    """

    # download and compile solvers
    prepare.do(to_install)

    # now, do standard build
    distutils.command.build.build.run(self)
[ "def", "run", "(", "self", ")", ":", "# download and compile solvers", "prepare", ".", "do", "(", "to_install", ")", "# now, do standard build", "distutils", ".", "command", ".", "build", ".", "build", ".", "run", "(", "self", ")" ]
Download, patch and compile SAT solvers before building.
[ "Download", "patch", "and", "compile", "SAT", "solvers", "before", "building", "." ]
522742e8f2d4c6ac50ecd9087f7a346206774c67
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/setup.py#L61-L70
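Such a command subclass is typically registered with setup() through cmdclass; the snippet below is a sketch of the pattern with hypothetical metadata, not the project's actual setup.py:

# sketch of wiring the custom build command into setup();
# name and version are placeholder values
from distutils.core import setup

setup(
    name='python-sat',
    version='0.0.0',
    cmdclass={'build': build},  # run() above replaces the standard build step
)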