id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
232,800
sebp/scikit-survival
sksurv/nonparametric.py
SurvivalFunctionEstimator.predict_proba
def predict_proba(self, time): """Return probability of an event after given time point. :math:`\\hat{S}(t) = P(T > t)` Parameters ---------- time : array, shape = (n_samples,) Time to estimate probability at. Returns ------- prob : array, shape = (n_samples,) Probability of an event. """ check_is_fitted(self, "unique_time_") time = check_array(time, ensure_2d=False) # K-M is undefined if estimate at last time point is non-zero extends = time > self.unique_time_[-1] if self.prob_[-1] > 0 and extends.any(): raise ValueError("time must be smaller than largest " "observed time point: {}".format(self.unique_time_[-1])) # beyond last time point is zero probability Shat = numpy.empty(time.shape, dtype=float) Shat[extends] = 0.0 valid = ~extends time = time[valid] idx = numpy.searchsorted(self.unique_time_, time) # for non-exact matches, we need to shift the index to left eps = numpy.finfo(self.unique_time_.dtype).eps exact = numpy.absolute(self.unique_time_[idx] - time) < eps idx[~exact] -= 1 Shat[valid] = self.prob_[idx] return Shat
python
def predict_proba(self, time): """Return probability of an event after given time point. :math:`\\hat{S}(t) = P(T > t)` Parameters ---------- time : array, shape = (n_samples,) Time to estimate probability at. Returns ------- prob : array, shape = (n_samples,) Probability of an event. """ check_is_fitted(self, "unique_time_") time = check_array(time, ensure_2d=False) # K-M is undefined if estimate at last time point is non-zero extends = time > self.unique_time_[-1] if self.prob_[-1] > 0 and extends.any(): raise ValueError("time must be smaller than largest " "observed time point: {}".format(self.unique_time_[-1])) # beyond last time point is zero probability Shat = numpy.empty(time.shape, dtype=float) Shat[extends] = 0.0 valid = ~extends time = time[valid] idx = numpy.searchsorted(self.unique_time_, time) # for non-exact matches, we need to shift the index to left eps = numpy.finfo(self.unique_time_.dtype).eps exact = numpy.absolute(self.unique_time_[idx] - time) < eps idx[~exact] -= 1 Shat[valid] = self.prob_[idx] return Shat
[ "def", "predict_proba", "(", "self", ",", "time", ")", ":", "check_is_fitted", "(", "self", ",", "\"unique_time_\"", ")", "time", "=", "check_array", "(", "time", ",", "ensure_2d", "=", "False", ")", "# K-M is undefined if estimate at last time point is non-zero", "extends", "=", "time", ">", "self", ".", "unique_time_", "[", "-", "1", "]", "if", "self", ".", "prob_", "[", "-", "1", "]", ">", "0", "and", "extends", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"time must be smaller than largest \"", "\"observed time point: {}\"", ".", "format", "(", "self", ".", "unique_time_", "[", "-", "1", "]", ")", ")", "# beyond last time point is zero probability", "Shat", "=", "numpy", ".", "empty", "(", "time", ".", "shape", ",", "dtype", "=", "float", ")", "Shat", "[", "extends", "]", "=", "0.0", "valid", "=", "~", "extends", "time", "=", "time", "[", "valid", "]", "idx", "=", "numpy", ".", "searchsorted", "(", "self", ".", "unique_time_", ",", "time", ")", "# for non-exact matches, we need to shift the index to left", "eps", "=", "numpy", ".", "finfo", "(", "self", ".", "unique_time_", ".", "dtype", ")", ".", "eps", "exact", "=", "numpy", ".", "absolute", "(", "self", ".", "unique_time_", "[", "idx", "]", "-", "time", ")", "<", "eps", "idx", "[", "~", "exact", "]", "-=", "1", "Shat", "[", "valid", "]", "=", "self", ".", "prob_", "[", "idx", "]", "return", "Shat" ]
Return probability of an event after given time point. :math:`\\hat{S}(t) = P(T > t)` Parameters ---------- time : array, shape = (n_samples,) Time to estimate probability at. Returns ------- prob : array, shape = (n_samples,) Probability of an event.
[ "Return", "probability", "of", "an", "event", "after", "given", "time", "point", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L327-L364
232,801
sebp/scikit-survival
sksurv/nonparametric.py
CensoringDistributionEstimator.fit
def fit(self, y): """Estimate censoring distribution from training data. Parameters ---------- y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. Returns ------- self """ event, time = check_y_survival(y) if event.all(): self.unique_time_ = numpy.unique(time) self.prob_ = numpy.ones(self.unique_time_.shape[0]) else: unique_time, prob = kaplan_meier_estimator(~event, time) self.unique_time_ = numpy.concatenate(([-numpy.infty], unique_time)) self.prob_ = numpy.concatenate(([1.], prob)) return self
python
def fit(self, y): """Estimate censoring distribution from training data. Parameters ---------- y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. Returns ------- self """ event, time = check_y_survival(y) if event.all(): self.unique_time_ = numpy.unique(time) self.prob_ = numpy.ones(self.unique_time_.shape[0]) else: unique_time, prob = kaplan_meier_estimator(~event, time) self.unique_time_ = numpy.concatenate(([-numpy.infty], unique_time)) self.prob_ = numpy.concatenate(([1.], prob)) return self
[ "def", "fit", "(", "self", ",", "y", ")", ":", "event", ",", "time", "=", "check_y_survival", "(", "y", ")", "if", "event", ".", "all", "(", ")", ":", "self", ".", "unique_time_", "=", "numpy", ".", "unique", "(", "time", ")", "self", ".", "prob_", "=", "numpy", ".", "ones", "(", "self", ".", "unique_time_", ".", "shape", "[", "0", "]", ")", "else", ":", "unique_time", ",", "prob", "=", "kaplan_meier_estimator", "(", "~", "event", ",", "time", ")", "self", ".", "unique_time_", "=", "numpy", ".", "concatenate", "(", "(", "[", "-", "numpy", ".", "infty", "]", ",", "unique_time", ")", ")", "self", ".", "prob_", "=", "numpy", ".", "concatenate", "(", "(", "[", "1.", "]", ",", "prob", ")", ")", "return", "self" ]
Estimate censoring distribution from training data. Parameters ---------- y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. Returns ------- self
[ "Estimate", "censoring", "distribution", "from", "training", "data", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L370-L393
232,802
sebp/scikit-survival
sksurv/nonparametric.py
CensoringDistributionEstimator.predict_ipcw
def predict_ipcw(self, y): """Return inverse probability of censoring weights at given time points. :math:`\\omega_i = \\delta_i / \\hat{G}(y_i)` Parameters ---------- y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. Returns ------- ipcw : array, shape = (n_samples,) Inverse probability of censoring weights. """ event, time = check_y_survival(y) Ghat = self.predict_proba(time[event]) if (Ghat == 0.0).any(): raise ValueError("censoring survival function is zero at one or more time points") weights = numpy.zeros(time.shape[0]) weights[event] = 1.0 / Ghat return weights
python
def predict_ipcw(self, y): """Return inverse probability of censoring weights at given time points. :math:`\\omega_i = \\delta_i / \\hat{G}(y_i)` Parameters ---------- y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. Returns ------- ipcw : array, shape = (n_samples,) Inverse probability of censoring weights. """ event, time = check_y_survival(y) Ghat = self.predict_proba(time[event]) if (Ghat == 0.0).any(): raise ValueError("censoring survival function is zero at one or more time points") weights = numpy.zeros(time.shape[0]) weights[event] = 1.0 / Ghat return weights
[ "def", "predict_ipcw", "(", "self", ",", "y", ")", ":", "event", ",", "time", "=", "check_y_survival", "(", "y", ")", "Ghat", "=", "self", ".", "predict_proba", "(", "time", "[", "event", "]", ")", "if", "(", "Ghat", "==", "0.0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"censoring survival function is zero at one or more time points\"", ")", "weights", "=", "numpy", ".", "zeros", "(", "time", ".", "shape", "[", "0", "]", ")", "weights", "[", "event", "]", "=", "1.0", "/", "Ghat", "return", "weights" ]
Return inverse probability of censoring weights at given time points. :math:`\\omega_i = \\delta_i / \\hat{G}(y_i)` Parameters ---------- y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. Returns ------- ipcw : array, shape = (n_samples,) Inverse probability of censoring weights.
[ "Return", "inverse", "probability", "of", "censoring", "weights", "at", "given", "time", "points", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L395-L421
232,803
sebp/scikit-survival
sksurv/metrics.py
concordance_index_censored
def concordance_index_censored(event_indicator, event_time, estimate, tied_tol=1e-8): """Concordance index for right-censored data The concordance index is defined as the proportion of all comparable pairs in which the predictions and outcomes are concordant. Samples are comparable if for at least one of them an event occurred. If the estimated risk is larger for the sample with a higher time of event/censoring, the predictions of that pair are said to be concordant. If an event occurred for one sample and the other is known to be event-free at least until the time of event of the first, the second sample is assumed to *outlive* the first. When predicted risks are identical for a pair, 0.5 rather than 1 is added to the count of concordant pairs. A pair is not comparable if an event occurred for both of them at the same time or an event occurred for one of them but the time of censoring is smaller than the time of event of the first one. Parameters ---------- event_indicator : array-like, shape = (n_samples,) Boolean array denotes whether an event occurred event_time : array-like, shape = (n_samples,) Array containing the time of an event or time of censoring estimate : array-like, shape = (n_samples,) Estimated risk of experiencing an event tied_tol : float, optional, default: 1e-8 The tolerance value for considering ties. If the absolute difference between risk scores is smaller or equal than `tied_tol`, risk scores are considered tied. Returns ------- cindex : float Concordance index concordant : int Number of concordant pairs discordant : int Number of discordant pairs tied_risk : int Number of pairs having tied estimated risks tied_time : int Number of comparable pairs sharing the same time References ---------- .. [1] Harrell, F.E., Califf, R.M., Pryor, D.B., Lee, K.L., Rosati, R.A, "Multivariable prognostic models: issues in developing models, evaluating assumptions and adequacy, and measuring and reducing errors", Statistics in Medicine, 15(4), 361-87, 1996. 
""" event_indicator, event_time, estimate = _check_inputs( event_indicator, event_time, estimate) w = numpy.ones_like(estimate) return _estimate_concordance_index(event_indicator, event_time, estimate, w, tied_tol)
python
def concordance_index_censored(event_indicator, event_time, estimate, tied_tol=1e-8): """Concordance index for right-censored data The concordance index is defined as the proportion of all comparable pairs in which the predictions and outcomes are concordant. Samples are comparable if for at least one of them an event occurred. If the estimated risk is larger for the sample with a higher time of event/censoring, the predictions of that pair are said to be concordant. If an event occurred for one sample and the other is known to be event-free at least until the time of event of the first, the second sample is assumed to *outlive* the first. When predicted risks are identical for a pair, 0.5 rather than 1 is added to the count of concordant pairs. A pair is not comparable if an event occurred for both of them at the same time or an event occurred for one of them but the time of censoring is smaller than the time of event of the first one. Parameters ---------- event_indicator : array-like, shape = (n_samples,) Boolean array denotes whether an event occurred event_time : array-like, shape = (n_samples,) Array containing the time of an event or time of censoring estimate : array-like, shape = (n_samples,) Estimated risk of experiencing an event tied_tol : float, optional, default: 1e-8 The tolerance value for considering ties. If the absolute difference between risk scores is smaller or equal than `tied_tol`, risk scores are considered tied. Returns ------- cindex : float Concordance index concordant : int Number of concordant pairs discordant : int Number of discordant pairs tied_risk : int Number of pairs having tied estimated risks tied_time : int Number of comparable pairs sharing the same time References ---------- .. [1] Harrell, F.E., Califf, R.M., Pryor, D.B., Lee, K.L., Rosati, R.A, "Multivariable prognostic models: issues in developing models, evaluating assumptions and adequacy, and measuring and reducing errors", Statistics in Medicine, 15(4), 361-87, 1996. 
""" event_indicator, event_time, estimate = _check_inputs( event_indicator, event_time, estimate) w = numpy.ones_like(estimate) return _estimate_concordance_index(event_indicator, event_time, estimate, w, tied_tol)
[ "def", "concordance_index_censored", "(", "event_indicator", ",", "event_time", ",", "estimate", ",", "tied_tol", "=", "1e-8", ")", ":", "event_indicator", ",", "event_time", ",", "estimate", "=", "_check_inputs", "(", "event_indicator", ",", "event_time", ",", "estimate", ")", "w", "=", "numpy", ".", "ones_like", "(", "estimate", ")", "return", "_estimate_concordance_index", "(", "event_indicator", ",", "event_time", ",", "estimate", ",", "w", ",", "tied_tol", ")" ]
Concordance index for right-censored data The concordance index is defined as the proportion of all comparable pairs in which the predictions and outcomes are concordant. Samples are comparable if for at least one of them an event occurred. If the estimated risk is larger for the sample with a higher time of event/censoring, the predictions of that pair are said to be concordant. If an event occurred for one sample and the other is known to be event-free at least until the time of event of the first, the second sample is assumed to *outlive* the first. When predicted risks are identical for a pair, 0.5 rather than 1 is added to the count of concordant pairs. A pair is not comparable if an event occurred for both of them at the same time or an event occurred for one of them but the time of censoring is smaller than the time of event of the first one. Parameters ---------- event_indicator : array-like, shape = (n_samples,) Boolean array denotes whether an event occurred event_time : array-like, shape = (n_samples,) Array containing the time of an event or time of censoring estimate : array-like, shape = (n_samples,) Estimated risk of experiencing an event tied_tol : float, optional, default: 1e-8 The tolerance value for considering ties. If the absolute difference between risk scores is smaller or equal than `tied_tol`, risk scores are considered tied. Returns ------- cindex : float Concordance index concordant : int Number of concordant pairs discordant : int Number of discordant pairs tied_risk : int Number of pairs having tied estimated risks tied_time : int Number of comparable pairs sharing the same time References ---------- .. [1] Harrell, F.E., Califf, R.M., Pryor, D.B., Lee, K.L., Rosati, R.A, "Multivariable prognostic models: issues in developing models, evaluating assumptions and adequacy, and measuring and reducing errors", Statistics in Medicine, 15(4), 361-87, 1996.
[ "Concordance", "index", "for", "right", "-", "censored", "data" ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/metrics.py#L111-L174
232,804
sebp/scikit-survival
sksurv/metrics.py
concordance_index_ipcw
def concordance_index_ipcw(survival_train, survival_test, estimate, tau=None, tied_tol=1e-8): """Concordance index for right-censored data based on inverse probability of censoring weights. This is an alternative to the estimator in :func:`concordance_index_censored` that does not depend on the distribution of censoring times in the test data. Therefore, the estimate is unbiased and consistent for a population concordance measure that is free of censoring. It is based on inverse probability of censoring weights, thus requires access to survival times from the training data to estimate the censoring distribution. Note that this requires that survival times `survival_test` lie within the range of survival times `survival_train`. This can be achieved by specifying the truncation time `tau`. The resulting `cindex` tells how well the given prediction model works in predicting events that occur in the time range from 0 to `tau`. The estimator uses the Kaplan-Meier estimator to estimate the censoring survivor function. Therefore, it is restricted to situations where the random censoring assumption holds and censoring is independent of the features. Parameters ---------- survival_train : structured array, shape = (n_train_samples,) Survival times for training data to estimate the censoring distribution from. A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. survival_test : structured array, shape = (n_samples,) Survival times of test data. A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. estimate : array-like, shape = (n_samples,) Estimated risk of experiencing an event of test data. tau : float, optional Truncation time. 
The survival function for the underlying censoring time distribution :math:`D` needs to be positive at `tau`, i.e., `tau` should be chosen such that the probability of being censored after time `tau` is non-zero: :math:`P(D > \\tau) > 0`. If `None`, no truncation is performed. tied_tol : float, optional, default: 1e-8 The tolerance value for considering ties. If the absolute difference between risk scores is smaller or equal than `tied_tol`, risk scores are considered tied. Returns ------- cindex : float Concordance index concordant : int Number of concordant pairs discordant : int Number of discordant pairs tied_risk : int Number of pairs having tied estimated risks tied_time : int Number of comparable pairs sharing the same time References ---------- .. [1] Uno, H., Cai, T., Pencina, M. J., D’Agostino, R. B., & Wei, L. J. (2011). "On the C-statistics for evaluating overall adequacy of risk prediction procedures with censored survival data". Statistics in Medicine, 30(10), 1105–1117. """ test_event, test_time = check_y_survival(survival_test) if tau is not None: survival_test = survival_test[test_time < tau] estimate = check_array(estimate, ensure_2d=False) check_consistent_length(test_event, test_time, estimate) cens = CensoringDistributionEstimator() cens.fit(survival_train) ipcw = cens.predict_ipcw(survival_test) w = numpy.square(ipcw) return _estimate_concordance_index(test_event, test_time, estimate, w, tied_tol)
python
def concordance_index_ipcw(survival_train, survival_test, estimate, tau=None, tied_tol=1e-8): """Concordance index for right-censored data based on inverse probability of censoring weights. This is an alternative to the estimator in :func:`concordance_index_censored` that does not depend on the distribution of censoring times in the test data. Therefore, the estimate is unbiased and consistent for a population concordance measure that is free of censoring. It is based on inverse probability of censoring weights, thus requires access to survival times from the training data to estimate the censoring distribution. Note that this requires that survival times `survival_test` lie within the range of survival times `survival_train`. This can be achieved by specifying the truncation time `tau`. The resulting `cindex` tells how well the given prediction model works in predicting events that occur in the time range from 0 to `tau`. The estimator uses the Kaplan-Meier estimator to estimate the censoring survivor function. Therefore, it is restricted to situations where the random censoring assumption holds and censoring is independent of the features. Parameters ---------- survival_train : structured array, shape = (n_train_samples,) Survival times for training data to estimate the censoring distribution from. A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. survival_test : structured array, shape = (n_samples,) Survival times of test data. A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. estimate : array-like, shape = (n_samples,) Estimated risk of experiencing an event of test data. tau : float, optional Truncation time. 
The survival function for the underlying censoring time distribution :math:`D` needs to be positive at `tau`, i.e., `tau` should be chosen such that the probability of being censored after time `tau` is non-zero: :math:`P(D > \\tau) > 0`. If `None`, no truncation is performed. tied_tol : float, optional, default: 1e-8 The tolerance value for considering ties. If the absolute difference between risk scores is smaller or equal than `tied_tol`, risk scores are considered tied. Returns ------- cindex : float Concordance index concordant : int Number of concordant pairs discordant : int Number of discordant pairs tied_risk : int Number of pairs having tied estimated risks tied_time : int Number of comparable pairs sharing the same time References ---------- .. [1] Uno, H., Cai, T., Pencina, M. J., D’Agostino, R. B., & Wei, L. J. (2011). "On the C-statistics for evaluating overall adequacy of risk prediction procedures with censored survival data". Statistics in Medicine, 30(10), 1105–1117. """ test_event, test_time = check_y_survival(survival_test) if tau is not None: survival_test = survival_test[test_time < tau] estimate = check_array(estimate, ensure_2d=False) check_consistent_length(test_event, test_time, estimate) cens = CensoringDistributionEstimator() cens.fit(survival_train) ipcw = cens.predict_ipcw(survival_test) w = numpy.square(ipcw) return _estimate_concordance_index(test_event, test_time, estimate, w, tied_tol)
[ "def", "concordance_index_ipcw", "(", "survival_train", ",", "survival_test", ",", "estimate", ",", "tau", "=", "None", ",", "tied_tol", "=", "1e-8", ")", ":", "test_event", ",", "test_time", "=", "check_y_survival", "(", "survival_test", ")", "if", "tau", "is", "not", "None", ":", "survival_test", "=", "survival_test", "[", "test_time", "<", "tau", "]", "estimate", "=", "check_array", "(", "estimate", ",", "ensure_2d", "=", "False", ")", "check_consistent_length", "(", "test_event", ",", "test_time", ",", "estimate", ")", "cens", "=", "CensoringDistributionEstimator", "(", ")", "cens", ".", "fit", "(", "survival_train", ")", "ipcw", "=", "cens", ".", "predict_ipcw", "(", "survival_test", ")", "w", "=", "numpy", ".", "square", "(", "ipcw", ")", "return", "_estimate_concordance_index", "(", "test_event", ",", "test_time", ",", "estimate", ",", "w", ",", "tied_tol", ")" ]
Concordance index for right-censored data based on inverse probability of censoring weights. This is an alternative to the estimator in :func:`concordance_index_censored` that does not depend on the distribution of censoring times in the test data. Therefore, the estimate is unbiased and consistent for a population concordance measure that is free of censoring. It is based on inverse probability of censoring weights, thus requires access to survival times from the training data to estimate the censoring distribution. Note that this requires that survival times `survival_test` lie within the range of survival times `survival_train`. This can be achieved by specifying the truncation time `tau`. The resulting `cindex` tells how well the given prediction model works in predicting events that occur in the time range from 0 to `tau`. The estimator uses the Kaplan-Meier estimator to estimate the censoring survivor function. Therefore, it is restricted to situations where the random censoring assumption holds and censoring is independent of the features. Parameters ---------- survival_train : structured array, shape = (n_train_samples,) Survival times for training data to estimate the censoring distribution from. A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. survival_test : structured array, shape = (n_samples,) Survival times of test data. A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. estimate : array-like, shape = (n_samples,) Estimated risk of experiencing an event of test data. tau : float, optional Truncation time. The survival function for the underlying censoring time distribution :math:`D` needs to be positive at `tau`, i.e., `tau` should be chosen such that the probability of being censored after time `tau` is non-zero: :math:`P(D > \\tau) > 0`. If `None`, no truncation is performed. 
tied_tol : float, optional, default: 1e-8 The tolerance value for considering ties. If the absolute difference between risk scores is smaller or equal than `tied_tol`, risk scores are considered tied. Returns ------- cindex : float Concordance index concordant : int Number of concordant pairs discordant : int Number of discordant pairs tied_risk : int Number of pairs having tied estimated risks tied_time : int Number of comparable pairs sharing the same time References ---------- .. [1] Uno, H., Cai, T., Pencina, M. J., D’Agostino, R. B., & Wei, L. J. (2011). "On the C-statistics for evaluating overall adequacy of risk prediction procedures with censored survival data". Statistics in Medicine, 30(10), 1105–1117.
[ "Concordance", "index", "for", "right", "-", "censored", "data", "based", "on", "inverse", "probability", "of", "censoring", "weights", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/metrics.py#L177-L266
232,805
sebp/scikit-survival
sksurv/kernels/clinical.py
_nominal_kernel
def _nominal_kernel(x, y, out): """Number of features that match exactly""" for i in range(x.shape[0]): for j in range(y.shape[0]): out[i, j] += (x[i, :] == y[j, :]).sum() return out
python
def _nominal_kernel(x, y, out): """Number of features that match exactly""" for i in range(x.shape[0]): for j in range(y.shape[0]): out[i, j] += (x[i, :] == y[j, :]).sum() return out
[ "def", "_nominal_kernel", "(", "x", ",", "y", ",", "out", ")", ":", "for", "i", "in", "range", "(", "x", ".", "shape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "y", ".", "shape", "[", "0", "]", ")", ":", "out", "[", "i", ",", "j", "]", "+=", "(", "x", "[", "i", ",", ":", "]", "==", "y", "[", "j", ",", ":", "]", ")", ".", "sum", "(", ")", "return", "out" ]
Number of features that match exactly
[ "Number", "of", "features", "that", "match", "exactly" ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L26-L32
232,806
sebp/scikit-survival
sksurv/kernels/clinical.py
_get_continuous_and_ordinal_array
def _get_continuous_and_ordinal_array(x): """Convert array from continuous and ordered categorical columns""" nominal_columns = x.select_dtypes(include=['object', 'category']).columns ordinal_columns = pandas.Index([v for v in nominal_columns if x[v].cat.ordered]) continuous_columns = x.select_dtypes(include=[numpy.number]).columns x_num = x.loc[:, continuous_columns].astype(numpy.float64).values if len(ordinal_columns) > 0: x = _ordinal_as_numeric(x, ordinal_columns) nominal_columns = nominal_columns.difference(ordinal_columns) x_out = numpy.column_stack((x_num, x)) else: x_out = x_num return x_out, nominal_columns
python
def _get_continuous_and_ordinal_array(x): """Convert array from continuous and ordered categorical columns""" nominal_columns = x.select_dtypes(include=['object', 'category']).columns ordinal_columns = pandas.Index([v for v in nominal_columns if x[v].cat.ordered]) continuous_columns = x.select_dtypes(include=[numpy.number]).columns x_num = x.loc[:, continuous_columns].astype(numpy.float64).values if len(ordinal_columns) > 0: x = _ordinal_as_numeric(x, ordinal_columns) nominal_columns = nominal_columns.difference(ordinal_columns) x_out = numpy.column_stack((x_num, x)) else: x_out = x_num return x_out, nominal_columns
[ "def", "_get_continuous_and_ordinal_array", "(", "x", ")", ":", "nominal_columns", "=", "x", ".", "select_dtypes", "(", "include", "=", "[", "'object'", ",", "'category'", "]", ")", ".", "columns", "ordinal_columns", "=", "pandas", ".", "Index", "(", "[", "v", "for", "v", "in", "nominal_columns", "if", "x", "[", "v", "]", ".", "cat", ".", "ordered", "]", ")", "continuous_columns", "=", "x", ".", "select_dtypes", "(", "include", "=", "[", "numpy", ".", "number", "]", ")", ".", "columns", "x_num", "=", "x", ".", "loc", "[", ":", ",", "continuous_columns", "]", ".", "astype", "(", "numpy", ".", "float64", ")", ".", "values", "if", "len", "(", "ordinal_columns", ")", ">", "0", ":", "x", "=", "_ordinal_as_numeric", "(", "x", ",", "ordinal_columns", ")", "nominal_columns", "=", "nominal_columns", ".", "difference", "(", "ordinal_columns", ")", "x_out", "=", "numpy", ".", "column_stack", "(", "(", "x_num", ",", "x", ")", ")", "else", ":", "x_out", "=", "x_num", "return", "x_out", ",", "nominal_columns" ]
Convert array from continuous and ordered categorical columns
[ "Convert", "array", "from", "continuous", "and", "ordered", "categorical", "columns" ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L35-L50
232,807
sebp/scikit-survival
sksurv/kernels/clinical.py
clinical_kernel
def clinical_kernel(x, y=None): """Computes clinical kernel The clinical kernel distinguishes between continuous ordinal,and nominal variables. Parameters ---------- x : pandas.DataFrame, shape = (n_samples_x, n_features) Training data y : pandas.DataFrame, shape = (n_samples_y, n_features) Testing data Returns ------- kernel : array, shape = (n_samples_x, n_samples_y) Kernel matrix. Values are normalized to lie within [0, 1]. References ---------- .. [1] Daemen, A., De Moor, B., "Development of a kernel function for clinical data". Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 5913-7, 2009 """ if y is not None: if x.shape[1] != y.shape[1]: raise ValueError('x and y have different number of features') if not x.columns.equals(y.columns): raise ValueError('columns do not match') else: y = x mat = numpy.zeros((x.shape[0], y.shape[0]), dtype=float) x_numeric, nominal_columns = _get_continuous_and_ordinal_array(x) if id(x) != id(y): y_numeric, _ = _get_continuous_and_ordinal_array(y) else: y_numeric = x_numeric continuous_ordinal_kernel(x_numeric, y_numeric, mat) _nominal_kernel(x.loc[:, nominal_columns].values, y.loc[:, nominal_columns].values, mat) mat /= x.shape[1] return mat
python
def clinical_kernel(x, y=None): """Computes clinical kernel The clinical kernel distinguishes between continuous ordinal,and nominal variables. Parameters ---------- x : pandas.DataFrame, shape = (n_samples_x, n_features) Training data y : pandas.DataFrame, shape = (n_samples_y, n_features) Testing data Returns ------- kernel : array, shape = (n_samples_x, n_samples_y) Kernel matrix. Values are normalized to lie within [0, 1]. References ---------- .. [1] Daemen, A., De Moor, B., "Development of a kernel function for clinical data". Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 5913-7, 2009 """ if y is not None: if x.shape[1] != y.shape[1]: raise ValueError('x and y have different number of features') if not x.columns.equals(y.columns): raise ValueError('columns do not match') else: y = x mat = numpy.zeros((x.shape[0], y.shape[0]), dtype=float) x_numeric, nominal_columns = _get_continuous_and_ordinal_array(x) if id(x) != id(y): y_numeric, _ = _get_continuous_and_ordinal_array(y) else: y_numeric = x_numeric continuous_ordinal_kernel(x_numeric, y_numeric, mat) _nominal_kernel(x.loc[:, nominal_columns].values, y.loc[:, nominal_columns].values, mat) mat /= x.shape[1] return mat
[ "def", "clinical_kernel", "(", "x", ",", "y", "=", "None", ")", ":", "if", "y", "is", "not", "None", ":", "if", "x", ".", "shape", "[", "1", "]", "!=", "y", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "'x and y have different number of features'", ")", "if", "not", "x", ".", "columns", ".", "equals", "(", "y", ".", "columns", ")", ":", "raise", "ValueError", "(", "'columns do not match'", ")", "else", ":", "y", "=", "x", "mat", "=", "numpy", ".", "zeros", "(", "(", "x", ".", "shape", "[", "0", "]", ",", "y", ".", "shape", "[", "0", "]", ")", ",", "dtype", "=", "float", ")", "x_numeric", ",", "nominal_columns", "=", "_get_continuous_and_ordinal_array", "(", "x", ")", "if", "id", "(", "x", ")", "!=", "id", "(", "y", ")", ":", "y_numeric", ",", "_", "=", "_get_continuous_and_ordinal_array", "(", "y", ")", "else", ":", "y_numeric", "=", "x_numeric", "continuous_ordinal_kernel", "(", "x_numeric", ",", "y_numeric", ",", "mat", ")", "_nominal_kernel", "(", "x", ".", "loc", "[", ":", ",", "nominal_columns", "]", ".", "values", ",", "y", ".", "loc", "[", ":", ",", "nominal_columns", "]", ".", "values", ",", "mat", ")", "mat", "/=", "x", ".", "shape", "[", "1", "]", "return", "mat" ]
Computes clinical kernel The clinical kernel distinguishes between continuous ordinal,and nominal variables. Parameters ---------- x : pandas.DataFrame, shape = (n_samples_x, n_features) Training data y : pandas.DataFrame, shape = (n_samples_y, n_features) Testing data Returns ------- kernel : array, shape = (n_samples_x, n_samples_y) Kernel matrix. Values are normalized to lie within [0, 1]. References ---------- .. [1] Daemen, A., De Moor, B., "Development of a kernel function for clinical data". Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 5913-7, 2009
[ "Computes", "clinical", "kernel" ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L61-L107
232,808
sebp/scikit-survival
sksurv/kernels/clinical.py
ClinicalKernelTransform._prepare_by_column_dtype
def _prepare_by_column_dtype(self, X): """Get distance functions for each column's dtype""" if not isinstance(X, pandas.DataFrame): raise TypeError('X must be a pandas DataFrame') numeric_columns = [] nominal_columns = [] numeric_ranges = [] fit_data = numpy.empty_like(X) for i, dt in enumerate(X.dtypes): col = X.iloc[:, i] if is_categorical_dtype(dt): if col.cat.ordered: numeric_ranges.append(col.cat.codes.max() - col.cat.codes.min()) numeric_columns.append(i) else: nominal_columns.append(i) col = col.cat.codes elif is_numeric_dtype(dt): numeric_ranges.append(col.max() - col.min()) numeric_columns.append(i) else: raise TypeError('unsupported dtype: %r' % dt) fit_data[:, i] = col.values self._numeric_columns = numpy.asarray(numeric_columns) self._nominal_columns = numpy.asarray(nominal_columns) self._numeric_ranges = numpy.asarray(numeric_ranges, dtype=float) self.X_fit_ = fit_data
python
def _prepare_by_column_dtype(self, X): """Get distance functions for each column's dtype""" if not isinstance(X, pandas.DataFrame): raise TypeError('X must be a pandas DataFrame') numeric_columns = [] nominal_columns = [] numeric_ranges = [] fit_data = numpy.empty_like(X) for i, dt in enumerate(X.dtypes): col = X.iloc[:, i] if is_categorical_dtype(dt): if col.cat.ordered: numeric_ranges.append(col.cat.codes.max() - col.cat.codes.min()) numeric_columns.append(i) else: nominal_columns.append(i) col = col.cat.codes elif is_numeric_dtype(dt): numeric_ranges.append(col.max() - col.min()) numeric_columns.append(i) else: raise TypeError('unsupported dtype: %r' % dt) fit_data[:, i] = col.values self._numeric_columns = numpy.asarray(numeric_columns) self._nominal_columns = numpy.asarray(nominal_columns) self._numeric_ranges = numpy.asarray(numeric_ranges, dtype=float) self.X_fit_ = fit_data
[ "def", "_prepare_by_column_dtype", "(", "self", ",", "X", ")", ":", "if", "not", "isinstance", "(", "X", ",", "pandas", ".", "DataFrame", ")", ":", "raise", "TypeError", "(", "'X must be a pandas DataFrame'", ")", "numeric_columns", "=", "[", "]", "nominal_columns", "=", "[", "]", "numeric_ranges", "=", "[", "]", "fit_data", "=", "numpy", ".", "empty_like", "(", "X", ")", "for", "i", ",", "dt", "in", "enumerate", "(", "X", ".", "dtypes", ")", ":", "col", "=", "X", ".", "iloc", "[", ":", ",", "i", "]", "if", "is_categorical_dtype", "(", "dt", ")", ":", "if", "col", ".", "cat", ".", "ordered", ":", "numeric_ranges", ".", "append", "(", "col", ".", "cat", ".", "codes", ".", "max", "(", ")", "-", "col", ".", "cat", ".", "codes", ".", "min", "(", ")", ")", "numeric_columns", ".", "append", "(", "i", ")", "else", ":", "nominal_columns", ".", "append", "(", "i", ")", "col", "=", "col", ".", "cat", ".", "codes", "elif", "is_numeric_dtype", "(", "dt", ")", ":", "numeric_ranges", ".", "append", "(", "col", ".", "max", "(", ")", "-", "col", ".", "min", "(", ")", ")", "numeric_columns", ".", "append", "(", "i", ")", "else", ":", "raise", "TypeError", "(", "'unsupported dtype: %r'", "%", "dt", ")", "fit_data", "[", ":", ",", "i", "]", "=", "col", ".", "values", "self", ".", "_numeric_columns", "=", "numpy", ".", "asarray", "(", "numeric_columns", ")", "self", ".", "_nominal_columns", "=", "numpy", ".", "asarray", "(", "nominal_columns", ")", "self", ".", "_numeric_ranges", "=", "numpy", ".", "asarray", "(", "numeric_ranges", ",", "dtype", "=", "float", ")", "self", ".", "X_fit_", "=", "fit_data" ]
Get distance functions for each column's dtype
[ "Get", "distance", "functions", "for", "each", "column", "s", "dtype" ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L153-L185
232,809
sebp/scikit-survival
sksurv/kernels/clinical.py
ClinicalKernelTransform.fit
def fit(self, X, y=None, **kwargs): """Determine transformation parameters from data in X. Subsequent calls to `transform(Y)` compute the pairwise distance to `X`. Parameters of the clinical kernel are only updated if `fit_once` is `False`, otherwise you have to explicitly call `prepare()` once. Parameters ---------- X: pandas.DataFrame, shape = (n_samples, n_features) Data to estimate parameters from. y : None Argument is ignored (included for compatibility reasons). kwargs : dict Argument is ignored (included for compatibility reasons). Returns ------- self : object Returns the instance itself. """ if X.ndim != 2: raise ValueError("expected 2d array, but got %d" % X.ndim) if self.fit_once: self.X_fit_ = X else: self._prepare_by_column_dtype(X) return self
python
def fit(self, X, y=None, **kwargs): """Determine transformation parameters from data in X. Subsequent calls to `transform(Y)` compute the pairwise distance to `X`. Parameters of the clinical kernel are only updated if `fit_once` is `False`, otherwise you have to explicitly call `prepare()` once. Parameters ---------- X: pandas.DataFrame, shape = (n_samples, n_features) Data to estimate parameters from. y : None Argument is ignored (included for compatibility reasons). kwargs : dict Argument is ignored (included for compatibility reasons). Returns ------- self : object Returns the instance itself. """ if X.ndim != 2: raise ValueError("expected 2d array, but got %d" % X.ndim) if self.fit_once: self.X_fit_ = X else: self._prepare_by_column_dtype(X) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "X", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"expected 2d array, but got %d\"", "%", "X", ".", "ndim", ")", "if", "self", ".", "fit_once", ":", "self", ".", "X_fit_", "=", "X", "else", ":", "self", ".", "_prepare_by_column_dtype", "(", "X", ")", "return", "self" ]
Determine transformation parameters from data in X. Subsequent calls to `transform(Y)` compute the pairwise distance to `X`. Parameters of the clinical kernel are only updated if `fit_once` is `False`, otherwise you have to explicitly call `prepare()` once. Parameters ---------- X: pandas.DataFrame, shape = (n_samples, n_features) Data to estimate parameters from. y : None Argument is ignored (included for compatibility reasons). kwargs : dict Argument is ignored (included for compatibility reasons). Returns ------- self : object Returns the instance itself.
[ "Determine", "transformation", "parameters", "from", "data", "in", "X", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L187-L220
232,810
sebp/scikit-survival
sksurv/kernels/clinical.py
ClinicalKernelTransform.transform
def transform(self, Y): r"""Compute all pairwise distances between `self.X_fit_` and `Y`. Parameters ---------- y : array-like, shape = (n_samples_y, n_features) Returns ------- kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_) Kernel matrix. Values are normalized to lie within [0, 1]. """ check_is_fitted(self, 'X_fit_') n_samples_x, n_features = self.X_fit_.shape Y = numpy.asarray(Y) if Y.shape[1] != n_features: raise ValueError('expected array with %d features, but got %d' % (n_features, Y.shape[1])) n_samples_y = Y.shape[0] mat = numpy.zeros((n_samples_y, n_samples_x), dtype=float) continuous_ordinal_kernel_with_ranges(Y[:, self._numeric_columns].astype(numpy.float64), self.X_fit_[:, self._numeric_columns].astype(numpy.float64), self._numeric_ranges, mat) if len(self._nominal_columns) > 0: _nominal_kernel(Y[:, self._nominal_columns], self.X_fit_[:, self._nominal_columns], mat) mat /= n_features return mat
python
def transform(self, Y): r"""Compute all pairwise distances between `self.X_fit_` and `Y`. Parameters ---------- y : array-like, shape = (n_samples_y, n_features) Returns ------- kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_) Kernel matrix. Values are normalized to lie within [0, 1]. """ check_is_fitted(self, 'X_fit_') n_samples_x, n_features = self.X_fit_.shape Y = numpy.asarray(Y) if Y.shape[1] != n_features: raise ValueError('expected array with %d features, but got %d' % (n_features, Y.shape[1])) n_samples_y = Y.shape[0] mat = numpy.zeros((n_samples_y, n_samples_x), dtype=float) continuous_ordinal_kernel_with_ranges(Y[:, self._numeric_columns].astype(numpy.float64), self.X_fit_[:, self._numeric_columns].astype(numpy.float64), self._numeric_ranges, mat) if len(self._nominal_columns) > 0: _nominal_kernel(Y[:, self._nominal_columns], self.X_fit_[:, self._nominal_columns], mat) mat /= n_features return mat
[ "def", "transform", "(", "self", ",", "Y", ")", ":", "check_is_fitted", "(", "self", ",", "'X_fit_'", ")", "n_samples_x", ",", "n_features", "=", "self", ".", "X_fit_", ".", "shape", "Y", "=", "numpy", ".", "asarray", "(", "Y", ")", "if", "Y", ".", "shape", "[", "1", "]", "!=", "n_features", ":", "raise", "ValueError", "(", "'expected array with %d features, but got %d'", "%", "(", "n_features", ",", "Y", ".", "shape", "[", "1", "]", ")", ")", "n_samples_y", "=", "Y", ".", "shape", "[", "0", "]", "mat", "=", "numpy", ".", "zeros", "(", "(", "n_samples_y", ",", "n_samples_x", ")", ",", "dtype", "=", "float", ")", "continuous_ordinal_kernel_with_ranges", "(", "Y", "[", ":", ",", "self", ".", "_numeric_columns", "]", ".", "astype", "(", "numpy", ".", "float64", ")", ",", "self", ".", "X_fit_", "[", ":", ",", "self", ".", "_numeric_columns", "]", ".", "astype", "(", "numpy", ".", "float64", ")", ",", "self", ".", "_numeric_ranges", ",", "mat", ")", "if", "len", "(", "self", ".", "_nominal_columns", ")", ">", "0", ":", "_nominal_kernel", "(", "Y", "[", ":", ",", "self", ".", "_nominal_columns", "]", ",", "self", ".", "X_fit_", "[", ":", ",", "self", ".", "_nominal_columns", "]", ",", "mat", ")", "mat", "/=", "n_features", "return", "mat" ]
r"""Compute all pairwise distances between `self.X_fit_` and `Y`. Parameters ---------- y : array-like, shape = (n_samples_y, n_features) Returns ------- kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_) Kernel matrix. Values are normalized to lie within [0, 1].
[ "r", "Compute", "all", "pairwise", "distances", "between", "self", ".", "X_fit_", "and", "Y", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/kernels/clinical.py#L222-L257
232,811
sebp/scikit-survival
sksurv/ensemble/boosting.py
_fit_stage_componentwise
def _fit_stage_componentwise(X, residuals, sample_weight, **fit_params): """Fit component-wise weighted least squares model""" n_features = X.shape[1] base_learners = [] error = numpy.empty(n_features) for component in range(n_features): learner = ComponentwiseLeastSquares(component).fit(X, residuals, sample_weight) l_pred = learner.predict(X) error[component] = squared_norm(residuals - l_pred) base_learners.append(learner) # TODO: could use bottleneck.nanargmin for speed best_component = numpy.nanargmin(error) best_learner = base_learners[best_component] return best_learner
python
def _fit_stage_componentwise(X, residuals, sample_weight, **fit_params): """Fit component-wise weighted least squares model""" n_features = X.shape[1] base_learners = [] error = numpy.empty(n_features) for component in range(n_features): learner = ComponentwiseLeastSquares(component).fit(X, residuals, sample_weight) l_pred = learner.predict(X) error[component] = squared_norm(residuals - l_pred) base_learners.append(learner) # TODO: could use bottleneck.nanargmin for speed best_component = numpy.nanargmin(error) best_learner = base_learners[best_component] return best_learner
[ "def", "_fit_stage_componentwise", "(", "X", ",", "residuals", ",", "sample_weight", ",", "*", "*", "fit_params", ")", ":", "n_features", "=", "X", ".", "shape", "[", "1", "]", "base_learners", "=", "[", "]", "error", "=", "numpy", ".", "empty", "(", "n_features", ")", "for", "component", "in", "range", "(", "n_features", ")", ":", "learner", "=", "ComponentwiseLeastSquares", "(", "component", ")", ".", "fit", "(", "X", ",", "residuals", ",", "sample_weight", ")", "l_pred", "=", "learner", ".", "predict", "(", "X", ")", "error", "[", "component", "]", "=", "squared_norm", "(", "residuals", "-", "l_pred", ")", "base_learners", ".", "append", "(", "learner", ")", "# TODO: could use bottleneck.nanargmin for speed", "best_component", "=", "numpy", ".", "nanargmin", "(", "error", ")", "best_learner", "=", "base_learners", "[", "best_component", "]", "return", "best_learner" ]
Fit component-wise weighted least squares model
[ "Fit", "component", "-", "wise", "weighted", "least", "squares", "model" ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L72-L87
232,812
sebp/scikit-survival
sksurv/ensemble/boosting.py
ComponentwiseGradientBoostingSurvivalAnalysis.coef_
def coef_(self): """Return the aggregated coefficients. Returns ------- coef_ : ndarray, shape = (n_features + 1,) Coefficients of features. The first element denotes the intercept. """ coef = numpy.zeros(self.n_features_ + 1, dtype=float) for estimator in self.estimators_: coef[estimator.component] += self.learning_rate * estimator.coef_ return coef
python
def coef_(self): """Return the aggregated coefficients. Returns ------- coef_ : ndarray, shape = (n_features + 1,) Coefficients of features. The first element denotes the intercept. """ coef = numpy.zeros(self.n_features_ + 1, dtype=float) for estimator in self.estimators_: coef[estimator.component] += self.learning_rate * estimator.coef_ return coef
[ "def", "coef_", "(", "self", ")", ":", "coef", "=", "numpy", ".", "zeros", "(", "self", ".", "n_features_", "+", "1", ",", "dtype", "=", "float", ")", "for", "estimator", "in", "self", ".", "estimators_", ":", "coef", "[", "estimator", ".", "component", "]", "+=", "self", ".", "learning_rate", "*", "estimator", ".", "coef_", "return", "coef" ]
Return the aggregated coefficients. Returns ------- coef_ : ndarray, shape = (n_features + 1,) Coefficients of features. The first element denotes the intercept.
[ "Return", "the", "aggregated", "coefficients", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L338-L351
232,813
sebp/scikit-survival
sksurv/ensemble/boosting.py
GradientBoostingSurvivalAnalysis._fit_stage
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask, random_state, scale, X_idx_sorted, X_csc=None, X_csr=None): """Fit another stage of ``n_classes_`` trees to the boosting model. """ assert sample_mask.dtype == numpy.bool loss = self.loss_ # whether to use dropout in next iteration do_dropout = self.dropout_rate > 0. and 0 < i < len(scale) - 1 for k in range(loss.K): residual = loss.negative_gradient(y, y_pred, k=k, sample_weight=sample_weight) # induce regression tree on residuals tree = DecisionTreeRegressor( criterion=self.criterion, splitter='best', max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, min_impurity_split=self.min_impurity_split, min_impurity_decrease=self.min_impurity_decrease, max_features=self.max_features, max_leaf_nodes=self.max_leaf_nodes, random_state=random_state, presort=self.presort) if self.subsample < 1.0: # no inplace multiplication! sample_weight = sample_weight * sample_mask.astype(numpy.float64) X = X_csr if X_csr is not None else X tree.fit(X, residual, sample_weight=sample_weight, check_input=False, X_idx_sorted=X_idx_sorted) # add tree to ensemble self.estimators_[i, k] = tree # update tree leaves if do_dropout: # select base learners to be dropped for next iteration drop_model, n_dropped = _sample_binomial_plus_one(self.dropout_rate, i + 1, random_state) # adjust scaling factor of tree that is going to be trained in next iteration scale[i + 1] = 1. / (n_dropped + 1.) y_pred[:, k] = 0 for m in range(i + 1): if drop_model[m] == 1: # adjust scaling factor of dropped trees scale[m] *= n_dropped / (n_dropped + 1.) 
else: # pseudoresponse of next iteration (without contribution of dropped trees) y_pred[:, k] += self.learning_rate * scale[m] * self.estimators_[m, k].predict(X).ravel() else: # update tree leaves loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred, sample_weight, sample_mask, self.learning_rate, k=k) return y_pred
python
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask, random_state, scale, X_idx_sorted, X_csc=None, X_csr=None): """Fit another stage of ``n_classes_`` trees to the boosting model. """ assert sample_mask.dtype == numpy.bool loss = self.loss_ # whether to use dropout in next iteration do_dropout = self.dropout_rate > 0. and 0 < i < len(scale) - 1 for k in range(loss.K): residual = loss.negative_gradient(y, y_pred, k=k, sample_weight=sample_weight) # induce regression tree on residuals tree = DecisionTreeRegressor( criterion=self.criterion, splitter='best', max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, min_impurity_split=self.min_impurity_split, min_impurity_decrease=self.min_impurity_decrease, max_features=self.max_features, max_leaf_nodes=self.max_leaf_nodes, random_state=random_state, presort=self.presort) if self.subsample < 1.0: # no inplace multiplication! sample_weight = sample_weight * sample_mask.astype(numpy.float64) X = X_csr if X_csr is not None else X tree.fit(X, residual, sample_weight=sample_weight, check_input=False, X_idx_sorted=X_idx_sorted) # add tree to ensemble self.estimators_[i, k] = tree # update tree leaves if do_dropout: # select base learners to be dropped for next iteration drop_model, n_dropped = _sample_binomial_plus_one(self.dropout_rate, i + 1, random_state) # adjust scaling factor of tree that is going to be trained in next iteration scale[i + 1] = 1. / (n_dropped + 1.) y_pred[:, k] = 0 for m in range(i + 1): if drop_model[m] == 1: # adjust scaling factor of dropped trees scale[m] *= n_dropped / (n_dropped + 1.) 
else: # pseudoresponse of next iteration (without contribution of dropped trees) y_pred[:, k] += self.learning_rate * scale[m] * self.estimators_[m, k].predict(X).ravel() else: # update tree leaves loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred, sample_weight, sample_mask, self.learning_rate, k=k) return y_pred
[ "def", "_fit_stage", "(", "self", ",", "i", ",", "X", ",", "y", ",", "y_pred", ",", "sample_weight", ",", "sample_mask", ",", "random_state", ",", "scale", ",", "X_idx_sorted", ",", "X_csc", "=", "None", ",", "X_csr", "=", "None", ")", ":", "assert", "sample_mask", ".", "dtype", "==", "numpy", ".", "bool", "loss", "=", "self", ".", "loss_", "# whether to use dropout in next iteration", "do_dropout", "=", "self", ".", "dropout_rate", ">", "0.", "and", "0", "<", "i", "<", "len", "(", "scale", ")", "-", "1", "for", "k", "in", "range", "(", "loss", ".", "K", ")", ":", "residual", "=", "loss", ".", "negative_gradient", "(", "y", ",", "y_pred", ",", "k", "=", "k", ",", "sample_weight", "=", "sample_weight", ")", "# induce regression tree on residuals", "tree", "=", "DecisionTreeRegressor", "(", "criterion", "=", "self", ".", "criterion", ",", "splitter", "=", "'best'", ",", "max_depth", "=", "self", ".", "max_depth", ",", "min_samples_split", "=", "self", ".", "min_samples_split", ",", "min_samples_leaf", "=", "self", ".", "min_samples_leaf", ",", "min_weight_fraction_leaf", "=", "self", ".", "min_weight_fraction_leaf", ",", "min_impurity_split", "=", "self", ".", "min_impurity_split", ",", "min_impurity_decrease", "=", "self", ".", "min_impurity_decrease", ",", "max_features", "=", "self", ".", "max_features", ",", "max_leaf_nodes", "=", "self", ".", "max_leaf_nodes", ",", "random_state", "=", "random_state", ",", "presort", "=", "self", ".", "presort", ")", "if", "self", ".", "subsample", "<", "1.0", ":", "# no inplace multiplication!", "sample_weight", "=", "sample_weight", "*", "sample_mask", ".", "astype", "(", "numpy", ".", "float64", ")", "X", "=", "X_csr", "if", "X_csr", "is", "not", "None", "else", "X", "tree", ".", "fit", "(", "X", ",", "residual", ",", "sample_weight", "=", "sample_weight", ",", "check_input", "=", "False", ",", "X_idx_sorted", "=", "X_idx_sorted", ")", "# add tree to ensemble", "self", ".", "estimators_", "[", "i", ",", "k", "]", "=", 
"tree", "# update tree leaves", "if", "do_dropout", ":", "# select base learners to be dropped for next iteration", "drop_model", ",", "n_dropped", "=", "_sample_binomial_plus_one", "(", "self", ".", "dropout_rate", ",", "i", "+", "1", ",", "random_state", ")", "# adjust scaling factor of tree that is going to be trained in next iteration", "scale", "[", "i", "+", "1", "]", "=", "1.", "/", "(", "n_dropped", "+", "1.", ")", "y_pred", "[", ":", ",", "k", "]", "=", "0", "for", "m", "in", "range", "(", "i", "+", "1", ")", ":", "if", "drop_model", "[", "m", "]", "==", "1", ":", "# adjust scaling factor of dropped trees", "scale", "[", "m", "]", "*=", "n_dropped", "/", "(", "n_dropped", "+", "1.", ")", "else", ":", "# pseudoresponse of next iteration (without contribution of dropped trees)", "y_pred", "[", ":", ",", "k", "]", "+=", "self", ".", "learning_rate", "*", "scale", "[", "m", "]", "*", "self", ".", "estimators_", "[", "m", ",", "k", "]", ".", "predict", "(", "X", ")", ".", "ravel", "(", ")", "else", ":", "# update tree leaves", "loss", ".", "update_terminal_regions", "(", "tree", ".", "tree_", ",", "X", ",", "y", ",", "residual", ",", "y_pred", ",", "sample_weight", ",", "sample_mask", ",", "self", ".", "learning_rate", ",", "k", "=", "k", ")", "return", "y_pred" ]
Fit another stage of ``n_classes_`` trees to the boosting model.
[ "Fit", "another", "stage", "of", "n_classes_", "trees", "to", "the", "boosting", "model", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L609-L671
232,814
sebp/scikit-survival
sksurv/ensemble/boosting.py
GradientBoostingSurvivalAnalysis._fit_stages
def _fit_stages(self, X, y, y_pred, sample_weight, random_state, begin_at_stage=0, monitor=None, X_idx_sorted=None): """Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping. """ n_samples = X.shape[0] do_oob = self.subsample < 1.0 sample_mask = numpy.ones((n_samples, ), dtype=numpy.bool) n_inbag = max(1, int(self.subsample * n_samples)) loss_ = self.loss_ if self.verbose: verbose_reporter = VerboseReporter(self.verbose) verbose_reporter.init(self, begin_at_stage) X_csc = csc_matrix(X) if issparse(X) else None X_csr = csr_matrix(X) if issparse(X) else None if self.dropout_rate > 0.: scale = numpy.ones(self.n_estimators, dtype=float) else: scale = None # perform boosting iterations i = begin_at_stage for i in range(begin_at_stage, self.n_estimators): # subsampling if do_oob: sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) # OOB score before adding this stage y_oob_sample = y[~sample_mask] old_oob_score = loss_(y_oob_sample, y_pred[~sample_mask], sample_weight[~sample_mask]) # fit next stage of trees y_pred = self._fit_stage(i, X, y, y_pred, sample_weight, sample_mask, random_state, scale, X_idx_sorted, X_csc, X_csr) # track deviance (= loss) if do_oob: self.train_score_[i] = loss_(y[sample_mask], y_pred[sample_mask], sample_weight[sample_mask]) self.oob_improvement_[i] = (old_oob_score - loss_(y_oob_sample, y_pred[~sample_mask], sample_weight[~sample_mask])) else: # no need to fancy index w/ no subsampling self.train_score_[i] = loss_(y, y_pred, sample_weight) if self.verbose > 0: verbose_reporter.update(i, self) if monitor is not None: early_stopping = monitor(i, self, locals()) if early_stopping: break if self.dropout_rate > 0.: self.scale_ = scale return i + 1
python
def _fit_stages(self, X, y, y_pred, sample_weight, random_state, begin_at_stage=0, monitor=None, X_idx_sorted=None): """Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping. """ n_samples = X.shape[0] do_oob = self.subsample < 1.0 sample_mask = numpy.ones((n_samples, ), dtype=numpy.bool) n_inbag = max(1, int(self.subsample * n_samples)) loss_ = self.loss_ if self.verbose: verbose_reporter = VerboseReporter(self.verbose) verbose_reporter.init(self, begin_at_stage) X_csc = csc_matrix(X) if issparse(X) else None X_csr = csr_matrix(X) if issparse(X) else None if self.dropout_rate > 0.: scale = numpy.ones(self.n_estimators, dtype=float) else: scale = None # perform boosting iterations i = begin_at_stage for i in range(begin_at_stage, self.n_estimators): # subsampling if do_oob: sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) # OOB score before adding this stage y_oob_sample = y[~sample_mask] old_oob_score = loss_(y_oob_sample, y_pred[~sample_mask], sample_weight[~sample_mask]) # fit next stage of trees y_pred = self._fit_stage(i, X, y, y_pred, sample_weight, sample_mask, random_state, scale, X_idx_sorted, X_csc, X_csr) # track deviance (= loss) if do_oob: self.train_score_[i] = loss_(y[sample_mask], y_pred[sample_mask], sample_weight[sample_mask]) self.oob_improvement_[i] = (old_oob_score - loss_(y_oob_sample, y_pred[~sample_mask], sample_weight[~sample_mask])) else: # no need to fancy index w/ no subsampling self.train_score_[i] = loss_(y, y_pred, sample_weight) if self.verbose > 0: verbose_reporter.update(i, self) if monitor is not None: early_stopping = monitor(i, self, locals()) if early_stopping: break if self.dropout_rate > 0.: self.scale_ = scale return i + 1
[ "def", "_fit_stages", "(", "self", ",", "X", ",", "y", ",", "y_pred", ",", "sample_weight", ",", "random_state", ",", "begin_at_stage", "=", "0", ",", "monitor", "=", "None", ",", "X_idx_sorted", "=", "None", ")", ":", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "do_oob", "=", "self", ".", "subsample", "<", "1.0", "sample_mask", "=", "numpy", ".", "ones", "(", "(", "n_samples", ",", ")", ",", "dtype", "=", "numpy", ".", "bool", ")", "n_inbag", "=", "max", "(", "1", ",", "int", "(", "self", ".", "subsample", "*", "n_samples", ")", ")", "loss_", "=", "self", ".", "loss_", "if", "self", ".", "verbose", ":", "verbose_reporter", "=", "VerboseReporter", "(", "self", ".", "verbose", ")", "verbose_reporter", ".", "init", "(", "self", ",", "begin_at_stage", ")", "X_csc", "=", "csc_matrix", "(", "X", ")", "if", "issparse", "(", "X", ")", "else", "None", "X_csr", "=", "csr_matrix", "(", "X", ")", "if", "issparse", "(", "X", ")", "else", "None", "if", "self", ".", "dropout_rate", ">", "0.", ":", "scale", "=", "numpy", ".", "ones", "(", "self", ".", "n_estimators", ",", "dtype", "=", "float", ")", "else", ":", "scale", "=", "None", "# perform boosting iterations", "i", "=", "begin_at_stage", "for", "i", "in", "range", "(", "begin_at_stage", ",", "self", ".", "n_estimators", ")", ":", "# subsampling", "if", "do_oob", ":", "sample_mask", "=", "_random_sample_mask", "(", "n_samples", ",", "n_inbag", ",", "random_state", ")", "# OOB score before adding this stage", "y_oob_sample", "=", "y", "[", "~", "sample_mask", "]", "old_oob_score", "=", "loss_", "(", "y_oob_sample", ",", "y_pred", "[", "~", "sample_mask", "]", ",", "sample_weight", "[", "~", "sample_mask", "]", ")", "# fit next stage of trees", "y_pred", "=", "self", ".", "_fit_stage", "(", "i", ",", "X", ",", "y", ",", "y_pred", ",", "sample_weight", ",", "sample_mask", ",", "random_state", ",", "scale", ",", "X_idx_sorted", ",", "X_csc", ",", "X_csr", ")", "# track deviance (= loss)", "if", "do_oob", ":", "self", 
".", "train_score_", "[", "i", "]", "=", "loss_", "(", "y", "[", "sample_mask", "]", ",", "y_pred", "[", "sample_mask", "]", ",", "sample_weight", "[", "sample_mask", "]", ")", "self", ".", "oob_improvement_", "[", "i", "]", "=", "(", "old_oob_score", "-", "loss_", "(", "y_oob_sample", ",", "y_pred", "[", "~", "sample_mask", "]", ",", "sample_weight", "[", "~", "sample_mask", "]", ")", ")", "else", ":", "# no need to fancy index w/ no subsampling", "self", ".", "train_score_", "[", "i", "]", "=", "loss_", "(", "y", ",", "y_pred", ",", "sample_weight", ")", "if", "self", ".", "verbose", ">", "0", ":", "verbose_reporter", ".", "update", "(", "i", ",", "self", ")", "if", "monitor", "is", "not", "None", ":", "early_stopping", "=", "monitor", "(", "i", ",", "self", ",", "locals", "(", ")", ")", "if", "early_stopping", ":", "break", "if", "self", ".", "dropout_rate", ">", "0.", ":", "self", ".", "scale_", "=", "scale", "return", "i", "+", "1" ]
Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping.
[ "Iteratively", "fits", "the", "stages", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L673-L741
232,815
sebp/scikit-survival
sksurv/ensemble/boosting.py
GradientBoostingSurvivalAnalysis.fit
def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data matrix y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. sample_weight : array-like, shape = (n_samples,), optional Weights given to each sample. If omitted, all samples have weight 1. monitor : callable, optional The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object Returns self. """ random_state = check_random_state(self.random_state) X, event, time = check_arrays_survival(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE) n_samples, self.n_features_ = X.shape X = X.astype(DTYPE) if sample_weight is None: sample_weight = numpy.ones(n_samples, dtype=numpy.float32) else: sample_weight = column_or_1d(sample_weight, warn=True) check_consistent_length(X, sample_weight) self._check_params() self.loss_ = LOSS_FUNCTIONS[self.loss](1) if isinstance(self.loss_, (CensoredSquaredLoss, IPCWLeastSquaresError)): time = numpy.log(time) self._init_state() self.init_.fit(X, (event, time), sample_weight) y_pred = self.init_.predict(X) begin_at_stage = 0 if self.presort is True and issparse(X): raise ValueError( "Presorting is not supported for sparse matrices.") presort = self.presort # Allow presort to be 'auto', which means True if the dataset is dense, # otherwise it will be False. 
if presort == 'auto': presort = not issparse(X) X_idx_sorted = None if presort: X_idx_sorted = numpy.asfortranarray(numpy.argsort(X, axis=0), dtype=numpy.int32) # fit the boosting stages y = numpy.fromiter(zip(event, time), dtype=[('event', numpy.bool), ('time', numpy.float64)]) n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state, begin_at_stage, monitor, X_idx_sorted) # change shape of arrays after fit (early-stopping or additional tests) if n_stages != self.estimators_.shape[0]: self.estimators_ = self.estimators_[:n_stages] self.train_score_ = self.train_score_[:n_stages] if hasattr(self, 'oob_improvement_'): self.oob_improvement_ = self.oob_improvement_[:n_stages] self.n_estimators_ = n_stages return self
python
def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data matrix y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. sample_weight : array-like, shape = (n_samples,), optional Weights given to each sample. If omitted, all samples have weight 1. monitor : callable, optional The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object Returns self. """ random_state = check_random_state(self.random_state) X, event, time = check_arrays_survival(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE) n_samples, self.n_features_ = X.shape X = X.astype(DTYPE) if sample_weight is None: sample_weight = numpy.ones(n_samples, dtype=numpy.float32) else: sample_weight = column_or_1d(sample_weight, warn=True) check_consistent_length(X, sample_weight) self._check_params() self.loss_ = LOSS_FUNCTIONS[self.loss](1) if isinstance(self.loss_, (CensoredSquaredLoss, IPCWLeastSquaresError)): time = numpy.log(time) self._init_state() self.init_.fit(X, (event, time), sample_weight) y_pred = self.init_.predict(X) begin_at_stage = 0 if self.presort is True and issparse(X): raise ValueError( "Presorting is not supported for sparse matrices.") presort = self.presort # Allow presort to be 'auto', which means True if the dataset is dense, # otherwise it will be False. 
if presort == 'auto': presort = not issparse(X) X_idx_sorted = None if presort: X_idx_sorted = numpy.asfortranarray(numpy.argsort(X, axis=0), dtype=numpy.int32) # fit the boosting stages y = numpy.fromiter(zip(event, time), dtype=[('event', numpy.bool), ('time', numpy.float64)]) n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state, begin_at_stage, monitor, X_idx_sorted) # change shape of arrays after fit (early-stopping or additional tests) if n_stages != self.estimators_.shape[0]: self.estimators_ = self.estimators_[:n_stages] self.train_score_ = self.train_score_[:n_stages] if hasattr(self, 'oob_improvement_'): self.oob_improvement_ = self.oob_improvement_[:n_stages] self.n_estimators_ = n_stages return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ",", "sample_weight", "=", "None", ",", "monitor", "=", "None", ")", ":", "random_state", "=", "check_random_state", "(", "self", ".", "random_state", ")", "X", ",", "event", ",", "time", "=", "check_arrays_survival", "(", "X", ",", "y", ",", "accept_sparse", "=", "[", "'csr'", ",", "'csc'", ",", "'coo'", "]", ",", "dtype", "=", "DTYPE", ")", "n_samples", ",", "self", ".", "n_features_", "=", "X", ".", "shape", "X", "=", "X", ".", "astype", "(", "DTYPE", ")", "if", "sample_weight", "is", "None", ":", "sample_weight", "=", "numpy", ".", "ones", "(", "n_samples", ",", "dtype", "=", "numpy", ".", "float32", ")", "else", ":", "sample_weight", "=", "column_or_1d", "(", "sample_weight", ",", "warn", "=", "True", ")", "check_consistent_length", "(", "X", ",", "sample_weight", ")", "self", ".", "_check_params", "(", ")", "self", ".", "loss_", "=", "LOSS_FUNCTIONS", "[", "self", ".", "loss", "]", "(", "1", ")", "if", "isinstance", "(", "self", ".", "loss_", ",", "(", "CensoredSquaredLoss", ",", "IPCWLeastSquaresError", ")", ")", ":", "time", "=", "numpy", ".", "log", "(", "time", ")", "self", ".", "_init_state", "(", ")", "self", ".", "init_", ".", "fit", "(", "X", ",", "(", "event", ",", "time", ")", ",", "sample_weight", ")", "y_pred", "=", "self", ".", "init_", ".", "predict", "(", "X", ")", "begin_at_stage", "=", "0", "if", "self", ".", "presort", "is", "True", "and", "issparse", "(", "X", ")", ":", "raise", "ValueError", "(", "\"Presorting is not supported for sparse matrices.\"", ")", "presort", "=", "self", ".", "presort", "# Allow presort to be 'auto', which means True if the dataset is dense,", "# otherwise it will be False.", "if", "presort", "==", "'auto'", ":", "presort", "=", "not", "issparse", "(", "X", ")", "X_idx_sorted", "=", "None", "if", "presort", ":", "X_idx_sorted", "=", "numpy", ".", "asfortranarray", "(", "numpy", ".", "argsort", "(", "X", ",", "axis", "=", "0", ")", ",", "dtype", "=", "numpy", ".", 
"int32", ")", "# fit the boosting stages", "y", "=", "numpy", ".", "fromiter", "(", "zip", "(", "event", ",", "time", ")", ",", "dtype", "=", "[", "(", "'event'", ",", "numpy", ".", "bool", ")", ",", "(", "'time'", ",", "numpy", ".", "float64", ")", "]", ")", "n_stages", "=", "self", ".", "_fit_stages", "(", "X", ",", "y", ",", "y_pred", ",", "sample_weight", ",", "random_state", ",", "begin_at_stage", ",", "monitor", ",", "X_idx_sorted", ")", "# change shape of arrays after fit (early-stopping or additional tests)", "if", "n_stages", "!=", "self", ".", "estimators_", ".", "shape", "[", "0", "]", ":", "self", ".", "estimators_", "=", "self", ".", "estimators_", "[", ":", "n_stages", "]", "self", ".", "train_score_", "=", "self", ".", "train_score_", "[", ":", "n_stages", "]", "if", "hasattr", "(", "self", ",", "'oob_improvement_'", ")", ":", "self", ".", "oob_improvement_", "=", "self", ".", "oob_improvement_", "[", ":", "n_stages", "]", "self", ".", "n_estimators_", "=", "n_stages", "return", "self" ]
Fit the gradient boosting model. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data matrix y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. sample_weight : array-like, shape = (n_samples,), optional Weights given to each sample. If omitted, all samples have weight 1. monitor : callable, optional The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object Returns self.
[ "Fit", "the", "gradient", "boosting", "model", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L743-L823
232,816
sebp/scikit-survival
sksurv/ensemble/boosting.py
GradientBoostingSurvivalAnalysis.staged_predict
def staged_predict(self, X): """Predict hazard at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : generator of array of shape = (n_samples,) The predicted value of the input samples. """ check_is_fitted(self, 'estimators_') # if dropout wasn't used during training, proceed as usual, # otherwise consider scaling factor of individual trees if not hasattr(self, "scale_"): for y in self._staged_decision_function(X): yield self._scale_prediction(y.ravel()) else: for y in self._dropout_staged_decision_function(X): yield self._scale_prediction(y.ravel())
python
def staged_predict(self, X): """Predict hazard at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : generator of array of shape = (n_samples,) The predicted value of the input samples. """ check_is_fitted(self, 'estimators_') # if dropout wasn't used during training, proceed as usual, # otherwise consider scaling factor of individual trees if not hasattr(self, "scale_"): for y in self._staged_decision_function(X): yield self._scale_prediction(y.ravel()) else: for y in self._dropout_staged_decision_function(X): yield self._scale_prediction(y.ravel())
[ "def", "staged_predict", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "'estimators_'", ")", "# if dropout wasn't used during training, proceed as usual,", "# otherwise consider scaling factor of individual trees", "if", "not", "hasattr", "(", "self", ",", "\"scale_\"", ")", ":", "for", "y", "in", "self", ".", "_staged_decision_function", "(", "X", ")", ":", "yield", "self", ".", "_scale_prediction", "(", "y", ".", "ravel", "(", ")", ")", "else", ":", "for", "y", "in", "self", ".", "_dropout_staged_decision_function", "(", "X", ")", ":", "yield", "self", ".", "_scale_prediction", "(", "y", ".", "ravel", "(", ")", ")" ]
Predict hazard at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : generator of array of shape = (n_samples,) The predicted value of the input samples.
[ "Predict", "hazard", "at", "each", "stage", "for", "X", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/ensemble/boosting.py#L886-L911
232,817
sebp/scikit-survival
sksurv/svm/minlip.py
MinlipSurvivalAnalysis.fit
def fit(self, X, y): """Build a MINLIP survival model from training data. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data matrix. y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. Returns ------- self """ X, event, time = check_arrays_survival(X, y) self._fit(X, event, time) return self
python
def fit(self, X, y): """Build a MINLIP survival model from training data. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data matrix. y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. Returns ------- self """ X, event, time = check_arrays_survival(X, y) self._fit(X, event, time) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ")", ":", "X", ",", "event", ",", "time", "=", "check_arrays_survival", "(", "X", ",", "y", ")", "self", ".", "_fit", "(", "X", ",", "event", ",", "time", ")", "return", "self" ]
Build a MINLIP survival model from training data. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data matrix. y : structured array, shape = (n_samples,) A structured array containing the binary event indicator as first field, and time of event or time of censoring as second field. Returns ------- self
[ "Build", "a", "MINLIP", "survival", "model", "from", "training", "data", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/minlip.py#L227-L247
232,818
sebp/scikit-survival
sksurv/svm/minlip.py
MinlipSurvivalAnalysis.predict
def predict(self, X): """Predict risk score of experiencing an event. Higher scores indicate shorter survival (high risk), lower scores longer survival (low risk). Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape = (n_samples,) Predicted risk. """ K = self._get_kernel(X, self.X_fit_) pred = -numpy.dot(self.coef_, K.T) return pred.ravel()
python
def predict(self, X): """Predict risk score of experiencing an event. Higher scores indicate shorter survival (high risk), lower scores longer survival (low risk). Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape = (n_samples,) Predicted risk. """ K = self._get_kernel(X, self.X_fit_) pred = -numpy.dot(self.coef_, K.T) return pred.ravel()
[ "def", "predict", "(", "self", ",", "X", ")", ":", "K", "=", "self", ".", "_get_kernel", "(", "X", ",", "self", ".", "X_fit_", ")", "pred", "=", "-", "numpy", ".", "dot", "(", "self", ".", "coef_", ",", "K", ".", "T", ")", "return", "pred", ".", "ravel", "(", ")" ]
Predict risk score of experiencing an event. Higher scores indicate shorter survival (high risk), lower scores longer survival (low risk). Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape = (n_samples,) Predicted risk.
[ "Predict", "risk", "score", "of", "experiencing", "an", "event", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/minlip.py#L249-L267
232,819
sebp/scikit-survival
sksurv/datasets/base.py
get_x_y
def get_x_y(data_frame, attr_labels, pos_label=None, survival=True): """Split data frame into features and labels. Parameters ---------- data_frame : pandas.DataFrame, shape = (n_samples, n_columns) A data frame. attr_labels : sequence of str or None A list of one or more columns that are considered the label. If `survival` is `True`, then attr_labels has two elements: 1) the name of the column denoting the event indicator, and 2) the name of the column denoting the survival time. If the sequence contains `None`, then labels are not retrieved and only a data frame with features is returned. pos_label : any, optional Which value of the event indicator column denotes that a patient experienced an event. This value is ignored if `survival` is `False`. survival : bool, optional, default: True Whether to return `y` that can be used for survival analysis. Returns ------- X : pandas.DataFrame, shape = (n_samples, n_columns - len(attr_labels)) Data frame containing features. y : None or pandas.DataFrame, shape = (n_samples, len(attr_labels)) Data frame containing columns with supervised information. If `survival` was `True`, then the column denoting the event indicator will be boolean and survival times will be float. If `attr_labels` contains `None`, y is set to `None`. """ if survival: if len(attr_labels) != 2: raise ValueError("expected sequence of length two for attr_labels, but got %d" % len(attr_labels)) if pos_label is None: raise ValueError("pos_label needs to be specified if survival=True") return _get_x_y_survival(data_frame, attr_labels[0], attr_labels[1], pos_label) return _get_x_y_other(data_frame, attr_labels)
python
def get_x_y(data_frame, attr_labels, pos_label=None, survival=True): """Split data frame into features and labels. Parameters ---------- data_frame : pandas.DataFrame, shape = (n_samples, n_columns) A data frame. attr_labels : sequence of str or None A list of one or more columns that are considered the label. If `survival` is `True`, then attr_labels has two elements: 1) the name of the column denoting the event indicator, and 2) the name of the column denoting the survival time. If the sequence contains `None`, then labels are not retrieved and only a data frame with features is returned. pos_label : any, optional Which value of the event indicator column denotes that a patient experienced an event. This value is ignored if `survival` is `False`. survival : bool, optional, default: True Whether to return `y` that can be used for survival analysis. Returns ------- X : pandas.DataFrame, shape = (n_samples, n_columns - len(attr_labels)) Data frame containing features. y : None or pandas.DataFrame, shape = (n_samples, len(attr_labels)) Data frame containing columns with supervised information. If `survival` was `True`, then the column denoting the event indicator will be boolean and survival times will be float. If `attr_labels` contains `None`, y is set to `None`. """ if survival: if len(attr_labels) != 2: raise ValueError("expected sequence of length two for attr_labels, but got %d" % len(attr_labels)) if pos_label is None: raise ValueError("pos_label needs to be specified if survival=True") return _get_x_y_survival(data_frame, attr_labels[0], attr_labels[1], pos_label) return _get_x_y_other(data_frame, attr_labels)
[ "def", "get_x_y", "(", "data_frame", ",", "attr_labels", ",", "pos_label", "=", "None", ",", "survival", "=", "True", ")", ":", "if", "survival", ":", "if", "len", "(", "attr_labels", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"expected sequence of length two for attr_labels, but got %d\"", "%", "len", "(", "attr_labels", ")", ")", "if", "pos_label", "is", "None", ":", "raise", "ValueError", "(", "\"pos_label needs to be specified if survival=True\"", ")", "return", "_get_x_y_survival", "(", "data_frame", ",", "attr_labels", "[", "0", "]", ",", "attr_labels", "[", "1", "]", ",", "pos_label", ")", "return", "_get_x_y_other", "(", "data_frame", ",", "attr_labels", ")" ]
Split data frame into features and labels. Parameters ---------- data_frame : pandas.DataFrame, shape = (n_samples, n_columns) A data frame. attr_labels : sequence of str or None A list of one or more columns that are considered the label. If `survival` is `True`, then attr_labels has two elements: 1) the name of the column denoting the event indicator, and 2) the name of the column denoting the survival time. If the sequence contains `None`, then labels are not retrieved and only a data frame with features is returned. pos_label : any, optional Which value of the event indicator column denotes that a patient experienced an event. This value is ignored if `survival` is `False`. survival : bool, optional, default: True Whether to return `y` that can be used for survival analysis. Returns ------- X : pandas.DataFrame, shape = (n_samples, n_columns - len(attr_labels)) Data frame containing features. y : None or pandas.DataFrame, shape = (n_samples, len(attr_labels)) Data frame containing columns with supervised information. If `survival` was `True`, then the column denoting the event indicator will be boolean and survival times will be float. If `attr_labels` contains `None`, y is set to `None`.
[ "Split", "data", "frame", "into", "features", "and", "labels", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/datasets/base.py#L46-L88
232,820
sebp/scikit-survival
sksurv/datasets/base.py
load_arff_files_standardized
def load_arff_files_standardized(path_training, attr_labels, pos_label=None, path_testing=None, survival=True, standardize_numeric=True, to_numeric=True): """Load dataset in ARFF format. Parameters ---------- path_training : str Path to ARFF file containing data. attr_labels : sequence of str Names of attributes denoting dependent variables. If ``survival`` is set, it must be a sequence with two items: the name of the event indicator and the name of the survival/censoring time. pos_label : any type, optional Value corresponding to an event in survival analysis. Only considered if ``survival`` is ``True``. path_testing : str, optional Path to ARFF file containing hold-out data. Only columns that are available in both training and testing are considered (excluding dependent variables). If ``standardize_numeric`` is set, data is normalized by considering both training and testing data. survival : bool, optional, default: True Whether the dependent variables denote event indicator and survival/censoring time. standardize_numeric : bool, optional, default: True Whether to standardize data to zero mean and unit variance. See :func:`sksurv.column.standardize`. to_numeric : boo, optional, default: True Whether to convert categorical variables to numeric values. See :func:`sksurv.column.categorical_to_numeric`. Returns ------- x_train : pandas.DataFrame, shape = (n_train, n_features) Training data. y_train : pandas.DataFrame, shape = (n_train, n_labels) Dependent variables of training data. x_test : None or pandas.DataFrame, shape = (n_train, n_features) Testing data if `path_testing` was provided. y_test : None or pandas.DataFrame, shape = (n_train, n_labels) Dependent variables of testing data if `path_testing` was provided. 
""" dataset = loadarff(path_training) if "index" in dataset.columns: dataset.index = dataset["index"].astype(object) dataset.drop("index", axis=1, inplace=True) x_train, y_train = get_x_y(dataset, attr_labels, pos_label, survival) if path_testing is not None: x_test, y_test = _load_arff_testing(path_testing, attr_labels, pos_label, survival) if len(x_train.columns.symmetric_difference(x_test.columns)) > 0: warnings.warn("Restricting columns to intersection between training and testing data", stacklevel=2) cols = x_train.columns.intersection(x_test.columns) if len(cols) == 0: raise ValueError("columns of training and test data do not intersect") x_train = x_train.loc[:, cols] x_test = x_test.loc[:, cols] x = safe_concat((x_train, x_test), axis=0) if standardize_numeric: x = standardize(x) if to_numeric: x = categorical_to_numeric(x) n_train = x_train.shape[0] x_train = x.iloc[:n_train, :] x_test = x.iloc[n_train:, :] else: if standardize_numeric: x_train = standardize(x_train) if to_numeric: x_train = categorical_to_numeric(x_train) x_test = None y_test = None return x_train, y_train, x_test, y_test
python
def load_arff_files_standardized(path_training, attr_labels, pos_label=None, path_testing=None, survival=True, standardize_numeric=True, to_numeric=True): """Load dataset in ARFF format. Parameters ---------- path_training : str Path to ARFF file containing data. attr_labels : sequence of str Names of attributes denoting dependent variables. If ``survival`` is set, it must be a sequence with two items: the name of the event indicator and the name of the survival/censoring time. pos_label : any type, optional Value corresponding to an event in survival analysis. Only considered if ``survival`` is ``True``. path_testing : str, optional Path to ARFF file containing hold-out data. Only columns that are available in both training and testing are considered (excluding dependent variables). If ``standardize_numeric`` is set, data is normalized by considering both training and testing data. survival : bool, optional, default: True Whether the dependent variables denote event indicator and survival/censoring time. standardize_numeric : bool, optional, default: True Whether to standardize data to zero mean and unit variance. See :func:`sksurv.column.standardize`. to_numeric : boo, optional, default: True Whether to convert categorical variables to numeric values. See :func:`sksurv.column.categorical_to_numeric`. Returns ------- x_train : pandas.DataFrame, shape = (n_train, n_features) Training data. y_train : pandas.DataFrame, shape = (n_train, n_labels) Dependent variables of training data. x_test : None or pandas.DataFrame, shape = (n_train, n_features) Testing data if `path_testing` was provided. y_test : None or pandas.DataFrame, shape = (n_train, n_labels) Dependent variables of testing data if `path_testing` was provided. 
""" dataset = loadarff(path_training) if "index" in dataset.columns: dataset.index = dataset["index"].astype(object) dataset.drop("index", axis=1, inplace=True) x_train, y_train = get_x_y(dataset, attr_labels, pos_label, survival) if path_testing is not None: x_test, y_test = _load_arff_testing(path_testing, attr_labels, pos_label, survival) if len(x_train.columns.symmetric_difference(x_test.columns)) > 0: warnings.warn("Restricting columns to intersection between training and testing data", stacklevel=2) cols = x_train.columns.intersection(x_test.columns) if len(cols) == 0: raise ValueError("columns of training and test data do not intersect") x_train = x_train.loc[:, cols] x_test = x_test.loc[:, cols] x = safe_concat((x_train, x_test), axis=0) if standardize_numeric: x = standardize(x) if to_numeric: x = categorical_to_numeric(x) n_train = x_train.shape[0] x_train = x.iloc[:n_train, :] x_test = x.iloc[n_train:, :] else: if standardize_numeric: x_train = standardize(x_train) if to_numeric: x_train = categorical_to_numeric(x_train) x_test = None y_test = None return x_train, y_train, x_test, y_test
[ "def", "load_arff_files_standardized", "(", "path_training", ",", "attr_labels", ",", "pos_label", "=", "None", ",", "path_testing", "=", "None", ",", "survival", "=", "True", ",", "standardize_numeric", "=", "True", ",", "to_numeric", "=", "True", ")", ":", "dataset", "=", "loadarff", "(", "path_training", ")", "if", "\"index\"", "in", "dataset", ".", "columns", ":", "dataset", ".", "index", "=", "dataset", "[", "\"index\"", "]", ".", "astype", "(", "object", ")", "dataset", ".", "drop", "(", "\"index\"", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "x_train", ",", "y_train", "=", "get_x_y", "(", "dataset", ",", "attr_labels", ",", "pos_label", ",", "survival", ")", "if", "path_testing", "is", "not", "None", ":", "x_test", ",", "y_test", "=", "_load_arff_testing", "(", "path_testing", ",", "attr_labels", ",", "pos_label", ",", "survival", ")", "if", "len", "(", "x_train", ".", "columns", ".", "symmetric_difference", "(", "x_test", ".", "columns", ")", ")", ">", "0", ":", "warnings", ".", "warn", "(", "\"Restricting columns to intersection between training and testing data\"", ",", "stacklevel", "=", "2", ")", "cols", "=", "x_train", ".", "columns", ".", "intersection", "(", "x_test", ".", "columns", ")", "if", "len", "(", "cols", ")", "==", "0", ":", "raise", "ValueError", "(", "\"columns of training and test data do not intersect\"", ")", "x_train", "=", "x_train", ".", "loc", "[", ":", ",", "cols", "]", "x_test", "=", "x_test", ".", "loc", "[", ":", ",", "cols", "]", "x", "=", "safe_concat", "(", "(", "x_train", ",", "x_test", ")", ",", "axis", "=", "0", ")", "if", "standardize_numeric", ":", "x", "=", "standardize", "(", "x", ")", "if", "to_numeric", ":", "x", "=", "categorical_to_numeric", "(", "x", ")", "n_train", "=", "x_train", ".", "shape", "[", "0", "]", "x_train", "=", "x", ".", "iloc", "[", ":", "n_train", ",", ":", "]", "x_test", "=", "x", ".", "iloc", "[", "n_train", ":", ",", ":", "]", "else", ":", "if", "standardize_numeric", ":", 
"x_train", "=", "standardize", "(", "x_train", ")", "if", "to_numeric", ":", "x_train", "=", "categorical_to_numeric", "(", "x_train", ")", "x_test", "=", "None", "y_test", "=", "None", "return", "x_train", ",", "y_train", ",", "x_test", ",", "y_test" ]
Load dataset in ARFF format. Parameters ---------- path_training : str Path to ARFF file containing data. attr_labels : sequence of str Names of attributes denoting dependent variables. If ``survival`` is set, it must be a sequence with two items: the name of the event indicator and the name of the survival/censoring time. pos_label : any type, optional Value corresponding to an event in survival analysis. Only considered if ``survival`` is ``True``. path_testing : str, optional Path to ARFF file containing hold-out data. Only columns that are available in both training and testing are considered (excluding dependent variables). If ``standardize_numeric`` is set, data is normalized by considering both training and testing data. survival : bool, optional, default: True Whether the dependent variables denote event indicator and survival/censoring time. standardize_numeric : bool, optional, default: True Whether to standardize data to zero mean and unit variance. See :func:`sksurv.column.standardize`. to_numeric : boo, optional, default: True Whether to convert categorical variables to numeric values. See :func:`sksurv.column.categorical_to_numeric`. Returns ------- x_train : pandas.DataFrame, shape = (n_train, n_features) Training data. y_train : pandas.DataFrame, shape = (n_train, n_labels) Dependent variables of training data. x_test : None or pandas.DataFrame, shape = (n_train, n_features) Testing data if `path_testing` was provided. y_test : None or pandas.DataFrame, shape = (n_train, n_labels) Dependent variables of testing data if `path_testing` was provided.
[ "Load", "dataset", "in", "ARFF", "format", "." ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/datasets/base.py#L91-L179
232,821
sebp/scikit-survival
sksurv/datasets/base.py
load_aids
def load_aids(endpoint="aids"): """Load and return the AIDS Clinical Trial dataset The dataset has 1,151 samples and 11 features. The dataset has 2 endpoints: 1. AIDS defining event, which occurred for 96 patients (8.3%) 2. Death, which occurred for 26 patients (2.3%) Parameters ---------- endpoint : aids|death The endpoint Returns ------- x : pandas.DataFrame The measurements for each patient. y : structured array with 2 fields *censor*: boolean indicating whether the endpoint has been reached or the event time is right censored. *time*: total length of follow-up If ``endpoint`` is death, the fields are named *censor_d* and *time_d*. References ---------- .. [1] http://www.umass.edu/statdata/statdata/data/ .. [2] Hosmer, D., Lemeshow, S., May, S.: "Applied Survival Analysis: Regression Modeling of Time to Event Data." John Wiley & Sons, Inc. (2008) """ labels_aids = ['censor', 'time'] labels_death = ['censor_d', 'time_d'] if endpoint == "aids": attr_labels = labels_aids drop_columns = labels_death elif endpoint == "death": attr_labels = labels_death drop_columns = labels_aids else: raise ValueError("endpoint must be 'aids' or 'death'") fn = resource_filename(__name__, 'data/actg320.arff') x, y = get_x_y(loadarff(fn), attr_labels=attr_labels, pos_label='1') x.drop(drop_columns, axis=1, inplace=True) return x, y
python
def load_aids(endpoint="aids"): """Load and return the AIDS Clinical Trial dataset The dataset has 1,151 samples and 11 features. The dataset has 2 endpoints: 1. AIDS defining event, which occurred for 96 patients (8.3%) 2. Death, which occurred for 26 patients (2.3%) Parameters ---------- endpoint : aids|death The endpoint Returns ------- x : pandas.DataFrame The measurements for each patient. y : structured array with 2 fields *censor*: boolean indicating whether the endpoint has been reached or the event time is right censored. *time*: total length of follow-up If ``endpoint`` is death, the fields are named *censor_d* and *time_d*. References ---------- .. [1] http://www.umass.edu/statdata/statdata/data/ .. [2] Hosmer, D., Lemeshow, S., May, S.: "Applied Survival Analysis: Regression Modeling of Time to Event Data." John Wiley & Sons, Inc. (2008) """ labels_aids = ['censor', 'time'] labels_death = ['censor_d', 'time_d'] if endpoint == "aids": attr_labels = labels_aids drop_columns = labels_death elif endpoint == "death": attr_labels = labels_death drop_columns = labels_aids else: raise ValueError("endpoint must be 'aids' or 'death'") fn = resource_filename(__name__, 'data/actg320.arff') x, y = get_x_y(loadarff(fn), attr_labels=attr_labels, pos_label='1') x.drop(drop_columns, axis=1, inplace=True) return x, y
[ "def", "load_aids", "(", "endpoint", "=", "\"aids\"", ")", ":", "labels_aids", "=", "[", "'censor'", ",", "'time'", "]", "labels_death", "=", "[", "'censor_d'", ",", "'time_d'", "]", "if", "endpoint", "==", "\"aids\"", ":", "attr_labels", "=", "labels_aids", "drop_columns", "=", "labels_death", "elif", "endpoint", "==", "\"death\"", ":", "attr_labels", "=", "labels_death", "drop_columns", "=", "labels_aids", "else", ":", "raise", "ValueError", "(", "\"endpoint must be 'aids' or 'death'\"", ")", "fn", "=", "resource_filename", "(", "__name__", ",", "'data/actg320.arff'", ")", "x", ",", "y", "=", "get_x_y", "(", "loadarff", "(", "fn", ")", ",", "attr_labels", "=", "attr_labels", ",", "pos_label", "=", "'1'", ")", "x", ".", "drop", "(", "drop_columns", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "return", "x", ",", "y" ]
Load and return the AIDS Clinical Trial dataset The dataset has 1,151 samples and 11 features. The dataset has 2 endpoints: 1. AIDS defining event, which occurred for 96 patients (8.3%) 2. Death, which occurred for 26 patients (2.3%) Parameters ---------- endpoint : aids|death The endpoint Returns ------- x : pandas.DataFrame The measurements for each patient. y : structured array with 2 fields *censor*: boolean indicating whether the endpoint has been reached or the event time is right censored. *time*: total length of follow-up If ``endpoint`` is death, the fields are named *censor_d* and *time_d*. References ---------- .. [1] http://www.umass.edu/statdata/statdata/data/ .. [2] Hosmer, D., Lemeshow, S., May, S.: "Applied Survival Analysis: Regression Modeling of Time to Event Data." John Wiley & Sons, Inc. (2008)
[ "Load", "and", "return", "the", "AIDS", "Clinical", "Trial", "dataset" ]
cfc99fd20454cdd6f4f20fe331b39f2191ccaabc
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/datasets/base.py#L284-L333
232,822
seemethere/nba_py
nba_py/__init__.py
_api_scrape
def _api_scrape(json_inp, ndx): """ Internal method to streamline the getting of data from the json Args: json_inp (json): json input from our caller ndx (int): index where the data is located in the api Returns: If pandas is present: DataFrame (pandas.DataFrame): data set from ndx within the API's json else: A dictionary of both headers and values from the page """ try: headers = json_inp['resultSets'][ndx]['headers'] values = json_inp['resultSets'][ndx]['rowSet'] except KeyError: # This is so ugly but this is what you get when your data comes out # in not a standard format try: headers = json_inp['resultSet'][ndx]['headers'] values = json_inp['resultSet'][ndx]['rowSet'] except KeyError: # Added for results that only include one set (ex. LeagueLeaders) headers = json_inp['resultSet']['headers'] values = json_inp['resultSet']['rowSet'] if HAS_PANDAS: return DataFrame(values, columns=headers) else: # Taken from www.github.com/bradleyfay/py-goldsberry return [dict(zip(headers, value)) for value in values]
python
def _api_scrape(json_inp, ndx): """ Internal method to streamline the getting of data from the json Args: json_inp (json): json input from our caller ndx (int): index where the data is located in the api Returns: If pandas is present: DataFrame (pandas.DataFrame): data set from ndx within the API's json else: A dictionary of both headers and values from the page """ try: headers = json_inp['resultSets'][ndx]['headers'] values = json_inp['resultSets'][ndx]['rowSet'] except KeyError: # This is so ugly but this is what you get when your data comes out # in not a standard format try: headers = json_inp['resultSet'][ndx]['headers'] values = json_inp['resultSet'][ndx]['rowSet'] except KeyError: # Added for results that only include one set (ex. LeagueLeaders) headers = json_inp['resultSet']['headers'] values = json_inp['resultSet']['rowSet'] if HAS_PANDAS: return DataFrame(values, columns=headers) else: # Taken from www.github.com/bradleyfay/py-goldsberry return [dict(zip(headers, value)) for value in values]
[ "def", "_api_scrape", "(", "json_inp", ",", "ndx", ")", ":", "try", ":", "headers", "=", "json_inp", "[", "'resultSets'", "]", "[", "ndx", "]", "[", "'headers'", "]", "values", "=", "json_inp", "[", "'resultSets'", "]", "[", "ndx", "]", "[", "'rowSet'", "]", "except", "KeyError", ":", "# This is so ugly but this is what you get when your data comes out", "# in not a standard format", "try", ":", "headers", "=", "json_inp", "[", "'resultSet'", "]", "[", "ndx", "]", "[", "'headers'", "]", "values", "=", "json_inp", "[", "'resultSet'", "]", "[", "ndx", "]", "[", "'rowSet'", "]", "except", "KeyError", ":", "# Added for results that only include one set (ex. LeagueLeaders)", "headers", "=", "json_inp", "[", "'resultSet'", "]", "[", "'headers'", "]", "values", "=", "json_inp", "[", "'resultSet'", "]", "[", "'rowSet'", "]", "if", "HAS_PANDAS", ":", "return", "DataFrame", "(", "values", ",", "columns", "=", "headers", ")", "else", ":", "# Taken from www.github.com/bradleyfay/py-goldsberry", "return", "[", "dict", "(", "zip", "(", "headers", ",", "value", ")", ")", "for", "value", "in", "values", "]" ]
Internal method to streamline the getting of data from the json Args: json_inp (json): json input from our caller ndx (int): index where the data is located in the api Returns: If pandas is present: DataFrame (pandas.DataFrame): data set from ndx within the API's json else: A dictionary of both headers and values from the page
[ "Internal", "method", "to", "streamline", "the", "getting", "of", "data", "from", "the", "json" ]
ffeaf4251d796ff9313367a752a45a0d7b16489e
https://github.com/seemethere/nba_py/blob/ffeaf4251d796ff9313367a752a45a0d7b16489e/nba_py/__init__.py#L34-L67
232,823
seemethere/nba_py
nba_py/player.py
get_player
def get_player(first_name, last_name=None, season=constants.CURRENT_SEASON, only_current=0, just_id=True): """ Calls our PlayerList class to get a full list of players and then returns just an id if specified or the full row of player information Args: :first_name: First name of the player :last_name: Last name of the player (this is None if the player only has first name [Nene]) :only_current: Only wants the current list of players :just_id: Only wants the id of the player Returns: Either the ID or full row of information of the player inputted Raises: :PlayerNotFoundException:: """ if last_name is None: name = first_name.lower() else: name = '{}, {}'.format(last_name, first_name).lower() pl = PlayerList(season=season, only_current=only_current).info() hdr = 'DISPLAY_LAST_COMMA_FIRST' if HAS_PANDAS: item = pl[pl.DISPLAY_LAST_COMMA_FIRST.str.lower() == name] else: item = next(plyr for plyr in pl if str(plyr[hdr]).lower() == name) if len(item) == 0: raise PlayerNotFoundException elif just_id: return item['PERSON_ID'] else: return item
python
def get_player(first_name, last_name=None, season=constants.CURRENT_SEASON, only_current=0, just_id=True): """ Calls our PlayerList class to get a full list of players and then returns just an id if specified or the full row of player information Args: :first_name: First name of the player :last_name: Last name of the player (this is None if the player only has first name [Nene]) :only_current: Only wants the current list of players :just_id: Only wants the id of the player Returns: Either the ID or full row of information of the player inputted Raises: :PlayerNotFoundException:: """ if last_name is None: name = first_name.lower() else: name = '{}, {}'.format(last_name, first_name).lower() pl = PlayerList(season=season, only_current=only_current).info() hdr = 'DISPLAY_LAST_COMMA_FIRST' if HAS_PANDAS: item = pl[pl.DISPLAY_LAST_COMMA_FIRST.str.lower() == name] else: item = next(plyr for plyr in pl if str(plyr[hdr]).lower() == name) if len(item) == 0: raise PlayerNotFoundException elif just_id: return item['PERSON_ID'] else: return item
[ "def", "get_player", "(", "first_name", ",", "last_name", "=", "None", ",", "season", "=", "constants", ".", "CURRENT_SEASON", ",", "only_current", "=", "0", ",", "just_id", "=", "True", ")", ":", "if", "last_name", "is", "None", ":", "name", "=", "first_name", ".", "lower", "(", ")", "else", ":", "name", "=", "'{}, {}'", ".", "format", "(", "last_name", ",", "first_name", ")", ".", "lower", "(", ")", "pl", "=", "PlayerList", "(", "season", "=", "season", ",", "only_current", "=", "only_current", ")", ".", "info", "(", ")", "hdr", "=", "'DISPLAY_LAST_COMMA_FIRST'", "if", "HAS_PANDAS", ":", "item", "=", "pl", "[", "pl", ".", "DISPLAY_LAST_COMMA_FIRST", ".", "str", ".", "lower", "(", ")", "==", "name", "]", "else", ":", "item", "=", "next", "(", "plyr", "for", "plyr", "in", "pl", "if", "str", "(", "plyr", "[", "hdr", "]", ")", ".", "lower", "(", ")", "==", "name", ")", "if", "len", "(", "item", ")", "==", "0", ":", "raise", "PlayerNotFoundException", "elif", "just_id", ":", "return", "item", "[", "'PERSON_ID'", "]", "else", ":", "return", "item" ]
Calls our PlayerList class to get a full list of players and then returns just an id if specified or the full row of player information Args: :first_name: First name of the player :last_name: Last name of the player (this is None if the player only has first name [Nene]) :only_current: Only wants the current list of players :just_id: Only wants the id of the player Returns: Either the ID or full row of information of the player inputted Raises: :PlayerNotFoundException::
[ "Calls", "our", "PlayerList", "class", "to", "get", "a", "full", "list", "of", "players", "and", "then", "returns", "just", "an", "id", "if", "specified", "or", "the", "full", "row", "of", "player", "information" ]
ffeaf4251d796ff9313367a752a45a0d7b16489e
https://github.com/seemethere/nba_py/blob/ffeaf4251d796ff9313367a752a45a0d7b16489e/nba_py/player.py#L9-L46
232,824
ishikota/PyPokerEngine
pypokerengine/players.py
BasePokerPlayer.respond_to_ask
def respond_to_ask(self, message): """Called from Dealer when ask message received from RoundManager""" valid_actions, hole_card, round_state = self.__parse_ask_message(message) return self.declare_action(valid_actions, hole_card, round_state)
python
def respond_to_ask(self, message): """Called from Dealer when ask message received from RoundManager""" valid_actions, hole_card, round_state = self.__parse_ask_message(message) return self.declare_action(valid_actions, hole_card, round_state)
[ "def", "respond_to_ask", "(", "self", ",", "message", ")", ":", "valid_actions", ",", "hole_card", ",", "round_state", "=", "self", ".", "__parse_ask_message", "(", "message", ")", "return", "self", ".", "declare_action", "(", "valid_actions", ",", "hole_card", ",", "round_state", ")" ]
Called from Dealer when ask message received from RoundManager
[ "Called", "from", "Dealer", "when", "ask", "message", "received", "from", "RoundManager" ]
a52a048a15da276005eca4acae96fb6eeb4dc034
https://github.com/ishikota/PyPokerEngine/blob/a52a048a15da276005eca4acae96fb6eeb4dc034/pypokerengine/players.py#L45-L48
232,825
ishikota/PyPokerEngine
pypokerengine/players.py
BasePokerPlayer.receive_notification
def receive_notification(self, message): """Called from Dealer when notification received from RoundManager""" msg_type = message["message_type"] if msg_type == "game_start_message": info = self.__parse_game_start_message(message) self.receive_game_start_message(info) elif msg_type == "round_start_message": round_count, hole, seats = self.__parse_round_start_message(message) self.receive_round_start_message(round_count, hole, seats) elif msg_type == "street_start_message": street, state = self.__parse_street_start_message(message) self.receive_street_start_message(street, state) elif msg_type == "game_update_message": new_action, round_state = self.__parse_game_update_message(message) self.receive_game_update_message(new_action, round_state) elif msg_type == "round_result_message": winners, hand_info, state = self.__parse_round_result_message(message) self.receive_round_result_message(winners, hand_info, state)
python
def receive_notification(self, message): """Called from Dealer when notification received from RoundManager""" msg_type = message["message_type"] if msg_type == "game_start_message": info = self.__parse_game_start_message(message) self.receive_game_start_message(info) elif msg_type == "round_start_message": round_count, hole, seats = self.__parse_round_start_message(message) self.receive_round_start_message(round_count, hole, seats) elif msg_type == "street_start_message": street, state = self.__parse_street_start_message(message) self.receive_street_start_message(street, state) elif msg_type == "game_update_message": new_action, round_state = self.__parse_game_update_message(message) self.receive_game_update_message(new_action, round_state) elif msg_type == "round_result_message": winners, hand_info, state = self.__parse_round_result_message(message) self.receive_round_result_message(winners, hand_info, state)
[ "def", "receive_notification", "(", "self", ",", "message", ")", ":", "msg_type", "=", "message", "[", "\"message_type\"", "]", "if", "msg_type", "==", "\"game_start_message\"", ":", "info", "=", "self", ".", "__parse_game_start_message", "(", "message", ")", "self", ".", "receive_game_start_message", "(", "info", ")", "elif", "msg_type", "==", "\"round_start_message\"", ":", "round_count", ",", "hole", ",", "seats", "=", "self", ".", "__parse_round_start_message", "(", "message", ")", "self", ".", "receive_round_start_message", "(", "round_count", ",", "hole", ",", "seats", ")", "elif", "msg_type", "==", "\"street_start_message\"", ":", "street", ",", "state", "=", "self", ".", "__parse_street_start_message", "(", "message", ")", "self", ".", "receive_street_start_message", "(", "street", ",", "state", ")", "elif", "msg_type", "==", "\"game_update_message\"", ":", "new_action", ",", "round_state", "=", "self", ".", "__parse_game_update_message", "(", "message", ")", "self", ".", "receive_game_update_message", "(", "new_action", ",", "round_state", ")", "elif", "msg_type", "==", "\"round_result_message\"", ":", "winners", ",", "hand_info", ",", "state", "=", "self", ".", "__parse_round_result_message", "(", "message", ")", "self", ".", "receive_round_result_message", "(", "winners", ",", "hand_info", ",", "state", ")" ]
Called from Dealer when notification received from RoundManager
[ "Called", "from", "Dealer", "when", "notification", "received", "from", "RoundManager" ]
a52a048a15da276005eca4acae96fb6eeb4dc034
https://github.com/ishikota/PyPokerEngine/blob/a52a048a15da276005eca4acae96fb6eeb4dc034/pypokerengine/players.py#L50-L72
232,826
alex-sherman/unsync
examples/mixing_methods.py
result_continuation
async def result_continuation(task): """A preliminary result processor we'll chain on to the original task This will get executed wherever the source task was executed, in this case one of the threads in the ThreadPoolExecutor""" await asyncio.sleep(0.1) num, res = task.result() return num, res * 2
python
async def result_continuation(task): """A preliminary result processor we'll chain on to the original task This will get executed wherever the source task was executed, in this case one of the threads in the ThreadPoolExecutor""" await asyncio.sleep(0.1) num, res = task.result() return num, res * 2
[ "async", "def", "result_continuation", "(", "task", ")", ":", "await", "asyncio", ".", "sleep", "(", "0.1", ")", "num", ",", "res", "=", "task", ".", "result", "(", ")", "return", "num", ",", "res", "*", "2" ]
A preliminary result processor we'll chain on to the original task This will get executed wherever the source task was executed, in this case one of the threads in the ThreadPoolExecutor
[ "A", "preliminary", "result", "processor", "we", "ll", "chain", "on", "to", "the", "original", "task", "This", "will", "get", "executed", "wherever", "the", "source", "task", "was", "executed", "in", "this", "case", "one", "of", "the", "threads", "in", "the", "ThreadPoolExecutor" ]
a52a0b04980dcaf6dc2fd734aa9d7be9d8960bbe
https://github.com/alex-sherman/unsync/blob/a52a0b04980dcaf6dc2fd734aa9d7be9d8960bbe/examples/mixing_methods.py#L16-L22
232,827
alex-sherman/unsync
examples/mixing_methods.py
result_processor
async def result_processor(tasks): """An async result aggregator that combines all the results This gets executed in unsync.loop and unsync.thread""" output = {} for task in tasks: num, res = await task output[num] = res return output
python
async def result_processor(tasks): """An async result aggregator that combines all the results This gets executed in unsync.loop and unsync.thread""" output = {} for task in tasks: num, res = await task output[num] = res return output
[ "async", "def", "result_processor", "(", "tasks", ")", ":", "output", "=", "{", "}", "for", "task", "in", "tasks", ":", "num", ",", "res", "=", "await", "task", "output", "[", "num", "]", "=", "res", "return", "output" ]
An async result aggregator that combines all the results This gets executed in unsync.loop and unsync.thread
[ "An", "async", "result", "aggregator", "that", "combines", "all", "the", "results", "This", "gets", "executed", "in", "unsync", ".", "loop", "and", "unsync", ".", "thread" ]
a52a0b04980dcaf6dc2fd734aa9d7be9d8960bbe
https://github.com/alex-sherman/unsync/blob/a52a0b04980dcaf6dc2fd734aa9d7be9d8960bbe/examples/mixing_methods.py#L25-L32
232,828
fastavro/fastavro
fastavro/_read_py.py
read_union
def read_union(fo, writer_schema, reader_schema=None): """A union is encoded by first writing a long value indicating the zero-based position within the union of the schema of its value. The value is then encoded per the indicated schema within the union. """ # schema resolution index = read_long(fo) if reader_schema: # Handle case where the reader schema is just a single type (not union) if not isinstance(reader_schema, list): if match_types(writer_schema[index], reader_schema): return read_data(fo, writer_schema[index], reader_schema) else: for schema in reader_schema: if match_types(writer_schema[index], schema): return read_data(fo, writer_schema[index], schema) msg = 'schema mismatch: %s not found in %s' % \ (writer_schema, reader_schema) raise SchemaResolutionError(msg) else: return read_data(fo, writer_schema[index])
python
def read_union(fo, writer_schema, reader_schema=None): """A union is encoded by first writing a long value indicating the zero-based position within the union of the schema of its value. The value is then encoded per the indicated schema within the union. """ # schema resolution index = read_long(fo) if reader_schema: # Handle case where the reader schema is just a single type (not union) if not isinstance(reader_schema, list): if match_types(writer_schema[index], reader_schema): return read_data(fo, writer_schema[index], reader_schema) else: for schema in reader_schema: if match_types(writer_schema[index], schema): return read_data(fo, writer_schema[index], schema) msg = 'schema mismatch: %s not found in %s' % \ (writer_schema, reader_schema) raise SchemaResolutionError(msg) else: return read_data(fo, writer_schema[index])
[ "def", "read_union", "(", "fo", ",", "writer_schema", ",", "reader_schema", "=", "None", ")", ":", "# schema resolution", "index", "=", "read_long", "(", "fo", ")", "if", "reader_schema", ":", "# Handle case where the reader schema is just a single type (not union)", "if", "not", "isinstance", "(", "reader_schema", ",", "list", ")", ":", "if", "match_types", "(", "writer_schema", "[", "index", "]", ",", "reader_schema", ")", ":", "return", "read_data", "(", "fo", ",", "writer_schema", "[", "index", "]", ",", "reader_schema", ")", "else", ":", "for", "schema", "in", "reader_schema", ":", "if", "match_types", "(", "writer_schema", "[", "index", "]", ",", "schema", ")", ":", "return", "read_data", "(", "fo", ",", "writer_schema", "[", "index", "]", ",", "schema", ")", "msg", "=", "'schema mismatch: %s not found in %s'", "%", "(", "writer_schema", ",", "reader_schema", ")", "raise", "SchemaResolutionError", "(", "msg", ")", "else", ":", "return", "read_data", "(", "fo", ",", "writer_schema", "[", "index", "]", ")" ]
A union is encoded by first writing a long value indicating the zero-based position within the union of the schema of its value. The value is then encoded per the indicated schema within the union.
[ "A", "union", "is", "encoded", "by", "first", "writing", "a", "long", "value", "indicating", "the", "zero", "-", "based", "position", "within", "the", "union", "of", "the", "schema", "of", "its", "value", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L345-L366
232,829
fastavro/fastavro
fastavro/_read_py.py
read_data
def read_data(fo, writer_schema, reader_schema=None): """Read data from file object according to schema.""" record_type = extract_record_type(writer_schema) logical_type = extract_logical_type(writer_schema) if reader_schema and record_type in AVRO_TYPES: # If the schemas are the same, set the reader schema to None so that no # schema resolution is done for this call or future recursive calls if writer_schema == reader_schema: reader_schema = None else: match_schemas(writer_schema, reader_schema) reader_fn = READERS.get(record_type) if reader_fn: try: data = reader_fn(fo, writer_schema, reader_schema) except StructError: raise EOFError('cannot read %s from %s' % (record_type, fo)) if 'logicalType' in writer_schema: fn = LOGICAL_READERS.get(logical_type) if fn: return fn(data, writer_schema, reader_schema) if reader_schema is not None: return maybe_promote( data, record_type, extract_record_type(reader_schema) ) else: return data else: return read_data( fo, SCHEMA_DEFS[record_type], SCHEMA_DEFS.get(reader_schema) )
python
def read_data(fo, writer_schema, reader_schema=None): """Read data from file object according to schema.""" record_type = extract_record_type(writer_schema) logical_type = extract_logical_type(writer_schema) if reader_schema and record_type in AVRO_TYPES: # If the schemas are the same, set the reader schema to None so that no # schema resolution is done for this call or future recursive calls if writer_schema == reader_schema: reader_schema = None else: match_schemas(writer_schema, reader_schema) reader_fn = READERS.get(record_type) if reader_fn: try: data = reader_fn(fo, writer_schema, reader_schema) except StructError: raise EOFError('cannot read %s from %s' % (record_type, fo)) if 'logicalType' in writer_schema: fn = LOGICAL_READERS.get(logical_type) if fn: return fn(data, writer_schema, reader_schema) if reader_schema is not None: return maybe_promote( data, record_type, extract_record_type(reader_schema) ) else: return data else: return read_data( fo, SCHEMA_DEFS[record_type], SCHEMA_DEFS.get(reader_schema) )
[ "def", "read_data", "(", "fo", ",", "writer_schema", ",", "reader_schema", "=", "None", ")", ":", "record_type", "=", "extract_record_type", "(", "writer_schema", ")", "logical_type", "=", "extract_logical_type", "(", "writer_schema", ")", "if", "reader_schema", "and", "record_type", "in", "AVRO_TYPES", ":", "# If the schemas are the same, set the reader schema to None so that no", "# schema resolution is done for this call or future recursive calls", "if", "writer_schema", "==", "reader_schema", ":", "reader_schema", "=", "None", "else", ":", "match_schemas", "(", "writer_schema", ",", "reader_schema", ")", "reader_fn", "=", "READERS", ".", "get", "(", "record_type", ")", "if", "reader_fn", ":", "try", ":", "data", "=", "reader_fn", "(", "fo", ",", "writer_schema", ",", "reader_schema", ")", "except", "StructError", ":", "raise", "EOFError", "(", "'cannot read %s from %s'", "%", "(", "record_type", ",", "fo", ")", ")", "if", "'logicalType'", "in", "writer_schema", ":", "fn", "=", "LOGICAL_READERS", ".", "get", "(", "logical_type", ")", "if", "fn", ":", "return", "fn", "(", "data", ",", "writer_schema", ",", "reader_schema", ")", "if", "reader_schema", "is", "not", "None", ":", "return", "maybe_promote", "(", "data", ",", "record_type", ",", "extract_record_type", "(", "reader_schema", ")", ")", "else", ":", "return", "data", "else", ":", "return", "read_data", "(", "fo", ",", "SCHEMA_DEFS", "[", "record_type", "]", ",", "SCHEMA_DEFS", ".", "get", "(", "reader_schema", ")", ")" ]
Read data from file object according to schema.
[ "Read", "data", "from", "file", "object", "according", "to", "schema", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L477-L516
232,830
fastavro/fastavro
fastavro/_read_py.py
_iter_avro_records
def _iter_avro_records(fo, header, codec, writer_schema, reader_schema): """Return iterator over avro records.""" sync_marker = header['sync'] read_block = BLOCK_READERS.get(codec) if not read_block: raise ValueError('Unrecognized codec: %r' % codec) block_count = 0 while True: try: block_count = read_long(fo) except StopIteration: return block_fo = read_block(fo) for i in xrange(block_count): yield read_data(block_fo, writer_schema, reader_schema) skip_sync(fo, sync_marker)
python
def _iter_avro_records(fo, header, codec, writer_schema, reader_schema): """Return iterator over avro records.""" sync_marker = header['sync'] read_block = BLOCK_READERS.get(codec) if not read_block: raise ValueError('Unrecognized codec: %r' % codec) block_count = 0 while True: try: block_count = read_long(fo) except StopIteration: return block_fo = read_block(fo) for i in xrange(block_count): yield read_data(block_fo, writer_schema, reader_schema) skip_sync(fo, sync_marker)
[ "def", "_iter_avro_records", "(", "fo", ",", "header", ",", "codec", ",", "writer_schema", ",", "reader_schema", ")", ":", "sync_marker", "=", "header", "[", "'sync'", "]", "read_block", "=", "BLOCK_READERS", ".", "get", "(", "codec", ")", "if", "not", "read_block", ":", "raise", "ValueError", "(", "'Unrecognized codec: %r'", "%", "codec", ")", "block_count", "=", "0", "while", "True", ":", "try", ":", "block_count", "=", "read_long", "(", "fo", ")", "except", "StopIteration", ":", "return", "block_fo", "=", "read_block", "(", "fo", ")", "for", "i", "in", "xrange", "(", "block_count", ")", ":", "yield", "read_data", "(", "block_fo", ",", "writer_schema", ",", "reader_schema", ")", "skip_sync", "(", "fo", ",", "sync_marker", ")" ]
Return iterator over avro records.
[ "Return", "iterator", "over", "avro", "records", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L559-L579
232,831
fastavro/fastavro
fastavro/_read_py.py
_iter_avro_blocks
def _iter_avro_blocks(fo, header, codec, writer_schema, reader_schema): """Return iterator over avro blocks.""" sync_marker = header['sync'] read_block = BLOCK_READERS.get(codec) if not read_block: raise ValueError('Unrecognized codec: %r' % codec) while True: offset = fo.tell() try: num_block_records = read_long(fo) except StopIteration: return block_bytes = read_block(fo) skip_sync(fo, sync_marker) size = fo.tell() - offset yield Block( block_bytes, num_block_records, codec, reader_schema, writer_schema, offset, size )
python
def _iter_avro_blocks(fo, header, codec, writer_schema, reader_schema): """Return iterator over avro blocks.""" sync_marker = header['sync'] read_block = BLOCK_READERS.get(codec) if not read_block: raise ValueError('Unrecognized codec: %r' % codec) while True: offset = fo.tell() try: num_block_records = read_long(fo) except StopIteration: return block_bytes = read_block(fo) skip_sync(fo, sync_marker) size = fo.tell() - offset yield Block( block_bytes, num_block_records, codec, reader_schema, writer_schema, offset, size )
[ "def", "_iter_avro_blocks", "(", "fo", ",", "header", ",", "codec", ",", "writer_schema", ",", "reader_schema", ")", ":", "sync_marker", "=", "header", "[", "'sync'", "]", "read_block", "=", "BLOCK_READERS", ".", "get", "(", "codec", ")", "if", "not", "read_block", ":", "raise", "ValueError", "(", "'Unrecognized codec: %r'", "%", "codec", ")", "while", "True", ":", "offset", "=", "fo", ".", "tell", "(", ")", "try", ":", "num_block_records", "=", "read_long", "(", "fo", ")", "except", "StopIteration", ":", "return", "block_bytes", "=", "read_block", "(", "fo", ")", "skip_sync", "(", "fo", ",", "sync_marker", ")", "size", "=", "fo", ".", "tell", "(", ")", "-", "offset", "yield", "Block", "(", "block_bytes", ",", "num_block_records", ",", "codec", ",", "reader_schema", ",", "writer_schema", ",", "offset", ",", "size", ")" ]
Return iterator over avro blocks.
[ "Return", "iterator", "over", "avro", "blocks", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L582-L606
232,832
fastavro/fastavro
fastavro/_write_py.py
prepare_timestamp_millis
def prepare_timestamp_millis(data, schema): """Converts datetime.datetime object to int timestamp with milliseconds """ if isinstance(data, datetime.datetime): if data.tzinfo is not None: delta = (data - epoch) return int(delta.total_seconds() * MLS_PER_SECOND) t = int(time.mktime(data.timetuple())) * MLS_PER_SECOND + int( data.microsecond / 1000) return t else: return data
python
def prepare_timestamp_millis(data, schema): """Converts datetime.datetime object to int timestamp with milliseconds """ if isinstance(data, datetime.datetime): if data.tzinfo is not None: delta = (data - epoch) return int(delta.total_seconds() * MLS_PER_SECOND) t = int(time.mktime(data.timetuple())) * MLS_PER_SECOND + int( data.microsecond / 1000) return t else: return data
[ "def", "prepare_timestamp_millis", "(", "data", ",", "schema", ")", ":", "if", "isinstance", "(", "data", ",", "datetime", ".", "datetime", ")", ":", "if", "data", ".", "tzinfo", "is", "not", "None", ":", "delta", "=", "(", "data", "-", "epoch", ")", "return", "int", "(", "delta", ".", "total_seconds", "(", ")", "*", "MLS_PER_SECOND", ")", "t", "=", "int", "(", "time", ".", "mktime", "(", "data", ".", "timetuple", "(", ")", ")", ")", "*", "MLS_PER_SECOND", "+", "int", "(", "data", ".", "microsecond", "/", "1000", ")", "return", "t", "else", ":", "return", "data" ]
Converts datetime.datetime object to int timestamp with milliseconds
[ "Converts", "datetime", ".", "datetime", "object", "to", "int", "timestamp", "with", "milliseconds" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L43-L54
232,833
fastavro/fastavro
fastavro/_write_py.py
prepare_timestamp_micros
def prepare_timestamp_micros(data, schema): """Converts datetime.datetime to int timestamp with microseconds""" if isinstance(data, datetime.datetime): if data.tzinfo is not None: delta = (data - epoch) return int(delta.total_seconds() * MCS_PER_SECOND) t = int(time.mktime(data.timetuple())) * MCS_PER_SECOND + \ data.microsecond return t else: return data
python
def prepare_timestamp_micros(data, schema): """Converts datetime.datetime to int timestamp with microseconds""" if isinstance(data, datetime.datetime): if data.tzinfo is not None: delta = (data - epoch) return int(delta.total_seconds() * MCS_PER_SECOND) t = int(time.mktime(data.timetuple())) * MCS_PER_SECOND + \ data.microsecond return t else: return data
[ "def", "prepare_timestamp_micros", "(", "data", ",", "schema", ")", ":", "if", "isinstance", "(", "data", ",", "datetime", ".", "datetime", ")", ":", "if", "data", ".", "tzinfo", "is", "not", "None", ":", "delta", "=", "(", "data", "-", "epoch", ")", "return", "int", "(", "delta", ".", "total_seconds", "(", ")", "*", "MCS_PER_SECOND", ")", "t", "=", "int", "(", "time", ".", "mktime", "(", "data", ".", "timetuple", "(", ")", ")", ")", "*", "MCS_PER_SECOND", "+", "data", ".", "microsecond", "return", "t", "else", ":", "return", "data" ]
Converts datetime.datetime to int timestamp with microseconds
[ "Converts", "datetime", ".", "datetime", "to", "int", "timestamp", "with", "microseconds" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L57-L67
232,834
fastavro/fastavro
fastavro/_write_py.py
prepare_date
def prepare_date(data, schema): """Converts datetime.date to int timestamp""" if isinstance(data, datetime.date): return data.toordinal() - DAYS_SHIFT else: return data
python
def prepare_date(data, schema): """Converts datetime.date to int timestamp""" if isinstance(data, datetime.date): return data.toordinal() - DAYS_SHIFT else: return data
[ "def", "prepare_date", "(", "data", ",", "schema", ")", ":", "if", "isinstance", "(", "data", ",", "datetime", ".", "date", ")", ":", "return", "data", ".", "toordinal", "(", ")", "-", "DAYS_SHIFT", "else", ":", "return", "data" ]
Converts datetime.date to int timestamp
[ "Converts", "datetime", ".", "date", "to", "int", "timestamp" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L70-L75
232,835
fastavro/fastavro
fastavro/_write_py.py
prepare_uuid
def prepare_uuid(data, schema): """Converts uuid.UUID to string formatted UUID xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx """ if isinstance(data, uuid.UUID): return str(data) else: return data
python
def prepare_uuid(data, schema): """Converts uuid.UUID to string formatted UUID xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx """ if isinstance(data, uuid.UUID): return str(data) else: return data
[ "def", "prepare_uuid", "(", "data", ",", "schema", ")", ":", "if", "isinstance", "(", "data", ",", "uuid", ".", "UUID", ")", ":", "return", "str", "(", "data", ")", "else", ":", "return", "data" ]
Converts uuid.UUID to string formatted UUID xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
[ "Converts", "uuid", ".", "UUID", "to", "string", "formatted", "UUID", "xxxxxxxx", "-", "xxxx", "-", "xxxx", "-", "xxxx", "-", "xxxxxxxxxxxx" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L78-L85
232,836
fastavro/fastavro
fastavro/_write_py.py
prepare_time_millis
def prepare_time_millis(data, schema): """Convert datetime.time to int timestamp with milliseconds""" if isinstance(data, datetime.time): return int( data.hour * MLS_PER_HOUR + data.minute * MLS_PER_MINUTE + data.second * MLS_PER_SECOND + int(data.microsecond / 1000)) else: return data
python
def prepare_time_millis(data, schema): """Convert datetime.time to int timestamp with milliseconds""" if isinstance(data, datetime.time): return int( data.hour * MLS_PER_HOUR + data.minute * MLS_PER_MINUTE + data.second * MLS_PER_SECOND + int(data.microsecond / 1000)) else: return data
[ "def", "prepare_time_millis", "(", "data", ",", "schema", ")", ":", "if", "isinstance", "(", "data", ",", "datetime", ".", "time", ")", ":", "return", "int", "(", "data", ".", "hour", "*", "MLS_PER_HOUR", "+", "data", ".", "minute", "*", "MLS_PER_MINUTE", "+", "data", ".", "second", "*", "MLS_PER_SECOND", "+", "int", "(", "data", ".", "microsecond", "/", "1000", ")", ")", "else", ":", "return", "data" ]
Convert datetime.time to int timestamp with milliseconds
[ "Convert", "datetime", ".", "time", "to", "int", "timestamp", "with", "milliseconds" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L88-L95
232,837
fastavro/fastavro
fastavro/_write_py.py
prepare_time_micros
def prepare_time_micros(data, schema): """Convert datetime.time to int timestamp with microseconds""" if isinstance(data, datetime.time): return long(data.hour * MCS_PER_HOUR + data.minute * MCS_PER_MINUTE + data.second * MCS_PER_SECOND + data.microsecond) else: return data
python
def prepare_time_micros(data, schema): """Convert datetime.time to int timestamp with microseconds""" if isinstance(data, datetime.time): return long(data.hour * MCS_PER_HOUR + data.minute * MCS_PER_MINUTE + data.second * MCS_PER_SECOND + data.microsecond) else: return data
[ "def", "prepare_time_micros", "(", "data", ",", "schema", ")", ":", "if", "isinstance", "(", "data", ",", "datetime", ".", "time", ")", ":", "return", "long", "(", "data", ".", "hour", "*", "MCS_PER_HOUR", "+", "data", ".", "minute", "*", "MCS_PER_MINUTE", "+", "data", ".", "second", "*", "MCS_PER_SECOND", "+", "data", ".", "microsecond", ")", "else", ":", "return", "data" ]
Convert datetime.time to int timestamp with microseconds
[ "Convert", "datetime", ".", "time", "to", "int", "timestamp", "with", "microseconds" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L98-L104
232,838
fastavro/fastavro
fastavro/_write_py.py
prepare_bytes_decimal
def prepare_bytes_decimal(data, schema): """Convert decimal.Decimal to bytes""" if not isinstance(data, decimal.Decimal): return data scale = schema.get('scale', 0) # based on https://github.com/apache/avro/pull/82/ sign, digits, exp = data.as_tuple() if -exp > scale: raise ValueError( 'Scale provided in schema does not match the decimal') delta = exp + scale if delta > 0: digits = digits + (0,) * delta unscaled_datum = 0 for digit in digits: unscaled_datum = (unscaled_datum * 10) + digit bits_req = unscaled_datum.bit_length() + 1 if sign: unscaled_datum = (1 << bits_req) - unscaled_datum bytes_req = bits_req // 8 padding_bits = ~((1 << bits_req) - 1) if sign else 0 packed_bits = padding_bits | unscaled_datum bytes_req += 1 if (bytes_req << 3) < bits_req else 0 tmp = MemoryIO() for index in range(bytes_req - 1, -1, -1): bits_to_write = packed_bits >> (8 * index) tmp.write(mk_bits(bits_to_write & 0xff)) return tmp.getvalue()
python
def prepare_bytes_decimal(data, schema): """Convert decimal.Decimal to bytes""" if not isinstance(data, decimal.Decimal): return data scale = schema.get('scale', 0) # based on https://github.com/apache/avro/pull/82/ sign, digits, exp = data.as_tuple() if -exp > scale: raise ValueError( 'Scale provided in schema does not match the decimal') delta = exp + scale if delta > 0: digits = digits + (0,) * delta unscaled_datum = 0 for digit in digits: unscaled_datum = (unscaled_datum * 10) + digit bits_req = unscaled_datum.bit_length() + 1 if sign: unscaled_datum = (1 << bits_req) - unscaled_datum bytes_req = bits_req // 8 padding_bits = ~((1 << bits_req) - 1) if sign else 0 packed_bits = padding_bits | unscaled_datum bytes_req += 1 if (bytes_req << 3) < bits_req else 0 tmp = MemoryIO() for index in range(bytes_req - 1, -1, -1): bits_to_write = packed_bits >> (8 * index) tmp.write(mk_bits(bits_to_write & 0xff)) return tmp.getvalue()
[ "def", "prepare_bytes_decimal", "(", "data", ",", "schema", ")", ":", "if", "not", "isinstance", "(", "data", ",", "decimal", ".", "Decimal", ")", ":", "return", "data", "scale", "=", "schema", ".", "get", "(", "'scale'", ",", "0", ")", "# based on https://github.com/apache/avro/pull/82/", "sign", ",", "digits", ",", "exp", "=", "data", ".", "as_tuple", "(", ")", "if", "-", "exp", ">", "scale", ":", "raise", "ValueError", "(", "'Scale provided in schema does not match the decimal'", ")", "delta", "=", "exp", "+", "scale", "if", "delta", ">", "0", ":", "digits", "=", "digits", "+", "(", "0", ",", ")", "*", "delta", "unscaled_datum", "=", "0", "for", "digit", "in", "digits", ":", "unscaled_datum", "=", "(", "unscaled_datum", "*", "10", ")", "+", "digit", "bits_req", "=", "unscaled_datum", ".", "bit_length", "(", ")", "+", "1", "if", "sign", ":", "unscaled_datum", "=", "(", "1", "<<", "bits_req", ")", "-", "unscaled_datum", "bytes_req", "=", "bits_req", "//", "8", "padding_bits", "=", "~", "(", "(", "1", "<<", "bits_req", ")", "-", "1", ")", "if", "sign", "else", "0", "packed_bits", "=", "padding_bits", "|", "unscaled_datum", "bytes_req", "+=", "1", "if", "(", "bytes_req", "<<", "3", ")", "<", "bits_req", "else", "0", "tmp", "=", "MemoryIO", "(", ")", "for", "index", "in", "range", "(", "bytes_req", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "bits_to_write", "=", "packed_bits", ">>", "(", "8", "*", "index", ")", "tmp", ".", "write", "(", "mk_bits", "(", "bits_to_write", "&", "0xff", ")", ")", "return", "tmp", ".", "getvalue", "(", ")" ]
Convert decimal.Decimal to bytes
[ "Convert", "decimal", ".", "Decimal", "to", "bytes" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L107-L145
232,839
fastavro/fastavro
fastavro/_write_py.py
prepare_fixed_decimal
def prepare_fixed_decimal(data, schema): """Converts decimal.Decimal to fixed length bytes array""" if not isinstance(data, decimal.Decimal): return data scale = schema.get('scale', 0) size = schema['size'] # based on https://github.com/apache/avro/pull/82/ sign, digits, exp = data.as_tuple() if -exp > scale: raise ValueError( 'Scale provided in schema does not match the decimal') delta = exp + scale if delta > 0: digits = digits + (0,) * delta unscaled_datum = 0 for digit in digits: unscaled_datum = (unscaled_datum * 10) + digit bits_req = unscaled_datum.bit_length() + 1 size_in_bits = size * 8 offset_bits = size_in_bits - bits_req mask = 2 ** size_in_bits - 1 bit = 1 for i in range(bits_req): mask ^= bit bit <<= 1 if bits_req < 8: bytes_req = 1 else: bytes_req = bits_req // 8 if bits_req % 8 != 0: bytes_req += 1 tmp = MemoryIO() if sign: unscaled_datum = (1 << bits_req) - unscaled_datum unscaled_datum = mask | unscaled_datum for index in range(size - 1, -1, -1): bits_to_write = unscaled_datum >> (8 * index) tmp.write(mk_bits(bits_to_write & 0xff)) else: for i in range(offset_bits // 8): tmp.write(mk_bits(0)) for index in range(bytes_req - 1, -1, -1): bits_to_write = unscaled_datum >> (8 * index) tmp.write(mk_bits(bits_to_write & 0xff)) return tmp.getvalue()
python
def prepare_fixed_decimal(data, schema): """Converts decimal.Decimal to fixed length bytes array""" if not isinstance(data, decimal.Decimal): return data scale = schema.get('scale', 0) size = schema['size'] # based on https://github.com/apache/avro/pull/82/ sign, digits, exp = data.as_tuple() if -exp > scale: raise ValueError( 'Scale provided in schema does not match the decimal') delta = exp + scale if delta > 0: digits = digits + (0,) * delta unscaled_datum = 0 for digit in digits: unscaled_datum = (unscaled_datum * 10) + digit bits_req = unscaled_datum.bit_length() + 1 size_in_bits = size * 8 offset_bits = size_in_bits - bits_req mask = 2 ** size_in_bits - 1 bit = 1 for i in range(bits_req): mask ^= bit bit <<= 1 if bits_req < 8: bytes_req = 1 else: bytes_req = bits_req // 8 if bits_req % 8 != 0: bytes_req += 1 tmp = MemoryIO() if sign: unscaled_datum = (1 << bits_req) - unscaled_datum unscaled_datum = mask | unscaled_datum for index in range(size - 1, -1, -1): bits_to_write = unscaled_datum >> (8 * index) tmp.write(mk_bits(bits_to_write & 0xff)) else: for i in range(offset_bits // 8): tmp.write(mk_bits(0)) for index in range(bytes_req - 1, -1, -1): bits_to_write = unscaled_datum >> (8 * index) tmp.write(mk_bits(bits_to_write & 0xff)) return tmp.getvalue()
[ "def", "prepare_fixed_decimal", "(", "data", ",", "schema", ")", ":", "if", "not", "isinstance", "(", "data", ",", "decimal", ".", "Decimal", ")", ":", "return", "data", "scale", "=", "schema", ".", "get", "(", "'scale'", ",", "0", ")", "size", "=", "schema", "[", "'size'", "]", "# based on https://github.com/apache/avro/pull/82/", "sign", ",", "digits", ",", "exp", "=", "data", ".", "as_tuple", "(", ")", "if", "-", "exp", ">", "scale", ":", "raise", "ValueError", "(", "'Scale provided in schema does not match the decimal'", ")", "delta", "=", "exp", "+", "scale", "if", "delta", ">", "0", ":", "digits", "=", "digits", "+", "(", "0", ",", ")", "*", "delta", "unscaled_datum", "=", "0", "for", "digit", "in", "digits", ":", "unscaled_datum", "=", "(", "unscaled_datum", "*", "10", ")", "+", "digit", "bits_req", "=", "unscaled_datum", ".", "bit_length", "(", ")", "+", "1", "size_in_bits", "=", "size", "*", "8", "offset_bits", "=", "size_in_bits", "-", "bits_req", "mask", "=", "2", "**", "size_in_bits", "-", "1", "bit", "=", "1", "for", "i", "in", "range", "(", "bits_req", ")", ":", "mask", "^=", "bit", "bit", "<<=", "1", "if", "bits_req", "<", "8", ":", "bytes_req", "=", "1", "else", ":", "bytes_req", "=", "bits_req", "//", "8", "if", "bits_req", "%", "8", "!=", "0", ":", "bytes_req", "+=", "1", "tmp", "=", "MemoryIO", "(", ")", "if", "sign", ":", "unscaled_datum", "=", "(", "1", "<<", "bits_req", ")", "-", "unscaled_datum", "unscaled_datum", "=", "mask", "|", "unscaled_datum", "for", "index", "in", "range", "(", "size", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "bits_to_write", "=", "unscaled_datum", ">>", "(", "8", "*", "index", ")", "tmp", ".", "write", "(", "mk_bits", "(", "bits_to_write", "&", "0xff", ")", ")", "else", ":", "for", "i", "in", "range", "(", "offset_bits", "//", "8", ")", ":", "tmp", ".", "write", "(", "mk_bits", "(", "0", ")", ")", "for", "index", "in", "range", "(", "bytes_req", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", 
"bits_to_write", "=", "unscaled_datum", ">>", "(", "8", "*", "index", ")", "tmp", ".", "write", "(", "mk_bits", "(", "bits_to_write", "&", "0xff", ")", ")", "return", "tmp", ".", "getvalue", "(", ")" ]
Converts decimal.Decimal to fixed length bytes array
[ "Converts", "decimal", ".", "Decimal", "to", "fixed", "length", "bytes", "array" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L148-L203
232,840
fastavro/fastavro
fastavro/_write_py.py
write_crc32
def write_crc32(fo, bytes): """A 4-byte, big-endian CRC32 checksum""" data = crc32(bytes) & 0xFFFFFFFF fo.write(pack('>I', data))
python
def write_crc32(fo, bytes): """A 4-byte, big-endian CRC32 checksum""" data = crc32(bytes) & 0xFFFFFFFF fo.write(pack('>I', data))
[ "def", "write_crc32", "(", "fo", ",", "bytes", ")", ":", "data", "=", "crc32", "(", "bytes", ")", "&", "0xFFFFFFFF", "fo", ".", "write", "(", "pack", "(", "'>I'", ",", "data", ")", ")" ]
A 4-byte, big-endian CRC32 checksum
[ "A", "4", "-", "byte", "big", "-", "endian", "CRC32", "checksum" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L245-L248
232,841
fastavro/fastavro
fastavro/_write_py.py
write_union
def write_union(fo, datum, schema): """A union is encoded by first writing a long value indicating the zero-based position within the union of the schema of its value. The value is then encoded per the indicated schema within the union.""" if isinstance(datum, tuple): (name, datum) = datum for index, candidate in enumerate(schema): if extract_record_type(candidate) == 'record': schema_name = candidate['name'] else: schema_name = candidate if name == schema_name: break else: msg = 'provided union type name %s not found in schema %s' \ % (name, schema) raise ValueError(msg) else: pytype = type(datum) best_match_index = -1 most_fields = -1 for index, candidate in enumerate(schema): if validate(datum, candidate, raise_errors=False): if extract_record_type(candidate) == 'record': fields = len(candidate['fields']) if fields > most_fields: best_match_index = index most_fields = fields else: best_match_index = index break if best_match_index < 0: msg = '%r (type %s) do not match %s' % (datum, pytype, schema) raise ValueError(msg) index = best_match_index # write data write_long(fo, index) write_data(fo, datum, schema[index])
python
def write_union(fo, datum, schema): """A union is encoded by first writing a long value indicating the zero-based position within the union of the schema of its value. The value is then encoded per the indicated schema within the union.""" if isinstance(datum, tuple): (name, datum) = datum for index, candidate in enumerate(schema): if extract_record_type(candidate) == 'record': schema_name = candidate['name'] else: schema_name = candidate if name == schema_name: break else: msg = 'provided union type name %s not found in schema %s' \ % (name, schema) raise ValueError(msg) else: pytype = type(datum) best_match_index = -1 most_fields = -1 for index, candidate in enumerate(schema): if validate(datum, candidate, raise_errors=False): if extract_record_type(candidate) == 'record': fields = len(candidate['fields']) if fields > most_fields: best_match_index = index most_fields = fields else: best_match_index = index break if best_match_index < 0: msg = '%r (type %s) do not match %s' % (datum, pytype, schema) raise ValueError(msg) index = best_match_index # write data write_long(fo, index) write_data(fo, datum, schema[index])
[ "def", "write_union", "(", "fo", ",", "datum", ",", "schema", ")", ":", "if", "isinstance", "(", "datum", ",", "tuple", ")", ":", "(", "name", ",", "datum", ")", "=", "datum", "for", "index", ",", "candidate", "in", "enumerate", "(", "schema", ")", ":", "if", "extract_record_type", "(", "candidate", ")", "==", "'record'", ":", "schema_name", "=", "candidate", "[", "'name'", "]", "else", ":", "schema_name", "=", "candidate", "if", "name", "==", "schema_name", ":", "break", "else", ":", "msg", "=", "'provided union type name %s not found in schema %s'", "%", "(", "name", ",", "schema", ")", "raise", "ValueError", "(", "msg", ")", "else", ":", "pytype", "=", "type", "(", "datum", ")", "best_match_index", "=", "-", "1", "most_fields", "=", "-", "1", "for", "index", ",", "candidate", "in", "enumerate", "(", "schema", ")", ":", "if", "validate", "(", "datum", ",", "candidate", ",", "raise_errors", "=", "False", ")", ":", "if", "extract_record_type", "(", "candidate", ")", "==", "'record'", ":", "fields", "=", "len", "(", "candidate", "[", "'fields'", "]", ")", "if", "fields", ">", "most_fields", ":", "best_match_index", "=", "index", "most_fields", "=", "fields", "else", ":", "best_match_index", "=", "index", "break", "if", "best_match_index", "<", "0", ":", "msg", "=", "'%r (type %s) do not match %s'", "%", "(", "datum", ",", "pytype", ",", "schema", ")", "raise", "ValueError", "(", "msg", ")", "index", "=", "best_match_index", "# write data", "write_long", "(", "fo", ",", "index", ")", "write_data", "(", "fo", ",", "datum", ",", "schema", "[", "index", "]", ")" ]
A union is encoded by first writing a long value indicating the zero-based position within the union of the schema of its value. The value is then encoded per the indicated schema within the union.
[ "A", "union", "is", "encoded", "by", "first", "writing", "a", "long", "value", "indicating", "the", "zero", "-", "based", "position", "within", "the", "union", "of", "the", "schema", "of", "its", "value", ".", "The", "value", "is", "then", "encoded", "per", "the", "indicated", "schema", "within", "the", "union", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L302-L341
232,842
fastavro/fastavro
fastavro/_write_py.py
write_data
def write_data(fo, datum, schema): """Write a datum of data to output stream. Paramaters ---------- fo: file-like Output file datum: object Data to write schema: dict Schemda to use """ record_type = extract_record_type(schema) logical_type = extract_logical_type(schema) fn = WRITERS.get(record_type) if fn: if logical_type: prepare = LOGICAL_WRITERS.get(logical_type) if prepare: datum = prepare(datum, schema) return fn(fo, datum, schema) else: return write_data(fo, datum, SCHEMA_DEFS[record_type])
python
def write_data(fo, datum, schema): """Write a datum of data to output stream. Paramaters ---------- fo: file-like Output file datum: object Data to write schema: dict Schemda to use """ record_type = extract_record_type(schema) logical_type = extract_logical_type(schema) fn = WRITERS.get(record_type) if fn: if logical_type: prepare = LOGICAL_WRITERS.get(logical_type) if prepare: datum = prepare(datum, schema) return fn(fo, datum, schema) else: return write_data(fo, datum, SCHEMA_DEFS[record_type])
[ "def", "write_data", "(", "fo", ",", "datum", ",", "schema", ")", ":", "record_type", "=", "extract_record_type", "(", "schema", ")", "logical_type", "=", "extract_logical_type", "(", "schema", ")", "fn", "=", "WRITERS", ".", "get", "(", "record_type", ")", "if", "fn", ":", "if", "logical_type", ":", "prepare", "=", "LOGICAL_WRITERS", ".", "get", "(", "logical_type", ")", "if", "prepare", ":", "datum", "=", "prepare", "(", "datum", ",", "schema", ")", "return", "fn", "(", "fo", ",", "datum", ",", "schema", ")", "else", ":", "return", "write_data", "(", "fo", ",", "datum", ",", "SCHEMA_DEFS", "[", "record_type", "]", ")" ]
Write a datum of data to output stream. Paramaters ---------- fo: file-like Output file datum: object Data to write schema: dict Schemda to use
[ "Write", "a", "datum", "of", "data", "to", "output", "stream", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L390-L414
232,843
fastavro/fastavro
fastavro/_write_py.py
null_write_block
def null_write_block(fo, block_bytes): """Write block in "null" codec.""" write_long(fo, len(block_bytes)) fo.write(block_bytes)
python
def null_write_block(fo, block_bytes): """Write block in "null" codec.""" write_long(fo, len(block_bytes)) fo.write(block_bytes)
[ "def", "null_write_block", "(", "fo", ",", "block_bytes", ")", ":", "write_long", "(", "fo", ",", "len", "(", "block_bytes", ")", ")", "fo", ".", "write", "(", "block_bytes", ")" ]
Write block in "null" codec.
[ "Write", "block", "in", "null", "codec", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L426-L429
232,844
fastavro/fastavro
fastavro/_write_py.py
deflate_write_block
def deflate_write_block(fo, block_bytes): """Write block in "deflate" codec.""" # The first two characters and last character are zlib # wrappers around deflate data. data = compress(block_bytes)[2:-1] write_long(fo, len(data)) fo.write(data)
python
def deflate_write_block(fo, block_bytes): """Write block in "deflate" codec.""" # The first two characters and last character are zlib # wrappers around deflate data. data = compress(block_bytes)[2:-1] write_long(fo, len(data)) fo.write(data)
[ "def", "deflate_write_block", "(", "fo", ",", "block_bytes", ")", ":", "# The first two characters and last character are zlib", "# wrappers around deflate data.", "data", "=", "compress", "(", "block_bytes", ")", "[", "2", ":", "-", "1", "]", "write_long", "(", "fo", ",", "len", "(", "data", ")", ")", "fo", ".", "write", "(", "data", ")" ]
Write block in "deflate" codec.
[ "Write", "block", "in", "deflate", "codec", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L432-L439
232,845
fastavro/fastavro
fastavro/_write_py.py
schemaless_writer
def schemaless_writer(fo, schema, record): """Write a single record without the schema or header information Parameters ---------- fo: file-like Output file schema: dict Schema record: dict Record to write Example:: parsed_schema = fastavro.parse_schema(schema) with open('file.avro', 'rb') as fp: fastavro.schemaless_writer(fp, parsed_schema, record) Note: The ``schemaless_writer`` can only write a single record. """ schema = parse_schema(schema) write_data(fo, record, schema)
python
def schemaless_writer(fo, schema, record): """Write a single record without the schema or header information Parameters ---------- fo: file-like Output file schema: dict Schema record: dict Record to write Example:: parsed_schema = fastavro.parse_schema(schema) with open('file.avro', 'rb') as fp: fastavro.schemaless_writer(fp, parsed_schema, record) Note: The ``schemaless_writer`` can only write a single record. """ schema = parse_schema(schema) write_data(fo, record, schema)
[ "def", "schemaless_writer", "(", "fo", ",", "schema", ",", "record", ")", ":", "schema", "=", "parse_schema", "(", "schema", ")", "write_data", "(", "fo", ",", "record", ",", "schema", ")" ]
Write a single record without the schema or header information Parameters ---------- fo: file-like Output file schema: dict Schema record: dict Record to write Example:: parsed_schema = fastavro.parse_schema(schema) with open('file.avro', 'rb') as fp: fastavro.schemaless_writer(fp, parsed_schema, record) Note: The ``schemaless_writer`` can only write a single record.
[ "Write", "a", "single", "record", "without", "the", "schema", "or", "header", "information" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L636-L658
232,846
fastavro/fastavro
fastavro/_validation_py.py
validate_int
def validate_int(datum, **kwargs): """ Check that the data value is a non floating point number with size less that Int32. Also support for logicalType timestamp validation with datetime. Int32 = -2147483648<=datum<=2147483647 conditional python types (int, long, numbers.Integral, datetime.time, datetime.datetime, datetime.date) Parameters ---------- datum: Any Data being validated kwargs: Any Unused kwargs """ return ( (isinstance(datum, (int, long, numbers.Integral)) and INT_MIN_VALUE <= datum <= INT_MAX_VALUE and not isinstance(datum, bool)) or isinstance( datum, (datetime.time, datetime.datetime, datetime.date) ) )
python
def validate_int(datum, **kwargs): """ Check that the data value is a non floating point number with size less that Int32. Also support for logicalType timestamp validation with datetime. Int32 = -2147483648<=datum<=2147483647 conditional python types (int, long, numbers.Integral, datetime.time, datetime.datetime, datetime.date) Parameters ---------- datum: Any Data being validated kwargs: Any Unused kwargs """ return ( (isinstance(datum, (int, long, numbers.Integral)) and INT_MIN_VALUE <= datum <= INT_MAX_VALUE and not isinstance(datum, bool)) or isinstance( datum, (datetime.time, datetime.datetime, datetime.date) ) )
[ "def", "validate_int", "(", "datum", ",", "*", "*", "kwargs", ")", ":", "return", "(", "(", "isinstance", "(", "datum", ",", "(", "int", ",", "long", ",", "numbers", ".", "Integral", ")", ")", "and", "INT_MIN_VALUE", "<=", "datum", "<=", "INT_MAX_VALUE", "and", "not", "isinstance", "(", "datum", ",", "bool", ")", ")", "or", "isinstance", "(", "datum", ",", "(", "datetime", ".", "time", ",", "datetime", ".", "datetime", ",", "datetime", ".", "date", ")", ")", ")" ]
Check that the data value is a non floating point number with size less that Int32. Also support for logicalType timestamp validation with datetime. Int32 = -2147483648<=datum<=2147483647 conditional python types (int, long, numbers.Integral, datetime.time, datetime.datetime, datetime.date) Parameters ---------- datum: Any Data being validated kwargs: Any Unused kwargs
[ "Check", "that", "the", "data", "value", "is", "a", "non", "floating", "point", "number", "with", "size", "less", "that", "Int32", ".", "Also", "support", "for", "logicalType", "timestamp", "validation", "with", "datetime", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_validation_py.py#L79-L105
232,847
fastavro/fastavro
fastavro/_validation_py.py
validate_float
def validate_float(datum, **kwargs): """ Check that the data value is a floating point number or double precision. conditional python types (int, long, float, numbers.Real) Parameters ---------- datum: Any Data being validated kwargs: Any Unused kwargs """ return ( isinstance(datum, (int, long, float, numbers.Real)) and not isinstance(datum, bool) )
python
def validate_float(datum, **kwargs): """ Check that the data value is a floating point number or double precision. conditional python types (int, long, float, numbers.Real) Parameters ---------- datum: Any Data being validated kwargs: Any Unused kwargs """ return ( isinstance(datum, (int, long, float, numbers.Real)) and not isinstance(datum, bool) )
[ "def", "validate_float", "(", "datum", ",", "*", "*", "kwargs", ")", ":", "return", "(", "isinstance", "(", "datum", ",", "(", "int", ",", "long", ",", "float", ",", "numbers", ".", "Real", ")", ")", "and", "not", "isinstance", "(", "datum", ",", "bool", ")", ")" ]
Check that the data value is a floating point number or double precision. conditional python types (int, long, float, numbers.Real) Parameters ---------- datum: Any Data being validated kwargs: Any Unused kwargs
[ "Check", "that", "the", "data", "value", "is", "a", "floating", "point", "number", "or", "double", "precision", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_validation_py.py#L137-L155
232,848
fastavro/fastavro
fastavro/_validation_py.py
validate_record
def validate_record(datum, schema, parent_ns=None, raise_errors=True): """ Check that the data is a Mapping type with all schema defined fields validated as True. Parameters ---------- datum: Any Data being validated schema: dict Schema parent_ns: str parent namespace raise_errors: bool If true, raises ValidationError on invalid data """ _, namespace = schema_name(schema, parent_ns) return ( isinstance(datum, Mapping) and all(validate(datum=datum.get(f['name'], f.get('default', no_value)), schema=f['type'], field='{}.{}'.format(namespace, f['name']), raise_errors=raise_errors) for f in schema['fields'] ) )
python
def validate_record(datum, schema, parent_ns=None, raise_errors=True): """ Check that the data is a Mapping type with all schema defined fields validated as True. Parameters ---------- datum: Any Data being validated schema: dict Schema parent_ns: str parent namespace raise_errors: bool If true, raises ValidationError on invalid data """ _, namespace = schema_name(schema, parent_ns) return ( isinstance(datum, Mapping) and all(validate(datum=datum.get(f['name'], f.get('default', no_value)), schema=f['type'], field='{}.{}'.format(namespace, f['name']), raise_errors=raise_errors) for f in schema['fields'] ) )
[ "def", "validate_record", "(", "datum", ",", "schema", ",", "parent_ns", "=", "None", ",", "raise_errors", "=", "True", ")", ":", "_", ",", "namespace", "=", "schema_name", "(", "schema", ",", "parent_ns", ")", "return", "(", "isinstance", "(", "datum", ",", "Mapping", ")", "and", "all", "(", "validate", "(", "datum", "=", "datum", ".", "get", "(", "f", "[", "'name'", "]", ",", "f", ".", "get", "(", "'default'", ",", "no_value", ")", ")", ",", "schema", "=", "f", "[", "'type'", "]", ",", "field", "=", "'{}.{}'", ".", "format", "(", "namespace", ",", "f", "[", "'name'", "]", ")", ",", "raise_errors", "=", "raise_errors", ")", "for", "f", "in", "schema", "[", "'fields'", "]", ")", ")" ]
Check that the data is a Mapping type with all schema defined fields validated as True. Parameters ---------- datum: Any Data being validated schema: dict Schema parent_ns: str parent namespace raise_errors: bool If true, raises ValidationError on invalid data
[ "Check", "that", "the", "data", "is", "a", "Mapping", "type", "with", "all", "schema", "defined", "fields", "validated", "as", "True", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_validation_py.py#L245-L270
232,849
fastavro/fastavro
fastavro/_validation_py.py
validate_union
def validate_union(datum, schema, parent_ns=None, raise_errors=True): """ Check that the data is a list type with possible options to validate as True. Parameters ---------- datum: Any Data being validated schema: dict Schema parent_ns: str parent namespace raise_errors: bool If true, raises ValidationError on invalid data """ if isinstance(datum, tuple): (name, datum) = datum for candidate in schema: if extract_record_type(candidate) == 'record': if name == candidate["name"]: return validate(datum, schema=candidate, field=parent_ns, raise_errors=raise_errors) else: return False errors = [] for s in schema: try: ret = validate(datum, schema=s, field=parent_ns, raise_errors=raise_errors) if ret: # We exit on the first passing type in Unions return True except ValidationError as e: errors.extend(e.errors) if raise_errors: raise ValidationError(*errors) return False
python
def validate_union(datum, schema, parent_ns=None, raise_errors=True): """ Check that the data is a list type with possible options to validate as True. Parameters ---------- datum: Any Data being validated schema: dict Schema parent_ns: str parent namespace raise_errors: bool If true, raises ValidationError on invalid data """ if isinstance(datum, tuple): (name, datum) = datum for candidate in schema: if extract_record_type(candidate) == 'record': if name == candidate["name"]: return validate(datum, schema=candidate, field=parent_ns, raise_errors=raise_errors) else: return False errors = [] for s in schema: try: ret = validate(datum, schema=s, field=parent_ns, raise_errors=raise_errors) if ret: # We exit on the first passing type in Unions return True except ValidationError as e: errors.extend(e.errors) if raise_errors: raise ValidationError(*errors) return False
[ "def", "validate_union", "(", "datum", ",", "schema", ",", "parent_ns", "=", "None", ",", "raise_errors", "=", "True", ")", ":", "if", "isinstance", "(", "datum", ",", "tuple", ")", ":", "(", "name", ",", "datum", ")", "=", "datum", "for", "candidate", "in", "schema", ":", "if", "extract_record_type", "(", "candidate", ")", "==", "'record'", ":", "if", "name", "==", "candidate", "[", "\"name\"", "]", ":", "return", "validate", "(", "datum", ",", "schema", "=", "candidate", ",", "field", "=", "parent_ns", ",", "raise_errors", "=", "raise_errors", ")", "else", ":", "return", "False", "errors", "=", "[", "]", "for", "s", "in", "schema", ":", "try", ":", "ret", "=", "validate", "(", "datum", ",", "schema", "=", "s", ",", "field", "=", "parent_ns", ",", "raise_errors", "=", "raise_errors", ")", "if", "ret", ":", "# We exit on the first passing type in Unions", "return", "True", "except", "ValidationError", "as", "e", ":", "errors", ".", "extend", "(", "e", ".", "errors", ")", "if", "raise_errors", ":", "raise", "ValidationError", "(", "*", "errors", ")", "return", "False" ]
Check that the data is a list type with possible options to validate as True. Parameters ---------- datum: Any Data being validated schema: dict Schema parent_ns: str parent namespace raise_errors: bool If true, raises ValidationError on invalid data
[ "Check", "that", "the", "data", "is", "a", "list", "type", "with", "possible", "options", "to", "validate", "as", "True", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_validation_py.py#L273-L313
232,850
fastavro/fastavro
fastavro/_validation_py.py
validate_many
def validate_many(records, schema, raise_errors=True): """ Validate a list of data! Parameters ---------- records: iterable List of records to validate schema: dict Schema raise_errors: bool, optional If true, errors are raised for invalid data. If false, a simple True (valid) or False (invalid) result is returned Example:: from fastavro.validation import validate_many schema = {...} records = [{...}, {...}, ...] validate_many(records, schema) """ errors = [] results = [] for record in records: try: results.append(validate(record, schema, raise_errors=raise_errors)) except ValidationError as e: errors.extend(e.errors) if raise_errors and errors: raise ValidationError(*errors) return all(results)
python
def validate_many(records, schema, raise_errors=True): """ Validate a list of data! Parameters ---------- records: iterable List of records to validate schema: dict Schema raise_errors: bool, optional If true, errors are raised for invalid data. If false, a simple True (valid) or False (invalid) result is returned Example:: from fastavro.validation import validate_many schema = {...} records = [{...}, {...}, ...] validate_many(records, schema) """ errors = [] results = [] for record in records: try: results.append(validate(record, schema, raise_errors=raise_errors)) except ValidationError as e: errors.extend(e.errors) if raise_errors and errors: raise ValidationError(*errors) return all(results)
[ "def", "validate_many", "(", "records", ",", "schema", ",", "raise_errors", "=", "True", ")", ":", "errors", "=", "[", "]", "results", "=", "[", "]", "for", "record", "in", "records", ":", "try", ":", "results", ".", "append", "(", "validate", "(", "record", ",", "schema", ",", "raise_errors", "=", "raise_errors", ")", ")", "except", "ValidationError", "as", "e", ":", "errors", ".", "extend", "(", "e", ".", "errors", ")", "if", "raise_errors", "and", "errors", ":", "raise", "ValidationError", "(", "*", "errors", ")", "return", "all", "(", "results", ")" ]
Validate a list of data! Parameters ---------- records: iterable List of records to validate schema: dict Schema raise_errors: bool, optional If true, errors are raised for invalid data. If false, a simple True (valid) or False (invalid) result is returned Example:: from fastavro.validation import validate_many schema = {...} records = [{...}, {...}, ...] validate_many(records, schema)
[ "Validate", "a", "list", "of", "data!" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_validation_py.py#L383-L414
232,851
fastavro/fastavro
fastavro/_schema_py.py
parse_schema
def parse_schema(schema, _write_hint=True, _force=False): """Returns a parsed avro schema It is not necessary to call parse_schema but doing so and saving the parsed schema for use later will make future operations faster as the schema will not need to be reparsed. Parameters ---------- schema: dict Input schema _write_hint: bool Internal API argument specifying whether or not the __fastavro_parsed marker should be added to the schema _force: bool Internal API argument. If True, the schema will always be parsed even if it has been parsed and has the __fastavro_parsed marker Example:: from fastavro import parse_schema from fastavro import writer parsed_schema = parse_schema(original_schema) with open('weather.avro', 'wb') as out: writer(out, parsed_schema, records) """ if _force: return _parse_schema(schema, "", _write_hint) elif isinstance(schema, dict) and "__fastavro_parsed" in schema: return schema else: return _parse_schema(schema, "", _write_hint)
python
def parse_schema(schema, _write_hint=True, _force=False): """Returns a parsed avro schema It is not necessary to call parse_schema but doing so and saving the parsed schema for use later will make future operations faster as the schema will not need to be reparsed. Parameters ---------- schema: dict Input schema _write_hint: bool Internal API argument specifying whether or not the __fastavro_parsed marker should be added to the schema _force: bool Internal API argument. If True, the schema will always be parsed even if it has been parsed and has the __fastavro_parsed marker Example:: from fastavro import parse_schema from fastavro import writer parsed_schema = parse_schema(original_schema) with open('weather.avro', 'wb') as out: writer(out, parsed_schema, records) """ if _force: return _parse_schema(schema, "", _write_hint) elif isinstance(schema, dict) and "__fastavro_parsed" in schema: return schema else: return _parse_schema(schema, "", _write_hint)
[ "def", "parse_schema", "(", "schema", ",", "_write_hint", "=", "True", ",", "_force", "=", "False", ")", ":", "if", "_force", ":", "return", "_parse_schema", "(", "schema", ",", "\"\"", ",", "_write_hint", ")", "elif", "isinstance", "(", "schema", ",", "dict", ")", "and", "\"__fastavro_parsed\"", "in", "schema", ":", "return", "schema", "else", ":", "return", "_parse_schema", "(", "schema", ",", "\"\"", ",", "_write_hint", ")" ]
Returns a parsed avro schema It is not necessary to call parse_schema but doing so and saving the parsed schema for use later will make future operations faster as the schema will not need to be reparsed. Parameters ---------- schema: dict Input schema _write_hint: bool Internal API argument specifying whether or not the __fastavro_parsed marker should be added to the schema _force: bool Internal API argument. If True, the schema will always be parsed even if it has been parsed and has the __fastavro_parsed marker Example:: from fastavro import parse_schema from fastavro import writer parsed_schema = parse_schema(original_schema) with open('weather.avro', 'wb') as out: writer(out, parsed_schema, records)
[ "Returns", "a", "parsed", "avro", "schema" ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_schema_py.py#L53-L86
232,852
fastavro/fastavro
fastavro/_schema_py.py
load_schema
def load_schema(schema_path): ''' Returns a schema loaded from the file at `schema_path`. Will recursively load referenced schemas assuming they can be found in files in the same directory and named with the convention `<type_name>.avsc`. ''' with open(schema_path) as fd: schema = json.load(fd) schema_dir, schema_file = path.split(schema_path) return _load_schema(schema, schema_dir)
python
def load_schema(schema_path): ''' Returns a schema loaded from the file at `schema_path`. Will recursively load referenced schemas assuming they can be found in files in the same directory and named with the convention `<type_name>.avsc`. ''' with open(schema_path) as fd: schema = json.load(fd) schema_dir, schema_file = path.split(schema_path) return _load_schema(schema, schema_dir)
[ "def", "load_schema", "(", "schema_path", ")", ":", "with", "open", "(", "schema_path", ")", "as", "fd", ":", "schema", "=", "json", ".", "load", "(", "fd", ")", "schema_dir", ",", "schema_file", "=", "path", ".", "split", "(", "schema_path", ")", "return", "_load_schema", "(", "schema", ",", "schema_dir", ")" ]
Returns a schema loaded from the file at `schema_path`. Will recursively load referenced schemas assuming they can be found in files in the same directory and named with the convention `<type_name>.avsc`.
[ "Returns", "a", "schema", "loaded", "from", "the", "file", "at", "schema_path", "." ]
bafe826293e19eb93e77bbb0f6adfa059c7884b2
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_schema_py.py#L212-L223
232,853
alejandroautalan/pygubu
pygubu/widgets/simpletooltip.py
ToolTip.showtip
def showtip(self, text): "Display text in tooltip window" self.text = text if self.tipwindow or not self.text: return x, y, cx, cy = self.widget.bbox("insert") x = x + self.widget.winfo_rootx() + 27 y = y + cy + self.widget.winfo_rooty() +27 self.tipwindow = tw = tk.Toplevel(self.widget) tw.wm_overrideredirect(1) tw.wm_geometry("+%d+%d" % (x, y)) try: # For Mac OS tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates") except tk.TclError: pass label = tk.Label(tw, text=self.text, justify=tk.LEFT, background="#ffffe0", foreground="black", relief=tk.SOLID, borderwidth=1, font=("tahoma", "8", "normal")) label.pack(ipadx=1)
python
def showtip(self, text): "Display text in tooltip window" self.text = text if self.tipwindow or not self.text: return x, y, cx, cy = self.widget.bbox("insert") x = x + self.widget.winfo_rootx() + 27 y = y + cy + self.widget.winfo_rooty() +27 self.tipwindow = tw = tk.Toplevel(self.widget) tw.wm_overrideredirect(1) tw.wm_geometry("+%d+%d" % (x, y)) try: # For Mac OS tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates") except tk.TclError: pass label = tk.Label(tw, text=self.text, justify=tk.LEFT, background="#ffffe0", foreground="black", relief=tk.SOLID, borderwidth=1, font=("tahoma", "8", "normal")) label.pack(ipadx=1)
[ "def", "showtip", "(", "self", ",", "text", ")", ":", "self", ".", "text", "=", "text", "if", "self", ".", "tipwindow", "or", "not", "self", ".", "text", ":", "return", "x", ",", "y", ",", "cx", ",", "cy", "=", "self", ".", "widget", ".", "bbox", "(", "\"insert\"", ")", "x", "=", "x", "+", "self", ".", "widget", ".", "winfo_rootx", "(", ")", "+", "27", "y", "=", "y", "+", "cy", "+", "self", ".", "widget", ".", "winfo_rooty", "(", ")", "+", "27", "self", ".", "tipwindow", "=", "tw", "=", "tk", ".", "Toplevel", "(", "self", ".", "widget", ")", "tw", ".", "wm_overrideredirect", "(", "1", ")", "tw", ".", "wm_geometry", "(", "\"+%d+%d\"", "%", "(", "x", ",", "y", ")", ")", "try", ":", "# For Mac OS", "tw", ".", "tk", ".", "call", "(", "\"::tk::unsupported::MacWindowStyle\"", ",", "\"style\"", ",", "tw", ".", "_w", ",", "\"help\"", ",", "\"noActivates\"", ")", "except", "tk", ".", "TclError", ":", "pass", "label", "=", "tk", ".", "Label", "(", "tw", ",", "text", "=", "self", ".", "text", ",", "justify", "=", "tk", ".", "LEFT", ",", "background", "=", "\"#ffffe0\"", ",", "foreground", "=", "\"black\"", ",", "relief", "=", "tk", ".", "SOLID", ",", "borderwidth", "=", "1", ",", "font", "=", "(", "\"tahoma\"", ",", "\"8\"", ",", "\"normal\"", ")", ")", "label", ".", "pack", "(", "ipadx", "=", "1", ")" ]
Display text in tooltip window
[ "Display", "text", "in", "tooltip", "window" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/simpletooltip.py#L20-L42
232,854
alejandroautalan/pygubu
pygubu/__init__.py
TkApplication.run
def run(self): """Ejecute the main loop.""" self.toplevel.protocol("WM_DELETE_WINDOW", self.__on_window_close) self.toplevel.mainloop()
python
def run(self): """Ejecute the main loop.""" self.toplevel.protocol("WM_DELETE_WINDOW", self.__on_window_close) self.toplevel.mainloop()
[ "def", "run", "(", "self", ")", ":", "self", ".", "toplevel", ".", "protocol", "(", "\"WM_DELETE_WINDOW\"", ",", "self", ".", "__on_window_close", ")", "self", ".", "toplevel", ".", "mainloop", "(", ")" ]
Ejecute the main loop.
[ "Ejecute", "the", "main", "loop", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/__init__.py#L41-L45
232,855
alejandroautalan/pygubu
examples/py2exe/myapp.py
MyApplication.create_regpoly
def create_regpoly(self, x0, y0, x1, y1, sides=0, start=90, extent=360, **kw): """Create a regular polygon""" coords = self.__regpoly_coords(x0, y0, x1, y1, sides, start, extent) return self.canvas.create_polygon(*coords, **kw)
python
def create_regpoly(self, x0, y0, x1, y1, sides=0, start=90, extent=360, **kw): """Create a regular polygon""" coords = self.__regpoly_coords(x0, y0, x1, y1, sides, start, extent) return self.canvas.create_polygon(*coords, **kw)
[ "def", "create_regpoly", "(", "self", ",", "x0", ",", "y0", ",", "x1", ",", "y1", ",", "sides", "=", "0", ",", "start", "=", "90", ",", "extent", "=", "360", ",", "*", "*", "kw", ")", ":", "coords", "=", "self", ".", "__regpoly_coords", "(", "x0", ",", "y0", ",", "x1", ",", "y1", ",", "sides", ",", "start", ",", "extent", ")", "return", "self", ".", "canvas", ".", "create_polygon", "(", "*", "coords", ",", "*", "*", "kw", ")" ]
Create a regular polygon
[ "Create", "a", "regular", "polygon" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/examples/py2exe/myapp.py#L131-L134
232,856
alejandroautalan/pygubu
examples/py2exe/myapp.py
MyApplication.__regpoly_coords
def __regpoly_coords(self, x0, y0, x1, y1, sides, start, extent): """Create the coordinates of the regular polygon specified""" coords = [] if extent == 0: return coords xm = (x0 + x1) / 2. ym = (y0 + y1) / 2. rx = xm - x0 ry = ym - y0 n = sides if n == 0: # 0 sides => circle n = round((rx + ry) * .5) if n < 2: n = 4 # Extent can be negative dirv = 1 if extent > 0 else -1 if abs(extent) > 360: extent = dirv * abs(extent) % 360 step = dirv * 360 / n numsteps = 1 + extent / float(step) numsteps_int = int(numsteps) i = 0 while i < numsteps_int: rad = (start - i * step) * DEG2RAD x = rx * math.cos(rad) y = ry * math.sin(rad) coords.append((xm+x, ym-y)) i += 1 # Figure out where last segment should end if numsteps != numsteps_int: # Vecter V1 is last drawn vertext (x,y) from above # Vector V2 is the edge of the polygon rad2 = (start - numsteps_int * step) * DEG2RAD x2 = rx * math.cos(rad2) - x y2 = ry * math.sin(rad2) - y # Vector V3 is unit vector in direction we end at rad3 = (start - extent) * DEG2RAD x3 = math.cos(rad3) y3 = math.sin(rad3) # Find where V3 crosses V1+V2 => find j s.t. V1 + kV2 = jV3 j = (x*y2 - x2*y) / (x3*y2 - x2*y3) coords.append((xm + j * x3, ym - j * y3)) return coords
python
def __regpoly_coords(self, x0, y0, x1, y1, sides, start, extent): """Create the coordinates of the regular polygon specified""" coords = [] if extent == 0: return coords xm = (x0 + x1) / 2. ym = (y0 + y1) / 2. rx = xm - x0 ry = ym - y0 n = sides if n == 0: # 0 sides => circle n = round((rx + ry) * .5) if n < 2: n = 4 # Extent can be negative dirv = 1 if extent > 0 else -1 if abs(extent) > 360: extent = dirv * abs(extent) % 360 step = dirv * 360 / n numsteps = 1 + extent / float(step) numsteps_int = int(numsteps) i = 0 while i < numsteps_int: rad = (start - i * step) * DEG2RAD x = rx * math.cos(rad) y = ry * math.sin(rad) coords.append((xm+x, ym-y)) i += 1 # Figure out where last segment should end if numsteps != numsteps_int: # Vecter V1 is last drawn vertext (x,y) from above # Vector V2 is the edge of the polygon rad2 = (start - numsteps_int * step) * DEG2RAD x2 = rx * math.cos(rad2) - x y2 = ry * math.sin(rad2) - y # Vector V3 is unit vector in direction we end at rad3 = (start - extent) * DEG2RAD x3 = math.cos(rad3) y3 = math.sin(rad3) # Find where V3 crosses V1+V2 => find j s.t. V1 + kV2 = jV3 j = (x*y2 - x2*y) / (x3*y2 - x2*y3) coords.append((xm + j * x3, ym - j * y3)) return coords
[ "def", "__regpoly_coords", "(", "self", ",", "x0", ",", "y0", ",", "x1", ",", "y1", ",", "sides", ",", "start", ",", "extent", ")", ":", "coords", "=", "[", "]", "if", "extent", "==", "0", ":", "return", "coords", "xm", "=", "(", "x0", "+", "x1", ")", "/", "2.", "ym", "=", "(", "y0", "+", "y1", ")", "/", "2.", "rx", "=", "xm", "-", "x0", "ry", "=", "ym", "-", "y0", "n", "=", "sides", "if", "n", "==", "0", ":", "# 0 sides => circle", "n", "=", "round", "(", "(", "rx", "+", "ry", ")", "*", ".5", ")", "if", "n", "<", "2", ":", "n", "=", "4", "# Extent can be negative", "dirv", "=", "1", "if", "extent", ">", "0", "else", "-", "1", "if", "abs", "(", "extent", ")", ">", "360", ":", "extent", "=", "dirv", "*", "abs", "(", "extent", ")", "%", "360", "step", "=", "dirv", "*", "360", "/", "n", "numsteps", "=", "1", "+", "extent", "/", "float", "(", "step", ")", "numsteps_int", "=", "int", "(", "numsteps", ")", "i", "=", "0", "while", "i", "<", "numsteps_int", ":", "rad", "=", "(", "start", "-", "i", "*", "step", ")", "*", "DEG2RAD", "x", "=", "rx", "*", "math", ".", "cos", "(", "rad", ")", "y", "=", "ry", "*", "math", ".", "sin", "(", "rad", ")", "coords", ".", "append", "(", "(", "xm", "+", "x", ",", "ym", "-", "y", ")", ")", "i", "+=", "1", "# Figure out where last segment should end", "if", "numsteps", "!=", "numsteps_int", ":", "# Vecter V1 is last drawn vertext (x,y) from above", "# Vector V2 is the edge of the polygon", "rad2", "=", "(", "start", "-", "numsteps_int", "*", "step", ")", "*", "DEG2RAD", "x2", "=", "rx", "*", "math", ".", "cos", "(", "rad2", ")", "-", "x", "y2", "=", "ry", "*", "math", ".", "sin", "(", "rad2", ")", "-", "y", "# Vector V3 is unit vector in direction we end at", "rad3", "=", "(", "start", "-", "extent", ")", "*", "DEG2RAD", "x3", "=", "math", ".", "cos", "(", "rad3", ")", "y3", "=", "math", ".", "sin", "(", "rad3", ")", "# Find where V3 crosses V1+V2 => find j s.t. 
V1 + kV2 = jV3", "j", "=", "(", "x", "*", "y2", "-", "x2", "*", "y", ")", "/", "(", "x3", "*", "y2", "-", "x2", "*", "y3", ")", "coords", ".", "append", "(", "(", "xm", "+", "j", "*", "x3", ",", "ym", "-", "j", "*", "y3", ")", ")", "return", "coords" ]
Create the coordinates of the regular polygon specified
[ "Create", "the", "coordinates", "of", "the", "regular", "polygon", "specified" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/examples/py2exe/myapp.py#L136-L189
232,857
alejandroautalan/pygubu
pygubu/builder/__init__.py
Builder.get_image
def get_image(self, path): """Return tk image corresponding to name which is taken form path.""" image = '' name = os.path.basename(path) if not StockImage.is_registered(name): ipath = self.__find_image(path) if ipath is not None: StockImage.register(name, ipath) else: msg = "Image '{0}' not found in resource paths.".format(name) logger.warning(msg) try: image = StockImage.get(name) except StockImageException: # TODO: notify something here. pass return image
python
def get_image(self, path): """Return tk image corresponding to name which is taken form path.""" image = '' name = os.path.basename(path) if not StockImage.is_registered(name): ipath = self.__find_image(path) if ipath is not None: StockImage.register(name, ipath) else: msg = "Image '{0}' not found in resource paths.".format(name) logger.warning(msg) try: image = StockImage.get(name) except StockImageException: # TODO: notify something here. pass return image
[ "def", "get_image", "(", "self", ",", "path", ")", ":", "image", "=", "''", "name", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "if", "not", "StockImage", ".", "is_registered", "(", "name", ")", ":", "ipath", "=", "self", ".", "__find_image", "(", "path", ")", "if", "ipath", "is", "not", "None", ":", "StockImage", ".", "register", "(", "name", ",", "ipath", ")", "else", ":", "msg", "=", "\"Image '{0}' not found in resource paths.\"", ".", "format", "(", "name", ")", "logger", ".", "warning", "(", "msg", ")", "try", ":", "image", "=", "StockImage", ".", "get", "(", "name", ")", "except", "StockImageException", ":", "# TODO: notify something here.", "pass", "return", "image" ]
Return tk image corresponding to name which is taken form path.
[ "Return", "tk", "image", "corresponding", "to", "name", "which", "is", "taken", "form", "path", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L195-L211
232,858
alejandroautalan/pygubu
pygubu/builder/__init__.py
Builder.import_variables
def import_variables(self, container, varnames=None): """Helper method to avoid call get_variable for every variable.""" if varnames is None: for keyword in self.tkvariables: setattr(container, keyword, self.tkvariables[keyword]) else: for keyword in varnames: if keyword in self.tkvariables: setattr(container, keyword, self.tkvariables[keyword])
python
def import_variables(self, container, varnames=None): """Helper method to avoid call get_variable for every variable.""" if varnames is None: for keyword in self.tkvariables: setattr(container, keyword, self.tkvariables[keyword]) else: for keyword in varnames: if keyword in self.tkvariables: setattr(container, keyword, self.tkvariables[keyword])
[ "def", "import_variables", "(", "self", ",", "container", ",", "varnames", "=", "None", ")", ":", "if", "varnames", "is", "None", ":", "for", "keyword", "in", "self", ".", "tkvariables", ":", "setattr", "(", "container", ",", "keyword", ",", "self", ".", "tkvariables", "[", "keyword", "]", ")", "else", ":", "for", "keyword", "in", "varnames", ":", "if", "keyword", "in", "self", ".", "tkvariables", ":", "setattr", "(", "container", ",", "keyword", ",", "self", ".", "tkvariables", "[", "keyword", "]", ")" ]
Helper method to avoid call get_variable for every variable.
[ "Helper", "method", "to", "avoid", "call", "get_variable", "for", "every", "variable", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L228-L236
232,859
alejandroautalan/pygubu
pygubu/builder/__init__.py
Builder.create_variable
def create_variable(self, varname, vtype=None): """Create a tk variable. If the variable was created previously return that instance. """ var_types = ('string', 'int', 'boolean', 'double') vname = varname var = None type_from_name = 'string' # default type if ':' in varname: type_from_name, vname = varname.split(':') # Fix incorrect order bug #33 if type_from_name not in (var_types): # Swap order type_from_name, vname = vname, type_from_name if type_from_name not in (var_types): raise Exception('Undefined variable type in "{0}"'.format(varname)) if vname in self.tkvariables: var = self.tkvariables[vname] else: if vtype is None: # get type from name if type_from_name == 'int': var = tkinter.IntVar() elif type_from_name == 'boolean': var = tkinter.BooleanVar() elif type_from_name == 'double': var = tkinter.DoubleVar() else: var = tkinter.StringVar() else: var = vtype() self.tkvariables[vname] = var return var
python
def create_variable(self, varname, vtype=None): """Create a tk variable. If the variable was created previously return that instance. """ var_types = ('string', 'int', 'boolean', 'double') vname = varname var = None type_from_name = 'string' # default type if ':' in varname: type_from_name, vname = varname.split(':') # Fix incorrect order bug #33 if type_from_name not in (var_types): # Swap order type_from_name, vname = vname, type_from_name if type_from_name not in (var_types): raise Exception('Undefined variable type in "{0}"'.format(varname)) if vname in self.tkvariables: var = self.tkvariables[vname] else: if vtype is None: # get type from name if type_from_name == 'int': var = tkinter.IntVar() elif type_from_name == 'boolean': var = tkinter.BooleanVar() elif type_from_name == 'double': var = tkinter.DoubleVar() else: var = tkinter.StringVar() else: var = vtype() self.tkvariables[vname] = var return var
[ "def", "create_variable", "(", "self", ",", "varname", ",", "vtype", "=", "None", ")", ":", "var_types", "=", "(", "'string'", ",", "'int'", ",", "'boolean'", ",", "'double'", ")", "vname", "=", "varname", "var", "=", "None", "type_from_name", "=", "'string'", "# default type", "if", "':'", "in", "varname", ":", "type_from_name", ",", "vname", "=", "varname", ".", "split", "(", "':'", ")", "# Fix incorrect order bug #33", "if", "type_from_name", "not", "in", "(", "var_types", ")", ":", "# Swap order", "type_from_name", ",", "vname", "=", "vname", ",", "type_from_name", "if", "type_from_name", "not", "in", "(", "var_types", ")", ":", "raise", "Exception", "(", "'Undefined variable type in \"{0}\"'", ".", "format", "(", "varname", ")", ")", "if", "vname", "in", "self", ".", "tkvariables", ":", "var", "=", "self", ".", "tkvariables", "[", "vname", "]", "else", ":", "if", "vtype", "is", "None", ":", "# get type from name", "if", "type_from_name", "==", "'int'", ":", "var", "=", "tkinter", ".", "IntVar", "(", ")", "elif", "type_from_name", "==", "'boolean'", ":", "var", "=", "tkinter", ".", "BooleanVar", "(", ")", "elif", "type_from_name", "==", "'double'", ":", "var", "=", "tkinter", ".", "DoubleVar", "(", ")", "else", ":", "var", "=", "tkinter", ".", "StringVar", "(", ")", "else", ":", "var", "=", "vtype", "(", ")", "self", ".", "tkvariables", "[", "vname", "]", "=", "var", "return", "var" ]
Create a tk variable. If the variable was created previously return that instance.
[ "Create", "a", "tk", "variable", ".", "If", "the", "variable", "was", "created", "previously", "return", "that", "instance", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L238-L273
232,860
alejandroautalan/pygubu
pygubu/builder/__init__.py
Builder.add_from_file
def add_from_file(self, fpath): """Load ui definition from file.""" if self.tree is None: base, name = os.path.split(fpath) self.add_resource_path(base) self.tree = tree = ET.parse(fpath) self.root = tree.getroot() self.objects = {} else: # TODO: append to current tree pass
python
def add_from_file(self, fpath): """Load ui definition from file.""" if self.tree is None: base, name = os.path.split(fpath) self.add_resource_path(base) self.tree = tree = ET.parse(fpath) self.root = tree.getroot() self.objects = {} else: # TODO: append to current tree pass
[ "def", "add_from_file", "(", "self", ",", "fpath", ")", ":", "if", "self", ".", "tree", "is", "None", ":", "base", ",", "name", "=", "os", ".", "path", ".", "split", "(", "fpath", ")", "self", ".", "add_resource_path", "(", "base", ")", "self", ".", "tree", "=", "tree", "=", "ET", ".", "parse", "(", "fpath", ")", "self", ".", "root", "=", "tree", ".", "getroot", "(", ")", "self", ".", "objects", "=", "{", "}", "else", ":", "# TODO: append to current tree", "pass" ]
Load ui definition from file.
[ "Load", "ui", "definition", "from", "file", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L275-L285
232,861
alejandroautalan/pygubu
pygubu/builder/__init__.py
Builder.add_from_string
def add_from_string(self, strdata): """Load ui definition from string.""" if self.tree is None: self.tree = tree = ET.ElementTree(ET.fromstring(strdata)) self.root = tree.getroot() self.objects = {} else: # TODO: append to current tree pass
python
def add_from_string(self, strdata): """Load ui definition from string.""" if self.tree is None: self.tree = tree = ET.ElementTree(ET.fromstring(strdata)) self.root = tree.getroot() self.objects = {} else: # TODO: append to current tree pass
[ "def", "add_from_string", "(", "self", ",", "strdata", ")", ":", "if", "self", ".", "tree", "is", "None", ":", "self", ".", "tree", "=", "tree", "=", "ET", ".", "ElementTree", "(", "ET", ".", "fromstring", "(", "strdata", ")", ")", "self", ".", "root", "=", "tree", ".", "getroot", "(", ")", "self", ".", "objects", "=", "{", "}", "else", ":", "# TODO: append to current tree", "pass" ]
Load ui definition from string.
[ "Load", "ui", "definition", "from", "string", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L287-L295
232,862
alejandroautalan/pygubu
pygubu/builder/__init__.py
Builder.add_from_xmlnode
def add_from_xmlnode(self, element): """Load ui definition from xml.etree.Element node.""" if self.tree is None: root = ET.Element('interface') root.append(element) self.tree = tree = ET.ElementTree(root) self.root = tree.getroot() self.objects = {} # ET.dump(tree) else: # TODO: append to current tree pass
python
def add_from_xmlnode(self, element): """Load ui definition from xml.etree.Element node.""" if self.tree is None: root = ET.Element('interface') root.append(element) self.tree = tree = ET.ElementTree(root) self.root = tree.getroot() self.objects = {} # ET.dump(tree) else: # TODO: append to current tree pass
[ "def", "add_from_xmlnode", "(", "self", ",", "element", ")", ":", "if", "self", ".", "tree", "is", "None", ":", "root", "=", "ET", ".", "Element", "(", "'interface'", ")", "root", ".", "append", "(", "element", ")", "self", ".", "tree", "=", "tree", "=", "ET", ".", "ElementTree", "(", "root", ")", "self", ".", "root", "=", "tree", ".", "getroot", "(", ")", "self", ".", "objects", "=", "{", "}", "# ET.dump(tree)", "else", ":", "# TODO: append to current tree", "pass" ]
Load ui definition from xml.etree.Element node.
[ "Load", "ui", "definition", "from", "xml", ".", "etree", ".", "Element", "node", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L297-L308
232,863
alejandroautalan/pygubu
pygubu/builder/__init__.py
Builder.get_object
def get_object(self, name, master=None): """Find and create the widget named name. Use master as parent. If widget was already created, return that instance.""" widget = None if name in self.objects: widget = self.objects[name].widget else: xpath = ".//object[@id='{0}']".format(name) node = self.tree.find(xpath) if node is not None: root = BuilderObject(self, dict()) root.widget = master bobject = self._realize(root, node) widget = bobject.widget if widget is None: msg = 'Widget "{0}" not defined.'.format(name) raise Exception(msg) return widget
python
def get_object(self, name, master=None): """Find and create the widget named name. Use master as parent. If widget was already created, return that instance.""" widget = None if name in self.objects: widget = self.objects[name].widget else: xpath = ".//object[@id='{0}']".format(name) node = self.tree.find(xpath) if node is not None: root = BuilderObject(self, dict()) root.widget = master bobject = self._realize(root, node) widget = bobject.widget if widget is None: msg = 'Widget "{0}" not defined.'.format(name) raise Exception(msg) return widget
[ "def", "get_object", "(", "self", ",", "name", ",", "master", "=", "None", ")", ":", "widget", "=", "None", "if", "name", "in", "self", ".", "objects", ":", "widget", "=", "self", ".", "objects", "[", "name", "]", ".", "widget", "else", ":", "xpath", "=", "\".//object[@id='{0}']\"", ".", "format", "(", "name", ")", "node", "=", "self", ".", "tree", ".", "find", "(", "xpath", ")", "if", "node", "is", "not", "None", ":", "root", "=", "BuilderObject", "(", "self", ",", "dict", "(", ")", ")", "root", ".", "widget", "=", "master", "bobject", "=", "self", ".", "_realize", "(", "root", ",", "node", ")", "widget", "=", "bobject", ".", "widget", "if", "widget", "is", "None", ":", "msg", "=", "'Widget \"{0}\" not defined.'", ".", "format", "(", "name", ")", "raise", "Exception", "(", "msg", ")", "return", "widget" ]
Find and create the widget named name. Use master as parent. If widget was already created, return that instance.
[ "Find", "and", "create", "the", "widget", "named", "name", ".", "Use", "master", "as", "parent", ".", "If", "widget", "was", "already", "created", "return", "that", "instance", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L310-L328
232,864
alejandroautalan/pygubu
pygubu/builder/__init__.py
Builder._realize
def _realize(self, master, element): """Builds a widget from xml element using master as parent.""" data = data_xmlnode_to_dict(element, self.translator) cname = data['class'] uniqueid = data['id'] if cname not in CLASS_MAP: self._import_class(cname) if cname in CLASS_MAP: self._pre_process_data(data) parent = CLASS_MAP[cname].builder.factory(self, data) widget = parent.realize(master) self.objects[uniqueid] = parent xpath = "./child" children = element.findall(xpath) for child in children: child_xml = child.find('./object') child = self._realize(parent, child_xml) parent.add_child(child) parent.configure() parent.layout() return parent else: raise Exception('Class "{0}" not mapped'.format(cname))
python
def _realize(self, master, element): """Builds a widget from xml element using master as parent.""" data = data_xmlnode_to_dict(element, self.translator) cname = data['class'] uniqueid = data['id'] if cname not in CLASS_MAP: self._import_class(cname) if cname in CLASS_MAP: self._pre_process_data(data) parent = CLASS_MAP[cname].builder.factory(self, data) widget = parent.realize(master) self.objects[uniqueid] = parent xpath = "./child" children = element.findall(xpath) for child in children: child_xml = child.find('./object') child = self._realize(parent, child_xml) parent.add_child(child) parent.configure() parent.layout() return parent else: raise Exception('Class "{0}" not mapped'.format(cname))
[ "def", "_realize", "(", "self", ",", "master", ",", "element", ")", ":", "data", "=", "data_xmlnode_to_dict", "(", "element", ",", "self", ".", "translator", ")", "cname", "=", "data", "[", "'class'", "]", "uniqueid", "=", "data", "[", "'id'", "]", "if", "cname", "not", "in", "CLASS_MAP", ":", "self", ".", "_import_class", "(", "cname", ")", "if", "cname", "in", "CLASS_MAP", ":", "self", ".", "_pre_process_data", "(", "data", ")", "parent", "=", "CLASS_MAP", "[", "cname", "]", ".", "builder", ".", "factory", "(", "self", ",", "data", ")", "widget", "=", "parent", ".", "realize", "(", "master", ")", "self", ".", "objects", "[", "uniqueid", "]", "=", "parent", "xpath", "=", "\"./child\"", "children", "=", "element", ".", "findall", "(", "xpath", ")", "for", "child", "in", "children", ":", "child_xml", "=", "child", ".", "find", "(", "'./object'", ")", "child", "=", "self", ".", "_realize", "(", "parent", ",", "child_xml", ")", "parent", ".", "add_child", "(", "child", ")", "parent", ".", "configure", "(", ")", "parent", ".", "layout", "(", ")", "return", "parent", "else", ":", "raise", "Exception", "(", "'Class \"{0}\" not mapped'", ".", "format", "(", "cname", ")", ")" ]
Builds a widget from xml element using master as parent.
[ "Builds", "a", "widget", "from", "xml", "element", "using", "master", "as", "parent", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L349-L377
232,865
alejandroautalan/pygubu
pygubu/builder/__init__.py
Builder.connect_callbacks
def connect_callbacks(self, callbacks_bag): """Connect callbacks specified in callbacks_bag with callbacks defined in the ui definition. Return a list with the name of the callbacks not connected. """ notconnected = [] for wname, builderobj in self.objects.items(): missing = builderobj.connect_commands(callbacks_bag) if missing is not None: notconnected.extend(missing) missing = builderobj.connect_bindings(callbacks_bag) if missing is not None: notconnected.extend(missing) if notconnected: notconnected = list(set(notconnected)) msg = 'Missing callbacks for commands: {}'.format(notconnected) logger.warning(msg) return notconnected else: return None
python
def connect_callbacks(self, callbacks_bag): """Connect callbacks specified in callbacks_bag with callbacks defined in the ui definition. Return a list with the name of the callbacks not connected. """ notconnected = [] for wname, builderobj in self.objects.items(): missing = builderobj.connect_commands(callbacks_bag) if missing is not None: notconnected.extend(missing) missing = builderobj.connect_bindings(callbacks_bag) if missing is not None: notconnected.extend(missing) if notconnected: notconnected = list(set(notconnected)) msg = 'Missing callbacks for commands: {}'.format(notconnected) logger.warning(msg) return notconnected else: return None
[ "def", "connect_callbacks", "(", "self", ",", "callbacks_bag", ")", ":", "notconnected", "=", "[", "]", "for", "wname", ",", "builderobj", "in", "self", ".", "objects", ".", "items", "(", ")", ":", "missing", "=", "builderobj", ".", "connect_commands", "(", "callbacks_bag", ")", "if", "missing", "is", "not", "None", ":", "notconnected", ".", "extend", "(", "missing", ")", "missing", "=", "builderobj", ".", "connect_bindings", "(", "callbacks_bag", ")", "if", "missing", "is", "not", "None", ":", "notconnected", ".", "extend", "(", "missing", ")", "if", "notconnected", ":", "notconnected", "=", "list", "(", "set", "(", "notconnected", ")", ")", "msg", "=", "'Missing callbacks for commands: {}'", ".", "format", "(", "notconnected", ")", "logger", ".", "warning", "(", "msg", ")", "return", "notconnected", "else", ":", "return", "None" ]
Connect callbacks specified in callbacks_bag with callbacks defined in the ui definition. Return a list with the name of the callbacks not connected.
[ "Connect", "callbacks", "specified", "in", "callbacks_bag", "with", "callbacks", "defined", "in", "the", "ui", "definition", ".", "Return", "a", "list", "with", "the", "name", "of", "the", "callbacks", "not", "connected", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/__init__.py#L388-L407
232,866
alejandroautalan/pygubu
pygubudesigner/util/selecttool.py
SelectTool._start_selecting
def _start_selecting(self, event): """Comienza con el proceso de seleccion.""" self._selecting = True canvas = self._canvas x = canvas.canvasx(event.x) y = canvas.canvasy(event.y) self._sstart = (x, y) if not self._sobject: self._sobject = canvas.create_rectangle( self._sstart[0], self._sstart[1], x, y, dash=(3,5), outline='#0000ff' ) canvas.itemconfigure(self._sobject, state=tk.NORMAL)
python
def _start_selecting(self, event): """Comienza con el proceso de seleccion.""" self._selecting = True canvas = self._canvas x = canvas.canvasx(event.x) y = canvas.canvasy(event.y) self._sstart = (x, y) if not self._sobject: self._sobject = canvas.create_rectangle( self._sstart[0], self._sstart[1], x, y, dash=(3,5), outline='#0000ff' ) canvas.itemconfigure(self._sobject, state=tk.NORMAL)
[ "def", "_start_selecting", "(", "self", ",", "event", ")", ":", "self", ".", "_selecting", "=", "True", "canvas", "=", "self", ".", "_canvas", "x", "=", "canvas", ".", "canvasx", "(", "event", ".", "x", ")", "y", "=", "canvas", ".", "canvasy", "(", "event", ".", "y", ")", "self", ".", "_sstart", "=", "(", "x", ",", "y", ")", "if", "not", "self", ".", "_sobject", ":", "self", ".", "_sobject", "=", "canvas", ".", "create_rectangle", "(", "self", ".", "_sstart", "[", "0", "]", ",", "self", ".", "_sstart", "[", "1", "]", ",", "x", ",", "y", ",", "dash", "=", "(", "3", ",", "5", ")", ",", "outline", "=", "'#0000ff'", ")", "canvas", ".", "itemconfigure", "(", "self", ".", "_sobject", ",", "state", "=", "tk", ".", "NORMAL", ")" ]
Comienza con el proceso de seleccion.
[ "Comienza", "con", "el", "proceso", "de", "seleccion", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/util/selecttool.py#L51-L63
232,867
alejandroautalan/pygubu
pygubudesigner/util/selecttool.py
SelectTool._keep_selecting
def _keep_selecting(self, event): """Continua con el proceso de seleccion. Crea o redimensiona el cuadro de seleccion de acuerdo con la posicion del raton.""" canvas = self._canvas x = canvas.canvasx(event.x) y = canvas.canvasy(event.y) canvas.coords(self._sobject, self._sstart[0], self._sstart[1], x, y)
python
def _keep_selecting(self, event): """Continua con el proceso de seleccion. Crea o redimensiona el cuadro de seleccion de acuerdo con la posicion del raton.""" canvas = self._canvas x = canvas.canvasx(event.x) y = canvas.canvasy(event.y) canvas.coords(self._sobject, self._sstart[0], self._sstart[1], x, y)
[ "def", "_keep_selecting", "(", "self", ",", "event", ")", ":", "canvas", "=", "self", ".", "_canvas", "x", "=", "canvas", ".", "canvasx", "(", "event", ".", "x", ")", "y", "=", "canvas", ".", "canvasy", "(", "event", ".", "y", ")", "canvas", ".", "coords", "(", "self", ".", "_sobject", ",", "self", ".", "_sstart", "[", "0", "]", ",", "self", ".", "_sstart", "[", "1", "]", ",", "x", ",", "y", ")" ]
Continua con el proceso de seleccion. Crea o redimensiona el cuadro de seleccion de acuerdo con la posicion del raton.
[ "Continua", "con", "el", "proceso", "de", "seleccion", ".", "Crea", "o", "redimensiona", "el", "cuadro", "de", "seleccion", "de", "acuerdo", "con", "la", "posicion", "del", "raton", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/util/selecttool.py#L65-L73
232,868
alejandroautalan/pygubu
pygubudesigner/util/selecttool.py
SelectTool._finish_selecting
def _finish_selecting(self, event): """Finaliza la seleccion. Marca como seleccionados todos los objetos que se encuentran dentro del recuadro de seleccion.""" self._selecting = False canvas = self._canvas x = canvas.canvasx(event.x) y = canvas.canvasy(event.y) canvas.coords(self._sobject, -1, -1, -1, -1) canvas.itemconfigure(self._sobject, state=tk.HIDDEN) sel_region = self._sstart[0], self._sstart[1], x, y canvas.region_selected = sel_region canvas.event_generate('<<RegionSelected>>')
python
def _finish_selecting(self, event): """Finaliza la seleccion. Marca como seleccionados todos los objetos que se encuentran dentro del recuadro de seleccion.""" self._selecting = False canvas = self._canvas x = canvas.canvasx(event.x) y = canvas.canvasy(event.y) canvas.coords(self._sobject, -1, -1, -1, -1) canvas.itemconfigure(self._sobject, state=tk.HIDDEN) sel_region = self._sstart[0], self._sstart[1], x, y canvas.region_selected = sel_region canvas.event_generate('<<RegionSelected>>')
[ "def", "_finish_selecting", "(", "self", ",", "event", ")", ":", "self", ".", "_selecting", "=", "False", "canvas", "=", "self", ".", "_canvas", "x", "=", "canvas", ".", "canvasx", "(", "event", ".", "x", ")", "y", "=", "canvas", ".", "canvasy", "(", "event", ".", "y", ")", "canvas", ".", "coords", "(", "self", ".", "_sobject", ",", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", ")", "canvas", ".", "itemconfigure", "(", "self", ".", "_sobject", ",", "state", "=", "tk", ".", "HIDDEN", ")", "sel_region", "=", "self", ".", "_sstart", "[", "0", "]", ",", "self", ".", "_sstart", "[", "1", "]", ",", "x", ",", "y", "canvas", ".", "region_selected", "=", "sel_region", "canvas", ".", "event_generate", "(", "'<<RegionSelected>>'", ")" ]
Finaliza la seleccion. Marca como seleccionados todos los objetos que se encuentran dentro del recuadro de seleccion.
[ "Finaliza", "la", "seleccion", ".", "Marca", "como", "seleccionados", "todos", "los", "objetos", "que", "se", "encuentran", "dentro", "del", "recuadro", "de", "seleccion", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/util/selecttool.py#L75-L89
232,869
alejandroautalan/pygubu
pygubu/widgets/calendarframe.py
matrix_coords
def matrix_coords(rows, cols, rowh, colw, ox=0, oy=0): "Generate coords for a matrix of rects" for i, f, c in rowmajor(rows, cols): x = ox + c * colw y = oy + f * rowh x1 = x + colw y1 = y + rowh yield (i, x, y, x1, y1)
python
def matrix_coords(rows, cols, rowh, colw, ox=0, oy=0): "Generate coords for a matrix of rects" for i, f, c in rowmajor(rows, cols): x = ox + c * colw y = oy + f * rowh x1 = x + colw y1 = y + rowh yield (i, x, y, x1, y1)
[ "def", "matrix_coords", "(", "rows", ",", "cols", ",", "rowh", ",", "colw", ",", "ox", "=", "0", ",", "oy", "=", "0", ")", ":", "for", "i", ",", "f", ",", "c", "in", "rowmajor", "(", "rows", ",", "cols", ")", ":", "x", "=", "ox", "+", "c", "*", "colw", "y", "=", "oy", "+", "f", "*", "rowh", "x1", "=", "x", "+", "colw", "y1", "=", "y", "+", "rowh", "yield", "(", "i", ",", "x", ",", "y", ",", "x1", ",", "y1", ")" ]
Generate coords for a matrix of rects
[ "Generate", "coords", "for", "a", "matrix", "of", "rects" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/calendarframe.py#L40-L47
232,870
alejandroautalan/pygubu
pygubudesigner/util/__init__.py
ArrayVar.get
def get(self): '''Return a dictionary that represents the Tcl array''' value = {} for (elementname, elementvar) in self._elementvars.items(): value[elementname] = elementvar.get() return value
python
def get(self): '''Return a dictionary that represents the Tcl array''' value = {} for (elementname, elementvar) in self._elementvars.items(): value[elementname] = elementvar.get() return value
[ "def", "get", "(", "self", ")", ":", "value", "=", "{", "}", "for", "(", "elementname", ",", "elementvar", ")", "in", "self", ".", "_elementvars", ".", "items", "(", ")", ":", "value", "[", "elementname", "]", "=", "elementvar", ".", "get", "(", ")", "return", "value" ]
Return a dictionary that represents the Tcl array
[ "Return", "a", "dictionary", "that", "represents", "the", "Tcl", "array" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/util/__init__.py#L96-L101
232,871
alejandroautalan/pygubu
pygubu/widgets/editabletreeview.py
EditableTreeview.yview
def yview(self, *args): """Update inplace widgets position when doing vertical scroll""" self.after_idle(self.__updateWnds) ttk.Treeview.yview(self, *args)
python
def yview(self, *args): """Update inplace widgets position when doing vertical scroll""" self.after_idle(self.__updateWnds) ttk.Treeview.yview(self, *args)
[ "def", "yview", "(", "self", ",", "*", "args", ")", ":", "self", ".", "after_idle", "(", "self", ".", "__updateWnds", ")", "ttk", ".", "Treeview", ".", "yview", "(", "self", ",", "*", "args", ")" ]
Update inplace widgets position when doing vertical scroll
[ "Update", "inplace", "widgets", "position", "when", "doing", "vertical", "scroll" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/editabletreeview.py#L106-L109
232,872
alejandroautalan/pygubu
pygubu/widgets/editabletreeview.py
EditableTreeview.xview
def xview(self, *args): """Update inplace widgets position when doing horizontal scroll""" self.after_idle(self.__updateWnds) ttk.Treeview.xview(self, *args)
python
def xview(self, *args): """Update inplace widgets position when doing horizontal scroll""" self.after_idle(self.__updateWnds) ttk.Treeview.xview(self, *args)
[ "def", "xview", "(", "self", ",", "*", "args", ")", ":", "self", ".", "after_idle", "(", "self", ".", "__updateWnds", ")", "ttk", ".", "Treeview", ".", "xview", "(", "self", ",", "*", "args", ")" ]
Update inplace widgets position when doing horizontal scroll
[ "Update", "inplace", "widgets", "position", "when", "doing", "horizontal", "scroll" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/editabletreeview.py#L119-L122
232,873
alejandroautalan/pygubu
pygubu/widgets/editabletreeview.py
EditableTreeview.__check_focus
def __check_focus(self, event): """Checks if the focus has changed""" #print('Event:', event.type, event.x, event.y) changed = False if not self._curfocus: changed = True elif self._curfocus != self.focus(): self.__clear_inplace_widgets() changed = True newfocus = self.focus() if changed: if newfocus: #print('Focus changed to:', newfocus) self._curfocus= newfocus self.__focus(newfocus) self.__updateWnds()
python
def __check_focus(self, event): """Checks if the focus has changed""" #print('Event:', event.type, event.x, event.y) changed = False if not self._curfocus: changed = True elif self._curfocus != self.focus(): self.__clear_inplace_widgets() changed = True newfocus = self.focus() if changed: if newfocus: #print('Focus changed to:', newfocus) self._curfocus= newfocus self.__focus(newfocus) self.__updateWnds()
[ "def", "__check_focus", "(", "self", ",", "event", ")", ":", "#print('Event:', event.type, event.x, event.y)", "changed", "=", "False", "if", "not", "self", ".", "_curfocus", ":", "changed", "=", "True", "elif", "self", ".", "_curfocus", "!=", "self", ".", "focus", "(", ")", ":", "self", ".", "__clear_inplace_widgets", "(", ")", "changed", "=", "True", "newfocus", "=", "self", ".", "focus", "(", ")", "if", "changed", ":", "if", "newfocus", ":", "#print('Focus changed to:', newfocus)", "self", ".", "_curfocus", "=", "newfocus", "self", ".", "__focus", "(", "newfocus", ")", "self", ".", "__updateWnds", "(", ")" ]
Checks if the focus has changed
[ "Checks", "if", "the", "focus", "has", "changed" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/editabletreeview.py#L132-L147
232,874
alejandroautalan/pygubu
pygubu/widgets/editabletreeview.py
EditableTreeview.__focus
def __focus(self, item): """Called when focus item has changed""" cols = self.__get_display_columns() for col in cols: self.__event_info =(col,item) self.event_generate('<<TreeviewInplaceEdit>>') if col in self._inplace_widgets: w = self._inplace_widgets[col] w.bind('<Key-Tab>', lambda e: w.tk_focusNext().focus_set()) w.bind('<Shift-Key-Tab>', lambda e: w.tk_focusPrev().focus_set())
python
def __focus(self, item): """Called when focus item has changed""" cols = self.__get_display_columns() for col in cols: self.__event_info =(col,item) self.event_generate('<<TreeviewInplaceEdit>>') if col in self._inplace_widgets: w = self._inplace_widgets[col] w.bind('<Key-Tab>', lambda e: w.tk_focusNext().focus_set()) w.bind('<Shift-Key-Tab>', lambda e: w.tk_focusPrev().focus_set())
[ "def", "__focus", "(", "self", ",", "item", ")", ":", "cols", "=", "self", ".", "__get_display_columns", "(", ")", "for", "col", "in", "cols", ":", "self", ".", "__event_info", "=", "(", "col", ",", "item", ")", "self", ".", "event_generate", "(", "'<<TreeviewInplaceEdit>>'", ")", "if", "col", "in", "self", ".", "_inplace_widgets", ":", "w", "=", "self", ".", "_inplace_widgets", "[", "col", "]", "w", ".", "bind", "(", "'<Key-Tab>'", ",", "lambda", "e", ":", "w", ".", "tk_focusNext", "(", ")", ".", "focus_set", "(", ")", ")", "w", ".", "bind", "(", "'<Shift-Key-Tab>'", ",", "lambda", "e", ":", "w", ".", "tk_focusPrev", "(", ")", ".", "focus_set", "(", ")", ")" ]
Called when focus item has changed
[ "Called", "when", "focus", "item", "has", "changed" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/editabletreeview.py#L149-L160
232,875
alejandroautalan/pygubu
pygubu/widgets/editabletreeview.py
EditableTreeview.__clear_inplace_widgets
def __clear_inplace_widgets(self): """Remove all inplace edit widgets.""" cols = self.__get_display_columns() #print('Clear:', cols) for c in cols: if c in self._inplace_widgets: widget = self._inplace_widgets[c] widget.place_forget() self._inplace_widgets_show.pop(c, None)
python
def __clear_inplace_widgets(self): """Remove all inplace edit widgets.""" cols = self.__get_display_columns() #print('Clear:', cols) for c in cols: if c in self._inplace_widgets: widget = self._inplace_widgets[c] widget.place_forget() self._inplace_widgets_show.pop(c, None)
[ "def", "__clear_inplace_widgets", "(", "self", ")", ":", "cols", "=", "self", ".", "__get_display_columns", "(", ")", "#print('Clear:', cols)", "for", "c", "in", "cols", ":", "if", "c", "in", "self", ".", "_inplace_widgets", ":", "widget", "=", "self", ".", "_inplace_widgets", "[", "c", "]", "widget", ".", "place_forget", "(", ")", "self", ".", "_inplace_widgets_show", ".", "pop", "(", "c", ",", "None", ")" ]
Remove all inplace edit widgets.
[ "Remove", "all", "inplace", "edit", "widgets", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/widgets/editabletreeview.py#L179-L187
232,876
alejandroautalan/pygubu
setup.py
CustomInstall.run
def run(self): """Run parent install, and then save the install dir in the script.""" install.run(self) # # Remove old pygubu.py from scripts path if exists spath = os.path.join(self.install_scripts, 'pygubu') for ext in ('.py', '.pyw'): filename = spath + ext if os.path.exists(filename): os.remove(filename) # # Remove old pygubu-designer.bat if platform.system() == 'Windows': spath = os.path.join(self.install_scripts, 'pygubu-designer.bat') if os.path.exists(spath): os.remove(spath)
python
def run(self): """Run parent install, and then save the install dir in the script.""" install.run(self) # # Remove old pygubu.py from scripts path if exists spath = os.path.join(self.install_scripts, 'pygubu') for ext in ('.py', '.pyw'): filename = spath + ext if os.path.exists(filename): os.remove(filename) # # Remove old pygubu-designer.bat if platform.system() == 'Windows': spath = os.path.join(self.install_scripts, 'pygubu-designer.bat') if os.path.exists(spath): os.remove(spath)
[ "def", "run", "(", "self", ")", ":", "install", ".", "run", "(", "self", ")", "#", "# Remove old pygubu.py from scripts path if exists", "spath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "install_scripts", ",", "'pygubu'", ")", "for", "ext", "in", "(", "'.py'", ",", "'.pyw'", ")", ":", "filename", "=", "spath", "+", "ext", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "os", ".", "remove", "(", "filename", ")", "#", "# Remove old pygubu-designer.bat", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "spath", "=", "os", ".", "path", ".", "join", "(", "self", ".", "install_scripts", ",", "'pygubu-designer.bat'", ")", "if", "os", ".", "path", ".", "exists", "(", "spath", ")", ":", "os", ".", "remove", "(", "spath", ")" ]
Run parent install, and then save the install dir in the script.
[ "Run", "parent", "install", "and", "then", "save", "the", "install", "dir", "in", "the", "script", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/setup.py#L30-L46
232,877
alejandroautalan/pygubu
pygubudesigner/propertieseditor.py
PropertiesEditor.hide_all
def hide_all(self): """Hide all properties from property editor.""" self.current = None for _v, (label, widget) in self._propbag.items(): label.grid_remove() widget.grid_remove()
python
def hide_all(self): """Hide all properties from property editor.""" self.current = None for _v, (label, widget) in self._propbag.items(): label.grid_remove() widget.grid_remove()
[ "def", "hide_all", "(", "self", ")", ":", "self", ".", "current", "=", "None", "for", "_v", ",", "(", "label", ",", "widget", ")", "in", "self", ".", "_propbag", ".", "items", "(", ")", ":", "label", ".", "grid_remove", "(", ")", "widget", ".", "grid_remove", "(", ")" ]
Hide all properties from property editor.
[ "Hide", "all", "properties", "from", "property", "editor", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/propertieseditor.py#L150-L156
232,878
alejandroautalan/pygubu
pygubu/builder/builderobject.py
BuilderObject._get_init_args
def _get_init_args(self): """Creates dict with properties marked as readonly""" args = {} for rop in self.ro_properties: if rop in self.properties: args[rop] = self.properties[rop] return args
python
def _get_init_args(self): """Creates dict with properties marked as readonly""" args = {} for rop in self.ro_properties: if rop in self.properties: args[rop] = self.properties[rop] return args
[ "def", "_get_init_args", "(", "self", ")", ":", "args", "=", "{", "}", "for", "rop", "in", "self", ".", "ro_properties", ":", "if", "rop", "in", "self", ".", "properties", ":", "args", "[", "rop", "]", "=", "self", ".", "properties", "[", "rop", "]", "return", "args" ]
Creates dict with properties marked as readonly
[ "Creates", "dict", "with", "properties", "marked", "as", "readonly" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/builder/builderobject.py#L86-L93
232,879
alejandroautalan/pygubu
pygubudesigner/previewer.py
OnCanvasMenuPreview._calculate_menu_wh
def _calculate_menu_wh(self): """ Calculate menu widht and height.""" w = iw = 50 h = ih = 0 # menu.index returns None if there are no choices index = self._menu.index(tk.END) index = index if index is not None else 0 count = index + 1 # First calculate using the font paramters of root menu: font = self._menu.cget('font') font = self._get_font(font) for i in range(0, count): mtype = self._menu.type(i) if mtype == 'tearoff': continue label = 'default' ifont = 'TkMenuFont' if mtype != 'separator': label = self._menu.entrycget(i, 'label') ifont = self._menu.entrycget(i, 'font') wpx = font.measure(label) hpx = font.metrics('linespace') w += wpx if hpx > h: h = hpx * 2 # Calculate using font configured for each subitem ifont = self._get_font(ifont) wpx = ifont.measure(label) hpx = ifont.metrics('linespace') iw += wpx if hpx > ih: ih = hpx * 2 # Then compare 2 sizes and use the greatest w = max(w, iw, 100) h = max(h, ih, 25) self._cwidth = w + int(w * 0.25) self._cheight = h + int(h * 0.25)
python
def _calculate_menu_wh(self): """ Calculate menu widht and height.""" w = iw = 50 h = ih = 0 # menu.index returns None if there are no choices index = self._menu.index(tk.END) index = index if index is not None else 0 count = index + 1 # First calculate using the font paramters of root menu: font = self._menu.cget('font') font = self._get_font(font) for i in range(0, count): mtype = self._menu.type(i) if mtype == 'tearoff': continue label = 'default' ifont = 'TkMenuFont' if mtype != 'separator': label = self._menu.entrycget(i, 'label') ifont = self._menu.entrycget(i, 'font') wpx = font.measure(label) hpx = font.metrics('linespace') w += wpx if hpx > h: h = hpx * 2 # Calculate using font configured for each subitem ifont = self._get_font(ifont) wpx = ifont.measure(label) hpx = ifont.metrics('linespace') iw += wpx if hpx > ih: ih = hpx * 2 # Then compare 2 sizes and use the greatest w = max(w, iw, 100) h = max(h, ih, 25) self._cwidth = w + int(w * 0.25) self._cheight = h + int(h * 0.25)
[ "def", "_calculate_menu_wh", "(", "self", ")", ":", "w", "=", "iw", "=", "50", "h", "=", "ih", "=", "0", "# menu.index returns None if there are no choices", "index", "=", "self", ".", "_menu", ".", "index", "(", "tk", ".", "END", ")", "index", "=", "index", "if", "index", "is", "not", "None", "else", "0", "count", "=", "index", "+", "1", "# First calculate using the font paramters of root menu:", "font", "=", "self", ".", "_menu", ".", "cget", "(", "'font'", ")", "font", "=", "self", ".", "_get_font", "(", "font", ")", "for", "i", "in", "range", "(", "0", ",", "count", ")", ":", "mtype", "=", "self", ".", "_menu", ".", "type", "(", "i", ")", "if", "mtype", "==", "'tearoff'", ":", "continue", "label", "=", "'default'", "ifont", "=", "'TkMenuFont'", "if", "mtype", "!=", "'separator'", ":", "label", "=", "self", ".", "_menu", ".", "entrycget", "(", "i", ",", "'label'", ")", "ifont", "=", "self", ".", "_menu", ".", "entrycget", "(", "i", ",", "'font'", ")", "wpx", "=", "font", ".", "measure", "(", "label", ")", "hpx", "=", "font", ".", "metrics", "(", "'linespace'", ")", "w", "+=", "wpx", "if", "hpx", ">", "h", ":", "h", "=", "hpx", "*", "2", "# Calculate using font configured for each subitem", "ifont", "=", "self", ".", "_get_font", "(", "ifont", ")", "wpx", "=", "ifont", ".", "measure", "(", "label", ")", "hpx", "=", "ifont", ".", "metrics", "(", "'linespace'", ")", "iw", "+=", "wpx", "if", "hpx", ">", "ih", ":", "ih", "=", "hpx", "*", "2", "# Then compare 2 sizes and use the greatest", "w", "=", "max", "(", "w", ",", "iw", ",", "100", ")", "h", "=", "max", "(", "h", ",", "ih", ",", "25", ")", "self", ".", "_cwidth", "=", "w", "+", "int", "(", "w", "*", "0.25", ")", "self", ".", "_cheight", "=", "h", "+", "int", "(", "h", "*", "0.25", ")" ]
Calculate menu widht and height.
[ "Calculate", "menu", "widht", "and", "height", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L283-L320
232,880
alejandroautalan/pygubu
pygubudesigner/previewer.py
PreviewHelper._over_resizer
def _over_resizer(self, x, y): "Returns True if mouse is over a resizer" over_resizer = False c = self.canvas ids = c.find_overlapping(x, y, x, y) if ids: o = ids[0] tags = c.gettags(o) if 'resizer' in tags: over_resizer = True return over_resizer
python
def _over_resizer(self, x, y): "Returns True if mouse is over a resizer" over_resizer = False c = self.canvas ids = c.find_overlapping(x, y, x, y) if ids: o = ids[0] tags = c.gettags(o) if 'resizer' in tags: over_resizer = True return over_resizer
[ "def", "_over_resizer", "(", "self", ",", "x", ",", "y", ")", ":", "over_resizer", "=", "False", "c", "=", "self", ".", "canvas", "ids", "=", "c", ".", "find_overlapping", "(", "x", ",", "y", ",", "x", ",", "y", ")", "if", "ids", ":", "o", "=", "ids", "[", "0", "]", "tags", "=", "c", ".", "gettags", "(", "o", ")", "if", "'resizer'", "in", "tags", ":", "over_resizer", "=", "True", "return", "over_resizer" ]
Returns True if mouse is over a resizer
[ "Returns", "True", "if", "mouse", "is", "over", "a", "resizer" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L453-L464
232,881
alejandroautalan/pygubu
pygubudesigner/previewer.py
PreviewHelper.resize_preview
def resize_preview(self, dw, dh): "Resizes preview that is currently dragged" # identify preview if self._objects_moving: id_ = self._objects_moving[0] tags = self.canvas.gettags(id_) for tag in tags: if tag.startswith('preview_'): _, ident = tag.split('preview_') preview = self.previews[ident] preview.resize_by(dw, dh) self.move_previews() break self._update_cregion()
python
def resize_preview(self, dw, dh): "Resizes preview that is currently dragged" # identify preview if self._objects_moving: id_ = self._objects_moving[0] tags = self.canvas.gettags(id_) for tag in tags: if tag.startswith('preview_'): _, ident = tag.split('preview_') preview = self.previews[ident] preview.resize_by(dw, dh) self.move_previews() break self._update_cregion()
[ "def", "resize_preview", "(", "self", ",", "dw", ",", "dh", ")", ":", "# identify preview", "if", "self", ".", "_objects_moving", ":", "id_", "=", "self", ".", "_objects_moving", "[", "0", "]", "tags", "=", "self", ".", "canvas", ".", "gettags", "(", "id_", ")", "for", "tag", "in", "tags", ":", "if", "tag", ".", "startswith", "(", "'preview_'", ")", ":", "_", ",", "ident", "=", "tag", ".", "split", "(", "'preview_'", ")", "preview", "=", "self", ".", "previews", "[", "ident", "]", "preview", ".", "resize_by", "(", "dw", ",", "dh", ")", "self", ".", "move_previews", "(", ")", "break", "self", ".", "_update_cregion", "(", ")" ]
Resizes preview that is currently dragged
[ "Resizes", "preview", "that", "is", "currently", "dragged" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L466-L480
232,882
alejandroautalan/pygubu
pygubudesigner/previewer.py
PreviewHelper.move_previews
def move_previews(self): "Move previews after a resize event" # calculate new positions min_y = self._calc_preview_ypos() for idx, (key, p) in enumerate(self.previews.items()): new_dy = min_y[idx] - p.y self.previews[key].move_by(0, new_dy) self._update_cregion() self.show_selected(self._sel_id, self._sel_widget)
python
def move_previews(self): "Move previews after a resize event" # calculate new positions min_y = self._calc_preview_ypos() for idx, (key, p) in enumerate(self.previews.items()): new_dy = min_y[idx] - p.y self.previews[key].move_by(0, new_dy) self._update_cregion() self.show_selected(self._sel_id, self._sel_widget)
[ "def", "move_previews", "(", "self", ")", ":", "# calculate new positions", "min_y", "=", "self", ".", "_calc_preview_ypos", "(", ")", "for", "idx", ",", "(", "key", ",", "p", ")", "in", "enumerate", "(", "self", ".", "previews", ".", "items", "(", ")", ")", ":", "new_dy", "=", "min_y", "[", "idx", "]", "-", "p", ".", "y", "self", ".", "previews", "[", "key", "]", ".", "move_by", "(", "0", ",", "new_dy", ")", "self", ".", "_update_cregion", "(", ")", "self", ".", "show_selected", "(", "self", ".", "_sel_id", ",", "self", ".", "_sel_widget", ")" ]
Move previews after a resize event
[ "Move", "previews", "after", "a", "resize", "event" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L490-L499
232,883
alejandroautalan/pygubu
pygubudesigner/previewer.py
PreviewHelper._calc_preview_ypos
def _calc_preview_ypos(self): "Calculates the previews positions on canvas" y = 10 min_y = [y] for k, p in self.previews.items(): y += p.height() + self.padding min_y.append(y) return min_y
python
def _calc_preview_ypos(self): "Calculates the previews positions on canvas" y = 10 min_y = [y] for k, p in self.previews.items(): y += p.height() + self.padding min_y.append(y) return min_y
[ "def", "_calc_preview_ypos", "(", "self", ")", ":", "y", "=", "10", "min_y", "=", "[", "y", "]", "for", "k", ",", "p", "in", "self", ".", "previews", ".", "items", "(", ")", ":", "y", "+=", "p", ".", "height", "(", ")", "+", "self", ".", "padding", "min_y", ".", "append", "(", "y", ")", "return", "min_y" ]
Calculates the previews positions on canvas
[ "Calculates", "the", "previews", "positions", "on", "canvas" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L501-L509
232,884
alejandroautalan/pygubu
pygubudesigner/previewer.py
PreviewHelper._get_slot
def _get_slot(self): "Returns the next coordinates for a preview" x = y = 10 for k, p in self.previews.items(): y += p.height() + self.padding return x, y
python
def _get_slot(self): "Returns the next coordinates for a preview" x = y = 10 for k, p in self.previews.items(): y += p.height() + self.padding return x, y
[ "def", "_get_slot", "(", "self", ")", ":", "x", "=", "y", "=", "10", "for", "k", ",", "p", "in", "self", ".", "previews", ".", "items", "(", ")", ":", "y", "+=", "p", ".", "height", "(", ")", "+", "self", ".", "padding", "return", "x", ",", "y" ]
Returns the next coordinates for a preview
[ "Returns", "the", "next", "coordinates", "for", "a", "preview" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L511-L517
232,885
alejandroautalan/pygubu
pygubu/stockimage.py
StockImage.clear_cache
def clear_cache(cls): """Call this before closing tk root""" #Prevent tkinter errors on python 2 ?? for key in cls._cached: cls._cached[key] = None cls._cached = {}
python
def clear_cache(cls): """Call this before closing tk root""" #Prevent tkinter errors on python 2 ?? for key in cls._cached: cls._cached[key] = None cls._cached = {}
[ "def", "clear_cache", "(", "cls", ")", ":", "#Prevent tkinter errors on python 2 ??", "for", "key", "in", "cls", ".", "_cached", ":", "cls", ".", "_cached", "[", "key", "]", "=", "None", "cls", ".", "_cached", "=", "{", "}" ]
Call this before closing tk root
[ "Call", "this", "before", "closing", "tk", "root" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/stockimage.py#L54-L59
232,886
alejandroautalan/pygubu
pygubu/stockimage.py
StockImage.register
def register(cls, key, filename): """Register a image file using key""" if key in cls._stock: logger.info('Warning, replacing resource ' + str(key)) cls._stock[key] = {'type': 'custom', 'filename': filename} logger.info('%s registered as %s' % (filename, key))
python
def register(cls, key, filename): """Register a image file using key""" if key in cls._stock: logger.info('Warning, replacing resource ' + str(key)) cls._stock[key] = {'type': 'custom', 'filename': filename} logger.info('%s registered as %s' % (filename, key))
[ "def", "register", "(", "cls", ",", "key", ",", "filename", ")", ":", "if", "key", "in", "cls", ".", "_stock", ":", "logger", ".", "info", "(", "'Warning, replacing resource '", "+", "str", "(", "key", ")", ")", "cls", ".", "_stock", "[", "key", "]", "=", "{", "'type'", ":", "'custom'", ",", "'filename'", ":", "filename", "}", "logger", ".", "info", "(", "'%s registered as %s'", "%", "(", "filename", ",", "key", ")", ")" ]
Register a image file using key
[ "Register", "a", "image", "file", "using", "key" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/stockimage.py#L62-L68
232,887
alejandroautalan/pygubu
pygubu/stockimage.py
StockImage.register_from_data
def register_from_data(cls, key, format, data): """Register a image data using key""" if key in cls._stock: logger.info('Warning, replacing resource ' + str(key)) cls._stock[key] = {'type': 'data', 'data': data, 'format': format } logger.info('%s registered as %s' % ('data', key))
python
def register_from_data(cls, key, format, data): """Register a image data using key""" if key in cls._stock: logger.info('Warning, replacing resource ' + str(key)) cls._stock[key] = {'type': 'data', 'data': data, 'format': format } logger.info('%s registered as %s' % ('data', key))
[ "def", "register_from_data", "(", "cls", ",", "key", ",", "format", ",", "data", ")", ":", "if", "key", "in", "cls", ".", "_stock", ":", "logger", ".", "info", "(", "'Warning, replacing resource '", "+", "str", "(", "key", ")", ")", "cls", ".", "_stock", "[", "key", "]", "=", "{", "'type'", ":", "'data'", ",", "'data'", ":", "data", ",", "'format'", ":", "format", "}", "logger", ".", "info", "(", "'%s registered as %s'", "%", "(", "'data'", ",", "key", ")", ")" ]
Register a image data using key
[ "Register", "a", "image", "data", "using", "key" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/stockimage.py#L71-L77
232,888
alejandroautalan/pygubu
pygubu/stockimage.py
StockImage.register_created
def register_created(cls, key, image): """Register an already created image using key""" if key in cls._stock: logger.info('Warning, replacing resource ' + str(key)) cls._stock[key] = {'type': 'created', 'image': image} logger.info('%s registered as %s' % ('data', key))
python
def register_created(cls, key, image): """Register an already created image using key""" if key in cls._stock: logger.info('Warning, replacing resource ' + str(key)) cls._stock[key] = {'type': 'created', 'image': image} logger.info('%s registered as %s' % ('data', key))
[ "def", "register_created", "(", "cls", ",", "key", ",", "image", ")", ":", "if", "key", "in", "cls", ".", "_stock", ":", "logger", ".", "info", "(", "'Warning, replacing resource '", "+", "str", "(", "key", ")", ")", "cls", ".", "_stock", "[", "key", "]", "=", "{", "'type'", ":", "'created'", ",", "'image'", ":", "image", "}", "logger", ".", "info", "(", "'%s registered as %s'", "%", "(", "'data'", ",", "key", ")", ")" ]
Register an already created image using key
[ "Register", "an", "already", "created", "image", "using", "key" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/stockimage.py#L80-L86
232,889
alejandroautalan/pygubu
pygubu/stockimage.py
StockImage._load_image
def _load_image(cls, rkey): """Load image from file or return the cached instance.""" v = cls._stock[rkey] img = None itype = v['type'] if itype in ('stock', 'data'): img = tk.PhotoImage(format=v['format'], data=v['data']) elif itype == 'created': img = v['image'] else: img = tk.PhotoImage(file=v['filename']) cls._cached[rkey] = img logger.info('Loaded resource %s.' % rkey) return img
python
def _load_image(cls, rkey): """Load image from file or return the cached instance.""" v = cls._stock[rkey] img = None itype = v['type'] if itype in ('stock', 'data'): img = tk.PhotoImage(format=v['format'], data=v['data']) elif itype == 'created': img = v['image'] else: img = tk.PhotoImage(file=v['filename']) cls._cached[rkey] = img logger.info('Loaded resource %s.' % rkey) return img
[ "def", "_load_image", "(", "cls", ",", "rkey", ")", ":", "v", "=", "cls", ".", "_stock", "[", "rkey", "]", "img", "=", "None", "itype", "=", "v", "[", "'type'", "]", "if", "itype", "in", "(", "'stock'", ",", "'data'", ")", ":", "img", "=", "tk", ".", "PhotoImage", "(", "format", "=", "v", "[", "'format'", "]", ",", "data", "=", "v", "[", "'data'", "]", ")", "elif", "itype", "==", "'created'", ":", "img", "=", "v", "[", "'image'", "]", "else", ":", "img", "=", "tk", ".", "PhotoImage", "(", "file", "=", "v", "[", "'filename'", "]", ")", "cls", ".", "_cached", "[", "rkey", "]", "=", "img", "logger", ".", "info", "(", "'Loaded resource %s.'", "%", "rkey", ")", "return", "img" ]
Load image from file or return the cached instance.
[ "Load", "image", "from", "file", "or", "return", "the", "cached", "instance", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/stockimage.py#L107-L121
232,890
alejandroautalan/pygubu
pygubu/stockimage.py
StockImage.get
def get(cls, rkey): """Get image previously registered with key rkey. If key not exist, raise StockImageException """ if rkey in cls._cached: logger.info('Resource %s is in cache.' % rkey) return cls._cached[rkey] if rkey in cls._stock: img = cls._load_image(rkey) return img else: raise StockImageException('StockImage: %s not registered.' % rkey)
python
def get(cls, rkey): """Get image previously registered with key rkey. If key not exist, raise StockImageException """ if rkey in cls._cached: logger.info('Resource %s is in cache.' % rkey) return cls._cached[rkey] if rkey in cls._stock: img = cls._load_image(rkey) return img else: raise StockImageException('StockImage: %s not registered.' % rkey)
[ "def", "get", "(", "cls", ",", "rkey", ")", ":", "if", "rkey", "in", "cls", ".", "_cached", ":", "logger", ".", "info", "(", "'Resource %s is in cache.'", "%", "rkey", ")", "return", "cls", ".", "_cached", "[", "rkey", "]", "if", "rkey", "in", "cls", ".", "_stock", ":", "img", "=", "cls", ".", "_load_image", "(", "rkey", ")", "return", "img", "else", ":", "raise", "StockImageException", "(", "'StockImage: %s not registered.'", "%", "rkey", ")" ]
Get image previously registered with key rkey. If key not exist, raise StockImageException
[ "Get", "image", "previously", "registered", "with", "key", "rkey", ".", "If", "key", "not", "exist", "raise", "StockImageException" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/stockimage.py#L124-L136
232,891
alejandroautalan/pygubu
pygubudesigner/uitreeeditor.py
WidgetsTreeEditor.config_treeview
def config_treeview(self): """Sets treeview columns and other params""" tree = self.treeview tree.bind('<Double-1>', self.on_treeview_double_click) tree.bind('<<TreeviewSelect>>', self.on_treeview_select, add='+')
python
def config_treeview(self): """Sets treeview columns and other params""" tree = self.treeview tree.bind('<Double-1>', self.on_treeview_double_click) tree.bind('<<TreeviewSelect>>', self.on_treeview_select, add='+')
[ "def", "config_treeview", "(", "self", ")", ":", "tree", "=", "self", ".", "treeview", "tree", ".", "bind", "(", "'<Double-1>'", ",", "self", ".", "on_treeview_double_click", ")", "tree", ".", "bind", "(", "'<<TreeviewSelect>>'", ",", "self", ".", "on_treeview_select", ",", "add", "=", "'+'", ")" ]
Sets treeview columns and other params
[ "Sets", "treeview", "columns", "and", "other", "params" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/uitreeeditor.py#L79-L83
232,892
alejandroautalan/pygubu
pygubudesigner/uitreeeditor.py
WidgetsTreeEditor.get_toplevel_parent
def get_toplevel_parent(self, treeitem): """Returns the top level parent for treeitem.""" tv = self.treeview toplevel_items = tv.get_children() item = treeitem while not (item in toplevel_items): item = tv.parent(item) return item
python
def get_toplevel_parent(self, treeitem): """Returns the top level parent for treeitem.""" tv = self.treeview toplevel_items = tv.get_children() item = treeitem while not (item in toplevel_items): item = tv.parent(item) return item
[ "def", "get_toplevel_parent", "(", "self", ",", "treeitem", ")", ":", "tv", "=", "self", ".", "treeview", "toplevel_items", "=", "tv", ".", "get_children", "(", ")", "item", "=", "treeitem", "while", "not", "(", "item", "in", "toplevel_items", ")", ":", "item", "=", "tv", ".", "parent", "(", "item", ")", "return", "item" ]
Returns the top level parent for treeitem.
[ "Returns", "the", "top", "level", "parent", "for", "treeitem", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/uitreeeditor.py#L85-L94
232,893
alejandroautalan/pygubu
pygubudesigner/uitreeeditor.py
WidgetsTreeEditor.draw_widget
def draw_widget(self, item): """Create a preview of the selected treeview item""" if item: self.filter_remove(remember=True) selected_id = self.treedata[item]['id'] item = self.get_toplevel_parent(item) widget_id = self.treedata[item]['id'] wclass = self.treedata[item]['class'] xmlnode = self.tree_node_to_xml('', item) self.previewer.draw(item, widget_id, xmlnode, wclass) self.previewer.show_selected(item, selected_id) self.filter_restore()
python
def draw_widget(self, item): """Create a preview of the selected treeview item""" if item: self.filter_remove(remember=True) selected_id = self.treedata[item]['id'] item = self.get_toplevel_parent(item) widget_id = self.treedata[item]['id'] wclass = self.treedata[item]['class'] xmlnode = self.tree_node_to_xml('', item) self.previewer.draw(item, widget_id, xmlnode, wclass) self.previewer.show_selected(item, selected_id) self.filter_restore()
[ "def", "draw_widget", "(", "self", ",", "item", ")", ":", "if", "item", ":", "self", ".", "filter_remove", "(", "remember", "=", "True", ")", "selected_id", "=", "self", ".", "treedata", "[", "item", "]", "[", "'id'", "]", "item", "=", "self", ".", "get_toplevel_parent", "(", "item", ")", "widget_id", "=", "self", ".", "treedata", "[", "item", "]", "[", "'id'", "]", "wclass", "=", "self", ".", "treedata", "[", "item", "]", "[", "'class'", "]", "xmlnode", "=", "self", ".", "tree_node_to_xml", "(", "''", ",", "item", ")", "self", ".", "previewer", ".", "draw", "(", "item", ",", "widget_id", ",", "xmlnode", ",", "wclass", ")", "self", ".", "previewer", ".", "show_selected", "(", "item", ",", "selected_id", ")", "self", ".", "filter_restore", "(", ")" ]
Create a preview of the selected treeview item
[ "Create", "a", "preview", "of", "the", "selected", "treeview", "item" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/uitreeeditor.py#L96-L107
232,894
alejandroautalan/pygubu
pygubudesigner/uitreeeditor.py
WidgetsTreeEditor.on_treeview_delete_selection
def on_treeview_delete_selection(self, event=None): """Removes selected items from treeview""" tv = self.treeview selection = tv.selection() # Need to remove filter self.filter_remove(remember=True) toplevel_items = tv.get_children() parents_to_redraw = set() for item in selection: try: parent = '' if item not in toplevel_items: parent = self.get_toplevel_parent(item) else: self.previewer.delete(item) del self.treedata[item] tv.delete(item) self.app.set_changed() if parent: self._update_max_grid_rc(parent) parents_to_redraw.add(parent) self.widget_editor.hide_all() except tk.TclError: # Selection of parent and child items ?? # TODO: notify something here pass # redraw widgets for item in parents_to_redraw: self.draw_widget(item) # restore filter self.filter_restore()
python
def on_treeview_delete_selection(self, event=None): """Removes selected items from treeview""" tv = self.treeview selection = tv.selection() # Need to remove filter self.filter_remove(remember=True) toplevel_items = tv.get_children() parents_to_redraw = set() for item in selection: try: parent = '' if item not in toplevel_items: parent = self.get_toplevel_parent(item) else: self.previewer.delete(item) del self.treedata[item] tv.delete(item) self.app.set_changed() if parent: self._update_max_grid_rc(parent) parents_to_redraw.add(parent) self.widget_editor.hide_all() except tk.TclError: # Selection of parent and child items ?? # TODO: notify something here pass # redraw widgets for item in parents_to_redraw: self.draw_widget(item) # restore filter self.filter_restore()
[ "def", "on_treeview_delete_selection", "(", "self", ",", "event", "=", "None", ")", ":", "tv", "=", "self", ".", "treeview", "selection", "=", "tv", ".", "selection", "(", ")", "# Need to remove filter", "self", ".", "filter_remove", "(", "remember", "=", "True", ")", "toplevel_items", "=", "tv", ".", "get_children", "(", ")", "parents_to_redraw", "=", "set", "(", ")", "for", "item", "in", "selection", ":", "try", ":", "parent", "=", "''", "if", "item", "not", "in", "toplevel_items", ":", "parent", "=", "self", ".", "get_toplevel_parent", "(", "item", ")", "else", ":", "self", ".", "previewer", ".", "delete", "(", "item", ")", "del", "self", ".", "treedata", "[", "item", "]", "tv", ".", "delete", "(", "item", ")", "self", ".", "app", ".", "set_changed", "(", ")", "if", "parent", ":", "self", ".", "_update_max_grid_rc", "(", "parent", ")", "parents_to_redraw", ".", "add", "(", "parent", ")", "self", ".", "widget_editor", ".", "hide_all", "(", ")", "except", "tk", ".", "TclError", ":", "# Selection of parent and child items ??", "# TODO: notify something here", "pass", "# redraw widgets", "for", "item", "in", "parents_to_redraw", ":", "self", ".", "draw_widget", "(", "item", ")", "# restore filter", "self", ".", "filter_restore", "(", ")" ]
Removes selected items from treeview
[ "Removes", "selected", "items", "from", "treeview" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/uitreeeditor.py#L134-L167
232,895
alejandroautalan/pygubu
pygubudesigner/uitreeeditor.py
WidgetsTreeEditor.tree_to_xml
def tree_to_xml(self): """Traverses treeview and generates a ElementTree object""" # Need to remove filter or hidden items will not be saved. self.filter_remove(remember=True) tree = self.treeview root = ET.Element('interface') items = tree.get_children() for item in items: node = self.tree_node_to_xml('', item) root.append(node) # restore filter self.filter_restore() return ET.ElementTree(root)
python
def tree_to_xml(self): """Traverses treeview and generates a ElementTree object""" # Need to remove filter or hidden items will not be saved. self.filter_remove(remember=True) tree = self.treeview root = ET.Element('interface') items = tree.get_children() for item in items: node = self.tree_node_to_xml('', item) root.append(node) # restore filter self.filter_restore() return ET.ElementTree(root)
[ "def", "tree_to_xml", "(", "self", ")", ":", "# Need to remove filter or hidden items will not be saved.", "self", ".", "filter_remove", "(", "remember", "=", "True", ")", "tree", "=", "self", ".", "treeview", "root", "=", "ET", ".", "Element", "(", "'interface'", ")", "items", "=", "tree", ".", "get_children", "(", ")", "for", "item", "in", "items", ":", "node", "=", "self", ".", "tree_node_to_xml", "(", "''", ",", "item", ")", "root", ".", "append", "(", "node", ")", "# restore filter", "self", ".", "filter_restore", "(", ")", "return", "ET", ".", "ElementTree", "(", "root", ")" ]
Traverses treeview and generates a ElementTree object
[ "Traverses", "treeview", "and", "generates", "a", "ElementTree", "object" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/uitreeeditor.py#L169-L185
232,896
alejandroautalan/pygubu
pygubudesigner/uitreeeditor.py
WidgetsTreeEditor.tree_node_to_xml
def tree_node_to_xml(self, parent, item): """Converts a treeview item and children to xml nodes""" tree = self.treeview data = self.treedata[item] node = data.to_xml_node() children = tree.get_children(item) for child in children: cnode = ET.Element('child') cwidget = self.tree_node_to_xml(item, child) cnode.append(cwidget) node.append(cnode) return node
python
def tree_node_to_xml(self, parent, item): """Converts a treeview item and children to xml nodes""" tree = self.treeview data = self.treedata[item] node = data.to_xml_node() children = tree.get_children(item) for child in children: cnode = ET.Element('child') cwidget = self.tree_node_to_xml(item, child) cnode.append(cwidget) node.append(cnode) return node
[ "def", "tree_node_to_xml", "(", "self", ",", "parent", ",", "item", ")", ":", "tree", "=", "self", ".", "treeview", "data", "=", "self", ".", "treedata", "[", "item", "]", "node", "=", "data", ".", "to_xml_node", "(", ")", "children", "=", "tree", ".", "get_children", "(", "item", ")", "for", "child", "in", "children", ":", "cnode", "=", "ET", ".", "Element", "(", "'child'", ")", "cwidget", "=", "self", ".", "tree_node_to_xml", "(", "item", ",", "child", ")", "cnode", ".", "append", "(", "cwidget", ")", "node", ".", "append", "(", "cnode", ")", "return", "node" ]
Converts a treeview item and children to xml nodes
[ "Converts", "a", "treeview", "item", "and", "children", "to", "xml", "nodes" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/uitreeeditor.py#L187-L201
232,897
alejandroautalan/pygubu
pygubudesigner/uitreeeditor.py
WidgetsTreeEditor._insert_item
def _insert_item(self, root, data, from_file=False): """Insert a item on the treeview and fills columns from data""" tree = self.treeview treelabel = data.get_id() row = col = '' if root != '' and 'layout' in data: row = data.get_layout_property('row') col = data.get_layout_property('column') # fix row position when using copy and paste # If collision, increase by 1 row_count = self.get_max_row(root) if not from_file and (row_count > int(row) and int(col) == 0): row = str(row_count + 1) data.set_layout_property('row', row) image = '' try: image = StockImage.get('16x16-tk.default') except StockImageException: # TODO: notify something here pass try: image = StockImage.get('16x16-{0}'.format(data.get_class())) except StockImageException: # TODO: notify something here pass values = (data.get_class(), row, col) item = tree.insert(root, 'end', text=treelabel, values=values, image=image) data.attach(self) self.treedata[item] = data # Update grid r/c data self._update_max_grid_rc(root, from_file=True) self.app.set_changed() return item
python
def _insert_item(self, root, data, from_file=False): """Insert a item on the treeview and fills columns from data""" tree = self.treeview treelabel = data.get_id() row = col = '' if root != '' and 'layout' in data: row = data.get_layout_property('row') col = data.get_layout_property('column') # fix row position when using copy and paste # If collision, increase by 1 row_count = self.get_max_row(root) if not from_file and (row_count > int(row) and int(col) == 0): row = str(row_count + 1) data.set_layout_property('row', row) image = '' try: image = StockImage.get('16x16-tk.default') except StockImageException: # TODO: notify something here pass try: image = StockImage.get('16x16-{0}'.format(data.get_class())) except StockImageException: # TODO: notify something here pass values = (data.get_class(), row, col) item = tree.insert(root, 'end', text=treelabel, values=values, image=image) data.attach(self) self.treedata[item] = data # Update grid r/c data self._update_max_grid_rc(root, from_file=True) self.app.set_changed() return item
[ "def", "_insert_item", "(", "self", ",", "root", ",", "data", ",", "from_file", "=", "False", ")", ":", "tree", "=", "self", ".", "treeview", "treelabel", "=", "data", ".", "get_id", "(", ")", "row", "=", "col", "=", "''", "if", "root", "!=", "''", "and", "'layout'", "in", "data", ":", "row", "=", "data", ".", "get_layout_property", "(", "'row'", ")", "col", "=", "data", ".", "get_layout_property", "(", "'column'", ")", "# fix row position when using copy and paste", "# If collision, increase by 1", "row_count", "=", "self", ".", "get_max_row", "(", "root", ")", "if", "not", "from_file", "and", "(", "row_count", ">", "int", "(", "row", ")", "and", "int", "(", "col", ")", "==", "0", ")", ":", "row", "=", "str", "(", "row_count", "+", "1", ")", "data", ".", "set_layout_property", "(", "'row'", ",", "row", ")", "image", "=", "''", "try", ":", "image", "=", "StockImage", ".", "get", "(", "'16x16-tk.default'", ")", "except", "StockImageException", ":", "# TODO: notify something here", "pass", "try", ":", "image", "=", "StockImage", ".", "get", "(", "'16x16-{0}'", ".", "format", "(", "data", ".", "get_class", "(", ")", ")", ")", "except", "StockImageException", ":", "# TODO: notify something here", "pass", "values", "=", "(", "data", ".", "get_class", "(", ")", ",", "row", ",", "col", ")", "item", "=", "tree", ".", "insert", "(", "root", ",", "'end'", ",", "text", "=", "treelabel", ",", "values", "=", "values", ",", "image", "=", "image", ")", "data", ".", "attach", "(", "self", ")", "self", ".", "treedata", "[", "item", "]", "=", "data", "# Update grid r/c data", "self", ".", "_update_max_grid_rc", "(", "root", ",", "from_file", "=", "True", ")", "self", ".", "app", ".", "set_changed", "(", ")", "return", "item" ]
Insert a item on the treeview and fills columns from data
[ "Insert", "a", "item", "on", "the", "treeview", "and", "fills", "columns", "from", "data" ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/uitreeeditor.py#L203-L243
232,898
alejandroautalan/pygubu
pygubudesigner/uitreeeditor.py
WidgetsTreeEditor.copy_to_clipboard
def copy_to_clipboard(self): """ Copies selected items to clipboard. """ tree = self.treeview # get the selected item: selection = tree.selection() if selection: self.filter_remove(remember=True) root = ET.Element('selection') for item in selection: node = self.tree_node_to_xml('', item) root.append(node) # python2 issue try: text = ET.tostring(root, encoding='unicode') except LookupError: text = ET.tostring(root, encoding='UTF-8') tree.clipboard_clear() tree.clipboard_append(text) self.filter_restore()
python
def copy_to_clipboard(self): """ Copies selected items to clipboard. """ tree = self.treeview # get the selected item: selection = tree.selection() if selection: self.filter_remove(remember=True) root = ET.Element('selection') for item in selection: node = self.tree_node_to_xml('', item) root.append(node) # python2 issue try: text = ET.tostring(root, encoding='unicode') except LookupError: text = ET.tostring(root, encoding='UTF-8') tree.clipboard_clear() tree.clipboard_append(text) self.filter_restore()
[ "def", "copy_to_clipboard", "(", "self", ")", ":", "tree", "=", "self", ".", "treeview", "# get the selected item:", "selection", "=", "tree", ".", "selection", "(", ")", "if", "selection", ":", "self", ".", "filter_remove", "(", "remember", "=", "True", ")", "root", "=", "ET", ".", "Element", "(", "'selection'", ")", "for", "item", "in", "selection", ":", "node", "=", "self", ".", "tree_node_to_xml", "(", "''", ",", "item", ")", "root", ".", "append", "(", "node", ")", "# python2 issue", "try", ":", "text", "=", "ET", ".", "tostring", "(", "root", ",", "encoding", "=", "'unicode'", ")", "except", "LookupError", ":", "text", "=", "ET", ".", "tostring", "(", "root", ",", "encoding", "=", "'UTF-8'", ")", "tree", ".", "clipboard_clear", "(", ")", "tree", ".", "clipboard_append", "(", "text", ")", "self", ".", "filter_restore", "(", ")" ]
Copies selected items to clipboard.
[ "Copies", "selected", "items", "to", "clipboard", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/uitreeeditor.py#L255-L275
232,899
alejandroautalan/pygubu
pygubudesigner/uitreeeditor.py
WidgetsTreeEditor.add_widget
def add_widget(self, wclass): """Adds a new item to the treeview.""" tree = self.treeview # get the selected item: selected_item = '' tsel = tree.selection() if tsel: selected_item = tsel[0] # Need to remove filter if set self.filter_remove() root = selected_item # check if the widget can be added at selected point if not self._validate_add(root, wclass, False): # if not try to add at item parent level parent = tree.parent(root) if parent != root: if self._validate_add(parent, wclass): root = parent else: return else: return # root item should be set at this point # setup properties widget_id = self.get_unique_id(wclass) data = WidgetDescr(wclass, widget_id) # setup default values for properties for pname in builder.CLASS_MAP[wclass].builder.properties: pdescription = {} if pname in properties.WIDGET_PROPERTIES: pdescription = properties.WIDGET_PROPERTIES[pname] if wclass in pdescription: pdescription = dict(pdescription, **pdescription[wclass]) default_value = str(pdescription.get('default', '')) data.set_property(pname, default_value) # default text for widgets with text prop: if pname in ('text', 'label'): data.set_property(pname, widget_id) # # default grid properties # # is_container = builder.CLASS_MAP[wclass].builder.container for prop_name in properties.GRID_PROPERTIES: pdescription = properties.LAYOUT_OPTIONS[prop_name] if wclass in pdescription: pdescription = dict(pdescription, **pdescription[wclass]) default_value = str(pdescription.get('default', '')) data.set_layout_property(prop_name, default_value) rownum = '0' if root: rownum = str(self.get_max_row(root)+1) data.set_layout_property('row', rownum) data.set_layout_property('column', '0') item = self._insert_item(root, data) # Do redraw self.draw_widget(item) # Select and show the item created tree.after_idle(lambda: tree.selection_set(item)) tree.after_idle(lambda: tree.focus(item)) tree.after_idle(lambda: tree.see(item))
python
def add_widget(self, wclass): """Adds a new item to the treeview.""" tree = self.treeview # get the selected item: selected_item = '' tsel = tree.selection() if tsel: selected_item = tsel[0] # Need to remove filter if set self.filter_remove() root = selected_item # check if the widget can be added at selected point if not self._validate_add(root, wclass, False): # if not try to add at item parent level parent = tree.parent(root) if parent != root: if self._validate_add(parent, wclass): root = parent else: return else: return # root item should be set at this point # setup properties widget_id = self.get_unique_id(wclass) data = WidgetDescr(wclass, widget_id) # setup default values for properties for pname in builder.CLASS_MAP[wclass].builder.properties: pdescription = {} if pname in properties.WIDGET_PROPERTIES: pdescription = properties.WIDGET_PROPERTIES[pname] if wclass in pdescription: pdescription = dict(pdescription, **pdescription[wclass]) default_value = str(pdescription.get('default', '')) data.set_property(pname, default_value) # default text for widgets with text prop: if pname in ('text', 'label'): data.set_property(pname, widget_id) # # default grid properties # # is_container = builder.CLASS_MAP[wclass].builder.container for prop_name in properties.GRID_PROPERTIES: pdescription = properties.LAYOUT_OPTIONS[prop_name] if wclass in pdescription: pdescription = dict(pdescription, **pdescription[wclass]) default_value = str(pdescription.get('default', '')) data.set_layout_property(prop_name, default_value) rownum = '0' if root: rownum = str(self.get_max_row(root)+1) data.set_layout_property('row', rownum) data.set_layout_property('column', '0') item = self._insert_item(root, data) # Do redraw self.draw_widget(item) # Select and show the item created tree.after_idle(lambda: tree.selection_set(item)) tree.after_idle(lambda: tree.focus(item)) tree.after_idle(lambda: tree.see(item))
[ "def", "add_widget", "(", "self", ",", "wclass", ")", ":", "tree", "=", "self", ".", "treeview", "# get the selected item:", "selected_item", "=", "''", "tsel", "=", "tree", ".", "selection", "(", ")", "if", "tsel", ":", "selected_item", "=", "tsel", "[", "0", "]", "# Need to remove filter if set", "self", ".", "filter_remove", "(", ")", "root", "=", "selected_item", "# check if the widget can be added at selected point", "if", "not", "self", ".", "_validate_add", "(", "root", ",", "wclass", ",", "False", ")", ":", "# if not try to add at item parent level", "parent", "=", "tree", ".", "parent", "(", "root", ")", "if", "parent", "!=", "root", ":", "if", "self", ".", "_validate_add", "(", "parent", ",", "wclass", ")", ":", "root", "=", "parent", "else", ":", "return", "else", ":", "return", "# root item should be set at this point", "# setup properties", "widget_id", "=", "self", ".", "get_unique_id", "(", "wclass", ")", "data", "=", "WidgetDescr", "(", "wclass", ",", "widget_id", ")", "# setup default values for properties", "for", "pname", "in", "builder", ".", "CLASS_MAP", "[", "wclass", "]", ".", "builder", ".", "properties", ":", "pdescription", "=", "{", "}", "if", "pname", "in", "properties", ".", "WIDGET_PROPERTIES", ":", "pdescription", "=", "properties", ".", "WIDGET_PROPERTIES", "[", "pname", "]", "if", "wclass", "in", "pdescription", ":", "pdescription", "=", "dict", "(", "pdescription", ",", "*", "*", "pdescription", "[", "wclass", "]", ")", "default_value", "=", "str", "(", "pdescription", ".", "get", "(", "'default'", ",", "''", ")", ")", "data", ".", "set_property", "(", "pname", ",", "default_value", ")", "# default text for widgets with text prop:", "if", "pname", "in", "(", "'text'", ",", "'label'", ")", ":", "data", ".", "set_property", "(", "pname", ",", "widget_id", ")", "#", "# default grid properties", "#", "# is_container = builder.CLASS_MAP[wclass].builder.container", "for", "prop_name", "in", "properties", ".", "GRID_PROPERTIES", ":", 
"pdescription", "=", "properties", ".", "LAYOUT_OPTIONS", "[", "prop_name", "]", "if", "wclass", "in", "pdescription", ":", "pdescription", "=", "dict", "(", "pdescription", ",", "*", "*", "pdescription", "[", "wclass", "]", ")", "default_value", "=", "str", "(", "pdescription", ".", "get", "(", "'default'", ",", "''", ")", ")", "data", ".", "set_layout_property", "(", "prop_name", ",", "default_value", ")", "rownum", "=", "'0'", "if", "root", ":", "rownum", "=", "str", "(", "self", ".", "get_max_row", "(", "root", ")", "+", "1", ")", "data", ".", "set_layout_property", "(", "'row'", ",", "rownum", ")", "data", ".", "set_layout_property", "(", "'column'", ",", "'0'", ")", "item", "=", "self", ".", "_insert_item", "(", "root", ",", "data", ")", "# Do redraw", "self", ".", "draw_widget", "(", "item", ")", "# Select and show the item created", "tree", ".", "after_idle", "(", "lambda", ":", "tree", ".", "selection_set", "(", "item", ")", ")", "tree", ".", "after_idle", "(", "lambda", ":", "tree", ".", "focus", "(", "item", ")", ")", "tree", ".", "after_idle", "(", "lambda", ":", "tree", ".", "see", "(", "item", ")", ")" ]
Adds a new item to the treeview.
[ "Adds", "a", "new", "item", "to", "the", "treeview", "." ]
41c8fb37ef973736ec5d68cbe1cd4ecb78712e40
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/uitreeeditor.py#L422-L492