| column | dtype | lengths / values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | lengths 4 to 209 |
| max_stars_repo_name | string | lengths 5 to 121 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 209 |
| max_issues_repo_name | string | lengths 5 to 121 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 209 |
| max_forks_repo_name | string | lengths 5 to 121 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: b23b07878b9965f8556cbfb3cf2e7c53c59d8f6b | size: 10,211 | ext: py | lang: Python
repo: tla256/cogent3 @ 58533d11852d0cafbc0cfc6ae26429a6c0b2cb75 | path: src/cogent3/parse/blast_xml.py | licenses: ["BSD-3-Clause"] (identical for the max_stars / max_issues / max_forks repo fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all star/issue/fork event datetimes: null
content:
#!/usr/bin/env python
"""Parsers for XML output of blast, psi-blast and blat.
"""
__author__ = "Kristian Rother"
__copyright__ = "Copyright 2007-2019, The Cogent Project"
__contributors__ = ["Micah Hamady"]
__credits__ = ["Rob Knight"]
__license__ = "BSD-3"
__version__ = "2019.12.6a"
__maintainer__ = "Kristian Rother"
__email__ = "krother@rubor.de"
__status__ = "Prototype"
import xml.dom.minidom
from operator import eq as _eq
from operator import gt as _gt
from operator import le as _le
from operator import lt as _lt
from cogent3.parse.blast import MinimalBlastParser9, MinimalPsiBlastParser9
"""
CAUTION:
This XML BLAST PARSER uses minidom. This means poor performance for
big files (>5MB), and huge XML files will almost certainly crash the program!
(06/2009 Kristian)
Possible improvements:
- convert some values into floats automatically (feature request)
- MH recommends sax.* for faster processing.
- test against nt result
- test really big file.
- consider high speed parser for standard output
"""
# field names used to parse tags and create dict.
HIT_XML_FIELDNAMES = [
"QUERY ID",
"SUBJECT_ID",
"HIT_DEF",
"HIT_ACCESSION",
"HIT_LENGTH",
]
HSP_XML_FIELDS = (
("PERCENT_IDENTITY", "Hsp_identity"),
("ALIGNMENT_LENGTH", "Hsp_align-len"),
("MISMATCHES", ""),
("GAP_OPENINGS", "Hsp_gaps"),
("QUERY_START", "Hsp_query-from"),
("QUERY_END", "Hsp_query-to"),
("SUBJECT_START", "Hsp_hit-from"),
("SUBJECT_END", "Hsp_hit-to"),
("E_VALUE", "Hsp_evalue"),
("BIT_SCORE", "Hsp_bit-score"),
("SCORE", "Hsp_score"),
("POSITIVE", "Hsp_positive"),
("QUERY_ALIGN", "Hsp_qseq"),
("SUBJECT_ALIGN", "Hsp_hseq"),
("MIDLINE_ALIGN", "Hsp_midline"),
)
HSP_XML_FIELDNAMES = [x[0] for x in HSP_XML_FIELDS]
HSP_XML_TAGNAMES = [x[1] for x in HSP_XML_FIELDS]
def get_tag(record, name, default=None):
"""
Looks in the XML tag 'record' for
other tags named 'name', and returns the value of the first one.
If none is found, it returns 'default'.
"""
tag = record.getElementsByTagName(name)
if len(tag) and len(tag[0].childNodes):
return tag[0].childNodes[0].nodeValue
else:
return default
def parse_hit(hit_tag, query_id=1):
"""
Parses a 'Hit' dom object.
Returns a list of lists with HSP data.
"""
result = []
# parse elements from hit tag
hit_id = get_tag(hit_tag, "Hit_id")
hit_def = get_tag(hit_tag, "Hit_def")
accession = get_tag(hit_tag, "Hit_accession")
length = int(get_tag(hit_tag, "Hit_len", 0))
hit_data = [query_id, hit_id, hit_def, accession, length]
# process HSPS in this hit.
for hsp_tag in hit_tag.getElementsByTagName("Hsp"):
result.append(hit_data + parse_hsp(hsp_tag))
return result
def parse_hsp(hsp_tag):
"""
Parses a 'Hsp' XML dom object. Returns a list of values,
according to the items in HSP_XML_FIELDS.
"""
result = []
for tag_name in HSP_XML_TAGNAMES:
result.append(get_tag(hsp_tag, tag_name, 0))
# what about these?
# self.identity = int(self.get_tag(record,'Hsp_identity', 0))
# self.positive = int(self.get_tag(record, 'Hsp_positive', 0))
return result
def parse_header(tag):
"""
Parses a 'BlastOutput' dom object.
Returns a dict with information from the blast header
"""
result = {}
result["application"] = get_tag(tag, "BlastOutput_program")
result["version"] = get_tag(tag, "BlastOutput_version")
result["reference"] = get_tag(tag, "BlastOutput_reference")
result["query"] = get_tag(tag, "BlastOutput_query-def")
result["query_letters"] = int(get_tag(tag, "BlastOutput_query-len"))
result["database"] = get_tag(tag, "BlastOutput_db")
# add data from the Parameters tag
for param_tag in tag.getElementsByTagName("BlastOutput_param"):
# for param_tag in tag.getElementsByTagName('Parameters'):
data = parse_parameters(param_tag)
for k in data:
result[k] = data[k]
return result
def parse_parameters(tag):
"""Parses a 'BlastOutput_param' dom object."""
result = {}
result["matrix"] = get_tag(tag, "Parameters_matrix")
result["expect"] = get_tag(tag, "Parameters_expect")
result["gap_open_penalty"] = float(get_tag(tag, "Parameters_gap-open"))
result["gap_extend_penalty"] = float(get_tag(tag, "Parameters_gap-extend"))
result["filter"] = get_tag(tag, "Parameters_filter")
return result
def minimal_blast_parser_7(lines, include_column_names=False, format="xml"):
"""Yields succesive records from lines (props, data list).
lines must be XML BLAST output format.
output:
props is a dict of {UPPERCASE_KEY:value}.
data_list is a list of list of strings, optionally with header first.
LIST CONTAINS [HIT][HSP][strings], FIRST ENTRY IS LIST OF LABELS!
"""
doc = "".join(lines)
dom_obj = xml.dom.minidom.parseString(doc)
query_id = 1
for record in dom_obj.getElementsByTagName("BlastOutput"):
props = parse_header(record)
hits = [HIT_XML_FIELDNAMES + HSP_XML_FIELDNAMES]
for hit in record.getElementsByTagName("Hit"):
hits += parse_hit(hit, query_id)
yield props, hits
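# Minimal usage sketch for minimal_blast_parser_7. The file name
# "example_blast.xml" is hypothetical; any BLAST XML output should work.
def _example_minimal_blast_parser_7():
    with open("example_blast.xml") as handle:
        for props, hits in minimal_blast_parser_7(handle.readlines()):
            labels, rows = hits[0], hits[1:]  # first entry is the list of labels
            print(props["application"], props["query"])
            for row in rows:
                print(dict(zip(labels, row)))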
class BlastXMLResult(dict):
"""the BlastResult objects have the query sequence as keys,
and the values are lists of lists of dictionaries.
The FIELD NAMES given are the keys of the dict.
"""
# FIELD NAMES
QUERY_ALIGN = "HSP QSEQ"
SUBJECT_ALIGN = "HSP HSEQ"
MIDLINE_ALIGN = "HSP MIDLINE"
HIT_DEF = "HIT_DEF"
HIT_ACCESSION = "HIT_ACCESSION"
HIT_LENGTH = "HIT_LENGTH"
SCORE = "SCORE"
POSITIVE = "POSITIVE"
ITERATION = "ITERATION"
QUERY_ID = "QUERY ID"
SUBJECT_ID = "SUBJECT_ID"
PERCENT_IDENTITY = "% IDENTITY"
ALIGNMENT_LENGTH = "ALIGNMENT LENGTH"
MISMATCHES = "MISMATCHES"
GAP_OPENINGS = "GAP OPENINGS"
QUERY_START = "Q. START"
QUERY_END = "Q. END"
SUBJECT_START = "S. START"
SUBJECT_END = "S. END"
E_VALUE = "E-VALUE"
BIT_SCORE = "BIT_SCORE"
# FieldComparisonOperators = (
# BlastResult.FieldComparisonOperators = {
# HIT_DEF:(_gt, float)
# }
# .. to be done
hit_keys = set(
[
HIT_DEF,
HIT_ACCESSION,
HIT_LENGTH,
SCORE,
POSITIVE,
QUERY_ALIGN,
SUBJECT_ALIGN,
MIDLINE_ALIGN,
ITERATION,
QUERY_ID,
SUBJECT_ID,
PERCENT_IDENTITY,
ALIGNMENT_LENGTH,
MISMATCHES,
GAP_OPENINGS,
QUERY_START,
QUERY_END,
SUBJECT_START,
SUBJECT_END,
E_VALUE,
BIT_SCORE,
]
)
_field_comparison_operators = {
PERCENT_IDENTITY: (_gt, float),
ALIGNMENT_LENGTH: (_gt, int),
MISMATCHES: (_lt, int),
E_VALUE: (_lt, float),
BIT_SCORE: (_gt, float),
}
def __init__(self, data, psiblast=False, parser=None, xml=False):
# iterate blast results, generate data structure
"""
Init using blast 7 or blast 9 results
data: blast output from the m = 9 output option
psiblast: if True, will expect psiblast output, else expects
blast output
"""
# further improvement:
# add XML option to BlastResult __init__ instead of
# using a separate class.
if not parser:
if xml:
parser = minimal_blast_parser_7
elif psiblast:
parser = MinimalPsiBlastParser9
else:
parser = MinimalBlastParser9
# code below copied from BlastResult, unchanged.
mp = parser(data, True)
for props, rec_data in mp:
iteration = 1
if self.ITERATION in props:
iteration = int(props[self.ITERATION])
hits = []
# check if found any hits
if len(rec_data) > 1:
for h in rec_data[1:]:
hits.append(dict(list(zip(rec_data[0], h))))
else:
hits.append(dict(list(zip(rec_data[0], ["" for x in rec_data[0]]))))
# get blast version of query id
query_id = hits[0][self.QUERY_ID]
if query_id not in self:
self[query_id] = []
self[query_id].append(hits)
def iter_hits_by_query(self, iteration=-1):
"""Iterates over set of hits, returning list of hits for each query"""
for query_id in self:
yield query_id, self[query_id][iteration]
def best_hits_by_query(
self, iteration=-1, n=1, field="BIT_SCORE", return_self=False
):
"""Iterates over all queries and returns best hit for each
return_self: if False, will not return best hit as itself.
Uses FieldComparisonOperators to figure out which direction to compare.
"""
# check that given valid comparison field
if field not in self._field_comparison_operators:
raise ValueError(
"Invalid field: %s. You must specify one of: %s"
% (field, str(self._field_comparison_operators))
)
cmp_fun, cast_fun = self._field_comparison_operators[field]
# enumerate hits
for q, hits in self.iter_hits_by_query(iteration=iteration):
best_hits = []
for hit in hits:
# check if want to skip self hit
if not return_self:
if hit[self.SUBJECT_ID] == q:
continue
# check if better hit than ones we have
if len(best_hits) < n:
best_hits.append(hit)
else:
for ix, best_hit in enumerate(best_hits):
new_val = cast_fun(hit[field])
old_val = cast_fun(best_hit[field])
if cmp_fun(new_val, old_val):
best_hits[ix] = hit
continue
yield q, best_hits
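# Minimal usage sketch for BlastXMLResult; the file name is hypothetical.
# Key names follow the label row produced by minimal_blast_parser_7.
def _example_blast_xml_result():
    with open("example_blast.xml") as handle:
        result = BlastXMLResult(handle.readlines(), xml=True)
        for query_id, best in result.best_hits_by_query(field="BIT_SCORE"):
            for hit in best:
                print(query_id, hit["SUBJECT_ID"], hit["BIT_SCORE"])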
avg_line_length: 31.418462 | max_line_length: 84 | alphanum_fraction: 0.613456

hexsha: b1eba34f66d9f1bf24d53c3f02e75338c198b58c | size: 24,122 | ext: py | lang: Python
repo: biologioholic/sktime @ 9d0391a04b11d22bd783b452f01aa5b4529b41a2 | path: sktime/classification/compose/_ensemble.py | licenses: ["BSD-3-Clause"] (identical for the max_stars / max_issues / max_forks repo fields)
max_stars_count: 1 (events 2021-12-22T02:45:39.000Z to 2021-12-22T02:45:39.000Z) | max_issues_count: null | max_forks_count: null
content:
# -*- coding: utf-8 -*-
"""Configurable time series ensembles."""
__author__ = ["mloning", "Ayushmaan Seth"]
__all__ = ["ComposableTimeSeriesForestClassifier"]
import numbers
from warnings import warn
import numpy as np
from joblib import Parallel, delayed
from sklearn.ensemble._base import _partition_estimators
from sklearn.ensemble._forest import (
_generate_unsampled_indices,
_get_n_samples_bootstrap,
)
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import compute_sample_weight
from sklearn.utils.multiclass import check_classification_targets
from sktime.classification.base import BaseClassifier
from sktime.series_as_features.base.estimators._ensemble import BaseTimeSeriesForest
from sktime.transformations.panel.summarize import RandomIntervalFeatureExtractor
from sktime.utils.slope_and_trend import _slope
from sktime.utils.validation.panel import check_X, check_X_y
class ComposableTimeSeriesForestClassifier(BaseTimeSeriesForest, BaseClassifier):
"""Time-Series Forest Classifier.
@article{DENG2013142,
title = {A time series forest for classification and feature extraction},
journal = {Information Sciences},
volume = {239},
pages = {142 - 153},
year = {2013},
issn = {0020-0255},
doi = {https://doi.org/10.1016/j.ins.2013.02.030},
url = {http://www.sciencedirect.com/science/article/pii/S0020025513001473},
author = {Houtao Deng and George Runger and Eugene Tuv and Martyanov Vladimir},
keywords = {Decision tree, Ensemble, Entrance gain, Interpretability,
Large margin, Time series classification}
}
A time series forest is a meta estimator and an adaptation of the random
forest for time-series/panel data that fits a number of decision tree
classifiers on various sub-samples of a transformed dataset and uses
averaging to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original input sample size
but the samples are drawn with replacement if `bootstrap=True` (default).
Parameters
----------
estimator : Pipeline
A pipeline consisting of series-to-tabular transformations
and a decision tree classifier as final estimator.
n_estimators : integer, optional (default=200)
The number of trees in the forest.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or \
None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
max_samples : int or float, default=None
If bootstrap is True, the number of samples to draw from X
to train each base estimator.
- If None (default), then draw `X.shape[0]` samples.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples. Thus,
`max_samples` should be in the interval `(0, 1)`.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_columns : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : data frame of shape = [n_timepoints, n_features]
The normalised feature values at each time index of
the time series forest
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
"""
_tags = {
"X_inner_mtype": "nested_univ", # nested pd.DataFrame
}
def __init__(
self,
estimator=None,
n_estimators=100,
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
max_samples=None,
):
self.estimator = estimator
# Assign values, even though passed on to base estimator below,
# necessary here for cloning
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.max_samples = max_samples
# Pass on params.
super(ComposableTimeSeriesForestClassifier, self).__init__(
base_estimator=None,
n_estimators=n_estimators,
estimator_params=None,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight,
max_samples=max_samples,
)
# We need to add is-fitted state when inheriting from scikit-learn
self._is_fitted = False
def fit(self, X, y, **kwargs):
"""Wrap fit to call BaseClassifier.fit.
This is a fix to get around the problem with multiple inheritance. The
problem is that if we just override _fit, this class inherits the fit from
the sklearn class BaseTimeSeriesForest. This is the simplest solution,
albeit a little hacky.
"""
return BaseClassifier.fit(self, X=X, y=y, **kwargs)
def predict(self, X, **kwargs) -> np.ndarray:
"""Wrap predict to call BaseClassifier.predict."""
return BaseClassifier.predict(self, X=X, **kwargs)
def predict_proba(self, X, **kwargs) -> np.ndarray:
"""Wrap predict_proba to call BaseClassifier.predict_proba."""
return BaseClassifier.predict_proba(self, X=X, **kwargs)
def _fit(self, X, y):
BaseTimeSeriesForest._fit(self, X=X, y=y)
def _validate_estimator(self):
if not isinstance(self.n_estimators, numbers.Integral):
raise ValueError(
"n_estimators must be an integer, "
"got {0}.".format(type(self.n_estimators))
)
if self.n_estimators <= 0:
raise ValueError(
"n_estimators must be greater than zero, "
"got {0}.".format(self.n_estimators)
)
# Set base estimator
if self.estimator is None:
# Set default time series forest
features = [np.mean, np.std, _slope]
steps = [
(
"transform",
RandomIntervalFeatureExtractor(
n_intervals="sqrt",
features=features,
random_state=self.random_state,
),
),
("clf", DecisionTreeClassifier(random_state=self.random_state)),
]
self.estimator_ = Pipeline(steps)
else:
# else check given estimator is a pipeline with prior
# transformations and final decision tree
if not isinstance(self.estimator, Pipeline):
raise ValueError("`estimator` must be pipeline with transforms.")
if not isinstance(self.estimator.steps[-1][1], DecisionTreeClassifier):
raise ValueError(
"Last step in `estimator` must be DecisionTreeClassifier."
)
self.estimator_ = self.estimator
# Set parameters according to naming in pipeline
estimator_params = {
"max_depth": self.max_depth,
"min_samples_split": self.min_samples_split,
"min_samples_leaf": self.min_samples_leaf,
"min_weight_fraction_leaf": self.min_weight_fraction_leaf,
"max_features": self.max_features,
"max_leaf_nodes": self.max_leaf_nodes,
"min_impurity_decrease": self.min_impurity_decrease,
}
final_estimator = self.estimator_.steps[-1][0]
self.estimator_params = {
f"{final_estimator}__{pname}": pval
for pname, pval in estimator_params.items()
}
# Set renamed estimator parameters
for pname, pval in self.estimator_params.items():
self.__setattr__(pname, pval)
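# Example: with the default pipeline above, whose final step is named "clf",
# the renaming turns {"max_depth": ...} into {"clf__max_depth": ...}, i.e. the
# scikit-learn "<step_name>__<param>" convention for Pipeline parameters.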
def _predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
# all dtypes should be the same, so just take the first
class_type = self.classes_[0].dtype
predictions = np.empty((n_samples, self.n_outputs_), dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[k], axis=1), axis=0
)
return predictions
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape (n_samples, n_classes), or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
def _predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the
same class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
self.check_is_fitted()
X = check_X(X, enforce_univariate=True)
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(e.predict_proba)(X) for e in self.estimators_
)
return np.sum(all_proba, axis=0) / len(self.estimators_)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score."""
check_X_y(X, y)
check_X(X, enforce_univariate=True)
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = [
np.zeros((n_samples, n_classes_[k])) for k in range(self.n_outputs_)
]
n_samples_bootstrap = _get_n_samples_bootstrap(n_samples, self.max_samples)
for estimator in self.estimators_:
final_estimator = estimator.steps[-1][1]
unsampled_indices = _generate_unsampled_indices(
final_estimator.random_state, n_samples, n_samples_bootstrap
)
p_estimator = estimator.predict_proba(X.iloc[unsampled_indices, :])
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn(
"Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates."
)
decision = predictions[k] / predictions[k].sum(axis=1)[:, np.newaxis]
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] == np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
# TODO - Implement this abstract method properly.
def _set_oob_score_and_attributes(self, X, y):
raise NotImplementedError("Not implemented.")
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(
y[:, k], return_inverse=True
)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ("balanced", "balanced_subsample")
if isinstance(self.class_weight, str):
if self.class_weight not in valid_presets:
raise ValueError(
"Valid presets for class_weight include "
'"balanced" and "balanced_subsample".'
'Given "%s".' % self.class_weight
)
if self.warm_start:
warn(
'class_weight presets "balanced" or '
'"balanced_subsample" are '
"not recommended for warm_start if the fitted data "
"differs from the full dataset. In order to use "
'"balanced" weights, use compute_class_weight '
'("balanced", classes, y). In place of y you can use '
"a large enough sample of the full training set "
"target to properly estimate the class frequency "
"distributions. Pass the resulting weights as the "
"class_weight parameter."
)
if self.class_weight != "balanced_subsample" or not self.bootstrap:
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight, y_original)
return y, expanded_class_weight
@classmethod
def get_test_params(cls, parameter_set="default"):
"""Return testing parameter settings for the estimator.
Parameters
----------
parameter_set : str, default="default"
Name of the set of test parameters to return, for use in tests. If no
special parameters are defined for a value, will return `"default"` set.
For classifiers, a "default" set of parameters should be provided for
general testing, and a "results_comparison" set for comparing against
previously recorded results if the general set does not produce suitable
probabilities to compare against.
Returns
-------
params : dict or list of dict, default={}
Parameters to create testing instances of the class.
Each dict provides parameters to construct an "interesting" test instance, i.e.,
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`.
"""
return {"n_estimators": 2}
avg_line_length: 43.152057 | max_line_length: 88 | alphanum_fraction: 0.62984

hexsha: a15aa29daccc2f43f03f7d3a3d1a034da81b3754 | size: 2,935 | ext: py | lang: Python
repo: simeoncarstens/ensemble_hic @ abaec8972866b593e689e39419d1c2d7ab6788dc | path: scripts/continue_simulation/setup_continue.py | licenses: ["Unlicense", "MIT"] (identical for the max_stars / max_issues / max_forks repo fields)
max_stars_count: 3 (events 2020-07-23T20:50:42.000Z to 2021-03-16T04:38:07.000Z) | max_issues_count: null | max_forks_count: null
content:
import sys
import os
import numpy as np
from ensemble_hic.setup_functions import parse_config_file
# sys.argv = ['asdfasdf',
# '/scratch/scarste/ensemble_hic/nora2012/female_bothdomains_fixed_rep1_it4_20structures_241replicas/config.cfg',
# 50001,
# 1]
config_file = sys.argv[1]
n_samples = int(sys.argv[2])
n_cont = int(sys.argv[3])
settings = parse_config_file(config_file)
output_folder = settings['general']['output_folder']
cont_folder = output_folder + 'init_continue{}/'.format(n_cont)
if not os.path.exists(cont_folder):
os.makedirs(cont_folder)
samples_folder = output_folder + 'samples/'
n_replicas = int(settings['replica']['n_replicas'])
fname = samples_folder + 'samples_replica{}_{}-{}.pickle'
dump_interval = int(settings['replica']['samples_dump_interval'])
## determine offset
offset = 0
while True:
if os.path.exists(fname.format(1, offset, offset + dump_interval)):
offset += dump_interval
else:
break
settings['replica'].update(offset=offset)
if not os.path.exists(cont_folder):
os.makedirs(cont_folder)
## assemble and write start states
start_states = [np.load(fname.format(i, offset - dump_interval, offset))[-1]
for i in range(1, n_replicas + 1)]
start_structures = [x.variables['structures'] for x in start_states]
start_norms = [x.variables['norm'] for x in start_states]
np.save(cont_folder + 'init_states.npy', np.array(start_structures))
np.save(cont_folder + 'init_norms.npy', np.array(start_norms))
if n_cont == 1:
mcmc_stats = np.loadtxt(output_folder + 'statistics/mcmc_stats.txt')
else:
mcmc_stats = np.loadtxt(output_folder + 'init_continue{}/statistics/mcmc_stats.txt'.format(n_cont - 1))
## assemble and write HMC timesteps
timesteps = mcmc_stats[-1,2::2]
np.save(cont_folder + 'timesteps.npy', timesteps)
## write continue config file
settings['structures_hmc']['timestep'] = cont_folder + 'timesteps.npy'
settings['structures_hmc']['initial_state'] = cont_folder + 'init_states.npy'
settings['replica']['n_samples'] = n_samples
settings['replica'].update(stats_folder=cont_folder + 'statistics/')
settings['general'].update(cont_folder=cont_folder)
with open(cont_folder + 'cont_config.cfg', 'w') as opf:
for section_name, params in settings.iteritems():
opf.write('[{}]\n'.format(section_name))
for k, v in params.iteritems():
opf.write('{} = {}\n'.format(k, v))
opf.write('\n')
import datetime
datestr = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
os.system('cp mystart_continue.sh tmp/{}.sh'.format(datestr))
os.system("sed -i 's/config_PH/{}/g' tmp/{}.sh".format(cont_folder.replace('/', '\\/') + 'cont_config.cfg',
datestr))
os.system("sed -i 's/n_replicas_PH/{}/g' tmp/{}.sh".format(int(settings['replica']['n_replicas']) + 1,
datestr))
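# Typical invocation of this script (paths are illustrative):
#   python setup_continue.py /path/to/output_folder/config.cfg 50001 1
# i.e. the original config file, the n_samples value written into the continued
# config, and the index of this continuation run.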
avg_line_length: 38.618421 | max_line_length: 125 | alphanum_fraction: 0.683135

hexsha: f9c806f4f8523b2339f8c47c25fb88f6870619f2 | size: 1,229 | ext: py | lang: Python
repo: 2naive/jasmin @ 7609a50ded4ebf5873b607cb4a500be4b1be6be1 | path: jasmin/protocols/cli/configs.py | licenses: ["Apache-2.0"] (identical for the max_stars / max_issues / max_forks repo fields)
max_stars_count: 2 (events 2020-05-14T18:27:01.000Z to 2021-03-21T17:26:19.000Z) | max_issues_count: null | max_forks_count: 1 (events 2020-11-24T06:48:22.000Z to 2020-11-24T06:48:22.000Z)
content:
"""
Config file handler for 'jcli' section in jasmin.cfg
"""
from jasmin.config.tools import ConfigFile
import os
import logging
# Related to travis-ci builds
ROOT_PATH = os.getenv('ROOT_PATH', '/')
class JCliConfig(ConfigFile):
"""Config handler for 'jcli' section"""
def __init__(self, config_file=None):
ConfigFile.__init__(self, config_file)
self.bind = self._get('jcli', 'bind', '127.0.0.1')
self.port = self._getint('jcli', 'port', 8990)
self.authentication = self._getbool('jcli', 'authentication', True)
self.admin_username = self._get('jcli', 'admin_username', 'jcliadmin')
self.admin_password = self._get(
'jcli', 'admin_password', '79e9b0aa3f3e7c53e916f7ac47439bcb').decode('hex')
self.log_level = logging.getLevelName(self._get('jcli', 'log_level', 'INFO'))
self.log_file = self._get('jcli', 'log_file', '%s/var/log/jasmin/jcli.log' % ROOT_PATH)
self.log_rotate = self._get('jcli', 'log_rotate', 'W6')
self.log_format = self._get(
'jcli', 'log_format', '%(asctime)s %(levelname)-8s %(process)d %(message)s')
self.log_date_format = self._get('jcli', 'log_date_format', '%Y-%m-%d %H:%M:%S')
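# Minimal usage sketch; the config path below is a placeholder.
def _example_jcli_config():
    config = JCliConfig('/etc/jasmin/jasmin.cfg')
    print(config.bind, config.port, config.authentication, config.log_file)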
avg_line_length: 37.242424 | max_line_length: 95 | alphanum_fraction: 0.646054

hexsha: 2096307ffa05d31d2fce8c9cf95b33cf029bf541 | size: 831 | ext: py | lang: Python
repo: andre487/downloader487 @ 687c0177019d27990b7b29a2b98af360558e8ca4 | path: downloader/upl.py | licenses: ["MIT"] (identical for the max_stars / max_issues / max_forks repo fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all star/issue/fork event datetimes: null
content:
import logging
import os
from typing import Optional
import boto3
class Uploader:
def __init__(
self, s3_endpoint: str, s3_region: str, s3_bucket: str,
s3_access: Optional[str], s3_secret: Optional[str],
) -> None:
self._s3_bucket = s3_bucket
self._client = boto3.client(
service_name='s3',
endpoint_url=s3_endpoint,
region_name=s3_region,
aws_access_key_id=s3_access,
aws_secret_access_key=s3_secret,
)
def upload(self, file_path: str) -> bool:
if not os.path.exists(file_path):
logging.error(f'File not found: {file_path}')
return False
s3_key = os.path.basename(file_path)
self._client.upload_file(file_path, self._s3_bucket, s3_key)
return True
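# Minimal usage sketch; endpoint, region, bucket and credentials are placeholders.
def _example_upload():
    uploader = Uploader(
        s3_endpoint='https://storage.example.com',
        s3_region='us-east-1',
        s3_bucket='my-bucket',
        s3_access='ACCESS_KEY_ID',
        s3_secret='SECRET_ACCESS_KEY',
    )
    return uploader.upload('/tmp/archive.tar.gz')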
avg_line_length: 26.806452 | max_line_length: 68 | alphanum_fraction: 0.618532

hexsha: b39eb5c43bf8307e0fdb5ffb72a309bc5335c837 | size: 7,366 | ext: py | lang: Python
repo: Sangyeob-Kim/tensorpack_rev @ bf4020892edc123a09e08de784da0448464529b2 | path: tensorpack/tfutils/varmanip.py | licenses: ["Apache-2.0"] (identical for the max_stars / max_issues / max_forks repo fields)
max_stars_count: 5 (events 2018-05-04T02:04:15.000Z to 2020-04-02T05:38:48.000Z) | max_issues_count: null | max_forks_count: 2 (events 2018-04-23T13:43:10.000Z to 2019-10-30T09:56:54.000Z)
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: varmanip.py
import six
import os
import pprint
import tensorflow as tf
import numpy as np
from ..utils.develop import deprecated
from ..utils import logger
from .common import get_op_tensor_name
__all__ = ['SessionUpdate', 'dump_session_params', 'dump_chkpt_vars',
'load_chkpt_vars', 'get_checkpoint_path']
def get_savename_from_varname(
varname, varname_prefix=None,
savename_prefix=None):
"""
Args:
varname(str): a variable name in the graph
varname_prefix(str): an optional prefix that may need to be removed in varname
savename_prefix(str): an optional prefix to append to all savename
Returns:
str: the name used to save the variable
"""
name = varname
if varname_prefix is not None \
and name.startswith(varname_prefix):
name = name[len(varname_prefix) + 1:]
if savename_prefix is not None:
name = savename_prefix + '/' + name
return name
class SessionUpdate(object):
""" Update the variables in a session """
def __init__(self, sess, vars_to_update):
"""
Args:
sess (tf.Session): a session object
vars_to_update: a collection of variables to update
"""
self.sess = sess
self.name_map = {v.name: v for v in vars_to_update}
@staticmethod
def load_value_to_var(var, val, strict=False):
"""
Call `var.load(val)` with the default session.
Args:
var (tf.Variable):
strict (bool): Behave less strict if set to False.
"""
if strict:
var.load(val)
return
name = var.op.name
# check incompatible shape
varshape = tuple(var.get_shape().as_list())
if varshape != val.shape:
# TODO only allow reshape when shape different by empty axis
assert np.prod(varshape) == np.prod(val.shape), \
"{}: {}!={}".format(name, varshape, val.shape)
logger.warn("Variable {} is reshaped {}->{} during assigning".format(
name, val.shape, varshape))
val = val.reshape(varshape)
# fix some common type incompatibility problems, but not all
def upcast(vartype, valtype):
# allow up-casting
if vartype == tf.float64 and valtype == np.float32:
return np.float64
if vartype in [tf.int64, tf.int32] and valtype in [np.int32, np.int16, np.int8]:
return np.int64 if vartype == tf.int64 else np.int32
return None
if hasattr(val, 'dtype'):
vartype = var.value().dtype
if vartype != val.dtype:
msg = "Variable {} has dtype {} but was given a value of dtype {}.".format(name, vartype, val.dtype)
newtype = upcast(var.dtype, val.dtype)
if newtype is not None:
val = newtype(val)
logger.warn(msg + " Load it after casting!")
else:
assert vartype == val.dtype, msg
try:
var.load(val)
except tf.errors.InvalidArgumentError:
logger.exc("Cannot load this value to the variable {}".format(name))
def update(self, prms):
"""
Args:
prms(dict): dict of {variable name: value}
Any name in prms must be in the graph and in vars_to_update.
"""
with self.sess.as_default():
for name, value in six.iteritems(prms):
assert name in self.name_map
v = self.name_map[name]
SessionUpdate.load_value_to_var(v, value)
def dump_session_params(path):
"""
Dump value of all TRAINABLE + MODEL variables to a dict, and save as
npz format (loadable by :class:`DictRestore`).
Args:
path(str): the file name to save the parameters. Must end with npz.
"""
var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var.extend(tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
# TODO dedup
assert len(set(var)) == len(var), "TRAINABLE and MODEL variables have duplication!"
gvars = set([k.name for k in tf.global_variables()])
var = [v for v in var if v.name in gvars]
result = {}
for v in var:
result[v.name] = v.eval()
logger.info("Variables to save to {}:".format(path))
keys = sorted(list(result.keys()))
logger.info(pprint.pformat(keys))
if path.endswith('.npy'):
np.save(path, result)
elif path.endswith('.npz'):
np.savez_compressed(path, **result)
else:
raise ValueError("Don't know which format to use for {}".format(path))
def get_checkpoint_path(model_path):
"""
Work around TF problems in checkpoint path handling.
Args:
model_path: a user-input path
Returns:
str: the argument that can be passed to NewCheckpointReader
"""
if os.path.basename(model_path) == model_path:
model_path = os.path.join('.', model_path) # avoid #4921 and #6142
if os.path.basename(model_path) == 'checkpoint':
assert tf.gfile.Exists(model_path), model_path
model_path = tf.train.latest_checkpoint(os.path.dirname(model_path))
# to be consistent with either v1 or v2
# fix paths if provided a wrong one
new_path = model_path
if '00000-of-00001' in model_path:
new_path = model_path.split('.data')[0]
elif model_path.endswith('.index'):
new_path = model_path.split('.index')[0]
if new_path != model_path:
logger.warn(
"Checkpoint path {} is auto-corrected to {}.".format(model_path, new_path))
model_path = new_path
assert tf.gfile.Exists(model_path) or tf.gfile.Exists(model_path + '.index'), model_path
return model_path
def load_chkpt_vars(model_path):
""" Load all variables from a checkpoint to a dict.
Args:
model_path(str): path to a checkpoint.
Returns:
dict: a name:value dict
"""
model_path = get_checkpoint_path(model_path)
reader = tf.train.NewCheckpointReader(model_path)
var_names = reader.get_variable_to_shape_map().keys()
result = {}
for n in var_names:
result[n] = reader.get_tensor(n)
return result
@deprecated("Renamed to 'load_chkpt_vars!'", "2018-04-20")
def dump_chkpt_vars(model_path):
return load_chkpt_vars(model_path)
def is_training_name(name):
"""
**Guess** if this variable is only used in training.
Only used internally to avoid too much logging. Do not use it.
"""
# TODO: maybe simply check against TRAINABLE_VARIABLES and MODEL_VARIABLES?
# TODO or use get_slot_names()
name = get_op_tensor_name(name)[0]
if name.endswith('/Adam') or name.endswith('/Adam_1'):
return True
if name.endswith('/Momentum'):
return True
if name.endswith('/Adadelta') or name.endswith('/Adadelta_1'):
return True
if name.endswith('/RMSProp') or name.endswith('/RMSProp_1'):
return True
if name.endswith('/Adagrad'):
return True
if name.startswith('EMA/'): # all the moving average summaries
return True
if name.startswith('AccumGrad') or name.endswith('/AccumGrad'):
return True
return False
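# Minimal usage sketch; the checkpoint path is a placeholder. load_chkpt_vars
# resolves the path via get_checkpoint_path and returns a {name: numpy array} dict.
def _example_inspect_checkpoint():
    params = load_chkpt_vars('/path/to/train_log/model-10000')
    for name in sorted(params):
        print(name, params[name].shape)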
avg_line_length: 34.101852 | max_line_length: 116 | alphanum_fraction: 0.618246

hexsha: 2395f20a3325cd420becdc676dd60e29374c97ac | size: 7,415 | ext: py | lang: Python
repo: gaosong666/taobao @ cec3be71376fb94dc38553360253b70e88855594 | path: note/meiduo34/mall/apps/users/views.py | licenses: ["MIT"] (identical for the max_stars / max_issues / max_forks repo fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all star/issue/fork event datetimes: null
content:
from django.shortcuts import render
# Create your views here.
from django_redis import get_redis_connection
from rest_framework import status
from rest_framework.generics import CreateAPIView, RetrieveAPIView, UpdateAPIView, GenericAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from rest_framework_jwt.views import ObtainJSONWebToken
from areas.serializers import AddressSerializer, AddressTitleSerializer
from carts.utils import merge_cart_cookie_to_redis
from goods.models import SKU
from goods.serializers import SKUSerializer
from users.models import User
from users.serialziers import RegisterCreateSerializer, UserDetailSerializer, EmailSerializer, \
AddUserBrowsingHistorySerializer
from django.shortcuts import render
from rest_framework import mixins
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
# from .serializers import EmailSerializer
class RegisterUsernameCountAPIView(APIView):
"""
Get the count of the given username
GET: /users/usernames/(?P<username>\w{5,20})/count/
"""
def get(self,request,username):
# Query the model for the number of matching usernames
count = User.objects.filter(username=username).count()
# Assemble the response data
context = {
'count':count,
'username':username
}
return Response(context)
class RegisterPhoneCountAPIView(APIView):
"""
Get the count of the given mobile number
GET: /users/phones/(?P<mobile>1[345789]\d{9})/count/
"""
def get(self,request,mobile):
# Query the model for the number of matching mobile numbers
count = User.objects.filter(mobile=mobile).count()
# Assemble the response data
context = {
'count':count,
'phone':mobile
}
return Response(context)
class RegisterCreateView(APIView):
"""
User registration
POST /users/
For registration we need to validate the data and then save it to the database.
"""
# serializer_class = RegisterCreateSerializer
def post(self, reqeust):
# 1. Receive the data
data = reqeust.data
# 2. Validate the data
serializer = RegisterCreateSerializer(data=data)
serializer.is_valid(raise_exception=True)
# 3. Save to the database
serializer.save()
# 4. Return the response
return Response(serializer.data)
class UserDetailView(RetrieveAPIView):
"""
Get the logged-in user's information
GET /users/
Since this is the logged-in user, permission management is required.
The class-based view also stores the request object;
its user attribute is the requesting user that has passed authentication.
"""
permission_classes = [IsAuthenticated]
serializer_class = UserDetailSerializer
def get_object(self):
return self.request.user
class EmailView(UpdateAPIView):
"""
Save the email address
PUT /users/emails/
"""
permission_classes = [IsAuthenticated]
serializer_class = EmailSerializer
def get_object(self):
return self.request.user
class VerificationEmailView(APIView):
"""
Verify and activate the email address
GET /users/emails/verification/?token=xxxx
Approach:
get the token and validate it
get the id from the token
look up the user and check that it exists
update the activation status
return the response
"""
def get(self,request):
# Get the token and validate it
token = request.query_params.get('token')
if not token:
return Response({'message':'缺少token'},status=status.HTTP_400_BAD_REQUEST)
# Get the id and email from the token
# Look up the user and check that it exists
user = User.check_verify_email_token(token)
if user is None:
return Response({'message':'链接无效'},status=status.HTTP_400_BAD_REQUEST)
else:
# Update the activation status
user.email_active = True
user.save()
# Return the response
return Response({'message':'ok'})
class AddressViewSet(mixins.ListModelMixin,mixins.CreateModelMixin,mixins.UpdateModelMixin,GenericViewSet):
"""
Create and update user addresses
list GET: /users/addresses/
create POST: /users/addresses/
destroy DELETE: /users/addresses/
action PUT: /users/addresses/pk/status/
action PUT: /users/addresses/pk/title/
"""
# Specify the serializer
serializer_class = AddressSerializer
# Require an authenticated user
permission_classes = [IsAuthenticated]
# User addresses can be in a deleted state, so filter the queryset accordingly
def get_queryset(self):
return self.request.user.addresses.filter(is_deleted=False)
def create(self, request, *args, **kwargs):
"""
Save the user's address data
"""
count = request.user.addresses.count()
if count >= 20:
return Response({'message':'保存地址数量已经达到上限'},status=status.HTTP_400_BAD_REQUEST)
return super().create(request,*args,**kwargs)
def list(self, request, *args, **kwargs):
"""
Get the user's address list
"""
# Get all addresses
queryset = self.get_queryset()
# Create the serializer
serializer = self.get_serializer(queryset, many=True)
user = self.request.user
# Respond
return Response({
'user_id': user.id,
'default_address_id': user.default_address_id,
'limit': 20,
'addresses': serializer.data,
})
def destroy(self, request, *args, **kwargs):
"""
Handle deletion
"""
address = self.get_object()
# Perform a logical (soft) delete
address.is_deleted = True
address.save()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(methods=['put'], detail=True)
def title(self, request, pk=None, address_id=None):
"""
Update the address title
"""
address = self.get_object()
serializer = AddressTitleSerializer(instance=address, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
@action(methods=['put'], detail=True)
def status(self, request, pk=None, address_id=None):
"""
Set the default address
"""
address = self.get_object()
request.user.default_address = address
request.user.save()
return Response({'message': 'OK'}, status=status.HTTP_200_OK)
class UserBrowsingHistoryView(mixins.CreateModelMixin, GenericAPIView):
"""
User browsing history
POST /users/browerhistories/
GET /users/browerhistories/
The data only needs to be stored in redis
"""
serializer_class = AddUserBrowsingHistorySerializer
permission_classes = [IsAuthenticated]
def post(self, request):
"""
Save
"""
return self.create(request)
def get(self, request):
"""获取"""
# Get the user information
user_id = request.user.id
# Connect to redis
redis_conn = get_redis_connection('history')
# Get the data
history_sku_ids = redis_conn.lrange('history_%s' % user_id, 0, 5)
skus = []
for sku_id in history_sku_ids:
sku = SKU.objects.get(pk=sku_id)
skus.append(sku)
# Serialize
serializer = SKUSerializer(skus, many=True)
return Response(serializer.data)
class UserAuthorizationView(ObtainJSONWebToken):
def post(self, request):
# Call the jwt extension's method to validate the user's login data
response = super().post(request)
# If the user logged in successfully, merge the shopping cart data
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
# The user logged in successfully
user = serializer.validated_data.get("user")
# Merge the shopping cart
#merge_cart_cookie_to_redis(request, user, response)
response = merge_cart_cookie_to_redis(request, user, response)
return response
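# Hedged sketch of how these views could be routed in a urls.py, based on the
# URL patterns documented in the docstrings above; the project's actual URL
# conf (names, router registration, Django version) may differ.
#
# from django.urls import re_path
# urlpatterns = [
#     re_path(r'^usernames/(?P<username>\w{5,20})/count/$', RegisterUsernameCountAPIView.as_view()),
#     re_path(r'^phones/(?P<mobile>1[345789]\d{9})/count/$', RegisterPhoneCountAPIView.as_view()),
#     re_path(r'^$', RegisterCreateView.as_view()),
#     re_path(r'^emails/$', EmailView.as_view()),
#     re_path(r'^browerhistories/$', UserBrowsingHistoryView.as_view()),
# ]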
avg_line_length: 27.062044 | max_line_length: 107 | alphanum_fraction: 0.646527

hexsha: 0f8f84bc024db64d1fc9458bbb2d622360da4356 | size: 33,104 | ext: py | lang: Python
repo: VlachosGroup/lcnn @ 90bec040296b8faa4c28230cbc440df8185da715 | path: lcnn/data/UniversalLoader.py | licenses: ["MIT"] (identical for the max_stars / max_issues / max_forks repo fields)
max_stars_count: 8 (events 2020-03-27T22:34:06.000Z to 2020-11-20T13:49:09.000Z) | max_issues_count: 4 (events 2019-08-01T04:13:58.000Z to 2021-09-16T04:52:36.000Z) | max_forks_count: null
content:
# -*- coding: utf-8 -*-
# Copyright (c) Vlachos Group, Jung Group
# GNU v3.0 license
__author__ = 'Geun Ho Gu'
__copyright__ = "Copyright 2019, Vlachos Group, Jung Group"
__version__ = "1.0"
__maintainer__ = "Geun Ho Gu"
__email__ = "ghgu@kaist.ac.kr"
__date__ = "July 31, 2019"
import numpy as np
import json
import os
from collections import defaultdict
import logging
from time import time
from datetime import datetime
from pymatgen import Element, Structure, Molecule, Lattice
from pymatgen.symmetry.analyzer import PointGroupAnalyzer
import networkx as nx
import networkx.algorithms.isomorphism as iso
from scipy.spatial.distance import cdist,pdist,squareform
from .Data import DataLoader
__all__ = [
'UniversalLoader',
'UniversalLoaderInputWriter'
]
# TQDM substitute for stdout
class Progress(object):
def __init__(self, iterator , ndata, step = 100):
logging.getLogger().setLevel(logging.INFO)
self.iter = iterator.__iter__()
self.t = time()
self.ndata = ndata
self.step = step
self.i = 0
s = '%s 0 / %d'%(datetime.now().strftime("[%H:%M:%S]"),self.ndata)
logging.info(s)
def __iter__(self):
return self
def __next__(self):
self.i += 1
if self.i%self.step == 0:
s = '%s %d / %d | %.2f/%d sec/data | ~%.2f sec left'%(datetime.now().strftime("[%H:%M:%S]"),self.i,self.ndata,\
(time()-self.t)/self.i*self.step,self.step,(self.ndata-self.i)/self.i*(time()-self.t))
logging.info(s)
return next(self.iter)
def InputReader(path):
"""Read Input Files
The input format for primitive cell is:
[comment]
[ax][ay][az][pbc]
[bx][by][bz][pbc]
[cx][cy][cz][pbc]
[number of spectator site type][number of active site type]
[os1][os2][os3]...
[number sites]
[site1a][site1b][site1c][site type]
[site2a][site2b][site2c][site type]
...
[number of data]
[datum 1 name]
...
- ax,ay, ... are cell basis vector
- pbc is either T or F indication of the periodic boundary condition
- os# is the name of the possible occupation state (interpreted as string)
- site1a,site1b,site1c are the scaled coordinates of site 1
- site type can be either S1, S2, ... or A1, A2,... indicating spectator
site and its index, and active site and its index, respectively.
Example:
#Primitive Cell
2.81859800e+00 0.00000000e+00 0.00000000e+00 T
-1.40929900e+00 2.44097800e+00 0.00000000e+00 T
0.00000000e+00 0.00000000e+00 2.55082550e+01 T
1 1
-1 0 1
6
0.00000000e+00 0.00000000e+00 9.02210000e-02 S1
6.66666666e-01 3.33333333e-01 1.80442000e-01 S1
3.33333333e-01 6.66666666e-01 2.69674534e-01 S1
0.00000000e+00 0.00000000e+00 3.58978557e-01 S1
6.66666666e-01 3.33333333e-01 4.49958662e-01 S1
3.33333333e-01 6.66666666e-01 5.01129144e-01 A1
653
structure000
structure001
...
The input format for a data point is similar:
[property value]
[ax][ay][az]
[bx][by][bz]
[cx][cy][cz]
[number sites]
[site1a][site1b][site1c][site type][occupation state if active site]
[site2a][site2b][site2c][site type][occupation state if active site]
...
- property value indicates the trained value. It must start with #y=...
Example:
#y=-1.209352
2.81859800e+00 0.00000000e+00 0.00000000e+00
-1.40929900e+00 2.44097800e+00 0.00000000e+00
0.00000000e+00 0.00000000e+00 2.55082550e+01
6
0.000000000000 0.000000000000 0.090220999986 S1
0.500000499894 0.622008360788 0.180442000011 S1
0.999999500106 0.666666711253 0.270892474701 S1
0.000000000000 0.000000000000 0.361755713893 S1
0.500000499894 0.622008360788 0.454395429618 S1
0.000000000000 0.666667212896 0.502346789304 A1 1
Parameters
----------
path : input file path
Returns
-------
list of local_env : list of local_env class
"""
with open(path) as f:
s = f.readlines()
s = [line.rstrip('\n') for line in s]
nl = 0
# read comment
if '#y=' in s[nl]:
y = float(s[nl][3:])
datum = True
else:
y = None
datum = False
nl += 1
# load cell and pbc
cell = np.zeros((3,3))
pbc = np.array([True,True,True])
for i in range(3):
t = s[nl].split()
cell[i,:] = [float(i) for i in t[0:3]]
if not datum and t[3] == 'F':
pbc[i] = False
nl += 1
# read sites if primitive
if not datum:
t = s[nl].split()
ns = int(t[0])
na = int(t[1])
nl += 1
aos = s[nl].split()
nl += 1
# read positions
nS = int(s[nl])
nl += 1
coord = np.zeros((nS,3))
st = []
oss = []
for i in range(nS):
t = s[nl].split()
coord[i,:] = [float(i) for i in t[0:3]]
st.append(t[3])
if datum and len(t) == 5:
oss.append(t[4])
nl+=1
# read data name
if not datum:
nd = int(s[nl])
nl += 1
datanames = []
for i in range(nd):
datanames.append(s[nl])
nl += 1
if datum:
return y, cell, coord, st, oss
else:
return cell, pbc, coord, st, ns, na, aos, datanames
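# Minimal usage sketch; the file names are hypothetical. A primitive-cell input
# (no "#y=" header) returns 8 values, while a datum file returns 5.
def _example_input_reader():
    cell, pbc, coord, st, ns, na, aos, datanames = InputReader('input.in')
    y, dcell, dcoord, dst, oss = InputReader('structure000')
    return datanames, y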
def UniversalLoaderInputWriter(path,y,cell,coord,st,oss):
"""Writes datum into file.
This can be used to print out input format of the datum you have.
parameters
----------
path : string. path to file for writing.
y : float. target property value
cell : 3 x 3. list of list of float. cell basis vectors
coord : ns x 3. list of list of float. scaled positions of each site.
ns is the number of sites.
st : ns. list of string. site type for each site.
oss : nsa. list of string. occupancy of each active site. In the order
of appearance in coord. nsa is the number of active site.
"""
s = '#y=%e\n'%y
for v in cell:
s += '%15.8e %15.8e %15.8e\n'%(v[0],v[1],v[2])
s+= str(len(st))+'\n'
n =0
for xyz,ss in zip(coord,st):
if ss == 'S1':
s += '%15.12f %15.12f %15.12f %s\n'%(xyz[0],xyz[1],xyz[2],ss)
else:
s += '%15.12f %15.12f %15.12f %s %s\n'%(xyz[0],xyz[1],xyz[2],ss,oss[n])
n +=1
with open(path,'w') as f:
f.write(s)
class SiteEnvironment(object):
def __init__(self,pos,sitetypes,env2config,permutations,cutoff,\
Grtol=0.0,Gatol=0.01,rtol = 0.01,atol=0.0, tol=0.01,grtol=0.01):
""" Initialize site environment
This class contains local site environment information. This is used
to find the neighbor list in the datum (see GetMapping).
Parameters
----------
pos : n x 3 list or numpy array of (non-scaled) positions. n is the
number of atom.
sitetypes : n list of string. String must be S or A followed by a
number. S indicates a spectator sites and A indicates a active
sites.
permutations : p x n list of list of integer. p is the permutation
index and n is the number of sites.
cutoff : float. cutoff used for pooling neighbors. for aesthetics only
Grtol : relative tolerance in distance for forming an edge in graph
Gatol : absolute tolerance in distance for forming an edge in graph
rtol : relative tolerance in rmsd in distance for graph matching
atol : absolute tolerance in rmsd in distance for graph matching
tol : maximum tolerance of position RMSD to decide whether two
environment are the same
grtol : tolerance for deciding symmetric nodes
"""
self.pos = pos
self.sitetypes = sitetypes
self.activesiteidx = [i for i,s in enumerate(self.sitetypes) if 'A' in s]
self.formula = defaultdict(int)
for s in sitetypes:
self.formula[s] += 1
self.permutations = permutations
self.env2config = env2config
self.cutoff = cutoff
# Set up site environment matcher
self.tol = tol
# Graphical option
self.Grtol = Grtol
self.Gatol = Gatol
        # tolerance for grouping symmetric nodes
        # (note: hard-coded here, overriding the grtol argument)
        self.grtol = 1e-3
# determine minimum distance between sitetypes.
# This is used to determine the existence of an edge
dists = squareform(pdist(pos))
mindists = defaultdict(list)
for i,row in enumerate(dists):
row_dists = defaultdict(list)
for j in range(0,len(sitetypes)):
if i == j:
continue
# Sort by bond
row_dists[frozenset((sitetypes[i],sitetypes[j]))].append(dists[i,j])
for pair in row_dists:
mindists[pair].append(np.min(row_dists[pair]))
# You want to maximize this in order to make sure every node gets an edge
self.mindists = {}
for pair in mindists:
self.mindists[pair] = np.max(mindists[pair])
# construct graph
self.G = self._ConstructGraph(pos,sitetypes)
# matcher options
self._nm = iso.categorical_node_match('n','')
self._em = iso.numerical_edge_match('d',0,rtol,0)
def _ConstructGraph(self,pos,sitetypes):
"""Returns local environment graph using networkx and
tolerance specified.
        Parameters
        ----------
        pos : ns x 3. coordinates of positions. ns is the number of sites.
        sitetypes : ns. site type for each site
        Returns
        -------
        networkx graph used for matching site positions in a datum.
"""
# construct graph
G = nx.Graph()
dists = cdist([[0,0,0]],pos - np.mean(pos,0))[0]
sdists = np.sort(dists)
#https://stackoverflow.com/questions/37847053/uniquify-an-array-list-with-a-tolerance-in-python-uniquetol-equivalent
uniquedists = sdists[~(np.triu(np.abs(sdists[:,None]-sdists)<=self.grtol,1)).any(0)]
orderfromcenter = np.digitize(dists,uniquedists)
# Add nodes
for i,o in enumerate(orderfromcenter):
G.add_node(i,n=str(o)+sitetypes[i])
# Add edge. distance is edge attribute
dists = pdist(pos); n=0
for i in range(len(sitetypes)):
for j in range(i+1,len(sitetypes)):
if dists[n] < self.mindists[frozenset((sitetypes[i],sitetypes[j]))] or\
(abs(self.mindists[frozenset((sitetypes[i],sitetypes[j]))] - dists[n]) <= self.Gatol + self.Grtol * abs(dists[n])):
G.add_edge(i,j,d=dists[n])
n+=1
return G
def __repr__(self):
s = '<' + self.sitetypes[0]+\
'|%i active neighbors'%(len([s for s in self.sitetypes if 'A' in s])-1)+\
'|%i spectator neighbors'%len([s for s in self.sitetypes if 'S' in s])+\
'|%4.2f Ang Cutoff'%self.cutoff + '| %i permutations>'%len(self.permutations)
return s
def __eq__(self,o):
"""Local environment comparison is done by comparing represented site
"""
if not isinstance(o,SiteEnvironment):
raise ValueError
return self.sitetypes[0] == o.sitetypes[0]
def __ne__(self,o):
"""Local environment comparison is done by comparing represented site
"""
if isinstance(o,SiteEnvironment):
raise ValueError
return not self.__eq__(o)
def GetMapping(self,env,path=None):
"""Returns mapping of sites from input to this object
        Pymatgen's molecule_matcher unfortunately does not work here, as it
        requires a reasonably physical molecule. Instead, a graph is built by
        connecting nearest neighbors, isomorphism matching is used to find
        candidate mappings, and the Kabsch algorithm is then applied to
        confirm the match. NetworkX is used for portability.
Parameters
----------
        env : dictionary that contains information on the local environment
            of a site in a datum. See the _GetSiteEnvironments definition in
            the SiteEnvironments class for what this variable should contain.
Returns
-------
dict : atom mapping. None if there is no mapping
"""
# construct graph
G = self._ConstructGraph(env['pos'],env['sitetypes'])
if len(self.G.nodes) != len(G.nodes):
s = 'Number of nodes is not equal.\n'
raise ValueError(s)
elif len(self.G.edges) != len(G.edges):
print(len(self.G.edges),len(G.edges))
s = 'Number of edges is not equal.\n'
s += "- Is the data point's cell a redefined lattice of primitive cell?\n"
s += '- If relaxed structure is used, you may want to check structure or increase Gatol\n'
if path:
s += path
raise ValueError(s)
GM = iso.GraphMatcher(self.G,G,self._nm,self._em)
######################## Most Time Consuming Part #####################
ams = list(GM.isomorphisms_iter())
# Perhaps parallelize it?
######################## Most Time Consuming Part #####################
if not ams:
s = 'No isomorphism found.\n'
s += "- Is the data point's cell a redefined lattice of primitive cell?\n"
s += '- If relaxed structure is used, you may want to check structure or increase rtol\n'
if path:
s += path
raise ValueError(s)
rmsd = []
for am in ams: #Loop over isomorphism
            # align input positions according to the isomorphism mapping
xyz = np.zeros((len(self.pos),3))
for i in am:
xyz[i,:] = env['pos'][am[i],:]
R = self._kabsch(self.pos,xyz)
#RMSD
rmsd.append(np.sqrt(np.mean(np.linalg.norm(np.dot(self.pos,R)-xyz,axis=1)**2)))
mini = np.argmin(rmsd)
minrmsd = rmsd[mini]
if minrmsd < self.tol:
return ams[mini]
else:
s = 'No isomorphism found.\n'
s += '-Consider increasing neighbor finding tolerance'
raise ValueError(s)
def _kabsch(self, P, Q):
"""Returns rotation matrix to align coordinates using
Kabsch algorithm.
"""
C = np.dot(np.transpose(P), Q)
V, S, W = np.linalg.svd(C)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
R = np.dot(V, W)
return R
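# A worked check of the Kabsch step used in GetMapping, added as an
# illustrative sketch: the four toy points and the 30-degree rotation about z
# are made up. _kabsch does not touch instance state, so a bare instance
# created with __new__ is enough to exercise it.
def _example_kabsch_alignment():
    """Recover a known rotation with SiteEnvironment._kabsch (sketch)."""
    theta = np.pi / 6.0
    Rz = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
    P = np.array([[1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.0, 0.0, 1.0],
                  [1.0, 1.0, 1.0]]) - 0.5   # centered toy points
    Q = np.dot(P, Rz)                       # rotated copy of P
    env = SiteEnvironment.__new__(SiteEnvironment)
    R = env._kabsch(P, Q)
    rmsd = np.sqrt(np.mean(np.linalg.norm(np.dot(P, R) - Q, axis=1) ** 2))
    assert rmsd < 1e-8                      # exact match up to round-off
    return R, rmsd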
class SiteEnvironments(object):
def __init__(self,site_envs,ns,na,aos,eigen_tol,pbc,cutoff, dnames= None):
"""Initialize
        Use Load to initialize this class.
Parameters
----------
site_envs : list of SiteEnvironment object
ns : int. number of spectator sites types
na : int. number of active sites types
        aos : list of string. available occupational states for active sites.
            The string should be the name of the occupancy (consistent with
            the input data).
eigen_tol : tolerance for eigenanalysis of point group analysis in
pymatgen.
pbc : periodic boundary condition.
        cutoff : float. Cutoff radius in angstrom for pooling sites to
            construct the local environment.
        dnames : list of string. Data point file names (optional).
"""
self.site_envs = site_envs
self.unique_site_types = [env.sitetypes[0] for env in self.site_envs]
self.ns = ns
self.na = na
self.aos = aos
self.eigen_tol = eigen_tol
self.pbc = pbc
self.cutoff = cutoff
self.dnames = dnames
def __repr__(self):
s = '<%i active sites'%(self.na)+'|%i spectator sites'%(self.ns) +'>'
return s
def __getitem__(self, el):
"""Returns a site environment
"""
return self.site_envs[el]
def ReadDatum(self,path,cutoff_factor = 1.1):
"""Load structure data and return neighbor information
Parameters
----------
path : path of the structure
        cutoff_factor : float. Extra buffer factor multiplied by the cutoff
            to ensure all relevant sites are pooled.
        Returns
        -------
Y : property value
XSites : one hot encoding of the site. See DataLoader in Data.py
for detailed instruction.
        neighborlist : s x n x p x i. s is the type of site index,
            n is the site index, p is the permutation index,
            and i is the neighbor site index (0 being the site itself).
            See DataLoader in Data.py for detailed instruction.
"""
Y, cell, coord, st, oss = InputReader(path)
# Construct one hot encoding
XSites = np.zeros((len(oss),len(self.aos)))
for i,o in enumerate(oss):
XSites[i,self.aos.index(o)] = 1
# get mapping between all site index to active site index
alltoactive = {}
n = 0
for i,s in enumerate(st):
if 'A' in s:
alltoactive[i] = n
n+=1
# Get Neighbors
## Read Data
site_envs = self._GetSiteEnvironments(coord,cell,st,self.cutoff*cutoff_factor,
self.pbc,get_permutations=False,eigen_tol=self.eigen_tol)
XNSs = [[] for _ in range(len(self.site_envs))]
for env in site_envs:
i = self.unique_site_types.index(env['sitetypes'][0])
env = self._truncate(self.site_envs[i],env)
# get map between two environment
mapping = self.site_envs[i].GetMapping(env,path)
# align input to the primitive cell (reference)
aligned_idx = [env['env2config'][mapping[i]] for i in range(len(env['env2config']))]
# apply permutations
nni_perm = np.take(aligned_idx,self.site_envs[i].permutations)
# remove spectators
nni_perm = nni_perm[:,self.site_envs[i].activesiteidx]
# map it to active sites
nni_perm = np.vectorize(alltoactive.__getitem__)(nni_perm)
XNSs[i].append(nni_perm.tolist())
return Y, XSites.tolist(), XNSs
@classmethod
def _truncate(cls,env_ref,env):
"""When cutoff_factor is used, it will pool more site than cutoff factor specifies.
This will rule out nonrelevant sites by distance.
"""
# Extract the right number of sites by distance
dists = defaultdict(list)
for i,s in enumerate(env['sitetypes']):
dists[s].append([i,env['dist'][i]])
for s in dists:
dists[s] = sorted(dists[s], key= lambda x:x[1])
siteidx = []
for s in dists:
siteidx += [i[0] for i in dists[s][:env_ref.formula[s]]]
siteidx = sorted(siteidx)
env['pos']=[env['pos'][i] for i in range(len(env['pos'])) if i in siteidx]
env['pos']=np.subtract(env['pos'],np.mean(env['pos'],0))
env['sitetypes'] = [env['sitetypes'][i] for i in range(len(env['sitetypes'])) if i in siteidx]
env['env2config'] = [env['env2config'][i] for i in siteidx]
del env['dist']
return env
@classmethod
def Load(cls,path,cutoff,eigen_tol=1e-5):
"""Load Primitive cell and return SiteEnvironments
Parameters
----------
path : input file path
cutoff : float. cutoff distance in angstrom for collecting local
environment.
eigen_tol : tolerance for eigenanalysis of point group analysis in
pymatgen.
"""
cell, pbc, coord, st, ns, na, aos, dnames = InputReader(path)
site_envs = cls._GetSiteEnvironments(coord,cell,st,cutoff,pbc,True,eigen_tol=eigen_tol)
site_envs = [SiteEnvironment(e['pos'],e['sitetypes'],e['env2config'],
e['permutations'],cutoff) for e in site_envs]
ust = [env.sitetypes[0] for env in site_envs]
usi = np.unique(ust,return_index=True)[1]
site_envs = [site_envs[i] for i in usi]
return cls(site_envs,ns,na,aos,eigen_tol,pbc,cutoff, dnames)
@classmethod
def _GetSiteEnvironments(cls,coord,cell,SiteTypes,cutoff,pbc,get_permutations=True,eigen_tol=1e-5):
"""Extract local environments from primitive cell
Parameters
----------
coord : n x 3 list or numpy array of scaled positions. n is the number
of atom.
cell : 3 x 3 list or numpy array
SiteTypes : n list of string. String must be S or A followed by a
number. S indicates a spectator sites and A indicates a active
sites.
cutoff : float. cutoff distance in angstrom for collecting local
environment.
pbc : list of boolean. Periodic boundary condition
        get_permutations : boolean. Whether to find permuted neighbor lists or not.
eigen_tol : tolerance for eigenanalysis of point group analysis in
pymatgen.
        Returns
        -------
        list of dict : one local-environment dictionary per active site
            (keys: 'pos', 'sitetypes', 'env2config', 'permutations', 'dist').
"""
#%% Check error
assert isinstance(coord,(list,np.ndarray))
assert isinstance(cell,(list,np.ndarray))
assert len(coord) == len(SiteTypes)
#%% Initialize
# TODO: Technically, user doesn't even have to supply site index, because
# pymatgen can be used to automatically categorize sites..
coord = np.mod(coord,1)
pbc = np.array(pbc)
#%% Map sites to other elements..
        # TODO: Available pymatgen functions are very limited when DummySpecie
        #       is involved. This may be fixed in the future. Until then, we
        #       simply bypass this by mapping each site to an element.
# Find available atomic number to map site to it
availableAN = [i+1 for i in reversed(range(0,118))]
# Organize Symbols and record mapping
symbols = []
site_idxs = []
SiteSymMap = {} # mapping
SymSiteMap = {}
for i,SiteType in enumerate(SiteTypes):
if SiteType not in SiteSymMap:
symbol = Element.from_Z(availableAN.pop())
SiteSymMap[SiteType] = symbol
SymSiteMap[symbol] = SiteType
else:
symbol = SiteSymMap[SiteType]
symbols.append(symbol)
if 'A' in SiteType:
site_idxs.append(i)
#%% Get local environments of each site
# Find neighbors and permutations using pymatgen
lattice = Lattice(cell)
structure = Structure(lattice, symbols,coord)
neighbors = structure.get_all_neighbors(cutoff,include_index=True)
site_envs = []
for site_idx in site_idxs:
local_env_sym = [symbols[site_idx]]
local_env_xyz = [structure[site_idx].coords]
local_env_dist = [0.0]
local_env_sitemap = [site_idx]
for n in neighbors[site_idx]:
# if PBC condition is fulfilled..
c = np.around(n[0].frac_coords,10)
withinPBC = np.logical_and(0<=c,c<1)
if np.all(withinPBC[~pbc]):
local_env_xyz.append(n[0].coords)
local_env_sym.append(n[0].specie)
local_env_dist.append(n[1])
local_env_sitemap.append(n[2])
local_env_xyz = np.subtract(local_env_xyz,np.mean(local_env_xyz,0))
perm = []
if get_permutations:
finder = PointGroupAnalyzer(Molecule(local_env_sym,local_env_xyz),eigen_tolerance=eigen_tol)
pg = finder.get_pointgroup()
for i,op in enumerate(pg):
newpos = op.operate_multi(local_env_xyz)
perm.append(np.argmin(cdist(local_env_xyz,newpos),axis=1).tolist())
site_env = {'pos':local_env_xyz,'sitetypes':[SymSiteMap[s] for s in local_env_sym],
'env2config':local_env_sitemap,'permutations':perm,
'dist':local_env_dist}
site_envs.append(site_env)
return site_envs
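# A minimal end-to-end sketch of the class above, added for illustration: the
# two file names and the 3.0 Angstrom cutoff are placeholders, and both files
# are assumed to follow the format documented in InputReader.
def _example_site_environments_usage(primitive_path='input.in',
                                     datum_path='datum_0001.in',
                                     cutoff=3.0):
    """Build site environments from a primitive cell and map one datum (sketch)."""
    senvs = SiteEnvironments.Load(primitive_path, cutoff)
    y, x_sites, x_nss = senvs.ReadDatum(datum_path)
    # x_sites : one-hot occupancy per active site
    # x_nss   : per site type, permuted neighbor lists mapped to active sites
    return y, x_sites, x_nss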
def _chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
class UniversalLoader(DataLoader):
def __init__(self,modelpath,datapath,cutoff,split,batch_size,seed=None,eval=False):
"""Load data
See DataLoader in Data.py for detailed instruction.
Parameters
----------
modelpath : path where the model will be.
        datapath : path to the directory containing input.in.
        cutoff : float. cutoff radius for building the local environment.
            Only used down to 2 decimal places.
        split : list of three integers. Sizes of the train, validation and
            test sets.
        batch_size : size of the batch.
        seed : random seed for splitting data points
"""
cutoff = np.around(cutoff,2)
if not os.path.exists(os.path.join(datapath,'data_%4.2f.json'%(cutoff))):
input_path = 'input.in'
# Load primitive cell
SEnvs = SiteEnvironments.Load(os.path.join(datapath,input_path),cutoff)
# Load datapoints
Y = []
XSites = []
XNSs = []
            # parallelization (optional, via pathos)
try:
import pathos.multiprocessing as multiprocessing
p = multiprocessing.Pool()
except:
multiprocessing = None
inputs = [os.path.join(datapath,dpath) for dpath in SEnvs.dnames]
import random
if seed is not None: random.seed(seed)
random.shuffle(inputs)
if multiprocessing is not None:
gen = p.imap_unordered(SEnvs.ReadDatum,inputs)
else:
gen = map(SEnvs.ReadDatum,inputs)
for y, xSites, xNSs in Progress(gen,ndata = len(inputs)):
Y.append(y)
XSites.append(xSites)
XNSs.append(xNSs)
if multiprocessing is not None:
p.close()
p.join()
json.dump((Y,XSites,XNSs),open(os.path.join(datapath,'data_%4.2f.json'%(cutoff)),'w'))
else:
Y, XSites, XNSs = json.load(open(os.path.join(datapath,'data_%4.2f.json'%(cutoff)),'r'))
# randomize data set
if seed:
randidx = np.random.RandomState(seed=seed).permutation(len(XSites)).tolist()
elif eval:
randidx = list(range(len(XSites)))
else:
randidx = np.random.permutation(len(XSites)).tolist()
# split
if split:
Split = {'Train':[],'Validation':[],'Test':[]}
for i in range(len(XSites)):
if i < split[0]:
Split['Train'].append(randidx[i])
elif i > split[0] and i < split[0] + split[1] + 1:
Split['Validation'].append(randidx[i])
elif i > split[0] + split[1] and i < split[0] + split[1] + split[2] + 1:
Split['Test'].append(randidx[i])
if not os.path.exists(modelpath):
os.mkdir(modelpath)
json.dump(Split,open(os.path.join(modelpath,'split.json'),'w'))
elif os.path.exists(os.path.join(modelpath,'split.json')):
Split = json.load(open(os.path.join(modelpath,'split.json'),'r'))
else:
raise ValueError('--split argument or split data needs to be provided')
# Compute statistics of training set
if Split['Train']:
XSites_train = []
Y_train = []
for i in Split['Train']:
XSites_train.append(XSites[i])
Y_train.append(Y[i])
XSites_train = np.concatenate(XSites_train)
Y_train = np.array(Y_train)
X_mean = XSites_train.mean(axis=0)
X_std = XSites_train.std(axis=0)
Y_mean = Y_train.mean(axis=0)
Y_std = Y_train.std(axis=0)
else: # In the case of evaluation, it's stored with the model
X_mean = None
X_std = None
Y_mean = None
Y_std = None
# separate data
Data={}
for setname in Split:
batches = []
for idxs in _chunks(Split[setname],batch_size):
batch = {}
batch['Y'] = [Y[i] for i in idxs]
XSites_batch = [XSites[i] for i in idxs]
XNSs_batch = [XNSs[i] for i in idxs]
batch['N_Sites'], batch['N_Sites_per_config'], \
batch['Idx_Config'], batch['X_Sites'], batch['X_NSs'] = \
_FlattenInput(XSites_batch,XNSs_batch)
batches.append(batch)
Data[setname] = batches
        # neighborlist : s x n x p x i. s is the type of site index,
        # n is the site index, p is the permutation index,
        # and i is the neighbor site index (0 being the site itself)
super().__init__(Data,X_mean,X_std,Y_mean,Y_std,len(XSites[0][0]),\
[len(x[0]) for x in XNSs[0]],\
[len(x[0][0]) for x in XNSs[0]])
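# A construction sketch for the loader above, added for illustration: the
# paths, cutoff, split sizes and batch size are placeholders, and datapath is
# assumed to contain input.in plus the datum files it lists.
def _example_universal_loader():
    """Instantiate UniversalLoader with toy arguments (sketch, not a recipe)."""
    return UniversalLoader(modelpath='./model', datapath='./data',
                           cutoff=3.0, split=[100, 20, 20], batch_size=16,
                           seed=0)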
def _FlattenInput(X_Sites,X_NSs):
"""This method transform the input data that is easy to interpret for
human to a format that is easy to process for tensorflow. Returned
values are used as some of the input in feed_dict of run function.
Parameters
----------
Y : property value
XSites : d x n x t. One hot encoding representation of a site.
d is the datum index, n is the site index, t is the site type index
X_NSs : d x s x n x p x i. neighbor site index.
s is the type of site index (central site),
p is the permutation index, and i is the neighbor sites index
(0 being the site itself)
Returns
-------
    N_Sites : int
        total number of sites across all data points in the batch.
    N_Sites_per_config : list of integer
        number of sites in each configuration. Used to compute the per-site
        formation energy.
    Idx_config : list of integers (in numpy array)
        In the algorithm, the site layer is flattened over all data points.
        This way we avoid padding and an upper limit on the maximum number of
        sites, and calculations are faster, too. To do this we need the datum
        index for each site; this vector contains that information.
X_Sites: numpy 2D matrix
Site layer flattened over data index
    X_NSs : list of s numpy arrays, each n x p x i. Flattened over data.
Note
--------
    These returned values can be passed to the model in feed_dict as
feed_dict={N_Sites:N_Sites,Idx_config:Idx_config,X_Sites:X_Sites,X_NSs:X_NSs}
"""
# initialize
## Convert it to numpy for easy indexing
X_Sites_PreInt = X_Sites
X_Sites = np.array([np.array(x_sites) for x_sites in X_Sites_PreInt])
X_NSs_PreInt = X_NSs
X_NSs = []
for datum in X_NSs_PreInt:
new_datum = []
for site_type in datum:
new_datum.append(np.array(site_type))
X_NSs.append(new_datum)
# number of sites for each datum
N_Sites_per_config = []
for datum in X_Sites:
N_Sites_per_config.append(len(datum))
Idx_config = []
for i in range(len(X_Sites)):
Idx_config.append(np.repeat(i,len(X_Sites[i])))
Idx_config = np.concatenate(Idx_config).reshape(-1)
Idx_config = np.expand_dims(Idx_config,axis=1)
# Flattened Sites
X_Sites = np.concatenate(X_Sites)
# Change nearest neighbor indexing for flattening
nsite_sum = 0
for i,nsite in enumerate(N_Sites_per_config):
datum = X_NSs[i]
new_datum = [sitetype + nsite_sum for sitetype in datum]
nsite_sum += nsite
X_NSs[i] = new_datum
# Flatten nearest neighbor
X_NSs_flattened = [[] for _ in range(len(X_NSs[0]))]
for datum in X_NSs:
for j,sitetype in enumerate(datum):
X_NSs_flattened[j].append(sitetype)
X_NSs_flattened = [np.concatenate(sitetype) for sitetype in X_NSs_flattened]
N_Sites = X_Sites.shape[0]
return N_Sites, N_Sites_per_config, Idx_config, X_Sites, X_NSs_flattened
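# A worked toy example of the flattening above, added for illustration: two
# data points with two sites each, one site type, one permutation and two
# neighbors per site. The numbers are made up to show the index bookkeeping.
def _example_flatten_input():
    """Check the index shifting performed by _FlattenInput on toy data (sketch)."""
    XSites = [[[1, 0], [0, 1]],              # datum 0: one-hot site occupancies
              [[1, 0], [1, 0]]]              # datum 1
    XNSs = [[[[[0, 1]], [[1, 0]]]],          # datum 0: s=1 site type, n=2, p=1, i=2
            [[[[0, 1]], [[1, 0]]]]]          # datum 1: same toy structure
    n_sites, n_per, idx, x_sites, x_nss = _FlattenInput(XSites, XNSs)
    assert n_sites == 4 and n_per == [2, 2]
    assert idx.tolist() == [[0], [0], [1], [1]]
    assert x_sites.shape == (4, 2)
    assert x_nss[0].shape == (4, 1, 2)
    assert int(x_nss[0][2:].min()) == 2      # datum-1 neighbors shifted by +2
    return n_sites, n_per, idx, x_sites, x_nss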
| 38.718129
| 135
| 0.571532
|
97cd7a67f1fd915f5250f2338bd7b947a6687d56
| 197
|
py
|
Python
|
UsingPython/base_algorithm/leetcode/add_two_numbers.py
|
Rick00Kim/Algorithm_coding
|
c988729462f3cef78e0b02f888e0117fdefaa5d1
|
[
"MIT"
] | null | null | null |
UsingPython/base_algorithm/leetcode/add_two_numbers.py
|
Rick00Kim/Algorithm_coding
|
c988729462f3cef78e0b02f888e0117fdefaa5d1
|
[
"MIT"
] | null | null | null |
UsingPython/base_algorithm/leetcode/add_two_numbers.py
|
Rick00Kim/Algorithm_coding
|
c988729462f3cef78e0b02f888e0117fdefaa5d1
|
[
"MIT"
] | null | null | null |
from base_algorithm.abstract_algorithm import AbstractAlgorithm
class AddTwoNumbers(AbstractAlgorithm):
    def solution(self, N):
pass
def execute(self):
return super().execute()
| 19.7
| 63
| 0.720812
|
784df256a3928012ac47bc7be62b7ad3e8c98172
| 5,086
|
py
|
Python
|
poacher/__init__.py
|
eriknyquist/poacher
|
3f8e0384acaf2d7f08f5365072218d80c240aa3f
|
[
"Apache-2.0"
] | null | null | null |
poacher/__init__.py
|
eriknyquist/poacher
|
3f8e0384acaf2d7f08f5365072218d80c240aa3f
|
[
"Apache-2.0"
] | null | null | null |
poacher/__init__.py
|
eriknyquist/poacher
|
3f8e0384acaf2d7f08f5365072218d80c240aa3f
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "0.1"
import time
import argparse
from github import Github
from github.GithubException import UnknownObjectException
DEFAULT_STEP = 64
class GithubPoacher(object):
"""
Base class containing functionality to poll github.com for new repositories.
Extend this class and override poacher.GithubPoacher.on_repo to get information
about new repositories as they are created.
"""
def __init__(self, poll_delay_seconds=2.0, github_retries=10,
github_retry_delay_seconds=2.0):
"""
:param float poll_delay_seconds: time to wait between checking for new \
repos at github.com
:param int github_retries: number of times to retry a failed github.com\
request before giving up
:param float github_retry_delay_seconds: time to wait between retrying \
a failed github.com request
"""
self.repo_id = None
self.github_retries = github_retries
self.poll_delay_seconds = poll_delay_seconds
self.github_retry_delay_seconds = github_retry_delay_seconds
def _get_new(self, last):
ret = []
retries = 0
while True:
try:
for repo in self.github.get_repos(since=last):
ret.append(repo)
except Exception as e:
print("Error getting new repos from Github: " + str(e))
if self.github_retries > 0:
if retries >= (self.github_retries - 1):
raise e
retries += 1
time.sleep(self.github_retry_delay_seconds)
else:
break
return ret
def _repo_exists(self, repoid):
try:
_ = self.github.get_repos(since=repoid)[0]
except IndexError:
return False
return True
    def _bsearch(self, startid):
        # Grow the upper bound exponentially until no repository exists at it,
        # then binary-search the (lower, upper) interval for the latest ID.
        upper = startid
lower = startid
idset = False
step = DEFAULT_STEP
while not idset:
self.on_search_iteration(lower, upper)
if self._repo_exists(upper):
upper += step
step *= 2
else:
idset = True
while (lower + 1) < upper:
middle = int(lower + ((upper - lower) / 2.0))
if self._repo_exists(middle):
lower = middle
else:
upper = middle
self.on_search_iteration(lower, upper)
return lower
def on_search_iteration(self, lower, upper):
"""
Override this method. This method will be called each time
the search parameters are updated during the initial binary search
for the latest repository ID in GithubPoacher.main_loop.
:param int lower: lowest repository ID in search area
:param int upper: highest repository ID in search area
"""
pass
def on_lock(self, repo_id):
"""
Override this method. This method will be called when the binary search
for the latest repo ID in GithubPoacher.main_loop is complete
:param int repo_id: result of binary search (latest repository ID)
"""
pass
def on_repo(self, repo):
"""
Override this method. This method will be called by
GithubPoacher.main_loop whenever a new Github repo is created.
:param PyGithub.Repo repo: Repo object for repository (see PyGithub \
documentation)
"""
pass
def on_repos_processed(self, num):
"""
Override this method. This method will be called in each iteration of
GithubPoacher.main_loop, after GithubPoacher.on_repo has been called
for all new repos returned by a request to github.com
:param int num: the number of new repositories processed
"""
pass
def authenticate(self, username, password):
"""
Authenticate with Github
:param str username: Github username
:param str password: Github password
"""
self.github = Github(username, password)
def main_loop(self, start_id=99525181):
"""
Find the latest repository on github.com and start polling to get
new repositories as they become available. This method never returns.
        :param int start_id: github repository ID for a known existing \
            repository on github.com, used to start the binary search for \
            the latest repository
"""
if not self._repo_exists(start_id):
raise ValueError("Repo with ID %d doesn't exist yet" % start_id)
self.repo_id = newest = self._bsearch(start_id)
self.on_lock(newest)
while True:
time.sleep(self.poll_delay_seconds)
new = self._get_new(newest)
if len(new) == 0:
continue
for repo in new:
self.on_repo(repo)
self.on_repos_processed(len(new))
self.repo_id = newest = new[-1].id
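# A minimal usage sketch for the class above, added for illustration. The
# subclass name, the 5-second poll delay and the credential placeholders are
# made up; authenticate() is username/password based, as defined above.
class PrintingPoacher(GithubPoacher):
    """Toy subclass that reports every newly created repository."""
    def on_lock(self, repo_id):
        print('Locked on to latest repository ID: %d' % repo_id)
    def on_repo(self, repo):
        print('New repository: %s' % repo.full_name)
def _example_run():
    """Run the toy poacher (sketch; main_loop never returns)."""
    poacher = PrintingPoacher(poll_delay_seconds=5.0)
    poacher.authenticate('<github-username>', '<github-password>')
    poacher.main_loop()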
| 29.569767
| 83
| 0.59595
|
02a39d7176d33853cf04915cc3751adb1d4699ae
| 727
|
py
|
Python
|
ymir/backend/src/ymir_monitor/tests/test_api.py
|
Zhang-SJ930104/ymir
|
dd6481be6f229ade4cf8fba64ef44a15357430c4
|
[
"Apache-2.0"
] | 64
|
2021-11-15T03:48:00.000Z
|
2022-03-25T07:08:46.000Z
|
ymir/backend/src/ymir_monitor/tests/test_api.py
|
Zhang-SJ930104/ymir
|
dd6481be6f229ade4cf8fba64ef44a15357430c4
|
[
"Apache-2.0"
] | 35
|
2021-11-23T04:14:35.000Z
|
2022-03-26T09:03:43.000Z
|
ymir/backend/src/ymir_monitor/tests/test_api.py
|
Aryalfrat/ymir
|
d4617ed00ef67a77ab4e1944763f608bface4be6
|
[
"Apache-2.0"
] | 57
|
2021-11-11T10:15:40.000Z
|
2022-03-29T07:27:54.000Z
|
from fastapi.testclient import TestClient
class TestReg:
def test_reg(self, client: TestClient, clear_redislite, mocker):
mocker.patch("os.path.exists", return_value=True)
data = "t0000003000003df78d31639637101 21245543 0.50 2"
mocker.patch("builtins.open", mocker.mock_open(read_data=data))
body = dict(
task_id="abcdadf",
user_id="12",
log_path_weights={
"/data/test/monitor.txtaa": 0.5,
"/data/test/m2.txtaa": 0.5
},
)
r = client.post("/api/v1/tasks", json=body)
assert r.status_code == 200
r = client.post("/api/v1/tasks", json=body)
assert r.status_code == 400
| 30.291667
| 71
| 0.581843
|
cf7001b3be2399972da1fb05e84e0d91cfb623a5
| 662
|
py
|
Python
|
SRC/equiposv1/crud/migrations/0002_document.py
|
MarvikDC/finalIW
|
793ce522cf12e06acb1ae183287c32ea3dfdb562
|
[
"MIT"
] | null | null | null |
SRC/equiposv1/crud/migrations/0002_document.py
|
MarvikDC/finalIW
|
793ce522cf12e06acb1ae183287c32ea3dfdb562
|
[
"MIT"
] | null | null | null |
SRC/equiposv1/crud/migrations/0002_document.py
|
MarvikDC/finalIW
|
793ce522cf12e06acb1ae183287c32ea3dfdb562
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-11-24 20:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crud', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('uploadedFile', models.FileField(upload_to='Uploaded Files/')),
('dateTimeOfUpload', models.DateTimeField(auto_now=True)),
],
),
]
| 28.782609
| 117
| 0.583082
|
18c10ec45d4743376f3427268adc7849e5c6391c
| 68,661
|
py
|
Python
|
spyder/plugins/ipythonconsole/tests/test_ipythonconsole.py
|
TimenoLong/spyder
|
c4a71b75dd3229b2bebd606e073cf2db536f5c13
|
[
"MIT"
] | 1
|
2021-06-29T02:20:12.000Z
|
2021-06-29T02:20:12.000Z
|
spyder/plugins/ipythonconsole/tests/test_ipythonconsole.py
|
TimenoLong/spyder
|
c4a71b75dd3229b2bebd606e073cf2db536f5c13
|
[
"MIT"
] | null | null | null |
spyder/plugins/ipythonconsole/tests/test_ipythonconsole.py
|
TimenoLong/spyder
|
c4a71b75dd3229b2bebd606e073cf2db536f5c13
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the IPython console plugin.
"""
# Standard library imports
import codecs
import os
import os.path as osp
import shutil
import sys
import tempfile
from textwrap import dedent
from unittest.mock import Mock
# Third party imports
import IPython
from IPython.core import release as ipy_release
from IPython.core.application import get_ipython_dir
from flaky import flaky
from pkg_resources import parse_version
from pygments.token import Name
import pytest
from qtpy import PYQT5
from qtpy.QtCore import Qt
from qtpy.QtWebEngineWidgets import WEBENGINE
from qtpy.QtWidgets import QMessageBox, QMainWindow
import sympy
# Local imports
from spyder.config.base import get_home_dir
from spyder.config.gui import get_color_scheme
from spyder.config.manager import CONF
from spyder.py3compat import PY2, to_text_string
from spyder.plugins.help.tests.test_plugin import check_text
from spyder.plugins.help.utils.sphinxify import CSS_PATH
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
from spyder.plugins.ipythonconsole.utils.style import create_style_class
from spyder.utils.programs import get_temp_dir
from spyder.utils.conda import is_conda_env
# =============================================================================
# Constants
# =============================================================================
SHELL_TIMEOUT = 20000
TEMP_DIRECTORY = tempfile.gettempdir()
NON_ASCII_DIR = osp.join(TEMP_DIRECTORY, u'測試', u'اختبار')
NEW_DIR = 'new_workingdir'
# =============================================================================
# Utility Functions
# =============================================================================
def get_console_font_color(syntax_style):
styles = create_style_class(syntax_style).styles
font_color = styles[Name]
return font_color
def get_console_background_color(style_sheet):
background_color = style_sheet.split('background-color:')[1]
background_color = background_color.split(';')[0]
return background_color
def get_conda_test_env(test_env_name=u'spytest-ž'):
"""Return the full prefix path of the given `test_env_name`."""
if 'envs' in sys.prefix:
root_prefix = os.path.dirname(os.path.dirname(sys.prefix))
else:
root_prefix = sys.prefix
test_env_prefix = os.path.join(root_prefix, 'envs', test_env_name)
if os.name == 'nt':
test_env_executable = os.path.join(test_env_prefix, 'python.exe')
else:
test_env_executable = os.path.join(test_env_prefix, 'bin', 'python')
return test_env_executable
# =============================================================================
# Qt Test Fixtures
# =============================================================================
@pytest.fixture
def ipyconsole(qtbot, request):
"""IPython console fixture."""
class MainWindowMock(QMainWindow):
def get_spyder_pythonpath(self):
return CONF.get('main', 'spyder_pythonpath', [])
def __getattr__(self, attr):
if attr == 'consoles_menu_actions':
return []
else:
return Mock()
# Tests assume inline backend
CONF.set('ipython_console', 'pylab/backend', 0)
# Start in a new working directory the console
use_startup_wdir = request.node.get_closest_marker('use_startup_wdir')
if use_startup_wdir:
new_wdir = osp.join(os.getcwd(), NEW_DIR)
if not osp.exists(new_wdir):
os.mkdir(new_wdir)
CONF.set('workingdir', 'console/use_fixed_directory', True)
CONF.set('workingdir', 'console/fixed_directory', new_wdir)
else:
CONF.set('workingdir', 'console/use_fixed_directory', False)
CONF.set('workingdir', 'console/fixed_directory', get_home_dir())
# Test the console with a non-ascii temp dir
non_ascii_dir = request.node.get_closest_marker('non_ascii_dir')
if non_ascii_dir:
test_dir = NON_ASCII_DIR
else:
test_dir = None
# Instruct the console to not use a stderr file
no_stderr_file = request.node.get_closest_marker('no_stderr_file')
if no_stderr_file:
test_no_stderr = True
else:
test_no_stderr = False
# Use the automatic backend if requested
auto_backend = request.node.get_closest_marker('auto_backend')
if auto_backend:
CONF.set('ipython_console', 'pylab/backend', 1)
# Start a Pylab client if requested
pylab_client = request.node.get_closest_marker('pylab_client')
is_pylab = True if pylab_client else False
# Start a Sympy client if requested
sympy_client = request.node.get_closest_marker('sympy_client')
is_sympy = True if sympy_client else False
# Start a Cython client if requested
cython_client = request.node.get_closest_marker('cython_client')
is_cython = True if cython_client else False
# Use an external interpreter if requested
external_interpreter = request.node.get_closest_marker('external_interpreter')
if external_interpreter:
CONF.set('main_interpreter', 'default', False)
CONF.set('main_interpreter', 'executable', sys.executable)
else:
CONF.set('main_interpreter', 'default', True)
CONF.set('main_interpreter', 'executable', '')
# Use the test environment interpreter if requested
test_environment_interpreter = request.node.get_closest_marker(
'test_environment_interpreter')
if test_environment_interpreter:
CONF.set('main_interpreter', 'default', False)
CONF.set('main_interpreter', 'executable', get_conda_test_env())
else:
CONF.set('main_interpreter', 'default', True)
CONF.set('main_interpreter', 'executable', '')
    # Configure css_path in the Appearance plugin
CONF.set('appearance', 'css_path', CSS_PATH)
# Create the console and a new client
window = MainWindowMock()
console = IPythonConsole(parent=window,
testing=True,
test_dir=test_dir,
test_no_stderr=test_no_stderr)
console.dockwidget = Mock()
console._toggle_view_action = Mock()
console.create_new_client(is_pylab=is_pylab,
is_sympy=is_sympy,
is_cython=is_cython)
window.setCentralWidget(console)
# Set exclamation mark to True
CONF.set('ipython_console', 'pdb_use_exclamation_mark', True)
# This segfaults on macOS
if not sys.platform == "darwin":
qtbot.addWidget(window)
window.resize(640, 480)
window.show()
yield console
# Print shell content if failed
if request.node.rep_setup.passed:
if request.node.rep_call.failed:
# Print content of shellwidget and close window
print(console.get_current_shellwidget(
)._control.toPlainText())
client = console.get_current_client()
if client.info_page != client.blank_page:
print('info_page')
print(client.info_page)
# Close
console.closing_plugin()
console.close()
window.close()
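# The fixture above reads request.node.rep_setup / rep_call; pytest does not
# set these attributes by itself. A hook in the project's conftest.py along
# the lines of the documented pytest recipe is assumed (sketch only, not part
# of this module):
#
#     @pytest.hookimpl(tryfirst=True, hookwrapper=True)
#     def pytest_runtest_makereport(item, call):
#         outcome = yield
#         rep = outcome.get_result()
#         setattr(item, 'rep_' + rep.when, rep)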
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.external_interpreter
def test_banners(ipyconsole, qtbot):
"""Test that console banners are generated correctly."""
shell = ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Long banner
text = control.toPlainText().splitlines()
py_ver = sys.version.splitlines()[0].strip()
assert py_ver in text[0] # Python version in first line
assert 'license' in text[1] # 'license' mention in second line
assert '' == text[2] # Third line is empty
assert ipy_release.version in text[3] # Fourth line is IPython
# Short banner
short_banner = shell.short_banner()
py_ver = sys.version.split(' ')[0]
expected = 'Python %s -- IPython %s' % (py_ver, ipy_release.version)
assert expected == short_banner
@flaky(max_runs=3)
@pytest.mark.parametrize(
"function,signature,documentation",
[("arange",
["start", "stop"],
["Return evenly spaced values within a given interval.<br>",
"<br>Python built-in `range` function, but returns an ndarray ..."]),
("vectorize",
["pyfunc", "otype", "signature"],
["Generalized function class.<br>",
"Define a vectorized function which takes a nested sequence ..."]),
("absolute",
["x", "/", "out"],
["Parameters<br>", "x : array_like ..."])]
)
@pytest.mark.skipif(not os.name == 'nt',
reason="Times out on macOS and fails on Linux")
def test_get_calltips(ipyconsole, qtbot, function, signature, documentation):
"""Test that calltips show the documentation."""
shell = ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Import numpy
with qtbot.waitSignal(shell.executed):
shell.execute('import numpy as np')
# Write an object in the console that should generate a calltip
# and wait for the kernel to send its response.
with qtbot.waitSignal(shell.kernel_client.shell_channel.message_received):
qtbot.keyClicks(control, 'np.' + function + '(')
# Wait a little bit for the calltip to appear
qtbot.waitUntil(lambda: control.calltip_widget.isVisible())
# Assert we displayed a calltip
assert control.calltip_widget.isVisible()
# Hide the calltip to avoid focus problems on Linux
control.calltip_widget.hide()
    # Check expected elements for signature and documentation
for element in signature:
assert element in control.calltip_widget.text()
for element in documentation:
assert element in control.calltip_widget.text()
@flaky(max_runs=3)
@pytest.mark.auto_backend
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_auto_backend(ipyconsole, qtbot):
"""Test that the automatic backend is working correctly."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# This is here to generate further errors
with qtbot.waitSignal(shell.executed):
shell.execute("%matplotlib qt5")
# Assert there are no errors in the console
control = ipyconsole.get_focus_widget()
assert 'NOTE' not in control.toPlainText()
assert 'Error' not in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.pylab_client
def test_pylab_client(ipyconsole, qtbot):
"""Test that the Pylab console is working correctly."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# This is here to generate further errors
with qtbot.waitSignal(shell.executed):
shell.execute("e")
# Assert there are no errors in the console
control = ipyconsole.get_focus_widget()
assert 'Error' not in control.toPlainText()
# Reset the console namespace
shell.reset_namespace()
qtbot.wait(1000)
# See that `e` is still defined from numpy after reset
with qtbot.waitSignal(shell.executed):
shell.execute("e")
    # Assert there are no errors after resetting the console
control = ipyconsole.get_focus_widget()
assert 'Error' not in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.sympy_client
@pytest.mark.xfail('1.0' < sympy.__version__ < '1.2',
reason="A bug with sympy 1.1.1 and IPython-Qtconsole")
def test_sympy_client(ipyconsole, qtbot):
"""Test that the SymPy console is working correctly."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# This is here to generate further errors
with qtbot.waitSignal(shell.executed):
shell.execute("x")
# Assert there are no errors in the console
control = ipyconsole.get_focus_widget()
assert 'NameError' not in control.toPlainText()
# Reset the console namespace
shell.reset_namespace()
qtbot.wait(1000)
    # See that `x` is still defined from sympy after reset
with qtbot.waitSignal(shell.executed):
shell.execute("x")
    # Assert there are no errors after resetting the console
control = ipyconsole.get_focus_widget()
assert 'NameError' not in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.cython_client
@pytest.mark.skipif(
(not sys.platform.startswith('linux') or
parse_version(ipy_release.version) == parse_version('7.11.0')),
reason="It only works reliably on Linux and fails for IPython 7.11.0")
def test_cython_client(ipyconsole, qtbot):
"""Test that the Cython console is working correctly."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# This is here to generate further errors
with qtbot.waitSignal(shell.executed):
shell.execute("%%cython\n"
"cdef int ctest(int x, int y):\n"
" return x + y")
# Assert there are no errors in the console
control = ipyconsole.get_focus_widget()
assert 'Error' not in control.toPlainText()
# Reset the console namespace
shell.reset_namespace()
qtbot.wait(1000)
# See that cython is still enabled after reset
with qtbot.waitSignal(shell.executed):
shell.execute("%%cython\n"
"cdef int ctest(int x, int y):\n"
" return x + y")
    # Assert there are no errors after resetting the console
control = ipyconsole.get_focus_widget()
assert 'Error' not in control.toPlainText()
@flaky(max_runs=3)
def test_tab_rename_for_slaves(ipyconsole, qtbot):
"""Test slave clients are renamed correctly."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
cf = ipyconsole.get_current_client().connection_file
ipyconsole._create_client_for_kernel(cf, None, None, None)
qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
# Rename slave
ipyconsole.rename_tabs_after_change('foo')
# Assert both clients have the same name
assert 'foo' in ipyconsole.get_clients()[0].get_name()
assert 'foo' in ipyconsole.get_clients()[1].get_name()
@flaky(max_runs=3)
def test_no_repeated_tabs_name(ipyconsole, qtbot):
"""Test that tabs can't have repeated given names."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Rename first client
ipyconsole.rename_tabs_after_change('foo')
# Create a new client and try to rename it
ipyconsole.create_new_client()
ipyconsole.rename_tabs_after_change('foo')
# Assert the rename didn't take place
client_name = ipyconsole.get_current_client().get_name()
assert '2' in client_name
@flaky(max_runs=3)
def test_tabs_preserve_name_after_move(ipyconsole, qtbot):
"""Test that tabs preserve their names after they are moved."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Create a new client
ipyconsole.create_new_client()
# Move tabs
ipyconsole.tabwidget.tabBar().moveTab(0, 1)
# Assert the second client is in the first position
client_name = ipyconsole.get_clients()[0].get_name()
assert '2' in client_name
@flaky(max_runs=3)
def test_conf_env_vars(ipyconsole, qtbot):
"""Test that kernels have env vars set by our kernel spec."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Get a CONF env var
with qtbot.waitSignal(shell.executed):
shell.execute("import os; a = os.environ.get('SPY_SYMPY_O')")
# Assert we get the assigned value correctly
assert shell.get_value('a') == 'False'
@flaky(max_runs=3)
@pytest.mark.no_stderr_file
def test_no_stderr_file(ipyconsole, qtbot):
"""Test that consoles can run without an stderr."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Execute a simple assignment
with qtbot.waitSignal(shell.executed):
shell.execute('a = 1')
# Assert we get the assigned value correctly
assert shell.get_value('a') == 1
@pytest.mark.non_ascii_dir
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows")
def test_non_ascii_stderr_file(ipyconsole, qtbot):
"""Test the creation of a console with a stderr file in a non-ascii dir."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Execute a simple assignment
with qtbot.waitSignal(shell.executed):
shell.execute('a = 1')
# Assert we get the assigned value
assert shell.get_value('a') == 1
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 and sys.platform == 'darwin',
reason="It hangs frequently on Python 2.7 and macOS")
def test_console_import_namespace(ipyconsole, qtbot):
"""Test an import of the form 'from foo import *'."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Import numpy
with qtbot.waitSignal(shell.executed):
shell.execute('from numpy import *')
# Assert we get the e value correctly
assert shell.get_value('e') == 2.718281828459045
@flaky(max_runs=3)
def test_console_disambiguation(ipyconsole, qtbot):
"""Test the disambiguation of dedicated consoles."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Create directories and file for TEMP_DIRECTORY/a/b/c.py
# and TEMP_DIRECTORY/a/d/c.py
dir_b = osp.join(TEMP_DIRECTORY, 'a', 'b')
filename_b = osp.join(dir_b, 'c.py')
if not osp.isdir(dir_b):
os.makedirs(dir_b)
if not osp.isfile(filename_b):
file_c = open(filename_b, 'w+')
file_c.close()
dir_d = osp.join(TEMP_DIRECTORY, 'a', 'd')
filename_d = osp.join(dir_d, 'c.py')
if not osp.isdir(dir_d):
os.makedirs(dir_d)
if not osp.isfile(filename_d):
file_e = open(filename_d, 'w+')
file_e.close()
# Create new client and assert name without disambiguation
ipyconsole.create_client_for_file(filename_b)
client = ipyconsole.get_current_client()
assert client.get_name() == 'c.py/A'
# Create new client and assert name with disambiguation
ipyconsole.create_client_for_file(filename_d)
client = ipyconsole.get_current_client()
assert client.get_name() == 'c.py - d/A'
ipyconsole.tabwidget.setCurrentIndex(1)
client = ipyconsole.get_current_client()
assert client.get_name() == 'c.py - b/A'
@flaky(max_runs=3)
def test_console_coloring(ipyconsole, qtbot):
"""Test that console gets the same coloring present in the Editor."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
config_options = ipyconsole.config_options()
syntax_style = config_options.JupyterWidget.syntax_style
style_sheet = config_options.JupyterWidget.style_sheet
console_font_color = get_console_font_color(syntax_style)
console_background_color = get_console_background_color(style_sheet)
selected_color_scheme = CONF.get('appearance', 'selected')
color_scheme = get_color_scheme(selected_color_scheme)
editor_background_color = color_scheme['background']
editor_font_color = color_scheme['normal'][0]
console_background_color = console_background_color.replace("'", "")
editor_background_color = editor_background_color.replace("'", "")
console_font_color = console_font_color.replace("'", "")
editor_font_color = editor_font_color.replace("'", "")
assert console_background_color.strip() == editor_background_color.strip()
assert console_font_color.strip() == editor_font_color.strip()
@flaky(max_runs=3)
def test_set_cwd(ipyconsole, qtbot, tmpdir):
"""Test kernel when changing cwd."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# spyder-ide/spyder#6451.
savetemp = shell._cwd
tempdir = to_text_string(tmpdir.mkdir("queen's"))
shell.set_cwd(tempdir)
# Get current directory.
with qtbot.waitSignal(shell.executed):
shell.execute("import os; cwd = os.getcwd()")
# Assert we get the assigned value correctly
assert shell.get_value('cwd') == tempdir
# Restore original.
shell.set_cwd(savetemp)
@flaky(max_runs=3)
def test_get_cwd(ipyconsole, qtbot, tmpdir):
"""Test current working directory."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# spyder-ide/spyder#6451.
savetemp = shell._cwd
tempdir = to_text_string(tmpdir.mkdir("queen's"))
assert shell._cwd != tempdir
# Need to escape \ on Windows.
if os.name == 'nt':
tempdir = tempdir.replace(u"\\", u"\\\\")
# Change directory in the console.
with qtbot.waitSignal(shell.executed):
shell.execute(u"import os; os.chdir(u'''{}''')".format(tempdir))
# Ask for directory.
with qtbot.waitSignal(shell.sig_working_directory_changed):
shell.update_cwd()
if os.name == 'nt':
tempdir = tempdir.replace(u"\\\\", u"\\")
assert shell._cwd == tempdir
shell.set_cwd(savetemp)
@flaky(max_runs=3)
def test_request_env(ipyconsole, qtbot):
"""Test that getting env vars from the kernel is working as expected."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Add a new entry to os.environ
with qtbot.waitSignal(shell.executed):
shell.execute("import os; os.environ['FOO'] = 'bar'" )
# Ask for os.environ contents
with qtbot.waitSignal(shell.sig_show_env) as blocker:
shell.request_env()
# Get env contents from the signal
env_contents = blocker.args[0]
# Assert that our added entry is part of os.environ
assert env_contents['FOO'] == 'bar'
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt',
reason="Fails due to differences in path handling")
def test_request_syspath(ipyconsole, qtbot, tmpdir):
"""
Test that getting sys.path contents from the kernel is working as
expected.
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Add a new entry to sys.path
with qtbot.waitSignal(shell.executed):
tmp_dir = to_text_string(tmpdir)
shell.execute("import sys; sys.path.append('%s')" % tmp_dir)
# Ask for sys.path contents
with qtbot.waitSignal(shell.sig_show_syspath) as blocker:
shell.request_syspath()
# Get sys.path contents from the signal
syspath_contents = blocker.args[0]
# Assert that our added entry is part of sys.path
assert tmp_dir in syspath_contents
@flaky(max_runs=10)
@pytest.mark.skipif(os.name == 'nt', reason="It doesn't work on Windows")
def test_save_history_dbg(ipyconsole, qtbot):
"""Test that browsing command history is working while debugging."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
# Enter debugging mode
with qtbot.waitSignal(shell.executed):
shell.execute('%debug print()')
# Enter an expression
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, 'aa = 10')
qtbot.keyClick(control, Qt.Key_Enter)
# Add a pdb command to make sure it is not saved
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!u')
qtbot.keyClick(control, Qt.Key_Enter)
# Add an empty line to make sure it is not saved
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# Clear console (for some reason using shell.clear_console
# doesn't work here)
shell.reset(clear=True)
qtbot.waitUntil(lambda: shell.is_waiting_pdb_input())
# Make sure we are debugging
assert shell.is_waiting_pdb_input()
# Press Up arrow button and assert we get the last
# introduced command
qtbot.keyClick(control, Qt.Key_Up)
assert 'aa = 10' in control.toPlainText()
# Open new widget
ipyconsole.create_new_client()
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
# Enter debugging mode
with qtbot.waitSignal(shell.executed):
shell.execute('%debug print()')
# Press Up arrow button and assert we get the last
# introduced command
qtbot.keyClick(control, Qt.Key_Up)
assert 'aa = 10' in control.toPlainText()
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
    # Add a multiline statement and check we can browse it correctly
shell._pdb_history.append('if True:\n print(1)')
shell._pdb_history.append('print(2)')
shell._pdb_history.append('if True:\n print(10)')
shell._pdb_history_index = len(shell._pdb_history)
# The continuation prompt is here
qtbot.keyClick(control, Qt.Key_Up)
assert '...: print(10)' in control.toPlainText()
shell._control.set_cursor_position(shell._control.get_position('eof') - 25)
qtbot.keyClick(control, Qt.Key_Up)
assert '...: print(1)' in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 or IPython.version_info < (7, 17),
reason="insert is not the same in py2")
def test_dbg_input(ipyconsole, qtbot):
"""Test that spyder doesn't send pdb commands to unrelated input calls."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
# Debug with input
with qtbot.waitSignal(shell.executed):
shell.execute("%debug print('Hello', input('name'))")
# Reach the 'name' input
shell.pdb_execute('!n')
qtbot.wait(100)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'name')
# Execute some code and make sure that it doesn't work
# as this is not a pdb prompt
shell.pdb_execute('!n')
shell.pdb_execute('aa = 10')
qtbot.wait(500)
assert control.toPlainText().split()[-1] == 'name'
shell.kernel_client.input('test')
qtbot.waitUntil(lambda: 'Hello test' in control.toPlainText())
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It doesn't work on PY2")
def test_unicode_vars(ipyconsole, qtbot):
"""
Test that the Variable Explorer Works with unicode variables.
"""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Set value for a Unicode variable
with qtbot.waitSignal(shell.executed):
shell.execute('д = 10')
# Assert we get its value correctly
assert shell.get_value('д') == 10
# Change its value and verify
shell.set_value('д', 20)
qtbot.waitUntil(lambda: shell.get_value('д') == 20)
assert shell.get_value('д') == 20
@flaky(max_runs=3)
def test_read_stderr(ipyconsole, qtbot):
"""
Test the read operation of the stderr file of the kernel
"""
shell = ipyconsole.get_current_shellwidget()
client = ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Set contents of the stderr file of the kernel
content = 'Test text'
stderr_file = client.stderr_file
codecs.open(stderr_file, 'w', 'cp437').write(content)
# Assert that content is correct
assert content == client._read_stderr()
@flaky(max_runs=10)
@pytest.mark.no_xvfb
@pytest.mark.skipif(os.environ.get('CI', None) is not None and os.name == 'nt',
reason="It times out on AppVeyor.")
@pytest.mark.skipif(PY2, reason="It times out in Python 2.")
def test_values_dbg(ipyconsole, qtbot):
"""
Test that getting, setting, copying and removing values is working while
debugging.
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
# Enter debugging mode
with qtbot.waitSignal(shell.executed):
shell.execute('%debug print()')
# Get value
with qtbot.waitSignal(shell.executed):
shell.execute('aa = 10')
assert 'aa = 10' in control.toPlainText()
assert shell.get_value('aa') == 10
# Set value
shell.set_value('aa', 20)
qtbot.waitUntil(lambda: shell.get_value('aa') == 20)
assert shell.get_value('aa') == 20
# Copy value
shell.copy_value('aa', 'bb')
qtbot.waitUntil(lambda: shell.get_value('bb') == 20)
assert shell.get_value('bb') == 20
# Remove value
shell.remove_value('aa')
def is_defined(val):
try:
shell.get_value(val)
return True
except KeyError:
return False
qtbot.waitUntil(lambda: not is_defined('aa'))
with qtbot.waitSignal(shell.executed):
shell.execute('aa')
    # Wait until the message is received
assert "*** NameError: name 'aa' is not defined" in control.toPlainText()
@flaky(max_runs=3)
def test_execute_events_dbg(ipyconsole, qtbot):
"""Test execute events while debugging"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
# Import Matplotlib
with qtbot.waitSignal(shell.executed):
shell.execute('import matplotlib.pyplot as plt')
# Enter debugging mode
with qtbot.waitSignal(shell.executed):
shell.execute('%debug print()')
# Set processing events to True
CONF.set('ipython_console', 'pdb_execute_events', True)
shell.set_pdb_execute_events(True)
    # Generate a plot with execute events enabled
qtbot.keyClicks(control, 'plt.plot(range(10))')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# Assert that there's a plot in the console
assert shell._control.toHtml().count('img src') == 1
# Set processing events to False
CONF.set('ipython_console', 'pdb_execute_events', False)
shell.set_pdb_execute_events(False)
    # Try to generate another plot with execute events disabled
qtbot.keyClicks(control, 'plt.plot(range(10))')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# Assert that there's no new plots in the console
assert shell._control.toHtml().count('img src') == 1
# Test if the plot is shown with plt.show()
qtbot.keyClicks(control, 'plt.show()')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# Assert that there's a new plots in the console
assert shell._control.toHtml().count('img src') == 2
@flaky(max_runs=3)
def test_run_doctest(ipyconsole, qtbot):
"""
Test that doctests can be run without problems
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
code = dedent('''
def add(x, y):
"""
>>> add(1, 2)
3
>>> add(5.1, 2.2)
7.3
"""
return x + y
''')
# Run code
with qtbot.waitSignal(shell.executed):
shell.execute(code)
# Import doctest
with qtbot.waitSignal(shell.executed):
shell.execute('import doctest')
# Run doctest
with qtbot.waitSignal(shell.executed):
shell.execute('doctest.testmod()')
# Assert that doctests were run correctly
assert "TestResults(failed=0, attempted=2)" in shell._control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or (PY2 and PYQT5),
reason="It times out frequently")
def test_mpl_backend_change(ipyconsole, qtbot):
"""
Test that Matplotlib backend is changed correctly when
using the %matplotlib magic
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Import Matplotlib
with qtbot.waitSignal(shell.executed):
shell.execute('import matplotlib.pyplot as plt')
# Generate a plot
with qtbot.waitSignal(shell.executed):
shell.execute('plt.plot(range(10))')
# Change backends
with qtbot.waitSignal(shell.executed):
shell.execute('%matplotlib tk')
# Generate another plot
with qtbot.waitSignal(shell.executed):
shell.execute('plt.plot(range(10))')
# Assert that there's a single inline plot in the console
assert shell._control.toHtml().count('img src') == 1
@flaky(max_runs=10)
@pytest.mark.skipif(os.environ.get('CI', None) is not None or PYQT5,
reason="It fails frequently in PyQt5 and our CIs")
def test_ctrl_c_dbg(ipyconsole, qtbot):
"""
Test that Ctrl+C works while debugging
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
# Enter debugging mode
with qtbot.waitSignal(shell.executed):
shell.execute('%debug print()')
# Test Ctrl+C
qtbot.keyClick(control, Qt.Key_C, modifier=Qt.ControlModifier)
qtbot.waitUntil(
lambda: 'For copying text while debugging, use Ctrl+Shift+C' in
control.toPlainText(), timeout=2000)
assert 'For copying text while debugging, use Ctrl+Shift+C' in control.toPlainText()
@flaky(max_runs=10)
@pytest.mark.skipif(os.name == 'nt', reason="It doesn't work on Windows")
def test_clear_and_reset_magics_dbg(ipyconsole, qtbot):
"""
Test that clear and reset magics are working while debugging
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
# Enter debugging mode
with qtbot.waitSignal(shell.executed):
shell.execute('%debug print()')
# Test clear magic
shell.clear_console()
qtbot.waitUntil(lambda: '\nIPdb [2]: ' == control.toPlainText())
# Test reset magic
qtbot.keyClicks(control, 'bb = 10')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
assert shell.get_value('bb') == 10
shell.reset_namespace()
qtbot.wait(1000)
qtbot.keyClicks(control, 'bb')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
assert "*** NameError: name 'bb' is not defined" in control.toPlainText()
@flaky(max_runs=3)
def test_restart_kernel(ipyconsole, qtbot):
"""
Test that kernel is restarted correctly
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Do an assignment to verify that it's not there after restarting
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Restart kernel and wait until it's up again
shell._prompt_html = None
ipyconsole.restart_kernel()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
assert 'Restarting kernel...' in shell._control.toPlainText()
assert not shell.is_defined('a')
@flaky(max_runs=3)
def test_load_kernel_file_from_id(ipyconsole, qtbot):
"""
Test that a new client is created using its id
"""
shell = ipyconsole.get_current_shellwidget()
client = ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
connection_file = osp.basename(client.connection_file)
id_ = connection_file.split('kernel-')[-1].split('.json')[0]
ipyconsole._create_client_for_kernel(id_, None, None, None)
qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
new_client = ipyconsole.get_clients()[1]
assert new_client.id_ == dict(int_id='1', str_id='B')
@flaky(max_runs=3)
def test_load_kernel_file_from_location(ipyconsole, qtbot, tmpdir):
"""
Test that a new client is created using a connection file
placed in a different location from jupyter_runtime_dir
"""
shell = ipyconsole.get_current_shellwidget()
client = ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
fname = osp.basename(client.connection_file)
connection_file = to_text_string(tmpdir.join(fname))
shutil.copy2(client.connection_file, connection_file)
ipyconsole._create_client_for_kernel(connection_file, None, None, None)
qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
assert len(ipyconsole.get_clients()) == 2
@flaky(max_runs=3)
def test_load_kernel_file(ipyconsole, qtbot, tmpdir):
"""
Test that a new client is created using the connection file
of an existing client
"""
shell = ipyconsole.get_current_shellwidget()
client = ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
ipyconsole._create_client_for_kernel(client.connection_file,
None, None, None)
qtbot.waitUntil(lambda: len(ipyconsole.get_clients()) == 2)
new_client = ipyconsole.get_clients()[1]
new_shell = new_client.shellwidget
qtbot.waitUntil(lambda: new_shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(new_shell.executed):
new_shell.execute('a = 10')
assert new_client.id_ == dict(int_id='1', str_id='B')
assert shell.get_value('a') == new_shell.get_value('a')
@flaky(max_runs=3)
def test_sys_argv_clear(ipyconsole, qtbot):
"""Test that sys.argv is cleared up correctly"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('import sys; A = sys.argv')
argv = shell.get_value("A")
assert argv == ['']
@flaky(max_runs=5)
@pytest.mark.skipif(os.name == 'nt', reason="Fails on Windows")
def test_set_elapsed_time(ipyconsole, qtbot):
"""Test that the IPython console elapsed timer is set correctly."""
shell = ipyconsole.get_current_shellwidget()
client = ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Set time to 2 minutes ago.
client.t0 -= 120
with qtbot.waitSignal(client.timer.timeout, timeout=5000):
ipyconsole.set_elapsed_time(client)
assert ('00:02:00' in client.time_label.text() or
'00:02:01' in client.time_label.text())
# Wait for a second to pass, to ensure timer is counting up
with qtbot.waitSignal(client.timer.timeout, timeout=5000):
pass
assert ('00:02:01' in client.time_label.text() or
'00:02:02' in client.time_label.text())
# Make previous time later than current time.
client.t0 += 2000
with qtbot.waitSignal(client.timer.timeout, timeout=5000):
pass
assert '00:00:00' in client.time_label.text()
client.timer.timeout.disconnect(client.show_time)
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Doesn't work on Windows")
def test_stderr_file_is_removed_one_kernel(ipyconsole, qtbot, monkeypatch):
"""Test that consoles removes stderr when client is closed."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
client = ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# In a normal situation file should exist
monkeypatch.setattr(QMessageBox, 'question',
classmethod(lambda *args: QMessageBox.Yes))
assert osp.exists(client.stderr_file)
ipyconsole.close_client(client=client)
assert not osp.exists(client.stderr_file)
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Doesn't work on Windows")
def test_stderr_file_is_removed_two_kernels(ipyconsole, qtbot, monkeypatch):
"""Test that console removes stderr when client and related clients
are closed."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
client = ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# New client with the same kernel
ipyconsole._create_client_for_kernel(client.connection_file, None, None,
None)
assert len(ipyconsole.get_related_clients(client)) == 1
other_client = ipyconsole.get_related_clients(client)[0]
assert client.stderr_file == other_client.stderr_file
# In a normal situation file should exist
monkeypatch.setattr(QMessageBox, 'question',
classmethod(lambda *args: QMessageBox.Yes))
assert osp.exists(client.stderr_file)
ipyconsole.close_client(client=client)
assert not osp.exists(client.stderr_file)
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Doesn't work on Windows")
def test_stderr_file_remains_two_kernels(ipyconsole, qtbot, monkeypatch):
"""Test that console doesn't remove stderr when a related client is not
closed."""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
client = ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# New client with the same kernel
ipyconsole._create_client_for_kernel(client.connection_file, None, None,
None)
assert len(ipyconsole.get_related_clients(client)) == 1
other_client = ipyconsole.get_related_clients(client)[0]
assert client.stderr_file == other_client.stderr_file
# In a normal situation file should exist
monkeypatch.setattr(QMessageBox, "question",
classmethod(lambda *args: QMessageBox.No))
assert osp.exists(client.stderr_file)
ipyconsole.close_client(client=client)
assert osp.exists(client.stderr_file)
@flaky(max_runs=3)
def test_kernel_crash(ipyconsole, qtbot):
"""Test that we show an error message when a kernel crash occurs."""
# Create an IPython kernel config file with a bad config
ipy_kernel_cfg = osp.join(get_ipython_dir(), 'profile_default',
'ipython_kernel_config.py')
with open(ipy_kernel_cfg, 'w') as f:
# This option must be a string, not an int
f.write("c.InteractiveShellApp.extra_extension = 1")
ipyconsole.create_new_client()
# Assert that the console is showing an error
qtbot.waitUntil(lambda: ipyconsole.get_clients()[-1].is_error_shown,
timeout=6000)
error_client = ipyconsole.get_clients()[-1]
assert error_client.is_error_shown
# Assert the error contains the text we expect
webview = error_client.infowidget
if WEBENGINE:
webpage = webview.page()
else:
webpage = webview.page().mainFrame()
qtbot.waitUntil(
lambda: check_text(webpage, "Bad config encountered"),
timeout=6000)
# Remove bad kernel config file
os.remove(ipy_kernel_cfg)
@pytest.mark.skipif(not os.name == 'nt', reason="Only works on Windows")
def test_remove_old_stderr_files(ipyconsole, qtbot):
"""Test that we are removing old stderr files."""
# Create empty stderr file in our temp dir to see
# if it's removed correctly.
tmpdir = get_temp_dir()
open(osp.join(tmpdir, 'foo.stderr'), 'a').close()
# Assert that only that file is removed
ipyconsole._remove_old_stderr_files()
assert not osp.isfile(osp.join(tmpdir, 'foo.stderr'))
@flaky(max_runs=10)
@pytest.mark.use_startup_wdir
def test_console_working_directory(ipyconsole, qtbot):
"""Test for checking the working directory."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('import os; cwd = os.getcwd()')
current_wdir = shell.get_value('cwd')
folders = osp.split(current_wdir)
assert folders[-1] == NEW_DIR
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux') or PY2,
reason="It only works on Linux with python 3.")
def test_console_complete(ipyconsole, qtbot, tmpdir):
"""Test for checking the working directory."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
def check_value(name, value):
try:
return shell.get_value(name) == value
except KeyError:
return False
# test complete with one result
with qtbot.waitSignal(shell.executed):
shell.execute('cbs = 1')
qtbot.waitUntil(lambda: check_value('cbs', 1))
qtbot.wait(500)
qtbot.keyClicks(control, 'cb')
qtbot.keyClick(control, Qt.Key_Tab)
# Jedi completion takes time to start up the first time
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'cbs',
timeout=6000)
    # test complete with several results
with qtbot.waitSignal(shell.executed):
shell.execute('cbba = 1')
qtbot.waitUntil(lambda: check_value('cbba', 1))
qtbot.keyClicks(control, 'cb')
qtbot.keyClick(control, Qt.Key_Tab)
qtbot.waitUntil(shell._completion_widget.isVisible)
# cbs is another solution, so not completed yet
assert control.toPlainText().split()[-1] == 'cb'
qtbot.keyClick(shell._completion_widget, Qt.Key_Enter)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'cbba')
# Enter debugging mode
with qtbot.waitSignal(shell.executed):
shell.execute('%debug print()')
# Test complete in debug mode
# check abs is completed twice (as the cursor moves)
qtbot.keyClicks(control, 'ab')
qtbot.keyClick(control, Qt.Key_Tab)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abs')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# A second time to check a function call doesn't cause a problem
qtbot.keyClicks(control, 'print(ab')
qtbot.keyClick(control, Qt.Key_Tab)
qtbot.waitUntil(
lambda: control.toPlainText().split()[-1] == 'print(abs')
qtbot.keyClicks(control, ')')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# Enter an expression
qtbot.keyClicks(control, 'baab = 10')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(100)
qtbot.waitUntil(lambda: check_value('baab', 10))
# Check baab is completed
qtbot.keyClicks(control, 'baa')
qtbot.keyClick(control, Qt.Key_Tab)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'baab')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# Check the completion widget is shown for abba, abs
qtbot.keyClicks(control, 'abba = 10')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(100)
qtbot.waitUntil(lambda: check_value('abba', 10))
qtbot.keyClicks(control, 'ab')
qtbot.keyClick(control, Qt.Key_Tab)
qtbot.waitUntil(shell._completion_widget.isVisible)
assert control.toPlainText().split()[-1] == 'ab'
qtbot.keyClick(shell._completion_widget, Qt.Key_Enter)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abba')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# Create a class
qtbot.keyClicks(control, 'class A(): baba = 1')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(100)
qtbot.waitUntil(lambda: shell.is_defined('A'))
qtbot.keyClicks(control, 'a = A()')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(100)
qtbot.waitUntil(lambda: shell.is_defined('a'))
# Check we can complete attributes
qtbot.keyClicks(control, 'a.ba')
qtbot.keyClick(control, Qt.Key_Tab)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'a.baba')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# Check we can complete pdb command names
qtbot.keyClicks(control, '!longl')
qtbot.keyClick(control, Qt.Key_Tab)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == '!longlist')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# Check we can use custom complete for pdb
test_file = tmpdir.join('test.py')
test_file.write('stuff\n')
# Set a breakpoint in the new file
qtbot.keyClicks(control, '!b ' + str(test_file) + ':1')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
# Check we can complete the breakpoint number
qtbot.keyClicks(control, '!ignore ')
qtbot.keyClick(control, Qt.Key_Tab)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == '1')
@pytest.mark.use_startup_wdir
def test_pdb_multiline(ipyconsole, qtbot):
"""Test entering a multiline statment into pdb"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
with qtbot.waitSignal(shell.executed):
shell.execute('%debug print()')
assert '\nIPdb [' in control.toPlainText()
    # Enter a multiline statement
qtbot.keyClicks(control, 'if True:')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(500)
qtbot.keyClicks(control, 'bb = 10')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(500)
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(500)
assert shell.get_value('bb') == 10
assert "if True:\n ...: bb = 10\n" in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.parametrize(
"show_lib", [True, False])
def test_pdb_ignore_lib(ipyconsole, qtbot, show_lib):
"""Test that pdb can avoid closed files."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
# Tests assume inline backend
CONF.set('ipython_console', 'pdb_ignore_lib', not show_lib)
with qtbot.waitSignal(shell.executed):
shell.execute('%debug print()')
qtbot.keyClicks(control, '!s')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(500)
qtbot.keyClicks(control, '!q')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(control, Qt.Key_Enter)
if show_lib:
assert 'iostream.py' in control.toPlainText()
else:
assert 'iostream.py' not in control.toPlainText()
CONF.set('ipython_console', 'pdb_ignore_lib', True)
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="Times out on macOS")
def test_calltip(ipyconsole, qtbot):
"""
Test Calltip.
See spyder-ide/spyder#10842
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
with qtbot.waitSignal(shell.executed):
shell.execute('a = {"a": 1}')
qtbot.keyClicks(control, 'a.keys(', delay=100)
qtbot.wait(1000)
assert control.calltip_widget.isVisible()
@flaky(max_runs=3)
@pytest.mark.order(1)
@pytest.mark.test_environment_interpreter
def test_conda_env_activation(ipyconsole, qtbot):
"""
Test that the conda environment associated with an external interpreter
is activated before a kernel is created for it.
"""
# Wait until the window is fully up
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Get conda activation environment variable
with qtbot.waitSignal(shell.executed):
shell.execute(
"import os; conda_prefix = os.environ.get('CONDA_PREFIX')")
expected_output = get_conda_test_env().replace('\\', '/')
if is_conda_env(expected_output):
output = shell.get_value('conda_prefix').replace('\\', '/')
assert expected_output == output
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="no SIGTERM on Windows")
def test_kernel_kill(ipyconsole, qtbot):
"""
Test that the kernel correctly restarts after a kill.
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Wait for the restarter to start
qtbot.wait(3000)
crash_string = 'import os, signal; os.kill(os.getpid(), signal.SIGTERM)'
# Check only one comm is open
old_open_comms = list(shell.spyder_kernel_comm._comms.keys())
assert len(old_open_comms) == 1
with qtbot.waitSignal(shell.sig_prompt_ready, timeout=30000):
shell.execute(crash_string)
assert crash_string in shell._control.toPlainText()
assert "Restarting kernel..." in shell._control.toPlainText()
# Check a new comm replaced the old one
new_open_comms = list(shell.spyder_kernel_comm._comms.keys())
assert len(new_open_comms) == 1
assert old_open_comms[0] != new_open_comms[0]
# Wait until the comm replies
qtbot.waitUntil(
lambda: shell.spyder_kernel_comm._comms[new_open_comms[0]][
'status'] == 'ready')
assert shell.spyder_kernel_comm._comms[new_open_comms[0]][
'status'] == 'ready'
@flaky(max_runs=3)
@pytest.mark.parametrize("spyder_pythonpath", [True, False])
def test_wrong_std_module(ipyconsole, qtbot, tmpdir, spyder_pythonpath):
"""
    Test that a file with the same name as a standard library module in
the current working directory doesn't break the console.
"""
# Create an empty file called random.py in the cwd
if spyder_pythonpath:
wrong_random_mod = tmpdir.join('random.py')
wrong_random_mod.write('')
wrong_random_mod = str(wrong_random_mod)
CONF.set('main', 'spyder_pythonpath', [str(tmpdir)])
else:
wrong_random_mod = osp.join(os.getcwd(), 'random.py')
with open(wrong_random_mod, 'w') as f:
f.write('')
# Create a new client to see if its kernel starts despite the
# faulty module.
ipyconsole.create_new_client()
# A prompt should be created if the kernel didn't crash.
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Assert the extra path from spyder_pythonpath was added
if spyder_pythonpath:
check_sys_path = (
"import sys; path_added = r'{}' in sys.path".format(str(tmpdir))
)
with qtbot.waitSignal(shell.sig_prompt_ready, timeout=30000):
shell.execute(check_sys_path)
assert shell.get_value('path_added')
# Remove wrong module
os.remove(wrong_random_mod)
# Restore CONF
CONF.set('main', 'spyder_pythonpath', [])
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="no SIGTERM on Windows")
def test_kernel_restart_after_manual_restart_and_crash(ipyconsole, qtbot):
"""
Test that the kernel restarts correctly after being restarted
manually and then it crashes.
    This is a regression test for spyder-ide/spyder#12972.
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Restart kernel and wait until it's up again
shell._prompt_html = None
ipyconsole.restart_kernel()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Wait for the restarter to start
qtbot.wait(3000)
# Generate a crash
crash_string = 'import os, signal; os.kill(os.getpid(), signal.SIGTERM)'
with qtbot.waitSignal(shell.sig_prompt_ready, timeout=30000):
shell.execute(crash_string)
assert crash_string in shell._control.toPlainText()
# Evaluate an expression to be sure the restart was successful
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
assert shell.is_defined('a')
# Wait until the comm replies
open_comms = list(shell.spyder_kernel_comm._comms.keys())
qtbot.waitUntil(
lambda: shell.spyder_kernel_comm._comms[open_comms[0]][
'status'] == 'ready')
@flaky(max_runs=3)
def test_stderr_poll(ipyconsole, qtbot):
"""Test if the content of stderr is printed to the console."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
client = ipyconsole.get_current_client()
with open(client.stderr_file, 'w') as f:
f.write("test_test")
# Wait for the poll
qtbot.wait(2000)
assert "test_test" in ipyconsole.get_focus_widget().toPlainText()
@pytest.mark.slow
@pytest.mark.use_startup_wdir
def test_startup_code_pdb(ipyconsole, qtbot):
"""Test that startup code for pdb works."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
# Run a line on startup
CONF.set('ipython_console', 'startup/pdb_run_lines',
'abba = 12; print("Hello")')
shell.execute('%debug print()')
qtbot.waitUntil(lambda: 'Hello' in control.toPlainText())
# Verify that the line was executed
assert shell.get_value('abba') == 12
# Reset setting
CONF.set('ipython_console', 'startup/pdb_run_lines', '')
@flaky(max_runs=3)
@pytest.mark.parametrize(
"backend",
['inline', 'qt5', 'tk', 'osx', ]
)
def test_pdb_eventloop(ipyconsole, qtbot, backend):
"""Check if pdb works with every backend. (only testing 3)."""
# Skip failing tests
if backend == 'tk' and (os.name == 'nt' or PY2):
return
if backend == 'osx' and (sys.platform != "darwin" or PY2):
return
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = ipyconsole.get_focus_widget()
with qtbot.waitSignal(shell.executed):
shell.execute("%matplotlib " + backend)
with qtbot.waitSignal(shell.executed):
shell.execute("%debug print()")
with qtbot.waitSignal(shell.executed):
shell.execute("print('Two: ' + str(1+1))")
assert "Two: 2" in control.toPlainText()
@flaky(max_runs=3)
def test_recursive_pdb(ipyconsole, qtbot):
"""Check commands and code are separted."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = ipyconsole.get_focus_widget()
with qtbot.waitSignal(shell.executed):
shell.execute("%debug print()")
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("abab = 10")
# Check that we can't use magic twice
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("%debug print()")
assert "Please don't use '%debug'" in control.toPlainText()
# Check we can enter the recursive debugger twice
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!debug print()")
assert "(IPdb [1]):" in control.toPlainText()
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!debug print()")
assert "((IPdb [1])):" in control.toPlainText()
# quit one layer
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!quit")
assert control.toPlainText().split()[-2:] == ["(IPdb", "[2]):"]
# Check completion works
qtbot.keyClicks(control, 'aba')
qtbot.keyClick(control, Qt.Key_Tab)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abab',
timeout=SHELL_TIMEOUT)
# quit one layer
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!quit")
assert control.toPlainText().split()[-2:] == ["IPdb", "[4]:"]
# Check completion works
qtbot.keyClicks(control, 'aba')
qtbot.keyClick(control, Qt.Key_Tab)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'abab',
timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!quit")
with qtbot.waitSignal(shell.executed):
shell.execute("1 + 1")
assert control.toPlainText().split()[-2:] == ["In", "[3]:"]
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Doesn't work on windows")
def test_stop_pdb(ipyconsole, qtbot):
"""Test if we can stop pdb"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = ipyconsole.get_focus_widget()
stop_button = ipyconsole.get_current_client().stop_button
# Enter pdb
with qtbot.waitSignal(shell.executed):
shell.execute("%debug print()")
# Start and interrupt a long execution
shell.execute("import time; time.sleep(10)")
qtbot.wait(500)
with qtbot.waitSignal(shell.executed, timeout=1000):
qtbot.mouseClick(stop_button, Qt.LeftButton)
assert "KeyboardInterrupt" in control.toPlainText()
# We are still in the debugger
assert "IPdb [2]:" in control.toPlainText()
assert "In [2]:" not in control.toPlainText()
# Leave the debugger
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(stop_button, Qt.LeftButton)
assert "In [2]:" in control.toPlainText()
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Times out on Windows")
def test_code_cache(ipyconsole, qtbot):
"""
Test that code sent to execute is properly cached
    and that the cache is emptied on interrupt.
"""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = ipyconsole.get_focus_widget()
control.setFocus()
def check_value(name, value):
try:
return shell.get_value(name) == value
except KeyError:
return False
# Send two execute requests and make sure the second one is executed
shell.execute('import time; time.sleep(.5)')
shell.execute('var = 142')
qtbot.wait(500)
qtbot.waitUntil(lambda: check_value('var', 142))
assert shell.get_value('var') == 142
# Send two execute requests and cancel the second one
shell.execute('import time; time.sleep(.5)')
shell.execute('var = 1000')
shell.interrupt_kernel()
qtbot.wait(1000)
# Make sure the value of var didn't change
assert shell.get_value('var') == 142
# Same for debugging
with qtbot.waitSignal(shell.executed):
shell.execute('%debug print()')
assert 'IPdb [' in shell._control.toPlainText()
# Send two execute requests and make sure the second one is executed
shell.execute('time.sleep(.5)')
shell.execute('var = 318')
qtbot.wait(500)
qtbot.waitUntil(lambda: check_value('var', 318))
assert shell.get_value('var') == 318
# Send two execute requests and cancel the second one
shell.execute('import time; time.sleep(.5)')
shell.execute('var = 1000')
shell.interrupt_kernel()
qtbot.wait(1000)
# Make sure the value of var didn't change
assert shell.get_value('var') == 318
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="Doesn't work on Python 2.7")
def test_pdb_code_and_cmd_separation(ipyconsole, qtbot):
"""Check commands and code are separted."""
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = ipyconsole.get_focus_widget()
with qtbot.waitSignal(shell.executed):
shell.execute("%debug print()")
assert "Error" not in control.toPlainText()
with qtbot.waitSignal(shell.executed):
shell.execute("e")
assert "name 'e' is not defined" in control.toPlainText()
with qtbot.waitSignal(shell.executed):
shell.execute("!n")
assert "--Return--" in control.toPlainText()
with qtbot.waitSignal(shell.executed):
shell.execute("a")
assert ("*** NameError: name 'a' is not defined"
not in control.toPlainText())
with qtbot.waitSignal(shell.executed):
shell.execute("abba")
assert "name 'abba' is not defined" in control.toPlainText()
with qtbot.waitSignal(shell.executed):
shell.execute("!abba")
assert "Unknown command 'abba'" in control.toPlainText()
if __name__ == "__main__":
pytest.main()
| avg_line_length: 35.265023 | max_line_length: 88 | alphanum_fraction: 0.678245 |
| hexsha: 8fe0087036d7fdb71444934547aa4ac3bd20a197 | size: 154 | ext: py | lang: Python |
| repo_path: LuoguCodes/AT2447.py | repo_name: Anguei/OI-Codes | repo_head_hexsha: 0ef271e9af0619d4c236e314cd6d8708d356536a | licenses: ["MIT"] |
| max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null |
a = int(raw_input())
b = int(raw_input())
c = int(raw_input())
d = int(raw_input())
e = int(raw_input())
print [-a * c + d + b * e, b * e - a * e][a > 0]
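# Editorial note (added, not part of the original snippet): the expression above
# uses Python's list-indexed-by-a-boolean idiom, [value_if_false, value_if_true][condition],
# so it prints b * e - a * e when a > 0 and -a * c + d + b * e otherwise.
# (The snippet is Python 2: raw_input and the print statement.)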
| avg_line_length: 22 | max_line_length: 48 | alphanum_fraction: 0.525974 |
| hexsha: 94bd5696146558ea807690815a92b159188217c9 | size: 1,692 | ext: py | lang: Python |
| repo_path: var/spack/repos/builtin/packages/automake/package.py | repo_name: HaochengLIU/spack | repo_head_hexsha: 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | licenses: ["ECL-2.0", "Apache-2.0", "MIT"] |
| max_stars_count: 2 | stars events: 2018-11-27T03:39:44.000Z to 2021-09-06T15:50:35.000Z |
| max_issues_count: 1 | issues events: 2019-01-11T20:11:52.000Z to 2019-01-11T20:11:52.000Z |
| max_forks_count: 1 | forks events: 2020-10-14T14:20:17.000Z to 2020-10-14T14:20:17.000Z |
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Automake(AutotoolsPackage):
"""Automake -- make file builder part of autotools"""
homepage = 'http://www.gnu.org/software/automake/'
url = 'https://ftpmirror.gnu.org/automake/automake-1.15.tar.gz'
version('1.16.1', '83cc2463a4080efd46a72ba2c9f6b8f5')
version('1.15.1', '95df3f2d6eb8f81e70b8cb63a93c8853')
version('1.15', '716946a105ca228ab545fc37a70df3a3')
version('1.14.1', 'd052a3e884631b9c7892f2efce542d75')
version('1.11.6', '0286dc30295b62985ca51919202ecfcc')
depends_on('autoconf', type='build')
depends_on('perl', type=('build', 'run'))
build_directory = 'spack-build'
def patch(self):
# The full perl shebang might be too long
files_to_be_patched_fmt = 'bin/{0}.in'
if '@:1.15.1' in self.spec:
files_to_be_patched_fmt = 't/wrap/{0}.in'
for file in ('aclocal', 'automake'):
filter_file('^#!@PERL@ -w',
'#!/usr/bin/env perl',
files_to_be_patched_fmt.format(file))
def _make_executable(self, name):
return Executable(join_path(self.prefix.bin, name))
def setup_dependent_package(self, module, dependent_spec):
# Automake is very likely to be a build dependency,
# so we add the tools it provides to the dependent module
executables = ['aclocal', 'automake']
for name in executables:
setattr(module, name, self._make_executable(name))
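        # Editorial note (not part of the original package): after the loop above
        # runs, a dependent package's build phase can call the injected tools
        # directly, e.g. `aclocal()` followed by `automake('--add-missing')`
        # (`--add-missing` is a plain GNU automake flag; the exact invocation
        # depends on the dependent package's recipe).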
| avg_line_length: 36.782609 | max_line_length: 73 | alphanum_fraction: 0.655437 |
| hexsha: 82844b276cd4bd8e74e5f3a5d9d7429bdd08d9a1 | size: 2,122 | ext: py | lang: Python |
| repo_path: Names.py | repo_name: luontonurkka/fetcher | repo_head_hexsha: 439d3531b13dcf296c75c1a637dce4e7a7cf8b91 | licenses: ["MIT"] |
| max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null |
import requests, bs4
"""
Under MIT-Licence, 2016 Perttu Rautaniemi
"""
def getplantnames():
""" This method gets all the scientific and finnish names from url and returns them"""
names = []
site = requests.get("http://koivu.luomus.fi/kasviatlas/all.php")
soup = bs4.BeautifulSoup(site.text)
lista = soup.find("div", {"id": "main"})
das = lista.get_text().splitlines()
del das[0:2]
del das[len(das) - 3:]
for line in das:
latin, finnish = line.split(' - ', 1)
print(finnish)
latin = latin.strip()
finnish = finnish.strip()
finn = finnish.split(' ')
del finn[len(finn) - 1]
finnish = " ".join(finn)
finnish = finnish.replace('(', '')
finnish = finnish.replace(')', '')
finnish = finnish.replace(',', '')
finnish = finnish.capitalize()
o = latin.split(' ')
        if len(o) == 1 or "Ryhmä" in finnish or "ryhmä" in finnish or len(finnish) < 2:
continue
else:
names.append([latin, finnish])
return names
###I brazenly took this from jarno's code and modified it for my needs, thanks###
def getspeciesnames(speciesfilename):
# open the file
sf = open(speciesfilename, 'r')
# init empty dict
names = []
# first line is skipped
s = sf.readline()
s = sf.readline()
# read to end of the file
while len(s) > 0:
        # data is split with tabs;
        # the first column is the code and the second the full name
parts = s.split("\t")
# add dict
names.append([parts[1], parts[2]])
# read next line
s = sf.readline()
return names
def getgridnames():
names = []
site = requests.get("http://atlas3.lintuatlas.fi/tulokset/ruudut")
soup = bs4.BeautifulSoup(site.text)
lista = soup.find("div", {"id": "d-main"})
    das = lista.get_text().splitlines()
    for item in das:
        try:
            a, b = item.split(",")
            N, E = b.strip().split(":")
            names.append(dict(N=N, E=E, name=a))
        except ValueError:
            continue
return names
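# Editorial usage sketch (not part of the original module; the file name passed
# below is hypothetical):
#     plants = getplantnames()                  # list of [latin, finnish] pairs
#     species = getspeciesnames('species.txt')  # list of [code, name] pairs
#     grids = getgridnames()                    # list of dicts with keys N, E, name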
| avg_line_length: 30.314286 | max_line_length: 107 | alphanum_fraction: 0.568332 |
| hexsha: ec1855087a9975d46eece498f5355df81213c212 | size: 124,730 | ext: py | lang: Python |
| repo_path: pdb2pqr/pdb.py | repo_name: rkretsch/pdb2pqr | repo_head_hexsha: 53cbe6d320048508710b3bad8581b69d3a358ab9 | licenses: ["BSD-3-Clause"] |
| max_stars_count: 32 | stars events: 2020-08-25T07:01:35.000Z to 2022-03-17T05:27:10.000Z |
| max_issues_count: 263 | issues events: 2020-06-30T02:42:52.000Z to 2022-03-08T12:48:08.000Z |
| max_forks_count: 16 | forks events: 2020-08-14T11:27:52.000Z to 2022-01-24T10:11:31.000Z |
""" PDB parsing class
This module parses PDBs in accordance with the PDB Format Description Version 2.2
(1996); it is not very forgiving. Each class in this module corresponds
to a record in the PDB Format Description. Much of the documentation for
the classes is taken directly from the above PDB Format Description.
.. codeauthor:: Todd Dolinsky
.. codeauthor:: Yong Huang
.. codeauthor:: Nathan Baker
"""
import logging
_LOGGER = logging.getLogger(__name__)
LINE_PARSERS = {}
def register_line_parser(klass):
"""Register a line parser in the global dictionary.
:param klass: class for line parser
"""
LINE_PARSERS[klass.__name__] = klass
return klass
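# --- Illustrative usage sketch (editorial addition, not part of the original
# module). It shows how the LINE_PARSERS registry filled by the decorator above
# can be used to dispatch a raw PDB line to its record class. The helper name
# `parse_pdb_line` is hypothetical.
def parse_pdb_line(line):
    """Parse one PDB line into its registered record object, or return None.

    :param line: one line of a PDB file
    :type line: str
    :return: parsed record instance, or None for unhandled record types
    """
    record_name = line[0:6].strip()        # record type lives in columns 1-6
    klass = LINE_PARSERS.get(record_name)  # classes registered by the decorator
    if klass is None:
        return None
    return klass(line)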
class BaseRecord:
"""Base class for all records.
Verifies the received record type.
"""
def __init__(self, line):
record = line[0:6].strip()
if record != self.__class__.__name__:
raise ValueError(record)
self.original_text = line.rstrip("\r\n")
def __str__(self):
return self.original_text
def record_type(self):
"""Return PDB record type as string.
:return: record type
:rtype: str
"""
return self.original_text.split()[0]
@register_line_parser
class END(BaseRecord):
"""END class
The END records are paired with MODEL records to group individual
structures found in a coordinate entry.
"""
def __init__(self, line):
"""Initialize with line.
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
@register_line_parser
class MASTER(BaseRecord):
"""MASTER class
The MASTER record is a control record for bookkeeping.
It lists the number of lines in the coordinate entry or file for selected
record types.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+------+------------+-------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+======+============+=====================================+
| 11-15 | int | num_remark | Number of REMARK records |
+---------+------+------------+-------------------------------------+
| 21-25 | int | num_het | Number of HET records |
+---------+------+------------+-------------------------------------+
| 26-30 | int | numHelix | Number of HELIX records |
+---------+------+------------+-------------------------------------+
| 31-35 | int | numSheet | Number of SHEET records |
+---------+------+------------+-------------------------------------+
| 36-40 | int | numTurn | Number of TURN records |
+---------+------+------------+-------------------------------------+
| 41-45 | int | numSite | Number of SITE records |
+---------+------+------------+-------------------------------------+
| 46-50 | int | numXform | Number of coordinate transformation |
| | | | records (ORIGX+SCALE+MTRIX) |
+---------+------+------------+-------------------------------------+
| 51-55 | int | numCoord | Number of atomic coordinate records |
| | | | (ATOM+HETATM) |
+---------+------+------------+-------------------------------------+
| 56-60 | int | numTer | Number of TER records |
+---------+------+------------+-------------------------------------+
| 61-65 | int | numConect | Number of CONECT records |
+---------+------+------------+-------------------------------------+
| 66-70 | int | numSeq | Number of SEQRES records |
+---------+------+------------+-------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.num_remark = int(line[10:15].strip())
self.num_het = int(line[20:25].strip())
self.num_helix = int(line[25:30].strip())
self.num_sheet = int(line[30:35].strip())
self.num_turn = int(line[35:40].strip())
self.num_site = int(line[40:45].strip())
self.num_xform = int(line[45:50].strip())
self.num_coord = int(line[50:55].strip())
self.num_ter = int(line[55:60].strip())
self.num_conect = int(line[60:65].strip())
self.num_seq = int(line[65:70].strip())
@register_line_parser
class CONECT(BaseRecord):
"""CONECT class
The CONECT records specify connectivity between atoms for which
coordinates are supplied. The connectivity is described using the atom
serial number as found in the entry. CONECT records are mandatory for
HET groups (excluding water) and for other bonds not specified in the
standard residue connectivity table which involve atoms in standard
residues (see Appendix 4 for the list of standard residues). These
records are generated by the PDB.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+------+----------+---------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+======+==========+=======================================+
| 7-11 | int | serial | Atom serial number |
+---------+------+----------+---------------------------------------+
| 12-16 | int | serial1 | Serial number of bonded atom |
+---------+------+----------+---------------------------------------+
| 17-21 | int | serial2 | Serial number of bonded atom |
+---------+------+----------+---------------------------------------+
| 22-26 | int | serial3 | Serial number of bonded atom |
+---------+------+----------+---------------------------------------+
| 27-31 | int | serial4 | Serial number of bonded atom |
+---------+------+----------+---------------------------------------+
| 32-36 | int | serial5 | Serial number of hydrogen bonded atom |
+---------+------+----------+---------------------------------------+
| 37-41 | int | serial6 | Serial number of hydrogen bonded atom |
+---------+------+----------+---------------------------------------+
| 42-46 | int | serial7 | Serial number of salt bridged atom |
+---------+------+----------+---------------------------------------+
| 47-51 | int | serial8 | Serial number of hydrogen bonded atom |
+---------+------+----------+---------------------------------------+
| 52-56 | int | serial9 | Serial number of hydrogen bonded atom |
+---------+------+----------+---------------------------------------+
| 57-61 | int | serial10 | Serial number of salt bridged atom |
+---------+------+----------+---------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.serial = int(line[6:11].strip())
try:
self.serial1 = int(line[11:16].strip())
except ValueError:
self.serial1 = None
try:
self.serial2 = int(line[16:21].strip())
except ValueError:
self.serial2 = None
try:
self.serial3 = int(line[21:26].strip())
except ValueError:
self.serial3 = None
try:
self.serial4 = int(line[26:31].strip())
except ValueError:
self.serial4 = None
try:
self.serial5 = int(line[31:36].strip())
except ValueError:
self.serial5 = None
try:
self.serial6 = int(line[36:41].strip())
except ValueError:
self.serial6 = None
try:
self.serial7 = int(line[41:46].strip())
except ValueError:
self.serial7 = None
try:
self.serial8 = int(line[46:51].strip())
except ValueError:
self.serial8 = None
try:
self.serial9 = int(line[51:56].strip())
except ValueError:
self.serial9 = None
try:
self.serial10 = int(line[56:61].strip())
except ValueError:
self.serial10 = None
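# --- Illustrative helper (editorial addition, not part of the original module).
# Given a parsed CONECT record, collect the serial numbers of the covalently
# bonded atoms (fields serial1-serial4 in the table above), skipping slots that
# were blank in the input line. The helper name is hypothetical.
def conect_bonded_serials(conect):
    """Return the non-empty bonded-atom serial numbers of a CONECT record."""
    candidates = (conect.serial1, conect.serial2, conect.serial3, conect.serial4)
    return [serial for serial in candidates if serial is not None]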
@register_line_parser
class NUMMDL(BaseRecord):
"""NUMMDL class
The NUMMDL record indicates total number of models in a PDB entry.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+------+-------------+-------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+======+=============+===================+
| 11-14 | int | modelNumber | Number of models. |
+---------+------+-------------+-------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
try:
self.model_number = int(line[10:14].strip())
except ValueError:
self.model_number = None
@register_line_parser
class ENDMDL(BaseRecord):
"""ENDMDL class
The ENDMDL records are paired with MODEL records to group individual
structures found in a coordinate entry.
"""
def __init__(self, line):
super().__init__(line)
@register_line_parser
class TER(BaseRecord):
"""TER class
The TER record indicates the end of a list of ATOM/HETATM records for a
chain.
"""
def __init__(self, line):
"""Initialize by parsing line:
+---------+--------+----------+--------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+==========+==========================+
| 7-11 | int | serial | Serial number. |
+---------+--------+----------+--------------------------+
| 18-20 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------+
| 22 | string | chain_id | Chain identifier. |
+---------+--------+----------+--------------------------+
| 23-26 | int | res_seq | Residue sequence number. |
+---------+--------+----------+--------------------------+
| 27 | string | ins_code | Insertion code. |
+---------+--------+----------+--------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
try: # Not really needed
self.serial = int(line[6:11].strip())
self.res_name = line[17:20].strip()
self.chain_id = line[21].strip()
self.res_seq = int(line[22:26].strip())
self.ins_code = line[26].strip()
except (IndexError, ValueError):
self.serial = None
self.res_name = None
self.chain_id = None
self.res_seq = None
self.ins_code = None
@register_line_parser
class SIGUIJ(BaseRecord):
"""SIGUIJ class
    The SIGUIJ records present the standard deviations of the
    anisotropic temperature factors.
"""
def __init__(self, line):
"""Initialize by parsing line:
+---------+--------+----------+-------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+==========+=====================================+
| 7-11 | int | serial | Atom serial number. |
+---------+--------+----------+-------------------------------------+
| 13-16 | string | name | Atom name. |
+---------+--------+----------+-------------------------------------+
| 17 | string | alt_loc | Alternate location indicator. |
+---------+--------+----------+-------------------------------------+
| 18-20 | string | res_name | Residue name. |
+---------+--------+----------+-------------------------------------+
| 22 | string | chain_id | Chain identifier. |
+---------+--------+----------+-------------------------------------+
| 23-26 | int | res_seq | Residue sequence number. |
+---------+--------+----------+-------------------------------------+
| 27 | string | ins_code | Insertion code. |
+---------+--------+----------+-------------------------------------+
| 29-35 | int | sig11 | Sigma U(1,1) |
+---------+--------+----------+-------------------------------------+
| 36-42 | int | sig22 | Sigma U(2,2) |
+---------+--------+----------+-------------------------------------+
| 43-49 | int | sig33 | Sigma U(3,3) |
+---------+--------+----------+-------------------------------------+
| 50-56 | int | sig12 | Sigma U(1,2) |
+---------+--------+----------+-------------------------------------+
| 57-63 | int | sig13 | Sigma U(1,3) |
+---------+--------+----------+-------------------------------------+
| 64-70 | int | sig23 | Sigma U(2,3) |
+---------+--------+----------+-------------------------------------+
| 73-76 | string | seg_id | Segment identifier, left-justified. |
+---------+--------+----------+-------------------------------------+
        | 77-78 | string | element | Element symbol, right-justified. |
+---------+--------+----------+-------------------------------------+
| 79-80 | string | charge | Charge on the atom. |
+---------+--------+----------+-------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.serial = int(line[6:11].strip())
self.name = line[12:16].strip()
self.alt_loc = line[16].strip()
self.res_name = line[17:20].strip()
self.chain_id = line[21].strip()
self.res_seq = int(line[22:26].strip())
self.ins_code = line[26].strip()
self.sig11 = int(line[28:35].strip())
self.sig22 = int(line[35:42].strip())
self.sig33 = int(line[42:49].strip())
self.sig12 = int(line[49:56].strip())
self.sig13 = int(line[56:63].strip())
self.sig23 = int(line[63:70].strip())
self.seg_id = line[72:76].strip()
self.element = line[76:78].strip()
self.charge = line[78:80].strip()
@register_line_parser
class ANISOU(BaseRecord):
"""ANISOU class
The ANISOU records present the anisotropic temperature factors.
"""
def __init__(self, line):
"""Initialize by parsing line:
+---------+--------+----------+-------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+==========+=====================================+
| 7-11 | int | serial | Atom serial number. |
+---------+--------+----------+-------------------------------------+
| 13-16 | string | name | Atom name. |
+---------+--------+----------+-------------------------------------+
| 17 | string | alt_loc | Alternate location indicator. |
+---------+--------+----------+-------------------------------------+
| 18-20 | string | res_name | Residue name. |
+---------+--------+----------+-------------------------------------+
| 22 | string | chain_id | Chain identifier. |
+---------+--------+----------+-------------------------------------+
| 23-26 | int | res_seq | Residue sequence number. |
+---------+--------+----------+-------------------------------------+
| 27 | string | ins_code | Insertion code. |
+---------+--------+----------+-------------------------------------+
| 29-35 | int | u00 | U(1,1) |
+---------+--------+----------+-------------------------------------+
| 36-42 | int | u11 | U(2,2) |
+---------+--------+----------+-------------------------------------+
| 43-49 | int | u22 | U(3,3) |
+---------+--------+----------+-------------------------------------+
| 50-56 | int | u01 | U(1,2) |
+---------+--------+----------+-------------------------------------+
| 57-63 | int | u02 | U(1,3) |
+---------+--------+----------+-------------------------------------+
| 64-70 | int | u12 | U(2,3) |
+---------+--------+----------+-------------------------------------+
| 73-76 | string | seg_id | Segment identifier, left-justified. |
+---------+--------+----------+-------------------------------------+
| 77-78 | string | element | Element symbol, right-justified. |
+---------+--------+----------+-------------------------------------+
| 79-80 | string | charge | Charge on the atom. |
+---------+--------+----------+-------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.serial = int(line[6:11].strip())
self.name = line[12:16].strip()
self.alt_loc = line[16].strip()
self.res_name = line[17:20].strip()
self.chain_id = line[21].strip()
self.res_seq = int(line[22:26].strip())
self.ins_code = line[26].strip()
self.u00 = int(line[28:35].strip())
self.u11 = int(line[35:42].strip())
self.u22 = int(line[42:49].strip())
self.u01 = int(line[49:56].strip())
self.u02 = int(line[56:63].strip())
self.u12 = int(line[63:70].strip())
self.seg_id = line[72:76].strip()
self.element = line[76:78].strip()
self.charge = line[78:80].strip()
@register_line_parser
class SIGATM(BaseRecord):
"""SIGATM class
The SIGATM records present the standard deviation of atomic parameters
as they appear in ATOM and HETATM records.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+----------+-------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+==========+=====================================+
| 7-11 | int | serial | Atom serial number. |
+---------+--------+----------+-------------------------------------+
| 13-16 | string | name | Atom name. |
+---------+--------+----------+-------------------------------------+
| 17 | string | alt_loc | Alternate location indicator. |
+---------+--------+----------+-------------------------------------+
| 18-20 | string | res_name | Residue name. |
+---------+--------+----------+-------------------------------------+
| 22 | string | chain_id | Chain identifier. |
+---------+--------+----------+-------------------------------------+
| 23-26 | int | res_seq | Residue sequence number. |
+---------+--------+----------+-------------------------------------+
| 27 | string | ins_code | Code for insertion of residues. |
+---------+--------+----------+-------------------------------------+
| 31-38 | float | sig_x | Standard deviation of orthogonal |
| | | | coordinates for X in Angstroms. |
+---------+--------+----------+-------------------------------------+
| 39-46 | float | sig_y | Standard deviation of orthogonal |
| | | | coordinates for Y in Angstroms. |
+---------+--------+----------+-------------------------------------+
| 47-54 | float | sig_z | Standard deviation of orthogonal |
| | | | coordinates for Z in Angstroms. |
+---------+--------+----------+-------------------------------------+
| 55-60 | float | sig_occ | Standard deviation of occupancy. |
+---------+--------+----------+-------------------------------------+
| 61-66 | float | sig_temp | Standard deviation of temperature |
| | | | factor. |
+---------+--------+----------+-------------------------------------+
| 73-76 | string | seg_id | Segment identifier, left-justified. |
+---------+--------+----------+-------------------------------------+
| 77-78 | string | element | Element symbol, right-justified. |
+---------+--------+----------+-------------------------------------+
| 79-80 | string | charge | Charge on the atom. |
+---------+--------+----------+-------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.serial = int(line[6:11].strip())
self.name = line[12:16].strip()
self.alt_loc = line[16].strip()
self.res_name = line[17:20].strip()
self.chain_id = line[21].strip()
self.res_seq = int(line[22:26].strip())
self.ins_code = line[26].strip()
self.sig_x = float(line[30:38].strip())
self.sig_y = float(line[38:46].strip())
self.sig_z = float(line[46:54].strip())
self.sig_occ = float(line[54:60].strip())
self.sig_temp = float(line[60:66].strip())
self.seg_id = line[72:76].strip()
self.element = line[76:78].strip()
self.charge = line[78:80].strip()
@register_line_parser
class HETATM(BaseRecord):
"""HETATM class
The HETATM records present the atomic coordinate records for atoms
within "non-standard" groups. These records are used for water
molecules and atoms presented in HET groups.
"""
    def __init__(
        self, line, sybyl_type="A.aaa", l_bonds=None, l_bonded_atoms=None
    ):
"""Initialize by parsing line
+---------+--------+-------------+-----------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+=============+===================================+
| 7-11 | int | serial | Atom serial number. |
+---------+--------+-------------+-----------------------------------+
| 13-16 | string | name | Atom name. |
+---------+--------+-------------+-----------------------------------+
| 17 | string | alt_loc | Alternate location indicator. |
+---------+--------+-------------+-----------------------------------+
| 18-20 | string | res_name | Residue name. |
+---------+--------+-------------+-----------------------------------+
| 22 | string | chain_id | Chain identifier. |
+---------+--------+-------------+-----------------------------------+
| 23-26 | int | res_seq | Residue sequence number. |
+---------+--------+-------------+-----------------------------------+
| 27 | string | ins_code | Code for insertion of residues. |
+---------+--------+-------------+-----------------------------------+
| 31-38 | float | x | Orthogonal coordinates for X in |
| | | | Angstroms. |
+---------+--------+-------------+-----------------------------------+
| 39-46 | float | y | Orthogonal coordinates for Y in |
| | | | Angstroms. |
+---------+--------+-------------+-----------------------------------+
| 47-54 | float | z | Orthogonal coordinates for Z in |
| | | | Angstroms. |
+---------+--------+-------------+-----------------------------------+
| 55-60 | float | occupancy | Occupancy. |
+---------+--------+-------------+-----------------------------------+
| 61-66 | float | temp_factor | Temperature factor. |
+---------+--------+-------------+-----------------------------------+
| 73-76 | string | seg_id | Segment identifier, left- |
| | | | justified. |
+---------+--------+-------------+-----------------------------------+
| 77-78 | string | element | Element symbol, right-justified. |
+---------+--------+-------------+-----------------------------------+
| 79-80 | string | charge | Charge on the atom. |
+---------+--------+-------------+-----------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.serial = int(line[6:11].strip())
self.name = line[12:16].strip()
self.alt_loc = line[16].strip()
try:
self.res_name = line[17:20].strip()
self.chain_id = line[21].strip()
self.res_seq = int(line[22:26].strip())
self.ins_code = line[26].strip()
except IndexError:
raise ValueError("Residue name must be less than 4 characters!")
self.x = float(line[30:38].strip())
self.y = float(line[38:46].strip())
self.z = float(line[46:54].strip())
self.sybyl_type = sybyl_type
        self.l_bonded_atoms = (
            l_bonded_atoms if l_bonded_atoms is not None else []
        )
        self.l_bonds = l_bonds if l_bonds is not None else []
self.radius = 1.0
self.is_c_term = 0
self.is_n_term = 0
self.mol2charge = None
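        # Occupancy, temperature factor, segment id, element and charge are
        # optional; fall back to empty/zero defaults when the trailing
        # columns are missing or blank.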
try:
self.occupancy = float(line[54:60].strip())
self.temp_factor = float(line[60:66].strip())
self.seg_id = line[72:76].strip()
self.element = line[76:78].strip()
self.charge = line[78:80].strip()
except (ValueError, IndexError):
self.occupancy = 0.00
self.temp_factor = 0.00
self.seg_id = ""
self.element = ""
self.charge = ""
@register_line_parser
class ATOM(BaseRecord):
"""ATOM class
The ATOM records present the atomic coordinates for standard residues.
They also present the occupancy and temperature factor for each atom.
Heterogen coordinates use the HETATM record type. The element symbol is
always present on each ATOM record; segment identifier and charge are
optional.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+-------------+-----------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+=============+===================================+
| 7-11 | int | serial | Atom serial number. |
+---------+--------+-------------+-----------------------------------+
| 13-16 | string | name | Atom name. |
+---------+--------+-------------+-----------------------------------+
| 17 | string | alt_loc | Alternate location indicator. |
+---------+--------+-------------+-----------------------------------+
| 18-20 | string | res_name | Residue name. |
+---------+--------+-------------+-----------------------------------+
| 22 | string | chain_id | Chain identifier. |
+---------+--------+-------------+-----------------------------------+
| 23-26 | int | res_seq | Residue sequence number. |
+---------+--------+-------------+-----------------------------------+
| 27 | string | ins_code | Code for insertion of residues. |
+---------+--------+-------------+-----------------------------------+
| 31-38 | float | x | Orthogonal coordinates for X in |
| | | | Angstroms. |
+---------+--------+-------------+-----------------------------------+
| 39-46 | float | y | Orthogonal coordinates for Y in |
| | | | Angstroms. |
+---------+--------+-------------+-----------------------------------+
| 47-54 | float | z | Orthogonal coordinates for Z in |
| | | | Angstroms. |
+---------+--------+-------------+-----------------------------------+
| 55-60 | float | occupancy | Occupancy. |
+---------+--------+-------------+-----------------------------------+
| 61-66 | float | temp_factor | Temperature factor. |
+---------+--------+-------------+-----------------------------------+
| 73-76 | string | seg_id | Segment identifier, |
| | | | left-justified. |
+---------+--------+-------------+-----------------------------------+
| 77-78 | string | element | Element symbol, right-justified. |
+---------+--------+-------------+-----------------------------------+
| 79-80 | string | charge | Charge on the atom. |
+---------+--------+-------------+-----------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.serial = int(line[6:11].strip())
self.name = line[12:16].strip()
self.alt_loc = line[16].strip()
self.res_name = line[17:20].strip()
self.chain_id = line[21].strip()
self.res_seq = int(line[22:26].strip())
self.ins_code = line[26].strip()
self.x = float(line[30:38].strip())
self.y = float(line[38:46].strip())
self.z = float(line[46:54].strip())
try:
self.occupancy = float(line[54:60].strip())
self.temp_factor = float(line[60:66].strip())
self.seg_id = line[72:76].strip()
self.element = line[76:78].strip()
self.charge = line[78:80].strip()
except (ValueError, IndexError):
self.occupancy = 0.00
self.temp_factor = 0.00
self.seg_id = ""
self.element = ""
self.charge = ""
@register_line_parser
class MODEL(BaseRecord):
"""MODEL class
The MODEL record specifies the model serial number when multiple
structures are presented in a single coordinate entry, as is often the
case with structures determined by NMR.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+------+--------+----------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+======+========+======================+
| 11-14 | int | serial | Model serial number. |
+---------+------+--------+----------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.serial = int(line[10:14].strip())
@register_line_parser
class TVECT(BaseRecord):
"""TVECT class
The TVECT records present the translation vector for infinite
covalently connected structures.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+--------+----------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+========+==================================+
| 8-10 | int | serial | Serial number |
+---------+--------+--------+----------------------------------+
| 11-20 | float | t1 | Components of translation vector |
+---------+--------+--------+----------------------------------+
| 21-30 | float | t2 | Components of translation vector |
+---------+--------+--------+----------------------------------+
        | 31-40   | float  | t3     | Components of translation vector |
+---------+--------+--------+----------------------------------+
| 41-70 | string | text | Comments |
+---------+--------+--------+----------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.serial = int(line[7:10].strip())
self.trans1 = float(line[10:20].strip())
self.trans2 = float(line[20:30].strip())
self.trans3 = float(line[30:40].strip())
self.text = line[40:70].strip()
class MTRIXn(BaseRecord):
"""MTRIXn baseclass
The MTRIXn (n = 1, 2, or 3) records present transformations expressing
non-crystallographic symmetry.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+-------+---------+----------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+=======+=========+========================================+
| 8-10 | int | serial | Serial number |
+---------+-------+---------+----------------------------------------+
| 11-20 | float | mn1 | M31 |
+---------+-------+---------+----------------------------------------+
| 21-30 | float | mn2 | M32 |
+---------+-------+---------+----------------------------------------+
| 31-40 | float | mn3 | M33 |
+---------+-------+---------+----------------------------------------+
| 46-55 | float | vn | V3 |
+---------+-------+---------+----------------------------------------+
| 60 | int | i_given | 1 if coordinates for the |
| | | | representations which are approximately|
| | | | related by the transformations of the |
| | | | molecule are contained in the entry. |
| | | | Otherwise, blank. |
+---------+-------+---------+----------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.serial = int(line[7:10].strip())
self.mn1 = float(line[10:20].strip())
self.mn2 = float(line[20:30].strip())
self.mn3 = float(line[30:40].strip())
self.vecn = float(line[45:55].strip())
try:
self.i_given = int(line[59].strip())
except (ValueError, IndexError):
self.i_given = None
@register_line_parser
class MTRIX3(MTRIXn):
"""MATRIX3 PDB entry"""
@register_line_parser
class MTRIX2(MTRIXn):
"""MATRIX2 PDB entry"""
@register_line_parser
class MTRIX1(MTRIXn):
"""MATRIX1 PDB entry"""
class SCALEn(BaseRecord):
"""SCALEn baseclass
The SCALEn (n = 1, 2, or 3) records present the transformation from the
orthogonal coordinates as contained in the entry to fractional
crystallographic coordinates. Non-standard coordinate systems should be
explained in the remarks.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+-------+-------+------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+=======+=======+============+
| 11-20 | float | sn1 | S31 |
+---------+-------+-------+------------+
| 21-30 | float | sn2 | S32 |
+---------+-------+-------+------------+
| 31-40 | float | sn3 | S33 |
+---------+-------+-------+------------+
| 46-55 | float | un | U3 |
+---------+-------+-------+------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.sn1 = float(line[10:20].strip())
self.sn2 = float(line[20:30].strip())
self.sn3 = float(line[30:40].strip())
self.unif = float(line[45:55].strip())
@register_line_parser
class SCALE3(SCALEn):
"""SCALE3 PDB entry"""
@register_line_parser
class SCALE2(SCALEn):
"""SCALE2 PDB entry"""
@register_line_parser
class SCALE1(SCALEn):
"""SCALE2 PDB entry"""
class ORIGXn(BaseRecord):
"""ORIGXn class
The ORIGXn (n = 1, 2, or 3) records present the transformation from the
orthogonal coordinates contained in the entry to the submitted
coordinates.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+-------+-------+------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+=======+=======+============+
| 11-20 | float | on1 | O21 |
+---------+-------+-------+------------+
| 21-30 | float | on2 | O22 |
+---------+-------+-------+------------+
| 31-40 | float | on3 | O23 |
+---------+-------+-------+------------+
| 46-55 | float | tn | T2 |
+---------+-------+-------+------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.on1 = float(line[10:20].strip())
self.on2 = float(line[20:30].strip())
self.on3 = float(line[30:40].strip())
self.tn = float(line[45:55].strip())
@register_line_parser
class ORIGX2(ORIGXn):
"""ORIGX2 PDB entry"""
@register_line_parser
class ORIGX3(ORIGXn):
"""ORIGX3 PDB entry"""
@register_line_parser
class ORIGX1(ORIGXn):
"""ORIGX3 PDB entry"""
@register_line_parser
class CRYST1(BaseRecord):
"""CRYST1 class
The CRYST1 record presents the unit cell parameters, space group, and Z
value. If the structure was not determined by crystallographic means,
CRYST1 simply defines a unit cube.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+-------------+------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+=============+==================+
| 7-15 | float | a | a (Angstroms). |
+---------+--------+-------------+------------------+
| 16-24 | float | b | b (Angstroms). |
+---------+--------+-------------+------------------+
| 25-33 | float | c | c (Angstroms). |
+---------+--------+-------------+------------------+
| 34-40 | float | alpha | alpha (degrees). |
+---------+--------+-------------+------------------+
| 41-47 | float | beta | beta (degrees). |
+---------+--------+-------------+------------------+
| 48-54 | float | gamma | gamma (degrees). |
+---------+--------+-------------+------------------+
| 56-66 | string | space_group | Space group. |
+---------+--------+-------------+------------------+
| 67-70 | int | z | Z value. |
+---------+--------+-------------+------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.a = float(line[6:15].strip())
self.b = float(line[15:24].strip())
self.c = float(line[24:33].strip())
self.alpha = float(line[33:40].strip())
self.beta = float(line[40:47].strip())
self.gamma = float(line[47:54].strip())
        self.space_group = line[55:66].strip()
self.z = int(line[66:70].strip())
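# Another hedged documentation sketch (not upstream API, invented cell data,
# same BaseRecord assumption as the ATOM sketch above): CRYST1 packs the unit
# cell into fixed columns, and the record below parses to a=52.0, b=58.6,
# c=61.9, all angles 90 degrees, space group P 21 21 21, Z=4.
def _example_parse_cryst1():  # pragma: no cover - documentation sketch
    record = (
        "CRYST1"                  # columns 1-6: record name
        + "52.000".rjust(9)       # columns 7-15: a (Angstroms)
        + "58.600".rjust(9)       # columns 16-24: b (Angstroms)
        + "61.900".rjust(9)       # columns 25-33: c (Angstroms)
        + "90.00".rjust(7)        # columns 34-40: alpha (degrees)
        + "90.00".rjust(7)        # columns 41-47: beta (degrees)
        + "90.00".rjust(7)        # columns 48-54: gamma (degrees)
        + " "                     # column 55: spacer
        + "P 21 21 21".ljust(11)  # columns 56-66: space group
        + "4".rjust(4)            # columns 67-70: Z value
    )
    cell = CRYST1(record)
    return (cell.a, cell.b, cell.c), cell.space_group, cell.z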
@register_line_parser
class SITE(BaseRecord):
"""SITE class
The SITE records supply the identification of groups comprising
important sites in the macromolecule.
"""
def __init__(self, line):
"""Initialize by parsing the line
+---------+--------+-----------+-------------------------------------+
        | COLUMNS | TYPE   | FIELD     | DEFINITION                          |
        +=========+========+===========+=====================================+
        | 8-10    | int    | seq_num   | Sequence number.                    |
+---------+--------+-----------+-------------------------------------+
| 12-14 | string | site_id | Site name. |
+---------+--------+-----------+-------------------------------------+
| 16-17 | int | num_res | Number of residues comprising site. |
+---------+--------+-----------+-------------------------------------+
| 19-21 | string | res_name1 | Residue name for first residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 23 | string | chain_id1 | Chain identifier for first residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 24-27 | int | seq1 | Residue sequence number for first |
| | | | residue comprising site. |
+---------+--------+-----------+-------------------------------------+
| 28 | string | ins_code1 | Insertion code for first residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 30-32 | string | res_name2 | Residue name for second residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 34 | string | chain_id2 | Chain identifier for second residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 35-38 | int | seq2 | Residue sequence number for second |
| | | | residue comprising site. |
+---------+--------+-----------+-------------------------------------+
| 39 | string | ins_code2 | Insertion code for second residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 41-43 | string | res_name3 | Residue name for third residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 45 | string | chain_id3 | Chain identifier for third residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 46-49 | int | seq3 | Residue sequence number for third |
| | | | residue comprising site. |
+---------+--------+-----------+-------------------------------------+
| 50 | string | ins_code3 | Insertion code for third residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 52-54 | string | res_name4 | Residue name for fourth residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 56 | string | chain_id4 | Chain identifier for fourth residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
| 57-60 | int | seq4 | Residue sequence number for fourth |
| | | | residue comprising site. |
+---------+--------+-----------+-------------------------------------+
| 61 | string | ins_code4 | Insertion code for fourth residue |
| | | | comprising site. |
+---------+--------+-----------+-------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.seq_num = int(line[7:10].strip())
self.site_id = line[11:14].strip()
self.num_res = int(line[15:17].strip())
self.res_name1 = line[18:21].strip()
self.chain_id1 = line[22].strip()
self.seq1 = int(line[23:27].strip())
self.ins_code1 = line[27].strip()
self.res_name2 = line[29:32].strip()
self.chain_id2 = line[33].strip()
self.seq2 = int(line[34:38].strip())
self.ins_code2 = line[38].strip()
self.res_name3 = line[40:43].strip()
self.chain_id3 = line[44].strip()
self.seq3 = int(line[45:49].strip())
self.ins_code3 = line[49].strip()
self.res_name4 = line[51:54].strip()
self.chain_id4 = line[55].strip()
self.seq4 = int(line[56:60].strip())
try:
self.ins_code4 = line[60].strip()
except IndexError:
self.ins_code4 = None
@register_line_parser
class CISPEP(BaseRecord):
"""CISPEP field
CISPEP records specify the prolines and other peptides found to be in
the cis conformation. This record replaces the use of footnote records
to list cis peptides.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+-----------+----------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===========+==================================+
| 8-10 | int | ser_num | Record serial number. |
+---------+--------+-----------+----------------------------------+
| 12-14 | string | pep1 | Residue name. |
+---------+--------+-----------+----------------------------------+
| 16 | string | chain_id1 | Chain identifier. |
+---------+--------+-----------+----------------------------------+
| 18-21 | int | seq_num1 | Residue sequence number. |
+---------+--------+-----------+----------------------------------+
| 22 | string | icode1 | Insertion code. |
+---------+--------+-----------+----------------------------------+
| 26-28 | string | pep2 | Residue name. |
+---------+--------+-----------+----------------------------------+
| 30 | string | chain_id2 | Chain identifier. |
+---------+--------+-----------+----------------------------------+
| 32-35 | int | seq_num2 | Residue sequence number. |
+---------+--------+-----------+----------------------------------+
| 36 | string | icode2 | Insertion code. |
+---------+--------+-----------+----------------------------------+
| 44-46 | int | mod_num | Identifies the specific model. |
+---------+--------+-----------+----------------------------------+
| 54-59 | float | measure | Measure of the angle in degrees. |
+---------+--------+-----------+----------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.ser_num = int(line[7:10].strip())
self.pep1 = line[11:14].strip()
self.chain_id1 = line[15].strip()
self.seq_num1 = int(line[17:21].strip())
self.icode1 = line[21].strip()
self.pep2 = line[25:28].strip()
self.chain_id2 = line[29].strip()
self.seq_num2 = int(line[31:35].strip())
self.icode2 = line[35].strip()
self.mod_num = int(line[43:46].strip())
self.measure = float(line[53:59].strip())
@register_line_parser
class SLTBRG(BaseRecord):
"""SLTBRG field
    The SLTBRG records specify salt bridges in the entry. This record
    supplements information given in CONECT records and is provided here
    for convenience in searching.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+-----------+---------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===========+=================================+
| 13-16 | string | name1 | Atom name. |
+---------+--------+-----------+---------------------------------+
| 17 | string | alt_loc1 | Alternate location indicator. |
+---------+--------+-----------+---------------------------------+
| 18-20 | string | res_name1 | Residue name. |
+---------+--------+-----------+---------------------------------+
| 22 | string | chain_id1 | Chain identifier. |
+---------+--------+-----------+---------------------------------+
| 23-26 | int | res_seq1 | Residue sequence number. |
+---------+--------+-----------+---------------------------------+
| 27 | string | ins_code1 | Insertion code. |
+---------+--------+-----------+---------------------------------+
| 43-46 | string | name2 | Atom name. |
+---------+--------+-----------+---------------------------------+
| 47 | string | alt_loc2 | Alternate location indicator. |
+---------+--------+-----------+---------------------------------+
| 48-50 | string | res_name2 | Residue name. |
+---------+--------+-----------+---------------------------------+
| 52 | string | chain_id2 | Chain identifier. |
+---------+--------+-----------+---------------------------------+
| 53-56 | int | res_seq2 | Residue sequence number. |
+---------+--------+-----------+---------------------------------+
| 57 | string | ins_code2 | Insertion code. |
+---------+--------+-----------+---------------------------------+
| 60-65 | string | sym1 | Symmetry operator for 1st atom. |
+---------+--------+-----------+---------------------------------+
| 67-72 | string | sym2 | Symmetry operator for 2nd atom. |
+---------+--------+-----------+---------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.name1 = line[12:16].strip()
self.alt_loc1 = line[16].strip()
self.res_name1 = line[17:20].strip()
self.chain_id1 = line[21].strip()
self.res_seq1 = int(line[22:26].strip())
self.ins_code1 = line[26].strip()
self.name2 = line[42:46].strip()
self.alt_loc2 = line[46].strip()
self.res_name2 = line[47:50].strip()
self.chain_id2 = line[51].strip()
self.res_seq2 = int(line[52:56].strip())
self.ins_code2 = line[56].strip()
self.sym1 = line[59:65].strip()
self.sym2 = line[66:72].strip()
@register_line_parser
class HYDBND(BaseRecord):
"""HYDBND field
The HYDBND records specify hydrogen bonds in the entry.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+-----------+-------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===========+=====================================+
| 13-16 | string | name1 | Atom name. |
+---------+--------+-----------+-------------------------------------+
| 17 | string | alt_loc1 | Alternate location indicator. |
+---------+--------+-----------+-------------------------------------+
| 18-20 | string | res_name1 | Residue name. |
+---------+--------+-----------+-------------------------------------+
| 22 | string | chain1 | Chain identifier. |
+---------+--------+-----------+-------------------------------------+
| 23-27 | int | res_seq1 | Residue sequence number. |
+---------+--------+-----------+-------------------------------------+
| 28 | string | i_code1 | Insertion code. |
+---------+--------+-----------+-------------------------------------+
| 30-33 | string | name_h | Hydrogen atom name. |
+---------+--------+-----------+-------------------------------------+
| 34 | string | alt_loc_h | Alternate location indicator. |
+---------+--------+-----------+-------------------------------------+
| 36 | string | chain_h | Chain identifier. |
+---------+--------+-----------+-------------------------------------+
| 37-41 | int | res_seq_h | Residue sequence number. |
+---------+--------+-----------+-------------------------------------+
| 42 | string | ins_codeH | Insertion code. |
+---------+--------+-----------+-------------------------------------+
| 44-47 | string | name2 | Atom name. |
+---------+--------+-----------+-------------------------------------+
| 48 | string | alt_loc2 | Alternate location indicator. |
+---------+--------+-----------+-------------------------------------+
| 49-51 | string | res_name2 | Residue name. |
+---------+--------+-----------+-------------------------------------+
| 53 | string | chain_id2 | Chain identifier. |
+---------+--------+-----------+-------------------------------------+
| 54-58 | int | res_seq2 | Residue sequence number. |
+---------+--------+-----------+-------------------------------------+
| 59 | string | ins_code2 | Insertion code. |
+---------+--------+-----------+-------------------------------------+
| 60-65 | string | sym1 | Symmetry operator for 1st |
| | | | non-hydrogen atom. |
+---------+--------+-----------+-------------------------------------+
| 67-72 | string | sym2 | Symmetry operator for 2nd |
| | | | non-hydrogen atom. |
+---------+--------+-----------+-------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.name1 = line[12:16].strip()
self.alt_loc1 = line[16].strip()
self.res_name1 = line[17:20].strip()
self.chain1 = line[21].strip()
self.res_seq1 = line[22:27].strip()
self.i_code1 = line[27].strip()
self.name_h = line[29:33].strip()
self.alt_loc_h = line[33].strip()
self.chain_h = line[35].strip()
self.res_seq_h = line[36:41].strip()
self.i_code_h = line[41].strip()
self.name2 = line[43:47].strip()
self.alt_loc2 = line[47].strip()
self.res_name2 = line[48:51].strip()
self.chain2 = line[52].strip()
self.res_seq2 = line[53:58].strip()
self.i_code2 = line[58].strip()
self.sym1 = line[59:65].strip()
self.sym2 = line[66:72].strip()
@register_line_parser
class LINK(BaseRecord):
"""LINK field
The LINK records specify connectivity between residues that is not
implied by the primary structure. Connectivity is expressed in terms of
the atom names. This record supplements information given in CONECT
records and is provided here for convenience in searching.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+-----------+---------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===========+=================================+
| 13-16 | string | name1 | Atom name. |
+---------+--------+-----------+---------------------------------+
| 17 | string | alt_loc1 | Alternate location indicator. |
+---------+--------+-----------+---------------------------------+
| 18-20 | string | res_name1 | Residue name. |
+---------+--------+-----------+---------------------------------+
| 22 | string | chain_id1 | Chain identifier. |
+---------+--------+-----------+---------------------------------+
| 23-26 | int | res_seq1 | Residue sequence number. |
+---------+--------+-----------+---------------------------------+
| 27 | string | ins_code1 | Insertion code. |
+---------+--------+-----------+---------------------------------+
| 43-46 | string | name2 | Atom name. |
+---------+--------+-----------+---------------------------------+
| 47 | string | alt_loc2 | Alternate location indicator. |
+---------+--------+-----------+---------------------------------+
| 48-50 | string | res_name2 | Residue name. |
+---------+--------+-----------+---------------------------------+
| 52 | string | chain_id2 | Chain identifier. |
+---------+--------+-----------+---------------------------------+
| 53-56 | int | res_seq2 | Residue sequence number. |
+---------+--------+-----------+---------------------------------+
| 57 | string | ins_code2 | Insertion code. |
+---------+--------+-----------+---------------------------------+
| 60-65 | string | sym1 | Symmetry operator for 1st atom. |
+---------+--------+-----------+---------------------------------+
| 67-72 | string | sym2 | Symmetry operator for 2nd atom. |
+---------+--------+-----------+---------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.name1 = line[12:16].strip()
self.alt_loc1 = line[16].strip()
self.res_name1 = line[17:20].strip()
self.chain_id1 = line[21].strip()
self.res_seq1 = int(line[22:26].strip())
self.ins_code1 = line[26].strip()
self.name2 = line[42:46].strip()
self.alt_loc2 = line[46].strip()
self.res_name2 = line[47:50].strip()
self.chain_id2 = line[51].strip()
self.res_seq2 = int(line[52:56].strip())
self.ins_code2 = line[56].strip()
self.sym1 = line[59:65].strip()
self.sym2 = line[66:72].strip()
@register_line_parser
class SSBOND(BaseRecord):
"""SSBOND field
The SSBOND record identifies each disulfide bond in protein and
polypeptide structures by identifying the two residues involved in the
bond.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+-----------+------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===========+====================================+
| 8-10 | int | ser_num | Serial number. |
+---------+--------+-----------+------------------------------------+
| 16 | string | chain_id1 | Chain identifier. |
+---------+--------+-----------+------------------------------------+
| 18-21 | int | seq_num1 | Residue sequence number. |
+---------+--------+-----------+------------------------------------+
| 22 | string | icode1 | Insertion code. |
+---------+--------+-----------+------------------------------------+
| 30 | string | chain_id2 | Chain identifier. |
+---------+--------+-----------+------------------------------------+
| 32-35 | int | seq_num2 | Residue sequence number. |
+---------+--------+-----------+------------------------------------+
| 36 | string | icode2 | Insertion code. |
+---------+--------+-----------+------------------------------------+
| 60-65 | string | sym1 | Symmetry operator for 1st residue. |
+---------+--------+-----------+------------------------------------+
| 67-72 | string | sym2 | Symmetry operator for 2nd residue. |
+---------+--------+-----------+------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.ser_num = int(line[7:10].strip())
self.chain_id1 = line[15].strip()
self.seq_num1 = int(line[17:21].strip())
self.icode1 = line[21].strip()
self.chain_id2 = line[29].strip()
self.seq_num2 = int(line[31:35].strip())
self.icode2 = line[35].strip()
self.sym1 = line[59:65].strip()
self.sym2 = line[66:72].strip()
@register_line_parser
class TURN(BaseRecord):
"""TURN field
    The TURN records identify turns and other short loops that normally
    connect other secondary structure segments.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+---------------+---------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===============+=================================+
| 8-10 | int | seq | Turn number; starts with 1 and |
| | | | increments by one. |
+---------+--------+---------------+---------------------------------+
| 12-14 | string | turn_id | Turn identifier. |
+---------+--------+---------------+---------------------------------+
| 16-18 | string | init_res_name | Residue name of initial residue |
| | | | in turn. |
+---------+--------+---------------+---------------------------------+
| 20 | string | init_chain_id | Chain identifier for the chain |
| | | | containing this turn. |
+---------+--------+---------------+---------------------------------+
| 21-24 | int | init_seq_num | Sequence number of initial |
| | | | residue in turn. |
+---------+--------+---------------+---------------------------------+
| 25 | string | init_i_code | Insertion code of initial |
| | | | residue in turn. |
+---------+--------+---------------+---------------------------------+
| 27-29 | string | end_res_name | Residue name of terminal residue|
| | | | of turn. |
+---------+--------+---------------+---------------------------------+
| 31 | string | end_chain_id | Chain identifier for the chain |
| | | | containing this turn. |
+---------+--------+---------------+---------------------------------+
| 32-35 | int | end_seq_num | Sequence number of terminal |
| | | | residue of turn. |
+---------+--------+---------------+---------------------------------+
| 36 | string | end_i_code | Insertion code of terminal |
| | | | residue of turn. |
+---------+--------+---------------+---------------------------------+
| 41-70 | string | comment | Associated comment. |
+---------+--------+---------------+---------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.seq = int(line[7:10].strip())
self.turn_id = line[11:14].strip()
self.init_res_name = line[15:18].strip()
self.init_chain_id = line[19].strip()
self.init_seq_num = int(line[20:24].strip())
self.init_i_code = line[24].strip()
self.end_res_name = line[26:29].strip()
self.end_chain_id = line[30].strip()
self.end_seq_num = int(line[31:35].strip())
self.end_i_code = line[35].strip()
self.comment = line[40:70].strip()
@register_line_parser
class SHEET(BaseRecord):
"""SHEET field
SHEET records are used to identify the position of sheets in the
molecule. Sheets are both named and numbered. The residues where the
sheet begins and ends are noted.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+---------------+---------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===============+=================================+
| 8-10 | int | strand | Strand number which starts at 1 |
| | | | for each strand within a sheet |
| | | | and increases by one. |
+---------+--------+---------------+---------------------------------+
| 12-14 | string | sheet_id | Sheet identifier. |
+---------+--------+---------------+---------------------------------+
| 15-16 | int | num_strands | Number of strands in sheet. |
+---------+--------+---------------+---------------------------------+
| 18-20 | string | init_res_name | Residue name of initial residue.|
+---------+--------+---------------+---------------------------------+
| 22 | string | init_chain_id | Chain identifier of initial |
| | | | residue in strand. |
+---------+--------+---------------+---------------------------------+
| 23-26 | int | init_seq_num | Sequence number of initial |
| | | | residue in strand. |
+---------+--------+---------------+---------------------------------+
| 27 | string | init_i_code | Insertion code of initial |
| | | | residue in strand. |
+---------+--------+---------------+---------------------------------+
| 29-31 | string | end_res_name | Residue name of terminal |
| | | | residue. |
+---------+--------+---------------+---------------------------------+
| 33 | string | end_chain_id | Chain identifier of terminal |
| | | | residue. |
+---------+--------+---------------+---------------------------------+
| 34-37 | int | end_seq_num | Sequence number of terminal |
| | | | residue. |
+---------+--------+---------------+---------------------------------+
| 38 | string | end_i_code | Insertion code of terminal |
| | | | residue. |
+---------+--------+---------------+---------------------------------+
| 39-40 | int | sense | Sense of strand with respect to |
| | | | previous strand in the sheet. 0 |
| | | | if first strand, 1 if parallel, |
| | | | -1 if anti-parallel. |
+---------+--------+---------------+---------------------------------+
| 42-45 | string | cur_atom | Registration. Atom name in |
| | | | current strand. |
+---------+--------+---------------+---------------------------------+
| 46-48 | string | curr_res_name | Registration. Residue name in |
| | | | current strand. |
+---------+--------+---------------+---------------------------------+
        | 50      | string | curr_chain_id | Registration. Chain identifier  |
| | | | in current strand. |
+---------+--------+---------------+---------------------------------+
| 51-54 | int | curr_res_seq | Registration. Residue sequence |
| | | | number in current strand. |
+---------+--------+---------------+---------------------------------+
| 55 | string | curr_ins_code | Registration. Insertion code in |
| | | | current strand. |
+---------+--------+---------------+---------------------------------+
| 57-60 | string | prev_atom | Registration. Atom name in |
| | | | previous strand. |
+---------+--------+---------------+---------------------------------+
| 61-63 | string | prev_res_name | Registration. Residue name in |
| | | | previous strand. |
+---------+--------+---------------+---------------------------------+
        | 65      | string | prev_chain_id | Registration. Chain identifier  |
| | | | in previous strand. |
+---------+--------+---------------+---------------------------------+
| 66-69 | int | prev_res_seq | Registration. Residue sequence |
| | | | number in previous strand. |
+---------+--------+---------------+---------------------------------+
| 70 | string | prev_ins_code | Registration. Insertion code in |
| | | | previous strand. |
+---------+--------+---------------+---------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.strand = int(line[7:10].strip())
self.sheet_id = line[11:14].strip()
self.num_strands = int(line[14:16].strip())
self.init_res_name = line[17:20].strip()
self.init_chain_id = line[21].strip()
self.init_seq_num = int(line[22:26].strip())
self.init_i_code = line[26].strip()
self.end_res_name = line[28:31].strip()
self.end_chain_id = line[32].strip()
self.end_seq_num = int(line[33:37].strip())
self.end_i_code = line[37].strip()
self.sense = int(line[38:40].strip())
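        # The registration fields in columns 42-70 are present only for
        # strands after the first one; fall back to None when the line ends
        # before them.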
try:
self.cur_atom = line[41:45].strip()
self.curr_res_name = line[45:48].strip()
self.curr_chain_id = line[49].strip()
try:
self.curr_res_seq = int(line[50:54].strip())
except ValueError:
self.curr_res_seq = None
self.curr_ins_code = line[54].strip()
self.prev_atom = line[56:60].strip()
self.prev_res_name = line[60:63].strip()
self.prev_chain_id = line[64].strip()
try:
self.prev_res_seq = int(line[65:69].strip())
except ValueError:
self.prev_res_seq = None
self.prev_ins_code = line[69].strip()
except IndexError:
self.cur_atom = None
self.curr_res_name = None
self.curr_chain_id = None
self.curr_res_seq = None
self.curr_ins_code = None
self.prev_atom = None
self.prev_res_name = None
self.prev_chain_id = None
self.prev_res_seq = None
self.prev_ins_code = None
@register_line_parser
class HELIX(BaseRecord):
"""HELIX field
HELIX records are used to identify the position of helices in the
molecule. Helices are both named and numbered. The residues where the
helix begins and ends are noted, as well as the total length.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+---------------+---------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===============+=================================+
| 8-10 | int | ser_num | Serial number of the helix. This|
| | | | starts at 1 and increases |
| | | | incrementally. |
+---------+--------+---------------+---------------------------------+
| 12-14 | string | helix_id | Helix identifier. In addition |
| | | | to a serial number, each helix |
| | | | is given an alphanumeric |
| | | | character helix identifier. |
+---------+--------+---------------+---------------------------------+
| 16-18 | string | init_res_name | Name of the initial residue. |
+---------+--------+---------------+---------------------------------+
| 20 | string | init_chain_id | Chain identifier for the chain |
| | | | containing this helix. |
+---------+--------+---------------+---------------------------------+
| 22-25 | int | init_seq_num | Sequence number of the initial |
| | | | residue. |
+---------+--------+---------------+---------------------------------+
| 26 | string | init_i_code | Insertion code of the initial |
| | | | residue. |
+---------+--------+---------------+---------------------------------+
| 28-30 | string | end_res_name | Name of the terminal residue of |
| | | | the helix. |
+---------+--------+---------------+---------------------------------+
| 32 | string | end_chain_id | Chain identifier for the chain |
| | | | containing this helix. |
+---------+--------+---------------+---------------------------------+
| 34-37 | int | end_seq_num | Sequence number of the terminal |
| | | | residue. |
+---------+--------+---------------+---------------------------------+
| 38 | string | end_i_code | Insertion code of the terminal |
| | | | residue. |
+---------+--------+---------------+---------------------------------+
| 39-40 | int | helix_class | Helix class (see below). |
+---------+--------+---------------+---------------------------------+
| 41-70 | string | comment | Comment about this helix. |
+---------+--------+---------------+---------------------------------+
| 72-76 | int | length | Length of this helix. |
+---------+--------+---------------+---------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.ser_num = int(line[7:10].strip())
self.helix_id = line[11:14].strip()
self.init_res_name = line[15:18].strip()
self.init_chain_id = line[19].strip()
self.init_seq_num = int(line[21:25].strip())
self.init_i_code = line[25].strip()
self.end_res_name = line[27:30].strip()
self.end_chain_id = line[31].strip()
self.end_seq_num = int(line[33:37].strip())
self.end_i_code = line[37].strip()
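        # Helix class and length are frequently left blank; keep None when
        # the columns cannot be parsed as integers.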
try:
self.helix_class = int(line[38:40].strip())
except ValueError:
self.helix_class = None
self.comment = line[40:70].strip()
try:
self.length = int(line[71:76].strip())
except ValueError:
self.length = None
@register_line_parser
class FORMUL(BaseRecord):
"""FORMUL field
The FORMUL record presents the chemical formula and charge of a
non-standard group.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+------------+------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+============+==================+
| 9-10 | int | comp_num | Component number |
+---------+--------+------------+------------------+
| 13-15 | string | hetatm_id | Het identifier |
+---------+--------+------------+------------------+
        | 19      | string | asterisk   | * for water      |
+---------+--------+------------+------------------+
| 20-70 | string | text | Chemical formula |
+---------+--------+------------+------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.comp_num = int(line[8:10].strip())
self.hetatm_id = line[12:15].strip()
        self.asterisk = line[18].strip()
self.text = line[19:70].strip()
@register_line_parser
class HETSYN(BaseRecord):
"""HETSYN field
This record provides synonyms, if any, for the compound in the
corresponding (i.e., same hetatm_id) HETNAM record. This is to allow
greater flexibility in searching for HET groups.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+-----------------+-------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+=================+===================+
| 12-14 | string | hetatm_id | Het identifier, |
| | | | right-justified. |
+---------+--------+-----------------+-------------------+
| 16-70 | string | hetatm_synonyms | List of synonyms |
+---------+--------+-----------------+-------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.hetatm_id = line[11:14].strip()
self.hetatm_synonyms = line[15:70].strip()
@register_line_parser
class HETNAM(BaseRecord):
"""HETNAM field
This record gives the chemical name of the compound with the
given hetatm_id."""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+-----------+----------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===========+==================================+
| 12-14 | string | hetatm_id | Het identifier, right-justified. |
+---------+--------+-----------+----------------------------------+
| 16-70 | string | text | Chemical name. |
+---------+--------+-----------+----------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.hetatm_id = line[11:14].strip()
self.text = line[15:70].strip()
@register_line_parser
class HET(BaseRecord):
"""HET field
HET records are used to describe non-standard residues, such as
prosthetic groups, inhibitors, solvent molecules, and ions for which
coordinates are supplied. Groups are considered HET if they are:
* not one of the standard amino acids, and
* not one of the nucleic acids (C, G, A, T, U, and I), and
* not one of the modified versions of nucleic acids (+C, +G, +A, +T, +U,
and +I), and
* not an unknown amino acid or nucleic acid where UNK is used to indicate
the unknown residue name.
Het records also describe heterogens for which the chemical identity is
unknown, in which case the group is assigned the hetatm_id UNK.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+---------------+---------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===============+=================================+
| 8-10 | string | hetatm_id | Het identifier, right-justified.|
+---------+--------+---------------+---------------------------------+
        | 13      | string | chain_id      | Chain identifier.               |
+---------+--------+---------------+---------------------------------+
| 14-17 | int | seq_num | Sequence number. |
+---------+--------+---------------+---------------------------------+
| 18 | string | ins_code | Insertion code. |
+---------+--------+---------------+---------------------------------+
| 21-25 | int | num_het_atoms | Number of HETATM records. |
+---------+--------+---------------+---------------------------------+
| 31-70 | string | text | Text describing Het group. |
+---------+--------+---------------+---------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.hetatm_id = line[7:10].strip()
self.chain_id = line[12].strip()
try:
            self.seq_num = int(line[13:17].strip())
except ValueError:
self.seq_num = None
self.ins_code = line[17].strip()
self.num_het_atoms = int(line[20:25].strip())
self.text = line[30:70].strip()
@register_line_parser
class MODRES(BaseRecord):
"""MODRES field
The MODRES record provides descriptions of modifications (e.g.,
chemical or post-translational) to protein and nucleic acid residues.
Included are a mapping between residue names given in a PDB entry and
standard residues.
"""
def __init__(self, line):
"""Initialize by parsing a line
+---------+--------+----------+--------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+==========+======================================+
| 8-11 | string | id_code | ID code of this entry. |
+---------+--------+----------+--------------------------------------+
| 13-15 | string | res_name | Residue name used in this entry. |
+---------+--------+----------+--------------------------------------+
| 17 | string | chain_id | Chain identifier. |
+---------+--------+----------+--------------------------------------+
| 19-22 | int | seq_num | Sequence number. |
+---------+--------+----------+--------------------------------------+
| 23 | string | ins_code | Insertion code. |
+---------+--------+----------+--------------------------------------+
| 25-27 | string | stdRes | Standard residue name. |
+---------+--------+----------+--------------------------------------+
| 30-70 | string | comment | Description of the residue |
| | | | modification. |
+---------+--------+----------+--------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.id_code = line[7:11].strip()
self.res_name = line[12:15].strip()
self.chain_id = line[16].strip()
self.seq_num = int(line[18:22].strip())
self.ins_code = line[22].strip()
self.stdRes = line[24:27].strip()
self.comment = line[29:70].strip()
@register_line_parser
class SEQRES(BaseRecord):
"""SEQRES field
SEQRES records contain the amino acid or nucleic acid sequence of
residues in each chain of the macromolecule that was studied.
"""
def __init__(self, line):
"""Initialize by parsing a line
+---------+--------+----------+--------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+==========+======================================+
| 9-10 | int | ser_num | Serial number of the SEQRES record |
| | | | for the current chain. Starts at 1 |
| | | | and increments by one each line. |
| | | | Reset to 1 for each chain. |
+---------+--------+----------+--------------------------------------+
| 12 | string | chain_id | Chain identifier. This may be any |
| | | | single legal character, including a |
| | | | blank which is used if there is only |
| | | | one chain. |
+---------+--------+----------+--------------------------------------+
| 14-17 | int | num_res | Number of residues in the chain. This|
| | | | value is repeated on every record. |
+---------+--------+----------+--------------------------------------+
| 20-22 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 24-26 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 28-30 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 32-34 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 36-38 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 40-42 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 44-46 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 48-50 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 52-54 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 56-58 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 60-62 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 64-66 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
| 68-70 | string | res_name | Residue name. |
+---------+--------+----------+--------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.ser_num = int(line[8:10].strip())
self.chain_id = line[11].strip()
self.num_res = int(line[13:17].strip())
self.res_name = [line[19:22].strip()]
self.res_name.append(line[23:26].strip())
self.res_name.append(line[27:30].strip())
self.res_name.append(line[31:34].strip())
self.res_name.append(line[35:38].strip())
self.res_name.append(line[39:42].strip())
self.res_name.append(line[43:46].strip())
self.res_name.append(line[47:50].strip())
self.res_name.append(line[51:54].strip())
self.res_name.append(line[55:58].strip())
self.res_name.append(line[59:62].strip())
self.res_name.append(line[63:66].strip())
self.res_name.append(line[67:70].strip())
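# A further hedged documentation sketch (not upstream API, invented chain and
# residues, same BaseRecord assumption as the ATOM sketch above): SEQRES
# stores up to thirteen residue names per record in four-column slots
# starting at column 20.
def _example_parse_seqres():  # pragma: no cover - documentation sketch
    residues = ["MET", "LYS", "ALA", "GLY", "SER", "THR", "VAL", "LEU"]
    record = (
        "SEQRES"               # columns 1-6: record name
        + "   1"               # columns 7-10: record serial number
        + " A"                 # columns 11-12: chain identifier
        + "    8"              # columns 13-17: residue count, right-justified
        + "  "                 # columns 18-19: spacer
        + " ".join(residues)   # columns 20-50: residue names, one per 4 cols
    )
    parsed = SEQRES(record)
    return parsed.chain_id, parsed.num_res, [r for r in parsed.res_name if r]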
@register_line_parser
class SEQADV(BaseRecord):
"""SEQADV field
The SEQADV record identifies conflicts between sequence information in
the ATOM records of the PDB entry and the sequence database entry given
on DBREF. Please note that these records were designed to identify
differences and not errors. No assumption is made as to which database
contains the correct data. PDB may include REMARK records in the entry
that reflect the depositor's view of which database has the correct
sequence.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+------------+------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+============+====================================+
| 8-11 | string | id_code | ID code of this entry. |
+---------+--------+------------+------------------------------------+
| 13-15 | string | res_name | Name of the PDB residue in |
| | | | conflict. |
+---------+--------+------------+------------------------------------+
| 17 | string | chain_id | PDB chain identifier. |
+---------+--------+------------+------------------------------------+
| 19-22 | int | seq_num | PDB sequence number. |
+---------+--------+------------+------------------------------------+
| 23 | string | ins_code | PDB insertion code. |
+---------+--------+------------+------------------------------------+
| 25-28 | string | database | Sequence database name. |
+---------+--------+------------+------------------------------------+
| 30-38 | string | db_id_code | Sequence database accession number.|
+---------+--------+------------+------------------------------------+
| 40-42 | string | db_res | Sequence database residue name. |
+---------+--------+------------+------------------------------------+
| 44-48 | int | db_seq | Sequence database sequence number. |
+---------+--------+------------+------------------------------------+
| 50-70 | string | conflict | Conflict comment. |
+---------+--------+------------+------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.id_code = line[7:11].strip()
self.res_name = line[12:15].strip()
self.chain_id = line[16].strip()
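        # seq_num and db_seq may be blank, e.g. for residues such as
        # expression tags that have no database counterpart; keep None then.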
try:
            self.seq_num = int(line[18:22].strip())
except ValueError:
self.seq_num = None
self.ins_code = line[22].strip()
self.database = line[24:28].strip()
self.db_id_code = line[29:38].strip()
self.db_res = line[39:42].strip()
try:
self.db_seq = int(line[43:48].strip())
except ValueError:
self.db_seq = None
self.conflict = line[49:70].strip()
@register_line_parser
class DBREF(BaseRecord):
"""DBREF field
The DBREF record provides cross-reference links between PDB sequences
and the corresponding database entry or entries. A cross reference to
the sequence database is mandatory for each peptide chain with a length
greater than ten (10) residues. For nucleic acid entries a DBREF record
pointing to the Nucleic Acid Database (NDB) is mandatory when the
corresponding entry exists in NDB.
"""
def __init__(self, line):
"""Initialize by parsing a line.
+---------+--------+--------------+----------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+==============+==================================+
| 8-11 | string | id_code | ID code of this entry. |
+---------+--------+--------------+----------------------------------+
| 13 | string | chain_id | Chain identifier. |
+---------+--------+--------------+----------------------------------+
| 15-18 | int | seq_begin | Initial sequence number of the |
| | | | PDB sequence segment. |
+---------+--------+--------------+----------------------------------+
| 19 | string | insert_begin | Initial insertion code of the |
| | | | PDB sequence segment. |
+---------+--------+--------------+----------------------------------+
| 21-24 | int | seq_end | Ending sequence number of the |
| | | | PDB sequence segment. |
+---------+--------+--------------+----------------------------------+
| 25 | string | insert_end | Ending insertion code of the |
| | | | PDB sequence segment. |
+---------+--------+--------------+----------------------------------+
| 27-32 | string | database | Sequence database name. "PDB" |
| | | | when a corresponding sequence |
| | | | database entry has not been |
| | | | identified. |
+---------+--------+--------------+----------------------------------+
| 34-41 | string | db_accession | Sequence database accession code.|
| | | | For GenBank entries, this is the |
| | | | NCBI gi number. |
+---------+--------+--------------+----------------------------------+
| 43-54 | string | db_id_code | Sequence database identification |
| | | | code. For GenBank entries, this |
| | | | is the accession code. |
+---------+--------+--------------+----------------------------------+
| 56-60 | int | db_seq_begin | Initial sequence number of the |
        |         |        |              | database segment.                |
+---------+--------+--------------+----------------------------------+
| 61 | string | db_ins_begin | Insertion code of initial residue|
| | | | of the segment, if PDB is the |
| | | | reference. |
+---------+--------+--------------+----------------------------------+
| 63-67 | int | dbseq_end | Ending sequence number of the |
| | | | database segment. |
+---------+--------+--------------+----------------------------------+
| 68 | string | db_ins_end | Insertion code of the ending |
| | | | residue of the |
| | | | segment, if PDB is the reference.|
+---------+--------+--------------+----------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.id_code = line[7:11].strip()
self.chain_id = line[12].strip()
self.seq_begin = int(line[14:18].strip())
self.insert_begin = line[18].strip()
self.seq_end = int(line[20:24].strip())
self.insert_end = line[24].strip()
self.database = line[26:32].strip()
self.db_accession = line[33:41].strip()
self.db_id_code = line[42:54].strip()
self.db_seq_begin = int(line[55:60].strip())
self.db_ins_begin = line[60].strip()
self.dbseq_end = int(line[62:67].strip())
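        # Column 68 (db_ins_end) may be missing entirely on records that
        # were trimmed of trailing blanks.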
try:
self.db_ins_end = line[67].strip()
except IndexError:
self.db_ins_end = None
@register_line_parser
class REMARK(BaseRecord):
"""REMARK field
REMARK records present experimental details, annotations, comments, and
information not included in other records. In a number of cases,
REMARKs are used to expand the contents of other record types. A new
level of structure is being used for some REMARK records. This is
expected to facilitate searching and will assist in the conversion to a
relational database.
"""
def __init__(self, line):
"""Initialize by parsing line.
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.remark_num = int(line[7:10].strip())
self.remark_dict = {}
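        # REMARK 1 carries literature references, REMARK 2 carries the
        # resolution; every other remark number is stored as free text.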
if self.remark_num == 1:
subfield = line[11:20].strip()
if subfield == "REFERENCE":
self.remark_dict["refNum"] = int(line[21:70].strip())
elif subfield == "AUTH":
self.remark_dict["author_list"] = line[19:70].strip()
elif subfield == "TITL":
self.remark_dict["title"] = line[19:70].strip()
elif subfield == "EDIT":
self.remark_dict["editorList"] = line[19:70].strip()
elif subfield == "REF":
self.remark_dict["ref"] = line[19:66].strip()
elif subfield == "PUBL":
self.remark_dict["pub"] = line[19:70].strip()
elif subfield == "REFN":
self.remark_dict["refn"] = line[19:70].strip()
elif self.remark_num == 2:
restr = line[22:27].strip()
try:
self.remark_dict["resolution"] = float(restr)
except ValueError:
self.remark_dict["comment"] = line[11:70].strip()
else:
self.remark_dict["text"] = line[11:70].strip()
@register_line_parser
class JRNL(BaseRecord):
"""JRNL field
The JRNL record contains the primary literature citation that describes
the experiment which resulted in the deposited coordinate set. There is
at most one JRNL reference per entry. If there is no primary reference,
then there is no JRNL reference. Other references are given in REMARK 1.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+-------+---------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+=======+=====================+
| 13-70 | string | text | See details on web. |
+---------+--------+-------+---------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.text = line[12:70].strip()
@register_line_parser
class SPRSDE(BaseRecord):
"""SPRSDE field
The SPRSDE records contain a list of the ID codes of entries that were
made obsolete by the given coordinate entry and withdrawn from the PDB
release set. One entry may replace many. It is PDB policy that only the
principal investigator of a structure has the authority to withdraw it.
"""
def __init__(self, line):
"""Initialize by parsing line
+---------+--------+------------+------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+============+====================================+
| 12-20 | string | super_date | Date this entry superseded the |
| | | | listed entries. |
+---------+--------+------------+------------------------------------+
| 22-25 | string | id_code | ID code of this entry. |
+---------+--------+------------+------------------------------------+
| 32-35 | string | sid_code | ID code of a superseded entry. |
+---------+--------+------------+------------------------------------+
| 37-40 | string | sid_code | ID code of a superseded entry. |
+---------+--------+------------+------------------------------------+
| 42-45 | string | sid_code | ID code of a superseded entry. |
+---------+--------+------------+------------------------------------+
| 47-50 | string | sid_code | ID code of a superseded entry. |
+---------+--------+------------+------------------------------------+
| 52-55 | string | sid_code | ID code of a superseded entry. |
+---------+--------+------------+------------------------------------+
| 57-60 | string | sid_code | ID code of a superseded entry. |
+---------+--------+------------+------------------------------------+
| 62-65 | string | sid_code | ID code of a superseded entry. |
+---------+--------+------------+------------------------------------+
| 67-70 | string | sid_code | ID code of a superseded entry. |
+---------+--------+------------+------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.super_date = line[11:20].strip()
self.id_code = line[21:25].strip()
self.super_id_codes = [line[31:35].strip()]
self.super_id_codes.append(line[36:40].strip())
self.super_id_codes.append(line[41:45].strip())
self.super_id_codes.append(line[46:50].strip())
self.super_id_codes.append(line[51:55].strip())
self.super_id_codes.append(line[56:60].strip())
self.super_id_codes.append(line[61:65].strip())
self.super_id_codes.append(line[66:70].strip())
@register_line_parser
class REVDAT(BaseRecord):
"""REVDAT field
REVDAT records contain a history of the modifications made to an entry
since its release.
"""
def __init__(self, line):
"""Initialize by parsing a line.
.. todo::
If multiple modifications are present, only the last one in the
file is preserved.
+---------+--------+----------+--------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+==========+======================================+
| 8-10 | int | mod_num | Modification number. |
+---------+--------+----------+--------------------------------------+
| 14-22 | string | mod_date | Date of modification (or release for |
| | | | new entries). |
+---------+--------+----------+--------------------------------------+
| 24-28 | string | mod_id | Identifies this particular |
| | | | modification. It links to the archive|
| | | | used internally by PDB. |
+---------+--------+----------+--------------------------------------+
| 32 | int | mod_type | An integer identifying the type of |
| | | | modification. In case of revisions |
| | | | with more than one possible mod_type,|
| | | | the highest value applicable will be |
| | | | assigned. |
+---------+--------+----------+--------------------------------------+
| 40-45 | string | record | Name of the modified record. |
+---------+--------+----------+--------------------------------------+
| 47-52 | string | record | Name of the modified record. |
+---------+--------+----------+--------------------------------------+
| 54-59 | string | record | Name of the modified record. |
+---------+--------+----------+--------------------------------------+
| 61-66 | string | record | Name of the modified record. |
+---------+--------+----------+--------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.mod_num = int(line[7:10].strip())
self.mod_date = line[13:22].strip()
self.mod_id = line[23:28].strip()
mod_type = line[31].strip()
if mod_type:
self.mod_type = int(mod_type)
self.records = [line[39:45].strip()]
self.records.append(line[46:52].strip())
self.records.append(line[53:59].strip())
self.records.append(line[60:66].strip())
@register_line_parser
class AUTHOR(BaseRecord):
"""AUTHOR field
The AUTHOR record contains the names of the people responsible for the
contents of the entry.
"""
def __init__(self, line):
"""Initialize by parsing a line
+---------+--------+-------------+-----------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+=============+===================================+
| 11-70 | string | author_list | List of the author names, |
| | | | separated by commas |
+---------+--------+-------------+-----------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.author_list = line[10:70].strip()
@register_line_parser
class EXPDTA(BaseRecord):
"""EXPDTA field
The EXPDTA record identifies the experimental technique used. This may
refer to the type of radiation and sample, or include the spectroscopic
or modeling technique. Permitted values include:
* ELECTRON DIFFRACTION
* FIBER DIFFRACTION
* FLUORESCENCE TRANSFER
* NEUTRON DIFFRACTION
* NMR
* THEORETICAL MODEL
* X-RAY DIFFRACTION
"""
def __init__(self, line):
"""Initialize by parsing a line
+---------+--------+-----------+-------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+===========+=====================================+
| 11-70 | string | technique | The experimental technique(s) with |
| | | | optional comment describing the |
| | | | sample or experiment |
+---------+--------+-----------+-------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.technique = line[10:70].strip()
@register_line_parser
class KEYWDS(BaseRecord):
"""KEYWDS field
The KEYWDS record contains a set of terms relevant to the entry. Terms
in the KEYWDS record provide a simple means of categorizing entries and
may be used to generate index files. This record addresses some of the
limitations found in the classification field of the HEADER record. It
provides the opportunity to add further annotation to the entry in a
concise and computer-searchable fashion.
"""
def __init__(self, line):
"""Initialize by parsing a line
+---------+--------+--------+----------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+========+========================================+
| 11-70 | string | keywds | Comma-separated list of keywords |
| | | | relevant to the entry |
+---------+--------+--------+----------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.keywds = line[10:70].strip()
@register_line_parser
class SOURCE(BaseRecord):
"""SOURCE field
The SOURCE record specifies the biological and/or chemical source of
each biological molecule in the entry. Sources are described by both
the common name and the scientific name, e.g., genus and species.
Strain and/or cell-line for immortalized cells are given when they help
to uniquely identify the biological entity studied.
"""
def __init__(self, line):
"""Initialize by parsing a line
+---------+--------+--------+----------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+========+========================================+
| 11-70 | string | source | Identifies the source of the |
| | | | macromolecule in a token: value format |
+---------+--------+--------+----------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.source = line[10:70].strip()
@register_line_parser
class COMPND(BaseRecord):
"""COMPND field
The COMPND record describes the macromolecular contents of an entry.
Each macromolecule found in the entry is described by a set of token:
value pairs, and is referred to as a COMPND record component. Since the
concept of a molecule is difficult to specify exactly, PDB staff may
exercise editorial judgment in consultation with depositors in
assigning these names.
For each macromolecular component, the molecule name, synonyms, number
assigned by the Enzyme Commission (EC), and other relevant details are
specified.
"""
def __init__(self, line):
"""Initialize by parsing a line
+---------+--------+----------+--------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+==========+======================================+
| 11-70 | string | compound | Description of the molecular list |
| | | | components. |
+---------+--------+----------+--------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.compound = line[10:70].strip()
@register_line_parser
class CAVEAT(BaseRecord):
"""CAVEAT field
CAVEAT warns of severe errors in an entry. Use caution when using an entry
containing this record.
"""
def __init__(self, line):
"""Initialize by parsing line.
+---------+--------+---------+---------------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+=========+=======================================+
| 12-15 | string | id_code | PDB ID code of this entry. |
+---------+--------+---------+---------------------------------------+
| 20-70 | string | comment | Free text giving the reason for the |
| | | | CAVEAT. |
+---------+--------+---------+---------------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.id_code = line[11:15].strip()
self.comment = line[19:70].strip()
@register_line_parser
class TITLE(BaseRecord):
"""TITLE field
The TITLE record contains a title for the experiment or analysis that
is represented in the entry. It should identify an entry in the PDB in
the same way that a title identifies a paper.
"""
def __init__(self, line):
"""Initialize by parsing a line.
+---------+--------+-------+--------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+=======+==========================+
| 11-70 | string | title | Title of the experiment |
+---------+--------+-------+--------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.title = line[10:70].strip()
@register_line_parser
class OBSLTE(BaseRecord):
"""OBSLTE field
This record acts as a flag in an entry which has been withdrawn from the
PDB's full release. It indicates which, if any, new entries have replaced
the withdrawn entry.
The format allows for the case of multiple new entries replacing one
existing entry.
"""
def __init__(self, line):
"""Initialize by parsing a line.
+---------+--------+--------------+----------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+==============+==================================+
| 12-20 | string | replace_date | Date that this entry was |
| | | | replaced. |
+---------+--------+--------------+----------------------------------+
| 22-25 | string | id_code | ID code of this entry. |
+---------+--------+--------------+----------------------------------+
| 32-35 | string | rid_code | ID code of entry that replaced |
| | | | this one. |
+---------+--------+--------------+----------------------------------+
| 37-40 | string | rid_code | ID code of entry that replaced |
| | | | this one. |
+---------+--------+--------------+----------------------------------+
| 42-45 | string | rid_code | ID code of entry that replaced |
| | | | this one. |
+---------+--------+--------------+----------------------------------+
| 47-50 | string | rid_code | ID code of entry that replaced |
| | | | this one. |
+---------+--------+--------------+----------------------------------+
| 52-55 | string | rid_code | ID code of entry that replaced |
| | | | this one. |
+---------+--------+--------------+----------------------------------+
| 57-60 | string | rid_code | ID code of entry that replaced |
| | | | this one. |
+---------+--------+--------------+----------------------------------+
| 62-65 | string | rid_code | ID code of entry that replaced |
| | | | this one. |
+---------+--------+--------------+----------------------------------+
| 67-70 | string | rid_code | ID code of entry that replaced |
| | | | this one. |
+---------+--------+--------------+----------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.replace_date = line[11:20].strip()
self.id_code = line[21:25].strip()
self.replace_id_codes = [line[31:35].strip()]
self.replace_id_codes.append(line[36:40].strip())
self.replace_id_codes.append(line[41:45].strip())
self.replace_id_codes.append(line[46:50].strip())
self.replace_id_codes.append(line[51:55].strip())
self.replace_id_codes.append(line[56:60].strip())
self.replace_id_codes.append(line[61:65].strip())
        self.replace_id_codes.append(line[66:70].strip())
@register_line_parser
class HEADER(BaseRecord):
"""HEADER field
The HEADER record uniquely identifies a PDB entry through the id_code
field. This record also provides a classification for the entry. Finally,
it contains the date the coordinates were deposited at the PDB.
"""
def __init__(self, line):
"""Initialize by parsing a line.
+---------+--------+----------------+--------------------------------+
| COLUMNS | TYPE | FIELD | DEFINITION |
+=========+========+================+================================+
| 11-50 | string | classification | Classifies the molecule(s) |
+---------+--------+----------------+--------------------------------+
| 51-59 | string | dep_date | Deposition date. This is the |
| | | | date the coordinates were |
| | | | received by the PDB |
+---------+--------+----------------+--------------------------------+
        | 63-66   | string | id_code        | This identifier is unique      |
        |         |        |                | within the PDB                 |
+---------+--------+----------------+--------------------------------+
:param line: line with PDB class
:type line: str
"""
super().__init__(line)
self.classification = line[10:50].strip()
self.dep_date = line[50:59].strip()
self.id_code = line[62:66].strip()
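# Example (hypothetical, abbreviated): given a properly column-aligned line
# such as "HEADER    HYDROLASE" + padding + "01-JAN-00   1ABC",
# HEADER(line).classification == "HYDROLASE", .dep_date == "01-JAN-00" and
# .id_code == "1ABC".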
def read_atom(line):
"""If the ATOM/HETATM is not column-formatted, try to get some information
by parsing whitespace from the right. Look for five floating point
numbers followed by the residue number.
:param line: the line to parse
:type line: str
"""
# Try to find 5 consecutive floats
words = str.split(line)
size = len(words) - 1
consec = 0
iword = 0
for i in range(size):
entry = words[size - i]
try:
_ = float(entry)
consec += 1
if consec == 5:
iword = i
break
except ValueError:
consec = 0
record = line[0:6].strip()
newline = line[0:22]
newline = newline + str.rjust(words[size - iword - 1], 4)
newline = newline + str.rjust("", 3)
newline = newline + str.rjust(words[size - iword], 8)
newline = newline + str.rjust(words[size - iword + 1], 8)
newline = newline + str.rjust(words[size - iword + 2], 8)
newline = newline + str.rjust(words[size - iword + 3], 6)
newline = newline + str.rjust(words[size - iword + 4], 6)
klass = LINE_PARSERS[record]
return klass(newline)
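# Fallback behaviour (sketch): read_atom() scans the whitespace-split words
# from the right for the last run of five floats (x, y, z, occupancy,
# temperature factor), right-justifies them and the preceding residue number
# into the expected fixed columns, and hands the rebuilt line to the ATOM /
# HETATM parser registered in LINE_PARSERS.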
def read_pdb(file_):
"""Parse PDB-format data into array of Atom objects.
:param file_: open File-like object
:type file_: file
:return: (a list of objects from this module, a list of record names that
couldn't be parsed)
:rtype: (list, list)
"""
pdblist = [] # Array of parsed lines (as objects)
errlist = [] # List of records we can't parse
    # We can come up with nothing if we can't get our file off the web.
if file_ is None:
return pdblist, errlist
while True:
line = file_.readline().strip()
if line == "":
break
# We assume we have a method for each PDB record and can therefore
# parse them automatically
record = ""
try:
record = line[0:6].strip()
if record not in errlist:
klass = LINE_PARSERS[record]
obj = klass(line)
pdblist.append(obj)
except (KeyError, ValueError) as details:
if record not in ["HETATM", "ATOM"]:
errlist.append(record)
_LOGGER.error(f"Error parsing line: {details}")
_LOGGER.error(f"<{line.strip()}>")
_LOGGER.error(
f"Truncating remaining errors for record type:{record}"
)
else:
raise details
except IndexError as details:
if record in ["ATOM", "HETATM"]:
try:
obj = read_atom(line)
pdblist.append(obj)
except IndexError as details:
_LOGGER.error(f"Error parsing line: {details},")
_LOGGER.error(f"<{line.strip()}>")
elif record in ["SITE", "TURN"]:
pass
elif record in ["SSBOND", "LINK"]:
_LOGGER.error("Warning -- ignoring record:")
_LOGGER.error(f"<{line.strip()}>")
else:
_LOGGER.error(f"Error parsing line: {details},")
_LOGGER.error(f"<{line.strip()}>")
return pdblist, errlist
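if __name__ == "__main__":
    # Minimal usage sketch ("example.pdb" is a placeholder path): read_pdb()
    # returns the parsed record objects plus the record names it could not
    # handle.
    import sys as _sys
    pdb_path = _sys.argv[1] if len(_sys.argv) > 1 else "example.pdb"
    with open(pdb_path) as pdb_file:
        records, errors = read_pdb(pdb_file)
    print(f"parsed {len(records)} records; unparsed record types: {errors}")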
| 47.82592
| 78
| 0.364082
|
273025dc160c27176a8302b67158343fbdd74151
| 255
|
py
|
Python
|
manage.py
|
ychab/privagal
|
118197c8fdeb7e32c95cf9672b87fb0350a5a874
|
[
"BSD-3-Clause"
] | 6
|
2016-06-06T15:27:24.000Z
|
2016-07-14T01:26:42.000Z
|
manage.py
|
ychab/privagal
|
118197c8fdeb7e32c95cf9672b87fb0350a5a874
|
[
"BSD-3-Clause"
] | null | null | null |
manage.py
|
ychab/privagal
|
118197c8fdeb7e32c95cf9672b87fb0350a5a874
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "privagal.settings.dev")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.181818
| 76
| 0.772549
|
36eb93c2c5eca25b60d218531e53ff166e073d07
| 1,323
|
py
|
Python
|
src/main/py/com/example/sql/window_analytic_functions.py
|
brijeshdhaker/spark-python-examples
|
bb3504d21c073448c336c228f74449de68853b8d
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-07-18T16:23:56.000Z
|
2021-07-18T16:23:56.000Z
|
src/main/py/com/example/sql/window_analytic_functions.py
|
brijeshdhaker/spark-python-examples
|
bb3504d21c073448c336c228f74449de68853b8d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/main/py/com/example/sql/window_analytic_functions.py
|
brijeshdhaker/spark-python-examples
|
bb3504d21c073448c336c228f74449de68853b8d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import sys
from pyspark.sql import SparkSession
from pyspark.sql.window import Window
from pyspark.sql.functions import *
# create a SparkSession with Hive support enabled
spark = SparkSession \
.builder \
.appName("PySpark Window Functions") \
.enableHiveSupport() \
.getOrCreate()
spark.sparkContext.setLogLevel("WARN")
# Create a DataFrame from a local dataset
dataset = [
("Thin", "cell phone", 6000),
("Normal", "tablet", 1500),
("Mini", "tablet", 5500),
("Ultra thin", "cell phone", 5000),
("Very thin", "cell phone", 6000),
("Big", "tablet", 2500),
("Bendable", "cell phone", 3000),
("Foldable", "cell phone", 3000),
("Pro", "tablet", 4500),
("Pro2", "tablet", 6500)
]
df = spark.createDataFrame(dataset, ["category", "product", "revenue"])
df.show()
windowSpec = Window.partitionBy(df['category']).orderBy(df['revenue'].desc())
""" cume_dist """
from pyspark.sql.functions import cume_dist
df.withColumn("cume_dist", cume_dist().over(windowSpec)).show()
"""lag"""
from pyspark.sql.functions import lag
df.withColumn("lag", lag("revenue", 2).over(windowSpec)).show()
"""lead"""
from pyspark.sql.functions import lead
df.withColumn("lead", lead("revenue", 2).over(windowSpec)).show()
#
spark.stop()
| 28.148936
| 77
| 0.636432
|
88498af5fb6d79a2a8d8f6ab2801225f14f3c7f9
| 5,176
|
py
|
Python
|
test_autofit/mapper/model/test_model_instance.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | 39
|
2019-01-24T10:45:23.000Z
|
2022-03-18T09:37:59.000Z
|
test_autofit/mapper/model/test_model_instance.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | 260
|
2018-11-27T12:56:33.000Z
|
2022-03-31T16:08:59.000Z
|
test_autofit/mapper/model/test_model_instance.py
|
caoxiaoyue/PyAutoFit
|
819cd2acc8d4069497a161c3bb6048128e44d828
|
[
"MIT"
] | 13
|
2018-11-30T16:49:05.000Z
|
2022-01-21T17:39:29.000Z
|
import pytest
import autofit as af
from autofit.mock.mock_model import MockClassx2, MockClassx3TupleFloat, MockComponents
@pytest.fixture(name="mock_components_1")
def make_mock_components_1():
return MockComponents()
@pytest.fixture(name="mock_components_2")
def make_mock_components_2():
return MockComponents()
@pytest.fixture(name="instance")
def make_instance(mock_components_1, mock_components_2):
sub = af.ModelInstance()
instance = af.ModelInstance()
sub.mock_components_1 = mock_components_1
instance.mock_components_2 = mock_components_2
instance.sub = sub
sub_2 = af.ModelInstance()
sub_2.mock_components_1 = mock_components_1
instance.sub.sub = sub_2
return instance
class TestModelInstance:
def test_iterable(self, instance):
assert len(list(instance)) == 2
def test_as_model(self, instance):
model = instance.as_model()
assert isinstance(model, af.ModelMapper)
assert isinstance(model.mock_components_2, af.PriorModel)
assert model.mock_components_2.cls == MockComponents
def test_object_for_path(self, instance, mock_components_1, mock_components_2):
assert instance.object_for_path(("mock_components_2",)) is mock_components_2
assert instance.object_for_path(("sub", "mock_components_1")) is mock_components_1
assert instance.object_for_path(("sub", "sub", "mock_components_1")) is mock_components_1
setattr(instance.object_for_path(("mock_components_2",)), "mock_components", mock_components_1)
assert mock_components_2.mock_components is mock_components_1
def test_path_instance_tuples_for_class(self, instance, mock_components_1, mock_components_2):
result = instance.path_instance_tuples_for_class(MockComponents)
assert result[0] == (("mock_components_2",), mock_components_2)
assert result[1] == (("sub", "mock_components_1"), mock_components_1)
assert result[2] == (("sub", "sub", "mock_components_1"), mock_components_1)
def test_simple_model(self):
mapper = af.ModelMapper()
mapper.mock_class = MockClassx2
model_map = mapper.instance_from_unit_vector([1.0, 1.0])
assert isinstance(model_map.mock_class, MockClassx2)
assert model_map.mock_class.one == 1.0
assert model_map.mock_class.two == 2.0
def test_two_object_model(self):
mapper = af.ModelMapper()
mapper.mock_class_1 = MockClassx2
mapper.mock_class_2 = MockClassx2
model_map = mapper.instance_from_unit_vector([1.0, 0.0, 0.0, 1.0])
assert isinstance(model_map.mock_class_1, MockClassx2)
assert isinstance(model_map.mock_class_2, MockClassx2)
assert model_map.mock_class_1.one == 1.0
assert model_map.mock_class_1.two == 0.0
assert model_map.mock_class_2.one == 0.0
assert model_map.mock_class_2.two == 2.0
def test_swapped_prior_construction(self):
mapper = af.ModelMapper()
mapper.mock_class_1 = MockClassx2
mapper.mock_class_2 = MockClassx2
# noinspection PyUnresolvedReferences
mapper.mock_class_2.one = mapper.mock_class_1.one
model_map = mapper.instance_from_unit_vector([1.0, 0.0, 0.0])
assert isinstance(model_map.mock_class_1, MockClassx2)
assert isinstance(model_map.mock_class_2, MockClassx2)
assert model_map.mock_class_1.one == 1.0
assert model_map.mock_class_1.two == 0.0
assert model_map.mock_class_2.one == 1.0
assert model_map.mock_class_2.two == 0.0
def test_prior_replacement(self):
mapper = af.ModelMapper()
mapper.mock_class = MockClassx2
mapper.mock_class.one = af.UniformPrior(100, 200)
model_map = mapper.instance_from_unit_vector([0.0, 0.0])
assert model_map.mock_class.one == 100.0
def test_tuple_arg(self):
mapper = af.ModelMapper()
mapper.mock_profile = MockClassx3TupleFloat
model_map = mapper.instance_from_unit_vector([1.0, 0.0, 0.0])
assert model_map.mock_profile.one_tuple == (1.0, 0.0)
assert model_map.mock_profile.two == 0.0
def test_modify_tuple(self):
mapper = af.ModelMapper()
mapper.mock_profile = MockClassx3TupleFloat
# noinspection PyUnresolvedReferences
mapper.mock_profile.one_tuple.one_tuple_0 = af.UniformPrior(1.0, 10.0)
model_map = mapper.instance_from_unit_vector([1.0, 1.0, 1.0])
assert model_map.mock_profile.one_tuple == (10.0, 2.0)
def test_match_tuple(self):
mapper = af.ModelMapper()
mapper.mock_profile = MockClassx3TupleFloat
# noinspection PyUnresolvedReferences
mapper.mock_profile.one_tuple.one_tuple_1 = (
mapper.mock_profile.one_tuple.one_tuple_0
)
model_map = mapper.instance_from_unit_vector([1.0, 0.0])
assert model_map.mock_profile.one_tuple == (1.0, 1.0)
assert model_map.mock_profile.two == 0.0
| 34.052632
| 104
| 0.681607
|
ae7b6524c30f6d9853980cdfb7f4d398621ae968
| 3,033
|
py
|
Python
|
portality/error_handler.py
|
DOAJ/doaj
|
b11f163c48f51f9e3ada2b02c617b50b847dcb4c
|
[
"Apache-2.0"
] | 47
|
2015-04-24T13:13:39.000Z
|
2022-03-06T03:22:42.000Z
|
portality/error_handler.py
|
DOAJ/doaj
|
b11f163c48f51f9e3ada2b02c617b50b847dcb4c
|
[
"Apache-2.0"
] | 1,215
|
2015-01-02T14:29:38.000Z
|
2022-03-28T14:19:13.000Z
|
portality/error_handler.py
|
DOAJ/doaj
|
b11f163c48f51f9e3ada2b02c617b50b847dcb4c
|
[
"Apache-2.0"
] | 14
|
2015-11-27T13:01:23.000Z
|
2021-05-21T07:57:23.000Z
|
import logging
import logging.handlers
import sys
# just use GMail
class TlsSMTPHandler(logging.handlers.SMTPHandler):
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
import string # for tls add this line
try:
from email.utils import formatdate
except ImportError:
formatdate = self.date_time
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
                    ",".join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
if self.username:
smtp.ehlo() # for tls add this line
smtp.starttls() # for tls add this line
smtp.ehlo() # for tls add this line
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def setup_error_logging(app):
"""
~~ErrorHandler:Feature->Logging:Library~~
~~->Email:ExternalService~~
:param app:
:return:
"""
# Custom logging WILL BE IGNORED by Flask if app.debug == True -
# even if you remove the condition below.
if app.debug:
return
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
send_to = app.config.get('ERROR_LOGGING_EMAIL', app.config.get('ADMIN_EMAIL'))
if send_to and not app.config.get('SUPPRESS_ERROR_EMAILS'):
if 'ERROR_MAIL_USERNAME' in app.config and 'ERROR_MAIL_PASSWORD' in app.config and 'ERROR_MAIL_HOSTNAME' in app.config:
import platform
hostname = platform.uname()[1]
# We have to duplicate our email config here as we can't import app_email at this point
mail_handler = TlsSMTPHandler(
(app.config['ERROR_MAIL_HOSTNAME'], 587),
'server-error@' + hostname,
send_to,
'DOAJ Flask Error',
credentials=(app.config['ERROR_MAIL_USERNAME'], app.config['ERROR_MAIL_PASSWORD'])
)
mail_handler.setLevel(logging.ERROR)
mail_handler.setFormatter(formatter)
app.logger.addHandler(mail_handler)
# send errors to stderr, supervisord will capture them in the app's
# error log
send_errors_to_supervisor = logging.StreamHandler(sys.stderr)
send_errors_to_supervisor.setLevel(logging.ERROR)
send_errors_to_supervisor.setFormatter(formatter)
app.logger.addHandler(send_errors_to_supervisor)
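# Minimal usage sketch (hypothetical Flask app and config values):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config['ADMIN_EMAIL'] = 'sysadmin@example.com'
#     setup_error_logging(app)  # no-op while app.debug is True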
| 36.987805
| 127
| 0.587537
|
078d479d32b51b86c38c8c03c5cc3df6c0943883
| 807
|
py
|
Python
|
tools/data_faker/data_faker/__main__.py
|
esaulkov/rotkehlchen
|
dcbff5436abd93e91b2904bbe1dfbf24b3a2ec22
|
[
"BSD-3-Clause"
] | 1
|
2019-08-04T08:30:14.000Z
|
2019-08-04T08:30:14.000Z
|
tools/data_faker/data_faker/__main__.py
|
esaulkov/rotkehlchen
|
dcbff5436abd93e91b2904bbe1dfbf24b3a2ec22
|
[
"BSD-3-Clause"
] | null | null | null |
tools/data_faker/data_faker/__main__.py
|
esaulkov/rotkehlchen
|
dcbff5436abd93e91b2904bbe1dfbf24b3a2ec22
|
[
"BSD-3-Clause"
] | null | null | null |
from gevent import monkey # isort:skip # noqa
monkey.patch_all() # isort:skip # noqa
import logging
from data_faker.args import data_faker_args
from data_faker.faker import DataFaker
from data_faker.mock_apis.api import APIServer, RestAPI
logger = logging.getLogger(__name__)
def main():
arg_parser = data_faker_args()
args = arg_parser.parse_args()
faker = DataFaker(args)
rest_api = RestAPI(
fake_kraken=faker.fake_kraken,
fake_binance=faker.fake_binance,
)
server = APIServer(rest_api)
print('SERVER IS NOW RUNNING')
# For some reason debug=True throws an exception:
# ModuleNotFoundError: No module named 'data_faker
# server.run(debug=True)
server.run()
print('SERVER IS NOW SHUTTING DOWN')
if __name__ == '__main__':
main()
| 25.21875
| 55
| 0.713755
|
358bffb3ef4fab0ca8f3a8c93c1243090f57d80e
| 425
|
py
|
Python
|
data_tests/saved__backend__py3.9/cython/methods.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88
|
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
data_tests/saved__backend__py3.9/cython/methods.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13
|
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
data_tests/saved__backend__py3.9/cython/methods.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1
|
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
try:
import cython
except ImportError:
from transonic_cl import cython
import numpy as np
def __for_method__Transmitter____call__(self_arr, self_freq, inp):
"""My docstring"""
return (inp * np.exp(np.arange(len(inp)) * self_freq * 1j), self_arr)
__code_new_method__Transmitter____call__ = """
def new_method(self, inp):
return backend_func(self.arr, self.freq, inp)
"""
__transonic__ = ("0.3.3",)
| 19.318182
| 73
| 0.712941
|
e8a1cbf6a996ce0bc56ac2e1ae60a7e189701b4f
| 6,775
|
py
|
Python
|
elevenclock/lang/lang_en.py
|
kinr0k/ElevenClock
|
f52fdc024491e16c266af2710f9012a2dd6927a7
|
[
"Apache-2.0"
] | null | null | null |
elevenclock/lang/lang_en.py
|
kinr0k/ElevenClock
|
f52fdc024491e16c266af2710f9012a2dd6927a7
|
[
"Apache-2.0"
] | null | null | null |
elevenclock/lang/lang_en.py
|
kinr0k/ElevenClock
|
f52fdc024491e16c266af2710f9012a2dd6927a7
|
[
"Apache-2.0"
] | null | null | null |
# INSTRUCTIONS
# Translate the text and write it between the "
# EXAMPLE: original -> "This text is in english: value {0}"
# translation -> "Aquest text està en anglès: valor {0}"
# If you see something like {0} or {1}, keep it in the translated sentence
# Pay special attention to elements like ":", etc.
lang_2_9_2 = {
"Reload log": "",
"Do not show the clock on secondary monitors": "",
"Disable clock taskbar background color (make clock transparent)": "",
"Open the welcome wizard": "",
" (ALPHA STAGE, MAY NOT WORK)": "",
"Welcome to ElevenClock": "",
"Skip": "",
"Start": "",
"Next": "",
"Finish": "",
}
lang_2_9 = lang_2_9_2 | {
"Task Manager": "",
"Change date and time": "",
"Notification settings": "",
"Updates, icon tray, language": "",
"Hide extended options from the clock right-click menu (needs a restart to be aplied)": "Hide extended options from the clock right-click menu (needs a restart to be applied)",
"Fullscreen behaviour, clock position, 1st monitor clock, other miscellanious settings": "",
'Add the "Show Desktop" button on the left corner of every clock': '',
'You might need to set a custom background color for this to work. More info <a href="{0}" style="color:DodgerBlue">HERE</a>': '',
"Clock's font, font size, font color and background, text alignment": "",
"Date format, Time format, seconds,weekday, weeknumber, regional settings": "Date format, Time format, seconds, weekday, weeknumber, regional settings",
"Testing features and error-fixing tools": "",
"Language pack author(s), help translating ElevenClock": "",
"Info, report a bug, submit a feature request, donate, about": "",
"Log, debugging information": "",
}
lang_2_8 = lang_2_9 | {
"Force the clock to be at the top of the screen": "",
"Show the clock on the primary screen": "",
"Use a custom font color": "",
"Use a custom background color": "",
"Align the clock text to the center": "",
"Select custom color": "",
"Hide the clock when a program occupies all screens": "",
}
lang2_7_bis = lang_2_8 | {
"Use a custom font": "",
"Use a custom font size": "",
"Enable hide when multi-monitor fullscreen apps are running": "",
"<b>{0}</b> needs to be enabled to change this setting": "",
"<b>{0}</b> needs to be disabled to change this setting": "",
}
lang2_7 = lang2_7_bis | {
" (This feature has been disabled because it should work by default. If it is not, please report a bug)": "",
"ElevenClock's language": ""
}
lang2_6 = lang2_7 | {
"About Qt6 (PySide6)": "",
"About": "",
"Alternative non-SSL update server (This might help with SSL errors)": "",
"Fixes and other experimental features: (Use ONLY if something is not working)": "",
"Show week number on the clock": ""
}
lang2_5 = lang2_6 | {
"Hide the clock when RDP Client or Citrix Workspace are running": "",
"Clock Appearance:": "",
"Force the clock to have black text": "",
" - It is required that the Dark Text checkbox is disabled": "",
"Debbugging information:": "",
"Open ElevenClock's log": "",
}
lang2_4 = lang2_5 | {
# Added text in version 2.4
"Show the clock on the primary screen (Useful if clock is set on the left)": "",
"Show weekday on the clock" :"",
}
lang2_3 = lang2_4 | {
#Context menu
"ElevenClock Settings" :"", # Also settings title
"Reload Clocks" :"",
"ElevenClock v{0}" :"",
"Restart ElevenClock" :"",
"Hide ElevenClock" :"",
"Quit ElevenClock" :"",
#General settings section
"General Settings:" :"",
"Automatically check for updates" :"",
"Automatically install available updates" :"",
"Enable really silent updates" :"",
"Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)" :"",
"Show ElevenClock on system tray" :"Show ElevenClock icon on system tray",
"Alternative clock alignment (may not work)" :"",
"Change startup behaviour" :"",
"Change" :"",
"<b>Update to the latest version!</b>" :"",
"Install update" :"",
#Clock settings
"Clock Settings:" :"",
"Hide the clock in fullscreen mode" :"",
"Hide the clock when RDP client is active" :"",
"Force the clock to be at the bottom of the screen" :"",
"Show the clock when the taskbar is set to hide automatically" :"",
"Fix the hyphen/dash showing over the month" :"",
"Force the clock to have white text" :"",
"Show the clock at the left of the screen" :"",
#Date & time settings
"Date & Time Settings:" :"",
"Show seconds on the clock" :"",
"Show date on the clock" :"",
"Show time on the clock" :"",
"Change date and time format (Regional settings)" :"",
"Regional settings" :"",
#About the language pack
"About the language pack:" :"",
"Translated to English by martinet101" :"", # Here, make sute to give you some credits: Translated to LANGUAGE by USER/NAME/PSEUDONYM/etc.
"Translate ElevenClock to your language" :"",
"Get started" :"",
#About ElevenClock
"About ElevenClock version {0}:" :"",
"View ElevenClock's homepage" :"",
"Open" :"",
"Report an issue/request a feature" :"",
"Report" :"",
"Support the dev: Give me a coffee☕" :"",
"Open page" :"",
"Icons by Icons8" :"", # Here, the word "Icons8" should not be translated
"Webpage" :"",
"Close settings" :"",
"Close" :"",
}
lang = lang2_3
| 46.40411
| 180
| 0.51203
|
a242d6ec82ab31eedae419a24692260f74b97a16
| 1,779
|
py
|
Python
|
qiskit/__init__.py
|
nonhermitian/arrogant_seahorse
|
2be1ff60857c75fcbbb0c23aa594f41e1a33c89c
|
[
"Apache-2.0"
] | null | null | null |
qiskit/__init__.py
|
nonhermitian/arrogant_seahorse
|
2be1ff60857c75fcbbb0c23aa594f41e1a33c89c
|
[
"Apache-2.0"
] | 1
|
2018-08-08T17:56:06.000Z
|
2018-08-08T17:56:06.000Z
|
qiskit/__init__.py
|
nonhermitian/arrogant_seahorse
|
2be1ff60857c75fcbbb0c23aa594f41e1a33c89c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-order
# pylint: disable=redefined-builtin
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Main QISKit public functionality."""
# First, check for required Python and API version
from . import _util
from ._qiskiterror import QISKitError
from ._classicalregister import ClassicalRegister
from ._quantumregister import QuantumRegister
from ._quantumcircuit import QuantumCircuit
from ._gate import Gate
from ._compositegate import CompositeGate
from ._instruction import Instruction
from ._instructionset import InstructionSet
from ._reset import Reset
from ._measure import Measure
# The qiskit.extensions.x imports needs to be placed here due to the
# mechanism for adding gates dynamically.
import qiskit.extensions.standard
import qiskit.extensions.quantum_initializer
from ._quantumjob import QuantumJob
from ._quantumprogram import QuantumProgram
from ._result import Result
from .wrapper._wrapper import available_backends, execute, register, get_backend, compile
# Import the wrapper, to make it available when doing "import qiskit".
from . import wrapper
__version__ = '0.5.0'
| 35.58
| 89
| 0.761102
|
5ca47cdff5820766b77f95585ce04f0f25497c0c
| 1,732
|
py
|
Python
|
sbsched/sbsched.py
|
eyedol/sbsched
|
7f52c05158b8c0fa334e792c3ae44dc69d69993e
|
[
"Apache-2.0"
] | null | null | null |
sbsched/sbsched.py
|
eyedol/sbsched
|
7f52c05158b8c0fa334e792c3ae44dc69d69993e
|
[
"Apache-2.0"
] | null | null | null |
sbsched/sbsched.py
|
eyedol/sbsched
|
7f52c05158b8c0fa334e792c3ae44dc69d69993e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from bs4 import BeautifulSoup
import urllib
import re
class Showings:
def __init__(self):
        self.movie_title = ''
self.times = []
def __str__(self):
return u'{} \n {}'.format(self.movie_title, self.times)
__repr__ = __str__
class Schedule:
def __init__(self):
        self.cinema = ''
        self.duration = ''
self.showings = []
self.movies = []
def __str__(self):
return u'{} \n {} \n {} \n {}'.format(self.cinema, self.duration, self.showings, self.movies)
class Sbsched:
def __init__(self, cinema):
self.cinema = cinema
def crawl(self):
r = urllib.urlopen('http://silverbirdcinemas.com/'+self.cinema).read()
soup = BeautifulSoup(r, 'html.parser')
# Fetch the movie playing date duration
regex = "views-row-\d+"
pattern = re.compile(regex)
rows = re.findall(pattern,r)
schs = []
sc = Schedule()
sc.cinema = self.cinema
sc.duration = soup.select('div.field-content > p')[0].get_text()
movies = soup.select('div.views-field-title > span.field-content > a')
for movie in movies[:-5]:
sc.movies.append(movie.get_text())
for row in rows[1:-1]:
movies = soup.select('div.'+row+' > div.views-field-title > span.field-content > a')
show_times = soup.select('div.'+row+' > div.views-field > div.field-content > div.showtime_day')
showing = Showings()
showing.movie_title = movies[0].get_text()
for show_time in show_times:
showing.times.append(show_time.get_text())
sc.showings.append(showing)
return sc
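if __name__ == '__main__':
    # Minimal usage sketch: 'accra' is a hypothetical cinema slug and depends
    # on silverbirdcinemas.com's URL scheme.
    print(Sbsched('accra').crawl())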
| 28.866667
| 108
| 0.570439
|
d3a963172d513d73901128c0f5e832a6daf5e645
| 1,791
|
py
|
Python
|
test/skia/libjpeg.py
|
robothn2/pab
|
0fcc93dd55175df23f549860595e443c248e0649
|
[
"MIT"
] | null | null | null |
test/skia/libjpeg.py
|
robothn2/pab
|
0fcc93dd55175df23f549860595e443c248e0649
|
[
"MIT"
] | null | null | null |
test/skia/libjpeg.py
|
robothn2/pab
|
0fcc93dd55175df23f549860595e443c248e0649
|
[
"MIT"
] | null | null | null |
# coding: utf-8
libjpeg_lib = {
'uri': '//third_party/libjpeg',
'type': 'sharedLib',
'source_base_dir': 'third_party/externals/libjpeg-turbo',
'public_include_dirs': [
'.',
],
'defines': [
'TURBO_FOR_WINDOWS',
],
'sources': [
'jaricom.c',
'jcapimin.c',
'jcapistd.c',
'jcarith.c',
'jccoefct.c',
'jccolor.c',
'jcdctmgr.c',
'jchuff.c',
'jcinit.c',
'jcmainct.c',
'jcmarker.c',
'jcmaster.c',
'jcomapi.c',
'jcparam.c',
'jcphuff.c',
'jcprepct.c',
'jcsample.c',
'jdapimin.c',
'jdapistd.c',
'jdarith.c',
'jdcoefct.c',
'jdcolor.c',
'jddctmgr.c',
'jdhuff.c',
'jdinput.c',
'jdmainct.c',
'jdmarker.c',
'jdmaster.c',
'jdmerge.c',
'jdphuff.c',
'jdpostct.c',
'jdsample.c',
'jerror.c',
'jfdctflt.c',
'jfdctfst.c',
'jfdctint.c',
'jidctflt.c',
'jidctfst.c',
'jidctint.c',
'jidctred.c',
'jmemmgr.c',
'jmemnobs.c',
'jquant1.c',
'jquant2.c',
'jutils.c',
],
}
def libjpeg_dyn(lib, ctx):
if 'arm32' in ctx.target_cpu_tags and 'ios' not in ctx.target_os_tags:
lib.sources += [
'simd/jsimd_arm.c',
'simd/jsimd_arm_neon.S',
]
elif 'arm64' in ctx.target_cpu_tags and 'ios' not in ctx.target_os_tags:
lib.sources += [
'simd/jsimd_arm64.c',
'simd/jsimd_arm64_neon.S',
]
else:
lib.sources += [ 'jsimd_none.c' ]
export_libs = [
(libjpeg_lib, libjpeg_dyn),
]
| 22.111111
| 76
| 0.453936
|
4d00f5e191f6f8604af43a5dff381540f591a7f5
| 972
|
py
|
Python
|
xmuda/data/nuscenes/nuscenes_dm.py
|
anhquancao/xmuda-extend
|
4b670ec2f6766e3a624e81dbe5d97b209c1c4f76
|
[
"Apache-2.0"
] | null | null | null |
xmuda/data/nuscenes/nuscenes_dm.py
|
anhquancao/xmuda-extend
|
4b670ec2f6766e3a624e81dbe5d97b209c1c4f76
|
[
"Apache-2.0"
] | null | null | null |
xmuda/data/nuscenes/nuscenes_dm.py
|
anhquancao/xmuda-extend
|
4b670ec2f6766e3a624e81dbe5d97b209c1c4f76
|
[
"Apache-2.0"
] | null | null | null |
from torch.utils.data.dataloader import DataLoader
from xmuda.data.nuscenes.nuscenes_dataset import NuscenesDataset
import pytorch_lightning as pl
from xmuda.data.kitti_360.collate import collate_fn
from xmuda.common.utils.torch_util import worker_init_fn
from torchvision import transforms
class NuscenesDataModule(pl.LightningDataModule):
def __init__(self, root,
batch_size=4, num_workers=3):
super().__init__()
self.root = root
self.batch_size = batch_size
self.num_workers = num_workers
def setup(self, stage=None):
self.val_ds = NuscenesDataset(root=self.root)
def val_dataloader(self):
return DataLoader(
self.val_ds,
batch_size=self.batch_size,
drop_last=False,
num_workers=self.num_workers,
shuffle=False,
pin_memory=True,
worker_init_fn=worker_init_fn,
collate_fn=collate_fn
)
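if __name__ == "__main__":
    # Minimal usage sketch ("/path/to/nuscenes" is a placeholder root).
    dm = NuscenesDataModule(root="/path/to/nuscenes", batch_size=4, num_workers=3)
    dm.setup()
    loader = dm.val_dataloader()
    print(loader)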
| 31.354839
| 64
| 0.677984
|
adff6146ce1ae058371f37f8d3a7e5ad32791d3e
| 6,611
|
py
|
Python
|
_47_scrape.py
|
bansallab/roundup
|
515e082f35d608797fab3943b1292bbf6f34f8d0
|
[
"MIT"
] | null | null | null |
_47_scrape.py
|
bansallab/roundup
|
515e082f35d608797fab3943b1292bbf6f34f8d0
|
[
"MIT"
] | 4
|
2016-05-19T22:06:20.000Z
|
2022-03-30T13:57:40.000Z
|
_47_scrape.py
|
bansallab/roundup
|
515e082f35d608797fab3943b1292bbf6f34f8d0
|
[
"MIT"
] | null | null | null |
import csv
from urllib.request import Request, urlopen
import dateutil.parser
import re
from sys import argv
from bs4 import BeautifulSoup
import scrape_util
default_sale, base_url, prefix = scrape_util.get_market(argv)
report_path = 'Past%20Auction%20Results.htm'
date_pattern = re.compile(r'\d{1,2}/\d{1,2}/\d{2,4}')
head_pattern = re.compile(r'(?P<head>\d+)\s*(hd|head)?\s*sold', re.IGNORECASE)
strip_char = ';,. \n\t'
def get_sale_date(date_string):
"""Return the date of the sale."""
sale_date = dateutil.parser.parse(date_string).date()
return sale_date
# def get_sale_head(footer):
# match = re.search(r'([0-9,]+) *(hd|head)? *sold', footer, re.IGNORECASE)
# if match:
# head = match.group(1).replace(',','')
# else:
# head = None
# return head
def is_sale(this_line):
"""Determine whether a given line describes a sale of cattle."""
word = [td.get_text().strip() for td in this_line.find_all('td') if td.get_text().strip() != '']
is_not_succinct = len(word) > 2
has_price = False
for this_word in word:
if re.search(r'[0-9]+\.[0-9]{2}', this_word):
has_price = True
break
return bool(has_price and is_not_succinct)
def get_sale_location(sale_location):
"""Convert address strings into a list of address components."""
if ',' in sale_location:
sale_location = sale_location.split(',')
else:
match = re.search(r'(.*?)(' + scrape_util.state + ')$', sale_location)
if match:
sale_location = [match.group(1), match.group(2)]
else:
sale_location = [sale_location]
return sale_location
def is_number(string):
"""Test whether a string is number-ish. Ignoring units like 'cwt' and 'hd'."""
if string:
string = re.sub(r'\$|[,-/@]|cwt|he?a?d?', '', string, flags = re.IGNORECASE)
try:
float(string)
result = True
except ValueError:
result = False
else:
result = False
return result
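# e.g. (hypothetical inputs): is_number('1,200') and is_number('85.00 cwt')
# are True, while is_number('Angus') is False.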
def get_sale(word):
"""Convert the input into a dictionary, with keys matching
the CSV column headers in the scrape_util module.
"""
number_word = [idx for idx, val in enumerate(word) if is_number(val)]
sale_location = get_sale_location(word[0])
sale = {
'consignor_city': sale_location.pop(0).strip(strip_char).title(),
}
if sale_location:
sale['consignor_state'] = sale_location.pop().strip(strip_char)
cattle_string = word[1].strip(strip_char)
head_match = re.match(r'([0-9,]+)', cattle_string)
if head_match:
sale['cattle_head'] = head_match.group(1).replace(',','')
cattle_string = cattle_string.replace(head_match.group(),'').strip(strip_char)
if len(word) > 3:
cattle_string = cattle_string + ' ' + word[number_word[-1]-1].strip(strip_char)
sale['cattle_cattle'] = cattle_string
if '@' in word[number_word[-1]]:
weight_string, price_string = word[number_word[-1]].split('@')
key = 'cattle_price_cwt'
weight_string = weight_string.strip(strip_char).replace(',','')
try:
float(weight_string)
sale['cattle_avg_weight'] = weight_string
except ValueError:
pass
else:
price_string = word[number_word[-1]]
key = 'cattle_price'
try:
price_string = price_string.strip(strip_char).replace('$','').replace(',','')
float(price_string)
sale[key] = price_string
except ValueError:
pass
sale = {k:v for k,v in sale.items() if v}
return sale
def write_sale(line, this_default_sale, writer):
"""Extract sales from a list of report lines and write them to a CSV file."""
for this_line in line:
if is_sale(this_line):
sale = this_default_sale.copy()
word = []
for td in this_line.find_all('td'):
if td.get_text().strip() != '':
word.append(td.get_text().replace('\xa0','').strip())
sale.update(get_sale(word))
writer.writerow(sale)
def main():
# Collect individual reports into a list
request = Request(
base_url + report_path,
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
content = soup.find('td', attrs={'class': 'textarea'})
report = content.find_all('table')
# Locate existing CSV files
archive = scrape_util.ArchiveFolder(argv, prefix)
# Write a CSV file for each report not in the archive
for this_report in report:
for sibling in this_report.previous_siblings:
if not hasattr(sibling, 'text'):
continue
match = date_pattern.search(sibling.text)
if match:
break
# if sibling.name == 'h1':
# header = sibling.get_text()
# break
# if not header:
# for sibling in this_report.previous_siblings:
# if hasattr(sibling, 'b'):
# header = sibling.get_text()
# break
sale_date = get_sale_date(match.group(0))
io_name = archive.new_csv(sale_date)
# Stop iteration if this report is already archived
if not io_name:
break
# Initialize the default sale dictionary
this_default_sale = default_sale.copy()
this_default_sale.update({
'sale_year': sale_date.year,
'sale_month': sale_date.month,
'sale_day': sale_date.day,
})
for sibling in this_report.next_siblings:
if not hasattr(sibling, 'text'):
continue
match = head_pattern.search(sibling.text)
if match:
this_default_sale['sale_head'] = match.group('head').replace(',', '')
break
# if sibling.name == 'h1':
# footer = sibling.get_text()
# break
# sale_head = get_sale_head(footer)
# List each line of the report
line = this_report.find_all('tr')
# Open a new CSV file and write each sale
with io_name.open('w', encoding='utf-8') as io:
writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
writer.writeheader()
write_sale(line, this_default_sale, writer)
if __name__ == '__main__':
main()
| 30.187215
| 100
| 0.586144
|
6986ba8ae10943bbe0a19ad1d63111e25988f309
| 499
|
py
|
Python
|
dbml_from_api.py
|
ioatzim/getbigschema
|
7ec9cde9099f6f7a9a45232d598b93f55357397b
|
[
"Apache-2.0"
] | null | null | null |
dbml_from_api.py
|
ioatzim/getbigschema
|
7ec9cde9099f6f7a9a45232d598b93f55357397b
|
[
"Apache-2.0"
] | null | null | null |
dbml_from_api.py
|
ioatzim/getbigschema
|
7ec9cde9099f6f7a9a45232d598b93f55357397b
|
[
"Apache-2.0"
] | null | null | null |
import requests
import os
import json
import datetime
'''
Pulls a dbml file from the API. User must manually add the file id, found in the 'response_ids.json' file generated from dbml_post_to_api.py
'''
url='http://ec2-54-167-67-34.compute-1.amazonaws.com/api/dbmls' #url of the API
id = '6192b1f31c2a512293fea940' #id of the file, taken from 'response_ids.json' file generated from dbml_post_to_api.py
res = requests.get(f'{url}/{id}')
dbml_file = json.loads(res.json()['contents'])
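# Optional follow-up sketch: persist the fetched DBML locally (the filename is
# an arbitrary choice; assumes 'contents' decodes to the DBML text).
#
#     with open('schema.dbml', 'w') as f:
#         f.write(str(dbml_file))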
| 35.642857
| 141
| 0.735471
|
de710cc880e69e072595ed64afdca333e670c5ca
| 15,955
|
py
|
Python
|
encoder-decoder-train_3.py
|
kapitsa2811/uTAB
|
5915c43db521575693181bc040cebce2f10c1158
|
[
"MIT"
] | null | null | null |
encoder-decoder-train_3.py
|
kapitsa2811/uTAB
|
5915c43db521575693181bc040cebce2f10c1158
|
[
"MIT"
] | null | null | null |
encoder-decoder-train_3.py
|
kapitsa2811/uTAB
|
5915c43db521575693181bc040cebce2f10c1158
|
[
"MIT"
] | null | null | null |
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D
from keras.layers import Activation
from keras.layers import MaxPooling2D,UpSampling2D
from keras.layers import Dropout,Dense,Flatten,BatchNormalization
from keras.optimizers import *
from keras.models import load_model
from keras import regularizers
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
import os
import cv2
import numpy as np
import sys
from keras.layers import AveragePooling2D
#average pooling
cwd=os.getcwd()+"//"
oldFiles=os.listdir(cwd+"results//")
for old in oldFiles:
try:
        os.remove(cwd+"results//"+old)
except Exception as e:
        print "\n\t can't delete=",old
pass
'''
this code is modified for new segmentaion
'''
def showImage(name,image):
print "\n\t image=",image.shape
cv2.imshow(name,image)
cv2.waitKey()
'''
angles = range(-2,3)
shifts = [[0,0],[0,1],[1,0],[1,1],[0,2],[2,0],[1,2],[2,1],[2,2],
[0,-1],[-1,0],[-1,-1],[0,-2],[-2,0],[-1,-2],[-2,-1],[-2,-2],
[1,-1],[1,-2],[2,-1],[2,-2],
[-1,1],[-1,2],[-2,1],[-2,2]]
multiplier = len(angles)*len(shifts)
'''
# path_x = cwd+'/newData/X1/' #only hands
# path_y = cwd+'/newData/segment11/' #segmented data
path_x = cwd+'/newData/image/' #only hands
path_y = cwd+'/newData/segment/' #segmented data
total = 0
dump=os.listdir(path_x)
dumpLen=len(dump)
maxImageProcess= 1000#dumpLen
#for pos in range(len(path_x)):
noException=0
blackOnWhite=0
X_train=np.zeros((maxImageProcess,128,128,3))
y_train=np.zeros((maxImageProcess,128,128,3))
for indxImg,img in enumerate(sorted(dump)):
if indxImg %100==0:
print "\n\tindxImg=",indxImg,"\t dumpLen=",dumpLen
    if indxImg>=maxImageProcess:
break
try:
originalIm = cv2.imread(path_x+img)
#print "\n\t indxImg=",indxImg,"\t image shape=",originalIm.shape
segmentedIm = cv2.imread(path_y+img)
#print "\n\t indxImg=",indxImg,"\t image shape=",segmentedIm.shape
X_train[indxImg] = cv2.resize(originalIm, (128, 128)) #originalIm
y_train[indxImg] = cv2.resize(segmentedIm, (128, 128))
'''
for indxAngle,angle in enumerate(angles):
for indxShift,shift in enumerate(shifts):
M = cv2.getRotationMatrix2D((128/2,128/2),angle,1)
shiftM = np.float32([[1,0,shift[0]],[0,1,shift[1]]])
rotatedIm = cv2.warpAffine(originalIm,M,(128,128))
rotatedSegmentedIm = cv2.warpAffine(segmentedIm,M,(128,128))
rotatedShiftedIm = cv2.warpAffine(rotatedIm,shiftM,(128,128))
rotatedSegmentedShiftedIm = cv2.warpAffine(rotatedSegmentedIm,shiftM,(128,128))
X_train[total]=rotatedShiftedIm
y_train[total]=rotatedSegmentedShiftedIm
cv2.imwrite(cwd+"//newData//"+str(indxImg)+"_"+str(indxAngle)+"_"+str(indxShift)+"_shift.jpg",rotatedShiftedIm)
cv2.imwrite(cwd+"//newData//"+str(indxImg)+"_"+str(indxAngle)+"_"+str(indxShift)+"_segment.jpg",rotatedSegmentedShiftedIm)
total+=1
'''
# showImage("train",originalIm)
# showImage("test",segmentedIm)
except Exception as e:
noException+=1
print "\n\t e=",e
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print("\n\t line no", exc_tb.tb_lineno)
#input("check exception")
print "\n\t noException=",noException
tests = os.listdir(cwd+'/newData/test/')#["A-train0101.jpg","A-train0102.jpg","A-train0103.jpg","A-train0104.jpg","A-train0105.jpg"]
noTestImages=len(tests)
print "\n\t noTestImages=",noTestImages
X_test = np.zeros((noTestImages,128,128,3))
X_test1 =[] #np.zeros((noTestImages,512,512,3)) # original images
testException=0
for pos in range(len(tests)):
try:
temp=cv2.imread(cwd+'/newData/test/'+tests[pos])
#print "\n\t test size",temp.shape
#showImage(str(pos),temp)
im = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)
ret2, th2 = cv2.threshold(im, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
if blackOnWhite == 1:
temp = (255 - temp)
X_test[pos] = cv2.resize(temp,(128,128))
X_test1.append(temp)
except Exception as e:
print "\n\t file name =",tests[pos]
testException+=1
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print("\n\t line no in test images=", exc_tb.tb_lineno)
print "\n\t testException=",testException
X_train-=128.0
X_train/=128.0
y_train-=128.0
y_train/=128.0
X_test-=128.0
X_test/=128.0
print "1.X_train shape=",X_train.shape
print "2.y_train shape=",y_train.shape
print "3.X_test shape=",X_test.shape
#
# meen = np.mean(X_train,axis=(0,1,2))
# std = np.std(X_train,axis=(0,1,2))
# X_train-=meen
# X_train/=std
#
# #y_train-=meen
# y_train/=255
#
def createModel():
adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
clf = Sequential()
#avg = AveragePooling2D(pool_size=(2,2))
# clf.add(avg)
clf.add(Convolution2D(filters=64,kernel_size=(5,3),input_shape=(128,128,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
#clf.add(AveragePooling2D(pool_size=(2,2)))
# x = AveragePooling2D(pool_size=(14, 14),
# name='cam_average_pooling')(x)
clf.add(Convolution2D(filters=128,kernel_size=(3,3),padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
#clf.add(AveragePooling2D(pool_size=(2,2)))
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(1,1)))
#clf.add(AveragePooling2D(pool_size=(1,1)))
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
#clf.add(AveragePooling2D(pool_size=(2,2)))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
#clf.add(MaxPooling2D(pool_size=(2,2),, strides=(1,1))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
#writeName = "fusion_" + str(j) + "_" + str(i) + "_" + str(hitIndx) # this is image name
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=128,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=64,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(3, (3, 3), padding='same'))
clf.add(Activation('tanh'))
#clf.compile(optimizer=adam,loss='mse',metrics=['mae'])
clf.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['mae'])
return clf
def createModelOriginal():
adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
clf = Sequential()
clf.add(Convolution2D(filters=64,kernel_size=(3,3),input_shape=(128,128,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2))) # 1
clf.add(Convolution2D(filters=128,kernel_size=(3,3),padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))# 32 2
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(1,1))) # 3
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2))) # 4
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
#writeName = "fusion_" + str(j) + "_" + str(i) + "_" + str(hitIndx) # this is image name
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=128,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=64,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(3, (3, 3), padding='same'))
clf.add(Activation('tanh'))
clf.compile(optimizer=adam,loss='mse',metrics=['mae'])
return clf
def createModel1():
adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
clf = Sequential()
clf.add(Convolution2D(filters=64,kernel_size=(3,3),input_shape=(128,128,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
#clf.add()
'''
clf.add(Convolution2D(filters=128,kernel_size=(7,3),padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
clf.add(Convolution2D(filters=256,kernel_size=(7,5), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(1,1)))
clf.add(Convolution2D(filters=256,kernel_size=(10,10), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
clf.add(Convolution2D(filters=512,kernel_size=(10,5), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
#writeName = "fusion_" + str(j) + "_" + str(i) + "_" + str(hitIndx) # this is image name
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=128,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=64,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
'''
clf.add(Convolution2D(3, (3, 3), padding='same'))
clf.add(Activation('tanh'))
#clf.compile(optimizer=adam,loss='mse',metrics=['mae'])
clf.compile(optimizer=adam,loss='mse',metrics=['mae'])
return clf
#base CV structure
def get_callbacks(filepath, patience=10):
es = EarlyStopping('val_loss', patience=patience, mode="min")
msave = ModelCheckpoint(filepath, save_best_only=True)
return [es, msave]
file_path = cwd+"//models//model_weights.hdf5"
callbacks = get_callbacks(filepath=file_path, patience=40)
clf=createModel()
#clf=createModelOriginal()
model_json=clf.to_json()
with open(cwd+"//modelArch.json", "w") as json_file:
json_file.write(model_json)
print clf.summary()
#keras.callbacks.ModelCheckpoint(cwd+'//models//', monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
clf.fit(X_train,y_train,batch_size=30, epochs=100,validation_split=0.2,callbacks=callbacks)
#clf.save(cwd+'//models//model-10.h5')
sys.stdout.flush()
y_out = clf.predict(X_test)
y_out*=128.0
y_out+=128.0
for y in range(y_out.shape[0]):
h,w=X_test1[y].shape[0],X_test1[y].shape[1]
tmp= cv2.resize(y_out[y], (h, w)) #originalIm
cv2.imwrite(cwd+"//results//"+'y'+str(y)+'t.jpg',X_test1[y])
cv2.imwrite(cwd+"//results//"+'y'+str(y)+'s1gray.jpg',tmp)
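# Editor's sketch (not part of the original script): the pipeline above maps
# pixel values from [0, 255] into roughly [-1, 1] before training and inverts
# that mapping on the predictions, which is why the final layer uses a tanh
# activation. A minimal round trip with a hypothetical batch:
import numpy as np
_demo_batch = np.random.randint(0, 256, size=(2, 128, 128, 3)).astype(np.float64)
_demo_norm = (_demo_batch - 128.0) / 128.0      # forward mapping applied to X/y above
_demo_restored = _demo_norm * 128.0 + 128.0     # inverse mapping applied to y_out above
assert np.allclose(_demo_restored, _demo_batch)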
| 32.964876
| 151
| 0.665497
|
224d35a95c4ab6a3ec550632b432e7c45b42ebbb
| 2,296
|
py
|
Python
|
sklearn_hierarchical_classification/tests/fixtures.py
|
mbaelde/sklearn-hierarchical-classification
|
f2cc1535b043e323a25fe0de5e26c04011dbfcb2
|
[
"Apache-2.0"
] | 1
|
2019-05-06T08:26:19.000Z
|
2019-05-06T08:26:19.000Z
|
sklearn_hierarchical_classification/tests/fixtures.py
|
mbaelde/sklearn-hierarchical-classification
|
f2cc1535b043e323a25fe0de5e26c04011dbfcb2
|
[
"Apache-2.0"
] | null | null | null |
sklearn_hierarchical_classification/tests/fixtures.py
|
mbaelde/sklearn-hierarchical-classification
|
f2cc1535b043e323a25fe0de5e26c04011dbfcb2
|
[
"Apache-2.0"
] | 2
|
2020-01-21T15:43:27.000Z
|
2021-06-19T02:43:59.000Z
|
"""
Unit-test fixtures and factory methods.
"""
from itertools import product
import numpy as np
from networkx import DiGraph, gn_graph, to_dict_of_lists
from sklearn.datasets import load_digits, make_blobs
from sklearn_hierarchical_classification.classifier import HierarchicalClassifier
from sklearn_hierarchical_classification.constants import ROOT
def make_class_hierarchy(n, n_intermediate=None, n_leaf=None):
"""Create a mock class hierarchy for testing purposes.
Parameters
----------
n : int
Number of nodes in the returned graph
n_intermediate : int
Number of intermediate (non-root, non-terminal) nodes in the returned graph
n_leaf : int
Number of leaf (terminal) nodes in the returned graph
Returns
-------
    G : dict-of-lists adjacency representation of the class hierarchy
"""
if n_leaf is None and n_intermediate is None:
# No specific structure specified, use a general purpose graph generator
G = gn_graph(n=n, create_using=DiGraph())
if n_intermediate == 0:
# No intermediate nodes, build a 1-level rooted tree
if n_leaf is None:
n_leaf = n - 1
G = DiGraph(product((ROOT,), range(n_leaf)))
return to_dict_of_lists(G)
def make_digits_dataset(targets=None, as_str=True):
X, y = load_digits(return_X_y=True)
if targets:
ix = np.isin(y, targets)
X, y = X[np.where(ix)], y[np.where(ix)]
if as_str:
# Convert targets (classes) to strings
y = y.astype(str)
return X, y
def make_classifier(base_estimator=None, class_hierarchy=None, **kwargs):
return HierarchicalClassifier(
class_hierarchy=class_hierarchy,
base_estimator=base_estimator,
**kwargs
)
def make_classifier_and_data(
n_classes=10,
n_samples=1000,
n_features=10,
class_hierarchy=None,
**classifier_kwargs
):
X, y = make_blobs(
n_samples=n_samples,
n_features=n_features,
centers=n_classes,
)
class_hierarchy = class_hierarchy or make_class_hierarchy(
n=n_classes+1,
n_intermediate=0,
)
clf = make_classifier(
class_hierarchy=class_hierarchy,
**classifier_kwargs
)
return clf, (X, y)
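# Hedged usage sketch (not part of the original module): assuming the standard
# scikit-learn fit/predict interface on HierarchicalClassifier, the factories
# above can be exercised as below; the sizes are arbitrary.
if __name__ == "__main__":
    clf, (X, y) = make_classifier_and_data(n_classes=5, n_samples=200, n_features=8)
    # class_hierarchy defaults to a 1-level tree: ROOT -> {0, 1, 2, 3, 4}
    clf.fit(X, y)
    print(clf.predict(X[:10]))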
| 24.956522
| 83
| 0.674652
|
e7734257d9cb59926c0bf306176dce287b5015c6
| 7,033
|
py
|
Python
|
Other_samples/Gradient_check/gradient_check.py
|
rustatian/ml_samples
|
688e8b73db62105e62bc8c690f02ae03b4a3abfa
|
[
"MIT"
] | 2
|
2018-03-02T20:59:39.000Z
|
2018-04-20T13:09:01.000Z
|
Other_samples/Gradient_check/gradient_check.py
|
ValeryPiashchynski/Python-Machine-Learning-Samples
|
688e8b73db62105e62bc8c690f02ae03b4a3abfa
|
[
"MIT"
] | null | null | null |
Other_samples/Gradient_check/gradient_check.py
|
ValeryPiashchynski/Python-Machine-Learning-Samples
|
688e8b73db62105e62bc8c690f02ae03b4a3abfa
|
[
"MIT"
] | 1
|
2020-08-30T23:47:43.000Z
|
2020-08-30T23:47:43.000Z
|
import numpy as np
from Other_samples.testCases import *
from Other_samples.Gradient_check.gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, \
gradients_to_vector
def forward_propagation(x, theta):
"""
Implement the linear forward propagation (compute J) presented in Figure 1 (J(theta) = theta * x)
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
Returns:
J -- the value of function J, computed using the formula J(theta) = theta * x
"""
J = theta * x
return J
x, theta = 2, 4
J = forward_propagation(x, theta)
print("J = " + str(J))
def backward_propagation(x, theta):
"""
Computes the derivative of J with respect to theta (see Figure 1).
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
Returns:
dtheta -- the gradient of the cost with respect to theta
"""
dtheta = x
return dtheta
x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print("dtheta = " + str(dtheta))
def gradient_check(x, theta, epsilon=1e-7):
"""
    Implement gradient checking for the 1D example presented in Figure 1.
Arguments:
x -- a real-valued input
theta -- our parameter, a real number as well
epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
Returns:
difference -- difference (2) between the approximated gradient and the backward propagation gradient
"""
thetaplus = theta + epsilon # Step 1
thetaminus = theta - epsilon # Step 2
J_plus = forward_propagation(x, thetaplus) # Step 3
J_minus = forward_propagation(x, thetaminus) # Step 4
gradapprox = (J_plus - J_minus) / (2 * epsilon) # Step 5
    grad = backward_propagation(x, theta)  # analytic gradient, evaluated at theta
numerator = np.linalg.norm(grad - gradapprox) # Step 1'
denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'
difference = numerator / denominator # Step 3'
if difference < 1e-7:
print("The gradient is correct!")
else:
print("The gradient is wrong!")
return difference
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
def forward_propagation_n(X, Y, parameters):
"""
Implements the forward propagation (and computes the cost) presented in Figure 3.
Arguments:
X -- training set for m examples
Y -- labels for m examples
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape (5, 4)
b1 -- bias vector of shape (5, 1)
W2 -- weight matrix of shape (3, 5)
b2 -- bias vector of shape (3, 1)
W3 -- weight matrix of shape (1, 3)
b3 -- bias vector of shape (1, 1)
Returns:
cost -- the cost function (logistic cost for one example)
"""
# retrieve parameters
m = X.shape[1]
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
# Cost
logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
cost = 1. / m * np.sum(logprobs)
cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
return cost, cache
def backward_propagation_n(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input datapoint, of shape (input size, 1)
Y -- true "label"
cache -- cache output from forward_propagation_n()
Returns:
gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables.
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1. / m * np.dot(dZ3, A2.T)
db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1. / m * np.dot(dZ2, A1.T)
db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1. / m * np.dot(dZ1, X.T)
db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
"dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
def gradient_check_n(parameters, gradients, X, Y, epsilon=1e-7):
"""
Checks if backward_propagation_n computes correctly the gradient of the cost output by forward_propagation_n
Arguments:
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
grad -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters.
x -- input datapoint, of shape (input size, 1)
y -- true "label"
epsilon -- tiny shift to the input to compute approximated gradient with formula(1)
Returns:
difference -- difference (2) between the approximated gradient and the backward propagation gradient
"""
# Set-up variables
parameters_values, _ = dictionary_to_vector(parameters)
grad = gradients_to_vector(gradients)
num_parameters = parameters_values.shape[0]
J_plus = np.zeros((num_parameters, 1))
J_minus = np.zeros((num_parameters, 1))
gradapprox = np.zeros((num_parameters, 1))
# Compute gradapprox
for i in range(num_parameters):
thetaplus = np.copy(parameters_values) # Step 1
        thetaplus[i][0] = thetaplus[i][0] + epsilon                                # Step 2
J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus)) # Step 3
thetaminus = np.copy(parameters_values) # Step 1
        thetaminus[i][0] = thetaminus[i][0] - epsilon                              # Step 2
J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus)) # Step 3
gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)
numerator = np.linalg.norm(grad - gradapprox) # Step 1'
denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox) # Step 2'
difference = numerator / denominator # Step 3'
if difference > 1e-7:
print(
"\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
else:
print(
"\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")
return difference
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
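# Editor's sketch (not part of the original file): the same centered-difference
# recipe generalizes to any scalar function of a parameter. The function below
# is chosen purely for illustration.
def numerical_gradient(f, theta, epsilon=1e-7):
    """Centered-difference approximation (f(theta+eps) - f(theta-eps)) / (2*eps)."""
    return (f(theta + epsilon) - f(theta - epsilon)) / (2 * epsilon)
_theta = 3.0
_analytic = 2 * _theta                                   # d/dtheta of theta**2
_approx = numerical_gradient(lambda t: t ** 2, _theta)
_rel_diff = abs(_analytic - _approx) / (abs(_analytic) + abs(_approx))
print("illustrative relative difference = " + str(_rel_diff))   # well below 1e-7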
| 31.119469
| 133
| 0.628039
|
442c7e2118f64c07aaa9b1cb5dc6549d083e7251
| 393
|
py
|
Python
|
dgpolygon/dgpolygon/wsgi.py
|
mariohmol/django-google-polygon
|
9d9448e540a4d100d925d7170425143f126e2174
|
[
"MIT"
] | 1
|
2018-04-28T17:06:23.000Z
|
2018-04-28T17:06:23.000Z
|
dgpolygon/dgpolygon/wsgi.py
|
mariohmol/django-google-polygon
|
9d9448e540a4d100d925d7170425143f126e2174
|
[
"MIT"
] | null | null | null |
dgpolygon/dgpolygon/wsgi.py
|
mariohmol/django-google-polygon
|
9d9448e540a4d100d925d7170425143f126e2174
|
[
"MIT"
] | null | null | null |
"""
WSGI config for dgpolygon project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dgpolygon.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
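# Hedged sketch (not part of the original file): the module-level `application`
# can be handed to any WSGI server; for a quick local check, the standard
# library's wsgiref server works.
if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    make_server("127.0.0.1", 8000, application).serve_forever()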
| 26.2
| 78
| 0.791349
|
8d89969bbd5e89d9376f817e2888952a8889df52
| 16,310
|
py
|
Python
|
v0.5.0/google/cloud_v3.8/resnet-tpuv3-8/code/resnet/model/staging/models/rough/resnet/resnet_model.py
|
myelintek/results
|
11c38436a158c453e3011f8684570f7a55c03330
|
[
"Apache-2.0"
] | 44
|
2018-11-07T18:52:33.000Z
|
2019-07-06T12:48:18.000Z
|
v0.5.0/google/cloud_v3.8/resnet-tpuv3-8/code/resnet/model/staging/models/rough/resnet/resnet_model.py
|
myelintek/results
|
11c38436a158c453e3011f8684570f7a55c03330
|
[
"Apache-2.0"
] | 12
|
2018-12-13T18:04:36.000Z
|
2019-06-14T20:49:33.000Z
|
v0.5.0/google/cloud_v3.8/resnet-tpuv3-8/code/resnet/model/staging/models/rough/resnet/resnet_model.py
|
myelintek/results
|
11c38436a158c453e3011f8684570f7a55c03330
|
[
"Apache-2.0"
] | 44
|
2018-11-09T21:04:52.000Z
|
2019-06-24T07:40:28.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the post-activation form of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from mlperf_compliance import mlperf_log
from mlperf_compliance import resnet_log_helper
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
def batch_norm_relu(inputs, is_training, relu=True, init_zero=False,
data_format='channels_first'):
"""Performs a batch normalization followed by a ReLU.
Args:
inputs: `Tensor` of shape `[batch, channels, ...]`.
is_training: `bool` for whether the model is training.
relu: `bool` if False, omits the ReLU operation.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
Returns:
A normalized `Tensor` with the same `data_format`.
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
if data_format == 'channels_first':
axis = 1
else:
axis = 3
outputs = tf.layers.batch_normalization(
inputs=inputs,
axis=axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
center=True,
scale=True,
training=is_training,
fused=True,
gamma_initializer=gamma_initializer)
if is_training:
resnet_log_helper.log_batch_norm(
input_tensor=inputs,
output_tensor=outputs,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
center=True,
scale=True,
training=is_training)
if relu:
if is_training:
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
outputs = tf.nn.relu(outputs)
return outputs
def fixed_padding(inputs, kernel_size, data_format='channels_first'):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]` or
`[batch, height, width, channels]` depending on `data_format`.
kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`
operations. Should be a positive integer.
data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
Returns:
A padded `Tensor` of the same `data_format` with size either intact
(if `kernel_size == 1`) or padded (if `kernel_size > 1`).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
def conv2d_fixed_padding(inputs,
filters,
kernel_size,
strides,
is_training,
data_format='channels_first'):
"""Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
is_training: `bool` for whether the model is in training.
data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
"""
inputs_for_logging = inputs
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format=data_format)
outputs = tf.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=data_format)
if is_training:
resnet_log_helper.log_conv2d(
input_tensor=inputs_for_logging,
output_tensor=outputs,
stride=strides,
filters=filters,
initializer=mlperf_log.TRUNCATED_NORMAL,
use_bias=False)
return outputs
def residual_block(inputs, filters, is_training, strides,
use_projection=False, data_format='channels_first'):
"""Standard building block for residual networks with BN after convolutions.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
is_training: `bool` for whether the model is in training.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block.
"""
shortcut = inputs
if use_projection:
# Projection shortcut in first layer to match filters and strides
shortcut = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=1,
strides=strides,
is_training=is_training,
data_format=data_format)
shortcut = batch_norm_relu(shortcut, is_training, relu=False,
data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=strides,
is_training=is_training,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=1,
is_training=is_training,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, relu=False, init_zero=True,
data_format=data_format)
return tf.nn.relu(inputs + shortcut)
def bottleneck_block(inputs, filters, is_training, strides,
use_projection=False, data_format='channels_first'):
"""Bottleneck block variant for residual networks with BN after convolutions.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
is_training: `bool` for whether the model is in training.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block.
"""
if is_training:
mlperf_log.resnet_print(
key=mlperf_log.MODEL_HP_BLOCK_TYPE, value=mlperf_log.BOTTLENECK_BLOCK)
resnet_log_helper.log_begin_block(
input_tensor=inputs, block_type=mlperf_log.BOTTLENECK_BLOCK)
shortcut = inputs
if use_projection:
# Projection shortcut only in first block within a group. Bottleneck blocks
# end with 4 times the number of filters.
filters_out = 4 * filters
shortcut = conv2d_fixed_padding(
inputs=inputs,
filters=filters_out,
kernel_size=1,
strides=strides,
is_training=is_training,
data_format=data_format)
shortcut = batch_norm_relu(shortcut, is_training, relu=False,
data_format=data_format)
if is_training:
resnet_log_helper.log_projection(
input_tensor=inputs, output_tensor=shortcut)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=1,
strides=1,
is_training=is_training,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=strides,
is_training=is_training,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=4 * filters,
kernel_size=1,
strides=1,
is_training=is_training,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, relu=False, init_zero=True,
data_format=data_format)
output = tf.nn.relu(inputs + shortcut)
if is_training:
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_SHORTCUT_ADD)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
resnet_log_helper.log_end_block(output_tensor=output)
return output
def block_group(inputs, filters, block_fn, blocks, strides, is_training, name,
data_format='channels_first'):
"""Creates one group of blocks for the ResNet model.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first convolution of the layer.
block_fn: `function` for the block to use within the model
blocks: `int` number of blocks contained in the layer.
strides: `int` stride to use for the first convolution of the layer. If
greater than 1, this layer will downsample the input.
is_training: `bool` for whether the model is training.
name: `str`name for the Tensor output of the block layer.
data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block layer.
"""
# Drop batch size from shape logging.
if is_training:
mlperf_log.resnet_print(
key=mlperf_log.MODEL_HP_INITIAL_SHAPE, value=inputs.shape.as_list()[1:])
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(inputs, filters, is_training, strides,
use_projection=True, data_format=data_format)
for _ in range(1, blocks):
inputs = block_fn(inputs, filters, is_training, 1,
data_format=data_format)
return tf.identity(inputs, name)
def resnet_v1_generator(block_fn, layers, num_classes,
data_format='channels_first'):
"""Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
num_classes: `int` number of possible classes for image classification.
data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
"""
def model(inputs, is_training):
"""Creation of the model graph."""
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=64,
kernel_size=7,
strides=2,
is_training=is_training,
data_format=data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
pooled_inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=data_format)
if is_training:
resnet_log_helper.log_max_pool(input_tensor=inputs,
output_tensor=pooled_inputs)
inputs = tf.identity(pooled_inputs, 'initial_max_pool')
inputs = block_group(
inputs=inputs, filters=64, block_fn=block_fn, blocks=layers[0],
strides=1, is_training=is_training, name='block_group1',
data_format=data_format)
inputs = block_group(
inputs=inputs, filters=128, block_fn=block_fn, blocks=layers[1],
strides=2, is_training=is_training, name='block_group2',
data_format=data_format)
inputs = block_group(
inputs=inputs, filters=256, block_fn=block_fn, blocks=layers[2],
strides=2, is_training=is_training, name='block_group3',
data_format=data_format)
inputs = block_group(
inputs=inputs, filters=512, block_fn=block_fn, blocks=layers[3],
strides=2, is_training=is_training, name='block_group4',
data_format=data_format)
# The activation is 7x7 so this is a global average pool.
# TODO(huangyp): reduce_mean will be faster.
pool_size = (inputs.shape[1], inputs.shape[2])
inputs = tf.layers.average_pooling2d(
inputs=inputs, pool_size=pool_size, strides=1, padding='VALID',
data_format=data_format)
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(
inputs, [-1, 2048 if block_fn is bottleneck_block else 512])
if is_training:
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_DENSE, value=num_classes)
inputs = tf.layers.dense(
inputs=inputs,
units=num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=.01))
inputs = tf.identity(inputs, 'final_dense')
if is_training:
mlperf_log.resnet_print(
key=mlperf_log.MODEL_HP_FINAL_SHAPE, value=inputs.shape.as_list()[1:])
return inputs
model.default_image_size = 224
return model
def resnet_v1(resnet_depth, num_classes, data_format='channels_first'):
"""Returns the ResNet model for a given size and number of output classes."""
model_params = {
18: {'block': residual_block, 'layers': [2, 2, 2, 2]},
34: {'block': residual_block, 'layers': [3, 4, 6, 3]},
50: {'block': bottleneck_block, 'layers': [3, 4, 6, 3]},
101: {'block': bottleneck_block, 'layers': [3, 4, 23, 3]},
152: {'block': bottleneck_block, 'layers': [3, 8, 36, 3]},
200: {'block': bottleneck_block, 'layers': [3, 24, 36, 3]}
}
if resnet_depth not in model_params:
raise ValueError('Not a valid resnet_depth:', resnet_depth)
params = model_params[resnet_depth]
return resnet_v1_generator(
params['block'], params['layers'], num_classes, data_format)
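# Hedged usage sketch (not part of the original file), assuming a TensorFlow 1.x
# runtime and that the mlperf_compliance imports above resolve. With
# is_training=False the compliance-logging branches are skipped.
if __name__ == "__main__":
  # Build a ResNet-50 graph function for 1000 classes using NHWC layout.
  network = resnet_v1(resnet_depth=50, num_classes=1000,
                      data_format='channels_last')
  images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
  logits = network(images, is_training=False)   # Tensor of shape [None, 1000]
  print(logits)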
| 37.580645
| 80
| 0.682526
|
8ce686f71107e9a95f76a5ffce1ae064416bd951
| 425
|
py
|
Python
|
config/appsettings.py
|
ramkrishna70/websvnmanager
|
e5042427bd081696e3d4862d7b0b27f2a5a031ee
|
[
"Apache-2.0"
] | null | null | null |
config/appsettings.py
|
ramkrishna70/websvnmanager
|
e5042427bd081696e3d4862d7b0b27f2a5a031ee
|
[
"Apache-2.0"
] | null | null | null |
config/appsettings.py
|
ramkrishna70/websvnmanager
|
e5042427bd081696e3d4862d7b0b27f2a5a031ee
|
[
"Apache-2.0"
] | null | null | null |
#
# File Name: appsettings.py
#
# Version: 1.0.0
#
# Application Name: websvnmanager
#
# Author: Ram Krishna Kumar
#
# Email: ramkrishna70@live.com
#
# Origin: India
#
# License: Apache License 2.0
#
# This file contains the details about the app settings: the layer-1
# configs, which are loaded globally and used across the entire application run cycle.
# This file is also responsible for providing the running platform details.
#
#
| 21.25
| 80
| 0.731765
|
fd9624b4d8b959e7b9d172c0a1556fca5d77daee
| 1,382
|
py
|
Python
|
python/example_code/sqs/long_polling_existing_queue.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | 3
|
2021-01-19T20:23:17.000Z
|
2021-01-19T21:38:59.000Z
|
python/example_code/sqs/long_polling_existing_queue.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | null | null | null |
python/example_code/sqs/long_polling_existing_queue.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | 2
|
2019-12-27T13:58:00.000Z
|
2020-05-21T18:35:40.000Z
|
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[long_polling_existing_queue.py demonstrates how to set the default number of seconds to wait between retrieving a message.]
# snippet-service:[sqs]
# snippet-keyword:[Amazon Simple Queue Service]
# snippet-keyword:[Python]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[snippet]
# snippet-sourcedate:[2018-08-01]
# snippet-sourceauthor:[jschwarzwalder (AWS)]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Assign this value to an existing queue URL before running the program
queue_url = 'SQS_QUEUE_URL'
# Enable long polling on the queue
sqs = boto3.client('sqs')
sqs.set_queue_attributes(QueueUrl=queue_url,
Attributes={'ReceiveMessageWaitTimeSeconds': '20'})
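# Hedged follow-up (not part of the original snippet): once the queue attribute
# above is set, a plain receive_message call long-polls by default, waiting up
# to 20 seconds for a message instead of returning immediately.
response = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=1)
for message in response.get('Messages', []):
    print(message['Body'])
    # Delete after successful processing so the message is not redelivered.
    sqs.delete_message(QueueUrl=queue_url, ReceiptHandle=message['ReceiptHandle'])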
| 41.878788
| 153
| 0.741679
|
b4392a0dd553d4b5596b0b3a8b847c093070b42c
| 5,694
|
py
|
Python
|
miqssr/conformer_generation/psearch_master/read_input.py
|
dzankov/3D-MIL-QSSR
|
a66dd78412188d43843cb253736af63f9318d8c8
|
[
"MIT"
] | null | null | null |
miqssr/conformer_generation/psearch_master/read_input.py
|
dzankov/3D-MIL-QSSR
|
a66dd78412188d43843cb253736af63f9318d8c8
|
[
"MIT"
] | null | null | null |
miqssr/conformer_generation/psearch_master/read_input.py
|
dzankov/3D-MIL-QSSR
|
a66dd78412188d43843cb253736af63f9318d8c8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# author : Pavel Polishchuk
# date : 13.07.16
# license : BSD-3
#==============================================================================
__author__ = 'pavel'
import os
import sys
import gzip
import pickle
from rdkit import Chem
from rdkit.Chem.PropertyMol import PropertyMol
from io import BytesIO
def __read_pkl(fname):
with open(fname, 'rb') as f:
while True:
try:
yield pickle.load(f)
except EOFError:
break
def __read_sdf(fname, input_format, id_field_name=None, sanitize=True, removeHs=True):
if input_format == 'sdf':
suppl = Chem.SDMolSupplier(fname, sanitize=sanitize, removeHs=removeHs)
elif input_format == 'sdf.gz':
suppl = Chem.ForwardSDMolSupplier(gzip.open(fname), sanitize=sanitize, removeHs=removeHs)
else:
return
for mol in suppl:
if mol is not None:
if id_field_name is not None:
mol_title = mol.GetProp(id_field_name)
else:
if mol.GetProp("_Name"):
mol_title = mol.GetProp("_Name")
else:
mol_title = Chem.MolToSmiles(mol, isomericSmiles=True)
yield PropertyMol(mol), mol_title
def __read_smiles(fname, sanitize=True):
with open(fname) as f:
for line in f:
tmp = line.strip().split(',')
if not tmp:
continue
mol = Chem.MolFromSmiles(tmp[0], sanitize=sanitize)
if mol is not None:
if len(tmp) > 1:
mol_title = tmp[1]
else:
mol_title = Chem.MolToSmiles(mol, isomericSmiles=True)
                act = None
                mol_id = mol_title
                if len(tmp) > 2:
act = tmp[2]
if act.lower() == 'active':
act = 1
elif act.lower() == 'inactive':
act = 0
else:
act = None
if len(tmp) > 3:
mol_id = tmp[3]
else:
mol_id = tmp[1]
yield mol, mol_title, act, mol_id
else:
print('Error mol', line)
def __read_stdin_smiles(sanitize=True):
line = sys.stdin.readline()
while line:
tmp = line.strip().split()
mol = Chem.MolFromSmiles(tmp[0], sanitize=sanitize)
if mol is not None:
if len(tmp) > 1:
mol_title = tmp[1]
else:
mol_title = Chem.MolToSmiles(mol, isomericSmiles=True)
yield mol, mol_title
line = sys.stdin.readline()
def __read_stdin_sdf(sanitize=True, removeHs=True):
molblock = ''
line = sys.stdin.readline()
while line:
molblock += line
if line == '$$$$\n':
mol = [x for x in Chem.ForwardSDMolSupplier(BytesIO(molblock.encode('utf-8')), sanitize=sanitize, removeHs=removeHs)][0]
mol_title = molblock.split('\n', 1)[0]
if not mol_title:
mol_title = Chem.MolToSmiles(mol, isomericSmiles=True)
yield mol, mol_title
molblock = ''
line = sys.stdin.readline()
# def read_input(fname, id_field_name=None, stdin_format=None, sanitize=True):
# if fname is None:
# if stdin_format == 'smi':
# suppl = read_stdin_smiles()
# elif stdin_format == 'sdf':
# suppl = read_stdin_sdf(sanitize=sanitize)
# else:
# raise Exception("Cannot read STDIN. STDIN format should be specified explicitly: smi or sdf.")
# elif fname.lower().endswith('.sdf') or fname.lower().endswith('.sdf.gz'):
# suppl = read_sdf(os.comm_path.abspath(fname), id_field_name=id_field_name, sanitize=sanitize)
# elif fname.lower().endswith('.smi') or fname.lower().endswith('.smiles'):
# suppl = read_smiles(os.comm_path.abspath(fname))
# elif fname.lower().endswith('.pkl'):
# suppl = read_pkl(os.comm_path.abspath(fname))
# else:
# raise Exception("File extension can be only SDF, SMI or SMILES")
# for mol, mol_id in suppl:
# yield mol, mol_id
def read_input(fname, input_format=None, id_field_name=None, sanitize=True, removeHs=True):
"""
fname - is a file name, None if STDIN
input_format - is a format of input data, cannot be None for STDIN
id_field_name - name of the field containing molecule name, if None molecule title will be taken
"""
if input_format is None:
tmp = os.path.basename(fname).split('.')
        if tmp[-1].lower() == 'gz':
input_format = '.'.join(tmp[-2:])
else:
input_format = tmp[-1]
input_format = input_format.lower()
if fname is None: # handle STDIN
if input_format == 'sdf':
suppl = __read_stdin_sdf(sanitize=sanitize, removeHs=removeHs)
elif input_format == 'smi':
suppl = __read_stdin_smiles(sanitize=sanitize)
else:
raise Exception("Input STDIN format '%s' is not supported. It can be only sdf, smi." % input_format)
elif input_format in ("sdf", "sdf.gz"):
suppl = __read_sdf(os.path.abspath(fname), input_format, id_field_name, sanitize, removeHs)
elif input_format == 'pkl':
suppl = __read_pkl(os.path.abspath(fname))
    elif input_format in ('smi',):
suppl = __read_smiles(os.path.abspath(fname), sanitize)
else:
raise Exception("Input file format '%s' is not supported. It can be only sdf, sdf.gz, smi, pkl." % input_format)
for mol_tuple in suppl:
yield mol_tuple
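# Hedged usage sketch (not part of the original module): the file name below is
# hypothetical. Note that sdf/sdf.gz input yields (mol, title) pairs, while smi
# input yields (mol, title, activity, id) tuples.
if __name__ == "__main__":
    for mol, mol_title in read_input('ligands.sdf'):
        print(mol_title, mol.GetNumAtoms())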
| 35.5875
| 132
| 0.561468
|
bdeb501e30684e9f0b7bbdd153ba8fbfb55ed55a
| 7,882
|
bzl
|
Python
|
bazel/envoy_build_system.bzl
|
gumpt/envoy
|
e0ea5302d7966f02fa492321479e96f5dc0a5838
|
[
"Apache-2.0"
] | 59
|
2020-08-26T11:10:08.000Z
|
2022-03-15T01:40:47.000Z
|
bazel/envoy_build_system.bzl
|
gumpt/envoy
|
e0ea5302d7966f02fa492321479e96f5dc0a5838
|
[
"Apache-2.0"
] | 624
|
2020-10-19T12:21:29.000Z
|
2021-05-09T22:47:00.000Z
|
bazel/envoy_build_system.bzl
|
gumpt/envoy
|
e0ea5302d7966f02fa492321479e96f5dc0a5838
|
[
"Apache-2.0"
] | 77
|
2020-10-13T01:34:04.000Z
|
2021-04-14T08:31:39.000Z
|
# The main Envoy bazel file. Load this file for all Envoy-specific build macros
# and rules that you'd like to use in your BUILD files.
load("@rules_foreign_cc//tools/build_defs:cmake.bzl", "cmake_external")
load(":envoy_binary.bzl", _envoy_cc_binary = "envoy_cc_binary")
load(":envoy_internal.bzl", "envoy_external_dep_path")
load(
":envoy_library.bzl",
_envoy_basic_cc_library = "envoy_basic_cc_library",
_envoy_cc_extension = "envoy_cc_extension",
_envoy_cc_library = "envoy_cc_library",
_envoy_cc_posix_library = "envoy_cc_posix_library",
_envoy_cc_win32_library = "envoy_cc_win32_library",
_envoy_include_prefix = "envoy_include_prefix",
_envoy_proto_library = "envoy_proto_library",
)
load(
":envoy_select.bzl",
_envoy_select_boringssl = "envoy_select_boringssl",
_envoy_select_google_grpc = "envoy_select_google_grpc",
_envoy_select_hot_restart = "envoy_select_hot_restart",
_envoy_select_new_codecs_in_integration_tests = "envoy_select_new_codecs_in_integration_tests",
)
load(
":envoy_test.bzl",
_envoy_benchmark_test = "envoy_benchmark_test",
_envoy_cc_benchmark_binary = "envoy_cc_benchmark_binary",
_envoy_cc_fuzz_test = "envoy_cc_fuzz_test",
_envoy_cc_mock = "envoy_cc_mock",
_envoy_cc_test = "envoy_cc_test",
_envoy_cc_test_binary = "envoy_cc_test_binary",
_envoy_cc_test_library = "envoy_cc_test_library",
_envoy_py_test_binary = "envoy_py_test_binary",
_envoy_sh_test = "envoy_sh_test",
)
load(
"@envoy_build_config//:extensions_build_config.bzl",
"EXTENSION_PACKAGE_VISIBILITY",
)
def envoy_package():
native.package(default_visibility = ["//visibility:public"])
def envoy_extension_package():
native.package(default_visibility = EXTENSION_PACKAGE_VISIBILITY)
# A genrule variant that can output a directory. This is useful when doing things like
# generating a fuzz corpus mechanically.
def _envoy_directory_genrule_impl(ctx):
tree = ctx.actions.declare_directory(ctx.attr.name + ".outputs")
ctx.actions.run_shell(
inputs = ctx.files.srcs,
tools = ctx.files.tools,
outputs = [tree],
command = "mkdir -p " + tree.path + " && " + ctx.expand_location(ctx.attr.cmd),
env = {"GENRULE_OUTPUT_DIR": tree.path},
)
return [DefaultInfo(files = depset([tree]))]
envoy_directory_genrule = rule(
implementation = _envoy_directory_genrule_impl,
attrs = {
"srcs": attr.label_list(),
"cmd": attr.string(),
"tools": attr.label_list(),
},
)
# External CMake C++ library targets should be specified with this function. This defaults
# to building the dependencies with ninja
def envoy_cmake_external(
name,
cache_entries = {},
debug_cache_entries = {},
cmake_options = ["-GNinja"],
make_commands = ["ninja -v", "ninja -v install"],
lib_source = "",
postfix_script = "",
static_libraries = [],
copy_pdb = False,
pdb_name = "",
cmake_files_dir = "$BUILD_TMPDIR/CMakeFiles",
generate_crosstool_file = False,
**kwargs):
cache_entries.update({"CMAKE_BUILD_TYPE": "Bazel"})
cache_entries_debug = dict(cache_entries)
cache_entries_debug.update(debug_cache_entries)
pf = ""
if copy_pdb:
# TODO: Add iterator of the first list presented of these options;
# static_libraries[.pdb], pdb_names, name[.pdb] files
if pdb_name == "":
pdb_name = name
copy_command = "cp {cmake_files_dir}/{pdb_name}.dir/{pdb_name}.pdb $INSTALLDIR/lib/{pdb_name}.pdb".format(cmake_files_dir = cmake_files_dir, pdb_name = pdb_name)
if postfix_script != "":
copy_command = copy_command + " && " + postfix_script
pf = select({
"@envoy//bazel:windows_dbg_build": copy_command,
"//conditions:default": postfix_script,
})
else:
pf = postfix_script
cmake_external(
name = name,
cache_entries = select({
"@envoy//bazel:dbg_build": cache_entries_debug,
"//conditions:default": cache_entries,
}),
cmake_options = cmake_options,
# TODO(lizan): Make this always true
generate_crosstool_file = select({
"@envoy//bazel:windows_x86_64": True,
"//conditions:default": generate_crosstool_file,
}),
lib_source = lib_source,
make_commands = make_commands,
postfix_script = pf,
static_libraries = static_libraries,
**kwargs
)
# Used to select a dependency that has different implementations on POSIX vs Windows.
# The platform-specific implementations should be specified with envoy_cc_posix_library
# and envoy_cc_win32_library respectively
def envoy_cc_platform_dep(name):
return select({
"@envoy//bazel:windows_x86_64": [name + "_win32"],
"//conditions:default": [name + "_posix"],
})
# Envoy proto descriptor targets should be specified with this function.
# This is used for testing only.
def envoy_proto_descriptor(name, out, srcs = [], external_deps = []):
input_files = ["$(location " + src + ")" for src in srcs]
include_paths = [".", native.package_name()]
if "api_httpbody_protos" in external_deps:
srcs.append("@com_google_googleapis//google/api:httpbody.proto")
include_paths.append("external/com_google_googleapis")
if "http_api_protos" in external_deps:
srcs.append("@com_google_googleapis//google/api:annotations.proto")
srcs.append("@com_google_googleapis//google/api:http.proto")
include_paths.append("external/com_google_googleapis")
if "well_known_protos" in external_deps:
srcs.append("@com_google_protobuf//:well_known_protos")
include_paths.append("external/com_google_protobuf/src")
options = ["--include_imports"]
options.extend(["-I" + include_path for include_path in include_paths])
options.append("--descriptor_set_out=$@")
cmd = "$(location //external:protoc) " + " ".join(options + input_files)
native.genrule(
name = name,
srcs = srcs,
outs = [out],
cmd = cmd,
tools = ["//external:protoc"],
)
# Dependencies on Google grpc should be wrapped with this function.
def envoy_google_grpc_external_deps():
return envoy_select_google_grpc([envoy_external_dep_path("grpc")])
# Here we create wrappers for each of the public targets within the separate bazel
# files loaded above. This maintains envoy_build_system.bzl as the preferred import
# for BUILD files that need these build macros. Do not use the imports directly
# from the other bzl files (e.g. envoy_select.bzl, envoy_binary.bzl, etc.)
# Select wrappers (from envoy_select.bzl)
envoy_select_boringssl = _envoy_select_boringssl
envoy_select_google_grpc = _envoy_select_google_grpc
envoy_select_hot_restart = _envoy_select_hot_restart
envoy_select_new_codecs_in_integration_tests = _envoy_select_new_codecs_in_integration_tests
# Binary wrappers (from envoy_binary.bzl)
envoy_cc_binary = _envoy_cc_binary
# Library wrappers (from envoy_library.bzl)
envoy_basic_cc_library = _envoy_basic_cc_library
envoy_cc_extension = _envoy_cc_extension
envoy_cc_library = _envoy_cc_library
envoy_cc_posix_library = _envoy_cc_posix_library
envoy_cc_win32_library = _envoy_cc_win32_library
envoy_include_prefix = _envoy_include_prefix
envoy_proto_library = _envoy_proto_library
# Test wrappers (from envoy_test.bzl)
envoy_cc_fuzz_test = _envoy_cc_fuzz_test
envoy_cc_mock = _envoy_cc_mock
envoy_cc_test = _envoy_cc_test
envoy_cc_test_binary = _envoy_cc_test_binary
envoy_cc_test_library = _envoy_cc_test_library
envoy_cc_benchmark_binary = _envoy_cc_benchmark_binary
envoy_benchmark_test = _envoy_benchmark_test
envoy_py_test_binary = _envoy_py_test_binary
envoy_sh_test = _envoy_sh_test
| 38.827586
| 169
| 0.719614
|
3c5e3933e01da44c4aab57c6bfc51b2d75b43ff8
| 306
|
py
|
Python
|
erri/python/lesson_26/tableau_test.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | null | null | null |
erri/python/lesson_26/tableau_test.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | 16
|
2020-05-30T12:38:13.000Z
|
2022-02-19T09:23:31.000Z
|
erri/python/lesson_26/tableau_test.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | null | null | null |
import tableau
def test_séparation_positif_negatif():
input = [0, -1, 2, -2, 1, -3, 5, -9]
t_positif = [0, 2, 1, 5]
t_negatif = [-1, -2, -3, -9]
r_t_positif, r_t_negatif = tableau.séparation_positif_negatif(input)
assert(t_positif == r_t_positif)
assert(t_negatif == r_t_negatif)
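# Editor's sketch (not part of the original test): the tableau module under test
# is not shown here. One possible implementation consistent with the expected
# values above (zero counts as positive, order preserved) would be:
def _séparation_positif_negatif_reference(values):
    positifs = [v for v in values if v >= 0]
    negatifs = [v for v in values if v < 0]
    return positifs, negatifs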
| 27.818182
| 72
| 0.647059
|
92c33ce83cc4ad44298de73ce02e4464a144529a
| 692
|
py
|
Python
|
notescribe/__init__.py
|
SatvikR/notescribe
|
8ac46297407065a920595768410be18da14d6256
|
[
"Apache-2.0"
] | null | null | null |
notescribe/__init__.py
|
SatvikR/notescribe
|
8ac46297407065a920595768410be18da14d6256
|
[
"Apache-2.0"
] | null | null | null |
notescribe/__init__.py
|
SatvikR/notescribe
|
8ac46297407065a920595768410be18da14d6256
|
[
"Apache-2.0"
] | null | null | null |
import json
from flask import Flask
import os.path
UPLOAD_FOLDER = os.path.join('object_storage', 'uploads')
WAV_FOLDER = os.path.join('object_storage', 'wav')
MIDI_FOLDER = os.path.join('object_storage', 'midi')
LILYPOND_FOLDER = os.path.join('object_storage', 'lilypond')
IMAGES_FOLDER = os.path.join('object_storage', 'images')
PDF_FOLDER = os.path.join('object_storage', 'pdf')
JSON_FOLDER = os.path.join('object_storage', 'json')
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
settings = None
with open(os.path.join('settings.json')) as f:
settings = json.load(f)
from notescribe.views import *
| 31.454545
| 64
| 0.726879
|
bf973bee2aca43517cc83bebae28cb43d6604f40
| 2,446
|
py
|
Python
|
examples/test_stream_simplest.py
|
maij/pico-python
|
ce7f580baff069a8fe9137c58404ac8f6224a8f5
|
[
"BSD-2-Clause"
] | null | null | null |
examples/test_stream_simplest.py
|
maij/pico-python
|
ce7f580baff069a8fe9137c58404ac8f6224a8f5
|
[
"BSD-2-Clause"
] | null | null | null |
examples/test_stream_simplest.py
|
maij/pico-python
|
ce7f580baff069a8fe9137c58404ac8f6224a8f5
|
[
"BSD-2-Clause"
] | null | null | null |
from picoscope import ps5000a
import pylab as pl
import numpy as np
from time import sleep
ps=ps5000a.PS5000a()
ps.setChannel("A",enabled=True, coupling="DC", VRange=2)
ps.setChannel("B",enabled=True, coupling="DC", VRange=2)
ps.setNoOfCaptures(1)
ps.memorySegments(1)
sampleRate=3e6
acquisitionTime=0.1 # Not entirely sure what this does in streaming case, but it affects the available sampling intervals
actSampleRate, maxN=ps.setSamplingFrequency(sampleRate, sampleRate*acquisitionTime)
ps.setSimpleTrigger('External', enabled=False)
#ps.quickSetup(chanAParams=dict(coupling="DC", VRange=2),
# chanBParams=dict(coupling="DC", VRange=5),
# nCaps=1,
# sampleRate=10e6, acqTime=0.60,resolution=15,
# triggerParams=dict(trigSrc="External", threshold_V=1.0,
# direction="Rising", delay=0,enabled=False)
# )
saveFileName='savestream.bin'
saveFile=open(saveFileName,'wb')
lastStartIndex=0
totPts=0
import pdb
def streamingReadySimple(handle, nSamples, startIndex, overflow, triggerAt, triggered, autoStop, parameter):
global totPts
totPts+=nSamples
endInd=startIndex+nSamples
valid=data[:,startIndex:endInd]
if valid.size<nSamples: #This is never run, as the picoscope handles the overruns itself (i.e. next call will tell us about the extra data)
nStart=nSamples-valid.size
print('circling back')
valid=np.hstack([valid, data[:,:nStart]])
valid.T.tofile(saveFile)
DSmode=0 #or 4 for downsampling
DSratio=1 # Or the downsample factor
data=ps.allocateDataBuffers(channels=["A", "B"],numSamples=int(1e6), downSampleMode=DSmode)
data=data.squeeze()
try:
ps.runStreaming(bAutoStop=False, downSampleMode=DSmode, downSampleRatio=DSratio)
from time import sleep, time
t0=time()
tElapsed=0
while tElapsed<2:
try:
ps.getStreamingLatestValues(callback=streamingReadySimple)
except OSError as e:
if e.args[0].find('PICO_BUSY')>=0:
print('PICO_BUSY exception, try again shortly')
else:
raise e
tElapsed=time()-t0
finally:
ps.stop()
ps.close()
print('saved {} pts, at approx {} per second'.format(totPts, float(totPts)/tElapsed))
saveFile.close()
dat2=np.fromfile(saveFileName, dtype='i2').reshape(-1,2).T
pl.plot(data.T)
pl.title('current buffer')
pl.figure()
pl.plot(dat2.T)
pl.title('From file')
pl.show()
| 33.054054
| 143
| 0.69583
|
970adc00b75e6a5f0e34300ff5f164b3fe538588
| 1,019
|
py
|
Python
|
healthbuddy_backend/rapidpro/migrations/0005_channel_dailychannelcount.py
|
Asfak06/health-buddy
|
1a40a35a95bc4179a44445ed0c0b9dc32360e0bc
|
[
"MIT"
] | null | null | null |
healthbuddy_backend/rapidpro/migrations/0005_channel_dailychannelcount.py
|
Asfak06/health-buddy
|
1a40a35a95bc4179a44445ed0c0b9dc32360e0bc
|
[
"MIT"
] | null | null | null |
healthbuddy_backend/rapidpro/migrations/0005_channel_dailychannelcount.py
|
Asfak06/health-buddy
|
1a40a35a95bc4179a44445ed0c0b9dc32360e0bc
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-10-05 13:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('rapidpro', '0004_dailygroupcount_group'),
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('uuid', models.CharField(max_length=255, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='DailyChannelCount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.PositiveIntegerField(default=0)),
('day', models.DateTimeField()),
('channel', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='counts', to='rapidpro.Channel')),
],
),
]
| 32.870968
| 138
| 0.585868
|
f9d6d17dc0a621bf69cbbb5c29e51ebc7e8f9323
| 3,702
|
py
|
Python
|
examples/dgcnn_segmentation.py
|
LingxiaoShawn/pytorch_geometric
|
50b7bfc4a59b5b6f7ec547ff862985f3b2e22798
|
[
"MIT"
] | 1
|
2022-02-21T13:23:19.000Z
|
2022-02-21T13:23:19.000Z
|
examples/dgcnn_segmentation.py
|
LingxiaoShawn/pytorch_geometric
|
50b7bfc4a59b5b6f7ec547ff862985f3b2e22798
|
[
"MIT"
] | null | null | null |
examples/dgcnn_segmentation.py
|
LingxiaoShawn/pytorch_geometric
|
50b7bfc4a59b5b6f7ec547ff862985f3b2e22798
|
[
"MIT"
] | null | null | null |
import os.path as osp
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import ShapeNet
from torch_geometric.loader import DataLoader
from torch_geometric.nn import MLP, DynamicEdgeConv
from torch_geometric.utils import intersection_and_union as i_and_u
category = 'Airplane' # Pass in `None` to train on all categories.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ShapeNet')
transform = T.Compose([
T.RandomTranslate(0.01),
T.RandomRotate(15, axis=0),
T.RandomRotate(15, axis=1),
T.RandomRotate(15, axis=2)
])
pre_transform = T.NormalizeScale()
train_dataset = ShapeNet(path, category, split='trainval', transform=transform,
pre_transform=pre_transform)
test_dataset = ShapeNet(path, category, split='test',
pre_transform=pre_transform)
train_loader = DataLoader(train_dataset, batch_size=10, shuffle=True,
num_workers=6)
test_loader = DataLoader(test_dataset, batch_size=10, shuffle=False,
num_workers=6)
class Net(torch.nn.Module):
def __init__(self, out_channels, k=30, aggr='max'):
super().__init__()
self.conv1 = DynamicEdgeConv(MLP([2 * 6, 64, 64]), k, aggr)
self.conv2 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
self.conv3 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
self.mlp = MLP([3 * 64, 1024, 256, 128, out_channels], dropout=0.5,
batch_norm=False)
def forward(self, data):
x, pos, batch = data.x, data.pos, data.batch
x0 = torch.cat([x, pos], dim=-1)
x1 = self.conv1(x0, batch)
x2 = self.conv2(x1, batch)
x3 = self.conv3(x2, batch)
out = self.mlp(torch.cat([x1, x2, x3], dim=1))
return F.log_softmax(out, dim=1)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net(train_dataset.num_classes, k=30).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.8)
def train():
model.train()
total_loss = correct_nodes = total_nodes = 0
for i, data in enumerate(train_loader):
data = data.to(device)
optimizer.zero_grad()
out = model(data)
loss = F.nll_loss(out, data.y)
loss.backward()
optimizer.step()
total_loss += loss.item()
correct_nodes += out.argmax(dim=1).eq(data.y).sum().item()
total_nodes += data.num_nodes
if (i + 1) % 10 == 0:
print(f'[{i+1}/{len(train_loader)}] Loss: {total_loss / 10:.4f} '
f'Train Acc: {correct_nodes / total_nodes:.4f}')
total_loss = correct_nodes = total_nodes = 0
@torch.no_grad()
def test(loader):
model.eval()
y_mask = loader.dataset.y_mask
ious = [[] for _ in range(len(loader.dataset.categories))]
for data in loader:
data = data.to(device)
pred = model(data).argmax(dim=1)
i, u = i_and_u(pred, data.y, loader.dataset.num_classes, data.batch)
iou = i.cpu().to(torch.float) / u.cpu().to(torch.float)
iou[torch.isnan(iou)] = 1
# Find and filter the relevant classes for each category.
for iou, category in zip(iou.unbind(), data.category.unbind()):
ious[category.item()].append(iou[y_mask[category]])
# Compute mean IoU.
ious = [torch.stack(iou).mean(0).mean(0) for iou in ious]
return torch.tensor(ious).mean().item()
for epoch in range(1, 31):
train()
iou = test(test_loader)
print(f'Epoch: {epoch:02d}, Test IoU: {iou:.4f}')
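# A minimal single-sample inference sketch (editor's addition, reusing the
# `model`, `device` and `test_dataset` objects defined above):
#
#     model.eval()
#     with torch.no_grad():
#         sample = test_dataset[0].to(device)
#         # a lone graph still needs a batch vector of zeros
#         sample.batch = torch.zeros(sample.num_nodes, dtype=torch.long,
#                                    device=device)
#         part_labels = model(sample).argmax(dim=1)  # one part id per point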
| 34.598131
| 79
| 0.63074
|
5bc530ba1e773cd280d2ba279df1593e41a734b7
| 16,131
|
py
|
Python
|
site_scons/libdeps.py
|
ibrarahmad/percona-server-mongodb
|
57fd79b5e5aada71bc094d068ebbf5e6df069147
|
[
"Apache-2.0"
] | 1
|
2019-01-16T12:59:12.000Z
|
2019-01-16T12:59:12.000Z
|
site_scons/libdeps.py
|
ibrarahmad/percona-server-mongodb
|
57fd79b5e5aada71bc094d068ebbf5e6df069147
|
[
"Apache-2.0"
] | null | null | null |
site_scons/libdeps.py
|
ibrarahmad/percona-server-mongodb
|
57fd79b5e5aada71bc094d068ebbf5e6df069147
|
[
"Apache-2.0"
] | null | null | null |
"""Extension to SCons providing advanced static library dependency tracking.
These modifications to a build environment, which can be attached to
StaticLibrary and Program builders via a call to setup_environment(env),
cause the build system to track library dependencies through static libraries,
and to add them to the link command executed when building programs.
For example, consider a program 'try' that depends on a lib 'tc', which in
turn uses a symbol from a lib 'tb' which in turn uses a library from 'ta'.
Without this package, the Program declaration for "try" looks like this:
Program('try', ['try.c', 'path/to/${LIBPREFIX}tc${LIBSUFFIX}',
                'path/to/${LIBPREFIX}tb${LIBSUFFIX}',
                'path/to/${LIBPREFIX}ta${LIBSUFFIX}',])
With this library, we can instead write the following
Program('try', ['try.c'], LIBDEPS=['path/to/tc'])
StaticLibrary('tc', ['c.c'], LIBDEPS=['path/to/tb'])
StaticLibrary('tb', ['b.c'], LIBDEPS=['path/to/ta'])
StaticLibrary('ta', ['a.c'])
And the build system will figure out that it needs to link libta.a and libtb.a
when building 'try'.
A StaticLibrary S may also declare programs or libraries, [L1, ...] to be dependent
upon S by setting LIBDEPS_DEPENDENTS=[L1, ...], using the same syntax as is used
for LIBDEPS, except that the libraries and programs will not have LIBPREFIX/LIBSUFFIX
automatically added when missing.
"""
# Copyright (c) 2010, Corensic Inc., All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import SCons.Errors
import SCons.Scanner
import SCons.Util
libdeps_env_var = 'LIBDEPS'
syslibdeps_env_var = 'SYSLIBDEPS'
missing_syslibdep = 'MISSING_LIBDEP_'
class dependency(object):
Public, Private, Interface = range(3)
def __init__(self, value, deptype):
self.target_node = value
self.dependency_type = deptype
def __str__(self):
return str(self.target_node)
dependency_visibility_ignored = {
dependency.Public : dependency.Public,
dependency.Private : dependency.Public,
dependency.Interface : dependency.Public,
}
dependency_visibility_honored = {
dependency.Public : dependency.Public,
dependency.Private : dependency.Private,
dependency.Interface : dependency.Interface,
}
class DependencyCycleError(SCons.Errors.UserError):
"""Exception representing a cycle discovered in library dependencies."""
def __init__(self, first_node ):
super(DependencyCycleError, self).__init__()
self.cycle_nodes = [first_node]
def __str__(self):
return "Library dependency cycle detected: " + " => ".join(str(n) for n in self.cycle_nodes)
def __get_sorted_direct_libdeps(node):
direct_sorted = getattr(node.attributes, "libdeps_direct_sorted", False)
if not direct_sorted:
direct = getattr(node.attributes, 'libdeps_direct', [])
direct_sorted = sorted(direct, key=lambda t: str(t.target_node))
setattr(node.attributes, "libdeps_direct_sorted", direct_sorted)
return direct_sorted
def __get_libdeps(node):
"""Given a SCons Node, return its library dependencies, topologically sorted.
Computes the dependencies if they're not already cached.
"""
cached_var_name = libdeps_env_var + '_cached'
if hasattr(node.attributes, cached_var_name):
return getattr(node.attributes, cached_var_name)
tsorted = []
marked = set()
def visit(n):
if getattr(n.target_node.attributes, 'libdeps_exploring', False):
raise DependencyCycleError(n.target_node)
n.target_node.attributes.libdeps_exploring = True
try:
if n.target_node in marked:
return
try:
for child in __get_sorted_direct_libdeps(n.target_node):
if child.dependency_type != dependency.Private:
visit(child)
marked.add(n.target_node)
tsorted.append(n.target_node)
            except DependencyCycleError as e:
if len(e.cycle_nodes) == 1 or e.cycle_nodes[0] != e.cycle_nodes[-1]:
e.cycle_nodes.insert(0, n.target_node)
raise
finally:
n.target_node.attributes.libdeps_exploring = False
for child in __get_sorted_direct_libdeps(node):
if child.dependency_type != dependency.Interface:
visit(child)
tsorted.reverse()
setattr(node.attributes, cached_var_name, tsorted)
return tsorted
def __get_syslibdeps(node):
""" Given a SCons Node, return its system library dependencies.
    These are the dependencies listed with SYSLIBDEPS, and are linked using -l.
"""
cached_var_name = syslibdeps_env_var + '_cached'
if not hasattr(node.attributes, cached_var_name):
syslibdeps = node.get_env().Flatten(node.get_env().get(syslibdeps_env_var, []))
for lib in __get_libdeps(node):
for syslib in node.get_env().Flatten(lib.get_env().get(syslibdeps_env_var, [])):
if syslib:
if type(syslib) in (str, unicode) and syslib.startswith(missing_syslibdep):
print("Target '%s' depends on the availability of a "
"system provided library for '%s', "
"but no suitable library was found during configuration." %
(str(node), syslib[len(missing_syslibdep):]))
node.get_env().Exit(1)
syslibdeps.append(syslib)
setattr(node.attributes, cached_var_name, syslibdeps)
return getattr(node.attributes, cached_var_name)
def __missing_syslib(name):
return missing_syslibdep + name
def update_scanner(builder):
"""Update the scanner for "builder" to also scan library dependencies."""
old_scanner = builder.target_scanner
if old_scanner:
path_function = old_scanner.path_function
def new_scanner(node, env, path=()):
result = old_scanner.function(node, env, path)
result.extend(__get_libdeps(node))
return result
else:
path_function = None
def new_scanner(node, env, path=()):
return __get_libdeps(node)
builder.target_scanner = SCons.Scanner.Scanner(function=new_scanner,
path_function=path_function)
def get_libdeps(source, target, env, for_signature):
"""Implementation of the special _LIBDEPS environment variable.
Expands to the library dependencies for a target.
"""
target = env.Flatten([target])
return __get_libdeps(target[0])
def get_libdeps_objs(source, target, env, for_signature):
objs = []
for lib in get_libdeps(source, target, env, for_signature):
# This relies on Node.sources being order stable build-to-build.
objs.extend(lib.sources)
return objs
def get_syslibdeps(source, target, env, for_signature):
deps = __get_syslibdeps(target[0])
lib_link_prefix = env.subst('$LIBLINKPREFIX')
lib_link_suffix = env.subst('$LIBLINKSUFFIX')
result = []
for d in deps:
# Elements of syslibdeps are either strings (str or unicode), or they're File objects.
# If they're File objects, they can be passed straight through. If they're strings,
# they're believed to represent library short names, that should be prefixed with -l
        # or the compiler-specific equivalent. I.e., 'm' becomes '-lm', but 'File("m.a")' is passed
# through whole cloth.
if type(d) in (str, unicode):
result.append('%s%s%s' % (lib_link_prefix, d, lib_link_suffix))
else:
result.append(d)
return result
def __append_direct_libdeps(node, prereq_nodes):
# We do not bother to decorate nodes that are not actual Objects
if type(node) == str:
return
if getattr(node.attributes, 'libdeps_direct', None) is None:
node.attributes.libdeps_direct = []
node.attributes.libdeps_direct.extend(prereq_nodes)
def make_libdeps_emitter(dependency_builder, dependency_map=dependency_visibility_ignored):
def libdeps_emitter(target, source, env):
"""SCons emitter that takes values from the LIBDEPS environment variable and
converts them to File node objects, binding correct path information into
those File objects.
Emitters run on a particular "target" node during the initial execution of
the SConscript file, rather than during the later build phase. When they
run, the "env" environment's working directory information is what you
expect it to be -- that is, the working directory is considered to be the
one that contains the SConscript file. This allows specification of
relative paths to LIBDEPS elements.
This emitter also adds LIBSUFFIX and LIBPREFIX appropriately.
NOTE: For purposes of LIBDEPS_DEPENDENTS propagation, only the first member
of the "target" list is made a prerequisite of the elements of LIBDEPS_DEPENDENTS.
"""
lib_builder = env['BUILDERS'][dependency_builder]
lib_node_factory = lib_builder.target_factory or env.File
prog_builder = env['BUILDERS']['Program']
prog_node_factory = prog_builder.target_factory or env.File
prereqs = [dependency(l, dependency_map[dependency.Public]) for l in env.get(libdeps_env_var, []) if l]
prereqs.extend(dependency(l, dependency_map[dependency.Interface]) for l in env.get(libdeps_env_var + '_INTERFACE', []) if l)
prereqs.extend(dependency(l, dependency_map[dependency.Private]) for l in env.get(libdeps_env_var + '_PRIVATE', []) if l)
for prereq in prereqs:
prereqWithIxes = SCons.Util.adjustixes(
prereq.target_node, lib_builder.get_prefix(env), lib_builder.get_suffix(env))
prereq.target_node = lib_node_factory(prereqWithIxes)
for t in target:
# target[0] must be a Node and not a string, or else libdeps will fail to
# work properly.
__append_direct_libdeps(t, prereqs)
for dependent in env.get('LIBDEPS_DEPENDENTS', []):
if dependent is None:
continue
visibility = dependency.Private
if isinstance(dependent, tuple):
visibility = dependent[1]
dependent = dependent[0]
dependentWithIxes = SCons.Util.adjustixes(
dependent, lib_builder.get_prefix(env), lib_builder.get_suffix(env))
dependentNode = lib_node_factory(dependentWithIxes)
__append_direct_libdeps(dependentNode, [dependency(target[0], dependency_map[visibility])])
for dependent in env.get('PROGDEPS_DEPENDENTS', []):
if dependent is None:
continue
visibility = dependency.Public
if isinstance(dependent, tuple):
# TODO: Error here? Non-public PROGDEPS_DEPENDENTS probably are meaningless
visibility = dependent[1]
dependent = dependent[0]
dependentWithIxes = SCons.Util.adjustixes(
dependent, prog_builder.get_prefix(env), prog_builder.get_suffix(env))
dependentNode = prog_node_factory(dependentWithIxes)
__append_direct_libdeps(dependentNode, [dependency(target[0], dependency_map[visibility])])
return target, source
return libdeps_emitter
def expand_libdeps_tags(source, target, env, for_signature):
results = []
for expansion in env.get('LIBDEPS_TAG_EXPANSIONS', []):
results.append(expansion(source, target, env, for_signature))
return results
def setup_environment(env, emitting_shared=False):
"""Set up the given build environment to do LIBDEPS tracking."""
try:
env['_LIBDEPS']
except KeyError:
env['_LIBDEPS'] = '$_LIBDEPS_LIBS'
env['_LIBDEPS_TAGS'] = expand_libdeps_tags
env['_LIBDEPS_GET_LIBS'] = get_libdeps
env['_LIBDEPS_OBJS'] = get_libdeps_objs
env['_SYSLIBDEPS'] = get_syslibdeps
env[libdeps_env_var] = SCons.Util.CLVar()
env[syslibdeps_env_var] = SCons.Util.CLVar()
# We need a way for environments to alter just which libdeps
# emitter they want, without altering the overall program or
# library emitter which may have important effects. The
    # substitution rules for emitters are a little strange, so build
# ourselves a little trampoline to use below so we don't have to
# deal with it.
def make_indirect_emitter(variable):
def indirect_emitter(target, source, env):
return env[variable](target, source, env)
return indirect_emitter
env.Append(
LIBDEPS_LIBEMITTER=make_libdeps_emitter('StaticLibrary'),
LIBEMITTER=make_indirect_emitter('LIBDEPS_LIBEMITTER'),
LIBDEPS_SHAREMITTER=make_libdeps_emitter('SharedArchive'),
SHAREMITTER=make_indirect_emitter('LIBDEPS_SHAREMITTER'),
LIBDEPS_SHLIBEMITTER=make_libdeps_emitter('SharedLibrary', dependency_visibility_honored),
SHLIBEMITTER=make_indirect_emitter('LIBDEPS_SHLIBEMITTER'),
LIBDEPS_PROGEMITTER=make_libdeps_emitter('SharedLibrary' if emitting_shared else 'StaticLibrary'),
PROGEMITTER=make_indirect_emitter('LIBDEPS_PROGEMITTER'),
)
def expand_libdeps_with_extraction_flags(source, target, env, for_signature):
result = []
libs = get_libdeps(source, target, env, for_signature)
for lib in libs:
if 'init-no-global-side-effects' in env.Entry(lib).get_env().get('LIBDEPS_TAGS', []):
result.append(str(lib))
else:
result.extend(env.subst('$LINK_WHOLE_ARCHIVE_LIB_START'
'$TARGET'
'$LINK_WHOLE_ARCHIVE_LIB_END', target=lib).split())
return result
env['_LIBDEPS_LIBS_WITH_TAGS'] = expand_libdeps_with_extraction_flags
env['_LIBDEPS_LIBS'] = ('$LINK_WHOLE_ARCHIVE_START '
'$LINK_LIBGROUP_START '
'$_LIBDEPS_LIBS_WITH_TAGS '
'$LINK_LIBGROUP_END '
'$LINK_WHOLE_ARCHIVE_END')
env.Prepend(_LIBFLAGS='$_LIBDEPS_TAGS $_LIBDEPS $_SYSLIBDEPS ')
for builder_name in ('Program', 'SharedLibrary', 'LoadableModule', 'SharedArchive'):
try:
update_scanner(env['BUILDERS'][builder_name])
except KeyError:
pass
def setup_conftests(conf):
def FindSysLibDep(context, name, libs, **kwargs):
var = "LIBDEPS_" + name.upper() + "_SYSLIBDEP"
kwargs['autoadd'] = False
for lib in libs:
result = context.sconf.CheckLib(lib, **kwargs)
context.did_show_result = 1
if result:
context.env[var] = lib
return context.Result(result)
context.env[var] = __missing_syslib(name)
return context.Result(result)
conf.AddTest('FindSysLibDep', FindSysLibDep)
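# A minimal SConscript usage sketch (editor's addition, based on the module
# docstring above; how the module is imported is an assumption about the
# consuming project):
#
#     import libdeps
#     env = Environment()
#     libdeps.setup_environment(env)
#     env.StaticLibrary('ta', ['a.c'])
#     env.StaticLibrary('tb', ['b.c'], LIBDEPS=['ta'])
#     env.StaticLibrary('tc', ['c.c'], LIBDEPS=['tb'])
#     env.Program('try', ['try.c'], LIBDEPS=['tc'])
#
# With the emitters and scanner installed, linking 'try' pulls in libtc,
# libtb and libta automatically.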
| 40.632242
| 133
| 0.673052
|
555ad1437b4761503d5626e57aa3a9e8eff7ce54
| 1,379
|
py
|
Python
|
enaml/tests/old/components/window.py
|
mmckerns/enaml
|
ebf417b4dce9132bffa038a588ad90436a59d37e
|
[
"BSD-3-Clause"
] | 11
|
2015-01-04T14:29:23.000Z
|
2019-12-25T05:38:37.000Z
|
enaml/tests/old/components/window.py
|
mmckerns/enaml
|
ebf417b4dce9132bffa038a588ad90436a59d37e
|
[
"BSD-3-Clause"
] | 36
|
2015-02-20T00:56:53.000Z
|
2020-12-04T10:02:14.000Z
|
enaml/tests/old/components/window.py
|
mmckerns/enaml
|
ebf417b4dce9132bffa038a588ad90436a59d37e
|
[
"BSD-3-Clause"
] | 3
|
2015-11-19T15:11:37.000Z
|
2019-03-11T23:45:02.000Z
|
#------------------------------------------------------------------------------
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from .enaml_test_case import EnamlTestCase, required_method
class TestWindow(EnamlTestCase):
""" Logic for testing Windows.
"""
def setUp(self):
""" Set up Window tests.
"""
enaml_source = """
enamldef MainView(MainWindow):
name = 'window'
title = 'foo'
"""
self.view = self.parse_and_create(enaml_source)
self.component = self.component_by_name(self.view, 'window')
self.widget = self.component.toolkit_widget
def test_initial_title(self):
""" Test the initial title of a Window.
"""
self.assertEnamlInSync(self.component, 'title', 'foo')
def test_title_changed(self):
""" Change the title of the Window.
"""
self.component.title = 'bar'
self.assertEnamlInSync(self.component, 'title', 'bar')
#--------------------------------------------------------------------------
# Abstract methods
#--------------------------------------------------------------------------
@required_method
def get_title(self, widget):
""" Returns the title from the toolkit widget.
"""
pass
| 27.58
| 79
| 0.474257
|
d522338cecaefcd04df1d16312698f52fa781486
| 758
|
py
|
Python
|
adapters/icasa/__init__.py
|
michahagg/domoticz-zigbee2mqtt-plugin
|
0d891a0bd96ed26547904ae8402a26e684dc8e35
|
[
"MIT"
] | null | null | null |
adapters/icasa/__init__.py
|
michahagg/domoticz-zigbee2mqtt-plugin
|
0d891a0bd96ed26547904ae8402a26e684dc8e35
|
[
"MIT"
] | null | null | null |
adapters/icasa/__init__.py
|
michahagg/domoticz-zigbee2mqtt-plugin
|
0d891a0bd96ed26547904ae8402a26e684dc8e35
|
[
"MIT"
] | null | null | null |
from adapters.dimmable_bulb_adapter import DimmableBulbAdapter
from adapters.dimmable_ct_bulb_adapter import DimmableCtBulbAdapter
from adapters.on_off_switch_adapter import OnOffSwitchAdapter
from adapters.icasa.KPD14S import KPD14S
from adapters.icasa.KPD18S import KPD18S
icasa_adapters = {
'ICZB-IW11D': DimmableBulbAdapter, # iCasa Zigbee 3.0 Dimmer
'ICZB-IW11SW': OnOffSwitchAdapter, # iCasa Zigbee 3.0 Switch
'ICZB-B1FC60/B3FC64/B2FC95/B2FC125': DimmableCtBulbAdapter, # iCasa Zigbee 3.0 Filament Lamp 60/64/95/125 mm
'ICZB-R11D': DimmableBulbAdapter, # iCasa Zigbee AC dimmer
'ICZB-KPD14S': KPD14S, # iCasa Zigbee 3.0 Keypad Pulse 4S
'ICZB-KPD18S': KPD18S, # iCasa Zigbee 3.0 Keypad Pulse 8S
}
| 47.375
| 112
| 0.754617
|
f269c7a43f0409d4fb3d9fb0f9d50266e00df29d
| 3,938
|
py
|
Python
|
rapid7vmconsole/models/resources_role.py
|
kiblik/vm-console-client-python
|
038f6d33e8b2654a558326c6eb87f09ee23e0e22
|
[
"MIT"
] | 61
|
2018-05-17T05:57:09.000Z
|
2022-03-08T13:59:21.000Z
|
rapid7vmconsole/models/resources_role.py
|
kiblik/vm-console-client-python
|
038f6d33e8b2654a558326c6eb87f09ee23e0e22
|
[
"MIT"
] | 33
|
2018-06-26T16:21:14.000Z
|
2022-03-03T20:55:47.000Z
|
rapid7vmconsole/models/resources_role.py
|
kiblik/vm-console-client-python
|
038f6d33e8b2654a558326c6eb87f09ee23e0e22
|
[
"MIT"
] | 43
|
2018-02-24T05:45:53.000Z
|
2022-03-31T22:15:16.000Z
|
# coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ResourcesRole(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'links': 'list[Link]',
'resources': 'list[Role]'
}
attribute_map = {
'links': 'links',
'resources': 'resources'
}
def __init__(self, links=None, resources=None): # noqa: E501
"""ResourcesRole - a model defined in Swagger""" # noqa: E501
self._links = None
self._resources = None
self.discriminator = None
if links is not None:
self.links = links
if resources is not None:
self.resources = resources
@property
def links(self):
"""Gets the links of this ResourcesRole. # noqa: E501
Hypermedia links to corresponding or related resources. # noqa: E501
:return: The links of this ResourcesRole. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourcesRole.
Hypermedia links to corresponding or related resources. # noqa: E501
:param links: The links of this ResourcesRole. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def resources(self):
"""Gets the resources of this ResourcesRole. # noqa: E501
The resources returned. # noqa: E501
:return: The resources of this ResourcesRole. # noqa: E501
:rtype: list[Role]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this ResourcesRole.
The resources returned. # noqa: E501
:param resources: The resources of this ResourcesRole. # noqa: E501
:type: list[Role]
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResourcesRole, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourcesRole):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
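# A minimal usage sketch (editor's addition):
#
#     page = ResourcesRole(links=[], resources=[])
#     page.to_dict()                   # {'links': [], 'resources': []}
#     page == ResourcesRole([], [])    # True -- __eq__ compares __dict__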
| 27.347222
| 80
| 0.562722
|
78593253a18bb7e1163fba63f9306ad4428e5d6f
| 504
|
py
|
Python
|
plotly/validators/scatterpolar/hoverlabel/font/_familysrc.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/scatterpolar/hoverlabel/font/_familysrc.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/scatterpolar/hoverlabel/font/_familysrc.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='familysrc',
parent_name='scatterpolar.hoverlabel.font',
**kwargs
):
super(FamilysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 26.526316
| 68
| 0.615079
|
e2e94cfaacaf24182c6b5059f7854cc8f8f890f9
| 674
|
py
|
Python
|
libs/external_libs/pownce-api-0.2/setup.py
|
google-code-export/django-hotclub
|
d783a5bbcc06816289565f3eae6d99461188ca4a
|
[
"MIT"
] | 3
|
2015-12-25T14:45:36.000Z
|
2016-11-28T09:58:03.000Z
|
libs/external_libs/pownce-api-0.2/setup.py
|
indro/t2c
|
56482ad4aed150f29353e054db2c97b567243bf8
|
[
"MIT"
] | null | null | null |
libs/external_libs/pownce-api-0.2/setup.py
|
indro/t2c
|
56482ad4aed150f29353e054db2c97b567243bf8
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(name='pownce-api',
version='0.2',
description='A Python interface to the Pownce API',
author='James Bennett',
author_email='james@b-list.org',
url='http://code.google.com/p/python-pownce-api/',
py_modules=['pownce'],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'],
)
| 37.444444
| 60
| 0.548961
|
4954b94edb77f7c79dfec897476f87a46520710e
| 1,490
|
py
|
Python
|
profiles_api/views.py
|
stellavoyage/profiles-rest-api
|
5a71484da8a1d327944aeb9a5138d1af4c0e803e
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
stellavoyage/profiles-rest-api
|
5a71484da8a1d327944aeb9a5138d1af4c0e803e
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
stellavoyage/profiles-rest-api
|
5a71484da8a1d327944aeb9a5138d1af4c0e803e
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from profiles_api import serializers
class HelloApiView(APIView):
serializer_class = serializers.HelloSerializer
    def get(self, request, format=None):
        """Return a list of APIView features."""
        an_apiview = [
            'Uses HTTP methods as functions (get, post, patch, put, delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'Hello!', 'an_apiview': an_apiview})
def post(self, request):
"""Create a hello message with our name"""
serializer = self.serializer_class(data=request.data)
        # Validate the incoming data with the serializer class
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message': message})
else:
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
"""Handle updating an object"""
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
"""Handle partial update of object"""
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
"""Delete an object"""
return Response({'method': 'DELETE'})
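# A sketch of the serializer this view expects (editor's addition; the real
# class lives in profiles_api/serializers.py and may differ -- the max_length
# used here is an assumption):
#
#     from rest_framework import serializers
#
#     class HelloSerializer(serializers.Serializer):
#         """Serializes a name field for testing our APIView."""
#         name = serializers.CharField(max_length=10)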
| 31.041667
| 87
| 0.632215
|
b4f5c45728f691a9009aa6f4db544a3670b7d1bd
| 270
|
py
|
Python
|
readthedocs/restapi/urls.py
|
ludia/readthedocs.org
|
636c2bd57b417c4d73657d2517efaf4258dd75c1
|
[
"MIT"
] | 1
|
2021-11-12T23:52:23.000Z
|
2021-11-12T23:52:23.000Z
|
readthedocs/restapi/urls.py
|
titilambert/readthedocs.org
|
774611db90fea94c3ae4d7de4726f010ab01ddab
|
[
"MIT"
] | null | null | null |
readthedocs/restapi/urls.py
|
titilambert/readthedocs.org
|
774611db90fea94c3ae4d7de4726f010ab01ddab
|
[
"MIT"
] | null | null | null |
# Not used currently.
from rest_framework import routers
from .views import ProjectViewSet, NotificationViewSet
router = routers.DefaultRouter()
router.register(r'project', ProjectViewSet)
router.register(r'notification', NotificationViewSet)
urlpatterns = router.urls
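# For reference (editor's addition): DefaultRouter exposes the usual
# list/detail routes for each registered viewset, roughly:
#
#     project/             -> project-list
#     project/{pk}/        -> project-detail
#     notification/        -> notification-list
#     notification/{pk}/   -> notification-detail
#
# plus a browsable API root view at the router's base URL.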
| 30
| 54
| 0.82963
|
00f6264f59187eaea6fe98c7f2200ea5a03db3c9
| 3,880
|
py
|
Python
|
botocore/__init__.py
|
hellysmile/botocore
|
24a793947edef9068d5256e71d181926466832b4
|
[
"Apache-2.0"
] | null | null | null |
botocore/__init__.py
|
hellysmile/botocore
|
24a793947edef9068d5256e71d181926466832b4
|
[
"Apache-2.0"
] | null | null | null |
botocore/__init__.py
|
hellysmile/botocore
|
24a793947edef9068d5256e71d181926466832b4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
__version__ = '1.20.8'
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acronym
# name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
('ExecutePartiQLStatement', '_'): 'execute_partiql_statement',
('ExecutePartiQLStatement', '-'): 'execute-partiql-statement',
('ExecutePartiQLTransaction', '_'): 'execute_partiql_transaction',
('ExecutePartiQLTransaction', '-'): 'execute-partiql-transaction',
('ExecutePartiQLBatch', '_'): 'execute_partiql_batch',
('ExecutePartiQLBatch', '-'): 'execute-partiql-batch',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED(object):
def __copy__(self):
return self
def __deepcopy__(self, memodict):
return self
UNSIGNED = UNSIGNED()
def xform_name(name, sep='_', _xform_cache=_xform_cache):
"""Convert camel case to a "pythonic" name.
If the name contains the ``sep`` character, then it is
returned unchanged.
"""
if sep in name:
# If the sep is in the name, assume that it's already
# transformed and return the string unchanged.
return name
key = (name, sep)
if key not in _xform_cache:
if _special_case_transform.search(name) is not None:
is_special = _special_case_transform.search(name)
matched = is_special.group()
# Replace something like ARNs, ACLs with _arns, _acls.
name = name[:-len(matched)] + sep + matched.lower()
s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower()
_xform_cache[key] = transformed
return _xform_cache[key]
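# A few illustrative transformations (editor's addition; the second one is
# taken from the prepopulated _xform_cache above):
#
#     >>> xform_name('DescribeInstances')
#     'describe_instances'
#     >>> xform_name('ListHITsForQualificationType')
#     'list_hits_for_qualification_type'
#     >>> xform_name('already_snake_case')   # contains the separator
#     'already_snake_case'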
| 39.191919
| 78
| 0.703866
|
f57061bb592444c80bd96fdd8677edffea963485
| 42,801
|
py
|
Python
|
teuthology/openstack/test/test_openstack.py
|
varshar16/teuthology
|
321319b12ea4ff9b63c7655015a3156de2c3b279
|
[
"MIT"
] | 117
|
2015-03-24T17:30:44.000Z
|
2022-03-27T13:29:55.000Z
|
teuthology/openstack/test/test_openstack.py
|
varshar16/teuthology
|
321319b12ea4ff9b63c7655015a3156de2c3b279
|
[
"MIT"
] | 1,014
|
2015-01-05T21:33:17.000Z
|
2022-03-31T13:10:09.000Z
|
teuthology/openstack/test/test_openstack.py
|
varshar16/teuthology
|
321319b12ea4ff9b63c7655015a3156de2c3b279
|
[
"MIT"
] | 237
|
2015-01-04T03:37:42.000Z
|
2022-03-31T16:53:19.000Z
|
#
# Copyright (c) 2015,2016 Red Hat, Inc.
#
# Author: Loic Dachary <loic@dachary.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import argparse
import logging
import os
import pytest
import subprocess
import tempfile
import time
from mock import patch
import teuthology
from teuthology import misc
from teuthology.config import set_config_attr
from teuthology.openstack import TeuthologyOpenStack, OpenStack, OpenStackInstance
from teuthology.openstack import NoFlavorException
import scripts.openstack
class TestOpenStackBase(object):
def setup(self):
OpenStack.token = None
OpenStack.token_expires = None
self.environ = {}
for k in os.environ.keys():
if k.startswith('OS_'):
self.environ[k] = os.environ[k]
def teardown(self):
OpenStack.token = None
OpenStack.token_expires = None
for k in os.environ.keys():
if k.startswith('OS_'):
if k in self.environ:
os.environ[k] = self.environ[k]
else:
del os.environ[k]
class TestOpenStackInstance(TestOpenStackBase):
teuthology_instance = """
{
"OS-EXT-STS:task_state": null,
"addresses": "Ext-Net=167.114.233.32",
"image": "Ubuntu 14.04 (0d315a8d-75e3-418a-80e4-48e62d599627)",
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2015-08-17T12:22:13.000000",
"flavor": "vps-ssd-1 (164fcc7e-7771-414f-a607-b388cb7b7aa0)",
"id": "f3ca32d7-212b-458b-a0d4-57d1085af953",
"security_groups": [
{
"name": "default"
}
],
"user_id": "3a075820e5d24fda96cd340b87fd94e9",
"OS-DCF:diskConfig": "AUTO",
"accessIPv4": "",
"accessIPv6": "",
"progress": 0,
"OS-EXT-STS:power_state": 1,
"project_id": "62cf1be03cec403c8ed8e64df55732ea",
"config_drive": "",
"status": "ACTIVE",
"updated": "2015-11-03T13:48:53Z",
"hostId": "bcdf964b6f724e573c07156ff85b4db1707f6f0969f571cf33e0468d",
"OS-SRV-USG:terminated_at": null,
"key_name": "loic",
"properties": "",
"OS-EXT-AZ:availability_zone": "nova",
"name": "mrdarkdragon",
"created": "2015-08-17T12:21:31Z",
"os-extended-volumes:volumes_attached": [{"id": "627e2631-fbb3-48cd-b801-d29cd2a76f74"}, {"id": "09837649-0881-4ee2-a560-adabefc28764"}, {"id": "44e5175b-6044-40be-885a-c9ddfb6f75bb"}]
}
"""
teuthology_instance_no_addresses = """
{
"OS-EXT-STS:task_state": null,
"addresses": "",
"image": "Ubuntu 14.04 (0d315a8d-75e3-418a-80e4-48e62d599627)",
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2015-08-17T12:22:13.000000",
"flavor": "vps-ssd-1 (164fcc7e-7771-414f-a607-b388cb7b7aa0)",
"id": "f3ca32d7-212b-458b-a0d4-57d1085af953",
"security_groups": [
{
"name": "default"
}
],
"user_id": "3a075820e5d24fda96cd340b87fd94e9",
"OS-DCF:diskConfig": "AUTO",
"accessIPv4": "",
"accessIPv6": "",
"progress": 0,
"OS-EXT-STS:power_state": 1,
"project_id": "62cf1be03cec403c8ed8e64df55732ea",
"config_drive": "",
"status": "ACTIVE",
"updated": "2015-11-03T13:48:53Z",
"hostId": "bcdf964b6f724e573c07156ff85b4db1707f6f0969f571cf33e0468d",
"OS-SRV-USG:terminated_at": null,
"key_name": "loic",
"properties": "",
"OS-EXT-AZ:availability_zone": "nova",
"name": "mrdarkdragon",
"created": "2015-08-17T12:21:31Z",
"os-extended-volumes:volumes_attached": []
}
"""
@classmethod
def setup_class(self):
if 'OS_AUTH_URL' not in os.environ:
pytest.skip('no OS_AUTH_URL environment variable')
def test_init(self):
with patch.multiple(
misc,
sh=lambda cmd: self.teuthology_instance,
):
o = OpenStackInstance('NAME')
assert o['id'] == 'f3ca32d7-212b-458b-a0d4-57d1085af953'
o = OpenStackInstance('NAME', {"id": "OTHER"})
assert o['id'] == "OTHER"
def test_get_created(self):
with patch.multiple(
misc,
sh=lambda cmd: self.teuthology_instance,
):
o = OpenStackInstance('NAME')
assert o.get_created() > 0
def test_exists(self):
with patch.multiple(
misc,
sh=lambda cmd: self.teuthology_instance,
):
o = OpenStackInstance('NAME')
assert o.exists()
def sh_raises(cmd):
raise subprocess.CalledProcessError('FAIL', 'BAD')
with patch.multiple(
misc,
sh=sh_raises,
):
o = OpenStackInstance('NAME')
assert not o.exists()
def test_volumes(self):
with patch.multiple(
misc,
sh=lambda cmd: self.teuthology_instance,
):
o = OpenStackInstance('NAME')
assert len(o.get_volumes()) == 3
def test_get_addresses(self):
answers = [
self.teuthology_instance_no_addresses,
self.teuthology_instance,
]
def sh(self):
return answers.pop(0)
with patch.multiple(
misc,
sh=sh,
):
o = OpenStackInstance('NAME')
assert o.get_addresses() == 'Ext-Net=167.114.233.32'
def test_get_ip_neutron(self):
instance_id = '8e1fd70a-3065-46f8-9c30-84dc028c1834'
ip = '10.10.10.4'
def sh(cmd):
if 'neutron subnet-list' in cmd:
return """
[
{
"ip_version": 6,
"id": "c45b9661-b2ba-4817-9e3a-f8f63bf32989"
},
{
"ip_version": 4,
"id": "e03a3dbc-afc8-4b52-952e-7bf755397b50"
}
]
"""
elif 'neutron port-list' in cmd:
return ("""
[
{
"device_id": "915504ad-368b-4cce-be7c-4f8a83902e28",
"fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"10.10.10.1\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc::1\\"}"
},
{
"device_id": "{instance_id}",
"fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"{ip}\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc:f816:3eff:fe07:76c1\\"}"
},
{
"device_id": "17e4a968-4caa-4cee-8e4b-f950683a02bd",
"fixed_ips": "{\\"subnet_id\\": \\"e03a3dbc-afc8-4b52-952e-7bf755397b50\\", \\"ip_address\\": \\"10.10.10.5\\"}\\n{\\"subnet_id\\": \\"c45b9661-b2ba-4817-9e3a-f8f63bf32989\\", \\"ip_address\\": \\"2607:f298:6050:9afc:f816:3eff:fe9c:37f0\\"}"
}
]
""".replace('{instance_id}', instance_id).
replace('{ip}', ip))
else:
raise Exception("unexpected " + cmd)
with patch.multiple(
misc,
sh=sh,
):
assert ip == OpenStackInstance(
instance_id,
{ 'id': instance_id },
).get_ip_neutron()
class TestOpenStack(TestOpenStackBase):
flavors = """[
{
"Name": "eg-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 800,
"ID": "008f75de-c467-4d15-8f70-79c8fbe19538"
},
{
"Name": "hg-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 1600,
"ID": "0297d7ac-fe6f-4ff1-b6e7-0b8b0908c94f"
},
{
"Name": "win-sp-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "039e31f2-6541-46c8-85cf-7f47fab7ad78"
},
{
"Name": "win-sp-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "0417a0e6-f68a-4b8b-a642-ca5ecb9652f7"
},
{
"Name": "hg-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 800,
"ID": "042aefc6-b713-4a7e-ada5-3ff81daa1960"
},
{
"Name": "win-sp-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "0609290c-ad2a-40f0-8c66-c755dd38fe3f"
},
{
"Name": "win-eg-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 800,
"ID": "0651080f-5d07-44b1-a759-7ea4594b669e"
},
{
"Name": "win-sp-240",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 1600,
"ID": "07885848-8831-486d-8525-91484c09cc7e"
},
{
"Name": "win-hg-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "079aa0a2-5e48-4e58-8205-719bc962736e"
},
{
"Name": "eg-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 1600,
"ID": "090f8b8c-673c-4ab8-9a07-6e54a8776e7b"
},
{
"Name": "win-hg-15-ssd-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "10e10c58-d29f-4ff6-a1fd-085c35a3bd9b"
},
{
"Name": "eg-15-ssd",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "1340a920-0f2f-4c1b-8d74-e2502258da73"
},
{
"Name": "win-eg-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "13e54752-fbd0-47a6-aa93-e5a67dfbc743"
},
{
"Name": "eg-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "15c07a54-2dfb-41d9-aa73-6989fd8cafc2"
},
{
"Name": "win-eg-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "15e0dfcc-10f4-4e70-8ac1-30bc323273e2"
},
{
"Name": "vps-ssd-1",
"RAM": 2000,
"Ephemeral": 0,
"VCPUs": 1,
"Is Public": true,
"Disk": 10,
"ID": "164fcc7e-7771-414f-a607-b388cb7b7aa0"
},
{
"Name": "win-sp-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "169415e1-0979-4527-94fb-638c885bbd8c"
},
{
"Name": "win-hg-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "16f13d5b-be27-4b8b-88da-959d3904d3ba"
},
{
"Name": "win-sp-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "1788102b-ab80-4a0c-b819-541deaca7515"
},
{
"Name": "win-sp-240-flex",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "17bcfa14-135f-442f-9397-a4dc25265560"
},
{
"Name": "win-eg-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "194ca9ba-04af-4d86-ba37-d7da883a7eab"
},
{
"Name": "win-eg-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "19ff8837-4751-4f6c-a82b-290bc53c83c1"
},
{
"Name": "win-eg-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "1aaef5e5-4df9-4462-80d3-701683ab9ff0"
},
{
"Name": "eg-15",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "1cd85b81-5e4d-477a-a127-eb496b1d75de"
},
{
"Name": "hg-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 1600,
"ID": "1f1efedf-ec91-4a42-acd7-f5cf64b02d3c"
},
{
"Name": "hg-15-ssd-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "20347a07-a289-4c07-a645-93cb5e8e2d30"
},
{
"Name": "win-eg-7-ssd",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "20689394-bd77-4f4d-900e-52cc8a86aeb4"
},
{
"Name": "win-sp-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "21104d99-ba7b-47a0-9133-7e884710089b"
},
{
"Name": "win-sp-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "23c21ecc-9ee8-4ad3-bd9f-aa17a3faf84e"
},
{
"Name": "win-hg-15-ssd",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "24e293ed-bc54-4f26-8fb7-7b9457d08e66"
},
{
"Name": "eg-15-ssd-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "25f3534a-89e5-489d-aa8b-63f62e76875b"
},
{
"Name": "win-eg-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "291173f1-ea1d-410b-8045-667361a4addb"
},
{
"Name": "sp-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "2b646463-2efa-428b-94ed-4059923c3636"
},
{
"Name": "win-eg-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "2c74df82-29d2-4b1a-a32c-d5633e7359b4"
},
{
"Name": "win-eg-15-ssd",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "2fe4344f-d701-4bc4-8dcd-6d0b5d83fa13"
},
{
"Name": "sp-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "31487b30-eeb6-472f-a9b6-38ace6587ebc"
},
{
"Name": "win-sp-240-ssd",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "325b602f-ecc4-4444-90bd-5a2cf4e0da53"
},
{
"Name": "win-hg-7",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "377ded36-491f-4ad7-9eb4-876798b2aea9"
},
{
"Name": "sp-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "382f2831-4dba-40c4-bb7a-6fadff71c4db"
},
{
"Name": "hg-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "3c1d6170-0097-4b5c-a3b3-adff1b7a86e0"
},
{
"Name": "hg-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "3c669730-b5cd-4e44-8bd2-bc8d9f984ab2"
},
{
"Name": "sp-240-ssd-flex",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "3d66fea3-26f2-4195-97ab-fdea3b836099"
},
{
"Name": "sp-240-flex",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "40c781f7-d7a7-4b0d-bcca-5304aeabbcd9"
},
{
"Name": "hg-7-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "42730e52-147d-46b8-9546-18e31e5ac8a9"
},
{
"Name": "eg-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "463f30e9-7d7a-4693-944f-142067cf553b"
},
{
"Name": "hg-15-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "534f07c6-91af-44c8-9e62-156360fe8359"
},
{
"Name": "win-sp-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "55533fdf-ad57-4aa7-a2c6-ee31bb94e77b"
},
{
"Name": "win-hg-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "58b24234-3804-4c4f-9eb6-5406a3a13758"
},
{
"Name": "hg-7-ssd-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "596c1276-8e53-40a0-b183-cdd9e9b1907d"
},
{
"Name": "win-hg-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "5c54dc08-28b9-4860-9f24-a2451b2a28ec"
},
{
"Name": "eg-7",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "5e409dbc-3f4b-46e8-a629-a418c8497922"
},
{
"Name": "hg-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "656423ea-0551-48c6-9e0f-ec6e15952029"
},
{
"Name": "hg-15",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "675558ea-04fe-47a2-83de-40be9b2eacd4"
},
{
"Name": "eg-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "68a8e4e1-d291-46e8-a724-fbb1c4b9b051"
},
{
"Name": "hg-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "6ab72807-e0a5-4e9f-bbb9-7cbbf0038b26"
},
{
"Name": "win-hg-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "6e12cae3-0492-483c-aa39-54a0dcaf86dd"
},
{
"Name": "win-hg-7-ssd",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "6ead771c-e8b9-424c-afa0-671280416422"
},
{
"Name": "win-hg-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "70ded741-8f58-4bb9-8cfd-5e838b66b5f3"
},
{
"Name": "win-sp-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "7284d104-a260-421d-8cee-6dc905107b25"
},
{
"Name": "win-eg-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 800,
"ID": "72c0b262-855d-40bb-a3e9-fd989a1bc466"
},
{
"Name": "win-hg-7-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "73961591-c5f1-436f-b641-1a506eddaef4"
},
{
"Name": "sp-240-ssd",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "7568d834-3b16-42ce-a2c1-0654e0781160"
},
{
"Name": "win-eg-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "75f7fe5c-f87a-41d8-a961-a0169d02c268"
},
{
"Name": "eg-7-ssd-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "77e1db73-0b36-4e37-8e47-32c2d2437ca9"
},
{
"Name": "eg-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "78df4e30-98ca-4362-af68-037d958edaf0"
},
{
"Name": "vps-ssd-2",
"RAM": 4000,
"Ephemeral": 0,
"VCPUs": 1,
"Is Public": true,
"Disk": 20,
"ID": "7939cc5c-79b1-45c0-be2d-aa935d92faa1"
},
{
"Name": "sp-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "80d8510a-79cc-4307-8db7-d1965c9e8ddb"
},
{
"Name": "win-hg-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "835e734a-46b6-4cb2-be68-e8678fd71059"
},
{
"Name": "win-eg-7",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "84869b00-b43a-4523-babd-d47d206694e9"
},
{
"Name": "win-eg-7-ssd-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "852308f8-b8bf-44a4-af41-cbc27437b275"
},
{
"Name": "win-sp-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "8be9dc29-3eca-499b-ae2d-e3c99699131a"
},
{
"Name": "win-hg-7-ssd-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "8d704cfd-05b2-4d4a-add2-e2868bcc081f"
},
{
"Name": "eg-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "901f77c2-73f6-4fae-b28a-18b829b55a17"
},
{
"Name": "sp-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "944b92fb-9a0c-406d-bb9f-a1d93cda9f01"
},
{
"Name": "eg-30-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "965472c7-eb54-4d4d-bd6e-01ebb694a631"
},
{
"Name": "sp-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "97824a8c-e683-49a8-a70a-ead64240395c"
},
{
"Name": "hg-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "9831d7f1-3e79-483d-8958-88e3952c7ea2"
},
{
"Name": "eg-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 1600,
"ID": "9e1f13d0-4fcc-4abc-a9e6-9c76d662c92d"
},
{
"Name": "win-eg-30-ssd",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 400,
"ID": "9e6b85fa-6f37-45ce-a3d6-11ab40a28fad"
},
{
"Name": "hg-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "9ed787cc-a0db-400b-8cc1-49b6384a1000"
},
{
"Name": "sp-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "9f3cfdf7-b850-47cc-92be-33aefbfd2b05"
},
{
"Name": "hg-60-ssd-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "a37bdf17-e1b1-41cc-a67f-ed665a120446"
},
{
"Name": "win-hg-120-ssd",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 800,
"ID": "aa753e73-dadb-4528-9c4a-24e36fc41bf4"
},
{
"Name": "win-sp-240-ssd-flex",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 50,
"ID": "abc007b8-cc44-4b6b-9606-fd647b03e101"
},
{
"Name": "sp-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "ac74cb45-d895-47dd-b9cf-c17778033d83"
},
{
"Name": "win-hg-15",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "ae900175-72bd-4fbc-8ab2-4673b468aa5b"
},
{
"Name": "win-eg-15-ssd-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "aeb37dbf-d7c9-4fd7-93f1-f3818e488ede"
},
{
"Name": "hg-7-ssd",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "b1dc776c-b6e3-4a96-b230-850f570db3d5"
},
{
"Name": "sp-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "b24df495-10f3-466e-95ab-26f0f6839a2f"
},
{
"Name": "win-hg-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 1600,
"ID": "b798e44e-bf71-488c-9335-f20bf5976547"
},
{
"Name": "eg-7-ssd",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 100,
"ID": "b94e6623-913d-4147-b2a3-34ccf6fe7a5e"
},
{
"Name": "eg-15-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "bb5fdda8-34ec-40c8-a4e3-308b9e2c9ee2"
},
{
"Name": "win-eg-7-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "c65384f6-4665-461a-a292-2f3f5a016244"
},
{
"Name": "eg-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 800,
"ID": "c678f1a8-6542-4f9d-89af-ffc98715d674"
},
{
"Name": "hg-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "d147a094-b653-41e7-9250-8d4da3044334"
},
{
"Name": "sp-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "d1acf88d-6f55-4c5c-a914-4ecbdbd50d6b"
},
{
"Name": "sp-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "d2d33e8e-58b1-4661-8141-826c47f82166"
},
{
"Name": "hg-120-ssd-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "d7322c37-9881-4a57-9b40-2499fe2e8f42"
},
{
"Name": "win-hg-15-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "daf597ea-fbbc-4c71-a35e-5b41d33ccc6c"
},
{
"Name": "win-hg-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "dcfd834c-3932-47a3-8b4b-cdfeecdfde2c"
},
{
"Name": "win-hg-60",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 1600,
"ID": "def75cbd-a4b1-4f82-9152-90c65df9587b"
},
{
"Name": "eg-30-ssd-flex",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 50,
"ID": "e04c7ad6-a5de-45f5-93c9-f3343bdfe8d1"
},
{
"Name": "vps-ssd-3",
"RAM": 8000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 40,
"ID": "e43d7458-6b82-4a78-a712-3a4dc6748cf4"
},
{
"Name": "win-eg-15-flex",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "e8bd3402-7310-4a0f-8b99-d9212359c957"
},
{
"Name": "win-eg-30",
"RAM": 30000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "ebf7a997-e2f8-42f4-84f7-33a3d53d1af9"
},
{
"Name": "eg-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "ec852ed3-1e42-4c59-abc3-12bcd26abec8"
},
{
"Name": "sp-240",
"RAM": 240000,
"Ephemeral": 0,
"VCPUs": 16,
"Is Public": true,
"Disk": 1600,
"ID": "ed286e2c-769f-4c47-ac52-b8de7a4891f6"
},
{
"Name": "win-sp-60-ssd",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "ed835a73-d9a0-43ee-bd89-999c51d8426d"
},
{
"Name": "win-eg-15",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 400,
"ID": "f06056c1-a2d4-40e7-a7d8-e5bfabada72e"
},
{
"Name": "win-sp-120",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 8,
"Is Public": true,
"Disk": 800,
"ID": "f247dc56-395b-49de-9a62-93ccc4fff4ed"
},
{
"Name": "eg-7-flex",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 50,
"ID": "f476f959-ffa6-46f2-94d8-72293570604d"
},
{
"Name": "sp-60-flex",
"RAM": 60000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 50,
"ID": "f52db47a-315f-49d4-bc5c-67dd118e7ac0"
},
{
"Name": "win-hg-120-flex",
"RAM": 120000,
"Ephemeral": 0,
"VCPUs": 32,
"Is Public": true,
"Disk": 50,
"ID": "f6cb8144-5d98-4057-b44f-46da342fb571"
},
{
"Name": "hg-7",
"RAM": 7000,
"Ephemeral": 0,
"VCPUs": 2,
"Is Public": true,
"Disk": 200,
"ID": "fa3cc551-0358-4170-be64-56ea432b064c"
},
{
"Name": "hg-15-ssd",
"RAM": 15000,
"Ephemeral": 0,
"VCPUs": 4,
"Is Public": true,
"Disk": 200,
"ID": "ff48c2cf-c17f-4682-aaf6-31d66786f808"
}
]"""
@classmethod
def setup_class(self):
if 'OS_AUTH_URL' not in os.environ:
pytest.skip('no OS_AUTH_URL environment variable')
@patch('teuthology.misc.sh')
def test_sorted_flavors(self, m_sh):
o = OpenStack()
select = '^(vps|hg)-.*ssd'
m_sh.return_value = TestOpenStack.flavors
flavors = o.get_sorted_flavors('arch', select)
assert [u'vps-ssd-1',
u'vps-ssd-2',
u'hg-7-ssd-flex',
u'hg-7-ssd',
u'vps-ssd-3',
u'hg-15-ssd-flex',
u'hg-15-ssd',
u'hg-30-ssd-flex',
u'hg-30-ssd',
u'hg-60-ssd-flex',
u'hg-60-ssd',
u'hg-120-ssd-flex',
u'hg-120-ssd',
] == [ f['Name'] for f in flavors ]
m_sh.assert_called_with("openstack --quiet flavor list -f json")
def test_flavor(self):
def get_sorted_flavors(self, arch, select):
return [
{
'Name': 'too_small',
'RAM': 2048,
'Disk': 50,
'VCPUs': 1,
},
]
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
with pytest.raises(NoFlavorException):
hint = { 'ram': 1000, 'disk': 40, 'cpus': 2 }
OpenStack().flavor(hint, 'arch')
flavor = 'good-flavor'
def get_sorted_flavors(self, arch, select):
return [
{
'Name': flavor,
'RAM': 2048,
'Disk': 50,
'VCPUs': 2,
},
]
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
hint = { 'ram': 1000, 'disk': 40, 'cpus': 2 }
assert flavor == OpenStack().flavor(hint, 'arch')
def test_flavor_range(self):
flavors = [
{
'Name': 'too_small',
'RAM': 2048,
'Disk': 50,
'VCPUs': 1,
},
]
def get_sorted_flavors(self, arch, select):
return flavors
min = { 'ram': 1000, 'disk': 40, 'cpus': 2 }
good = { 'ram': 4000, 'disk': 40, 'cpus': 2 }
#
# there are no flavors in the required range
#
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
with pytest.raises(NoFlavorException):
OpenStack().flavor_range(min, good, 'arch')
#
# there is one flavor in the required range
#
flavors.append({
'Name': 'min',
'RAM': 2048,
'Disk': 40,
'VCPUs': 2,
})
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
assert 'min' == OpenStack().flavor_range(min, good, 'arch')
#
# out of the two flavors in the required range, get the bigger one
#
flavors.append({
'Name': 'good',
'RAM': 3000,
'Disk': 40,
'VCPUs': 2,
})
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
assert 'good' == OpenStack().flavor_range(min, good, 'arch')
#
# there is one flavor bigger or equal to good, get this one
#
flavors.append({
'Name': 'best',
'RAM': 4000,
'Disk': 40,
'VCPUs': 2,
})
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
assert 'best' == OpenStack().flavor_range(min, good, 'arch')
#
# there are two flavors bigger or equal to good, get the smallest one
#
flavors.append({
'Name': 'too_big',
'RAM': 30000,
'Disk': 400,
'VCPUs': 20,
})
with patch.multiple(
OpenStack,
get_sorted_flavors=get_sorted_flavors,
):
assert 'best' == OpenStack().flavor_range(min, good, 'arch')
def test_interpret_hints(self):
defaults = {
'machine': {
'ram': 0,
'disk': 0,
'cpus': 0,
},
'volumes': {
'count': 0,
'size': 0,
},
}
expected_disk = 10 # first hint larger than the second
expected_ram = 20 # second hint larger than the first
expected_cpus = 0 # not set, hence zero by default
expected_count = 30 # second hint larger than the first
expected_size = 40 # does not exist in the first hint
hints = [
{
'machine': {
'ram': 2,
'disk': expected_disk,
},
'volumes': {
'count': 9,
'size': expected_size,
},
},
{
'machine': {
'ram': expected_ram,
'disk': 3,
},
'volumes': {
'count': expected_count,
},
},
]
hint = OpenStack().interpret_hints(defaults, hints)
assert hint == {
'machine': {
'ram': expected_ram,
'disk': expected_disk,
'cpus': expected_cpus,
},
'volumes': {
'count': expected_count,
'size': expected_size,
}
}
assert defaults == OpenStack().interpret_hints(defaults, None)
def test_get_provider(self):
auth = os.environ.get('OS_AUTH_URL', None)
os.environ['OS_AUTH_URL'] = 'cloud.ovh.net'
assert OpenStack().get_provider() == 'ovh'
if auth != None:
os.environ['OS_AUTH_URL'] = auth
else:
del os.environ['OS_AUTH_URL']
def test_get_os_url(self):
o = OpenStack()
#
# Only for OVH
#
o.provider = 'something'
assert "" == o.get_os_url("server ")
o.provider = 'ovh'
assert "" == o.get_os_url("unknown ")
type2cmd = {
'compute': ('server', 'flavor'),
'network': ('ip', 'security', 'network'),
'image': ('image',),
'volume': ('volume',),
}
os.environ['OS_REGION_NAME'] = 'REGION'
os.environ['OS_TENANT_ID'] = 'TENANT'
for (type, cmds) in type2cmd.items():
for cmd in cmds:
assert ("//" + type) in o.get_os_url(cmd + " ")
for type in type2cmd.keys():
assert ("//" + type) in o.get_os_url("whatever ", type=type)
@patch('teuthology.misc.sh')
def test_cache_token(self, m_sh):
token = 'TOKEN VALUE'
m_sh.return_value = token
OpenStack.token = None
o = OpenStack()
#
# Only for OVH
#
o.provider = 'something'
assert False == o.cache_token()
o.provider = 'ovh'
#
# Set the environment with the token
#
assert 'OS_TOKEN_VALUE' not in os.environ
assert 'OS_TOKEN_EXPIRES' not in os.environ
assert True == o.cache_token()
m_sh.assert_called_with('openstack -q token issue -c id -f value')
assert token == os.environ['OS_TOKEN_VALUE']
assert token == OpenStack.token
assert time.time() < int(os.environ['OS_TOKEN_EXPIRES'])
assert time.time() < OpenStack.token_expires
#
# Reset after it expires
#
token_expires = int(time.time()) - 2000
OpenStack.token_expires = token_expires
assert True == o.cache_token()
assert time.time() < int(os.environ['OS_TOKEN_EXPIRES'])
assert time.time() < OpenStack.token_expires
@patch('teuthology.misc.sh')
def test_cache_token_from_environment(self, m_sh):
OpenStack.token = None
o = OpenStack()
o.provider = 'ovh'
token = 'TOKEN VALUE'
os.environ['OS_TOKEN_VALUE'] = token
token_expires = int(time.time()) + OpenStack.token_cache_duration
os.environ['OS_TOKEN_EXPIRES'] = str(token_expires)
assert True == o.cache_token()
assert token == OpenStack.token
assert token_expires == OpenStack.token_expires
m_sh.assert_not_called()
@patch('teuthology.misc.sh')
def test_cache_token_expired_environment(self, m_sh):
token = 'TOKEN VALUE'
m_sh.return_value = token
OpenStack.token = None
o = OpenStack()
o.provider = 'ovh'
os.environ['OS_TOKEN_VALUE'] = token
token_expires = int(time.time()) - 2000
os.environ['OS_TOKEN_EXPIRES'] = str(token_expires)
assert True == o.cache_token()
m_sh.assert_called_with('openstack -q token issue -c id -f value')
assert token == os.environ['OS_TOKEN_VALUE']
assert token == OpenStack.token
assert time.time() < int(os.environ['OS_TOKEN_EXPIRES'])
assert time.time() < OpenStack.token_expires
class TestTeuthologyOpenStack(TestOpenStackBase):
@classmethod
def setup_class(self):
if 'OS_AUTH_URL' not in os.environ:
pytest.skip('no OS_AUTH_URL environment variable')
teuthology.log.setLevel(logging.DEBUG)
set_config_attr(argparse.Namespace())
ip = TeuthologyOpenStack.create_floating_ip()
if ip:
ip_id = TeuthologyOpenStack.get_floating_ip_id(ip)
OpenStack().run("ip floating delete " + ip_id)
self.can_create_floating_ips = True
else:
self.can_create_floating_ips = False
def setup(self):
super(TestTeuthologyOpenStack, self).setup()
self.key_filename = tempfile.mktemp()
self.key_name = 'teuthology-test'
self.name = 'teuthology-test'
self.clobber()
misc.sh("""
openstack keypair create {key_name} > {key_filename}
chmod 600 {key_filename}
""".format(key_filename=self.key_filename,
key_name=self.key_name))
self.options = ['--key-name', self.key_name,
'--key-filename', self.key_filename,
'--name', self.name,
'--verbose']
def teardown(self):
super(TestTeuthologyOpenStack, self).teardown()
self.clobber()
os.unlink(self.key_filename)
def clobber(self):
misc.sh("""
openstack server delete {name} --wait || true
openstack keypair delete {key_name} || true
""".format(key_name=self.key_name,
name=self.name))
def test_create(self, caplog):
teuthology_argv = [
'--suite', 'upgrade/hammer',
'--dry-run',
'--ceph', 'master',
'--kernel', 'distro',
'--flavor', 'gcov',
'--distro', 'ubuntu',
'--suite-branch', 'hammer',
'--email', 'loic@dachary.org',
'--num', '10',
'--limit', '23',
'--subset', '1/2',
'--priority', '101',
'--timeout', '234',
'--filter', 'trasher',
'--filter-out', 'erasure-code',
'--throttle', '3',
]
archive_upload = 'user@archive:/tmp'
argv = (self.options +
['--teuthology-git-url', 'TEUTHOLOGY_URL',
'--teuthology-branch', 'TEUTHOLOGY_BRANCH',
'--ceph-workbench-git-url', 'CEPH_WORKBENCH_URL',
'--ceph-workbench-branch', 'CEPH_WORKBENCH_BRANCH',
'--upload',
'--archive-upload', archive_upload] +
teuthology_argv)
args = scripts.openstack.parse_args(argv)
teuthology_argv.extend([
'--archive-upload', archive_upload,
'--archive-upload-url', args.archive_upload_url,
])
teuthology = TeuthologyOpenStack(args, None, argv)
teuthology.user_data = 'teuthology/openstack/test/user-data-test1.txt'
teuthology.teuthology_suite = 'echo --'
teuthology.main()
assert 0 == teuthology.ssh("lsb_release -a")
assert 0 == teuthology.ssh("grep 'substituded variables' /var/log/cloud-init.log")
l = caplog.text
assert 'Ubuntu 14.04' in l
assert "nworkers=" + str(args.simultaneous_jobs) in l
assert "username=" + teuthology.username in l
assert "upload=--archive-upload user@archive:/tmp" in l
assert ("ceph_workbench="
" --ceph-workbench-branch CEPH_WORKBENCH_BRANCH"
" --ceph-workbench-git-url CEPH_WORKBENCH_URL") in l
assert "clone=git clone -b TEUTHOLOGY_BRANCH TEUTHOLOGY_URL" in l
assert os.environ['OS_AUTH_URL'] in l
assert " ".join(teuthology_argv) in l
if self.can_create_floating_ips:
ip = teuthology.get_floating_ip(self.name)
teuthology.teardown()
if self.can_create_floating_ips:
assert teuthology.get_floating_ip_id(ip) == None
def test_floating_ip(self):
if not self.can_create_floating_ips:
pytest.skip('unable to create floating ips')
expected = TeuthologyOpenStack.create_floating_ip()
ip = TeuthologyOpenStack.get_unassociated_floating_ip()
assert expected == ip
ip_id = TeuthologyOpenStack.get_floating_ip_id(ip)
OpenStack().run("ip floating delete " + ip_id)
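# --- Illustrative sketch, not part of the original test module ---
# The flavor tests above only pin down observable behaviour: flavor() appears to walk
# the list returned by get_sorted_flavors() and pick the first entry whose RAM, Disk
# and VCPUs all satisfy the hint, raising NoFlavorException otherwise. The helper
# below is a hypothetical re-implementation of that policy for illustration only;
# it is not the teuthology code itself.
def pick_first_matching_flavor(sorted_flavors, hint):
    """Return the name of the first flavor satisfying the hint, or None."""
    for f in sorted_flavors:
        if (f['RAM'] >= hint['ram'] and
                f['Disk'] >= hint['disk'] and
                f['VCPUs'] >= hint['cpus']):
            return f['Name']
    return None
if __name__ == '__main__':
    demo_flavors = [
        {'Name': 'too_small', 'RAM': 2048, 'Disk': 50, 'VCPUs': 1},
        {'Name': 'good-flavor', 'RAM': 2048, 'Disk': 50, 'VCPUs': 2},
    ]
    assert pick_first_matching_flavor(demo_flavors, {'ram': 1000, 'disk': 40, 'cpus': 2}) == 'good-flavor'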
| 25.236439
| 245
| 0.52403
|
9d1a3eb100ebe21888b4df5c30796a2bd48f0ec4
| 2,679
|
py
|
Python
|
commands/servercheck.py
|
nstra111/autovc
|
e73e1fea7b566721c3dce3ca6f587472e7ee9d1b
|
[
"MIT"
] | 177
|
2020-02-02T18:03:46.000Z
|
2022-03-17T06:18:43.000Z
|
commands/servercheck.py
|
zigsphere/Auto-Voice-Channels
|
6ae901728580bef4246737a6f1b9f10763badd3e
|
[
"MIT"
] | 82
|
2020-02-02T17:43:18.000Z
|
2022-03-24T20:34:55.000Z
|
commands/servercheck.py
|
zigsphere/Auto-Voice-Channels
|
6ae901728580bef4246737a6f1b9f10763badd3e
|
[
"MIT"
] | 165
|
2019-02-17T20:15:20.000Z
|
2022-03-27T23:59:23.000Z
|
import cfg
import discord
import functions as func
from commands.base import Cmd
help_text = [
[
("Usage:", "<PREFIX><COMMAND>"),
("Description:",
"Get information about this server, such as the voice channels I know about and the Patreon status."),
]
]
def permission_checks(channel, me):
r = ""
perms = me.permissions_in(channel)
if not perms.manage_channels:
r += " `❌ Manage Channels`"
if not perms.read_messages:
r += " `❌ Read Text Channels & See Voice Channels`"
if not perms.connect:
r += " `❌ Connect`"
if not perms.move_members:
r += " `❌ Move members`"
if r:
r = "\t Permission issues:" + r
return r
async def execute(ctx, params):
guild = ctx['guild']
settings = ctx['settings']
r = "Name: **{}** \tID: `{}`\n".format(func.esc_md(guild.name), guild.id)
members = [m for m in guild.members if not m.bot]
num_members = len(members)
percent_members_online = len([m for m in members if m.status != discord.Status.offline]) / num_members * 100
r += "**{}** non-bot members, {}% currently online\n".format(num_members, round(percent_members_online))
r += "Gold features active: **{}**\n".format("Yes" if func.is_gold(guild) else "No")
r += "Sapphire features active: {}\n".format(
("**Yes** +private bot" if cfg.SAPPHIRE_ID is not None else "**Yes**") if func.is_sapphire(guild) else "**No**"
)
r += "\n**Known Channels:**\n"
for p in settings['auto_channels']:
pc = guild.get_channel(p)
if pc:
r += "{} (`{}`)".format(func.esc_md(pc.name), pc.id)
if pc.category:
r += " in category \"{}\"".format(func.esc_md(pc.category.name))
r += permission_checks(pc, guild.me)
secondaries = settings['auto_channels'][p]['secondaries']
r += "\t {} sub-channel{}".format(len(secondaries), "s" if len(secondaries) != 1 else "")
r += "\n"
for s, v in secondaries.items():
sc = guild.get_channel(s)
scc = guild.get_member(v['creator'])
if sc:
r += "\t ⮡ \t\"{}\" (`{}`)\t Created by: \"{}\" (\"{}\", `{}`){}\n".format(
func.esc_md(sc.name), sc.id,
func.esc_md(scc.display_name), func.user_hash(scc), scc.id,
permission_checks(sc, guild.me)
)
r = r.replace('➕', '+') # Make the default plus sign more visible
return True, r
command = Cmd(
execute=execute,
help_text=help_text,
params_required=0,
admin_required=True,
)
| 35.72
| 119
| 0.552072
|
abc8c749917737c7b6d41407dfd6e0e43c1027d5
| 3,235
|
py
|
Python
|
todo/api_1_0/auth.py
|
l769829723/todo
|
7c2da38996d244709e0b7a2041e1e973f6b2743b
|
[
"MIT"
] | null | null | null |
todo/api_1_0/auth.py
|
l769829723/todo
|
7c2da38996d244709e0b7a2041e1e973f6b2743b
|
[
"MIT"
] | null | null | null |
todo/api_1_0/auth.py
|
l769829723/todo
|
7c2da38996d244709e0b7a2041e1e973f6b2743b
|
[
"MIT"
] | null | null | null |
from flask import jsonify
from flask_restful import Resource
from flask_restful import reqparse
from flask_restful import fields
from flask_restful import marshal
from flask_restful import abort
from flask import request
from flask_jwt_extended import create_access_token, jwt_required, get_jwt_identity, fresh_jwt_required
from todo.api_1_0 import api
from todo.models import User
class Login(Resource):
def post(self):
token_fields = dict(
token=fields.String
)
parser = reqparse.RequestParser()
parser.add_argument(
'username',
type=str,
location='json',
required=True,
help='Specified your user name.'
)
parser.add_argument(
'password',
type=str,
location='json',
required=True,
help='Specified your user password.'
)
args = parser.parse_args()
user = User.query.filter_by(email=args.get('username')).first()
if user and user.verify_password(args.get('password')):
return jsonify(marshal(
dict(token=create_access_token(user.id)),
token_fields
))
abort(401)
api.add_url_rule('/login/', view_func=Login.as_view('login'))
class Verify(Resource):
method_decorators = [jwt_required]
def get(self):
return jsonify({'token': 'invalid'}), 200
api.add_url_rule('/login/verify/', view_func=Verify.as_view('verify'))
class UserInfo(Resource):
method_decorators = [jwt_required]
user_field = dict(
username=fields.String,
email=fields.String
)
parser = reqparse.RequestParser()
parser.add_argument(
'username',
type=str,
location='json',
required=True,
help='Specified your user name.'
)
parser.add_argument(
'old_password',
type=str,
location='json',
required=True,
help='Specified your old password.'
)
parser.add_argument(
'new_password',
type=str,
location='json',
required=True,
help='Specified your new password.'
)
def get(self):
current_user = self.get_object()
return jsonify(dict(
username=current_user.username,
email=current_user.email
)), 200
def post(self):
args = self.parser.parse_args()
old_password = args.get('old_password')
current_user = self.get_object()
verify_result = current_user.verify_password(old_password)
if verify_result:
username = args.get('username')
password = args.get('new_password')
current_user.username = username
current_user.set_password(password)
current_user.save()
return jsonify({'message': 'Your profile has updated.'})
return jsonify(
{'message': 'Identity invalid, please check your profile.'}
), 401
def get_object(self):
current_user = get_jwt_identity()
user = User.query.get(current_user)
return user
api.add_url_rule('/login/me/', view_func=UserInfo.as_view('user_info'))
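# --- Illustrative sketch, not part of the original module ---
# A minimal client for the endpoints registered above. The base URL, the URL prefix
# and the credentials are assumptions for illustration; flask_jwt_extended is assumed
# to use its default "Authorization: Bearer <token>" header scheme.
def demo_login_flow(base='http://localhost:5000/api/1.0'):
    import requests
    creds = {'username': 'user@example.com', 'password': 'secret'}
    token = requests.post(base + '/login/', json=creds).json()['token']
    headers = {'Authorization': 'Bearer ' + token}
    return requests.get(base + '/login/me/', headers=headers).json()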
| 27.415254
| 102
| 0.611747
|
e60b377d9d3aa19aee17bcddf0fd58f95eb3bae5
| 11,959
|
py
|
Python
|
MLP/lib/Optimizer_xrh.py
|
Xinrihui/Statistical-Learning-Method
|
2e1c107bed8a21307596fbe0f8f1daed638657c5
|
[
"Apache-2.0"
] | 2
|
2021-06-25T06:42:14.000Z
|
2021-11-07T13:44:29.000Z
|
MLP/lib/Optimizer_xrh.py
|
Xinrihui/Statistical-Learning-Method
|
2e1c107bed8a21307596fbe0f8f1daed638657c5
|
[
"Apache-2.0"
] | null | null | null |
MLP/lib/Optimizer_xrh.py
|
Xinrihui/Statistical-Learning-Method
|
2e1c107bed8a21307596fbe0f8f1daed638657c5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
import math
import time
class Optimizer:
"""
    Optimization algorithms
    class BGDOptimizer       batch gradient descent (BGD)
    class MinBatchOptimizer  mini-batch gradient descent
    class MomentumOptimizer  mini-batch gradient descent with momentum
    class AdamOptimizer      Adam mini-batch gradient descent
Author: xrh
Date: 2021-07-14
"""
def __init__(self, *args,**kwargs):
"""
        :param args: a variable number of positional arguments (possibly none)
        :param kwargs: a variable number of keyword arguments
eg.
func()
func(1,2,3)
func( 1 ,arg2="two", arg3=3)
"""
pass
def fit(self, func_forwoard,func_backwoard): # TODO
pass
def get_batches(self,X, y_onehot,**kwargs):
"""
        Get all batches of training data
:param X:
:param y_onehot:
:return:
"""
pass
def update_parameters(self, learning_rate, parameters, grad_W_list, grad_b_list,grad_gama_list,grad_beta_list,**kwargs):
"""
        Update the model parameters using the gradient information computed by back-propagation
:param learning_rate:
:param parameters:
:param grad_W_list:
:param grad_b_list:
:return:
"""
pass
class BGDOptimizer(Optimizer):
def get_batches(self,X, y_onehot,**kwargs):
"""
        Get all batches of training data.
        In BGD there is only a single batch, which contains the entire training set.
:param X:
:param y_onehot:
:return:
"""
batches = [(X,y_onehot)]
return batches
def update_parameters(self, learning_rate, parameters, grad_W_list, grad_b_list,grad_gama_list,grad_beta_list,**kwargs):
"""
        Update the model parameters using the gradient information computed by back-propagation
:param learning_rate:
:param parameters:
:param grad_W_list:
:param grad_b_list:
:return:
"""
W_list = parameters['W']
b_list = parameters['b']
gama_list = parameters['gama']
beta_list = parameters['beta']
        L = len(W_list)  # number of layers in the MLP
        for l in range(L):  # iterate over all layers (hidden and output), l = 0, ..., L-1
W_list[l] -= learning_rate * grad_W_list[l]
b_list[l] -= learning_rate * grad_b_list[l]
gama_list[l] -= learning_rate * grad_gama_list[l]
beta_list[l] -= learning_rate * grad_beta_list[l]
return parameters
class MinBatchOptimizer(Optimizer):
def random_mini_batches(self, X, y_onehot, mini_batch_size=640):
"""
        Generate all mini-batches from the samples and return them as a list
        :param X: shape (N,m)
        :param y_onehot: shape (K,N)
        :param mini_batch_size: size of each mini-batch
:return:
mini_batches -- list of (mini_batch_X, mini_batch_Y)
"""
        # seed = int(time.time())  # random seed taken from the system time; its resolution is 1 s, so the seed changes every second
        # np.random.seed(seed)  # ensure every epoch shuffles the training samples in a different order
        N = X.shape[0]  # number of training samples
        mini_batches = []
        # shuffle the order of the training samples (X, y)
permutation = list(np.random.permutation(N))
shuffled_X = X[permutation, :]
shuffled_y = y_onehot[:, permutation]
        # partition the sample set (shuffled_X, shuffled_y)
        num_complete_minibatches = math.floor(N / mini_batch_size)  # round down (floor)
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[k * mini_batch_size: (k + 1) * mini_batch_size, :]
mini_batch_y = shuffled_y[:, k * mini_batch_size: (k + 1) * mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_y)
mini_batches.append(mini_batch)
        # handle the last partition (last mini-batch < mini_batch_size)
if N % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size: N, :]
mini_batch_y = shuffled_y[:, num_complete_minibatches * mini_batch_size: N]
mini_batch = (mini_batch_X, mini_batch_y)
mini_batches.append(mini_batch)
return mini_batches
def get_batches(self,X, y_onehot,mini_batch_size=640):
"""
        Get all batches of training data
:param X:
:param y_onehot:
:return:
"""
return self.random_mini_batches(X=X, y_onehot=y_onehot, mini_batch_size=mini_batch_size)
def update_parameters(self, learning_rate, parameters, grad_W_list, grad_b_list,grad_gama_list,grad_beta_list,**kwargs):
"""
        Update the model parameters using the gradient information computed by back-propagation
:param learning_rate:
:param parameters:
:param grad_W_list:
:param grad_b_list:
:return:
"""
W_list = parameters['W']
b_list = parameters['b']
gama_list = parameters['gama']
beta_list = parameters['beta']
        L = len(W_list)  # number of layers in the MLP
        for l in range(L):  # iterate over all layers (hidden and output), l = 0, ..., L-1
W_list[l] -= learning_rate * grad_W_list[l]
b_list[l] -= learning_rate * grad_b_list[l]
gama_list[l] -= learning_rate * grad_gama_list[l]
beta_list[l] -= learning_rate * grad_beta_list[l]
return parameters
class MomentumOptimizer(MinBatchOptimizer):
def __init__(self, parameters, beta1=0.9):
"""
:param parameters:
        :param beta1: effectively defines the window size used by the exponentially weighted moving average
        eg.
        beta=0.5  window size: 1/(1-beta) = 2
        beta=0.9  window size: 1/(1-beta) = 10
        beta=0.99 window size: 1/(1-beta) = 100
"""
self.beta1 = beta1
W_list = parameters['W']
b_list = parameters['b']
gama_list = parameters['gama']
beta_list = parameters['beta']
        L = len(W_list)  # number of layers in the MLP
        v_W_list = []
        v_b_list = []
        v_gama_list = []
        v_beta_list = []
        for l in range(L):  # iterate over all layers (hidden and output), l = 0, ..., L-1
v_W_list.append(np.zeros(np.shape(W_list[l])))
v_b_list.append(np.zeros(np.shape(b_list[l])))
v_gama_list.append(np.zeros(np.shape(gama_list[l])))
v_beta_list.append(np.zeros(np.shape(beta_list[l])))
self.v_W_list = v_W_list
self.v_b_list = v_b_list
self.v_gama_list = v_gama_list
self.v_beta_list = v_beta_list
def update_parameters(self, learning_rate, parameters, grad_W_list, grad_b_list,grad_gama_list,grad_beta_list,**kwargs):
"""
        Update the model parameters using the gradient information computed by back-propagation
:param learning_rate:
:param parameters:
:param grad_W_list:
:param grad_b_list:
:return:
"""
W_list = parameters['W']
b_list = parameters['b']
gama_list = parameters['gama']
beta_list = parameters['beta']
        L = len(W_list)  # number of layers in the MLP
        for l in range(L):  # iterate over all layers (hidden and output), l = 0, ..., L-1
self.v_W_list[l] = self.beta1 * self.v_W_list[l] + (1 - self.beta1) * grad_W_list[l]
self.v_b_list[l] = self.beta1 * self.v_b_list[l] + (1 - self.beta1) * grad_b_list[l]
self.v_gama_list[l] = self.beta1 * self.v_gama_list[l] + (1 - self.beta1) * grad_gama_list[l]
self.v_beta_list[l] = self.beta1 * self.v_beta_list[l] + (1 - self.beta1) * grad_beta_list[l]
W_list[l] -= learning_rate * self.v_W_list[l]
b_list[l] -= learning_rate * self.v_b_list[l]
gama_list[l] -= learning_rate * self.v_gama_list[l]
beta_list[l] -= learning_rate * self.v_beta_list[l]
return parameters
class AdamOptimizer(MinBatchOptimizer):
def __init__(self, parameters,
beta1 = 0.9,
beta2 = 0.99,
epsilon = 1e-8):
"""
:param parameters:
        :param beta1: momentum/inertia term, an average of the historical and current gradients (default 0.9)
        :param beta2: adaptivity term, yields an adaptive learning rate for each model parameter (default 0.99)
        beta1 and beta2 usually do not need tuning
        :param epsilon: a very small constant
        eg.
        beta1=0.5  window over historical gradients: 1/(1-beta1) = 2
        beta1=0.9  window over historical gradients: 1/(1-beta1) = 10
        beta1=0.99 window over historical gradients: 1/(1-beta1) = 100
"""
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
W_list = parameters['W']
b_list = parameters['b']
gama_list = parameters['gama']
beta_list = parameters['beta']
        L = len(W_list)  # number of layers in the MLP
        # first moment of the parameters (momentum / inertia)
m_W_list=[]
m_b_list = []
m_gama_list=[]
m_beta_list = []
        # second moment of the parameters (adaptive per-parameter scaling)
v_W_list = []
v_b_list = []
v_gama_list = []
v_beta_list = []
        for l in range(L):  # iterate over all layers (hidden and output), l = 0, ..., L-1
m_W_list.append(np.zeros(np.shape(W_list[l])))
m_b_list.append(np.zeros(np.shape(b_list[l])))
m_gama_list.append(np.zeros(np.shape(gama_list[l])))
m_beta_list.append(np.zeros(np.shape(beta_list[l])))
v_W_list.append(np.zeros(np.shape(W_list[l])))
v_b_list.append(np.zeros(np.shape(b_list[l])))
v_gama_list.append(np.zeros(np.shape(gama_list[l])))
v_beta_list.append(np.zeros(np.shape(beta_list[l])))
self.m_W_list = m_W_list
self.m_b_list = m_b_list
self.m_gama_list = m_gama_list
self.m_beta_list = m_beta_list
self.v_W_list = v_W_list
self.v_b_list = v_b_list
self.v_gama_list = v_gama_list
self.v_beta_list = v_beta_list
def update_parameters(self, learning_rate, parameters, grad_W_list, grad_b_list,grad_gama_list,grad_beta_list,t=0,use_bias_correct=False):
"""
        Update the model parameters using the gradient information computed by back-propagation
:param learning_rate:
:param parameters:
:param grad_W_list:
:param grad_b_list:
        :param t: current time step t
        :param use_bias_correct: enable bias correction, which makes the moment estimates more accurate when t is small
        (default: False, disabled)
:return:
"""
W_list = parameters['W']
b_list = parameters['b']
gama_list = parameters['gama']
beta_list = parameters['beta']
        L = len(W_list)  # number of layers in the MLP
        for l in range(L):  # iterate over all layers (hidden and output), l = 0, ..., L-1
            # first moment
self.m_W_list[l] = self.beta1 * self.m_W_list[l] + (1-self.beta1)*grad_W_list[l]
self.m_b_list[l] = self.beta1 * self.m_b_list[l] + (1 -self.beta1) * grad_b_list[l]
self.m_gama_list[l] = self.beta1 * self.m_gama_list[l] + (1 - self.beta1) * grad_gama_list[l]
self.m_beta_list[l] = self.beta1 * self.m_beta_list[l] + (1 - self.beta1) * grad_beta_list[l]
            # second moment
self.v_W_list[l] = self.beta2 * self.v_W_list[l] + (1-self.beta2)* np.square(grad_W_list[l])
self.v_b_list[l] = self.beta2 * self.v_b_list[l] + (1 -self.beta2) * np.square(grad_b_list[l])
self.v_gama_list[l] = self.beta2 * self.v_gama_list[l] + (1-self.beta2)* np.square(grad_gama_list[l])
self.v_beta_list[l] = self.beta2 * self.v_beta_list[l] + (1 -self.beta2) * np.square(grad_beta_list[l])
            # bias correction  TODO: enabling bias correction causes numerical overflow and the model does not converge; reason: t starts at 0, so 1 - beta**t == 0 and a division by zero follows
if use_bias_correct:
z1 = 1-(self.beta1**t)
z2 = 1 - (self.beta2**t)
self.m_W_list[l] = self.m_W_list[l] / z1
self.m_b_list[l] = self.m_b_list[l] / z1
self.m_gama_list[l] = self.m_gama_list[l] / z1
self.m_beta_list[l] = self.m_beta_list[l] / z1
self.v_W_list[l] = self.v_W_list[l] / z2
self.v_b_list[l] = self.v_b_list[l] / z2
self.v_gama_list[l] = self.v_gama_list[l] / z2
self.v_beta_list[l] = self.v_beta_list[l] / z2
            # combine the first and second moments
W_list[l] -= ((learning_rate * self.m_W_list[l]) / np.sqrt(self.v_W_list[l]+self.epsilon))
b_list[l] -= ((learning_rate * self.m_b_list[l]) / np.sqrt(self.v_b_list[l]+self.epsilon))
gama_list[l] -= ((learning_rate * self.m_gama_list[l]) / np.sqrt(self.v_gama_list[l]+self.epsilon))
beta_list[l] -= ((learning_rate * self.m_beta_list[l]) / np.sqrt(self.v_beta_list[l]+self.epsilon))
return parameters
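# --- Illustrative sketch, not part of the original module ---
# The TODO above reports that bias correction breaks because the step counter t
# starts at 0, making 1 - beta**t == 0. Textbook Adam counts steps from t = 1; the
# helper below shows the bias-corrected update for a single parameter array under
# that convention (textbook defaults, using the module-level numpy import). It is a
# sketch of the standard formula, not the class API above.
def adam_step(w, grad, m, v, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    """One bias-corrected Adam step for a single parameter array; requires t >= 1."""
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * np.square(grad)
    m_hat = m / (1 - beta1 ** t)  # well defined because t >= 1
    v_hat = v / (1 - beta2 ** t)
    w = w - lr * m_hat / (np.sqrt(v_hat) + eps)
    return w, m, v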
| 28.747596
| 142
| 0.570365
|
d90fb70cbc23afb93f6c44e8e7224c204db315e9
| 541
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractAmenalongeCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractAmenalongeCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractAmenalongeCom.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractAmenalongeCom(item):
'''
Parser for 'amenalonge.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
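# --- Illustrative sketch, not part of the original module ---
# extractVolChapterFragmentPostfix() and buildReleaseMessageWithType() are not
# imported in this file; the surrounding framework supplies them. The stubs below
# are hypothetical stand-ins that only show how the parser above is driven.
def extractVolChapterFragmentPostfix(title):
    return 1, 2, None, ''  # stub: (vol, chp, frag, postfix)
def buildReleaseMessageWithType(item, name, vol, chp, frag=None, postfix='', tl_type=None):
    return {'series': name, 'vol': vol, 'chp': chp, 'type': tl_type}  # stub
if __name__ == '__main__':
    demo_item = {'title': 'Some Series c2', 'tags': ['PRC']}
    print(extractAmenalongeCom(demo_item))
    # -> {'series': 'PRC', 'vol': 1, 'chp': 2, 'type': 'translated'}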
| 24.590909
| 104
| 0.628466
|
fe4050a0d73152b4e4816041bcbc5ce45a2b57a8
| 9,715
|
py
|
Python
|
codes/b_environments/rotary_inverted_pendulum/rip_service_pb2_grpc.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | null | null | null |
codes/b_environments/rotary_inverted_pendulum/rip_service_pb2_grpc.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | null | null | null |
codes/b_environments/rotary_inverted_pendulum/rip_service_pb2_grpc.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | 1
|
2021-11-23T12:30:37.000Z
|
2021-11-23T12:30:37.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from codes.b_environments.rotary_inverted_pendulum import rip_service_pb2 as rip__service__pb2
class RDIPStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.reset = channel.unary_unary(
'/RDIP/reset',
request_serializer=rip__service__pb2.RipRequest.SerializeToString,
response_deserializer=rip__service__pb2.RipResponse.FromString,
)
self.step = channel.unary_unary(
'/RDIP/step',
request_serializer=rip__service__pb2.RipRequest.SerializeToString,
response_deserializer=rip__service__pb2.RipResponse.FromString,
)
self.terminate = channel.unary_unary(
'/RDIP/terminate',
request_serializer=rip__service__pb2.RipRequest.SerializeToString,
response_deserializer=rip__service__pb2.RipResponse.FromString,
)
self.initialize = channel.unary_unary(
'/RDIP/initialize',
request_serializer=rip__service__pb2.RipRequest.SerializeToString,
response_deserializer=rip__service__pb2.RipResponse.FromString,
)
self.step_sync = channel.unary_unary(
'/RDIP/step_sync',
request_serializer=rip__service__pb2.RipRequest.SerializeToString,
response_deserializer=rip__service__pb2.RipResponse.FromString,
)
self.reset_sync = channel.unary_unary(
'/RDIP/reset_sync',
request_serializer=rip__service__pb2.RipRequest.SerializeToString,
response_deserializer=rip__service__pb2.RipResponse.FromString,
)
class RDIPServicer(object):
"""Missing associated documentation comment in .proto file."""
def reset(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def step(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def terminate(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def initialize(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def step_sync(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def reset_sync(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RDIPServicer_to_server(servicer, server):
rpc_method_handlers = {
'reset': grpc.unary_unary_rpc_method_handler(
servicer.reset,
request_deserializer=rip__service__pb2.RipRequest.FromString,
response_serializer=rip__service__pb2.RipResponse.SerializeToString,
),
'step': grpc.unary_unary_rpc_method_handler(
servicer.step,
request_deserializer=rip__service__pb2.RipRequest.FromString,
response_serializer=rip__service__pb2.RipResponse.SerializeToString,
),
'terminate': grpc.unary_unary_rpc_method_handler(
servicer.terminate,
request_deserializer=rip__service__pb2.RipRequest.FromString,
response_serializer=rip__service__pb2.RipResponse.SerializeToString,
),
'initialize': grpc.unary_unary_rpc_method_handler(
servicer.initialize,
request_deserializer=rip__service__pb2.RipRequest.FromString,
response_serializer=rip__service__pb2.RipResponse.SerializeToString,
),
'step_sync': grpc.unary_unary_rpc_method_handler(
servicer.step_sync,
request_deserializer=rip__service__pb2.RipRequest.FromString,
response_serializer=rip__service__pb2.RipResponse.SerializeToString,
),
'reset_sync': grpc.unary_unary_rpc_method_handler(
servicer.reset_sync,
request_deserializer=rip__service__pb2.RipRequest.FromString,
response_serializer=rip__service__pb2.RipResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'RDIP', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class RDIP(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def reset(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/RDIP/reset',
rip__service__pb2.RipRequest.SerializeToString,
rip__service__pb2.RipResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def step(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/RDIP/step',
rip__service__pb2.RipRequest.SerializeToString,
rip__service__pb2.RipResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def terminate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/RDIP/terminate',
rip__service__pb2.RipRequest.SerializeToString,
rip__service__pb2.RipResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def initialize(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/RDIP/initialize',
rip__service__pb2.RipRequest.SerializeToString,
rip__service__pb2.RipResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def step_sync(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/RDIP/step_sync',
rip__service__pb2.RipRequest.SerializeToString,
rip__service__pb2.RipResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def reset_sync(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/RDIP/reset_sync',
rip__service__pb2.RipRequest.SerializeToString,
rip__service__pb2.RipResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
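# --- Illustrative sketch, not part of the generated module ---
# Typical client-side use of the stub defined above. The server address is an
# assumption for illustration, and the fields of RipRequest are left at their
# defaults because the .proto message layout is not shown here.
def demo_rdip_client(address='localhost:50051'):
    channel = grpc.insecure_channel(address)
    stub = RDIPStub(channel)
    return stub.reset(rip__service__pb2.RipRequest())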
| 41.875
| 94
| 0.642717
|
d497a7fe3e96cec4a383c34db1f8f2bdbccdf63c
| 1,357
|
py
|
Python
|
devon/web/call.py
|
joehewitt/devon
|
5b11265e5eae3db7bfaeb49543a2a6293bd15557
|
[
"BSD-3-Clause"
] | 3
|
2015-12-25T16:26:02.000Z
|
2016-05-08T18:19:25.000Z
|
devon/web/call.py
|
joehewitt/devon
|
5b11265e5eae3db7bfaeb49543a2a6293bd15557
|
[
"BSD-3-Clause"
] | null | null | null |
devon/web/call.py
|
joehewitt/devon
|
5b11265e5eae3db7bfaeb49543a2a6293bd15557
|
[
"BSD-3-Clause"
] | 1
|
2021-07-13T07:17:01.000Z
|
2021-07-13T07:17:01.000Z
|
import devon.jump, devon.code.sync
import xmlrpclib, sys
# **************************************************************************************************
def main(request):
params, method = xmlrpclib.loads(request.data)
try:
fn = eval(method)
except:
fn = None
result = xmlrpclib.Fault(1, "Function '%s' not found" % method)
response = xmlrpclib.dumps(result)
if fn:
try:
result = fn(request, *params)
if result == None:
result = ""
response = xmlrpclib.dumps((result,), methodresponse=1)
except:
result = xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value))
response = xmlrpclib.dumps(result)
raise
request.send_response(200)
request.send_header("Content-type", "text/xml")
request.send_header("Content-length", str(len(response)))
request.end_headers()
request.wfile.write(response)
def jump(request, sourcePath, sourceText, sourceOffset, relative):
jumpPath = devon.jump.jump(sourcePath, sourceText, sourceOffset, relative)
return jumpPath
def jumpLaunch(request, sourcePath, sourceText, sourceOffset, relative):
return devon.jump.launch(sourcePath, sourceText, sourceOffset, relative)
def shutdown(request):
devon.server.web.stopServer()
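# --- Illustrative sketch, not part of the original module ---
# main() above expects a raw XML-RPC call in the request body and dispatches on the
# method name (e.g. "jump"). The helper below builds such a body with the same
# Python 2 xmlrpclib imported above; the endpoint URL this body is POSTed to is an
# assumption for illustration.
def demo_jump_request_body():
    params = ('/path/to/source.cpp', 'source text', 0, True)
    return xmlrpclib.dumps(params, 'jump')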
| 31.55814
| 100
| 0.596168
|
6f1389dbc4eea08c7e86e63ce1d7435f63b03ede
| 838
|
py
|
Python
|
deepensemble/utils/utils_translation.py
|
pdoren/correntropy-and-ensembles-in-deep-learning
|
b8e39e0ea97395e9f4ef5e9b351551a89fedc885
|
[
"MIT"
] | 1
|
2017-11-22T15:35:45.000Z
|
2017-11-22T15:35:45.000Z
|
deepensemble/utils/utils_translation.py
|
pdoren/correntropy-and-ensembles-in-deep-learning
|
b8e39e0ea97395e9f4ef5e9b351551a89fedc885
|
[
"MIT"
] | null | null | null |
deepensemble/utils/utils_translation.py
|
pdoren/correntropy-and-ensembles-in-deep-learning
|
b8e39e0ea97395e9f4ef5e9b351551a89fedc885
|
[
"MIT"
] | 1
|
2021-12-14T04:16:57.000Z
|
2021-12-14T04:16:57.000Z
|
from .singleton import Singleton
__all__ = ['TextTranslation']
# noinspection PyMissingConstructor
class TextTranslation(Singleton):
def __init__(self):
self.current_language = 'es' # Default Language
self.dict_trans = {}
self._load_language()
def get_str(self, name_str):
return self.dict_trans.get(name_str, '')
def _load_language(self):
import os
dir = os.path.dirname(os.path.abspath(__file__))
with open(dir + "/languages/dict_language_%s.txt" % self.current_language, encoding="utf-8") as f:
for line in f:
(key, val) = line.split(':', 1)
self.dict_trans[key.strip()] = val.strip()
def set_current_language(self, name_language):
self.current_language = name_language
self._load_language()
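# --- Illustrative sketch, not part of the original module ---
# TextTranslation inherits Singleton, so every call site shares one dictionary.
# The key below is hypothetical; _load_language() expects a
# languages/dict_language_<lang>.txt file of "key: value" lines next to this module.
def demo_translation():
    tt = TextTranslation()
    tt.set_current_language('en')  # reloads languages/dict_language_en.txt
    return tt.get_str('some_key')  # hypothetical key; returns '' if missing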
| 27.032258
| 106
| 0.637232
|
a067863071f2d9af2d9c78477ec1c9a93d3f4507
| 58,487
|
py
|
Python
|
src/ptvsd/common/messaging.py
|
int19h/ptvsd
|
55aa650e3e5fff91a3804a59dbf20ff3583be238
|
[
"MIT"
] | 349
|
2019-05-07T00:15:12.000Z
|
2022-03-10T15:05:08.000Z
|
src/ptvsd/common/messaging.py
|
WooSung-Jung/ptvsd
|
99c8513921021d2cc7cd82e132b65c644c256768
|
[
"MIT"
] | 1,095
|
2018-03-01T00:50:11.000Z
|
2019-05-06T17:44:15.000Z
|
src/ptvsd/common/messaging.py
|
WooSung-Jung/ptvsd
|
99c8513921021d2cc7cd82e132b65c644c256768
|
[
"MIT"
] | 53
|
2018-03-01T00:33:57.000Z
|
2019-05-05T00:50:23.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""An implementation of the session and presentation layers as used in the Debug
Adapter Protocol (DAP): channels and their lifetime, JSON messages, requests,
responses, and events.
https://microsoft.github.io/debug-adapter-protocol/overview#base-protocol
"""
import collections
import contextlib
import functools
import itertools
import os
import socket
import sys
import threading
from ptvsd.common import compat, fmt, json, log
from ptvsd.common.compat import unicode
class JsonIOError(IOError):
"""Indicates that a read or write operation on JsonIOStream has failed.
"""
def __init__(self, *args, **kwargs):
stream = kwargs.pop("stream")
cause = kwargs.pop("cause", None)
if not len(args) and cause is not None:
args = [str(cause)]
super(JsonIOError, self).__init__(*args, **kwargs)
self.stream = stream
"""The stream that couldn't be read or written.
Set by JsonIOStream.read_json() and JsonIOStream.write_json().
JsonMessageChannel relies on this value to decide whether a NoMoreMessages
instance that bubbles up to the message loop is related to that loop.
"""
self.cause = cause
"""The underlying exception, if any."""
class NoMoreMessages(JsonIOError, EOFError):
"""Indicates that there are no more messages that can be read from or written
to a stream.
"""
def __init__(self, *args, **kwargs):
args = args if len(args) else ["No more messages"]
super(NoMoreMessages, self).__init__(*args, **kwargs)
class JsonIOStream(object):
"""Implements a JSON value stream over two byte streams (input and output).
Each value is encoded as a DAP packet, with metadata headers and a JSON payload.
"""
MAX_BODY_SIZE = 0xFFFFFF
json_decoder_factory = json.JsonDecoder
"""Used by read_json() when decoder is None."""
json_encoder_factory = json.JsonEncoder
"""Used by write_json() when encoder is None."""
@classmethod
def from_stdio(cls, name="stdio"):
"""Creates a new instance that receives messages from sys.stdin, and sends
them to sys.stdout.
On Win32, this also sets stdin and stdout to binary mode, since the protocol
requires that to work properly.
"""
if sys.version_info >= (3,):
stdin = sys.stdin.buffer
stdout = sys.stdout.buffer
else:
stdin = sys.stdin
stdout = sys.stdout
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(stdin.fileno(), os.O_BINARY)
msvcrt.setmode(stdout.fileno(), os.O_BINARY)
return cls(stdin, stdout, name)
@classmethod
def from_process(cls, process, name="stdio"):
"""Creates a new instance that receives messages from process.stdin, and sends
them to process.stdout.
"""
return cls(process.stdout, process.stdin, name)
@classmethod
def from_socket(cls, sock, name=None):
"""Creates a new instance that sends and receives messages over a socket.
"""
sock.settimeout(None) # make socket blocking
if name is None:
name = repr(sock)
# TODO: investigate switching to buffered sockets; readline() on unbuffered
# sockets is very slow! Although the implementation of readline() itself is
# native code, it calls read(1) in a loop - and that then ultimately calls
# SocketIO.readinto(), which is implemented in Python.
socket_io = sock.makefile("rwb", 0)
# SocketIO.close() doesn't close the underlying socket.
def cleanup():
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
sock.close()
return cls(socket_io, socket_io, name, cleanup)
def __init__(self, reader, writer, name=None, cleanup=lambda: None):
"""Creates a new JsonIOStream.
reader must be a BytesIO-like object, from which incoming messages will be
read by read_json().
writer must be a BytesIO-like object, into which outgoing messages will be
written by write_json().
cleanup must be a callable; it will be invoked without arguments when the
stream is closed.
reader.readline() must treat "\n" as the line terminator, and must leave "\r"
as is - it must not replace "\r\n" with "\n" automatically, as TextIO does.
"""
if name is None:
name = fmt("reader={0!r}, writer={1!r}", reader, writer)
self.name = name
self._reader = reader
self._writer = writer
self._cleanup = cleanup
self._closed = False
def close(self):
"""Closes the stream, the reader, and the writer.
"""
if self._closed:
return
self._closed = True
log.debug("Closing {0} message stream", self.name)
try:
try:
# Close the writer first, so that the other end of the connection has
# its message loop waiting on read() unblocked. If there is an exception
# while closing the writer, we still want to try to close the reader -
# only one exception can bubble up, so if both fail, it'll be the one
# from reader.
try:
self._writer.close()
finally:
if self._reader is not self._writer:
self._reader.close()
finally:
self._cleanup()
except Exception:
# On Python 2, close() will raise an exception if there is a concurrent
# read() or write(), which is a common and expected occurrence with
# JsonMessageChannel, so don't even bother logging it.
if sys.version_info >= (3,):
raise log.exception("Error while closing {0} message stream", self.name)
def _log_message(self, dir, data, logger=log.debug):
format_string = "{0} {1} " + (
"{2!j:indent=None}" if isinstance(data, list) else "{2!j}"
)
return logger(format_string, self.name, dir, data)
def _read_line(self, reader):
line = b""
while True:
try:
line += reader.readline()
except Exception as ex:
raise NoMoreMessages(str(ex), stream=self)
if not line:
raise NoMoreMessages(stream=self)
if line.endswith(b"\r\n"):
line = line[0:-2]
return line
def read_json(self, decoder=None):
"""Read a single JSON value from reader.
Returns JSON value as parsed by decoder.decode(), or raises NoMoreMessages
if there are no more values to be read.
"""
decoder = decoder if decoder is not None else self.json_decoder_factory()
reader = self._reader
read_line = functools.partial(self._read_line, reader)
# If any error occurs while reading and parsing the message, log the original
# raw message data as is, so that it's possible to diagnose missing or invalid
# headers, encoding issues, JSON syntax errors etc.
def log_message_and_exception(format_string="", *args, **kwargs):
if format_string:
format_string += "\n\n"
format_string += "{name} -->\n{raw_lines}"
raw_lines = b"".join(raw_chunks).split(b"\n")
raw_lines = "\n".join(repr(line) for line in raw_lines)
return log.exception(
format_string, *args, name=self.name, raw_lines=raw_lines, **kwargs
)
raw_chunks = []
headers = {}
while True:
try:
line = read_line()
except Exception:
# Only log it if we have already read some headers, and are looking
# for a blank line terminating them. If this is the very first read,
# there's no message data to log in any case, and the caller might
# be anticipating the error - e.g. NoMoreMessages on disconnect.
if headers:
log_message_and_exception("Error while reading message headers:")
raise
raw_chunks += [line, b"\n"]
if line == b"":
break
key, _, value = line.partition(b":")
headers[key] = value
try:
length = int(headers[b"Content-Length"])
if not (0 <= length <= self.MAX_BODY_SIZE):
raise ValueError
except (KeyError, ValueError):
try:
raise IOError("Content-Length is missing or invalid:")
except Exception:
raise log_message_and_exception()
body_start = len(raw_chunks)
body_remaining = length
while body_remaining > 0:
try:
chunk = reader.read(body_remaining)
if not chunk:
raise EOFError
except Exception as exc:
# Not logged due to https://github.com/microsoft/ptvsd/issues/1699
# log_message_and_exception(
# "Couldn't read the expected {0} bytes of body:", length
# )
raise NoMoreMessages(str(exc), stream=self)
raw_chunks.append(chunk)
body_remaining -= len(chunk)
assert body_remaining == 0
body = b"".join(raw_chunks[body_start:])
try:
body = body.decode("utf-8")
except Exception:
raise log_message_and_exception()
try:
body = decoder.decode(body)
except Exception:
raise log_message_and_exception()
# If parsed successfully, log as JSON for readability.
self._log_message("-->", body)
return body
def write_json(self, value, encoder=None):
"""Write a single JSON value into writer.
Value is written as encoded by encoder.encode().
"""
if self._closed:
# Don't log this - it's a common pattern to write to a stream while
# anticipating EOFError from it in case it got closed concurrently.
raise NoMoreMessages(stream=self)
encoder = encoder if encoder is not None else self.json_encoder_factory()
writer = self._writer
# Format the value as a message, and try to log any failures using as much
# information as we already have at the point of the failure. For example,
# if it fails after it is serialized to JSON, log that JSON.
try:
body = encoder.encode(value)
except Exception:
raise self._log_message("<--", value, logger=log.exception)
if not isinstance(body, bytes):
body = body.encode("utf-8")
header = fmt("Content-Length: {0}\r\n\r\n", len(body))
header = header.encode("ascii")
data = header + body
data_written = 0
try:
while data_written < len(data):
written = writer.write(data[data_written:])
# On Python 2, socket.makefile().write() does not properly implement
# BytesIO.write(), and always returns None instead of the number of
# bytes written - but also guarantees that it is always a full write.
if written is None:
break
data_written += written
writer.flush()
except Exception as exc:
self._log_message("<--", value, logger=log.exception)
raise JsonIOError(stream=self, cause=exc)
self._log_message("<--", value)
def __repr__(self):
return fmt("{0}({1!r})", type(self).__name__, self.name)
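# --- Illustrative sketch, not part of ptvsd ---
# JsonIOStream frames every message as a DAP packet: an ASCII "Content-Length: <n>"
# header, a blank line, then <n> bytes of UTF-8 JSON. The helper below demonstrates
# the round trip over in-memory buffers; it is never called by the module itself.
def _demo_wire_format():
    import io
    reader = io.BytesIO(b'Content-Length: 2\r\n\r\n{}')
    writer = io.BytesIO()
    stream = JsonIOStream(reader, writer, "wire-format demo")
    value = stream.read_json()      # -> {}
    stream.write_json({"seq": 1})   # writer now holds the framed outgoing packet
    return value, writer.getvalue()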
class MessageDict(collections.OrderedDict):
"""A specialized dict that is used for JSON message payloads - Request.arguments,
Response.body, and Event.body.
For all members that normally throw KeyError when a requested key is missing, this
dict raises InvalidMessageError instead. Thus, a message handler can skip checks
for missing properties, and just work directly with the payload on the assumption
that it is valid according to the protocol specification; if anything is missing,
it will be reported automatically in the proper manner.
If the value for the requested key is itself a dict, it is returned as is, and not
automatically converted to MessageDict. Thus, to enable convenient chaining - e.g.
d["a"]["b"]["c"] - the dict must consistently use MessageDict instances rather than
vanilla dicts for all its values, recursively. This is guaranteed for the payload
of all freshly received messages (unless and until it is mutated), but there is no
such guarantee for outgoing messages.
"""
def __init__(self, message, items=None):
assert message is None or isinstance(message, Message)
if items is None:
super(MessageDict, self).__init__()
else:
super(MessageDict, self).__init__(items)
self.message = message
"""The Message object that owns this dict.
For any instance exposed via a Message object corresponding to some incoming
message, it is guaranteed to reference that Message object. There is no similar
guarantee for outgoing messages.
"""
def __repr__(self):
return fmt("{0!j}", self)
def __call__(self, key, validate, optional=False):
"""Like get(), but with validation.
The item is first retrieved as if with self.get(key, default=()) - the default
value is () rather than None, so that JSON nulls are distinguishable from
missing properties.
If optional=True, and the value is (), it's returned as is. Otherwise, the
item is validated by invoking validate(item) on it.
If validate=False, it's treated as if it were (lambda x: x) - i.e. any value
is considered valid, and is returned unchanged. If validate is a type or a
tuple, it's treated as json.of_type(validate). Otherwise, if validate is not
callable(), it's treated as json.default(validate).
If validate() returns successfully, the item is substituted with the value
it returns - thus, the validator can e.g. replace () with a suitable default
value for the property.
If validate() raises TypeError or ValueError, raises InvalidMessageError with
the same text that applies_to(self.messages).
See ptvsd.common.json for reusable validators.
"""
if not validate:
validate = lambda x: x
elif isinstance(validate, type) or isinstance(validate, tuple):
validate = json.of_type(validate, optional=optional)
elif not callable(validate):
validate = json.default(validate)
value = self.get(key, ())
try:
value = validate(value)
except (TypeError, ValueError) as exc:
message = Message if self.message is None else self.message
err = fmt("{0}", exc)
if not err.startswith("["):
err = " " + err
raise message.isnt_valid("{0!j}{1}", key, err)
return value
def _invalid_if_no_key(func):
def wrap(self, key, *args, **kwargs):
try:
return func(self, key, *args, **kwargs)
except KeyError:
message = Message if self.message is None else self.message
raise message.isnt_valid("missing property {0!r}", key)
return wrap
__getitem__ = _invalid_if_no_key(collections.OrderedDict.__getitem__)
__delitem__ = _invalid_if_no_key(collections.OrderedDict.__delitem__)
pop = _invalid_if_no_key(collections.OrderedDict.pop)
del _invalid_if_no_key
def _payload(value):
"""JSON validator for message payload.
If that value is missing or null, it is treated as if it were {}.
"""
if value is not None and value != ():
if isinstance(value, dict): # can be int, str, list...
assert isinstance(value, MessageDict)
return value
# Missing payload. Construct a dummy MessageDict, and make it look like it was
# deserialized. See JsonMessageChannel._parse_incoming_message for why it needs
# to have associate_with().
def associate_with(message):
value.message = message
value = MessageDict(None)
value.associate_with = associate_with
return value
class Message(object):
"""Represents a fully parsed incoming or outgoing message.
https://microsoft.github.io/debug-adapter-protocol/specification#protocolmessage
"""
def __init__(self, channel, seq, json=None):
self.channel = channel
self.seq = seq
"""Sequence number of the message in its channel.
This can be None for synthesized Responses.
"""
self.json = json
"""For incoming messages, the MessageDict containing raw JSON from which
this message was originally parsed.
"""
def __str__(self):
return fmt("{0!j}", self.json) if self.json is not None else repr(self)
def describe(self):
"""A brief description of the message that is enough to identify it.
Examples:
'#1 request "launch" from IDE'
'#2 response to #1 request "launch" from IDE'.
"""
raise NotImplementedError
@property
def payload(self):
"""Payload of the message - self.body or self.arguments, depending on the
message type.
"""
raise NotImplementedError
def __call__(self, *args, **kwargs):
"""Same as self.payload(...)."""
return self.payload(*args, **kwargs)
def __contains__(self, key):
"""Same as (key in self.payload)."""
return key in self.payload
def is_event(self, *event):
"""Returns True if this message is an Event of one of the specified types.
"""
if not isinstance(self, Event):
return False
return event == () or self.event in event
def is_request(self, *command):
"""Returns True if this message is a Request of one of the specified types.
"""
if not isinstance(self, Request):
return False
return command == () or self.command in command
def is_response(self, *command):
"""Returns True if this message is a Response to a request of one of the
specified types.
"""
if not isinstance(self, Response):
return False
return command == () or self.request.command in command
def error(self, exc_type, format_string, *args, **kwargs):
"""Returns a new exception of the specified type from the point at which it is
invoked, with the specified formatted message as the reason.
The resulting exception will have its cause set to the Message object on which
error() was called. Additionally, if that message is a Request, a failure
response is immediately sent.
"""
assert issubclass(exc_type, MessageHandlingError)
silent = kwargs.pop("silent", False)
reason = fmt(format_string, *args, **kwargs)
exc = exc_type(reason, self, silent) # will log it
if isinstance(self, Request):
self.respond(exc)
return exc
def isnt_valid(self, *args, **kwargs):
"""Same as self.error(InvalidMessageError, ...).
"""
return self.error(InvalidMessageError, *args, **kwargs)
def cant_handle(self, *args, **kwargs):
"""Same as self.error(MessageHandlingError, ...).
"""
return self.error(MessageHandlingError, *args, **kwargs)
class Event(Message):
"""Represents an incoming event.
https://microsoft.github.io/debug-adapter-protocol/specification#event
It is guaranteed that body is a MessageDict associated with this Event, and so
are all the nested dicts in it. If "body" was missing or null in JSON, body is
an empty dict.
To handle the event, JsonMessageChannel tries to find a handler for this event in
JsonMessageChannel.handlers. Given event="X", if handlers.X_event exists, then it
is the specific handler for this event. Otherwise, handlers.event must exist, and
it is the generic handler for this event. A missing handler is a fatal error.
No further incoming messages are processed until the handler returns, except for
responses to requests that have wait_for_response() invoked on them.
To report failure to handle the event, the handler must raise an instance of
MessageHandlingError that applies_to() the Event object it was handling. Any such
failure is logged, after which the message loop moves on to the next message.
Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
the appropriate exception type that applies_to() the Event object.
"""
def __init__(self, channel, seq, event, body, json=None):
super(Event, self).__init__(channel, seq, json)
self.event = event
if isinstance(body, MessageDict) and hasattr(body, "associate_with"):
body.associate_with(self)
self.body = body
def describe(self):
return fmt("#{0} event {1!j} from {2}", self.seq, self.event, self.channel)
@property
def payload(self):
return self.body
@staticmethod
def _parse(channel, message_dict):
seq = message_dict("seq", int)
event = message_dict("event", unicode)
body = message_dict("body", _payload)
message = Event(channel, seq, event, body, json=message_dict)
channel._enqueue_handlers(message, message._handle)
def _handle(self):
channel = self.channel
handler = channel._get_handler_for("event", self.event)
try:
try:
result = handler(self)
assert result is None, fmt(
"Handler {0} tried to respond to {1}.",
compat.srcnameof(handler),
self.describe(),
)
except MessageHandlingError as exc:
if not exc.applies_to(self):
raise
log.error(
"Handler {0}\ncouldn't handle {1}:\n{2}",
compat.srcnameof(handler),
self.describe(),
str(exc),
)
except Exception:
raise log.exception(
"Handler {0}\ncouldn't handle {1}:",
compat.srcnameof(handler),
self.describe(),
)
NO_RESPONSE = object()
"""Can be returned from a request handler in lieu of the response body, to indicate
that no response is to be sent.
Request.respond() must be invoked explicitly at some later point to provide a response.
"""
class Request(Message):
"""Represents an incoming or an outgoing request.
Incoming requests are represented directly by instances of this class.
Outgoing requests are represented by instances of OutgoingRequest, which provides
additional functionality to handle responses.
For incoming requests, it is guaranteed that arguments is a MessageDict associated
with this Request, and so are all the nested dicts in it. If "arguments" was missing
or null in JSON, arguments is an empty dict.
To handle the request, JsonMessageChannel tries to find a handler for this request
in JsonMessageChannel.handlers. Given command="X", if handlers.X_request exists,
then it is the specific handler for this request. Otherwise, handlers.request must
exist, and it is the generic handler for this request. A missing handler is a fatal
error.
The handler is then invoked with the Request object as its sole argument.
If the handler itself invokes respond() on the Request at any point, then it must
not return any value.
Otherwise, if the handler returns NO_RESPONSE, no response to the request is sent.
It must be sent manually at some later point via respond().
Otherwise, a response to the request is sent with the returned value as the body.
To fail the request, the handler can return an instance of MessageHandlingError,
or respond() with one, or raise one such that it applies_to() the Request object
being handled.
Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
the appropriate exception type that applies_to() the Request object.
"""
def __init__(self, channel, seq, command, arguments, json=None):
super(Request, self).__init__(channel, seq, json)
self.command = command
if isinstance(arguments, MessageDict) and hasattr(arguments, "associate_with"):
arguments.associate_with(self)
self.arguments = arguments
self.response = None
"""Response to this request.
For incoming requests, it is set as soon as the request handler returns.
For outgoing requests, it is set as soon as the response is received, and
before self._handle_response is invoked.
"""
def describe(self):
return fmt("#{0} request {1!j} from {2}", self.seq, self.command, self.channel)
@property
def payload(self):
return self.arguments
def respond(self, body):
assert self.response is None
d = {"type": "response", "request_seq": self.seq, "command": self.command}
if isinstance(body, Exception):
d["success"] = False
err_text = str(body)
try:
err_text = compat.force_unicode(err_text, "utf-8")
except Exception:
# On Python 2, the error message might not be Unicode, and we don't
# really know what encoding it is. So if treating it as UTF-8 failed,
# use repr() as a fallback - it should escape all non-ASCII chars in
# the string.
err_text = compat.force_unicode(repr(body), "ascii", errors="replace")
d["message"] = err_text
else:
d["success"] = True
if body is not None and body != {}:
d["body"] = body
with self.channel._send_message(d) as seq:
pass
self.response = Response(self.channel, seq, self, body)
@staticmethod
def _parse(channel, message_dict):
seq = message_dict("seq", int)
command = message_dict("command", unicode)
arguments = message_dict("arguments", _payload)
message = Request(channel, seq, command, arguments, json=message_dict)
channel._enqueue_handlers(message, message._handle)
def _handle(self):
channel = self.channel
handler = channel._get_handler_for("request", self.command)
try:
try:
result = handler(self)
except MessageHandlingError as exc:
if not exc.applies_to(self):
raise
result = exc
log.error(
"Handler {0}\ncouldn't handle {1}:\n{2}",
compat.srcnameof(handler),
self.describe(),
str(exc),
)
if result is NO_RESPONSE:
assert self.response is None, fmt(
"Handler {0} for {1} must not return NO_RESPONSE if it has already "
"invoked request.respond().",
compat.srcnameof(handler),
self.describe(),
)
elif self.response is not None:
assert result is None or result is self.response.body, fmt(
"Handler {0} for {1} must not return a response body if it has "
"already invoked request.respond().",
compat.srcnameof(handler),
self.describe(),
)
else:
assert result is not None, fmt(
"Handler {0} for {1} must either call request.respond() before it "
"returns, or return the response body, or return NO_RESPONSE.",
compat.srcnameof(handler),
self.describe(),
)
try:
self.respond(result)
except NoMoreMessages:
log.warning(
"Channel was closed before the response from handler {0} to {1} could be sent",
compat.srcnameof(handler),
self.describe(),
)
except Exception:
raise log.exception(
"Handler {0}\ncouldn't handle {1}:",
compat.srcnameof(handler),
self.describe(),
)
class OutgoingRequest(Request):
"""Represents an outgoing request, for which it is possible to wait for a
response to be received, and register a response handler.
"""
_parse = _handle = None
def __init__(self, channel, seq, command, arguments):
super(OutgoingRequest, self).__init__(channel, seq, command, arguments)
self._response_handlers = []
def describe(self):
return fmt("#{0} request {1!j} to {2}", self.seq, self.command, self.channel)
def wait_for_response(self, raise_if_failed=True):
"""Waits until a response is received for this request, records the Response
object for it in self.response, and returns response.body.
If no response was received from the other party before the channel closed,
self.response is a synthesized Response with body=NoMoreMessages().
If raise_if_failed=True and response.success is False, raises response.body
instead of returning.
"""
with self.channel:
while self.response is None:
self.channel._handlers_enqueued.wait()
if raise_if_failed and not self.response.success:
raise self.response.body
return self.response.body
def on_response(self, response_handler):
"""Registers a handler to invoke when a response is received for this request.
The handler is invoked with Response as its sole argument.
If response has already been received, invokes the handler immediately.
It is guaranteed that self.response is set before the handler is invoked.
If no response was received from the other party before the channel closed,
self.response is a dummy Response with body=NoMoreMessages().
The handler is always invoked asynchronously on an unspecified background
thread - thus, the caller of on_response() can never be blocked or deadlocked
by the handler.
No further incoming messages are processed until the handler returns, except for
responses to requests that have wait_for_response() invoked on them.
"""
with self.channel:
self._response_handlers.append(response_handler)
self._enqueue_response_handlers()
def _enqueue_response_handlers(self):
response = self.response
if response is None:
# Response._parse() will submit the handlers when response is received.
return
def run_handlers():
for handler in handlers:
try:
try:
handler(response)
except MessageHandlingError as exc:
if not exc.applies_to(response):
raise
log.error(
"Handler {0}\ncouldn't handle {1}:\n{2}",
compat.srcnameof(handler),
response.describe(),
str(exc),
)
except Exception:
raise log.exception(
"Handler {0}\ncouldn't handle {1}:",
compat.srcnameof(handler),
response.describe(),
)
handlers = self._response_handlers[:]
self.channel._enqueue_handlers(response, run_handlers)
del self._response_handlers[:]
class Response(Message):
"""Represents an incoming or an outgoing response to a Request.
https://microsoft.github.io/debug-adapter-protocol/specification#response
error_message corresponds to "message" in JSON, and is renamed for clarity.
If success is False, body is None. Otherwise, it is a MessageDict associated
with this Response, and so are all the nested dicts in it. If "body" was missing
or null in JSON, body is an empty dict.
If this is a response to an outgoing request, it will be handled by the handler
registered via self.request.on_response(), if any.
Regardless of whether there is such a handler, OutgoingRequest.wait_for_response()
can also be used to retrieve and handle the response. If there is a handler, it is
executed before wait_for_response() returns.
No further incoming messages are processed until the handler returns, except for
responses to requests that have wait_for_response() invoked on them.
To report failure to handle the event, the handler must raise an instance of
MessageHandlingError that applies_to() the Response object it was handling. Any
such failure is logged, after which the message loop moves on to the next message.
Helper methods Message.isnt_valid() and Message.cant_handle() can be used to raise
the appropriate exception type that applies_to() the Response object.
"""
def __init__(self, channel, seq, request, body, json=None):
super(Response, self).__init__(channel, seq, json)
self.request = request
"""The request to which this is the response."""
if isinstance(body, MessageDict) and hasattr(body, "associate_with"):
body.associate_with(self)
self.body = body
"""Body of the response if the request was successful, or an instance
        of some class derived from Exception if it was not.
If a response was received from the other side, but request failed, it is an
instance of MessageHandlingError containing the received error message. If the
error message starts with InvalidMessageError.PREFIX, then it's an instance of
the InvalidMessageError specifically, and that prefix is stripped.
If no response was received from the other party before the channel closed,
it is an instance of NoMoreMessages.
"""
def describe(self):
return fmt("#{0} response to {1}", self.seq, self.request.describe())
@property
def payload(self):
return self.body
@property
def success(self):
"""Whether the request succeeded or not.
"""
return not isinstance(self.body, Exception)
@property
def result(self):
"""Result of the request. Returns the value of response.body, unless it
is an exception, in which case it is raised instead.
"""
if self.success:
return self.body
else:
raise self.body
@staticmethod
def _parse(channel, message_dict, body=None):
seq = message_dict("seq", int) if (body is None) else None
request_seq = message_dict("request_seq", int)
command = message_dict("command", unicode)
success = message_dict("success", bool)
if body is None:
if success:
body = message_dict("body", _payload)
else:
error_message = message_dict("message", unicode)
exc_type = MessageHandlingError
if error_message.startswith(InvalidMessageError.PREFIX):
error_message = error_message[len(InvalidMessageError.PREFIX) :]
exc_type = InvalidMessageError
body = exc_type(error_message, silent=True)
try:
with channel:
request = channel._sent_requests.pop(request_seq)
known_request = True
except KeyError:
# Synthetic Request that only has seq and command as specified in response
# JSON, for error reporting purposes.
request = OutgoingRequest(channel, request_seq, command, "<unknown>")
known_request = False
if not success:
body.cause = request
response = Response(channel, seq, request, body, json=message_dict)
with channel:
request.response = response
request._enqueue_response_handlers()
if known_request:
return response
else:
raise response.isnt_valid(
"request_seq={0} does not match any known request", request_seq
)
class Disconnect(Message):
"""A dummy message used to represent disconnect. It's always the last message
received from any channel.
"""
def __init__(self, channel):
super(Disconnect, self).__init__(channel, None)
def describe(self):
return fmt("disconnect from {0}", self.channel)
class MessageHandlingError(Exception):
"""Indicates that a message couldn't be handled for some reason.
If the reason is a contract violation - i.e. the message that was handled did not
conform to the protocol specification - InvalidMessageError, which is a subclass,
should be used instead.
If any message handler raises an exception not derived from this class, it will
escape the message loop unhandled, and terminate the process.
If any message handler raises this exception, but applies_to(message) is False, it
    is treated as if it were a generic exception, as described above. Thus, if a request
handler issues another request of its own, and that one fails, the failure is not
silently propagated. However, a request that is delegated via Request.delegate()
will also propagate failures back automatically. For manual propagation, catch the
exception, and call exc.propagate().
If any event handler raises this exception, and applies_to(event) is True, the
exception is silently swallowed by the message loop.
If any request handler raises this exception, and applies_to(request) is True, the
exception is silently swallowed by the message loop, and a failure response is sent
with "message" set to str(reason).
Note that, while errors are not logged when they're swallowed by the message loop,
by that time they have already been logged by their __init__ (when instantiated).
"""
def __init__(self, reason, cause=None, silent=False):
"""Creates a new instance of this class, and immediately logs the exception.
Message handling errors are logged immediately unless silent=True, so that the
        precise context in which they occurred can be determined from the surrounding
log entries.
"""
self.reason = reason
"""Why it couldn't be handled. This can be any object, but usually it's either
str or Exception.
"""
assert cause is None or isinstance(cause, Message)
self.cause = cause
"""The Message object for the message that couldn't be handled. For responses
to unknown requests, this is a synthetic Request.
"""
if not silent:
try:
raise self
except MessageHandlingError:
log.exception()
def __hash__(self):
return hash((self.reason, id(self.cause)))
def __eq__(self, other):
if not isinstance(other, MessageHandlingError):
return NotImplemented
if type(self) is not type(other):
return NotImplemented
if self.reason != other.reason:
return False
if self.cause is not None and other.cause is not None:
if self.cause.seq != other.cause.seq:
return False
return True
def __ne__(self, other):
return not self == other
def __str__(self):
return str(self.reason)
def __repr__(self):
s = type(self).__name__
if self.cause is None:
s += fmt("(reason={0!r})", self.reason)
else:
s += fmt(
"(channel={0!r}, cause={1!r}, reason={2!r})",
self.cause.channel.name,
self.cause.seq,
self.reason,
)
return s
def applies_to(self, message):
"""Whether this MessageHandlingError can be treated as a reason why the
handling of message failed.
If self.cause is None, this is always true.
If self.cause is not None, this is only true if cause is message.
"""
return self.cause is None or self.cause is message
def propagate(self, new_cause):
"""Propagates this error, raising a new instance of the same class with the
same reason, but a different cause.
"""
raise type(self)(self.reason, new_cause, silent=True)
class InvalidMessageError(MessageHandlingError):
"""Indicates that an incoming message did not follow the protocol specification -
for example, it was missing properties that are required, or the message itself
is not allowed in the current state.
Raised by MessageDict in lieu of KeyError for missing keys.
"""
PREFIX = "Invalid message: "
"""Automatically prepended to the "message" property in JSON responses, when the
handler raises InvalidMessageError.
If a failed response has "message" property that starts with this prefix, it is
reported as InvalidMessageError rather than MessageHandlingError.
"""
def __str__(self):
return InvalidMessageError.PREFIX + str(self.reason)
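# --- Handler-convention sketch (not part of the original module) ---
# A hedged illustration of the Request-handler contract described in the Request
# docstring above: return the response body, raise via request.isnt_valid() /
# request.cant_handle() to fail, or return NO_RESPONSE and call request.respond()
# later. "launch" and "program" are example names only, and request.arguments is
# assumed to behave like a plain dict here.
def _example_launch_request(request):
    program = request.arguments.get("program")
    if not program:
        raise request.isnt_valid('"program" is required')
    # ... start the debuggee here ...
    return {}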
class JsonMessageChannel(object):
"""Implements a JSON message channel on top of a raw JSON message stream, with
support for DAP requests, responses, and events.
The channel can be locked for exclusive use via the with-statement::
with channel:
channel.send_request(...)
# No interleaving messages can be sent here from other threads.
channel.send_event(...)
"""
def __init__(self, stream, handlers=None, name=None):
self.stream = stream
self.handlers = handlers
self.name = name if name is not None else stream.name
self.started = False
self._lock = threading.RLock()
self._closed = False
self._seq_iter = itertools.count(1)
self._sent_requests = {} # {seq: Request}
self._handler_queue = [] # [(what, handler)]
self._handlers_enqueued = threading.Condition(self._lock)
self._handler_thread = None
self._parser_thread = None
def __str__(self):
return self.name
def __repr__(self):
return fmt("{0}({1!r})", type(self).__name__, self.name)
def __enter__(self):
self._lock.acquire()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self._lock.release()
def close(self):
"""Closes the underlying stream.
This does not immediately terminate any handlers that are already executing,
but they will be unable to respond. No new request or event handlers will
execute after this method is called, even for messages that have already been
        received. However, response handlers will continue to execute for any request
that is still pending, as will any handlers registered via on_response().
"""
with self:
if not self._closed:
self._closed = True
self.stream.close()
def start(self):
"""Starts a message loop which parses incoming messages and invokes handlers
for them on a background thread, until the channel is closed.
Incoming messages, including responses to requests, will not be processed at
all until this is invoked.
"""
assert not self.started
self.started = True
self._parser_thread = threading.Thread(
target=self._parse_incoming_messages, name=fmt("{0} message parser", self)
)
self._parser_thread.pydev_do_not_trace = True
self._parser_thread.is_pydev_daemon_thread = True
self._parser_thread.daemon = True
self._parser_thread.start()
def wait(self):
"""Waits for the message loop to terminate, and for all enqueued Response
message handlers to finish executing.
"""
parser_thread = self._parser_thread
if parser_thread is not None:
parser_thread.join()
handler_thread = self._handler_thread
if handler_thread is not None:
handler_thread.join()
# Order of keys for _prettify() - follows the order of properties in
# https://microsoft.github.io/debug-adapter-protocol/specification
_prettify_order = (
"seq",
"type",
"request_seq",
"success",
"command",
"event",
"message",
"arguments",
"body",
"error",
)
def _prettify(self, message_dict):
"""Reorders items in a MessageDict such that it is more readable.
"""
for key in self._prettify_order:
if key not in message_dict:
continue
value = message_dict[key]
del message_dict[key]
message_dict[key] = value
@contextlib.contextmanager
def _send_message(self, message):
"""Sends a new message to the other party.
Generates a new sequence number for the message, and provides it to the
caller before the message is sent, using the context manager protocol::
with send_message(...) as seq:
# The message hasn't been sent yet.
...
# Now the message has been sent.
Safe to call concurrently for the same channel from different threads.
"""
assert "seq" not in message
with self:
seq = next(self._seq_iter)
message = MessageDict(None, message)
message["seq"] = seq
self._prettify(message)
with self:
yield seq
self.stream.write_json(message)
def send_request(self, command, arguments=None, on_before_send=None):
"""Sends a new request, and returns the OutgoingRequest object for it.
If arguments is None or {}, "arguments" will be omitted in JSON.
If on_before_send is not None, invokes on_before_send() with the request
object as the sole argument, before the request actually gets sent.
Does not wait for response - use OutgoingRequest.wait_for_response().
Safe to call concurrently for the same channel from different threads.
"""
d = {"type": "request", "command": command}
if arguments is not None and arguments != {}:
d["arguments"] = arguments
with self._send_message(d) as seq:
request = OutgoingRequest(self, seq, command, arguments)
if on_before_send is not None:
on_before_send(request)
self._sent_requests[seq] = request
return request
def send_event(self, event, body=None):
"""Sends a new event.
If body is None or {}, "body" will be omitted in JSON.
Safe to call concurrently for the same channel from different threads.
"""
d = {"type": "event", "event": event}
if body is not None and body != {}:
d["body"] = body
with self._send_message(d):
pass
def request(self, *args, **kwargs):
"""Same as send_request(...).wait_for_response()
"""
return self.send_request(*args, **kwargs).wait_for_response()
def propagate(self, message):
"""Sends a new message with the same type and payload.
If it was a request, returns the new OutgoingRequest object for it.
"""
assert message.is_request() or message.is_event()
if message.is_request():
return self.send_request(message.command, message.arguments)
else:
self.send_event(message.event, message.body)
def delegate(self, message):
"""Like propagate(message).wait_for_response(), but will also propagate
any resulting MessageHandlingError back.
"""
try:
result = self.propagate(message)
if result.is_request():
result = result.wait_for_response()
return result
except MessageHandlingError as exc:
exc.propagate(message)
def _parse_incoming_messages(self):
log.debug("Starting message loop for channel {0}", self)
try:
while True:
self._parse_incoming_message()
except NoMoreMessages as exc:
log.debug("Exiting message loop for channel {0}: {1}", self, exc)
with self:
# Generate dummy responses for all outstanding requests.
err_message = compat.force_unicode(str(exc), "utf-8", errors="replace")
# Response._parse() will remove items from _sent_requests, so
# make a snapshot before iterating.
sent_requests = list(self._sent_requests.values())
for request in sent_requests:
response_json = MessageDict(
None,
{
"seq": -1,
"request_seq": request.seq,
"command": request.command,
"success": False,
"message": err_message,
},
)
Response._parse(self, response_json, body=exc)
assert not len(self._sent_requests)
self._enqueue_handlers(Disconnect(self), self._handle_disconnect)
self.close()
_message_parsers = {
"event": Event._parse,
"request": Request._parse,
"response": Response._parse,
}
def _parse_incoming_message(self):
"""Reads incoming messages, parses them, and puts handlers into the queue
for _run_handlers() to invoke, until the channel is closed.
"""
# Set up a dedicated decoder for this message, to create MessageDict instances
# for all JSON objects, and track them so that they can be later wired up to
# the Message they belong to, once it is instantiated.
def object_hook(d):
d = MessageDict(None, d)
if "seq" in d:
self._prettify(d)
d.associate_with = associate_with
message_dicts.append(d)
return d
# A hack to work around circular dependency between messages, and instances of
# MessageDict in their payload. We need to set message for all of them, but it
# cannot be done until the actual Message is created - which happens after the
# dicts are created during deserialization.
#
# So, upon deserialization, every dict in the message payload gets a method
# that can be called to set MessageDict.message for *all* dicts belonging to
# that message. This method can then be invoked on the top-level dict by the
# parser, after it has parsed enough of the dict to create the appropriate
# instance of Event, Request, or Response for this message.
def associate_with(message):
for d in message_dicts:
d.message = message
del d.associate_with
message_dicts = []
decoder = self.stream.json_decoder_factory(object_hook=object_hook)
message_dict = self.stream.read_json(decoder)
assert isinstance(message_dict, MessageDict) # make sure stream used decoder
msg_type = message_dict("type", json.enum("event", "request", "response"))
parser = self._message_parsers[msg_type]
try:
parser(self, message_dict)
except InvalidMessageError as exc:
log.error(
"Failed to parse message in channel {0}: {1} in:\n{2!j}",
self,
str(exc),
message_dict,
)
except Exception as exc:
if isinstance(exc, NoMoreMessages) and exc.stream is self.stream:
raise
log.exception(
"Fatal error in channel {0} while parsing:\n{1!j}", self, message_dict
)
os._exit(1)
def _enqueue_handlers(self, what, *handlers):
"""Enqueues handlers for _run_handlers() to run.
`what` is the Message being handled, and is used for logging purposes.
If the background thread with _run_handlers() isn't running yet, starts it.
"""
with self:
self._handler_queue.extend((what, handler) for handler in handlers)
self._handlers_enqueued.notify_all()
# If there is anything to handle, but there's no handler thread yet,
# spin it up. This will normally happen only once, on the first call
# to _enqueue_handlers(), and that thread will run all the handlers
            # for parsed messages. However, this can also happen if somebody calls
# Request.on_response() - possibly concurrently from multiple threads -
# after the channel has already been closed, and the initial handler
# thread has exited. In this case, we spin up a new thread just to run
# the enqueued response handlers, and it will exit as soon as it's out
# of handlers to run.
if len(self._handler_queue) and self._handler_thread is None:
self._handler_thread = threading.Thread(
target=self._run_handlers, name=fmt("{0} message handler", self)
)
self._handler_thread.pydev_do_not_trace = True
self._handler_thread.is_pydev_daemon_thread = True
self._handler_thread.start()
def _run_handlers(self):
"""Runs enqueued handlers until the channel is closed, or until the handler
queue is empty once the channel is closed.
"""
while True:
with self:
closed = self._closed
if closed:
# Wait for the parser thread to wrap up and enqueue any remaining
# handlers, if it is still running.
self._parser_thread.join()
# From this point on, _enqueue_handlers() can only get called
# from Request.on_response().
with self:
if not closed and not len(self._handler_queue):
# Wait for something to process.
self._handlers_enqueued.wait()
# Make a snapshot before releasing the lock.
handlers = self._handler_queue[:]
del self._handler_queue[:]
if closed and not len(handlers):
# Nothing to process, channel is closed, and parser thread is
# not running anymore - time to quit! If Request.on_response()
# needs to call _enqueue_handlers() later, it will spin up
# a new handler thread.
self._handler_thread = None
return
for what, handler in handlers:
# If the channel is closed, we don't want to process any more events
# or requests - only responses and the final disconnect handler. This
# is to guarantee that if a handler calls close() on its own channel,
# the corresponding request or event is the last thing to be processed.
if closed and handler in (Event._handle, Request._handle):
continue
with log.prefixed("/handling {0}/\n", what.describe()):
try:
handler()
except Exception:
# It's already logged by the handler, so just fail fast.
self.close()
os._exit(1)
def _get_handler_for(self, type, name):
"""Returns the handler for a message of a given type.
"""
with self:
handlers = self.handlers
for handler_name in (name + "_" + type, type):
try:
return getattr(handlers, handler_name)
except AttributeError:
continue
raise AttributeError(
fmt(
"handler object {0} for channel {1} has no handler for {2} {3!r}",
compat.srcnameof(handlers),
self,
type,
name,
)
)
def _handle_disconnect(self):
handler = getattr(self.handlers, "disconnect", lambda: None)
try:
handler()
except Exception:
raise log.exception(
"Handler {0}\ncouldn't handle disconnect from {1}:",
compat.srcnameof(handler),
self,
)
class MessageHandlers(object):
"""A simple delegating message handlers object for use with JsonMessageChannel.
For every argument provided, the object gets an attribute with the corresponding
name and value.
"""
def __init__(self, **kwargs):
for name, func in kwargs.items():
setattr(self, name, func)
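# --- Usage sketch (not part of the original module) ---
# A hedged example of wiring MessageHandlers to a JsonMessageChannel. It assumes
# make_stream() returns a stream compatible with this module (read_json,
# write_json, json_decoder_factory, name, close); that factory, the "initialize"
# request and the "initialized" event are illustrative names only.
def _example_channel(make_stream):
    def initialize_request(request):
        # Specific request handler: the returned dict becomes the response body.
        return {"capabilities": {}}
    def initialized_event(event):
        # Specific event handler: event handlers return None.
        pass
    handlers = MessageHandlers(
        initialize_request=initialize_request,
        initialized_event=initialized_event,
    )
    channel = JsonMessageChannel(make_stream(), handlers, name="example")
    channel.start()
    # Outgoing request: request() == send_request(...).wait_for_response(), and
    # it raises the failure body if the other side reports an error.
    return channel.request("threads")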
| avg_line_length: 37.684923 | max_line_length: 103 | alphanum_fraction: 0.611093 |

| hexsha: d67887a42871ec4868a9db8ce98405a7c5d80f28 | size: 3,614 | ext: py | lang: Python |
| max_stars / max_issues / max_forks: flask_svgbarcode/__init__.py | lysdexia/flask-svg-barcode | f0a54a03d1728cfcc43c65e6897efe8f7fde387a | ["MIT"] | null | null | null |
import barcode
import flask_restful as restful
from flask_restful import reqparse, abort
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
def svg_barcode(app, endpoint):
"""
flask extension providing tissue thin wrapper around pybarcode
    You can find pyBarcode with its nice licenses and documentation at
http://pythonhosted.org/pyBarcode/
You'll be glad you did.
accepts
    app <object> the Flask application (passed in to avoid circular imports)
endpoint <str> api endpoint for post call ex. "/api/barcode"
Usage:
    from flask_svgbarcode import svg_barcode
app=Flask(__name__)
# initialize with endpoint
svg_barcode(app, "/api/barcode")
XML string is returned in json object.
If you don't like this, feel free to fork it, add an
@api.representation('application/xml') decorator to the post and convert
the error messages to xml or whatever turns you on. I'm groovy like that.
"""
api = restful.Api(app)
class SVGBarcodeAPI(restful.Resource):
def barcode_svg(self, barcode_format, barcode_string):
"""
accepts
barcode_format <str> one of ['code39', 'ean', 'ean13', 'ean8', 'gs1', 'gtin', 'isbn', 'isbn10', 'isbn13', 'issn', 'jan', 'pzn', 'upc', 'upca']
barcode_string <str> string constructed according to barcode_format requirements - see google, svp
"""
try:
barcode_svg = barcode.get(
barcode_format,
barcode_string).render(writer_options=None)
except Exception as error:
return {
"error": 500,
"message": str(error)
}
return {"barcode_svg": barcode_svg}
def post(self):
"""
accept
JSON object {barcode_format: <str>, barcode_string: <str>}
return
JSON object {"barcode_svg": <XML svg barcode in format specified in "barcode_string> }
on error return
JSON object {"error": <numeric error code>, "message": <str>}
"""
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument(
"barcode_format",
type=str,
help="barcode formats supported: %s"%str(
barcode.PROVIDED_BARCODES),
required=True
)
parser.add_argument(
"barcode_string",
type=str,
help="string to render",
required=True
)
args = parser.parse_args()
data = self.barcode_svg(
args["barcode_format"],
args["barcode_string"],
)
if "error" in data:
return data, data["error"]
return data, 200
api.add_resource(SVGBarcodeAPI, endpoint)
class SVGBarcode(object):
def __init__(self, app=None, endpoint=None):
self.app = app
if app is not None:
self.init_app(app, endpoint)
def init_app(self, app, endpoint):
if not endpoint:
endpoint = "/api/barcode"
if hasattr(app, "teardown_appcontext"):
app.teardown_appcontext(self.teardown)
else:
app.teardown_request(self.teardown)
        svg_barcode(app, endpoint)
    def teardown(self, exception):
        # Nothing to clean up per request; defined so the teardown hooks
        # registered in init_app() have a valid target.
        pass
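# --- Usage sketch (not part of the original module) ---
# A hedged example of exercising the extension through Flask's test client; it
# assumes Flask, flask_restful and pyBarcode are installed, and the payload
# values are illustrative only.
def _example_usage():
    from flask import Flask
    app = Flask(__name__)
    SVGBarcode(app, "/api/barcode")
    client = app.test_client()
    response = client.post(
        "/api/barcode",
        json={"barcode_format": "code39", "barcode_string": "FLASK-123"},
    )
    return response.get_json()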
| avg_line_length: 32.854545 | max_line_length: 158 | alphanum_fraction: 0.552297 |

| hexsha: bb4d65081f8aeaf728ae1a56800d6b610b915436 | size: 3,861 | ext: py | lang: Python |
| max_stars / max_issues / max_forks: Server/ConsoleGameFunctions.py | kjones1511/Blackjack-Flask | 33f5cabf60ac254602ac19a1afaf25854a2d7c4b | ["MIT"] | null | null | null |
import time
from bjObjects import *
def initializeDeck(deckCount):
deck = Deck(deckCount)
deck.shuffle()
return deck
def initializeOnePlayer(players, data, casino):
    playerName = input("Enter Player Name: ")
players.append(Player(playerName, 100))
# begin recording
data["player"] = playerName
data["casino"] = casino
def dealHand(players, dealerHand, deck):
# deal first hands
for player in players:
player.currentHand = [Hand()]
player.currentHand[0].newHand(deck)
dealerHand.newHand(deck)
#if happy with print, delete comments
def print_results(dealerHand, player_hand): #name, hand):
clear()
print ("The dealer has a \n" + str(dealerHand) + "\nfor a total of: " + str(dealerHand.total())+ "\n")
print ("You have a \n" + str(player_hand) + "\nfor a total of: " + str(player_hand.total())+ "\n")
#print(name + "has a \n" + str(hand) + "\nfor a total of: " + str(hand.total() ))
#Note: assumes called before players hit, doesn't account for a hand bigger than 2 cards
#todo: should this be adjusted to a hand instead of player?
#todo: pretty sure this won't work for splits
def blackjack(dealerHand, player):
if player.currentHand[0].total() == dealerHand.total() == 21:
print("you both received blackjack, push")
        player.currentHand[0].win = 2
elif player.currentHand[0].total() == 21:
# print_results(player.currentHand)
print ("Congratulations! You got a Blackjack!\n" + str(player.currentHand[0]))
player.currentHand[0].blackjack = 1
player.currentHand[0].win = 1
elif dealerHand.total() == 21:
# print_results(dealerHand, player.currentHand)
print ("Sorry, you lose. The dealer got a blackjack.\n"+ str(dealerHand))
dealerHand.blackjack = 1
def score(dealerHand, playerHand):
#todo if code breaks, re-add blackjack check. Otherwise, assume blackjacks are handled earlier in logic
print("Dealer Hand:" + str(dealerHand) + " for a score of: " + str(dealerHand.total()))
print("Player Hand:" + str(playerHand) + " for a score of: " + str(playerHand.total()))
#ties dealerHand to player for results later
playerHand.dealerHand = dealerHand.hand
playerHand.dealerScore = dealerHand.total()
playerHand.score = playerHand.total()
if playerHand.total() > 21:
print ("Sorry. You busted. You lose.\n")
elif dealerHand.total() > 21:
print ("Dealer busts. You win!\n")
playerHand.win = 1
elif playerHand.total() < dealerHand.total():
print ("Sorry. Your score isn't higher than the dealer. You lose.\n")
elif playerHand.total() > dealerHand.total():
print ("Congratulations. Your score is higher than the dealer. You win\n")
playerHand.win = 1
elif playerHand.total() == dealerHand.total():
print ("Hand is a push, players tied")
playerHand.win = 2
else:
print ("something has gone wrong with score() if this appears")
def playerDecisionHandling (thisHand, dealerHand, player, deck):
choice = ""
doubleState = True
while choice not in ["s", "d", "q"] and thisHand.total() < 21:
print("The dealer is showing a " + str(dealerHand.hand[0]))
print("Player:" + player.name + "\nHand:" + str(thisHand) + "\nScore:" + str(thisHand.total()))
if doubleState:
choice = input("Do you want to [D]ouble down, [H]it, or [S]tand: ").lower()
doubleState = False
else:
choice = input("Do you want to [H]it, [S]tand, or [Q]uit: ").lower()
# clear()
if choice == "h":
thisHand.hitState = 1
thisHand.hit(deck)
print("your new card is: " + str(thisHand.hand[-1]) + "\nfor a total of " + str(thisHand.total()))
elif choice == "s":
print(player.name + " stands")
elif choice == "d":
thisHand.double = 1
thisHand.hit(deck)
print("your new card is: " + str(thisHand.hand[-1]) + "\nfor a total of " + str(thisHand.total()))
time.sleep(1)
clear()
def dealerHitlogic(dealerHand, dealerStandBoundary, deck):
while dealerHand.total() < dealerStandBoundary:
dealerHand.hit(deck)
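# --- Usage sketch (not part of the original module) ---
# A hedged outline of one console round wired from the helpers above. It assumes
# bjObjects really provides Deck/Hand/Player with the methods already used in
# this file (shuffle, newHand, hit, total) plus clear(); the stand boundary of 17
# is just the conventional dealer rule.
def example_round(casino="console", dealer_stand_boundary=17):
    players = []
    data = {}
    deck = initializeDeck(6)
    initializeOnePlayer(players, data, casino)
    dealer_hand = Hand()
    dealHand(players, dealer_hand, deck)
    for player in players:
        blackjack(dealer_hand, player)
        for hand in player.currentHand:
            playerDecisionHandling(hand, dealer_hand, player, deck)
    dealerHitlogic(dealer_hand, dealer_stand_boundary, deck)
    for player in players:
        for hand in player.currentHand:
            score(dealer_hand, hand)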
| avg_line_length: 37.852941 | max_line_length: 104 | alphanum_fraction: 0.69179 |

| hexsha: f212783bc169b845bb8a38d497a9ac6b65874273 | size: 259 | ext: py | lang: Python |
| max_stars / max_issues / max_forks: setup.py | Karalius/tesla_factory | 187628f8b840d54ecb879f6cc8d5ab2826623d0f | ["MIT"] | null | null | null |
from setuptools import setup
setup(
name='Tesla_factory',
version='1.0',
packages=[''],
package_dir={'': 'tesla'},
url='',
license='',
author='Karalius',
author_email='tesla@tesla.com',
description='Tesla class package'
)
| avg_line_length: 18.5 | max_line_length: 37 | alphanum_fraction: 0.606178 |

| hexsha: 591a682db297ba10d065c72bf1ed734fd37cab14 | size: 2,290 | ext: py | lang: Python |
| max_stars: pycatia/knowledge_interfaces/enum_param.py | evereux/catia_python | 08948585899b12587b0415ce3c9191a408b34897 | ["MIT"] | 90 | 2019-02-21T10:05:28.000Z | 2022-03-19T01:53:41.000Z |
| max_issues: pycatia/knowledge_interfaces/enum_param.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | ["MIT"] | 99 | 2019-05-21T08:29:12.000Z | 2022-03-25T09:55:15.000Z |
| max_forks: pycatia/knowledge_interfaces/enum_param.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | ["MIT"] | 26 | 2019-04-04T06:31:36.000Z | 2022-03-30T07:24:47.000Z |
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.knowledge_interfaces.parameter import Parameter
class EnumParam(Parameter):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| KnowledgeInterfaces.Parameter
| EnumParam
|
| Represents the enum parameter.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.enum_param = com_object
@property
def value_enum(self) -> str:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property ValueEnum() As CATBSTR
|
| Returns or sets the value of the EnumParameter object. Units are expressed
| in the IS unit system, except for lengths expressed in millimeters, and angles
| expressed in decimal degrees.
|
| Example:
| This example sets the param1 value to 1 if its value is greater than
| 2.5:
|
| If (density.Value > 2.5) Then
| density.Value = 1
| End If
:return: str
:rtype: str
"""
return self.enum_param.ValueEnum
@value_enum.setter
def value_enum(self, value: str):
"""
:param str value:
"""
self.enum_param.ValueEnum = value
def __repr__(self):
return f'EnumParam(name="{ self.name }")'
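# --- Usage sketch (not part of the original module) ---
# A hedged illustration of wrapping an existing CATIA COM parameter object. The
# com_object must already expose ValueEnum (e.g. an enum parameter taken from a
# document's parameter collection); obtaining it is outside this module's scope.
def set_enum_value(com_object, value: str) -> str:
    enum_param = EnumParam(com_object)
    enum_param.value_enum = value
    return enum_param.value_enum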
| avg_line_length: 30.131579 | max_line_length: 108 | alphanum_fraction: 0.500873 |

| hexsha: 97da9dbd8f62272a86faf28029ac7ca8d8cec1ed | size: 955 | ext: py | lang: Python |
| max_stars: lib/apikey/test/load_credentials_sample.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | ["MIT"] | 12 | 2018-11-20T04:30:49.000Z | 2021-11-09T12:34:26.000Z |
| max_issues: lib/apikey/test/load_credentials_sample.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | ["MIT"] | 1 | 2019-01-24T15:56:15.000Z | 2019-05-31T07:56:55.000Z |
| max_forks: lib/apikey/test/load_credentials_sample.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | ["MIT"] | 6 | 2018-06-29T03:45:50.000Z | 2022-03-18T01:51:45.000Z |
# coding: utf-8
from __future__ import absolute_import
import time
import thread
import datetime
from ncloud_apikey.credentials import CredentialsResolver
def test_load_credentials(tid, time_to_sleep, credentials):
print('thread %d, now %s, sleep %ss' % (tid, datetime.datetime.now().time(), time_to_sleep))
try:
print('thread %d, now: %s, credentials: %s,access_key: %s, expiration: %s' % (
tid, datetime.datetime.now().time(), credentials, credentials.access_key, credentials.expiration))
except Exception as e:
print(e)
print('thread %d, now: %s, end' % (tid, datetime.datetime.now().time()))
creds = CredentialsResolver().load_credentials()
print('start: %s' % (datetime.datetime.now().time()))
# mandatory refresh (10)
thread.start_new_thread(test_load_credentials, (0, 5, creds))
time.sleep(1)
# advisory refresh (30)
thread.start_new_thread(test_load_credentials, (1, 20, creds))
time.sleep(20)
| avg_line_length: 29.84375 | max_line_length: 110 | alphanum_fraction: 0.710995 |

| hexsha: 2ec6abee9fe17dfd25af91e759cef8fd864a577f | size: 4,070 | ext: py | lang: Python |
| max_stars: ucsmsdk/mometa/sw/SwUtilityDomainFsmStage.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | ["Apache-2.0"] | 78 | 2015-11-30T14:10:05.000Z | 2022-02-13T00:29:08.000Z |
| max_issues: ucsmsdk/mometa/sw/SwUtilityDomainFsmStage.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | ["Apache-2.0"] | 113 | 2015-11-20T09:42:46.000Z | 2022-03-16T16:53:29.000Z |
| max_forks: ucsmsdk/mometa/sw/SwUtilityDomainFsmStage.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | ["Apache-2.0"] | 86 | 2015-12-12T08:22:18.000Z | 2022-01-23T03:56:34.000Z |
"""This module contains the general information for SwUtilityDomainFsmStage ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SwUtilityDomainFsmStageConsts:
LAST_UPDATE_TIME_ = ""
NAME_DEPLOY_BEGIN = "DeployBegin"
NAME_DEPLOY_FAIL = "DeployFail"
NAME_DEPLOY_SUCCESS = "DeploySuccess"
NAME_DEPLOY_UPDATE_CONNECTIVITY = "DeployUpdateConnectivity"
NAME_NOP = "nop"
STAGE_STATUS_FAIL = "fail"
STAGE_STATUS_IN_PROGRESS = "inProgress"
STAGE_STATUS_NOP = "nop"
STAGE_STATUS_PENDING = "pending"
STAGE_STATUS_SKIP = "skip"
STAGE_STATUS_SUCCESS = "success"
STAGE_STATUS_THROTTLED = "throttled"
class SwUtilityDomainFsmStage(ManagedObject):
"""This is SwUtilityDomainFsmStage class."""
consts = SwUtilityDomainFsmStageConsts()
naming_props = set(['name'])
mo_meta = MoMeta("SwUtilityDomainFsmStage", "swUtilityDomainFsmStage", "stage-[name]", VersionMeta.Version211a, "OutputOnly", 0xf, [], [""], ['swUtilityDomainFsm'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"last_update_time": MoPropertyMeta("last_update_time", "lastUpdateTime", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [""], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version211a, MoPropertyMeta.NAMING, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateConnectivity", "nop"], []),
"order": MoPropertyMeta("order", "order", "ushort", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"retry": MoPropertyMeta("retry", "retry", "byte", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"stage_status": MoPropertyMeta("stage_status", "stageStatus", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["fail", "inProgress", "nop", "pending", "skip", "success", "throttled"], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"lastUpdateTime": "last_update_time",
"name": "name",
"order": "order",
"retry": "retry",
"rn": "rn",
"sacl": "sacl",
"stageStatus": "stage_status",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.child_action = None
self.descr = None
self.last_update_time = None
self.order = None
self.retry = None
self.sacl = None
self.stage_status = None
self.status = None
ManagedObject.__init__(self, "SwUtilityDomainFsmStage", parent_mo_or_dn, **kwargs)
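# --- Usage sketch (not part of the original module) ---
# A hedged example of building this managed object locally for inspection. The
# parent DN string is illustrative only, and since the MO is declared
# "OutputOnly" in mo_meta it would normally be queried rather than added.
def _example_stage():
    stage = SwUtilityDomainFsmStage(
        parent_mo_or_dn="sys/switch-A/utility-fsm",
        name=SwUtilityDomainFsmStageConsts.NAME_DEPLOY_BEGIN,
    )
    return stage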
| avg_line_length: 55.753425 | max_line_length: 265 | alphanum_fraction: 0.647174 |

| hexsha: 5e1fe06e8b2eafd7dab49abf9b27304b0c167951 | size: 413 | ext: py | lang: Python |
| max_stars: fidget/__init__.py | angr/fidget | 0f255bbd11c6721d39581c5d3d2863fce5fad785 | ["BSD-2-Clause"] | 44 | 2016-08-08T14:32:43.000Z | 2021-11-28T23:33:09.000Z |
| max_issues: fidget/__init__.py | angr/fidget | 0f255bbd11c6721d39581c5d3d2863fce5fad785 | ["BSD-2-Clause"] | null | null | null |
| max_forks: fidget/__init__.py | angr/fidget | 0f255bbd11c6721d39581c5d3d2863fce5fad785 | ["BSD-2-Clause"] | 8 | 2016-09-13T22:39:49.000Z | 2020-05-24T18:51:57.000Z |
from .patching import Fidget
from .binary_data import BinaryData
from .new_analysis import OffsetAnalysis
from .techniques import FidgetTechnique, FidgetDefaultTechnique
from .memory import register_fidget_preset
def patch_file(infile, outfile, options):
fidgetress = Fidget(infile, **options.pop('Fidget', {}))
fidgetress.patch(**options)
fidgetress.apply_patches(outfile)
register_fidget_preset()
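# --- Usage sketch (not part of the original package) ---
# A hedged example of the patch_file() helper above. The file paths are
# placeholders, and the empty options dict simply falls through to Fidget's
# defaults; real runs would pass the Fidget/technique options documented
# elsewhere in the project.
def _example_patch():
    patch_file("./vulnerable_binary", "./vulnerable_binary.patched", {})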
| avg_line_length: 31.769231 | max_line_length: 63 | alphanum_fraction: 0.801453 |

| hexsha: 6a945c431fc016feef669e5b8c4f0cdfbf94feeb | size: 603 | ext: py | lang: Python |
| max_stars / max_issues / max_forks: books/masteringPython/cp07/concepts.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | ["BSD-2-Clause"] | null | null | null |
# chapter 7 asyncio
# concepts of asyncio
# main concepts: coroutines and event loops
# helper classes: Streams, Futures, Processes
# create_task and ensure_future
import asyncio
async def sleeper(delay):
await asyncio.sleep(delay)
print('Finished sleeper with delay: %d' % delay)
# create an event loop
loop = asyncio.get_event_loop()
# create the task
result = loop.call_soon(loop.create_task, sleeper(1))
# make sure the loop stops after 2 seconds
result = loop.call_later(2, loop.stop)
# start the loop and make it run forever, or at least until the loop.stop gets
loop.run_forever()
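# A hedged variation on the sleeper example above: instead of stopping the loop
# with call_later, gather several coroutines and run the loop only until they
# complete, e.g. loop.run_until_complete(demo_gather()). Defined only, not run.
async def demo_gather():
    await asyncio.gather(sleeper(1), sleeper(2))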
| avg_line_length: 23.192308 | max_line_length: 78 | alphanum_fraction: 0.749585 |

| hexsha: ef42a11d4ca425c87c562a2184a292f9bacac119 | size: 5,013 | ext: py | lang: Python |
| max_stars: scripts/offsets_cmd.py | LoneWanderer-GH/nlohmann-json-gdb | a2bf71e8e0825e71262c6555be6657554835ebb5 | ["MIT"] | 6 | 2020-05-23T11:38:42.000Z | 2021-11-11T06:55:16.000Z |
| max_issues: scripts/offsets_cmd.py | LoneWanderer-GH/nlohmann-json-gdb | a2bf71e8e0825e71262c6555be6657554835ebb5 | ["MIT"] | 3 | 2020-04-23T21:54:14.000Z | 2020-06-23T07:35:53.000Z |
| max_forks: scripts/offsets_cmd.py | LoneWanderer-GH/nlohmann-json-gdb | a2bf71e8e0825e71262c6555be6657554835ebb5 | ["MIT"] | null | null | null |
#
# Licensed under the MIT License <http://opensource.org/licenses/MIT>.
# SPDX-License-Identifier: MIT
# Copyright (c) 2020 LoneWanderer-GH https://github.com/LoneWanderer-GH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import gdb
# heavily inspired from https://stackoverflow.com/questions/9788679/how-to-get-the-relative-address-of-a-field-in-a-structure-dump-c
# but with some significant additions
gdb_type_code_dict = {
gdb.TYPE_CODE_PTR:"gdb.TYPE_CODE_PTR = The type is a pointer.",
gdb.TYPE_CODE_ARRAY:"gdb.TYPE_CODE_ARRAY = The type is an array.",
gdb.TYPE_CODE_STRUCT:"gdb.TYPE_CODE_STRUCT = The type is a structure.",
gdb.TYPE_CODE_UNION:"gdb.TYPE_CODE_UNION = The type is a union.",
gdb.TYPE_CODE_ENUM:"gdb.TYPE_CODE_ENUM = The type is an enum.",
gdb.TYPE_CODE_FLAGS:"gdb.TYPE_CODE_FLAGS = A bit flags type, used for things such as status registers.",
gdb.TYPE_CODE_FUNC:"gdb.TYPE_CODE_FUNC = The type is a function.",
gdb.TYPE_CODE_INT:"gdb.TYPE_CODE_INT = The type is an integer type.",
gdb.TYPE_CODE_FLT:"gdb.TYPE_CODE_FLT = A floating point type.",
gdb.TYPE_CODE_VOID:"gdb.TYPE_CODE_VOID = The special type void.",
gdb.TYPE_CODE_SET:"gdb.TYPE_CODE_SET = A Pascal set type.",
gdb.TYPE_CODE_RANGE:"gdb.TYPE_CODE_RANGE = A range type, that is, an integer type with bounds.",
gdb.TYPE_CODE_STRING:"gdb.TYPE_CODE_STRING = A string type. Note that this is only used for certain languages with language-defined string types; C strings are not represented this way.",
gdb.TYPE_CODE_BITSTRING:"gdb.TYPE_CODE_BITSTRING = A string of bits. It is deprecated.",
gdb.TYPE_CODE_ERROR:"gdb.TYPE_CODE_ERROR = An unknown or erroneous type.",
gdb.TYPE_CODE_METHOD:"gdb.TYPE_CODE_METHOD = A method type, as found in C++.",
gdb.TYPE_CODE_METHODPTR:"gdb.TYPE_CODE_METHODPTR = A pointer-to-member-function.",
gdb.TYPE_CODE_MEMBERPTR:"gdb.TYPE_CODE_MEMBERPTR = A pointer-to-member.",
gdb.TYPE_CODE_REF:"gdb.TYPE_CODE_REF = A reference type.",
gdb.TYPE_CODE_RVALUE_REF:"gdb.TYPE_CODE_RVALUE_REF = A C++11 rvalue reference type.",
gdb.TYPE_CODE_CHAR:"gdb.TYPE_CODE_CHAR = A character type.",
gdb.TYPE_CODE_BOOL:"gdb.TYPE_CODE_BOOL = A boolean type.",
gdb.TYPE_CODE_COMPLEX:"gdb.TYPE_CODE_COMPLEX = A complex float type.",
gdb.TYPE_CODE_TYPEDEF:"gdb.TYPE_CODE_TYPEDEF = A typedef to some other type.",
gdb.TYPE_CODE_NAMESPACE:"gdb.TYPE_CODE_NAMESPACE = A C++ namespace.",
gdb.TYPE_CODE_DECFLOAT:"gdb.TYPE_CODE_DECFLOAT = A decimal floating point type.",
gdb.TYPE_CODE_INTERNAL_FUNCTION:"gdb.TYPE_CODE_INTERNAL_FUNCTION = A function internal to GDB. This is the type used to represent convenience functions."
}
class Offsets(gdb.Command):
def __init__(self):
super (Offsets, self).__init__ ('offsets-of', gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
raise gdb.GdbError('offsets-of takes exactly 1 argument.')
t = argv[0]
stype = gdb.execute("whatis {}".format(t), to_string=True)
# print("{} is {}".format(t, stype))
stype = stype.split("=")[-1].strip()
gdb_type = gdb.lookup_type(stype)
if gdb_type.code in [gdb.TYPE_CODE_PTR, gdb.TYPE_CODE_REF, gdb.TYPE_CODE_RVALUE_REF]:
print("Type is a pointer, get referenced value type")
gdb_type = gdb_type.referenced_value().type
if not gdb_type.code in [gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION]:
print("{} is not a structure with fields ...")
print("{}".format(gdb_type_code_dict[gdb_type.code]))
return
lines = "\n".join(["\t{:<20.20} => {:5d}".format(field.name, field.bitpos//8) for field in gdb_type.fields()])
# print(lines)
print ("{0} (of type {1})\n{2}".format(t, gdb_type, lines))
Offsets()
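# --- Usage sketch (not part of the original script) ---
# After "source offsets_cmd.py" inside gdb, the command registered above can be
# used interactively ("(gdb) offsets-of my_struct_var") or driven from gdb's
# Python layer as below. "my_struct_var" is a hypothetical symbol in the
# program being debugged.
def example_offsets(variable_name="my_struct_var"):
    gdb.execute("offsets-of {}".format(variable_name))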
| avg_line_length: 55.087912 | max_line_length: 191 | alphanum_fraction: 0.708159 |

| hexsha: e19d6a17b9d46ea2fc4125f6e5809a1184fcd06a | size: 1,046 | ext: py | lang: Python |
| max_stars / max_issues / max_forks: 10/JackConst.py | aadityarautela/nand2tetris | 64768087ae5f6903beeb17a01492d68d7b2354f6 | ["MIT"] | null | null | null |
T_KEYWORD = 0
T_SYM = 1
T_NUM = 2
T_STR = 3
T_ID = 4
T_ERROR = 5
KW_CLASS = 'class'
KW_METHOD = 'method'
KW_FUNCTION = 'function'
KW_CONSTRUCTOR = 'constructor'
KW_INT = 'int'
KW_BOOLEAN = 'boolean'
KW_CHAR = 'char'
KW_VOID = 'void'
KW_VAR = 'var'
KW_STATIC = 'static'
KW_FIELD = 'field'
KW_LET = 'let'
KW_DO = 'do'
KW_IF = 'if'
KW_ELSE = 'else'
KW_WHILE = 'while'
KW_RETURN = 'return'
KW_TRUE = 'true'
KW_FALSE = 'false'
KW_NULL = 'null'
KW_THIS = 'this'
KW_NONE = ''
keywords = [KW_CLASS, KW_METHOD, KW_FUNCTION, KW_CONSTRUCTOR, KW_INT, KW_BOOLEAN,
KW_CHAR, KW_VOID, KW_VAR, KW_STATIC, KW_FIELD, KW_LET, KW_DO, KW_IF,
KW_ELSE, KW_WHILE, KW_RETURN, KW_TRUE, KW_FALSE, KW_NULL, KW_THIS]
tokens = ['keyword', 'symbol', 'integerConstant', 'stringConstant', 'identifier']
symbols = '{}()[].,;+-*/&|<>=~'
| avg_line_length: 25.512195 | max_line_length: 81 | alphanum_fraction: 0.525813 |

| hexsha: feeb63df5e39fdf1bf4e0c5da99d1e55cec43ac9 | size: 39,010 | ext: py | lang: Python |
| max_stars / max_issues / max_forks: qa/rpc-tests/test_framework/mininode.py | crowdcoinChain/Crowdcoin-Exp | e5e1cbaee01ea0c8757f28e63ae1615b363c96e9 | ["MIT"] | null | null | null |
# mininode.py - Crowdcoin P2P network half-a-node
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a dash node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# dash/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import time
import sys
import random
from binascii import hexlify, unhexlify
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
import dash_hash
BIP0031_VERSION = 60000
MY_VERSION = 70206 # current MIN_PEER_PROTO_VERSION
MY_SUBVERSION = b"/python-mininode-tester:0.0.2/"
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
COIN = 100000000L # 1 btc in satoshis
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def dashhash(s):
return dash_hash.getPoWHash(s)
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return struct.pack("B", len(s)) + s
elif len(s) < 0x10000:
return struct.pack("<BH", 253, len(s)) + s
elif len(s) < 0x100000000L:
return struct.pack("<BI", 254, len(s)) + s
return struct.pack("<BQ", 255, len(s)) + s
def deser_uint256(f):
r = 0L
for i in xrange(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in xrange(8):
rs += struct.pack("<I", u & 0xFFFFFFFFL)
u >>= 32
return rs
def uint256_from_str(s):
r = 0L
t = struct.unpack("<IIIIIIII", s[:32])
for i in xrange(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(unhexlify(hex_string.encode('ascii'))))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return hexlify(obj.serialize()).decode('ascii')
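# Hedged usage sketch: FromHex()/ToHex() work with any object exposing
# serialize()/deserialize(); _HexDemo below is a made-up stand-in used only to
# illustrate the round trip, not a dashd structure.
class _HexDemo(object):
    def __init__(self):
        self.value = 0
    def deserialize(self, f):
        self.value = struct.unpack("<I", f.read(4))[0]
    def serialize(self):
        return struct.pack("<I", self.value)
def _example_hex_roundtrip():
    obj = FromHex(_HexDemo(), "2a000000")  # little-endian encoding of 42
    return obj.value == 42 and ToHex(obj) == "2a000000"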
# Objects that map to dashd objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block"}
def __init__(self, t=0, h=0L):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
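# Hedged round-trip sketch (the hash value is made up): each of these objects
# can be rebuilt from its own serialize() output, which is how the message
# classes further below move them across the wire.
def _example_inv_roundtrip():
    original = CInv(t=2, h=0x1234)
    restored = CInv()
    restored.deserialize(BytesIO(original.serialize()))
    return (restored.type, restored.hash) == (2, 0x1234)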
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(dashhash(r))
self.hash = encode(dashhash(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = b""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in xrange(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
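# Hedged construction sketch (the locator hashes are whatever block hashes the
# caller already knows; nothing here is hard-coded): hashstop is left at 0 to
# request as many headers as the peer will return, per the comment above.
def _example_build_getheaders(known_block_hashes):
    request = msg_getheaders()
    request.locator.vHave = list(known_block_hashes)
    request.hashstop = 0
    return request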
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in dashd indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0L
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
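# Illustrative usage sketch: wait_until() polls its predicate under
# mininode_lock in roughly 0.05 second steps, so "timeout" is approximately in
# seconds and "attempts" counts individual polls.
def _example_wait_for_flag(flag_holder, timeout=10):
    return wait_until(lambda: flag_holder.get("done", False), timeout=timeout)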
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
getattr(self, 'on_' + message.command)(conn, message)
except:
print "ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0])
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
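# Hedged sketch of the subclassing pattern described above NodeConnCB: a
# test-specific callback only overrides the on_* handlers it cares about and
# inherits no-ops for everything else.
class _ExampleBlockCounter(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.blocks_seen = 0
    def on_block(self, conn, message):
        self.blocks_seen += 1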
# More useful callbacks and functions for NodeConnCBs that have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
}
MAGIC_BYTES = {
"mainnet": b"\xbf\x0c\x6b\xbd", # mainnet
"testnet3": b"\xce\xe2\xca\xff", # testnet3
"regtest": b"\xfc\xc1\xb7\xdc" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print 'MiniNode: Connecting to Crowdcoin Node IP # ' + dstaddr + ':' \
+ str(dstport)
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
except Exception as e:
print 'got_data:', repr(e)
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
            # We check whether to disconnect outside of the asyncore
            # loop, to work around the behavior of asyncore when using
            # select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
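# Hedged wiring sketch (the address and port below are placeholders, not
# defaults taken from this file): the usual pattern is one callback per peer,
# one NodeConn bound to it, and a single NetworkThread driving the asyncore
# loop for all connections.
def _example_connect(rpc):
    callback = SingleNodeConnCB()
    conn = NodeConn('127.0.0.1', 18444, rpc, callback)
    callback.add_connection(conn)
    NetworkThread().start()
    callback.wait_for_verack()
    return conn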
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| 29.155456 | 184 | 0.569982 |
4974ed92e1aabe97565f0857b010bd9bbf25168e | 145,818 | py | Python | src/transformers/generation_utils.py | alvinwatner/transformers_constraint | ba8281e34244c3b5bbc33c70b762b147a50ee020 | ["Apache-2.0"] | null | null | null | src/transformers/generation_utils.py | alvinwatner/transformers_constraint | ba8281e34244c3b5bbc33c70b762b147a50ee020 | ["Apache-2.0"] | null | null | null | src/transformers/generation_utils.py | alvinwatner/transformers_constraint | ba8281e34244c3b5bbc33c70b762b147a50ee020 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from torch import nn
from .file_utils import ModelOutput
from .generation_beam_search import BeamScorer, BeamSearchScorer
from .generation_logits_process import (
EncoderNoRepeatNGramLogitsProcessor,
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitsProcessorList,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
Timesteps,
)
from .generation_stopping_criteria import (
MaxLengthCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
from .utils import logging
logger = logging.get_logger(__name__)
@dataclass
class GreedySearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using greedy search.
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`torch.FloatTensor`
with each tensor of shape :obj:`(batch_size, config.vocab_size)`).
attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
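# Hedged usage sketch (added commentary, not part of the public API): when
# ``generate`` is called with ``return_dict_in_generate=True`` and
# ``output_scores=True``, the ``scores`` tuple holds one entry per generated
# token, which the helper below cross-checks against ``sequences``.
def _example_count_generated_tokens(outputs: GreedySearchDecoderOnlyOutput, prompt_length: int) -> int:
    num_generated = outputs.sequences.shape[-1] - prompt_length
    assert outputs.scores is None or len(outputs.scores) == num_generated
    return num_generated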
@dataclass
class GreedySearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention
weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the
encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. :obj:`(max_length-1,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor
of shape :obj:`(batch_size, config.vocab_size)`).
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer of the decoder) of shape :obj:`(batch_size,
num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`.
cross_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class SampleDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using sampling.
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`torch.FloatTensor`
with each tensor of shape :obj:`(batch_size*num_return_sequences, config.vocab_size)`).
attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(num_return_sequences*batch_size, num_heads, generated_length,
sequence_length)`.
hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(num_return_sequences*batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class SampleEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of
the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states
attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. :obj:`(max_length-1,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor
of shape :obj:`(batch_size*num_return_sequences, config.vocab_size)`).
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer of the decoder) of shape
:obj:`(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences, num_heads, generated_length,
sequence_length)`.
cross_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using beam search.
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Final beam scores of the generated ``sequences``.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam
. :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of
shape :obj:`(batch_size*num_beams*num_return_sequences, config.vocab_size)`).
attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length,
sequence_length)`.
hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, generated_length,
hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights
of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states
attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Final beam scores of the generated ``sequences``.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam
. :obj:`(max_length-1,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape
:obj:`(batch_size*num_beams, config.vocab_size)`).
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer of the decoder) of shape :obj:`(batch_size,
num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, num_heads,
generated_length, sequence_length)`.
cross_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, generated_length,
hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSampleDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using beam sample.
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
        sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Final beam scores of the generated ``sequences``.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam
. :obj:`(max_length-input_ids.shape[-1],)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of
shape :obj:`(batch_size*num_beams*num_return_sequences, config.vocab_size)`).
attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length,
sequence_length)`.
hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSampleEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention
weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the
encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)
Args:
sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_beams, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
        sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Final beam scores of the generated ``sequences``.
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam
. :obj:`(max_length-1,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape
:obj:`(batch_size*num_beams, config.vocab_size)`).
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer of the decoder) of shape :obj:`(batch_size,
num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size*num_beams, sequence_length, hidden_size)`.
decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length,
sequence_length)`.
cross_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
GreedySearchOutput = Union[GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput]
SampleOutput = Union[SampleEncoderDecoderOutput, SampleDecoderOnlyOutput]
BeamSearchOutput = Union[BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput]
BeamSampleOutput = Union[BeamSampleEncoderDecoderOutput, BeamSampleDecoderOnlyOutput]
class GenerationMixin:
"""
A class containing all of the functions supporting generation, to be used as a mixin in
:class:`~transformers.PreTrainedModel`.
"""
def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]:
"""
Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to prepare inputs in the
generate method.
"""
return {"input_ids": input_ids}
def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
"""
Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to adjust the logits in
the generate method.
"""
return logits
def _prepare_input_ids_for_generation(
self, bos_token_id: Optional[int], encoder_outputs: Optional[ModelOutput]
) -> torch.LongTensor:
if self.config.is_encoder_decoder and encoder_outputs is not None:
# make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
shape = encoder_outputs.last_hidden_state.size()[:-1]
return torch.ones(shape, dtype=torch.long, device=self.device) * -100
if bos_token_id is None:
raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
return torch.ones((1, 1), dtype=torch.long, device=self.device) * bos_token_id
def _prepare_attention_mask_for_generation(
self, input_ids: torch.Tensor, pad_token_id: int, eos_token_id: int
) -> torch.LongTensor:
is_pad_token_in_inputs_ids = (pad_token_id is not None) and (pad_token_id in input_ids)
is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (
(eos_token_id is not None) and (pad_token_id != eos_token_id)
)
if is_pad_token_in_inputs_ids and is_pad_token_not_equal_to_eos_token_id:
return input_ids.ne(pad_token_id).long()
return input_ids.new_ones(input_ids.shape, dtype=torch.long)
def _prepare_encoder_decoder_kwargs_for_generation(
self, input_ids: torch.LongTensor, model_kwargs
) -> Dict[str, Any]:
if "encoder_outputs" not in model_kwargs:
# retrieve encoder hidden states
encoder = self.get_encoder()
encoder_kwargs = {
argument: value
for argument, value in model_kwargs.items()
if not (argument.startswith("decoder_") or argument.startswith("cross_attn"))
}
model_kwargs["encoder_outputs"]: ModelOutput = encoder(input_ids, return_dict=True, **encoder_kwargs)
return model_kwargs
def _prepare_decoder_input_ids_for_generation(
self, input_ids: torch.LongTensor, decoder_start_token_id: int = None, bos_token_id: int = None
) -> torch.LongTensor:
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
decoder_input_ids = (
torch.ones((input_ids.shape[0], 1), dtype=torch.long, device=input_ids.device) * decoder_start_token_id
)
return decoder_input_ids
def _get_pad_token_id(self, pad_token_id: int = None, eos_token_id: int = None) -> int:
if pad_token_id is None and eos_token_id is not None:
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
pad_token_id = eos_token_id
return pad_token_id
def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
if decoder_start_token_id is not None:
return decoder_start_token_id
elif (
hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "decoder_start_token_id")
and self.config.decoder.decoder_start_token_id is not None
):
return self.config.decoder.decoder_start_token_id
elif bos_token_id is not None:
return bos_token_id
elif (
hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "bos_token_id")
and self.config.decoder.bos_token_id is not None
):
return self.config.decoder.bos_token_id
raise ValueError(
"`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
)
@staticmethod
def _expand_inputs_for_generation(
input_ids: torch.LongTensor,
expand_size: int = 1,
is_encoder_decoder: bool = False,
attention_mask: torch.LongTensor = None,
encoder_outputs: ModelOutput = None,
**model_kwargs,
) -> Tuple[torch.LongTensor, Dict[str, Any]]:
expanded_return_idx = (
torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device)
)
input_ids = input_ids.index_select(0, expanded_return_idx)
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx)
if attention_mask is not None:
model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx)
if is_encoder_decoder:
if encoder_outputs is None:
raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select(
0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device)
)
model_kwargs["encoder_outputs"] = encoder_outputs
return input_ids, model_kwargs
@staticmethod
def _update_model_kwargs_for_generation(
outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False
) -> Dict[str, Any]:
# update past
if "past_key_values" in outputs:
model_kwargs["past"] = outputs.past_key_values
elif "mems" in outputs:
model_kwargs["past"] = outputs.mems
elif "past_buckets_states" in outputs:
model_kwargs["past"] = outputs.past_buckets_states
else:
model_kwargs["past"] = None
# update token_type_ids with last value
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
# update attention mask
if not is_encoder_decoder:
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
model_kwargs["attention_mask"] = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
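                # e.g. a (batch_size, cur_len) mask becomes (batch_size, cur_len + 1); the token generated at the
                # current step is always attended to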
return model_kwargs
def _reorder_cache(self, past, beam_idx):
raise NotImplementedError(
f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to enable beam search for {self.__class__}"
)
def _get_logits_warper(
self, top_k: int = None, top_p: float = None, temperature: float = None, num_beams: int = None
) -> LogitsProcessorList:
"""
This class returns a :obj:`~transformers.LogitsProcessorList` list object that contains all relevant
:obj:`~transformers.LogitsWarper` instances used for multinomial sampling.
"""
# init warp parameters
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
temperature = temperature if temperature is not None else self.config.temperature
# instantiate warpers list
warpers = LogitsProcessorList()
# the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
# all samplers can be found in `generation_utils_samplers.py`
if temperature is not None and temperature != 1.0:
warpers.append(TemperatureLogitsWarper(temperature))
if top_k is not None and top_k != 0:
warpers.append(TopKLogitsWarper(top_k=top_k, min_tokens_to_keep=(2 if num_beams > 1 else 1)))
if top_p is not None and top_p < 1.0:
warpers.append(TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=(2 if num_beams > 1 else 1)))
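        # the warpers run in insertion order: temperature scaling first, then top-k, then top-p truncation;
        # with more than one beam at least two tokens are kept so beam search always has an alternative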
return warpers
def _get_logits_processor(
self,
repetition_penalty: float,
no_repeat_ngram_size: int,
encoder_no_repeat_ngram_size: int,
encoder_input_ids: torch.LongTensor,
bad_words_ids: List[List[int]],
min_length: int,
max_length: int,
eos_token_id: int,
forced_bos_token_id: int,
forced_eos_token_id: int,
prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]],
num_beams: int,
num_beam_groups: int,
diversity_penalty: float,
remove_invalid_values: bool,
) -> LogitsProcessorList:
"""
This class returns a :obj:`~transformers.LogitsProcessorList` list object that contains all relevant
:obj:`~transformers.LogitsProcessor` instances used to modify the scores of the language model head.
"""
processors = LogitsProcessorList()
# init warp parameters
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
encoder_no_repeat_ngram_size = (
encoder_no_repeat_ngram_size
if encoder_no_repeat_ngram_size is not None
else self.config.encoder_no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
min_length = min_length if min_length is not None else self.config.min_length
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
diversity_penalty = diversity_penalty if diversity_penalty is not None else self.config.diversity_penalty
forced_bos_token_id = (
forced_bos_token_id if forced_bos_token_id is not None else self.config.forced_bos_token_id
)
forced_eos_token_id = (
forced_eos_token_id if forced_eos_token_id is not None else self.config.forced_eos_token_id
)
remove_invalid_values = (
remove_invalid_values if remove_invalid_values is not None else self.config.remove_invalid_values
)
# instantiate processors list
# the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
# all samplers can be found in `generation_utils_samplers.py`
if diversity_penalty is not None and diversity_penalty > 0.0:
processors.append(
HammingDiversityLogitsProcessor(
diversity_penalty=diversity_penalty, num_beams=num_beams, num_beam_groups=num_beam_groups
)
)
if repetition_penalty is not None and repetition_penalty != 1.0:
processors.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty))
if no_repeat_ngram_size is not None and no_repeat_ngram_size > 0:
processors.append(NoRepeatNGramLogitsProcessor(no_repeat_ngram_size))
if encoder_no_repeat_ngram_size is not None and encoder_no_repeat_ngram_size > 0:
if self.config.is_encoder_decoder:
processors.append(EncoderNoRepeatNGramLogitsProcessor(encoder_no_repeat_ngram_size, encoder_input_ids))
else:
raise ValueError(
"It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture"
)
if bad_words_ids is not None:
processors.append(NoBadWordsLogitsProcessor(bad_words_ids, eos_token_id))
if min_length is not None and eos_token_id is not None and min_length > -1:
processors.append(MinLengthLogitsProcessor(min_length, eos_token_id))
if prefix_allowed_tokens_fn is not None:
processors.append(PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, num_beams // num_beam_groups))
if forced_bos_token_id is not None:
processors.append(ForcedBOSTokenLogitsProcessor(forced_bos_token_id))
if forced_eos_token_id is not None:
processors.append(ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id))
if remove_invalid_values is True:
processors.append(InfNanRemoveLogitsProcessor())
return processors
def _get_stopping_criteria(self, max_length: Optional[int], max_time: Optional[float]) -> StoppingCriteriaList:
stopping_criteria = StoppingCriteriaList()
if max_length is not None:
stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
if max_time is not None:
stopping_criteria.append(MaxTimeCriteria(max_time=max_time))
return stopping_criteria
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.LongTensor] = None,
max_length: Optional[int] = None,
min_length: Optional[int] = None,
do_sample: Optional[bool] = None,
early_stopping: Optional[bool] = None,
num_beams: Optional[int] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
repetition_penalty: Optional[float] = None,
bad_words_ids: Optional[Iterable[int]] = None,
banned_words: Optional[Dict] = None,
bos_token_id: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
no_repeat_ngram_size: Optional[int] = None,
encoder_no_repeat_ngram_size: Optional[int] = None,
num_return_sequences: Optional[int] = None,
max_time: Optional[float] = None,
max_new_tokens: Optional[int] = None,
decoder_start_token_id: Optional[int] = None,
use_cache: Optional[bool] = None,
num_beam_groups: Optional[int] = None,
diversity_penalty: Optional[float] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
forced_bos_token_id: Optional[int] = None,
forced_eos_token_id: Optional[int] = None,
remove_invalid_values: Optional[bool] = None,
synced_gpus: Optional[bool] = None,
**model_kwargs,
) -> Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head. The method currently supports greedy decoding,
multinomial sampling, beam-search decoding, and beam-search multinomial sampling.
Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the
attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values
        indicated are the default values of those config attributes.
Most of these parameters are explained in more detail in `this blog post
<https://huggingface.co/blog/how-to-generate>`__.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes it with
:obj:`bos_token_id` and a batch size of 1.
max_length (:obj:`int`, `optional`, defaults to :obj:`model.config.max_length`):
The maximum length of the sequence to be generated.
max_new_tokens (:obj:`int`, `optional`, defaults to None):
                The maximum number of tokens to generate, ignoring the number of tokens in the prompt. Use either
                :obj:`max_new_tokens` or :obj:`max_length`, but not both, as they serve the same purpose.
min_length (:obj:`int`, `optional`, defaults to 10):
The minimum length of the sequence to be generated.
do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to use sampling; use greedy decoding otherwise.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
temperature (:obj:`float`, `optional`, defaults to 1.0):
                The value used to modulate the next token probabilities.
top_k (:obj:`int`, `optional`, defaults to 50):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (:obj:`float`, `optional`, defaults to 1.0):
If set to float < 1, only the most probable tokens with probabilities that add up to :obj:`top_p` or
higher are kept for generation.
repetition_penalty (:obj:`float`, `optional`, defaults to 1.0):
The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
bos_token_id (:obj:`int`, `optional`):
The id of the `beginning-of-sequence` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
length_penalty (:obj:`float`, `optional`, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the
model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer
sequences.
no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
encoder_no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size that occur in the ``encoder_input_ids`` cannot occur in the
``decoder_input_ids``.
            bad_words_ids (:obj:`List[List[int]]`, `optional`):
List of token ids that are not allowed to be generated. In order to get the tokens of the words that
should not appear in the generated text, use :obj:`tokenizer(bad_word,
add_prefix_space=True).input_ids`.
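            banned_words (:obj:`Dict`, `optional`):
                Custom argument used only by greedy decoding in this implementation. A dictionary with the keys
                ``ids`` (a list of banned token-id sequences, in the same format as :obj:`bad_words_ids`) and
                ``epsilon`` (the probability with which a freshly generated banned word is reverted and replaced by
                an alternative token).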
            num_return_sequences (:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
            max_time (:obj:`float`, `optional`, defaults to None):
                The maximum amount of time you allow the computation to run for, in seconds. Generation will still
                finish the current pass after the allocated time has passed.
attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for
tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same
shape as :obj:`input_ids` that masks the pad token. `What are attention masks?
<../glossary.html#attention-mask>`__
decoder_start_token_id (:obj:`int`, `optional`):
If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.
            use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
num_beam_groups (:obj:`int`, `optional`, defaults to 1):
Number of groups to divide :obj:`num_beams` into in order to ensure diversity among different groups of
                beams. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details.
diversity_penalty (:obj:`float`, `optional`, defaults to 0.0):
                This value is subtracted from a beam's score if it generates the same token as any beam from another group
at a particular time. Note that :obj:`diversity_penalty` is only effective if ``group beam search`` is
enabled.
            prefix_allowed_tokens_fn (:obj:`Callable[[int, torch.Tensor], List[int]]`, `optional`):
                If provided, this function constrains the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments: the batch ID :obj:`batch_id` and
:obj:`input_ids`. It has to return a list with the allowed tokens for the next generation step
                conditioned on the batch ID :obj:`batch_id` and the previously generated tokens :obj:`input_ids`. This
argument is useful for constrained generation conditioned on the prefix, as described in
`Autoregressive Entity Retrieval <https://arxiv.org/abs/2010.00904>`__.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
forced_bos_token_id (:obj:`int`, `optional`):
The id of the token to force as the first generated token after the :obj:`decoder_start_token_id`.
Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token
needs to be the target language token.
forced_eos_token_id (:obj:`int`, `optional`):
The id of the token to force as the last generated token when :obj:`max_length` is reached.
remove_invalid_values (:obj:`bool`, `optional`):
                Whether to remove possible `nan` and `inf` outputs of the model to prevent the generation method from
                crashing. Note that using ``remove_invalid_values`` can slow down generation.
synced_gpus (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If the
model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific
kwargs should be prefixed with `decoder_`.
Return:
:class:`~transformers.file_utils.ModelOutput` or :obj:`torch.LongTensor`: A
:class:`~transformers.file_utils.ModelOutput` (if ``return_dict_in_generate=True`` or when
            ``config.return_dict_in_generate=True``) or a :obj:`torch.LongTensor`.
If the model is `not` an encoder-decoder model (``model.config.is_encoder_decoder=False``), the
possible :class:`~transformers.file_utils.ModelOutput` types are:
- :class:`~transformers.generation_utils.GreedySearchDecoderOnlyOutput`,
- :class:`~transformers.generation_utils.SampleDecoderOnlyOutput`,
- :class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput`,
- :class:`~transformers.generation_utils.BeamSampleDecoderOnlyOutput`
If the model is an encoder-decoder model (``model.config.is_encoder_decoder=True``), the possible
:class:`~transformers.file_utils.ModelOutput` types are:
- :class:`~transformers.generation_utils.GreedySearchEncoderDecoderOutput`,
- :class:`~transformers.generation_utils.SampleEncoderDecoderOutput`,
- :class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput`,
- :class:`~transformers.generation_utils.BeamSampleEncoderDecoderOutput`
Examples::
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM
>>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
>>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
>>> # do greedy decoding without providing a prompt
>>> outputs = model.generate(max_length=40)
>>> print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> document = (
... "at least two people were killed in a suspected bomb attack on a passenger bus "
... "in the strife-torn southern philippines on monday , the military said."
... )
>>> # encode input context
>>> input_ids = tokenizer(document, return_tensors="pt").input_ids
>>> # generate 3 independent sequences using beam search decoding (5 beams)
>>> # with T5 encoder-decoder model conditioned on short news article.
>>> outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
>>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
>>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
>>> input_context = "The dog"
>>> # encode input context
>>> input_ids = tokenizer(input_context, return_tensors="pt").input_ids
>>> # generate 3 candidates using sampling
>>> outputs = model.generate(input_ids=input_ids, max_length=20, num_return_sequences=3, do_sample=True)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
>>> tokenizer = AutoTokenizer.from_pretrained("ctrl")
>>> model = AutoModelForCausalLM.from_pretrained("ctrl")
>>> # "Legal" is one of the control codes for ctrl
>>> input_context = "Legal My neighbor is"
>>> # encode input context
>>> input_ids = tokenizer(input_context, return_tensors="pt").input_ids
>>> outputs = model.generate(input_ids=input_ids, max_length=20, repetition_penalty=1.2)
>>> print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> input_context = "My cute dog"
>>> # get tokens of words that should not be generated
>>> bad_words_ids = [tokenizer(bad_word, add_prefix_space=True).input_ids for bad_word in ["idiot", "stupid", "shut up"]]
>>> # encode input context
>>> input_ids = tokenizer(input_context, return_tensors="pt").input_ids
>>> # generate sequences without allowing bad_words to be generated
>>> outputs = model.generate(input_ids=input_ids, max_length=20, do_sample=True, bad_words_ids=bad_words_ids)
>>> print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True))
"""
num_beams = num_beams if num_beams is not None else self.config.num_beams
num_beam_groups = num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups
do_sample = do_sample if do_sample is not None else self.config.do_sample
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
model_kwargs["output_attentions"] = output_attentions
model_kwargs["output_hidden_states"] = output_hidden_states
if input_ids is None and "inputs_embeds" not in model_kwargs:
# init `input_ids` with bos_token_id
input_ids = self._prepare_input_ids_for_generation(bos_token_id, model_kwargs.get("encoder_outputs"))
if model_kwargs.get("attention_mask", None) is None:
# init `attention_mask` depending on `pad_token_id`
model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
input_ids, pad_token_id, eos_token_id
)
# special case if pad_token_id is not defined
if pad_token_id is None and eos_token_id is not None:
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
pad_token_id = eos_token_id
# Storing encoder_input_ids for logits_processor that could use them
encoder_input_ids = input_ids if self.config.is_encoder_decoder else None
if self.config.is_encoder_decoder:
# add encoder_outputs to model_kwargs
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, model_kwargs)
# set input_ids as decoder_input_ids
if "decoder_input_ids" in model_kwargs:
input_ids = model_kwargs.pop("decoder_input_ids")
else:
input_ids = self._prepare_decoder_input_ids_for_generation(
input_ids, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id
)
if "encoder_outputs" not in model_kwargs or not isinstance(model_kwargs["encoder_outputs"], ModelOutput):
raise ValueError("Make sure that `model_kwargs` include `encoder_outputs` of type `ModelOutput`.")
# if `max_new_tokens` is passed, but not `max_length` -> set `max_length = max_new_tokens`
if max_length is None and max_new_tokens is not None:
max_length = (
max_new_tokens + input_ids.shape[-1]
if input_ids is not None
                else max_new_tokens + model_kwargs["inputs_embeds"].shape[1]
)
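            # e.g. a prompt of 8 tokens with max_new_tokens=20 results in max_length=28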
elif max_length is not None and max_new_tokens is not None:
# Both are set, this is odd, raise a warning
warnings.warn(
"Both `max_length` and `max_new_tokens` have been set "
f"but they serve the same purpose. `max_length` {max_length} "
f"will take priority over `max_new_tokens` {max_new_tokens}.",
UserWarning,
)
# default to config if still None
max_length = max_length if max_length is not None else self.config.max_length
if input_ids.shape[-1] >= max_length:
input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
logger.warning(
f"Input length of {input_ids_string} is {input_ids.shape[-1]}, but ``max_length`` is set to {max_length}. "
"This can lead to unexpected behavior. You should consider increasing ``config.max_length`` or ``max_length``."
)
# determine generation mode
is_greedy_gen_mode = (num_beams == 1) and (num_beam_groups == 1) and do_sample is False
is_sample_gen_mode = (num_beams == 1) and (num_beam_groups == 1) and do_sample is True
is_beam_gen_mode = (num_beams > 1) and (num_beam_groups == 1) and do_sample is False
is_beam_sample_gen_mode = (num_beams > 1) and (num_beam_groups == 1) and do_sample is True
is_group_beam_gen_mode = (num_beams > 1) and (num_beam_groups > 1)
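        # e.g. num_beams=1 + do_sample=False -> greedy search, num_beams=1 + do_sample=True -> multinomial
        # sampling, num_beams>1 + do_sample=False -> beam search, num_beams>1 + do_sample=True -> beam-search
        # multinomial sampling, and num_beam_groups>1 -> diverse (group) beam search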
if num_beam_groups > num_beams:
raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`")
if is_group_beam_gen_mode and do_sample is True:
raise ValueError(
"Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`."
)
# set model_kwargs
model_kwargs["use_cache"] = use_cache
# get distribution pre_processing samplers
logits_processor = self._get_logits_processor(
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,
encoder_input_ids=encoder_input_ids,
bad_words_ids=bad_words_ids,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
forced_bos_token_id=forced_bos_token_id,
forced_eos_token_id=forced_eos_token_id,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
num_beams=num_beams,
num_beam_groups=num_beam_groups,
diversity_penalty=diversity_penalty,
remove_invalid_values=remove_invalid_values,
)
stopping_criteria = self._get_stopping_criteria(max_length=max_length, max_time=max_time)
if is_greedy_gen_mode:
if num_return_sequences > 1:
raise ValueError(
f"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search."
)
# greedy search
return self.greedy_search(
input_ids,
logits_processor=logits_processor,
                banned_words=banned_words,
stopping_criteria=stopping_criteria,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif is_sample_gen_mode:
# get probability distribution warper
logits_warper = self._get_logits_warper(
top_k=top_k, top_p=top_p, temperature=temperature, num_beams=num_beams
)
# expand input_ids with `num_return_sequences` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids,
expand_size=num_return_sequences,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# sample
return self.sample(
input_ids,
logits_processor=logits_processor,
logits_warper=logits_warper,
stopping_criteria=stopping_criteria,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif is_beam_gen_mode:
batch_size = input_ids.shape[0]
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
if num_return_sequences > num_beams:
raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
if stopping_criteria.max_length is None:
raise ValueError("`max_length` needs to be a stopping_criteria for now.")
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=self.device,
length_penalty=length_penalty,
do_early_stopping=early_stopping,
num_beam_hyps_to_keep=num_return_sequences,
)
# interleave with `num_beams`
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
)
return self.beam_search(
input_ids,
beam_scorer,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif is_beam_sample_gen_mode:
logits_warper = self._get_logits_warper(
top_k=top_k, top_p=top_p, temperature=temperature, num_beams=num_beams
)
batch_size = input_ids.shape[0] * num_return_sequences
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
if stopping_criteria.max_length is None:
raise ValueError("`max_length` needs to be a stopping_criteria for now.")
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=self.device,
length_penalty=length_penalty,
do_early_stopping=early_stopping,
)
# interleave with `num_beams * num_return_sequences`
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids,
expand_size=num_beams * num_return_sequences,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
return self.beam_sample(
input_ids,
beam_scorer,
logits_processor=logits_processor,
logits_warper=logits_warper,
stopping_criteria=stopping_criteria,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif is_group_beam_gen_mode:
batch_size = input_ids.shape[0]
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
if num_return_sequences > num_beams:
raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
if num_beams % num_beam_groups != 0:
raise ValueError("`num_beams` should be divisible by `num_beam_groups` for group beam search.")
if stopping_criteria.max_length is None:
raise ValueError("`max_length` needs to be a stopping_criteria for now.")
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
max_length=stopping_criteria.max_length,
device=self.device,
length_penalty=length_penalty,
do_early_stopping=early_stopping,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
# interleave with `num_beams`
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
)
return self.group_beam_search(
input_ids,
diverse_beam_scorer,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
def greedy_search(
self,
input_ids: torch.LongTensor,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
        banned_words: Optional[Dict] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = None,
**model_kwargs,
) -> Union[GreedySearchOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head using greedy decoding.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling
head applied at each generation step.
stopping_criteria (:obj:`StoppingCriteriaList`, `optional`):
An instance of :class:`~transformers.StoppingCriteriaList`. List of instances of class derived from
:class:`~transformers.StoppingCriteria` used to tell if the generation loop should stop.
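            banned_words (:obj:`Dict`, `optional`):
                Custom argument: a dictionary with the keys ``ids`` (a list of banned token-id sequences) and
                ``epsilon`` (the probability with which a freshly generated banned word is reverted and replaced by
                an alternative token).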
max_length (:obj:`int`, `optional`, defaults to 20):
**DEPRECATED**. Use :obj:`logits_processor` or :obj:`stopping_criteria` directly to cap the number of
generated tokens. The maximum length of the sequence to be generated.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
synced_gpus (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific keyword arguments will be forwarded to the :obj:`forward` function of the
                model. If the model is an encoder-decoder model, the kwargs should include :obj:`encoder_outputs`.
Return:
:class:`~transformers.generation_utils.GreedySearchDecoderOnlyOutput`,
:class:`~transformers.generation_utils.GreedySearchEncoderDecoderOutput` or obj:`torch.LongTensor`: A
:obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a
:class:`~transformers.generation_utils.GreedySearchDecoderOnlyOutput` if
``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a
:class:`~transformers.generation_utils.GreedySearchEncoderDecoderOutput` if
``model.config.is_encoder_decoder=True``.
Examples::
>>> from transformers import (
... AutoTokenizer,
... AutoModelForCausalLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
            >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
>>> model.config.pad_token_id = model.config.eos_token_id
>>> input_prompt = "Today is a beautiful day, and"
>>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList([
... MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id),
... ])
>>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
# keep track of which sequences are already finished
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
cur_len = input_ids.shape[-1]
this_peer_finished = False # used by synced_gpus only
revert = False # used by banned_words decoding
detected_banned_words_length_greater_than_1 = None # used by banned_words decoding
        banned_words_ids = banned_words['ids'] if banned_words is not None else []  # used by banned_words decoding
        epsilon = banned_words['epsilon'] if banned_words is not None else 0.0  # used by banned_words decoding
timesteps = Timesteps() # used by banned_words decoding
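        # Banned-words decoding (custom addition): the full token ranking of every step is stored in `timesteps`;
        # when a banned word (or the last token of a multi-token banned word) has just been emitted, `revert` is
        # set and, with probability `epsilon`, the next iteration restores the stored state via
        # `timesteps.revert_timestep()` (the `Timesteps` helper is assumed to be defined elsewhere in this module)
        # and continues from an alternative token.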
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
# prepare model inputs
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
# forward pass to get next token
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_logits,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# pre-process distribution
next_tokens_scores = logits_processor(input_ids, next_token_logits)
_, sorted_next_token_indices = torch.topk(next_tokens_scores, next_tokens_scores.shape[1])
# The result below is the same as argmax
            next_tokens = sorted_next_token_indices[0, 0]
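            # The full descending ranking is stored below via `timesteps.update` so that an alternative token can
            # be chosen if this step is later reverted; indexing [0, 0] assumes a batch size of 1 for this path.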
random_uniform = torch.rand((1,))
if revert and epsilon > random_uniform:
input_ids, next_tokens = timesteps.revert_timestep()
revert = False
else:
if detected_banned_words_length_greater_than_1 is not None:
next_idx = detected_banned_words_length_greater_than_1['next_idx']
if next_tokens != detected_banned_words_length_greater_than_1['ids'][next_idx]:
"""
If the next_tokens is not equal to the subsequent token in the banned words,
we will set the detected banned_words to None.
For e.g., banned_words = ['blue rabbits'], while the generated sequence
is "In the early monday, the blue sky ..."
"""
detected_banned_words_length_greater_than_1 = None
else:
if (detected_banned_words_length_greater_than_1['next_idx'] + 1) == \
len(detected_banned_words_length_greater_than_1['ids']):
revert = True
detected_banned_words_length_greater_than_1 = None
else:
detected_banned_words_length_greater_than_1['next_idx'] += 1
else:
for ids in banned_words_ids:
if next_tokens == ids[0]:
if len(ids) == 1:
revert = True
else:
                                detected_banned_words_length_greater_than_1 = {'ids': ids, 'next_idx': 1}
timesteps.update(input_ids, sorted_next_token_indices)
# finished sentences should have their next token be a padding token
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
# update generated ids, model inputs, and length for next step
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
cur_len = cur_len + 1
# if eos_token was found in one sentence, set sentence to finished
if eos_token_id is not None:
unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long())
# stop when each sentence is finished, or if we exceed the maximum length
if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return GreedySearchEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return GreedySearchDecoderOnlyOutput(
sequences=input_ids,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return input_ids
def sample(
self,
input_ids: torch.LongTensor,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
logits_warper: Optional[LogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = None,
**model_kwargs,
) -> Union[SampleOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head using multinomial sampling.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling
head applied at each generation step.
stopping_criteria (:obj:`StoppingCriteriaList`, `optional`):
An instance of :class:`~transformers.StoppingCriteriaList`. List of instances of class derived from
:class:`~transformers.StoppingCriteria` used to tell if the generation loop should stop.
logits_warper (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsWarper` used to warp the prediction score distribution of the language
modeling head applied before multinomial sampling at each generation step.
max_length (:obj:`int`, `optional`, defaults to 20):
**DEPRECATED**. Use :obj:`logits_processor` or :obj:`stopping_criteria` directly to cap the number of
generated tokens. The maximum length of the sequence to be generated.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
synced_gpus (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If
                the model is an encoder-decoder model, the kwargs should include :obj:`encoder_outputs`.
Return:
:class:`~transformers.generation_utils.SampleDecoderOnlyOutput`,
:class:`~transformers.generation_utils.SampleEncoderDecoderOutput` or obj:`torch.LongTensor`: A
:obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a
:class:`~transformers.generation_utils.SampleDecoderOnlyOutput` if
``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a
:class:`~transformers.generation_utils.SampleEncoderDecoderOutput` if
``model.config.is_encoder_decoder=True``.
Examples::
>>> from transformers import (
... AutoTokenizer,
... AutoModelForCausalLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... TopKLogitsWarper,
... TemperatureLogitsWarper,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
            >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
>>> model.config.pad_token_id = model.config.eos_token_id
>>> input_prompt = "Today is a beautiful day, and"
>>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList([
... MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id),
... ])
>>> # instantiate logits processors
>>> logits_warper = LogitsProcessorList([
... TopKLogitsWarper(50),
... TemperatureLogitsWarper(0.7),
... ])
>>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
# keep track of which sequences are already finished
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
cur_len = input_ids.shape[-1]
this_peer_finished = False # used by synced_gpus only
# auto-regressive generation
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
# prepare model inputs
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
# forward pass to get next token
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# pre-process distribution
next_token_scores = logits_processor(input_ids, next_token_logits)
next_token_scores = logits_warper(input_ids, next_token_scores)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# sample
probs = nn.functional.softmax(next_token_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
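            # `probs` is computed from the warped scores, so temperature / top-k / top-p filtering has already
            # been applied before this multinomial draw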
# finished sentences should have their next token be a padding token
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
# update generated ids, model inputs, and length for next step
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
cur_len = cur_len + 1
# if eos_token was found in one sentence, set sentence to finished
if eos_token_id is not None:
unfinished_sequences = unfinished_sequences.mul((next_tokens != eos_token_id).long())
# stop when each sentence is finished, or if we exceed the maximum length
if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return SampleEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return SampleDecoderOnlyOutput(
sequences=input_ids,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return input_ids
def beam_search(
self,
input_ids: torch.LongTensor,
beam_scorer: BeamScorer,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = None,
**model_kwargs,
) -> Union[BeamSearchOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head using beam search decoding.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
beam_scorer (:obj:`BeamScorer`):
                A derived instance of :class:`~transformers.BeamScorer` that defines how beam hypotheses are
constructed, stored and sorted during generation. For more information, the documentation of
:class:`~transformers.BeamScorer` should be read.
logits_processor (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling
head applied at each generation step.
stopping_criteria (:obj:`StoppingCriteriaList`, `optional`):
An instance of :class:`~transformers.StoppingCriteriaList`. List of instances of class derived from
:class:`~transformers.StoppingCriteria` used to tell if the generation loop should stop.
max_length (:obj:`int`, `optional`, defaults to 20):
**DEPRECATED**. Use :obj:`logits_processor` or :obj:`stopping_criteria` directly to cap the number of
generated tokens. The maximum length of the sequence to be generated.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
synced_gpus (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If
                the model is an encoder-decoder model, the kwargs should include :obj:`encoder_outputs`.
Return:
            :class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput`,
:class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` or obj:`torch.LongTensor`: A
:obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a
:class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput` if
``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a
:class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` if
``model.config.is_encoder_decoder=True``.
Examples::
>>> from transformers import (
... AutoTokenizer,
... AutoModelForSeq2SeqLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... BeamSearchScorer,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
>>> # lets run beam search using 3 beams
>>> num_beams = 3
>>> # define decoder start token ids
>>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
>>> input_ids = input_ids * model.config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> model_kwargs = {
... "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True)
... }
>>> # instantiate beam scorer
>>> beam_scorer = BeamSearchScorer(
... batch_size=1,
... num_beams=num_beams,
... device=model.device,
... )
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList([
... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
... ])
>>> outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
if len(stopping_criteria) == 0:
warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning)
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
batch_size = len(beam_scorer._beam_hyps)
num_beams = beam_scorer.num_beams
batch_beam_size, cur_len = input_ids.shape
if num_beams * batch_size != batch_beam_size:
raise ValueError(
f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
)
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view((batch_size * num_beams,))
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id`
# cannot be generated both before and after the `nn.functional.log_softmax` operation.
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len)
next_token_scores = nn.functional.log_softmax(
next_token_logits, dim=-1
) # (batch_size * num_beams, vocab_size)
next_token_scores = logits_processor(input_ids, next_token_scores)
next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# reshape for beam search
vocab_size = next_token_scores.shape[-1]
next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
next_token_scores, next_tokens = torch.topk(
next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True
)
next_indices = (next_tokens / vocab_size).long()
next_tokens = next_tokens % vocab_size
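# Illustrative note (added, not from the original source): the two lines above undo the flattening of the
# (num_beams x vocab_size) score matrix. For example, with num_beams=3 and vocab_size=5, a flat index of 13
# decodes to beam 13 // 5 = 2 and token id 13 % 5 = 3, so each of the 2 * num_beams candidates keeps track of
# which beam it extends and with which token.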
# stateless
beam_outputs = beam_scorer.process(
input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
)
beam_scores = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past"] is not None:
model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx)
# increase cur_len
cur_len = cur_len + 1
if beam_scorer.is_done or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
sequence_outputs = beam_scorer.finalize(
input_ids,
beam_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
max_length=stopping_criteria.max_length,
)
if return_dict_in_generate:
if not output_scores:
sequence_outputs["sequence_scores"] = None
if self.config.is_encoder_decoder:
return BeamSearchEncoderDecoderOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return BeamSearchDecoderOnlyOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequence_outputs["sequences"]
def beam_sample(
self,
input_ids: torch.LongTensor,
beam_scorer: BeamScorer,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
logits_warper: Optional[LogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = None,
**model_kwargs,
) -> Union[BeamSampleOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head using beam search with multinomial sampling.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
beam_scorer (:obj:`BeamScorer`):
A derived instance of :class:`~transformers.BeamScorer` that defines how beam hypotheses are
constructed, stored and sorted during generation. For more information, the documentation of
:class:`~transformers.BeamScorer` should be read.
logits_processor (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling
head applied at each generation step.
stopping_criteria (:obj:`StoppingCriteriaList`, `optional`):
An instance of :class:`~transformers.StoppingCriteriaList`. List of instances of class derived from
:class:`~transformers.StoppingCriteria` used to tell if the generation loop should stop.
logits_warper (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsWarper` used to warp the prediction score distribution of the language
modeling head applied before multinomial sampling at each generation step.
max_length (:obj:`int`, `optional`, defaults to 20):
**DEPRECATED**. Use :obj:`logits_processor` or :obj:`stopping_criteria` directly to cap the number of
generated tokens. The maximum length of the sequence to be generated.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
synced_gpus (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If
model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`.
Return:
:class:`~transformers.generation_utils.BeamSampleDecoderOnlyOutput`,
:class:`~transformers.generation_utils.BeamSampleEncoderDecoderOutput` or :obj:`torch.LongTensor`: A
:obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a
:class:`~transformers.generation_utils.BeamSampleDecoderOnlyOutput` if
``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a
:class:`~transformers.generation_utils.BeamSampleEncoderDecoderOutput` if
``model.config.is_encoder_decoder=True``.
Examples::
>>> from transformers import (
... AutoTokenizer,
... AutoModelForSeq2SeqLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... TopKLogitsWarper,
... TemperatureLogitsWarper,
... BeamSearchScorer,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
>>> # let's run beam search using 3 beams
>>> num_beams = 3
>>> # define decoder start token ids
>>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
>>> input_ids = input_ids * model.config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> model_kwargs = {
... "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True)
... }
>>> # instantiate beam scorer
>>> beam_scorer = BeamSearchScorer(
... batch_size=1,
... max_length=model.config.max_length,
... num_beams=num_beams,
... device=model.device,
... )
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList([
... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)
... ])
>>> # instantiate logits processors
>>> logits_warper = LogitsProcessorList([
... TopKLogitsWarper(50),
... TemperatureLogitsWarper(0.7),
... ])
>>> outputs = model.beam_sample(
... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs
... )
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
batch_size = len(beam_scorer._beam_hyps)
num_beams = beam_scorer.num_beams
batch_beam_size, cur_len = input_ids.shape
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
beam_scores = beam_scores.view((batch_size * num_beams,))
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id`
# cannot be generated both before and after the `nn.functional.log_softmax` operation.
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len)
next_token_scores = nn.functional.log_softmax(
next_token_logits, dim=-1
) # (batch_size * num_beams, vocab_size)
next_token_scores = logits_processor(input_ids, next_token_scores)
next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores)
next_token_scores = logits_warper(input_ids, next_token_scores)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# reshape for beam search
vocab_size = next_token_scores.shape[-1]
next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
probs = nn.functional.softmax(next_token_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams)
next_token_scores = torch.gather(next_token_scores, -1, next_tokens)
next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, _indices)
next_indices = next_tokens // vocab_size
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = beam_scorer.process(
input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
)
beam_scores = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past"] is not None:
model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx)
# increase cur_len
cur_len = cur_len + 1
if beam_scorer.is_done or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
sequence_outputs = beam_scorer.finalize(
input_ids,
beam_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
max_length=stopping_criteria.max_length,
)
if return_dict_in_generate:
if not output_scores:
sequence_outputs["sequence_scores"] = None
if self.config.is_encoder_decoder:
return BeamSampleEncoderDecoderOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return BeamSampleDecoderOnlyOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequence_outputs["sequences"]
def group_beam_search(
self,
input_ids: torch.LongTensor,
beam_scorer: BeamScorer,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = None,
**model_kwargs,
):
r"""
Generates sequences for models with a language modeling head using diverse beam search decoding.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
beam_scorer (:obj:`BeamScorer`):
A derived instance of :class:`~transformers.BeamScorer` that defines how beam hypotheses are
constructed, stored and sorted during generation. For more information, the documentation of
:class:`~transformers.BeamScorer` should be read.
logits_processor (:obj:`LogitsProcessorList`, `optional`):
An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from
:class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling
head applied at each generation step.
stopping_criteria (:obj:`StoppingCriteriaList`, `optional`):
An instance of :class:`~transformers.StoppingCriteriaList`. List of instances of class derived from
:class:`~transformers.StoppingCriteria` used to tell if the generation loop should stop.
max_length (:obj:`int`, `optional`, defaults to 20):
**DEPRECATED**. Use :obj:`logits_processor` or :obj:`stopping_criteria` directly to cap the number of
generated tokens. The maximum length of the sequence to be generated.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
output_attentions (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more details.
output_hidden_states (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more details.
output_scores (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details.
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
synced_gpus (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific kwargs that will be forwarded to the :obj:`forward` function of the model. If
model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`.
Return:
:class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput`,
:class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` or :obj:`torch.LongTensor`: A
:obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a
:class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput` if
``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a
:class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` if
``model.config.is_encoder_decoder=True``.
Examples::
>>> from transformers import (
... AutoTokenizer,
... AutoModelForSeq2SeqLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... HammingDiversityLogitsProcessor,
... BeamSearchScorer,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
>>> # let's run diverse beam search using 6 beams
>>> num_beams = 6
>>> # define decoder start token ids
>>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
>>> input_ids = input_ids * model.config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> model_kwargs = {
... "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True)
... }
>>> # instantiate beam scorer
>>> beam_scorer = BeamSearchScorer(
... batch_size=1,
... max_length=model.config.max_length,
... num_beams=num_beams,
... device=model.device,
... num_beam_groups=3
... )
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList([
... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3),
... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
... ])
>>> outputs = model.group_beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
>>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
batch_size = len(beam_scorer._beam_hyps)
num_beams = beam_scorer.num_beams
num_beam_groups = beam_scorer.num_beam_groups
num_sub_beams = num_beams // num_beam_groups
device = input_ids.device
batch_beam_size, cur_len = input_ids.shape
if num_beams * batch_size != batch_beam_size:
raise ValueError(
f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
)
beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)
# initialise the score of the first beam of each group with 0 and the rest with -1e9. This ensures that the beams in
# the same group don't all produce the same tokens every time.
beam_scores[:, ::num_sub_beams] = 0
beam_scores = beam_scores.view((batch_size * num_beams,))
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
# predicted tokens in cur_len step
current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)
# indices which will form the beams in the next time step
reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)
# do one decoder step on all beams of all sentences in batch
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
if output_scores:
processed_score = torch.zeros_like(outputs.logits[:, -1, :])
for beam_group_idx in range(num_beam_groups):
group_start_idx = beam_group_idx * num_sub_beams
group_end_idx = min(group_start_idx + num_sub_beams, num_beams)
group_size = group_end_idx - group_start_idx
# indices of beams of current group among all sentences in batch
batch_group_indices = []
for batch_idx in range(batch_size):
batch_group_indices.extend(
[batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]
)
group_input_ids = input_ids[batch_group_indices]
# select outputs of beams of current group only
next_token_logits = outputs.logits[batch_group_indices, -1, :]
# hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id`
# cannot be generated both before and after the `nn.functional.log_softmax` operation.
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len)
next_token_scores = nn.functional.log_softmax(
next_token_logits, dim=-1
) # (batch_size * group_size, vocab_size)
vocab_size = next_token_scores.shape[-1]
next_token_scores = logits_processor(
group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx
)
next_token_scores = next_token_scores + beam_scores[batch_group_indices].unsqueeze(-1).expand_as(
next_token_scores
)
if output_scores:
processed_score[batch_group_indices] = next_token_scores
# reshape for beam search
next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)
next_token_scores, next_tokens = torch.topk(
next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True
)
next_indices = next_tokens // vocab_size
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = beam_scorer.process(
group_input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
)
beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids[batch_group_indices] = group_input_ids[beam_idx]
group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
current_tokens[batch_group_indices] = group_input_ids[:, -1]
# (beam_idx // group_size) -> batch_idx
# (beam_idx % group_size) -> offset of idx inside the group
reordering_indices[batch_group_indices] = (
num_beams * (beam_idx // group_size) + group_start_idx + (beam_idx % group_size)
)
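# Worked example (added, not from the original source): with num_beams=6 and num_beam_groups=3 (so group_size=2)
# and group_start_idx=2, a local beam_idx of 5 maps to batch 5 // 2 = 2, in-group offset 5 % 2 = 1, and global
# row 6 * 2 + 2 + 1 = 15, i.e. the second beam of the second group of the third sentence in the batch.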
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (processed_score,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past"] is not None:
model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], reordering_indices)
# increase cur_len
cur_len = cur_len + 1
if beam_scorer.is_done or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
sequence_outputs = beam_scorer.finalize(
input_ids,
beam_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
max_length=stopping_criteria.max_length,
)
if return_dict_in_generate:
if not output_scores:
sequence_outputs["sequence_scores"] = None
if self.config.is_encoder_decoder:
return BeamSearchEncoderDecoderOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return BeamSearchDecoderOnlyOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequence_outputs["sequences"]
def top_k_top_p_filtering(
logits: torch.FloatTensor,
top_k: int = 0,
top_p: float = 1.0,
filter_value: float = -float("Inf"),
min_tokens_to_keep: int = 1,
) -> torch.FloatTensor:
"""
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
top_k (:obj:`int`, `optional`, defaults to 0):
If > 0, only keep the top k tokens with highest probability (top-k filtering)
top_p (:obj:`float`, `optional`, defaults to 1.0):
If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus
filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1):
Minimum number of tokens we keep per batch example in the output.
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
None, logits
)
if 0 <= top_p <= 1.0:
logits = TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=min_tokens_to_keep)(None, logits)
return logits
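# --- Usage sketch (added; not part of the original module) ---
# A minimal, hedged example of calling `top_k_top_p_filtering` before multinomial sampling.
# It assumes `torch` and `nn` are imported at the top of this module (both are used above);
# the tensor sizes and the sampling step are illustrative, not taken from the library docs.
if __name__ == "__main__":
    example_logits = torch.randn(2, 10)  # (batch_size, vocab_size), random logits for illustration
    # keep the 5 highest-scoring tokens, then the smallest set reaching 90% cumulative probability
    filtered = top_k_top_p_filtering(example_logits, top_k=5, top_p=0.9)
    probs = nn.functional.softmax(filtered, dim=-1)  # filtered-out positions get probability 0
    next_token = torch.multinomial(probs, num_samples=1)
    print(next_token.shape)  # torch.Size([2, 1])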
| 55.401976
| 201
| 0.650359
|
3f80210353ca83d527a8565930403b6b990ffbbc
| 30,262
|
py
|
Python
|
jeremy/old/lhopital.py
|
sin3000x/manim
|
bd369534d29b962a321153dadacca827e06ec899
|
[
"MIT"
] | null | null | null |
jeremy/old/lhopital.py
|
sin3000x/manim
|
bd369534d29b962a321153dadacca827e06ec899
|
[
"MIT"
] | null | null | null |
jeremy/old/lhopital.py
|
sin3000x/manim
|
bd369534d29b962a321153dadacca827e06ec899
|
[
"MIT"
] | null | null | null |
from manimlib.imports import *
from jeremy.ref.VideoProgressBar import VideoProgressBar
class main(Scene):
CONFIG = {
"map": {"f": RED, "g": BLUE, "infty": WHITE, "forall": WHITE,"frac": WHITE,"left":WHITE, "right": WHITE, "inf": WHITE},
"infmap": {"limsup": RED, "liminf": GREEN}
}
def construct(self):
self.opening()
self.other_limit()
self.remark1()
self.remark2()
self.remark3()
def opening(self):
self.title = title = TextMobject(r"\textbf{\underline{{\heiti l'H\^{o}pital法则}}}", color=YELLOW).to_corner(UL)
title_00 = TextMobject(r"$\boldsymbol{\dfrac{0}{0}}$\textbf{{\heiti 型}}", color=YELLOW).move_to(title)
title_inf = TextMobject(r"$\boldsymbol{\dfrac{*}{\infty}}$\textbf{{\heiti 型}}", color=YELLOW).move_to(title)
title_limit = TextMobject(r"\textbf{{\heiti 上下极限}}", color=YELLOW).move_to(title)
self.play(Write(title))
self.wait()
zero_zero = TexMobject(r"{0\over 0}")
infty_infty = TexMobject(r"\infty", r" \over", r" \infty")
star_infty = TexMobject(r"*", r"\over", r"\infty")
VGroup(zero_zero, infty_infty, star_infty).scale(2.5)
VGroup(zero_zero, infty_infty).arrange(buff=LARGE_BUFF * 3)
star_infty.move_to(infty_infty)
self.play(Write(zero_zero))
self.wait()
self.play(Write(infty_infty))
self.wait()
self.play(ReplacementTransform(infty_infty, star_infty))
self.wait()
zero_zero.generate_target()
zero_zero.target.set_x(0).scale(4)
zero_zero.target.fade(1)
self.play(FadeOut(star_infty), MoveToTarget(zero_zero))
t1 = TexMobject(*r"f , g \text{~在~}(a,b)\text{~上可导~}".split())
t2 = TexMobject(*r"g '(x) \neq 0,\quad\forall x\in(a,b)".split())
t3 = TexMobject(*r"\lim_{ x\to a^+} f (x) = \lim_{x\to "
# 0 1 2 3 4 5 6
r"a^+} g (x) = 0".split())
################### 7 8 9 10 11
t3_inf = TexMobject(*r"\lim_{x\to "
# 0
r"a^+} g (x) = \infty".split()).set_color_by_tex_to_color_map(self.map)
################### 1 2 3 4 5
t4 = TexMobject(*r"\lim_{ x\to a^+} {f '(x)\over g"
r" '(x)} \text{~存在,或为~}\infty".split())
tmain = TexMobject(*r"\Longrightarrow \lim_{ x\to a^+} {f (x) "
# 0 1 2 3 4 5
r"\over g (x)} = \lim_{ x\to a^+} {f '(x)\over g '(x)}".split())
###################### 6 7 8 9 10
v = VGroup(t1, t2, t3, t4, tmain).arrange(DOWN, buff=0.4).next_to(title, DOWN).set_x(0)
t3_inf.move_to(t3)
for t in v:
t.set_color_by_tex_to_color_map(self.map)
tmain[1:].set_x(0)
tmain[0].next_to(tmain[1:], LEFT)
tmp = tmain[1:].copy().scale(1.5).move_to(ORIGIN)
box = SurroundingRectangle(tmain[1:], color=YELLOW)
box1, box2 = SurroundingRectangle(tmp[2], buff=.05), SurroundingRectangle(tmp[11], buff=.05)
# bg = BackgroundRectangle(tmain[1:], color=GOLD, fill_opacity=.15, buff=.1)
question2 = TextMobject(r"{\kaishu 过分吗?}", color=YELLOW).move_to(t2).to_edge(LEFT)
question4 = question2.copy().move_to(t4).to_edge(LEFT)
# arrow2 = Arrow(question2.get_right(), t2.get_left())
arrow4 = Arrow(question4.get_right(), t4.get_left(), color=YELLOW)
arrow2 = arrow4.copy().set_y(t2.get_y())
self.play(Write(tmp))
self.wait()
self.play(ShowCreation(box1), ShowCreation(box2))
self.wait()
self.play(FadeOut(VGroup(box1, box2)))
self.play(ReplacementTransform(tmp, tmain[1:]))
self.play(Write(tmain[0]))
self.play(ShowCreation(box))
# self.play(ReplacementTransform(box1, box), ReplacementTransform(box2, box))
self.wait()
self.play(ReplacementTransform(tmain[4:6].copy(), t3[3:5]),
ReplacementTransform(tmain[7:9].copy(), t3[8:10]),
Write(t3[:3]), Write(t3[5:8]), Write(t3[10:]), run_time=2)
self.wait()
self.play(ReplacementTransform(tmain[10:].copy(), t4[:7]))
self.play(Write(t4[7:]), run_time=2)
self.wait()
self.play(GrowArrow(arrow4))
self.wait()
self.play(Write(t1))
self.wait()
self.play(ReplacementTransform(tmain[-2:].copy(), t2[:2]), run_time=2)
self.play(Write(t2[2:]), run_time=2)
self.wait()
self.play(GrowArrow(arrow2))
self.wait()
t = tmain.deepcopy()
self.play(FadeOut(VGroup(t1, t2, t3, t4, box, arrow2, arrow4, tmain[0])),
tmain[1:].move_to, t1, Transform(title, title_00),run_time=2)
self.wait()
# proving start
number_line = NumberLine(x_min=-2, x_max=2, tick_frequency=4,
leftmost_tick=-2, numbers_with_elongated_ticks=[-2, 2]) \
.next_to(tmain[1:], DOWN, buff=MED_LARGE_BUFF)
a, b = TexMobject("a"), TexMobject("b")
a.next_to(number_line.get_left(), DOWN, buff=MED_LARGE_BUFF)
b.next_to(number_line.get_right(), DOWN, buff=MED_LARGE_BUFF)
a.align_to(b, DOWN)
left_r, right_r = TexMobject("(", color=TEAL, stroke_width=3).scale(1.2).move_to(
number_line.get_left()).align_to(number_line, LEFT), \
TexMobject(")", color=TEAL, stroke_width=3).scale(1.2).move_to(
number_line.get_right()).align_to(number_line, RIGHT)
left_s, right_s = TexMobject("[", color=YELLOW, stroke_width=3).scale(1.2).move_to(
number_line.n2p(-2)).align_to(number_line, LEFT), \
TexMobject("]", color=YELLOW, stroke_width=3).scale(1.2).move_to(number_line.n2p(1.5))
xi = number_line.get_tick(.4)
x_label, xi_label = TexMobject("x"), TexMobject(r"\xi")
x_label.add_updater(lambda x: x.next_to(right_s, DOWN).align_to(b, DOWN))
xi_label.add_updater(lambda x: x.next_to(xi, DOWN).align_to(b, DOWN))
cauchy = TexMobject(*r"{f (x) \over g (x)} = {{f "
# 0 1 2 3 4 5 6
r"(x) - f (a)} \over {g"
# 7 8 9 10 11 12
r" (x) - g (a)}} = "
# 13 1415 16 17
r"{f '( \xi )\over g '( \xi )}".split()) \
.set_color_by_tex_to_color_map(self.map).next_to(number_line, DOWN, buff=LARGE_BUFF*1.2)
# 18 19 20 21 22
conclusion = TexMobject(*r"\lim_{ x \to a^+} {f (x) "
# 0 1 2 3 4 5
r"\over g (x)} = \lim_{ \xi \to a^+} {f '( \xi )\over g '( \xi )}"
# 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
.split()).set_color_by_tex_to_color_map(self.map).next_to(cauchy, DOWN)
conclusion2 = TexMobject(*r"\lim_{ x \to a^+} {f (x) "
# 0 1 2 3 4 5
r"\over g (x)} = \lim_{ x \to a^+} {f '( x )\over g '( x )}"
# 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
.split()).set_color_by_tex_to_color_map(self.map).next_to(cauchy, DOWN)
bg = BackgroundRoundedRectangle(VGroup(cauchy, conclusion2), buff=.3)
r1 = TexMobject(r"f , g \text{{\kaishu ~在~}}[a,x]\text{{\kaishu ~上连续}}")
r2 = TexMobject(r"(a,x)\text{{\kaishu ~上可导}}")
r3 = TexMobject(r"g '(t)\neq0,~~\forall t\in(a,x)")
v1 = VGroup(r1, r2, r3).arrange(DOWN).scale(.8).next_to(number_line, LEFT, buff=LARGE_BUFF)
box2 = SurroundingRectangle(v1, color=PINK)
self.play(FadeIn(bg), run_time=1.5)
self.play(Write(cauchy[:6]))
self.wait()
self.play(ReplacementTransform(cauchy[:2].copy(), cauchy[6:8]),
ReplacementTransform(cauchy[3:5].copy(), cauchy[12:14]),
Write(cauchy[11]), run_time=1.5)
self.play(Write(cauchy[8:11]), Write(cauchy[14:17]), run_time=2)
self.wait()
# number line
self.play(FadeIn(VGroup(number_line, a, b)))
self.play(Write(left_r), Write(right_r))
self.wait(2)
self.play(ReplacementTransform(left_r, left_s))
self.wait(2)
self.play(FadeIn(VGroup(right_s, x_label)))
self.wait()
self.play(Write(cauchy[17:]))
self.play(FadeIn(VGroup(xi, xi_label)))
cauchy_label = BraceLabel(cauchy, r"{\heiti $[a,x]$上的Cauchy中值定理}", label_constructor=TextMobject,
label_scale=.8)
cauchy_label.label.set_color(YELLOW)
self.play(GrowFromCenter(cauchy_label.brace))
self.play(Write(cauchy_label.label))
self.wait()
self.play(Write(r1))
self.play(Write(r2))
self.play(Write(r3))
self.play(ShowCreation(box2))
self.wait()
# x to a+
self.play(right_s.move_to, number_line.n2p(-1.6), xi.move_to, number_line.n2p(-1.8), run_time=2)
self.wait()
self.play(FadeOut(cauchy_label))
self.play(ReplacementTransform(cauchy[:5].copy(), conclusion[4:9]), Write(conclusion[:4]), run_time=2)
self.play(Write(conclusion[9:14]), ReplacementTransform(cauchy[18:].copy(), conclusion[14:]), run_time=2)
self.wait()
self.play(Transform(conclusion, conclusion2))
self.wait(2)
x_label.clear_updaters()
xi_label.clear_updaters()
# finishing 0/0
self.play(FadeOut(VGroup(box2, v1, conclusion, cauchy, bg,
number_line, x_label, xi_label, xi, a, b, left_s, right_s, right_r)))
tmain[0].become(t[0])
self.play(Transform(tmain[1:], t[1:]), run_time=2)
self.wait()
# inf/inf starts
self.play(FadeIn(VGroup(t1, t2, t3, t4, arrow2, arrow4, tmain[0])), ShowCreation(box))
self.wait()
self.play(FadeOut(t3[:6]), ReplacementTransform(t3[6:11], t3_inf[:5]),
ReplacementTransform(t3[-1], t3_inf[-1]), Transform(title, title_inf), )
self.wait()
self.play(FadeOut(VGroup(t1, t2, t3_inf, t4, box, arrow2, arrow4)),
FadeOut(tmain[0]),
tmain[1:].move_to, t1, run_time=2)
self.wait()
# proof starts: strip
right_box = SurroundingRectangle(tmain[10:], color=YELLOW)
l = TextMobject(r'\kaishu 记为~$l$', color=YELLOW).next_to(right_box, RIGHT)
self.play(ShowCreation(right_box))
self.play(Write(l))
self.wait()
strip = TexMobject(r"\text{{\kaishu 区间内有~}}",*r"l-\varepsilon< {f '(x)\over g '(x)} < l+\varepsilon.".split()).set_color_by_tex_to_color_map(self.map).scale(.9)
####################################################### 1 2 3 4 5
VGroup(number_line, strip).arrange(buff=LARGE_BUFF*1).next_to(tmain[1:], DOWN)
left_r, right_r = TexMobject("(", color=TEAL, stroke_width=3).scale(1.2).move_to(
number_line.get_left()).align_to(number_line, LEFT), \
TexMobject(")", color=TEAL, stroke_width=3).scale(1.2).move_to(
number_line.get_right()).align_to(number_line, RIGHT)
left_s, right_s = TexMobject("[", color=YELLOW, stroke_width=3).scale(1.2).move_to(
number_line.n2p(-0.5)), \
TexMobject("]", color=YELLOW, stroke_width=3).scale(1.2).move_to(number_line.n2p(0.5))
a, a_delta, x, c = [TexMobject(f"{s}").scale(.9) for s in ["a",r"a+\delta","x","c"]]
a.next_to(left_r, DOWN)
a_delta.next_to(right_r, DOWN).align_to(a, DOWN).shift(DOWN*.048)
x.add_updater(lambda u: u.next_to(left_s, DOWN))
c.add_updater(lambda u: u.next_to(right_s, DOWN))
xi_arrow = Arrow(UP, DOWN).next_to(number_line.n2p(0), DOWN)
xi_label2 = TexMobject(r"\xi")
xi_label2.scale(.9).next_to(xi_arrow, DOWN)
# number line shows
self.play(FadeIn(VGroup(number_line, left_r, right_r, a, a_delta)))
self.play(Write(strip))
self.wait()
self.play(FadeIn(VGroup(left_s, right_s, x, c)))
self.wait()
cauchy2 = TexMobject(*r"{{f "
# 0
r"( x ) - f ( c )} \over {g"
# 1 2 3 4 5 6 7 8 9 10
r" ( x ) - g ( c )}} = "
# 11 12131415161718 19
r"{f '( \xi )\over g '( \xi )}".split()).tm(self.map).next_to(number_line, DOWN, buff=LARGE_BUFF).set_x(0)
# 20 21 22 23 24 25 26 27
cauchy2_ineq = TexMobject(*r"l-\varepsilon < {{f "
# 0 1 2
r"( x ) - f ( c )} \over {g"
# 3 4 5 6 7 8 9 10 11 12
r" ( x ) - g ( c )}} "
# 13 14151617181920
r"< l+\varepsilon".split()).tm(self.map).next_to(number_line, DOWN,
buff=LARGE_BUFF).set_x(
0)
# 21 22
cauchy2_gx = TexMobject(*r"l-\varepsilon < {{{f "
# 0 1 2
r"( x ) \over g (x)} - {f ( c ) \over g (x)}} \over {{g"
# 3 4 5 6 7 8 9 10 111213 14 15 16 17 18
r" ( x ) \over g (x)} - {g ( c ) \over g (x)}}} "
# 192021 22 23 24 25 26272829 30 31 32
r"< l+\varepsilon".split()).tm(self.map).next_to(number_line, DOWN,
buff=LARGE_BUFF).set_x(
0)
# 33 34
cauchy2_res = TexMobject(*r"l-\varepsilon < {{{f "
# 0 1 2
r"( x ) \over g (x)} - {f ( c ) \over g (x)}} \over {1 "
# 3 4 5 6 7 8 9 10111213 14 15 16 17 18
r"- {g ( c ) \over g (x)}}} "
# 19 2021 22 23 24 25 26272829 30 31 32
r"< l+\varepsilon".split()).tm(self.map).next_to(number_line, DOWN,
buff=LARGE_BUFF).set_x(
0)
# 33 34
question = TexMobject(r"\text{\kaishu 如何出现~}\frac{f(x)}{g(x)}?",color=YELLOW).scale(.9)
clock = Clock().scale(.9)
VGroup(question, clock).arrange(buff=LARGE_BUFF).next_to(cauchy2_ineq, DOWN)
limit = TexMobject(*r"l-\varepsilon \leq \lim_{x\to a^+} {f (x) \over g (x)}\leq l+\varepsilon".split())\
.tm(self.map).next_to(cauchy2_gx, DOWN)
imply = CurvedArrow(cauchy2_gx.get_left()+LEFT*.5, limit.get_left()+LEFT*.5, color=YELLOW)
error = VGroup(TextMobject(r"\kaishu 默认了"),
TexMobject(r"\lim_{x\to a^+}\frac{f(x)}{g(x)}\text{\kaishu 存在}"))\
.arrange(DOWN).set_color(YELLOW).scale(.8).next_to(imply, LEFT)
cross = Cross(limit)
# proof continues
self.play(Write(cauchy2))
self.wait()
self.play(GrowArrow(xi_arrow))
self.play(Write(xi_label2))
self.wait()
self.play(ShowPassingFlashAround(cauchy2[20:]), ShowPassingFlashAround(strip[2:6]))
self.play(ShowPassingFlashAround(cauchy2[20:]), ShowPassingFlashAround(strip[2:6]))
self.wait()
# xi_label.clear_updaters()
self.play(FadeOut(cauchy2[19:]), ReplacementTransform(cauchy2[:19], cauchy2_ineq[2:21]),
FadeOut(VGroup(xi_arrow, xi_label2)))
self.play(Write(cauchy2_ineq[:2]), Write(cauchy2_ineq[21:]))
self.wait()
# think about how to relate to f/g
self.play(Write(question))
self.wait()
self.play(FadeIn(clock))
self.play(ClockPassesTime(clock))
self.play(FadeOut(clock), FadeOut(question))
self.wait()
# super dirty transformation
self.play(ReplacementTransform(cauchy2_ineq[:2], cauchy2_gx[:2]),
ReplacementTransform(cauchy2_ineq[2], cauchy2_gx[2]),
ReplacementTransform(cauchy2_ineq[3], cauchy2_gx[3]),
ReplacementTransform(cauchy2_ineq[4], cauchy2_gx[4]),
ReplacementTransform(cauchy2_ineq[5], cauchy2_gx[5]),
ReplacementTransform(cauchy2_ineq[6], cauchy2_gx[9]),
ReplacementTransform(cauchy2_ineq[7], cauchy2_gx[10]),
ReplacementTransform(cauchy2_ineq[8], cauchy2_gx[11]),
ReplacementTransform(cauchy2_ineq[9], cauchy2_gx[12]),
ReplacementTransform(cauchy2_ineq[10], cauchy2_gx[13]),
ReplacementTransform(cauchy2_ineq[11], cauchy2_gx[17]),
ReplacementTransform(cauchy2_ineq[12], cauchy2_gx[18]),
ReplacementTransform(cauchy2_ineq[13], cauchy2_gx[19]),
ReplacementTransform(cauchy2_ineq[14], cauchy2_gx[20]),
ReplacementTransform(cauchy2_ineq[15], cauchy2_gx[21]),
ReplacementTransform(cauchy2_ineq[16], cauchy2_gx[25]),
ReplacementTransform(cauchy2_ineq[17], cauchy2_gx[26]),
ReplacementTransform(cauchy2_ineq[18], cauchy2_gx[27]),
ReplacementTransform(cauchy2_ineq[19], cauchy2_gx[28]),
ReplacementTransform(cauchy2_ineq[20], cauchy2_gx[29]),
ReplacementTransform(cauchy2_ineq[21:], cauchy2_gx[33:]),
)
self.play(
Write(cauchy2_gx[6:9]),
Write(cauchy2_gx[14:17]),
Write(cauchy2_gx[22:25]),
Write(cauchy2_gx[30:33]),
)
# self.play(ReplacementTransform(cauchy2_gx, cauchy2_res))
self.play(Transform(cauchy2_gx[18:25], cauchy2_res[18]),
Transform(cauchy2_gx[25], cauchy2_res[19]),
Transform(cauchy2_gx[26:33], cauchy2_res[20:-2])
)
# two parts to 0 as x-> a+
bg1 = SurroundingRectangle(cauchy2_gx[10:17], color=GOLD, buff=0.05)
bg2 = SurroundingRectangle(cauchy2_gx[26:33], color=GOLD, buff=0.05)
to0 = TexMobject(r"\to 0", color=GOLD).next_to(bg1, buff=.1).scale(.8)
to02 = TexMobject(r"\to 0", color=GOLD).next_to(bg2, buff=.1).scale(.8)
self.wait()
self.play(left_s.move_to, number_line.n2p(-1.7), run_time=2)
self.wait()
self.play(ShowCreation(bg1))
self.play(ShowCreation(bg2))
self.play(Write(to0), Write(to02))
self.wait()
self.play(Write(limit))
self.wait()
# but there's an error
self.play(GrowArrow(imply))
self.wait()
self.play(Write(error[0]))
self.play(Write(error[1]))
self.wait()
self.play(ShowCreation(cross))
self.wait()
# go over upper/lower limits
self.play(FadeOut(VGroup(
tmain[1:], right_box, l,
number_line, left_r, left_s, right_s, right_r, a, a_delta, x, c,
strip,
cauchy2_gx, bg1, bg2, to0, to02,
imply, error, limit, cross
)), Transform(title, title_limit))
self.wait()
prop1 = TexMobject(*r"1.~\lim_{x\to0}\sin \left(\frac{1}{x}\right) ".split(),r"\text{\kaishu 不存在}",*r",~ \varlimsup_{x\to0} \sin\left(\frac{1}{x}\right)"
r"=1,~ \varliminf_{x\to0} \sin\left(\frac{1}{x}\right) =-1".split())\
.tm(self.infmap).scale(.9).next_to(title_limit, DOWN).set_x(0)
prop2 = TexMobject(*r"2.~ \varliminf \leq \varlimsup ,~".split(),r"\text{\kaishu 等号成立}",*r"\Longleftrightarrow~ \lim= \varliminf = \varlimsup".split())\
.tm(self.infmap).next_to(prop1, DOWN, aligned_edge=LEFT, buff=LARGE_BUFF)
prop3 = TexMobject(*r"3.~ f\leq g ~\Longrightarrow~ \varliminf f \leq \varliminf g,~ \varlimsup f \leq \varlimsup g".split())\
.tm({"limsup": RED, "liminf": GREEN}).next_to(prop2, DOWN, aligned_edge=LEFT, buff=LARGE_BUFF*1.3)
self.play(Write(prop1[:3]))
self.wait()
self.play(Write(prop1[3:]))
self.wait()
self.play(Write(prop2[:4]))
self.wait()
self.play(Write(prop2[4:]))
self.wait()
self.play(Write(prop3[:3]))
self.wait()
self.play(Write(prop3[3:]))
self.wait(2)
# go back
self.play(FadeOut(VGroup(prop1, prop2, prop3)),
Transform(title, title_inf))
VGroup(
cauchy2_gx, bg1, bg2, to0, to02,
).shift(UP)
VGroup(bg1, bg2, to0, to02).set_color(YELLOW)
brace1 = Brace(cauchy2_gx[:33], DOWN)
liminf = TexMobject(*r"l - \varepsilon \leq \varliminf_{x\to a^+} {f (x)\over g (x)}".split())\
.tm(self.map).scale(1).next_to(brace1, DOWN)
brace2 = Brace(cauchy2_gx[2:], DOWN)
limsup = TexMobject(*r"\varlimsup_{x\to a^+} {f (x)\over g (x)} \leq l + \varepsilon".split())\
.tm(self.map).scale(1).next_to(brace2, DOWN)
liminfsup = TexMobject(*r"l - \varepsilon \leq \varliminf_{x\to a^+} {f (x)\over g (x)} \leq \varlimsup_{x\to a^+} {f (x)\over g (x)} \leq l + \varepsilon".split())\
.tm(self.map).scale(1).next_to(brace1, DOWN).set_x(0)
lim0eps = TexMobject(
*r"l \leq \varliminf_{x\to a^+} {f (x)\over g (x)} \leq \varlimsup_{x\to a^+} {f (x)\over g (x)} \leq l".split()) \
.tm(self.map).scale(1).next_to(brace1, DOWN).set_x(0)
limeq = TexMobject(
*r"l = \varliminf_{x\to a^+} {f (x)\over g (x)} = \varlimsup_{x\to a^+} {f (x)\over g (x)} = l".split()) \
.tm(self.map).scale(1).next_to(brace1, DOWN).set_x(0)
limres = TexMobject(
*r"\lim_{x\to a^+} {f (x)\over g (x)}= l".split()) \
.tm(self.map).scale(1).next_to(brace1, DOWN).set_x(0)
bg_inf = BackgroundRoundedRectangle(VGroup(cauchy2_gx, lim0eps)).set_width(FRAME_WIDTH-2, stretch=True)
self.play(FadeIn(VGroup(
tmain[1:], right_box, l,
cauchy2_gx, bg1, bg2, to0, to02,
bg_inf
)))
self.wait()
# braces
self.play(GrowFromCenter(brace1))
self.play(Write(liminf))
self.wait()
self.play(FadeOut(brace1), liminf.shift, LEFT*3)
self.play(GrowFromCenter(brace2))
self.play(Write(limsup))
self.wait()
# self.play(FadeOut(brace2))
self.play(FadeOut(brace2), ReplacementTransform(liminf, liminfsup[:10]),
ReplacementTransform(limsup, liminfsup[11:]))
self.play(Write(liminfsup[10]))
self.wait()
self.play(ReplacementTransform(liminfsup[3:-3], lim0eps[1:-1]),
ReplacementTransform(liminfsup[:3], lim0eps[0]),
ReplacementTransform(liminfsup[-3:], lim0eps[-1]),
)
self.wait()
self.play(ReplacementTransform(lim0eps, limeq))
self.wait()
self.play(ReplacementTransform(limeq, limres))
self.wait()
self.play(FadeOut(VGroup(
tmain[1:], right_box, l,
cauchy2_gx, bg1, bg2, to0, to02,
bg_inf,
limres, title
)))
def other_limit(self):
minus = TexMobject(r"x\to ",r"a^-",r"~\text{时,证明类似.}")
minus[1].set_color(YELLOW)
a = TexMobject(r"x\to ","a",r"~\text{时,转化为单侧极限.}")
a[1].set_color(YELLOW)
inf = TexMobject(r"x\to ",r"\pm\infty",r"~\text{时,作}~",r"t=\frac 1x",r"~\text{化为}~",r"t\to 0^+/0^-.")
inf[1].set_color(YELLOW)
VGroup(minus, a, inf).arrange(DOWN, buff=LARGE_BUFF)
self.play(Write(minus))
self.wait()
self.play(Write(a))
self.wait()
self.play(Write(inf))
self.wait()
self.play(FadeOut(VGroup(minus, a, inf)))
def remark1(self):
q = TexMobject("1.~",r"\lim_{x\to 0} {\sin x \over x} = 1",r"~\text{可以用l'H\^{o}pital法则吗?}").to_edge(UP)
q[1].set_color(YELLOW)
clock = Clock()
sol = TexMobject(*r"\lim_{x\to 0} {(\sin x)' \over x'} =\lim_{x\to 0} {\cos x \over 1} =\cos 0=1.".split()).next_to(q, DOWN)
######################## 0 1 2 3 4 5 6 7 8 9 10 11 12 13
derdef = VGroup(TexMobject(r"(\sin x)'",r"=",r"\lim_{h\to 0} {{\sin (x+h)-\sin x}\over h}"),
TexMobject(r"=\lim_{h\to 0} {{2\cos\left(x+\frac{h}{2}\right)\sin\left(\frac{h}{2}\right)} \over h}"),
TexMobject(r"=",r"\cos x\cdot",r"\lim_{h\to 0} {{\sin\left(\frac{h}{2}\right)} \over {\frac{h}{2}}}")).arrange(DOWN).next_to(sol, DOWN)
derdef[1].align_to(derdef[0][1], LEFT)
derdef[2].align_to(derdef[0][1], LEFT)
derdef[2][-1].set_color(YELLOW)
taylor_sin = TexMobject(r"\sin x=x-\frac{x^3}{3!}+\frac{x^5}{5!}-\cdots")
taylor_cos = TexMobject(r"\cos x=1-\frac{x^2}{2!}+\frac{x^4}{4!}-\cdots")
taylor_v = VGroup(taylor_sin, taylor_cos).arrange(DOWN).scale(.9).set_color(RED)
euler_sin = TexMobject(r"\sin x=\frac{\e^{\i x}-\e^{-\i x}}{2\i}")
euler_cos = TexMobject(r"\cos x=\frac{\e^{\i x}+\e^{-\i x}}{2}")
euler_v = VGroup(euler_sin, euler_cos).arrange(DOWN).scale(.9).set_color(BLUE)
other_v = VGroup(taylor_v, euler_v).arrange(buff=LARGE_BUFF*1.5).next_to(sol, DOWN, buff=MED_LARGE_BUFF)
taylor_brace = Brace(VGroup(taylor_sin, taylor_cos), LEFT)
euler_brace = Brace(VGroup(euler_sin, euler_cos), LEFT)
bg_other = BackgroundRectangle(other_v)
use_derdef = TexMobject(r"\text{没有必要.}~",r"\lim_{x\to 0}\frac{\sin x-\sin 0}{x-0}",r"=\sin'(0)=\cos 0=1.").next_to(other_v, DOWN)
use_derdef[1].set_color(YELLOW)
self.play(Write(q))
self.wait()
self.play(FadeIn(clock))
self.play(ClockPassesTime(clock))
self.play(FadeOut(clock))
self.play(Write(sol))
self.wait()
box_der = SurroundingRectangle(VGroup(sol[2:4], sol[8:10]), color=RED)
self.play(ShowCreation(box_der))
self.wait()
self.play(Write(derdef[0]))
self.play(Write(derdef[1]))
self.play(Write(derdef[2]))
self.wait()
self.play(Transform(box_der, SurroundingRectangle(derdef[2][-1], color=RED)))
self.wait()
# other def
self.play(FadeOut(box_der), FadeOut(derdef))
self.play(Write(taylor_sin))
self.play(Write(taylor_cos))
self.play(GrowFromCenter(taylor_brace))
self.wait()
self.play(Write(euler_sin))
self.play(Write(euler_cos))
self.play(GrowFromCenter(euler_brace))
self.wait()
self.play(Write(use_derdef[0]))
self.wait()
self.play(Write(use_derdef[1:]))
self.wait()
self.play(FadeOut(VGroup(
q, sol, taylor_v, euler_v, taylor_brace, euler_brace, use_derdef
)))
def remark2(self):
remark = TexMobject("2.~",r"\text{关于要求}\,",*r"\lim_{x\to a^+} {f '(x) \over g '(x)}".split(),r"~\text{存在.}").to_edge(UP).tm(self.map)
p = TexMobject(r"\lim_{x\to\infty}{{x+\sin x}\over x}=\lim_{x\to\infty} 1+\lim_{x\to\infty}{\sin x \over x}=1+0=1")#.next_to(remark, DOWN)
lhospital = TexMobject(r"\lim_{x\to\infty}{(x+\sin x)' \over x'}=\lim_{x\to\infty} (1+\cos x)",r"~\text{不存在.}")#.next_to(p, DOWN, buff=MED_LARGE_BUFF)
VGroup(p, lhospital).arrange(DOWN, buff=LARGE_BUFF).next_to(remark, DOWN, buff=MED_LARGE_BUFF)
self.play(Write(remark))
self.wait()
self.play(Write(p), run_time=3)
self.wait()
self.play(Write(lhospital), run_time=3)
self.wait()
self.play(FadeOut(VGroup(remark, p, lhospital)))
def remark3(self):
remark = TexMobject(r"3.~",r"\text{关于要求}~",r"g",r"'(x)",r"~\text{在去心邻域内恒不为0.}").tm(self.map).to_edge(UP)
example = TexMobject(r"\lim_{x\to\infty}",r"{{x+\sin x\cos x}",r"\over {\e^{\sin x}\left(",r"x+\sin x\cos x",r"\right)}}","=",
r"\lim_{x\to\infty}{1\over \e^{\sin x}}",r"~\text{不存在.}").next_to(remark, DOWN)
lhospital = VGroup(TexMobject(r"{{\left(",r"x+\sin x\cos x",r"\right)'}",r"\over {\left(\e^{\sin x}\left(",r"x+\sin x\cos x",r"\right)\right)'}}"),
TexMobject("="),
TexMobject(r"{{2\cos^2 x} \over {\e^{\sin x}",r"\cos x",r" (2\cos x+x+\sin x\cos x)}}"),
).scale(.95).arrange().next_to(example, DOWN, buff=MED_LARGE_BUFF)
# lhospital[2].next_to(lhospital[0], DOWN).align_to(lhospital[0], LEFT)
# lhospital[1].next_to(lhospital[2], LEFT)
lhospital2 = TexMobject("=",r"{{2\cos x} \over {\e^{\sin x}",r" (2\cos x+x+\sin x\cos x)}}").next_to(lhospital, DOWN).align_to(lhospital[1], LEFT)
lhospital3 = TexMobject(r"\to 0").next_to(lhospital2, DOWN, buff=MED_LARGE_BUFF, aligned_edge=LEFT)
for i in [1,3]:
example[i].set_color(YELLOW)
lhospital[0][1].set_color(YELLOW)
lhospital[0][4].set_color(YELLOW)
lhospital[2][-2].set_color(RED)
self.play(Write(remark))
self.wait()
self.play(Write(example[:5]))
self.wait()
self.play(Write(example[5:]))
self.wait()
self.play(Write(lhospital[0]))
self.wait()
self.play(Write(lhospital[1:]))
self.wait()
self.play(Write(lhospital2))
self.wait()
self.play(Write(lhospital3))
self.wait(4)
| 50.269103
| 173
| 0.534697
|
25af5e567e8e8ec6d0de43786142a9a1fa774cf4
| 585
|
py
|
Python
|
whatsapp_tracker/mains/whatsapp_tracker_main.py
|
itay-bardugo/whatsapp_tracker
|
c53a309b08bf47597c8191ec0a155a1fe1536842
|
[
"MIT"
] | 1
|
2021-09-25T12:22:35.000Z
|
2021-09-25T12:22:35.000Z
|
whatsapp_tracker/mains/whatsapp_tracker_main.py
|
itay-bardugo/whatsapp_tracker
|
c53a309b08bf47597c8191ec0a155a1fe1536842
|
[
"MIT"
] | null | null | null |
whatsapp_tracker/mains/whatsapp_tracker_main.py
|
itay-bardugo/whatsapp_tracker
|
c53a309b08bf47597c8191ec0a155a1fe1536842
|
[
"MIT"
] | null | null | null |
import logging
import sys
from whatsapp_tracker.pipelines.wt_main_pipeline import WTMainPipeline
class WhatsappTrackerMain:
def __init__(self, **kwargs):
self.kwargs = kwargs
self.wt_pipeline = WTMainPipeline(**self.kwargs)
def main(self):
try:
self.wt_pipeline.setup_selenium()
self.wt_pipeline.login()
self.wt_pipeline.track()
self.wt_pipeline.shutdown_selenium()
except Exception as e:
logging.debug(e)
self.wt_pipeline.shutdown_selenium()
sys.exit(0)
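# --- Usage sketch (added; not part of the original file) ---
# A minimal entry point showing how `WhatsappTrackerMain` might be driven. The keyword
# argument below is hypothetical: the accepted options depend on `WTMainPipeline`, which
# is not shown in this file.
if __name__ == "__main__":
    tracker = WhatsappTrackerMain(headless=True)  # `headless` is a placeholder kwarg
    tracker.main()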
| 24.375
| 70
| 0.637607
|
2aa4b5d699ce79e3b87e40103f4d6baae703bcdd
| 1,045
|
py
|
Python
|
common/xrd-ui-tests-python/tests/xroad_cs_ocsp_responder/XroadViewOcspResponder.py
|
ria-ee/XTM
|
6103f3f5bbba387b8b59b050c0c4f1fb2180fc37
|
[
"MIT"
] | 3
|
2018-03-15T14:22:50.000Z
|
2021-11-08T10:30:35.000Z
|
common/xrd-ui-tests-python/tests/xroad_cs_ocsp_responder/XroadViewOcspResponder.py
|
ria-ee/XTM
|
6103f3f5bbba387b8b59b050c0c4f1fb2180fc37
|
[
"MIT"
] | 11
|
2017-04-06T09:25:41.000Z
|
2018-06-04T09:08:48.000Z
|
common/xrd-ui-tests-python/tests/xroad_cs_ocsp_responder/XroadViewOcspResponder.py
|
ria-ee/XTM
|
6103f3f5bbba387b8b59b050c0c4f1fb2180fc37
|
[
"MIT"
] | 20
|
2017-03-14T07:21:58.000Z
|
2019-05-21T09:26:30.000Z
|
import unittest
from main.maincontroller import MainController
import ocsp_responder
class XroadViewOcspResponder(unittest.TestCase):
"""
TRUST_05 View the OCSP Responders of a CA
RIA URL: https://jira.ria.ee/browse/XTKB-192
Depends on finishing other test(s): Add OCSP
Requires helper scenarios:
X-Road version: 6.16.0
"""
def test_xroad_delete_ocsp_responder(self):
main = MainController(self)
cs_host = main.config.get('cs.host')
cs_user = main.config.get('cs.user')
cs_pass = main.config.get('cs.pass')
ca_name = main.config.get('ca.name')
ocsp_url = main.config.get('ca.ocs_host')
test_view_ocsp_responder = ocsp_responder.test_view_ocsp_responder(main, ca_name=ca_name,
ocsp_url=ocsp_url)
try:
main.reload_webdriver(url=cs_host, username=cs_user, password=cs_pass)
test_view_ocsp_responder()
finally:
main.tearDown()
| 31.666667
| 97
| 0.629665
|
0559c58ddca05f8d414b7223ac0c886b1070dceb
| 1,981
|
py
|
Python
|
code/create_zanzibar_boundary_map.py
|
hishivshah/WorldPop
|
1c0546bfc7328072e6b498732654525e02375f2b
|
[
"MIT"
] | null | null | null |
code/create_zanzibar_boundary_map.py
|
hishivshah/WorldPop
|
1c0546bfc7328072e6b498732654525e02375f2b
|
[
"MIT"
] | null | null | null |
code/create_zanzibar_boundary_map.py
|
hishivshah/WorldPop
|
1c0546bfc7328072e6b498732654525e02375f2b
|
[
"MIT"
] | null | null | null |
import logging
import mapnik
import xml.etree.ElementTree as ET
import os
import subprocess
import tempfile
# Set up logging
logging.basicConfig(format="%(asctime)s|%(levelname)s|%(message)s", level=logging.INFO)
# Parameters
shpPath = "C:/Projects/BirthsAndPregnanciesMapping/data/2014-04-24/Zanzibar/Zanzibar.shp"
epsDir = "C:/Projects/BirthsAndPregnanciesMapping/results/eps"
max_img_size = 1000 # Max width or height of output image
# Create style
stroke = mapnik.Stroke()
stroke.color = mapnik.Color(0,0,0)
stroke.width = 1.0
symbolizer = mapnik.LineSymbolizer(stroke)
rule = mapnik.Rule()
rule.symbols.append(symbolizer)
style = mapnik.Style()
style.rules.append(rule)
# Create Datasource
datasource = mapnik.Shapefile(file=shpPath)
# Create layer
layer = mapnik.Layer("boundaries")
layer.datasource = datasource
layer.styles.append("boundariesStyle")
# Calculate image output size
envelope = datasource.envelope()
dLong = envelope.maxx - envelope.minx
dLat = envelope.maxy - envelope.miny
aspectRatio = dLong / dLat
if dLong > dLat:
width = max_img_size
height = int(width / aspectRatio)
elif dLat > dLong:
height = max_img_size
width = int(aspectRatio * height)
else:
width = max_img_size
height = max_img_size
# Create map
map = mapnik.Map(width, height)
map.append_style("boundariesStyle", style)
map.layers.append(layer)
map.zoom_all()
# Output to temporary postscript file
outPsPath = os.path.join(tempfile.gettempdir(), "ZanzibarAdminBoundaries.ps")
mapnik.render_to_file(map, outPsPath)
# Convert postscript to EPS file using ghostscript
outEpsPath = os.path.join(epsDir, "ZanzibarAdminBoundaries.eps")
subprocess.call(["C:/Program Files/gs/gs9.14/bin/gswin64c",
"-dDEVICEWIDTHPOINTS=%s" % width,
"-dDEVICEHEIGHTPOINTS=%s" % height,
"-sDEVICE=eps2write",
"-o",
outEpsPath,
outPsPath])
# Delete temporary file
os.remove(outPsPath)
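The width/height branch above keeps the shapefile's aspect ratio while capping the longer side at max_img_size. The same sizing rule as a standalone helper, a minimal sketch independent of mapnik:

def fit_to_max(d_long, d_lat, max_size=1000):
    """Return (width, height) whose longer side is max_size, same aspect."""
    aspect_ratio = d_long / d_lat
    if d_long > d_lat:
        return max_size, int(max_size / aspect_ratio)
    elif d_lat > d_long:
        return int(aspect_ratio * max_size), max_size
    return max_size, max_size

# An extent twice as wide as it is tall renders at 1000 x 500 pixels.
assert fit_to_max(2.0, 1.0) == (1000, 500)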
| 27.513889
| 89
| 0.724886
|
c8154e03aa0d02a0613c9c2d84268e8f18658e9a
| 289
|
py
|
Python
|
course/2of5/exercise/exercise_8_5.py
|
GloryPassarello/Python
|
e08df3f0ac1f6376ea08740fa7d674d68c69f448
|
[
"CNRI-Python",
"AAL"
] | null | null | null |
course/2of5/exercise/exercise_8_5.py
|
GloryPassarello/Python
|
e08df3f0ac1f6376ea08740fa7d674d68c69f448
|
[
"CNRI-Python",
"AAL"
] | null | null | null |
course/2of5/exercise/exercise_8_5.py
|
GloryPassarello/Python
|
e08df3f0ac1f6376ea08740fa7d674d68c69f448
|
[
"CNRI-Python",
"AAL"
] | null | null | null |
fname = raw_input("Enter file name: ")
if len(fname) < 1 : fname = "mbox-short.txt"
fh = open(fname)
count = 0
for line in fh:
if line.startswith('From '):
txt = line.split()
print txt[1]
count = count + 1
print "There were", count, "lines in the file with From as the first word"
| 24.083333
| 74
| 0.657439
|
5b33f41e25e4541228fef291b4bdc07fbce2e078
| 4,683
|
py
|
Python
|
src/dispersion.py
|
bbc/dvbcss-synctiming
|
f9a1848a6406e866ff493506446036dadd3d6f8c
|
[
"Apache-2.0"
] | 17
|
2015-03-15T17:49:37.000Z
|
2021-05-07T13:05:26.000Z
|
src/dispersion.py
|
bbc/dvbcss-synctiming
|
f9a1848a6406e866ff493506446036dadd3d6f8c
|
[
"Apache-2.0"
] | 8
|
2015-03-20T11:11:19.000Z
|
2018-08-01T16:49:08.000Z
|
src/dispersion.py
|
BBC/dvbcss-synctiming
|
f9a1848a6406e866ff493506446036dadd3d6f8c
|
[
"Apache-2.0"
] | 7
|
2015-07-17T10:42:08.000Z
|
2019-10-11T08:44:55.000Z
|
#!/usr/bin/env python
#
# Copyright 2015 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DispersionRecorder(object):
def __init__(self, dispersionAlgorithm):
"""\
:param dispersionAlgorithm: The algorithm object to obtain dispersions from.
        The algorithm object must have an onClockAdjusted method that can be overridden or replaced
with the same arguments as the one defined for :class:`~dvbcss.protocol.client.wc.algorithm.LowestDispersionCandidate`.
Works by replacing the onClockAdjusted method in the algorithm object,
so beware if using other code that tries to do the same.
Usage:
.. code-block:: python
            algorithm = LowestDispersionCandidate(...)
... create a wall clock client that uses this algorithm object ...
recorder = DispersionRecorder(algorithm)
...
recorder.start()
...
recorder.stop() # not necessary, but will stop memory being filled!
t=int(raw_input("Enter a wall clock time:"))
disp = recorder.dispersionAt(t)
print "At wall clock time "+str(t)+", the dispersion was:",disp
"""
super(DispersionRecorder,self).__init__()
self.changeHistory = []
self.recording = False
self.algorithm = dispersionAlgorithm
# plug into the algorithm object to receive the onClockAdjusted calls
self.original_onClockAdjusted = self.algorithm.onClockAdjusted
self.algorithm.onClockAdjusted = self._onClockAdjustedHandler
def _onClockAdjustedHandler(self, timeAfterAdjustment, adjustment, oldDispersionNanos, newDispersionNanos, dispersionGrowthRate):
if self.recording:
entry = timeAfterAdjustment, adjustment, oldDispersionNanos, newDispersionNanos, dispersionGrowthRate
self.changeHistory.append(entry)
self.original_onClockAdjusted(timeAfterAdjustment, adjustment, oldDispersionNanos, newDispersionNanos, dispersionGrowthRate)
def clear(self):
"""\
Clear the recorded history.
"""
self.changeHistory = []
def start(self):
"""\
Start recording changes in dispersion.
If already recording, then this method call does nothing.
"""
self.recording = True
def stop(self):
"""\
Stop recording changes in dispersion.
If already not recording, then this method call does nothing.
"""
self.recording = False
def dispersionAt(self, wcTime):
"""\
Calculate the dispersion at a given wall clock time, using the recorded history.
:param wcTime: time of the wall clock
:returns: dispersion (in nanoseconds) when the wall clock had the time specified
"""
changeInfo = None
for ci in self.changeHistory:
when = ci[0]
if when <= wcTime:
changeInfo = ci
else:
pass # don't abort immediately but instead
# keep looking through because, due to clock adjustment we
# might get a later recorded history entry that covers the
# same range of wall clock values (because the clock could jump
# backwards when adjusted)
if changeInfo is None:
raise ValueError("History did not contain any entries early enough to give dispersion at time "+str(wcTime))
# unpack
when, adjustment, oldDispersionNanos, newDispersionNanos, dispersionGrowthRate = changeInfo
# 'when' is before 'wcTime'
# so we extrapolate the newDispersion
timeDiff = wcTime - when
dispersion = newDispersionNanos + dispersionGrowthRate * timeDiff
return dispersion
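A minimal sketch of the recorder in use. The algorithm object below is a stand-in — the recorder only needs an onClockAdjusted attribute it can wrap — and the numbers are arbitrary; dispersionAt simply extrapolates newDispersionNanos + dispersionGrowthRate * (wcTime - when):

class FakeAlgorithm(object):
    """Stand-in exposing the single attribute DispersionRecorder wraps."""
    def onClockAdjusted(self, timeAfterAdjustment, adjustment,
                        oldDispersionNanos, newDispersionNanos,
                        dispersionGrowthRate):
        pass

algo = FakeAlgorithm()
recorder = DispersionRecorder(algo)
recorder.start()

# Simulate one adjustment at wall-clock time 1000: dispersion becomes
# 50 ns and grows by 2 ns per unit of wall-clock time afterwards.
algo.onClockAdjusted(1000, 0, 75, 50, 2)

print(recorder.dispersionAt(1010))   # 50 + 2 * 10 = 70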
| 35.748092
| 133
| 0.61435
|
ec0f6df5a10c30a8fe08a0e9533172f47a2e0aef
| 1,688
|
py
|
Python
|
var/spack/repos/builtin/packages/opari2/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2020-10-15T01:08:42.000Z
|
2021-10-18T01:28:18.000Z
|
var/spack/repos/builtin/packages/opari2/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2019-07-30T10:12:28.000Z
|
2019-12-17T09:02:27.000Z
|
var/spack/repos/builtin/packages/opari2/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5
|
2019-07-30T09:42:14.000Z
|
2021-01-25T05:39:20.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Opari2(AutotoolsPackage):
"""OPARI2 is a source-to-source instrumentation tool for OpenMP and hybrid
codes. It surrounds OpenMP directives and runtime library calls with calls
to the POMP2 measurement interface. OPARI2 will provide you with a new
initialization method that allows for multi-directory and parallel builds
as well as the usage of pre-instrumented libraries. Furthermore, an
efficient way of tracking parent-child relationships was added.
Additionally, we extended OPARI2 to support instrumentation of OpenMP 3.0
tied tasks.
"""
homepage = "http://www.vi-hps.org/projects/score-p"
url = "https://www.vi-hps.org/cms/upload/packages/opari2/opari2-2.0.4.tar.gz"
version('2.0.5', sha256='9034dd7596ac2176401090fd5ced45d0ab9a9404444ff767f093ccce68114ef5')
version('2.0.4', sha256='f69e324792f66780b473daf2b3c81f58ee8188adc72b6fe0dacf43d4c1a0a131')
version('2.0.3', sha256='7e2efcfbc99152ee6e31454ef4fb747f77165691539d5d2c1df2abc4612de86c')
version('2.0.1', sha256='f49d74d7533f428a4701cd561eba8a69f60615332e81b66f01ef1c9b7ee54666')
version('2.0', sha256='0c4e575be05627cd001d692204f10caef37b2f3d1ec825f98cbe1bfa4232b0b7')
version('1.1.4', sha256='b80c04fe876faaa4ee9a0654486ecbeba516b27fc14a90d20c6384e81060cffe')
version('1.1.2', sha256='8405c2903730d94c828724b3a5f8889653553fb8567045a6c54ac0816237835d')
def configure_args(self):
return ["--enable-shared"]
| 51.151515
| 95
| 0.777251
|
70bf5d5135bfa9ea0752360374ef61b675b581a3
| 21,619
|
py
|
Python
|
tests/integration/shell/test_call.py
|
ContextLogic/salt
|
f98839c72df2294cdd1670835d10904b12089622
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/shell/test_call.py
|
ContextLogic/salt
|
f98839c72df2294cdd1670835d10904b12089622
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/shell/test_call.py
|
ContextLogic/salt
|
f98839c72df2294cdd1670835d10904b12089622
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
tests.integration.shell.call
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import
import os
import sys
import re
import shutil
from datetime import datetime
import logging
# Import Salt Testing libs
from tests.support.case import ShellCase
from tests.support.unit import skipIf
from tests.support.paths import FILES, TMP
from tests.support.mixins import ShellCaseCommonTestsMixin
from tests.support.helpers import (
destructiveTest,
flaky,
skip_if_not_root,
)
from tests.integration.utils import testprogram
from tests.integration.states.test_pkg import _PKG_TARGETS
# Import salt libs
import salt.utils.files
import salt.utils.json
import salt.utils.platform
import salt.utils.yaml
from salt.ext import six
log = logging.getLogger(__name__)
class CallTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin):
_call_binary_ = 'salt-call'
def test_default_output(self):
out = self.run_call('-l quiet test.fib 3')
expect = ['local:',
' - 2']
self.assertEqual(expect, out[:-1])
def test_text_output(self):
out = self.run_call('-l quiet --out txt test.fib 3')
expect = [
'local: (2'
]
self.assertEqual(''.join(expect), ''.join(out).rsplit(",", 1)[0])
def test_json_out_indent(self):
out = self.run_call('test.ping -l quiet --out=json --out-indent=-1')
self.assertIn('"local": true', ''.join(out))
out = self.run_call('test.ping -l quiet --out=json --out-indent=0')
self.assertIn('"local": true', ''.join(out))
out = self.run_call('test.ping -l quiet --out=json --out-indent=1')
self.assertIn('"local": true', ''.join(out))
def test_local_sls_call(self):
fileroot = os.path.join(FILES, 'file', 'base')
out = self.run_call('--file-root {0} --local state.sls saltcalllocal'.format(fileroot))
self.assertIn('Name: test.echo', ''.join(out))
self.assertIn('Result: True', ''.join(out))
self.assertIn('hello', ''.join(out))
self.assertIn('Succeeded: 1', ''.join(out))
@destructiveTest
@skip_if_not_root
@skipIf(salt.utils.platform.is_windows(), 'This test does not apply on Windows')
def test_local_pkg_install(self):
'''
Test to ensure correct output when installing package
This also tests to make sure that salt call does not execute the
function twice, see https://github.com/saltstack/salt/pull/49552
'''
def _run_call(cmd):
cmd = '--out=json --local ' + cmd
return salt.utils.json.loads(''.join(self.run_call(cmd)))['local']
os_family = _run_call('grains.get os_family')
if os_family == 'RedHat':
# This test errors in odd ways on some distros (namely Fedora, CentOS).
# There is a bug somewhere either in the test suite or Python versions
# that causes a SyntaxError. This test was skipped entirely long ago,
# likely due to this same issue. For now, let's skip the test for these
# distros and let the other OSes catch regressions here.
# The actual commands work fine, it's the test suite that has problems.
# See https://github.com/saltstack/salt-jenkins/issues/1122 and also see
# https://github.com/saltstack/salt/pull/49552 for more info.
self.skipTest('Test throws SyntaxErrors due to deep bug. Skipping until '
'issue can be resolved.')
try:
target = _PKG_TARGETS.get(os_family, [])[0]
except IndexError:
self.skipTest(
'No package targets for os_family {0}'.format(os_family))
cur_pkgs = _run_call('pkg.list_pkgs')
if target in cur_pkgs:
self.fail('Target package \'{0}\' already installed'.format(target))
out = ''.join(self.run_call('--local pkg.install {0}'.format(target)))
self.assertIn('local: ----------', out)
self.assertIn('{0}: ----------'.format(target), out)
self.assertIn('new:', out)
self.assertIn('old:', out)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
@flaky
def test_user_delete_kw_output(self):
ret = self.run_call('-l quiet -d user.delete')
assert 'salt \'*\' user.delete name remove=True force=True' in ''.join(ret)
def test_salt_documentation_too_many_arguments(self):
'''
Test to see if passing additional arguments shows an error
'''
data = self.run_call('-d virtualenv.create /tmp/ve', catch_stderr=True)
self.assertIn('You can only get documentation for one method at one time', '\n'.join(data[1]))
def test_issue_6973_state_highstate_exit_code(self):
'''
If there is no tops/master_tops or state file matches
for this minion, salt-call should exit non-zero if invoked with
option --retcode-passthrough
'''
src = os.path.join(FILES, 'file/base/top.sls')
dst = os.path.join(FILES, 'file/base/top.sls.bak')
shutil.move(src, dst)
expected_comment = 'No states found for this minion'
try:
stdout, retcode = self.run_call(
'-l quiet --retcode-passthrough state.highstate',
with_retcode=True
)
finally:
shutil.move(dst, src)
self.assertIn(expected_comment, ''.join(stdout))
self.assertNotEqual(0, retcode)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
@skipIf(True, 'to be re-enabled when #23623 is merged')
def test_return(self):
self.run_call('cmd.run "echo returnTOmaster"')
jobs = [a for a in self.run_run('jobs.list_jobs')]
self.assertTrue(True in ['returnTOmaster' in j for j in jobs])
# lookback jid
first_match = [(i, j)
for i, j in enumerate(jobs)
if 'returnTOmaster' in j][0]
jid, idx = None, first_match[0]
while idx > 0:
jid = re.match("([0-9]+):", jobs[idx])
if jid:
jid = jid.group(1)
break
idx -= 1
assert idx > 0
assert jid
master_out = [
a for a in self.run_run('jobs.lookup_jid {0}'.format(jid))
]
self.assertTrue(True in ['returnTOmaster' in a for a in master_out])
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
@flaky
def test_issue_2731_masterless(self):
root_dir = os.path.join(TMP, 'issue-2731')
config_dir = os.path.join(root_dir, 'conf')
minion_config_file = os.path.join(config_dir, 'minion')
logfile = os.path.join(root_dir, 'minion_test_issue_2731')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
with salt.utils.files.fopen(self.get_config_file_path('master')) as fhr:
master_config = salt.utils.yaml.safe_load(fhr)
master_root_dir = master_config['root_dir']
this_minion_key = os.path.join(
master_root_dir, 'pki', 'master', 'minions', 'minion_test_issue_2731'
)
minion_config = {
'id': 'minion_test_issue_2731',
'master': 'localhost',
'master_port': 64506,
'root_dir': master_root_dir,
'pki_dir': 'pki',
'cachedir': 'cachedir',
'sock_dir': 'minion_sock',
'open_mode': True,
'log_file': logfile,
'log_level': 'quiet',
'log_level_logfile': 'info',
'transport': self.master_opts['transport'],
}
try:
# Remove existing logfile
if os.path.isfile(logfile):
os.unlink(logfile)
start = datetime.now()
# Let's first test with a master running
with salt.utils.files.fopen(minion_config_file, 'w') as fh_:
salt.utils.yaml.safe_dump(minion_config, fh_, default_flow_style=False)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
)
)
try:
self.assertIn('local:', ret)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Calculate the required timeout, since next will fail.
# I needed this because after many attempts, I was unable to catch:
# WARNING: Master hostname: salt not found. Retrying in 30 seconds
            elapsed = datetime.now() - start
            timeout = elapsed.seconds + 3
# Now let's remove the master configuration
minion_config.pop('master')
minion_config.pop('master_port')
with salt.utils.files.fopen(minion_config_file, 'w') as fh_:
salt.utils.yaml.safe_dump(minion_config, fh_, default_flow_style=False)
_, timed_out = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=timeout,
catch_timeout=True,
)
try:
self.assertTrue(timed_out)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Should work with --local
ret = self.run_script(
'salt-call',
'--config-dir {0} --local cmd.run "echo foo"'.format(
config_dir
),
timeout=60
)
try:
self.assertIn('local:', ret)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Should work with local file client
minion_config['file_client'] = 'local'
with salt.utils.files.fopen(minion_config_file, 'w') as fh_:
salt.utils.yaml.safe_dump(minion_config, fh_, default_flow_style=False)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=60
)
self.assertIn('local:', ret)
finally:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
def test_issue_7754(self):
old_cwd = os.getcwd()
config_dir = os.path.join(TMP, 'issue-7754')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
with salt.utils.files.fopen(self.get_config_file_path('minion'), 'r') as fh_:
minion_config = salt.utils.yaml.safe_load(fh_)
minion_config['log_file'] = 'file:///dev/log/LOG_LOCAL3'
with salt.utils.files.fopen(os.path.join(config_dir, 'minion'), 'w') as fh_:
salt.utils.yaml.safe_dump(minion_config, fh_, default_flow_style=False)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=60,
catch_stderr=True,
with_retcode=True
)
try:
self.assertIn('local:', ret[0])
self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:')))
except AssertionError:
# We now fail when we're unable to properly set the syslog logger
self.assertIn(
'Failed to setup the Syslog logging handler', '\n'.join(ret[1])
)
self.assertEqual(ret[2], 2)
finally:
self.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
def test_syslog_file_not_found(self):
'''
test when log_file is set to a syslog file that does not exist
'''
old_cwd = os.getcwd()
config_dir = os.path.join(TMP, 'log_file_incorrect')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
with salt.utils.files.fopen(self.get_config_file_path('minion'), 'r') as fh_:
minion_config = salt.utils.yaml.load(fh_.read())
minion_config['log_file'] = 'file:///dev/doesnotexist'
with salt.utils.files.fopen(os.path.join(config_dir, 'minion'), 'w') as fh_:
fh_.write(
salt.utils.yaml.dump(minion_config, default_flow_style=False)
)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=60,
catch_stderr=True,
with_retcode=True
)
try:
if sys.version_info >= (3, 5, 4):
self.assertIn('local:', ret[0])
self.assertIn('[WARNING ] The log_file does not exist. Logging not setup correctly or syslog service not started.', ret[1])
self.assertEqual(ret[2], 0)
else:
self.assertIn(
'Failed to setup the Syslog logging handler', '\n'.join(ret[1])
)
self.assertEqual(ret[2], 2)
finally:
self.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
@skipIf(True, 'This test is unreliable. Need to investigate why more deeply.')
@flaky
def test_issue_15074_output_file_append(self):
output_file_append = os.path.join(TMP, 'issue-15074')
try:
# Let's create an initial output file with some data
_ = self.run_script(
'salt-call',
'-c {0} --output-file={1} test.versions'.format(
self.config_dir,
output_file_append
),
catch_stderr=True,
with_retcode=True
)
with salt.utils.files.fopen(output_file_append) as ofa:
output = ofa.read()
self.run_script(
'salt-call',
'-c {0} --output-file={1} --output-file-append test.versions'.format(
self.config_dir,
output_file_append
),
catch_stderr=True,
with_retcode=True
)
with salt.utils.files.fopen(output_file_append) as ofa:
self.assertEqual(ofa.read(), output + output)
finally:
if os.path.exists(output_file_append):
os.unlink(output_file_append)
@skipIf(True, 'This test is unreliable. Need to investigate why more deeply.')
@flaky
def test_issue_14979_output_file_permissions(self):
output_file = os.path.join(TMP, 'issue-14979')
with salt.utils.files.set_umask(0o077):
try:
# Let's create an initial output file with some data
self.run_script(
'salt-call',
'-c {0} --output-file={1} -l trace -g'.format(
self.config_dir,
output_file
),
catch_stderr=True,
with_retcode=True
)
try:
stat1 = os.stat(output_file)
except OSError:
self.fail('Failed to generate output file, see log for details')
# Let's change umask
os.umask(0o777) # pylint: disable=blacklisted-function
self.run_script(
'salt-call',
'-c {0} --output-file={1} --output-file-append -g'.format(
self.config_dir,
output_file
),
catch_stderr=True,
with_retcode=True
)
try:
stat2 = os.stat(output_file)
except OSError:
self.fail('Failed to generate output file, see log for details')
self.assertEqual(stat1.st_mode, stat2.st_mode)
                # Data was appended to the file
self.assertTrue(stat1.st_size < stat2.st_size)
# Let's remove the output file
os.unlink(output_file)
# Not appending data
self.run_script(
'salt-call',
'-c {0} --output-file={1} -g'.format(
self.config_dir,
output_file
),
catch_stderr=True,
with_retcode=True
)
try:
stat3 = os.stat(output_file)
except OSError:
self.fail('Failed to generate output file, see log for details')
# Mode must have changed since we're creating a new log file
self.assertNotEqual(stat1.st_mode, stat3.st_mode)
finally:
if os.path.exists(output_file):
os.unlink(output_file)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_42116_cli_pillar_override(self):
ret = self.run_call(
'state.apply issue-42116-cli-pillar-override '
'pillar=\'{"myhost": "localhost"}\''
)
for line in ret:
line = line.lstrip()
if line == 'Comment: Command "ping -c 2 localhost" run':
# Successful test
break
else:
log.debug('salt-call output:\n\n%s', '\n'.join(ret))
self.fail('CLI pillar override not found in pillar data')
def test_pillar_items_masterless(self):
'''
Test to ensure we get expected output
from pillar.items with salt-call
'''
get_items = self.run_call('pillar.items', local=True)
exp_out = [' - Lancelot', ' - Galahad', ' - Bedevere',
' monty:', ' python']
for out in exp_out:
self.assertIn(out, get_items)
def tearDown(self):
'''
Teardown method to remove installed packages
'''
user = ''
user_info = self.run_call('--local grains.get username')
if user_info and isinstance(user_info, (list, tuple)) and isinstance(user_info[-1], six.string_types):
user = user_info[-1].strip()
super(CallTest, self).tearDown()
# pylint: disable=invalid-name
def test_exit_status_unknown_argument(self):
'''
Ensure correct exit status when an unknown argument is passed to salt-call.
'''
call = testprogram.TestProgramSaltCall(
name='unknown_argument',
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
call.setup()
stdout, stderr, status = call.run(
args=['--unknown-argument'],
catch_stderr=True,
with_retcode=True,
)
self.assert_exit_status(
status, 'EX_USAGE',
message='unknown argument',
stdout=stdout, stderr=stderr
)
def test_masterless_highstate(self):
'''
test state.highstate in masterless mode
'''
ret = self.run_call('state.highstate', local=True)
destpath = os.path.join(TMP, 'testfile')
exp_out = [' Function: file.managed', ' Result: True',
' ID: {0}'.format(destpath)]
for out in exp_out:
self.assertIn(out, ret)
self.assertTrue(os.path.exists(destpath))
def test_exit_status_correct_usage(self):
'''
Ensure correct exit status when salt-call starts correctly.
'''
call = testprogram.TestProgramSaltCall(
name='correct_usage',
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
call.setup()
stdout, stderr, status = call.run(
args=['--local', 'test.true'],
catch_stderr=True,
with_retcode=True,
)
self.assert_exit_status(
status, 'EX_OK',
message='correct usage',
stdout=stdout, stderr=stderr
)
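Several tests above drive the same CLI surface. A minimal sketch of the --output-file / --output-file-append behaviour exercised by test_issue_15074_output_file_append, assuming salt-call is installed on PATH and you point it at your own config directory:

import subprocess

def run_salt_call(config_dir, outfile, append=False):
    """Run a masterless salt-call, writing its output to a file."""
    cmd = ['salt-call', '--local', '-c', config_dir,
           '--output-file={0}'.format(outfile)]
    if append:
        # Append to the existing file instead of truncating it -- the
        # property the integration test above asserts on.
        cmd.append('--output-file-append')
    cmd.append('test.versions')
    return subprocess.call(cmd)

# run_salt_call('/etc/salt', '/tmp/versions.txt')
# run_salt_call('/etc/salt', '/tmp/versions.txt', append=True)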
| 37.209983
| 139
| 0.548915
|
a9eaa9475ba10a1c650bca4923933e3eff00a68a
| 912
|
py
|
Python
|
isi_sdk_8_2_2/test/test_ndmp_settings_dmas.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_2/test/test_ndmp_settings_dmas.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_2/test/test_ndmp_settings_dmas.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_2
from isi_sdk_8_2_2.models.ndmp_settings_dmas import NdmpSettingsDmas # noqa: E501
from isi_sdk_8_2_2.rest import ApiException
class TestNdmpSettingsDmas(unittest.TestCase):
"""NdmpSettingsDmas unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNdmpSettingsDmas(self):
"""Test NdmpSettingsDmas"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_2.models.ndmp_settings_dmas.NdmpSettingsDmas() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.243902
| 90
| 0.708333
|
645ecd2ccaa20cd884d58417b03f4c6851420cdb
| 11,305
|
py
|
Python
|
tests/mock_decorators/test_function_mock.py
|
fhuertas/mock_decorator
|
bb8971398aa80f555089db0ca57fff5a648f5405
|
[
"Apache-2.0"
] | null | null | null |
tests/mock_decorators/test_function_mock.py
|
fhuertas/mock_decorator
|
bb8971398aa80f555089db0ca57fff5a648f5405
|
[
"Apache-2.0"
] | null | null | null |
tests/mock_decorators/test_function_mock.py
|
fhuertas/mock_decorator
|
bb8971398aa80f555089db0ca57fff5a648f5405
|
[
"Apache-2.0"
] | null | null | null |
import sys
import unittest
from mock_decorators.function_mock import FunctionMock
from mock_decorators.function_mock import FunctionMockResult
from mock_decorators.function_mock import FunctionMockChangeResult
from mock_decorators.function_mock import FunctionMockCheckCall
from tests.mock_decorators import module_test
from tests.mock_decorators.module_test import TestClass
class TestFunctionMock(unittest.TestCase):
p1 = 10
p2 = 2
def test_function_mock_correct(self):
def function_mocked(param_a, param_b):
return param_a - param_b
@FunctionMock(entity=module_test, function_name='function_sum', mocked_function=function_mocked,
check_signature=True)
def inner_test():
return module_test.function_sum(self.p1, self.p2)
result_no_mocked = module_test.function_sum(self.p1, self.p2)
result_mocked = inner_test()
expected_mocked = self.p1 - self.p2
expected_no_mocked = self.p1 + self.p2
self.assertEqual(result_mocked, expected_mocked, "The FunctionMock has failed")
self.assertEqual(result_no_mocked, expected_no_mocked, "The FunctionMock has failed")
def test_function_mock_correct_a_class(self):
result_of_the_function_mock = "mock mock mock"
def function_mocked(self, *args, **kwargs):
return result_of_the_function_mock
test_class = TestClass()
@FunctionMock(entity=TestClass, function_name='function_echo', mocked_function=function_mocked,
check_signature=True)
def inner_test():
return test_class.function_echo("No echo")
result_mocked = inner_test()
self.assertEqual(result_mocked, result_of_the_function_mock, "The FunctionMock has failed")
self.assertNotEqual(test_class.function_echo("No echo"),
result_of_the_function_mock, "The FunctionMock has failed")
def test_function_mock_bad_signature(self):
def function_mocked(param, param_b):
return param - param_b
@FunctionMock(entity=module_test, function_name='function_sum', mocked_function=function_mocked,
check_signature=True)
def inner_test():
module_test.function_sum(self.p1, self.p2)
if sys.version_info < (3, 4):
self.assertRaisesRegexp(TypeError, "signature", inner_test)
else:
self.assertRaisesRegex(TypeError, "signature", inner_test)
def test_function_mock_not_exists(self):
def function_mocked(param, param_b):
return param - param_b
@FunctionMock(entity=module_test, function_name='function_suma', mocked_function=function_mocked,
check_signature=True)
def inner_test():
module_test.function_suma(self.p1, self.p2)
if sys.version_info < (3, 4):
self.assertRaises(TypeError, inner_test)
else:
self.assertRaisesRegex(TypeError, "unsupported callable", inner_test)
def test_function_mock_bad_signature_no_checked(self):
def function_mocked(param, param_b):
return param - param_b
@FunctionMock(entity=module_test, function_name='function_sum', mocked_function=function_mocked,
check_signature=False)
def inner_test():
return module_test.function_sum(self.p1, self.p2)
result_no_mocked = module_test.function_sum(self.p1, self.p2)
result_mocked = inner_test()
expected_mocked = self.p1 - self.p2
expected_no_mocked = self.p1 + self.p2
self.assertEqual(result_mocked, expected_mocked, "The FunctionMock has failed")
self.assertEqual(result_no_mocked, expected_no_mocked, "The FunctionMock has failed")
def test_function_mock_no_exists(self):
def function_mocked(param, param_b):
return param - param_b
@FunctionMock(entity=module_test, function_name='function_summ', mocked_function=function_mocked,
check_signature=True)
def inner_test():
pass
if sys.version_info < (3, 4):
self.assertRaises(TypeError, inner_test)
else:
self.assertRaisesRegex(TypeError, 'unsupported callable', inner_test)
class TestFunctionMockResult(unittest.TestCase):
def test_function_mock_result_correct(self):
result_returned = -21231
@FunctionMockResult(module_test, 'function_sum', result_returned, True)
def inner_test():
return module_test.function_sum(1, 1)
result_value_mocked = inner_test()
self.assertTrue(result_value_mocked, result_returned)
def test_function_mock_result_correct_no_exist(self):
result_returned = -21231
@FunctionMockResult(module_test, 'function_suma', result_returned, True)
def inner_test():
return module_test.function_sum(1, 1)
if sys.version_info < (3, 3):
self.assertRaisesRegexp(TypeError, "the function don't exist", inner_test)
else:
self.assertRaisesRegex(TypeError, "the function don't exist", inner_test)
def test_function_mock_result_correct_no_exist_no_checked(self):
result_returned = -21231
@FunctionMockResult(module_test, 'function_suma', result_returned, False)
def inner_test():
return module_test.function_sum(1, 1)
result_value_mocked = inner_test()
self.assertTrue(result_value_mocked, result_returned)
def test_function_mock_no_function(self):
result_returned = -21231
invalid_function_name = 'invalid_function'
def inner_test():
@FunctionMockResult(module_test, invalid_function_name, result_returned)
def call_test():
module_test.function_sum(1, 1)
call_test()
if sys.version_info < (3, 0):
self.assertRaisesRegexp(AttributeError, invalid_function_name, inner_test)
else:
self.assertRaisesRegex(AttributeError, invalid_function_name, inner_test)
class TestFunctionMockChangeResult(unittest.TestCase):
first_parameter = 500
second_parameter = 10
def test_function_mock_change_result_correct(self):
def function_change(value):
return value + 2
@FunctionMockChangeResult(module_test, 'function_sum', function_change)
def inner_test():
return module_test.function_sum(self.first_parameter, self.second_parameter)
result_no_mocked = module_test.function_sum(self.first_parameter, self.second_parameter)
result_mocked = inner_test()
expected_mocked = self.first_parameter + self.second_parameter + 2
expected_no_mocked = self.first_parameter + self.second_parameter
self.assertEqual(result_mocked, expected_mocked, "The FunctionMock has failed")
self.assertEqual(result_no_mocked, expected_no_mocked, "The FunctionMock has failed")
def test_function_mock_change_result_incorrect_function(self):
def function_change():
return 2
@FunctionMockChangeResult(module_test, 'function_sum', function_change)
def inner_test():
if sys.version_info < (3, 0):
self.assertRaisesRegexp(TypeError, 'takes no arguments',
module_test.function_sum, self.first_parameter, self.second_parameter)
else:
self.assertRaisesRegex(TypeError, 'positional arguments but 1 was given',
module_test.function_sum, self.first_parameter, self.second_parameter)
inner_test()
def test_function_mock_change_result_no_function(self):
def function_change(value):
return value + 2
invalid_function_name = 'invalid_function'
def inner_test():
@FunctionMockChangeResult(module_test, invalid_function_name, function_change)
def call_test():
module_test.function_sum(1, 1)
call_test()
if sys.version_info < (3, 0):
self.assertRaisesRegexp(AttributeError, invalid_function_name, inner_test)
else:
self.assertRaisesRegex(AttributeError, invalid_function_name, inner_test)
class TestFunctionMockCheckCall(unittest.TestCase):
def test_no_called(self):
def inner_test():
@FunctionMockCheckCall(module_test, 'function_sum')
def call_test():
pass
call_test()
self.assertRaises(ValueError, inner_test)
def test_called(self):
@FunctionMockCheckCall(module_test, 'function_sum')
def inner_test():
return module_test.function_sum(2, 2)
result = inner_test()
self.assertEqual(result, 4, "The function result has been modified")
def test_call_check_invocations_ok(self):
@FunctionMockCheckCall(module_test, 'function_sum', expected_times=3)
def inner_test():
module_test.function_sum(2, 2)
module_test.function_sum(2, 2)
return module_test.function_sum(2, 2)
result = inner_test()
module_test.function_sum(2, 2)
self.assertEqual(result, 4, "The function result has been modified")
def test_call_check_invocations_ko(self):
@FunctionMockCheckCall(module_test, 'function_sum', expected_times=2)
def inner_test():
module_test.function_sum(2, 2)
module_test.function_sum(2, 2)
return module_test.function_sum(2, 2)
self.assertRaises(ValueError, inner_test)
result = module_test.function_sum(2, 2)
self.assertEqual(result, 4, "The function result has been modified")
def test_call_change_return(self):
@FunctionMockCheckCall(module_test, 'function_sum', return_value=3)
def inner_test():
return module_test.function_sum(2, 2)
result_change = inner_test()
result_no_change = module_test.function_sum(2, 2)
self.assertEqual(result_change, 3, "The function result has been modified")
self.assertEqual(result_no_change, 4, "The function result has been modified")
def test_call_change_return_0(self):
@FunctionMockCheckCall(module_test, 'function_sum', expected_times=1, return_value=0)
def inner_test():
return module_test.function_sum(2, 2)
result_change = inner_test()
result_no_change = module_test.function_sum(2, 2)
self.assertEqual(result_change, 0, "The function result has been modified")
self.assertEqual(result_no_change, 4, "The function result has been modified")
def test_check_no_call_ok(self):
@FunctionMockCheckCall(module_test, 'function_sum', expected_times=0)
def inner_test():
return 3
result_no_change = module_test.function_sum(2, 2)
self.assertEqual(result_no_change, 4, "The function result has been modified")
def test_check_no_call_ko(self):
@FunctionMockCheckCall(module_test, 'function_sum', expected_times=0)
def inner_test():
return module_test.function_sum(2, 2)
self.assertRaises(ValueError, inner_test)
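Distilled from the cases above, a minimal sketch of typical use that reuses the same module_test fixture and the FunctionMockCheckCall import from the top of this file:

@FunctionMockCheckCall(module_test, 'function_sum', expected_times=1, return_value=42)
def patched_sum_example():
    return module_test.function_sum(1, 1)

assert patched_sum_example() == 42          # canned result inside the call
assert module_test.function_sum(1, 1) == 2  # original behaviour outside it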
| 38.715753
| 110
| 0.681203
|
9388833b2a5fad9974140681130d7c327f102e36
| 17,770
|
py
|
Python
|
tests/test_shed_upload.py
|
shiltemann/planemo
|
6bb642c61df4af91f6c873dfc1c6c3c06d1d491a
|
[
"CC-BY-3.0"
] | null | null | null |
tests/test_shed_upload.py
|
shiltemann/planemo
|
6bb642c61df4af91f6c873dfc1c6c3c06d1d491a
|
[
"CC-BY-3.0"
] | null | null | null |
tests/test_shed_upload.py
|
shiltemann/planemo
|
6bb642c61df4af91f6c873dfc1c6c3c06d1d491a
|
[
"CC-BY-3.0"
] | null | null | null |
"""Integration tests for shed contents commands.
Specifically, tests for the shed_upload, shed_download, and shed_create commands.
"""
import contextlib
import os
import shutil
import tarfile
from os.path import exists, join
from planemo import git
from planemo.io import shell
from .test_utils import (
assert_exists,
CliShedTestCase,
modify_environ,
TEST_REPOS_DIR,
)
class ShedUploadTestCase(CliShedTestCase):
def test_tar_single(self):
with self._isolate_repo("single_tool") as f:
upload_command = ["shed_upload", "--tar_only"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
assert_exists(join(f, "shed_upload.tar.gz"))
def test_upload_not_exists(self):
with self._isolate_repo("single_tool"):
upload_command = ["shed_upload"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command, exit_code=2)
def test_update_not_exists(self):
with self._isolate_repo("single_tool"):
upload_command = ["shed_update"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command, exit_code=2)
def test_update_not_exists_update_only(self):
with self._isolate_repo("single_tool"):
upload_command = ["shed_update", "--skip_upload"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command, exit_code=2)
def test_update_with_check_diff(self):
with self._isolate_repo("single_tool") as f:
self._shed_create()
self._assert_shed_diff(diff=0)
upload_command = [
"shed_update", "--force_repository_creation", "--check_diff"
]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
# First time no difference.
r = self._check_exit_code(upload_command)
assert "not different, skipping upload." in r.output
# Modify a file so there is a difference.
with open(join(f, "related_file"), "w") as rf:
rf.write("new_contents")
self._assert_shed_diff(diff=1)
            # Now assert there is a difference, so the upload is not skipped.
r = self._check_exit_code(upload_command)
assert "not different, skipping upload." not in r.output
self._assert_shed_diff(diff=0)
def test_update_with_check_diff_package(self):
with self._isolate_repo("package_1") as f:
self._shed_create()
self._assert_shed_diff(diff=0)
upload_command = [
"shed_update", "--force_repository_creation", "--check_diff"
]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
# First time no difference.
r = self._check_exit_code(upload_command)
assert "not different, skipping upload." in r.output
update_package_1(f)
self._assert_shed_diff(diff=1)
            # Now assert there is a difference, so the upload is not skipped.
r = self._check_exit_code(upload_command)
assert "not different, skipping upload." not in r.output
self._assert_shed_diff(diff=0)
def test_update_with_force_create_metadata_only(self):
with self._isolate_repo("single_tool") as f:
upload_command = ["shed_update", "--force_repository_creation", "--skip_upload"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
self._verify_empty_repository(f)
def test_update_with_force_create(self):
with self._isolate_repo("single_tool") as f:
upload_command = ["shed_update", "--force_repository_creation"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
self._verify_single_uploaded(f)
def test_tar_from_git(self):
with self._isolate() as f:
with self._git_configured():
dest = join(f, "single_tool")
self._copy_repo("single_tool", dest)
shell(" && ".join([
"cd %s" % dest,
"git init",
"git add .",
"git commit -m 'initial commit'"
]))
upload_command = [
"shed_update", "--force_repository_creation",
"git+single_tool/.git"
]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
self._verify_single_uploaded(f, ["single_tool"])
def test_upload_from_git(self):
with self._isolate() as f:
with self._git_configured():
dest = join(f, "single_tool")
self._copy_repo("single_tool", dest)
shell(" && ".join([
"cd %s" % dest,
"git init",
"git add .",
"git commit -m 'initial commit'"
]))
rev = git.rev(None, "single_tool")
upload_command = [
"shed_update", "--force_repository_creation",
"git+single_tool/.git"
]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
self._verify_single_uploaded(f, ["single_tool"])
model = self.mock_shed.model
repo_id = self.repository_by_name("single_tool")["id"]
message = model._repositories_msg[repo_id][0]
assert "planemo upload for repository " in message
assert "repository https://github.com/galaxyproject" in message
assert rev in message
@contextlib.contextmanager
def _git_configured(self):
with modify_environ({
"GIT_AUTHOR_NAME": "planemo developer",
"GIT_COMMITTER_NAME": "planemo developer",
"EMAIL": "planemo@galaxyproject.org",
"GIT_AUTHOR_EMAIL": "planemo@galaxyproject.org",
"GIT_COMMITTER_EMAIL": "planemo@galaxyproject.org",
}):
yield
def test_create_and_upload(self):
with self._isolate_repo("single_tool") as f:
create_command = ["shed_create"]
create_command.extend(self._shed_args())
self._check_exit_code(create_command)
upload_command = ["shed_upload"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
self._verify_single_uploaded(f)
def test_create_with_upload(self):
with self._isolate_repo("single_tool") as f:
create_command = ["shed_create"]
create_command.extend(self._shed_args())
self._check_exit_code(create_command)
self._verify_single_uploaded(f)
    def test_cannot_recreate(self):
with self._isolate_repo("single_tool"):
create_command = ["shed_create"]
create_command.extend(self._shed_args())
self._check_exit_code(create_command)
self._check_exit_code(create_command, exit_code=1)
def test_cannot_upload_missing_include(self):
with self._isolate_repo("bad_missing_include"):
upload_command = ["shed_upload", "--tar_only"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command, exit_code=254)
    def test_upload_recursive(self):
with self._isolate_repo("multi_repos_nested") as f:
upload_command = [
"shed_update", "-r", "--force_repository_creation"
]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
self._verify_upload(f, ["cat1.xml", "macros.xml"], ["cat1"])
self._verify_upload(f, ["cat2.xml", "macros.xml"], ["cat2"])
def test_upload_filters_invalid_suite(self):
with self._isolate_repo("suite_1") as f:
# No .shed.yml, make sure to test it can infer type
# from passed in --name.
upload_command = ["shed_upload", "--tar_only",
"--name", "suite_1"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
target = self._untar(f, "shed_upload.tar.gz")
# Only one file was in archive
assert_exists(join(target, "repository_dependencies.xml"))
# this got filtered
assert not exists(join(target, "README.rst"))
def test_upload_suite_auto(self):
with self._isolate_repo("suite_auto") as f:
upload_command = ["shed_upload", "--tar_only"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
target = self._untar(f, "shed_upload.tar.gz")
# Only one file was in archive
assert_exists(join(target, "repository_dependencies.xml"))
def test_upload_filters_ignore(self):
with self._isolate_repo("single_tool_exclude") as f:
upload_command = ["shed_upload", "--force_repository_creation"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
target = self._verify_upload(f)
assert not exists(join(target, "related_file"))
def test_tar_with_symlinks(self):
with self._isolate_repo("multi_repos_nested") as f:
upload_command = ["shed_upload", "--force_repository_creation"]
upload_command.append("cat2")
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
target = self._verify_upload(f, ["macros.xml"], ["cat2"])
with open(join(target, "macros.xml"), "r") as macro_f:
macro_contents = macro_f.read()
assert macro_contents.startswith("<macros>")
def test_upload_filters_git(self):
with self._isolate_repo("single_tool") as f:
mock_git_dir = join(f, ".git")
os.makedirs(mock_git_dir)
index_path = join(mock_git_dir, "index_file")
with open(index_path, "w") as index_f:
index_f.write("test")
with open(join(f, "related_file~"), "w") as tilde_f:
tilde_f.write("backup!")
upload_command = ["shed_upload", "--force_repository_creation"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
target = self._verify_upload(f)
assert not exists(join(target, ".git"))
assert not exists(join(target, "related_file~"))
def test_upload_filters_invalid_package(self):
with self._isolate_repo("package_1") as f:
upload_command = ["shed_upload", "--tar_only"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
target = self._untar(f, "shed_upload.tar.gz")
# Only one file was in archive
assert_exists(join(target, "tool_dependencies.xml"))
# this got filtered
assert not exists(join(target, "README.rst"))
# .shed.yml always gets filtered
assert not exists(join(target, ".shed.yml"))
def test_upload_not_filters_unrestricted(self):
with self._isolate_repo("workflow_1") as f:
upload_command = ["shed_upload", "--tar_only"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
target = self._untar(f, "shed_upload.tar.gz")
# Only one file was in archive
assert_exists(join(target, "repository_dependencies.xml"))
assert_exists(join(target, "README.rst"))
def test_upload_expansion_configured(self):
with self._isolate_repo("multi_repos_flat_configured") as f:
self._verify_expansion(f)
def test_upload_expansion_flagged(self):
with self._isolate_repo("multi_repos_flat_flag") as f:
self._verify_expansion(f)
def test_upload_expansion_configured_extras(self):
with self._isolate() as f:
repo = join(f, "repo")
self._copy_repo("multi_repos_flat_configured_complex", repo)
self._copy_repo("shared_files", join(f, "shared_files"))
self._verify_expansion(f, "repo")
for tool_id in ["cat1", "cat2"]:
self._check_tar(
f, "shed_upload_cs_%s.tar.gz" % tool_id,
contains=[
"CITATION.txt",
"test-data/extra_test_file.txt",
],
)
def test_upload_expansion_suite(self):
with self._isolate_repo("multi_repos_flat_flag_suite") as f:
self._verify_expansion(f)
target = self._check_tar(
f, "shed_upload_suite_cat.tar.gz",
contains=[
"repository_dependencies.xml",
],
not_contains=["macros.xml"]
)
with open(join(target, "repository_dependencies.xml")) as f:
repo_xml = f.read()
assert 'owner="devteam" name="cat_legacy"' in repo_xml
assert 'owner="iuc" name="cs-cat2"' in repo_xml
def test_upload_with_double_dot(self):
with self._isolate() as f:
self._copy_repo("up_root/", join(f, "up_root/"))
self._copy_repo("shared_files/", join(f, "shared_files/"))
upload_command = ["shed_upload", "--tar_only"]
upload_command.extend(self._shed_args())
self._check_exit_code(upload_command)
self._check_tar(
f, "shed_upload.tar.gz",
contains=[
"up_root/README.rst",
"up_root/cat.xml",
"shared_files/extra_test_data/extra_test_file.txt",
],
not_contains=[])
def _assert_shed_diff(self, diff=1):
shed_diff_command = ["shed_diff"]
shed_diff_command.extend(self._shed_args())
self._check_exit_code(shed_diff_command, exit_code=diff)
def _verify_expansion(self, f, name=None):
upload_command = ["shed_upload", "--tar_only"]
upload_command.extend(self._shed_args())
if name is not None:
upload_command.append(join(f, name))
self._check_exit_code(upload_command)
self._check_tar(
f, "shed_upload_cs_cat1.tar.gz",
contains=[
"cat1.xml",
"macros.xml",
"test-data/1.bed"
],
not_contains=["cat2.xml"]
)
self._check_tar(
f, "shed_upload_cs_cat2.tar.gz",
contains=[
"cat2.xml",
"macros.xml",
"test-data/1.bed"
],
not_contains=["cat1.xml"]
)
def _verify_single_uploaded(self, f, download_args=[]):
self._verify_upload(
f, ["cat.xml", "related_file", "test-data/1.bed"], download_args
)
def _verify_empty_repository(self, f, download_args=[]):
target = self._download_repo(f, download_args)
assert len(os.listdir(target)) == 0
def _verify_upload(self, f, download_files=[], download_args=[]):
target = self._download_repo(f, download_args)
for download_file in download_files:
assert_exists(join(target, download_file))
return target
def _check_tar(self, f, tar_path, contains=[], not_contains=[]):
tar_path = join(f, tar_path)
assert_exists(tar_path)
target = self._untar(f, tar_path)
for path in contains:
assert_exists(join(target, path))
for path in not_contains:
assert not exists(join(target, path))
return target
def _download_repo(self, f, download_args=[]):
download_command = ["shed_download"]
download_command.extend(download_args)
download_command.extend(self._shed_args(read_only=True))
self._check_exit_code(download_command)
download = join(f, "shed_download.tar.gz")
assert_exists(download)
return self._untar(f, "shed_download.tar.gz", tarbomb=False)
def _untar(self, f, path, tarbomb=True):
target = join(f, "download")
if exists(target):
shutil.rmtree(target)
os.makedirs(target)
try:
tar = tarfile.open(path, "r:gz")
except tarfile.ReadError as e:
# Fixed in later version of Python, see
# http://bugs.python.org/issue6123
assert str(e) == "empty header", e
return target # note contained no files!
tar.extractall(path=target)
tar.close()
tar = tarfile.open(path, "r:gz")
for tar_info in tar.getmembers():
# These entries cause problems with TS.
assert tar_info.name != "."
assert tar_info.name != ""
tar.close()
if not tarbomb:
return os.path.join(target, os.listdir(target)[0])
else:
return target
def update_package_1(f):
"""Update tool dependencies file for package_1."""
changed_xml = join(
TEST_REPOS_DIR,
"package_1_changed",
"tool_dependencies.xml"
)
shutil.copyfile(changed_xml, join(f, "tool_dependencies.xml"))
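The fixtures above exercise planemo's Tool Shed commands end to end against a mock shed. A minimal sketch of the equivalent manual workflow, assuming planemo is installed and shed credentials are configured; --shed_target is taken from planemo's documented CLI rather than from this file:

import subprocess

def shed_sync(repo_dir, shed='testtoolshed'):
    """Create the repository if needed, then upload only when contents differ."""
    subprocess.check_call(['planemo', 'shed_update', '--shed_target', shed,
                           '--force_repository_creation', '--check_diff',
                           repo_dir])
    # shed_diff exits non-zero while local and remote contents still differ.
    return subprocess.call(['planemo', 'shed_diff', '--shed_target', shed,
                            repo_dir])

# shed_sync('path/to/my_tool_repo')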
| 40.022523
| 92
| 0.594429
|
9dd72774864456b781ff4e7d85717dcd1f0fc220
| 1,336
|
py
|
Python
|
MinimumBoundingBox/test.py
|
winding-lines/argoverse-tracker
|
9038e4407cfd850dba890acd9c1e3d3d5e44a9ed
|
[
"MIT"
] | 51
|
2019-10-18T03:39:15.000Z
|
2022-03-19T11:21:05.000Z
|
MinimumBoundingBox/test.py
|
winding-lines/argoverse-tracker
|
9038e4407cfd850dba890acd9c1e3d3d5e44a9ed
|
[
"MIT"
] | 1
|
2019-11-21T03:46:28.000Z
|
2019-11-21T03:56:27.000Z
|
MinimumBoundingBox/test.py
|
winding-lines/argoverse-tracker
|
9038e4407cfd850dba890acd9c1e3d3d5e44a9ed
|
[
"MIT"
] | 13
|
2019-10-18T03:40:46.000Z
|
2022-03-19T11:21:15.000Z
|
import unittest
from MinimumBoundingBox import MinimumBoundingBox, BoundingBox
from math import pi
class TestMinimumBoundingBox(unittest.TestCase):
def test_MinimumBoundingBox(self):
bb = MinimumBoundingBox(((0, 0), (3, 0), (1, 1)))
self.assertAlmostEqual(bb.area, 3)
self.assertEqual(bb.length_parallel, 3)
self.assertEqual(bb.length_orthogonal, 1)
self.assertEqual(bb.rectangle_center, (1.5, 0.5))
self.assertEqual(bb.unit_vector, (1, 0))
self.assertEqual(bb.unit_vector_angle, 0)
self.assertEqual(
bb.corner_points, {(0, -1.1102230246251565e-16), (3, 0), (3, 1), (0, 1)}
)
bb = MinimumBoundingBox(((0, 0), (0, 2), (-1, 0), (-0.9, 1)))
self.assertAlmostEqual(bb.area, 2)
self.assertEqual(bb.length_parallel, 1)
self.assertEqual(bb.length_orthogonal, 2)
self.assertEqual(bb.rectangle_center, (-0.49999999999999994, 1))
self.assertEqual(bb.unit_vector, (1, 0))
self.assertEqual(bb.unit_vector_angle, 0)
self.assertEqual(
bb.corner_points,
{
(1.6653345369377348e-16, 0),
(1.6653345369377348e-16, 2),
(-1, 2),
(-1, 0),
},
)
if __name__ == "__main__":
unittest.main()
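A minimal sketch of the same API outside the test harness — pass any iterable of 2-D points and read the fitted rectangle's properties:

from MinimumBoundingBox import MinimumBoundingBox

points = [(0, 0), (3, 0), (1, 1)]        # the triangle from the first test
bb = MinimumBoundingBox(points)
print(bb.area)                                     # ~3.0
print(bb.length_parallel, bb.length_orthogonal)    # 3 x 1 rectangle
print(bb.rectangle_center)                         # (1.5, 0.5)
print(sorted(bb.corner_points))                    # the four rectangle corners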
| 32.585366
| 84
| 0.590569
|
573285d57bdfdb26575d23500f1654599b6f9ac6
| 2,290
|
py
|
Python
|
devel/rubin_obscurations.py
|
jmeyers314/danish
|
c82e455e52b3528ccfb6d8f4ab067be95dd3a7cf
|
[
"BSD-3-Clause"
] | null | null | null |
devel/rubin_obscurations.py
|
jmeyers314/danish
|
c82e455e52b3528ccfb6d8f4ab067be95dd3a7cf
|
[
"BSD-3-Clause"
] | 3
|
2021-10-12T17:18:45.000Z
|
2021-12-07T17:35:18.000Z
|
devel/rubin_obscurations.py
|
jmeyers314/danish
|
c82e455e52b3528ccfb6d8f4ab067be95dd3a7cf
|
[
"BSD-3-Clause"
] | 1
|
2021-12-09T14:24:05.000Z
|
2021-12-09T14:24:05.000Z
|
import batoid
import numpy as np
import matplotlib.pyplot as plt
"""Script to use batoid to model the projection of obscurations along the beam
as a function of the incoming field angle.
"""
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
def model(th):
"""Full trace at field angle th. Then fit a linear function in x and y
of surface intersection point vs pupil point.
Return the coefficients for each surface.
"""
thx = 0.0
thy = th
rays = batoid.RayVector.asPolar(
telescope,
theta_x=np.deg2rad(thx),
theta_y=np.deg2rad(thy),
wavelength=620e-9,
nrad=50,
naz=200,
inner=2.3
)
tf = telescope.traceFull(rays)
rays = telescope.stopSurface.interact(rays.copy())
u0, v0 = rays.x, rays.y
out = {}
for s in tf.keys():
if s == 'Detector':
continue
u1, v1 = tf[s]['out'].x, tf[s]['out'].y
rx, resx, _, _, _ = np.polyfit(u0, u1, 1, full=True)
ry, resy, _, _, _ = np.polyfit(v0, v1, 1, full=True)
out[s] = rx, ry
return out
# Determine how surface/pupil coordinate transformations evolve with field
# angle.
scales = {}
centroids = {}
ths = np.linspace(0.0, 2.0, 20)
for th in ths:
data = model(th)
for k in data:
if k not in scales:
scales[k] = []
centroids[k] = []
rx = data[k][0]
ry = data[k][1]
scales[k].append(np.mean([rx[0], ry[0]])) # good enough?
centroids[k].append(ry[1])
pupil_radii = {}
pupil_motion = {}
for k in scales:
r, res, _, _, _ = np.polyfit(np.deg2rad(ths), centroids[k], 1, full=True)
motion = r[0] / np.mean(scales[k])
obsc = telescope[k].obscuration.original
if isinstance(obsc, batoid.ObscAnnulus):
pupil_radii[k+'_outer'] = obsc.outer / np.mean(scales[k])
pupil_radii[k+'_inner'] = obsc.inner / np.mean(scales[k])
pupil_motion[k+'_outer'] = motion
pupil_motion[k+'_inner'] = motion
elif isinstance(obsc, batoid.ObscCircle):
pupil_radii[k] = obsc.radius / np.mean(scales[k])
pupil_motion[k] = motion
print("radii")
for k, v in pupil_radii.items():
print(f"'{k}': {v}")
print()
print("motion")
for k, v in pupil_motion.items():
print(f"'{k}': {v}")
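One way the printed tables might be consumed downstream — an assumption based on how the linear fits above are constructed, not on danish's actual API: at field angle theta (radians, along +y), each element of pupil-plane radius pupil_radii[k] is treated as centred at y = pupil_motion[k] * theta in pupil coordinates.

def projected_obscuration(name, theta_rad):
    """(radius, y-centre) of one obscuring element in pupil coordinates.

    Hypothetical helper for illustration; 'name' must be a key of both
    dicts printed above.
    """
    return pupil_radii[name], pupil_motion[name] * theta_rad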
| 28.625
| 78
| 0.594323
|
843e9509288d291c2964234fd1ee96e91de01f83
| 2,842
|
py
|
Python
|
env/Lib/site-packages/testfixtures/tests/test_tempdir.py
|
beblount/Steer-Clear-Backend-Web
|
2aca521bad5f9a09c912f8e546f46bd39610544f
|
[
"MIT"
] | null | null | null |
env/Lib/site-packages/testfixtures/tests/test_tempdir.py
|
beblount/Steer-Clear-Backend-Web
|
2aca521bad5f9a09c912f8e546f46bd39610544f
|
[
"MIT"
] | null | null | null |
env/Lib/site-packages/testfixtures/tests/test_tempdir.py
|
beblount/Steer-Clear-Backend-Web
|
2aca521bad5f9a09c912f8e546f46bd39610544f
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2008-2014 Simplistix Ltd
# See license.txt for license details.
import os
from mock import Mock
from tempfile import mkdtemp
from testfixtures import Replacer, ShouldRaise, TempDirectory, compare, tempdir
from unittest import TestCase
from ..rmtree import rmtree
class TestTempDir(TestCase):
@tempdir()
def test_simple(self,d):
d.write('something', b'stuff')
d.write('.svn', b'stuff')
d.check(
'.svn',
'something',
)
@tempdir()
def test_subdirs(self,d):
subdir = ['some','thing']
d.write(subdir+['something'], b'stuff')
d.write(subdir+['.svn'], b'stuff')
d.check_dir(subdir,
'.svn',
'something',
)
@tempdir()
def test_not_same(self,d):
d.write('something', b'stuff')
with ShouldRaise(AssertionError(
"sequence not as expected:\n\nsame:\n()\n\nfirst:\n('.svn', 'something')\n\nsecond:\n('something',)"
)):
d.check('.svn', 'something')
@tempdir(ignore=('.svn',))
def test_ignore(self,d):
d.write('something', b'stuff')
d.write('.svn', b'stuff')
d.check('something', )
def test_cleanup_properly(self):
r = Replacer()
try:
m = Mock()
d = mkdtemp()
m.return_value = d
r.replace('testfixtures.tempdirectory.mkdtemp',m)
self.failUnless(os.path.exists(d))
self.assertFalse(m.called)
@tempdir()
def test_method(d):
d.write('something', b'stuff')
d.check('something', )
self.assertFalse(m.called)
compare(os.listdir(d),[])
test_method()
self.assertTrue(m.called)
self.failIf(os.path.exists(d))
finally:
r.restore()
if os.path.exists(d):
# only runs if the test fails!
rmtree(d) # pragma: no cover
@tempdir()
def test_cleanup_test_okay_with_deleted_dir(self,d):
rmtree(d.path)
@tempdir()
def test_decorator_returns_tempdirectory(self,d):
# check for what we get, so we only have to write
# tests in test_tempdirectory.py
self.failUnless(isinstance(d,TempDirectory))
def test_dont_create_or_cleanup_with_path(self):
with Replacer() as r:
m = Mock()
r.replace('testfixtures.tempdirectory.mkdtemp',m)
r.replace('testfixtures.tempdirectory.rmtree',m)
@tempdir(path='foo')
def test_method(d):
compare(d.path,'foo')
test_method()
self.assertFalse(m.called)
| 26.811321
| 112
| 0.534131
|
68be9a334b00b7aac72c979d574a5d566c9be98c
| 308
|
py
|
Python
|
210915/Q1085.py
|
JongGuk/BOJ
|
dd447d9f65481de19a3c0b4f8bb8b5f3d6277c15
|
[
"MIT"
] | null | null | null |
210915/Q1085.py
|
JongGuk/BOJ
|
dd447d9f65481de19a3c0b4f8bb8b5f3d6277c15
|
[
"MIT"
] | null | null | null |
210915/Q1085.py
|
JongGuk/BOJ
|
dd447d9f65481de19a3c0b4f8bb8b5f3d6277c15
|
[
"MIT"
] | null | null | null |
'''Hansu is currently at (x, y). The rectangle has its sides parallel to the coordinate axes, with its bottom-left corner at (0, 0) and its top-right corner at (w, h).
Write a program that finds the minimum distance from his position to the boundary of the rectangle.
The first line contains x, y, w, and h.
Example input
6 2 10 3
Example output
1
'''
x, y, w, h = map(int, input().split())
distances = [x, y]
distances.append(w - x)
distances.append(h - y)
print(min(distances))
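# Sanity sketch (not part of the original submission): for the sample case in
# the docstring above, "6 2 10 3", the candidate distances to the four edges
# are 6, 2, 4 and 1, so the minimum printed is 1, matching the expected output.
assert min(6, 2, 10 - 6, 3 - 2) == 1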
| 20.533333
| 86
| 0.616883
|
31156993acbaa6f865819b8ea3a17d94f2e4801e
| 811
|
py
|
Python
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jdfusion/models/VmInstanceItem.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jdfusion/models/VmInstanceItem.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jdfusion/models/VmInstanceItem.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class VmInstanceItem(object):
def __init__(self, vm=None):
"""
:param vm: (Optional)
"""
self.vm = vm
| 28.964286
| 75
| 0.7127
|
94b8f55041b0075e387b11d3c7a10efcf3c6e19e
| 6,107
|
py
|
Python
|
keystone/server/flask/application.py
|
ferag/keystone
|
af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7
|
[
"Apache-2.0"
] | null | null | null |
keystone/server/flask/application.py
|
ferag/keystone
|
af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7
|
[
"Apache-2.0"
] | null | null | null |
keystone/server/flask/application.py
|
ferag/keystone
|
af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import functools
import sys
import flask
import oslo_i18n
from oslo_log import log
from oslo_middleware import healthcheck
import six
try:
# werkzeug 0.15.x
from werkzeug.middleware import dispatcher as wsgi_dispatcher
except ImportError:
# werkzeug 0.14.x
import werkzeug.wsgi as wsgi_dispatcher
import keystone.api
from keystone import exception
from keystone.server.flask import common as ks_flask
from keystone.server.flask.request_processing import json_body
from keystone.server.flask.request_processing import req_logging
from keystone.receipt import handlers as receipt_handlers
LOG = log.getLogger(__name__)
def fail_gracefully(f):
"""Log exceptions and aborts."""
@functools.wraps(f)
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except Exception as e:
LOG.debug(e, exc_info=True)
# exception message is printed to all logs
LOG.critical(e)
sys.exit(1)
return wrapper
def _add_vary_x_auth_token_header(response):
# Add the expected Vary Header, this is run after every request in the
# response-phase
response.headers['Vary'] = 'X-Auth-Token'
return response
def _best_match_language():
"""Determine the best available locale.
This returns best available locale based on the Accept-Language HTTP
header passed in the request.
"""
if not flask.request.accept_languages:
return None
return flask.request.accept_languages.best_match(
oslo_i18n.get_available_languages('keystone'))
def _handle_keystone_exception(error):
# TODO(adriant): register this with its own specific handler:
if isinstance(error, exception.InsufficientAuthMethods):
return receipt_handlers.build_receipt(error)
# Handle logging
if isinstance(error, exception.Unauthorized):
LOG.warning(
"Authorization failed. %(exception)s from %(remote_addr)s",
{'exception': error, 'remote_addr': flask.request.remote_addr})
elif isinstance(error, exception.UnexpectedError):
LOG.exception(six.text_type(error))
else:
LOG.warning(six.text_type(error))
# Render the exception to something user "friendly"
error_message = error.args[0]
message = oslo_i18n.translate(error_message, _best_match_language())
if message is error_message:
# translate() didn't do anything because it wasn't a Message,
# convert to a string.
message = six.text_type(message)
body = dict(
error={
'code': error.code,
'title': error.title,
'message': message}
)
if isinstance(error, exception.AuthPluginException):
body['error']['identity'] = error.authentication
# Create the response and set status code.
response = flask.jsonify(body)
response.status_code = error.code
# Add the appropriate WWW-Authenticate header for Unauthorized
if isinstance(error, exception.Unauthorized):
url = ks_flask.base_url()
response.headers['WWW-Authenticate'] = 'Keystone uri="%s"' % url
return response
def _handle_unknown_keystone_exception(error):
# translate a python exception to something we can properly render as
# an API error.
if isinstance(error, TypeError):
new_exc = exception.ValidationError(error)
else:
new_exc = exception.UnexpectedError(error)
return _handle_keystone_exception(new_exc)
@fail_gracefully
def application_factory(name='public'):
if name not in ('admin', 'public'):
raise RuntimeError('Application name (for base_url lookup) must be '
'either `admin` or `public`.')
app = flask.Flask(name)
# Register Error Handler Function for Keystone Errors.
# NOTE(morgan): Flask passes errors to an error handling function. All of
# keystone's api errors are explicitly registered in
# keystone.exception.KEYSTONE_API_EXCEPTIONS and those are in turn
# registered here to ensure a proper error is bubbled up to the end user
# instead of a 500 error.
for exc in exception.KEYSTONE_API_EXCEPTIONS:
app.register_error_handler(exc, _handle_keystone_exception)
# Register extra (python) exceptions with the proper exception handler,
# specifically TypeError. It will render as a 400 error, but presented in
# a "web-ified" manner
app.register_error_handler(TypeError, _handle_unknown_keystone_exception)
# Add core before request functions
app.before_request(req_logging.log_request_info)
app.before_request(json_body.json_body_before_request)
# Add core after request functions
app.after_request(_add_vary_x_auth_token_header)
# NOTE(morgan): Configure the Flask Environment for our needs.
app.config.update(
# We want to bubble up Flask Exceptions (for now)
PROPAGATE_EXCEPTIONS=True)
for api in keystone.api.__apis__:
for api_bp in api.APIs:
api_bp.instantiate_and_register_to_app(app)
# Load in Healthcheck and map it to /healthcheck
hc_app = healthcheck.Healthcheck.app_factory(
{}, oslo_config_project='keystone')
# Use the simple form of the dispatch middleware, no extra logic needed
# for legacy dispatching. This is to mount /healthcheck at a consistent
# place
app.wsgi_app = wsgi_dispatcher.DispatcherMiddleware(
app.wsgi_app,
{'/healthcheck': hc_app})
return app
| 33.927778
| 78
| 0.709022
|
357b7bd3cedd56d768f2e148141f03e2f12d5d21
| 7,866
|
py
|
Python
|
venv/Lib/site-packages/caffe2/python/cnn.py
|
Westlanderz/AI-Plat1
|
1187c22819e5135e8e8189c99b86a93a0d66b8d8
|
[
"MIT"
] | 1
|
2022-01-08T12:30:44.000Z
|
2022-01-08T12:30:44.000Z
|
venv/Lib/site-packages/caffe2/python/cnn.py
|
Westlanderz/AI-Plat1
|
1187c22819e5135e8e8189c99b86a93a0d66b8d8
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/caffe2/python/cnn.py
|
Westlanderz/AI-Plat1
|
1187c22819e5135e8e8189c99b86a93a0d66b8d8
|
[
"MIT"
] | null | null | null |
## @package cnn
# Module caffe2.python.cnn
from caffe2.python import brew, workspace
from caffe2.python.model_helper import ModelHelper
from caffe2.proto import caffe2_pb2
import logging
class CNNModelHelper(ModelHelper):
"""A helper model so we can write CNN models more easily, without having to
manually define parameter initializations and operators separately.
"""
def __init__(self, order="NCHW", name=None,
use_cudnn=True, cudnn_exhaustive_search=False,
ws_nbytes_limit=None, init_params=True,
skip_sparse_optim=False,
param_model=None):
logging.warning(
"[====DEPRECATE WARNING====]: you are creating an "
"object from CNNModelHelper class which will be deprecated soon. "
"Please use ModelHelper object with brew module. For more "
"information, please refer to caffe2.ai and python/brew.py, "
"python/brew_test.py for more information."
)
cnn_arg_scope = {
'order': order,
'use_cudnn': use_cudnn,
'cudnn_exhaustive_search': cudnn_exhaustive_search,
}
if ws_nbytes_limit:
cnn_arg_scope['ws_nbytes_limit'] = ws_nbytes_limit
super(CNNModelHelper, self).__init__(
skip_sparse_optim=skip_sparse_optim,
name="CNN" if name is None else name,
init_params=init_params,
param_model=param_model,
arg_scope=cnn_arg_scope,
)
self.order = order
self.use_cudnn = use_cudnn
self.cudnn_exhaustive_search = cudnn_exhaustive_search
self.ws_nbytes_limit = ws_nbytes_limit
if self.order != "NHWC" and self.order != "NCHW":
raise ValueError(
"Cannot understand the CNN storage order %s." % self.order
)
def ImageInput(self, blob_in, blob_out, use_gpu_transform=False, **kwargs):
return brew.image_input(
self,
blob_in,
blob_out,
order=self.order,
use_gpu_transform=use_gpu_transform,
**kwargs
)
def VideoInput(self, blob_in, blob_out, **kwargs):
return brew.video_input(
self,
blob_in,
blob_out,
**kwargs
)
def PadImage(self, blob_in, blob_out, **kwargs):
# TODO(wyiming): remove this dummy helper later
self.net.PadImage(blob_in, blob_out, **kwargs)
def ConvNd(self, *args, **kwargs):
return brew.conv_nd(
self,
*args,
use_cudnn=self.use_cudnn,
order=self.order,
cudnn_exhaustive_search=self.cudnn_exhaustive_search,
ws_nbytes_limit=self.ws_nbytes_limit,
**kwargs
)
def Conv(self, *args, **kwargs):
return brew.conv(
self,
*args,
use_cudnn=self.use_cudnn,
order=self.order,
cudnn_exhaustive_search=self.cudnn_exhaustive_search,
ws_nbytes_limit=self.ws_nbytes_limit,
**kwargs
)
def ConvTranspose(self, *args, **kwargs):
return brew.conv_transpose(
self,
*args,
use_cudnn=self.use_cudnn,
order=self.order,
cudnn_exhaustive_search=self.cudnn_exhaustive_search,
ws_nbytes_limit=self.ws_nbytes_limit,
**kwargs
)
def GroupConv(self, *args, **kwargs):
return brew.group_conv(
self,
*args,
use_cudnn=self.use_cudnn,
order=self.order,
cudnn_exhaustive_search=self.cudnn_exhaustive_search,
ws_nbytes_limit=self.ws_nbytes_limit,
**kwargs
)
def GroupConv_Deprecated(self, *args, **kwargs):
return brew.group_conv_deprecated(
self,
*args,
use_cudnn=self.use_cudnn,
order=self.order,
cudnn_exhaustive_search=self.cudnn_exhaustive_search,
ws_nbytes_limit=self.ws_nbytes_limit,
**kwargs
)
def FC(self, *args, **kwargs):
return brew.fc(self, *args, **kwargs)
def PackedFC(self, *args, **kwargs):
return brew.packed_fc(self, *args, **kwargs)
def FC_Prune(self, *args, **kwargs):
return brew.fc_prune(self, *args, **kwargs)
def FC_Decomp(self, *args, **kwargs):
return brew.fc_decomp(self, *args, **kwargs)
def FC_Sparse(self, *args, **kwargs):
return brew.fc_sparse(self, *args, **kwargs)
def Dropout(self, *args, **kwargs):
return brew.dropout(
self, *args, order=self.order, use_cudnn=self.use_cudnn, **kwargs
)
def LRN(self, *args, **kwargs):
return brew.lrn(
self, *args, order=self.order, use_cudnn=self.use_cudnn, **kwargs
)
def Softmax(self, *args, **kwargs):
return brew.softmax(self, *args, use_cudnn=self.use_cudnn, **kwargs)
def SpatialBN(self, *args, **kwargs):
return brew.spatial_bn(self, *args, order=self.order, **kwargs)
def SpatialGN(self, *args, **kwargs):
return brew.spatial_gn(self, *args, order=self.order, **kwargs)
def InstanceNorm(self, *args, **kwargs):
return brew.instance_norm(self, *args, order=self.order, **kwargs)
def Relu(self, *args, **kwargs):
return brew.relu(
self, *args, order=self.order, use_cudnn=self.use_cudnn, **kwargs
)
def PRelu(self, *args, **kwargs):
return brew.prelu(self, *args, **kwargs)
def Concat(self, *args, **kwargs):
return brew.concat(self, *args, order=self.order, **kwargs)
def DepthConcat(self, *args, **kwargs):
"""The old depth concat function - we should move to use concat."""
print("DepthConcat is deprecated. use Concat instead.")
return self.Concat(*args, **kwargs)
def Sum(self, *args, **kwargs):
return brew.sum(self, *args, **kwargs)
def Transpose(self, *args, **kwargs):
return brew.transpose(self, *args, use_cudnn=self.use_cudnn, **kwargs)
def Iter(self, *args, **kwargs):
return brew.iter(self, *args, **kwargs)
def Accuracy(self, *args, **kwargs):
return brew.accuracy(self, *args, **kwargs)
def MaxPool(self, *args, **kwargs):
return brew.max_pool(
self, *args, use_cudnn=self.use_cudnn, order=self.order, **kwargs
)
def MaxPoolWithIndex(self, *args, **kwargs):
return brew.max_pool_with_index(self, *args, order=self.order, **kwargs)
def AveragePool(self, *args, **kwargs):
return brew.average_pool(
self, *args, use_cudnn=self.use_cudnn, order=self.order, **kwargs
)
@property
def XavierInit(self):
return ('XavierFill', {})
def ConstantInit(self, value):
return ('ConstantFill', dict(value=value))
@property
def MSRAInit(self):
return ('MSRAFill', {})
@property
def ZeroInit(self):
return ('ConstantFill', {})
def AddWeightDecay(self, weight_decay):
return brew.add_weight_decay(self, weight_decay)
@property
def CPU(self):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CPU
return device_option
@property
def GPU(self, gpu_id=0):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = workspace.GpuDeviceType
device_option.device_id = gpu_id
return device_option
| 32.639004
| 81
| 0.582888
|
9221f96e875e7a3628aa90698180e7541e3841c2
| 594
|
py
|
Python
|
Interpulation/Interpulations.py
|
Starlord82/Misc-programs
|
f4ff00a88f6c2bef6ff892f69ee9f77cf901768e
|
[
"Apache-2.0"
] | null | null | null |
Interpulation/Interpulations.py
|
Starlord82/Misc-programs
|
f4ff00a88f6c2bef6ff892f69ee9f77cf901768e
|
[
"Apache-2.0"
] | null | null | null |
Interpulation/Interpulations.py
|
Starlord82/Misc-programs
|
f4ff00a88f6c2bef6ff892f69ee9f77cf901768e
|
[
"Apache-2.0"
] | null | null | null |
def inter(d1,d2,e1,e2,pn):
result = {}
for num in range(pn):
        user_dist = float(input(f'Enter distance of point number {num+1}: '))
result[user_dist] = e1 + ((e2-e1)/(d2-d1))*(user_dist-d1)
for key, value in result.items():
print(f'\n{key} : {value:.2f}')
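# Illustrative check of the linear-interpolation formula used in inter() above;
# the station numbers below are made up purely for the example. Halfway between
# stations at distances 0 and 100 with elevations 10 and 20, the interpolated
# elevation is 15.
assert abs((10.0 + ((20.0 - 10.0) / (100.0 - 0.0)) * (50.0 - 0.0)) - 15.0) < 1e-9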
if __name__ == "__main__":
    dist1 = float(input("Enter first distance: "))
    dist2 = float(input("Enter last distance: "))
elev1 = float(input("Enter first elevation: "))
elev2 = float(input("Enter last elevation: "))
point_num = int(input("How many points between? "))
inter(dist1, dist2, elev1, elev2, point_num)
| 25.826087
| 71
| 0.651515
|
5da1a4475db819a950dfb69478307fcdeddaa6f4
| 12,378
|
py
|
Python
|
webapps/server/views.py
|
pilliq/mongo-web-shell
|
03cbc8815982d0eb160ec239bf3a36c7a2e08dde
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2015-07-17T02:33:31.000Z
|
2015-07-17T02:33:31.000Z
|
webapps/server/views.py
|
pilliq/mongo-web-shell
|
03cbc8815982d0eb160ec239bf3a36c7a2e08dde
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
webapps/server/views.py
|
pilliq/mongo-web-shell
|
03cbc8815982d0eb160ec239bf3a36c7a2e08dde
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime, timedelta
from functools import update_wrapper
import uuid
from bson import BSON
from bson.json_util import dumps, loads
from flask import Blueprint, current_app, make_response, request
from flask import session
from pymongo.errors import InvalidDocument, OperationFailure
from webapps.lib import CLIENTS_COLLECTION
from webapps.lib.MWSServerError import MWSServerError
from webapps.lib.db import get_db
from webapps.lib.decorators import check_session_id, ratelimit
from webapps.lib.util import (
UseResId,
get_collection_names,
get_internal_coll_name
)
mws = Blueprint('mws', __name__, url_prefix='/mws')
_logger = logging.getLogger('mongows.views')
@mws.after_request
def no_cache(response):
response.cache_control.no_cache = True
response.headers['Expires'] = 0
return response
# TODO: Look over this method; remove unnecessary bits, check convention, etc.
# via http://flask.pocoo.org/snippets/56/
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if isinstance(headers, list):
headers = ', '.join(x.upper() for x in headers)
if isinstance(origin, list):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
cors_origin = origin or current_app.config.get('CORS_ORIGIN', '')
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = cors_origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
else:
reqh = request.headers.get('Access-Control-Request-Headers')
h['Access-Control-Allow-Headers'] = reqh
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
@mws.route('/', methods=['POST', 'OPTIONS'])
@crossdomain()
def create_mws_resource():
session_id = session.get('session_id', str(uuid.uuid4()))
session['session_id'] = session_id
clients = get_db()[CLIENTS_COLLECTION]
cursor = clients.find({'session_id': session_id}, {'res_id': 1, '_id': 0})
if cursor.count():
# TODO: handle multiple res_id per session
res_id = cursor[0]['res_id']
is_new = False
else:
res_id = generate_res_id()
clients.insert({
'version': 1,
'res_id': res_id,
'collections': [],
'session_id': session_id,
'timestamp': datetime.now()
})
is_new = True
return to_json({'res_id': res_id, 'is_new': is_new})
@mws.route('/<res_id>/keep-alive', methods=['POST', 'OPTIONS'])
@crossdomain()
@check_session_id
def keep_mws_alive(res_id):
clients = get_db()[CLIENTS_COLLECTION]
clients.update({'session_id': session.get('session_id'), 'res_id': res_id},
{'$set': {'timestamp': datetime.now()}})
return empty_success()
@mws.route('/<res_id>/db/<collection_name>/find', methods=['GET', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_find(res_id, collection_name):
# TODO: Should we specify a content type? Then we have to use an options
# header, and we should probably get the return type from the content-type
# header.
parse_get_json(request)
query = request.json.get('query')
projection = request.json.get('projection')
skip = request.json.get('skip', 0)
limit = request.json.get('limit', 0)
sort = request.json.get('sort', {})
sort = sort.items()
with UseResId(res_id):
coll = get_db()[collection_name]
cursor = coll.find(query, projection, skip, limit)
if len(sort) > 0:
cursor.sort(sort)
documents = list(cursor)
return to_json({'result': documents})
@mws.route('/<res_id>/db/<collection_name>/insert',
methods=['POST', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_insert(res_id, collection_name):
# TODO: Ensure request.json is not None.
if 'document' in request.json:
document = request.json['document']
else:
error = '\'document\' argument not found in the insert request.'
raise MWSServerError(400, error)
# Check quota
size = get_collection_size(res_id, collection_name)
# Handle inserting both a list of docs or a single doc
if isinstance(document, list):
req_size = 0
for d in document:
req_size += len(BSON.encode(d))
else:
req_size = len(BSON.encode(document))
if size + req_size > current_app.config['QUOTA_COLLECTION_SIZE']:
raise MWSServerError(403, 'Collection size exceeded')
# Insert document
with UseResId(res_id):
try:
get_db()[collection_name].insert(document)
return empty_success()
except InvalidDocument as e:
raise MWSServerError(400, e.message)
@mws.route('/<res_id>/db/<collection_name>/remove',
methods=['DELETE', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_remove(res_id, collection_name):
constraint = request.json.get('constraint') if request.json else {}
just_one = request.json and request.json.get('just_one', False)
with UseResId(res_id):
collection = get_db()[collection_name]
if just_one:
collection.find_and_modify(constraint, remove=True)
else:
collection.remove(constraint)
return empty_success()
@mws.route('/<res_id>/db/<collection_name>/update', methods=['PUT', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_update(res_id, collection_name):
query = update = None
if request.json:
query = request.json.get('query')
update = request.json.get('update')
upsert = request.json.get('upsert', False)
multi = request.json.get('multi', False)
if query is None or update is None:
error = 'update requires spec and document arguments'
raise MWSServerError(400, error)
# Check quota
size = get_collection_size(res_id, collection_name)
with UseResId(res_id):
# Computation of worst case size increase - update size * docs affected
# It would be nice if we were able to make a more conservative estimate
# of the space difference that an update will cause. (especially if it
# results in smaller documents)
db = get_db()
affected = db[collection_name].find(query).count()
req_size = len(BSON.encode(update)) * affected
if size + req_size > current_app.config['QUOTA_COLLECTION_SIZE']:
raise MWSServerError(403, 'Collection size exceeded')
try:
db[collection_name].update(query, update, upsert, multi=multi)
return empty_success()
except OperationFailure as e:
raise MWSServerError(400, e.message)
@mws.route('/<res_id>/db/<collection_name>/save',
methods=['POST', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_save(res_id, collection_name):
# TODO: Ensure request.json is not None.
if 'document' in request.json:
document = request.json['document']
else:
error = '\'document\' argument not found in the save request.'
raise MWSServerError(400, error)
# Check quota
size = get_collection_size(res_id, collection_name)
req_size = len(BSON.encode(document))
if size + req_size > current_app.config['QUOTA_COLLECTION_SIZE']:
raise MWSServerError(403, 'Collection size exceeded')
# Save document
with UseResId(res_id):
try:
get_db()[collection_name].save(document)
return empty_success()
except InvalidDocument as e:
raise MWSServerError(400, e.message)
@mws.route('/<res_id>/db/<collection_name>/aggregate',
methods=['GET', 'OPTIONS'])
@crossdomain()
@check_session_id
def db_collection_aggregate(res_id, collection_name):
parse_get_json(request)
try:
with UseResId(res_id):
coll = get_db()[collection_name]
result = coll.aggregate(request.json)
return to_json(result)
except OperationFailure as e:
raise MWSServerError(400, e.message)
@mws.route('/<res_id>/db/<collection_name>/drop',
methods=['DELETE', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_drop(res_id, collection_name):
with UseResId(res_id):
get_db().drop_collection(collection_name)
return empty_success()
@mws.route('/<res_id>/db/<collection_name>/count', methods=['GET', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_count(res_id, collection_name):
parse_get_json(request)
query = request.json.get('query')
skip = request.json.get('skip', 0)
limit = request.json.get('limit', 0)
use_skip_limit = bool(skip or limit)
with UseResId(res_id):
coll = get_db()[collection_name]
cursor = coll.find(query, skip=skip, limit=limit)
count = cursor.count(use_skip_limit)
return to_json({'count': count})
@mws.route('/<res_id>/db/getCollectionNames',
methods=['GET', 'OPTIONS'])
@crossdomain()
@check_session_id
def db_get_collection_names(res_id):
return to_json({'result': get_collection_names(res_id)})
@mws.route('/<res_id>/db',
methods=['DELETE', 'OPTIONS'])
@crossdomain()
@check_session_id
def db_drop(res_id):
DB = get_db()
collections = get_collection_names(res_id)
with UseResId(res_id):
for c in collections:
DB.drop_collection(c)
return empty_success()
def generate_res_id():
return str(uuid.uuid4())
def user_has_access(res_id, session_id):
query = {'res_id': res_id, 'session_id': session_id}
coll = get_db()[CLIENTS_COLLECTION]
return_value = coll.find_one(query)
return False if return_value is None else True
def to_json(result):
try:
return dumps(result), 200
except ValueError:
error = 'Error in find while trying to convert the results to ' + \
'JSON format.'
raise MWSServerError(500, error)
def empty_success():
return '', 204
def parse_get_json(request):
try:
request.json = loads(request.args.keys()[0])
except ValueError:
raise MWSServerError(400, 'Error parsing JSON data',
'Invalid GET parameter data')
def get_collection_size(res_id, collection_name):
coll = get_internal_coll_name(res_id, collection_name)
try:
return get_db().command({'collstats': coll})['size']
except OperationFailure as e:
if 'ns not found' in e.message:
return 0
else:
raise MWSServerError(500, e.message)
| 32.067358
| 79
| 0.657295
|
90dc97657be5a38d69427f4ca520f8edf64127d6
| 2,134
|
py
|
Python
|
vonx/web/__init__.py
|
nrempel/von-x
|
06896def4708ac217d78b8c9560eed1f862d0ddd
|
[
"Apache-2.0"
] | null | null | null |
vonx/web/__init__.py
|
nrempel/von-x
|
06896def4708ac217d78b8c9560eed1f862d0ddd
|
[
"Apache-2.0"
] | null | null | null |
vonx/web/__init__.py
|
nrempel/von-x
|
06896def4708ac217d78b8c9560eed1f862d0ddd
|
[
"Apache-2.0"
] | 2
|
2018-08-01T01:15:03.000Z
|
2018-08-03T16:26:30.000Z
|
#
# Copyright 2017-2018 Government of Canada
# Public Services and Procurement Canada - buyandsell.gc.ca
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
vonx.web module initialization
"""
import json
import os
from aiohttp import web
import aiohttp_jinja2
from jinja2 import ChoiceLoader, FileSystemLoader, PackageLoader
from ..common.manager import ConfigServiceManager
from .routes import get_routes
def _setup_jinja(manager: ConfigServiceManager, app: web.Application):
"""
Initialize aiohttp-jinja2 for template rendering
"""
tpl_path = manager.env.get('TEMPLATE_PATH')
if not tpl_path:
tpl_path = os.path.join(manager.config_root, 'templates')
# load default templates provided by package
loader = PackageLoader('vonx', 'templates')
if tpl_path:
# load custom templates if present
# may want to use a resource loader if tpl_path looks like a package name (has a colon)
loader = ChoiceLoader([
loader,
FileSystemLoader(tpl_path)
])
filters = {"jsonify": json.dumps}
aiohttp_jinja2.setup(app, loader=loader, filters=filters)
async def init_web(manager: ConfigServiceManager):
"""
Initialize the web server application
"""
base = manager.env.get('WEB_BASE_HREF', '/')
app = web.Application()
app['base_href'] = base
app['manager'] = manager
app['static_root_url'] = base + 'assets'
app.add_routes(get_routes(app))
_setup_jinja(manager, app)
if base != '/':
root_app = web.Application()
root_app.add_subapp(base, app)
return root_app
return app
| 29.232877
| 95
| 0.705248
|
1048d2eb856ffa0e3b646b033ed40ea0d65cc1df
| 1,799
|
py
|
Python
|
apps/modules/permission/apis/permission.py
|
yeayee/osroom
|
f7084843ea4b75505283f8b23da60471ba8fc9bb
|
[
"BSD-2-Clause"
] | 1
|
2019-05-12T14:54:40.000Z
|
2019-05-12T14:54:40.000Z
|
apps/modules/permission/apis/permission.py
|
yeayee/osroom
|
f7084843ea4b75505283f8b23da60471ba8fc9bb
|
[
"BSD-2-Clause"
] | null | null | null |
apps/modules/permission/apis/permission.py
|
yeayee/osroom
|
f7084843ea4b75505283f8b23da60471ba8fc9bb
|
[
"BSD-2-Clause"
] | null | null | null |
# -*-coding:utf-8-*-
from flask import request
from apps.configs.sys_config import METHOD_WARNING
from apps.core.blueprint import api
from apps.core.flask.login_manager import osr_login_required
from apps.core.flask.permission import permission_required
from apps.core.flask.response import response_format
from apps.modules.permission.process.permission import permission, permissions, add_per, delete_per, edit_per, \
permissions_details
__author__ = "Allen Woo"
@api.route('/admin/permission', methods=['GET', 'POST', 'PUT', 'DELETE'])
@osr_login_required
@permission_required()
def api_permission():
"""
GET:
    1. Get the system's permission data in detail
        pre:<int>, number of records per page, default 10
        page:<int>, page number, default 1
        keyword:<str>, search keyword
        is_details:<int>, must be 1
    2. Get only the value, name and explain of all system permissions, as well as the weight positions already in use
        Takes no parameters
    POST:
        Add a permission
        name:<str>, name
        position:<int>, position in the binary value
        explain:<str>, description
        is_default:<int>, 0 means not a default permission, 1 means it is one of the default permissions
    PUT:
        Update a permission
        id:<str>, id
        name:<str>, name
        position:<int>, position in the binary value
        explain:<str>, description
        is_default:<int>, 0 means not a default permission, 1 means it is one of the default permissions
    DELETE:
        Delete manually added page routes
        ids:<array>
:return:
"""
if request.c_method == "GET":
if request.argget.all("id"):
data = permission()
elif request.argget.all("is_details"):
data = permissions_details()
else:
data = permissions()
elif request.c_method == "POST":
data = add_per()
elif request.c_method == "PUT":
data = edit_per()
elif request.c_method == "DELETE":
data = delete_per()
else:
data = {"msg_type": "w", "msg": METHOD_WARNING, "custom_status": 405}
return response_format(data)
| 27.676923
| 112
| 0.623124
|
30f48f011862c4c5368f9076ac56bd2a187109b4
| 1,714
|
py
|
Python
|
gps.py
|
vaibhav16uec121/GPS
|
ec3c099e7620d66f04a867b4017fc1755a7676cd
|
[
"MIT"
] | null | null | null |
gps.py
|
vaibhav16uec121/GPS
|
ec3c099e7620d66f04a867b4017fc1755a7676cd
|
[
"MIT"
] | null | null | null |
gps.py
|
vaibhav16uec121/GPS
|
ec3c099e7620d66f04a867b4017fc1755a7676cd
|
[
"MIT"
] | null | null | null |
from kivy.lang import Builder
from plyer import gps
from kivy.app import App
from kivy.properties import StringProperty
from kivy.clock import Clock, mainthread
kv = '''
BoxLayout:
orientation: 'vertical'
Label:
text: app.gps_location
Label:
text: app.gps_status
BoxLayout:
size_hint_y: None
height: '48dp'
padding: '4dp'
ToggleButton:
text: 'Start' if self.state == 'normal' else 'Stop'
on_state:
app.start(1000, 0) if self.state == 'down' else \
app.stop()
'''
class GpsTest(App):
gps_location = StringProperty()
gps_status = StringProperty('Click Start to get GPS location updates')
def build(self):
try:
gps.configure(on_location=self.on_location,
on_status=self.on_status)
except NotImplementedError:
import traceback
traceback.print_exc()
self.gps_status = 'GPS is not implemented for your platform'
return Builder.load_string(kv)
def start(self, minTime, minDistance):
gps.start(minTime, minDistance)
def stop(self):
gps.stop()
@mainthread
def on_location(self, **kwargs):
self.gps_location = '\n'.join([
'{}={}'.format(k, v) for k, v in kwargs.items()])
@mainthread
def on_status(self, stype, status):
self.gps_status = 'type={}\n{}'.format(stype, status)
def on_pause(self):
gps.stop()
return True
def on_resume(self):
gps.start(1000, 0)
pass
if __name__ == '__main__':
GpsTest().run()
| 25.969697
| 75
| 0.568261
|
cfd4d20caa5f05458bdd075f71e090bf654e68df
| 10,330
|
py
|
Python
|
novelsave_sources/sources/crawler.py
|
mensch272/novelsave_sources
|
6d7b49aca4f9543fb23c1b1a81c9e2bd7d5d352a
|
[
"Apache-2.0"
] | 11
|
2021-09-05T04:36:41.000Z
|
2022-03-20T00:16:42.000Z
|
novelsave_sources/sources/crawler.py
|
mHaisham/novelsave_sources
|
6d7b49aca4f9543fb23c1b1a81c9e2bd7d5d352a
|
[
"Apache-2.0"
] | 9
|
2021-08-30T02:36:43.000Z
|
2022-03-30T02:06:43.000Z
|
novelsave_sources/sources/crawler.py
|
mHaisham/novelsave_sources
|
6d7b49aca4f9543fb23c1b1a81c9e2bd7d5d352a
|
[
"Apache-2.0"
] | 2
|
2022-03-20T13:18:01.000Z
|
2022-03-29T07:07:04.000Z
|
import datetime
import re
from abc import ABC
from typing import List, Union, Optional
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup, Comment
from requests.cookies import RequestsCookieJar
from ..exceptions import BadResponseException
from ..utils.gateways import BaseHttpGateway, DefaultHttpGateway
class Crawler(ABC):
"""Base crawler class
Implements crawler specific helper methods that can be used
when parsing html content
Attributes:
lang (str): The language of the content available through the source.
It is specified ``multi`` if the source supports multiple languages.
base_urls (List[str]): The hostnames of the websites that this crawler
supports.
last_updated (datetime.date): The date at which the specific crawler
implementation was last updated.
bad_tags (List[str]): List of names of tags that should be removed from
chapter content for this specific crawler.
blacklist_patterns (List[str]): List of regex patterns denoting text that
should be removed from chapter content.
notext_tags (List[str]): List of names of tags that even if there is no
text should not be removed from chapter content.
Elements with no text are usually removed from the chapter content,
unless the element is specified in this list.
preserve_attrs (List[str]): Element attributes that contain meaningful content
and should be kept with in the element during attribute cleanup.
"""
lang: str
base_urls: List[str]
last_updated: datetime.date
@classmethod
def of(cls, url: str) -> bool:
"""Check whether the url is from the this source
The source implementations may override this method to provide
custom matching functionality.
The default implementation checks if the hostname of the
url matches any of the base urls of the source.
:param url: The url to test if it belongs to this source
:type url: str
:return: Whether the url is from this source
:rtype: bool
"""
return any(url.startswith(base_url) for base_url in cls.base_urls)
def __init__(self, http_gateway: BaseHttpGateway = None):
self.http_gateway = (
http_gateway if http_gateway is not None else DefaultHttpGateway()
)
self.init()
def init(self):
"""Call this method instead of __init__ for trivial purposes
The purpose can be any of:
- editing bad_tags or blacklist_patterns
"""
def set_cookies(self, cookies: RequestsCookieJar):
self.http_gateway.cookies = cookies
def get_soup(self, url: str, method: str = "GET", **kwargs) -> BeautifulSoup:
"""Makes a request to the url and attempts to make a :class:`BeautifulSoup`
object from the response content.
        Once the response is acquired, a soup object is created using :meth:`~novelsave_sources.sources.Crawler.make_soup`.
        The soup object is then checked for a ``body`` element to verify that the
        document was retrieved successfully.
:param url: forwarded to :meth:`~novelsave_sources.sources.Crawler.request`
:type url: str
:param method: forwarded to :meth:`~novelsave_sources.sources.Crawler.request`
:type method: str
:param kwargs: forwarded to :meth:`~novelsave_sources.sources.Crawler.request`
:return: The created soup object
:rtype: BeautifulSoup
:raises ConnectionError: If document was not retrieved successfully
"""
soup = self.make_soup(self.request(method, url, **kwargs).content, "lxml")
if not soup.find("body"):
raise ConnectionError("HTML document was not loaded correctly.")
return soup
@staticmethod
def make_soup(text: Union[str, bytes], parser: str = "lxml") -> BeautifulSoup:
"""Create a new soup object using the specified parser
:param text: The content for the soup
:type text: str | bytes
:param parser: The html tree parser to use (default = 'lxml')
:type parser: str
:return: The created soup object
:rtype: BeautifulSoup
"""
return BeautifulSoup(text, parser)
def request(self, method: str, url: str, **kwargs) -> requests.Response:
"""Send a request to the provided url using the specified method
        Checks whether the response is valid before returning; if it is not
        valid, an exception is raised.
:param method: Request method ex: GET, POST, PUT
:type method: str
:param url: The url endpoint to make the request to
:type url: str
:param kwargs: Forwarded to
:meth:`http_gateway.request <novelsave_sources.utils.gateways.BaseHttpGateway.request>`
:return: The response from the request
:rtype: requests.Response
:raises BadResponseException: if the response is not valid (status code != 200)
"""
response = self.http_gateway.request(method, url, **kwargs)
if not response.ok:
raise BadResponseException(response)
return response
def request_get(self, url, **kwargs):
"""Creates a get request to the specified url"""
return self.request("GET", url, **kwargs)
# ---- Inspired from https://github.com/dipu-bd/lightnovel-crawler ----
# ---- And almost a perfect copy of the functions below ----
bad_tags = [
"noscript",
"script",
"style",
"iframe",
"ins",
"header",
"footer",
"button",
"input",
"amp-auto-ads",
"pirate",
"figcaption",
"address",
"tfoot",
"object",
"video",
"audio",
"source",
"nav",
"output",
"select",
"textarea",
"form",
"map",
]
blacklist_patterns = []
notext_tags = [
"img",
]
preserve_attrs = [
"href",
"src",
"alt",
]
def is_blacklisted(self, text):
"""Whether the text is blacklisted"""
return any(
re.search(pattern, text, re.IGNORECASE)
for pattern in self.blacklist_patterns
)
def clean_contents(self, contents):
"""Remove unnecessary elements and attributes"""
if not contents:
return contents
contents.attrs = {}
for element in contents.find_all(True):
self.clean_element(element)
return contents
def clean_element(self, element):
"""
        If the element does not add any meaningful content, the element is
        removed; this can happen under any of the following conditions.
        - Element is a comment
        - Element is a <br> and the next sibling element is also a <br>
        - Element is part of the bad tags (undesired tags that don't add content)
        - The element has no text and has no children and is not part of notext_tags
          (elements that don't need text to be meaningful)
- The text of the element matches one of the blacklisted patterns
(undesirable text such as ads and watermarks)
If none of the conditions are met, all the attributes except those marked
important :attr:`preserve_attrs` are removed from this element
"""
# remove comments
if isinstance(element, Comment):
element.extract()
elif element.name == "br":
next_element = getattr(element, "next_sibling")
if next_element and next_element.name == "br":
element.extract()
# Remove bad tags
elif element.name in self.bad_tags:
element.extract()
# Remove empty elements
elif not element.text.strip():
if element.name not in self.notext_tags and not element.find_all(
recursive=False
):
element.extract()
# Remove blacklisted elements
elif self.is_blacklisted(element.text):
element.extract()
# Remove attributes
elif hasattr(element, "attrs"):
element.attrs = {
key: element.get(key)
for key in self.preserve_attrs
if key in element.attrs
}
@staticmethod
def find_paragraphs(element, **kwargs) -> List[str]:
"""Extract all text of the element into paragraphs"""
paragraphs = []
for t in element.find_all(text=True, **kwargs):
text = str(t).strip()
if not text:
continue
paragraphs.append(text)
return paragraphs
def to_absolute_url(self, url: str, current_url: Optional[str] = None) -> str:
"""Detects the url state and converts it into the appropriate absolute url
There are several relevant states the url could be in:
- absolute: starts with either 'https://' or 'http://', in this the url
is returned as it without any changes.
- missing schema: schema is missing and the url starts with '//', in this
case the appropriate schema from either current url or base url is prefixed.
- relative absolute: the url is relative to the website and starts with '/', in
this case the base website location (netloc) is prefixed to the url:
- relative current: the url is relative to the current webpage and does not match
any of the above conditions, in this case the url is added to the current url provided.
:param url: The url to be converted
:type url: str
:param current_url: The webpage from which the url is extracted
:type current_url: Optional[str]
:return: The absolute converted url
:rtype: str
"""
if url.startswith("http://") or url.startswith("https://"):
return url
if url.startswith("//"):
return f"{urlparse(current_url or self.base_urls[0]).scheme}:{url}"
elif url.startswith("/"):
return self.base_urls[0].rstrip("/") + url
return current_url.rstrip("/") + url
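# Minimal usage sketch (hypothetical subclass, not part of the library) showing
# the URL states handled by to_absolute_url above. The base __init__ constructs
# DefaultHttpGateway() with no arguments, so the subclass below is instantiated
# the same way.
class _DemoCrawler(Crawler):
    lang = "en"
    base_urls = ["https://example.com"]
    last_updated = datetime.date(2022, 1, 1)


_demo = _DemoCrawler()
# absolute url: returned unchanged
assert _demo.to_absolute_url("https://other.org/x") == "https://other.org/x"
# missing schema: schema taken from the first base url
assert _demo.to_absolute_url("//cdn.example.com/x") == "https://cdn.example.com/x"
# relative to the website root: prefixed with the first base url
assert _demo.to_absolute_url("/chapter-2") == "https://example.com/chapter-2"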
| 32.898089
| 121
| 0.621394
|
64822455fd94e2f814850b675a5dce57d4912092
| 939
|
py
|
Python
|
test/integration/test_absolute_import_and_namespace.py
|
cpuabuse/absolute-import
|
1e7d3e70b8e14e5c41b90fa440d915ad9e014704
|
[
"0BSD"
] | 4
|
2020-02-21T07:47:43.000Z
|
2020-12-16T04:36:58.000Z
|
test/integration/test_absolute_import_and_namespace.py
|
cpuabuse/absolute-import
|
1e7d3e70b8e14e5c41b90fa440d915ad9e014704
|
[
"0BSD"
] | null | null | null |
test/integration/test_absolute_import_and_namespace.py
|
cpuabuse/absolute-import
|
1e7d3e70b8e14e5c41b90fa440d915ad9e014704
|
[
"0BSD"
] | null | null | null |
"""
Performs an integration test for absolute_import.py.
"""
# Bootstrap for package import
from pathlib import Path
from sys import path
target_path: str = str(Path(__file__).parent.joinpath("..").joinpath("..").joinpath("src").resolve())
if target_path not in path:
path.insert(1, target_path) # Inserting to ensure the use of local package
# Importing from __init__.py
from absolute_import import absolute_import
class TestAbsoluteImportAndNamespace:
"""
Integration test for absolute-import.
"""
def test_import(self):
"""
Test the import of success module.
"""
# Set the absolute import
absolute_import(file=__file__, name=__name__, path=[Path(__file__).parent.as_posix()])
# Import aux module; Disabling the pylint due to inability to set .env in a crossplatform way for multiple targets
from _input.success import success # pylint: disable=import-error
# Assert import was successful
assert success()
| 27.617647
| 116
| 0.752929
|
faa3f8ccd7893df9d11bc53fee21c884ec0d3d71
| 2,160
|
py
|
Python
|
trading_calendars/utils/pandas_utils.py
|
sjquant/trading_calendars
|
9407809a4447da0bf8920f4100496c4d4834185b
|
[
"Apache-2.0"
] | 508
|
2018-06-27T05:50:30.000Z
|
2022-03-31T09:08:49.000Z
|
trading_calendars/utils/pandas_utils.py
|
sjquant/trading_calendars
|
9407809a4447da0bf8920f4100496c4d4834185b
|
[
"Apache-2.0"
] | 149
|
2018-06-21T22:20:49.000Z
|
2022-03-30T23:05:15.000Z
|
trading_calendars/utils/pandas_utils.py
|
sjquant/trading_calendars
|
9407809a4447da0bf8920f4100496c4d4834185b
|
[
"Apache-2.0"
] | 243
|
2018-07-10T06:49:21.000Z
|
2022-03-03T03:57:44.000Z
|
import numpy as np
import pandas as pd
from pytz import UTC
def days_at_time(days, t, tz, day_offset=0):
"""
Create an index of days at time ``t``, interpreted in timezone ``tz``.
The returned index is localized to UTC.
Parameters
----------
days : DatetimeIndex
An index of dates (represented as midnight).
t : datetime.time
The time to apply as an offset to each day in ``days``.
tz : pytz.timezone
The timezone to use to interpret ``t``.
day_offset : int
The number of days we want to offset @days by
Examples
--------
In the example below, the times switch from 13:45 to 12:45 UTC because
    March 13th is the daylight savings transition for America/New_York. All
the times are still 8:45 when interpreted in America/New_York.
>>> import pandas as pd; import datetime; import pprint
>>> dts = pd.date_range('2016-03-12', '2016-03-14')
>>> dts_845 = days_at_time(dts, datetime.time(8, 45), 'America/New_York')
>>> pprint.pprint([str(dt) for dt in dts_845])
['2016-03-12 13:45:00+00:00',
'2016-03-13 12:45:00+00:00',
'2016-03-14 12:45:00+00:00']
"""
days = pd.DatetimeIndex(days).tz_localize(None)
if len(days) == 0:
return days.tz_localize(UTC)
# Offset days without tz to avoid timezone issues.
delta = pd.Timedelta(
days=day_offset,
hours=t.hour,
minutes=t.minute,
seconds=t.second,
)
return (days + delta).tz_localize(tz).tz_convert(UTC)
def vectorized_sunday_to_monday(dtix):
"""A vectorized implementation of
:func:`pandas.tseries.holiday.sunday_to_monday`.
Parameters
----------
dtix : pd.DatetimeIndex
The index to shift sundays to mondays.
Returns
-------
sundays_as_mondays : pd.DatetimeIndex
``dtix`` with all sundays moved to the next monday.
"""
values = dtix.values.copy()
values[dtix.weekday == 6] += np.timedelta64(1, 'D')
return pd.DatetimeIndex(values)
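# Illustrative usage sketch (dates chosen only for the example): 2021-01-03 is
# a Sunday and gets shifted to Monday 2021-01-04, while the Saturday before and
# the Monday after are left untouched.
_demo_days = pd.DatetimeIndex(['2021-01-02', '2021-01-03', '2021-01-04'])
assert list(vectorized_sunday_to_monday(_demo_days)) == list(
    pd.DatetimeIndex(['2021-01-02', '2021-01-04', '2021-01-04'])
)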
try:
from pandas import testing # noqa: rexport
except ImportError:
from pandas.util import testing # noqa: rexport
| 29.189189
| 77
| 0.640278
|