| body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (string, 1 distinct value) | body_without_docstring (string, 20-98.2k chars) |
|---|---|---|---|---|---|---|---|
def begin_update_patch(self, resource_group_name, service_name, monitoring_setting_resource, **kwargs):
"Update the Monitoring Setting.\n\n :param resource_group_name: The name of the resource group that contains the resource. You can\n obtain this value from the Azure Resource Manager API or the por... | -1,511,098,406,233,714,000 | Update the Monitoring Setting.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param monito... | sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_11_01_preview/operations/_monitoring_settings_operations.py | begin_update_patch | AriZavala2/azure-sdk-for-python | python | def begin_update_patch(self, resource_group_name, service_name, monitoring_setting_resource, **kwargs):
"Update the Monitoring Setting.\n\n :param resource_group_name: The name of the resource group that contains the resource. You can\n obtain this value from the Azure Resource Manager API or the por... |
def compute(self, admat, features):
' Forward Propagation through the layer according to the spectral rule '
self.D = torch.diag(admat.sum(1), diagonal=0)
self.out = torch.empty(admat.size[0], self.op_size)
self.a_hat = (admat + torch.eye(admat.size[0]))
self.D_inv = (self.D ** (- 0.5))
self.a_h... | 3,169,009,021,672,720,000 | Forward Propagation through the layer according to the spectral rule | gcn/layers.py | compute | veds12/aihaven | python | def compute(self, admat, features):
' '
self.D = torch.diag(admat.sum(1), diagonal=0)
self.out = torch.empty(admat.size[0], self.op_size)
self.a_hat = (admat + torch.eye(admat.size[0]))
self.D_inv = (self.D ** (- 0.5))
self.a_hat = ((self.D_inv * self.a_hat) * self.D_inv)
self.out = torch.d... |
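The `compute` record above applies the spectral rule D^-1/2 (A + I) D^-1/2 X W, but its body is truncated and indexes the shape as `admat.size[0]`, which is not how a tensor's shape is read in PyTorch. A minimal, self-contained sketch of the same propagation is shown below; the `weight` matrix and the use of A + I when computing the degrees are assumptions, and this is not the repository's actual implementation.

```python
import torch

def spectral_gcn_forward(admat: torch.Tensor, features: torch.Tensor,
                         weight: torch.Tensor) -> torch.Tensor:
    # Illustrative spectral-rule propagation: D^-1/2 (A + I) D^-1/2 X W.
    a_hat = admat + torch.eye(admat.size(0))   # add self-loops
    deg = a_hat.sum(dim=1)                     # degrees of A + I (assumption)
    d_inv_sqrt = torch.diag(deg.pow(-0.5))     # D^-1/2 as a diagonal matrix
    a_norm = d_inv_sqrt @ a_hat @ d_inv_sqrt   # symmetric normalisation
    return a_norm @ features @ weight          # propagate features, then project
```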
def get_train_tfdataset(self) -> tf.data.Dataset:
'\n Returns the training :class:`~tf.data.Dataset`.\n\n Subclass and override this method if you want to inject some custom behavior.\n '
if (self.train_dataset is None):
raise ValueError('Trainer: training requires a train_dataset.'... | -1,645,174,303,493,521,000 | Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior. | src/transformers/trainer_tf.py | get_train_tfdataset | AdrienDS/transformers | python | def get_train_tfdataset(self) -> tf.data.Dataset:
'\n Returns the training :class:`~tf.data.Dataset`.\n\n Subclass and override this method if you want to inject some custom behavior.\n '
if (self.train_dataset is None):
raise ValueError('Trainer: training requires a train_dataset.'... |
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset]=None) -> tf.data.Dataset:
'\n Returns the evaluation :class:`~tf.data.Dataset`.\n\n Args:\n eval_dataset (:class:`~tf.data.Dataset`, `optional`):\n If provided, will override `self.eval_dataset`. The dataset... | -8,114,545,385,704,695,000 | Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``l... | src/transformers/trainer_tf.py | get_eval_tfdataset | AdrienDS/transformers | python | def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset]=None) -> tf.data.Dataset:
'\n Returns the evaluation :class:`~tf.data.Dataset`.\n\n Args:\n eval_dataset (:class:`~tf.data.Dataset`, `optional`):\n If provided, will override `self.eval_dataset`. The dataset... |
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
'\n Returns a test :class:`~tf.data.Dataset`.\n\n Args:\n test_dataset (:class:`~tf.data.Dataset`):\n The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features... | -9,158,064,499,687,991,000 | Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calcula... | src/transformers/trainer_tf.py | get_test_tfdataset | AdrienDS/transformers | python | def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
'\n Returns a test :class:`~tf.data.Dataset`.\n\n Args:\n test_dataset (:class:`~tf.data.Dataset`):\n The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features... |
def create_optimizer_and_scheduler(self, num_training_steps: int):
"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n TFTrainer's init through :obj:`optimizers`, or subcla... | 638,026,942,663,855,200 | Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method. | src/transformers/trainer_tf.py | create_optimizer_and_scheduler | AdrienDS/transformers | python | def create_optimizer_and_scheduler(self, num_training_steps: int):
"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n TFTrainer's init through :obj:`optimizers`, or subcla... |
def setup_wandb(self):
'\n Setup the optional Weights & Biases (`wandb`) integration.\n\n One can subclass and override this method to customize the setup if needed. Find more information\n `here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:... | -5,292,940,192,721,177,000 | Setup the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str... | src/transformers/trainer_tf.py | setup_wandb | AdrienDS/transformers | python | def setup_wandb(self):
'\n Setup the optional Weights & Biases (`wandb`) integration.\n\n One can subclass and override this method to customize the setup if needed. Find more information\n `here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:... |
def setup_comet(self):
'\n Setup the optional Comet.ml integration.\n\n Environment:\n COMET_MODE:\n (Optional): str - "OFFLINE", "ONLINE", or "DISABLED"\n COMET_PROJECT_NAME:\n (Optional): str - Comet.ml project name for experiments\n COM... | -6,661,743,849,111,943,000 | Setup the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments ... | src/transformers/trainer_tf.py | setup_comet | AdrienDS/transformers | python | def setup_comet(self):
'\n Setup the optional Comet.ml integration.\n\n Environment:\n COMET_MODE:\n (Optional): str - "OFFLINE", "ONLINE", or "DISABLED"\n COMET_PROJECT_NAME:\n (Optional): str - Comet.ml project name for experiments\n COM... |
def prediction_loop(self, dataset: tf.data.Dataset, steps: int, num_examples: int, description: str, prediction_loss_only: Optional[bool]=None) -> PredictionOutput:
'\n Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and\n :func:`~transformers.TFTrainer.predict`.\n\n ... | -8,581,805,643,994,753,000 | Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels. | src/transformers/trainer_tf.py | prediction_loop | AdrienDS/transformers | python | def prediction_loop(self, dataset: tf.data.Dataset, steps: int, num_examples: int, description: str, prediction_loss_only: Optional[bool]=None) -> PredictionOutput:
'\n Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and\n :func:`~transformers.TFTrainer.predict`.\n\n ... |
def log(self, logs: Dict[(str, float)]) -> None:
'\n Log :obj:`logs` on the various objects watching training.\n\n Subclass and override this method to inject custom behavior.\n\n Args:\n logs (:obj:`Dict[str, float]`):\n The values to log.\n '
if hasattr(se... | -4,662,213,817,398,437,000 | Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log. | src/transformers/trainer_tf.py | log | AdrienDS/transformers | python | def log(self, logs: Dict[(str, float)]) -> None:
'\n Log :obj:`logs` on the various objects watching training.\n\n Subclass and override this method to inject custom behavior.\n\n Args:\n logs (:obj:`Dict[str, float]`):\n The values to log.\n '
if hasattr(se... |
def evaluate(self, eval_dataset: Optional[tf.data.Dataset]=None) -> Dict[(str, float)]:
'\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are\n task-dependent (pass it to the init :obj:`compute_metrics` argument... | 7,102,218,496,738,634,000 | Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`s... | src/transformers/trainer_tf.py | evaluate | AdrienDS/transformers | python | def evaluate(self, eval_dataset: Optional[tf.data.Dataset]=None) -> Dict[(str, float)]:
'\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are\n task-dependent (pass it to the init :obj:`compute_metrics` argument... |
def prediction_step(self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor) -> tf.Tensor:
'\n Compute the prediction on features and update the loss with labels.\n\n Subclass and override to inject some custom behavior.\n '
(per_example_loss, logits) = self.ru... | 8,889,516,695,795,166,000 | Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior. | src/transformers/trainer_tf.py | prediction_step | AdrienDS/transformers | python | def prediction_step(self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor) -> tf.Tensor:
'\n Compute the prediction on features and update the loss with labels.\n\n Subclass and override to inject some custom behavior.\n '
(per_example_loss, logits) = self.ru... |
def train(self) -> None:
'\n Train method to train the model.\n '
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = (self.num_train_examples / self.total_train_... | 2,479,631,024,617,770,500 | Train method to train the model. | src/transformers/trainer_tf.py | train | AdrienDS/transformers | python | def train(self) -> None:
'\n \n '
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = (self.num_train_examples / self.total_train_batch_size)
approx = (math.f... |
def training_step(self, features, labels, nb_instances_in_global_batch):
'\n Perform a training step on features and labels.\n\n Subclass and override to inject some custom behavior.\n '
(per_example_loss, _) = self.run_model(features, labels, True)
scaled_loss = (per_example_loss / tf.... | 2,539,421,775,325,609,500 | Perform a training step on features and labels.
Subclass and override to inject some custom behavior. | src/transformers/trainer_tf.py | training_step | AdrienDS/transformers | python | def training_step(self, features, labels, nb_instances_in_global_batch):
'\n Perform a training step on features and labels.\n\n Subclass and override to inject some custom behavior.\n '
(per_example_loss, _) = self.run_model(features, labels, True)
scaled_loss = (per_example_loss / tf.... |
def run_model(self, features, labels, training):
'\n Computes the loss of the given features and labels pair.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n features (:obj:`tf.Tensor`): A batch of input features.\n labels (:ob... | 8,507,172,863,236,026,000 | Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in trai... | src/transformers/trainer_tf.py | run_model | AdrienDS/transformers | python | def run_model(self, features, labels, training):
'\n Computes the loss of the given features and labels pair.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n features (:obj:`tf.Tensor`): A batch of input features.\n labels (:ob... |
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
'\n Run prediction and returns predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels.\n In that case, this method will also return metrics, like in :obj:`evalu... | -7,247,605,620,693,667,000 | Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. ... | src/transformers/trainer_tf.py | predict | AdrienDS/transformers | python | def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
'\n Run prediction and returns predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels.\n In that case, this method will also return metrics, like in :obj:`evalu... |
def save_model(self, output_dir: Optional[str]=None):
'\n Will save the model, so you can reload it using :obj:`from_pretrained()`.\n '
output_dir = (output_dir if (output_dir is not None) else self.args.output_dir)
logger.info('Saving model in {}'.format(output_dir))
if (not isinstance(se... | -7,942,789,213,139,627,000 | Will save the model, so you can reload it using :obj:`from_pretrained()`. | src/transformers/trainer_tf.py | save_model | AdrienDS/transformers | python | def save_model(self, output_dir: Optional[str]=None):
'\n \n '
output_dir = (output_dir if (output_dir is not None) else self.args.output_dir)
logger.info('Saving model in {}'.format(output_dir))
if (not isinstance(self.model, TFPreTrainedModel)):
raise ValueError('Trainer.model ap... |
def initialize(self, cfn):
'Initialize the rule'
for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
self.resource_property_types.append(resource_type_spec)
for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
self.resource_sub_pro... | 7,519,247,917,375,911,000 | Initialize the rule | src/cfnlint/rules/resources/properties/AllowedValue.py | initialize | janssenivo/cfn-python-lint | python | def initialize(self, cfn):
for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
self.resource_property_types.append(resource_type_spec)
for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
self.resource_sub_property_types.append(pr... |
def check_value(self, value, path, property_name, **kwargs):
'Check Value'
matches = []
allowed_value_specs = kwargs.get('value_specs', {}).get('AllowedValues', {})
if allowed_value_specs:
if (str(value) not in allowed_value_specs):
message = 'You must specify a valid value for {0} (... | 2,931,616,691,034,806,000 | Check Value | src/cfnlint/rules/resources/properties/AllowedValue.py | check_value | janssenivo/cfn-python-lint | python | def check_value(self, value, path, property_name, **kwargs):
matches = []
allowed_value_specs = kwargs.get('value_specs', {}).get('AllowedValues', {})
if allowed_value_specs:
if (str(value) not in allowed_value_specs):
message = 'You must specify a valid value for {0} ({1}).\nValid ... |
def check(self, cfn, properties, value_specs, property_specs, path):
'Check itself'
matches = list()
for (p_value, p_path) in properties.items_safe(path[:]):
for prop in p_value:
if (prop in value_specs):
value = value_specs.get(prop).get('Value', {})
if v... | -235,133,823,034,690,980 | Check itself | src/cfnlint/rules/resources/properties/AllowedValue.py | check | janssenivo/cfn-python-lint | python | def check(self, cfn, properties, value_specs, property_specs, path):
matches = list()
for (p_value, p_path) in properties.items_safe(path[:]):
for prop in p_value:
if (prop in value_specs):
value = value_specs.get(prop).get('Value', {})
if value:
... |
def match_resource_sub_properties(self, properties, property_type, path, cfn):
'Match for sub properties'
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})
property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes')... | -6,248,846,930,321,518,000 | Match for sub properties | src/cfnlint/rules/resources/properties/AllowedValue.py | match_resource_sub_properties | janssenivo/cfn-python-lint | python | def match_resource_sub_properties(self, properties, property_type, path, cfn):
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})
property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
ma... |
def match_resource_properties(self, properties, resource_type, path, cfn):
'Check CloudFormation Properties'
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})
resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceType... | -1,158,974,634,323,442,000 | Check CloudFormation Properties | src/cfnlint/rules/resources/properties/AllowedValue.py | match_resource_properties | janssenivo/cfn-python-lint | python | def match_resource_properties(self, properties, resource_type, path, cfn):
matches = list()
specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})
resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
matche... |
@api_view(['GET'])
def get_public_channel_list(request, version):
' Endpoint: /public/<version>/channels/?=<query params> '
try:
channel_list = _get_channel_list(version, request.query_params)
except LookupError:
return HttpResponseNotFound(json.dumps({'id': error_constants.NOT_FOUND, 'metad... | -6,538,074,424,372,295,000 | Endpoint: /public/<version>/channels/?=<query params> | kolibri/core/public/api.py | get_public_channel_list | MikiasEphrem/kolibri | python | @api_view(['GET'])
def get_public_channel_list(request, version):
' '
try:
channel_list = _get_channel_list(version, request.query_params)
except LookupError:
return HttpResponseNotFound(json.dumps({'id': error_constants.NOT_FOUND, 'metadata': {'view': }}), content_type='application/json')
... |
@api_view(['GET'])
def get_public_channel_lookup(request, version, identifier):
' Endpoint: /public/<version>/channels/lookup/<identifier> '
try:
channel_list = _get_channel_list(version, request.query_params, identifier=identifier.strip().replace('-', ''))
except LookupError:
return HttpRes... | 1,402,104,657,418,538,200 | Endpoint: /public/<version>/channels/lookup/<identifier> | kolibri/core/public/api.py | get_public_channel_lookup | MikiasEphrem/kolibri | python | @api_view(['GET'])
def get_public_channel_lookup(request, version, identifier):
' '
try:
channel_list = _get_channel_list(version, request.query_params, identifier=identifier.strip().replace('-', ))
except LookupError:
return HttpResponseNotFound(json.dumps({'id': error_constants.NOT_FOUND,... |
def list(self, request):
'Returns metadata information about the device'
instance_model = InstanceIDModel.get_or_create_current_instance()[0]
info = {'application': 'kolibri', 'kolibri_version': kolibri.__version__, 'instance_id': instance_model.id, 'device_name': instance_model.hostname, 'operating_system'... | 203,232,092,821,044,770 | Returns metadata information about the device | kolibri/core/public/api.py | list | MikiasEphrem/kolibri | python | def list(self, request):
instance_model = InstanceIDModel.get_or_create_current_instance()[0]
info = {'application': 'kolibri', 'kolibri_version': kolibri.__version__, 'instance_id': instance_model.id, 'device_name': instance_model.hostname, 'operating_system': platform.system()}
return Response(info) |
def setup_method(self, method):
'Initialize the test problem. '
self.aux_names = [] | -6,245,637,525,381,043,000 | Initialize the test problem. | proteus/tests/HotStart_3P/test_HotStart_rans3p.py | setup_method | burgreen/proteus | python | def setup_method(self, method):
' '
self.aux_names = [] |
def __init__(self, attacker, classifier, invoke_limit=100, average_invoke=False, **kwargs):
'\n :param Attacker attacker: The attacker you use.\n :param Classifier classifier: The classifier you want to attack.\n :param int invoke_limit: Limitation of invoke for each instance.\n :param b... | 7,128,506,053,846,003,000 | :param Attacker attacker: The attacker you use.
:param Classifier classifier: The classifier you want to attack.
:param int invoke_limit: Limitation of invoke for each instance.
:param bool average_invoke: If true, returns "Avg. Victim Model Queries".
:param kwargs: Other parameters, see :py:class:`.DefaultAttackEval` ... | OpenAttack/attack_evals/invoke_limit_eval.py | __init__ | agcopenhaver/OpenAttack | python | def __init__(self, attacker, classifier, invoke_limit=100, average_invoke=False, **kwargs):
'\n :param Attacker attacker: The attacker you use.\n :param Classifier classifier: The classifier you want to attack.\n :param int invoke_limit: Limitation of invoke for each instance.\n :param b... |
def copytree(source, destination, ignore=None, include=None):
'\n Similar to shutil.copytree except that it removes the limitation that the destination directory should\n be present.\n\n :type source: str\n :param source:\n Path to the source folder to copy\n\n :type destination: str\n :par... | -8,982,296,800,963,278,000 | Similar to shutil.copytree except that it removes the limitation that the destination directory should
be present.
:type source: str
:param source:
Path to the source folder to copy
:type destination: str
:param destination:
Path to destination folder
:type ignore: function
:param ignore:
A function that... | aws_lambda_builders/utils.py | copytree | awslabs/aws-lambda-builders | python | def copytree(source, destination, ignore=None, include=None):
'\n Similar to shutil.copytree except that it removes the limitation that the destination directory should\n be present.\n\n :type source: str\n :param source:\n Path to the source folder to copy\n\n :type destination: str\n :par... |
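The `copytree` record documents a recursive copy that, unlike the standard-library `shutil.copytree`, tolerates a missing or already-present destination directory. A minimal sketch of that behaviour follows; the exact `ignore`/`include` semantics are assumptions, not the truncated aws-lambda-builders implementation.

```python
import os
import shutil

def copytree_sketch(source, destination, ignore=None, include=None):
    # Illustrative only: recursively copy `source` into `destination`,
    # creating the destination directory if it does not exist yet.
    if not os.path.exists(destination):
        os.makedirs(destination)
    names = os.listdir(source)
    ignored = ignore(source, names) if ignore else set()
    for name in names:
        if name in ignored:
            continue
        src = os.path.join(source, name)
        dst = os.path.join(destination, name)
        if os.path.isdir(src):
            copytree_sketch(src, dst, ignore=ignore, include=include)
        elif include is None or include(name):  # `include` assumed to filter files only
            shutil.copy2(src, dst)
```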
def which(cmd, mode=(os.F_OK | os.X_OK), executable_search_paths=None):
'Given a command, mode, and executable search paths list, return the paths which\n conforms to the given mode on the PATH with the prepended additional search paths,\n or None if there is no such file.\n `mode` defaults to os.F_OK | os... | 8,573,287,553,414,485,000 | Given a command, mode, and executable search paths list, return the paths which
conforms to the given mode on the PATH with the prepended additional search paths,
or None if there is no such file.
`mode` defaults to os.F_OK | os.X_OK. the default search `path` defaults
to the result of os.environ.get("PATH")
Note: This... | aws_lambda_builders/utils.py | which | awslabs/aws-lambda-builders | python | def which(cmd, mode=(os.F_OK | os.X_OK), executable_search_paths=None):
'Given a command, mode, and executable search paths list, return the paths which\n conforms to the given mode on the PATH with the prepended additional search paths,\n or None if there is no such file.\n `mode` defaults to os.F_OK | os... |
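The `which` record describes resolving a command with a list of extra search directories prepended to PATH, returning the conforming paths or None. A hedged sketch is below; returning every match as a list follows the docstring's plural wording but remains an assumption about the real return shape.

```python
import os

def which_sketch(cmd, mode=os.F_OK | os.X_OK, executable_search_paths=None):
    # Illustrative only: search the prepended directories first, then PATH,
    # and collect every candidate that satisfies `mode`.
    search_dirs = list(executable_search_paths or [])
    search_dirs += os.environ.get("PATH", os.defpath).split(os.pathsep)
    matches = []
    for directory in search_dirs:
        candidate = os.path.join(directory, cmd)
        if os.path.exists(candidate) and os.access(candidate, mode):
            matches.append(candidate)
    return matches or None
```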
def get_goarch(architecture):
'\n Parameters\n ----------\n architecture : str\n name of the type of architecture\n\n Returns\n -------\n str\n returns a valid GO Architecture value\n '
return ('arm64' if (architecture == ARM64) else 'amd64') | -6,302,580,191,235,693,000 | Parameters
----------
architecture : str
name of the type of architecture
Returns
-------
str
returns a valid GO Architecture value | aws_lambda_builders/utils.py | get_goarch | awslabs/aws-lambda-builders | python | def get_goarch(architecture):
'\n Parameters\n ----------\n architecture : str\n name of the type of architecture\n\n Returns\n -------\n str\n returns a valid GO Architecture value\n '
return ('arm64' if (architecture == ARM64) else 'amd64') |
def partition_lines(lines, step=1000000):
'Note: line numbers are **1-based**\n '
lo = pd.DataFrame.from_records([dict(start=lo, qty=min(((lines + 1) - lo), step), lines=lines) for lo in range(1, (lines + 1), step)])
return lo | 644,283,970,201,553,900 | Note: line numbers are **1-based** | nb4/slogfiles.py | partition_lines | Agoric/testnet-notes | python | def partition_lines(lines, step=1000000):
'\n '
lo = pd.DataFrame.from_records([dict(start=lo, qty=min(((lines + 1) - lo), step), lines=lines) for lo in range(1, (lines + 1), step)])
return lo |
def iter_cranks(path):
'split each slogfile into runs (each beginning with an import-kernel event),\n process each run by finding sequential matching deliver+deliver-result pairs,\n turn each pair into a (crankNum, computrons, wallclock) triple\n '
log.info('iter_cranks: %s', path)
with gzip.open(p... | -8,181,327,759,624,395,000 | split each slogfile into runs (each beginning with an import-kernel event),
process each run by finding sequential matching deliver+deliver-result pairs,
turn each pair into a (crankNum, computrons, wallclock) triple | nb4/slogfiles.py | iter_cranks | Agoric/testnet-notes | python | def iter_cranks(path):
'split each slogfile into runs (each beginning with an import-kernel event),\n process each run by finding sequential matching deliver+deliver-result pairs,\n turn each pair into a (crankNum, computrons, wallclock) triple\n '
log.info('iter_cranks: %s', path)
with gzip.open(p... |
def setUp(self):
'Sets up the needed objects used throughout the test.'
super(CPIOBinaryFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.bin.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test... | 631,313,947,189,636,700 | Sets up the needed objects used throughout the test. | tests/file_io/cpio_file_io.py | setUp | Acidburn0zzz/dfvfs | python | def setUp(self):
super(CPIOBinaryFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.bin.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CPIOP... |
def testOpenClosePathSpec(self):
'Test the open and close functionality using a path specification.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() | -6,653,219,118,026,506,000 | Test the open and close functionality using a path specification. | tests/file_io/cpio_file_io.py | testOpenClosePathSpec | Acidburn0zzz/dfvfs | python | def testOpenClosePathSpec(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() |
def testSeek(self):
'Test the seek functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() | 3,067,408,371,601,975,300 | Test the seek functionality. | tests/file_io/cpio_file_io.py | testSeek | Acidburn0zzz/dfvfs | python | def testSeek(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() |
def testRead(self):
'Test the read functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() | 7,601,449,651,663,779,000 | Test the read functionality. | tests/file_io/cpio_file_io.py | testRead | Acidburn0zzz/dfvfs | python | def testRead(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() |
def setUp(self):
'Sets up the needed objects used throughout the test.'
super(CPIOPortableASCIIFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.odc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(locati... | -7,919,440,319,864,761,000 | Sets up the needed objects used throughout the test. | tests/file_io/cpio_file_io.py | setUp | Acidburn0zzz/dfvfs | python | def setUp(self):
super(CPIOPortableASCIIFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.odc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spe... |
def testOpenClosePathSpec(self):
'Test the open and close functionality using a path specification.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() | -6,653,219,118,026,506,000 | Test the open and close functionality using a path specification. | tests/file_io/cpio_file_io.py | testOpenClosePathSpec | Acidburn0zzz/dfvfs | python | def testOpenClosePathSpec(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() |
def testSeek(self):
'Test the seek functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() | 3,067,408,371,601,975,300 | Test the seek functionality. | tests/file_io/cpio_file_io.py | testSeek | Acidburn0zzz/dfvfs | python | def testSeek(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() |
def testRead(self):
'Test the read functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() | 7,601,449,651,663,779,000 | Test the read functionality. | tests/file_io/cpio_file_io.py | testRead | Acidburn0zzz/dfvfs | python | def testRead(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() |
def setUp(self):
'Sets up the needed objects used throughout the test.'
super(CPIONewASCIIFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.newc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=t... | -8,965,766,636,364,369,000 | Sets up the needed objects used throughout the test. | tests/file_io/cpio_file_io.py | setUp | Acidburn0zzz/dfvfs | python | def setUp(self):
super(CPIONewASCIIFileTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.newc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_path_spec.CP... |
def testOpenClosePathSpec(self):
'Test the open and close functionality using a path specification.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() | -6,653,219,118,026,506,000 | Test the open and close functionality using a path specification. | tests/file_io/cpio_file_io.py | testOpenClosePathSpec | Acidburn0zzz/dfvfs | python | def testOpenClosePathSpec(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() |
def testSeek(self):
'Test the seek functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() | 3,067,408,371,601,975,300 | Test the seek functionality. | tests/file_io/cpio_file_io.py | testSeek | Acidburn0zzz/dfvfs | python | def testSeek(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() |
def testRead(self):
'Test the read functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() | 7,601,449,651,663,779,000 | Test the read functionality. | tests/file_io/cpio_file_io.py | testRead | Acidburn0zzz/dfvfs | python | def testRead(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() |
def setUp(self):
'Sets up the needed objects used throughout the test.'
super(CPIONewASCIIFileWithChecksumTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.crc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec... | 8,044,887,331,691,074,000 | Sets up the needed objects used throughout the test. | tests/file_io/cpio_file_io.py | setUp | Acidburn0zzz/dfvfs | python | def setUp(self):
super(CPIONewASCIIFileWithChecksumTest, self).setUp()
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['syslog.crc.cpio'])
self._SkipIfPathNotExists(test_file)
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._cpio_path_spec = cpio_p... |
def testOpenClosePathSpec(self):
'Test the open and close functionality using a path specification.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() | -6,653,219,118,026,506,000 | Test the open and close functionality using a path specification. | tests/file_io/cpio_file_io.py | testOpenClosePathSpec | Acidburn0zzz/dfvfs | python | def testOpenClosePathSpec(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestGetSizeFileObject(file_object)
file_object.close() |
def testSeek(self):
'Test the seek functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() | 3,067,408,371,601,975,300 | Test the seek functionality. | tests/file_io/cpio_file_io.py | testSeek | Acidburn0zzz/dfvfs | python | def testSeek(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestSeekFileObject(file_object)
file_object.close() |
def testRead(self):
'Test the read functionality.'
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() | 7,601,449,651,663,779,000 | Test the read functionality. | tests/file_io/cpio_file_io.py | testRead | Acidburn0zzz/dfvfs | python | def testRead(self):
file_object = cpio_file_io.CPIOFile(self._resolver_context)
file_object.open(path_spec=self._cpio_path_spec)
self._TestReadFileObject(file_object)
file_object.close() |
def registry_record_matches(registry_record_str, registry, repository):
'\n\n :param registry_record_str: the string with optional wildcard to match against a the registry/repository combo\n :param registry: the registry to match against\n :param repository: the repository to match against\n :return: bo... | -8,767,196,122,331,420,000 | :param registry_record_str: the string with optional wildcard to match against a the registry/repository combo
:param registry: the registry to match against
:param repository: the repository to match against
:return: bool true if a match, false if not | anchore_engine/auth/common.py | registry_record_matches | Mattlk13/anchore-engine | python | def registry_record_matches(registry_record_str, registry, repository):
'\n\n :param registry_record_str: the string with optional wildcard to match against a the registry/repository combo\n :param registry: the registry to match against\n :param repository: the repository to match against\n :return: bo... |
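The record above documents matching a registry string with an optional wildcard against a registry/repository pair and returning a boolean. Its body is truncated, so the glob-style check below is only an assumed illustration of that contract, not anchore-engine's actual matching rules.

```python
from fnmatch import fnmatch

def registry_record_matches_sketch(registry_record_str, registry, repository):
    # Illustrative only: treat the record as a glob pattern over "registry/repository".
    return fnmatch("{}/{}".format(registry, repository), registry_record_str)
```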
def _compute_power_transforms(Ys: Dict[(str, List[float])]) -> Dict[(str, PowerTransformer)]:
'Compute power transforms.'
power_transforms = {}
for (k, ys) in Ys.items():
y = np.array(ys)[:, None]
pt = PowerTransformer(method='yeo-johnson').fit(y)
power_transforms[k] = pt
return ... | 6,725,729,149,776,200,000 | Compute power transforms. | ax/modelbridge/transforms/power_transform_y.py | _compute_power_transforms | danielcohenlive/Ax-1 | python | def _compute_power_transforms(Ys: Dict[(str, List[float])]) -> Dict[(str, PowerTransformer)]:
power_transforms = {}
for (k, ys) in Ys.items():
y = np.array(ys)[:, None]
pt = PowerTransformer(method='yeo-johnson').fit(y)
power_transforms[k] = pt
return power_transforms |
def _compute_inverse_bounds(power_transforms: Dict[(str, PowerTransformer)], tol: float=1e-10) -> Dict[(str, Tuple[(float, float)])]:
'Computes the image of the transform so we can clip when we untransform.\n\n The inverse of the Yeo-Johnson transform is given by:\n if X >= 0 and lambda == 0:\n X = exp... | -6,789,349,316,306,147,000 | Computes the image of the transform so we can clip when we untransform.
The inverse of the Yeo-Johnson transform is given by:
if X >= 0 and lambda == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda != 0:
X = (X_trans * lambda + 1) ** (1 / lambda) - 1
elif X < 0 and lambda != 2:
X = 1 - (-(2 - lambda) * X_tr... | ax/modelbridge/transforms/power_transform_y.py | _compute_inverse_bounds | danielcohenlive/Ax-1 | python | def _compute_inverse_bounds(power_transforms: Dict[(str, PowerTransformer)], tol: float=1e-10) -> Dict[(str, Tuple[(float, float)])]:
'Computes the image of the transform so we can clip when we untransform.\n\n The inverse of the Yeo-Johnson transform is given by:\n if X >= 0 and lambda == 0:\n X = exp... |
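The docstring above quotes the piecewise inverse of the Yeo-Johnson transform, with its last branch cut off. A small sketch of that inverse is given below; the X < 0, lambda == 2 branch and the completion of the lambda != 2 branch follow the standard Yeo-Johnson definition rather than the truncated text.

```python
import numpy as np

def inverse_yeo_johnson(x_trans: np.ndarray, lmbda: float) -> np.ndarray:
    # Illustrative inverse Yeo-Johnson; x_trans must lie in the image of the forward
    # transform, which is why the record above computes bounds to clip against.
    x = np.empty_like(x_trans, dtype=float)
    pos = x_trans >= 0
    if abs(lmbda) < 1e-12:          # X >= 0 and lambda == 0
        x[pos] = np.exp(x_trans[pos]) - 1.0
    else:                           # X >= 0 and lambda != 0
        x[pos] = np.power(x_trans[pos] * lmbda + 1.0, 1.0 / lmbda) - 1.0
    if abs(lmbda - 2.0) < 1e-12:    # X < 0 and lambda == 2 (standard definition)
        x[~pos] = 1.0 - np.exp(-x_trans[~pos])
    else:                           # X < 0 and lambda != 2
        x[~pos] = 1.0 - np.power(-(2.0 - lmbda) * x_trans[~pos] + 1.0,
                                 1.0 / (2.0 - lmbda))
    return x
```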
def transform_observation_data(self, observation_data: List[ObservationData], observation_features: List[ObservationFeatures]) -> List[ObservationData]:
'Winsorize observation data in place.'
for obsd in observation_data:
for (i, m) in enumerate(obsd.metric_names):
if (m in self.metric_names... | 4,156,824,596,059,636,700 | Winsorize observation data in place. | ax/modelbridge/transforms/power_transform_y.py | transform_observation_data | danielcohenlive/Ax-1 | python | def transform_observation_data(self, observation_data: List[ObservationData], observation_features: List[ObservationFeatures]) -> List[ObservationData]:
for obsd in observation_data:
for (i, m) in enumerate(obsd.metric_names):
if (m in self.metric_names):
transform = self.po... |
def untransform_observation_data(self, observation_data: List[ObservationData], observation_features: List[ObservationFeatures]) -> List[ObservationData]:
'Winsorize observation data in place.'
for obsd in observation_data:
for (i, m) in enumerate(obsd.metric_names):
if (m in self.metric_nam... | -321,994,114,304,318,900 | Winsorize observation data in place. | ax/modelbridge/transforms/power_transform_y.py | untransform_observation_data | danielcohenlive/Ax-1 | python | def untransform_observation_data(self, observation_data: List[ObservationData], observation_features: List[ObservationFeatures]) -> List[ObservationData]:
for obsd in observation_data:
for (i, m) in enumerate(obsd.metric_names):
if (m in self.metric_names):
(l, u) = self.inv... |
def _print_out(inputstring):
'Print the inputstring. To make it compatible with Python2 and Python3.'
sys.stdout.write((inputstring + '\n')) | -1,625,176,883,322,357,000 | Print the inputstring. To make it compatible with Python2 and Python3. | dummy_serial.py | _print_out | edgar-bonet/minimalmodbus | python | def _print_out(inputstring):
sys.stdout.write((inputstring + '\n')) |
def __repr__(self):
'String representation of the dummy_serial object'
return '{0}.{1}<id=0x{2:x}, open={3}>(port={4!r}, timeout={5!r}, waiting_data={6!r})'.format(self.__module__, self.__class__.__name__, id(self), self._isOpen, self.port, self.timeout, self._waiting_data) | 8,809,353,308,238,116,000 | String representation of the dummy_serial object | dummy_serial.py | __repr__ | edgar-bonet/minimalmodbus | python | def __repr__(self):
return '{0}.{1}<id=0x{2:x}, open={3}>(port={4!r}, timeout={5!r}, waiting_data={6!r})'.format(self.__module__, self.__class__.__name__, id(self), self._isOpen, self.port, self.timeout, self._waiting_data) |
def open(self):
'Open a (previously initialized) port on dummy_serial.'
if VERBOSE:
_print_out('\nDummy_serial: Opening port\n')
if self._isOpen:
raise IOError('Dummy_serial: The port is already open')
self._isOpen = True
self.port = self._initial_port_name | 3,247,762,950,439,310,000 | Open a (previously initialized) port on dummy_serial. | dummy_serial.py | open | edgar-bonet/minimalmodbus | python | def open(self):
if VERBOSE:
_print_out('\nDummy_serial: Opening port\n')
if self._isOpen:
raise IOError('Dummy_serial: The port is already open')
self._isOpen = True
self.port = self._initial_port_name |
def close(self):
'Close a port on dummy_serial.'
if VERBOSE:
_print_out('\nDummy_serial: Closing port\n')
if (not self._isOpen):
raise IOError('Dummy_serial: The port is already closed')
self._isOpen = False
self.port = None | -4,742,343,131,919,546,000 | Close a port on dummy_serial. | dummy_serial.py | close | edgar-bonet/minimalmodbus | python | def close(self):
if VERBOSE:
_print_out('\nDummy_serial: Closing port\n')
if (not self._isOpen):
raise IOError('Dummy_serial: The port is already closed')
self._isOpen = False
self.port = None |
def write(self, inputdata):
'Write to a port on dummy_serial.\n\n Args:\n inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response\n for subsequent read operations.\n\n Note that for Python2, the inputdata should be a **string**. For Python... | 5,103,098,685,743,835,000 | Write to a port on dummy_serial.
Args:
inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response
for subsequent read operations.
Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**. | dummy_serial.py | write | edgar-bonet/minimalmodbus | python | def write(self, inputdata):
'Write to a port on dummy_serial.\n\n Args:\n inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response\n for subsequent read operations.\n\n Note that for Python2, the inputdata should be a **string**. For Python... |
def read(self, numberOfBytes):
'Read from a port on dummy_serial.\n\n The response is dependent on what was written last to the port on dummy_serial,\n and what is defined in the :data:`RESPONSES` dictionary.\n\n Args:\n numberOfBytes (int): For compability with the real function.\n\... | 4,200,138,346,946,070,500 | Read from a port on dummy_serial.
The response is dependent on what was written last to the port on dummy_serial,
and what is defined in the :data:`RESPONSES` dictionary.
Args:
numberOfBytes (int): For compability with the real function.
Returns a **string** for Python2 and **bytes** for Python3.
If the respons... | dummy_serial.py | read | edgar-bonet/minimalmodbus | python | def read(self, numberOfBytes):
'Read from a port on dummy_serial.\n\n The response is dependent on what was written last to the port on dummy_serial,\n and what is defined in the :data:`RESPONSES` dictionary.\n\n Args:\n numberOfBytes (int): For compability with the real function.\n\... |
def _calc_validation_statistics(validation_results):
'\n Calculate summary statistics for the validation results and\n return ``ExpectationStatistics``.\n '
successful_expectations = sum((exp.success for exp in validation_results))
evaluated_expectations = len(validation_results)
unsuccessful_e... | -3,360,372,455,839,803,400 | Calculate summary statistics for the validation results and
return ``ExpectationStatistics``. | great_expectations/data_asset/data_asset.py | _calc_validation_statistics | BSofo/great_expectations | python | def _calc_validation_statistics(validation_results):
'\n Calculate summary statistics for the validation results and\n return ``ExpectationStatistics``.\n '
successful_expectations = sum((exp.success for exp in validation_results))
evaluated_expectations = len(validation_results)
unsuccessful_e... |
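The record above aggregates per-expectation results into summary statistics but is cut off. A self-contained sketch of such a summary follows; the returned field names and the handling of an empty result list are assumptions, not Great Expectations' actual `ExpectationStatistics`.

```python
def calc_validation_statistics_sketch(validation_results):
    # Illustrative only: aggregate per-expectation success flags into summary counts.
    evaluated = len(validation_results)
    successful = sum(1 for result in validation_results if result.success)
    unsuccessful = evaluated - successful
    success_percent = (100.0 * successful / evaluated) if evaluated else None
    return {
        "evaluated_expectations": evaluated,
        "successful_expectations": successful,
        "unsuccessful_expectations": unsuccessful,
        "success_percent": success_percent,
    }
```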
def __init__(self, *args, **kwargs):
"\n Initialize the DataAsset.\n\n :param profiler (profiler class) = None: The profiler that should be run on the data_asset to\n build a baseline expectation suite.\n\n Note: DataAsset is designed to support multiple inheritance (e.g. PandasDatas... | -3,279,447,003,300,268,500 | Initialize the DataAsset.
:param profiler (profiler class) = None: The profiler that should be run on the data_asset to
build a baseline expectation suite.
Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a
Pandas DataFrame and Dataset which inherits from DataAsse... | great_expectations/data_asset/data_asset.py | __init__ | BSofo/great_expectations | python | def __init__(self, *args, **kwargs):
"\n Initialize the DataAsset.\n\n :param profiler (profiler class) = None: The profiler that should be run on the data_asset to\n build a baseline expectation suite.\n\n Note: DataAsset is designed to support multiple inheritance (e.g. PandasDatas... |
def autoinspect(self, profiler):
'Deprecated: use profile instead.\n\n Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.\n\n Args:\n profiler: The profiler to use\n\n Returns:\n tuple(expectation_suite, validation_... | 6,257,769,304,988,778,000 | Deprecated: use profile instead.
Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
Args:
profiler: The profiler to use
Returns:
tuple(expectation_suite, validation_results) | great_expectations/data_asset/data_asset.py | autoinspect | BSofo/great_expectations | python | def autoinspect(self, profiler):
'Deprecated: use profile instead.\n\n Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.\n\n Args:\n profiler: The profiler to use\n\n Returns:\n tuple(expectation_suite, validation_... |
def profile(self, profiler, profiler_configuration=None):
'Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.\n\n Args:\n profiler: The profiler to use\n profiler_configuration: Optional profiler configuration dict\n\n Ret... | -7,188,949,196,076,871,000 | Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
Args:
profiler: The profiler to use
profiler_configuration: Optional profiler configuration dict
Returns:
tuple(expectation_suite, validation_results) | great_expectations/data_asset/data_asset.py | profile | BSofo/great_expectations | python | def profile(self, profiler, profiler_configuration=None):
'Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.\n\n Args:\n profiler: The profiler to use\n profiler_configuration: Optional profiler configuration dict\n\n Ret... |
@classmethod
def expectation(cls, method_arg_names):
"Manages configuration and running of expectation objects.\n\n Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator used by great expectations to manage expectation configurations.\n\n ... | 4,132,695,184,668,145,700 | Manages configuration and running of expectation objects.
Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator used by great expectations to manage expectation configurations.
Args:
method_arg_names (List) : An ordered list of the arguments used by... | great_expectations/data_asset/data_asset.py | expectation | BSofo/great_expectations | python | @classmethod
def expectation(cls, method_arg_names):
"Manages configuration and running of expectation objects.\n\n Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator used by great expectations to manage expectation configurations.\n\n ... |
def _initialize_expectations(self, expectation_suite=None, expectation_suite_name=None):
"Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.\n In addition, this always sets the `default_expectation_args` to:\n `include_config`: False,\n `catc... | -3,262,480,724,169,079,000 | Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.
In addition, this always sets the `default_expectation_args` to:
`include_config`: False,
`catch_exceptions`: False,
`output_format`: 'BASIC'
By default, initializes data_asset_type to the name of the implementi... | great_expectations/data_asset/data_asset.py | _initialize_expectations | BSofo/great_expectations | python | def _initialize_expectations(self, expectation_suite=None, expectation_suite_name=None):
"Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`.\n In addition, this always sets the `default_expectation_args` to:\n `include_config`: False,\n `catc... |
def append_expectation(self, expectation_config):
'This method is a thin wrapper for ExpectationSuite.append_expectation'
warnings.warn(('append_expectation is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.add_expectation instead.'), DeprecationWarning)
self._expectat... | 3,051,038,348,989,836,300 | This method is a thin wrapper for ExpectationSuite.append_expectation | great_expectations/data_asset/data_asset.py | append_expectation | BSofo/great_expectations | python | def append_expectation(self, expectation_config):
warnings.warn(('append_expectation is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.add_expectation instead.'), DeprecationWarning)
self._expectation_suite.append_expectation(expectation_config) |
def find_expectation_indexes(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain') -> List[int]:
'This method is a thin wrapper for ExpectationSuite.find_expectation_indexes'
warnings.warn(('find_expectation_indexes is deprecated, and will be removed in a future release. ' + 'Plea... | -6,067,033,181,278,181,000 | This method is a thin wrapper for ExpectationSuite.find_expectation_indexes | great_expectations/data_asset/data_asset.py | find_expectation_indexes | BSofo/great_expectations | python | def find_expectation_indexes(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain') -> List[int]:
warnings.warn(('find_expectation_indexes is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.find_expectation_indexes instead.'), DeprecationWarni... |
def find_expectations(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain') -> List[ExpectationConfiguration]:
'This method is a thin wrapper for ExpectationSuite.find_expectations()'
warnings.warn(('find_expectations is deprecated, and will be removed in a future release. ' + 'Pl... | -3,740,664,149,477,724,000 | This method is a thin wrapper for ExpectationSuite.find_expectations() | great_expectations/data_asset/data_asset.py | find_expectations | BSofo/great_expectations | python | def find_expectations(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain') -> List[ExpectationConfiguration]:
warnings.warn(('find_expectations is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSuite.find_expectation_indexes instead.'), Deprecati... |
def remove_expectation(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain', remove_multiple_matches: bool=False) -> List[ExpectationConfiguration]:
'This method is a thin wrapper for ExpectationSuite.remove()'
warnings.warn(('DataAsset.remove_expectations is deprecated, and will ... | 5,106,026,801,216,445,000 | This method is a thin wrapper for ExpectationSuite.remove() | great_expectations/data_asset/data_asset.py | remove_expectation | BSofo/great_expectations | python | def remove_expectation(self, expectation_configuration: ExpectationConfiguration, match_type: str='domain', remove_multiple_matches: bool=False) -> List[ExpectationConfiguration]:
warnings.warn(('DataAsset.remove_expectations is deprecated, and will be removed in a future release. ' + 'Please use ExpectationSu... |
def get_default_expectation_arguments(self):
'Fetch default expectation arguments for this data_asset\n\n Returns:\n A dictionary containing all the current default expectation arguments for a data_asset\n\n Ex::\n\n {\n "include_config" : True,\n ... | -6,200,447,437,952,300,000 | Fetch default expectation arguments for this data_asset
Returns:
A dictionary containing all the current default expectation arguments for a data_asset
Ex::
{
"include_config" : True,
"catch_exceptions" : False,
"result_format" : 'BASIC'
}
See also:
se... | great_expectations/data_asset/data_asset.py | get_default_expectation_arguments | BSofo/great_expectations | python | def get_default_expectation_arguments(self):
'Fetch default expectation arguments for this data_asset\n\n Returns:\n A dictionary containing all the current default expectation arguments for a data_asset\n\n Ex::\n\n {\n "include_config" : True,\n ... |
def set_default_expectation_argument(self, argument, value):
'Set a default expectation argument for this data_asset\n\n Args:\n argument (string): The argument to be replaced\n value : The New argument to use for replacement\n\n Returns:\n None\n\n See also:\n ... | 1,081,044,049,782,888,400 | Set a default expectation argument for this data_asset
Args:
argument (string): The argument to be replaced
value : The New argument to use for replacement
Returns:
None
See also:
get_default_expectation_arguments | great_expectations/data_asset/data_asset.py | set_default_expectation_argument | BSofo/great_expectations | python | def set_default_expectation_argument(self, argument, value):
'Set a default expectation argument for this data_asset\n\n Args:\n argument (string): The argument to be replaced\n value : The New argument to use for replacement\n\n Returns:\n None\n\n See also:\n ... |
def get_expectation_suite(self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, suppress_logging=False):
'Returns _expectation_config as a JSON object, and perform some cleaning along the way.\n\n... | 3,306,050,817,204,586,500 | Returns _expectation_config as a JSON object, and perform some cleaning along the way.
Args:
discard_failed_expectations (boolean): Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): In ret... | great_expectations/data_asset/data_asset.py | get_expectation_suite | BSofo/great_expectations | python | def get_expectation_suite(self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, suppress_logging=False):
'Returns _expectation_config as a JSON object, and perform some cleaning along the way.\n\n... |
def save_expectation_suite(self, filepath=None, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False):
"Writes ``_expectation_config`` to a JSON file.\n\n Writes the DataAsset's expectation co... | 6,608,268,073,604,393,000 | Writes ``_expectation_config`` to a JSON file.
Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value pairs :ref:`result_format`, :ref:`include_c... | great_expectations/data_asset/data_asset.py | save_expectation_suite | BSofo/great_expectations | python | def save_expectation_suite(self, filepath=None, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False):
"Writes ``_expectation_config`` to a JSON file.\n\n Writes the DataAsset's expectation co... |
def validate(self, expectation_suite=None, run_id=None, data_context=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False, run_name=None, run_time=None):
'Generates a JSON-formatted report describing the outcome of all expectations.\n\n Use the default expec... | -2,197,131,871,844,403,700 | Generates a JSON-formatted report describing the outcome of all expectations.
Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.
Args:
expectation_suite (json or None): If None, uses the expectations config generated with the DataAsset during ... | great_expectations/data_asset/data_asset.py | validate | BSofo/great_expectations | python | def validate(self, expectation_suite=None, run_id=None, data_context=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False, run_name=None, run_time=None):
'Generates a JSON-formatted report describing the outcome of all expectations.\n\n Use the default expec... |
def get_evaluation_parameter(self, parameter_name, default_value=None):
'Get an evaluation parameter value that has been stored in meta.\n\n Args:\n parameter_name (string): The name of the parameter to store.\n default_value (any): The default value to be returned if the parameter is n... | 5,703,724,371,110,034,000 | Get an evaluation parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to store.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the evaluation parameter. | great_expectations/data_asset/data_asset.py | get_evaluation_parameter | BSofo/great_expectations | python | def get_evaluation_parameter(self, parameter_name, default_value=None):
'Get an evaluation parameter value that has been stored in meta.\n\n Args:\n parameter_name (string): The name of the parameter to store.\n default_value (any): The default value to be returned if the parameter is n... |
def set_evaluation_parameter(self, parameter_name, parameter_value):
'Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate\n parameterized expectations.\n\n Args:\n parameter_name (string): The name of the kwarg to be replaced at evaluation time\n... | -1,699,529,156,743,561,200 | Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate
parameterized expectations.
Args:
parameter_name (string): The name of the kwarg to be replaced at evaluation time
parameter_value (any): The value to be used | great_expectations/data_asset/data_asset.py | set_evaluation_parameter | BSofo/great_expectations | python | def set_evaluation_parameter(self, parameter_name, parameter_value):
'Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate\n parameterized expectations.\n\n Args:\n parameter_name (string): The name of the kwarg to be replaced at evaluation time\n... |
@property
def expectation_suite_name(self):
'Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.'
return self._expectation_suite.expectation_suite_name | -395,357,649,999,344,200 | Gets the current expectation_suite name of this data_asset as stored in the expectations configuration. | great_expectations/data_asset/data_asset.py | expectation_suite_name | BSofo/great_expectations | python | @property
def expectation_suite_name(self):
return self._expectation_suite.expectation_suite_name |
@expectation_suite_name.setter
def expectation_suite_name(self, expectation_suite_name):
'Sets the expectation_suite name of this data_asset as stored in the expectations configuration.'
self._expectation_suite.expectation_suite_name = expectation_suite_name | 2,893,231,240,521,078,000 | Sets the expectation_suite name of this data_asset as stored in the expectations configuration. | great_expectations/data_asset/data_asset.py | expectation_suite_name | BSofo/great_expectations | python | @expectation_suite_name.setter
def expectation_suite_name(self, expectation_suite_name):
self._expectation_suite.expectation_suite_name = expectation_suite_name |
def _format_map_output(self, result_format, success, element_count, nonnull_count, unexpected_count, unexpected_list, unexpected_index_list):
'Helper function to construct expectation result objects for map_expectations (such as column_map_expectation\n and file_lines_map_expectation).\n\n Expectation... | 2,999,667,954,995,944,000 | Helper function to construct expectation result objects for map_expectations (such as column_map_expectation
and file_lines_map_expectation).
Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.
In each case, the object returned has a different set of populated fields.
See :ref:`result... | great_expectations/data_asset/data_asset.py | _format_map_output | BSofo/great_expectations | python | def _format_map_output(self, result_format, success, element_count, nonnull_count, unexpected_count, unexpected_list, unexpected_index_list):
'Helper function to construct expectation result objects for map_expectations (such as column_map_expectation\n and file_lines_map_expectation).\n\n Expectation... |
def _calc_map_expectation_success(self, success_count, nonnull_count, mostly):
'Calculate success and percent_success for column_map_expectations\n\n Args:\n success_count (int): The number of successful values in the column\n nonnull_count (int): The num... | 1,063,625,033,990,201,300 | Calculate success and percent_success for column_map_expectations
Args:
success_count (int): The number of successful values in the column
nonnull_count (int): The number of nonnull values in the column
mostly (float or None): A value between 0 and 1 (or None... | great_expectations/data_asset/data_asset.py | _calc_map_expectation_success | BSofo/great_expectations | python | def _calc_map_expectation_success(self, success_count, nonnull_count, mostly):
'Calculate success and percent_success for column_map_expectations\n\n Args:\n success_count (int): The number of successful values in the column\n nonnull_count (int): The num... |
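Note: the success calculation described above amounts to comparing the fraction of passing non-null values against the optional mostly threshold. A minimal sketch of that arithmetic (an illustration of the idea, not Great Expectations' exact implementation):

def calc_map_success(success_count, nonnull_count, mostly=None):
    # Guard against division by zero when the column is entirely null.
    percent_success = success_count / nonnull_count if nonnull_count > 0 else None
    if mostly is not None and nonnull_count > 0:
        # Succeed when at least `mostly` of the non-null values passed.
        success = percent_success >= mostly
    else:
        # Without `mostly`, every non-null value must pass.
        success = success_count == nonnull_count
    return success, percent_success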
def test_expectation_function(self, function, *args, **kwargs):
'Test a generic expectation function\n\n Args:\n function (func): The function to be tested. (Must be a valid expectation function.)\n *args : Positional arguments to be passed the the function\n **kwarg... | 7,143,974,565,756,083,000 | Test a generic expectation function
Args:
function (func): The function to be tested. (Must be a valid expectation function.)
*args : Positional arguments to be passed the the function
**kwargs : Keyword arguments to be passed the the function
Returns:
A JSON-serializable expectation re... | great_expectations/data_asset/data_asset.py | test_expectation_function | BSofo/great_expectations | python | def test_expectation_function(self, function, *args, **kwargs):
'Test a generic expectation function\n\n Args:\n function (func): The function to be tested. (Must be a valid expectation function.)\n *args : Positional arguments to be passed the the function\n **kwarg... |
def run(self):
' \n Build the command and run.\n Return list of file(s)\n '
contigs = self.data.contigfiles
reads = self.data.readsets
if (len(contigs) > 1):
raise Exception('Reapr: multiple contig files!')
if (len(reads) > 1):
self.out_module.write('WARNING: Rea... | -2,533,049,675,244,112,400 | Build the command and run.
Return list of file(s) | lib/assembly/plugins/reapr.py | run | levinas/assembly | python | def run(self):
' \n Build the command and run.\n Return list of file(s)\n '
contigs = self.data.contigfiles
reads = self.data.readsets
if (len(contigs) > 1):
raise Exception('Reapr: multiple contig files!')
if (len(reads) > 1):
self.out_module.write('WARNING: Rea... |
def setup(set_prefix=True):
'\n Configure the settings (this happens as a side effect of accessing the\n first setting), configure logging and populate the app registry.\n Set the thread-local urlresolvers script prefix if `set_prefix` is True.\n '
from django.apps import apps
from django.conf i... | 3,197,155,097,738,839,000 | Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
Set the thread-local urlresolvers script prefix if `set_prefix` is True. | Thesis@3.9.1/Lib/site-packages/django/__init__.py | setup | nverbois/TFE21-232 | python | def setup(set_prefix=True):
'\n Configure the settings (this happens as a side effect of accessing the\n first setting), configure logging and populate the app registry.\n Set the thread-local urlresolvers script prefix if `set_prefix` is True.\n '
from django.apps import apps
from django.conf i... |
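Note: in standalone scripts this function is typically called once, after pointing Django at a settings module, so the app registry is populated before any models are imported. A minimal usage sketch (the settings module name is a placeholder):

import os
import django

# Point Django at a settings module before setup(); "myproject.settings" is a placeholder.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")
django.setup()  # configures settings and logging, populates the app registry

# Models can now be imported and queried safely, e.g.:
# from myapp.models import Article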
def on_trial_complete(self, trial_id: str, result: Optional[Dict]=None, error: bool=False):
'Notification for the completion of trial.\n\n If a trial fails, it will be reported as a failed Observation, telling\n the optimizer that the Suggestion led to a metric failure, which\n updates the feas... | 5,290,662,398,531,859,000 | Notification for the completion of trial.
If a trial fails, it will be reported as a failed Observation, telling
the optimizer that the Suggestion led to a metric failure, which
updates the feasible region and improves parameter recommendation.
Creates SigOpt Observation object for trial. | python/ray/tune/suggest/sigopt.py | on_trial_complete | Actexpler/ray | python | def on_trial_complete(self, trial_id: str, result: Optional[Dict]=None, error: bool=False):
'Notification for the completion of trial.\n\n If a trial fails, it will be reported as a failed Observation, telling\n the optimizer that the Suggestion led to a metric failure, which\n updates the feas... |
@staticmethod
def serialize_metric(metrics: List[str], modes: List[str]):
'\n Converts metrics to https://app.sigopt.com/docs/objects/metric\n '
serialized_metric = []
for (metric, mode) in zip(metrics, modes):
serialized_metric.append(dict(name=metric, **SigOptSearch.OBJECTIVE_MAP[mod... | -1,739,824,465,971,723,000 | Converts metrics to https://app.sigopt.com/docs/objects/metric | python/ray/tune/suggest/sigopt.py | serialize_metric | Actexpler/ray | python | @staticmethod
def serialize_metric(metrics: List[str], modes: List[str]):
'\n \n '
serialized_metric = []
for (metric, mode) in zip(metrics, modes):
serialized_metric.append(dict(name=metric, **SigOptSearch.OBJECTIVE_MAP[mode].copy()))
return serialized_metric |
def serialize_result(self, result: Dict):
'\n Converts experiments results to\n https://app.sigopt.com/docs/objects/metric_evaluation\n '
missing_scores = [metric for metric in self._metric if (metric not in result)]
if missing_scores:
raise ValueError(f'Some metrics specified d... | -4,923,203,362,273,260,000 | Converts experiments results to
https://app.sigopt.com/docs/objects/metric_evaluation | python/ray/tune/suggest/sigopt.py | serialize_result | Actexpler/ray | python | def serialize_result(self, result: Dict):
'\n Converts experiments results to\n https://app.sigopt.com/docs/objects/metric_evaluation\n '
missing_scores = [metric for metric in self._metric if (metric not in result)]
if missing_scores:
raise ValueError(f'Some metrics specified d... |
def replacing_yield(o, attr, val):
'Context manager to temporarily replace an attribute'
old = getattr(o, attr)
try:
(yield setattr(o, attr, val))
finally:
setattr(o, attr, old) | 7,309,409,930,689,528,000 | Context manager to temporarily replace an attribute | fastai2/learner.py | replacing_yield | akashpalrecha/fastai2 | python | def replacing_yield(o, attr, val):
old = getattr(o, attr)
try:
(yield setattr(o, attr, val))
finally:
setattr(o, attr, old) |
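Note: the generator above is the usual temporarily-swap-an-attribute pattern; in fastai it is consumed as a context manager (the decorator is assumed here, since it is applied elsewhere in the library). A self-contained sketch of the same idea using contextlib:

from contextlib import contextmanager

@contextmanager
def replacing_yield(o, attr, val):
    # Temporarily replace o.attr with val, restoring the old value on exit.
    old = getattr(o, attr)
    try:
        yield setattr(o, attr, val)
    finally:
        setattr(o, attr, old)

class Cfg:
    mode = "train"

cfg = Cfg()
with replacing_yield(cfg, "mode", "eval"):
    print(cfg.mode)  # "eval" inside the block
print(cfg.mode)      # "train" again afterwards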
def mk_metric(m):
"Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
return (m if isinstance(m, Metric) else AvgMetric(m)) | 3,190,265,609,295,447,600 | Convert `m` to an `AvgMetric`, unless it's already a `Metric` | fastai2/learner.py | mk_metric | akashpalrecha/fastai2 | python | def mk_metric(m):
return (m if isinstance(m, Metric) else AvgMetric(m)) |
def save_model(file, model, opt, with_opt=True):
'Save `model` to `file` along with `opt` (if available, and if `with_opt`)'
if (opt is None):
with_opt = False
state = get_model(model).state_dict()
if with_opt:
state = {'model': state, 'opt': opt.state_dict()}
torch.save(state, file) | 815,293,515,183,298,400 | Save `model` to `file` along with `opt` (if available, and if `with_opt`) | fastai2/learner.py | save_model | akashpalrecha/fastai2 | python | def save_model(file, model, opt, with_opt=True):
if (opt is None):
with_opt = False
state = get_model(model).state_dict()
if with_opt:
state = {'model': state, 'opt': opt.state_dict()}
torch.save(state, file) |
def load_model(file, model, opt, with_opt=None, device=None, strict=True):
'Load `model` from `file` along with `opt` (if available, and if `with_opt`)'
if isinstance(device, int):
device = torch.device('cuda', device)
elif (device is None):
device = 'cpu'
state = torch.load(file, map_lo... | 5,862,884,301,277,646,000 | Load `model` from `file` along with `opt` (if available, and if `with_opt`) | fastai2/learner.py | load_model | akashpalrecha/fastai2 | python | def load_model(file, model, opt, with_opt=None, device=None, strict=True):
if isinstance(device, int):
device = torch.device('cuda', device)
elif (device is None):
device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = (set(state) == {'model', 'opt'})
model_state ... |
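Note: together, save_model and load_model bundle a model's state_dict (and optionally the optimizer's) into one file and restore it later. A minimal plain-PyTorch sketch of the same round trip (not the fastai wrappers themselves; the file name is arbitrary):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

# Save: wrap model and optimizer state in one dict, as save_model does when with_opt=True.
state = {'model': model.state_dict(), 'opt': opt.state_dict()}
torch.save(state, 'checkpoint.pth')

# Load: map to CPU and restore both pieces, mirroring load_model's behaviour.
state = torch.load('checkpoint.pth', map_location='cpu')
model.load_state_dict(state['model'], strict=True)
opt.load_state_dict(state['opt'])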
@patch
def export(self: Learner, fname='export.pkl'):
'Export the content of `self` without the items and the optimizer state for inference'
if rank_distrib():
return
old_dbunch = self.dls
self.dls = self.dls.new_empty()
state = self.opt.state_dict()
self.opt = None
with warnings.cat... | -5,186,329,875,146,697,000 | Export the content of `self` without the items and the optimizer state for inference | fastai2/learner.py | export | akashpalrecha/fastai2 | python | @patch
def export(self: Learner, fname='export.pkl'):
if rank_distrib():
return
old_dbunch = self.dls
self.dls = self.dls.new_empty()
state = self.opt.state_dict()
self.opt = None
with warnings.catch_warnings():
warnings.simplefilter('ignore')
torch.save(self, (self.... |
def load_learner(fname, cpu=True):
'Load a `Learner` object in `fname`, optionally putting it on the `cpu`'
res = torch.load(fname, map_location=('cpu' if cpu else None))
if hasattr(res, 'to_fp32'):
res = res.to_fp32()
if cpu:
res.dls.cpu()
return res | -7,052,218,109,212,532,000 | Load a `Learner` object in `fname`, optionally putting it on the `cpu` | fastai2/learner.py | load_learner | akashpalrecha/fastai2 | python | def load_learner(fname, cpu=True):
res = torch.load(fname, map_location=('cpu' if cpu else None))
if hasattr(res, 'to_fp32'):
res = res.to_fp32()
if cpu:
res.dls.cpu()
return res |
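Note: export and load_learner form the usual fastai inference workflow, exporting a trained Learner stripped of its data and optimizer state and reloading it elsewhere for prediction. A short usage sketch (assuming a trained `learn` object already exists; the file name is the default shown above):

# After training, on the machine that has the data:
learn.export('export.pkl')          # strips the DataLoaders and optimizer state

# Later, possibly on a CPU-only machine:
from fastai2.learner import load_learner
learn_inf = load_learner('export.pkl', cpu=True)
# preds = learn_inf.predict(item)  # `item` is a hypothetical input, depends on the application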
@patch
def tta(self: Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
'Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation'
if (dl is None):
dl = self.dls[ds_idx]
if ((item_tfms is not None) or (batch_tfms is not None)):
... | 623,691,656,397,558,400 | Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation | fastai2/learner.py | tta | akashpalrecha/fastai2 | python | @patch
def tta(self: Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
if (dl is None):
dl = self.dls[ds_idx]
if ((item_tfms is not None) or (batch_tfms is not None)):
dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
with dl.dataset.se... |
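Note: test-time augmentation, as suggested by the signature above, averages predictions over n augmented passes and blends them with the un-augmented prediction via beta (or takes an element-wise max when use_max is set). A generic PyTorch illustration of that idea (not fastai's exact implementation):

import torch

def tta_predict(model, augment, x, n=4, beta=0.25, use_max=False):
    # `augment` is any callable that returns an augmented view of the batch x.
    model.eval()
    with torch.no_grad():
        aug_preds = torch.stack([model(augment(x)) for _ in range(n)])  # n augmented passes
        plain = model(x)                                                # one plain pass
    if use_max:
        return torch.maximum(aug_preds.max(dim=0).values, plain)
    # Blend the augmented mean with the plain prediction.
    return (1 - beta) * aug_preds.mean(dim=0) + beta * plain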
def begin_fit(self):
'Prepare state for training'
(self.lrs, self.iters, self.losses, self.values) = ([], [], [], [])
names = self.metrics.attrgot('name')
if (self.train_metrics and self.valid_metrics):
names = (L('loss') + names)
names = (names.map('train_{}') + names.map('valid_{}'))
... | 8,912,982,815,727,020,000 | Prepare state for training | fastai2/learner.py | begin_fit | akashpalrecha/fastai2 | python | def begin_fit(self):
(self.lrs, self.iters, self.losses, self.values) = ([], [], [], [])
names = self.metrics.attrgot('name')
if (self.train_metrics and self.valid_metrics):
names = (L('loss') + names)
names = (names.map('train_{}') + names.map('valid_{}'))
elif self.valid_metrics:
... |
def after_batch(self):
'Update all metrics and records lr and smooth loss in training'
if (len(self.yb) == 0):
return
mets = (self._train_mets if self.training else self._valid_mets)
for met in mets:
met.accumulate(self.learn)
if (not self.training):
return
self.lrs.appen... | 5,701,394,699,218,141,000 | Update all metrics and records lr and smooth loss in training | fastai2/learner.py | after_batch | akashpalrecha/fastai2 | python | def after_batch(self):
if (len(self.yb) == 0):
return
mets = (self._train_mets if self.training else self._valid_mets)
for met in mets:
met.accumulate(self.learn)
if (not self.training):
return
self.lrs.append(self.opt.hypers[(- 1)]['lr'])
self.losses.append(self.smo... |
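Note: the smooth_loss recorded above is an exponentially weighted moving average of the batch losses, which gives a less noisy training curve. A tiny self-contained sketch of such a smoother (an illustration, not fastai's exact class):

class SmoothLoss:
    # Exponential moving average with debiasing, updated once per batch.
    def __init__(self, beta=0.98):
        self.beta, self.mov_avg, self.count = beta, 0.0, 0

    def accumulate(self, loss):
        self.count += 1
        self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * float(loss)

    @property
    def value(self):
        # Debias the running average so early values are not underestimated.
        return self.mov_avg / (1 - self.beta ** self.count)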
def begin_epoch(self):
'Set timer if `self.add_time=True`'
(self.cancel_train, self.cancel_valid) = (False, False)
if self.add_time:
self.start_epoch = time.time()
self.log = L(getattr(self, 'epoch', 0)) | 7,413,627,441,664,656,000 | Set timer if `self.add_time=True` | fastai2/learner.py | begin_epoch | akashpalrecha/fastai2 | python | def begin_epoch(self):
(self.cancel_train, self.cancel_valid) = (False, False)
if self.add_time:
self.start_epoch = time.time()
self.log = L(getattr(self, 'epoch', 0)) |
def after_epoch(self):
'Store and log the loss/metric values'
self.learn.final_record = self.log[1:].copy()
self.values.append(self.learn.final_record)
if self.add_time:
self.log.append(format_time((time.time() - self.start_epoch)))
self.logger(self.log)
self.iters.append(self.smooth_los... | 2,613,439,967,057,962,000 | Store and log the loss/metric values | fastai2/learner.py | after_epoch | akashpalrecha/fastai2 | python | def after_epoch(self):
self.learn.final_record = self.log[1:].copy()
self.values.append(self.learn.final_record)
if self.add_time:
self.log.append(format_time((time.time() - self.start_epoch)))
self.logger(self.log)
self.iters.append(self.smooth_loss.count) |
def FromStruct(self, PointList):
' FromStruct(self: dotnetPointList_t,PointList: PointList) '
pass | -6,544,352,734,837,394,000 | FromStruct(self: dotnetPointList_t,PointList: PointList) | release/stubs.min/Tekla/Structures/ModelInternal_parts/dotnetPointList_t.py | FromStruct | YKato521/ironpython-stubs | python | def FromStruct(self, PointList):
' '
pass |
def ToStruct(self, PointList):
' ToStruct(self: dotnetPointList_t,PointList: PointList) '
pass | 1,758,093,045,537,316,400 | ToStruct(self: dotnetPointList_t,PointList: PointList) | release/stubs.min/Tekla/Structures/ModelInternal_parts/dotnetPointList_t.py | ToStruct | YKato521/ironpython-stubs | python | def ToStruct(self, PointList):
' '
pass |
@staticmethod
def __new__(self, Size):
'\n __new__[dotnetPointList_t]() -> dotnetPointList_t\n\n \n\n __new__(cls: type,Size: int)\n '
pass | 3,034,896,310,408,620,500 | __new__[dotnetPointList_t]() -> dotnetPointList_t
__new__(cls: type,Size: int) | release/stubs.min/Tekla/Structures/ModelInternal_parts/dotnetPointList_t.py | __new__ | YKato521/ironpython-stubs | python | @staticmethod
def __new__(self, Size):
'\n __new__[dotnetPointList_t]() -> dotnetPointList_t\n\n \n\n __new__(cls: type,Size: int)\n '
pass |