| body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (string, 1 class) | body_without_docstring (string, 20-98.2k chars) |
|---|---|---|---|---|---|---|---|
@property
def rollout_lengths(self):
' Lengths of sub-rollouts. '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
bounds = self._rollout_bounds
return (bounds[1:] - bounds[:(- 1)]) | 4,705,739,400,180,986,000 | Lengths of sub-rollouts. | mushroom_rl/core/parallelization_tools/step_sequence.py | rollout_lengths | nifunk/GNNMushroomRL | python | @property
def rollout_lengths(self):
' '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
bounds = self._rollout_bounds
return (bounds[1:] - bounds[:(- 1)]) |
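For readers tracing the bounds arithmetic in `rollout_lengths` above: it is just a difference of consecutive boundary indices. A minimal sketch with made-up bounds (the values are assumptions, not from the source):

```python
import numpy as np

# Hypothetical sub-rollout boundaries: rollouts span [0, 5), [5, 12), [12, 20).
bounds = np.array([0, 5, 12, 20])
lengths = bounds[1:] - bounds[:-1]  # equivalent to np.diff(bounds)
print(lengths)  # -> [5 7 8]
```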
def __len__(self):
" Get the step sequence's length. "
return self.length | -953,459,290,759,964,000 | Get the step sequence's length. | mushroom_rl/core/parallelization_tools/step_sequence.py | __len__ | nifunk/GNNMushroomRL | python | def __len__(self):
" "
return self.length |
def add_data(self, name: str, value=None, item_shape: tuple=None, with_after_last: Optional[bool]=False):
'\n Add a new data field to the step sequence.\n\n :param name: string for the name\n :param value: the data\n :param item_shape: shape to store the data in\n :param with_afte... | -2,198,449,852,763,759,900 | Add a new data field to the step sequence.
:param name: string for the name
:param value: the data
:param item_shape: shape to store the data in
:param with_after_last: `True` if there is one more element than the length (e.g. last observation) | mushroom_rl/core/parallelization_tools/step_sequence.py | add_data | nifunk/GNNMushroomRL | python | def add_data(self, name: str, value=None, item_shape: tuple=None, with_after_last: Optional[bool]=False):
'\n Add a new data field to the step sequence.\n\n :param name: string for the name\n :param value: the data\n :param item_shape: shape to store the data in\n :param with_afte... |
def get_data_values(self, name: str, truncate_last: Optional[bool]=False):
'\n Return the data tensor stored under the given name.\n\n :param name: data name\n :param truncate_last: True to truncate the length+1 entry if present\n '
assert (name in self._data_names)
entry = self.... | 1,501,171,783,061,499,400 | Return the data tensor stored under the given name.
:param name: data name
:param truncate_last: True to truncate the length+1 entry if present | mushroom_rl/core/parallelization_tools/step_sequence.py | get_data_values | nifunk/GNNMushroomRL | python | def get_data_values(self, name: str, truncate_last: Optional[bool]=False):
'\n Return the data tensor stored under the given name.\n\n :param name: data name\n :param truncate_last: True to truncate the length+1 entry if present\n '
assert (name in self._data_names)
entry = self.... |
def numpy(self, data_type=None):
'\n Convert data to numpy ndarrays.\n\n :param data_type: type to return data in. When None is passed, the data type is left unchanged.\n '
self.convert('numpy', data_type) | 7,891,443,739,255,916,000 | Convert data to numpy ndarrays.
:param data_type: type to return data in. When None is passed, the data type is left unchanged. | mushroom_rl/core/parallelization_tools/step_sequence.py | numpy | nifunk/GNNMushroomRL | python | def numpy(self, data_type=None):
'\n Convert data to numpy ndarrays.\n\n :param data_type: type to return data in. When None is passed, the data type is left unchanged.\n '
self.convert('numpy', data_type) |
def torch(self, data_type=None):
'\n Convert data to PyTorch Tensors.\n\n :param data_type: type to return data in. When None is passed, the data type is left unchanged.\n '
self.convert('torch', data_type) | 9,121,887,788,393,913,000 | Convert data to PyTorch Tensors.
:param data_type: type to return data in. When None is passed, the data type is left unchanged. | mushroom_rl/core/parallelization_tools/step_sequence.py | torch | nifunk/GNNMushroomRL | python | def torch(self, data_type=None):
'\n Convert data to PyTorch Tensors.\n\n :param data_type: type to return data in. When None is passed, the data type is left unchanged.\n '
self.convert('torch', data_type) |
def convert(self, data_format: str, data_type=None):
'\n Convert data to specified format.\n\n :param data_format: torch to use Tensors, numpy to use ndarrays\n :param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.\n '
if (data... | 5,424,327,337,248,844,000 | Convert data to specified format.
:param data_format: torch to use Tensors, numpy to use ndarrays
:param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged. | mushroom_rl/core/parallelization_tools/step_sequence.py | convert | nifunk/GNNMushroomRL | python | def convert(self, data_format: str, data_type=None):
'\n Convert data to specified format.\n\n :param data_format: torch to use Tensors, numpy to use ndarrays\n :param data_type: optional torch/numpy dtype for data. When `None` is passed, the data type is left unchanged.\n '
if (data... |
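The `numpy`/`torch` helpers above both delegate to `convert`, which switches the storage format of every data field. A hedged standalone sketch of that kind of conversion (not the class's actual internals; note the source file aliases `torch` as `to`):

```python
import numpy as np
import torch as to  # alias matching the source file

arr = np.ones((3, 2), dtype=np.float32)
tensor = to.from_numpy(arr)  # numpy -> torch (shares the underlying buffer)
back = tensor.numpy()        # torch -> numpy
assert back.dtype == np.float32
```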
def get_rollout(self, index):
'\n Get an indexed sub-rollout.\n\n :param index: generic index of sub-rollout, negative values, slices and iterables are allowed\n :return: selected subset.\n '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported... | 1,579,322,541,815,827,200 | Get an indexed sub-rollout.
:param index: generic index of sub-rollout, negative values, slices and iterables are allowed
:return: selected subset. | mushroom_rl/core/parallelization_tools/step_sequence.py | get_rollout | nifunk/GNNMushroomRL | python | def get_rollout(self, index):
'\n Get an indexed sub-rollout.\n\n :param index: generic index of sub-rollout, negative values, slices and iterables are allowed\n :return: selected subset.\n '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported... |
def iterate_rollouts(self):
' Iterate over all sub-rollouts of a concatenated rollout. '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
bounds = self._rollout_bounds
count = (len(bounds) - 1)
if (count == 1):
(yield self)
... | 3,715,250,394,928,838,000 | Iterate over all sub-rollouts of a concatenated rollout. | mushroom_rl/core/parallelization_tools/step_sequence.py | iterate_rollouts | nifunk/GNNMushroomRL | python | def iterate_rollouts(self):
' '
if (not self.continuous):
raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.')
bounds = self._rollout_bounds
count = (len(bounds) - 1)
if (count == 1):
(yield self)
else:
for i in range(count):
start... |
def sample_w_next(self, batch_size: int) -> tuple:
'\n        Sample a random batch of steps from a rollout together with the associated next steps.\n        Similar to `split_shuffled_batches` with `complete_rollouts=False`\n\n        :param batch_size: number of steps to sample\n        :return: randomly sampled batch of... | -3,061,683,762,915,780,600 | Sample a random batch of steps from a rollout together with the associated next steps.
Similar to `split_shuffled_batches` with `complete_rollouts=False`
:param batch_size: number of steps to sample
:return: randomly sampled batch of steps | mushroom_rl/core/parallelization_tools/step_sequence.py | sample_w_next | nifunk/GNNMushroomRL | python | def sample_w_next(self, batch_size: int) -> tuple:
'\n        Sample a random batch of steps from a rollout together with the associated next steps.\n        Similar to `split_shuffled_batches` with `complete_rollouts=False`\n\n        :param batch_size: number of steps to sample\n        :return: randomly sampled batch of... |
def split_ordered_batches(self, batch_size: int=None, num_batches: int=None):
'\n Batch generation. Split the step collection into ordered mini-batches of size batch_size.\n\n :param batch_size: number of steps per batch, i.e. variable number of batches\n :param num_batches: number of batches t... | -1,020,326,957,354,149,100 | Batch generation. Split the step collection into ordered mini-batches of size batch_size.
:param batch_size: number of steps per batch, i.e. variable number of batches
:param num_batches: number of batches to split the rollout in, i.e. variable batch size
.. note::
Left out the option to return complete rollouts ... | mushroom_rl/core/parallelization_tools/step_sequence.py | split_ordered_batches | nifunk/GNNMushroomRL | python | def split_ordered_batches(self, batch_size: int=None, num_batches: int=None):
'\n Batch generation. Split the step collection into ordered mini-batches of size batch_size.\n\n :param batch_size: number of steps per batch, i.e. variable number of batches\n :param num_batches: number of batches t... |
def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool]=False):
'\n Batch generation. Split the step collection into random mini-batches of size batch_size.\n\n :param batch_size: number of steps per batch\n :param complete_rollouts: if `complete_rollouts = True`, th... | 1,159,818,307,064,741,400 | Batch generation. Split the step collection into random mini-batches of size batch_size.
:param batch_size: number of steps per batch
:param complete_rollouts: if `complete_rollouts = True`, the batches will not contain partial rollouts.
However, the size of the returned batches cannot be str... | mushroom_rl/core/parallelization_tools/step_sequence.py | split_shuffled_batches | nifunk/GNNMushroomRL | python | def split_shuffled_batches(self, batch_size: int, complete_rollouts: Optional[bool]=False):
'\n Batch generation. Split the step collection into random mini-batches of size batch_size.\n\n :param batch_size: number of steps per batch\n :param complete_rollouts: if `complete_rollouts = True`, th... |
def undiscounted_return(self) -> float:
'\n Compute the undiscounted return.\n\n :return: sum of rewards\n '
if (not (len(self._rollout_bounds) == 2)):
raise pyrado.ShapeErr(msg='The StepSequence must be a single continuous rollout.')
return self.rewards.sum() | -7,107,731,319,835,752,000 | Compute the undiscounted return.
:return: sum of rewards | mushroom_rl/core/parallelization_tools/step_sequence.py | undiscounted_return | nifunk/GNNMushroomRL | python | def undiscounted_return(self) -> float:
'\n Compute the undiscounted return.\n\n :return: sum of rewards\n '
if (not (len(self._rollout_bounds) == 2)):
raise pyrado.ShapeErr(msg='The StepSequence must be a single continuous rollout.')
return self.rewards.sum() |
def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
'\n Compute the discounted return.\n\n :param gamma: temporal discount factor\n :return: exponentially weighted sum of rewards\n '
if (not (len(self._rollout_bounds) == 2)):
raise pyrado.ShapeErr(msg='The S... | -144,263,897,106,388,860 | Compute the discounted return.
:param gamma: temporal discount factor
:return: exponentially weighted sum of rewards | mushroom_rl/core/parallelization_tools/step_sequence.py | discounted_return | nifunk/GNNMushroomRL | python | def discounted_return(self, gamma: float) -> (to.Tensor, np.ndarray):
'\n Compute the discounted return.\n\n :param gamma: temporal discount factor\n :return: exponentially weighted sum of rewards\n '
if (not (len(self._rollout_bounds) == 2)):
raise pyrado.ShapeErr(msg='The S... |
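For reference, the "exponentially weighted sum of rewards" in the docstring above is the sum over t of gamma^t * r_t. A minimal numpy sketch with made-up rewards:

```python
import numpy as np

gamma = 0.99                         # assumed discount factor
rewards = np.array([1.0, 0.5, 2.0])  # made-up reward sequence
discounted = np.sum(gamma ** np.arange(len(rewards)) * rewards)
undiscounted = rewards.sum()         # the undiscounted_return case above
```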
@classmethod
def concat(cls, parts: Sequence['StepSequence'], data_format: Optional[str]=None, truncate_last: Optional[bool]=True):
'\n Concatenate multiple step sequences into one, truncating the last observation.\n\n :param parts: batch of sequences to concatenate\n :param data_format: torch ... | -2,580,023,898,341,784,000 | Concatenate multiple step sequences into one, truncating the last observation.
:param parts: batch of sequences to concatenate
:param data_format: torch to use Tensors, numpy to use ndarrays, `None` to choose automatically
:param truncate_last: remove the last step from each part, highly recommended to be `True`
:retu... | mushroom_rl/core/parallelization_tools/step_sequence.py | concat | nifunk/GNNMushroomRL | python | @classmethod
def concat(cls, parts: Sequence['StepSequence'], data_format: Optional[str]=None, truncate_last: Optional[bool]=True):
'\n Concatenate multiple step sequences into one, truncating the last observation.\n\n :param parts: batch of sequences to concatenate\n :param data_format: torch ... |
@classmethod
def process_data(cls, rollout: 'StepSequence', fcn: Callable, fcn_arg_name: str, fcn_arg_types: Union[(type, Tuple[type])]=np.ndarray, include_fields: Sequence[str]=None, exclude_fields: Sequence[str]=None, **process_fcn_kwargs):
'\n        Process all data fields of a rollout using an arbitrary funct... | -3,135,531,648,346,896,400 | Process all data fields of a rollout using an arbitrary function. Optionally, some fields can be excluded.
:param rollout: `StepSequence` holding the data
:param fcn: function (of one remaining input) used to manipulate the data fields, e.g. `scipy.filtfilt()`
:param fcn_arg_name: string of the remaining input of `pro... | mushroom_rl/core/parallelization_tools/step_sequence.py | process_data | nifunk/GNNMushroomRL | python | @classmethod
def process_data(cls, rollout: 'StepSequence', fcn: Callable, fcn_arg_name: str, fcn_arg_types: Union[(type, Tuple[type])]=np.ndarray, include_fields: Sequence[str]=None, exclude_fields: Sequence[str]=None, **process_fcn_kwargs):
'\n        Process all data fields of a rollout using an arbitrary funct... |
def _next_value(step: Step) -> float:
' Helper to return `next_value = 0` for last step '
if step.done:
return 0.0
return step.next_value | -70,958,724,919,454,110 | Helper to return `next_value = 0` for last step | mushroom_rl/core/parallelization_tools/step_sequence.py | _next_value | nifunk/GNNMushroomRL | python | def _next_value(step: Step) -> float:
' '
if step.done:
return 0.0
return step.next_value |
@functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
' Wrap the processing function to call it recursively for nested data structures. '
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
inp = fcn(**kwargs)
elif isinstance(inp, dict):
for (key, value) in inp.i... | 1,204,677,687,032,972,500 | Wrap the processing function to call it recursively for nested data structures. | mushroom_rl/core/parallelization_tools/step_sequence.py | recursive_wrapper | nifunk/GNNMushroomRL | python | @functools.wraps(fcn)
def recursive_wrapper(inp, **kwargs):
' '
kwargs.update({fcn_arg_name: inp})
if isinstance(inp, fcn_arg_types):
inp = fcn(**kwargs)
elif isinstance(inp, dict):
for (key, value) in inp.items():
if isinstance(value, fcn_arg_types):
inp[key... |
def make_scoped_cache(scope):
'Create a new scoped cache.\n\n In most cases the global cache should not be used directly but rather\n with a scope depending on the module a cache is used for. This is\n especially important when passing user-provided data as the cache key\n to prevent reading other unrel... | 4,806,206,535,844,877,000 | Create a new scoped cache.
In most cases the global cache should not be used directly but rather
with a scope depending on the module a cache is used for. This is
especially important when passing user-provided data as the cache key
to prevent reading other unrelated cache keys. | indico/core/cache.py | make_scoped_cache | errikos/indico | python | def make_scoped_cache(scope):
'Create a new scoped cache.\n\n In most cases the global cache should not be used directly but rather\n with a scope depending on the module a cache is used for. This is\n especially important when passing user-provided data as the cache key\n to prevent reading other unrel... |
def set_detector_value(self, kwargs_list: list):
' Only allow changes to confidence or the model '
try:
for kwargs in kwargs_list:
field = kwargs.get('field')
value = kwargs.get('value')
if (field in ['detector_confidence', 'detector_model']):
logger.i... | -4,545,308,264,092,963,000 | Only allow changes to confidence or the model | traffic_monitor/services/detectors/detector_cvlib.py | set_detector_value | mcdomx/monitor | python | def set_detector_value(self, kwargs_list: list):
' '
try:
for kwargs in kwargs_list:
field = kwargs.get('field')
value = kwargs.get('value')
if (field in ['detector_confidence', 'detector_model']):
logger.info(f'{self.detector_name}: setting value: {f... |
def fasterrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.\n\n Reference: `"Faster R-CNN: Towards Real-Time Object Detection with\n Region Proposal Netwo... | 1,192,879,758,170,599,200 | Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.
Reference: `"Faster R-CNN: Towards Real-Time Object Detection with
Region Proposal Networks" <https://arxiv.org/abs/1506.01497>`_.
The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
image, and should be i... | torchvision/models/detection/faster_rcnn.py | fasterrcnn_resnet50_fpn | Bethhhh/vision | python | def fasterrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.\n\n Reference: `"Faster R-CNN: Towards Real-Time Object Detection with\n Region Proposal Netwo... |
def fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n    Constructs a low resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone tuned for mobile use-cases.\n    It works similarly to Faster R... | -6,396,726,167,147,814,000 | Constructs a low resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone tuned for mobile use-cases.
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Example::
>>> model = torchvision.models.detection.f... | torchvision/models/detection/faster_rcnn.py | fasterrcnn_mobilenet_v3_large_320_fpn | Bethhhh/vision | python | def fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n    Constructs a low resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone tuned for mobile use-cases.\n    It works similarly to Faster R... |
def fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone.\n It works similarly to Faster R-CNN with ResNet-50 FPN backbon... | 1,931,367,722,196,547,600 | Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone.
It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See
:func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more
details.
Example::
>>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_larg... | torchvision/models/detection/faster_rcnn.py | fasterrcnn_mobilenet_v3_large_fpn | Bethhhh/vision | python | def fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
'\n Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone.\n It works similarly to Faster R-CNN with ResNet-50 FPN backbon... |
@extensions.expected_errors((404, 409))
@wsgi.action('pause')
def _pause(self, req, id, body):
'Permit Admins to pause the server.'
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
server = common.get_instance(self.compute_api, ctxt, id, want_objects=True)
try:
self.compute_api.pa... | -3,257,034,508,495,823,400 | Permit Admins to pause the server. | nova/api/openstack/compute/plugins/v3/pause_server.py | _pause | PFZheng/nova | python | @extensions.expected_errors((404, 409))
@wsgi.action('pause')
def _pause(self, req, id, body):
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
server = common.get_instance(self.compute_api, ctxt, id, want_objects=True)
try:
self.compute_api.pause(ctxt, server)
except excepti... |
@extensions.expected_errors((404, 409))
@wsgi.action('unpause')
def _unpause(self, req, id, body):
'Permit Admins to unpause the server.'
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
server = common.get_instance(self.compute_api, ctxt, id, want_objects=True)
try:
self.comput... | 8,663,507,411,384,678,000 | Permit Admins to unpause the server. | nova/api/openstack/compute/plugins/v3/pause_server.py | _unpause | PFZheng/nova | python | @extensions.expected_errors((404, 409))
@wsgi.action('unpause')
def _unpause(self, req, id, body):
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
server = common.get_instance(self.compute_api, ctxt, id, want_objects=True)
try:
self.compute_api.unpause(ctxt, server)
except... |
def _wait_until(self, predicate, timeout=None, poll_frequency=0.5):
"Wait until the value returned by predicate is not False or\n the timeout is elapsed.\n 'predicate' takes the driver as argument.\n "
if (not timeout):
timeout = self.explicit_wait
wait.WebDriverWait(self.driver... | 3,087,956,966,512,135,000 | Wait until the value returned by predicate is not False or
the timeout is elapsed.
'predicate' takes the driver as argument. | openstack-dashboard/openstack_dashboard/test/integration_tests/basewebobject.py | _wait_until | JerryDog/horizon-f-road | python | def _wait_until(self, predicate, timeout=None, poll_frequency=0.5):
"Wait until the value returned by predicate is not False or\n the timeout is elapsed.\n 'predicate' takes the driver as argument.\n "
if (not timeout):
timeout = self.explicit_wait
wait.WebDriverWait(self.driver... |
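`_wait_until` above is a thin wrapper over Selenium's `WebDriverWait`. A hedged usage sketch outside the class (the driver instance and the predicate are assumptions, not from the source):

```python
from selenium import webdriver
from selenium.webdriver.support import wait

driver = webdriver.Firefox()  # assumed: any WebDriver instance works here
# Poll every 0.5 s, up to 10 s, until the predicate returns a truthy value.
wait.WebDriverWait(driver, timeout=10, poll_frequency=0.5).until(
    lambda drv: drv.title != ''
)
```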
def _wait_till_text_present_in_element(self, element, text, timeout=None):
'Waiting for a text to appear in a certain element very often is\n actually waiting for a _different_ element with a different text to\n appear in place of an old element. So a way to avoid capturing stale\n element refe... | 6,722,385,146,241,805,000 | Waiting for a text to appear in a certain element very often is
actually waiting for a _different_ element with a different text to
appear in place of an old element. So a way to avoid capturing stale
element reference should be provided for this use case.
Better to wrap getting entity status cell in a lambda
to avoid... | openstack-dashboard/openstack_dashboard/test/integration_tests/basewebobject.py | _wait_till_text_present_in_element | JerryDog/horizon-f-road | python | def _wait_till_text_present_in_element(self, element, text, timeout=None):
'Waiting for a text to appear in a certain element very often is\n actually waiting for a _different_ element with a different text to\n appear in place of an old element. So a way to avoid capturing stale\n element refe... |
def airflow_test_suite():
'Test suite for Airflow tests'
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
return test_suite | -580,689,423,026,202,600 | Test suite for Airflow tests | setup.py | airflow_test_suite | 312day/airflow | python | def airflow_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
return test_suite |
def git_version(version_: str) -> str:
'\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a \'release:{version}\' prefix\n and... | 1,922,320,437,300,587,000 | Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sh... | setup.py | git_version | 312day/airflow | python | def git_version(version_: str) -> str:
'\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a \'release:{version}\' prefix\n and... |
def write_version(filename: str=os.path.join(*[my_dir, 'airflow', 'git_version'])):
'\n Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".\n\n :param str filename: Destination file to write\n '
text = '{}'.format(git_version(version))
with open(filen... | 2,068,128,176,971,055,900 | Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".
:param str filename: Destination file to write | setup.py | write_version | 312day/airflow | python | def write_version(filename: str=os.path.join(*[my_dir, 'airflow', 'git_version'])):
'\n Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".\n\n :param str filename: Destination file to write\n '
text = '{}'.format(git_version(version))
with open(filen... |
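To illustrate the version-string shape that `git_version` and `write_version` describe (e.g. `.dev0+<sha>`), here is a hedged sketch that shells out to git; the source's exact prefix logic is truncated above, so treat this as an assumption:

```python
import subprocess

# Assumed: executed inside a git work tree.
sha = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
version_string = '.dev0+{}'.format(sha)  # e.g. ".dev0+2f635dc265e78db..."
```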
def is_package_excluded(package: str, exclusion_list: List[str]):
'\n Checks if package should be excluded.\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n '
return any([package.startswith(excluded... | -7,323,985,351,570,894,000 | Checks if package should be excluded.
:param package: package name (beginning of it)
:param exclusion_list: list of excluded packages
:return: true if package should be excluded | setup.py | is_package_excluded | 312day/airflow | python | def is_package_excluded(package: str, exclusion_list: List[str]):
'\n Checks if package should be excluded.\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n '
return any([package.startswith(excluded... |
def do_setup():
'Perform the Airflow package setup.'
write_version()
setup(name='apache-airflow', description='Programmatically author, schedule and monitor data pipelines', long_description=long_description, long_description_content_type='text/markdown', license='Apache License 2.0', version=version, packa... | 2,453,606,635,205,621,000 | Perform the Airflow package setup. | setup.py | do_setup | 312day/airflow | python | def do_setup():
write_version()
setup(name='apache-airflow', description='Programmatically author, schedule and monitor data pipelines', long_description=long_description, long_description_content_type='text/markdown', license='Apache License 2.0', version=version, packages=find_packages(include=['airflow'... |
def initialize_options(self):
'Set default values for options.' | 953,287,520,272,231,300 | Set default values for options. | setup.py | initialize_options | 312day/airflow | python | def initialize_options(self):
|
def finalize_options(self):
'Set final values for options.' | 1,898,983,747,956,181,200 | Set final values for options. | setup.py | finalize_options | 312day/airflow | python | def finalize_options(self):
|
def run(self):
'Run command to remove temporary files and directories.'
os.chdir(my_dir)
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info') | -9,137,811,618,676,238,000 | Run command to remove temporary files and directories. | setup.py | run | 312day/airflow | python | def run(self):
os.chdir(my_dir)
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info') |
def initialize_options(self):
'Set default values for options.' | 953,287,520,272,231,300 | Set default values for options. | setup.py | initialize_options | 312day/airflow | python | def initialize_options(self):
|
def finalize_options(self):
'Set final values for options.' | 1,898,983,747,956,181,200 | Set final values for options. | setup.py | finalize_options | 312day/airflow | python | def finalize_options(self):
|
def run(self):
'Run a command to compile and build assets.'
subprocess.check_call('./airflow/www/compile_assets.sh') | -811,770,318,344,817,200 | Run a command to compile and build assets. | setup.py | run | 312day/airflow | python | def run(self):
subprocess.check_call('./airflow/www/compile_assets.sh') |
def initialize_options(self):
'Set default values for options.' | 953,287,520,272,231,300 | Set default values for options. | setup.py | initialize_options | 312day/airflow | python | def initialize_options(self):
|
def finalize_options(self):
'Set final values for options.' | 1,898,983,747,956,181,200 | Set final values for options. | setup.py | finalize_options | 312day/airflow | python | def finalize_options(self):
|
def run(self):
'List extras.'
print('\n'.join(wrap(', '.join(EXTRAS_REQUIREMENTS.keys()), 100))) | 946,835,777,747,823,000 | List extras. | setup.py | run | 312day/airflow | python | def run(self):
print('\n'.join(wrap(', '.join(EXTRAS_REQUIREMENTS.keys()), 100))) |
def git_ignored(file: Path) -> bool:
'Returns true if this file is in a Git repo and ignored by that repo.\n\n Returns true for ignored files that were manually added to a repo.\n '
file = file.resolve()
directory = file.parent
while True:
try:
returncode = subprocess.run(['git... | -2,774,505,125,595,756,000 | Returns true if this file is in a Git repo and ignored by that repo.
Returns true for ignored files that were manually added to a repo. | pw_watch/py/pw_watch/watch.py | git_ignored | isabella232/pigweed | python | def git_ignored(file: Path) -> bool:
'Returns true if this file is in a Git repo and ignored by that repo.\n\n Returns true for ignored files that were manually added to a repo.\n '
file = file.resolve()
directory = file.parent
while True:
try:
returncode = subprocess.run(['git... |
def add_parser_arguments(parser: argparse.ArgumentParser) -> None:
'Sets up an argument parser for pw watch.'
parser.add_argument('--patterns', help=(_WATCH_PATTERN_DELIMITER + '-delimited list of globs to watch to trigger recompile'), default=_WATCH_PATTERN_DELIMITER.join(_WATCH_PATTERNS))
parser.add_argum... | 8,633,480,375,014,321,000 | Sets up an argument parser for pw watch. | pw_watch/py/pw_watch/watch.py | add_parser_arguments | isabella232/pigweed | python | def add_parser_arguments(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--patterns', help=(_WATCH_PATTERN_DELIMITER + '-delimited list of globs to watch to trigger recompile'), default=_WATCH_PATTERN_DELIMITER.join(_WATCH_PATTERNS))
parser.add_argument('--ignore_patterns', dest='ignore_patt... |
def minimal_watch_directories(to_watch: Path, to_exclude: Iterable[Path]):
'Determine which subdirectory to watch recursively'
try:
to_watch = Path(to_watch)
except TypeError:
assert False, 'Please watch one directory at a time.'
directories_to_exclude: List[Path] = [to_watch.joinpath(di... | 8,227,061,236,398,482,000 | Determine which subdirectory to watch recursively | pw_watch/py/pw_watch/watch.py | minimal_watch_directories | isabella232/pigweed | python | def minimal_watch_directories(to_watch: Path, to_exclude: Iterable[Path]):
try:
to_watch = Path(to_watch)
except TypeError:
assert False, 'Please watch one directory at a time.'
directories_to_exclude: List[Path] = [to_watch.joinpath(directory_to_exclude) for directory_to_exclude in to_... |
def get_common_excludes() -> List[Path]:
'Find commonly excluded directories, and return them as a [Path]'
exclude_list: List[Path] = []
typical_ignored_directories: List[str] = ['.environment', '.presubmit', '.git', '.mypy_cache', '.cargo', 'environment', 'out']
pw_root_dir = Path(os.environ['PW_ROOT']... | -5,883,953,771,426,845,000 | Find commonly excluded directories, and return them as a [Path] | pw_watch/py/pw_watch/watch.py | get_common_excludes | isabella232/pigweed | python | def get_common_excludes() -> List[Path]:
exclude_list: List[Path] = []
typical_ignored_directories: List[str] = ['.environment', '.presubmit', '.git', '.mypy_cache', '.cargo', 'environment', 'out']
pw_root_dir = Path(os.environ['PW_ROOT'])
exclude_list.extend(((pw_root_dir / ignored_directory) for ... |
def _find_build_dir(default_build_dir: Path=Path('out')) -> Optional[Path]:
'Searches for a build directory, returning the first it finds.'
if default_build_dir.joinpath('build.ninja').exists():
return default_build_dir
for path in default_build_dir.glob('**/build.ninja'):
return path.parent... | 3,079,257,728,143,472,600 | Searches for a build directory, returning the first it finds. | pw_watch/py/pw_watch/watch.py | _find_build_dir | isabella232/pigweed | python | def _find_build_dir(default_build_dir: Path=Path('out')) -> Optional[Path]:
if default_build_dir.joinpath('build.ninja').exists():
return default_build_dir
for path in default_build_dir.glob('**/build.ninja'):
return path.parent
for path in Path.cwd().glob('**/build.ninja'):
ret... |
def watch(default_build_targets: List[str], build_directories: List[str], patterns: str, ignore_patterns_string: str, exclude_list: List[Path], restart: bool):
'Watches files and runs Ninja commands when they change.'
_LOG.info('Starting Pigweed build watcher')
if (os.environ['PW_ROOT'] is None):
_e... | -2,830,571,198,471,650,300 | Watches files and runs Ninja commands when they change. | pw_watch/py/pw_watch/watch.py | watch | isabella232/pigweed | python | def watch(default_build_targets: List[str], build_directories: List[str], patterns: str, ignore_patterns_string: str, exclude_list: List[Path], restart: bool):
_LOG.info('Starting Pigweed build watcher')
if (os.environ['PW_ROOT'] is None):
_exit_due_to_pigweed_not_installed()
pw_root = Path(os.... |
def main() -> None:
'Watch files for changes and rebuild.'
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
add_parser_arguments(parser)
watch(**vars(parser.parse_args())) | 5,897,164,643,106,543,000 | Watch files for changes and rebuild. | pw_watch/py/pw_watch/watch.py | main | isabella232/pigweed | python | def main() -> None:
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
add_parser_arguments(parser)
watch(**vars(parser.parse_args())) |
def _path_matches(self, path: Path) -> bool:
'Returns true if path matches according to the watcher patterns'
return ((not any((path.match(x) for x in self.ignore_patterns))) and any((path.match(x) for x in self.patterns))) | 4,431,571,565,773,947,000 | Returns true if path matches according to the watcher patterns | pw_watch/py/pw_watch/watch.py | _path_matches | isabella232/pigweed | python | def _path_matches(self, path: Path) -> bool:
return ((not any((path.match(x) for x in self.ignore_patterns))) and any((path.match(x) for x in self.patterns))) |
def run(self) -> None:
'Run all the builds in serial and capture pass/fail for each.'
print('\x1bc', end='')
print(pw_cli.branding.banner())
print(_COLOR.green(' Watching for changes. Ctrl-C to exit; enter to rebuild'))
print()
_LOG.info('Change detected: %s', self.matching_path)
self.build... | -7,174,389,099,788,953,000 | Run all the builds in serial and capture pass/fail for each. | pw_watch/py/pw_watch/watch.py | run | isabella232/pigweed | python | def run(self) -> None:
print('\x1bc', end='')
print(pw_cli.branding.banner())
print(_COLOR.green(' Watching for changes. Ctrl-C to exit; enter to rebuild'))
print()
_LOG.info('Change detected: %s', self.matching_path)
self.builds_succeeded = []
num_builds = len(self.build_commands)
_... |
def _JsonValueToPythonValue(json_value):
'Convert the given JsonValue to a Python value.'
util.Typecheck(json_value, JsonValue)
_ValidateJsonValue(json_value)
if json_value.is_null:
return None
entries = [(f, json_value.get_assigned_value(f.name)) for f in json_value.all_fields()]
assigne... | 231,432,454,391,992,060 | Convert the given JsonValue to a Python value. | .install/.backup/lib/apitools/base/py/extra_types.py | _JsonValueToPythonValue | Technology-Hatchery/google-cloud-sdk | python | def _JsonValueToPythonValue(json_value):
util.Typecheck(json_value, JsonValue)
_ValidateJsonValue(json_value)
if json_value.is_null:
return None
entries = [(f, json_value.get_assigned_value(f.name)) for f in json_value.all_fields()]
assigned_entries = [(f, value) for (f, value) in entri... |
def _PythonValueToJsonValue(py_value):
'Convert the given python value to a JsonValue.'
if (py_value is None):
return JsonValue(is_null=True)
if isinstance(py_value, bool):
return JsonValue(boolean_value=py_value)
if isinstance(py_value, basestring):
return JsonValue(string_value... | -8,058,865,423,543,755,000 | Convert the given python value to a JsonValue. | .install/.backup/lib/apitools/base/py/extra_types.py | _PythonValueToJsonValue | Technology-Hatchery/google-cloud-sdk | python | def _PythonValueToJsonValue(py_value):
if (py_value is None):
return JsonValue(is_null=True)
if isinstance(py_value, bool):
return JsonValue(boolean_value=py_value)
if isinstance(py_value, basestring):
return JsonValue(string_value=py_value)
if isinstance(py_value, numbers.N... |
def _EncodeInt64Field(field, value):
'Handle the special case of int64 as a string.'
capabilities = [messages.Variant.INT64, messages.Variant.UINT64]
if (field.variant not in capabilities):
return encoding.CodecResult(value=value, complete=False)
if field.repeated:
result = [str(x) for x... | 6,361,198,810,250,222,000 | Handle the special case of int64 as a string. | .install/.backup/lib/apitools/base/py/extra_types.py | _EncodeInt64Field | Technology-Hatchery/google-cloud-sdk | python | def _EncodeInt64Field(field, value):
capabilities = [messages.Variant.INT64, messages.Variant.UINT64]
if (field.variant not in capabilities):
return encoding.CodecResult(value=value, complete=False)
if field.repeated:
result = [str(x) for x in value]
else:
result = str(value... |
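The int64-as-string special case above exists because JSON interoperates with JavaScript, whose numbers are doubles and lose integer precision beyond 2**53 - 1. A standalone illustration of the workaround:

```python
import json

big = 2 ** 53 + 1  # 9007199254740993, not exactly representable as a double
# Python round-trips arbitrary ints, but a JS consumer would round this value,
# so int64 fields are serialized as strings instead:
payload = json.dumps({'count': str(big)})
print(payload)  # {"count": "9007199254740993"}
```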
def vmin(*vectors):
'\n Retrieve the minimum out of an iterable of Vectors.\n\n Raises\n ------\n TypeError\n If there are two incomparable Vectors.\n ValueError\n If an empty sequence is supplied\n '
if (not all((isinstance(i, Vector) for i in vectors))):
raise TypeError... | 1,086,249,124,194,980,200 | Retrieve the minimum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied | devito/ir/support/vector.py | vmin | rhodrin/devito | python | def vmin(*vectors):
'\n Retrieve the minimum out of an iterable of Vectors.\n\n Raises\n ------\n TypeError\n If there are two incomparable Vectors.\n ValueError\n If an empty sequence is supplied\n '
if (not all((isinstance(i, Vector) for i in vectors))):
raise TypeError... |
def vmax(*vectors):
'\n Retrieve the maximum out of an iterable of Vectors.\n\n Raises\n ------\n TypeError\n If there are two incomparable Vectors.\n ValueError\n If an empty sequence is supplied\n '
if (not all((isinstance(i, Vector) for i in vectors))):
raise TypeError... | 4,997,967,146,906,403,000 | Retrieve the maximum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied | devito/ir/support/vector.py | vmax | rhodrin/devito | python | def vmax(*vectors):
'\n Retrieve the maximum out of an iterable of Vectors.\n\n Raises\n ------\n TypeError\n If there are two incomparable Vectors.\n ValueError\n If an empty sequence is supplied\n '
if (not all((isinstance(i, Vector) for i in vectors))):
raise TypeError... |
def distance(self, other):
'\n Compute the distance from ``self`` to ``other``.\n\n The distance is a reflexive, transitive, and anti-symmetric relation,\n which establishes a total ordering amongst Vectors.\n\n The distance is a function [Vector x Vector --> D]. D is a tuple of length\n... | -7,095,503,812,526,716,000 | Compute the distance from ``self`` to ``other``.
The distance is a reflexive, transitive, and anti-symmetric relation,
which establishes a total ordering amongst Vectors.
The distance is a function [Vector x Vector --> D]. D is a tuple of length
equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whethe... | devito/ir/support/vector.py | distance | rhodrin/devito | python | def distance(self, other):
'\n Compute the distance from ``self`` to ``other``.\n\n The distance is a reflexive, transitive, and anti-symmetric relation,\n which establishes a total ordering amongst Vectors.\n\n The distance is a function [Vector x Vector --> D]. D is a tuple of length\n... |
@classmethod
def transpose(cls, *vectors):
'\n Transpose a matrix represented as an iterable of homogeneous LabeledVectors.\n '
if (len(vectors) == 0):
return LabeledVector()
if (not all((isinstance(v, LabeledVector) for v in vectors))):
raise ValueError(('All items must be of ... | 4,527,107,071,712,793,000 | Transpose a matrix represented as an iterable of homogeneous LabeledVectors. | devito/ir/support/vector.py | transpose | rhodrin/devito | python | @classmethod
def transpose(cls, *vectors):
'\n \n '
if (len(vectors) == 0):
return LabeledVector()
if (not all((isinstance(v, LabeledVector) for v in vectors))):
raise ValueError(('All items must be of type LabeledVector, got [%s]' % ','.join((i.__class__.__name__ for i in vect... |
@memoized_meth
def distance(self, other):
'\n Compute the distance from ``self`` to ``other``.\n\n Parameters\n ----------\n other : LabeledVector\n The LabeledVector from which the distance is computed.\n '
if (not isinstance(other, LabeledVector)):
raise T... | 7,381,341,259,469,386,000 | Compute the distance from ``self`` to ``other``.
Parameters
----------
other : LabeledVector
The LabeledVector from which the distance is computed. | devito/ir/support/vector.py | distance | rhodrin/devito | python | @memoized_meth
def distance(self, other):
'\n Compute the distance from ``self`` to ``other``.\n\n Parameters\n ----------\n other : LabeledVector\n The LabeledVector from which the distance is computed.\n '
if (not isinstance(other, LabeledVector)):
raise T... |
def generate_bin():
'Generate bin files.'
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data/cora/cora_mr', help='Data dir')
parser.add_argument('--train_nodes_num', type=int, default=140, help='Number of nodes for training')
parser.add_argument('--eval_node... | -6,830,304,606,836,681,000 | Generate bin files. | model_zoo/official/gnn/gat/preprocess.py | generate_bin | 233-puchi/mindspore | python | def generate_bin():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data/cora/cora_mr', help='Data dir')
parser.add_argument('--train_nodes_num', type=int, default=140, help='Number of nodes for training')
parser.add_argument('--eval_nodes_num', type=int, def... |
def test_encode_nibbles_variable_over_max() -> None:
'Variable field length is over maximum allowed'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'... | -3,900,687,380,426,086,400 | Variable field length is over maximum allowed | tests/test_nibbles.py | test_encode_nibbles_variable_over_max | knovichikhin/pyiso8583 | python | def test_encode_nibbles_variable_over_max() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2'... |
def test_encode_nibbles_fixed_partial() -> None:
'Fixed field is provided partially'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['... | 6,831,563,969,507,959,000 | Fixed field is provided partially | tests/test_nibbles.py | test_encode_nibbles_fixed_partial | knovichikhin/pyiso8583 | python | def test_encode_nibbles_fixed_partial() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['l... |
def test_encode_nibbles_fixed_missing() -> None:
'Fixed field is missing'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_le... | -2,089,517,104,892,656,000 | Fixed field is missing | tests/test_nibbles.py | test_encode_nibbles_fixed_missing | knovichikhin/pyiso8583 | python | def test_encode_nibbles_fixed_missing() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['l... |
def test_decode_nibbles_variable_over_max() -> None:
'Variable field length is over maximum allowed'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'... | -2,357,995,680,247,013,000 | Variable field length is over maximum allowed | tests/test_nibbles.py | test_decode_nibbles_variable_over_max | knovichikhin/pyiso8583 | python | def test_decode_nibbles_variable_over_max() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2'... |
def test_decode_nibbles_variable_partial() -> None:
'Variable field is provided partially'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
... | 3,606,540,515,098,440,700 | Variable field is provided partially | tests/test_nibbles.py | test_decode_nibbles_variable_partial | knovichikhin/pyiso8583 | python | def test_decode_nibbles_variable_partial() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2']... |
def test_decode_nibbles_variable_missing() -> None:
'Variable field is missing'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['... | -7,780,357,992,181,737,000 | Variable field is missing | tests/test_nibbles.py | test_decode_nibbles_variable_missing | knovichikhin/pyiso8583 | python | def test_decode_nibbles_variable_missing() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 2
spec['2']['max_len'] = 4
spec['2']... |
def test_decode_nibbles_fixed_partial() -> None:
'Fixed field is provided partially'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['... | -8,831,870,260,704,048,000 | Fixed field is provided partially | tests/test_nibbles.py | test_decode_nibbles_fixed_partial | knovichikhin/pyiso8583 | python | def test_decode_nibbles_fixed_partial() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['l... |
def test_decode_nibbles_fixed_missing() -> None:
'Fixed field is missing'
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_le... | 7,520,147,214,975,566,000 | Fixed field is missing | tests/test_nibbles.py | test_decode_nibbles_fixed_missing | knovichikhin/pyiso8583 | python | def test_decode_nibbles_fixed_missing() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec['t']['data_enc'] = 'ascii'
spec['p']['data_enc'] = 'ascii'
spec['2']['data_enc'] = 'ascii'
spec['2']['len_enc'] = 'ascii'
spec['2']['len_type'] = 0
spec['2']['max_len'] = 4
spec['2']['l... |
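All of the nibble tests above share one setup pattern: deep-copy pyiso8583's default spec, then override per-field encoding attributes. A minimal sketch of that setup, using the same field-2 overrides the tests themselves use:

```python
import copy
import iso8583.specs

spec = copy.deepcopy(iso8583.specs.default)
spec['2']['data_enc'] = 'ascii'  # primary account number encoded as ASCII
spec['2']['len_type'] = 0        # fixed-length field...
spec['2']['max_len'] = 4         # ...of exactly 4 characters
```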
@property
def energy_thresh_lo(self):
'Low energy threshold'
return (self.meta['LO_THRES'] * u.TeV) | -6,215,233,484,041,799,000 | Low energy threshold | gammapy/irf/psf/gauss.py | energy_thresh_lo | mdebony/gammapy | python | @property
def energy_thresh_lo(self):
return (self.meta['LO_THRES'] * u.TeV) |
@property
def energy_thresh_hi(self):
'High energy threshold'
return (self.meta['HI_THRES'] * u.TeV) | 4,512,721,132,327,294,000 | High energy threshold | gammapy/irf/psf/gauss.py | energy_thresh_hi | mdebony/gammapy | python | @property
def energy_thresh_hi(self):
return (self.meta['HI_THRES'] * u.TeV) |
@classmethod
def read(cls, filename, hdu='PSF_2D_GAUSS'):
'Create `EnergyDependentMultiGaussPSF` from FITS file.\n\n Parameters\n ----------\n filename : str\n File name\n '
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hd... | -1,094,260,843,814,858,400 | Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name | gammapy/irf/psf/gauss.py | read | mdebony/gammapy | python | @classmethod
def read(cls, filename, hdu='PSF_2D_GAUSS'):
'Create `EnergyDependentMultiGaussPSF` from FITS file.\n\n Parameters\n ----------\n filename : str\n File name\n '
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hd... |
@classmethod
def from_table_hdu(cls, hdu):
'Create `EnergyDependentMultiGaussPSF` from HDU list.\n\n Parameters\n ----------\n hdu : `~astropy.io.fits.BinTableHDU`\n HDU\n '
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(table, column_prefix='ENERG', for... | -4,669,775,591,258,652,000 | Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU | gammapy/irf/psf/gauss.py | from_table_hdu | mdebony/gammapy | python | @classmethod
def from_table_hdu(cls, hdu):
'Create `EnergyDependentMultiGaussPSF` from HDU list.\n\n Parameters\n ----------\n hdu : `~astropy.io.fits.BinTableHDU`\n HDU\n '
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(table, column_prefix='ENERG', for... |
def to_hdulist(self):
'\n Convert psf table data to FITS hdu list.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n PSF in HDU list format.\n '
names = ['SCALE', 'SIGMA_1', 'AMPL_2', 'SIGMA_2', 'AMPL_3', 'SIGMA_3']
units = ['', 'deg', '', 'deg', '... | -3,263,587,897,682,827,300 | Convert psf table data to FITS hdu list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format. | gammapy/irf/psf/gauss.py | to_hdulist | mdebony/gammapy | python | def to_hdulist(self):
'\n Convert psf table data to FITS hdu list.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n PSF in HDU list format.\n '
names = ['SCALE', 'SIGMA_1', 'AMPL_2', 'SIGMA_2', 'AMPL_3', 'SIGMA_3']
units = ['', 'deg', '', 'deg', '', 'de... |
def write(self, filename, *args, **kwargs):
'Write PSF to FITS file.\n\n Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.\n '
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs) | 2,628,976,963,919,575,000 | Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments. | gammapy/irf/psf/gauss.py | write | mdebony/gammapy | python | def write(self, filename, *args, **kwargs):
'Write PSF to FITS file.\n\n Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.\n '
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs) |
def psf_at_energy_and_theta(self, energy, theta):
'\n Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.\n\n No interpolation is used.\n\n Parameters\n ----------\n energy : `~astropy.units.u.Quantity`\n Energy at which a PSF is requested.\n ... | 6,419,885,130,308,105,000 | Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.
No interpolation is used.
Parameters
----------
energy : `~astropy.units.u.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy... | gammapy/irf/psf/gauss.py | psf_at_energy_and_theta | mdebony/gammapy | python | def psf_at_energy_and_theta(self, energy, theta):
'\n Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.\n\n No interpolation is used.\n\n Parameters\n ----------\n energy : `~astropy.units.u.Quantity`\n Energy at which a PSF is requested.\n ... |
def containment_radius(self, energy, theta, fraction=0.68):
'Compute containment for all energy and theta values'
energies = u.Quantity(energy).flatten()
thetas = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
for (idx, energy) in enumerate(energies):
for (jdx, theta) in... | 2,057,220,733,141,044,700 | Compute containment for all energy and theta values | gammapy/irf/psf/gauss.py | containment_radius | mdebony/gammapy | python | def containment_radius(self, energy, theta, fraction=0.68):
energies = u.Quantity(energy).flatten()
thetas = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
for (idx, energy) in enumerate(energies):
for (jdx, theta) in enumerate(thetas):
try:
... |
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
'\n Plot containment image with energy and theta axes.\n\n Parameters\n ----------\n fraction : float\n Containment fraction between 0 and 1.\n add_cbar : bool\n Add a colorbar\n ... | -2,834,756,856,326,981,600 | Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar | gammapy/irf/psf/gauss.py | plot_containment | mdebony/gammapy | python | def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
'\n Plot containment image with energy and theta axes.\n\n Parameters\n ----------\n fraction : float\n Containment fraction between 0 and 1.\n add_cbar : bool\n Add a colorbar\n ... |
def _plot_safe_energy_range(self, ax):
'add safe energy range lines to the plot'
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f'Safe energy threshold: {esafe:3.2f}'
a... | 1,833,843,675,341,529,300 | add safe energy range lines to the plot | gammapy/irf/psf/gauss.py | _plot_safe_energy_range | mdebony/gammapy | python | def _plot_safe_energy_range(self, ax):
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f'Safe energy threshold: {esafe:3.2f}'
ax.text(x=(1.1 * esafe.value), y=0.3, s=la... |
def plot_containment_vs_energy(self, fractions=[0.68, 0.95], thetas=Angle([0, 1], 'deg'), ax=None, **kwargs):
'Plot containment fraction as a function of energy.\n '
import matplotlib.pyplot as plt
ax = (plt.gca() if (ax is None) else ax)
energy = self.energy_axis_true.center
for theta in the... | -9,218,549,933,889,060,000 | Plot containment fraction as a function of energy. | gammapy/irf/psf/gauss.py | plot_containment_vs_energy | mdebony/gammapy | python | def plot_containment_vs_energy(self, fractions=[0.68, 0.95], thetas=Angle([0, 1], 'deg'), ax=None, **kwargs):
'\n '
import matplotlib.pyplot as plt
ax = (plt.gca() if (ax is None) else ax)
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
... |
def peek(self, figsize=(15, 5)):
'Quick-look summary plots.'
import matplotlib.pyplot as plt
(fig, axes) = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])... | 4,996,040,905,371,016,000 | Quick-look summary plots. | gammapy/irf/psf/gauss.py | peek | mdebony/gammapy | python | def peek(self, figsize=(15, 5)):
import matplotlib.pyplot as plt
(fig, axes) = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
plt.tight_layout() |
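The peek record above composes the two plotting helpers into one summary figure. A minimal call sketch, assuming psf is loaded as in the containment_radius sketch earlier:

import matplotlib.pyplot as plt

psf.peek(figsize=(15, 5))  # 68% and 95% containment maps plus containment vs energy
plt.show()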
def info(self, fractions=[0.68, 0.95], energies=u.Quantity([1.0, 10.0], 'TeV'), thetas=u.Quantity([0.0], 'deg')):
'\n Print PSF summary info.\n\n The containment radius for given fraction, energies and thetas is\n computed and printed on the command line.\n\n Parameters\n --------... | -5,399,679,831,764,795,000 | Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.u.Quantity`
Energies to compute containment radius for.
... | gammapy/irf/psf/gauss.py | info | mdebony/gammapy | python | def info(self, fractions=[0.68, 0.95], energies=u.Quantity([1.0, 10.0], 'TeV'), thetas=u.Quantity([0.0], 'deg')):
'\n Print PSF summary info.\n\n The containment radius for given fraction, energies and thetas is\n computed and printed on the command line.\n\n Parameters\n --------... |
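A call sketch for the info record above, again assuming psf from the earlier sketch; the arguments simply restate the documented defaults, and the summary is printed rather than returned:

import astropy.units as u

psf.info(
    fractions=[0.68, 0.95],
    energies=u.Quantity([1.0, 10.0], "TeV"),
    thetas=u.Quantity([0.0], "deg"),
)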
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"Convert triple Gaussian PSF ot table PSF.\n\n Parameters\n ----------\n theta : `~astropy.coordinates.Angle`\n Offset in the field of view. Default theta = 0 deg\n rad : `~astropy.coordinates.Angle`... | -273,039,063,780,967,460 | Convert triple Gaussian PSF ot table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
exposure... | gammapy/irf/psf/gauss.py | to_energy_dependent_table_psf | mdebony/gammapy | python | def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"Convert triple Gaussian PSF ot table PSF.\n\n Parameters\n ----------\n theta : `~astropy.coordinates.Angle`\n Offset in the field of view. Default theta = 0 deg\n rad : `~astropy.coordinates.Angle`... |
def to_psf3d(self, rad=None):
'Create a PSF3D from an analytical PSF.\n\n Parameters\n ----------\n rad : `~astropy.units.u.Quantity` or `~astropy.coordinates.Angle`\n the array of position errors (rad) on which the PSF3D will be defined\n\n Returns\n -------\n p... | -6,570,291,098,575,002,000 | Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.u.Quantity` or `~astropy.coordinates.Angle`
the array of position errors (rad) on which the PSF3D will be defined
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
the PSF3D. It will be defined on the same energy and offset values t... | gammapy/irf/psf/gauss.py | to_psf3d | mdebony/gammapy | python | def to_psf3d(self, rad=None):
'Create a PSF3D from an analytical PSF.\n\n Parameters\n ----------\n rad : `~astropy.units.u.Quantity` or `~astropy.coordinates.Angle`\n the array of position errors (rad) on which the PSF3D will be defined\n\n Returns\n -------\n p... |
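A sketch for the to_psf3d record above, assuming psf as before; the rad grid is illustrative (passing None would use the default grid per the docstring):

import numpy as np
import astropy.units as u

rad = np.linspace(0.0, 1.5, 300) * u.deg  # explicit position-error axis
psf3d = psf.to_psf3d(rad=rad)             # binned PSF3D on the same energy/offset axes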
def read_model(model_path, weigths_path):
'Load your pretrained model\n\t'
model = model_from_json(open(model_path).read())
model.load_weights(weigths_path)
return model | -8,900,926,367,652,668,000 | Load your pretrained model | demo.py | read_model | ijinmao/CAM-Localization | python | def read_model(model_path, weigths_path):
'\n\t'
model = model_from_json(open(model_path).read())
model.load_weights(weigths_path)
return model |
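A usage sketch for the read_model record above; the file paths are hypothetical and assume an architecture JSON from model.to_json() plus HDF5 weights from model.save_weights():

# Hypothetical files; read_model is the helper defined in the record.
model = read_model("cam_model.json", "cam_model_weights.h5")
model.summary()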
def train_cam_model(X_train, Y_train, X_test, Y_test, batch_size, nb_epoch):
'Train CAM model based on your pretrained model\n\n\t# Arguments\n\t\tmodel: your pretrained model, CAM model is trained based on this model.\n\n\t'
pretrained_model_path = ''
pretrained_weights_path = ''
pretrained_model_name ... | 8,713,368,420,113,241,000 | Train CAM model based on your pretrained model
# Arguments
model: your pretrained model, CAM model is trained based on this model. | demo.py | train_cam_model | ijinmao/CAM-Localization | python | def train_cam_model(X_train, Y_train, X_test, Y_test, batch_size, nb_epoch):
'Train CAM model based on your pretrained model\n\n\t# Arguments\n\t\tmodel: your pretrained model, CAM model is trained based on this model.\n\n\t'
pretrained_model_path =
pretrained_weights_path =
pretrained_model_name = 'V... |
def cam_model():
'\n\tReturn your trained CAM model\n\t'
return | 4,348,099,947,783,747,000 | Return your trained CAM model | demo.py | cam_model | ijinmao/CAM-Localization | python | def cam_model():
'\n\t\n\t'
return |
def plot_cam_map(img_path, img_size, batch_size, label_plot):
'Plot class activation map.\n\n\t'
gap_spacial_size = 14
model = cam_model()
im_ori = np.asarray(load_img(img_path, target_size=(img_size, img_size)))
test_data = preprocess_image(img_path, img_size, expand_dims=True)
im_cam = get_cam... | -4,127,016,016,632,387,600 | Plot class activation map. | demo.py | plot_cam_map | ijinmao/CAM-Localization | python | def plot_cam_map(img_path, img_size, batch_size, label_plot):
'\n\n\t'
gap_spacial_size = 14
model = cam_model()
im_ori = np.asarray(load_img(img_path, target_size=(img_size, img_size)))
test_data = preprocess_image(img_path, img_size, expand_dims=True)
im_cam = get_cam_img(model, test_data, lab... |
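The three CAM records above build toward the standard class activation map computation: weight the last convolutional feature maps by the dense-layer weights of the target class. A self-contained numpy sketch with synthetic shapes (the 14x14 grid matches gap_spacial_size in the record; the real demo pulls these tensors from the trained model):

import numpy as np

feature_maps = np.random.rand(14, 14, 512)  # last conv output for one image (synthetic)
class_weights = np.random.rand(512)         # GAP-to-softmax weights for one class (synthetic)
cam = np.einsum("hwc,c->hw", feature_maps, class_weights)  # weighted sum over channels
cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)   # normalize to [0, 1]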
def train(self, start_epoch, max_epoch):
'Generic training loops.'
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.before_train()
for self.epoch in range(self.start_epoch, self.max_epoch):
self.before_epoch()
self.run_epoch()
self.after_epoch()
self.after_t... | -5,646,719,091,533,232,000 | Generic training loops. | dassl/engine/trainer.py | train | zhaoxin94/Dassl.pytorch | python | def train(self, start_epoch, max_epoch):
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.before_train()
for self.epoch in range(self.start_epoch, self.max_epoch):
self.before_epoch()
self.run_epoch()
self.after_epoch()
self.after_train() |
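The train record above is a template method: the loop is fixed and subclasses supply the hooks. A self-contained toy sketch of the same control flow (hypothetical class; Dassl's real trainer base adds model, optimizer, and logging state):

class ToyTrainer:
    def before_train(self): print("setup")
    def before_epoch(self): print(f"epoch {self.epoch} start")
    def run_epoch(self): pass  # one pass over the training data goes here
    def after_epoch(self): print(f"epoch {self.epoch} end")
    def after_train(self): print("teardown")

    def train(self, start_epoch, max_epoch):
        self.start_epoch, self.max_epoch = start_epoch, max_epoch
        self.before_train()
        for self.epoch in range(self.start_epoch, self.max_epoch):
            self.before_epoch()
            self.run_epoch()
            self.after_epoch()
        self.after_train()

ToyTrainer().train(0, 2)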
def check_cfg(self, cfg):
"Check whether some variables are set correctly for\n the trainer (optional).\n\n For example, a trainer might require a particular sampler\n for training such as 'RandomDomainSampler', so it is good\n to do the checking:\n\n assert cfg.DATALOADER.SAMPLER... | -6,042,607,910,904,794,000 | Check whether some variables are set correctly for
the trainer (optional).
For example, a trainer might require a particular sampler
for training such as 'RandomDomainSampler', so it is good
to do the checking:
assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler' | dassl/engine/trainer.py | check_cfg | zhaoxin94/Dassl.pytorch | python | def check_cfg(self, cfg):
"Check whether some variables are set correctly for\n the trainer (optional).\n\n For example, a trainer might require a particular sampler\n for training such as 'RandomDomainSampler', so it is good\n to do the checking:\n\n assert cfg.DATALOADER.SAMPLER... |
def build_data_loader(self):
'Create essential data-related attributes.\n\n What must be done in the re-implementation\n of this method:\n 1) initialize data manager\n 2) assign as attributes the data loaders\n 3) assign as attribute the number of classes\n '
self.dm = ... | -6,602,930,038,107,107,000 | Create essential data-related attributes.
What must be done in the re-implementation
of this method:
1) initialize data manager
2) assign as attributes the data loaders
3) assign as attribute the number of classes | dassl/engine/trainer.py | build_data_loader | zhaoxin94/Dassl.pytorch | python | def build_data_loader(self):
'Create essential data-related attributes.\n\n What must be done in the re-implementation\n of this method:\n 1) initialize data manager\n 2) assign as attributes the data loaders\n 3) assign as attribute the number of classes\n '
self.dm = ... |
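A sketch of a re-implementation following the three documented steps; the DataManager name, import path, and attributes are assumptions based on Dassl's conventions, so verify them against the real dassl.data module:

from dassl.data import DataManager  # assumed import path

def build_data_loader(self):
    self.dm = DataManager(self.cfg)               # 1) initialize data manager
    self.train_loader_x = self.dm.train_loader_x  # 2) assign data loaders as attributes
    self.val_loader = self.dm.val_loader
    self.test_loader = self.dm.test_loader
    self.num_classes = self.dm.num_classes        # 3) assign the number of classes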
def build_model(self):
'Build and register model.\n\n The default builds a classification model along with its\n optimizer and scheduler.\n\n Custom trainers can re-implement this method if necessary.\n '
cfg = self.cfg
print('Building model')
self.model = SimpleNet(cfg, cfg.... | -4,805,539,644,684,228,000 | Build and register model.
The default builds a classification model along with its
optimizer and scheduler.
Custom trainers can re-implement this method if necessary. | dassl/engine/trainer.py | build_model | zhaoxin94/Dassl.pytorch | python | def build_model(self):
'Build and register model.\n\n The default builds a classification model along with its\n optimizer and scheduler.\n\n Custom trainers can re-implement this method if necessary.\n '
cfg = self.cfg
print('Building model')
self.model = SimpleNet(cfg, cfg.... |
@torch.no_grad()
def test(self, split=None, return_per_class_results=False):
'A generic testing pipeline.'
self.set_model_mode('eval')
self.evaluator.reset()
if (split is None):
split = self.cfg.TEST.SPLIT
if ((split == 'val') and (self.val_loader is not None)):
data_loader = self.va... | 8,053,161,361,375,309,000 | A generic testing pipeline. | dassl/engine/trainer.py | test | zhaoxin94/Dassl.pytorch | python | @torch.no_grad()
def test(self, split=None, return_per_class_results=False):
self.set_model_mode('eval')
self.evaluator.reset()
if (split is None):
split = self.cfg.TEST.SPLIT
if ((split == 'val') and (self.val_loader is not None)):
data_loader = self.val_loader
print('Do ev... |
@property
def method(self) -> str:
'HTTP method used for the request'
return self._environ['REQUEST_METHOD'] | 5,419,372,049,588,608,000 | HTTP method used for the request | pogweb/models.py | method | ahnaf-zamil/pogweb | python | @property
def method(self) -> str:
return self._environ['REQUEST_METHOD'] |
@property
def endpoint(self) -> str:
'The route/endpoint used for that specific request'
return self._environ['PATH_INFO'] | -2,896,867,337,130,264,000 | The route/endpoint used for that specific request | pogweb/models.py | endpoint | ahnaf-zamil/pogweb | python | @property
def endpoint(self) -> str:
return self._environ['PATH_INFO'] |
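The two properties above read standard WSGI environ keys. A self-contained stand-in showing the same pattern (class name hypothetical; pogweb's real Request stores the environ as _environ):

class MiniRequest:
    def __init__(self, environ):
        self._environ = environ  # raw WSGI environ dict

    @property
    def method(self):
        return self._environ["REQUEST_METHOD"]

    @property
    def endpoint(self):
        return self._environ["PATH_INFO"]

req = MiniRequest({"REQUEST_METHOD": "GET", "PATH_INFO": "/users"})
assert req.method == "GET" and req.endpoint == "/users"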
@property
def query_args(self) -> ImmutableDict:
'Query arguments from the request'
args = self._environ['QUERY_STRING']
if (not args):
return ImmutableDict({})
args = args.split('&')
query_args = {}
for _arg in args:
(name, value) = _arg.split('=')
query_args[name] = val... | 6,587,376,489,600,675,000 | Query arguments from the request | pogweb/models.py | query_args | ahnaf-zamil/pogweb | python | @property
def query_args(self) -> ImmutableDict:
args = self._environ['QUERY_STRING']
if (not args):
return ImmutableDict({})
args = args.split('&')
query_args = {}
for _arg in args:
(name, value) = _arg.split('=')
query_args[name] = value
return ImmutableDict(query_... |
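Note that the manual split('=') above assumes exactly one '=' per pair and performs no URL decoding. A sketch of the stdlib equivalent, which handles both:

from urllib.parse import parse_qs

# parse_qs URL-decodes and tolerates '=' inside values.
query_args = {k: v[0] for k, v in parse_qs("q=a%3Db&page=2").items()}
assert query_args == {"q": "a=b", "page": "2"}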
@property
def form(self) -> typing.Optional[typing.Dict]:
'Form data sent via HTTP request'
data = self._environ.get('wsgi.input')
if data:
form_dict = parse_qs(data.getvalue().decode('utf-8'))
final_dict = {}
for (k, v) in form_dict.items():
final_dict[k] = v[0]
... | 6,462,910,950,514,397,000 | Form data sent via HTTP request | pogweb/models.py | form | ahnaf-zamil/pogweb | python | @property
def form(self) -> typing.Optional[typing.Dict]:
data = self._environ.get('wsgi.input')
if data:
form_dict = parse_qs(data.getvalue().decode('utf-8'))
final_dict = {}
for (k, v) in form_dict.items():
final_dict[k] = v[0]
return ImmutableDict(final_dict) |
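A self-contained sketch of what the form property above sees; BytesIO stands in for the real wsgi.input stream, matching the record's .getvalue() call:

import io
from urllib.parse import parse_qs

body = io.BytesIO(b"name=pog&lang=py")  # synthetic POST body
form = {k: v[0] for k, v in parse_qs(body.getvalue().decode("utf-8")).items()}
assert form == {"name": "pog", "lang": "py"}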
def get_entity(hass):
'Get the fan entity.'
return hass.states.get(FAN_ENTITY_ID) | 7,856,547,689,952,820,000 | Get the fan entity. | tests/components/demo/test_fan.py | get_entity | ActuallyRuben/home-assistant | python | def get_entity(hass):
return hass.states.get(FAN_ENTITY_ID) |
@pytest.fixture(autouse=True)
def setup_comp(hass):
'Initialize components.'
hass.loop.run_until_complete(async_setup_component(hass, fan.DOMAIN, {'fan': {'platform': 'demo'}})) | -959,499,954,790,526,500 | Initialize components. | tests/components/demo/test_fan.py | setup_comp | ActuallyRuben/home-assistant | python | @pytest.fixture(autouse=True)
def setup_comp(hass):
hass.loop.run_until_complete(async_setup_component(hass, fan.DOMAIN, {'fan': {'platform': 'demo'}})) |
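The fixture above is autouse, so every test in the module gets the demo fan platform without requesting it by name. A self-contained illustration of the mechanism (toy state, not Home Assistant):

import pytest

state = {}

@pytest.fixture(autouse=True)
def prepare():
    state["ready"] = True  # runs before each test automatically

def test_sees_fixture():
    assert state["ready"]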
async def test_turn_on(hass):
'Test turning on the device.'
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID, fan.SPEED_HIGH))
assert (STATE_ON == get_entity... | -8,242,091,094,631,456,000 | Test turning on the device. | tests/components/demo/test_fan.py | test_turn_on | ActuallyRuben/home-assistant | python | async def test_turn_on(hass):
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID, fan.SPEED_HIGH))
assert (STATE_ON == get_entity(hass).state)
assert (fan... |
async def test_turn_off(hass):
'Test turning off the device.'
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_off(hass, FAN_ENTITY_ID))
assert (STATE_OFF == get_entity(hass).state... | 9,193,625,582,298,587,000 | Test turning off the device. | tests/components/demo/test_fan.py | test_turn_off | ActuallyRuben/home-assistant | python | async def test_turn_off(hass):
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_off(hass, FAN_ENTITY_ID))
assert (STATE_OFF == get_entity(hass).state) |
async def test_turn_off_without_entity_id(hass):
'Test turning off all fans.'
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_off(hass))
assert (STATE_OFF == get_entity(hass).stat... | 4,560,760,357,813,158,400 | Test turning off all fans. | tests/components/demo/test_fan.py | test_turn_off_without_entity_id | ActuallyRuben/home-assistant | python | async def test_turn_off_without_entity_id(hass):
assert (STATE_OFF == get_entity(hass).state)
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert (STATE_OFF != get_entity(hass).state)
(await common.async_turn_off(hass))
assert (STATE_OFF == get_entity(hass).state) |