| body stringlengths 26 98.2k | body_hash int64 -9,222,864,604,528,158,000 9,221,803,474B | docstring stringlengths 1 16.8k | path stringlengths 5 230 | name stringlengths 1 96 | repository_name stringlengths 7 89 | lang stringclasses 1 value | body_without_docstring stringlengths 20 98.2k |
|---|---|---|---|---|---|---|---|
def apply_transform(x, transform_matrix, channel_axis=0, fill_mode='nearest', cval=0.0):
"Applies the image transformation specified by a matrix.\n\n # Arguments\n x: 2D numpy array, single image.\n transform_matrix: Numpy array specifying the geometric transformation.\n channel_axis: Index ... | -4,162,282,333,979,550,000 | Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are fil... | keras/preprocessing/image.py | apply_transform | HangJie720/keras | python | def apply_transform(x, transform_matrix, channel_axis=0, fill_mode='nearest', cval=0.0):
"Applies the image transformation specified by a matrix.\n\n # Arguments\n x: 2D numpy array, single image.\n transform_matrix: Numpy array specifying the geometric transformation.\n channel_axis: Index ... |
def array_to_img(x, data_format=None, scale=True):
'Converts a 3D Numpy array to a PIL Image instance.\n\n # Arguments\n x: Input Numpy array.\n data_format: Image data format.\n either "channels_first" or "channels_last".\n scale: Whether to rescale image values\n to b... | 3,415,023,904,854,424,000 | Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if... | keras/preprocessing/image.py | array_to_img | HangJie720/keras | python | def array_to_img(x, data_format=None, scale=True):
'Converts a 3D Numpy array to a PIL Image instance.\n\n # Arguments\n x: Input Numpy array.\n data_format: Image data format.\n either "channels_first" or "channels_last".\n scale: Whether to rescale image values\n to b... |
def img_to_array(img, data_format=None):
'Converts a PIL Image instance to a Numpy array.\n\n # Arguments\n img: PIL Image instance.\n data_format: Image data format,\n either "channels_first" or "channels_last".\n\n # Returns\n A 3D Numpy array.\n\n # Raises\n ValueE... | 553,028,858,712,164,860 | Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed. | keras/preprocessing/image.py | img_to_array | HangJie720/keras | python | def img_to_array(img, data_format=None):
'Converts a PIL Image instance to a Numpy array.\n\n # Arguments\n img: PIL Image instance.\n data_format: Image data format,\n either "channels_first" or "channels_last".\n\n # Returns\n A 3D Numpy array.\n\n # Raises\n ValueE... |
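Since `img_to_array` and `array_to_img` are near-inverses, a quick round trip exercises both rows above. This sketch assumes Keras (with Pillow) is installed and uses a synthetic `channels_last` array rather than a file on disk:

```python
import numpy as np
from keras.preprocessing.image import array_to_img, img_to_array

x = np.random.uniform(0, 255, size=(32, 32, 3))  # channels_last, 3D
img = array_to_img(x)            # PIL Image; values rescaled into [0, 255]
x_back = img_to_array(img)       # float32 numpy array again
assert x_back.shape == (32, 32, 3)
```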
def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
'Saves an image stored as a Numpy array to a path or file object.\n\n # Arguments\n path: Path or file object.\n x: Numpy array.\n data_format: Image data format,\n either "channels_first" or "channel... | -4,915,116,415,993,890,000 | Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined fro... | keras/preprocessing/image.py | save_img | HangJie720/keras | python | def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
'Saves an image stored as a Numpy array to a path or file object.\n\n # Arguments\n path: Path or file object.\n x: Numpy array.\n data_format: Image data format,\n either "channels_first" or "channel... |
def load_img(path, grayscale=False, target_size=None, interpolation='nearest'):
'Loads an image into PIL format.\n\n # Arguments\n path: Path to image file.\n grayscale: Boolean, whether to load the image as grayscale.\n target_size: Either `None` (default to original size)\n or t... | -3,354,841,818,060,038,700 | Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if t... | keras/preprocessing/image.py | load_img | HangJie720/keras | python | def load_img(path, grayscale=False, target_size=None, interpolation='nearest'):
'Loads an image into PIL format.\n\n # Arguments\n path: Path to image file.\n grayscale: Boolean, whether to load the image as grayscale.\n target_size: Either `None` (default to original size)\n or t... |
def _iter_valid_files(directory, white_list_formats, follow_links):
'Iterates on files with extension in `white_list_formats` contained in `directory`.\n\n # Arguments\n directory: Absolute path to the directory\n containing files to be counted\n white_list_formats: Set of strings contai... | 2,464,239,369,516,098,000 | Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yie... | keras/preprocessing/image.py | _iter_valid_files | HangJie720/keras | python | def _iter_valid_files(directory, white_list_formats, follow_links):
'Iterates on files with extension in `white_list_formats` contained in `directory`.\n\n # Arguments\n directory: Absolute path to the directory\n containing files to be counted\n white_list_formats: Set of strings contai... |
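The iterator's body is truncated here, but the documented behavior (walk `directory`, optionally follow symlinks, and yield files whose extension is whitelisted) can be sketched with `os.walk`; the exact yield format of the original may differ:

```python
import os

def iter_valid_files(directory, white_list_formats, follow_links):
    # Sorted walk gives a deterministic file order across runs.
    extensions = tuple('.' + ext for ext in white_list_formats)
    for root, _, files in sorted(os.walk(directory, followlinks=follow_links)):
        for fname in sorted(files):
            if fname.lower().endswith(extensions):
                yield root, fname
```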
def _count_valid_files_in_directory(directory, white_list_formats, split, follow_links):
'Counts files with extension in `white_list_formats` contained in `directory`.\n\n # Arguments\n directory: absolute path to the directory\n containing files to be counted\n white_list_formats: set o... | 6,417,385,685,608,633,000 | Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2... | keras/preprocessing/image.py | _count_valid_files_in_directory | HangJie720/keras | python | def _count_valid_files_in_directory(directory, white_list_formats, split, follow_links):
'Counts files with extension in `white_list_formats` contained in `directory`.\n\n # Arguments\n directory: absolute path to the directory\n containing files to be counted\n white_list_formats: set o... |
def _list_valid_filenames_in_directory(directory, white_list_formats, split, class_indices, follow_links):
'Lists paths of files in `subdir` with extensions in `white_list_formats`.\n\n # Arguments\n directory: absolute path to a directory containing the files to list.\n The directory name is u... | -1,473,300,423,495,430,400 | Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed exten... | keras/preprocessing/image.py | _list_valid_filenames_in_directory | HangJie720/keras | python | def _list_valid_filenames_in_directory(directory, white_list_formats, split, class_indices, follow_links):
'Lists paths of files in `subdir` with extensions in `white_list_formats`.\n\n # Arguments\n directory: absolute path to a directory containing the files to list.\n The directory name is u... |
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None):
'Takes numpy data & label arrays, and generates batches of augmented data.\n\n # Arguments\n x: Input data. Numpy array of rank 4 or a tuple.\n ... | -2,937,696,480,903,132,700 | Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the o... | keras/preprocessing/image.py | flow | HangJie720/keras | python | def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None):
'Takes numpy data & label arrays, and generates batches of augmented data.\n\n # Arguments\n x: Input data. Numpy array of rank 4 or a tuple.\n ... |
def flow_from_directory(self, directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest'):
'Takes the path to a directory & generate... | -610,350,686,517,741,700 | Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the gener... | keras/preprocessing/image.py | flow_from_directory | HangJie720/keras | python | def flow_from_directory(self, directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest'):
'Takes the path to a directory & generates ... |
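A typical call tying the two generator rows above together. The directory path and augmentation settings below are illustrative, and the layout assumed is the documented one (one subdirectory per class):

```python
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)
train_gen = datagen.flow_from_directory(
    'data/train',               # hypothetical path: one subdirectory per class
    target_size=(256, 256),
    color_mode='rgb',
    class_mode='categorical',
    batch_size=32,
    shuffle=True)
x_batch, y_batch = next(train_gen)  # (32, 256, 256, 3) images, one-hot labels
```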
def standardize(self, x):
'Applies the normalization configuration to a batch of inputs.\n\n # Arguments\n x: Batch of inputs to be normalized.\n\n # Returns\n The inputs, normalized.\n '
if self.preprocessing_function:
x = self.preprocessing_function(x)
if... | 3,214,915,977,443,593,700 | Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized. | keras/preprocessing/image.py | standardize | HangJie720/keras | python | def standardize(self, x):
'Applies the normalization configuration to a batch of inputs.\n\n # Arguments\n x: Batch of inputs to be normalized.\n\n # Returns\n The inputs, normalized.\n '
if self.preprocessing_function:
x = self.preprocessing_function(x)
if... |
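The visible fragment of `standardize` applies `preprocessing_function` first; the truncated remainder is the usual chain of rescaling and centering steps. A standalone sketch of the featurewise part, assuming `mean` and `std` were computed beforehand by `fit`:

```python
import numpy as np

def standardize(x, mean=None, std=None, epsilon=1e-7):
    x = np.asarray(x, dtype='float32')
    if mean is not None:         # featurewise_center
        x = x - mean
    if std is not None:          # featurewise_std_normalization
        x = x / (std + epsilon)  # epsilon guards against division by zero
    return x
```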
def random_transform(self, x, seed=None):
'Randomly augments a single image tensor.\n\n # Arguments\n x: 3D tensor, single image.\n seed: Random seed.\n\n # Returns\n A randomly transformed version of the input (same shape).\n '
img_row_axis = (self.row_axis... | 6,704,900,902,420,396,000 | Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape). | keras/preprocessing/image.py | random_transform | HangJie720/keras | python | def random_transform(self, x, seed=None):
'Randomly augments a single image tensor.\n\n # Arguments\n x: 3D tensor, single image.\n seed: Random seed.\n\n # Returns\n A randomly transformed version of the input (same shape).\n '
img_row_axis = (self.row_axis... |
def fit(self, x, augment=False, rounds=1, seed=None):
'Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.\n\n Only required if `featurewise_center` or\n `featurewise_std_normalization` or `zca_whitening` are set to True.\n\n # Argum... | 9,041,075,605,165,846,000 | Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the ch... | keras/preprocessing/image.py | fit | HangJie720/keras | python | def fit(self, x, augment=False, rounds=1, seed=None):
'Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.\n\n Only required if `featurewise_center` or\n `featurewise_std_normalization` or `zca_whitening` are set to True.\n\n # Argum... |
def _get_batches_of_transformed_samples(self, index_array):
'Gets a batch of transformed samples.\n\n # Arguments\n index_array: Array of sample indices to include in batch.\n\n # Returns\n A batch of transformed samples.\n '
raise NotImplementedError | 1,617,728,283,657,301,500 | Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples. | keras/preprocessing/image.py | _get_batches_of_transformed_samples | HangJie720/keras | python | def _get_batches_of_transformed_samples(self, index_array):
'Gets a batch of transformed samples.\n\n # Arguments\n index_array: Array of sample indices to include in batch.\n\n # Returns\n A batch of transformed samples.\n '
raise NotImplementedError |
def next(self):
'For python 2.x.\n\n # Returns\n The next batch.\n '
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array) | -782,806,053,129,658,100 | For python 2.x.
# Returns
The next batch. | keras/preprocessing/image.py | next | HangJie720/keras | python | def next(self):
'For python 2.x.\n\n # Returns\n The next batch.\n '
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array) |
def next(self):
'For python 2.x.\n\n # Returns\n The next batch.\n '
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array) | -782,806,053,129,658,100 | For python 2.x.
# Returns
The next batch. | keras/preprocessing/image.py | next | HangJie720/keras | python | def next(self):
'For python 2.x.\n\n # Returns\n The next batch.\n '
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array) |
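The two identical `next` rows are not duplication noise: `keras/preprocessing/image.py` defines the same Python 2 shim on two different iterator classes (Python 3 uses `__next__`). In isolation, the pattern is a lock around the index draw so multiple workers can pull batches safely; the names below are taken from the fragment:

```python
import threading

class BatchIterator:
    def __init__(self, index_generator):
        self.lock = threading.Lock()
        self.index_generator = index_generator

    def _get_batches_of_transformed_samples(self, index_array):
        raise NotImplementedError

    def __next__(self):
        # Only the index draw needs the lock; the (expensive) transformation
        # of the selected samples can then run outside it, in parallel.
        with self.lock:
            index_array = next(self.index_generator)
        return self._get_batches_of_transformed_samples(index_array)

    next = __next__  # Python 2 alias, as in the rows above
```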
def reroot(root: expression.Expression, source_path: path.Path) -> expression.Expression:
'Reroot to a new path, maintaining an input proto index.\n\n  Similar to root.get_descendant_or_error(source_path): however, this\n  method retains the ability to get a map to the original index.\n\n  Args:\n    root: the origi... | 7,610,696,560,224,901,000 | Reroot to a new path, maintaining an input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root. | struct2tensor/expression_impl/reroot.py | reroot | anukaal/struct2tensor | python | def reroot(root: expression.Expression, source_path: path.Path) -> expression.Expression:
'Reroot to a new path, maintaining an input proto index.\n\n  Similar to root.get_descendant_or_error(source_path): however, this\n  method retains the ability to get a map to the original index.\n\n  Args:\n    root: the origi... |
def __init__(self, root: expression.Expression):
'Constructor for proto index expression.\n\n Args:\n root: an expression that must return a RootNodeTensor.\n '
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root | 2,506,666,732,620,582,000 | Constructor for proto index expression.
Args:
root: an expression that must return a RootNodeTensor. | struct2tensor/expression_impl/reroot.py | __init__ | anukaal/struct2tensor | python | def __init__(self, root: expression.Expression):
'Constructor for proto index expression.\n\n Args:\n root: an expression that must return a RootNodeTensor.\n '
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root |
@classmethod
def ensure_user(cls, email, password, is_internal=False, is_admin=False, is_staff=False, is_researcher=False, is_contributor=True, is_user_manager=False, is_exporter=False, is_active=True, in_beta=False, in_alpha=False, update=False, **kwargs):
'\n Create a new user.\n '
from app.exte... | 4,482,907,887,963,877,000 | Create a new user. | app/modules/users/models.py | ensure_user | karenc/houston | python | @classmethod
def ensure_user(cls, email, password, is_internal=False, is_admin=False, is_staff=False, is_researcher=False, is_contributor=True, is_user_manager=False, is_exporter=False, is_active=True, in_beta=False, in_alpha=False, update=False, **kwargs):
'\n \n '
from app.extensions import db
... |
def test_pcap_input():
'test_pcap_input\n '
print('Testing PcapDataset')
pcap_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_pcap', 'http.pcap')
file_url = ('file://' + pcap_filename)
url_filenames = [file_url]
dataset = pcap_io.PcapDataset(url_filenames, batch=1)
... | 5,723,679,461,885,828,000 | test_pcap_input | tests/test_pcap_eager.py | test_pcap_input | HubBucket-Team/io | python | def ():
'\n '
print('Testing PcapDataset')
pcap_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_pcap', 'http.pcap')
file_url = ('file://' + pcap_filename)
url_filenames = [file_url]
dataset = pcap_io.PcapDataset(url_filenames, batch=1)
packets_total = 0
for v i... |
def requests_error_handler(func):
"Re-raise ConnectionError with help message.\n Continue on HTTP 404 error (server is on but workflow doesn't exist).\n Otherwise, re-raise from None to hide nested tracebacks.\n "
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
... | -771,253,056,658,749,200 | Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks. | caper/cromwell_rest_api.py | requests_error_handler | ENCODE-DCC/caper | python | def requests_error_handler(func):
"Re-raise ConnectionError with help message.\n Continue on HTTP 404 error (server is on but workflow doesn't exist).\n Otherwise, re-raise from None to hide nested tracebacks.\n "
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
... |
def is_valid_uuid(workflow_id, version=4):
"To validate Cromwell's UUID (lowercase only).\n This does not allow uppercase UUIDs.\n "
if (not isinstance(workflow_id, str)):
return False
if (not workflow_id.islower()):
return False
try:
UUID(workflow_id, version=version)
... | 3,885,909,614,512,867,000 | To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs. | caper/cromwell_rest_api.py | is_valid_uuid | ENCODE-DCC/caper | python | def is_valid_uuid(workflow_id, version=4):
"To validate Cromwell's UUID (lowercase only).\n This does not allow uppercase UUIDs.\n "
if (not isinstance(workflow_id, str)):
return False
if (not workflow_id.islower()):
return False
try:
UUID(workflow_id, version=version)
... |
def has_wildcard(workflow_id_or_label):
'Check if string or any element in list/tuple has\n a wildcard (? or *).\n\n Args:\n workflow_id_or_label:\n Workflow ID (str) or label (str).\n Or array (list, tuple) of them.\n '
if (workflow_id_or_label is None):
return Fal... | -6,452,021,859,195,065,000 | Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them. | caper/cromwell_rest_api.py | has_wildcard | ENCODE-DCC/caper | python | def has_wildcard(workflow_id_or_label):
'Check if string or any element in list/tuple has\n a wildcard (? or *).\n\n Args:\n workflow_id_or_label:\n Workflow ID (str) or label (str).\n Or array (list, tuple) of them.\n '
if (workflow_id_or_label is None):
return Fal... |
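A short completion consistent with `has_wildcard`'s docstring, assuming the truncated tail recurses over lists/tuples and tests for the two wildcard characters:

```python
def has_wildcard(workflow_id_or_label):
    if workflow_id_or_label is None:
        return False
    if isinstance(workflow_id_or_label, (list, tuple)):
        return any(has_wildcard(v) for v in workflow_id_or_label)
    return ('?' in workflow_id_or_label) or ('*' in workflow_id_or_label)

assert has_wildcard('my-workflow-*')
assert has_wildcard(['exact-label', 'v?'])
assert not has_wildcard('exact-label')
```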
def submit(self, source, dependencies=None, inputs=None, options=None, labels=None, on_hold=False):
'Submit a workflow.\n\n Returns:\n JSON Response from POST request submit a workflow\n '
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.rea... | -423,739,342,744,370,100 | Submit a workflow.
Returns:
JSON Response from POST request submit a workflow | caper/cromwell_rest_api.py | submit | ENCODE-DCC/caper | python | def submit(self, source, dependencies=None, inputs=None, options=None, labels=None, on_hold=False):
'Submit a workflow.\n\n Returns:\n JSON Response from POST request submit a workflow\n '
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.rea... |
def abort(self, workflow_ids=None, labels=None):
'Abort workflows matching workflow IDs or labels\n\n Returns:\n List of JSON responses from POST request\n for aborting workflows\n '
valid_workflow_ids = self.find_valid_workflow_ids(workflow_ids=workflow_ids, labels=labels)
... | 2,118,086,710,568,472,300 | Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows | caper/cromwell_rest_api.py | abort | ENCODE-DCC/caper | python | def abort(self, workflow_ids=None, labels=None):
'Abort workflows matching workflow IDs or labels\n\n Returns:\n List of JSON responses from POST request\n for aborting workflows\n '
valid_workflow_ids = self.find_valid_workflow_ids(workflow_ids=workflow_ids, labels=labels)
... |
def release_hold(self, workflow_ids=None, labels=None):
'Release hold of workflows matching workflow IDs or labels\n\n Returns:\n List of JSON responses from POST request\n for releasing hold of workflows\n '
valid_workflow_ids = self.find_valid_workflow_ids(workflow_ids=work... | -4,205,324,820,267,993,600 | Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows | caper/cromwell_rest_api.py | release_hold | ENCODE-DCC/caper | python | def release_hold(self, workflow_ids=None, labels=None):
'Release hold of workflows matching workflow IDs or labels\n\n Returns:\n List of JSON responses from POST request\n for releasing hold of workflows\n '
valid_workflow_ids = self.find_valid_workflow_ids(workflow_ids=work... |
def get_default_backend(self):
'Retrieve default backend name\n\n Returns:\n Default backend name\n '
return self.get_backends()['defaultBackend'] | 2,825,734,983,184,767,500 | Retrieve default backend name
Returns:
Default backend name | caper/cromwell_rest_api.py | get_default_backend | ENCODE-DCC/caper | python | def get_default_backend(self):
'Retrieve default backend name\n\n Returns:\n Default backend name\n '
return self.get_backends()['defaultBackend'] |
def get_backends(self):
'Retrieve available backend names and default backend name\n\n Returns:\n JSON response with keys "defaultBackend" and "supportedBackends"\n Example: {"defaultBackend":"Local","supportedBackends":\n ["Local","aws","gcp","pbs","sge","slurm"]}\... | 6,374,964,352,086,033,000 | Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]} | caper/cromwell_rest_api.py | get_backends | ENCODE-DCC/caper | python | def get_backends(self):
'Retrieve available backend names and default backend name\n\n Returns:\n JSON response with keys "defaultBackend" and "supportedBackends"\n Example: {"defaultBackend":"Local","supportedBackends":\n ["Local","aws","gcp","pbs","sge","slurm"]}\... |
def find_valid_workflow_ids(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).\n If so then we don't have to send the server a query to get matching workflow IDs.\n "
if ((not labels) and workflow_i... | -8,487,964,851,941,723,000 | Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs. | caper/cromwell_rest_api.py | find_valid_workflow_ids | ENCODE-DCC/caper | python | def find_valid_workflow_ids(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
"Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).\n If so then we don't have to send the server a query to get matching workflow IDs.\n "
if ((not labels) and workflow_i... |
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"Retrieve metadata for workflows matching workflow IDs or labels\n\n Args:\n workflow_ids:\n List of workflows IDs to find workflows matched.\n labels:\n List of Caper's string... | -703,684,439,540,065,200 | Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflows IDs to find workflows matched.
labels:
List of Caper's string labels to find workflows matched.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow'... | caper/cromwell_rest_api.py | get_metadata | ENCODE-DCC/caper | python | def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"Retrieve metadata for workflows matching workflow IDs or labels\n\n Args:\n workflow_ids:\n List of workflows IDs to find workflows matched.\n labels:\n List of Caper's string... |
def get_labels(self, workflow_id):
'Get labels JSON for a specified workflow\n\n Returns:\n Labels JSON for a workflow\n '
if ((workflow_id is None) or (not is_valid_uuid(workflow_id))):
return
r = self.__request_get(CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id))... | -300,777,881,764,296,450 | Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow | caper/cromwell_rest_api.py | get_labels | ENCODE-DCC/caper | python | def get_labels(self, workflow_id):
'Get labels JSON for a specified workflow\n\n Returns:\n Labels JSON for a workflow\n '
if ((workflow_id is None) or (not is_valid_uuid(workflow_id))):
return
r = self.__request_get(CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id))... |
def get_label(self, workflow_id, key):
'Get a label for a key in a specified workflow\n\n Returns:\n Value for a specified key in labels JSON for a workflow\n '
labels = self.get_labels(workflow_id)
if (labels is None):
return
if (key in labels):
return labels[ke... | 8,523,240,131,071,194,000 | Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow | caper/cromwell_rest_api.py | get_label | ENCODE-DCC/caper | python | def get_label(self, workflow_id, key):
'Get a label for a key in a specified workflow\n\n Returns:\n Value for a specified key in labels JSON for a workflow\n '
labels = self.get_labels(workflow_id)
if (labels is None):
return
if (key in labels):
return labels[ke... |
def update_labels(self, workflow_id, labels):
'Update labels for a specified workflow with\n a list of (key, val) tuples\n '
if ((workflow_id is None) or (labels is None)):
return
r = self.__request_patch(CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels)
logger.deb... | 6,315,808,637,045,976,000 | Update labels for a specified workflow with
a list of (key, val) tuples | caper/cromwell_rest_api.py | update_labels | ENCODE-DCC/caper | python | def update_labels(self, workflow_id, labels):
'Update labels for a specified workflow with\n a list of (key, val) tuples\n '
if ((workflow_id is None) or (labels is None)):
return
r = self.__request_patch(CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels)
logger.deb... |
def find_with_wildcard(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
'Retrieves all workflows from Cromwell server.\n And then find matching workflows by ID or labels.\n Wildcards (? and *) are allowed for both parameters.\n '
result = []
if ((not workflow_ids) and (n... | -267,213,617,896,036,400 | Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters. | caper/cromwell_rest_api.py | find_with_wildcard | ENCODE-DCC/caper | python | def find_with_wildcard(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
'Retrieves all workflows from Cromwell server.\n And then find matching workflows by ID or labels.\n Wildcards (? and *) are allowed for both parameters.\n '
result = []
if ((not workflow_ids) and (n... |
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
'Finds workflows by exactly matching workflow IDs (UUIDs).\n Does OR search for a list of workflow IDs.\n Invalid UUID in `workflows_ids` will be ignored without warning.\n Wildcards (? and *) are not allowed.\n\n ... | -2,741,076,046,262,680,600 | Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUID in `workflows_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-cas... | caper/cromwell_rest_api.py | find_by_workflow_ids | ENCODE-DCC/caper | python | def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
'Finds workflows by exactly matching workflow IDs (UUIDs).\n Does OR search for a list of workflow IDs.\n Invalid UUID in `workflows_ids` will be ignored without warning.\n Wildcards (? and *) are not allowed.\n\n ... |
def find_by_labels(self, labels=None, exclude_subworkflow=True):
'Finds workflows by exactly matching labels (key, value) tuples.\n Does OR search for a list of label key/value pairs.\n Wildcards (? and *) are not allowed.\n\n Args:\n labels:\n List of labels (key/valu... | 2,556,947,127,859,812,400 | Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs. | caper/cromwell_rest_api.py | find_by_labels | ENCODE-DCC/caper | python | def find_by_labels(self, labels=None, exclude_subworkflow=True):
'Finds workflows by exactly matching labels (key, value) tuples.\n Does OR search for a list of label key/value pairs.\n Wildcards (? and *) are not allowed.\n\n Args:\n labels:\n List of labels (key/valu... |
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
'Wrapper for the following three find functions.\n - find_with_wildcard\n - find_by_workflow_ids\n - find_by_labels\n\n Find workflows by matching workflow IDs or label (key, value) tuples.\n Does OR search ... | -1,235,322,776,911,959,000 | Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) in both parameters are allowed but Caper will
retrieve a list of all workflows, whic... | caper/cromwell_rest_api.py | find | ENCODE-DCC/caper | python | def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
'Wrapper for the following three find functions.\n - find_with_wildcard\n - find_by_workflow_ids\n - find_by_labels\n\n Find workflows by matching workflow IDs or label (key, value) tuples.\n Does OR search ... |
def __init_auth(self):
'Init auth object\n '
if ((self._user is not None) and (self._password is not None)):
self._auth = (self._user, self._password)
else:
self._auth = None | 4,628,404,205,856,784,000 | Init auth object | caper/cromwell_rest_api.py | __init_auth | ENCODE-DCC/caper | python | def __init_auth(self):
'\n '
if ((self._user is not None) and (self._password is not None)):
self._auth = (self._user, self._password)
else:
self._auth = None |
@requests_error_handler
def __request_get(self, endpoint, params=None):
'GET request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.get(url, auth=self._auth, params=params, headers={'acce... | -7,574,936,274,253,186,000 | GET request
Returns:
JSON response | caper/cromwell_rest_api.py | __request_get | ENCODE-DCC/caper | python | @requests_error_handler
def __request_get(self, endpoint, params=None):
'GET request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.get(url, auth=self._auth, params=params, headers={'acce... |
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
'POST request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.post(url, files=manifest, auth=self._auth, headers=... | 8,571,179,937,235,917,000 | POST request
Returns:
JSON response | caper/cromwell_rest_api.py | __request_post | ENCODE-DCC/caper | python | @requests_error_handler
def __request_post(self, endpoint, manifest=None):
'POST request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.post(url, files=manifest, auth=self._auth, headers=... |
@requests_error_handler
def __request_patch(self, endpoint, data):
'POST request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.patch(url, data=data, auth=self._auth, headers={'accept': '... | -8,638,686,449,579,552,000 | POST request
Returns:
JSON response | caper/cromwell_rest_api.py | __request_patch | ENCODE-DCC/caper | python | @requests_error_handler
def __request_patch(self, endpoint, data):
'POST request\n\n Returns:\n JSON response\n '
url = (CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port) + endpoint)
resp = requests.patch(url, data=data, auth=self._auth, headers={'accept': '... |
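The three private request helpers above share one shape: build the URL from host/port plus an endpoint, attach optional basic auth, ask for JSON, and let `requests` surface HTTP errors so the `@requests_error_handler` decorator can translate them. A standalone sketch of that shape (the `QUERY_URL` template and error-handling details are assumptions):

```python
import requests

def request_get(hostname, port, endpoint, auth=None, params=None):
    # e.g. hostname='localhost', port=8000, endpoint='/api/workflows/v1/query'
    url = 'http://{hostname}:{port}'.format(hostname=hostname, port=port) + endpoint
    resp = requests.get(url, auth=auth, params=params,
                        headers={'accept': 'application/json'})
    resp.raise_for_status()  # decorated callers translate HTTP errors
    return resp.json()
```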
def _print_job_folders(self, file_list, show_plaster_json=True):
'\n file_list is a list of munches [Munch(folder="folder", name="foo.txt", size=123, mtime=123456789)]\n '
if (len(file_list) == 0):
print('No files found')
return
folders = {file.folder: Munch(folder=file.folder,... | 6,601,777,888,008,007,000 | file_list is a list of munches [Munch(folder="folder", name="foo.txt", size=123, mtime=123456789)] | plaster/main.py | _print_job_folders | erisyon/plaster | python | def _print_job_folders(self, file_list, show_plaster_json=True):
'\n \n '
if (len(file_list) == 0):
print('No files found')
return
folders = {file.folder: Munch(folder=file.folder, size_gb=0, file_count=0) for file in file_list}
gb = (1024 ** 3)
total_gb = 0
for fil... |
def run_nbstripout(self):
'Strip all notebooks of output to save space in commits'
important('Stripping Notebooks...')
result = ((local['find'][('.', '-type', 'f', '-not', '-path', '*/\\.*', '-name', '*.ipynb', '-print')] | local['xargs']['nbstripout']) & TF(FG=True))
if (not result):
raise Comm... | -8,431,873,764,699,112,000 | Strip all notebooks of output to save space in commits | plaster/main.py | run_nbstripout | erisyon/plaster | python | def run_nbstripout(self):
important('Stripping Notebooks...')
result = ((local['find'][('.', '-type', 'f', '-not', '-path', '*/\\.*', '-name', '*.ipynb', '-print')] | local['xargs']['nbstripout']) & TF(FG=True))
if (not result):
raise CommandError |
def sigproc_test(self, jobs_folder):
'\n This is adapted from zest_sigproc_v2_integration\n '
profile_folder = (jobs_folder / '_profile')
profile_folder.delete()
job_folder = (profile_folder / 'sigproc_test')
source_folder = (profile_folder / '_synth_field')
job_folder.mkdir()
... | 5,582,201,818,341,143,000 | This is adapted from zest_sigproc_v2_integration | plaster/main.py | sigproc_test | erisyon/plaster | python | def sigproc_test(self, jobs_folder):
'\n \n '
profile_folder = (jobs_folder / '_profile')
profile_folder.delete()
job_folder = (profile_folder / 'sigproc_test')
source_folder = (profile_folder / '_synth_field')
job_folder.mkdir()
source_folder.mkdir()
dim = (1024, 1024)
... |
def main(self, run_path, symbol):
'\n run_path: path to the run folder\n symbol: Eg: "sigproc_v2.sig"\n '
run = RunResult(run_path)
parts = symbol.split('.')
result = run[parts[0]]
sym = getattr(result, parts[1])
if callable(sym):
val = sym()
else:
val = ... | 6,500,575,292,112,177,000 | run_path: path to the run folder
symbol: Eg: "sigproc_v2.sig" | plaster/main.py | main | erisyon/plaster | python | def main(self, run_path, symbol):
'\n run_path: path to the run folder\n symbol: Eg: "sigproc_v2.sig"\n '
run = RunResult(run_path)
parts = symbol.split('.')
result = run[parts[0]]
sym = getattr(result, parts[1])
if callable(sym):
val = sym()
else:
val = ... |
def main(self, run_path):
"\n run_path: path to the run folder (don't forget this is a subfolder of job)\n "
run = RunResult(run_path)
name = run.run_folder.parent.name
prefix = f'{name}__'
tell(f'Prefixing saved files with {prefix}')
tell('Saving sig.npy')
np.save(f'{prefix}si... | 1,042,366,781,578,320,500 | run_path: path to the run folder (don't forget this is a subfolder of job) | plaster/main.py | main | erisyon/plaster | python | def main(self, run_path):
"\n \n "
run = RunResult(run_path)
name = run.run_folder.parent.name
prefix = f'{name}__'
tell(f'Prefixing saved files with {prefix}')
tell('Saving sig.npy')
np.save(f'{prefix}sig.npy', run.sigproc_v2.sig())
tell('Saving noi.npy')
np.save(f'{pr... |
def solve_polynomial_join(left, right, reverse=0):
"\n Solves for a parametric cubic polynomial curve joining the right side of left\n to the left side of right. The curve matches slope and position at it's\n boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1\n being the right.\n\n m... | 4,725,810,013,788,473,000 | Solves for a parametric cubic polynomial curve joining the right side of left
to the left side of right. The curve matches slope and position at it's
boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1
being the right.
method: parametric cubic matching position and slope of endpoints.
... | whisk/test_merge3.py | solve_polynomial_join | aiporre/whisk | python | def solve_polynomial_join(left, right, reverse=0):
"\n Solves for a parametric cubic polynomial curve joining the right side of left\n to the left side of right. The curve matches slope and position at it's\n boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1\n being the right.\n\n m... |
def filter_ends(wv, min_score, shape, border=10):
'\n Return candidate ends for joining.\n\n Returns an iterator yielding (Whisker_Seg, side).\n '
(maxy, maxx) = [(x - border) for x in shape]
(minx, miny) = (border, border)
test_point = (lambda x, y: ((x > minx) and (x < maxx) and (y > miny) and (y <... | 246,672,456,328,021,300 | Return candidate ends for joining.
Returns an iterator yielding (Whisker_Seg, side). | whisk/test_merge3.py | filter_ends | aiporre/whisk | python | def filter_ends(wv, min_score, shape, border=10):
'\n Return candidate ends for joining.\n\n Returns an iterator yielding (Whisker_Seg, side).\n '
(maxy, maxx) = [(x - border) for x in shape]
(minx, miny) = (border, border)
test_point = (lambda x, y: ((x > minx) and (x < maxx) and (y > miny) and (y <... |
def __init__(self, wvd, shape, scale):
' `wvd` may be either a dict or list of whiskers '
object.__init__(self)
self._map = {}
self._shape = shape
self._scale = scale
self._stride = stride = (shape[1] / scale)
self.topx = (lambda p: (int((p[0] / scale)) + (stride * int((p[1] / scale)))))
... | -3,386,438,633,474,830,000 | `wvd` may be either a dict or list of whiskers | whisk/test_merge3.py | __init__ | aiporre/whisk | python | def __init__(self, wvd, shape, scale):
' '
object.__init__(self)
self._map = {}
self._shape = shape
self._scale = scale
self._stride = stride = (shape[1] / scale)
self.topx = (lambda p: (int((p[0] / scale)) + (stride * int((p[1] / scale)))))
self._build_inverse_table(wvd) |
def update(self, changes):
' Changes is a dict mapping old whisker segments to new segments '
last = None
for (w, p) in changes.items():
self.remove(w)
if p:
self.add(p[0])
self.add(p[(- 1)])
last = p[1]
if last:
self.add(last) | -137,687,085,251,495,400 | Changes is a dict mapping old whisker segments to new segments | whisk/test_merge3.py | update | aiporre/whisk | python | def update(self, changes):
' '
last = None
for (w, p) in changes.items():
self.remove(w)
if p:
self.add(p[0])
self.add(p[(- 1)])
last = p[1]
if last:
self.add(last) |
def __next__(self):
' This changes the inverse table by removing hits.\n\n Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple\n or None, if done.\n '
todelete = []
retval = None
for (px, s) in self._map.items():
todelete.append(px)
if (len(s) > 1):
... | 5,829,238,777,711,661,000 | This changes the inverse table by removing hits.
Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple
or None, if done. | whisk/test_merge3.py | __next__ | aiporre/whisk | python | def __next__(self):
' This changes the inverse table by removing hits.\n\n Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple\n or None, if done.\n '
todelete = []
retval = None
for (px, s) in self._map.items():
todelete.append(px)
if (len(s) > 1):
... |
@patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initia... | -4,977,607,476,640,469,000 | Will return the instructions file path set in __init__ | tests/test_xmlparser.py | test_instructionspath | 3Peso/mosk | python | @patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initia... |
@patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initia... | 8,851,255,329,636,294,000 | Will raise FileNotFound exception. | tests/test_xmlparser.py | test_instructionspath_instruction_file_not_there | 3Peso/mosk | python | @patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initia... |
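One detail worth noting about the heavily stacked `@patch` decorators in these test rows: `unittest.mock` applies them bottom-up, so the mock arguments arrive at the test method in reverse order of the decorator list. A minimal self-contained illustration (the patched targets here are stand-ins, not the real mosk modules):

```python
from unittest import mock

@mock.patch('os.path.isfile')  # applied last  -> second mock argument
@mock.patch('os.path.isdir')   # applied first -> first mock argument
def check(isdir_mock, isfile_mock):
    import os.path
    assert os.path.isdir is isdir_mock
    assert os.path.isfile is isfile_mock

check()  # the decorators inject the mocks automatically
```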
def test__validate_schema_valid_instructions(self):
'\n Should do nothing.\n '
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/valid_instructions.xml'... | 15,335,525,377,500,458 | Should do nothing. | tests/test_xmlparser.py | test__validate_schema_valid_instructions | 3Peso/mosk | python | def test__validate_schema_valid_instructions(self):
'\n \n '
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/valid_instructions.xml')
except XMLSc... |
def test__validate_schema_invalid_instructions(self):
'\n Should raise exception.\n '
from instructionparsers.xmlparser import XmlParser
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
self.assertRaises(XMLSchemaException, XmlParser._validate_schema, './instructions/invali... | -6,052,100,148,682,693,000 | Should raise exception. | tests/test_xmlparser.py | test__validate_schema_invalid_instructions | 3Peso/mosk | python | def test__validate_schema_invalid_instructions(self):
'\n \n '
from instructionparsers.xmlparser import XmlParser
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
self.assertRaises(XMLSchemaException, XmlParser._validate_schema, './instructions/invalid_instructions.xml') |
def test__validate_schema_minimal_valid_instructions(self):
'\n Should do nothing.\n '
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/minimal_valid_i... | 2,742,942,849,900,286,500 | Should do nothing. | tests/test_xmlparser.py | test__validate_schema_minimal_valid_instructions | 3Peso/mosk | python | def test__validate_schema_minimal_valid_instructions(self):
'\n \n '
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/minimal_valid_instructions.xml')
... |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__initializemetadata_valid_instructions(self, path_mock):
'\n Should initialize member \'metadata\' with all elements which have the attribute "title".\n '
metadata = ('Examiner', 'Assignment', 'Client', 'Description of Arte... | 3,682,683,099,307,162,000 | Should initialize member 'metadata' with all elements which have the attribute "title". | tests/test_xmlparser.py | test__initializemetadata_valid_instructions | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__initializemetadata_valid_instructions(self, path_mock):
'\n Should initialize member \'metadata\' with all elements which have the attribute "title".\n '
metadata = ('Examiner', 'Assignment', 'Client', 'Description of Arte... |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
'\n Should initialize collectors for all XML elements which have the attribute "module".\n '
from instructionparsers.xmlparser import XmlParser
from instructionpar... | -2,930,029,731,182,761,500 | Should initialize collectors for all XML elements which have the attribute "module". | tests/test_xmlparser.py | test__init_instructions_valid_instructions | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
from instructionparsers.wrapper import InstructionWrapper
instructions = './instructions/valid_inst... |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
'\n Should return the instruction tree starting with "Root" node.\n '
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instr... | -8,174,262,218,797,025,000 | Should return the instruction tree starting with "Root" node. | tests/test_xmlparser.py | test__init_instructions_valid_instructions | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=inst... |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_first_instruction_element(self, path_mock):
'\n Should return the xml element with the title "Root".\n '
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
... | -8,376,315,482,908,859,000 | Should return the xml element with the title "Root". | tests/test_xmlparser.py | test__get_first_instruction_element | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_first_instruction_element(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instruction... |
def test__get_placeholder_name(self):
'\n If XmlElement contains attribute "placeholder" method should return value of this attribute.\n '
from instructionparsers.xmlparser import XmlParser
document = Document()
element = document.createElement('Demo')
element.setAttribute(XmlParser.PL... | 4,556,195,059,788,626,000 | If XmlElement contains attribute "placeholder" method should return value of this attribute. | tests/test_xmlparser.py | test__get_placeholder_name | 3Peso/mosk | python | def test__get_placeholder_name(self):
'\n \n '
from instructionparsers.xmlparser import XmlParser
document = Document()
element = document.createElement('Demo')
element.setAttribute(XmlParser.PLACEHOLDERNAME_ATTRIBUTE, 'test')
result = XmlParser._get_placeholder_name(element)
s... |
def test__get_placeholder_name_no_placeholder(self):
'\n If XmlElement does not contain attribute "placeholder" method should return an empty string.\n '
from instructionparsers.xmlparser import XmlParser
element = Element('Demo')
result = XmlParser._get_placeholder_name(element)
self.... | 1,274,365,093,746,357,800 | If XmlElement does not contain attribute "placeholder" method should return an empty string. | tests/test_xmlparser.py | test__get_placeholder_name_no_placeholder | 3Peso/mosk | python | def test__get_placeholder_name_no_placeholder(self):
'\n \n '
from instructionparsers.xmlparser import XmlParser
element = Element('Demo')
result = XmlParser._get_placeholder_name(element)
self.assertEqual(result, '') |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict(self, path_mock):
'\n Should return UserDict\n '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse('./instructions/instruction... | 8,628,808,085,382,813,000 | Should return UserDict | tests/test_xmlparser.py | test__get_parameter_attributes_return_userdict | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse('./instructions/instructions_stub.xml').documentE... |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict_with_2_entries(self, path_mock):
'\n Should return dict with two entries\n '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse... | 4,547,153,624,282,183,700 | Should return dict with two entries | tests/test_xmlparser.py | test__get_parameter_attributes_return_userdict_with_2_entries | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict_with_2_entries(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse('./instructions/instructions_stub.... |
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_should_return_none_special_attributes(self, path_mock):
'\n Should return dictionary with "users_with_homedir" key and with "properties" key.\n '
from instructionparsers.xmlparser import XmlParser
... | -7,049,063,655,947,019,000 | Should return dicitionry with "users_with_homedir" key and with "properties" key. | tests/test_xmlparser.py | test__get_parameter_attributes_should_return_none_special_attributes | 3Peso/mosk | python | @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_should_return_none_special_attributes(self, path_mock):
'\n \n '
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse('./instructions/instruction... |
def LessOptionsStart(builder):
'This method is deprecated. Please switch to Start.'
return Start(builder) | -7,271,987,008,384,153,000 | This method is deprecated. Please switch to Start. | tf2onnx/tflite/LessOptions.py | LessOptionsStart | LoicDagnas/tensorflow-onnx | python | def LessOptionsStart(builder):
return Start(builder) |
def LessOptionsEnd(builder):
'This method is deprecated. Please switch to End.'
return End(builder) | 7,464,512,795,446,503,000 | This method is deprecated. Please switch to End. | tf2onnx/tflite/LessOptions.py | LessOptionsEnd | LoicDagnas/tensorflow-onnx | python | def LessOptionsEnd(builder):
return End(builder) |
@classmethod
def GetRootAsLessOptions(cls, buf, offset=0):
'This method is deprecated. Please switch to GetRootAs.'
return cls.GetRootAs(buf, offset) | 7,481,250,055,416,234,000 | This method is deprecated. Please switch to GetRootAs. | tf2onnx/tflite/LessOptions.py | GetRootAsLessOptions | LoicDagnas/tensorflow-onnx | python | @classmethod
def GetRootAsLessOptions(cls, buf, offset=0):
return cls.GetRootAs(buf, offset) |
@contextlib.contextmanager
def db_role_setter(conn, *, role_name):
'Since we catch exceptions here and log, temporarily install a customised hook.'
with conn.cursor() as cursor:
cursor.execute('SHOW ROLE')
old_role = cursor.fetchone()[0]
cursor.execute('SET ROLE %s', [role_name])
... | 586,806,249,027,235,700 | Since we catch exceptions here and log, temporarily install a customised hook. | src/dirbs/utils.py | db_role_setter | nealmadhu/DIRBS-Core | python | @contextlib.contextmanager
def db_role_setter(conn, *, role_name):
with conn.cursor() as cursor:
cursor.execute('SHOW ROLE')
old_role = cursor.fetchone()[0]
cursor.execute('SET ROLE %s', [role_name])
(yield role_name)
cursor.execute('SET ROLE %s', [old_role]) |
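The `db_role_setter` context manager is complete in this row. Given an open psycopg2 connection `conn`, usage looks like this; the statement inside the block is illustrative, and `dirbs_core_power_user` is one of the roles mentioned later in this table:

```python
# Run some statements under a more privileged role, restoring the old one after.
with db_role_setter(conn, role_name='dirbs_core_power_user') as role:
    with conn.cursor() as cursor:
        cursor.execute('CREATE SCHEMA IF NOT EXISTS hll')
```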
def compute_md5_hash(file, buf_size=65536):
'Utility method to generate a md5 hash of file.'
md5_hash = hashlib.md5()
while True:
data = file.read(buf_size)
if (not data):
break
md5_hash.update(data)
return md5_hash.hexdigest() | 6,161,984,623,669,644,000 | Utility method to generate a md5 hash of file. | src/dirbs/utils.py | compute_md5_hash | nealmadhu/DIRBS-Core | python | def compute_md5_hash(file, buf_size=65536):
md5_hash = hashlib.md5()
while True:
data = file.read(buf_size)
if (not data):
break
md5_hash.update(data)
return md5_hash.hexdigest() |
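`compute_md5_hash` is also complete; it expects a file-like object opened in binary mode and streams it in 64 KiB chunks, so usage is simply (the filename is hypothetical):

```python
with open('imei_import.csv', 'rb') as f:   # hypothetical file; must be binary mode
    digest = compute_md5_hash(f)           # 32-character lowercase hex string
```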
def cachebusted_filename_from_contents(byte_array):
'Utility method to generate a unique filename based on the hash of a given content array (of bytes).'
return compute_md5_hash(io.BytesIO(byte_array))[:8] | -1,696,663,648,842,604,000 | Utility method to generate a unique filename based on the hash of a given content array (of bytes). | src/dirbs/utils.py | cachebusted_filename_from_contents | nealmadhu/DIRBS-Core | python | def cachebusted_filename_from_contents(byte_array):
return compute_md5_hash(io.BytesIO(byte_array))[:8] |
def cli_db_params_from_dsn(dsn, user=None, database=None, port=None, host=None):
'Convert DB-related command-line arguments from a DSN into a format appropriate for DIRBS CLI commands.'
db_args = []
db_args.append('--db-user={0}'.format((user if (user is not None) else dsn.get('user'))))
db_args.append(... | -2,480,064,948,385,259,500 | Convert DB-related command-line arguments from a DSN into a format appropriate for DIRBS CLI commands. | src/dirbs/utils.py | cli_db_params_from_dsn | nealmadhu/DIRBS-Core | python | def cli_db_params_from_dsn(dsn, user=None, database=None, port=None, host=None):
db_args = []
db_args.append('--db-user={0}'.format((user if (user is not None) else dsn.get('user'))))
db_args.append('--db-name={0}'.format((database if (database is not None) else dsn.get('database'))))
db_args.appen... |
def create_db_connection(db_config, readonly=False, autocommit=False):
'Creates a DB connection to the database.\n\n Imports the config module, which results in the config being read from disk.\n Changes to the config file made after this method has been called will not be read.\n\n Calling entity should h... | -6,769,060,532,684,972,000 | Creates a DB connection to the database.
Imports the config module, which results in the config being read from disk.
Changes to the config file made after this method has been called will not be read.
Calling entity should handle connection errors as appropriate. | src/dirbs/utils.py | create_db_connection | nealmadhu/DIRBS-Core | python | def create_db_connection(db_config, readonly=False, autocommit=False):
'Creates a DB connection to the database.\n\n Imports the config module, which results in the config being read from disk.\n Changes to the config file made after this method has been called will not be read.\n\n Calling entity should h... |
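A plausible core of `create_db_connection` with psycopg2, assuming `db_config` exposes standard DSN fields; the real function also reads configuration from disk, as its docstring notes:

```python
import psycopg2

def create_db_connection(db_config, readonly=False, autocommit=False):
    conn = psycopg2.connect(host=db_config['host'], port=db_config['port'],
                            dbname=db_config['database'], user=db_config['user'],
                            password=db_config['password'])
    # psycopg2 exposes both session flags through one call.
    conn.set_session(readonly=readonly, autocommit=autocommit)
    return conn
```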
def verify_db_schema(conn, required_role):
'Function that runs all DB verification checks.'
warn_if_db_superuser(conn)
verify_db_roles_installed(conn)
verify_db_role_for_job(conn, required_role)
verify_db_schema_version(conn)
verify_db_ownership(conn)
verify_hll_schema(conn)
verify_core_... | 887,756,644,857,539,100 | Function that runs all DB verification checks. | src/dirbs/utils.py | verify_db_schema | nealmadhu/DIRBS-Core | python | def verify_db_schema(conn, required_role):
warn_if_db_superuser(conn)
verify_db_roles_installed(conn)
verify_db_role_for_job(conn, required_role)
verify_db_schema_version(conn)
verify_db_ownership(conn)
verify_hll_schema(conn)
verify_core_schema(conn)
verify_db_search_path(conn) |
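verify_db_schema simply runs each verify_* helper below in turn, so a job entry point can run the whole battery up front. A hedged sketch, reusing the role name from the role checks below:

conn = create_db_connection(config.db_config)
verify_db_schema(conn, 'dirbs_core_power_user')  # raises, e.g. DatabaseSchemaException or DatabaseRoleCheckException, on the first failed check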
def warn_if_db_superuser(conn):
'Warn if the current DB user is a PostgreSQL superuser.'
logger = logging.getLogger('dirbs.db')
if is_db_user_superuser(conn):
logger.warning('Running as PostgreSQL superuser -- for security reasons, we recommend running all DIRBS tasks as a normal user')
logger = logging.getLogger('dirbs.db')
if is_db_user_superuser(conn):
logger.warning('Running as PostgreSQL superuser -- for security reasons, we recommend running all DIRBS tasks as a normal user')
def verify_db_roles_installed(conn):
'Function used to verify whether roles have been installed in the DB.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cursor:
cursor.execute("SELECT 1 AS res FROM pg_roles WHERE rolname = 'dirbs_core_power_user'")
if (cursor.fetchone() is No... | -1,286,691,893,464,138,200 | Function used to verify whether roles have been installed in the DB. | src/dirbs/utils.py | verify_db_roles_installed | nealmadhu/DIRBS-Core | python | def verify_db_roles_installed(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cursor:
cursor.execute("SELECT 1 AS res FROM pg_roles WHERE rolname = 'dirbs_core_power_user'")
if (cursor.fetchone() is None):
logger.error("DIRBS Core roles have not been installe... |
def verify_db_role_for_job(conn, expected_role):
'Function used to verify that the current DB user is in the role expected for this job.'
if (not is_db_user_dirbs_role(conn, expected_role)):
role = conn.get_dsn_parameters().get('user')
raise DatabaseRoleCheckException('Current DB user {0} does n... | 866,532,192,136,172,500 | Function used to verify that the current DB user is in the role expected for this job. | src/dirbs/utils.py | verify_db_role_for_job | nealmadhu/DIRBS-Core | python | def verify_db_role_for_job(conn, expected_role):
if (not is_db_user_dirbs_role(conn, expected_role)):
role = conn.get_dsn_parameters().get('user')
raise DatabaseRoleCheckException('Current DB user {0} does not have required role: {1}. To fix this:\n\t1. GRANT {1} TO {0};'.format(role, expected_... |
def verify_db_schema_version(conn):
'Function used to check whether the DB schema version matches the code schema version.'
logger = logging.getLogger('dirbs.db')
version = query_db_schema_version(conn)
if (version != code_db_schema_version):
if (version is None):
logger.error('DB sc... | 5,007,492,157,737,945,000 | Function used to check whether the DB schema version matches the code schema version. | src/dirbs/utils.py | verify_db_schema_version | nealmadhu/DIRBS-Core | python | def verify_db_schema_version(conn):
logger = logging.getLogger('dirbs.db')
version = query_db_schema_version(conn)
if (version != code_db_schema_version):
if (version is None):
logger.error('DB schema has not been installed via dirbs-db install!')
raise DatabaseSchemaExc... |
def verify_db_ownership(conn):
'Function used to check whether DB ownership matches what we expect.'
logger = logging.getLogger('dirbs.db')
if (query_db_ownership(conn) != 'dirbs_core_power_user'):
logger.error('Database is not owned by the dirbs_core_power_user group! Please run the following as the cu... | 5,671,161,847,534,446,000 | Function used to check whether DB ownership matches what we expect. | src/dirbs/utils.py | verify_db_ownership | nealmadhu/DIRBS-Core | python | def verify_db_ownership(conn):
logger = logging.getLogger('dirbs.db')
if (query_db_ownership(conn) != 'dirbs_core_power_user'):
logger.error('Database is not owned by the dirbs_core_power_user group! Please run the following as the current DB owner (whilst logged into the database):\n\tALTER DATABASE <...
def verify_core_schema(conn):
'Function used to check whether Core schema exists and has correct ownership.'
if (not query_schema_existence(conn, 'core')):
raise DatabaseSchemaException("Missing schema 'core' in DB. Was dirbs-db install run successfully?")
if (query_schema_ownership(conn, 'core') !=... | 5,272,892,874,875,728,000 | Function used to check whether Core schema exists and has correct ownership. | src/dirbs/utils.py | verify_core_schema | nealmadhu/DIRBS-Core | python | def verify_core_schema(conn):
if (not query_schema_existence(conn, 'core')):
raise DatabaseSchemaException("Missing schema 'core' in DB. Was dirbs-db install run successfully?")
if (query_schema_ownership(conn, 'core') != 'dirbs_core_power_user'):
raise DatabaseSchemaException("Schema 'core... |
def verify_hll_schema(conn):
'Function used to check whether HLL schema exists and that extension is installed correctly.'
logger = logging.getLogger('dirbs.db')
if (not query_schema_existence(conn, 'hll')):
logger.error("Schema 'hll' does not exist. Please ensure the hll extension is installed and ... | 2,937,410,098,723,305,500 | Function used to check whether HLL schema exists and that extension is installed correctly. | src/dirbs/utils.py | verify_hll_schema | nealmadhu/DIRBS-Core | python | def verify_hll_schema(conn):
logger = logging.getLogger('dirbs.db')
if (not query_schema_existence(conn, 'hll')):
logger.error("Schema 'hll' does not exist. Please ensure the hll extension is installed and run the following as a superuser whilst connected to this DB: \n\t1. CREATE SCHEMA hll;\n\t2.... |
def verify_db_search_path(conn):
'Function used to check whether db_search_path is correct by looking for objects.'
logger = logging.getLogger('dirbs.db')
is_search_path_valid = True
with conn.cursor() as cursor:
cursor.execute("SELECT to_regclass('schema_version')")
res = cursor.fetchon... | -4,710,171,244,612,890,000 | Function used to check whether db_search_path is correct by looking for objects. | src/dirbs/utils.py | verify_db_search_path | nealmadhu/DIRBS-Core | python | def verify_db_search_path(conn):
logger = logging.getLogger('dirbs.db')
is_search_path_valid = True
with conn.cursor() as cursor:
cursor.execute("SELECT to_regclass('schema_version')")
res = cursor.fetchone()[0]
if (res is None):
is_search_path_valid = False
... |
def query_db_schema_version(conn):
'Function to fetch the DB version number from the database.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
try:
cur.execute('SELECT MAX(version) FROM schema_version')
return cur.fetchone()[0]
except psycopg2.Progr... | 7,737,910,117,503,055,000 | Function to fetch the DB version number from the database. | src/dirbs/utils.py | query_db_schema_version | nealmadhu/DIRBS-Core | python | def query_db_schema_version(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
try:
cur.execute('SELECT MAX(version) FROM schema_version')
return cur.fetchone()[0]
except psycopg2.ProgrammingError as ex:
logger.error(str(ex).strip())... |
def set_db_schema_version(conn, new_version):
'Function to set the DB version number in the database.'
with conn.cursor() as cur:
cur.execute('SELECT COUNT(*) FROM schema_version')
num_rows = cur.fetchone()[0]
assert (num_rows <= 1)
if (num_rows > 0):
cur.execute('UPD... | -7,654,731,514,572,616,000 | Function to set the DB version number in the database. | src/dirbs/utils.py | set_db_schema_version | nealmadhu/DIRBS-Core | python | def set_db_schema_version(conn, new_version):
with conn.cursor() as cur:
cur.execute('SELECT COUNT(*) FROM schema_version')
num_rows = cur.fetchone()[0]
assert (num_rows <= 1)
if (num_rows > 0):
cur.execute('UPDATE schema_version SET version = %s', [new_version])
... |
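set_db_schema_version implements a one-row upsert on schema_version: UPDATE when the row exists, otherwise the (truncated) branch presumably INSERTs. A hedged usage sketch:

conn = create_db_connection(config.db_config)
set_db_schema_version(conn, code_db_schema_version)  # code_db_schema_version is the code-side constant referenced above
conn.commit()  # required unless the connection was created with autocommit=True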
def is_db_user_superuser(conn):
'Function to test whether the current DB user is a PostgreSQL superuser.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolsuper\n FROM pg_roles\n WHERE rolname = CURRENT_USER')
... | -2,014,946,659,195,252,500 | Function to test whether the current DB user is a PostgreSQL superuser. | src/dirbs/utils.py | is_db_user_superuser | nealmadhu/DIRBS-Core | python | def is_db_user_superuser(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolsuper\n FROM pg_roles\n WHERE rolname = CURRENT_USER')
res = cur.fetchone()
if (res is None):
logger.wa... |
def is_db_user_dirbs_role(conn, role_name):
'Function to test whether the current DB user is in a DIRBS role.'
with conn.cursor() as cur:
cur.execute("SELECT pg_has_role(%s, 'MEMBER')", [role_name])
return cur.fetchone()[0] | 7,042,693,565,980,279,000 | Function to test whether the current DB user is in a DIRBS role. | src/dirbs/utils.py | is_db_user_dirbs_role | nealmadhu/DIRBS-Core | python | def is_db_user_dirbs_role(conn, role_name):
with conn.cursor() as cur:
cur.execute("SELECT pg_has_role(%s, 'MEMBER')", [role_name])
return cur.fetchone()[0] |
def is_db_user_dirbs_poweruser(conn):
'Function to test whether the current DB user is a DIRBS power user.'
return is_db_user_dirbs_role(conn, 'dirbs_core_power_user') | -2,488,561,900,685,019,000 | Function to test whether the current DB user is a DIRBS power user. | src/dirbs/utils.py | is_db_user_dirbs_poweruser | nealmadhu/DIRBS-Core | python | def is_db_user_dirbs_poweruser(conn):
return is_db_user_dirbs_role(conn, 'dirbs_core_power_user') |
def can_db_user_create_roles(conn):
'Function to test whether the current DB user has the CREATEROLE privilege.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolcreaterole\n FROM pg_roles\n WHERE rolname = CURREN... | -1,844,158,055,052,842,200 | Function to test whether the current DB user has the CREATEROLE privilege. | src/dirbs/utils.py | can_db_user_create_roles | nealmadhu/DIRBS-Core | python | def can_db_user_create_roles(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolcreaterole\n FROM pg_roles\n WHERE rolname = CURRENT_USER')
res = cur.fetchone()
if (res is None):
... |
def query_db_ownership(conn):
'Function to verify whether the current database ownership is correct.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolname\n FROM pg_roles\n JOIN pg_database\n ... | 6,090,212,680,849,892,000 | Function to verify whether the current database ownership is correct. | src/dirbs/utils.py | query_db_ownership | nealmadhu/DIRBS-Core | python | def query_db_ownership(conn):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolname\n FROM pg_roles\n JOIN pg_database\n ON (pg_database.datdba = pg_roles.oid)\n ... |
def query_schema_existence(conn, schema_name):
'Function to verify whether a given database schema exists.'
with conn.cursor() as cur:
cur.execute('SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)', [schema_name])
return cur.fetchone().exists | -1,074,036,478,143,411,000 | Function to verify whether a given database schema exists. | src/dirbs/utils.py | query_schema_existence | nealmadhu/DIRBS-Core | python | def query_schema_existence(conn, schema_name):
with conn.cursor() as cur:
cur.execute('SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)', [schema_name])
return cur.fetchone().exists |
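One detail worth noting in query_schema_existence: cur.fetchone().exists reads the column by attribute, which only works when the connection uses a cursor factory such as psycopg2.extras.NamedTupleCursor (presumably configured in the truncated create_db_connection). With the default tuple cursor the equivalent would be:

row = cur.fetchone()
schema_exists = row[0]  # positional access works with any cursor factory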
def query_schema_ownership(conn, schema_name):
'Function to verify whether the current database schema ownership is correct.'
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolname\n FROM pg_roles\n JOIN pg_namesp... | 8,221,048,068,172,430,000 | Function to verify whether the current database schema ownership is correct. | src/dirbs/utils.py | query_schema_ownership | nealmadhu/DIRBS-Core | python | def query_schema_ownership(conn, schema_name):
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute('SELECT rolname\n FROM pg_roles\n JOIN pg_namespace\n ON (pg_namespace.nspowner = pg_roles.oid)\n... |
def compute_analysis_end_date(conn, curr_date):
'Function to get the end of the analysis window based on current operator data.'
end_date = curr_date
if (end_date is None):
with conn.cursor() as cursor:
monthly_country_child_tbl_list = child_table_names(conn, 'monthly_network_triplets_co... | 4,949,691,341,178,486,000 | Function to get the end of the analysis window based on current operator data. | src/dirbs/utils.py | compute_analysis_end_date | nealmadhu/DIRBS-Core | python | def compute_analysis_end_date(conn, curr_date):
end_date = curr_date
if (end_date is None):
with conn.cursor() as cursor:
monthly_country_child_tbl_list = child_table_names(conn, 'monthly_network_triplets_country')
year_month_list_in_child_tbls_records = table_invariants_lis... |
def hash_string_64bit(s):
'Basic string hash based on taking an initial prime number and multiplying it by another prime number.'
string_hash = 7
string_bytes = bytearray(s, 'utf-8')
for b in string_bytes:
string_hash = ((string_hash * 31) + b)
return (string_hash % (pow(2, 63) - 1)) | 6,294,782,927,157,870,000 | Basic string hash based on taking an initial prime number and multiplying it by another prime number. | src/dirbs/utils.py | hash_string_64bit | nealmadhu/DIRBS-Core | python | def hash_string_64bit(s):
string_hash = 7
string_bytes = bytearray(s, 'utf-8')
for b in string_bytes:
string_hash = ((string_hash * 31) + b)
return (string_hash % (pow(2, 63) - 1)) |
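A worked example of the rolling hash in hash_string_64bit for the two-byte string 'ab' (UTF-8 bytes 97 and 98):

# start value:         7
# after 'a' (97):  7 * 31 + 97   = 314
# after 'b' (98):  314 * 31 + 98 = 9832
assert hash_string_64bit('ab') == 9832  # far below 2**63 - 1, so the final modulo changes nothing here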
def child_table_names(conn, parent_name):
'Return a list of table names for a parent table name.'
with conn.cursor() as cursor:
cursor.execute('SELECT c.relname AS child_tblname\n FROM pg_inherits\n JOIN pg_class AS c\n ... | -5,567,330,417,522,711,000 | Return a list of table names for a parent table name. | src/dirbs/utils.py | child_table_names | nealmadhu/DIRBS-Core | python | def child_table_names(conn, parent_name):
with conn.cursor() as cursor:
cursor.execute('SELECT c.relname AS child_tblname\n FROM pg_inherits\n JOIN pg_class AS c\n ON (c.oid = inhrelid)\n JO... |
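A hedged usage sketch for child_table_names; the parent table name is taken from compute_analysis_end_date above, and the child names shown are purely illustrative:

children = child_table_names(conn, 'monthly_network_triplets_country')
# e.g. ['monthly_network_triplets_country_2023_01', 'monthly_network_triplets_country_2023_02']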
def table_invariants_list(conn, table_names, invariant_col_names):
'Gets a list of tuples containing the values for common table invariant columns across a list of table names.'
if (len(table_names) == 0):
return []
with conn.cursor() as cursor:
table_queries = []
for tblname in table_n... | -7,955,107,562,763,765,000 | Gets a list of tuples containing the values for common table invariant columns across a list of table names. | src/dirbs/utils.py | table_invariants_list | nealmadhu/DIRBS-Core | python | def table_invariants_list(conn, table_names, invariant_col_names):
if (len(table_names) == 0):
return []
with conn.cursor() as cursor:
table_queries = []
for tblname in table_names:
table_queries.append(sql.SQL('SELECT * FROM (SELECT {0} FROM {1} LIMIT 1) {2}').format(sq... |
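A hedged sketch combining the two helpers above; the invariant column names are assumptions for illustration, and the body above truncates before showing how the per-table queries are combined:

tables = child_table_names(conn, 'monthly_network_triplets_country')
rows = table_invariants_list(conn, tables, ['triplet_year', 'triplet_month'])
# e.g. [(2023, 1), (2023, 2)] -- one tuple per child table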
def most_recently_run_condition_info(conn, cond_names, successful_only=False):
'For a list of condition names, return a dict of cond_name -> (run_id, cond_config) for the most recent results.\n\n If a particular condition has never completed successfully, the value of the dict will be None, unless the\n succe... | 6,417,752,044,326,887,000 | For a list of condition names, return a dict of cond_name -> (run_id, cond_config) for the most recent results.
If a particular condition has never completed successfully, the value of the dict will be None, unless the
successful_only parameter is set to True, in which case the key will not exist in the returned dict. | src/dirbs/utils.py | most_recently_run_condition_info | nealmadhu/DIRBS-Core | python | def most_recently_run_condition_info(conn, cond_names, successful_only=False):
'For a list of condition names, return a dict of cond_name -> (run_id, cond_config) for the most recent results.\n\n If a particular condition has never completed successfully, the value of the dict will be None, unless the\n succe... |
def filter_imei_list_sql_by_device_type(conn, exempted_device_types, imei_list_sql):
'Function to return SQL filtering out exempted device types.'
return sql.SQL('SELECT imei_norm\n FROM (SELECT imei_norm,\n SUBSTRING(imei_norm, 1, 8) AS tac\n ... | -3,477,251,885,230,759,000 | Function to return SQL filtering out exempted device types. | src/dirbs/utils.py | filter_imei_list_sql_by_device_type | nealmadhu/DIRBS-Core | python | def filter_imei_list_sql_by_device_type(conn, exempted_device_types, imei_list_sql):
return sql.SQL('SELECT imei_norm\n FROM (SELECT imei_norm,\n SUBSTRING(imei_norm, 1, 8) AS tac\n FROM ({0}) imeis) imeis_with_tac\n ... |
def format_datetime_for_report(timestamp_with_tz):
"Format the datetime into a string for reporting.\n\n Replace this function with datetime.isoformat(sep=' ', timespec='seconds') after we update python version to 3.6\n "
if (timestamp_with_tz is not None):
return timestamp_with_tz.strftime('%Y-%m... | -4,662,658,461,025,122,000 | Format the datetime into a string for reporting.
Replace this function with datetime.isoformat(sep=' ', timespec='seconds') after we update python version to 3.6 | src/dirbs/utils.py | format_datetime_for_report | nealmadhu/DIRBS-Core | python | def format_datetime_for_report(timestamp_with_tz):
"Format the datetime into a string for reporting.\n\n Replace this function with datetime.isoformat(sep=' ', timespec='seconds') after we update python version to 3.6\n "
if (timestamp_with_tz is not None):
return timestamp_with_tz.strftime('%Y-%m... |
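The strftime format string in format_datetime_for_report is truncated above, so the exact output below is an assumption based on the stated goal of mimicking isoformat(sep=' ', timespec='seconds'):

import datetime
ts = datetime.datetime(2020, 1, 31, 13, 45, 7, tzinfo=datetime.timezone.utc)
format_datetime_for_report(ts)    # presumably '2020-01-31 13:45:07' plus any timezone suffix the full format adds
format_datetime_for_report(None)  # falls through the if-guard and returns None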
def validate_exempted_device_types(conn, config):
'Method to validate exempted device types specified in config.'
with conn.cursor() as cursor:
logger = logging.getLogger('dirbs.config')
exempted_device_types = config.region_config.exempted_device_types
if (len(exempted_device_types) > 0... | -8,926,925,077,652,810,000 | Method to validate exempted device types specified in config. | src/dirbs/utils.py | validate_exempted_device_types | nealmadhu/DIRBS-Core | python | def validate_exempted_device_types(conn, config):
with conn.cursor() as cursor:
logger = logging.getLogger('dirbs.config')
exempted_device_types = config.region_config.exempted_device_types
if (len(exempted_device_types) > 0):
cursor.execute('SELECT DISTINCT device_type FROM... |
def log_analysis_window(logger, analysis_start_date, analysis_end_date, start_message='', start_date_inclusive=True, end_date_inclusive=False):
'Helper function to print out the window used for analysis and list generation using interval notation.'
start_date_interval_notation = ('[' if start_date_inclusive else... | 1,355,111,144,782,573,000 | Helper function to print out the window used for analysis and list generation using interval notation. | src/dirbs/utils.py | log_analysis_window | nealmadhu/DIRBS-Core | python | def log_analysis_window(logger, analysis_start_date, analysis_end_date, start_message='', start_date_inclusive=True, end_date_inclusive=False):
start_date_interval_notation = ('[' if start_date_inclusive else '(')
end_date_interval_notation = (']' if end_date_inclusive else ')')
logger.debug('{0} {sd_int... |
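A hedged usage sketch for log_analysis_window; the full log message is truncated above, but the brackets follow from the visible defaults (inclusive start '[', exclusive end ')'):

import datetime, logging
logger = logging.getLogger('dirbs.sample')  # hypothetical logger name
log_analysis_window(logger, datetime.date(2020, 1, 1), datetime.date(2020, 2, 1), start_message='Analysis window:')
# logs at DEBUG level, something like: Analysis window: [2020-01-01, 2020-02-01)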