Dataset columns: sentence1 (string, length 52 to 3.87M), sentence2 (string, length 1 to 47.2k), label (1 class: entailment).
def render_bump_release(self):
"""
If the bump_release plugin is present, configure it
"""
phase = 'prebuild_plugins'
plugin = 'bump_release'
if not self.pt.has_plugin_conf(phase, plugin):
return
if self.user_params.release.value:
self.pt.remove_plugin(phase, plugin, 'release value supplied as user parameter')
return
# For flatpak, we want a name-version-release of
# <name>-<stream>-<module_build_version>.<n>, where the .<n> makes
# sure that the build is unique in Koji
if self.user_params.flatpak.value:
self.pt.set_plugin_arg(phase, plugin, 'append', True)
|
If the bump_release plugin is present, configure it
|
entailment
|
def render_check_and_set_platforms(self):
"""
If the check_and_set_platforms plugin is present, configure it
"""
phase = 'prebuild_plugins'
plugin = 'check_and_set_platforms'
if not self.pt.has_plugin_conf(phase, plugin):
return
if self.user_params.koji_target.value:
self.pt.set_plugin_arg(phase, plugin, "koji_target",
self.user_params.koji_target.value)
|
If the check_and_set_platforms plugin is present, configure it
|
entailment
|
def render_import_image(self, use_auth=None):
"""
Configure the import_image plugin
"""
# import_image is a multi-phase plugin
if self.user_params.imagestream_name.value is None:
self.pt.remove_plugin('exit_plugins', 'import_image',
'imagestream not in user parameters')
elif self.pt.has_plugin_conf('exit_plugins', 'import_image'):
self.pt.set_plugin_arg('exit_plugins', 'import_image', 'imagestream',
self.user_params.imagestream_name.value)
|
Configure the import_image plugin
|
entailment
|
def render_tag_from_config(self):
"""Configure tag_from_config plugin"""
phase = 'postbuild_plugins'
plugin = 'tag_from_config'
if not self.has_tag_suffixes_placeholder():
return
unique_tag = self.user_params.image_tag.value.split(':')[-1]
tag_suffixes = {'unique': [unique_tag], 'primary': []}
if self.user_params.build_type.value == BUILD_TYPE_ORCHESTRATOR:
additional_tags = self.user_params.additional_tags.value or set()
if self.user_params.scratch.value:
pass
elif self.user_params.isolated.value:
tag_suffixes['primary'].extend(['{version}-{release}'])
elif self.user_params.tags_from_yaml.value:
tag_suffixes['primary'].extend(['{version}-{release}'])
tag_suffixes['primary'].extend(additional_tags)
else:
tag_suffixes['primary'].extend(['latest', '{version}', '{version}-{release}'])
tag_suffixes['primary'].extend(additional_tags)
self.pt.set_plugin_arg(phase, plugin, 'tag_suffixes', tag_suffixes)
|
Configure tag_from_config plugin
|
entailment
|
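To make the shape of the plugin argument concrete: for an orchestrator build that is neither scratch nor isolated, the code above would pass a tag_suffixes value like the following sketch (the unique tag and the extra primary tag are made-up example values, not real build data).
tag_suffixes = {
    'unique': ['20190101010101-abcdef'],  # everything after ':' in image_tag (example value)
    'primary': ['latest', '{version}', '{version}-{release}', 'extra-tag'],  # plus additional_tags
}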
def render_pull_base_image(self):
"""Configure pull_base_image"""
phase = 'prebuild_plugins'
plugin = 'pull_base_image'
if self.user_params.parent_images_digests.value:
self.pt.set_plugin_arg(phase, plugin, 'parent_images_digests',
self.user_params.parent_images_digests.value)
|
Configure pull_base_image
|
entailment
|
def get_user(self, username="~"):
"""
get info about user (if no user specified, use the one initiating request)
:param username: str, name of user to get info about, default="~"
:return: dict
"""
url = self._build_url("users/%s/" % username, _prepend_namespace=False)
response = self._get(url)
check_response(response)
return response
|
get info about user (if no user specified, use the one initiating request)
:param username: str, name of user to get info about, default="~"
:return: dict
|
entailment
|
def create_build(self, build_json):
"""
:return:
"""
url = self._build_url("builds/")
logger.debug(build_json)
return self._post(url, data=json.dumps(build_json),
headers={"Content-Type": "application/json"})
|
:return:
|
entailment
|
def get_all_build_configs_by_labels(self, label_selectors):
"""
Returns all build configs matching a given set of label selectors. It is up to the
calling function to filter the results.
"""
labels = ['%s=%s' % (field, value) for field, value in label_selectors]
labels = ','.join(labels)
url = self._build_url("buildconfigs/", labelSelector=labels)
return self._get(url).json()['items']
|
Returns all build configs matching a given set of label selectors. It is up to the
calling function to filter the results.
|
entailment
|
def get_build_config_by_labels(self, label_selectors):
"""
Returns a build config matching the given label
selectors. This method will raise OsbsException
if not exactly one build config is found.
"""
items = self.get_all_build_configs_by_labels(label_selectors)
if not items:
raise OsbsException(
"Build config not found for labels: %r" %
(label_selectors, ))
if len(items) > 1:
raise OsbsException(
"More than one build config found for labels: %r" %
(label_selectors, ))
return items[0]
|
Returns a build config matching the given label
selectors. This method will raise OsbsException
if not exactly one build config is found.
|
entailment
|
def get_build_config_by_labels_filtered(self, label_selectors, filter_key, filter_value):
"""
Returns a build config matching the given label selectors, filtering against
another predetermined value. This method will raise OsbsException
if not exactly one build config is found after filtering.
"""
items = self.get_all_build_configs_by_labels(label_selectors)
if filter_value is not None:
build_configs = []
for build_config in items:
match_value = graceful_chain_get(build_config, *filter_key.split('.'))
if filter_value == match_value:
build_configs.append(build_config)
items = build_configs
if not items:
raise OsbsException(
"Build config not found for labels: %r" %
(label_selectors, ))
if len(items) > 1:
raise OsbsException(
"More than one build config found for labels: %r" %
(label_selectors, ))
return items[0]
|
Returns a build config matching the given label selectors, filtering against
another predetermined value. This method will raise OsbsException
if not exactly one build config is found after filtering.
|
entailment
|
def create_build_config(self, build_config_json):
"""
:return:
"""
url = self._build_url("buildconfigs/")
return self._post(url, data=build_config_json,
headers={"Content-Type": "application/json"})
|
:return:
|
entailment
|
def stream_logs(self, build_id):
"""
stream logs from build
:param build_id: str
:return: iterator
"""
kwargs = {'follow': 1}
# If connection is closed within this many seconds, give up:
min_idle_timeout = 60
# Stream logs, but be careful of the connection closing
# due to idle timeout. In that case, try again until the
# call returns more quickly than a reasonable timeout
# would be set to.
last_activity = time.time()
while True:
buildlogs_url = self._build_url("builds/%s/log/" % build_id,
**kwargs)
try:
response = self._get(buildlogs_url, stream=1,
headers={'Connection': 'close'})
check_response(response)
for line in response.iter_lines():
last_activity = time.time()
yield line
# NOTE1: If self._get causes ChunkedEncodingError, ConnectionError,
# or IncompleteRead to be raised, they'll be wrapped in
# OsbsNetworkException or OsbsException
# NOTE2: If iter_lines causes ChunkedEncodingError
# or IncompleteRead to be raised, it'll simply be silenced.
# NOTE3: An exception may be raised from
# check_response(). In this case, exception will be
# wrapped in OsbsException or OsbsNetworkException,
# inspect cause to detect ConnectionError.
except OsbsException as exc:
if not isinstance(exc.cause, ConnectionError):
raise
idle = time.time() - last_activity
logger.debug("connection closed after %ds", idle)
if idle < min_idle_timeout:
# Finish output
return
since = int(idle - 1)
logger.debug("fetching logs starting from %ds ago", since)
kwargs['sinceSeconds'] = since
|
stream logs from build
:param build_id: str
:return: iterator
|
entailment
|
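The retry logic above can be summarized in a standalone sketch: reconnect when the connection drops after a long idle period, asking the server to replay the missed window via sinceSeconds, and stop when the connection closes shortly after activity (taken to mean the log is complete). Here fetch_lines is a hypothetical stand-in for the authenticated HTTP request; the real code wraps connection errors in OsbsException rather than raising ConnectionError directly.
import time

def stream_with_idle_retry(fetch_lines, min_idle_timeout=60):
    """Yield log lines, retrying when an idle connection is dropped."""
    since_seconds = None
    last_activity = time.time()
    while True:
        try:
            for line in fetch_lines(since_seconds=since_seconds):
                last_activity = time.time()
                yield line
        except ConnectionError:
            idle = time.time() - last_activity
            if idle < min_idle_timeout:
                # closed shortly after activity: assume the log is complete
                return
            # ask the server to replay roughly the window we may have missed
            since_seconds = int(idle - 1)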
def logs(self, build_id, follow=False, build_json=None, wait_if_missing=False):
"""
provide logs from build
:param build_id: str
:param follow: bool, fetch logs as they come?
:param build_json: dict, to save one get-build query
:param wait_if_missing: bool, if build doesn't exist, wait
:return: None, str or iterator
"""
# does build exist?
try:
build_json = build_json or self.get_build(build_id).json()
except OsbsResponseException as ex:
if ex.status_code == 404:
if not wait_if_missing:
raise OsbsException("Build '%s' doesn't exist." % build_id)
else:
raise
if follow or wait_if_missing:
build_json = self.wait_for_build_to_get_scheduled(build_id)
br = BuildResponse(build_json)
# When build is in new or pending state, openshift responds with 500
if br.is_pending():
return
if follow:
return self.stream_logs(build_id)
buildlogs_url = self._build_url("builds/%s/log/" % build_id)
response = self._get(buildlogs_url, headers={'Connection': 'close'})
check_response(response)
return response.content
|
provide logs from build
:param build_id: str
:param follow: bool, fetch logs as they come?
:param build_json: dict, to save one get-build query
:param wait_if_missing: bool, if build doesn't exist, wait
:return: None, str or iterator
|
entailment
|
def list_builds(self, build_config_id=None, koji_task_id=None,
field_selector=None, labels=None):
"""
List builds matching criteria
:param build_config_id: str, only list builds created from BuildConfig
:param koji_task_id: str, only list builds for Koji Task ID
:param field_selector: str, field selector for query
:return: HttpResponse
"""
query = {}
selector = '{key}={value}'
label = {}
if labels is not None:
label.update(labels)
if build_config_id is not None:
label['buildconfig'] = build_config_id
if koji_task_id is not None:
label['koji-task-id'] = str(koji_task_id)
if label:
query['labelSelector'] = ','.join([selector.format(key=key,
value=value)
for key, value in label.items()])
if field_selector is not None:
query['fieldSelector'] = field_selector
url = self._build_url("builds/", **query)
return self._get(url)
|
List builds matching criteria
:param build_config_id: str, only list builds created from BuildConfig
:param koji_task_id: str, only list builds for Koji Task ID
:param field_selector: str, field selector for query
:return: HttpResponse
|
entailment
|
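As an illustration of the selector string this builds (hand-derived, not executed): calling list_builds(build_config_id='fedora-30', koji_task_id=123456) queries builds/ with
# labelSelector = "buildconfig=fedora-30,koji-task-id=123456"
# (pair order follows the label dict's iteration order)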
def create_resource_quota(self, name, quota_json):
"""
Prevent builds being scheduled and wait for running builds to finish.
:return:
"""
url = self._build_k8s_url("resourcequotas/")
response = self._post(url, data=json.dumps(quota_json),
headers={"Content-Type": "application/json"})
if response.status_code == http_client.CONFLICT:
url = self._build_k8s_url("resourcequotas/%s" % name)
response = self._put(url, data=json.dumps(quota_json),
headers={"Content-Type": "application/json"})
check_response(response)
return response
|
Prevent builds being scheduled and wait for running builds to finish.
:return:
|
entailment
|
def wait(self, build_id, states):
"""
:param build_id: str, build to watch and wait for
:return:
"""
logger.info("watching build '%s'", build_id)
for changetype, obj in self.watch_resource("builds", build_id):
try:
obj_name = obj["metadata"]["name"]
except KeyError:
logger.error("'object' doesn't have any name")
continue
try:
obj_status = obj["status"]["phase"]
except KeyError:
logger.error("'object' doesn't have any status")
continue
else:
obj_status_lower = obj_status.lower()
logger.info("object has changed: '%s', status: '%s', name: '%s'",
changetype, obj_status, obj_name)
if obj_name == build_id:
logger.info("matching build found")
logger.debug("is %s in %s?", repr(obj_status_lower), states)
if obj_status_lower in states:
logger.debug("Yes, build is in the state I'm waiting for.")
return obj
else:
logger.debug("No, build is not in the state I'm "
"waiting for.")
else:
logger.info("The build %r isn't me %r", obj_name, build_id)
# I'm not sure how we can end up here since there are two possible scenarios:
# 1. our object was found and we are returning in the loop
# 2. our object was not found and we keep waiting (in the loop)
# Therefore, let's raise here
logger.warning("build '%s' was not found during wait", build_id)
raise OsbsWatchBuildNotFound("build '%s' was not found and response stream ended" %
build_id)
|
:param build_id: str, build to watch and wait for
:return:
|
entailment
|
def adjust_attributes_on_object(self, collection, name, things, values, how):
"""
adjust labels or annotations on object
labels have to match RE: (([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])? and
have at most 63 chars
:param collection: str, object collection e.g. 'builds'
:param name: str, name of object
:param things: str, 'labels' or 'annotations'
:param values: dict, values to set
:param how: callable, how to adjust the values e.g.
self._replace_metadata_things
:return:
"""
url = self._build_url("%s/%s" % (collection, name))
response = self._get(url)
logger.debug("before modification: %s", response.content)
build_json = response.json()
how(build_json['metadata'], things, values)
response = self._put(url, data=json.dumps(build_json), use_json=True)
check_response(response)
return response
|
adjust labels or annotations on object
labels have to match RE: (([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])? and
have at most 63 chars
:param collection: str, object collection e.g. 'builds'
:param name: str, name of object
:param things: str, 'labels' or 'annotations'
:param values: dict, values to set
:param how: callable, how to adjust the values e.g.
self._replace_metadata_things
:return:
|
entailment
|
def update_annotations_on_build(self, build_id, annotations):
"""
set annotations on build object
:param build_id: str, id of build
:param annotations: dict, annotations to set
:return:
"""
return self.adjust_attributes_on_object('builds', build_id,
'annotations', annotations,
self._update_metadata_things)
|
set annotations on build object
:param build_id: str, id of build
:param annotations: dict, annotations to set
:return:
|
entailment
|
def import_image(self, name, stream_import, tags=None):
"""
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
"""
# Get the JSON for the ImageStream
imagestream_json = self.get_image_stream(name).json()
logger.debug("imagestream: %r", imagestream_json)
if 'dockerImageRepository' in imagestream_json.get('spec', {}):
logger.debug("Removing 'dockerImageRepository' from ImageStream %s", name)
source_repo = imagestream_json['spec'].pop('dockerImageRepository')
imagestream_json['metadata']['annotations'][ANNOTATION_SOURCE_REPO] = source_repo
imagestream_json = self.update_image_stream(name, imagestream_json).json()
# Note the tags before import
oldtags = imagestream_json.get('status', {}).get('tags', [])
logger.debug("tags before import: %r", oldtags)
stream_import['metadata']['name'] = name
stream_import['spec']['images'] = []
tags_set = set(tags) if tags else set()
for tag in imagestream_json.get('spec', {}).get('tags', []):
if tags_set and tag['name'] not in tags_set:
continue
image_import = {
'from': tag['from'],
'to': {'name': tag['name']},
'importPolicy': tag.get('importPolicy'),
'referencePolicy': tag.get('referencePolicy'),
}
stream_import['spec']['images'].append(image_import)
if not stream_import['spec']['images']:
logger.debug('No tags to import')
return False
import_url = self._build_url("imagestreamimports/")
import_response = self._post(import_url, data=json.dumps(stream_import),
use_json=True)
self._check_import_image_response(import_response)
new_tags = [
image['tag']
for image in import_response.json().get('status', {}).get('images', [])]
logger.debug("tags after import: %r", new_tags)
return True
|
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
|
entailment
|
def import_image_tags(self, name, stream_import, tags, repository, insecure):
"""
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
"""
# Get the JSON for the ImageStream
imagestream_json = self.get_image_stream(name).json()
logger.debug("imagestream: %r", imagestream_json)
changed = False
# the presence of dockerImageRepository limits how many tags are updated
if 'dockerImageRepository' in imagestream_json.get('spec', {}):
logger.debug("Removing 'dockerImageRepository' from ImageStream %s", name)
imagestream_json['spec'].pop('dockerImageRepository')
changed = True
all_annotations = imagestream_json.get('metadata', {}).get('annotations', {})
# remove annotations about registry, since method will get them as arguments
for annotation in ANNOTATION_SOURCE_REPO, ANNOTATION_INSECURE_REPO:
if annotation in all_annotations:
imagestream_json['metadata']['annotations'].pop(annotation)
changed = True
if changed:
imagestream_json = self.update_image_stream(name, imagestream_json).json()
# Note the tags before import
oldtags = imagestream_json.get('status', {}).get('tags', [])
logger.debug("tags before import: %r", oldtags)
stream_import['metadata']['name'] = name
stream_import['spec']['images'] = []
tags_set = set(tags) if tags else set()
if not tags_set:
logger.debug('No tags to import')
return False
for tag in tags_set:
image_import = {
'from': {"kind": "DockerImage",
"name": '{}:{}'.format(repository, tag)},
'to': {'name': tag},
'importPolicy': {'insecure': insecure},
# referencePolicy will default to "type: source"
# so we don't have to explicitly set it
}
stream_import['spec']['images'].append(image_import)
import_url = self._build_url("imagestreamimports/")
import_response = self._post(import_url, data=json.dumps(stream_import),
use_json=True)
self._check_import_image_response(import_response)
new_tags = [
image['tag']
for image in import_response.json().get('status', {}).get('images', [])]
logger.debug("tags after import: %r", new_tags)
return True
|
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
|
entailment
|
def str_on_2_unicode_on_3(s):
"""
argparse is way too awesome when doing repr() on choices when printing usage
:param s: str or unicode
:return: str on 2, unicode on 3
"""
if not PY3:
return str(s)
else: # 3+
if not isinstance(s, str):
return str(s, encoding="utf-8")
return s
|
argparse is way too awesome when doing repr() on choices when printing usage
:param s: str or unicode
:return: str on 2, unicode on 3
|
entailment
|
def load(self):
"""
Extract tabular data as |TableData| instances from a Line-delimited JSON file.
|load_source_desc_file|
:return:
Loaded table data iterator.
|load_table_name_desc|
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the data is invalid Line-delimited JSON.
:raises pytablereader.error.ValidationError:
If the data is not acceptable Line-delimited JSON format.
"""
formatter = JsonLinesTableFormatter(self.load_dict())
formatter.accept(self)
return formatter.to_table_data()
|
Extract tabular data as |TableData| instances from a Line-delimited JSON file.
|load_source_desc_file|
:return:
Loaded table data iterator.
|load_table_name_desc|
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the data is invalid Line-delimited JSON.
:raises pytablereader.error.ValidationError:
If the data is not acceptable Line-delimited JSON format.
|
entailment
|
def load(self):
"""
Load table data from a Google Spreadsheet.
This method considers :py:attr:`.source` as a path to the
credential JSON file used to access the Google Sheets API.
The method automatically searches for the header row, starting from
:py:attr:`.start_row`. A row qualifies as the header row when
all of its columns have a value (empty columns excluded).
:return:
Loaded table data. Return one |TableData| for each sheet in
the workbook. The table name for data will be determined by
:py:meth:`~.GoogleSheetsTableLoader.make_table_name`.
:rtype: iterator of |TableData|
:raises pytablereader.DataError:
If the header row is not found.
:raises pytablereader.OpenError:
If the spreadsheet is not found.
"""
import gspread
from oauth2client.service_account import ServiceAccountCredentials
self._validate_table_name()
self._validate_title()
scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
credentials = ServiceAccountCredentials.from_json_keyfile_name(self.source, scope)
gc = gspread.authorize(credentials)
try:
for worksheet in gc.open(self.title).worksheets():
self._worksheet = worksheet
self.__all_values = [row for row in worksheet.get_all_values()]
if self._is_empty_sheet():
continue
try:
self.__strip_empty_col()
except ValueError:
continue
value_matrix = self.__all_values[self._get_start_row_idx() :]
try:
headers = value_matrix[0]
rows = value_matrix[1:]
except IndexError:
continue
self.inc_table_count()
yield TableData(
self.make_table_name(),
headers,
rows,
dp_extractor=self.dp_extractor,
type_hints=self._extract_type_hints(headers),
)
except gspread.exceptions.SpreadsheetNotFound:
raise OpenError("spreadsheet '{}' not found".format(self.title))
except gspread.exceptions.APIError as e:
raise APIError(e)
|
Load table data from a Google Spreadsheet.
This method considers :py:attr:`.source` as a path to the
credential JSON file used to access the Google Sheets API.
The method automatically searches for the header row, starting from
:py:attr:`.start_row`. A row qualifies as the header row when
all of its columns have a value (empty columns excluded).
:return:
Loaded table data. Return one |TableData| for each sheet in
the workbook. The table name for data will be determined by
:py:meth:`~.GoogleSheetsTableLoader.make_table_name`.
:rtype: iterator of |TableData|
:raises pytablereader.DataError:
If the header row is not found.
:raises pytablereader.OpenError:
If the spreadsheet is not found.
|
entailment
|
def set_log_level(log_level):
"""
Set logging level of this module. Using
`logbook <https://logbook.readthedocs.io/en/stable/>`__ module for logging.
:param int log_level:
One of the log level of
`logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value.
"""
if not LOGBOOK_INSTALLED:
return
# validate log level
logbook.get_level_name(log_level)
if log_level == logger.level:
return
if log_level == logbook.NOTSET:
set_logger(is_enable=False)
else:
set_logger(is_enable=True)
logger.level = log_level
dataproperty.set_log_level(log_level)
try:
import simplesqlite
simplesqlite.set_log_level(log_level)
except ImportError:
pass
|
Set logging level of this module. Using
`logbook <https://logbook.readthedocs.io/en/stable/>`__ module for logging.
:param int log_level:
One of the log level of
`logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value.
|
entailment
|
def buildconfig_update(orig, new, remove_nonexistent_keys=False):
"""Performs update of given `orig` BuildConfig with values from `new` BuildConfig.
Both BuildConfigs have to be represented as `dict`s.
This function:
- adds all key/value pairs to `orig` from `new` that are missing
- replaces values in `orig` for keys that are in both
- removes key/value pairs from `orig` for keys that are not in `new`,
but only in dicts nested inside `strategy` key
(see https://github.com/projectatomic/osbs-client/pull/273#issuecomment-148038314)
"""
if isinstance(orig, dict) and isinstance(new, dict):
clean_triggers(orig, new)
if remove_nonexistent_keys:
missing = set(orig.keys()) - set(new.keys())
for k in missing:
orig.pop(k)
for k, v in new.items():
if k == 'strategy':
remove_nonexistent_keys = True
if isinstance(orig.get(k), dict) and isinstance(v, dict):
buildconfig_update(orig[k], v, remove_nonexistent_keys)
else:
orig[k] = v
|
Performs update of given `orig` BuildConfig with values from `new` BuildConfig.
Both BuildConfigs have to be represented as `dict`s.
This function:
- adds all key/value pairs to `orig` from `new` that are missing
- replaces values in `orig` for keys that are in both
- removes key/value pairs from `orig` for keys that are not in `new`,
but only in dicts nested inside `strategy` key
(see https://github.com/projectatomic/osbs-client/pull/273#issuecomment-148038314)
|
entailment
|
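A small hand-worked example of the merge semantics (not executed; it ignores clean_triggers, which should not affect inputs that have no triggers):
orig = {
    'metadata': {'name': 'bc-1', 'labels': {'git-branch': 'old-branch'}},
    'spec': {'strategy': {'customStrategy': {
        'env': [{'name': 'RELEASE', 'value': '1'}],
        'exposeDockerSocket': True,
    }}},
}
new = {
    'metadata': {'labels': {'git-branch': 'master'}},
    'spec': {'strategy': {'customStrategy': {
        'env': [{'name': 'RELEASE', 'value': '2'}],
    }}},
}
# After buildconfig_update(orig, new):
#  - orig['metadata']['name'] is kept: outside 'strategy', keys missing from
#    `new` are never removed
#  - orig['metadata']['labels']['git-branch'] becomes 'master'
#  - 'exposeDockerSocket' is dropped: inside 'strategy', keys missing from
#    `new` are removed
#  - the 'env' list is replaced wholesale (lists are not merged)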
def checkout_git_repo(git_url, target_dir=None, commit=None, retry_times=GIT_MAX_RETRIES,
branch=None, depth=None):
"""
clone provided git repo to target_dir, optionally checkout provided commit
yield the ClonedRepoData and delete the repo when finished
:param git_url: str, git repo to clone
:param target_dir: str, filesystem path where the repo should be cloned
:param commit: str, commit to checkout, SHA-1 or ref
:param retry_times: int, number of retries for git clone
:param branch: str, optional branch of the commit, required if depth is provided
:param depth: int, optional expected depth
:return: ClonedRepoData, with the clone directory, commit ID of HEAD, and commit depth
"""
tmpdir = tempfile.mkdtemp()
target_dir = target_dir or os.path.join(tmpdir, "repo")
try:
yield clone_git_repo(git_url, target_dir, commit, retry_times, branch, depth)
finally:
shutil.rmtree(tmpdir)
|
clone provided git repo to target_dir, optionally checkout provided commit
yield the ClonedRepoData and delete the repo when finished
:param git_url: str, git repo to clone
:param target_dir: str, filesystem path where the repo should be cloned
:param commit: str, commit to checkout, SHA-1 or ref
:param retry_times: int, number of retries for git clone
:param branch: str, optional branch of the commit, required if depth is provided
:param depth: int, optional expected depth
:return: ClonedRepoData, with the clone directory, commit ID of HEAD, and commit depth
|
entailment
|
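A usage sketch, assuming the function is exposed as a context manager (the yield/finally shape above suggests a contextlib.contextmanager wrapper in the library) and that it is importable from osbs.utils; both are assumptions.
from osbs.utils import checkout_git_repo  # import path assumed

with checkout_git_repo('https://example.com/containers/nginx.git',
                       commit='master', branch='master', depth=1) as repo_data:
    # repo_data bundles the clone directory, the resolved commit ID and the depth
    print(repo_data)
# the temporary clone has been removed once the block exits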
def clone_git_repo(git_url, target_dir=None, commit=None, retry_times=GIT_MAX_RETRIES, branch=None,
depth=None):
"""
clone provided git repo to target_dir, optionally checkout provided commit
:param git_url: str, git repo to clone
:param target_dir: str, filesystem path where the repo should be cloned
:param commit: str, commit to checkout, SHA-1 or ref
:param retry_times: int, number of retries for git clone
:param branch: str, optional branch of the commit, required if depth is provided
:param depth: int, optional expected depth
:return: ClonedRepoData, with the clone directory, commit ID of HEAD, and commit depth
"""
retry_delay = GIT_BACKOFF_FACTOR
target_dir = target_dir or os.path.join(tempfile.mkdtemp(), "repo")
commit = commit or "master"
logger.info("cloning git repo '%s'", git_url)
logger.debug("url = '%s', dir = '%s', commit = '%s'",
git_url, target_dir, commit)
cmd = ["git", "clone"]
if branch:
cmd += ["-b", branch, "--single-branch"]
if depth:
cmd += ["--depth", str(depth)]
elif depth:
logger.warning("branch not provided for %s, depth setting ignored", git_url)
depth = None
cmd += [git_url, target_dir]
logger.debug("cloning '%s'", cmd)
repo_commit = ''
repo_depth = None
for counter in range(retry_times + 1):
try:
# we are using check_output, even though we aren't using
# the return value, but we will get 'output' in exception
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
repo_commit, repo_depth = reset_git_repo(target_dir, commit, depth)
break
except subprocess.CalledProcessError as exc:
if counter != retry_times:
logger.info("retrying command '%s':\n '%s'", cmd, exc.output)
time.sleep(retry_delay * (2 ** counter))
else:
raise OsbsException("Unable to clone git repo '%s' "
"branch '%s'" % (git_url, branch),
cause=exc, traceback=sys.exc_info()[2])
return ClonedRepoData(target_dir, repo_commit, repo_depth)
|
clone provided git repo to target_dir, optionally checkout provided commit
:param git_url: str, git repo to clone
:param target_dir: str, filesystem path where the repo should be cloned
:param commit: str, commit to checkout, SHA-1 or ref
:param retry_times: int, number of retries for git clone
:param branch: str, optional branch of the commit, required if depth is provided
:param depth: int, optional expected depth
:return: ClonedRepoData, with the clone directory, commit ID of HEAD, and commit depth
|
entailment
|
def reset_git_repo(target_dir, git_reference, retry_depth=None):
"""
hard reset git clone in target_dir to given git_reference
:param target_dir: str, filesystem path where the repo is cloned
:param git_reference: str, any valid git reference
:param retry_depth: int, if the repo was cloned with --shallow, this is the expected
depth of the commit
:return: str and int, commit ID of HEAD and commit depth of git_reference
"""
deepen = retry_depth or 0
base_commit_depth = 0
for _ in range(GIT_FETCH_RETRY):
try:
if not deepen:
cmd = ['git', 'rev-list', '--count', git_reference]
base_commit_depth = int(subprocess.check_output(cmd, cwd=target_dir)) - 1
cmd = ["git", "reset", "--hard", git_reference]
logger.debug("Resetting current HEAD: '%s'", cmd)
subprocess.check_call(cmd, cwd=target_dir)
break
except subprocess.CalledProcessError:
if not deepen:
raise OsbsException('cannot find commit %s in repo %s' %
(git_reference, target_dir))
deepen *= 2
cmd = ["git", "fetch", "--depth", str(deepen)]
subprocess.check_call(cmd, cwd=target_dir)
logger.debug("Couldn't find commit %s, increasing depth with '%s'", git_reference,
cmd)
else:
raise OsbsException('cannot find commit %s in repo %s' % (git_reference, target_dir))
cmd = ["git", "rev-parse", "HEAD"]
logger.debug("getting SHA-1 of provided ref '%s'", git_reference)
commit_id = subprocess.check_output(cmd, cwd=target_dir, universal_newlines=True)
commit_id = commit_id.strip()
logger.info("commit ID = %s", commit_id)
final_commit_depth = None
if not deepen:
cmd = ['git', 'rev-list', '--count', 'HEAD']
final_commit_depth = int(subprocess.check_output(cmd, cwd=target_dir)) - base_commit_depth
return commit_id, final_commit_depth
|
hard reset git clone in target_dir to given git_reference
:param target_dir: str, filesystem path where the repo is cloned
:param git_reference: str, any valid git reference
:param retry_depth: int, if the repo was cloned with --shallow, this is the expected
depth of the commit
:return: str and int, commit ID of HEAD and commit depth of git_reference
|
entailment
|
def get_imagestreamtag_from_image(image):
"""
return ImageStreamTag, given a FROM value
:param image: str, the FROM value from the Dockerfile
:return: str, ImageStreamTag
"""
ret = image
# Remove the registry part
ret = strip_registry_from_image(image)
# ImageStream names cannot contain '/'
ret = ret.replace('/', '-')
# If there is no ':' suffix value, add one
if ret.find(':') == -1:
ret += ":latest"
return ret
|
return ImageStreamTag, given a FROM value
:param image: str, the FROM value from the Dockerfile
:return: str, ImageStreamTag
|
entailment
|
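Hand-derived examples of the FROM-to-ImageStreamTag mapping performed above (assuming strip_registry_from_image drops only a leading registry host):
# 'registry.example.com/rhel7:7.6'  ->  'rhel7:7.6'
# 'fedora'                          ->  'fedora:latest'           (':latest' appended)
# 'devel/buildroot'                 ->  'devel-buildroot:latest'  ('/' replaced by '-')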
def get_time_from_rfc3339(rfc3339):
"""
return seconds since the Epoch from an RFC 3339-formatted time string
:param rfc3339: str, time in RFC 3339 format
:return: float, seconds since the Epoch
"""
try:
# py 3
dt = dateutil.parser.parse(rfc3339, ignoretz=False)
return dt.timestamp()
except NameError:
# py 2
# Decode the RFC 3339 date with no fractional seconds (the
# format Origin provides). Note that this will fail to parse
# valid ISO8601 timestamps not in this exact format.
time_tuple = strptime(rfc3339, '%Y-%m-%dT%H:%M:%SZ')
return timegm(time_tuple)
|
return seconds since the Epoch from an RFC 3339-formatted time string
:param rfc3339: str, time in RFC 3339 format
:return: float, seconds since the Epoch
|
entailment
|
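A usage sketch under the assumption that the helper lives in osbs.utils; the expected value is simply the Unix timestamp of midnight UTC on 2016-01-01.
from osbs.utils import get_time_from_rfc3339  # import path assumed

assert get_time_from_rfc3339('2016-01-01T00:00:00Z') == 1451606400.0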
def sanitize_strings_for_openshift(str1, str2='', limit=LABEL_MAX_CHARS, separator='-',
label=True):
"""
OpenShift requires labels to be no more than 64 characters and forbids any characters other
than alphanumerics, ., and -. BuildConfig names are similar, but cannot contain /.
Sanitize and concatenate one or two strings to meet OpenShift's requirements. Include an
equal number of characters from both strings if the combined length is more than the limit.
"""
filter_chars = VALID_LABEL_CHARS if label else VALID_BUILD_CONFIG_NAME_CHARS
str1_san = ''.join(filter(filter_chars.match, list(str1)))
str2_san = ''.join(filter(filter_chars.match, list(str2)))
str1_chars = []
str2_chars = []
groups = ((str1_san, str1_chars), (str2_san, str2_chars))
size = len(separator)
limit = min(limit, LABEL_MAX_CHARS)
for i in range(max(len(str1_san), len(str2_san))):
for group, group_chars in groups:
if i < len(group):
group_chars.append(group[i])
size += 1
if size >= limit:
break
else:
continue
break
final_str1 = ''.join(str1_chars).strip(separator)
final_str2 = ''.join(str2_chars).strip(separator)
return separator.join(filter(None, (final_str1, final_str2)))
|
OpenShift requires labels to be no more than 64 characters and forbids any characters other
than alphanumerics, ., and -. BuildConfig names are similar, but cannot contain /.
Sanitize and concatenate one or two strings to meet OpenShift's requirements. Include an
equal number of characters from both strings if the combined length is more than the limit.
|
entailment
|
def make_name_from_git(repo, branch, limit=53, separator='-', hash_size=5):
"""
return name string representing the given git repo and branch
to be used as a build name.
NOTE: Build name will be used to generate pods which have a
limit of 64 characters and is composed as:
<buildname>-<buildnumber>-<podsuffix>
rhel7-1-build
Assuming '-XXXX' (5 chars) and '-build' (6 chars) as default
suffixes, name should be limited to 53 chars (64 - 11).
OpenShift is very peculiar in which BuildConfig names it
allows. For this reason, only certain characters are allowed.
Any disallowed characters will be removed from repo and
branch names.
:param repo: str, the git repository to be used
:param branch: str, the git branch to be used
:param limit: int, max name length
:param separator: str, used to separate the repo and branch in name
:return: str, name representing git repo and branch.
"""
branch = branch or 'unknown'
full = urlparse(repo).path.lstrip('/') + branch
repo = git_repo_humanish_part_from_uri(repo)
shaval = sha256(full.encode('utf-8')).hexdigest()
hash_str = shaval[:hash_size]
limit = limit - len(hash_str) - 1
sanitized = sanitize_strings_for_openshift(repo, branch, limit, separator, False)
return separator.join(filter(None, (sanitized, hash_str)))
|
return name string representing the given git repo and branch
to be used as a build name.
NOTE: Build name will be used to generate pods which have a
limit of 64 characters and is composed as:
<buildname>-<buildnumber>-<podsuffix>
rhel7-1-build
Assuming '-XXXX' (5 chars) and '-build' (6 chars) as default
suffixes, name should be limited to 53 chars (64 - 11).
OpenShift is very peculiar in which BuildConfig names it
allows. For this reason, only certain characters are allowed.
Any disallowed characters will be removed from repo and
branch names.
:param repo: str, the git repository to be used
:param branch: str, the git branch to be used
:param limit: int, max name length
:param separator: str, used to separate the repo and branch in name
:return: str, name representing git repo and branch.
|
entailment
|
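The resulting name has the shape <repo>-<branch>-<hash>; for example (the 5-character hash is the prefix of sha256(repo path + branch), so only its shape is shown here):
# make_name_from_git('https://example.com/containers/nginx.git', 'master')
#   -> 'nginx-master-<5-char hash>'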
def wrap_name_from_git(prefix, suffix, *args, **kwargs):
"""
wraps the result of make_name_from_git in a prefix and suffix,
adding separators for each.
see docstring for make_name_from_git for a full list of parameters
"""
# 64 is maximum length allowed by OpenShift
# 2 is the number of dashes that will be added
prefix = ''.join(filter(VALID_BUILD_CONFIG_NAME_CHARS.match, list(prefix)))
suffix = ''.join(filter(VALID_BUILD_CONFIG_NAME_CHARS.match, list(suffix)))
kwargs['limit'] = kwargs.get('limit', 64) - len(prefix) - len(suffix) - 2
name_from_git = make_name_from_git(*args, **kwargs)
return '-'.join([prefix, name_from_git, suffix])
|
wraps the result of make_name_from_git in a prefix and suffix,
adding separators for each.
see docstring for make_name_from_git for a full list of parameters
|
entailment
|
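Continuing the example from make_name_from_git above, a hypothetical call with a prefix and suffix simply brackets the generated name with dashes:
# wrap_name_from_git('scratch', '20190101020304',
#                    'https://example.com/containers/nginx.git', 'master')
#   -> 'scratch-nginx-master-<5-char hash>-20190101020304'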
def sanitize_version(version):
"""
Take parse_version() output and standardize output from older
setuptools' parse_version() to match current setuptools.
"""
if hasattr(version, 'base_version'):
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
cleaned_version = '{}.{}.{}'.format(major, minor, micro)
return cleaned_version
|
Take parse_version() output and standardize output from older
setuptools' parse_version() to match current setuptools.
|
entailment
|
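A brief sketch of the intended normalization, using setuptools' parse_version (sanitize_version itself is assumed to be in scope; its import path is not stated here):
from pkg_resources import parse_version

assert sanitize_version(parse_version('1.6.25.post1')) == '1.6.25'
assert sanitize_version(parse_version('0.54')) == '0.54.0'   # padded to three parts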
def get_name(self, label_type):
"""
returns the most preferred label name;
if there isn't any matching name in the list,
it will return the newest label name
"""
if label_type in self._label_values:
return self._label_values[label_type][0]
else:
return Labels.LABEL_NAMES[label_type][0]
|
returns the most preferred label name;
if there isn't any matching name in the list,
it will return the newest label name
|
entailment
|
def get_new_names_by_old():
"""Return dictionary, new label name indexed by old label name."""
newdict = {}
for label_type, label_names in Labels.LABEL_NAMES.items():
for oldname in label_names[1:]:
newdict[oldname] = Labels.LABEL_NAMES[label_type][0]
return newdict
|
Return dictionary, new label name indexed by old label name.
|
entailment
|
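For illustration, with a hypothetical Labels.LABEL_NAMES entry whose newest name comes first, the mapping would look like this (hand-derived):
# Labels.LABEL_NAMES = {LABEL_TYPE_COMPONENT: ('com.redhat.component', 'BZComponent')}
# get_new_names_by_old()
#   -> {'BZComponent': 'com.redhat.component'}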
def get_name_and_value(self, label_type):
"""
Return tuple of (label name, label value)
Raises KeyError if label doesn't exist
"""
if label_type in self._label_values:
return self._label_values[label_type]
else:
return (label_type, self._df_labels[label_type])
|
Return tuple of (label name, label value)
Raises KeyError if label doesn't exist
|
entailment
|
def kerberos_ccache_init(principal, keytab_file, ccache_file=None):
"""
Checks whether the kerberos credential cache has a ticket-granting ticket that is valid for
at least an hour.
Default ccache is used unless ccache_file is provided. In that case, KRB5CCNAME environment
variable is set to the value of ccache_file if we successfully obtain the ticket.
"""
tgt_valid = False
env = {"LC_ALL": "C"} # klist uses locales to format date on RHEL7+
if ccache_file:
env["KRB5CCNAME"] = ccache_file
# check if we have tgt that is valid more than one hour
rc, klist, _ = run(["klist"], extraenv=env)
if rc == 0:
for line in klist.splitlines():
m = re.match(KLIST_TGT_RE, line)
if m:
year = m.group("year")
if len(year) == 2:
year = "20" + year
expires = datetime.datetime(
int(year), int(m.group("month")), int(m.group("day")),
int(m.group("hour")), int(m.group("minute")), int(m.group("second"))
)
if expires - datetime.datetime.now() > datetime.timedelta(hours=1):
logger.debug("Valid TGT found, not renewing")
tgt_valid = True
break
if not tgt_valid:
logger.debug("Retrieving kerberos TGT")
rc, out, err = run(["kinit", "-k", "-t", keytab_file, principal], extraenv=env)
if rc != 0:
raise OsbsException("kinit returned %s:\nstdout: %s\nstderr: %s" % (rc, out, err))
if ccache_file:
os.environ["KRB5CCNAME"] = ccache_file
|
Checks whether the kerberos credential cache has a ticket-granting ticket that is valid for
at least an hour.
Default ccache is used unless ccache_file is provided. In that case, KRB5CCNAME environment
variable is set to the value of ccache_file if we successfully obtain the ticket.
|
entailment
|
def load(self):
"""
Extract tabular data as |TableData| instances from a SQLite database
file. |load_source_desc_file|
:return:
Loaded table data iterator.
|load_table_name_desc|
====================  ==============================================
Format specifier      Value after the replacement
====================  ==============================================
``%(filename)s``      |filename_desc|
``%(key)s``           ``%(format_name)s%(format_id)s``
``%(format_name)s``   ``"sqlite"``
``%(format_id)s``     |format_id_desc|
``%(global_id)s``     |global_id|
====================  ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the SQLite database file data is invalid or empty.
"""
self._validate()
formatter = SqliteTableFormatter(self.source)
formatter.accept(self)
return formatter.to_table_data()
|
Extract tabular data as |TableData| instances from a SQLite database
file. |load_source_desc_file|
:return:
Loaded table data iterator.
|load_table_name_desc|
====================  ==============================================
Format specifier      Value after the replacement
====================  ==============================================
``%(filename)s``      |filename_desc|
``%(key)s``           ``%(format_name)s%(format_id)s``
``%(format_name)s``   ``"sqlite"``
``%(format_id)s``     |format_id_desc|
``%(global_id)s``     |global_id|
====================  ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the SQLite database file data is invalid or empty.
|
entailment
|
def get_data(self):
"""
Find the data stored in the config_map
:return: dict, the parsed data that was passed into the ConfigMap on creation
"""
data = graceful_chain_get(self.json, "data")
if data is None:
return {}
data_dict = {}
for key in data:
if self.is_yaml(key):
data_dict[key] = yaml.load(data[key])
else:
data_dict[key] = json.loads(data[key])
return data_dict
|
Find the data stored in the config_map
:return: dict, the parsed data that was passed into the ConfigMap on creation
|
entailment
|
def get_data_by_key(self, name):
"""
Find the object stored by a JSON string at key 'name'
:return: str or dict, the json of the str or dict stored in the ConfigMap at that location
"""
data = graceful_chain_get(self.json, "data")
if data is None or name not in data:
return {}
if self.is_yaml(name):
return yaml.load(data[name]) or {}
return json.loads(data[name])
|
Find the object stored by a JSON string at key 'name'
:return: str or dict, the json of the str or dict stored in the ConfigMap at that location
|
entailment
|
def to_table_data(self):
"""
:raises ValueError:
:raises pytablereader.error.ValidationError:
"""
self._validate_source_data()
header_list = []
for json_record in self._buffer:
for key in json_record:
if key not in header_list:
header_list.append(key)
self._loader.inc_table_count()
yield TableData(
self._make_table_name(),
header_list,
self._buffer,
dp_extractor=self._loader.dp_extractor,
type_hints=self._extract_type_hints(header_list),
)
|
:raises ValueError:
:raises pytablereader.error.ValidationError:
|
entailment
|
def list_builds(self, field_selector=None, koji_task_id=None, running=None,
labels=None):
"""
List builds with matching fields
:param field_selector: str, field selector for Builds
:param koji_task_id: str, only list builds for Koji Task ID
:return: BuildResponse list
"""
if running:
running_fs = ",".join(["status!={status}".format(status=status.capitalize())
for status in BUILD_FINISHED_STATES])
if not field_selector:
field_selector = running_fs
else:
field_selector = ','.join([field_selector, running_fs])
response = self.os.list_builds(field_selector=field_selector,
koji_task_id=koji_task_id, labels=labels)
serialized_response = response.json()
build_list = []
for build in serialized_response["items"]:
build_list.append(BuildResponse(build, self))
return build_list
|
List builds with matching fields
:param field_selector: str, field selector for Builds
:param koji_task_id: str, only list builds for Koji Task ID
:return: BuildResponse list
|
entailment
|
def get_pod_for_build(self, build_id):
"""
:return: PodResponse object for pod relating to the build
"""
pods = self.os.list_pods(label='openshift.io/build.name=%s' % build_id)
serialized_response = pods.json()
pod_list = [PodResponse(pod) for pod in serialized_response["items"]]
if not pod_list:
raise OsbsException("No pod for build")
elif len(pod_list) != 1:
raise OsbsException("Only one pod expected but %d returned",
len(pod_list))
return pod_list[0]
|
:return: PodResponse object for pod relating to the build
|
entailment
|
def get_build_request(self, build_type=None, inner_template=None,
outer_template=None, customize_conf=None,
arrangement_version=DEFAULT_ARRANGEMENT_VERSION):
"""
return instance of BuildRequest or BuildRequestV2
:param build_type: str, unused
:param inner_template: str, name of inner template for BuildRequest
:param outer_template: str, name of outer template for BuildRequest
:param customize_conf: str, name of customization config for BuildRequest
:param arrangement_version: int, value of the arrangement version
:return: instance of BuildRequest or BuildRequestV2
"""
if build_type is not None:
warnings.warn("build types are deprecated, do not use the build_type argument")
validate_arrangement_version(arrangement_version)
if not arrangement_version or arrangement_version < REACTOR_CONFIG_ARRANGEMENT_VERSION:
build_request = BuildRequest(
build_json_store=self.os_conf.get_build_json_store(),
inner_template=inner_template,
outer_template=outer_template,
customize_conf=customize_conf)
else:
build_request = BuildRequestV2(
build_json_store=self.os_conf.get_build_json_store(),
outer_template=outer_template,
customize_conf=customize_conf)
# Apply configured resource limits.
cpu_limit = self.build_conf.get_cpu_limit()
memory_limit = self.build_conf.get_memory_limit()
storage_limit = self.build_conf.get_storage_limit()
if (cpu_limit is not None or
memory_limit is not None or
storage_limit is not None):
build_request.set_resource_limits(cpu=cpu_limit,
memory=memory_limit,
storage=storage_limit)
return build_request
|
return instance of BuildRequest or BuildRequestV2
:param build_type: str, unused
:param inner_template: str, name of inner template for BuildRequest
:param outer_template: str, name of outer template for BuildRequest
:param customize_conf: str, name of customization config for BuildRequest
:param arrangement_version: int, value of the arrangement version
:return: instance of BuildRequest or BuildRequestV2
|
entailment
|
def create_build_from_buildrequest(self, build_request):
"""
render provided build_request and submit build from it
:param build_request: instance of build.build_request.BuildRequest
:return: instance of build.build_response.BuildResponse
"""
build_request.set_openshift_required_version(self.os_conf.get_openshift_required_version())
build = build_request.render()
response = self.os.create_build(json.dumps(build))
build_response = BuildResponse(response.json(), self)
return build_response
|
render provided build_request and submit build from it
:param build_request: instance of build.build_request.BuildRequest
:return: instance of build.build_response.BuildResponse
|
entailment
|
def _get_existing_build_config(self, build_config):
"""
Uses the given build config to find an existing matching build config.
Build configs are a match if:
- metadata.labels.git-repo-name AND metadata.labels.git-branch AND
metadata.labels.git-full-repo are equal
OR
- metadata.labels.git-repo-name AND metadata.labels.git-branch are equal AND
metadata.spec.source.git.uri are equal
OR
- metadata.name are equal
"""
bc_labels = build_config['metadata']['labels']
git_labels = {
"label_selectors": [(key, bc_labels[key]) for key in self._GIT_LABEL_KEYS]
}
old_labels_kwargs = {
"label_selectors": [(key, bc_labels[key]) for key in self._OLD_LABEL_KEYS],
"filter_key": FILTER_KEY,
"filter_value": graceful_chain_get(build_config, *FILTER_KEY.split('.'))
}
name = {
"build_config_id": build_config['metadata']['name']
}
queries = (
(self.os.get_build_config_by_labels, git_labels),
(self.os.get_build_config_by_labels_filtered, old_labels_kwargs),
(self.os.get_build_config, name),
)
existing_bc = None
for func, kwargs in queries:
try:
existing_bc = func(**kwargs)
# build config found
break
except OsbsException as exc:
# doesn't exist
logger.info('Build config NOT found via %s: %s',
func.__name__, str(exc))
continue
return existing_bc
|
Uses the given build config to find an existing matching build config.
Build configs are a match if:
- metadata.labels.git-repo-name AND metadata.labels.git-branch AND
metadata.labels.git-full-repo are equal
OR
- metadata.labels.git-repo-name AND metadata.labels.git-branch are equal AND
metadata.spec.source.git.uri are equal
OR
- metadata.name are equal
|
entailment
|
def _get_image_stream_info_for_build_request(self, build_request):
"""Return ImageStream, and ImageStreamTag name for base_image of build_request
If build_request is not auto instantiated, objects are not fetched
and None, None is returned.
"""
image_stream = None
image_stream_tag_name = None
if build_request.has_ist_trigger():
image_stream_tag_id = build_request.trigger_imagestreamtag
image_stream_id, image_stream_tag_name = image_stream_tag_id.split(':')
try:
image_stream = self.get_image_stream(image_stream_id).json()
except OsbsResponseException as x:
if x.status_code != 404:
raise
if image_stream:
try:
self.get_image_stream_tag(image_stream_tag_id).json()
except OsbsResponseException as x:
if x.status_code != 404:
raise
return image_stream, image_stream_tag_name
|
Return the ImageStream and ImageStreamTag name for the base_image of build_request
If build_request is not auto-instantiated, the objects are not fetched
and (None, None) is returned.
|
entailment
|
def create_prod_build(self, *args, **kwargs):
"""
Create a production build
:param git_uri: str, URI of git repository
:param git_ref: str, reference to commit
:param git_branch: str, branch name
:param user: str, user name
:param component: str, not used anymore
:param target: str, koji target
:param architecture: str, build architecture
:param yum_repourls: list, URLs for yum repos
:param koji_task_id: int, koji task ID requesting build
:param scratch: bool, this is a scratch build
:param platform: str, the platform name
:param platforms: list<str>, the name of each platform
:param release: str, the release value to use
:param inner_template: str, name of inner template for BuildRequest
:param outer_template: str, name of outer template for BuildRequest
:param customize_conf: str, name of customization config for BuildRequest
:param arrangement_version: int, numbered arrangement of plugins for orchestration workflow
:param signing_intent: str, signing intent of the ODCS composes
:param compose_ids: list<int>, ODCS composes used
:return: BuildResponse instance
"""
logger.warning("prod (all-in-one) builds are deprecated, "
"please use create_orchestrator_build "
"(support will be removed in version 0.54)")
return self._do_create_prod_build(*args, **kwargs)
|
Create a production build
:param git_uri: str, URI of git repository
:param git_ref: str, reference to commit
:param git_branch: str, branch name
:param user: str, user name
:param component: str, not used anymore
:param target: str, koji target
:param architecture: str, build architecture
:param yum_repourls: list, URLs for yum repos
:param koji_task_id: int, koji task ID requesting build
:param scratch: bool, this is a scratch build
:param platform: str, the platform name
:param platforms: list<str>, the name of each platform
:param release: str, the release value to use
:param inner_template: str, name of inner template for BuildRequest
:param outer_template: str, name of outer template for BuildRequest
:param customize_conf: str, name of customization config for BuildRequest
:param arrangement_version: int, numbered arrangement of plugins for orchestration workflow
:param signing_intent: str, signing intent of the ODCS composes
:param compose_ids: list<int>, ODCS composes used
:return: BuildResponse instance
|
entailment
|
def create_worker_build(self, **kwargs):
"""
Create a worker build
Pass through method to create_prod_build with the following
modifications:
- platform param is required
- release param is required
- arrangement_version param is required, which is used to
select which worker_inner:n.json template to use
- inner template set to worker_inner:n.json if not set
- outer template set to worker.json if not set
- customize configuration set to worker_customize.json if not set
:return: BuildResponse instance
"""
missing = set()
for required in ('platform', 'release', 'arrangement_version'):
if not kwargs.get(required):
missing.add(required)
if missing:
raise ValueError("Worker build missing required parameters: %s" %
missing)
if kwargs.get('platforms'):
raise ValueError("Worker build called with unwanted platforms param")
arrangement_version = kwargs['arrangement_version']
kwargs.setdefault('inner_template', WORKER_INNER_TEMPLATE.format(
arrangement_version=arrangement_version))
kwargs.setdefault('outer_template', WORKER_OUTER_TEMPLATE)
kwargs.setdefault('customize_conf', WORKER_CUSTOMIZE_CONF)
kwargs['build_type'] = BUILD_TYPE_WORKER
try:
return self._do_create_prod_build(**kwargs)
except IOError as ex:
if os.path.basename(ex.filename) == kwargs['inner_template']:
raise OsbsValidationException("worker invalid arrangement_version %s" %
arrangement_version)
raise
|
Create a worker build
Pass through method to create_prod_build with the following
modifications:
- platform param is required
- release param is required
- arrangement_version param is required, which is used to
select which worker_inner:n.json template to use
- inner template set to worker_inner:n.json if not set
- outer template set to worker.json if not set
- customize configuration set to worker_customize.json if not set
:return: BuildResponse instance
|
entailment
|
def create_orchestrator_build(self, **kwargs):
"""
Create an orchestrator build
Pass through method to create_prod_build with the following
modifications:
- platforms param is required
- arrangement_version param may be used to select which
orchestrator_inner:n.json template to use
- inner template set to orchestrator_inner:n.json if not set
- outer template set to orchestrator.json if not set
- customize configuration set to orchestrator_customize.json if not set
:return: BuildResponse instance
"""
if not self.can_orchestrate():
raise OsbsOrchestratorNotEnabled("can't create orchestrate build "
"when can_orchestrate isn't enabled")
extra = [x for x in ('platform',) if kwargs.get(x)]
if extra:
raise ValueError("Orchestrator build called with unwanted parameters: %s" %
extra)
arrangement_version = kwargs.setdefault('arrangement_version',
self.build_conf.get_arrangement_version())
if arrangement_version < REACTOR_CONFIG_ARRANGEMENT_VERSION and not kwargs.get('platforms'):
raise ValueError('Orchestrator build requires platforms param')
kwargs.setdefault('inner_template', ORCHESTRATOR_INNER_TEMPLATE.format(
arrangement_version=arrangement_version))
kwargs.setdefault('outer_template', ORCHESTRATOR_OUTER_TEMPLATE)
kwargs.setdefault('customize_conf', ORCHESTRATOR_CUSTOMIZE_CONF)
kwargs['build_type'] = BUILD_TYPE_ORCHESTRATOR
try:
return self._do_create_prod_build(**kwargs)
except IOError as ex:
if os.path.basename(ex.filename) == kwargs['inner_template']:
raise OsbsValidationException("orchestrator invalid arrangement_version %s" %
arrangement_version)
raise
|
Create an orchestrator build
Pass through method to create_prod_build with the following
modifications:
- platforms param is required
- arrangement_version param may be used to select which
orchestrator_inner:n.json template to use
- inner template set to orchestrator_inner:n.json if not set
- outer template set to orchestrator.json if not set
- customize configuration set to orchestrator_customize.json if not set
:return: BuildResponse instance
|
entailment
|
def get_build_logs(self, build_id, follow=False, build_json=None, wait_if_missing=False,
decode=False):
"""
provide logs from build
NOTE: Since atomic-reactor 1.6.25, logs are always in UTF-8, so if
asked to decode, we assume that is the encoding in use. Otherwise, we
return the bytes exactly as they came from the container.
:param build_id: str
:param follow: bool, fetch logs as they come?
:param build_json: dict, to save one get-build query
:param wait_if_missing: bool, if build doesn't exist, wait
:param decode: bool, whether or not to decode logs as utf-8
:return: None, bytes, or iterable of bytes
"""
logs = self.os.logs(build_id, follow=follow, build_json=build_json,
wait_if_missing=wait_if_missing)
if decode and isinstance(logs, GeneratorType):
return self._decode_build_logs_generator(logs)
# str or None returned from self.os.logs()
if decode and logs is not None:
logs = logs.decode("utf-8").rstrip()
return logs
|
provide logs from build
NOTE: Since atomic-reactor 1.6.25, logs are always in UTF-8, so if
asked to decode, we assume that is the encoding in use. Otherwise, we
return the bytes exactly as they came from the container.
:param build_id: str
:param follow: bool, fetch logs as they come?
:param build_json: dict, to save one get-build query
:param wait_if_missing: bool, if build doesn't exist, wait
:param decode: bool, whether or not to decode logs as utf-8
:return: None, bytes, or iterable of bytes
|
entailment
|
def get_orchestrator_build_logs(self, build_id, follow=False, wait_if_missing=False):
"""
provide logs from orchestrator build
:param build_id: str
:param follow: bool, fetch logs as they come?
:param wait_if_missing: bool, if build doesn't exist, wait
:return: generator yielding objects with attributes 'platform' and 'line'
"""
logs = self.get_build_logs(build_id=build_id, follow=follow,
wait_if_missing=wait_if_missing, decode=True)
if logs is None:
return
if isinstance(logs, GeneratorType):
for entries in logs:
for entry in entries.splitlines():
yield LogEntry(*self._parse_build_log_entry(entry))
else:
for entry in logs.splitlines():
yield LogEntry(*self._parse_build_log_entry(entry))
|
provide logs from orchestrator build
:param build_id: str
:param follow: bool, fetch logs as they come?
:param wait_if_missing: bool, if build doesn't exist, wait
:return: generator yielding objects with attributes 'platform' and 'line'
|
entailment
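A hedged sketch of consuming the generator returned above; 'osbs' and the build name are assumed:
for entry in osbs.get_orchestrator_build_logs("example-build-1", follow=True):
    # LogEntry carries 'platform' (may be None for orchestrator-level lines) and 'line'
    print("[%s] %s" % (entry.platform or "orchestrator", entry.line))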
|
def import_image(self, name, tags=None):
"""
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
"""
stream_import_file = os.path.join(self.os_conf.get_build_json_store(),
'image_stream_import.json')
with open(stream_import_file) as f:
stream_import = json.load(f)
return self.os.import_image(name, stream_import, tags=tags)
|
Import image tags from a Docker registry into an ImageStream
:return: bool, whether tags were imported
|
entailment
|
def import_image_tags(self, name, tags, repository, insecure=False):
"""Import image tags from specified container repository.
:param name: str, name of ImageStream object
:param tags: iterable, tags to be imported
:param repository: str, remote location of container image
in the format <registry>/<repository>
:param insecure: bool, whether the registry is insecure
:return: bool, whether tags were imported
"""
stream_import_file = os.path.join(self.os_conf.get_build_json_store(),
'image_stream_import.json')
with open(stream_import_file) as f:
stream_import = json.load(f)
return self.os.import_image_tags(name, stream_import, tags,
repository, insecure)
|
Import image tags from specified container repository.
:param name: str, name of ImageStream object
:param tags: iterable, tags to be imported
:param repository: str, remote location of container image
in the format <registry>/<repository>
:param insecure: bool, whether the registry is insecure
:return: bool, whether tags were imported
|
entailment
|
def ensure_image_stream_tag(self, stream, tag_name, scheduled=False,
source_registry=None, organization=None, base_image=None):
"""Ensures the tag is monitored in ImageStream
:param stream: dict, ImageStream object
:param tag_name: str, name of tag to check, without name of
ImageStream as prefix
:param scheduled: bool, if True, importPolicy.scheduled will be
set to True in ImageStreamTag
:param source_registry: dict, info about source registry
:param organization: str, organization for registry
:param base_image: str, base image
:return: bool, whether or not modifications were performed
"""
img_stream_tag_file = os.path.join(self.os_conf.get_build_json_store(),
'image_stream_tag.json')
with open(img_stream_tag_file) as f:
tag_template = json.load(f)
repository = None
registry = None
insecure = False
if source_registry:
registry = RegistryURI(source_registry['url']).docker_uri
insecure = source_registry.get('insecure', False)
if base_image and registry:
repository = self._get_enclosed_repo_with_source_registry(base_image,
registry, organization)
return self.os.ensure_image_stream_tag(stream, tag_name, tag_template,
scheduled, repository=repository,
insecure=insecure)
|
Ensures the tag is monitored in ImageStream
:param stream: dict, ImageStream object
:param tag_name: str, name of tag to check, without name of
ImageStream as prefix
:param scheduled: bool, if True, importPolicy.scheduled will be
set to True in ImageStreamTag
:param source_registry: dict, info about source registry
:param organization: str, organization for registry
:param base_image: str, base image
:return: bool, whether or not modifications were performed
|
entailment
|
def create_image_stream(self, name, docker_image_repository,
insecure_registry=False):
"""
Create an ImageStream object
Raises exception on error
:param name: str, name of ImageStream
:param docker_image_repository: str, pull spec for docker image
repository
:param insecure_registry: bool, whether plain HTTP should be used
:return: response
"""
img_stream_file = os.path.join(self.os_conf.get_build_json_store(), 'image_stream.json')
with open(img_stream_file) as f:
stream = json.load(f)
stream['metadata']['name'] = name
stream['metadata'].setdefault('annotations', {})
stream['metadata']['annotations'][ANNOTATION_SOURCE_REPO] = docker_image_repository
if insecure_registry:
stream['metadata']['annotations'][ANNOTATION_INSECURE_REPO] = 'true'
return self.os.create_image_stream(json.dumps(stream))
|
Create an ImageStream object
Raises exception on error
:param name: str, name of ImageStream
:param docker_image_repository: str, pull spec for docker image
repository
:param insecure_registry: bool, whether plain HTTP should be used
:return: response
|
entailment
|
def get_compression_extension(self):
"""
Find the filename extension for the 'docker save' output, which
may or may not be compressed.
Raises OsbsValidationException if the extension cannot be
determined due to a configuration error.
:returns: str including leading dot, or else None if no compression
"""
build_request = BuildRequest(build_json_store=self.os_conf.get_build_json_store())
inner = build_request.inner_template
postbuild_plugins = inner.get('postbuild_plugins', [])
for plugin in postbuild_plugins:
if plugin.get('name') == 'compress':
args = plugin.get('args', {})
method = args.get('method', 'gzip')
if method == 'gzip':
return '.gz'
elif method == 'lzma':
return '.xz'
raise OsbsValidationException("unknown compression method '%s'"
% method)
return None
|
Find the filename extension for the 'docker save' output, which
may or may not be compressed.
Raises OsbsValidationException if the extension cannot be
determined due to a configuration error.
:returns: str including leading dot, or else None if no compression
|
entailment
|
def create_config_map(self, name, data):
"""
Create a ConfigMap object on the server
Raises exception on error
:param name: str, name of configMap
:param data: dict, dictionary of data to be stored
:returns: ConfigMapResponse containing the ConfigMap with name and data
"""
config_data_file = os.path.join(self.os_conf.get_build_json_store(), 'config_map.json')
with open(config_data_file) as f:
config_data = json.load(f)
config_data['metadata']['name'] = name
data_dict = {}
for key, value in data.items():
data_dict[key] = json.dumps(value)
config_data['data'] = data_dict
response = self.os.create_config_map(config_data)
config_map_response = ConfigMapResponse(response.json())
return config_map_response
|
Create a ConfigMap object on the server
Raises exception on error
:param name: str, name of configMap
:param data: dict, dictionary of data to be stored
:returns: ConfigMapResponse containing the ConfigMap with name and data
|
entailment
|
def get_config_map(self, name):
"""
Get a ConfigMap object from the server
Raises exception on error
:param name: str, name of configMap to get from the server
:returns: ConfigMapResponse containing the ConfigMap with the requested name
"""
response = self.os.get_config_map(name)
config_map_response = ConfigMapResponse(response.json())
return config_map_response
|
Get a ConfigMap object from the server
Raises exception on error
:param name: str, name of configMap to get from the server
:returns: ConfigMapResponse containing the ConfigMap with the requested name
|
entailment
|
def retries_disabled(self):
"""
Context manager to disable retries on requests
:returns: OSBS object
"""
self.os.retries_enabled = False
yield
self.os.retries_enabled = True
|
Context manager to disable retries on requests
:returns: OSBS object
|
entailment
|
def wipe(self):
"""
Wipe the bolt database.
Calling this after HoverPy has been instantiated is
potentially dangerous. This function is mostly used
internally for unit tests.
"""
try:
if os.path.isfile(self._dbpath):
os.remove(self._dbpath)
except OSError:
pass
|
Wipe the bolt database.
Calling this after HoverPy has been instantiated is
potentially dangerous. This function is mostly used
internally for unit tests.
|
entailment
|
def simulation(self, data=None):
"""
Gets / Sets the simulation data.
If no data is passed in, then this method acts as a getter.
if data is passed in, then this method acts as a setter.
Keyword arguments:
data -- the simulation data you wish to set (default None)
"""
if data:
return self._session.put(self.__v2() + "/simulation", data=data)
else:
return self._session.get(self.__v2() + "/simulation").json()
|
Gets / Sets the simulation data.
If no data is passed in, then this method acts as a getter.
if data is passed in, then this method acts as a setter.
Keyword arguments:
data -- the simulation data you wish to set (default None)
|
entailment
|
def destination(self, name=""):
"""
Gets / Sets the destination data.
"""
if name:
return self._session.put(
self.__v2() + "/hoverfly/destination",
data={"destination": name}).json()
else:
return self._session.get(
self.__v2() + "/hoverfly/destination").json()
|
Gets / Sets the destination data.
|
entailment
|
def mode(self, mode=None):
"""
Gets / Sets the mode.
If no mode is provided, then this method acts as a getter.
Keyword arguments:
mode -- this should either be 'capture' or 'simulate' (default None)
"""
if mode:
logging.debug("SWITCHING TO %s" % mode)
url = self.__v2() + "/hoverfly/mode"
logging.debug(url)
return self._session.put(
url, data=json.dumps({"mode": mode})).json()["mode"]
else:
return self._session.get(
self.__v2() + "/hoverfly/mode").json()["mode"]
|
Gets / Sets the mode.
If no mode is provided, then this method acts as a getter.
Keyword arguments:
mode -- this should either be 'capture' or 'simulate' (default None)
|
entailment
|
def metadata(self, delete=False):
"""
Gets the metadata.
"""
if delete:
return self._session.delete(self.__v1() + "/metadata").json()
else:
return self._session.get(self.__v1() + "/metadata").json()
|
Gets the metadata.
|
entailment
|
def records(self, data=None):
"""
Gets / Sets records.
"""
if data:
return self._session.post(
self.__v1() + "/records", data=data).json()
else:
return self._session.get(self.__v1() + "/records").json()
|
Gets / Sets records.
|
entailment
|
def delays(self, delays=[]):
"""
Gets / Sets the delays.
"""
if delays:
return self._session.put(
self.__v1() + "/delays", data=json.dumps(delays)).json()
else:
return self._session.get(self.__v1() + "/delays").json()
|
Gets / Sets the delays.
|
entailment
|
def addDelay(self, urlPattern="", delay=0, httpMethod=None):
"""
Adds delays.
"""
print("addDelay is deprecated please use delays instead")
delay = {"urlPattern": urlPattern, "delay": delay}
if httpMethod:
delay["httpMethod"] = httpMethod
return self.delays(delays={"data": [delay]})
|
Adds delays.
|
entailment
|
def __enableProxy(self):
"""
Set the required environment variables to enable the use of hoverfly as a proxy.
"""
os.environ[
"HTTP_PROXY"] = self.httpProxy()
os.environ[
"HTTPS_PROXY"] = self.httpsProxy()
os.environ["REQUESTS_CA_BUNDLE"] = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
"cert.pem")
|
Set the required environment variables to enable the use of hoverfly as a proxy.
|
entailment
|
def __writepid(self, pid):
"""
HoverFly fails to launch if it's already running on
the same ports. So we have to keep track of them using
temp files with the proxy port and admin port, containing
the process's PID.
"""
import tempfile
d = tempfile.gettempdir()
name = os.path.join(d, "hoverpy.%i.%i"%(self._proxyPort, self._adminPort))
with open(name, 'w') as f:
f.write(str(pid))
logging.debug("writing to %s"%name)
|
HoverFly fails to launch if it's already running on
the same ports. So we have to keep track of them using
temp files with the proxy port and admin port, containing
the process's PID.
|
entailment
|
def __rmpid(self):
"""
Remove the PID file on shutdown, unfortunately
this may not get called if not given the time to
shut down.
"""
import tempfile
d = tempfile.gettempdir()
name = os.path.join(d, "hoverpy.%i.%i"%(self._proxyPort, self._adminPort))
if os.path.exists(name):
os.unlink(name)
logging.debug("deleting %s"%name)
|
Remove the PID file on shutdown, unfortunately
this may not get called if not given the time to
shut down.
|
entailment
|
def __kill_if_not_shut_properly(self):
"""
If the HoverFly process on these given ports
did not shut down correctly, then kill the pid
before launching a new instance.
todo: this will kill existing HoverFly processes
on those ports indiscriminately
"""
import tempfile
d = tempfile.gettempdir()
name = os.path.join(d, "hoverpy.%i.%i"%(self._proxyPort, self._adminPort))
if os.path.exists(name):
logging.debug("pid file exists.. killing it")
f = open(name, "r")
pid = int(f.read())
try:
import signal
os.kill(pid, signal.SIGTERM)
logging.debug("killing %i"%pid)
except:
logging.debug("nothing to clean up")
pass
finally:
os.unlink(name)
|
If the HoverFly process on these given ports
did not shut down correctly, then kill the pid
before launching a new instance.
todo: this will kill existing HoverFly processes
on those ports indiscriminately
|
entailment
|
def __start(self):
"""
Start the hoverfly process.
This function waits until it can make contact
with the hoverfly API before returning.
"""
logging.debug("starting %i" % id(self))
self.__kill_if_not_shut_properly()
self.FNULL = open(os.devnull, 'w')
flags = self.__flags()
cmd = [hoverfly] + flags
if self._showCmd:
print(cmd)
self._process = Popen(
cmd,
stdin=self.FNULL,
stdout=self.FNULL,
stderr=subprocess.STDOUT)
start = time.time()
while time.time() - start < 1:
try:
url = "http://%s:%i/api/health" % (self._host, self._adminPort)
r = self._session.get(url)
j = r.json()
up = "message" in j and "healthy" in j["message"]
if up:
logging.debug("has pid %i" % self._process.pid)
self.__writepid(self._process.pid)
return self._process
else:
time.sleep(1/100.0)
except:
# import traceback
# traceback.print_exc()
# wait 10 ms before trying again
time.sleep(1/100.0)
pass
logging.error("Could not start hoverfly!")
raise ValueError("Could not start hoverfly!")
|
Start the hoverfly process.
This function waits until it can make contact
with the hoverfly API before returning.
|
entailment
|
def __stop(self):
"""
Stop the hoverfly process.
"""
if logging:
logging.debug("stopping")
self._process.terminate()
# communicate means we wait until the process
# was actually terminated, this removes some
# warnings in python3
self._process.communicate()
self._process = None
self.FNULL.close()
self.FNULL = None
self.__disableProxy()
# del self._session
# self._session = None
self.__rmpid()
|
Stop the hoverfly process.
|
entailment
|
def __flags(self):
"""
Internal method. Turns arguments into flags.
"""
flags = []
if self._capture:
flags.append("-capture")
if self._spy:
flags.append("-spy")
if self._dbpath:
flags += ["-db-path", self._dbpath]
flags += ["-db", "boltdb"]
else:
flags += ["-db", "memory"]
if self._synthesize:
assert(self._middleware)
flags += ["-synthesize"]
if self._simulation:
flags += ["-import", self._simulation]
if self._proxyPort:
flags += ["-pp", str(self._proxyPort)]
if self._adminPort:
flags += ["-ap", str(self._adminPort)]
if self._modify:
flags += ["-modify"]
if self._verbose:
flags += ["-v"]
if self._dev:
flags += ["-dev"]
if self._metrics:
flags += ["-metrics"]
if self._auth:
flags += ["-auth"]
if self._middleware:
flags += ["-middleware", self._middleware]
if self._cert:
flags += ["-cert", self._cert]
if self._certName:
flags += ["-cert-name", self._certName]
if self._certOrg:
flags += ["-cert-org", self._certOrg]
if self._destination:
flags += ["-destination", self._destination]
if self._key:
flags += ["-key", self._key]
if self._dest:
for i in range(len(self._dest)):
flags += ["-dest", self._dest[i]]
if self._generateCACert:
flags += ["-generate-ca-cert"]
if not self._tlsVerification:
flags += ["-tls-verification", "false"]
logging.debug("flags:" + str(flags))
return flags
|
Internal method. Turns arguments into flags.
|
entailment
|
def load(self):
"""
Extract tabular data as |TableData| instances from a JSON file.
|load_source_desc_file|
This method can load the following JSON formats:
**(1)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (1): single table
{
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (1)
[
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
]
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(2)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (2): single table
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (2)
{
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
}
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(3)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (3): single table
{
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (3)
{
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
}
The example data will be loaded as the following tabular data:
.. table::
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
**(4)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (4): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (4)
{
"table_a" : [
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
],
"table_b" : [
{"a": 1, "b": 4},
{"a": 2 },
{"a": 3, "b": 120.9}
]
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|2| None|
+-+-----+
|3|120.9|
+-+-----+
**(5)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (5): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (5)
{
"table_a" : {
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
},
"table_b" : {
"a": [1, 3],
"b": [4, 120.9]
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|3|120.9|
+-+-----+
**(6)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (6): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (6)
{
"table_a": {
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
},
"table_b": {
"a": 4,
"b": 120.9
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
.. table:: table_b
+---+-----+
|key|value|
+===+=====+
|a | 4.0|
+---+-----+
|b |120.9|
+---+-----+
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` |filename_desc|
``%(key)s``          | This is replaced with a different value
| for single/multiple JSON tables:
| [single JSON table]
| ``%(format_name)s%(format_id)s``
| [multiple JSON table] Table data key.
``%(format_name)s`` ``"json"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the data is invalid JSON.
:raises pytablereader.error.ValidationError:
If the data is not acceptable JSON format.
"""
formatter = JsonTableFormatter(self.load_dict())
formatter.accept(self)
return formatter.to_table_data()
|
Extract tabular data as |TableData| instances from a JSON file.
|load_source_desc_file|
This method can load the following JSON formats:
**(1)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (1): single table
{
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (1)
[
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
]
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(2)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (2): single table
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (2)
{
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
}
The example data will be loaded as the following tabular data:
.. table::
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
**(3)** Single table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (3): single table
{
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (3)
{
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
}
The example data will be loaded as the following tabular data:
.. table::
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
**(4)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (4): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (4)
{
"table_a" : [
{"attr_b": 4, "attr_c": "a", "attr_a": 1},
{"attr_b": 2.1, "attr_c": "bb", "attr_a": 2},
{"attr_b": 120.9, "attr_c": "ccc", "attr_a": 3}
],
"table_b" : [
{"a": 1, "b": 4},
{"a": 2 },
{"a": 3, "b": 120.9}
]
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|2| None|
+-+-----+
|3|120.9|
+-+-----+
**(5)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (5): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (5)
{
"table_a" : {
"attr_a": [1, 2, 3],
"attr_b": [4, 2.1, 120.9],
"attr_c": ["a", "bb", "ccc"]
},
"table_b" : {
"a": [1, 3],
"b": [4, 120.9]
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+------+------+------+
|attr_a|attr_b|attr_c|
+======+======+======+
| 1| 4.0|a |
+------+------+------+
| 2| 2.1|bb |
+------+------+------+
| 3| 120.9|ccc |
+------+------+------+
.. table:: table_b
+-+-----+
|a| b |
+=+=====+
|1| 4.0|
+-+-----+
|3|120.9|
+-+-----+
**(6)** Multiple table data in a file:
.. code-block:: json
:caption: Acceptable JSON Schema (6): multiple tables
{
"type": "object",
"additionalProperties": {
"type": "object",
"additionalProperties": {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"},
{"type": "null"}
]
}
}
}
.. code-block:: json
:caption: Acceptable JSON example for the JSON schema (6)
{
"table_a": {
"num_ratings": 27,
"support_threads": 1,
"downloaded": 925716,
"last_updated":"2017-12-01 6:22am GMT",
"added":"2010-01-20",
"num": 1.1,
"hoge": null
},
"table_b": {
"a": 4,
"b": 120.9
}
}
The example data will be loaded as the following tabular data:
.. table:: table_a
+---------------+---------------------+
| key | value |
+===============+=====================+
|num_ratings | 27|
+---------------+---------------------+
|support_threads| 1|
+---------------+---------------------+
|downloaded | 925716|
+---------------+---------------------+
|last_updated |2017-12-01 6:22am GMT|
+---------------+---------------------+
|added |2010-01-20 |
+---------------+---------------------+
|num | 1.1|
+---------------+---------------------+
|hoge |None |
+---------------+---------------------+
.. table:: table_b
+---+-----+
|key|value|
+===+=====+
|a | 4.0|
+---+-----+
|b |120.9|
+---+-----+
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` |filename_desc|
``%(key)s``          | This is replaced with a different value
| for single/multiple JSON tables:
| [single JSON table]
| ``%(format_name)s%(format_id)s``
| [multiple JSON table] Table data key.
``%(format_name)s`` ``"json"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the data is invalid JSON.
:raises pytablereader.error.ValidationError:
If the data is not acceptable JSON format.
|
entailment
|
def load(self):
"""
Extract tabular data as |TableData| instances from a MediaWiki file.
|load_source_desc_file|
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` |filename_desc|
``%(key)s``          | This is replaced with:
| **(1)** ``caption`` mark of the table
| **(2)** ``%(format_name)s%(format_id)s``
| if ``caption`` mark not included
| in the table.
``%(format_name)s`` ``"mediawiki"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the MediaWiki data is invalid or empty.
"""
self._validate()
self._logger.logging_load()
self.encoding = get_file_encoding(self.source, self.encoding)
with io.open(self.source, "r", encoding=self.encoding) as fp:
formatter = MediaWikiTableFormatter(fp.read())
formatter.accept(self)
return formatter.to_table_data()
|
Extract tabular data as |TableData| instances from a MediaWiki file.
|load_source_desc_file|
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` |filename_desc|
``%(key)s``          | This is replaced with:
| **(1)** ``caption`` mark of the table
| **(2)** ``%(format_name)s%(format_id)s``
| if ``caption`` mark not included
| in the table.
``%(format_name)s`` ``"mediawiki"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the MediaWiki data is invalid or empty.
|
entailment
|
def load(self):
"""
Extract tabular data as |TableData| instances from a MediaWiki text
object.
|load_source_desc_text|
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` ``""``
``%(key)s``          | This is replaced with:
| **(1)** ``caption`` mark of the table
| **(2)** ``%(format_name)s%(format_id)s``
| if ``caption`` mark not included
| in the table.
``%(format_name)s`` ``"mediawiki"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the MediaWiki data is invalid or empty.
"""
self._validate()
self._logger.logging_load()
formatter = MediaWikiTableFormatter(self.source)
formatter.accept(self)
return formatter.to_table_data()
|
Extract tabular data as |TableData| instances from a MediaWiki text
object.
|load_source_desc_text|
:return:
Loaded table data iterator.
|load_table_name_desc|
=================== ==============================================
Format specifier Value after the replacement
=================== ==============================================
``%(filename)s`` ``""``
``%(key)s``          | This is replaced with:
| **(1)** ``caption`` mark of the table
| **(2)** ``%(format_name)s%(format_id)s``
| if ``caption`` mark not included
| in the table.
``%(format_name)s`` ``"mediawiki"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ==============================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the MediaWiki data is invalid or empty.
|
entailment
|
def get_dock_json(self):
""" return dock json from existing build json """
env_json = self.build_json['spec']['strategy']['customStrategy']['env']
try:
p = [env for env in env_json if env["name"] == "ATOMIC_REACTOR_PLUGINS"]
except TypeError:
raise RuntimeError("\"env\" is not iterable")
if len(p) <= 0:
raise RuntimeError("\"env\" misses key ATOMIC_REACTOR_PLUGINS")
dock_json_str = p[0]['value']
dock_json = json.loads(dock_json_str)
return dock_json
|
return dock json from existing build json
|
entailment
|
def dock_json_get_plugin_conf(self, plugin_type, plugin_name):
"""
Return the configuration for a plugin.
Raises KeyError if there are no plugins of that type.
Raises IndexError if the named plugin is not listed.
"""
match = [x for x in self.dock_json[plugin_type] if x.get('name') == plugin_name]
return match[0]
|
Return the configuration for a plugin.
Raises KeyError if there are no plugins of that type.
Raises IndexError if the named plugin is not listed.
|
entailment
|
def remove_plugin(self, plugin_type, plugin_name):
"""
if config contains plugin, remove it
"""
for p in self.dock_json[plugin_type]:
if p.get('name') == plugin_name:
self.dock_json[plugin_type].remove(p)
break
|
if config contains plugin, remove it
|
entailment
|
def add_plugin(self, plugin_type, plugin_name, args_dict):
"""
if config has plugin, override it, else add it
"""
plugin_modified = False
for plugin in self.dock_json[plugin_type]:
if plugin['name'] == plugin_name:
plugin['args'] = args_dict
plugin_modified = True
if not plugin_modified:
self.dock_json[plugin_type].append({"name": plugin_name, "args": args_dict})
|
if config has plugin, override it, else add it
|
entailment
|
def dock_json_has_plugin_conf(self, plugin_type, plugin_name):
"""
Check whether a plugin is configured.
"""
try:
self.dock_json_get_plugin_conf(plugin_type, plugin_name)
return True
except (KeyError, IndexError):
return False
|
Check whether a plugin is configured.
|
entailment
|
def create_from_path(self):
"""
Create a file loader from the file extension of the URL.
Supported file extensions are as follows:
========================================= =====================================
Extension Loader
========================================= =====================================
``"csv"`` :py:class:`~.CsvTableTextLoader`
``"xls"``/``"xlsx"`` :py:class:`~.ExcelTableFileLoader`
``"htm"``/``"html"``/``"asp"``/``"aspx"`` :py:class:`~.HtmlTableTextLoader`
``"json"`` :py:class:`~.JsonTableTextLoader`
``"jsonl"``/``"ldjson"``/``"ndjson"`` :py:class:`~.JsonLinesTableTextLoader`
``"ltsv"`` :py:class:`~.LtsvTableTextLoader`
``"md"`` :py:class:`~.MarkdownTableTextLoader`
``"sqlite"``/``"sqlite3"`` :py:class:`~.SqliteFileLoader`
``"tsv"`` :py:class:`~.TsvTableTextLoader`
========================================= =====================================
:return:
Loader that coincides with the file extension of the URL.
:raises pytablereader.UrlError: If unacceptable URL format.
:raises pytablereader.LoaderNotFoundError:
|LoaderNotFoundError_desc| loading the URL.
"""
import requests
url_path = urlparse(self.__url).path
try:
url_extension = get_extension(url_path.rstrip("/"))
except InvalidFilePathError:
raise UrlError("url must include path")
logger.debug("TableUrlLoaderFactory: extension={}".format(url_extension))
loader_class = self._get_loader_class(self._get_extension_loader_mapping(), url_extension)
try:
self._fetch_source(loader_class)
except requests.exceptions.ProxyError as e:
raise ProxyError(e)
loader = self._create_from_extension(url_extension)
logger.debug("TableUrlLoaderFactory: loader={}".format(loader.format_name))
return loader
|
Create a file loader from the file extension of the URL.
Supported file extensions are as follows:
========================================= =====================================
Extension Loader
========================================= =====================================
``"csv"`` :py:class:`~.CsvTableTextLoader`
``"xls"``/``"xlsx"`` :py:class:`~.ExcelTableFileLoader`
``"htm"``/``"html"``/``"asp"``/``"aspx"`` :py:class:`~.HtmlTableTextLoader`
``"json"`` :py:class:`~.JsonTableTextLoader`
``"jsonl"``/``"ldjson"``/``"ndjson"`` :py:class:`~.JsonLinesTableTextLoader`
``"ltsv"`` :py:class:`~.LtsvTableTextLoader`
``"md"`` :py:class:`~.MarkdownTableTextLoader`
``"sqlite"``/``"sqlite3"`` :py:class:`~.SqliteFileLoader`
``"tsv"`` :py:class:`~.TsvTableTextLoader`
========================================= =====================================
:return:
Loader that coincides with the file extension of the URL.
:raises pytablereader.UrlError: If unacceptable URL format.
:raises pytablereader.LoaderNotFoundError:
|LoaderNotFoundError_desc| loading the URL.
|
entailment
|
def create_from_format_name(self, format_name):
"""
Create a file loader from a format name.
Supported file formats are as follows:
========================== ======================================
Format name Loader
========================== ======================================
``"csv"`` :py:class:`~.CsvTableTextLoader`
``"excel"`` :py:class:`~.ExcelTableFileLoader`
``"html"`` :py:class:`~.HtmlTableTextLoader`
``"json"`` :py:class:`~.JsonTableTextLoader`
``"json_lines"`` :py:class:`~.JsonLinesTableTextLoader`
``"jsonl"`` :py:class:`~.JsonLinesTableTextLoader`
``"ldjson"`` :py:class:`~.JsonLinesTableTextLoader`
``"ltsv"`` :py:class:`~.LtsvTableTextLoader`
``"markdown"`` :py:class:`~.MarkdownTableTextLoader`
``"mediawiki"`` :py:class:`~.MediaWikiTableTextLoader`
``"ndjson"`` :py:class:`~.JsonLinesTableTextLoader`
``"sqlite"`` :py:class:`~.SqliteFileLoader`
``"ssv"`` :py:class:`~.CsvTableFileLoader`
``"tsv"`` :py:class:`~.TsvTableTextLoader`
========================== ======================================
:param str format_name: Format name string (case insensitive).
:return: Loader that coincides with the ``format_name``.
:raises pytablereader.LoaderNotFoundError:
|LoaderNotFoundError_desc| the format.
:raises TypeError: If ``format_name`` is not a string.
"""
import requests
logger.debug("TableUrlLoaderFactory: name={}".format(format_name))
loader_class = self._get_loader_class(self._get_format_name_loader_mapping(), format_name)
try:
self._fetch_source(loader_class)
except requests.exceptions.ProxyError as e:
raise ProxyError(e)
loader = self._create_from_format_name(format_name)
logger.debug("TableUrlLoaderFactory: loader={}".format(loader.format_name))
return loader
|
Create a file loader from a format name.
Supported file formats are as follows:
========================== ======================================
Format name Loader
========================== ======================================
``"csv"`` :py:class:`~.CsvTableTextLoader`
``"excel"`` :py:class:`~.ExcelTableFileLoader`
``"html"`` :py:class:`~.HtmlTableTextLoader`
``"json"`` :py:class:`~.JsonTableTextLoader`
``"json_lines"`` :py:class:`~.JsonLinesTableTextLoader`
``"jsonl"`` :py:class:`~.JsonLinesTableTextLoader`
``"ldjson"`` :py:class:`~.JsonLinesTableTextLoader`
``"ltsv"`` :py:class:`~.LtsvTableTextLoader`
``"markdown"`` :py:class:`~.MarkdownTableTextLoader`
``"mediawiki"`` :py:class:`~.MediaWikiTableTextLoader`
``"ndjson"`` :py:class:`~.JsonLinesTableTextLoader`
``"sqlite"`` :py:class:`~.SqliteFileLoader`
``"ssv"`` :py:class:`~.CsvTableFileLoader`
``"tsv"`` :py:class:`~.TsvTableTextLoader`
========================== ======================================
:param str format_name: Format name string (case insensitive).
:return: Loader that coincides with the ``format_name``.
:raises pytablereader.LoaderNotFoundError:
|LoaderNotFoundError_desc| the format.
:raises TypeError: If ``format_name`` is not a string.
|
entailment
|
def _get_extension_loader_mapping(self):
"""
:return: Mappings of format-extension and loader class.
:rtype: dict
"""
loader_table = self._get_common_loader_mapping()
loader_table.update(
{
"asp": HtmlTableTextLoader,
"aspx": HtmlTableTextLoader,
"htm": HtmlTableTextLoader,
"md": MarkdownTableTextLoader,
"sqlite3": SqliteFileLoader,
"xls": ExcelTableFileLoader,
"xlsx": ExcelTableFileLoader,
}
)
return loader_table
|
:return: Mappings of format-extension and loader class.
:rtype: dict
|
entailment
|
def _get_format_name_loader_mapping(self):
"""
:return: Mappings of format-name and loader class.
:rtype: dict
"""
loader_table = self._get_common_loader_mapping()
loader_table.update(
{
"excel": ExcelTableFileLoader,
"json_lines": JsonLinesTableTextLoader,
"markdown": MarkdownTableTextLoader,
"mediawiki": MediaWikiTableTextLoader,
"ssv": CsvTableFileLoader,
}
)
return loader_table
|
:return: Mappings of format-name and loader class.
:rtype: dict
|
entailment
|
def get_container_image_ids(self):
"""
Find the image IDs the containers use.
:return: dict, image tag to docker ID
"""
statuses = graceful_chain_get(self.json, "status", "containerStatuses")
if statuses is None:
return {}
def remove_prefix(image_id, prefix):
if image_id.startswith(prefix):
return image_id[len(prefix):]
return image_id
return {status['image']: remove_prefix(status['imageID'], 'docker://')
for status in statuses}
|
Find the image IDs the containers use.
:return: dict, image tag to docker ID
|
entailment
|
def get_failure_reason(self):
"""
Find the reason a pod failed
:return: dict, which will always have key 'reason':
reason: brief reason for state
containerID (if known): ID of container
exitCode (if known): numeric exit code
"""
reason_key = 'reason'
cid_key = 'containerID'
exit_key = 'exitCode'
pod_status = self.json.get('status', {})
statuses = pod_status.get('containerStatuses', [])
# Find the first non-zero exit code from a container
# and return its 'message' or 'reason' value
for status in statuses:
try:
terminated = status['state']['terminated']
exit_code = terminated['exitCode']
if exit_code != 0:
reason_dict = {
exit_key: exit_code,
}
if 'containerID' in terminated:
reason_dict[cid_key] = terminated['containerID']
for key in ['message', 'reason']:
try:
reason_dict[reason_key] = terminated[key]
break
except KeyError:
continue
else:
# Both 'message' and 'reason' are missing
reason_dict[reason_key] = 'Exit code {code}'.format(
code=exit_code
)
return reason_dict
except KeyError:
continue
# Failing that, return the 'message' or 'reason' value for the
# pod
for key in ['message', 'reason']:
try:
return {reason_key: pod_status[key]}
except KeyError:
continue
return {reason_key: pod_status['phase']}
|
Find the reason a pod failed
:return: dict, which will always have key 'reason':
reason: brief reason for state
containerID (if known): ID of container
exitCode (if known): numeric exit code
|
entailment
|
def get_error_message(self):
"""
Return an error message based on atomic-reactor's metadata
"""
error_reason = self.get_error_reason()
if error_reason:
error_message = error_reason.get('pod') or None
if error_message:
return "Error in pod: %s" % error_message
plugin = error_reason.get('plugin')[0] or None
error_message = error_reason.get('plugin')[1] or None
if error_message:
# Plugin has non-empty error description
return "Error in plugin %s: %s" % (plugin, error_message)
else:
return "Error in plugin %s" % plugin
|
Return an error message based on atomic-reactor's metadata
|
entailment
|
def create_from_path(self):
"""
Create a file loader from the file extension of the file to load.
Supported file extensions are as follows:
========================== =======================================
Extension Loader
========================== =======================================
``"csv"`` :py:class:`~.CsvTableFileLoader`
``"xls"``/``"xlsx"`` :py:class:`~.ExcelTableFileLoader`
``"htm"``/``"html"`` :py:class:`~.HtmlTableFileLoader`
``"json"`` :py:class:`~.JsonTableFileLoader`
``"jsonl"`` :py:class:`~.JsonLinesTableFileLoader`
``"ldjson"`` :py:class:`~.JsonLinesTableFileLoader`
``"ltsv"`` :py:class:`~.LtsvTableFileLoader`
``"md"`` :py:class:`~.MarkdownTableFileLoader`
``"ndjson"`` :py:class:`~.JsonLinesTableFileLoader`
``"sqlite"``/``"sqlite3"`` :py:class:`~.SqliteFileLoader`
``"tsv"`` :py:class:`~.TsvTableFileLoader`
========================== =======================================
:return:
Loader that coincides with the file extension of the
:py:attr:`.file_extension`.
:raises pytablereader.LoaderNotFoundError:
|LoaderNotFoundError_desc| loading the file.
"""
loader = self._create_from_extension(self.file_extension)
logger.debug(
"TableFileLoaderFactory.create_from_path: extension={}, loader={}".format(
self.file_extension, loader.format_name
)
)
return loader
|
Create a file loader from the file extension of the file to load.
Supported file extensions are as follows:
========================== =======================================
Extension Loader
========================== =======================================
``"csv"`` :py:class:`~.CsvTableFileLoader`
``"xls"``/``"xlsx"`` :py:class:`~.ExcelTableFileLoader`
``"htm"``/``"html"`` :py:class:`~.HtmlTableFileLoader`
``"json"`` :py:class:`~.JsonTableFileLoader`
``"jsonl"`` :py:class:`~.JsonLinesTableFileLoader`
``"ldjson"`` :py:class:`~.JsonLinesTableFileLoader`
``"ltsv"`` :py:class:`~.LtsvTableFileLoader`
``"md"`` :py:class:`~.MarkdownTableFileLoader`
``"ndjson"`` :py:class:`~.JsonLinesTableFileLoader`
``"sqlite"``/``"sqlite3"`` :py:class:`~.SqliteFileLoader`
``"tsv"`` :py:class:`~.TsvTableFileLoader`
========================== =======================================
:return:
Loader that coincides with the file extension of the
:py:attr:`.file_extension`.
:raises pytablereader.LoaderNotFoundError:
|LoaderNotFoundError_desc| loading the file.
|
entailment
|
def create_from_format_name(self, format_name):
"""
Create a file loader from a format name.
Supported file formats are as follows:
================ ======================================
Format name Loader
================ ======================================
``"csv"`` :py:class:`~.CsvTableFileLoader`
``"excel"`` :py:class:`~.ExcelTableFileLoader`
``"html"`` :py:class:`~.HtmlTableFileLoader`
``"json"`` :py:class:`~.JsonTableFileLoader`
``"json"`` :py:class:`~.JsonTableFileLoader`
``"json_lines"`` :py:class:`~.JsonTableFileLoader`
``"jsonl"`` :py:class:`~.JsonLinesTableFileLoader`
``"ltsv"`` :py:class:`~.LtsvTableFileLoader`
``"markdown"`` :py:class:`~.MarkdownTableFileLoader`
``"mediawiki"`` :py:class:`~.MediaWikiTableFileLoader`
``"ndjson"`` :py:class:`~.JsonLinesTableFileLoader`
``"sqlite"`` :py:class:`~.SqliteFileLoader`
``"ssv"`` :py:class:`~.CsvTableFileLoader`
``"tsv"`` :py:class:`~.TsvTableFileLoader`
================ ======================================
:param str format_name: Format name string (case insensitive).
:return: Loader that coincides with the ``format_name``:
:raises pytablereader.LoaderNotFoundError:
|LoaderNotFoundError_desc| the format.
"""
loader = self._create_from_format_name(format_name)
logger.debug(
"TableFileLoaderFactory.create_from_format_name: name={}, loader={}".format(
format_name, loader.format_name
)
)
return loader
|
Create a file loader from a format name.
Supported file formats are as follows:
================ ======================================
Format name Loader
================ ======================================
``"csv"`` :py:class:`~.CsvTableFileLoader`
``"excel"`` :py:class:`~.ExcelTableFileLoader`
``"html"`` :py:class:`~.HtmlTableFileLoader`
``"json"`` :py:class:`~.JsonTableFileLoader`
``"json"`` :py:class:`~.JsonTableFileLoader`
``"json_lines"`` :py:class:`~.JsonTableFileLoader`
``"jsonl"`` :py:class:`~.JsonLinesTableFileLoader`
``"ltsv"`` :py:class:`~.LtsvTableFileLoader`
``"markdown"`` :py:class:`~.MarkdownTableFileLoader`
``"mediawiki"`` :py:class:`~.MediaWikiTableFileLoader`
``"ndjson"`` :py:class:`~.JsonLinesTableFileLoader`
``"sqlite"`` :py:class:`~.SqliteFileLoader`
``"ssv"`` :py:class:`~.CsvTableFileLoader`
``"tsv"`` :py:class:`~.TsvTableFileLoader`
================ ======================================
:param str format_name: Format name string (case insensitive).
:return: Loader that coincides with the ``format_name``:
:raises pytablereader.LoaderNotFoundError:
|LoaderNotFoundError_desc| the format.
|
entailment
|
def _get_extension_loader_mapping(self):
"""
:return: Mappings of format extension and loader class.
:rtype: dict
"""
loader_table = self._get_common_loader_mapping()
loader_table.update(
{
"htm": HtmlTableFileLoader,
"md": MarkdownTableFileLoader,
"sqlite3": SqliteFileLoader,
"xlsx": ExcelTableFileLoader,
"xls": ExcelTableFileLoader,
}
)
return loader_table
|
:return: Mappings of format extension and loader class.
:rtype: dict
|
entailment
|
def _get_format_name_loader_mapping(self):
"""
:return: Mappings of format name and loader class.
:rtype: dict
"""
loader_table = self._get_common_loader_mapping()
loader_table.update(
{
"excel": ExcelTableFileLoader,
"json_lines": JsonLinesTableFileLoader,
"markdown": MarkdownTableFileLoader,
"mediawiki": MediaWikiTableFileLoader,
"ssv": CsvTableFileLoader,
}
)
return loader_table
|
:return: Mappings of format name and loader class.
:rtype: dict
|
entailment
|
def load(self):
"""
Extract tabular data as |TableData| instances from an Excel file.
|spreadsheet_load_desc|
:return:
Loaded |TableData| iterator.
|TableData| created for each sheet in the workbook.
|load_table_name_desc|
=================== ====================================
Format specifier Value after the replacement
=================== ====================================
``%(filename)s`` Filename of the workbook
``%(sheet)s`` Name of the sheet
``%(format_name)s`` ``"spreadsheet"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ====================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the header row is not found.
:raises pytablereader.error.OpenError:
If failed to open the source file.
"""
import xlrd
self._validate()
self._logger.logging_load()
try:
workbook = xlrd.open_workbook(self.source)
except xlrd.biffh.XLRDError as e:
raise OpenError(e)
for worksheet in workbook.sheets():
self._worksheet = worksheet
if self._is_empty_sheet():
continue
self.__extract_not_empty_col_idx()
try:
start_row_idx = self._get_start_row_idx()
except DataError:
continue
rows = [
self.__get_row_values(row_idx)
for row_idx in range(start_row_idx + 1, self._row_count)
]
self.inc_table_count()
headers = self.__get_row_values(start_row_idx)
yield TableData(
self._make_table_name(),
headers,
rows,
dp_extractor=self.dp_extractor,
type_hints=self._extract_type_hints(headers),
)
|
Extract tabular data as |TableData| instances from an Excel file.
|spreadsheet_load_desc|
:return:
Loaded |TableData| iterator.
|TableData| created for each sheet in the workbook.
|load_table_name_desc|
=================== ====================================
Format specifier Value after the replacement
=================== ====================================
``%(filename)s`` Filename of the workbook
``%(sheet)s`` Name of the sheet
``%(format_name)s`` ``"spreadsheet"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ====================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the header row is not found.
:raises pytablereader.error.OpenError:
If failed to open the source file.
|
entailment
|
def load(self):
"""
Extract tabular data as |TableData| instances from a LTSV file.
|load_source_desc_file|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` |filename_desc|
``%(format_name)s`` ``"ltsv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.InvalidHeaderNameError:
If an invalid label name is included in the LTSV file.
:raises pytablereader.DataError:
If the LTSV data is invalid.
"""
self._validate()
self._logger.logging_load()
self.encoding = get_file_encoding(self.source, self.encoding)
self._ltsv_input_stream = io.open(self.source, "r", encoding=self.encoding)
for data_matrix in self._to_data_matrix():
formatter = SingleJsonTableConverterA(data_matrix)
formatter.accept(self)
return formatter.to_table_data()
|
Extract tabular data as |TableData| instances from a LTSV file.
|load_source_desc_file|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` |filename_desc|
``%(format_name)s`` ``"ltsv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.InvalidHeaderNameError:
If an invalid label name is included in the LTSV file.
:raises pytablereader.DataError:
If the LTSV data is invalid.
|
entailment
|
def load(self):
"""
Extract tabular data as |TableData| instances from a LTSV text object.
|load_source_desc_text|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` ``""``
``%(format_name)s`` ``"ltsv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.InvalidHeaderNameError:
If an invalid label name is included in the LTSV file.
:raises pytablereader.DataError:
If the LTSV data is invalid.
"""
self._validate()
self._logger.logging_load()
self._ltsv_input_stream = self.source.splitlines()
for data_matrix in self._to_data_matrix():
formatter = SingleJsonTableConverterA(data_matrix)
formatter.accept(self)
return formatter.to_table_data()
|
Extract tabular data as |TableData| instances from a LTSV text object.
|load_source_desc_text|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` ``""``
``%(format_name)s`` ``"ltsv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.InvalidHeaderNameError:
If an invalid label name is included in the LTSV file.
:raises pytablereader.DataError:
If the LTSV data is invalid.
|
entailment
|
def totals(self):
"""
Computes and returns dictionary containing home/away by player, shots and face-off totals
:returns: dict of the form ``{ 'home/away': { 'all_keys': w_numeric_data } }``
"""
def agg(d):
keys = ['g','a','p','pm','pn','pim','s','ab','ms','ht','gv','tk','bs']
res = { k: 0 for k in keys }
res['fo'] = { 'won': 0, 'total': 0 }
for _, v in d.items():
for k in keys:
res[k] += v[k]
for fi in res['fo'].keys():
res['fo'][fi] += v['fo'][fi]
return res
return self.__apply_to_both(agg)
|
Computes and returns dictionary containing home/away by player, shots and face-off totals
:returns: dict of the form ``{ 'home/away': { 'all_keys': w_numeric_data } }``
|
entailment
|
def filter_players(self, pl_filter):
"""
Return the subset home and away players that satisfy the provided filter function.
:param pl_filter: function that takes a by player dictionary and returns bool
:returns: dict of the form ``{ 'home/away': { by_player_dict } }``. See :py:func:`home_players` and :py:func:`away_players`
"""
def each(d):
return {
k: v
for k, v in d.items()
if pl_filter(k, v)
}
return self.__apply_to_both(each)
|
Return the subset home and away players that satisfy the provided filter function.
:param pl_filter: function that takes a by player dictionary and returns bool
:returns: dict of the form ``{ 'home/away': { by_player_dict } }``. See :py:func:`home_players` and :py:func:`away_players`
|
entailment
|
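A sketch of a filter keeping only players with at least one point; 'game' is assumed and 'p' is the points key used by totals() above:
scorers = game.filter_players(lambda name, stats: stats["p"] > 0)
for side in ("home", "away"):
    print(side, sorted(scorers[side]))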