sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _set_results_dir(self):
"""Create results directory if not exists."""
if self.running_instance_id:
self.results_dir = os.path.join(
self.results_dir,
self.cloud,
self.image_id,
self.running_instance_id
)
else:
self.results_dir = os.path.join(
self.results_dir,
self.cloud,
self.instance_ip
)
try:
os.makedirs(self.results_dir)
except OSError as error:
if not os.path.isdir(self.results_dir):
raise IpaCloudException(
'Unable to create ipa results directory: %s' % error
)
self.time_stamp = datetime.now().strftime('%Y%m%d%H%M%S')
self.log_file = ''.join(
[self.results_dir, os.sep, self.time_stamp, '.log']
)
self.logger.debug('Created log file %s' % self.log_file)
self.results_file = ''.join(
[self.results_dir, os.sep, self.time_stamp, '.results']
)
self.logger.debug('Created results file %s' % self.results_file)
# Add log file handler
file_handler = logging.FileHandler(self.log_file)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter('\n%(message)s\n'))
self.logger.addHandler(file_handler) | Create results directory if not exists. | entailment |
def _collect_vm_info(self):
"""
Gather basic info about VM
"""
self.logger.info('Collecting basic info about VM')
client = self._get_ssh_client()
out = self.distro.get_vm_info(client)
self._write_to_log(out) | Gather basic info about VM | entailment |
def _update_history(self):
"""Save the current test information to history json."""
ipa_utils.update_history_log(
self.history_log,
description=self.description,
test_log=self.log_file
) | Save the current test information to history json. | entailment |
def _wait_on_instance(self, state, timeout=600, wait_period=10):
"""Wait until instance is in given state."""
current_state = 'Undefined'
start = time.time()
end = start + timeout
while time.time() < end:
current_state = self._get_instance_state()
if state.lower() == current_state.lower():
return
time.sleep(wait_period)
raise IpaCloudException(
'Instance has not arrived at the given state: {state}'.format(
state=state
)
) | Wait until instance is in given state. | entailment |
def execute_ssh_command(self, client, command):
"""Execute the provided command and log output."""
try:
out = ipa_utils.execute_ssh_command(client, command)
except Exception as error:
raise IpaCloudException(
'Command: "{0}", failed execution: {1}.'.format(
command, error
)
)
else:
self._write_to_log(out) | Execute the provided command and log output. | entailment |
def extract_archive(self, client, archive_path, extract_path=None):
"""Extract the archive files using the client in the current path."""
try:
out = ipa_utils.extract_archive(client, archive_path, extract_path)
except Exception as error:
raise IpaCloudException(
'Failed to extract archive, "{0}": {1}.'.format(
archive_path, error
)
)
else:
self._write_to_log(out) | Extract the archive files using the client in the current path. | entailment |
def hard_reboot_instance(self):
"""Stop then start the instance."""
self._stop_instance()
self._start_instance()
self._set_instance_ip()
self.logger.debug('IP of instance: %s' % self.instance_ip)
ipa_utils.clear_cache() | Stop then start the instance. | entailment |
def install_package(self, client, package):
"""
Install package using distro specific install method.
"""
try:
out = self.distro.install_package(client, package)
except Exception as error:
raise IpaCloudException(
'Failed installing package, "{0}"; {1}.'.format(
package, error
)
)
else:
self._write_to_log(out) | Install package using distro specific install method. | entailment |
def process_injection_file(self, client):
"""
Load yaml file and process injection configuration.
There are 5 injection options:
:inject_packages: an rpm path or list of rpm paths which will be
copied and installed on the test instance.
:inject_archives: an archive or list of archives which will
be copied and extracted on the test instance.
:inject_files: a file path or list of file paths which
will be copied to the test instance.
:execute: a command or list of commands to run on the test instance.
:install: a package name or list of package names to
install from an existing repo on the test instance.
The order of processing is as follows: inject_packages,
inject_archives, inject_files, execute, install.
"""
configuration = ipa_utils.get_yaml_config(self.inject)
if configuration.get('inject_packages'):
inject_packages = configuration['inject_packages']
if not isinstance(inject_packages, list):
inject_packages = [inject_packages]
for package in inject_packages:
package_path = self.put_file(client, package)
self.install_package(client, package_path)
if configuration.get('inject_archives'):
inject_archives = configuration['inject_archives']
if not isinstance(inject_archives, list):
inject_archives = [inject_archives]
for archive in inject_archives:
archive_path = self.put_file(client, archive)
self.extract_archive(client, archive_path)
if configuration.get('inject_files'):
inject_files = configuration['inject_files']
if not isinstance(inject_files, list):
inject_files = [inject_files]
for file_path in inject_files:
self.put_file(client, file_path)
if configuration.get('execute'):
execute = configuration['execute']
if not isinstance(execute, list):
execute = [execute]
for command in execute:
self.execute_ssh_command(client, command)
if configuration.get('install'):
install = configuration['install']
if not isinstance(install, list):
install = [install]
for package in install:
self.install_package(client, package) | Load yaml file and process injection configuration.
There are 5 injection options:
:inject_packages: an rpm path or list of rpm paths which will be
copied and installed on the test instance.
:inject_archives: an archive or list of archives which will
be copied and extracted on the test instance.
:inject_files: a file path or list of file paths which
will be copied to the test instance.
:execute: a command or list of commands to run on the test instance.
:install: a package name or list of package names to
install from an existing repo on the test instance.
The order of processing is as follows: inject_packages,
inject_archives, inject_files, execute, install. | entailment |
def put_file(self, client, source_file):
"""
Put file on instance in default SSH directory.
"""
try:
file_name = os.path.basename(source_file)
ipa_utils.put_file(client, source_file, file_name)
except Exception as error:
raise IpaCloudException(
'Failed copying file, "{0}"; {1}.'.format(
source_file, error
)
)
else:
return file_name | Put file on instance in default SSH directory. | entailment |
def build_transgenic_lines(self):
"""
init class | "transgenic_line_source_name":"stock_number" a Class
add superClass | rdfs:subClassOf ilxtr:transgenicLine
add *order* | ilxtr:useObjectProperty ilxtr:<order>
add name | rdfs:label "name"
add def | definition: "description"
add transtype | rdfs:hasTransgenicType "transgenic_line_type_name"
"""
triples = []
for cell_line in self.neuron_data:
for tl in cell_line['donor']['transgenic_lines']:
_id = tl['stock_number'] if tl['stock_number'] else tl['id']
prefix = tl['transgenic_line_source_name']
line_type = tl['transgenic_line_type_name']
if prefix not in ['JAX', 'MMRRC', 'AIBS']:
print(tc.red('WARNING:'), 'unknown prefix', prefix, json.dumps(tl, indent=4))
continue
elif prefix == 'AIBS':
prefix = 'AllenTL'
_class = self.ns[prefix][str(_id)]
triples.append((_class, rdf.type, owl.Class))
triples.append((_class, rdfs.label, rdflib.Literal(tl['name'])))
triples.append((_class, definition, rdflib.Literal(tl['description'])))
triples.append((_class, rdfs.subClassOf, ilxtr.transgenicLine))
triples.append((_class, ilxtr.hasTransgenicType, ilxtr[line_type + 'Line']))
# TODO aspects.ttl?
transgenic_lines = simpleOnt(filename='allen-transgenic-lines',
path='ttl/generated/',
prefixes=self.prefixes,
triples=triples,
comment='Allen transgenic lines for cell types',
branch=self.branch)
transgenic_lines._graph.write() | init class | "transgenic_line_source_name":"stock_number" a Class
add superClass | rdfs:subClassOf ilxtr:transgenicLine
add *order* | ilxtr:useObjectProperty ilxtr:<order>
add name | rdfs:label "name"
add def | definition: "description"
add transtype | rdfs:hasTransgenicType "transgenic_line_type_name" | entailment |
def decodeIlxResp(resp):
""" We need this until we can get json back directly and this is SUPER nasty"""
lines = [_ for _ in resp.text.split('\n') if _] # strip empties
if 'successfull' in lines[0]:
return [(_.split('"')[1],
ilxIdFix(_.split(': ')[-1]))
for _ in lines[1:]]
elif 'errors' in lines[0]:
return [(_.split('"')[1],
ilxIdFix(_.split('(')[1].split(')')[0]))
for _ in lines[1:]] | We need this until we can get json back directly and this is SUPER nasty | entailment |
def getSubOrder(existing):
""" Alpha sort by the full chain of parents. """
alpha = list(zip(*sorted(((k, v['rec']['label']) for k, v in existing.items()), key=lambda a: a[1])))[0]
depths = {}
def getDepth(id_):
if id_ in depths:
return depths[id_]
else:
if id_ in existing:
names_above = getDepth(existing[id_]['sc'])
depths[id_] = names_above + [existing[id_]['rec']['label']]
return depths[id_]
else:
return ['']
for id_ in existing:
getDepth(id_)
print(sorted(depths.values()))
def key_(id_):
return depths[id_]
return sorted(depths, key=key_) | Alpha sort by the full chain of parents. | entailment |
def clean(string):
''' Begining of the string can sometimes have odd noise '''
# manual fixes in the source
# 24/1.png
# 9/1.png
# 3/1.png
#if ')' in string: # fix in the source data
#string = string.split(')')[0] + ')' # remove trailing garbage
return (string
.replace('_', '')
.replace('-', '')
.replace('—', '')
.replace('.', '')
.replace('=', '')
.replace('\u2018',"'") # LEFT SINGLE QUOTATION MARK
.replace('\u2019', "'") # RIGHT SINGLE QUOTATION MARK
.strip()) | Begining of the string can sometimes have odd noise | entailment |
def loadData(cls):
""" Sigh, this was indeed a poorly conceived approach
since it hard blocks when the files are not in the source
so you can't easily bootstrap from another source and the
cognitive overhead is way, way too high :/
Adding dry_run/bootstrap to __new__ sort of helps? """
""" Have to run this out here because resSource is handicapped """
data = []
if cls.source_images.exists():
for folder in cls.source_images.glob('*'):
plate_num = int(folder.stem)
text_file = cls.source / f'{plate_num}.txt'
if not text_file.exists() or cls.run_ocr:
legends = []
raw_text = ''
for img in folder.glob('*.png'):
print('num', plate_num, img.stem)
p = subprocess.Popen(('tesseract',
img.as_posix(),
'stdout', '-l', 'eng', '--oem', '2', '--psm', '6'),
stdout=subprocess.PIPE)
bytes_text, err = p.communicate()
raw_text += bytes_text.decode() + '\n'
with open(text_file, 'wt') as f:
f.write(raw_text)
else:
with open(text_file, 'rt') as f:
raw_text = f.read()
legends = get_legends(raw_text)
data.append((plate_num, legends))
elif cls.source.exists():
for text_file in cls.source.glob('*.txt'):
plate_num = int(text_file.stem)
with open(text_file, 'rt') as f:
raw_text = f.read()
legends = get_legends(raw_text)
data.append((plate_num, legends))
return data | Sigh, this was indeed a poorly conceived approach
since it hard blocks when the files are not in the source
so you can't easily bootstrap from another source and the
cognitive overhead is way, way too high :/
Adding dry_run/bootstrap to __new__ sort of helps? | entailment |
def ilx_conv(graph, prefix, ilx_start):
""" convert a set of temporary identifiers to ilx and modify the graph in place """
to_sub = set()
for subject in graph.subjects(rdflib.RDF.type, rdflib.OWL.Class):
if PREFIXES[prefix] in subject:
to_sub.add(subject)
ilx_base = 'ilx_{:0>7}'
ILX_base = 'ILX:{:0>7}' # ah rdflib/owlapi, you infuriate me
ilx_labels = {}
replace = {}
for sub in sorted(to_sub):
ilx_format = ilx_base.format(ilx_start)
ILX_format = ILX_base.format(ilx_start)
ilx_start += 1
prefix, url, suffix = graph.namespace_manager.compute_qname(sub)
curie = prefix + ':' + suffix
replace[curie] = ILX_format
label = [_ for _ in graph.objects(sub, rdflib.RDFS.label)][0]
ilx_labels[ilx_format] = label
new_sub = expand('ilx:' + ilx_format)
for p, o in graph.predicate_objects(sub):
graph.remove((sub, p, o))
graph.add((new_sub, p, o))
for s, p in graph.subject_predicates(sub):
graph.remove((s, p, sub))
graph.add((s, p, new_sub))
return ilx_labels, replace | convert a set of temporary identifiers to ilx and modify the graph in place | entailment |
def alreadyHasEntry(oldClassString, og):
""" Return true if there is already an owl:Class with the old id"""
namespace = oldClassString.split(':')[0]
if namespace == 'http':
target = rdflib.URIRef(oldClassString)
print('OLD CLASS ID IS A URL', oldClassString)
else:
try:
og.add_known_namespaces(namespace)
target = og.expand(oldClassString)
except KeyError:
print('MISSING NAMESPACE', namespace, oldClassString)
return True # we only want known namespaces
return (target, rdf.type, owl.Class) in og.g | Return true if there is already an owl:Class with the old id | entailment |
def config(remote_base= 'https://raw.githubusercontent.com/SciCrunch/NIF-Ontology/',
local_base= None, # devconfig.ontology_local_repo by default
branch= devconfig.neurons_branch,
core_graph_paths= ['ttl/phenotype-core.ttl',
'ttl/phenotypes.ttl'],
core_graph= None,
in_graph_paths= tuple(),
out_graph_path= '/tmp/_Neurons.ttl',
out_imports= ['ttl/phenotype-core.ttl'],
out_graph= None,
prefixes= tuple(),
force_remote= False,
checkout_ok= ont_checkout_ok,
scigraph= None, # defaults to devconfig.scigraph_api
iri= None,
sources= tuple(),
source_file= None,
use_local_import_paths=True,
ignore_existing= True):
""" Wraps graphBase.configGraphIO to provide a set of sane defaults
for input ontologies and output files. """
graphBase.configGraphIO(remote_base=remote_base,
local_base=local_base,
branch=branch,
core_graph_paths=core_graph_paths,
core_graph=core_graph,
in_graph_paths=in_graph_paths,
out_graph_path=out_graph_path,
out_imports=out_imports,
out_graph=out_graph,
prefixes=prefixes,
force_remote=force_remote,
checkout_ok=checkout_ok,
scigraph=scigraph,
iri=iri,
sources=sources,
source_file=source_file,
use_local_import_paths=use_local_import_paths,
ignore_existing=ignore_existing)
pred = graphBase._predicates
return pred | Wraps graphBase.configGraphIO to provide a set of sane defaults
for input ontologies and output files. | entailment |
def add_version_iri(graph, epoch):
""" Also remove the previous versionIRI if there was one."""
for ont in graph.subjects(rdf.type, owl.Ontology):
for versionIRI in graph.objects(ont, owl.versionIRI):
graph.remove((ont, owl.versionIRI, versionIRI))
t = ont, owl.versionIRI, make_version_iri_from_iri(ont, epoch)
graph.add(t) | Also remove the previous versionIRI if there was one. | entailment |
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password) | Return credentials for current Bitbucket user. | entailment |
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None) | Call this with your consumer key, secret and callback URL, to
generate a token for verification. | entailment |
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None) | After converting the token into verifier, call this to finalize the
authorization. | entailment |
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret) | Called internally once auth process is complete. | entailment |
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error) | Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success. | entailment |
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs | Construct and return the URL for a specific API service. | entailment |
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response | Returns user informations.
If username is not defined, tries to return own informations. | entailment |
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth) | Get a single repository on Bitbucket and return its tags. | entailment |
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth) | Get a single repository on Bitbucket and return its branches. | entailment |
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth) | Get privledges for this user. | entailment |
def create(self, repo_slug=None, key=None, label=None):
""" Associate an ssh key with your repo and return it.
"""
key = '%s' % key
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('SET_DEPLOY_KEY',
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('POST',
url,
auth=self.bitbucket.auth,
key=key,
label=label) | Associate an ssh key with your repo and return it. | entailment |
def public(self, username=None):
""" Returns all public repositories from an user.
If username is not defined, tries to return own public repos.
"""
username = username or self.bitbucket.username or ''
url = self.bitbucket.url('GET_USER', username=username)
response = self.bitbucket.dispatch('GET', url)
try:
return (response[0], response[1]['repositories'])
except TypeError:
pass
return response | Returns all public repositories from an user.
If username is not defined, tries to return own public repos. | entailment |
def all(self):
""" Return own repositories."""
url = self.bitbucket.url('GET_USER', username=self.bitbucket.username)
response = self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
try:
return (response[0], response[1]['repositories'])
except TypeError:
pass
return response | Return own repositories. | entailment |
def create(self, repo_name, scm='git', private=True, **kwargs):
""" Creates a new repository on own Bitbucket account and return it."""
url = self.bitbucket.url('CREATE_REPO')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, name=repo_name, scm=scm, is_private=private, **kwargs) | Creates a new repository on own Bitbucket account and return it. | entailment |
def archive(self, repo_slug=None, format='zip', prefix=''):
""" Get one of your repositories and compress it as an archive.
Return the path of the archive.
format parameter is curently not supported.
"""
prefix = '%s'.lstrip('/') % prefix
self._get_files_in_dir(repo_slug=repo_slug, dir='/')
if self.bitbucket.repo_tree:
with NamedTemporaryFile(delete=False) as archive:
with ZipFile(archive, 'w') as zip_archive:
for name, file in self.bitbucket.repo_tree.items():
with NamedTemporaryFile(delete=False) as temp_file:
temp_file.write(file.encode('utf-8'))
zip_archive.write(temp_file.name, prefix + name)
return (True, archive.name)
return (False, 'Could not archive your project.') | Get one of your repositories and compress it as an archive.
Return the path of the archive.
format parameter is curently not supported. | entailment |
def create(self, issue_id=None, repo_slug=None, **kwargs):
""" Add an issue comment to one of your repositories.
Each issue comment require only the content data field
the system autopopulate the rest.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('CREATE_COMMENT',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id)
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, **kwargs) | Add an issue comment to one of your repositories.
Each issue comment require only the content data field
the system autopopulate the rest. | entailment |
def delete(self, comment_id, issue_id=None, repo_slug=None):
""" Delete an issue from one of your repositories.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_COMMENT',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id,
comment_id=comment_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth) | Delete an issue from one of your repositories. | entailment |
def all(self):
""" Get all ssh keys associated with your account.
"""
url = self.bitbucket.url('GET_SSH_KEYS')
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth) | Get all ssh keys associated with your account. | entailment |
def get(self, key_id=None):
""" Get one of the ssh keys associated with your account.
"""
url = self.bitbucket.url('GET_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth) | Get one of the ssh keys associated with your account. | entailment |
def create(self, key=None, label=None):
""" Associate an ssh key with your account and return it.
"""
key = '%s' % key
url = self.bitbucket.url('SET_SSH_KEY')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, key=key, label=label) | Associate an ssh key with your account and return it. | entailment |
def delete(self, key_id=None):
""" Delete one of the ssh keys associated with your account.
Please use with caution as there is NO confimation and NO undo.
"""
url = self.bitbucket.url('DELETE_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth) | Delete one of the ssh keys associated with your account.
Please use with caution as there is NO confimation and NO undo. | entailment |
def which(program):
'''
Emulate unix 'which' command. If program is a path to an executable file
(i.e. it contains any directory components, like './myscript'), return
program. Otherwise, if an executable file matching program is found in one
of the directories in the PATH environment variable, return the first match
found.
On Windows, if PATHEXT is defined and program does not include an
extension, include the extensions in PATHEXT when searching for a matching
executable file.
Return None if no executable file is found.
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028
https://github.com/webcoyote/vagrant/blob/f70507062e3b30c00db1f0d8b90f9245c4c997d4/lib/vagrant/util/file_util.rb
Python3.3+ implementation:
https://hg.python.org/cpython/file/default/Lib/shutil.py
'''
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
# Shortcut: If program contains any dir components, do not search the path
# e.g. './backup', '/bin/ls'
if os.path.dirname(program):
if is_exe(program):
return program
else:
return None
# Are we on windows?
# http://stackoverflow.com/questions/1325581/how-do-i-check-if-im-running-on-windows-in-python
windows = (os.name == 'nt')
# Or cygwin?
# https://docs.python.org/2/library/sys.html#sys.platform
cygwin = sys.platform.startswith('cygwin')
# Paths: a list of directories
path_str = os.environ.get('PATH', os.defpath)
if not path_str:
paths = []
else:
paths = path_str.split(os.pathsep)
# The current directory takes precedence on Windows.
if windows:
paths.insert(0, os.curdir)
# Only search PATH if there is one to search.
if not paths:
return None
# Files: add any necessary extensions to program
# On cygwin and non-windows systems do not add extensions when searching
# for the executable
if cygwin or not windows:
files = [program]
else:
# windows path extensions in PATHEXT.
# e.g. ['.EXE', '.CMD', '.BAT']
# http://environmentvariables.org/PathExt
# This might not properly use extensions that have been "registered" in
# Windows. In the future it might make sense to use one of the many
# "which" packages on PyPI.
exts = os.environ.get('PATHEXT', '').split(os.pathsep)
# if the program ends with one of the extensions, only test that one.
# otherwise test all the extensions.
matching_exts = [ext for ext in exts if
program.lower().endswith(ext.lower())]
if matching_exts:
files = [program + ext for ext in matching_exts]
else:
files = [program + ext for ext in exts]
# Check each combination of path, program, and extension, returning
# the first combination that exists and is executable.
for path in paths:
for f in files:
fpath = os.path.normcase(os.path.join(path, f))
if is_exe(fpath):
return fpath
return None | Emulate unix 'which' command. If program is a path to an executable file
(i.e. it contains any directory components, like './myscript'), return
program. Otherwise, if an executable file matching program is found in one
of the directories in the PATH environment variable, return the first match
found.
On Windows, if PATHEXT is defined and program does not include an
extension, include the extensions in PATHEXT when searching for a matching
executable file.
Return None if no executable file is found.
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028
https://github.com/webcoyote/vagrant/blob/f70507062e3b30c00db1f0d8b90f9245c4c997d4/lib/vagrant/util/file_util.rb
Python3.3+ implementation:
https://hg.python.org/cpython/file/default/Lib/shutil.py | entailment |
def make_file_cm(filename, mode='a'):
'''
Open a file for appending and yield the open filehandle. Close the
filehandle after yielding it. This is useful for creating a context
manager for logging the output of a `Vagrant` instance.
filename: a path to a file
mode: The mode in which to open the file. Defaults to 'a', append
Usage example:
log_cm = make_file_cm('application.log')
v = Vagrant(out_cm=log_cm, err_cm=log_cm)
'''
@contextlib.contextmanager
def cm():
with open(filename, mode=mode) as fh:
yield fh
return cm | Open a file for appending and yield the open filehandle. Close the
filehandle after yielding it. This is useful for creating a context
manager for logging the output of a `Vagrant` instance.
filename: a path to a file
mode: The mode in which to open the file. Defaults to 'a', append
Usage example:
log_cm = make_file_cm('application.log')
v = Vagrant(out_cm=log_cm, err_cm=log_cm) | entailment |
def version(self):
'''
Return the installed vagrant version, as a string, e.g. '1.5.0'
'''
output = self._run_vagrant_command(['--version'])
m = re.search(r'^Vagrant (?P<version>.+)$', output)
if m is None:
raise Exception('Failed to parse vagrant --version output. output={!r}'.format(output))
return m.group('version') | Return the installed vagrant version, as a string, e.g. '1.5.0' | entailment |
def up(self, no_provision=False, provider=None, vm_name=None,
provision=None, provision_with=None, stream_output=False):
'''
Invoke `vagrant up` to start a box or boxes, possibly streaming the
command output.
vm_name=None: name of VM.
provision_with: optional list of provisioners to enable.
provider: Back the machine with a specific provider
no_provision: if True, disable provisioning. Same as 'provision=False'.
provision: optional boolean. Enable or disable provisioning. Default
behavior is to use the underlying vagrant default.
stream_output: if True, return a generator that yields each line of the
output of running the command. Consume the generator or the
subprocess might hang. if False, None is returned and the command
is run to completion without streaming the output. Defaults to
False.
Note: If provision and no_provision are not None, no_provision will be
ignored.
returns: None or a generator yielding lines of output.
'''
provider_arg = '--provider=%s' % provider if provider else None
prov_with_arg = None if provision_with is None else '--provision-with'
providers_arg = None if provision_with is None else ','.join(provision_with)
# For the sake of backward compatibility, no_provision is allowed.
# However it is ignored if provision is set.
if provision is not None:
no_provision = None
no_provision_arg = '--no-provision' if no_provision else None
provision_arg = None if provision is None else '--provision' if provision else '--no-provision'
args = ['up', vm_name, no_provision_arg, provision_arg, provider_arg, prov_with_arg, providers_arg]
if stream_output:
generator = self._stream_vagrant_command(args)
else:
self._call_vagrant_command(args)
self._cached_conf[vm_name] = None # remove cached configuration
return generator if stream_output else None | Invoke `vagrant up` to start a box or boxes, possibly streaming the
command output.
vm_name=None: name of VM.
provision_with: optional list of provisioners to enable.
provider: Back the machine with a specific provider
no_provision: if True, disable provisioning. Same as 'provision=False'.
provision: optional boolean. Enable or disable provisioning. Default
behavior is to use the underlying vagrant default.
stream_output: if True, return a generator that yields each line of the
output of running the command. Consume the generator or the
subprocess might hang. if False, None is returned and the command
is run to completion without streaming the output. Defaults to
False.
Note: If provision and no_provision are not None, no_provision will be
ignored.
returns: None or a generator yielding lines of output. | entailment |
def provision(self, vm_name=None, provision_with=None):
'''
Runs the provisioners defined in the Vagrantfile.
vm_name: optional VM name string.
provision_with: optional list of provisioners to enable.
e.g. ['shell', 'chef_solo']
'''
prov_with_arg = None if provision_with is None else '--provision-with'
providers_arg = None if provision_with is None else ','.join(provision_with)
self._call_vagrant_command(['provision', vm_name, prov_with_arg,
providers_arg]) | Runs the provisioners defined in the Vagrantfile.
vm_name: optional VM name string.
provision_with: optional list of provisioners to enable.
e.g. ['shell', 'chef_solo'] | entailment |
def reload(self, vm_name=None, provision=None, provision_with=None,
stream_output=False):
'''
Quoting from Vagrant docs:
> The equivalent of running a halt followed by an up.
> This command is usually required for changes made in the Vagrantfile
to take effect. After making any modifications to the Vagrantfile, a
reload should be called.
> The configured provisioners will not run again, by default. You can
force the provisioners to re-run by specifying the --provision flag.
provision: optional boolean. Enable or disable provisioning. Default
behavior is to use the underlying vagrant default.
provision_with: optional list of provisioners to enable.
e.g. ['shell', 'chef_solo']
stream_output: if True, return a generator that yields each line of the
output of running the command. Consume the generator or the
subprocess might hang. if False, None is returned and the command
is run to completion without streaming the output. Defaults to
False.
returns: None or a generator yielding lines of output.
'''
prov_with_arg = None if provision_with is None else '--provision-with'
providers_arg = None if provision_with is None else ','.join(provision_with)
provision_arg = None if provision is None else '--provision' if provision else '--no-provision'
args = ['reload', vm_name, provision_arg, prov_with_arg, providers_arg]
if stream_output:
generator = self._stream_vagrant_command(args)
else:
self._call_vagrant_command(args)
self._cached_conf[vm_name] = None # remove cached configuration
return generator if stream_output else None | Quoting from Vagrant docs:
> The equivalent of running a halt followed by an up.
> This command is usually required for changes made in the Vagrantfile
to take effect. After making any modifications to the Vagrantfile, a
reload should be called.
> The configured provisioners will not run again, by default. You can
force the provisioners to re-run by specifying the --provision flag.
provision: optional boolean. Enable or disable provisioning. Default
behavior is to use the underlying vagrant default.
provision_with: optional list of provisioners to enable.
e.g. ['shell', 'chef_solo']
stream_output: if True, return a generator that yields each line of the
output of running the command. Consume the generator or the
subprocess might hang. if False, None is returned and the command
is run to completion without streaming the output. Defaults to
False.
returns: None or a generator yielding lines of output. | entailment |
def halt(self, vm_name=None, force=False):
'''
Halt the Vagrant box.
force: If True, force shut down.
'''
force_opt = '--force' if force else None
self._call_vagrant_command(['halt', vm_name, force_opt])
self._cached_conf[vm_name] = None | Halt the Vagrant box.
force: If True, force shut down. | entailment |
def status(self, vm_name=None):
'''
Return the results of a `vagrant status` call as a list of one or more
Status objects. A Status contains the following attributes:
- name: The VM name in a multi-vm environment. 'default' otherwise.
- state: The state of the underlying guest machine (i.e. VM).
- provider: the name of the VM provider, e.g. 'virtualbox'. None
if no provider is output by vagrant.
Example return values for a multi-VM environment:
[Status(name='web', state='not created', provider='virtualbox'),
Status(name='db', state='not created', provider='virtualbox')]
And for a single-VM environment:
[Status(name='default', state='not created', provider='virtualbox')]
Possible states include, but are not limited to (since new states are
being added as Vagrant evolves):
- 'not_created' if the vm is destroyed
- 'running' if the vm is up
- 'poweroff' if the vm is halted
- 'saved' if the vm is suspended
- 'aborted' if the vm is aborted
Implementation Details:
This command uses the `--machine-readable` flag added in
Vagrant 1.5, mapping the target name, state, and provider-name
to a Status object.
Example with no VM name and multi-vm Vagrantfile:
$ vagrant status --machine-readable
1424098924,web,provider-name,virtualbox
1424098924,web,state,running
1424098924,web,state-human-short,running
1424098924,web,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down forcefully%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply\nsuspend the virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA)\nsimply run `vagrant up`.
1424098924,db,provider-name,virtualbox
1424098924,db,state,not_created
1424098924,db,state-human-short,not created
1424098924,db,state-human-long,The environment has not yet been created. Run `vagrant up` to\ncreate the environment. If a machine is not created%!(VAGRANT_COMMA) only the\ndefault provider will be shown. So if a provider is not listed%!(VAGRANT_COMMA)\nthen the machine is not created for that environment.
Example with VM name:
$ vagrant status --machine-readable web
1424099027,web,provider-name,virtualbox
1424099027,web,state,running
1424099027,web,state-human-short,running
1424099027,web,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down forcefully%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply\nsuspend the virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA)\nsimply run `vagrant up`.
Example with no VM name and single-vm Vagrantfile:
$ vagrant status --machine-readable
1424100021,default,provider-name,virtualbox
1424100021,default,state,not_created
1424100021,default,state-human-short,not created
1424100021,default,state-human-long,The environment has not yet been created. Run `vagrant up` to\ncreate the environment. If a machine is not created%!(VAGRANT_COMMA) only the\ndefault provider will be shown. So if a provider is not listed%!(VAGRANT_COMMA)\nthen the machine is not created for that environment.
Error example with incorrect VM name:
$ vagrant status --machine-readable api
1424099042,,error-exit,Vagrant::Errors::MachineNotFound,The machine with the name 'api' was not found configured for\nthis Vagrant environment.
Error example with missing Vagrantfile:
$ vagrant status --machine-readable
1424099094,,error-exit,Vagrant::Errors::NoEnvironmentError,A Vagrant environment or target machine is required to run this\ncommand. Run `vagrant init` to create a new Vagrant environment. Or%!(VAGRANT_COMMA)\nget an ID of a target machine from `vagrant global-status` to run\nthis command on. A final option is to change to a directory with a\nVagrantfile and to try again.
'''
# machine-readable output are CSV lines
output = self._run_vagrant_command(['status', '--machine-readable', vm_name])
return self._parse_status(output) | Return the results of a `vagrant status` call as a list of one or more
Status objects. A Status contains the following attributes:
- name: The VM name in a multi-vm environment. 'default' otherwise.
- state: The state of the underlying guest machine (i.e. VM).
- provider: the name of the VM provider, e.g. 'virtualbox'. None
if no provider is output by vagrant.
Example return values for a multi-VM environment:
[Status(name='web', state='not created', provider='virtualbox'),
Status(name='db', state='not created', provider='virtualbox')]
And for a single-VM environment:
[Status(name='default', state='not created', provider='virtualbox')]
Possible states include, but are not limited to (since new states are
being added as Vagrant evolves):
- 'not_created' if the vm is destroyed
- 'running' if the vm is up
- 'poweroff' if the vm is halted
- 'saved' if the vm is suspended
- 'aborted' if the vm is aborted
Implementation Details:
This command uses the `--machine-readable` flag added in
Vagrant 1.5, mapping the target name, state, and provider-name
to a Status object.
Example with no VM name and multi-vm Vagrantfile:
$ vagrant status --machine-readable
1424098924,web,provider-name,virtualbox
1424098924,web,state,running
1424098924,web,state-human-short,running
1424098924,web,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down forcefully%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply\nsuspend the virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA)\nsimply run `vagrant up`.
1424098924,db,provider-name,virtualbox
1424098924,db,state,not_created
1424098924,db,state-human-short,not created
1424098924,db,state-human-long,The environment has not yet been created. Run `vagrant up` to\ncreate the environment. If a machine is not created%!(VAGRANT_COMMA) only the\ndefault provider will be shown. So if a provider is not listed%!(VAGRANT_COMMA)\nthen the machine is not created for that environment.
Example with VM name:
$ vagrant status --machine-readable web
1424099027,web,provider-name,virtualbox
1424099027,web,state,running
1424099027,web,state-human-short,running
1424099027,web,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down forcefully%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply\nsuspend the virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA)\nsimply run `vagrant up`.
Example with no VM name and single-vm Vagrantfile:
$ vagrant status --machine-readable
1424100021,default,provider-name,virtualbox
1424100021,default,state,not_created
1424100021,default,state-human-short,not created
1424100021,default,state-human-long,The environment has not yet been created. Run `vagrant up` to\ncreate the environment. If a machine is not created%!(VAGRANT_COMMA) only the\ndefault provider will be shown. So if a provider is not listed%!(VAGRANT_COMMA)\nthen the machine is not created for that environment.
Error example with incorrect VM name:
$ vagrant status --machine-readable api
1424099042,,error-exit,Vagrant::Errors::MachineNotFound,The machine with the name 'api' was not found configured for\nthis Vagrant environment.
Error example with missing Vagrantfile:
$ vagrant status --machine-readable
1424099094,,error-exit,Vagrant::Errors::NoEnvironmentError,A Vagrant environment or target machine is required to run this\ncommand. Run `vagrant init` to create a new Vagrant environment. Or%!(VAGRANT_COMMA)\nget an ID of a target machine from `vagrant global-status` to run\nthis command on. A final option is to change to a directory with a\nVagrantfile and to try again. | entailment |
def _parse_status(self, output):
'''
Unit testing is so much easier when Vagrant is removed from the
equation.
'''
parsed = self._parse_machine_readable_output(output)
statuses = []
# group tuples by target name
# assuming tuples are sorted by target name, this should group all
# the tuples with info for each target.
for target, tuples in itertools.groupby(parsed, lambda tup: tup[1]):
# transform tuples into a dict mapping "type" to "data"
info = {kind: data for timestamp, _, kind, data in tuples}
status = Status(name=target, state=info.get('state'),
provider=info.get('provider-name'))
statuses.append(status)
return statuses | Unit testing is so much easier when Vagrant is removed from the
equation. | entailment |
def conf(self, ssh_config=None, vm_name=None):
'''
Parse ssh_config into a dict containing the keys defined in ssh_config,
which should include these keys (listed with example values): 'User'
(e.g. 'vagrant'), 'HostName' (e.g. 'localhost'), 'Port' (e.g. '2222'),
'IdentityFile' (e.g. '/home/todd/.ssh/id_dsa'). Cache the parsed
configuration dict. Return the dict.
If ssh_config is not given, return the cached dict. If there is no
cached configuration, call ssh_config() to get the configuration, then
parse, cache, and return the config dict. Calling ssh_config() raises
an Exception if the Vagrant box has not yet been created or has been
destroyed.
vm_name: required in a Multi-VM Vagrant environment. This name will be
used to get the configuration for the named vm and associate the config
with the vm name in the cache.
ssh_config: a valid ssh confige file host section. Defaults to
the value returned from ssh_config(). For speed, the configuration
parsed from ssh_config is cached for subsequent calls.
'''
if self._cached_conf.get(vm_name) is None or ssh_config is not None:
if ssh_config is None:
ssh_config = self.ssh_config(vm_name=vm_name)
conf = self._parse_config(ssh_config)
self._cached_conf[vm_name] = conf
return self._cached_conf[vm_name] | Parse ssh_config into a dict containing the keys defined in ssh_config,
which should include these keys (listed with example values): 'User'
(e.g. 'vagrant'), 'HostName' (e.g. 'localhost'), 'Port' (e.g. '2222'),
'IdentityFile' (e.g. '/home/todd/.ssh/id_dsa'). Cache the parsed
configuration dict. Return the dict.
If ssh_config is not given, return the cached dict. If there is no
cached configuration, call ssh_config() to get the configuration, then
parse, cache, and return the config dict. Calling ssh_config() raises
an Exception if the Vagrant box has not yet been created or has been
destroyed.
vm_name: required in a Multi-VM Vagrant environment. This name will be
used to get the configuration for the named vm and associate the config
with the vm name in the cache.
ssh_config: a valid ssh confige file host section. Defaults to
the value returned from ssh_config(). For speed, the configuration
parsed from ssh_config is cached for subsequent calls. | entailment |
def user_hostname(self, vm_name=None):
'''
Return a string combining user and hostname, e.g. 'vagrant@127.0.0.1'.
This string is suitable for use in an ssh commmand. If user is None
or empty, it will be left out of the string, e.g. 'localhost'. If
hostname is None, have bigger problems.
Raises an Exception if the Vagrant box has not yet been created or
has been destroyed.
'''
user = self.user(vm_name=vm_name)
user_prefix = user + '@' if user else ''
return user_prefix + self.hostname(vm_name=vm_name) | Return a string combining user and hostname, e.g. 'vagrant@127.0.0.1'.
This string is suitable for use in an ssh commmand. If user is None
or empty, it will be left out of the string, e.g. 'localhost'. If
hostname is None, have bigger problems.
Raises an Exception if the Vagrant box has not yet been created or
has been destroyed. | entailment |
def user_hostname_port(self, vm_name=None):
'''
Return a string combining user, hostname and port, e.g.
'vagrant@127.0.0.1:2222'. This string is suitable for use with Fabric,
in env.hosts. If user or port is None or empty, they will be left
out of the string. E.g. 'vagrant@localhost', or 'localhost:2222' or
'localhost'. If hostname is None, you have bigger problems.
Raises an Exception if the Vagrant box has not yet been created or
has been destroyed.
'''
user = self.user(vm_name=vm_name)
port = self.port(vm_name=vm_name)
user_prefix = user + '@' if user else ''
port_suffix = ':' + port if port else ''
return user_prefix + self.hostname(vm_name=vm_name) + port_suffix | Return a string combining user, hostname and port, e.g.
'vagrant@127.0.0.1:2222'. This string is suitable for use with Fabric,
in env.hosts. If user or port is None or empty, they will be left
out of the string. E.g. 'vagrant@localhost', or 'localhost:2222' or
'localhost'. If hostname is None, you have bigger problems.
Raises an Exception if the Vagrant box has not yet been created or
has been destroyed. | entailment |
def box_add(self, name, url, provider=None, force=False):
'''
Adds a box with given name, from given url.
force: If True, overwrite an existing box if it exists.
'''
force_opt = '--force' if force else None
cmd = ['box', 'add', name, url, force_opt]
if provider is not None:
cmd += ['--provider', provider]
self._call_vagrant_command(cmd) | Adds a box with given name, from given url.
force: If True, overwrite an existing box if it exists. | entailment |
def package(self, vm_name=None, base=None, output=None, vagrantfile=None):
'''
Packages a running vagrant environment into a box.
vm_name=None: name of VM.
base=None: name of a VM in virtualbox to package as a base box
output=None: name of the file to output
vagrantfile=None: Vagrantfile to package with this box
'''
cmd = ['package', vm_name]
if output is not None:
cmd += ['--output', output]
if vagrantfile is not None:
cmd += ['--vagrantfile', vagrantfile]
self._call_vagrant_command(cmd) | Packages a running vagrant environment into a box.
vm_name=None: name of VM.
base=None: name of a VM in virtualbox to package as a base box
output=None: name of the file to output
vagrantfile=None: Vagrantfile to package with this box | entailment |
def snapshot_pop(self):
'''
This command is the inverse of vagrant snapshot push: it will restore the pushed state.
'''
NO_SNAPSHOTS_PUSHED = 'No pushed snapshot found!'
output = self._run_vagrant_command(['snapshot', 'pop'])
if NO_SNAPSHOTS_PUSHED in output:
raise RuntimeError(NO_SNAPSHOTS_PUSHED) | This command is the inverse of vagrant snapshot push: it will restore the pushed state. | entailment |
def snapshot_list(self):
'''
This command will list all the snapshots taken.
'''
NO_SNAPSHOTS_TAKEN = 'No snapshots have been taken yet!'
output = self._run_vagrant_command(['snapshot', 'list'])
if NO_SNAPSHOTS_TAKEN in output:
return []
else:
return output.splitlines() | This command will list all the snapshots taken. | entailment |
def ssh(self, vm_name=None, command=None, extra_ssh_args=None):
'''
Execute a command via ssh on the vm specified.
command: The command to execute via ssh.
extra_ssh_args: Corresponds to '--' option in the vagrant ssh command
Returns the output of running the command.
'''
cmd = ['ssh', vm_name, '--command', command]
if extra_ssh_args is not None:
cmd += ['--', extra_ssh_args]
return self._run_vagrant_command(cmd) | Execute a command via ssh on the vm specified.
command: The command to execute via ssh.
extra_ssh_args: Corresponds to '--' option in the vagrant ssh command
Returns the output of running the command. | entailment |
def _parse_box_list(self, output):
'''
Remove Vagrant usage for unit testing
'''
# Parse box list output
boxes = []
# initialize box values
name = provider = version = None
for timestamp, target, kind, data in self._parse_machine_readable_output(output):
if kind == 'box-name':
# finish the previous box, if any
if name is not None:
boxes.append(Box(name=name, provider=provider, version=version))
# start a new box
name = data # box name
provider = version = None
elif kind == 'box-provider':
provider = data
elif kind == 'box-version':
version = data
# finish the previous box, if any
if name is not None:
boxes.append(Box(name=name, provider=provider, version=version))
return boxes | Remove Vagrant usage for unit testing | entailment |
def _parse_plugin_list(self, output):
'''
Remove Vagrant from the equation for unit testing.
'''
ENCODED_COMMA = '%!(VAGRANT_COMMA)'
plugins = []
# initialize plugin values
name = None
version = None
system = False
for timestamp, target, kind, data in self._parse_machine_readable_output(output):
if kind == 'plugin-name':
# finish the previous plugin, if any
if name is not None:
plugins.append(Plugin(name=name, version=version, system=system))
# start a new plugin
name = data # plugin name
version = None
system = False
elif kind == 'plugin-version':
if ENCODED_COMMA in data:
version, etc = data.split(ENCODED_COMMA)
system = (etc.strip().lower() == 'system')
else:
version = data
system = False
# finish the previous plugin, if any
if name is not None:
plugins.append(Plugin(name=name, version=version, system=system))
return plugins | Remove Vagrant from the equation for unit testing. | entailment |
def _parse_machine_readable_output(self, output):
'''
param output: a string containing the output of a vagrant command with the `--machine-readable` option.
returns: a dict mapping each 'target' in the machine readable output to
a dict. The dict of each target, maps each target line type/kind to
its data.
Machine-readable output is a collection of CSV lines in the format:
timestamp, target, kind, data
Target is a VM name, possibly 'default', or ''. The empty string
denotes information not specific to a particular VM, such as the
results of `vagrant box list`.
'''
# each line is a tuple of (timestamp, target, type, data)
# target is the VM name
# type is the type of data, e.g. 'provider-name', 'box-version'
# data is a (possibly comma separated) type-specific value, e.g. 'virtualbox', '0'
parsed_lines = [line.split(',', 4) for line in output.splitlines() if line.strip()]
# vagrant 1.8 adds additional fields that aren't required,
# and will break parsing if included in the status lines.
# filter them out pending future implementation.
parsed_lines = list(filter(lambda x: x[2] not in ["metadata", "ui", "action"], parsed_lines))
return parsed_lines | param output: a string containing the output of a vagrant command with the `--machine-readable` option.
returns: a dict mapping each 'target' in the machine readable output to
a dict. The dict of each target, maps each target line type/kind to
its data.
Machine-readable output is a collection of CSV lines in the format:
timestamp, target, kind, data
Target is a VM name, possibly 'default', or ''. The empty string
denotes information not specific to a particular VM, such as the
results of `vagrant box list`. | entailment |
def _parse_config(self, ssh_config):
'''
This lame parser does not parse the full grammar of an ssh config
file. It makes assumptions that are (hopefully) correct for the output
of `vagrant ssh-config [vm-name]`. Specifically it assumes that there
is only one Host section, the default vagrant host. It assumes that
the parameters of the ssh config are not changing.
every line is of the form 'key value', where key is a single token
without any whitespace and value is the remaining part of the line.
Value may optionally be surrounded in double quotes. All leading and
trailing whitespace is removed from key and value. Example lines:
' User vagrant\n'
' IdentityFile "/home/robert/.vagrant.d/insecure_private_key"\n'
Lines with '#' as the first non-whitespace character are considered
comments and ignored. Whitespace-only lines are ignored. This parser
does NOT handle using an '=' in options. Values surrounded in double
quotes will have the double quotes removed.
See https://github.com/bitprophet/ssh/blob/master/ssh/config.py for a
more compliant ssh config file parser.
'''
conf = dict()
started_parsing = False
for line in ssh_config.splitlines():
if line.strip().startswith('Host ') and not started_parsing:
started_parsing = True
if not started_parsing or not line.strip() or line.strip().startswith('#'):
continue
key, value = line.strip().split(None, 1)
# Remove leading and trailing " from the values
conf[key] = value.strip('"')
return conf | This lame parser does not parse the full grammar of an ssh config
file. It makes assumptions that are (hopefully) correct for the output
of `vagrant ssh-config [vm-name]`. Specifically it assumes that there
is only one Host section, the default vagrant host. It assumes that
the parameters of the ssh config are not changing.
every line is of the form 'key value', where key is a single token
without any whitespace and value is the remaining part of the line.
Value may optionally be surrounded in double quotes. All leading and
trailing whitespace is removed from key and value. Example lines:
' User vagrant\n'
' IdentityFile "/home/robert/.vagrant.d/insecure_private_key"\n'
Lines with '#' as the first non-whitespace character are considered
comments and ignored. Whitespace-only lines are ignored. This parser
does NOT handle using an '=' in options. Values surrounded in double
quotes will have the double quotes removed.
See https://github.com/bitprophet/ssh/blob/master/ssh/config.py for a
more compliant ssh config file parser. | entailment |
def _call_vagrant_command(self, args):
'''
Run a vagrant command. Return None.
args: A sequence of arguments to a vagrant command line.
'''
# Make subprocess command
command = self._make_vagrant_command(args)
with self.out_cm() as out_fh, self.err_cm() as err_fh:
subprocess.check_call(command, cwd=self.root, stdout=out_fh,
stderr=err_fh, env=self.env) | Run a vagrant command. Return None.
args: A sequence of arguments to a vagrant command line. | entailment |
def _run_vagrant_command(self, args):
'''
Run a vagrant command and return its stdout.
args: A sequence of arguments to a vagrant command line.
e.g. ['up', 'my_vm_name', '--no-provision'] or
['up', None, '--no-provision'] for a non-Multi-VM environment.
'''
# Make subprocess command
command = self._make_vagrant_command(args)
with self.err_cm() as err_fh:
return compat.decode(subprocess.check_output(command, cwd=self.root,
env=self.env, stderr=err_fh)) | Run a vagrant command and return its stdout.
args: A sequence of arguments to a vagrant command line.
e.g. ['up', 'my_vm_name', '--no-provision'] or
['up', None, '--no-provision'] for a non-Multi-VM environment. | entailment |
def _stream_vagrant_command(self, args):
"""
Execute a vagrant command, returning a generator of the output lines.
Caller should consume the entire generator to avoid the hanging the
subprocess.
:param args: Arguments for the Vagrant command.
:return: generator that yields each line of the command stdout.
:rtype: generator iterator
"""
py3 = sys.version_info > (3, 0)
# Make subprocess command
command = self._make_vagrant_command(args)
with self.err_cm() as err_fh:
sp_args = dict(args=command, cwd=self.root, env=self.env,
stdout=subprocess.PIPE, stderr=err_fh, bufsize=1)
# Iterate over output lines.
# See http://stackoverflow.com/questions/2715847/python-read-streaming-input-from-subprocess-communicate#17698359
p = subprocess.Popen(**sp_args)
with p.stdout:
for line in iter(p.stdout.readline, b''):
yield compat.decode(line) # if PY3 decode bytestrings
p.wait()
# Raise CalledProcessError for consistency with _call_vagrant_command
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, command) | Execute a vagrant command, returning a generator of the output lines.
Caller should consume the entire generator to avoid the hanging the
subprocess.
:param args: Arguments for the Vagrant command.
:return: generator that yields each line of the command stdout.
:rtype: generator iterator | entailment |
def sandbox_status(self, vm_name=None):
'''
Returns the status of the sandbox mode.
Possible values are:
- on
- off
- unknown
- not installed
'''
vagrant_sandbox_output = self._run_sandbox_command(['status', vm_name])
return self._parse_vagrant_sandbox_status(vagrant_sandbox_output) | Returns the status of the sandbox mode.
Possible values are:
- on
- off
- unknown
- not installed | entailment |
def _parse_vagrant_sandbox_status(self, vagrant_output):
'''
Returns the status of the sandbox mode given output from
'vagrant sandbox status'.
'''
# typical output
# [default] - snapshot mode is off
# or
# [default] - machine not created
# if the box VM is down
tokens = [token.strip() for token in vagrant_output.split(' ')]
if tokens[0] == 'Usage:':
sahara_status = 'not installed'
elif "{} {}".format(tokens[-2], tokens[-1]) == 'not created':
sahara_status = 'unknown'
else:
sahara_status = tokens[-1]
return sahara_status | Returns the status of the sandbox mode given output from
'vagrant sandbox status'. | entailment |
def deserialize(obj):
"""Convert JSON dicts back into objects."""
# Be careful of shallow copy here
target = dict(obj)
class_name = None
if '__class__' in target:
class_name = target.pop('__class__')
if '__module__' in obj:
obj.pop('__module__')
# Use getattr(module, class_name) for custom types if needed
if class_name == 'datetime':
return datetime.datetime(tzinfo=utc, **target)
if class_name == 'StreamingBody':
return StringIO(target['body'])
# Return unrecognized structures as-is
return obj | Convert JSON dicts back into objects. | entailment |
def serialize(obj):
"""Convert objects into JSON structures."""
# Record class and module information for deserialization
result = {'__class__': obj.__class__.__name__}
try:
result['__module__'] = obj.__module__
except AttributeError:
pass
# Convert objects to dictionary representation based on type
if isinstance(obj, datetime.datetime):
result['year'] = obj.year
result['month'] = obj.month
result['day'] = obj.day
result['hour'] = obj.hour
result['minute'] = obj.minute
result['second'] = obj.second
result['microsecond'] = obj.microsecond
return result
if isinstance(obj, StreamingBody):
result['body'] = obj.read()
obj._raw_stream = StringIO(result['body'])
obj._amount_read = 0
return result
# Raise a TypeError if the object isn't recognized
raise TypeError("Type not serializable") | Convert objects into JSON structures. | entailment |
def _serialize_json(obj, fp):
""" Serialize ``obj`` as a JSON formatted stream to ``fp`` """
json.dump(obj, fp, indent=4, default=serialize) | Serialize ``obj`` as a JSON formatted stream to ``fp`` | entailment |
def get_serializer(serializer_format):
""" Get the serializer for a specific format """
if serializer_format == Format.JSON:
return _serialize_json
if serializer_format == Format.PICKLE:
return _serialize_pickle | Get the serializer for a specific format | entailment |
def get_deserializer(serializer_format):
""" Get the deserializer for a specific format """
if serializer_format == Format.JSON:
return _deserialize_json
if serializer_format == Format.PICKLE:
return _deserialize_pickle | Get the deserializer for a specific format | entailment |
def _set_logger(logger_name, level=logging.INFO):
"""
Convenience function to quickly configure full debug output
to go to the console.
"""
log = logging.getLogger(logger_name)
log.setLevel(level)
ch = logging.StreamHandler(None)
ch.setLevel(level)
formatter = logging.Formatter(DebugFmtString)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch) | Convenience function to quickly configure full debug output
to go to the console. | entailment |
def find_file_format(file_name):
"""
Returns a tuple with the file path and format found, or (None, None)
"""
for file_format in Format.ALLOWED:
file_path = '.'.join((file_name, file_format))
if os.path.exists(file_path):
return file_path, file_format
return None, None | Returns a tuple with the file path and format found, or (None, None) | entailment |
def get_next_file_path(self, service, operation):
"""
Returns a tuple with the next file to read and the serializer
format used
"""
base_name = '{0}.{1}'.format(service, operation)
if self.prefix:
base_name = '{0}.{1}'.format(self.prefix, base_name)
LOG.debug('get_next_file_path: %s', base_name)
next_file = None
serializer_format = None
index = self._index.setdefault(base_name, 1)
while not next_file:
file_name = os.path.join(
self._data_path, base_name + '_{0}'.format(index))
next_file, serializer_format = self.find_file_format(file_name)
if next_file:
self._index[base_name] += 1
elif index != 1:
index = 1
self._index[base_name] = 1
else:
raise IOError('response file ({0}.[{1}]) not found'.format(
file_name, "|".join(Format.ALLOWED)))
return next_file, serializer_format | Returns a tuple with the next file to read and the serializer
format used | entailment |
def save_response(self, service, operation, response_data,
http_response=200):
"""
Store a response to the data directory. The ``operation``
should be the name of the operation in the service API (e.g.
DescribeInstances), the ``response_data`` should a value you want
to return from a placebo call and the ``http_response`` should be
the HTTP status code returned from the service. You can add
multiple responses for a given operation and they will be
returned in order.
"""
LOG.debug('save_response: %s.%s', service, operation)
filepath = self.get_new_file_path(service, operation)
LOG.debug('save_response: path=%s', filepath)
data = {'status_code': http_response,
'data': response_data}
with open(filepath, Format.write_mode(self.record_format)) as fp:
self._serializer(data, fp) | Store a response to the data directory. The ``operation``
should be the name of the operation in the service API (e.g.
DescribeInstances), the ``response_data`` should a value you want
to return from a placebo call and the ``http_response`` should be
the HTTP status code returned from the service. You can add
multiple responses for a given operation and they will be
returned in order. | entailment |
def _mock_request(self, **kwargs):
"""
A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined.
"""
model = kwargs.get('model')
service = model.service_model.endpoint_prefix
operation = model.name
LOG.debug('_make_request: %s.%s', service, operation)
return self.load_response(service, operation) | A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined. | entailment |
def placebo_session(function):
"""
Decorator to help do testing with placebo.
Simply wrap the function you want to test and make sure to add
a "session" argument so the decorator can pass the placebo session.
Accepts the following environment variables to configure placebo:
PLACEBO_MODE: set to "record" to record AWS calls and save them
PLACEBO_PROFILE: optionally set an AWS credential profile to record with
PLACEBO_DIR: set the directory to record to / read from
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
session_kwargs = {
'region_name': os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')
}
profile_name = os.environ.get('PLACEBO_PROFILE', None)
if profile_name:
session_kwargs['profile_name'] = profile_name
session = boto3.Session(**session_kwargs)
self = args[0]
prefix = self.__class__.__name__ + '.' + function.__name__
base_dir = os.environ.get(
"PLACEBO_DIR", os.path.join(os.getcwd(), "placebo"))
record_dir = os.path.join(base_dir, prefix)
record_format = os.environ.get('PLACEBO_FORMAT', Format.DEFAULT)
if not os.path.exists(record_dir):
os.makedirs(record_dir)
pill = placebo.attach(session, data_path=record_dir,
record_format=record_format)
if os.environ.get('PLACEBO_MODE') == 'record':
pill.record()
else:
pill.playback()
kwargs['session'] = session
return function(*args, **kwargs)
return wrapper | Decorator to help do testing with placebo.
Simply wrap the function you want to test and make sure to add
a "session" argument so the decorator can pass the placebo session.
Accepts the following environment variables to configure placebo:
PLACEBO_MODE: set to "record" to record AWS calls and save them
PLACEBO_PROFILE: optionally set an AWS credential profile to record with
PLACEBO_DIR: set the directory to record to / read from | entailment |
def parse_authentication_request(self, request_body, http_headers=None):
# type: (str, Optional[Mapping[str, str]]) -> oic.oic.message.AuthorizationRequest
"""
Parses and verifies an authentication request.
:param request_body: urlencoded authentication request
:param http_headers: http headers
"""
auth_req = AuthorizationRequest().deserialize(request_body)
for validator in self.authentication_request_validators:
validator(auth_req)
logger.debug('parsed authentication_request: %s', auth_req)
return auth_req | Parses and verifies an authentication request.
:param request_body: urlencoded authentication request
:param http_headers: http headers | entailment |
def authorize(self, authentication_request, # type: oic.oic.message.AuthorizationRequest
user_id, # type: str
extra_id_token_claims=None
# type: Optional[Union[Mapping[str, Union[str, List[str]]], Callable[[str, str], Mapping[str, Union[str, List[str]]]]]
):
# type: (...) -> oic.oic.message.AuthorizationResponse
"""
Creates an Authentication Response for the specified authentication request and local identifier of the
authenticated user.
"""
custom_sub = self.userinfo[user_id].get('sub')
if custom_sub:
self.authz_state.subject_identifiers[user_id] = {'public': custom_sub}
sub = custom_sub
else:
sub = self._create_subject_identifier(user_id, authentication_request['client_id'],
authentication_request['redirect_uri'])
self._check_subject_identifier_matches_requested(authentication_request, sub)
response = AuthorizationResponse()
authz_code = None
if 'code' in authentication_request['response_type']:
authz_code = self.authz_state.create_authorization_code(authentication_request, sub)
response['code'] = authz_code
access_token_value = None
if 'token' in authentication_request['response_type']:
access_token = self.authz_state.create_access_token(authentication_request, sub)
access_token_value = access_token.value
self._add_access_token_to_response(response, access_token)
if 'id_token' in authentication_request['response_type']:
if extra_id_token_claims is None:
extra_id_token_claims = {}
elif callable(extra_id_token_claims):
extra_id_token_claims = extra_id_token_claims(user_id, authentication_request['client_id'])
requested_claims = self._get_requested_claims_in(authentication_request, 'id_token')
if len(authentication_request['response_type']) == 1:
# only id token is issued -> no way of doing userinfo request, so include all claims in ID Token,
# even those requested by the scope parameter
requested_claims.update(
scope2claims(
authentication_request['scope'], extra_scope_dict=self.extra_scopes
)
)
user_claims = self.userinfo.get_claims_for(user_id, requested_claims)
response['id_token'] = self._create_signed_id_token(authentication_request['client_id'], sub,
user_claims,
authentication_request.get('nonce'),
authz_code, access_token_value, extra_id_token_claims)
logger.debug('issued id_token=%s from requested_claims=%s userinfo=%s extra_claims=%s',
response['id_token'], requested_claims, user_claims, extra_id_token_claims)
if 'state' in authentication_request:
response['state'] = authentication_request['state']
return response | Creates an Authentication Response for the specified authentication request and local identifier of the
authenticated user. | entailment |
def _add_access_token_to_response(self, response, access_token):
# type: (oic.message.AccessTokenResponse, se_leg_op.access_token.AccessToken) -> None
"""
Adds the Access Token and the associated parameters to the Token Response.
"""
response['access_token'] = access_token.value
response['token_type'] = access_token.type
response['expires_in'] = access_token.expires_in | Adds the Access Token and the associated parameters to the Token Response. | entailment |
def _create_subject_identifier(self, user_id, client_id, redirect_uri):
# type (str, str, str) -> str
"""
Creates a subject identifier for the specified client and user
see <a href="http://openid.net/specs/openid-connect-core-1_0.html#Terminology">
"OpenID Connect Core 1.0", Section 1.2</a>.
:param user_id: local user identifier
:param client_id: which client to generate a subject identifier for
:param redirect_uri: the clients' redirect_uri
:return: a subject identifier for the user intended for client who made the authentication request
"""
supported_subject_types = self.configuration_information['subject_types_supported'][0]
subject_type = self.clients[client_id].get('subject_type', supported_subject_types)
sector_identifier = urlparse(redirect_uri).netloc
return self.authz_state.get_subject_identifier(subject_type, user_id, sector_identifier) | Creates a subject identifier for the specified client and user
see <a href="http://openid.net/specs/openid-connect-core-1_0.html#Terminology">
"OpenID Connect Core 1.0", Section 1.2</a>.
:param user_id: local user identifier
:param client_id: which client to generate a subject identifier for
:param redirect_uri: the clients' redirect_uri
:return: a subject identifier for the user intended for client who made the authentication request | entailment |
def _get_requested_claims_in(self, authentication_request, response_method):
# type (oic.oic.message.AuthorizationRequest, str) -> Mapping[str, Optional[Mapping[str, Union[str, List[str]]]]
"""
Parses any claims requested using the 'claims' request parameter, see
<a href="http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter">
"OpenID Connect Core 1.0", Section 5.5</a>.
:param authentication_request: the authentication request
:param response_method: 'id_token' or 'userinfo'
"""
if response_method != 'id_token' and response_method != 'userinfo':
raise ValueError('response_method must be \'id_token\' or \'userinfo\'')
requested_claims = {}
if 'claims' in authentication_request and response_method in authentication_request['claims']:
requested_claims.update(authentication_request['claims'][response_method])
return requested_claims | Parses any claims requested using the 'claims' request parameter, see
<a href="http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter">
"OpenID Connect Core 1.0", Section 5.5</a>.
:param authentication_request: the authentication request
:param response_method: 'id_token' or 'userinfo' | entailment |
def _create_signed_id_token(self,
client_id, # type: str
sub, # type: str
user_claims=None, # type: Optional[Mapping[str, Union[str, List[str]]]]
nonce=None, # type: Optional[str]
authorization_code=None, # type: Optional[str]
access_token_value=None, # type: Optional[str]
extra_id_token_claims=None): # type: Optional[Mappings[str, Union[str, List[str]]]]
# type: (...) -> str
"""
Creates a signed ID Token.
:param client_id: who the ID Token is intended for
:param sub: who the ID Token is regarding
:param user_claims: any claims about the user to be included
:param nonce: nonce from the authentication request
:param authorization_code: the authorization code issued together with this ID Token
:param access_token_value: the access token issued together with this ID Token
:param extra_id_token_claims: any extra claims that should be included in the ID Token
:return: a JWS, containing the ID Token as payload
"""
alg = self.clients[client_id].get('id_token_signed_response_alg',
self.configuration_information['id_token_signing_alg_values_supported'][0])
args = {}
hash_alg = 'HS{}'.format(alg[-3:])
if authorization_code:
args['c_hash'] = jws.left_hash(authorization_code.encode('utf-8'), hash_alg)
if access_token_value:
args['at_hash'] = jws.left_hash(access_token_value.encode('utf-8'), hash_alg)
if user_claims:
args.update(user_claims)
if extra_id_token_claims:
args.update(extra_id_token_claims)
id_token = IdToken(iss=self.configuration_information['issuer'],
sub=sub,
aud=client_id,
iat=int(time.time()),
exp=int(time.time()) + self.id_token_lifetime,
**args)
if nonce:
id_token['nonce'] = nonce
logger.debug('signed id_token with kid=%s using alg=%s', self.signing_key, alg)
return id_token.to_jwt([self.signing_key], alg) | Creates a signed ID Token.
:param client_id: who the ID Token is intended for
:param sub: who the ID Token is regarding
:param user_claims: any claims about the user to be included
:param nonce: nonce from the authentication request
:param authorization_code: the authorization code issued together with this ID Token
:param access_token_value: the access token issued together with this ID Token
:param extra_id_token_claims: any extra claims that should be included in the ID Token
:return: a JWS, containing the ID Token as payload | entailment |
def _check_subject_identifier_matches_requested(self, authentication_request, sub):
# type (oic.message.AuthorizationRequest, str) -> None
"""
Verifies the subject identifier against any requested subject identifier using the claims request parameter.
:param authentication_request: authentication request
:param sub: subject identifier
:raise AuthorizationError: if the subject identifier does not match the requested one
"""
if 'claims' in authentication_request:
requested_id_token_sub = authentication_request['claims'].get('id_token', {}).get('sub')
requested_userinfo_sub = authentication_request['claims'].get('userinfo', {}).get('sub')
if requested_id_token_sub and requested_userinfo_sub and requested_id_token_sub != requested_userinfo_sub:
raise AuthorizationError('Requested different subject identifier for IDToken and userinfo: {} != {}'
.format(requested_id_token_sub, requested_userinfo_sub))
requested_sub = requested_id_token_sub or requested_userinfo_sub
if requested_sub and sub != requested_sub:
raise AuthorizationError('Requested subject identifier \'{}\' could not be matched'
.format(requested_sub)) | Verifies the subject identifier against any requested subject identifier using the claims request parameter.
:param authentication_request: authentication request
:param sub: subject identifier
:raise AuthorizationError: if the subject identifier does not match the requested one | entailment |
def handle_token_request(self, request_body, # type: str
http_headers=None, # type: Optional[Mapping[str, str]]
extra_id_token_claims=None
# type: Optional[Union[Mapping[str, Union[str, List[str]]], Callable[[str, str], Mapping[str, Union[str, List[str]]]]]
):
# type: (...) -> oic.oic.message.AccessTokenResponse
"""
Handles a token request, either for exchanging an authorization code or using a refresh token.
:param request_body: urlencoded token request
:param http_headers: http headers
:param extra_id_token_claims: extra claims to include in the signed ID Token
"""
token_request = self._verify_client_authentication(request_body, http_headers)
if 'grant_type' not in token_request:
raise InvalidTokenRequest('grant_type missing', token_request)
elif token_request['grant_type'] == 'authorization_code':
return self._do_code_exchange(token_request, extra_id_token_claims)
elif token_request['grant_type'] == 'refresh_token':
return self._do_token_refresh(token_request)
raise InvalidTokenRequest('grant_type \'{}\' unknown'.format(token_request['grant_type']), token_request,
oauth_error='unsupported_grant_type') | Handles a token request, either for exchanging an authorization code or using a refresh token.
:param request_body: urlencoded token request
:param http_headers: http headers
:param extra_id_token_claims: extra claims to include in the signed ID Token | entailment |
def _do_code_exchange(self, request, # type: Dict[str, str]
extra_id_token_claims=None
# type: Optional[Union[Mapping[str, Union[str, List[str]]], Callable[[str, str], Mapping[str, Union[str, List[str]]]]]
):
# type: (...) -> oic.message.AccessTokenResponse
"""
Handles a token request for exchanging an authorization code for an access token
(grant_type=authorization_code).
:param request: parsed http request parameters
:param extra_id_token_claims: any extra parameters to include in the signed ID Token, either as a dict-like
object or as a callable object accepting the local user identifier and client identifier which returns
any extra claims which might depend on the user id and/or client id.
:return: a token response containing a signed ID Token, an Access Token, and a Refresh Token
:raise InvalidTokenRequest: if the token request is invalid
"""
token_request = AccessTokenRequest().from_dict(request)
try:
token_request.verify()
except MessageException as e:
raise InvalidTokenRequest(str(e), token_request) from e
authentication_request = self.authz_state.get_authorization_request_for_code(token_request['code'])
if token_request['client_id'] != authentication_request['client_id']:
logger.info('Authorization code \'%s\' belonging to \'%s\' was used by \'%s\'',
token_request['code'], authentication_request['client_id'], token_request['client_id'])
raise InvalidAuthorizationCode('{} unknown'.format(token_request['code']))
if token_request['redirect_uri'] != authentication_request['redirect_uri']:
raise InvalidTokenRequest('Invalid redirect_uri: {} != {}'.format(token_request['redirect_uri'],
authentication_request['redirect_uri']),
token_request)
sub = self.authz_state.get_subject_identifier_for_code(token_request['code'])
user_id = self.authz_state.get_user_id_for_subject_identifier(sub)
response = AccessTokenResponse()
access_token = self.authz_state.exchange_code_for_token(token_request['code'])
self._add_access_token_to_response(response, access_token)
refresh_token = self.authz_state.create_refresh_token(access_token.value)
if refresh_token is not None:
response['refresh_token'] = refresh_token
if extra_id_token_claims is None:
extra_id_token_claims = {}
elif callable(extra_id_token_claims):
extra_id_token_claims = extra_id_token_claims(user_id, authentication_request['client_id'])
requested_claims = self._get_requested_claims_in(authentication_request, 'id_token')
user_claims = self.userinfo.get_claims_for(user_id, requested_claims)
response['id_token'] = self._create_signed_id_token(authentication_request['client_id'], sub,
user_claims,
authentication_request.get('nonce'),
None, access_token.value,
extra_id_token_claims)
logger.debug('issued id_token=%s from requested_claims=%s userinfo=%s extra_claims=%s',
response['id_token'], requested_claims, user_claims, extra_id_token_claims)
return response | Handles a token request for exchanging an authorization code for an access token
(grant_type=authorization_code).
:param request: parsed http request parameters
:param extra_id_token_claims: any extra parameters to include in the signed ID Token, either as a dict-like
object or as a callable object accepting the local user identifier and client identifier which returns
any extra claims which might depend on the user id and/or client id.
:return: a token response containing a signed ID Token, an Access Token, and a Refresh Token
:raise InvalidTokenRequest: if the token request is invalid | entailment |
def _do_token_refresh(self, request):
# type: (Mapping[str, str]) -> oic.oic.message.AccessTokenResponse
"""
Handles a token request for refreshing an access token (grant_type=refresh_token).
:param request: parsed http request parameters
:return: a token response containing a new Access Token and possibly a new Refresh Token
:raise InvalidTokenRequest: if the token request is invalid
"""
token_request = RefreshAccessTokenRequest().from_dict(request)
try:
token_request.verify()
except MessageException as e:
raise InvalidTokenRequest(str(e), token_request) from e
response = AccessTokenResponse()
access_token, refresh_token = self.authz_state.use_refresh_token(token_request['refresh_token'],
scope=token_request.get('scope'))
self._add_access_token_to_response(response, access_token)
if refresh_token:
response['refresh_token'] = refresh_token
return response | Handles a token request for refreshing an access token (grant_type=refresh_token).
:param request: parsed http request parameters
:return: a token response containing a new Access Token and possibly a new Refresh Token
:raise InvalidTokenRequest: if the token request is invalid | entailment |
def _verify_client_authentication(self, request_body, http_headers=None):
# type (str, Optional[Mapping[str, str]] -> Mapping[str, str]
"""
Verifies the client authentication.
:param request_body: urlencoded token request
:param http_headers:
:return: The parsed request body.
"""
if http_headers is None:
http_headers = {}
token_request = dict(parse_qsl(request_body))
token_request['client_id'] = verify_client_authentication(self.clients, token_request, http_headers.get('Authorization'))
return token_request | Verifies the client authentication.
:param request_body: urlencoded token request
:param http_headers:
:return: The parsed request body. | entailment |
def handle_userinfo_request(self, request=None, http_headers=None):
# type: (Optional[str], Optional[Mapping[str, str]]) -> oic.oic.message.OpenIDSchema
"""
Handles a userinfo request.
:param request: urlencoded request (either query string or POST body)
:param http_headers: http headers
"""
if http_headers is None:
http_headers = {}
userinfo_request = dict(parse_qsl(request))
bearer_token = extract_bearer_token_from_http_request(userinfo_request, http_headers.get('Authorization'))
introspection = self.authz_state.introspect_access_token(bearer_token)
if not introspection['active']:
raise InvalidAccessToken('The access token has expired')
scopes = introspection['scope'].split()
user_id = self.authz_state.get_user_id_for_subject_identifier(introspection['sub'])
requested_claims = scope2claims(scopes, extra_scope_dict=self.extra_scopes)
authentication_request = self.authz_state.get_authorization_request_for_access_token(bearer_token)
requested_claims.update(self._get_requested_claims_in(authentication_request, 'userinfo'))
user_claims = self.userinfo.get_claims_for(user_id, requested_claims)
user_claims.setdefault('sub', introspection['sub'])
response = OpenIDSchema(**user_claims)
logger.debug('userinfo=%s from requested_claims=%s userinfo=%s',
response, requested_claims, user_claims)
return response | Handles a userinfo request.
:param request: urlencoded request (either query string or POST body)
:param http_headers: http headers | entailment |
def match_client_preferences_with_provider_capabilities(self, client_preferences):
# type: (oic.message.RegistrationRequest) -> Mapping[str, Union[str, List[str]]]
"""
Match as many as of the client preferences as possible.
:param client_preferences: requested preferences from client registration request
:return: the matched preferences selected by the provider
"""
matched_prefs = client_preferences.to_dict()
for pref in ['response_types', 'default_acr_values']:
if pref not in client_preferences:
continue
capability = PREFERENCE2PROVIDER[pref]
# only preserve the common values
matched_values = find_common_values(client_preferences[pref], self.configuration_information[capability])
# deal with space separated values
matched_prefs[pref] = [' '.join(v) for v in matched_values]
return matched_prefs | Match as many as of the client preferences as possible.
:param client_preferences: requested preferences from client registration request
:return: the matched preferences selected by the provider | entailment |
def handle_client_registration_request(self, request, http_headers=None):
# type: (Optional[str], Optional[Mapping[str, str]]) -> oic.oic.message.RegistrationResponse
"""
Handles a client registration request.
:param request: JSON request from POST body
:param http_headers: http headers
"""
registration_req = RegistrationRequest().deserialize(request, 'json')
for validator in self.registration_request_validators:
validator(registration_req)
logger.debug('parsed authentication_request: %s', registration_req)
client_id, client_secret = self._issue_new_client()
credentials = {
'client_id': client_id,
'client_id_issued_at': int(time.time()),
'client_secret': client_secret,
'client_secret_expires_at': 0 # never expires
}
response_params = self.match_client_preferences_with_provider_capabilities(registration_req)
response_params.update(credentials)
self.clients[client_id] = copy.deepcopy(response_params)
registration_resp = RegistrationResponse(**response_params)
logger.debug('registration_resp=%s from registration_req=%s', registration_resp, registration_req)
return registration_resp | Handles a client registration request.
:param request: JSON request from POST body
:param http_headers: http headers | entailment |
def extract_bearer_token_from_http_request(parsed_request=None, authz_header=None):
# type (Optional[Mapping[str, str]], Optional[str] -> str
"""
Extracts a Bearer token from an http request
:param parsed_request: parsed request (URL query part of request body)
:param authz_header: HTTP Authorization header
:return: Bearer access token, if found
:raise BearerTokenError: if no Bearer token could be extracted from the request
"""
if authz_header:
# Authorization Request Header Field: https://tools.ietf.org/html/rfc6750#section-2.1
if authz_header.startswith(AccessToken.BEARER_TOKEN_TYPE):
access_token = authz_header[len(AccessToken.BEARER_TOKEN_TYPE) + 1:]
logger.debug('found access token %s in authz header', access_token)
return access_token
elif parsed_request:
if 'access_token' in parsed_request:
"""
Form-Encoded Body Parameter: https://tools.ietf.org/html/rfc6750#section-2.2, and
URI Query Parameter: https://tools.ietf.org/html/rfc6750#section-2.3
"""
access_token = parsed_request['access_token']
logger.debug('found access token %s in request', access_token)
return access_token
raise BearerTokenError('Bearer Token could not be found in the request') | Extracts a Bearer token from an http request
:param parsed_request: parsed request (URL query part of request body)
:param authz_header: HTTP Authorization header
:return: Bearer access token, if found
:raise BearerTokenError: if no Bearer token could be extracted from the request | entailment |
def _format_mongodb_uri(parsed_uri):
"""
Painstakingly reconstruct a MongoDB URI parsed using pymongo.uri_parser.parse_uri.
:param parsed_uri: Result of pymongo.uri_parser.parse_uri
:type parsed_uri: dict
:return: New URI
:rtype: str | unicode
"""
user_pass = ''
if parsed_uri.get('username') and parsed_uri.get('password'):
user_pass = '{username!s}:{password!s}@'.format(**parsed_uri)
_nodes = []
for host, port in parsed_uri.get('nodelist'):
if ':' in host and not host.endswith(']'):
# IPv6 address without brackets
host = '[{!s}]'.format(host)
if port == 27017:
_nodes.append(host)
else:
_nodes.append('{!s}:{!s}'.format(host, port))
nodelist = ','.join(_nodes)
options = ''
if parsed_uri.get('options'):
_opt_list = []
for key, value in parsed_uri.get('options').items():
if isinstance(value, bool):
value = str(value).lower()
_opt_list.append('{!s}={!s}'.format(key, value))
options = '?' + '&'.join(_opt_list)
db_name = parsed_uri.get('database') or ''
res = "mongodb://{user_pass!s}{nodelist!s}/{db_name!s}{options!s}".format(
user_pass=user_pass,
nodelist=nodelist,
db_name=db_name,
# collection is ignored
options=options)
return res | Painstakingly reconstruct a MongoDB URI parsed using pymongo.uri_parser.parse_uri.
:param parsed_uri: Result of pymongo.uri_parser.parse_uri
:type parsed_uri: dict
:return: New URI
:rtype: str | unicode | entailment |
def sanitized_uri(self):
"""
Return the database URI we're using in a format sensible for logging etc.
:return: db_uri
"""
if self._sanitized_uri is None:
_parsed = copy.copy(self._parsed_uri)
if 'username' in _parsed:
_parsed['password'] = 'secret'
_parsed['nodelist'] = [_parsed['nodelist'][0]]
self._sanitized_uri = _format_mongodb_uri(_parsed)
return self._sanitized_uri | Return the database URI we're using in a format sensible for logging etc.
:return: db_uri | entailment |
def get_database(self, database_name=None, username=None, password=None):
"""
Get a pymongo database handle, after authenticating.
Authenticates using the username/password in the DB URI given to
__init__() unless username/password is supplied as arguments.
:param database_name: (optional) Name of database
:param username: (optional) Username to login with
:param password: (optional) Password to login with
:return: Pymongo database object
"""
if database_name is None:
database_name = self._database_name
if database_name is None:
raise ValueError('No database_name supplied, and no default provided to __init__')
db = self._connection[database_name]
if username and password:
db.authenticate(username, password)
elif self._parsed_uri.get("username", None):
if 'authSource' in self._options and self._options['authSource'] is not None:
db.authenticate(
self._parsed_uri.get("username", None),
self._parsed_uri.get("password", None),
source=self._options['authSource']
)
else:
db.authenticate(
self._parsed_uri.get("username", None),
self._parsed_uri.get("password", None)
)
return db | Get a pymongo database handle, after authenticating.
Authenticates using the username/password in the DB URI given to
__init__() unless username/password is supplied as arguments.
:param database_name: (optional) Name of database
:param username: (optional) Username to login with
:param password: (optional) Password to login with
:return: Pymongo database object | entailment |
def get_collection(self, collection, database_name=None, username=None, password=None):
"""
Get a pymongo collection handle.
:param collection: Name of collection
:param database_name: (optional) Name of database
:param username: (optional) Username to login with
:param password: (optional) Password to login with
:return: Pymongo collection object
"""
_db = self.get_database(database_name, username, password)
return _db[collection] | Get a pymongo collection handle.
:param collection: Name of collection
:param database_name: (optional) Name of database
:param username: (optional) Username to login with
:param password: (optional) Password to login with
:return: Pymongo collection object | entailment |
def balanced_binary_tree(n_leaves):
"""
Create a balanced binary tree
"""
def _balanced_subtree(leaves):
if len(leaves) == 1:
return leaves[0]
elif len(leaves) == 2:
return (leaves[0], leaves[1])
else:
split = len(leaves) // 2
return (_balanced_subtree(leaves[:split]),
_balanced_subtree(leaves[split:]))
return _balanced_subtree(np.arange(n_leaves)) | Create a balanced binary tree | entailment |
def decision_list(n_leaves):
"""
Create a decision list
"""
def _list(leaves):
if len(leaves) == 2:
return (leaves[0], leaves[1])
else:
return (leaves[0], _list(leaves[1:]))
return _list(np.arange(n_leaves)) | Create a decision list | entailment |
def random_tree(n_leaves):
"""
Randomly partition the nodes
"""
def _random_subtree(leaves):
if len(leaves) == 1:
return leaves[0]
elif len(leaves) == 2:
return (leaves[0], leaves[1])
else:
split = npr.randint(1, len(leaves)-1)
return (_random_subtree(leaves[:split]),
_random_subtree(leaves[split:]))
return _random_subtree(np.arange(n_leaves)) | Randomly partition the nodes | entailment |
def leaves(tree):
"""
Return the leaves in this subtree.
"""
lvs = []
def _leaves(node):
if np.isscalar(node):
lvs.append(node)
elif isinstance(node, tuple) and len(node) == 2:
_leaves(node[0])
_leaves(node[1])
else:
raise Exception("Not a tree!")
_leaves(tree)
return lvs | Return the leaves in this subtree. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.