code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
history_log = context.obj['history_log'] no_color = context.obj['no_color'] if not results_file: # Find results/log file from history # Default -1 is most recent test run try: with open(history_log, 'r') as f: lines = f.readlines() history = lines[len(lines) - item] except IndexError: echo_style( 'History result at index %s does not exist.' % item, no_color, fg='red' ) sys.exit(1) except Exception: echo_style( 'Unable to retrieve results history, ' 'provide results file or re-run test.', no_color, fg='red' ) sys.exit(1) log_file = get_log_file_from_item(history) if log: echo_log(log_file, no_color) else: echo_results_file( log_file.rsplit('.', 1)[0] + '.results', no_color, verbose ) elif log: # Log file provided echo_log(results_file, no_color) else: # Results file provided echo_results_file(results_file, no_color, verbose)
def show(context, log, results_file, verbose, item)
Print test results info from provided results json file. If no results file is supplied echo results from most recent test in history if it exists. If verbose option selected, echo all test cases. If log option selected echo test log.
3.470543
3.293739
1.053679
return ipa_utils.get_ssh_client( self.instance_ip, self.ssh_private_key_file, self.ssh_user, timeout=self.timeout )
def _get_ssh_client(self)
Return a new or existing SSH client for given ip.
4.522306
3.930179
1.150662
if self.cloud == 'ssh': self.results['info'] = { 'platform': self.cloud, 'distro': self.distro_name, 'image': self.instance_ip, 'timestamp': self.time_stamp, 'log_file': self.log_file, 'results_file': self.results_file } else: self.results['info'] = { 'platform': self.cloud, 'region': self.region, 'distro': self.distro_name, 'image': self.image_id, 'instance': self.running_instance_id, 'timestamp': self.time_stamp, 'log_file': self.log_file, 'results_file': self.results_file } self._write_to_log( '\n'.join( '%s: %s' % (key, val) for key, val in self.results['info'].items() ) )
def _log_info(self)
Output test run information to top of log file.
2.41488
2.303007
1.048577
with open(self.log_file, 'a') as log_file: log_file.write('\n') log_file.write(output) log_file.write('\n')
def _write_to_log(self, output)
Write the output string to the log file.
2.187477
2.081885
1.050719
self.results['tests'] += results['tests'] for key, value in results['summary'].items(): self.results['summary'][key] += value
def _merge_results(self, results)
Combine results of test run with exisiting dict.
4.158841
3.193116
1.30244
with open(self.results_file, 'w') as results_file: json.dump(self.results, results_file)
def _save_results(self)
Save results dictionary to json file.
2.764799
2.24424
1.231953
if self.distro_name == 'sles': self.distro = SLES() elif self.distro_name == 'opensuse_leap': self.distro = openSUSE_Leap() else: raise IpaCloudException( 'Distribution: %s, not supported.' % self.distro_name )
def _set_distro(self)
Determine distro for image and create instance of class.
3.613757
3.354335
1.07734
if self.running_instance_id: self.results_dir = os.path.join( self.results_dir, self.cloud, self.image_id, self.running_instance_id ) else: self.results_dir = os.path.join( self.results_dir, self.cloud, self.instance_ip ) try: os.makedirs(self.results_dir) except OSError as error: if not os.path.isdir(self.results_dir): raise IpaCloudException( 'Unable to create ipa results directory: %s' % error ) self.time_stamp = datetime.now().strftime('%Y%m%d%H%M%S') self.log_file = ''.join( [self.results_dir, os.sep, self.time_stamp, '.log'] ) self.logger.debug('Created log file %s' % self.log_file) self.results_file = ''.join( [self.results_dir, os.sep, self.time_stamp, '.results'] ) self.logger.debug('Created results file %s' % self.results_file) # Add log file handler file_handler = logging.FileHandler(self.log_file) file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(logging.Formatter('\n%(message)s\n')) self.logger.addHandler(file_handler)
def _set_results_dir(self)
Create results directory if not exists.
2.00924
1.96972
1.020064
self.logger.info('Collecting basic info about VM') client = self._get_ssh_client() out = self.distro.get_vm_info(client) self._write_to_log(out)
def _collect_vm_info(self)
Gather basic info about VM
6.285985
4.71068
1.334411
ipa_utils.update_history_log( self.history_log, description=self.description, test_log=self.log_file )
def _update_history(self)
Save the current test information to history json.
9.377744
7.2959
1.285344
current_state = 'Undefined' start = time.time() end = start + timeout while time.time() < end: current_state = self._get_instance_state() if state.lower() == current_state.lower(): return time.sleep(wait_period) raise IpaCloudException( 'Instance has not arrived at the given state: {state}'.format( state=state ) )
def _wait_on_instance(self, state, timeout=600, wait_period=10)
Wait until instance is in given state.
3.379498
3.165201
1.067704
try: out = ipa_utils.execute_ssh_command(client, command) except Exception as error: raise IpaCloudException( 'Command: "{0}", failed execution: {1}.'.format( command, error ) ) else: self._write_to_log(out)
def execute_ssh_command(self, client, command)
Execute the provided command and log output.
5.018481
4.844262
1.035964
try: out = ipa_utils.extract_archive(client, archive_path, extract_path) except Exception as error: raise IpaCloudException( 'Failed to extract archive, "{0}": {1}.'.format( archive_path, error ) ) else: self._write_to_log(out)
def extract_archive(self, client, archive_path, extract_path=None)
Extract the archive files using the client in the current path.
4.828075
4.646776
1.039016
self._stop_instance() self._start_instance() self._set_instance_ip() self.logger.debug('IP of instance: %s' % self.instance_ip) ipa_utils.clear_cache()
def hard_reboot_instance(self)
Stop then start the instance.
5.381461
4.756632
1.13136
try: out = self.distro.install_package(client, package) except Exception as error: raise IpaCloudException( 'Failed installing package, "{0}"; {1}.'.format( package, error ) ) else: self._write_to_log(out)
def install_package(self, client, package)
Install package using distro specific install method.
5.437075
4.772928
1.139149
configuration = ipa_utils.get_yaml_config(self.inject) if configuration.get('inject_packages'): inject_packages = configuration['inject_packages'] if not isinstance(inject_packages, list): inject_packages = [inject_packages] for package in inject_packages: package_path = self.put_file(client, package) self.install_package(client, package_path) if configuration.get('inject_archives'): inject_archives = configuration['inject_archives'] if not isinstance(inject_archives, list): inject_archives = [inject_archives] for archive in inject_archives: archive_path = self.put_file(client, archive) self.extract_archive(client, archive_path) if configuration.get('inject_files'): inject_files = configuration['inject_files'] if not isinstance(inject_files, list): inject_files = [inject_files] for file_path in inject_files: self.put_file(client, file_path) if configuration.get('execute'): execute = configuration['execute'] if not isinstance(execute, list): execute = [execute] for command in execute: self.execute_ssh_command(client, command) if configuration.get('install'): install = configuration['install'] if not isinstance(install, list): install = [install] for package in install: self.install_package(client, package)
def process_injection_file(self, client)
Load yaml file and process injection configuration. There are 5 injection options: :inject_packages: an rpm path or list of rpm paths which will be copied and installed on the test instance. :inject_archives: an archive or list of archives which will be copied and extracted on the test instance. :inject_files: a file path or list of file paths which will be copied to the test instance. :execute: a command or list of commands to run on the test instance. :install: a package name or list of package names to install from an existing repo on the test instance. The order of processing is as follows: inject_packages, inject_archives, inject_files, execute, install.
1.735165
1.536544
1.129264
try: file_name = os.path.basename(source_file) ipa_utils.put_file(client, source_file, file_name) except Exception as error: raise IpaCloudException( 'Failed copying file, "{0}"; {1}.'.format( source_file, error ) ) else: return file_name
def put_file(self, client, source_file)
Put file on instance in default SSH directory.
4.009193
3.900536
1.027857
triples = [] for cell_line in self.neuron_data: for tl in cell_line['donor']['transgenic_lines']: _id = tl['stock_number'] if tl['stock_number'] else tl['id'] prefix = tl['transgenic_line_source_name'] line_type = tl['transgenic_line_type_name'] if prefix not in ['JAX', 'MMRRC', 'AIBS']: print(tc.red('WARNING:'), 'unknown prefix', prefix, json.dumps(tl, indent=4)) continue elif prefix == 'AIBS': prefix = 'AllenTL' _class = self.ns[prefix][str(_id)] triples.append((_class, rdf.type, owl.Class)) triples.append((_class, rdfs.label, rdflib.Literal(tl['name']))) triples.append((_class, definition, rdflib.Literal(tl['description']))) triples.append((_class, rdfs.subClassOf, ilxtr.transgenicLine)) triples.append((_class, ilxtr.hasTransgenicType, ilxtr[line_type + 'Line'])) # TODO aspects.ttl? transgenic_lines = simpleOnt(filename='allen-transgenic-lines', path='ttl/generated/', prefixes=self.prefixes, triples=triples, comment='Allen transgenic lines for cell types', branch=self.branch) transgenic_lines._graph.write()
def build_transgenic_lines(self)
init class | "transgenic_line_source_name":"stock_number" a Class add superClass | rdfs:subClassOf ilxtr:transgenicLine add *order* | ilxtr:useObjectProperty ilxtr:<order> add name | rdfs:label "name" add def | definition: "description" add transtype | rdfs:hasTransgenicType "transgenic_line_type_name"
4.660748
3.961905
1.176391
lines = [_ for _ in resp.text.split('\n') if _] # strip empties if 'successfull' in lines[0]: return [(_.split('"')[1], ilxIdFix(_.split(': ')[-1])) for _ in lines[1:]] elif 'errors' in lines[0]: return [(_.split('"')[1], ilxIdFix(_.split('(')[1].split(')')[0])) for _ in lines[1:]]
def decodeIlxResp(resp)
We need this until we can get json back directly and this is SUPER nasty
4.266877
4.117163
1.036363
alpha = list(zip(*sorted(((k, v['rec']['label']) for k, v in existing.items()), key=lambda a: a[1])))[0] depths = {} def getDepth(id_): if id_ in depths: return depths[id_] else: if id_ in existing: names_above = getDepth(existing[id_]['sc']) depths[id_] = names_above + [existing[id_]['rec']['label']] return depths[id_] else: return [''] for id_ in existing: getDepth(id_) print(sorted(depths.values())) def key_(id_): return depths[id_] return sorted(depths, key=key_)
def getSubOrder(existing)
Alpha sort by the full chain of parents.
3.893638
3.765543
1.034018
''' Begining of the string can sometimes have odd noise ''' # manual fixes in the source # 24/1.png # 9/1.png # 3/1.png #if ')' in string: # fix in the source data #string = string.split(')')[0] + ')' # remove trailing garbage return (string .replace('_', '') .replace('-', '') .replace('—', '') .replace('.', '') .replace('=', '') .replace('\u2018',"'") # LEFT SINGLE QUOTATION MARK .replace('\u2019', "'") # RIGHT SINGLE QUOTATION MARK .strip())
def clean(string)
Begining of the string can sometimes have odd noise
6.544648
5.210373
1.25608
data = [] if cls.source_images.exists(): for folder in cls.source_images.glob('*'): plate_num = int(folder.stem) text_file = cls.source / f'{plate_num}.txt' if not text_file.exists() or cls.run_ocr: legends = [] raw_text = '' for img in folder.glob('*.png'): print('num', plate_num, img.stem) p = subprocess.Popen(('tesseract', img.as_posix(), 'stdout', '-l', 'eng', '--oem', '2', '--psm', '6'), stdout=subprocess.PIPE) bytes_text, err = p.communicate() raw_text += bytes_text.decode() + '\n' with open(text_file, 'wt') as f: f.write(raw_text) else: with open(text_file, 'rt') as f: raw_text = f.read() legends = get_legends(raw_text) data.append((plate_num, legends)) elif cls.source.exists(): for text_file in cls.source.glob('*.txt'): plate_num = int(text_file.stem) with open(text_file, 'rt') as f: raw_text = f.read() legends = get_legends(raw_text) data.append((plate_num, legends)) return data
def loadData(cls)
Sigh, this was indeed a poorly conceived approach since it hard blocks when the files are not in the source so you can't easily bootstrap from another source and the cognitive overhead is way, way too high :/ Adding dry_run/bootstrap to __new__ sort of helps?
2.515156
2.46323
1.021081
to_sub = set() for subject in graph.subjects(rdflib.RDF.type, rdflib.OWL.Class): if PREFIXES[prefix] in subject: to_sub.add(subject) ilx_base = 'ilx_{:0>7}' ILX_base = 'ILX:{:0>7}' # ah rdflib/owlapi, you infuriate me ilx_labels = {} replace = {} for sub in sorted(to_sub): ilx_format = ilx_base.format(ilx_start) ILX_format = ILX_base.format(ilx_start) ilx_start += 1 prefix, url, suffix = graph.namespace_manager.compute_qname(sub) curie = prefix + ':' + suffix replace[curie] = ILX_format label = [_ for _ in graph.objects(sub, rdflib.RDFS.label)][0] ilx_labels[ilx_format] = label new_sub = expand('ilx:' + ilx_format) for p, o in graph.predicate_objects(sub): graph.remove((sub, p, o)) graph.add((new_sub, p, o)) for s, p in graph.subject_predicates(sub): graph.remove((s, p, sub)) graph.add((s, p, new_sub)) return ilx_labels, replace
def ilx_conv(graph, prefix, ilx_start)
convert a set of temporary identifiers to ilx and modify the graph in place
3.167676
3.156821
1.003439
namespace = oldClassString.split(':')[0] if namespace == 'http': target = rdflib.URIRef(oldClassString) print('OLD CLASS ID IS A URL', oldClassString) else: try: og.add_known_namespaces(namespace) target = og.expand(oldClassString) except KeyError: print('MISSING NAMESPACE', namespace, oldClassString) return True # we only want known namespaces return (target, rdf.type, owl.Class) in og.g
def alreadyHasEntry(oldClassString, og)
Return true if there is already an owl:Class with the old id
5.972903
5.676337
1.052246
graphBase.configGraphIO(remote_base=remote_base, local_base=local_base, branch=branch, core_graph_paths=core_graph_paths, core_graph=core_graph, in_graph_paths=in_graph_paths, out_graph_path=out_graph_path, out_imports=out_imports, out_graph=out_graph, prefixes=prefixes, force_remote=force_remote, checkout_ok=checkout_ok, scigraph=scigraph, iri=iri, sources=sources, source_file=source_file, use_local_import_paths=use_local_import_paths, ignore_existing=ignore_existing) pred = graphBase._predicates return pred
def config(remote_base= 'https://raw.githubusercontent.com/SciCrunch/NIF-Ontology/', local_base= None, # devconfig.ontology_local_repo by default branch= devconfig.neurons_branch, core_graph_paths= ['ttl/phenotype-core.ttl', 'ttl/phenotypes.ttl'], core_graph= None, in_graph_paths= tuple(), out_graph_path= '/tmp/_Neurons.ttl', out_imports= ['ttl/phenotype-core.ttl'], out_graph= None, prefixes= tuple(), force_remote= False, checkout_ok= ont_checkout_ok, scigraph= None, # defaults to devconfig.scigraph_api iri= None, sources= tuple(), source_file= None, use_local_import_paths=True, ignore_existing= True)
Wraps graphBase.configGraphIO to provide a set of sane defaults for input ontologies and output files.
1.589373
1.402114
1.133555
for ont in graph.subjects(rdf.type, owl.Ontology): for versionIRI in graph.objects(ont, owl.versionIRI): graph.remove((ont, owl.versionIRI, versionIRI)) t = ont, owl.versionIRI, make_version_iri_from_iri(ont, epoch) graph.add(t)
def add_version_iri(graph, epoch)
Also remove the previous versionIRI if there was one.
3.022582
2.677044
1.129075
if self.oauth: return self.oauth return (self.username, self.password)
def auth(self)
Return credentials for current Bitbucket user.
6.352725
4.350581
1.460202
self.consumer_key = consumer_key self.consumer_secret = consumer_secret if not access_token and not access_token_secret: if not callback_url: return (False, "Callback URL required") oauth = OAuth1( consumer_key, client_secret=consumer_secret, callback_uri=callback_url) r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth) if r.status_code == 200: creds = parse_qs(r.content) self.access_token = creds.get('oauth_token')[0] self.access_token_secret = creds.get('oauth_token_secret')[0] else: return (False, r.content) else: self.finalize_oauth(access_token, access_token_secret) return (True, None)
def authorize(self, consumer_key, consumer_secret, callback_url=None, access_token=None, access_token_secret=None)
Call this with your consumer key, secret and callback URL, to generate a token for verification.
1.985216
1.98102
1.002118
# Stored values can be supplied to verify self.consumer_key = consumer_key or self.consumer_key self.consumer_secret = consumer_secret or self.consumer_secret self.access_token = access_token or self.access_token self.access_token_secret = access_token_secret or self.access_token_secret oauth = OAuth1( self.consumer_key, client_secret=self.consumer_secret, resource_owner_key=self.access_token, resource_owner_secret=self.access_token_secret, verifier=verifier) r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth) if r.status_code == 200: creds = parse_qs(r.content) else: return (False, r.content) self.finalize_oauth(creds.get('oauth_token')[0], creds.get('oauth_token_secret')[0]) return (True, None)
def verify(self, verifier, consumer_key=None, consumer_secret=None, access_token=None, access_token_secret=None)
After converting the token into verifier, call this to finalize the authorization.
2.050216
2.062072
0.99425
self.access_token = access_token self.access_token_secret = access_token_secret # Final OAuth object self.oauth = OAuth1( self.consumer_key, client_secret=self.consumer_secret, resource_owner_key=self.access_token, resource_owner_secret=self.access_token_secret)
def finalize_oauth(self, access_token, access_token_secret)
Called internally once auth process is complete.
1.8283
1.779542
1.027399
r = Request( method=method, url=url, auth=auth, params=params, data=kwargs) s = Session() resp = s.send(r.prepare()) status = resp.status_code text = resp.text error = resp.reason if status >= 200 and status < 300: if text: try: return (True, json.loads(text)) except TypeError: pass except ValueError: pass return (True, text) elif status >= 300 and status < 400: return ( False, 'Unauthorized access, ' 'please check your credentials.') elif status >= 400 and status < 500: return (False, 'Service not found.') elif status >= 500 and status < 600: return (False, 'Server error.') else: return (False, error)
def dispatch(self, method, url, auth=None, params=None, **kwargs)
Send HTTP request, with given method, credentials and data to the given URL, and return the success and the result on success.
2.22555
2.171153
1.025054
# TODO : should be static method ? return self.URLS['BASE'] % self.URLS[action] % kwargs
def url(self, action, **kwargs)
Construct and return the URL for a specific API service.
14.770577
14.040135
1.052025
username = username or self.username or '' url = self.url('GET_USER', username=username) response = self.dispatch('GET', url) try: return (response[0], response[1]['user']) except TypeError: pass return response
def get_user(self, username=None)
Returns user informations. If username is not defined, tries to return own informations.
4.807551
4.890546
0.98303
repo_slug = repo_slug or self.repo_slug or '' url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug) return self.dispatch('GET', url, auth=self.auth)
def get_tags(self, repo_slug=None)
Get a single repository on Bitbucket and return its tags.
3.860533
3.639794
1.060646
repo_slug = repo_slug or self.repo_slug or '' url = self.url('GET_BRANCHES', username=self.username, repo_slug=repo_slug) return self.dispatch('GET', url, auth=self.auth)
def get_branches(self, repo_slug=None)
Get a single repository on Bitbucket and return its branches.
4.021613
3.860767
1.041661
url = self.url('GET_USER_PRIVILEGES') return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self)
Get privledges for this user.
7.930784
5.740793
1.381479
key = '%s' % key repo_slug = repo_slug or self.bitbucket.repo_slug or '' url = self.bitbucket.url('SET_DEPLOY_KEY', username=self.bitbucket.username, repo_slug=repo_slug) return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, key=key, label=label)
def create(self, repo_slug=None, key=None, label=None)
Associate an ssh key with your repo and return it.
3.951126
3.863699
1.022628
username = username or self.bitbucket.username or '' url = self.bitbucket.url('GET_USER', username=username) response = self.bitbucket.dispatch('GET', url) try: return (response[0], response[1]['repositories']) except TypeError: pass return response
def public(self, username=None)
Returns all public repositories from an user. If username is not defined, tries to return own public repos.
5.906857
5.891006
1.002691
url = self.bitbucket.url('GET_USER', username=self.bitbucket.username) response = self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth) try: return (response[0], response[1]['repositories']) except TypeError: pass return response
def all(self)
Return own repositories.
6.300477
5.230628
1.204536
url = self.bitbucket.url('CREATE_REPO') return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, name=repo_name, scm=scm, is_private=private, **kwargs)
def create(self, repo_name, scm='git', private=True, **kwargs)
Creates a new repository on own Bitbucket account and return it.
4.647784
3.949881
1.17669
prefix = '%s'.lstrip('/') % prefix self._get_files_in_dir(repo_slug=repo_slug, dir='/') if self.bitbucket.repo_tree: with NamedTemporaryFile(delete=False) as archive: with ZipFile(archive, 'w') as zip_archive: for name, file in self.bitbucket.repo_tree.items(): with NamedTemporaryFile(delete=False) as temp_file: temp_file.write(file.encode('utf-8')) zip_archive.write(temp_file.name, prefix + name) return (True, archive.name) return (False, 'Could not archive your project.')
def archive(self, repo_slug=None, format='zip', prefix='')
Get one of your repositories and compress it as an archive. Return the path of the archive. format parameter is curently not supported.
3.569417
3.897441
0.915836
issue_id = issue_id or self.issue_id repo_slug = repo_slug or self.bitbucket.repo_slug or '' url = self.bitbucket.url('CREATE_COMMENT', username=self.bitbucket.username, repo_slug=repo_slug, issue_id=issue_id) return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, **kwargs)
def create(self, issue_id=None, repo_slug=None, **kwargs)
Add an issue comment to one of your repositories. Each issue comment require only the content data field the system autopopulate the rest.
3.085964
3.072982
1.004225
issue_id = issue_id or self.issue_id repo_slug = repo_slug or self.bitbucket.repo_slug or '' url = self.bitbucket.url('DELETE_COMMENT', username=self.bitbucket.username, repo_slug=repo_slug, issue_id=issue_id, comment_id=comment_id) return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
def delete(self, comment_id, issue_id=None, repo_slug=None)
Delete an issue from one of your repositories.
2.734807
2.678021
1.021205
url = self.bitbucket.url('GET_SSH_KEYS') return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def all(self)
Get all ssh keys associated with your account.
11.249886
7.057727
1.593981
url = self.bitbucket.url('GET_SSH_KEY', key_id=key_id) return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def get(self, key_id=None)
Get one of the ssh keys associated with your account.
6.195859
4.892487
1.266403
key = '%s' % key url = self.bitbucket.url('SET_SSH_KEY') return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, key=key, label=label)
def create(self, key=None, label=None)
Associate an ssh key with your account and return it.
6.135691
5.010428
1.224584
url = self.bitbucket.url('DELETE_SSH_KEY', key_id=key_id) return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
def delete(self, key_id=None)
Delete one of the ssh keys associated with your account. Please use with caution as there is NO confimation and NO undo.
6.65535
5.34037
1.246234
''' Open a file for appending and yield the open filehandle. Close the filehandle after yielding it. This is useful for creating a context manager for logging the output of a `Vagrant` instance. filename: a path to a file mode: The mode in which to open the file. Defaults to 'a', append Usage example: log_cm = make_file_cm('application.log') v = Vagrant(out_cm=log_cm, err_cm=log_cm) ''' @contextlib.contextmanager def cm(): with open(filename, mode=mode) as fh: yield fh return cm
def make_file_cm(filename, mode='a')
Open a file for appending and yield the open filehandle. Close the filehandle after yielding it. This is useful for creating a context manager for logging the output of a `Vagrant` instance. filename: a path to a file mode: The mode in which to open the file. Defaults to 'a', append Usage example: log_cm = make_file_cm('application.log') v = Vagrant(out_cm=log_cm, err_cm=log_cm)
5.622391
1.541297
3.64783
''' Return the installed vagrant version, as a string, e.g. '1.5.0' ''' output = self._run_vagrant_command(['--version']) m = re.search(r'^Vagrant (?P<version>.+)$', output) if m is None: raise Exception('Failed to parse vagrant --version output. output={!r}'.format(output)) return m.group('version')
def version(self)
Return the installed vagrant version, as a string, e.g. '1.5.0'
3.8775
2.519897
1.538753
''' Invoke `vagrant up` to start a box or boxes, possibly streaming the command output. vm_name=None: name of VM. provision_with: optional list of provisioners to enable. provider: Back the machine with a specific provider no_provision: if True, disable provisioning. Same as 'provision=False'. provision: optional boolean. Enable or disable provisioning. Default behavior is to use the underlying vagrant default. stream_output: if True, return a generator that yields each line of the output of running the command. Consume the generator or the subprocess might hang. if False, None is returned and the command is run to completion without streaming the output. Defaults to False. Note: If provision and no_provision are not None, no_provision will be ignored. returns: None or a generator yielding lines of output. ''' provider_arg = '--provider=%s' % provider if provider else None prov_with_arg = None if provision_with is None else '--provision-with' providers_arg = None if provision_with is None else ','.join(provision_with) # For the sake of backward compatibility, no_provision is allowed. # However it is ignored if provision is set. if provision is not None: no_provision = None no_provision_arg = '--no-provision' if no_provision else None provision_arg = None if provision is None else '--provision' if provision else '--no-provision' args = ['up', vm_name, no_provision_arg, provision_arg, provider_arg, prov_with_arg, providers_arg] if stream_output: generator = self._stream_vagrant_command(args) else: self._call_vagrant_command(args) self._cached_conf[vm_name] = None # remove cached configuration return generator if stream_output else None
def up(self, no_provision=False, provider=None, vm_name=None, provision=None, provision_with=None, stream_output=False)
Invoke `vagrant up` to start a box or boxes, possibly streaming the command output. vm_name=None: name of VM. provision_with: optional list of provisioners to enable. provider: Back the machine with a specific provider no_provision: if True, disable provisioning. Same as 'provision=False'. provision: optional boolean. Enable or disable provisioning. Default behavior is to use the underlying vagrant default. stream_output: if True, return a generator that yields each line of the output of running the command. Consume the generator or the subprocess might hang. if False, None is returned and the command is run to completion without streaming the output. Defaults to False. Note: If provision and no_provision are not None, no_provision will be ignored. returns: None or a generator yielding lines of output.
4.696936
1.869815
2.51198
''' Runs the provisioners defined in the Vagrantfile. vm_name: optional VM name string. provision_with: optional list of provisioners to enable. e.g. ['shell', 'chef_solo'] ''' prov_with_arg = None if provision_with is None else '--provision-with' providers_arg = None if provision_with is None else ','.join(provision_with) self._call_vagrant_command(['provision', vm_name, prov_with_arg, providers_arg])
def provision(self, vm_name=None, provision_with=None)
Runs the provisioners defined in the Vagrantfile. vm_name: optional VM name string. provision_with: optional list of provisioners to enable. e.g. ['shell', 'chef_solo']
5.02939
2.444291
2.057607
''' Quoting from Vagrant docs: > The equivalent of running a halt followed by an up. > This command is usually required for changes made in the Vagrantfile to take effect. After making any modifications to the Vagrantfile, a reload should be called. > The configured provisioners will not run again, by default. You can force the provisioners to re-run by specifying the --provision flag. provision: optional boolean. Enable or disable provisioning. Default behavior is to use the underlying vagrant default. provision_with: optional list of provisioners to enable. e.g. ['shell', 'chef_solo'] stream_output: if True, return a generator that yields each line of the output of running the command. Consume the generator or the subprocess might hang. if False, None is returned and the command is run to completion without streaming the output. Defaults to False. returns: None or a generator yielding lines of output. ''' prov_with_arg = None if provision_with is None else '--provision-with' providers_arg = None if provision_with is None else ','.join(provision_with) provision_arg = None if provision is None else '--provision' if provision else '--no-provision' args = ['reload', vm_name, provision_arg, prov_with_arg, providers_arg] if stream_output: generator = self._stream_vagrant_command(args) else: self._call_vagrant_command(args) self._cached_conf[vm_name] = None # remove cached configuration return generator if stream_output else None
def reload(self, vm_name=None, provision=None, provision_with=None, stream_output=False)
Quoting from Vagrant docs: > The equivalent of running a halt followed by an up. > This command is usually required for changes made in the Vagrantfile to take effect. After making any modifications to the Vagrantfile, a reload should be called. > The configured provisioners will not run again, by default. You can force the provisioners to re-run by specifying the --provision flag. provision: optional boolean. Enable or disable provisioning. Default behavior is to use the underlying vagrant default. provision_with: optional list of provisioners to enable. e.g. ['shell', 'chef_solo'] stream_output: if True, return a generator that yields each line of the output of running the command. Consume the generator or the subprocess might hang. if False, None is returned and the command is run to completion without streaming the output. Defaults to False. returns: None or a generator yielding lines of output.
5.574232
1.674422
3.329049
''' Halt the Vagrant box. force: If True, force shut down. ''' force_opt = '--force' if force else None self._call_vagrant_command(['halt', vm_name, force_opt]) self._cached_conf[vm_name] = None
def halt(self, vm_name=None, force=False)
Halt the Vagrant box. force: If True, force shut down.
7.281995
4.145759
1.756493
''' Unit testing is so much easier when Vagrant is removed from the equation. ''' parsed = self._parse_machine_readable_output(output) statuses = [] # group tuples by target name # assuming tuples are sorted by target name, this should group all # the tuples with info for each target. for target, tuples in itertools.groupby(parsed, lambda tup: tup[1]): # transform tuples into a dict mapping "type" to "data" info = {kind: data for timestamp, _, kind, data in tuples} status = Status(name=target, state=info.get('state'), provider=info.get('provider-name')) statuses.append(status) return statuses
def _parse_status(self, output)
Unit testing is so much easier when Vagrant is removed from the equation.
8.962017
5.694421
1.573824
''' Parse ssh_config into a dict containing the keys defined in ssh_config, which should include these keys (listed with example values): 'User' (e.g. 'vagrant'), 'HostName' (e.g. 'localhost'), 'Port' (e.g. '2222'), 'IdentityFile' (e.g. '/home/todd/.ssh/id_dsa'). Cache the parsed configuration dict. Return the dict. If ssh_config is not given, return the cached dict. If there is no cached configuration, call ssh_config() to get the configuration, then parse, cache, and return the config dict. Calling ssh_config() raises an Exception if the Vagrant box has not yet been created or has been destroyed. vm_name: required in a Multi-VM Vagrant environment. This name will be used to get the configuration for the named vm and associate the config with the vm name in the cache. ssh_config: a valid ssh confige file host section. Defaults to the value returned from ssh_config(). For speed, the configuration parsed from ssh_config is cached for subsequent calls. ''' if self._cached_conf.get(vm_name) is None or ssh_config is not None: if ssh_config is None: ssh_config = self.ssh_config(vm_name=vm_name) conf = self._parse_config(ssh_config) self._cached_conf[vm_name] = conf return self._cached_conf[vm_name]
def conf(self, ssh_config=None, vm_name=None)
Parse ssh_config into a dict containing the keys defined in ssh_config, which should include these keys (listed with example values): 'User' (e.g. 'vagrant'), 'HostName' (e.g. 'localhost'), 'Port' (e.g. '2222'), 'IdentityFile' (e.g. '/home/todd/.ssh/id_dsa'). Cache the parsed configuration dict. Return the dict. If ssh_config is not given, return the cached dict. If there is no cached configuration, call ssh_config() to get the configuration, then parse, cache, and return the config dict. Calling ssh_config() raises an Exception if the Vagrant box has not yet been created or has been destroyed. vm_name: required in a Multi-VM Vagrant environment. This name will be used to get the configuration for the named vm and associate the config with the vm name in the cache. ssh_config: a valid ssh confige file host section. Defaults to the value returned from ssh_config(). For speed, the configuration parsed from ssh_config is cached for subsequent calls.
4.708577
1.243573
3.786329
''' Return a string combining user and hostname, e.g. 'vagrant@127.0.0.1'. This string is suitable for use in an ssh commmand. If user is None or empty, it will be left out of the string, e.g. 'localhost'. If hostname is None, have bigger problems. Raises an Exception if the Vagrant box has not yet been created or has been destroyed. ''' user = self.user(vm_name=vm_name) user_prefix = user + '@' if user else '' return user_prefix + self.hostname(vm_name=vm_name)
def user_hostname(self, vm_name=None)
Return a string combining user and hostname, e.g. 'vagrant@127.0.0.1'. This string is suitable for use in an ssh commmand. If user is None or empty, it will be left out of the string, e.g. 'localhost'. If hostname is None, have bigger problems. Raises an Exception if the Vagrant box has not yet been created or has been destroyed.
5.54212
1.587685
3.490691
''' Return a string combining user, hostname and port, e.g. 'vagrant@127.0.0.1:2222'. This string is suitable for use with Fabric, in env.hosts. If user or port is None or empty, they will be left out of the string. E.g. 'vagrant@localhost', or 'localhost:2222' or 'localhost'. If hostname is None, you have bigger problems. Raises an Exception if the Vagrant box has not yet been created or has been destroyed. ''' user = self.user(vm_name=vm_name) port = self.port(vm_name=vm_name) user_prefix = user + '@' if user else '' port_suffix = ':' + port if port else '' return user_prefix + self.hostname(vm_name=vm_name) + port_suffix
def user_hostname_port(self, vm_name=None)
Return a string combining user, hostname and port, e.g. 'vagrant@127.0.0.1:2222'. This string is suitable for use with Fabric, in env.hosts. If user or port is None or empty, they will be left out of the string. E.g. 'vagrant@localhost', or 'localhost:2222' or 'localhost'. If hostname is None, you have bigger problems. Raises an Exception if the Vagrant box has not yet been created or has been destroyed.
4.327737
1.465125
2.953835
''' Adds a box with given name, from given url. force: If True, overwrite an existing box if it exists. ''' force_opt = '--force' if force else None cmd = ['box', 'add', name, url, force_opt] if provider is not None: cmd += ['--provider', provider] self._call_vagrant_command(cmd)
def box_add(self, name, url, provider=None, force=False)
Adds a box with given name, from given url. force: If True, overwrite an existing box if it exists.
4.062099
2.568864
1.581282
''' Packages a running vagrant environment into a box. vm_name=None: name of VM. base=None: name of a VM in virtualbox to package as a base box output=None: name of the file to output vagrantfile=None: Vagrantfile to package with this box ''' cmd = ['package', vm_name] if output is not None: cmd += ['--output', output] if vagrantfile is not None: cmd += ['--vagrantfile', vagrantfile] self._call_vagrant_command(cmd)
def package(self, vm_name=None, base=None, output=None, vagrantfile=None)
Packages a running vagrant environment into a box. vm_name=None: name of VM. base=None: name of a VM in virtualbox to package as a base box output=None: name of the file to output vagrantfile=None: Vagrantfile to package with this box
4.750585
1.920545
2.473561
''' This command is the inverse of vagrant snapshot push: it will restore the pushed state. ''' NO_SNAPSHOTS_PUSHED = 'No pushed snapshot found!' output = self._run_vagrant_command(['snapshot', 'pop']) if NO_SNAPSHOTS_PUSHED in output: raise RuntimeError(NO_SNAPSHOTS_PUSHED)
def snapshot_pop(self)
This command is the inverse of vagrant snapshot push: it will restore the pushed state.
8.175077
4.044209
2.021428
''' This command will list all the snapshots taken. ''' NO_SNAPSHOTS_TAKEN = 'No snapshots have been taken yet!' output = self._run_vagrant_command(['snapshot', 'list']) if NO_SNAPSHOTS_TAKEN in output: return [] else: return output.splitlines()
def snapshot_list(self)
This command will list all the snapshots taken.
5.01852
3.631706
1.381863
''' Execute a command via ssh on the vm specified. command: The command to execute via ssh. extra_ssh_args: Corresponds to '--' option in the vagrant ssh command Returns the output of running the command. ''' cmd = ['ssh', vm_name, '--command', command] if extra_ssh_args is not None: cmd += ['--', extra_ssh_args] return self._run_vagrant_command(cmd)
def ssh(self, vm_name=None, command=None, extra_ssh_args=None)
Execute a command via ssh on the vm specified. command: The command to execute via ssh. extra_ssh_args: Corresponds to '--' option in the vagrant ssh command Returns the output of running the command.
4.230711
2.16136
1.95743
''' Remove Vagrant usage for unit testing ''' # Parse box list output boxes = [] # initialize box values name = provider = version = None for timestamp, target, kind, data in self._parse_machine_readable_output(output): if kind == 'box-name': # finish the previous box, if any if name is not None: boxes.append(Box(name=name, provider=provider, version=version)) # start a new box name = data # box name provider = version = None elif kind == 'box-provider': provider = data elif kind == 'box-version': version = data # finish the previous box, if any if name is not None: boxes.append(Box(name=name, provider=provider, version=version)) return boxes
def _parse_box_list(self, output)
Remove Vagrant usage for unit testing
4.216984
3.26123
1.293066
''' Remove Vagrant from the equation for unit testing. ''' ENCODED_COMMA = '%!(VAGRANT_COMMA)' plugins = [] # initialize plugin values name = None version = None system = False for timestamp, target, kind, data in self._parse_machine_readable_output(output): if kind == 'plugin-name': # finish the previous plugin, if any if name is not None: plugins.append(Plugin(name=name, version=version, system=system)) # start a new plugin name = data # plugin name version = None system = False elif kind == 'plugin-version': if ENCODED_COMMA in data: version, etc = data.split(ENCODED_COMMA) system = (etc.strip().lower() == 'system') else: version = data system = False # finish the previous plugin, if any if name is not None: plugins.append(Plugin(name=name, version=version, system=system)) return plugins
def _parse_plugin_list(self, output)
Remove Vagrant from the equation for unit testing.
4.552587
3.557408
1.279748
''' param output: a string containing the output of a vagrant command with the `--machine-readable` option. returns: a dict mapping each 'target' in the machine readable output to a dict. The dict of each target, maps each target line type/kind to its data. Machine-readable output is a collection of CSV lines in the format: timestamp, target, kind, data Target is a VM name, possibly 'default', or ''. The empty string denotes information not specific to a particular VM, such as the results of `vagrant box list`. ''' # each line is a tuple of (timestamp, target, type, data) # target is the VM name # type is the type of data, e.g. 'provider-name', 'box-version' # data is a (possibly comma separated) type-specific value, e.g. 'virtualbox', '0' parsed_lines = [line.split(',', 4) for line in output.splitlines() if line.strip()] # vagrant 1.8 adds additional fields that aren't required, # and will break parsing if included in the status lines. # filter them out pending future implementation. parsed_lines = list(filter(lambda x: x[2] not in ["metadata", "ui", "action"], parsed_lines)) return parsed_lines
def _parse_machine_readable_output(self, output)
param output: a string containing the output of a vagrant command with the `--machine-readable` option. returns: a dict mapping each 'target' in the machine readable output to a dict. The dict of each target, maps each target line type/kind to its data. Machine-readable output is a collection of CSV lines in the format: timestamp, target, kind, data Target is a VM name, possibly 'default', or ''. The empty string denotes information not specific to a particular VM, such as the results of `vagrant box list`.
8.674151
3.146385
2.756863
''' This lame parser does not parse the full grammar of an ssh config file. It makes assumptions that are (hopefully) correct for the output of `vagrant ssh-config [vm-name]`. Specifically it assumes that there is only one Host section, the default vagrant host. It assumes that the parameters of the ssh config are not changing. every line is of the form 'key value', where key is a single token without any whitespace and value is the remaining part of the line. Value may optionally be surrounded in double quotes. All leading and trailing whitespace is removed from key and value. Example lines: ' User vagrant\n' ' IdentityFile "/home/robert/.vagrant.d/insecure_private_key"\n' Lines with '#' as the first non-whitespace character are considered comments and ignored. Whitespace-only lines are ignored. This parser does NOT handle using an '=' in options. Values surrounded in double quotes will have the double quotes removed. See https://github.com/bitprophet/ssh/blob/master/ssh/config.py for a more compliant ssh config file parser. ''' conf = dict() started_parsing = False for line in ssh_config.splitlines(): if line.strip().startswith('Host ') and not started_parsing: started_parsing = True if not started_parsing or not line.strip() or line.strip().startswith('#'): continue key, value = line.strip().split(None, 1) # Remove leading and trailing " from the values conf[key] = value.strip('"') return conf
def _parse_config(self, ssh_config)
This lame parser does not parse the full grammar of an ssh config file. It makes assumptions that are (hopefully) correct for the output of `vagrant ssh-config [vm-name]`. Specifically it assumes that there is only one Host section, the default vagrant host. It assumes that the parameters of the ssh config are not changing. every line is of the form 'key value', where key is a single token without any whitespace and value is the remaining part of the line. Value may optionally be surrounded in double quotes. All leading and trailing whitespace is removed from key and value. Example lines: ' User vagrant\n' ' IdentityFile "/home/robert/.vagrant.d/insecure_private_key"\n' Lines with '#' as the first non-whitespace character are considered comments and ignored. Whitespace-only lines are ignored. This parser does NOT handle using an '=' in options. Values surrounded in double quotes will have the double quotes removed. See https://github.com/bitprophet/ssh/blob/master/ssh/config.py for a more compliant ssh config file parser.
6.195265
1.397506
4.433087
''' Run a vagrant command. Return None. args: A sequence of arguments to a vagrant command line. ''' # Make subprocess command command = self._make_vagrant_command(args) with self.out_cm() as out_fh, self.err_cm() as err_fh: subprocess.check_call(command, cwd=self.root, stdout=out_fh, stderr=err_fh, env=self.env)
def _call_vagrant_command(self, args)
Run a vagrant command. Return None. args: A sequence of arguments to a vagrant command line.
5.026715
3.282604
1.531319
''' Run a vagrant command and return its stdout. args: A sequence of arguments to a vagrant command line. e.g. ['up', 'my_vm_name', '--no-provision'] or ['up', None, '--no-provision'] for a non-Multi-VM environment. ''' # Make subprocess command command = self._make_vagrant_command(args) with self.err_cm() as err_fh: return compat.decode(subprocess.check_output(command, cwd=self.root, env=self.env, stderr=err_fh))
def _run_vagrant_command(self, args)
Run a vagrant command and return its stdout. args: A sequence of arguments to a vagrant command line. e.g. ['up', 'my_vm_name', '--no-provision'] or ['up', None, '--no-provision'] for a non-Multi-VM environment.
6.604949
3.038013
2.174102
py3 = sys.version_info > (3, 0) # Make subprocess command command = self._make_vagrant_command(args) with self.err_cm() as err_fh: sp_args = dict(args=command, cwd=self.root, env=self.env, stdout=subprocess.PIPE, stderr=err_fh, bufsize=1) # Iterate over output lines. # See http://stackoverflow.com/questions/2715847/python-read-streaming-input-from-subprocess-communicate#17698359 p = subprocess.Popen(**sp_args) with p.stdout: for line in iter(p.stdout.readline, b''): yield compat.decode(line) # if PY3 decode bytestrings p.wait() # Raise CalledProcessError for consistency with _call_vagrant_command if p.returncode != 0: raise subprocess.CalledProcessError(p.returncode, command)
def _stream_vagrant_command(self, args)
Execute a vagrant command, returning a generator of the output lines. Caller should consume the entire generator to avoid the hanging the subprocess. :param args: Arguments for the Vagrant command. :return: generator that yields each line of the command stdout. :rtype: generator iterator
4.544907
4.666434
0.973957
''' Returns the status of the sandbox mode. Possible values are: - on - off - unknown - not installed ''' vagrant_sandbox_output = self._run_sandbox_command(['status', vm_name]) return self._parse_vagrant_sandbox_status(vagrant_sandbox_output)
def sandbox_status(self, vm_name=None)
Returns the status of the sandbox mode. Possible values are: - on - off - unknown - not installed
5.849627
3.286889
1.779685
''' Returns the status of the sandbox mode given output from 'vagrant sandbox status'. ''' # typical output # [default] - snapshot mode is off # or # [default] - machine not created # if the box VM is down tokens = [token.strip() for token in vagrant_output.split(' ')] if tokens[0] == 'Usage:': sahara_status = 'not installed' elif "{} {}".format(tokens[-2], tokens[-1]) == 'not created': sahara_status = 'unknown' else: sahara_status = tokens[-1] return sahara_status
def _parse_vagrant_sandbox_status(self, vagrant_output)
Returns the status of the sandbox mode given output from 'vagrant sandbox status'.
6.363273
5.001441
1.272288
# Be careful of shallow copy here target = dict(obj) class_name = None if '__class__' in target: class_name = target.pop('__class__') if '__module__' in obj: obj.pop('__module__') # Use getattr(module, class_name) for custom types if needed if class_name == 'datetime': return datetime.datetime(tzinfo=utc, **target) if class_name == 'StreamingBody': return StringIO(target['body']) # Return unrecognized structures as-is return obj
def deserialize(obj)
Convert JSON dicts back into objects.
5.078002
5.037438
1.008053
# Record class and module information for deserialization result = {'__class__': obj.__class__.__name__} try: result['__module__'] = obj.__module__ except AttributeError: pass # Convert objects to dictionary representation based on type if isinstance(obj, datetime.datetime): result['year'] = obj.year result['month'] = obj.month result['day'] = obj.day result['hour'] = obj.hour result['minute'] = obj.minute result['second'] = obj.second result['microsecond'] = obj.microsecond return result if isinstance(obj, StreamingBody): result['body'] = obj.read() obj._raw_stream = StringIO(result['body']) obj._amount_read = 0 return result # Raise a TypeError if the object isn't recognized raise TypeError("Type not serializable")
def serialize(obj)
Convert objects into JSON structures.
2.817255
2.853736
0.987216
json.dump(obj, fp, indent=4, default=serialize)
def _serialize_json(obj, fp)
Serialize ``obj`` as a JSON formatted stream to ``fp``
4.752787
5.727649
0.829797
if serializer_format == Format.JSON: return _serialize_json if serializer_format == Format.PICKLE: return _serialize_pickle
def get_serializer(serializer_format)
Get the serializer for a specific format
3.41671
3.26904
1.045172
if serializer_format == Format.JSON: return _deserialize_json if serializer_format == Format.PICKLE: return _deserialize_pickle
def get_deserializer(serializer_format)
Get the deserializer for a specific format
3.039114
2.913188
1.043226
log = logging.getLogger(logger_name) log.setLevel(level) ch = logging.StreamHandler(None) ch.setLevel(level) formatter = logging.Formatter(DebugFmtString) # add formatter to ch ch.setFormatter(formatter) # add ch to logger log.addHandler(ch)
def _set_logger(logger_name, level=logging.INFO)
Convenience function to quickly configure full debug output to go to the console.
2.713344
2.536251
1.069825
for file_format in Format.ALLOWED: file_path = '.'.join((file_name, file_format)) if os.path.exists(file_path): return file_path, file_format return None, None
def find_file_format(file_name)
Returns a tuple with the file path and format found, or (None, None)
3.132086
2.740982
1.142688
base_name = '{0}.{1}'.format(service, operation) if self.prefix: base_name = '{0}.{1}'.format(self.prefix, base_name) LOG.debug('get_next_file_path: %s', base_name) next_file = None serializer_format = None index = self._index.setdefault(base_name, 1) while not next_file: file_name = os.path.join( self._data_path, base_name + '_{0}'.format(index)) next_file, serializer_format = self.find_file_format(file_name) if next_file: self._index[base_name] += 1 elif index != 1: index = 1 self._index[base_name] = 1 else: raise IOError('response file ({0}.[{1}]) not found'.format( file_name, "|".join(Format.ALLOWED))) return next_file, serializer_format
def get_next_file_path(self, service, operation)
Returns a tuple with the next file to read and the serializer format used
3.191555
3.001489
1.063324
LOG.debug('save_response: %s.%s', service, operation) filepath = self.get_new_file_path(service, operation) LOG.debug('save_response: path=%s', filepath) data = {'status_code': http_response, 'data': response_data} with open(filepath, Format.write_mode(self.record_format)) as fp: self._serializer(data, fp)
def save_response(self, service, operation, response_data, http_response=200)
Store a response to the data directory. The ``operation`` should be the name of the operation in the service API (e.g. DescribeInstances), the ``response_data`` should a value you want to return from a placebo call and the ``http_response`` should be the HTTP status code returned from the service. You can add multiple responses for a given operation and they will be returned in order.
4.12695
4.136358
0.997726
model = kwargs.get('model') service = model.service_model.endpoint_prefix operation = model.name LOG.debug('_make_request: %s.%s', service, operation) return self.load_response(service, operation)
def _mock_request(self, **kwargs)
A mocked out make_request call that bypasses all network calls and simply returns any mocked responses defined.
7.33554
6.295677
1.165171
@functools.wraps(function) def wrapper(*args, **kwargs): session_kwargs = { 'region_name': os.environ.get('AWS_DEFAULT_REGION', 'us-east-1') } profile_name = os.environ.get('PLACEBO_PROFILE', None) if profile_name: session_kwargs['profile_name'] = profile_name session = boto3.Session(**session_kwargs) self = args[0] prefix = self.__class__.__name__ + '.' + function.__name__ base_dir = os.environ.get( "PLACEBO_DIR", os.path.join(os.getcwd(), "placebo")) record_dir = os.path.join(base_dir, prefix) record_format = os.environ.get('PLACEBO_FORMAT', Format.DEFAULT) if not os.path.exists(record_dir): os.makedirs(record_dir) pill = placebo.attach(session, data_path=record_dir, record_format=record_format) if os.environ.get('PLACEBO_MODE') == 'record': pill.record() else: pill.playback() kwargs['session'] = session return function(*args, **kwargs) return wrapper
def placebo_session(function)
Decorator to help do testing with placebo. Simply wrap the function you want to test and make sure to add a "session" argument so the decorator can pass the placebo session. Accepts the following environment variables to configure placebo: PLACEBO_MODE: set to "record" to record AWS calls and save them PLACEBO_PROFILE: optionally set an AWS credential profile to record with PLACEBO_DIR: set the directory to record to / read from
2.2464
1.991455
1.128019
# type: (str, Optional[Mapping[str, str]]) -> oic.oic.message.AuthorizationRequest auth_req = AuthorizationRequest().deserialize(request_body) for validator in self.authentication_request_validators: validator(auth_req) logger.debug('parsed authentication_request: %s', auth_req) return auth_req
def parse_authentication_request(self, request_body, http_headers=None)
Parses and verifies an authentication request. :param request_body: urlencoded authentication request :param http_headers: http headers
3.317751
5.218082
0.635818
# type: (...) -> oic.oic.message.AuthorizationResponse custom_sub = self.userinfo[user_id].get('sub') if custom_sub: self.authz_state.subject_identifiers[user_id] = {'public': custom_sub} sub = custom_sub else: sub = self._create_subject_identifier(user_id, authentication_request['client_id'], authentication_request['redirect_uri']) self._check_subject_identifier_matches_requested(authentication_request, sub) response = AuthorizationResponse() authz_code = None if 'code' in authentication_request['response_type']: authz_code = self.authz_state.create_authorization_code(authentication_request, sub) response['code'] = authz_code access_token_value = None if 'token' in authentication_request['response_type']: access_token = self.authz_state.create_access_token(authentication_request, sub) access_token_value = access_token.value self._add_access_token_to_response(response, access_token) if 'id_token' in authentication_request['response_type']: if extra_id_token_claims is None: extra_id_token_claims = {} elif callable(extra_id_token_claims): extra_id_token_claims = extra_id_token_claims(user_id, authentication_request['client_id']) requested_claims = self._get_requested_claims_in(authentication_request, 'id_token') if len(authentication_request['response_type']) == 1: # only id token is issued -> no way of doing userinfo request, so include all claims in ID Token, # even those requested by the scope parameter requested_claims.update( scope2claims( authentication_request['scope'], extra_scope_dict=self.extra_scopes ) ) user_claims = self.userinfo.get_claims_for(user_id, requested_claims) response['id_token'] = self._create_signed_id_token(authentication_request['client_id'], sub, user_claims, authentication_request.get('nonce'), authz_code, access_token_value, extra_id_token_claims) logger.debug('issued id_token=%s from requested_claims=%s userinfo=%s extra_claims=%s', response['id_token'], requested_claims, user_claims, extra_id_token_claims) if 'state' in authentication_request: response['state'] = authentication_request['state'] return response
def authorize(self, authentication_request, # type: oic.oic.message.AuthorizationRequest user_id, # type: str extra_id_token_claims=None # type: Optional[Union[Mapping[str, Union[str, List[str]]], Callable[[str, str], Mapping[str, Union[str, List[str]]]]] )
Creates an Authentication Response for the specified authentication request and local identifier of the authenticated user.
2.654735
2.607558
1.018093
# type: (oic.message.AccessTokenResponse, se_leg_op.access_token.AccessToken) -> None response['access_token'] = access_token.value response['token_type'] = access_token.type response['expires_in'] = access_token.expires_in
def _add_access_token_to_response(self, response, access_token)
Adds the Access Token and the associated parameters to the Token Response.
4.464557
4.607203
0.969039
# type (str, str, str) -> str supported_subject_types = self.configuration_information['subject_types_supported'][0] subject_type = self.clients[client_id].get('subject_type', supported_subject_types) sector_identifier = urlparse(redirect_uri).netloc return self.authz_state.get_subject_identifier(subject_type, user_id, sector_identifier)
def _create_subject_identifier(self, user_id, client_id, redirect_uri)
Creates a subject identifier for the specified client and user see <a href="http://openid.net/specs/openid-connect-core-1_0.html#Terminology"> "OpenID Connect Core 1.0", Section 1.2</a>. :param user_id: local user identifier :param client_id: which client to generate a subject identifier for :param redirect_uri: the clients' redirect_uri :return: a subject identifier for the user intended for client who made the authentication request
4.334612
5.011747
0.864891
# type (oic.oic.message.AuthorizationRequest, str) -> Mapping[str, Optional[Mapping[str, Union[str, List[str]]]] if response_method != 'id_token' and response_method != 'userinfo': raise ValueError('response_method must be \'id_token\' or \'userinfo\'') requested_claims = {} if 'claims' in authentication_request and response_method in authentication_request['claims']: requested_claims.update(authentication_request['claims'][response_method]) return requested_claims
def _get_requested_claims_in(self, authentication_request, response_method)
Parses any claims requested using the 'claims' request parameter, see <a href="http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter"> "OpenID Connect Core 1.0", Section 5.5</a>. :param authentication_request: the authentication request :param response_method: 'id_token' or 'userinfo'
2.578986
3.072365
0.839414
args['c_hash'] = jws.left_hash(authorization_code.encode('utf-8'), hash_alg) if access_token_value: args['at_hash'] = jws.left_hash(access_token_value.encode('utf-8'), hash_alg) if user_claims: args.update(user_claims) if extra_id_token_claims: args.update(extra_id_token_claims) id_token = IdToken(iss=self.configuration_information['issuer'], sub=sub, aud=client_id, iat=int(time.time()), exp=int(time.time()) + self.id_token_lifetime, **args) if nonce: id_token['nonce'] = nonce logger.debug('signed id_token with kid=%s using alg=%s', self.signing_key, alg) return id_token.to_jwt([self.signing_key], alg)
def _create_signed_id_token(self, client_id, # type: str sub, # type: str user_claims=None, # type: Optional[Mapping[str, Union[str, List[str]]]] nonce=None, # type: Optional[str] authorization_code=None, # type: Optional[str] access_token_value=None, # type: Optional[str] extra_id_token_claims=None): # type: Optional[Mappings[str, Union[str, List[str]]]] # type: (...) -> str alg = self.clients[client_id].get('id_token_signed_response_alg', self.configuration_information['id_token_signing_alg_values_supported'][0]) args = {} hash_alg = 'HS{}'.format(alg[-3:]) if authorization_code
Creates a signed ID Token. :param client_id: who the ID Token is intended for :param sub: who the ID Token is regarding :param user_claims: any claims about the user to be included :param nonce: nonce from the authentication request :param authorization_code: the authorization code issued together with this ID Token :param access_token_value: the access token issued together with this ID Token :param extra_id_token_claims: any extra claims that should be included in the ID Token :return: a JWS, containing the ID Token as payload
2.300478
2.348492
0.979556
# type (oic.message.AuthorizationRequest, str) -> None if 'claims' in authentication_request: requested_id_token_sub = authentication_request['claims'].get('id_token', {}).get('sub') requested_userinfo_sub = authentication_request['claims'].get('userinfo', {}).get('sub') if requested_id_token_sub and requested_userinfo_sub and requested_id_token_sub != requested_userinfo_sub: raise AuthorizationError('Requested different subject identifier for IDToken and userinfo: {} != {}' .format(requested_id_token_sub, requested_userinfo_sub)) requested_sub = requested_id_token_sub or requested_userinfo_sub if requested_sub and sub != requested_sub: raise AuthorizationError('Requested subject identifier \'{}\' could not be matched' .format(requested_sub))
def _check_subject_identifier_matches_requested(self, authentication_request, sub)
Verifies the subject identifier against any requested subject identifier using the claims request parameter. :param authentication_request: authentication request :param sub: subject identifier :raise AuthorizationError: if the subject identifier does not match the requested one
2.394689
2.488813
0.962181
# type: (...) -> oic.oic.message.AccessTokenResponse token_request = self._verify_client_authentication(request_body, http_headers) if 'grant_type' not in token_request: raise InvalidTokenRequest('grant_type missing', token_request) elif token_request['grant_type'] == 'authorization_code': return self._do_code_exchange(token_request, extra_id_token_claims) elif token_request['grant_type'] == 'refresh_token': return self._do_token_refresh(token_request) raise InvalidTokenRequest('grant_type \'{}\' unknown'.format(token_request['grant_type']), token_request, oauth_error='unsupported_grant_type')
def handle_token_request(self, request_body, # type: str http_headers=None, # type: Optional[Mapping[str, str]] extra_id_token_claims=None # type: Optional[Union[Mapping[str, Union[str, List[str]]], Callable[[str, str], Mapping[str, Union[str, List[str]]]]] )
Handles a token request, either for exchanging an authorization code or using a refresh token. :param request_body: urlencoded token request :param http_headers: http headers :param extra_id_token_claims: extra claims to include in the signed ID Token
2.43879
2.690257
0.906527
# type: (...) -> oic.message.AccessTokenResponse token_request = AccessTokenRequest().from_dict(request) try: token_request.verify() except MessageException as e: raise InvalidTokenRequest(str(e), token_request) from e authentication_request = self.authz_state.get_authorization_request_for_code(token_request['code']) if token_request['client_id'] != authentication_request['client_id']: logger.info('Authorization code \'%s\' belonging to \'%s\' was used by \'%s\'', token_request['code'], authentication_request['client_id'], token_request['client_id']) raise InvalidAuthorizationCode('{} unknown'.format(token_request['code'])) if token_request['redirect_uri'] != authentication_request['redirect_uri']: raise InvalidTokenRequest('Invalid redirect_uri: {} != {}'.format(token_request['redirect_uri'], authentication_request['redirect_uri']), token_request) sub = self.authz_state.get_subject_identifier_for_code(token_request['code']) user_id = self.authz_state.get_user_id_for_subject_identifier(sub) response = AccessTokenResponse() access_token = self.authz_state.exchange_code_for_token(token_request['code']) self._add_access_token_to_response(response, access_token) refresh_token = self.authz_state.create_refresh_token(access_token.value) if refresh_token is not None: response['refresh_token'] = refresh_token if extra_id_token_claims is None: extra_id_token_claims = {} elif callable(extra_id_token_claims): extra_id_token_claims = extra_id_token_claims(user_id, authentication_request['client_id']) requested_claims = self._get_requested_claims_in(authentication_request, 'id_token') user_claims = self.userinfo.get_claims_for(user_id, requested_claims) response['id_token'] = self._create_signed_id_token(authentication_request['client_id'], sub, user_claims, authentication_request.get('nonce'), None, access_token.value, extra_id_token_claims) logger.debug('issued id_token=%s from requested_claims=%s userinfo=%s extra_claims=%s', response['id_token'], requested_claims, user_claims, extra_id_token_claims) return response
def _do_code_exchange(self, request, # type: Dict[str, str] extra_id_token_claims=None # type: Optional[Union[Mapping[str, Union[str, List[str]]], Callable[[str, str], Mapping[str, Union[str, List[str]]]]] )
Handles a token request for exchanging an authorization code for an access token (grant_type=authorization_code). :param request: parsed http request parameters :param extra_id_token_claims: any extra parameters to include in the signed ID Token, either as a dict-like object or as a callable object accepting the local user identifier and client identifier which returns any extra claims which might depend on the user id and/or client id. :return: a token response containing a signed ID Token, an Access Token, and a Refresh Token :raise InvalidTokenRequest: if the token request is invalid
2.386796
2.34152
1.019336
# type: (Mapping[str, str]) -> oic.oic.message.AccessTokenResponse token_request = RefreshAccessTokenRequest().from_dict(request) try: token_request.verify() except MessageException as e: raise InvalidTokenRequest(str(e), token_request) from e response = AccessTokenResponse() access_token, refresh_token = self.authz_state.use_refresh_token(token_request['refresh_token'], scope=token_request.get('scope')) self._add_access_token_to_response(response, access_token) if refresh_token: response['refresh_token'] = refresh_token return response
def _do_token_refresh(self, request)
Handles a token request for refreshing an access token (grant_type=refresh_token). :param request: parsed http request parameters :return: a token response containing a new Access Token and possibly a new Refresh Token :raise InvalidTokenRequest: if the token request is invalid
3.480928
3.469516
1.003289
# type (str, Optional[Mapping[str, str]] -> Mapping[str, str] if http_headers is None: http_headers = {} token_request = dict(parse_qsl(request_body)) token_request['client_id'] = verify_client_authentication(self.clients, token_request, http_headers.get('Authorization')) return token_request
def _verify_client_authentication(self, request_body, http_headers=None)
Verifies the client authentication. :param request_body: urlencoded token request :param http_headers: :return: The parsed request body.
4.007155
3.905383
1.026059
# type: (Optional[str], Optional[Mapping[str, str]]) -> oic.oic.message.OpenIDSchema if http_headers is None: http_headers = {} userinfo_request = dict(parse_qsl(request)) bearer_token = extract_bearer_token_from_http_request(userinfo_request, http_headers.get('Authorization')) introspection = self.authz_state.introspect_access_token(bearer_token) if not introspection['active']: raise InvalidAccessToken('The access token has expired') scopes = introspection['scope'].split() user_id = self.authz_state.get_user_id_for_subject_identifier(introspection['sub']) requested_claims = scope2claims(scopes, extra_scope_dict=self.extra_scopes) authentication_request = self.authz_state.get_authorization_request_for_access_token(bearer_token) requested_claims.update(self._get_requested_claims_in(authentication_request, 'userinfo')) user_claims = self.userinfo.get_claims_for(user_id, requested_claims) user_claims.setdefault('sub', introspection['sub']) response = OpenIDSchema(**user_claims) logger.debug('userinfo=%s from requested_claims=%s userinfo=%s', response, requested_claims, user_claims) return response
def handle_userinfo_request(self, request=None, http_headers=None)
Handles a userinfo request. :param request: urlencoded request (either query string or POST body) :param http_headers: http headers
3.31866
3.657743
0.907297
# type: (oic.message.RegistrationRequest) -> Mapping[str, Union[str, List[str]]] matched_prefs = client_preferences.to_dict() for pref in ['response_types', 'default_acr_values']: if pref not in client_preferences: continue capability = PREFERENCE2PROVIDER[pref] # only preserve the common values matched_values = find_common_values(client_preferences[pref], self.configuration_information[capability]) # deal with space separated values matched_prefs[pref] = [' '.join(v) for v in matched_values] return matched_prefs
def match_client_preferences_with_provider_capabilities(self, client_preferences)
Match as many as of the client preferences as possible. :param client_preferences: requested preferences from client registration request :return: the matched preferences selected by the provider
5.384818
5.867831
0.917684
# type: (Optional[str], Optional[Mapping[str, str]]) -> oic.oic.message.RegistrationResponse registration_req = RegistrationRequest().deserialize(request, 'json') for validator in self.registration_request_validators: validator(registration_req) logger.debug('parsed authentication_request: %s', registration_req) client_id, client_secret = self._issue_new_client() credentials = { 'client_id': client_id, 'client_id_issued_at': int(time.time()), 'client_secret': client_secret, 'client_secret_expires_at': 0 # never expires } response_params = self.match_client_preferences_with_provider_capabilities(registration_req) response_params.update(credentials) self.clients[client_id] = copy.deepcopy(response_params) registration_resp = RegistrationResponse(**response_params) logger.debug('registration_resp=%s from registration_req=%s', registration_resp, registration_req) return registration_resp
def handle_client_registration_request(self, request, http_headers=None)
Handles a client registration request. :param request: JSON request from POST body :param http_headers: http headers
3.274256
3.697346
0.885569
# type (Optional[Mapping[str, str]], Optional[str] -> str if authz_header: # Authorization Request Header Field: https://tools.ietf.org/html/rfc6750#section-2.1 if authz_header.startswith(AccessToken.BEARER_TOKEN_TYPE): access_token = authz_header[len(AccessToken.BEARER_TOKEN_TYPE) + 1:] logger.debug('found access token %s in authz header', access_token) return access_token elif parsed_request: if 'access_token' in parsed_request: access_token = parsed_request['access_token'] logger.debug('found access token %s in request', access_token) return access_token raise BearerTokenError('Bearer Token could not be found in the request')
def extract_bearer_token_from_http_request(parsed_request=None, authz_header=None)
Extracts a Bearer token from an http request :param parsed_request: parsed request (URL query part of request body) :param authz_header: HTTP Authorization header :return: Bearer access token, if found :raise BearerTokenError: if no Bearer token could be extracted from the request
2.607728
2.768598
0.941895
user_pass = '' if parsed_uri.get('username') and parsed_uri.get('password'): user_pass = '{username!s}:{password!s}@'.format(**parsed_uri) _nodes = [] for host, port in parsed_uri.get('nodelist'): if ':' in host and not host.endswith(']'): # IPv6 address without brackets host = '[{!s}]'.format(host) if port == 27017: _nodes.append(host) else: _nodes.append('{!s}:{!s}'.format(host, port)) nodelist = ','.join(_nodes) options = '' if parsed_uri.get('options'): _opt_list = [] for key, value in parsed_uri.get('options').items(): if isinstance(value, bool): value = str(value).lower() _opt_list.append('{!s}={!s}'.format(key, value)) options = '?' + '&'.join(_opt_list) db_name = parsed_uri.get('database') or '' res = "mongodb://{user_pass!s}{nodelist!s}/{db_name!s}{options!s}".format( user_pass=user_pass, nodelist=nodelist, db_name=db_name, # collection is ignored options=options) return res
def _format_mongodb_uri(parsed_uri)
Painstakingly reconstruct a MongoDB URI parsed using pymongo.uri_parser.parse_uri. :param parsed_uri: Result of pymongo.uri_parser.parse_uri :type parsed_uri: dict :return: New URI :rtype: str | unicode
1.99153
2.04909
0.971909
if self._sanitized_uri is None: _parsed = copy.copy(self._parsed_uri) if 'username' in _parsed: _parsed['password'] = 'secret' _parsed['nodelist'] = [_parsed['nodelist'][0]] self._sanitized_uri = _format_mongodb_uri(_parsed) return self._sanitized_uri
def sanitized_uri(self)
Return the database URI we're using in a format sensible for logging etc. :return: db_uri
4.502066
4.419681
1.01864
if database_name is None: database_name = self._database_name if database_name is None: raise ValueError('No database_name supplied, and no default provided to __init__') db = self._connection[database_name] if username and password: db.authenticate(username, password) elif self._parsed_uri.get("username", None): if 'authSource' in self._options and self._options['authSource'] is not None: db.authenticate( self._parsed_uri.get("username", None), self._parsed_uri.get("password", None), source=self._options['authSource'] ) else: db.authenticate( self._parsed_uri.get("username", None), self._parsed_uri.get("password", None) ) return db
def get_database(self, database_name=None, username=None, password=None)
Get a pymongo database handle, after authenticating. Authenticates using the username/password in the DB URI given to __init__() unless username/password is supplied as arguments. :param database_name: (optional) Name of database :param username: (optional) Username to login with :param password: (optional) Password to login with :return: Pymongo database object
2.19416
2.104793
1.042459