def validate_netmask(s):
    """Validate that a dotted-quad ip address is a valid netmask.

    >>> validate_netmask('0.0.0.0')
    True
    >>> validate_netmask('128.0.0.0')
    True
    >>> validate_netmask('255.0.0.0')
    True
    >>> validate_netmask('255.255.255.255')
    True
    >>> validate_netmask(BROADCAST)
    True
    >>> validate_netmask('128.0.0.1')
    False
    >>> validate_netmask('1.255.255.0')
    False
    >>> validate_netmask('0.255.255.0')
    False

    :param s: String to validate as a dotted-quad notation netmask.
    :type s: str
    :returns: ``True`` if a valid netmask, ``False`` otherwise.
    :raises: TypeError
    """
    if validate_ip(s):
        # Convert to binary string, strip '0b' prefix, 0-pad to 32 bits
        mask = bin(ip2network(s))[2:].zfill(32)
        # All leftmost bits must be 1, all rightmost bits must be 0
        seen0 = False
        for c in mask:
            if '1' == c:
                if seen0:
                    return False
            else:
                seen0 = True
        return True
    else:
        return False
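
# The string scan above mirrors the binary representation directly. For
# reference, the same "contiguous ones, then zeroes" property can be checked
# with integer arithmetic alone -- a sketch, not part of the library:
def _is_netmask_long(mask):
    # Inverting a valid netmask yields 0...01...1, i.e. a value of the
    # form 2**k - 1, which is exactly when inv & (inv + 1) == 0.
    inv = ~mask & 0xFFFFFFFF
    return inv & (inv + 1) == 0

assert _is_netmask_long(0xFF000000)      # 255.0.0.0
assert not _is_netmask_long(0x80000001)  # 128.0.0.1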
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.

    The string is considered a valid dotted-quad address with netmask if
    it consists of one to four octets (0-255) separated by periods (.)
    followed by a forward slash (/) and a subnet bitmask which is
    expressed in dotted-quad format.

    >>> validate_subnet('127.0.0.1/255.255.255.255')
    True
    >>> validate_subnet('127.0/255.0.0.0')
    True
    >>> validate_subnet('127.0/255')
    True
    >>> validate_subnet('127.0.0.256/255.255.255.255')
    False
    >>> validate_subnet('127.0.0.1/255.255.255.256')
    False
    >>> validate_subnet('127.0.0.0')
    False
    >>> validate_subnet(None)
    Traceback (most recent call last):
        ...
    TypeError: expected string or unicode

    :param s: String to validate as a dotted-quad ip address with netmask.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address with netmask,
        ``False`` otherwise.
    :raises: TypeError
    """
    if isinstance(s, basestring):
        if '/' in s:
            # Split on the first slash only, so an extra slash fails
            # netmask validation instead of raising ValueError on unpacking
            start, mask = s.split('/', 1)
            return validate_ip(start) and validate_netmask(mask)
        else:
            return False
    raise TypeError("expected string or unicode")
def ip2long(ip):
    """Convert a dotted-quad ip address to a network byte order 32-bit
    integer.

    >>> ip2long('127.0.0.1')
    2130706433
    >>> ip2long('127.1')
    2130706433
    >>> ip2long('127')
    2130706432
    >>> ip2long('127.0.0.256') is None
    True

    :param ip: Dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    quads = ip.split('.')
    if len(quads) == 1:
        # only a network quad
        quads = quads + [0, 0, 0]
    elif len(quads) < 4:
        # partial form, last supplied quad is host address, rest is network
        host = quads[-1:]
        quads = quads[:-1] + [0, ] * (4 - len(quads)) + host
    lngip = 0
    for q in quads:
        lngip = (lngip << 8) | int(q)
    return lngip
def ip2network(ip):
    """Convert a dotted-quad ip to base network number.

    This differs from :func:`ip2long` in that partial addresses are
    treated as all network instead of network plus host (eg. '127.1'
    expands to '127.1.0.0').

    :param ip: dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or ``None`` if ip is
        invalid.
    """
    if not validate_ip(ip):
        return None
    quads = ip.split('.')
    netw = 0
    for i in range(4):
        netw = (netw << 8) | int(len(quads) > i and quads[i] or 0)
    return netw
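
# The host/network distinction for partial addresses is easiest to see side
# by side -- a minimal sketch, assuming both functions above are in scope:
#
#   ip2long('127.1')     -> 2130706433  (127.0.0.1: '1' is the host quad)
#   ip2network('127.1')  -> 2130771968  (127.1.0.0: quads fill from the left)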
def long2ip(l):
    """Convert a network byte order 32-bit integer to a dotted quad ip
    address.

    >>> long2ip(2130706433)
    '127.0.0.1'
    >>> long2ip(MIN_IP)
    '0.0.0.0'
    >>> long2ip(MAX_IP)
    '255.255.255.255'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    TypeError: expected int between 0 and 4294967295 inclusive

    :param l: Network byte order 32-bit integer.
    :type l: int
    :returns: Dotted-quad ip address (eg. '127.0.0.1').
    :raises: TypeError
    """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    return '%d.%d.%d.%d' % (
        l >> 24 & 255, l >> 16 & 255, l >> 8 & 255, l & 255)
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the network
    block start and end addresses.

    >>> cidr2block('127.0.0.1/32')
    ('127.0.0.1', '127.0.0.1')
    >>> cidr2block('127/8')
    ('127.0.0.0', '127.255.255.255')
    >>> cidr2block('127.0.1/16')
    ('127.0.0.0', '127.0.255.255')
    >>> cidr2block('127.1/24')
    ('127.1.0.0', '127.1.0.255')
    >>> cidr2block('127.0.0.3/29')
    ('127.0.0.0', '127.0.0.7')
    >>> cidr2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    ip, prefix = cidr.split('/')
    prefix = int(prefix)
    # convert dotted-quad ip to base network number
    network = ip2network(ip)
    return _block_from_ip_and_prefix(network, prefix)
def subnet2block(subnet):
    """Convert a dotted-quad ip address including a netmask into a tuple
    containing the network block start and end addresses.

    >>> subnet2block('127.0.0.1/255.255.255.255')
    ('127.0.0.1', '127.0.0.1')
    >>> subnet2block('127/255')
    ('127.0.0.0', '127.255.255.255')
    >>> subnet2block('127.0.1/255.255')
    ('127.0.0.0', '127.0.255.255')
    >>> subnet2block('127.1/255.255.255.0')
    ('127.1.0.0', '127.1.0.255')
    >>> subnet2block('127.0.0.3/255.255.255.248')
    ('127.0.0.0', '127.0.0.7')
    >>> subnet2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param subnet: dotted-quad ip address with netmask
        (eg. '127.0.0.1/255.0.0.0').
    :type subnet: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_subnet(subnet):
        return None
    ip, netmask = subnet.split('/')
    prefix = netmask2prefix(netmask)
    # convert dotted-quad ip to base network number
    network = ip2network(ip)
    return _block_from_ip_and_prefix(network, prefix)
def _block_from_ip_and_prefix(ip, prefix):
    """Create a tuple of (start, end) dotted-quad addresses from the given
    ip address and prefix length.

    :param ip: Ip address in block
    :type ip: long
    :param prefix: Prefix size for block
    :type prefix: int
    :returns: Tuple of block (start, end)
    """
    # keep leftmost prefix bits of ip
    shift = 32 - prefix
    block_start = ip >> shift << shift
    # expand rightmost 32 - prefix bits to 1
    mask = (1 << shift) - 1
    block_end = block_start | mask
    return (long2ip(block_start), long2ip(block_end))
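
# To make the bit arithmetic concrete, here is the /29 case from the
# cidr2block doctests worked by hand (values in hex; a sketch only):
#
#   ip    = 0x7f000003              # 127.0.0.3
#   shift = 32 - 29                 # 3 host bits
#   start = ip >> 3 << 3            # 0x7f000000 -> '127.0.0.0'
#   end   = start | ((1 << 3) - 1)  # 0x7f000007 -> '127.0.0.7'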
def download_shared_files(job, samples, config):
    """
    Downloads files shared by all samples in the pipeline

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Argparse Namespace object containing argument inputs
    :param list[list] samples: A nested list of samples containing sample information
    """
    job.fileStore.logToMaster('Downloaded shared files')
    file_names = ['reference', 'phase', 'mills', 'dbsnp', 'cosmic']
    urls = [config.reference, config.phase, config.mills, config.dbsnp, config.cosmic]
    for name, url in zip(file_names, urls):
        if url:
            vars(config)[name] = job.addChildJobFn(download_url_job, url=url).rv()
    job.addFollowOnJobFn(reference_preprocessing, samples, config)
def reference_preprocessing(job, samples, config):
    """
    Spawn the jobs that create index and dict file for reference

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Argparse Namespace object containing argument inputs
    :param list[list] samples: A nested list of samples containing sample information
    """
    job.fileStore.logToMaster('Processed reference files')
    config.fai = job.addChildJobFn(run_samtools_faidx, config.reference).rv()
    config.dict = job.addChildJobFn(run_picard_create_sequence_dictionary, config.reference).rv()
    job.addFollowOnJobFn(map_job, download_sample, samples, config)
def download_sample(job, sample, config):
    """
    Download sample and store sample specific attributes

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param list sample: Contains uuid, normal URL, and tumor URL
    :param Namespace config: Argparse Namespace object containing argument inputs
    """
    # Create copy of config that is sample specific
    config = argparse.Namespace(**vars(config))
    uuid, normal_url, tumor_url = sample
    job.fileStore.logToMaster('Downloaded sample: ' + uuid)
    config.uuid = uuid
    config.normal = normal_url
    config.tumor = tumor_url
    config.cores = min(config.maxCores, int(multiprocessing.cpu_count()))
    disk = '1G' if config.ci_test else '20G'
    # Download sample bams and launch pipeline
    config.normal_bam = job.addChildJobFn(download_url_job, url=config.normal, s3_key_path=config.ssec,
                                          cghub_key_path=config.gtkey, disk=disk).rv()
    config.tumor_bam = job.addChildJobFn(download_url_job, url=config.tumor, s3_key_path=config.ssec,
                                         cghub_key_path=config.gtkey, disk=disk).rv()
    job.addFollowOnJobFn(index_bams, config)
def index_bams(job, config):
    """
    Convenience job for handling bam indexing to make the workflow declaration cleaner

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Argparse Namespace object containing argument inputs
    """
    job.fileStore.logToMaster('Indexed sample BAMS: ' + config.uuid)
    disk = '1G' if config.ci_test else '20G'
    config.normal_bai = job.addChildJobFn(run_samtools_index, config.normal_bam, cores=1, disk=disk).rv()
    config.tumor_bai = job.addChildJobFn(run_samtools_index, config.tumor_bam, cores=1, disk=disk).rv()
    job.addFollowOnJobFn(preprocessing_declaration, config)
def preprocessing_declaration(job, config):
    """
    Declare jobs related to preprocessing

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Argparse Namespace object containing argument inputs
    """
    if config.preprocessing:
        job.fileStore.logToMaster('Ran preprocessing: ' + config.uuid)
        disk = '1G' if config.ci_test else '20G'
        mem = '2G' if config.ci_test else '10G'
        processed_normal = job.wrapJobFn(run_gatk_preprocessing, config.normal_bam, config.normal_bai,
                                         config.reference, config.dict, config.fai, config.phase, config.mills,
                                         config.dbsnp, mem, cores=1, memory=mem, disk=disk)
        processed_tumor = job.wrapJobFn(run_gatk_preprocessing, config.tumor_bam, config.tumor_bai,
                                        config.reference, config.dict, config.fai, config.phase, config.mills,
                                        config.dbsnp, mem, cores=1, memory=mem, disk=disk)
        static_workflow = job.wrapJobFn(static_workflow_declaration, config, processed_normal.rv(0),
                                        processed_normal.rv(1), processed_tumor.rv(0), processed_tumor.rv(1))
        job.addChild(processed_normal)
        job.addChild(processed_tumor)
        job.addFollowOn(static_workflow)
    else:
        job.addFollowOnJobFn(static_workflow_declaration, config, config.normal_bam, config.normal_bai,
                             config.tumor_bam, config.tumor_bai)
def static_workflow_declaration(job, config, normal_bam, normal_bai, tumor_bam, tumor_bai):
    """
    Statically declare workflow so sections can be modularly repurposed

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Argparse Namespace object containing argument inputs
    :param str normal_bam: Normal BAM FileStoreID
    :param str normal_bai: Normal BAM index FileStoreID
    :param str tumor_bam: Tumor BAM FileStoreID
    :param str tumor_bai: Tumor BAM Index FileStoreID
    """
    # Mutation and indel tool wiring
    memory = '1G' if config.ci_test else '10G'
    disk = '1G' if config.ci_test else '75G'
    mutect_results, pindel_results, muse_results = None, None, None
    if config.run_mutect:
        mutect_results = job.addChildJobFn(run_mutect, normal_bam, normal_bai, tumor_bam, tumor_bai,
                                           config.reference, config.dict, config.fai, config.cosmic,
                                           config.dbsnp, cores=1, memory=memory, disk=disk).rv()
    if config.run_pindel:
        pindel_results = job.addChildJobFn(run_pindel, normal_bam, normal_bai, tumor_bam, tumor_bai,
                                           config.reference, config.fai,
                                           cores=config.cores, memory=memory, disk=disk).rv()
    if config.run_muse:
        muse_results = job.addChildJobFn(run_muse, normal_bam, normal_bai, tumor_bam, tumor_bai,
                                         config.reference, config.dict, config.fai, config.dbsnp,
                                         cores=config.cores, memory=memory, disk=disk).rv()
    # Pass tool results (whether None or a promised return value) to consolidation step
    consolidation = job.wrapJobFn(consolidate_output, config, mutect_results, pindel_results, muse_results)
    job.addFollowOn(consolidation)
def consolidate_output(job, config, mutect, pindel, muse):
    """
    Combine the contents of separate tarball outputs into one via streaming

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Argparse Namespace object containing argument inputs
    :param str mutect: MuTect tarball FileStoreID
    :param str pindel: Pindel tarball FileStoreID
    :param str muse: MuSe tarball FileStoreID
    """
    work_dir = job.fileStore.getLocalTempDir()
    mutect_tar, pindel_tar, muse_tar = None, None, None
    if mutect:
        mutect_tar = job.fileStore.readGlobalFile(mutect, os.path.join(work_dir, 'mutect.tar.gz'))
    if pindel:
        pindel_tar = job.fileStore.readGlobalFile(pindel, os.path.join(work_dir, 'pindel.tar.gz'))
    if muse:
        muse_tar = job.fileStore.readGlobalFile(muse, os.path.join(work_dir, 'muse.tar.gz'))
    out_tar = os.path.join(work_dir, config.uuid + '.tar.gz')
    # Consolidate separate tarballs into one as streams (avoids unnecessary untarring)
    tar_list = [x for x in [mutect_tar, pindel_tar, muse_tar] if x is not None]
    with tarfile.open(out_tar, 'w:gz') as f_out:
        for tar in tar_list:
            with tarfile.open(tar, 'r') as f_in:
                for tarinfo in f_in:
                    with closing(f_in.extractfile(tarinfo)) as f_in_file:
                        if tar is mutect_tar:
                            tarinfo.name = os.path.join(config.uuid, 'mutect', os.path.basename(tarinfo.name))
                        elif tar is pindel_tar:
                            tarinfo.name = os.path.join(config.uuid, 'pindel', os.path.basename(tarinfo.name))
                        else:
                            tarinfo.name = os.path.join(config.uuid, 'muse', os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=f_in_file)
    # Move to output location
    if urlparse(config.output_dir).scheme == 's3':
        job.fileStore.logToMaster('Uploading {} to S3: {}'.format(config.uuid, config.output_dir))
        s3am_upload(job=job, fpath=out_tar, s3_dir=config.output_dir, num_cores=config.cores)
    else:
        job.fileStore.logToMaster('Moving {} to output dir: {}'.format(config.uuid, config.output_dir))
        mkdir_p(config.output_dir)
        copy_files(file_paths=[out_tar], output_dir=config.output_dir)
def parse_manifest(path_to_manifest):
    """
    Parses samples, specified in either a manifest or listed with --samples

    :param str path_to_manifest: Path to configuration file
    :return: Samples and their attributes as defined in the manifest
    :rtype: list[list]
    """
    samples = []
    with open(path_to_manifest, 'r') as f:
        for line in f:
            if not line.isspace() and not line.startswith('#'):
                sample = line.strip().split('\t')
                require(len(sample) == 3, 'Bad manifest format! '
                        'Expected 3 tab separated columns, got: {}'.format(sample))
                uuid, normal, tumor = sample
                for url in [normal, tumor]:
                    require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url))
                samples.append(sample)
    return samples
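
# A minimal sketch of the manifest this function expects: one sample per
# line, three tab-separated columns (UUID, normal URL, tumor URL). The
# sample name and URLs below are hypothetical.
#
#   # manifest-toil-exome.tsv
#   patient-1<TAB>s3://bucket/p1-normal.bam<TAB>s3://bucket/p1-tumor.bam
#
# parse_manifest('manifest-toil-exome.tsv')
# -> [['patient-1', 's3://bucket/p1-normal.bam', 's3://bucket/p1-tumor.bam']]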
def main():
    """
    Computational Genomics Lab, Genomics Institute, UC Santa Cruz
    Toil exome pipeline

    Perform variant / indel analysis given a pair of tumor/normal BAM files.
    Samples are optionally preprocessed (indel realignment and base quality
    score recalibration). The output of this pipeline is a tarball containing
    results from MuTect, MuSe, and Pindel.

    General usage:
    1. Type "toil-exome generate" to create an editable manifest and config in the current working directory.
    2. Parameterize the pipeline by editing the config.
    3. Fill in the manifest with information pertaining to your samples.
    4. Type "toil-exome run [jobStore]" to execute the pipeline.

    Please read the README.md located in the source directory or at:
    https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/exome_variant_pipeline

    Structure of variant pipeline (per sample):

    0 = Start node
    1 = reference index
    2 = reference dict
    3 = normal bam index
    4 = tumor bam index
    5 = pre-processing node / DAG declaration
    6,7 = RealignerTargetCreator
    8,9 = IndelRealigner
    10,11 = BaseRecalibration
    12,13 = PrintReads
    14 = MuTect
    15 = Pindel
    16 = MuSe
    17 = Consolidate Output and move/upload results

    ==================================================
    Dependencies
    Curl:       apt-get install curl
    Docker:     wget -qO- https://get.docker.com/ | sh
    Toil:       pip install toil
    Boto:       pip install boto (OPTIONAL)
    """
    parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
    subparsers = parser.add_subparsers(dest='command')
    # Generate subparsers
    subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
    subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
    subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
    # Run subparser
    parser_run = subparsers.add_parser('run', help='Runs the CGL exome pipeline')
    parser_run.add_argument('--config', default='config-toil-exome.yaml', type=str,
                            help='Path to the (filled in) config file, generated with "generate-config". '
                                 '\nDefault value: "%(default)s"')
    parser_run.add_argument('--manifest', default='manifest-toil-exome.tsv', type=str,
                            help='Path to the (filled in) manifest file, generated with "generate-manifest". '
                                 '\nDefault value: "%(default)s"')
    parser_run.add_argument('--normal', default=None, type=str,
                            help='URL for the normal BAM. URLs can take the form: http://, ftp://, file://, s3://, '
                                 'and gnos://. The UUID for the sample must be given with the "--uuid" flag.')
    parser_run.add_argument('--tumor', default=None, type=str,
                            help='URL for the tumor BAM. URLs can take the form: http://, ftp://, file://, s3://, '
                                 'and gnos://. The UUID for the sample must be given with the "--uuid" flag.')
    parser_run.add_argument('--uuid', default=None, type=str,
                            help='Provide the UUID of a sample when using the "--tumor" and "--normal" options')
    # If no arguments provided, print full help menu
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    # Add Toil options
    Job.Runner.addToilOptions(parser_run)
    args = parser.parse_args()
    # Parse subparsers related to generation of config and manifest
    cwd = os.getcwd()
    if args.command == 'generate-config' or args.command == 'generate':
        generate_file(os.path.join(cwd, 'config-toil-exome.yaml'), generate_config)
    if args.command == 'generate-manifest' or args.command == 'generate':
        generate_file(os.path.join(cwd, 'manifest-toil-exome.tsv'), generate_manifest)
    # Pipeline execution
    elif args.command == 'run':
        require(os.path.exists(args.config), '{} not found. Please run '
                '"toil-exome generate-config"'.format(args.config))
        if args.normal or args.tumor or args.uuid:
            require(args.normal and args.tumor and args.uuid,
                    '"--tumor", "--normal" and "--uuid" must all be supplied')
            samples = [[args.uuid, args.normal, args.tumor]]
        else:
            samples = parse_manifest(args.manifest)
        # Parse config
        parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
        config = argparse.Namespace(**parsed_config)
        config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint
        # Exome pipeline sanity checks
        if config.preprocessing:
            require(config.reference and config.phase and config.mills and config.dbsnp,
                    'Missing inputs for preprocessing, check config file.')
        if config.run_mutect:
            require(config.reference and config.dbsnp and config.cosmic,
                    'Missing inputs for MuTect, check config file.')
        if config.run_pindel:
            require(config.reference, 'Missing input (reference) for Pindel.')
        if config.run_muse:
            require(config.reference and config.dbsnp,
                    'Missing inputs for MuSe, check config file.')
        require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))
        # Program checks
        for program in ['curl', 'docker']:
            require(next(which(program), None), '{} must be installed on every node.'.format(program))
        # Launch Pipeline
        Job.Runner.startToil(Job.wrapJobFn(download_shared_files, samples, config), args)
def download_reference_files(job, inputs, samples):
    """
    Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace inputs: Input arguments (see main)
    :param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]]
    """
    # Create dictionary to store FileStoreIDs of shared input files
    shared_ids = {}
    urls = [('amb', inputs.amb), ('ann', inputs.ann), ('bwt', inputs.bwt),
            ('pac', inputs.pac), ('sa', inputs.sa)]
    # Alt file is optional and can only be provided, not generated
    if inputs.alt:
        urls.append(('alt', inputs.alt))
    # Download reference
    download_ref = job.wrapJobFn(download_url_job, inputs.ref, disk='3G')  # Human genomes are typically ~3G
    job.addChild(download_ref)
    shared_ids['ref'] = download_ref.rv()
    # If FAI is provided, download it. Otherwise, generate it
    if inputs.fai:
        shared_ids['fai'] = job.addChildJobFn(download_url_job, inputs.fai).rv()
    else:
        faidx = job.wrapJobFn(run_samtools_faidx, download_ref.rv())
        shared_ids['fai'] = download_ref.addChild(faidx).rv()
    # If all BWA index files are provided, download them. Otherwise, generate them
    if all(x[1] for x in urls):
        for name, url in urls:
            shared_ids[name] = job.addChildJobFn(download_url_job, url).rv()
    else:
        job.fileStore.logToMaster('BWA index files not provided, creating now')
        bwa_index = job.wrapJobFn(run_bwa_index, download_ref.rv())
        download_ref.addChild(bwa_index)
        for x, name in enumerate(['amb', 'ann', 'bwt', 'pac', 'sa']):
            shared_ids[name] = bwa_index.rv(x)
    # map_job distributes one sample in samples to the download_sample_and_align function
    job.addFollowOnJobFn(map_job, download_sample_and_align, samples, inputs, shared_ids)
def download_sample_and_align(job, sample, inputs, ids):
    """
    Downloads the sample and runs BWA-kit

    :param JobFunctionWrappingJob job: Passed by Toil automatically
    :param tuple(str, list) sample: UUID and URLS for sample
    :param Namespace inputs: Contains input arguments
    :param dict ids: FileStore IDs for shared inputs
    """
    uuid, urls = sample
    r1_url, r2_url = urls if len(urls) == 2 else (urls[0], None)
    job.fileStore.logToMaster('Downloaded sample: {0}. R1 {1}\nR2 {2}\nStarting BWA Run'.format(uuid, r1_url, r2_url))
    # Read fastq samples from file store
    ids['r1'] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
    if r2_url:
        ids['r2'] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
    else:
        ids['r2'] = None
    # Create config for bwakit
    inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
    inputs.uuid = uuid
    config = dict(**vars(inputs))  # Create config as a copy of inputs since it has values we want
    config.update(ids)  # Overwrite attributes with the FileStoreIDs from ids
    config = argparse.Namespace(**config)
    # Define and wire job functions
    bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim,
                           disk=inputs.file_size, cores=inputs.cores)
    job.addFollowOn(bam_id)
    output_name = uuid + '.bam' + str(inputs.suffix) if inputs.suffix else uuid + '.bam'
    if urlparse(inputs.output_dir).scheme == 's3':
        bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name,
                             s3_dir=inputs.output_dir, s3_key_path=inputs.ssec,
                             cores=inputs.cores, disk=inputs.file_size)
    else:
        mkdir_p(inputs.output_dir)
        bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(),
                             output_dir=inputs.output_dir, disk=inputs.file_size)
def parse_manifest(manifest_path):
    """
    Parse manifest file

    :param str manifest_path: Path to manifest file
    :return: samples
    :rtype: list[str, list]
    """
    samples = []
    with open(manifest_path, 'r') as f:
        for line in f:
            if not line.isspace() and not line.startswith('#'):
                sample = line.strip().split('\t')
                require(2 <= len(sample) <= 3, 'Bad manifest format! '
                        'Expected UUID\tURL1\t[URL2] (tab separated), got: {}'.format(sample))
                uuid = sample[0]
                urls = sample[1:]
                for url in urls:
                    require(urlparse(url).scheme, 'Invalid URL passed for {}'.format(url))
                samples.append([uuid, urls])
    return samples
def main():
    """
    Computational Genomics Lab, Genomics Institute, UC Santa Cruz
    Toil BWA pipeline

    Alignment of fastq reads via BWA-kit

    General usage:
    1. Type "toil-bwa generate" to create an editable manifest and config in the current working directory.
    2. Parameterize the pipeline by editing the config.
    3. Fill in the manifest with information pertaining to your samples.
    4. Type "toil-bwa run [jobStore]" to execute the pipeline.

    Please read the README.md located in the source directory or at:
    https://github.com/BD2KGenomics/toil-scripts/tree/master/src/toil_scripts/bwa_alignment

    Structure of the BWA pipeline (per sample)

        0 --> 1

    0 = Download sample
    1 = Run BWA-kit

    ===================================================================
    :Dependencies:
    cURL:       apt-get install curl
    Toil:       pip install toil
    Docker:     wget -qO- https://get.docker.com/ | sh

    Optional:
    S3AM:       pip install --s3am (requires ~/.boto config file)
    Boto:       pip install boto
    """
    # Define Parser object and add to Toil
    parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
    subparsers = parser.add_subparsers(dest='command')
    # Generate subparsers
    subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')
    subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')
    subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')
    # Run subparser
    parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline')
    group = parser_run.add_mutually_exclusive_group()
    parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str,
                            help='Path to the (filled in) config file, generated with "generate-config".')
    group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str,
                       help='Path to the (filled in) manifest file, generated with "generate-manifest". '
                            '\nDefault value: "%(default)s".')
    group.add_argument('--sample', nargs='+', action=required_length(2, 3),
                       help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].')
    # Print docstring help if no arguments provided
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    Job.Runner.addToilOptions(parser_run)
    args = parser.parse_args()
    # Parse subparsers related to generation of config and manifest
    cwd = os.getcwd()
    if args.command == 'generate-config' or args.command == 'generate':
        generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config)
    if args.command == 'generate-manifest' or args.command == 'generate':
        generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest)
    # Pipeline execution
    elif args.command == 'run':
        require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config))
        if not args.sample:
            args.sample = None
            require(os.path.exists(args.manifest), '{} not found and no sample provided. '
                    'Please run "generate-manifest"'.format(args.manifest))
        # Parse config
        parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}
        config = argparse.Namespace(**parsed_config)
        config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint
        # Wrap a command-line sample in an outer list to match parse_manifest's output format
        samples = [[args.sample[0], args.sample[1:]]] if args.sample else parse_manifest(args.manifest)
        # Sanity checks
        require(config.ref, 'Missing URL for reference file: {}'.format(config.ref))
        require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))
        # Launch Pipeline
        Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args)
def _address2long(address):
    """
    Convert an address string to a long.
    """
    parsed = ipv4.ip2long(address)
    if parsed is None:
        parsed = ipv6.ip2long(address)
    return parsed
def index(self, item):
    """
    Return the 0-based position of `item` in this IpRange.

    >>> r = IpRange('127.0.0.1', '127.255.255.255')
    >>> r.index('127.0.0.1')
    0
    >>> r.index('127.255.255.255')
    16777214
    >>> r.index('10.0.0.1')
    Traceback (most recent call last):
        ...
    ValueError: 10.0.0.1 is not in range

    :param item: Dotted-quad ip address.
    :type item: str
    :returns: Index of ip address in range
    """
    item = self._cast(item)
    offset = item - self.startIp
    if offset >= 0 and offset < self._len:
        return offset
    raise ValueError('%s is not in range' % self._ipver.long2ip(item))
def _detach_process():
    """
    Detach daemon process.

    Forks the current process into a parent and a detached child. The
    child process resides in its own process group, has no controlling
    terminal attached and is cleaned up by the init process.

    Returns ``True`` for the parent and ``False`` for the child.
    """
    # To detach from our process group we need to call ``setsid``. We
    # can only do that if we aren't a process group leader. Therefore
    # we fork once, which makes sure that the new child process is not
    # a process group leader.
    pid = os.fork()
    if pid > 0:
        # Parent process
        # Use waitpid to "collect" the child process and avoid Zombies
        os.waitpid(pid, 0)
        return True
    os.setsid()
    # We now fork a second time and let the second fork's parent exit.
    # This makes the second fork's child process an orphan. Orphans are
    # cleaned up by the init process, so we won't end up with a zombie.
    # In addition, the second fork's child is no longer a session
    # leader and can therefore never acquire a controlling terminal.
    pid = os.fork()
    if pid > 0:
        os._exit(os.EX_OK)
    return False
def _block(predicate, timeout):
    """
    Block until a predicate becomes true.

    ``predicate`` is a function taking no arguments. The call to
    ``_block`` blocks until ``predicate`` returns a true value. This
    is done by polling ``predicate``.

    ``timeout`` is either ``True`` (block indefinitely) or a timeout
    in seconds.

    The return value is the value of the predicate after the timeout.
    """
    if timeout:
        if timeout is True:
            timeout = float('Inf')
        timeout = time.time() + timeout
        while not predicate() and time.time() < timeout:
            time.sleep(0.1)
    return predicate()
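
# A usage sketch (``svc`` is a hypothetical instance of the Service class
# whose methods appear below); this mirrors how ``stop`` uses ``_block``:
#
#   # Wait up to 10 seconds for startup; the final predicate value tells
#   # the caller whether the wait succeeded.
#   started = _block(lambda: svc.is_running(), 10)
#
#   # Block indefinitely until the daemon has shut down.
#   stopped = _block(lambda: not svc.is_running(), True)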
def read_pid(self):
    """
    Return the PID of the process owning the lock.

    Returns ``None`` if no lock is present.
    """
    try:
        with open(self._path, 'r') as f:
            s = f.read().strip()
            if not s:
                return None
            return int(s)
    except IOError as e:
        if e.errno == errno.ENOENT:
            return None
        raise
def _get_logger_file_handles(self):
    """
    Find the file handles used by our logger's handlers.
    """
    handles = []
    for handler in self.logger.handlers:
        # The following code works for logging's SysLogHandler,
        # StreamHandler, SocketHandler, and their subclasses.
        for attr in ['sock', 'socket', 'stream']:
            try:
                handle = getattr(handler, attr)
                if handle:
                    handles.append(handle)
                    break
            except AttributeError:
                continue
    return handles
def is_running(self):
    """
    Check if the daemon is running.
    """
    pid = self.get_pid()
    if pid is None:
        return False
    # The PID file may still exist even if the daemon isn't running,
    # for example if it has crashed.
    try:
        os.kill(pid, 0)
    except OSError as e:
        if e.errno == errno.ESRCH:
            # In this case the PID file shouldn't have existed in
            # the first place, so we remove it
            self.pid_file.release()
            return False
        # We may also get an exception if we're not allowed to use
        # kill on the process, but that means that the process does
        # exist, which is all we care about here.
    return True
def _get_signal_event(self, s):
    """
    Get the event for a signal.

    Checks if the signal has been enabled and raises a ``ValueError``
    if not.
    """
    try:
        return self._signal_events[int(s)]
    except KeyError:
        raise ValueError('Signal {} has not been enabled'.format(s))
def send_signal(self, s):
    """
    Send a signal to the daemon process.

    The signal must have been enabled using the ``signals`` parameter
    of :py:meth:`Service.__init__`. Otherwise, a ``ValueError`` is
    raised.
    """
    self._get_signal_event(s)  # Check if signal has been enabled
    pid = self.get_pid()
    if not pid:
        raise ValueError('Daemon is not running.')
    os.kill(pid, s)
def stop(self, block=False):
    """
    Tell the daemon process to stop.

    Sends the SIGTERM signal to the daemon process, requesting it to
    terminate. If ``block`` is true then the call blocks until the
    daemon process has exited. This may take some time since the
    daemon process will complete its on-going backup activities
    before shutting down. ``block`` can either be ``True`` (in which
    case it blocks indefinitely) or a timeout in seconds.

    The return value is ``True`` if the daemon process has been
    stopped and ``False`` otherwise.

    .. versionadded:: 0.3
        The ``block`` parameter
    """
    self.send_signal(signal.SIGTERM)
    return _block(lambda: not self.is_running(), block)
def kill(self, block=False):
    """
    Kill the daemon process.

    Sends the SIGKILL signal to the daemon process, killing it. You
    probably want to try :py:meth:`stop` first.

    If ``block`` is true then the call blocks until the daemon
    process has exited. ``block`` can either be ``True`` (in which
    case it blocks indefinitely) or a timeout in seconds.

    Returns ``True`` if the daemon process has (already) exited and
    ``False`` otherwise.

    The PID file is always removed, whether the process has already
    exited or not. Note that this means that subsequent calls to
    :py:meth:`is_running` and :py:meth:`get_pid` will behave as if
    the process has exited. If you need to be sure that the process
    has already exited, set ``block`` to ``True``.

    .. versionadded:: 0.5.1
        The ``block`` parameter
    """
    pid = self.get_pid()
    if not pid:
        raise ValueError('Daemon is not running.')
    try:
        os.kill(pid, signal.SIGKILL)
        return _block(lambda: not self.is_running(), block)
    except OSError as e:
        if e.errno == errno.ESRCH:
            raise ValueError('Daemon is not running.')
        raise
    finally:
        self.pid_file.release()
def start(self, block=False):
    """
    Start the daemon process.

    The daemon process is started in the background and the calling
    process returns.

    Once the daemon process is initialized it calls the
    :py:meth:`run` method.

    If ``block`` is true then the call blocks until the daemon
    process has started. ``block`` can either be ``True`` (in which
    case it blocks indefinitely) or a timeout in seconds.

    The return value is ``True`` if the daemon process has been
    started and ``False`` otherwise.

    .. versionadded:: 0.3
        The ``block`` parameter
    """
    pid = self.get_pid()
    if pid:
        raise ValueError('Daemon is already running at PID %d.' % pid)

    # The default is to place the PID file into ``/var/run``. This
    # requires root privileges. Since not having these is a common
    # problem we check a priori whether we can create the lock file.
    try:
        self.pid_file.acquire()
    finally:
        self.pid_file.release()

    # Clear previously received SIGTERMs. This must be done before
    # the calling process returns so that the calling process can
    # call ``stop`` directly after ``start`` returns without the
    # signal being lost.
    self.clear_signal(signal.SIGTERM)

    if _detach_process():
        # Calling process returns
        return _block(lambda: self.is_running(), block)
    # Daemon process continues here
    self._debug('Daemon has detached')

    def on_signal(s, frame):
        self._debug('Received signal {}'.format(s))
        self._signal_events[int(s)].set()

    def runner():
        try:
            # We acquire the PID as late as possible, since its
            # existence is used to verify whether the service
            # is running.
            self.pid_file.acquire()
            self._debug('PID file has been acquired')
            self._debug('Calling `run`')
            self.run()
            self._debug('`run` returned without exception')
        except Exception as e:
            self.logger.exception(e)
        except SystemExit:
            self._debug('`run` called `sys.exit`')
        try:
            self.pid_file.release()
            self._debug('PID file has been released')
        except Exception as e:
            self.logger.exception(e)
        os._exit(os.EX_OK)  # FIXME: This seems redundant

    try:
        setproctitle.setproctitle(self.name)
        self._debug('Process title has been set')
        files_preserve = (self.files_preserve +
                          self._get_logger_file_handles())
        signal_map = {s: on_signal for s in self._signal_events}
        signal_map.update({
            signal.SIGTTIN: None,
            signal.SIGTTOU: None,
            signal.SIGTSTP: None,
        })
        with DaemonContext(
                detach_process=False,
                signal_map=signal_map,
                files_preserve=files_preserve):
            self._debug('Daemon context has been established')
            # Python's signal handling mechanism only forwards signals to
            # the main thread and only when that thread is doing something
            # (e.g. not when it's waiting for a lock, etc.). If we use the
            # main thread for the ``run`` method this means that we cannot
            # use the synchronization devices from ``threading`` for
            # communicating the reception of SIGTERM to ``run``. Hence we
            # use a separate thread for ``run`` and make sure that the
            # main loop receives signals. See
            # https://bugs.python.org/issue1167930
            thread = threading.Thread(target=runner)
            thread.start()
            while thread.is_alive():
                time.sleep(1)
    except Exception as e:
        self.logger.exception(e)

    # We need to shutdown the daemon process at this point, because
    # otherwise it will continue executing from after the original
    # call to ``start``.
    os._exit(os.EX_OK)
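
# A usage sketch for the daemon machinery above, assuming the surrounding
# ``Service`` base class; ``got_sigterm`` is assumed to query the SIGTERM
# event set by ``on_signal``:
#
#   class Echo(Service):
#       def run(self):
#           while not self.got_sigterm():
#               time.sleep(1)
#
#   svc = Echo('echo-daemon', pid_dir='/tmp')
#   svc.start(block=10)   # wait up to 10s for the PID file to appear
#   svc.stop(block=True)  # SIGTERM, then wait for the process to exit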
def create(opts):
    """Create a new environment

    Usage:
      datacats create [-bin] [--interactive] [-s NAME] [--address=IP] [--syslog]
                      [--ckan=CKAN_VERSION] [--no-datapusher] [--site-url SITE_URL]
                      [--no-init-db] ENVIRONMENT_DIR [PORT]

    Options:
      --address=IP            Address to listen on (Linux-only)
      --ckan=CKAN_VERSION     Use CKAN version CKAN_VERSION [default: 2.4]
      -b --bare               Bare CKAN site with no example extension
      -i --image-only         Create the environment but don't start containers
      --interactive           Doesn't detach from the web container
      --no-datapusher         Don't install/enable ckanext-datapusher
      --no-init-db            Don't initialize the database. Useful for importing CKANs.
      -n --no-sysadmin        Don't prompt for an initial sysadmin user account
      -s --site=NAME          Pick a site to create [default: primary]
      --site-url SITE_URL     The site_url to use in API responses (e.g. http://example.org:{port}/)
      --syslog                Log to the syslog

    ENVIRONMENT_DIR is a path for the new environment directory. The last
    part of this path will be used as the environment name.
    """
    if opts['--address'] and is_boot2docker():
        raise DatacatsError('Cannot specify address on boot2docker.')
    return create_environment(
        environment_dir=opts['ENVIRONMENT_DIR'],
        port=opts['PORT'],
        create_skin=not opts['--bare'],
        start_web=not opts['--image-only'],
        create_sysadmin=not opts['--no-sysadmin'],
        site_name=opts['--site'],
        ckan_version=opts['--ckan'],
        address=opts['--address'],
        log_syslog=opts['--syslog'],
        datapusher=not opts['--no-datapusher'],
        site_url=opts['--site-url'],
        interactive=opts['--interactive'],
        init_db=not opts['--no-init-db'],
    )
def reset(environment, opts):
    """Resets a site to the default state. This will re-initialize the
    database and recreate the administrator account.

    Usage:
      datacats reset [-iyn] [-s NAME] [ENVIRONMENT]

    Options:
      -i --interactive     Don't detach from the web container
      -s --site=NAME       The site to reset [default: primary]
      -y --yes             Respond yes to all questions
      -n --no-sysadmin     Don't prompt for a sysadmin password
    """
    # pylint: disable=unused-argument
    if not opts['--yes']:
        y_or_n_prompt('Reset will remove all data related to the '
                      'site {} and recreate the database'.format(opts['--site']))
    print 'Resetting...'
    environment.stop_supporting_containers()
    environment.stop_ckan()
    clean_pyc(environment)
    # Save the port.
    saved_port = environment.port
    environment.purge_data([opts['--site']], never_delete=True)
    init({
        'ENVIRONMENT_DIR': opts['ENVIRONMENT'],
        '--site': opts['--site'],
        'PORT': saved_port,
        '--syslog': None,
        '--address': None,
        '--image-only': False,
        '--interactive': opts['--interactive'],
        '--no-init-db': False,
        '--no-sysadmin': opts['--no-sysadmin'],
        '--site-url': None
    }, no_install=True)
def init(opts, no_install=False, quiet=False):
    """Initialize a purged environment or copied environment directory

    Usage:
      datacats init [-in] [--syslog] [-s NAME] [--address=IP] [--interactive]
                    [--site-url SITE_URL] [ENVIRONMENT_DIR [PORT]] [--no-init-db]

    Options:
      --address=IP            Address to listen on (Linux-only)
      --interactive           Don't detach from the web container
      -i --image-only         Create the environment but don't start containers
      --no-init-db            Don't initialize the database. Useful for importing other CKANs
      -n --no-sysadmin        Don't prompt for an initial sysadmin user account
      -s --site=NAME          Pick a site to initialize [default: primary]
      --site-url SITE_URL     The site_url to use in API responses (e.g. http://example.org:{port}/)
      --syslog                Log to the syslog

    ENVIRONMENT_DIR is an existing datacats environment directory. Defaults to '.'
    """
    if opts['--address'] and is_boot2docker():
        raise DatacatsError('Cannot specify address on boot2docker.')

    environment_dir = opts['ENVIRONMENT_DIR']
    port = opts['PORT']
    address = opts['--address']
    start_web = not opts['--image-only']
    create_sysadmin = not opts['--no-sysadmin']
    site_name = opts['--site']
    site_url = opts['--site-url']
    interactive = opts['--interactive']
    init_db = not opts['--no-init-db']

    environment_dir = abspath(environment_dir or '.')
    log_syslog = opts['--syslog']

    environment = Environment.load(environment_dir, site_name)
    if address:
        environment.address = address
    if port:
        environment.port = int(port)
    if site_url:
        environment.site_url = site_url

    try:
        if environment.sites and site_name in environment.sites:
            raise DatacatsError('Site named {0} already exists.'.format(site_name))
        # There are a couple of steps we can/must skip if we're making a sub-site only
        making_full_environment = not environment.data_exists()
        if not quiet:
            write('Creating environment {0}/{1} '
                  'from existing environment directory "{0}"'
                  .format(environment.name, environment.site_name))
        steps = [
            lambda: environment.create_directories(create_project_dir=False)
        ] + ([
            environment.save,
            environment.create_virtualenv
        ] if making_full_environment else []) + [
            environment.save_site,
            environment.start_supporting_containers,
            environment.fix_storage_permissions,
        ]
        for fn in steps:
            fn()
            if not quiet:
                write('.')
        if not quiet:
            write('\n')
    except:
        if not quiet:
            print
        raise

    return finish_init(environment, start_web, create_sysadmin,
                       log_syslog=log_syslog, do_install=not no_install,
                       quiet=quiet, site_url=site_url, interactive=interactive,
                       init_db=init_db)
Initialize a purged environment or copied environment directory Usage: datacats init [-in] [--syslog] [-s NAME] [--address=IP] [--interactive] [--site-url SITE_URL] [ENVIRONMENT_DIR [PORT]] [--no-init-db] Options: --address=IP Address to listen on (Linux-only) --interactive Don't detach from the web container -i --image-only Create the environment but don't start containers --no-init-db Don't initialize the database. Useful for importing other CKANs -n --no-sysadmin Don't prompt for an initial sysadmin user account -s --site=NAME Pick a site to initialize [default: primary] --site-url SITE_URL The site_url to use in API responses (e.g. http://example.org:{port}/) --syslog Log to the syslog ENVIRONMENT_DIR is an existing datacats environment directory. Defaults to '.'
entailment
def finish_init(environment, start_web, create_sysadmin, log_syslog=False, do_install=True, quiet=False, site_url=None, interactive=False, init_db=True): """ Common parts of create and init: Install, init db, start site, sysadmin """ if not init_db: start_web = False create_sysadmin = False if do_install: install_all(environment, False, verbose=False, quiet=quiet) if init_db: if not quiet: write('Initializing database') environment.install_postgis_sql() environment.ckan_db_init() if not quiet: write('\n') if site_url: try: site_url = site_url.format(address=environment.address, port=environment.port) environment.site_url = site_url environment.save_site(False) except (KeyError, IndexError, ValueError) as e: raise DatacatsError('Could not parse site_url: {}'.format(e)) if start_web: environment.start_ckan(log_syslog=log_syslog) if not quiet and not interactive: write('Starting web server at {0} ...\n'.format( environment.web_address())) if create_sysadmin: try: adminpw = confirm_password() environment.create_admin_set_password(adminpw) except KeyboardInterrupt: print if not start_web: environment.stop_supporting_containers()
Common parts of create and init: Install, init db, start site, sysadmin
entailment
def save(self): """ Save profile settings into user profile directory """ config = self.profiledir + '/config' if not isdir(self.profiledir): makedirs(self.profiledir) cp = SafeConfigParser() cp.add_section('ssh') cp.set('ssh', 'private_key', self.ssh_private_key) cp.set('ssh', 'public_key', self.ssh_public_key) with open(config, 'w') as cfile: cp.write(cfile)
Save profile settings into user profile directory
entailment
def generate_ssh_key(self): """ Generate a new ssh private and public key """ web_command( command=["ssh-keygen", "-q", "-t", "rsa", "-N", "", "-C", "datacats generated {0}@{1}".format( getuser(), gethostname()), "-f", "/output/id_rsa"], rw={self.profiledir: '/output'}, )
Generate a new ssh private and public key
entailment
def create(self, environment, target_name): """ Sends "create project" command to the remote server """ remote_server_command( ["ssh", environment.deploy_target, "create", target_name], environment, self, clean_up=True, )
Sends "create project" command to the remote server
entailment
def admin_password(self, environment, target_name, password): """ Return True if password was set successfully """ try: remote_server_command( ["ssh", environment.deploy_target, "admin_password", target_name, password], environment, self, clean_up=True ) return True except WebCommandError: return False
Return True if password was set successfully
entailment
def deploy(self, environment, target_name, stream_output=None):
        """
        Return True if deployment was successful
        """
        try:
            remote_server_command(
                [
                    "rsync", "-lrv", "--safe-links", "--munge-links",
                    "--delete", "--inplace", "--chmod=ugo=rwX",
                    "--exclude=.datacats-environment",
                    "--exclude=.git",
                    "/project/.",
                    environment.deploy_target + ':' + target_name
                ],
                environment, self,
                include_project_dir=True,
                stream_output=stream_output,
                clean_up=True,
            )
        except WebCommandError as e:
            raise DatacatsError(
                "Unable to deploy `{0}` to remote server for some reason:\n"
                " datacats was not able to copy data to the remote server"
                .format(target_name),
                parent_exception=e
                )

        try:
            remote_server_command(
                [
                    "ssh", environment.deploy_target, "install", target_name,
                ],
                environment, self,
                clean_up=True,
            )
            return True
        except WebCommandError as e:
            raise DatacatsError(
                "Unable to deploy `{0}` to remote server for some reason:\n"
                "datacats copied data to the server but failed to register\n"
                "(or `install`) the new catalog"
                .format(target_name),
                parent_exception=e
                )
Return True if deployment was successful
entailment
def num_batches(n, batch_size): """Compute the number of mini-batches required to cover a data set of size `n` using batches of size `batch_size`. Parameters ---------- n: int the number of samples in the data set batch_size: int the mini-batch size Returns ------- int: the number of batches required """ b = n // batch_size if n % batch_size > 0: b += 1 return b
Compute the number of mini-batches required to cover a data set of size `n` using batches of size `batch_size`. Parameters ---------- n: int the number of samples in the data set batch_size: int the mini-batch size Returns ------- int: the number of batches required
entailment
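The ceiling division above is easy to sanity-check by hand; a minimal standalone sketch, re-defining the helper locally so the snippet runs on its own:

def num_batches(n, batch_size):
    # Mirrors the helper above: integer ceiling division without floats.
    b = n // batch_size
    if n % batch_size > 0:
        b += 1
    return b

assert num_batches(10, 4) == 3  # two full batches plus a partial batch of 2
assert num_batches(8, 4) == 2   # exact fit, no partial batch
assert num_batches(3, 4) == 1   # a single partial batch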
def num_indices_generated(self): """ Get the number of indices that would be generated by this sampler. Returns ------- int, `np.inf` or `None`. An int if the number of samples is known, `np.inf` if it is infinite or `None` if the number of samples is unknown. """ if self.repeats == -1: return np.inf else: return self.length * self.repeats
Get the number of indices that would be generated by this sampler. Returns ------- int, `np.inf` or `None`. An int if the number of samples is known, `np.inf` if it is infinite or `None` if the number of samples is unknown.
entailment
def in_order_indices_batch_iterator(self, batch_size): """ Create an iterator that generates in-order mini-batches of sample indices. The batches will have `batch_size` elements, with the exception of the final batch which will have less if there are not enough samples left to fill it. The generated mini-batches indices take the form of 1D NumPy integer arrays. Parameters ---------- batch_size: int Mini-batch size Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays. """ if self.repeats == 1: for i in range(0, self.length, batch_size): yield np.arange(i, min(i + batch_size, self.length)) else: repeats = self.repeats i = 0 while True: j = i + batch_size if j <= self.length: # Within size of data yield np.arange(i, j) i = j elif j <= self.length * 2: # One restart is required # Reduce the number of remaining repeats if repeats != -1: repeats -= 1 if repeats == 0: # Finished; emit remaining elements if i < self.length: yield np.arange(i, self.length) break # Wrap over # Compute number of elements required to make up # the batch k = batch_size - (self.length - i) yield np.append(np.arange(i, self.length), np.arange(0, k), axis=0) i = k else: # Multiple restarts required to fill the batch batch_ndx = np.arange(0) # i = 0 while len(batch_ndx) < batch_size: # Wrap over k = min(batch_size - len(batch_ndx), self.length - i) batch_ndx = np.append( batch_ndx, np.arange(i, i + k), axis=0) i += k if i >= self.length: i -= self.length # Reduce the number of remaining repeats if repeats != -1: repeats -= 1 if repeats == 0: break if len(batch_ndx) > 0: yield batch_ndx if repeats == 0: break
Create an iterator that generates in-order mini-batches of sample indices. The batches will have `batch_size` elements, with the exception of the final batch which will have less if there are not enough samples left to fill it. The generated mini-batches indices take the form of 1D NumPy integer arrays. Parameters ---------- batch_size: int Mini-batch size Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays.
entailment
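To make the wrap-over behaviour concrete, here is a small NumPy emulation of what the iterator above yields for length=5, repeats=2, batch_size=3; this is an illustration of the expected output, not the sampler itself:

import numpy as np

length, repeats, batch_size = 5, 2, 3
order = np.concatenate([np.arange(length)] * repeats)  # two in-order passes
for i in range(0, len(order), batch_size):
    print(order[i:i + batch_size])
# [0 1 2]
# [3 4 0]   <- wraps over the end of the data
# [1 2 3]
# [4]       <- final partial batch once the repeat budget is spent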
def shuffled_indices_batch_iterator(self, batch_size, shuffle_rng): """ Create an iterator that generates randomly shuffled mini-batches of sample indices. The batches will have `batch_size` elements, with the exception of the final batch which will have less if there are not enough samples left to fill it. The generated mini-batches indices take the form of 1D NumPy integer arrays. Parameters ---------- batch_size: int Mini-batch size shuffle_rng: a `numpy.random.RandomState` that will be used to randomise element order. Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays. """ if self.repeats == 1: indices = shuffle_rng.permutation(self.length) for i in range(0, self.length, batch_size): yield indices[i:i + batch_size] else: repeats = self.repeats indices = shuffle_rng.permutation(self.length) i = 0 while True: j = i + batch_size if j <= self.length: # Within size of data yield indices[i:j] i = j else: # Multiple restarts required to fill the batch batch_ndx = np.arange(0) while len(batch_ndx) < batch_size: # Wrap over k = min(batch_size - len(batch_ndx), self.length - i) batch_ndx = np.append( batch_ndx, indices[i:i + k], axis=0) i += k if i >= self.length: # Loop over; new permutation indices = shuffle_rng.permutation(self.length) i -= self.length # Reduce the number of remaining repeats if repeats != -1: repeats -= 1 if repeats == 0: break if len(batch_ndx) > 0: yield batch_ndx if repeats == 0: break
Create an iterator that generates randomly shuffled mini-batches of sample indices. The batches will have `batch_size` elements, with the exception of the final batch which will have less if there are not enough samples left to fill it. The generated mini-batches indices take the form of 1D NumPy integer arrays. Parameters ---------- batch_size: int Mini-batch size shuffle_rng: a `numpy.random.RandomState` that will be used to randomise element order. Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays.
entailment
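The epoch boundary is the subtle part: when a permutation is exhausted mid-batch, a fresh permutation is drawn and the batch is stitched from both. A standalone emulation of one such boundary (length=5, batch_size=3, seeded for reproducibility):

import numpy as np

rng = np.random.RandomState(0)
perm1 = rng.permutation(5)
perm2 = rng.permutation(5)
print(perm1[:3])                                # first batch, wholly from perm1
print(np.append(perm1[3:], perm2[:1], axis=0))  # second batch spans the boundary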
def shuffled_indices_batch_iterator(self, batch_size, shuffle_rng): """ Create an iterator that generates randomly shuffled mini-batches of sample indices. The batches will have `batch_size` elements. The generated mini-batches indices take the form of 1D NumPy integer arrays. Parameters ---------- batch_size: int Mini-batch size shuffle_rng: a `numpy.random.RandomState` that will be used to randomise element order. Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays. """ while True: yield shuffle_rng.choice(len(self.weights), size=(batch_size,), p=self.weights)
Create an iterator that generates randomly shuffled mini-batches of sample indices. The batches will have `batch_size` elements. The generated mini-batches indices take the form of 1D NumPy integer arrays. Parameters ---------- batch_size: int Mini-batch size shuffle_rng: a `numpy.random.RandomState` that will be used to randomise element order. Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays.
entailment
def class_balancing_sample_weights(y): """ Compute sample weight given an array of sample classes. The weights are assigned on a per-class basis and the per-class weights are inversely proportional to their frequency. Parameters ---------- y: NumPy array, 1D dtype=int sample classes, values must be 0 or positive Returns ------- NumPy array, 1D dtype=float per sample weight array """ h = np.bincount(y) cls_weight = 1.0 / (h.astype(float) * len(np.nonzero(h)[0])) cls_weight[np.isnan(cls_weight)] = 0.0 sample_weight = cls_weight[y] return sample_weight
Compute sample weight given an array of sample classes. The weights are assigned on a per-class basis and the per-class weights are inversely proportional to their frequency. Parameters ---------- y: NumPy array, 1D dtype=int sample classes, values must be 0 or positive Returns ------- NumPy array, 1D dtype=float per sample weight array
entailment
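A worked example of the weighting: with 4 samples of class 0 and 1 sample of class 1, each class contributes 0.5 of the total weight, so the per-sample weights sum to 1 and the rare class is upweighted:

import numpy as np

y = np.array([0, 0, 0, 0, 1])
h = np.bincount(y)                                    # [4, 1]
cls_weight = 1.0 / (h.astype(float) * len(np.nonzero(h)[0]))
print(cls_weight)     # [0.125  0.5  ]
print(cls_weight[y])  # [0.125 0.125 0.125 0.125 0.5]  -- sums to 1.0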
def shuffled_indices_batch_iterator(self, batch_size, shuffle_rng): """ Create an iterator that generates randomly shuffled mini-batches of sample indices. The batches will have `batch_size` elements. The generated mini-batches indices take the form of 1D NumPy integer arrays. Parameters ---------- batch_size: int Mini-batch size shuffle_rng: a `numpy.random.RandomState` that will be used to randomise element order. Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays. """ while True: yield shuffle_rng.choice(self.indices, size=(batch_size,), p=self.sub_weights)
Create an iterator that generates randomly shuffled mini-batches of sample indices. The batches will have `batch_size` elements. The generated mini-batches indices take the form of 1D NumPy integer arrays. Parameters ---------- batch_size: int Mini-batch size shuffle_rng: a `numpy.random.RandomState` that will be used to randomise element order. Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays.
entailment
def class_balancing_sampler(y, indices): """ Construct a `WeightedSubsetSampler` that compensates for class imbalance. Parameters ---------- y: NumPy array, 1D dtype=int sample classes, values must be 0 or positive indices: NumPy array, 1D dtype=int An array of indices that identify the subset of samples drawn from data that are to be used Returns ------- WeightedSubsetSampler instance Sampler """ weights = WeightedSampler.class_balancing_sample_weights(y[indices]) return WeightedSubsetSampler(weights, indices=indices)
Construct a `WeightedSubsetSampler` that compensates for class imbalance. Parameters ---------- y: NumPy array, 1D dtype=int sample classes, values must be 0 or positive indices: NumPy array, 1D dtype=int An array of indices that identify the subset of samples drawn from data that are to be used Returns ------- WeightedSubsetSampler instance Sampler
entailment
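The net effect can be reproduced with plain NumPy: drawing with these weights over an imbalanced subset yields roughly class-balanced batches. A hedged sketch with synthetic labels, standing in for the sampler class itself:

import numpy as np

y = np.array([0] * 8 + [1] * 2)          # 8:2 class imbalance
indices = np.arange(10)
h = np.bincount(y[indices]).astype(float)
weights = (1.0 / (h * 2.0))[y[indices]]  # per-sample weights, sum to 1
rng = np.random.RandomState(0)
batch = rng.choice(indices, size=(1000,), p=weights)
print(np.bincount(y[batch]))             # roughly equal counts for both classes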
def wait_for_service_available(container, url, timeout):
    """ Wait up to timeout seconds for the service at url to start.

    Returns True if the service becomes available, False if the container
    stops, or raises ServiceTimeout if the timeout is reached.
    """
    start = time.time()
    while True:
        remaining = start + timeout - time.time()
        if remaining < 0:
            raise ServiceTimeout
        try:
            response = get(url, timeout=min(remaining, REQUEST_TIMEOUT_SECONDS))
            if 500 <= response.status_code < 600:
                return False
            return True
        except (ConnectionError, Timeout):
            pass
        if not inspect_container(container)['State']['Running']:
            return False
        remaining = start + timeout - time.time()
        delay = max(0, min(RETRY_DELAY_SECONDS, remaining))
        time.sleep(delay)
Wait up to timeout seconds for the service at url to start. Returns True if the service becomes available, False if the container stops, or raises ServiceTimeout if the timeout is reached.
entailment
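The deadline arithmetic is worth seeing in isolation: the remaining budget shrinks on every pass and caps both the request timeout and the back-off sleep. A standalone sketch, with a stand-in value for the module constant:

import time

RETRY_DELAY_SECONDS = 0.5   # stand-in for the module constant

start, timeout = time.time(), 2.0
while True:
    remaining = start + timeout - time.time()
    if remaining < 0:
        break  # the real function raises ServiceTimeout here
    # ... poll the service, then back off without overshooting the deadline
    time.sleep(max(0, min(RETRY_DELAY_SECONDS, remaining)))
print('deadline reached after ~{:.1f}s'.format(time.time() - start))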
def get_data_path(filename): """ Get the path of the given file within the batchup data directory Parameters ---------- filename: str The filename to locate within the batchup data directory Returns ------- str The full path of the file """ if os.path.isabs(filename): return filename else: return os.path.join(get_data_dir(), filename)
Get the path of the given file within the batchup data directory Parameters ---------- filename: str The filename to locate within the batchup data directory Returns ------- str The full path of the file
entailment
def download(path, source_url): """ Download a file to a given path from a given URL, if it does not exist. Parameters ---------- path: str The (destination) path of the file on the local filesystem source_url: str The URL from which to download the file Returns ------- str The path of the file """ dir_path = os.path.dirname(path) if not os.path.exists(dir_path): os.makedirs(dir_path) if not os.path.exists(path): print('Downloading {} to {}'.format(source_url, path)) filename = source_url.split('/')[-1] def _progress(count, block_size, total_size): sys.stdout.write('\rDownloading {} {:.2%}'.format( filename, float(count * block_size) / float(total_size))) sys.stdout.flush() try: urlretrieve(source_url, path, reporthook=_progress) except: sys.stdout.write('\r') # Exception; remove any partially downloaded file and re-raise if os.path.exists(path): os.remove(path) raise sys.stdout.write('\r') return path
Download a file to a given path from a given URL, if it does not exist. Parameters ---------- path: str The (destination) path of the file on the local filesystem source_url: str The URL from which to download the file Returns ------- str The path of the file
entailment
def compute_sha256(path): """ Compute the SHA-256 hash of the file at the given path Parameters ---------- path: str The path of the file Returns ------- str The SHA-256 HEX digest """ hasher = hashlib.sha256() with open(path, 'rb') as f: # 10MB chunks for chunk in iter(lambda: f.read(10 * 1024 * 1024), b''): hasher.update(chunk) return hasher.hexdigest()
Compute the SHA-256 hash of the file at the given path Parameters ---------- path: str The path of the file Returns ------- str The SHA-256 HEX digest
entailment
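The chunked digest should agree with hashlib's one-shot API; a quick self-check using a throwaway file, assuming compute_sha256 above is in scope:

import hashlib
import os
import tempfile

fd, tmp_path = tempfile.mkstemp()
os.write(fd, b'hello world')
os.close(fd)
assert compute_sha256(tmp_path) == hashlib.sha256(b'hello world').hexdigest()
os.remove(tmp_path)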
def verify_file(path, sha256):
    """
    Verify the integrity of a file by checking its SHA-256 hash.

    If no digest is supplied, the digest is printed to the console.

    Closely follows the code in `torchvision.datasets.utils.check_integrity`

    Parameters
    ----------
    path: str
        The path of the file to check
    sha256: str
        The expected SHA-256 hex digest of the file, or `None` to print the
        digest of the file to the console

    Returns
    -------
    bool
        Indicates if the file passes the integrity check or not
    """
    if not os.path.isfile(path):
        return False
    digest = compute_sha256(path)
    if sha256 is None:
        # No digest supplied; report it to the console so a developer can
        # fill it in
        print('SHA-256 of {}:'.format(path))
        print('  "{}"'.format(digest))
    else:
        if digest != sha256:
            return False
    return True
Verify the integrity of a file by checking its SHA-256 hash. If no digest is supplied, the digest is printed to the console. Closely follows the code in `torchvision.datasets.utils.check_integrity` Parameters ---------- path: str The path of the file to check sha256: str The expected SHA-256 hex digest of the file, or `None` to print the digest of the file to the console Returns ------- bool Indicates if the file passes the integrity check or not
entailment
def download_and_verify(path, source_url, sha256):
    """
    Download a file to a given path from a given URL, if it does not exist.
    After downloading it, verify its integrity by checking the SHA-256 hash.

    Parameters
    ----------
    path: str
        The (destination) path of the file on the local filesystem
    source_url: str
        The URL from which to download the file
    sha256: str
        The expected SHA-256 hex digest of the file, or `None` to print the
        digest of the file to the console

    Returns
    -------
    str or None
        The path of the file if successfully downloaded otherwise `None`
    """
    if os.path.exists(path):
        # Already exists?
        # Nothing to do, except print the SHA-256 if necessary
        if sha256 is None:
            print('The SHA-256 of {} is "{}"'.format(
                path, compute_sha256(path)))
        return path

    # Compute the path of the unverified file
    unverified_path = path + '.unverified'
    for i in range(_MAX_DOWNLOAD_TRIES):
        # Download it
        try:
            unverified_path = download(unverified_path, source_url)
        except Exception as e:
            # Report failure
            print(
                'Download of {} unsuccessful; error {}; '
                'deleting and re-trying...'.format(source_url, e))
            # Delete so that we can retry
            if os.path.exists(unverified_path):
                os.remove(unverified_path)
        else:
            if os.path.exists(unverified_path):
                # Got something...
                if verify_file(unverified_path, sha256):
                    # Success: rename the unverified file to the destination
                    # filename
                    os.rename(unverified_path, path)
                    return path
                else:
                    # Report failure
                    print(
                        'Download of {} unsuccessful; verification failed; '
                        'deleting and re-trying...'.format(source_url))
                    # Delete so that we can retry
                    os.remove(unverified_path)
    print('Did not succeed in downloading {} (tried {} times)'.format(
        source_url, _MAX_DOWNLOAD_TRIES
    ))
    return None
Download a file to a given path from a given URL, if it does not exist. After downloading it, verify its integrity by checking the SHA-256 hash. Parameters ---------- path: str The (destination) path of the file on the local filesystem source_url: str The URL from which to download the file sha256: str The expected SHA-256 hex digest of the file, or `None` to print the digest of the file to the console Returns ------- str or None The path of the file if successfully downloaded otherwise `None`
entailment
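Callers have to remember the None-on-failure contract; a thin hypothetical wrapper (the name and message are illustrative, not part of the library) that converts it into an exception:

def fetch_or_raise(path, source_url, sha256):
    # Wraps download_and_verify (above): None means the download failed
    # or the SHA-256 check did not pass after all retries.
    result = download_and_verify(path, source_url, sha256)
    if result is None:
        raise RuntimeError('could not fetch {}'.format(source_url))
    return result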
def copy_and_verify(path, source_path, sha256):
    """
    Copy a file to a given path from a given path, if it does not exist.
    After copying it, verify its integrity by checking the SHA-256 hash.

    Parameters
    ----------
    path: str
        The (destination) path of the file on the local filesystem
    source_path: str
        The path from which to copy the file
    sha256: str
        The expected SHA-256 hex digest of the file, or `None` to print the
        digest of the file to the console

    Returns
    -------
    str or None
        The path of the file if successfully copied otherwise `None`
    """
    if os.path.exists(path):
        # Already exists?
        # Nothing to do, except print the SHA-256 if necessary
        if sha256 is None:
            print('The SHA-256 of {} is "{}"'.format(
                path, compute_sha256(path)))
        return path

    if not os.path.exists(source_path):
        return None

    # Compute the path of the unverified file
    unverified_path = path + '.unverified'
    # Copy it
    dir_path = os.path.dirname(path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    shutil.copy(source_path, unverified_path)

    if os.path.exists(unverified_path):
        # Got something...
        if verify_file(unverified_path, sha256):
            # Success: rename the unverified file to the destination
            # filename
            os.rename(unverified_path, path)
            return path
        else:
            # Report failure
            print('SHA verification of file {} failed'.format(source_path))
            # Delete
            os.remove(unverified_path)
    return None
Copy a file to a given path from a given path, if it does not exist. After copying it, verify its integrity by checking the SHA-256 hash. Parameters ---------- path: str The (destination) path of the file on the local filesystem source_path: str The path from which to copy the file sha256: str The expected SHA-256 hex digest of the file, or `None` to print the digest of the file to the console Returns ------- str or None The path of the file if successfully copied otherwise `None`
entailment
def ckan_extension_template(name, target): """ Create ckanext-(name) in target directory. """ setupdir = '{0}/ckanext-{1}theme'.format(target, name) extdir = setupdir + '/ckanext/{0}theme'.format(name) templatedir = extdir + '/templates/' staticdir = extdir + '/static/datacats' makedirs(templatedir + '/home/snippets') makedirs(staticdir) here = dirname(__file__) copyfile(here + '/images/chart.png', staticdir + '/chart.png') copyfile(here + '/images/datacats-footer.png', staticdir + '/datacats-footer.png') filecontents = [ (setupdir + '/setup.py', SETUP_PY), (setupdir + '/.gitignore', DOT_GITIGNORE), (setupdir + '/ckanext/__init__.py', NAMESPACE_PACKAGE), (extdir + '/__init__.py', ''), (extdir + '/plugins.py', PLUGINS_PY), (templatedir + '/home/snippets/promoted.html', PROMOTED_SNIPPET), (templatedir + '/footer.html', FOOTER_HTML), ] for filename, content in filecontents: with open(filename, 'w') as f: f.write(content.replace('##name##', name))
Create ckanext-(name) in target directory.
entailment
def shell(environment, opts): """Run a command or interactive shell within this environment Usage: datacats [-d] [-s NAME] shell [ENVIRONMENT [COMMAND...]] Options: -d --detach Run the resulting container in the background -s --site=NAME Specify a site to run the shell on [default: primary] ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """ environment.require_data() environment.start_supporting_containers() return environment.interactive_shell( opts['COMMAND'], detach=opts['--detach'] )
Run a command or interactive shell within this environment Usage: datacats [-d] [-s NAME] shell [ENVIRONMENT [COMMAND...]] Options: -d --detach Run the resulting container in the background -s --site=NAME Specify a site to run the shell on [default: primary] ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.'
entailment
def paster(opts): """Run a paster command from the current directory Usage: datacats paster [-d] [-s NAME] [COMMAND...] Options: -s --site=NAME Specify a site to run this paster command on [default: primary] -d --detach Run the resulting container in the background You must be inside a datacats environment to run this. The paster command will run within your current directory inside the environment. You don't need to specify the --plugin option. The --config option also need not be specified. """ environment = Environment.load('.') environment.require_data() environment.start_supporting_containers() if not opts['COMMAND']: opts['COMMAND'] = ['--', 'help'] assert opts['COMMAND'][0] == '--' return environment.interactive_shell( opts['COMMAND'][1:], paster=True, detach=opts['--detach'] )
Run a paster command from the current directory Usage: datacats paster [-d] [-s NAME] [COMMAND...] Options: -s --site=NAME Specify a site to run this paster command on [default: primary] -d --detach Run the resulting container in the background You must be inside a datacats environment to run this. The paster command will run within your current directory inside the environment. You don't need to specify the --plugin option. The --config option also need not be specified.
entailment
def save_new_site(site_name, sitedir, srcdir, port, address, site_url, passwords): """ Add a site's configuration to the source dir and site dir """ cp = ConfigParser.SafeConfigParser() cp.read([srcdir + '/.datacats-environment']) section_name = 'site_' + site_name if not cp.has_section(section_name): cp.add_section(section_name) cp.set(section_name, 'port', str(port)) if address: cp.set(section_name, 'address', address) if site_url: cp.set(section_name, 'site_url', site_url) with open(srcdir + '/.datacats-environment', 'w') as config: cp.write(config) # save passwords to datadir cp = ConfigParser.SafeConfigParser() cp.add_section('passwords') for n in sorted(passwords): cp.set('passwords', n.lower(), passwords[n]) # Write to the sitedir so we maintain separate passwords. with open(sitedir + '/passwords.ini', 'w') as config: cp.write(config)
Add a site's configuration to the source dir and site dir
entailment
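A minimal round-trip of the same SafeConfigParser pattern, showing the shape of the section this writes; the section name and values here are illustrative (Python 2 module names, matching this codebase):

import ConfigParser
from StringIO import StringIO

cp = ConfigParser.SafeConfigParser()
cp.add_section('site_primary')
cp.set('site_primary', 'port', '5105')
buf = StringIO()
cp.write(buf)
print(buf.getvalue())
# [site_primary]
# port = 5105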
def save_new_environment(name, datadir, srcdir, ckan_version, deploy_target=None, always_prod=False): """ Save an environment's configuration to the source dir and data dir """ with open(datadir + '/.version', 'w') as f: f.write('2') cp = ConfigParser.SafeConfigParser() cp.read(srcdir + '/.datacats-environment') if not cp.has_section('datacats'): cp.add_section('datacats') cp.set('datacats', 'name', name) cp.set('datacats', 'ckan_version', ckan_version) if deploy_target: if not cp.has_section('deploy'): cp.add_section('deploy') cp.set('deploy', 'target', deploy_target) if always_prod: cp.set('datacats', 'always_prod', 'true') with open(srcdir + '/.datacats-environment', 'w') as config: cp.write(config) save_srcdir_location(datadir, srcdir)
Save an environment's configuration to the source dir and data dir
entailment
def find_environment_dirs(environment_name=None, data_only=False):
    """
    :param environment_name: existing environment name, path or None to
        look in current or parent directories for project

    returns (srcdir, extension_dir, datadir)

    extension_dir is the name of extension directory user was in/referenced,
    default: 'ckan'. This value is used by the paster cli command.

    datadir will be None if environment_name was a path or None (not a name)
    """
    docker.require_images()

    if environment_name is None:
        environment_name = '.'

    extension_dir = 'ckan'
    if validate.valid_name(environment_name) and path.isdir(
            path.expanduser('~/.datacats/' + environment_name)):
        # loading from a name
        datadir = path.expanduser('~/.datacats/' + environment_name)
        with open(datadir + '/project-dir') as pd:
            srcdir = pd.read()

        if not data_only and not path.exists(srcdir + '/.datacats-environment'):
            raise DatacatsError(
                'Environment data found but environment directory is'
                ' missing. Try again from the new environment directory'
                ' location or remove the environment data with'
                ' "datacats purge"')

        return srcdir, extension_dir, datadir

    # loading from a path
    srcdir = path.abspath(environment_name)
    if not path.isdir(srcdir):
        raise DatacatsError('No environment found with that name')

    wd = srcdir
    oldwd = None
    while not path.exists(wd + '/.datacats-environment'):
        oldwd = wd
        wd, _ = path.split(wd)
        if wd == oldwd:
            raise DatacatsError(
                'Environment not found in {0} or above'.format(srcdir))
    srcdir = wd

    if oldwd:
        _, extension_dir = path.split(oldwd)

    return srcdir, extension_dir, None
:param environment_name: existing environment name, path or None to look in current or parent directories for project returns (srcdir, extension_dir, datadir) extension_dir is the name of extension directory user was in/referenced, default: 'ckan'. This value is used by the paster cli command. datadir will be None if environment_name was a path or None (not a name)
entailment
def load_environment(srcdir, datadir=None, allow_old=False):
    """
    Load configuration values for an environment

    :param srcdir: environment source directory
    :param datadir: environment data directory, if None it will be
        discovered from srcdir
    :param allow_old: Don't throw an exception if this is an old site
        This is only valid for sites that you are purging.

    Returns (datadir, name, ckan_version, always_prod, deploy_target,
        remote_server_key, extra_containers)
    """
    cp = ConfigParser.SafeConfigParser()
    try:
        cp.read([srcdir + '/.datacats-environment'])
    except ConfigParser.Error:
        raise DatacatsError('Error reading environment information')

    name = cp.get('datacats', 'name')

    if datadir:
        # update the link in case user moved their srcdir
        save_srcdir_location(datadir, srcdir)
    else:
        datadir = path.expanduser('~/.datacats/' + name)
        # FIXME: check if datadir is sane, project-dir points back to srcdir

    if migrate.needs_format_conversion(datadir) and not allow_old:
        raise DatacatsError('This environment uses an old format. You must'
                            ' migrate to the new format. To do so, use the'
                            ' "datacats migrate" command.')

    if migrate.is_locked(datadir):
        raise DatacatsError('Migration in progress, cannot continue.\n'
                            'If you interrupted a migration, you should'
                            ' attempt manual recovery or contact us by'
                            ' filing an issue at http://github.com/datacats/'
                            'datacats.\nAs a last resort, you could delete'
                            ' all your stored data and create a new environment'
                            ' by running "datacats purge" followed by'
                            ' "datacats init".')

    # FIXME: consider doing data_complete check here

    ckan_version = cp.get('datacats', 'ckan_version')
    try:
        always_prod = cp.getboolean('datacats', 'always_prod')
    except ConfigParser.NoOptionError:
        always_prod = False

    try:
        extra_containers = cp.get('datacats', 'extra_containers').split(' ')
    except ConfigParser.NoOptionError:
        extra_containers = ()

    # if remote_server's custom ssh connection
    # address is defined,
    # we overwrite the default datacats.com one
    try:
        deploy_target = cp.get('deploy', 'remote_server_user') \
            + "@" + cp.get('deploy', 'remote_server')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        deploy_target = DEFAULT_REMOTE_SERVER_TARGET

    # if remote_server's ssh public key is given,
    # we overwrite the default datacats.com one
    try:
        remote_server_key = cp.get('deploy', 'remote_server_key')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        remote_server_key = None

    return (datadir, name, ckan_version, always_prod, deploy_target,
            remote_server_key, extra_containers)
Load configuration values for an environment :param srcdir: environment source directory :param datadir: environment data directory, if None it will be discovered from srcdir :param allow_old: Don't throw an exception if this is an old site This is only valid for sites that you are purging. Returns (datadir, name, ckan_version, always_prod, deploy_target, remote_server_key, extra_containers)
entailment
def load_site(srcdir, datadir, site_name=None): """ Load configuration values for a site. Returns (port, address, site_url, passwords) """ if site_name is None: site_name = 'primary' if not validate.valid_name(site_name): raise DatacatsError('{} is not a valid site name.'.format(site_name)) cp = ConfigParser.SafeConfigParser() try: cp.read([srcdir + '/.datacats-environment']) except ConfigParser.Error: raise DatacatsError('Error reading environment information') site_section = 'site_' + site_name try: port = cp.getint(site_section, 'port') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): port = None try: address = cp.get(site_section, 'address') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): address = None try: site_url = cp.get(site_section, 'site_url') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): site_url = None passwords = {} cp = ConfigParser.SafeConfigParser() cp.read(datadir + '/sites/' + site_name + '/passwords.ini') try: pw_options = cp.options('passwords') except ConfigParser.NoSectionError: pw_options = [] for n in pw_options: passwords[n.upper()] = cp.get('passwords', n) return port, address, site_url, passwords
Load configuration values for a site. Returns (port, address, site_url, passwords)
entailment
def new_environment_check(srcpath, site_name, ckan_version):
    """
    Check if a new environment or site can be created at the given path.

    Returns (name, datadir, srcdir) or raises DatacatsError
    """
    docker.require_images()

    workdir, name = path.split(path.abspath(path.expanduser(srcpath)))

    if not validate.valid_name(name):
        raise DatacatsError('Please choose an environment name starting'
                            ' with a letter and including only lowercase letters'
                            ' and digits')
    if not path.isdir(workdir):
        raise DatacatsError('Parent directory for environment'
                            ' does not exist')

    datadir = path.expanduser('~/.datacats/' + name)
    sitedir = datadir + '/sites/' + site_name
    # We track through the datadir to the target if we are just making a
    # site
    if path.isdir(datadir):
        with open(datadir + '/project-dir') as pd:
            srcdir = pd.read()
    else:
        srcdir = workdir + '/' + name

    if ckan_version not in SUPPORTED_PRELOADS:
        raise DatacatsError('''Datacats does not currently support CKAN version {}.
Versions that are currently supported are: {}'''.format(ckan_version,
                                                        ', '.join(SUPPORTED_PRELOADS)))

    preload_name = str(ckan_version)

    # Get all the versions from the tags
    downloaded_versions = [tag for tag in docker.get_tags('datacats/ckan')]

    if ckan_version not in downloaded_versions:
        retrying_pull_image('datacats/ckan:{}'.format(preload_name))

    if path.isdir(sitedir):
        raise DatacatsError('Site data directory {0} already exists'.format(
            sitedir))
    # This is the case where the data dir has been removed
    if path.isdir(srcdir) and not path.isdir(datadir):
        raise DatacatsError('Environment directory exists, but data directory does not.\n'
                            'If you simply want to recreate the data directory, run '
                            '"datacats init" in the environment directory.')

    return name, datadir, srcdir
Check if a new environment or site can be created at the given path. Returns (name, datadir, srcdir) or raises DatacatsError
entailment
def data_complete(datadir, sitedir, get_container_name): """ Return True if the directories and containers we're expecting are present in datadir, sitedir and containers """ if any(not path.isdir(sitedir + x) for x in ('/files', '/run', '/solr')): return False if docker.is_boot2docker(): # Inspect returns None if the container doesn't exist. return all(docker.inspect_container(get_container_name(x)) for x in ('pgdata', 'venv')) return path.isdir(datadir + '/venv') and path.isdir(sitedir + '/postgres')
Return True if the directories and containers we're expecting are present in datadir, sitedir and containers
entailment
def create_directories(datadir, sitedir, srcdir=None):
    """
    Create expected directories in datadir, sitedir
    and optionally srcdir
    """
    # It's possible that the datadir already exists
    # (we're making a secondary site)
    if not path.isdir(datadir):
        os.makedirs(datadir, mode=0o700)
    try:
        # This should take care of the 'site' subdir if needed
        os.makedirs(sitedir, mode=0o700)
    except OSError:
        raise DatacatsError("Site already exists.")

    # venv isn't site-specific, the rest are.
    if not docker.is_boot2docker():
        if not path.isdir(datadir + '/venv'):
            os.makedirs(datadir + '/venv')
        os.makedirs(sitedir + '/postgres')
    os.makedirs(sitedir + '/solr')
    os.makedirs(sitedir + '/files')
    os.makedirs(sitedir + '/run')

    if srcdir:
        os.makedirs(srcdir)
Create expected directories in datadir, sitedir and optionally srcdir
entailment
def create_virtualenv(srcdir, datadir, preload_image, get_container_name): """ Populate venv from preloaded image """ try: if docker.is_boot2docker(): docker.data_only_container( get_container_name('venv'), ['/usr/lib/ckan'], ) img_id = docker.web_command( '/bin/mv /usr/lib/ckan/ /usr/lib/ckan_original', image=preload_image, commit=True, ) docker.web_command( command='/bin/cp -a /usr/lib/ckan_original/. /usr/lib/ckan/.', volumes_from=get_container_name('venv'), image=img_id, ) docker.remove_image(img_id) return docker.web_command( command='/bin/cp -a /usr/lib/ckan/. /usr/lib/ckan_target/.', rw={datadir + '/venv': '/usr/lib/ckan_target'}, image=preload_image, ) finally: rw = {datadir + '/venv': '/usr/lib/ckan'} if not docker.is_boot2docker() else {} volumes_from = get_container_name('venv') if docker.is_boot2docker() else None # fix venv permissions docker.web_command( command='/bin/chown -R --reference=/project /usr/lib/ckan', rw=rw, volumes_from=volumes_from, ro={srcdir: '/project'}, )
Populate venv from preloaded image
entailment
def create_source(srcdir, preload_image, datapusher=False): """ Copy ckan source, datapusher source (optional), who.ini and schema.xml from preload image into srcdir """ try: docker.web_command( command='/bin/cp -a /project/ckan /project_target/ckan', rw={srcdir: '/project_target'}, image=preload_image) if datapusher: docker.web_command( command='/bin/cp -a /project/datapusher /project_target/datapusher', rw={srcdir: '/project_target'}, image=preload_image) shutil.copy( srcdir + '/ckan/ckan/config/who.ini', srcdir) shutil.copy( srcdir + '/ckan/ckan/config/solr/schema.xml', srcdir) finally: # fix srcdir permissions docker.web_command( command='/bin/chown -R --reference=/project /project', rw={srcdir: '/project'}, )
Copy ckan source, datapusher source (optional), who.ini and schema.xml from preload image into srcdir
entailment
def start_supporting_containers(sitedir, srcdir, passwords, get_container_name, extra_containers, log_syslog=False): """ Start all supporting containers (containers required for CKAN to operate) if they aren't already running, along with some extra containers specified by the user """ if docker.is_boot2docker(): docker.data_only_container(get_container_name('pgdata'), ['/var/lib/postgresql/data']) rw = {} volumes_from = get_container_name('pgdata') else: rw = {sitedir + '/postgres': '/var/lib/postgresql/data'} volumes_from = None running = set(containers_running(get_container_name)) needed = set(extra_containers).union({'postgres', 'solr'}) if not needed.issubset(running): stop_supporting_containers(get_container_name, extra_containers) # users are created when data dir is blank so we must pass # all the user passwords as environment vars # XXX: postgres entrypoint magic docker.run_container( name=get_container_name('postgres'), image='datacats/postgres', environment=passwords, rw=rw, volumes_from=volumes_from, log_syslog=log_syslog) docker.run_container( name=get_container_name('solr'), image='datacats/solr', rw={sitedir + '/solr': '/var/lib/solr'}, ro={srcdir + '/schema.xml': '/etc/solr/conf/schema.xml'}, log_syslog=log_syslog) for container in extra_containers: # We don't know a whole lot about the extra containers so we're just gonna have to # mount /project and /datadir r/o even if they're not needed for ease of # implementation. docker.run_container( name=get_container_name(container), image=EXTRA_IMAGE_MAPPING[container], ro={ sitedir: '/datadir', srcdir: '/project' }, log_syslog=log_syslog )
Start all supporting containers (containers required for CKAN to operate) if they aren't already running, along with some extra containers specified by the user
entailment
def stop_supporting_containers(get_container_name, extra_containers): """ Stop postgres and solr containers, along with any specified extra containers """ docker.remove_container(get_container_name('postgres')) docker.remove_container(get_container_name('solr')) for container in extra_containers: docker.remove_container(get_container_name(container))
Stop postgres and solr containers, along with any specified extra containers
entailment
def containers_running(get_container_name):
    """
    Return a list of the containers tracked by this environment that exist;
    containers that exist but are stopped are listed with a '(halted)' suffix
    """
    running = []
    for n in ['web', 'postgres', 'solr', 'datapusher', 'redis']:
        info = docker.inspect_container(get_container_name(n))
        if info and not info['State']['Running']:
            running.append(n + '(halted)')
        elif info:
            running.append(n)
    return running
Return a list of the containers tracked by this environment that exist; containers that exist but are stopped are listed with a '(halted)' suffix
entailment
def _load_sites(self): """ Gets the names of all of the sites from the datadir and stores them in self.sites. Also returns this list. """ if not self.sites: self.sites = task.list_sites(self.datadir) return self.sites
Gets the names of all of the sites from the datadir and stores them in self.sites. Also returns this list.
entailment
def save_site(self, create=True): """ Save environment settings in the directory that need to be saved even when creating only a new sub-site env. """ self._load_sites() if create: self.sites.append(self.site_name) task.save_new_site(self.site_name, self.sitedir, self.target, self.port, self.address, self.site_url, self.passwords)
Save environment settings in the directory that need to be saved even when creating only a new sub-site env.
entailment
def save(self): """ Save environment settings into environment directory, overwriting any existing configuration and discarding site config """ task.save_new_environment(self.name, self.datadir, self.target, self.ckan_version, self.deploy_target, self.always_prod)
Save environment settings into environment directory, overwriting any existing configuration and discarding site config
entailment
def new(cls, path, ckan_version, site_name, **kwargs):
        """
        Return an Environment object with settings for a new project.
        No directories or containers are created by this call.

        :param path: location for new project directory, may be relative
        :param ckan_version: release of CKAN to install
        :param site_name: the name of the site for which the database and \
            solr core will eventually be installed.

        For additional keyword arguments see the __init__ method.

        Raises DatacatsError if a directory or project with the same name
        already exists.
        """
        if ckan_version == 'master':
            ckan_version = 'latest'
        name, datadir, srcdir = task.new_environment_check(path, site_name, ckan_version)
        environment = cls(name, srcdir, datadir, site_name, ckan_version, **kwargs)
        environment._generate_passwords()
        return environment
Return an Environment object with settings for a new project. No directories or containers are created by this call. :param path: location for new project directory, may be relative :param ckan_version: release of CKAN to install :param site_name: the name of the site for which the database and \ solr core will eventually be installed. For additional keyword arguments see the __init__ method. Raises DatacatsError if a directory or project with the same name already exists.
entailment
def load(cls, environment_name=None, site_name='primary', data_only=False, allow_old=False):
        """
        Return an Environment object based on an existing environment+site.

        :param environment_name: existing environment name, path or None to
            look in current or parent directories for project
        :param data_only: set to True to only load from data dir, not
            the project dir; Used for purging environment data.
        :param allow_old: load a very minimal subset of what we usually
            load. This will only work for purging environment data on an old
            site.

        Raises DatacatsError if environment can't be found or if there is an
        error parsing the environment information.
        """
        srcdir, extension_dir, datadir = task.find_environment_dirs(
            environment_name, data_only)

        if datadir and data_only:
            return cls(environment_name, None, datadir, site_name)

        (datadir, name, ckan_version, always_prod, deploy_target,
            remote_server_key, extra_containers) = task.load_environment(srcdir, datadir, allow_old)

        if not allow_old:
            (port, address, site_url, passwords) = task.load_site(srcdir, datadir, site_name)
        else:
            (port, address, site_url, passwords) = (None, None, None, None)

        environment = cls(name, srcdir, datadir, site_name, ckan_version=ckan_version,
                          port=port, deploy_target=deploy_target, site_url=site_url,
                          always_prod=always_prod, address=address,
                          extension_dir=extension_dir,
                          remote_server_key=remote_server_key,
                          extra_containers=extra_containers)

        if passwords:
            environment.passwords = passwords
        else:
            environment._generate_passwords()

        if not allow_old:
            environment._load_sites()
        return environment
Return an Environment object based on an existing environment+site. :param environment_name: existing environment name, path or None to look in current or parent directories for project :param data_only: set to True to only load from data dir, not the project dir; Used for purging environment data. :param allow_old: load a very minimal subset of what we usually load. This will only work for purging environment data on an old site. Raises DatacatsError if environment can't be found or if there is an error parsing the environment information.
entailment
def data_complete(self): """ Return True if all the expected datadir files are present """ return task.data_complete(self.datadir, self.sitedir, self._get_container_name)
Return True if all the expected datadir files are present
entailment
def require_data(self): """ raise a DatacatsError if the datadir or volumes are missing or damaged """ files = task.source_missing(self.target) if files: raise DatacatsError('Missing files in source directory:\n' + '\n'.join(files)) if not self.data_exists(): raise DatacatsError('Environment datadir missing. ' 'Try "datacats init".') if not self.data_complete(): raise DatacatsError('Environment datadir damaged or volumes ' 'missing. ' 'To reset and discard all data use ' '"datacats reset"')
raise a DatacatsError if the datadir or volumes are missing or damaged
entailment
def create_directories(self, create_project_dir=True): """ Call once for new projects to create the initial project directories. """ return task.create_directories(self.datadir, self.sitedir, self.target if create_project_dir else None)
Call once for new projects to create the initial project directories.
entailment
def create_virtualenv(self): """ Populate venv from preloaded image """ return task.create_virtualenv(self.target, self.datadir, self._preload_image(), self._get_container_name)
Populate venv from preloaded image
entailment
def clean_virtualenv(self): """ Empty our virtualenv so that new (or older) dependencies may be installed """ self.user_run_script( script=scripts.get_script_path('clean_virtualenv.sh'), args=[], rw_venv=True, )
Empty our virtualenv so that new (or older) dependencies may be installed
entailment
def create_source(self, datapusher=True):
        """
        Populate ckan directory from preloaded image and copy
        who.ini and schema.xml into the conf directory
        """
        task.create_source(self.target, self._preload_image(), datapusher)
Populate ckan directory from preloaded image and copy who.ini and schema.xml into the conf directory
entailment
def start_supporting_containers(self, log_syslog=False): """ Start all supporting containers (containers required for CKAN to operate) if they aren't already running. :param log_syslog: A flag to redirect all container logs to host's syslog """ log_syslog = True if self.always_prod else log_syslog # in production we always use log_syslog driver (to aggregate all the logs) task.start_supporting_containers( self.sitedir, self.target, self.passwords, self._get_container_name, self.extra_containers, log_syslog=log_syslog )
Start all supporting containers (containers required for CKAN to operate) if they aren't already running. :param log_syslog: A flag to redirect all container logs to host's syslog
entailment
def create_ckan_ini(self): """ Use make-config to generate an initial development.ini file """ self.run_command( command='/scripts/run_as_user.sh /usr/lib/ckan/bin/paster make-config' ' ckan /project/development.ini', rw_project=True, ro={scripts.get_script_path('run_as_user.sh'): '/scripts/run_as_user.sh'}, )
Use make-config to generate an initial development.ini file
entailment
def update_ckan_ini(self, skin=True): """ Use config-tool to update development.ini with our environment settings :param skin: use environment template skin plugin True/False """ command = [ '/usr/lib/ckan/bin/paster', '--plugin=ckan', 'config-tool', '/project/development.ini', '-e', 'sqlalchemy.url = postgresql://<hidden>', 'ckan.datastore.read_url = postgresql://<hidden>', 'ckan.datastore.write_url = postgresql://<hidden>', 'ckan.datapusher.url = http://datapusher:8800', 'solr_url = http://solr:8080/solr', 'ckan.storage_path = /var/www/storage', 'ckan.plugins = datastore resource_proxy text_view ' + ('datapusher ' if exists(self.target + '/datapusher') else '') + 'recline_grid_view recline_graph_view' + (' {0}_theme'.format(self.name) if skin else ''), 'ckan.site_title = ' + self.name, 'ckan.site_logo =', 'ckan.auth.create_user_via_web = false', ] self.run_command(command=command, rw_project=True)
Use config-tool to update development.ini with our environment settings :param skin: use environment template skin plugin True/False
entailment
def create_install_template_skin(self): """ Create an example ckan extension for this environment and install it """ ckan_extension_template(self.name, self.target) self.install_package_develop('ckanext-' + self.name + 'theme')
Create an example ckan extension for this environment and install it
entailment
def ckan_db_init(self, retry_seconds=DB_INIT_RETRY_SECONDS):
        """
        Run db init to create all ckan tables

        :param retry_seconds: how long to retry waiting for db to start
        """
        # XXX workaround for not knowing how long we need to wait
        # for postgres to be ready. fix this by changing the postgres
        # entrypoint, or possibly running once with command=/bin/true
        started = time.time()
        while True:
            try:
                self.run_command(
                    '/usr/lib/ckan/bin/paster --plugin=ckan db init '
                    '-c /project/development.ini',
                    db_links=True,
                    clean_up=True,
                )
                break
            except WebCommandError:
                # Give up only once the retry window has elapsed
                if time.time() > started + retry_seconds:
                    raise
                time.sleep(DB_INIT_RETRY_DELAY)
Run db init to create all ckan tables :param retry_seconds: how long to retry waiting for db to start
entailment
def start_ckan(self, production=False, log_syslog=False, paster_reload=True, interactive=False): """ Start the apache server or paster serve :param log_syslog: A flag to redirect all container logs to host's syslog :param production: True for apache, False for paster serve + debug on :param paster_reload: Instruct paster to watch for file changes """ self.stop_ckan() address = self.address or '127.0.0.1' port = self.port # in prod we always use log_syslog driver log_syslog = True if self.always_prod else log_syslog production = production or self.always_prod # We only override the site URL with the docker URL on three conditions override_site_url = (self.address is None and not is_boot2docker() and not self.site_url) command = ['/scripts/web.sh', str(production), str(override_site_url), str(paster_reload)] # XXX nasty hack, remove this once we have a lessc command # for users (not just for building our preload image) if not production: css = self.target + '/ckan/ckan/public/base/css' if not exists(css + '/main.debug.css'): from shutil import copyfile copyfile(css + '/main.css', css + '/main.debug.css') ro = { self.target: '/project', scripts.get_script_path('datapusher.sh'): '/scripts/datapusher.sh' } if not is_boot2docker(): ro[self.datadir + '/venv'] = '/usr/lib/ckan' datapusher = self.needs_datapusher() if datapusher: run_container( self._get_container_name('datapusher'), 'datacats/web', '/scripts/datapusher.sh', ro=ro, volumes_from=(self._get_container_name('venv') if is_boot2docker() else None), log_syslog=log_syslog) while True: self._create_run_ini(port, production) try: self._run_web_container(port, command, address, log_syslog=log_syslog, datapusher=datapusher, interactive=interactive) if not is_boot2docker(): self.address = address except PortAllocatedError: port = self._next_port(port) continue break
Start the apache server or paster serve :param log_syslog: A flag to redirect all container logs to host's syslog :param production: True for apache, False for paster serve + debug on :param paster_reload: Instruct paster to watch for file changes
entailment
def _create_run_ini(self, port, production, output='development.ini',
                        source='development.ini', override_site_url=True):
        """
        Create run/development.ini in datadir with debug and site_url overridden
        and with correct db passwords inserted
        """
        cp = SafeConfigParser()
        try:
            cp.read([self.target + '/' + source])
        except ConfigParserError:
            raise DatacatsError('Error reading development.ini')

        cp.set('DEFAULT', 'debug', 'false' if production else 'true')

        if self.site_url:
            site_url = self.site_url
        else:
            if is_boot2docker():
                web_address = socket.gethostbyname(docker_host())
            else:
                web_address = self.address
            site_url = 'http://{}:{}'.format(web_address, port)

        if override_site_url:
            cp.set('app:main', 'ckan.site_url', site_url)

        cp.set('app:main', 'sqlalchemy.url',
               'postgresql://ckan:{0}@db:5432/ckan'
               .format(self.passwords['CKAN_PASSWORD']))
        cp.set('app:main', 'ckan.datastore.read_url',
               'postgresql://ckan_datastore_readonly:{0}@db:5432/ckan_datastore'
               .format(self.passwords['DATASTORE_RO_PASSWORD']))
        cp.set('app:main', 'ckan.datastore.write_url',
               'postgresql://ckan_datastore_readwrite:{0}@db:5432/ckan_datastore'
               .format(self.passwords['DATASTORE_RW_PASSWORD']))
        cp.set('app:main', 'solr_url', 'http://solr:8080/solr')
        cp.set('app:main', 'ckan.redis.url', 'redis://redis:6379')
        cp.set('app:main', 'beaker.session.secret',
               self.passwords['BEAKER_SESSION_SECRET'])

        if not isdir(self.sitedir + '/run'):
            makedirs(self.sitedir + '/run')  # upgrade old datadir
        with open(self.sitedir + '/run/' + output, 'w') as runini:
            cp.write(runini)
Create run/development.ini in datadir with debug and site_url overridden and with correct db passwords inserted
entailment
def _run_web_container(self, port, command, address, log_syslog=False, datapusher=True, interactive=False): """ Start web container on port with command """ if is_boot2docker(): ro = {} volumes_from = self._get_container_name('venv') else: ro = {self.datadir + '/venv': '/usr/lib/ckan'} volumes_from = None links = { self._get_container_name('solr'): 'solr', self._get_container_name('postgres'): 'db' } links.update({self._get_container_name(container): container for container in self.extra_containers}) if datapusher: if 'datapusher' not in self.containers_running(): raise DatacatsError(container_logs(self._get_container_name('datapusher'), "all", False, False)) links[self._get_container_name('datapusher')] = 'datapusher' ro = dict({ self.target: '/project/', scripts.get_script_path('web.sh'): '/scripts/web.sh', scripts.get_script_path('adjust_devini.py'): '/scripts/adjust_devini.py'}, **ro) rw = { self.sitedir + '/files': '/var/www/storage', self.sitedir + '/run/development.ini': '/project/development.ini' } try: if not interactive: run_container( name=self._get_container_name('web'), image='datacats/web', rw=rw, ro=ro, links=links, volumes_from=volumes_from, command=command, port_bindings={ 5000: port if is_boot2docker() else (address, port)}, log_syslog=log_syslog ) else: # FIXME: share more code with interactive_shell if is_boot2docker(): switches = ['--volumes-from', self._get_container_name('pgdata'), '--volumes-from', self._get_container_name('venv')] else: switches = [] switches += ['--volume={}:{}:ro'.format(vol, ro[vol]) for vol in ro] switches += ['--volume={}:{}'.format(vol, rw[vol]) for vol in rw] links = ['--link={}:{}'.format(link, links[link]) for link in links] args = ['docker', 'run', '-it', '--name', self._get_container_name('web'), '-p', '{}:5000'.format(port) if is_boot2docker() else '{}:{}:5000'.format(address, port)] + \ switches + links + ['datacats/web', ] + command subprocess.call(args) except APIError as e: if '409' in str(e): raise DatacatsError('Web container already running. ' 'Please stop_web before running.') else: raise
Start web container on port with command
entailment
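The interactive branch above hand-builds a docker run command line from the same ro/rw volume maps and link dict that the non-interactive path passes to run_container. A small hypothetical helper makes that translation explicit (the name docker_run_args and its signature are illustrative, not the project's API):

# Hypothetical helper: turn the ro/rw volume maps and link dict used
# above into `docker run` switches, mirroring the interactive branch.
def docker_run_args(name, image, command, ro, rw, links, address, port):
    switches = ['--volume={0}:{1}:ro'.format(h, c) for h, c in ro.items()]
    switches += ['--volume={0}:{1}'.format(h, c) for h, c in rw.items()]
    switches += ['--link={0}:{1}'.format(cname, alias)
                 for cname, alias in links.items()]
    return (['docker', 'run', '-it', '--name', name,
             '-p', '{0}:{1}:5000'.format(address, port)]
            + switches + [image] + command)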
def wait_for_web_available(self):
    """
    Wait for the web server to become available or raise DatacatsError
    if it fails to start.
    """
    try:
        if not wait_for_service_available(
                self._get_container_name('web'),
                self.web_address(),
                WEB_START_TIMEOUT_SECONDS):
            raise DatacatsError('Error while starting web container:\n' +
                                container_logs(
                                    self._get_container_name('web'),
                                    "all", False, None))
    except ServiceTimeout:
        raise DatacatsError('Timeout while starting web container. Logs:\n' +
                            container_logs(self._get_container_name('web'),
                                           "all", False, None))
Wait for the web server to become available or raise DatacatsError if it fails to start.
entailment
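wait_for_service_available itself is not shown in this excerpt. A plausible core of such a helper is a simple poll loop against the published address; this sketch checks TCP reachability only, which is weaker than whatever the real helper verifies, and it omits the container-name argument the real one takes:

# Plausible core of a wait-for-service helper: poll until the TCP port
# accepts connections or the deadline passes.
import socket
import time

def wait_for_port(host, port, timeout_seconds, interval=0.5):
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        try:
            socket.create_connection((host, port), timeout=interval).close()
            return True
        except socket.error:
            time.sleep(interval)
    return False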
def _choose_port(self): """ Return a port number from 5000-5999 based on the environment name to be used as a default when the user hasn't selected one. """ # instead of random let's base it on the name chosen (and the site name) return 5000 + unpack('Q', sha((self.name + self.site_name) .decode('ascii')).digest()[:8])[0] % 1000
Return a port number from 5000-5999 based on the environment name to be used as a default when the user hasn't selected one.
entailment
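A standalone worked version of the hashing above: the first 8 bytes of a SHA-1 digest are unpacked as an unsigned 64-bit integer and reduced modulo 1000, so a given environment/site name pair always maps to the same port in 5000-5999 on a given machine.

# Standalone version of the deterministic port choice above. The
# .encode('ascii') stands in for the source's Python 2 .decode('ascii');
# both just assert the name is plain ASCII before hashing.
from hashlib import sha1
from struct import unpack

def default_port(name, site_name):
    digest = sha1((name + site_name).encode('ascii')).digest()
    return 5000 + unpack('Q', digest[:8])[0] % 1000

print(default_port('myenv', 'primary'))  # stable across runs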
def _next_port(self, port): """ Return another port from the 5000-5999 range """ port = 5000 + (port + 1) % 1000 if port == self.port: raise DatacatsError('Too many instances running') return port
Return another port from the 5000-5999 range
entailment
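Because port is already in 5000-5999, (port + 1) % 1000 equals (port - 5000 + 1) % 1000, so the expression above walks the range and wraps 5999 back around to 5000:

# The wraparound arithmetic above, shown on the range edges.
for p in (5000, 5432, 5999):
    print('{0} -> {1}'.format(p, 5000 + (p + 1) % 1000))
# 5000 -> 5001
# 5432 -> 5433
# 5999 -> 5000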
def stop_ckan(self):
    """
    Stop and remove the web and datapusher containers
    """
    remove_container(self._get_container_name('web'), force=True)
    remove_container(self._get_container_name('datapusher'), force=True)
Stop and remove the web and datapusher containers
entailment
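remove_container above is expected to be idempotent, since either container may already be gone. A sketch of what its core could look like, assuming the pre-2.0 docker-py Client API (the project's actual wrapper is not shown here):

# Sketch of an idempotent forced removal, assuming pre-2.0 docker-py.
# A 404 means the container is already gone, which is fine here.
from docker import Client
from docker.errors import APIError

def remove_container_safe(name, force=False):
    client = Client()
    try:
        client.remove_container(name, force=force)
    except APIError as e:
        if '404' not in str(e):
            raise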
def _current_web_port(self):
    """
    Return just the port number for the web container, or None if not
    running
    """
    info = inspect_container(self._get_container_name('web'))
    if info is None:
        return None
    try:
        if not info['State']['Running']:
            return None
        return info['NetworkSettings']['Ports']['5000/tcp'][0]['HostPort']
    except (TypeError, KeyError):
        # stopped containers and missing port bindings surface as None
        # values or absent keys in the inspect data
        return None
Return just the port number for the web container, or None if not running
entailment
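The nested lookups above walk docker inspect output; when a container has no published binding, 'Ports' maps '5000/tcp' to None, and subscripting None raises TypeError, which is why that exception is caught. An abridged shape of the data (values illustrative):

# Abridged shape of the inspect data navigated above.
info = {
    'State': {'Running': True},
    'NetworkSettings': {
        'Ports': {'5000/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '5432'}]},
    },
}
print(info['NetworkSettings']['Ports']['5000/tcp'][0]['HostPort'])  # '5432'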
def add_extra_container(self, container, error_on_exists=False):
    """
    Add a container as an 'extra'. These are running containers which
    are not necessary for running default CKAN but are useful for
    certain extensions

    :param container: The container name to add
    :param error_on_exists: Raise a DatacatsError if the extra container
                            already exists.
    """
    if container in self.extra_containers:
        if error_on_exists:
            raise DatacatsError('{} is already added as an extra '
                                'container.'.format(container))
        else:
            return

    self.extra_containers.append(container)

    cp = SafeConfigParser()
    cp.read(self.target + '/.datacats-environment')
    cp.set('datacats', 'extra_containers', ' '.join(self.extra_containers))

    with open(self.target + '/.datacats-environment', 'w') as f:
        cp.write(f)
Add a container as an 'extra'. These are running containers which are not necessary for running default CKAN but are useful for certain extensions :param container: The container name to add :param error_on_exists: Raise a DatacatsError if the extra container already exists.
entailment
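The persistence format above is just a space-separated option in an ini-style file. A standalone round-trip of that encoding, using the same section and option names as the code (the helper names are illustrative):

# Standalone round-trip of the space-separated list persisted above
# (Python 2 stdlib, matching the source).
from ConfigParser import SafeConfigParser

def save_extras(path, extras):
    cp = SafeConfigParser()
    cp.read(path)
    if not cp.has_section('datacats'):
        cp.add_section('datacats')
    cp.set('datacats', 'extra_containers', ' '.join(extras))
    with open(path, 'w') as f:
        cp.write(f)

def load_extras(path):
    cp = SafeConfigParser()
    cp.read(path)
    if cp.has_option('datacats', 'extra_containers'):
        return cp.get('datacats', 'extra_containers').split()
    return []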
def web_address(self):
    """
    Return the url of the web server or None if not running
    """
    port = self._current_web_port()
    if port is None:
        return None
    address = self.address or '127.0.0.1'
    return 'http://{0}:{1}/'.format(
        docker_host() if is_boot2docker() else address,
        port)
Return the url of the web server or None if not running
entailment
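The host selection above, in isolation: under boot2docker the docker host's IP is used, otherwise the configured address with a loopback fallback. A parameterized sketch so the two branches are explicit (the function name and arguments are illustrative):

# The URL formation above, parameterized.
def format_web_address(port, address, on_boot2docker, docker_host_ip):
    if port is None:
        return None
    host = docker_host_ip if on_boot2docker else (address or '127.0.0.1')
    return 'http://{0}:{1}/'.format(host, port)

print(format_web_address(5432, None, False, '192.168.59.103'))
# http://127.0.0.1:5432/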
def create_admin_set_password(self, password):
    """
    Create the 'admin' account with the given password
    """
    with open(self.sitedir + '/run/admin.json', 'w') as out:
        json.dump({
            'name': 'admin',
            'email': 'none',
            'password': password,
            'sysadmin': True},
            out)
    self.user_run_script(
        script=scripts.get_script_path('update_add_admin.sh'),
        args=[],
        db_links=True,
        ro={
            self.sitedir + '/run/admin.json': '/input/admin.json'
        },
    )
    remove(self.sitedir + '/run/admin.json')
Create the 'admin' account with the given password
entailment
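The pattern above stages a secret in a temporary file, mounts it read-only into the container, and deletes it afterwards. A generic sketch of that write/mount/remove sequence; run_task stands in for user_run_script, and the try/finally cleanup is an added safeguard not present in the original:

# Generic write/mount/remove pattern from above. run_task is a
# hypothetical stand-in for user_run_script; the try/finally ensures
# the secret file is removed even if the task fails.
import json
from os import remove

def with_secret_json(path, payload, run_task):
    with open(path, 'w') as out:
        json.dump(payload, out)
    try:
        run_task(ro={path: '/input/admin.json'})
    finally:
        remove(path)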