_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q37800
WeatherReport.parse_xml_data
train
def parse_xml_data(self):
    """Parse `xml_data` and load it into object properties."""
    xml = self.xml_data
    self.raw_text = xml.find('raw_text').text
    self.station = WeatherStation(xml.find('station_id').text)
    self.station.latitude = float(xml.find('latitude').text)
    self.station.longitude = float(xml.find('longitude').text)
    # elevation_m is reported in metres; convert to feet.
    self.station.elevation = float(xml.find('elevation_m').text) * 3.28084
python
{ "resource": "" }
q37801
WeatherReportSet.download_data
train
def download_data(self, mock_response=None):
    """Load XML data into the `xml_data` attribute.

    If `mock_response` is given, it is used as the raw response body
    instead of fetching from the API URL.
    """
    if mock_response is None:
        body = urlopen(self.get_api_url()).read()
    else:
        body = mock_response
    root = ElementTree.fromstring(body)

    warnings_node = root.find('warnings')
    if len(warnings_node.attrib) != 0:
        print("Data warnings found: %s" % warnings_node.attrib)

    errors_node = root.find('errors')
    if len(errors_node.attrib) != 0:
        raise Exception("Data errors found: %s" % errors_node.attrib)

    self.xml_data = root.find('data')
python
{ "resource": "" }
q37802
sequenceCategoryLengths
train
def sequenceCategoryLengths(read, categories, defaultCategory=None,
                            suppressedCategory='...', minLength=1):
    """
    Summarize a read by mapping each base to a category and reporting the
    lengths of the contiguous category runs found along the sequence.

    @param read: A C{Read} instance or one of its subclasses.
    @param categories: A C{dict} mapping nucleotides or AAs to a category.
    @param defaultCategory: The category to use if a sequence base is not
        in C{categories}.
    @param suppressedCategory: The category used to report suppressed
        regions (stretches made up of runs shorter than C{minLength}).
    @param minLength: Runs of the read shorter than this C{int} length are
        summed and reported under C{suppressedCategory}.
    @raise ValueError: If C{minLength} is less than one.
    @return: A C{list} of (category, count) 2-C{tuple}s.
    """
    if minLength < 1:
        raise ValueError('minLength must be at least 1')

    summary = []
    lookup = categories.get
    runCategory = None
    runLength = 0
    suppressing = False
    suppressedTotal = 0
    started = False

    for residue in read.sequence:
        category = lookup(residue, defaultCategory)
        if not started:
            started = True
            runCategory = category
            runLength = 1
        elif category == runCategory:
            # Same category as the previous base: extend the current run.
            runLength += 1
        else:
            # Category changed: decide what to do with the finished run.
            if runLength < minLength:
                # Too short to emit: fold it into the suppressed region.
                if suppressing:
                    suppressedTotal += runLength
                else:
                    suppressedTotal = runLength
                    suppressing = True
            else:
                if suppressing:
                    summary.append((suppressedCategory, suppressedTotal))
                    suppressedTotal = 0
                    suppressing = False
                summary.append((runCategory, runLength))
            runCategory = category
            runLength = 1

    # Flush the final run.
    if suppressing:
        summary.append((suppressedCategory, suppressedTotal + runLength))
    elif runLength >= minLength:
        summary.append((runCategory, runLength))
    elif runLength:
        summary.append((suppressedCategory, runLength))

    return summary
python
{ "resource": "" }
q37803
simplifyTitle
train
def simplifyTitle(title, target):
    """
    Simplify a sequence title by truncating it at the first occurrence of
    C{target} (case insensitive) within any of its words.

    The result is a space-separated string of the title's words up to and
    including the word containing the target, with that word cut just
    after the target ends.

    E.g., simplifyTitle('Bovine polyomavirus DNA, complete genome',
    'virus') returns 'Bovine polyomavirus'.

    title: The string title of the sequence.
    target: The word in the title that we should stop at.
    """
    needle = target.lower()
    needleLen = len(target)
    kept = []
    for word in title.split():
        if len(word) >= needleLen:
            position = word.lower().find(needle)
            if position != -1:
                # Keep the word only up to the end of the target.
                kept.append(word[:position + needleLen])
                break
        kept.append(word)
    return ' '.join(kept)
python
{ "resource": "" }
q37804
DUTer._enableTracesVerilog
train
def _enableTracesVerilog(self, verilogFile): ''' Enables traces in a Verilog file''' fname, _ = os.path.splitext(verilogFile) inserted = False for _, line in enumerate(fileinput.input(verilogFile, inplace = 1)): sys.stdout.write(line) if line.startswith("end") and not inserted: sys.stdout.write('\n\n') sys.stdout.write('initial begin\n') sys.stdout.write(' $dumpfile("{}_cosim.vcd");\n'.format(fname)) sys.stdout.write(' $dumpvars(0, dut);\n') sys.stdout.write('end\n\n') inserted = True
python
{ "resource": "" }
q37805
Scheduler.add_task
train
def add_task(self, task):
    """
    Add a task to the scheduler.

    task: The task to add.
    """
    if not self._valid_name(task.name):
        raise ValueError(task.name)
    self._tasks[task.name] = task

    pending = set()
    for dep in task.dependencies:
        if not self._valid_name(dep) or dep in self._failed:
            # A bad or already-failed dependency fails this task (and any
            # tasks that may already depend on it).
            self._cascade_failure(task.name)
            break
        if dep not in self._completed:
            pending.add(dep)
    else:
        # Loop finished without a failing dependency.
        try:
            self._graph.add(task.name, pending)
        except ValueError:
            self._cascade_failure(task.name)
python
{ "resource": "" }
q37806
Scheduler.end_task
train
def end_task(self, name, success=True):
    """
    End a running task. Raises an exception if the task isn't running.

    name: The name of the task to complete.
    success: (optional, True) Whether the task was successful.
    """
    # Raises if the task was never in the running set.
    self._running.remove(name)
    if not success:
        self._cascade_failure(name)
    else:
        self._completed.add(name)
        self._graph.remove(name, strategy=Strategy.orphan)
python
{ "resource": "" }
q37807
main
train
def main(gi, ranges):
    """
    Print the features of the genbank entry given by gi. If ranges is
    non-empty, only print features that overlap one of the ranges.

    gi: either a hit from a BLAST record, in the form
        'gi|63148399|gb|DQ011818.1|' or a gi number (63148399 in this
        example).
    ranges: a possibly empty list of ranges to print information for. Each
        range is a non-descending (start, end) pair of integers.
    """
    # TODO: Make it so we can pass a 'db' argument to getSequence.
    record = getSequence(gi)

    if record is None:
        print("Looks like you're offline.")
        sys.exit(3)

    if not ranges:
        # No ranges: print every feature.
        for feature in record.features:
            print(feature)
        return

    printed = set()
    for (start, end) in ranges:
        for index, feature in enumerate(record.features):
            overlaps = (start < int(feature.location.end) and
                        end > int(feature.location.start))
            if overlaps and index not in printed:
                print(feature)
                printed.add(index)
python
{ "resource": "" }
q37808
create_instance
train
def create_instance(credentials, project, zone, name, startup_script=None,
                    startup_script_url=None, metadata=None,
                    machine_type='f1-micro', tags=None, disk_size_gb=10,
                    wait_until_done=False):
    """Create a GCE instance, optionally with a startup script.

    Parameters
    ----------
    credentials : object
        Credentials object providing ``get_access_token()``.
    project : str
        The GCP project ID.
    zone : str
        The GCE zone (e.g. ``us-east1-b``).
    name : str
        The name of the instance to create.
    startup_script : str, optional
        Startup script contents. Mutually exclusive with
        `startup_script_url`.
    startup_script_url : str, optional
        URL of a startup script. Mutually exclusive with `startup_script`.
    metadata : dict, optional
        Additional instance metadata key/value pairs.
    machine_type : str, optional
        GCE machine type. Defaults to 'f1-micro'.
    tags : list of str, optional
        Network tags for the instance.
    disk_size_gb : int, optional
        Boot disk size in gigabytes. Defaults to 10.
    wait_until_done : bool, optional
        If True, block until the create operation completes.

    Returns
    -------
    str
        The name of the zone operation that creates the instance.

    Raises
    ------
    ValueError
        If both `startup_script` and `startup_script_url` are given.
    """
    if startup_script is not None and startup_script_url is not None:
        raise ValueError('Cannot specify a startup script string and URL '
                         'at the same time!')

    access_token = credentials.get_access_token()

    if metadata is None:
        metadata = {}
    meta_items = [{'key': k, 'value': v} for k, v in metadata.items()]

    if tags is None:
        tags = []

    if startup_script is not None:
        meta_items.insert(
            0, {'key': 'startup-script', 'value': startup_script}
        )
    elif startup_script_url is not None:
        meta_items.insert(
            0, {'key': 'startup-script-url', 'value': startup_script_url})

    payload = {
        "name": name,
        "zone": "projects/%s/zones/%s" % (project, zone),
        "machineType": "projects/%s/zones/%s/machineTypes/%s"
                       % (project, zone, machine_type),
        "metadata": {
            "items": meta_items
        },
        "tags": {
            "items": tags
        },
        "disks": [
            {
                "type": "PERSISTENT",
                "boot": True,
                "mode": "READ_WRITE",
                "autoDelete": True,
                "deviceName": name,
                "initializeParams": {
                    "sourceImage":
                        "projects/ubuntu-os-cloud/global/images/"
                        "ubuntu-1604-xenial-v20170815a",
                    "diskType": "projects/%s/zones/%s/diskTypes/pd-standard"
                                % (project, zone),
                    "diskSizeGb": str(disk_size_gb)
                }
            }
        ],
        "canIpForward": False,
        "networkInterfaces": [
            {
                "network": "projects/%s/global/networks/default" % project,
                # Region is the zone minus its trailing "-x" suffix.
                "subnetwork": "projects/%s/regions/%s/subnetworks/default"
                              % (project, zone[:-2]),
                "accessConfigs": [
                    {
                        "name": "External NAT",
                        "type": "ONE_TO_ONE_NAT"
                    }
                ]
            }
        ],
        "description": "",
        "scheduling": {
            "preemptible": False,
            "onHostMaintenance": "MIGRATE",
            "automaticRestart": True
        },
        "serviceAccounts": [
            {
                "email": "default",
                "scopes": [
                    'https://www.googleapis.com/auth/compute',
                    "https://www.googleapis.com/auth/devstorage.read_write",
                    "https://www.googleapis.com/auth/logging.write",
                    "https://www.googleapis.com/auth/monitoring.write",
                    "https://www.googleapis.com/auth/servicecontrol",
                    "https://www.googleapis.com/auth/service.management.readonly",
                    "https://www.googleapis.com/auth/trace.append"
                ]
            }
        ]
    }

    headers = {
        'Authorization': 'Bearer %s' % access_token.access_token
    }

    # Use lazy %-style logging args to avoid formatting when disabled.
    _LOGGER.debug('Access token: %s', access_token.access_token)
    _LOGGER.debug('Payload: %s', json.dumps(payload, sort_keys=True, indent=4))

    r = requests.post('https://www.googleapis.com/compute/v1/'
                      'projects/%s/zones/%s/instances' % (project, zone),
                      headers=headers, json=payload)
    r.raise_for_status()
    op_name = r.json()['name']

    # Typo fix: "intsance" -> "instance".
    _LOGGER.info('Submitted request to create instance '
                 '(HTTP code: %d).', r.status_code)

    if wait_until_done:
        _LOGGER.info('Waiting until operation is done...')
        wait_for_zone_op(access_token, project, zone, op_name)

    return op_name
python
{ "resource": "" }
q37809
SAMFilter.referenceLengths
train
def referenceLengths(self):
    """
    Get the lengths of wanted references.

    @raise UnknownReference: If a reference id is not present in the
        SAM/BAM file.
    @return: A C{dict} of C{str} reference id to C{int} length with a key
        for each reference id in C{self.referenceIds} or for all
        references if C{self.referenceIds} is C{None}.
    """
    with samfile(self.filename) as sam:
        if not self.referenceIds:
            # No explicit ids: report every reference in the file.
            return dict(zip(sam.references, sam.lengths))
        lengths = {}
        for refId in self.referenceIds:
            tid = sam.get_tid(refId)
            if tid == -1:
                raise UnknownReference(
                    'Reference %r is not present in the SAM/BAM file.'
                    % refId)
            lengths[refId] = sam.lengths[tid]
        return lengths
python
{ "resource": "" }
q37810
main
train
def main(args=None):
    """Download all .sra from NCBI SRA for a given experiment ID.

    Parameters
    ----------
    args: argparse.Namespace object, optional
        The argument values. If not specified, the values will be obtained
        by parsing the command line arguments using the `argparse` module.

    Returns
    -------
    int
        Exit code (0 if no error occurred).
    """
    if args is None:
        # parse command-line arguments
        parser = get_argument_parser()
        args = parser.parse_args()

    experiment_file = args.experiment_file
    output_file = args.output_file

    host = 'ftp-trace.ncbi.nlm.nih.gov'
    user = 'anonymous'
    password = 'anonymous'

    experiments = misc.read_single(experiment_file)

    runs = []
    with ftputil.FTPHost(host, user, password) as ftp_host:
        for exp in experiments:
            # SRA layout: /ByExp/sra/SRX/<first 6 chars>/<experiment id>/
            exp_dir = '/sra/sra-instant/reads/ByExp/sra/SRX/%s/%s/' \
                      % (exp[:6], exp)
            ftp_host.chdir(exp_dir)
            run_folders = ftp_host.listdir(ftp_host.curdir)
            for folder in run_folders:
                files = ftp_host.listdir(folder)
                # Each run folder is expected to hold exactly one .sra file.
                assert len(files) == 1
                runs.append((exp, folder))

    # BUG FIX: csv.writer requires a text-mode file object in Python 3;
    # opening in 'wb' raised TypeError. Use text mode with newline='' and
    # let lineterminator control the line endings.
    with open(output_file, 'w', newline='') as ofh:
        writer = csv.writer(ofh, dialect='excel-tab',
                            lineterminator=os.linesep,
                            quoting=csv.QUOTE_NONE)
        for r in runs:
            writer.writerow(r)
    return 0
python
{ "resource": "" }
q37811
QueryManager.version
train
def version(self):
    """Version(s) of the UniProt knowledgebase.

    :returns: list of Version model instances
    :rtype: list
    """
    # .all() already returns a list; the original wrapped it in a
    # redundant list comprehension. Docstring also wrongly said dict.
    return self.session.query(models.Version).all()
python
{ "resource": "" }
q37812
IxePort.write
train
def write(self):
    """
    Write configuration to chassis.

    :raises StreamWarningsError: if configuration warnings are found.
    """
    self.ix_command('write')
    # Generate the warning list once (the original called it twice, once
    # for the value and once for the truthiness test).
    stream_warnings = self.streamRegion.generateWarningList()
    if stream_warnings:
        # The Tcl 'join' inserts a separator string we can split on.
        warnings_list = self.api.call(
            'join ' + ' {' + stream_warnings + '} ' + ' LiStSeP'
        ).split('LiStSeP')
    else:
        warnings_list = []
    for warning in warnings_list:
        if warning:
            raise StreamWarningsError(warning)
python
{ "resource": "" }
q37813
IxePort.load_config
train
def load_config(self, config_file_name):
    """ Load configuration file from prt or str.

    Configuration file type is extracted from the file suffix - prt or
    str.

    :param config_file_name: full path to the configuration file.
        IxTclServer must have access to the file location: either the
        config file is on a shared folder, or IxTclServer runs on the
        client machine.
    """
    config_file_name = config_file_name.replace('\\', '/')
    suffix = path.splitext(config_file_name)[-1].lower()
    if suffix == '.prt':
        self.api.call_rc('port import "{}" {}'.format(config_file_name, self.uri))
    elif suffix == '.str':
        # Stream files require resetting the port before import.
        self.reset()
        self.api.call_rc('stream import "{}" {}'.format(config_file_name, self.uri))
    else:
        raise ValueError('Configuration file type {} not supported.'.format(suffix))
    self.write()
    self.discover()
python
{ "resource": "" }
q37814
IxePort.save_config
train
def save_config(self, config_file_name):
    """ Save configuration file as prt or str.

    Configuration file type is extracted from the file suffix - prt or
    str.

    :param config_file_name: full path to the configuration file.
        IxTclServer must have access to the file location: either the
        config file is on a shared folder, or IxTclServer runs on the
        client machine.
    """
    config_file_name = config_file_name.replace('\\', '/')
    suffix = path.splitext(config_file_name)[-1].lower()
    if suffix == '.prt':
        self.api.call_rc('port export "{}" {}'.format(config_file_name, self.uri))
    elif suffix == '.str':
        self.api.call_rc('stream export "{}" {}'.format(config_file_name, self.uri))
    else:
        raise ValueError('Configuration file type {} not supported.'.format(suffix))
python
{ "resource": "" }
q37815
IxePort.start_transmit
train
def start_transmit(self, blocking=False):
    """ Start transmit on port.

    :param blocking: True - wait for traffic end, False - return after
        traffic start.
    """
    # Delegate to the session, which coordinates transmit across ports.
    self.session.start_transmit(blocking, False, self)
python
{ "resource": "" }
q37816
IxePort.stop_capture
train
def stop_capture(self, cap_file_name=None, cap_file_format=IxeCapFileFormat.mem):
    """ Stop capture on port.

    :param cap_file_name: prefix for the capture file name.
        Capture file will be saved as pcap file named 'prefix' +
        'URI'.pcap.
    :param cap_file_format: exported file format
    :return: number of captured frames
    """
    # The session returns a per-port mapping; extract this port's entry.
    results = self.session.stop_capture(cap_file_name, cap_file_format, self)
    return results[self]
python
{ "resource": "" }
q37817
IxePort.set_transmit_mode
train
def set_transmit_mode(self, mode):
    """ Set port transmit mode.

    :param mode: requested transmit mode
    :type mode: ixexplorer.ixe_port.IxeTransmitMode
    """
    command = 'port setTransmitMode {} {}'.format(mode, self.uri)
    self.api.call_rc(command)
python
{ "resource": "" }
q37818
get_template_context
train
def get_template_context(src, container="div", classes="", inner_classes="",
                         alt="", background_image=False, no_css=False,
                         aria_hidden=False):
    """Return a template context for a flexible image template tag
    implementation."""
    context = {
        "container": container,
        "classes": classes,
        "aspect_padding_bottom": aspect_ratio_percent(src),
        "alt": alt,
        "background_image": background_image,
        "no_css": no_css,
        "inner_classes": inner_classes,
        "aria_hidden": aria_hidden,
    }

    if not get_thumbnail_engine():
        # Without a thumbnail library we can't build a srcset (or the JS
        # switching fallback), so fall back to the raw source image.
        context["image"] = src
        return context

    sizes = get_image_sizes(src)
    # First image in the list is the one rendered initially
    # (pre-JS-fallback).
    context.update({
        "image_sizes": sizes,
        "image": sizes[0],
        "image_sizes_json": json.dumps(sizes),
        "image_sizes_srcset": ", ".join(
            "{} {}w".format(size["url"], size["width"]) for size in sizes),
    })
    return context
python
{ "resource": "" }
q37819
CrossCorr.fit
train
def fit(self, images, reference=None):
    """
    Estimate registration model using cross-correlation.

    Use cross correlation to compute displacements between images or
    volumes and reference. Displacements will be 2D for images and 3D
    for volumes.

    Parameters
    ----------
    images : array-like or thunder images
        The sequence of images / volumes to register.

    reference : array-like
        A reference image to align to.
    """
    images = check_images(images)
    reference = check_reference(images, reference)

    def estimate(item):
        key, image = item
        return asarray([key, self._get(image, reference)])

    transformations = images.map(estimate, with_keys=True).toarray()
    # A single image yields a bare record rather than a sequence.
    if images.shape[0] == 1:
        transformations = [transformations]

    return RegistrationModel(dict(transformations),
                             algorithm=self.__class__.__name__)
python
{ "resource": "" }
q37820
CrossCorr.fit_and_transform
train
def fit_and_transform(self, images, reference=None):
    """
    Estimate and apply registration model using cross-correlation.

    Use cross correlation to compute displacements between images or
    volumes and reference, and apply the estimated model to the data.
    Displacements will be 2D for images and 3D for volumes.

    Parameters
    ----------
    images : array-like or thunder images
        The sequence of images / volumes to register.

    reference : array-like
        A reference image to align to.
    """
    images = check_images(images)
    check_reference(images, reference)

    def register(image):
        # Estimate the displacement, then immediately apply it.
        return self._get(image, reference).apply(image)

    return images.map(register)
python
{ "resource": "" }
q37821
get_settings_from_environment
train
def get_settings_from_environment(environ):
    '''Deduce settings from DJANGO_-prefixed environment variables.

    :param environ: a mapping of environment variable names to string
        values (e.g. ``os.environ``).
    :returns: dict mapping setting name to the parsed value.
    '''
    settings = {}
    for name, value in environ.items():
        if not name.startswith('DJANGO_'):
            continue
        # Strip only the first occurrence of the prefix.
        name = name.replace('DJANGO_', '', 1)
        if _ignore_setting(name):
            continue
        try:
            settings[name] = ast.literal_eval(value)
        except (SyntaxError, ValueError) as err:
            # Logger.warn() is a deprecated alias; use warning() instead.
            LOGGER.warning("Unable to parse setting %s=%s (%s)",
                           name, value, err)
    return settings
python
{ "resource": "" }
q37822
filter_variance
train
def filter_variance(matrix, top):
    """Filter genes in an expression matrix by variance.

    Parameters
    ----------
    matrix: ExpMatrix
        The expression matrix.
    top: int
        The number of genes to retain.

    Returns
    -------
    ExpMatrix
        The filtered expression matrix.
    """
    assert isinstance(matrix, ExpMatrix)
    assert isinstance(top, (int, np.integer))

    if top >= matrix.p:
        logger.warning('Variance filter has no effect '
                       '("top" parameter is >= number of genes).')
        return matrix.copy()

    var = np.var(matrix.X, axis=1, ddof=1)
    total_var = var.sum()  # total sum of variance

    # Indices of genes sorted by decreasing variance.
    order = np.argsort(var)[::-1]
    keep = np.zeros(matrix.p, dtype=np.bool_)
    keep[order[:top]] = True

    lost_p = matrix.p - top
    lost_var = total_var - var[keep].sum()
    logger.info('Selected the %d most variable genes '
                '(excluded %.1f%% of genes, representing %.1f%% '
                'of total variance).',
                top, 100 * (lost_p / float(matrix.p)),
                100 * (lost_var / total_var))

    return matrix.loc[keep]
python
{ "resource": "" }
q37823
filter_mean
train
def filter_mean(matrix, top):
    """Filter genes in an expression matrix by mean expression.

    Parameters
    ----------
    matrix: ExpMatrix
        The expression matrix.
    top: int
        The number of genes to retain.

    Returns
    -------
    ExpMatrix
        The filtered expression matrix.
    """
    assert isinstance(matrix, ExpMatrix)
    # Accept numpy integer types too, for consistency with filter_variance.
    assert isinstance(top, (int, np.integer))

    if top >= matrix.p:
        logger.warning('Gene expression filter with `top` parameter that is '
                       '>= the number of genes!')
        top = matrix.p

    # Genes sorted by decreasing mean expression.
    order = np.argsort(np.mean(matrix.X, axis=1))[::-1]
    sel = np.zeros(matrix.p, dtype=np.bool_)
    sel[order[:top]] = True

    matrix = matrix.loc[sel]
    return matrix
python
{ "resource": "" }
q37824
filter_percentile
train
def filter_percentile(matrix, top, percentile=50):
    """Filter genes in an expression matrix by percentile expression.

    Parameters
    ----------
    matrix: ExpMatrix
        The expression matrix.
    top: int
        The number of genes to retain.
    percentile: int or float, optional
        The percentile to use. Defaults to the median (50th percentile).

    Returns
    -------
    ExpMatrix
        The filtered expression matrix.
    """
    assert isinstance(matrix, ExpMatrix)
    # Accept numpy integer types too, for consistency with filter_variance.
    assert isinstance(top, (int, np.integer))
    assert isinstance(percentile, (int, float))

    if top >= matrix.p:
        logger.warning('Gene expression filter with `top` parameter that is '
                       ' >= the number of genes!')
        top = matrix.p

    # Genes sorted by decreasing expression at the given percentile.
    order = np.argsort(np.percentile(matrix.X, percentile, axis=1))[::-1]
    sel = np.zeros(matrix.p, dtype=np.bool_)
    sel[order[:top]] = True

    matrix = matrix.loc[sel]
    return matrix
python
{ "resource": "" }
q37825
_transform_chrom
train
def _transform_chrom(chrom): """Helper function to obtain specific sort order.""" try: c = int(chrom) except: if chrom in ['X', 'Y']: return chrom elif chrom == 'MT': return '_MT' # sort to the end else: return '__' + chrom # sort to the very end else: # make sure numbered chromosomes are sorted numerically return '%02d' % c
python
{ "resource": "" }
q37826
get_chromosome_lengths
train
def get_chromosome_lengths(fasta_file, fancy_sort=True):
    """Extract chromosome lengths from a gzipped genome FASTA file."""
    entries = []
    with gzip.open(fasta_file, 'rt', encoding='ascii') as fh:
        for rec in SeqIO.parse(fh, 'fasta'):
            entries.append((rec.id, len(rec.seq)))
            _LOGGER.info('Processed chromosome "%s"...', rec.id)

    # Convert to a pandas Series indexed by chromosome name.
    chromlen = pd.Series(OrderedDict(entries))
    chromlen.index.name = 'Chromosome'
    chromlen.name = 'Length'

    if fancy_sort:
        # Sort chromosomes 1..22, then X, Y, MT, then everything else
        # (stable mergesort keeps ties in file order).
        sort_keys = chromlen.index.to_series().apply(_transform_chrom)
        chromlen = chromlen.iloc[sort_keys.argsort(kind='mergesort')]

    return chromlen
python
{ "resource": "" }
q37827
resolve_schema
train
def resolve_schema(schema):
    """Transform JSON schemas "allOf".

    This is the default schema resolver. It removes "allOf" keywords by
    recursively merging the sub-schemas as dictionaries. The process is
    completely custom and works only for simple JSON Schemas which use
    basic types (object, string, number, ...). Optional structures like
    "schema dependencies" or "oneOf" keywords are not supported.

    :param dict schema: the schema to resolve.
    :returns: the resolved schema

    .. note::

        The schema should have the ``$ref`` already resolved before
        running this method.
    """
    def _resolve(node):
        if not isinstance(node, dict):
            return node
        if 'allOf' in node:
            for sub_schema in node['allOf']:
                # A sub-schema title would clobber the parent's title.
                sub_schema.pop('title', None)
                node = _merge_dicts(node, sub_schema)
            node.pop('allOf')
            node = _resolve(node)
        elif 'properties' in node:
            for key in node.get('properties', []):
                node['properties'][key] = _resolve(node['properties'][key])
        elif 'items' in node:
            node['items'] = _resolve(node['items'])
        return node

    return _resolve(schema)
python
{ "resource": "" }
q37828
_merge_dicts
train
def _merge_dicts(first, second): """Merge the 'second' multiple-dictionary into the 'first' one.""" new = deepcopy(first) for k, v in second.items(): if isinstance(v, dict) and v: ret = _merge_dicts(new.get(k, dict()), v) new[k] = ret else: new[k] = second[k] return new
python
{ "resource": "" }
q37829
read_colorscale
train
def read_colorscale(cmap_file):
    """Return a colorscale in the format expected by plotly.

    Parameters
    ----------
    cmap_file : str
        Path of a plain-text, tab-separated file with four columns
        (x, r, g, b), one row per colorscale entry.

    Returns
    -------
    list
        The colorscale: a list of [x, "rgb(r, g, b)"] pairs, where x is a
        decimal number between 0 and 1 and r, g, b are integers between 0
        and 255.
    """
    assert isinstance(cmap_file, str)

    cm = np.loadtxt(cmap_file, delimiter='\t', dtype=np.float64)
    rgb = np.int64(cm[:, 1:])

    n = cm.shape[0]
    # Space the x values evenly on [0, 1], one entry per input row.
    return [
        [i / float(n - 1),
         'rgb(%d, %d, %d)' % (rgb[i, 0], rgb[i, 1], rgb[i, 2])]
        for i in range(n)
    ]
python
{ "resource": "" }
q37830
make_router
train
def make_router():
    """Return a WSGI application that searches requests to controllers """
    # The router is stored in a module-level global so other code can
    # reference it after construction.
    global router
    # (HTTP method, URL regex, controller callable) triples; regexes may
    # capture named groups that are passed to the controller.
    routings = [
        ('GET', '^/$', index),
        ('GET', '^/api/?$', index),
        ('POST', '^/api/1/calculate/?$', calculate.api1_calculate),
        ('GET', '^/api/2/entities/?$', entities.api2_entities),
        ('GET', '^/api/1/field/?$', field.api1_field),
        ('GET', '^/api/1/formula/(?P<name>[^/]+)/?$', formula.api1_formula),
        ('GET', '^/api/2/formula/(?:(?P<period>[A-Za-z0-9:-]*)/)?(?P<names>[A-Za-z0-9_+-]+)/?$', formula.api2_formula),
        ('GET', '^/api/1/parameters/?$', parameters.api1_parameters),
        ('GET', '^/api/1/reforms/?$', reforms.api1_reforms),
        ('POST', '^/api/1/simulate/?$', simulate.api1_simulate),
        ('GET', '^/api/1/swagger$', swagger.api1_swagger),
        ('GET', '^/api/1/variables/?$', variables.api1_variables),
    ]
    router = urls.make_router(*routings)
    return router
python
{ "resource": "" }
q37831
InvenioJSONSchemasState.register_schemas_dir
train
def register_schemas_dir(self, directory):
    """Recursively register all json-schemas in a directory.

    :param directory: directory path.
    :raises JSONSchemaDuplicate: if a schema with the same relative path
        was already registered from another directory.
    """
    for root, dirs, files in os.walk(directory):
        dir_path = os.path.relpath(root, directory)
        if dir_path == '.':
            dir_path = ''
        for file_ in files:
            # FIX: the original used endswith(('.json')) -- the parentheses
            # did not create a tuple, so it relied on the plain-string
            # overload by accident. Make the single suffix explicit.
            if file_.lower().endswith('.json'):
                schema_name = os.path.join(dir_path, file_)
                if schema_name in self.schemas:
                    raise JSONSchemaDuplicate(
                        schema_name,
                        self.schemas[schema_name],
                        directory
                    )
                self.schemas[schema_name] = os.path.abspath(directory)
python
{ "resource": "" }
q37832
InvenioJSONSchemasState.register_schema
train
def register_schema(self, directory, path):
    """Register a json-schema.

    :param directory: root directory path.
    :param path: schema path, relative to the root directory.
    """
    # Store the absolute root directory, keyed by the relative schema path.
    self.schemas[path] = os.path.abspath(directory)
python
{ "resource": "" }
q37833
InvenioJSONSchemasState.get_schema_dir
train
def get_schema_dir(self, path):
    """Retrieve the directory containing the given schema.

    :param path: Schema path, relative to the directory where it was
        registered.
    :raises invenio_jsonschemas.errors.JSONSchemaNotFound: If no schema
        was found in the specified path.
    :returns: The schema directory.
    """
    try:
        return self.schemas[path]
    except KeyError:
        raise JSONSchemaNotFound(path)
python
{ "resource": "" }
q37834
InvenioJSONSchemasState.get_schema_path
train
def get_schema_path(self, path):
    """Compute the schema's absolute path from a schema relative path.

    :param path: relative path of the schema.
    :raises invenio_jsonschemas.errors.JSONSchemaNotFound: If no schema
        was found in the specified path.
    :returns: The absolute path.
    """
    try:
        directory = self.schemas[path]
    except KeyError:
        raise JSONSchemaNotFound(path)
    # Join the registered root directory with the relative path.
    return os.path.join(directory, path)
python
{ "resource": "" }
q37835
InvenioJSONSchemasState.get_schema
train
def get_schema(self, path, with_refs=False, resolved=False):
    """Retrieve a schema.

    :param path: schema's relative path.
    :param with_refs: replace $refs in the schema.
    :param resolved: resolve schema using the resolver
        :py:const:`invenio_jsonschemas.config.JSONSCHEMAS_RESOLVER_CLS`
    :raises invenio_jsonschemas.errors.JSONSchemaNotFound: If no schema
        was found in the specified path.
    :returns: The schema in a dictionary form.
    """
    if path not in self.schemas:
        raise JSONSchemaNotFound(path)

    schema_path = os.path.join(self.schemas[path], path)
    with open(schema_path) as file_:
        schema = json.load(file_)

    if with_refs:
        loader = self.loader_cls() if self.loader_cls else None
        schema = JsonRef.replace_refs(
            schema,
            base_uri=request.base_url,
            loader=loader,
        )
    if resolved:
        schema = self.resolver_cls(schema)
    return schema
python
{ "resource": "" }
q37836
InvenioJSONSchemasState.url_to_path
train
def url_to_path(self, url):
    """Convert a schema URL to a registered schema path.

    :param url: The schema URL.
    :returns: The schema path or ``None`` if the schema can't be
        resolved.
    """
    parts = urlsplit(url)
    try:
        endpoint, args = self.url_map.bind(parts.netloc).match(parts.path)
    except HTTPException:
        return None
    path = args.get('path')
    if endpoint == 'schema' and path in self.schemas:
        return path
    # Implicitly returns None for unknown endpoints or paths.
python
{ "resource": "" }
q37837
InvenioJSONSchemasState.path_to_url
train
def path_to_url(self, path):
    """Build the complete URL for a registered schema path.

    :param path: relative path of the schema.
    :returns: The schema complete URL or ``None`` if not found.
    """
    if path not in self.schemas:
        return None
    adapter = self.url_map.bind(
        self.app.config['JSONSCHEMAS_HOST'],
        url_scheme=self.app.config['JSONSCHEMAS_URL_SCHEME'],
    )
    return adapter.build('schema', values={'path': path},
                         force_external=True)
python
{ "resource": "" }
q37838
InvenioJSONSchemasState.loader_cls
train
def loader_cls(self):
    """Loader class used in `JsonRef.replace_refs`."""
    cls = self.app.config['JSONSCHEMAS_LOADER_CLS']
    # The configured value may be a dotted import string; resolve it.
    if isinstance(cls, six.string_types):
        cls = import_string(cls)
    return cls
python
{ "resource": "" }
q37839
scatterAlign
train
def scatterAlign(seq1, seq2, window=7):
    """
    Visually align two sequences with a dot plot.

    Every exact-matching window of length `window` shared between `seq1`
    and `seq2` produces a point at (position in seq1, position in seq2).

    @param seq1: a C{str} sequence.
    @param seq2: a C{str} sequence.
    @param window: the C{int} window length used for exact matching.
    """
    d1 = defaultdict(list)
    d2 = defaultdict(list)
    for seq, section_dict in [(seq1, d1), (seq2, d2)]:
        # BUG FIX: iterate to len(seq) - window + 1 so the final window of
        # each sequence is included (the original dropped the last one).
        for i in range(len(seq) - window + 1):
            section_dict[seq[i:i + window]].append(i)

    matches = set(d1).intersection(d2)
    print('%i unique matches' % len(matches))

    x = []
    y = []
    for section in matches:
        for i in d1[section]:
            for j in d2[section]:
                x.append(i)
                y.append(j)

    plt.gray()
    plt.scatter(x, y)
    plt.xlim(0, len(seq1) - window)
    plt.ylim(0, len(seq2) - window)
    plt.xlabel('length %i bp' % (len(seq1)))
    plt.ylabel('length %i bp' % (len(seq2)))
    plt.title('Dot plot using window size %i\n(allowing no mis-matches)'
              % window)
    plt.show()
python
{ "resource": "" }
q37840
plotAAProperties
train
def plotAAProperties(sequence, propertyNames, showLines=True, showFigure=True):
    """
    Plot amino acid property values for a sequence.

    @param sequence: An C{AARead} (or a subclass) instance.
    @param propertyNames: An iterable of C{str} property names (each of
        which must be a key of a key in the C{dark.aa.PROPERTY_DETAILS}
        C{dict}).
    @param showLines: If C{True}, join successive AA property values with
        lines. If not, draw a scatter plot (which greatly reduces visual
        clutter for long, variable sequences).
    @param showFigure: If C{True}, display the plot. Passing C{False} is
        useful in testing.
    @raise ValueError: If an unknown property is given in C{propertyNames}.
    @return: The C{dict} returned by dark.aa.propertiesForSequence, keyed
        by (lowercase) property name, mapping to a C{list} of per-position
        property values.
    """
    MISSING_AA_VALUE = -1.1
    propertyValues = propertiesForSequence(sequence, propertyNames,
                                           missingAAValue=MISSING_AA_VALUE)
    if showFigure:
        handles = []
        positions = np.arange(0, len(sequence))
        draw = plt.plot if showLines else plt.scatter
        for index, name in enumerate(propertyValues):
            color = TABLEAU20[index]
            draw(positions, propertyValues[name], color=color)
            handles.append(patches.Patch(color=color, label=name))
        plt.legend(handles=handles, loc=(0, 1.1))
        plt.xlim(-0.2, len(sequence) - 0.8)
        plt.ylim(min(MISSING_AA_VALUE, -1.1), 1.1)
        plt.xlabel('Sequence index')
        plt.ylabel('Property value')
        plt.title(sequence.id)
        plt.show()
    return propertyValues
python
{ "resource": "" }
q37841
plotAAClusters
train
def plotAAClusters(sequence, propertyNames, showLines=True, showFigure=True):
    """
    Plot amino acid property cluster numbers for a sequence.

    @param sequence: An C{AARead} (or a subclass) instance.
    @param propertyNames: An iterable of C{str} property names (each of
        which must be a key of a key in the C{dark.aa.PROPERTY_CLUSTERS}
        C{dict}).
    @param showLines: If C{True}, join successive cluster numbers with
        lines. If not, draw a scatter plot.
    @param showFigure: If C{True}, display the plot. Passing C{False} is
        useful in testing.
    @raise ValueError: If an unknown property is given in C{propertyNames}.
    @return: The C{dict} returned by dark.aa.clustersForSequence, keyed
        by (lowercase) property name, mapping to a C{list} of per-position
        cluster numbers.
    """
    MISSING_AA_VALUE = 0
    propertyClusters = clustersForSequence(sequence, propertyNames,
                                           missingAAValue=MISSING_AA_VALUE)
    if showFigure:
        minCluster = 1
        maxCluster = -1
        handles = []
        positions = np.arange(0, len(sequence))
        draw = plt.plot if showLines else plt.scatter
        for index, name in enumerate(propertyClusters):
            color = TABLEAU20[index]
            clusterNumbers = propertyClusters[name]
            draw(positions, clusterNumbers, color=color)
            handles.append(patches.Patch(color=color, label=name))
            # Track the overall cluster range so the y axis fits all series.
            minCluster = min(minCluster, min(clusterNumbers))
            maxCluster = max(maxCluster, max(clusterNumbers))
        plt.legend(handles=handles, loc=(0, 1.1))
        plt.xlim(-0.2, len(sequence) - 0.8)
        plt.ylim(minCluster - 0.5, maxCluster + 0.5)
        plt.yticks(range(maxCluster + 1))
        plt.xlabel('Sequence index')
        plt.ylabel('Property cluster number')
        plt.title(sequence.id)
        plt.show()
    return propertyClusters
python
{ "resource": "" }
q37842
task_loop
train
def task_loop(tasks, execute, wait=None, store=None):
    """
    The inner task loop for a task runner.

    tasks: The tasks to schedule and run.

    execute: A function that runs a task's work. It is called with the
        argument-bound callable and the task name, and may optionally
        return a TaskResult: a synchronous executor returns one directly,
        while an asynchronous executor returns None and delivers results
        via ``wait``.

    wait: (optional, None) A function to run whenever there aren't any
        runnable tasks (but there are still tasks listed as running). If
        given, this function should take no arguments, and should return
        an iterable of TaskResults.

    store: (optional, None) The TaskStore used to pass results between
        dependent tasks. A fresh store is created per call when omitted.
        (Previously the default was the mutable ``TaskStore()``, which
        silently shared one store — and its stale results — across every
        call of this function.)
    """
    if store is None:
        store = TaskStore()

    completed = set()
    failed = set()
    exceptions = []

    def collect(task):
        # Resolve a task's arguments: Task-typed arguments are replaced
        # by their stored results; everything else passes through as-is.
        args = []
        kwargs = {}
        for arg in task.args:
            if isinstance(arg, Task):
                args.append(store.get(arg.name))
            else:
                args.append(arg)
        for key in task.kwargs:
            if isinstance(task.kwargs[key], Task):
                kwargs[key] = store.get(task.kwargs[key].name)
            else:
                kwargs[key] = task.kwargs[key]
        return args, kwargs

    def complete(scheduler, result):
        # Record a finished task's result and inform the scheduler.
        store.put(result.name, result.data)
        scheduler.end_task(result.name, result.successful)
        if result.exception:
            exceptions.append(result.exception)

    with Scheduler(tasks, completed=completed, failed=failed) as scheduler:
        while not scheduler.is_finished():
            task = scheduler.start_task()
            while task is not None:
                # Collect any dependent results
                args, kwargs = collect(task)
                func = partial(task.function, *args, **kwargs)
                if task.handler:
                    func = partial(task.handler, func)
                result = execute(func, task.name)
                # result exists iff execute is synchronous
                if result:
                    complete(scheduler, result)
                task = scheduler.start_task()
            if wait:
                for result in wait():
                    complete(scheduler, result)

    # TODO: if in debug mode print out all failed tasks?
    return Results(completed, failed, exceptions)
python
{ "resource": "" }
q37843
findOrDie
train
def findOrDie(s):
    """
    Look up an amino acid, exiting the program if it is unknown.

    @param s: A C{str} amino acid specifier. This may be a full name, a
        3-letter abbreviation or a 1-letter abbreviation. Case is ignored.
    @return: An C{AminoAcid} instance, if one can be found. Otherwise an
        error is printed to standard error and the process exits with
        status 1.
    """
    aa = find(s)
    if not aa:
        print('Unknown amino acid or codon: %s' % s, file=sys.stderr)
        print('Valid arguments are: %s.' % list(CODONS.keys()),
              file=sys.stderr)
        sys.exit(1)
    return aa
python
{ "resource": "" }
q37844
ftp_download
train
def ftp_download(url, download_file, if_exists='error', user_name='anonymous',
                 password='', blocksize=4194304):
    """Downloads a file from an FTP server.

    Parameters
    ----------
    url : str
        The URL of the file to download.
    download_file : str
        The path of the local file to download to.
    if_exists : str, optional
        Desired behavior when the download file already exists. One of:
        'error' - Raise an OSError
        'skip' - Do nothing, only report a warning.
        'overwrite' - Overwrite the file, reporting a warning.
        Default: 'error'.
    user_name : str, optional
        The user name to use for logging into the FTP server. ['anonymous']
    password : str, optional
        The password to use for logging into the FTP server. ['']
    blocksize : int, optional
        The blocksize (in bytes) to use for downloading. [4194304]

    Returns
    -------
    None
    """
    assert isinstance(url, (str, _oldstr))
    assert isinstance(download_file, (str, _oldstr))
    assert isinstance(if_exists, (str, _oldstr))
    assert isinstance(user_name, (str, _oldstr))
    assert isinstance(password, (str, _oldstr))

    u = urlparse.urlparse(url)
    assert u.scheme == 'ftp'

    if if_exists not in ['error', 'skip', 'overwrite']:
        # Bug fix: the message is now actually %-formatted. It was
        # previously passed as a *second* ValueError argument, so the
        # "%s" placeholder was never filled in.
        raise ValueError('"if_exists" must be "error", "skip", or "overwrite" '
                         '(was: "%s").' % str(if_exists))

    if os.path.isfile(download_file):
        if if_exists == 'error':
            raise OSError('File "%s" already exists.' % download_file)
        elif if_exists == 'skip':
            _logger.warning('File "%s" already exists. Skipping...',
                            download_file)
            return
        else:
            _logger.warning('Overwriting file "%s"...', download_file)

    ftp_server = u.netloc
    ftp_path = u.path

    if six.PY3:
        # Python 3's FTP is a context manager, so the connection is closed
        # even if the transfer fails.
        with ftplib.FTP(ftp_server) as ftp:
            ftp.login(user_name, password)
            with open(download_file, 'wb') as ofh:
                ftp.retrbinary('RETR %s' % ftp_path, callback=ofh.write,
                               blocksize=blocksize)
    else:
        ftp = ftplib.FTP(ftp_server)
        ftp.login(user_name, password)
        with open(download_file, 'wb') as ofh:
            ftp.retrbinary('RETR %s' % ftp_path, callback=ofh.write,
                           blocksize=blocksize)
        ftp.close()

    _logger.info('Downloaded file "%s" over FTP.', download_file)
python
{ "resource": "" }
q37845
get_cdna_url
train
def get_cdna_url(species, release=None, ftp=None):
    """Returns the URL for a cDNA file hosted on the Ensembl FTP server.

    Parameters
    ----------
    species: str
        The scientific name of the species. It should be all lower-case,
        and the genus and species parts should be separated by an
        underscore (e.g., "homo_sapiens").
    release: int or ``None``, optional
        The Ensembl release number. If ``None``, the latest release is
        used. [None]
    ftp: ftplib.FTP or ``None``, optional
        The FTP connection. If ``None``, create a new connection. [None]

    Returns
    -------
    str
        The "ftp://" URL of the cDNA FASTA file.

    Raises
    ------
    ValueError
        If the species is not found on the server.
    """
    # type checks
    assert isinstance(species, (str, _oldstr))
    if release is not None:
        assert isinstance(release, int)
    if ftp is not None:
        assert isinstance(ftp, ftplib.FTP)

    # The server name is needed for building URLs below even when the
    # caller supplies an open connection. (Previously it was only defined
    # when a new connection was opened, causing a NameError otherwise.)
    ftp_server = 'ftp.ensembl.org'

    # open FTP connection, if necessary
    close_connection = False
    if ftp is None:
        ftp = ftplib.FTP(ftp_server)
        ftp.login('anonymous')
        close_connection = True

    # determine latest release, if necessary
    if release is None:
        release = util.get_latest_release(ftp=ftp)

    # check if species exists
    fasta_dir = '/pub/release-%d/fasta' % release
    ftp.cwd(fasta_dir)
    if species not in ftp.nlst():
        logger.error('Species "%s" not found on Ensembl FTP server.', species)
        fasta_url = 'ftp://%s%s' % (ftp_server, fasta_dir)
        raise ValueError('Species "%s" not found. '
                         'See %s for a list of species available.'
                         % (species, fasta_url))

    # determine URL of the cdna file
    # (file names are not consistent across species)
    cdna_dir = '/pub/release-%d/fasta/%s/cdna' % (release, species)
    ftp.cwd(cdna_dir)
    files = ftp.nlst()
    cdna_file = [f for f in files if f.endswith('.cdna.all.fa.gz')][0]
    cdna_url = 'ftp://%s%s/%s' % (ftp_server, cdna_dir, cdna_file)

    # close FTP connection, if we opened it
    if close_connection:
        ftp.close()

    return cdna_url
python
{ "resource": "" }
q37846
GeneOntology.write_pickle
train
def write_pickle(self, path, compress=False):
    """Serialize this object and store it in a pickle file.

    Parameters
    ----------
    path: str
        Path of the output file.
    compress: bool, optional
        Whether to compress the file using gzip.

    Returns
    -------
    None

    Notes
    -----
    Compression with gzip is significantly slower than storing the file
    in uncompressed form.
    """
    logger.info('Writing pickle to "%s"...', path)
    opener = gzip.open if compress else open
    with opener(path, 'wb') as ofh:
        pickle.dump(self, ofh, pickle.HIGHEST_PROTOCOL)
python
{ "resource": "" }
q37847
GeneOntology.read_pickle
train
def read_pickle(fn):
    """Load a GOParser object from a (possibly gzip'ed) pickle file.

    Compression is detected automatically by the underlying opener.

    Parameters
    ----------
    fn: str
        Path of the pickle file.

    Returns
    -------
    `GOParser`
        The object stored in the pickle file.
    """
    with misc.open_plain_or_gzip(fn, 'rb') as fh:
        return pickle.load(fh)
python
{ "resource": "" }
q37848
GeneOntology._flatten_descendants
train
def _flatten_descendants(self, include_parts=True):
    """Determine and store the full descendant set of each GO term.

    Parameters
    ----------
    include_parts: bool, optional
        Whether to also follow ``part_of`` relations when collecting
        descendants.

    Returns
    -------
    None
    """
    def collect(term):
        # Recursively gather the ids of all children (and, optionally,
        # parts) reachable from ``term``.
        found = set()
        related = list(term.children)
        if include_parts:
            related += list(term.parts)
        for id_ in related:
            found.add(id_)
            found.update(collect(self[id_]))
        return found

    for term in self:
        term.descendants = collect(term)
python
{ "resource": "" }
q37849
InstanceConfig.wait_for_instance_deletion
train
def wait_for_instance_deletion(self, credentials, name, **kwargs):
    """Wait until the named instance has been deleted.

    Delegates to the module-level ``wait_for_instance_deletion`` helper,
    filling in this configuration's project and zone.

    :param credentials: Credentials object passed through to the helper.
    :param name: Name of the instance whose deletion to wait for.
    :param kwargs: Additional keyword arguments forwarded to the helper.
    :returns: The operation name returned by the helper.
    """
    op_name = wait_for_instance_deletion(
        credentials, self.project, self.zone, name, **kwargs)
    return op_name
python
{ "resource": "" }
q37850
get_cytoband_names
train
def get_cytoband_names():
    """Returns the names of available cytoband data files

    >> get_cytoband_names()
    ['ucsc-hg38', 'ucsc-hg19']
    """
    names = []
    for entry in pkg_resources.resource_listdir(__name__, _data_dir):
        if entry.endswith(".json.gz"):
            names.append(entry.replace(".json.gz", ""))
    return names
python
{ "resource": "" }
q37851
get_cytoband_map
train
def get_cytoband_map(name):
    """Fetch one cytoband map by name

    >>> map = get_cytoband_map("ucsc-hg38")
    >>> map["1"]["p32.2"]
    [55600000, 58500000, 'gpos50']
    """
    fn = pkg_resources.resource_filename(
        __name__, _data_path_fmt.format(name=name))
    # Use a context manager so the gzip handle is closed promptly
    # (previously it was left open until garbage collection).
    with gzip.open(fn, mode="rt", encoding="utf-8") as fh:
        return json.load(fh)
python
{ "resource": "" }
q37852
get_cytoband_maps
train
def get_cytoband_maps(names=None):
    """Load cytoband maps; by default, all that are available.

    >>> maps = get_cytoband_maps()
    >>> maps["ucsc-hg38"]["1"]["p32.2"]
    [55600000, 58500000, 'gpos50']
    >>> maps["ucsc-hg19"]["1"]["p32.2"]
    [56100000, 59000000, 'gpos50']
    """
    # ``names`` previously defaulted to the mutable ``[]``; ``None`` (or
    # an empty iterable) now selects every available map, preserving the
    # old behavior for all existing callers.
    if not names:
        names = get_cytoband_names()
    return {name: get_cytoband_map(name) for name in names}
python
{ "resource": "" }
q37853
ExpProfile.filter_genes
train
def filter_genes(self, gene_names: Iterable[str]):
    """Filter the expression profile against a set of genes.

    Parameters
    ----------
    gene_names: list of str
        The gene names to keep.

    Returns
    -------
    ExpMatrix
        The filtered expression profile (rows restricted to the
        intersection of the current index and ``gene_names``).
    """
    shared = self.index & gene_names
    return self.loc[shared]
python
{ "resource": "" }
q37854
ExpProfile.read_tsv
train
def read_tsv(cls, filepath_or_buffer: str, gene_table: ExpGeneTable = None,
             encoding='UTF-8'):
    """Read an expression profile from a tab-delimited text file.

    Parameters
    ----------
    filepath_or_buffer: str
        The path (or buffer) of the text file.
    gene_table: `ExpGeneTable` object, optional
        The set of valid genes. If given, the genes read from the file
        are filtered against this set. (None)
    encoding: str, optional
        The file encoding. ("UTF-8")

    Returns
    -------
    `ExpProfile`
        The expression profile.
    """
    # squeeze=True collapses a single-column frame into a Series.
    series = pd.read_csv(filepath_or_buffer, sep='\t', index_col=0,
                         header=0, encoding=encoding, squeeze=True)
    profile = cls(series)
    if gene_table is not None:
        # filter genes
        profile = profile.filter_genes(gene_table.gene_names)
    return profile
python
{ "resource": "" }
q37855
run_fastqc
train
def run_fastqc(credentials, instance_config, instance_name, script_dir,
               input_file, output_dir, self_destruct=True, **kwargs):
    """Launch an instance whose startup script runs FASTQC.

    Renders the ``fastqc.sh`` template into a startup script, validates
    its size, and creates the instance.

    :returns: The operation name from ``create_instance``.
    """
    startup_script = _TEMPLATE_ENV.get_template('fastqc.sh').render(
        script_dir=script_dir,
        input_file=input_file,
        output_dir=output_dir,
        self_destruct=self_destruct)
    if len(startup_script) > 32768:
        raise ValueError('Startup script larger than 32,768 bytes!')
    return instance_config.create_instance(
        credentials, instance_name, startup_script=startup_script, **kwargs)
python
{ "resource": "" }
q37856
sra_download_paired_end
train
def sra_download_paired_end(credentials, instance_config, instance_name,
                            script_dir, sra_run_acc, output_dir, **kwargs):
    """Launch an instance that downloads paired-end reads from SRA and
    converts them to gzip'ed FASTQ files.

    :returns: The operation name from ``create_instance`` (previously the
        value was silently discarded; returning it makes this function
        consistent with ``run_fastqc``).
    """
    template = _TEMPLATE_ENV.get_template('sra_download_paired-end.sh')
    startup_script = template.render(
        script_dir=script_dir,
        sra_run_acc=sra_run_acc,
        output_dir=output_dir)
    # Same size guard as run_fastqc applies to the rendered script.
    if len(startup_script) > 32768:
        raise ValueError('Startup script larger than 32,768 bytes!')
    op_name = instance_config.create_instance(
        credentials, instance_name, startup_script=startup_script, **kwargs)
    return op_name
python
{ "resource": "" }
q37857
_runshell
train
def _runshell(cmd, exception):
    """
    Run a shell command. if fails, raise a proper exception.

    :param cmd: Argument list for the command (no shell involved).
    :param exception: Message used for the ``BridgeException`` raised on a
        non-zero exit status.
    :returns: The finished ``Popen`` object, so callers can read stdout.

    NOTE(review): ``p.wait()`` with PIPEd stdout/stderr can deadlock if
    the command emits more output than the pipe buffer holds — confirm
    brctl/ip output stays small, or migrate callers to ``communicate()``.
    """
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if p.wait() != 0:
        raise BridgeException(exception)
    return p
python
{ "resource": "" }
q37858
Bridge.addif
train
def addif(self, iname):
    """ Attach the network interface ``iname`` to this bridge. """
    cmd = [brctlexe, 'addif', self.name, iname]
    _runshell(cmd, "Could not add interface %s to %s." % (iname, self.name))
python
{ "resource": "" }
q37859
Bridge.delif
train
def delif(self, iname):
    """ Detach the network interface ``iname`` from this bridge. """
    cmd = [brctlexe, 'delif', self.name, iname]
    _runshell(cmd, "Could not delete interface %s from %s." %
              (iname, self.name))
python
{ "resource": "" }
q37860
Bridge.setageing
train
def setageing(self, time):
    """ Set the bridge's MAC ageing time (seconds). """
    cmd = [brctlexe, 'setageing', self.name, str(time)]
    _runshell(cmd, "Could not set ageing time in %s." % self.name)
python
{ "resource": "" }
q37861
Bridge.setbridgeprio
train
def setbridgeprio(self, prio):
    """ Set the bridge's STP priority value. """
    cmd = [brctlexe, 'setbridgeprio', self.name, str(prio)]
    _runshell(cmd, "Could not set bridge priority in %s." % self.name)
python
{ "resource": "" }
q37862
Bridge.setfd
train
def setfd(self, time):
    """ Set the bridge's forward delay time value. """
    cmd = [brctlexe, 'setfd', self.name, str(time)]
    _runshell(cmd, "Could not set forward delay in %s." % self.name)
python
{ "resource": "" }
q37863
Bridge.sethello
train
def sethello(self, time):
    """ Set the bridge's STP hello time value. """
    cmd = [brctlexe, 'sethello', self.name, str(time)]
    _runshell(cmd, "Could not set hello time in %s." % self.name)
python
{ "resource": "" }
q37864
Bridge.setmaxage
train
def setmaxage(self, time):
    """ Set the bridge's maximum message age time. """
    cmd = [brctlexe, 'setmaxage', self.name, str(time)]
    _runshell(cmd, "Could not set max message age in %s." % self.name)
python
{ "resource": "" }
q37865
Bridge.setpathcost
train
def setpathcost(self, port, cost):
    """ Set a port's path cost value for the STP protocol. """
    cmd = [brctlexe, 'setpathcost', self.name, port, str(cost)]
    _runshell(cmd, "Could not set path cost in port %s in %s." %
              (port, self.name))
python
{ "resource": "" }
q37866
Bridge.setportprio
train
def setportprio(self, port, prio):
    """ Set a port's STP priority value. """
    cmd = [brctlexe, 'setportprio', self.name, port, str(prio)]
    _runshell(cmd, "Could not set priority in port %s in %s." %
              (port, self.name))
python
{ "resource": "" }
q37867
Bridge._show
train
def _show(self):
    """ Return this bridge's raw `brctl show` fields, unsorted. """
    p = _runshell([brctlexe, 'show', self.name],
                  "Could not show %s." % self.name)
    # Skip the 7 header tokens emitted by brctl before the bridge data.
    output = p.stdout.read()
    return output.split()[7:]
python
{ "resource": "" }
q37868
BridgeController.addbr
train
def addbr(self, name):
    """ Create a bridge, bring its device up, and return it. """
    _runshell([brctlexe, 'addbr', name],
              "Could not create bridge %s." % name)
    _runshell([ipexe, 'link', 'set', 'dev', name, 'up'],
              "Could not set link up for %s." % name)
    return Bridge(name)
python
{ "resource": "" }
q37869
BridgeController.delbr
train
def delbr(self, name):
    """ Bring the device down and delete the bridge. """
    # getbr raises BridgeException if the bridge does not exist.
    self.getbr(name)
    _runshell([ipexe, 'link', 'set', 'dev', name, 'down'],
              "Could not set link down for %s." % name)
    _runshell([brctlexe, 'delbr', name],
              "Could not delete bridge %s." % name)
python
{ "resource": "" }
q37870
BridgeController.showall
train
def showall(self):
    """ Return an iterable of all available bridges. """
    p = _runshell([brctlexe, 'show'], "Could not show bridges.")
    # Drop the header line; rows with a single field are interface
    # continuation lines, not bridges.
    rows = (line.split() for line in p.stdout.read().splitlines()[1:])
    names = (row[0] for row in rows if len(row) != 1)
    return map(Bridge, names)
python
{ "resource": "" }
q37871
BridgeController.getbr
train
def getbr(self, name):
    """ Return the bridge object named ``name``; raise if absent. """
    for bridge in self.showall():
        if bridge.name == name:
            return bridge
    raise BridgeException("Bridge does not exist.")
python
{ "resource": "" }
q37872
Field.clean
train
def clean(self, value):
    """Validate and normalize a raw value for this field.

    Raises ValidationError for a type mismatch or a missing required
    value; raises StopValidation to short-circuit with the default or
    blank value when no value was supplied.
    """
    if (self.base_type is not None and value is not None
            and not isinstance(value, self.base_type)):
        if isinstance(self.base_type, tuple):
            type_names = ' or '.join(t.__name__ for t in self.base_type)
        else:
            type_names = self.base_type.__name__
        raise ValidationError('Value must be of %s type.' % type_names)

    if not self.has_value(value):
        if self.default is not None:
            raise StopValidation(self.default)
        if self.required:
            raise ValidationError('This field is required.')
        raise StopValidation(self.blank_value)

    return value
python
{ "resource": "" }
q37873
EmbeddedReference.clean_new
train
def clean_new(self, value):
    """Return a new object instantiated with cleaned data."""
    cleaned = self.schema_class(value).full_clean()
    return self.object_class(**cleaned)
python
{ "resource": "" }
q37874
EmbeddedReference.clean_existing
train
def clean_existing(self, value):
    """Clean the data and return the referenced existing document with
    its fields updated from the cleaned values.
    """
    pk = value[self.pk_field]
    try:
        obj = self.fetch_existing(pk)
    except ReferenceNotFoundError:
        raise ValidationError('Object does not exist.')

    orig_data = self.get_orig_data_from_existing(obj)

    # Validate the incoming dict against the schema, giving the schema
    # access to the object's original data.
    cleaned = self.schema_class(value, orig_data).full_clean()

    # Copy every cleaned field onto the object, leaving the pk untouched.
    for name, val in cleaned.items():
        if name != self.pk_field:
            setattr(obj, name, val)
    return obj
python
{ "resource": "" }
q37875
Schema.get_fields
train
def get_fields(cls):
    """
    Return a mapping of field name to Field instance for this schema.

    A field's ``field_name`` attribute, when set, overrides the class
    attribute name it was declared under.
    """
    fields = {}
    for attr_name in dir(cls):
        attr = getattr(cls, attr_name)
        if isinstance(attr, Field):
            fields[attr.field_name or attr_name] = attr
    return fields
python
{ "resource": "" }
q37876
Schema.obj_to_dict
train
def obj_to_dict(cls, obj):
    """
    Convert a model object into a dictionary suitable for passing to the
    constructor's data attribute.
    """
    data = {}
    for field_name in cls.get_fields():
        try:
            value = getattr(obj, field_name)
        except AttributeError:
            # Fail gracefully when the attribute is missing and omit the
            # field entirely. (Edge case: this also swallows an
            # AttributeError raised *inside* an existing attribute.)
            continue
        # Call zero-argument methods so their return value is stored.
        if callable(value):
            value = value()
        data[field_name] = value
    return data
python
{ "resource": "" }
q37877
RegistrationModel.transform
train
def transform(self, images):
    """ Apply this model's transformations to an Images object.

    The transformation dictionary is used as a lookup table: each record
    of the Images object is transformed by the entry stored under its
    key. Transformations are small, so they are broadcast with the
    mapped function rather than joined.

    Parameters
    ----------
    images : array-like or thunder images
        The sequence of images / volumes to register.
    """
    images = check_images(images)

    def apply_one(item):
        key, volume = item
        return self.transformations[key].apply(volume)

    return images.map(apply_one, value_shape=images.value_shape,
                      dtype=images.dtype, with_keys=True)
python
{ "resource": "" }
q37878
load_module_in_background
train
def load_module_in_background(name, package=None, debug='DEBUG', env=None,
                              replacements=None):
    """Entry point for loading modules in background thread.

    Parameters
    ----------
    name : str
        Module name to load in background thread.
    package : str or None, optional
        Package name, has the same meaning as in importlib.import_module().
    debug : str, optional
        Debugging symbol name to look up in the environment; when truthy
        in the environment, the module is imported eagerly instead.
    env : Mapping or None, optional
        Environment; this will default to __xonsh_env__, if available, and
        os.environ otherwise.
    replacements : Mapping or None, optional
        Dictionary mapping fully qualified module names (eg foo.bar.baz)
        that import the lazily loaded module, with the variable name in
        that module. For example, suppose that foo.bar imports module a
        as b, this dict is then {'foo.bar': 'b'}.

    Returns
    -------
    module : ModuleType
        This is either the original module that is found in sys.modules or
        a proxy module that will block attribute access until the module
        is fully loaded.
    """
    modname = resolve_name(name, package)
    if modname in sys.modules:
        return sys.modules[modname]
    if env is None:
        try:
            import builtins
            env = getattr(builtins, '__xonsh_env__', os.environ)
        except ImportError:
            # Bug fix: fall back to os.environ as the *environment* and
            # keep going. The old code used a bare ``except:`` and
            # returned os.environ (a Mapping, not a module) to the caller.
            env = os.environ
    if env.get(debug, None):
        mod = importlib.import_module(name, package=package)
        return mod
    proxy = sys.modules[modname] = BackgroundModuleProxy(modname)
    BackgroundModuleLoader(name, package, replacements or {})
    return proxy
python
{ "resource": "" }
q37879
TokenProvider.get_token
train
def get_token(self):
    """Performs Neurio API token authentication using provided key and
    secret.

    The token is fetched once and cached for subsequent calls. This
    method is generally not called by hand; it is usually called
    as-needed by a Neurio Client object.

    Returns:
      string: the access token
    """
    if self.__token is not None:
        return self.__token

    url = "https://api.neur.io/v1/oauth2/token"
    raw_creds = ":".join([self.__key, self.__secret]).encode()
    creds = b64encode(raw_creds).decode()
    headers = {"Authorization": " ".join(["Basic", creds])}
    payload = {"grant_type": "client_credentials"}

    response = requests.post(url, data=payload, headers=headers)
    self.__token = response.json()["access_token"]
    return self.__token
python
{ "resource": "" }
q37880
Client.__append_url_params
train
def __append_url_params(self, url, params):
    """Return ``url`` with ``params`` merged into its query string."""
    parts = list(urlparse(url))
    query = dict(parse_qsl(parts[4]))
    query.update(params)
    parts[4] = urlencode(query)
    return urlunparse(parts)
python
{ "resource": "" }
q37881
Client.get_appliance
train
def get_appliance(self, appliance_id):
    """Get the information for a specified appliance

    Args:
        appliance_id (string): identifiying string of appliance

    Returns:
        list: dictionary object containing information about the
        specified appliance
    """
    url = "https://api.neur.io/v1/appliances/%s"%(appliance_id)
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    response = requests.get(url, headers=headers)
    return response.json()
python
{ "resource": "" }
q37882
Client.get_appliances
train
def get_appliances(self, location_id):
    """Get the appliances added for a specified location.

    Args:
        location_id (string): identifiying string of the location

    Returns:
        list: dictionary objects containing appliances data
    """
    url = "https://api.neur.io/v1/appliances"
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    url = self.__append_url_params(url, {"locationId": location_id})
    response = requests.get(url, headers=headers)
    return response.json()
python
{ "resource": "" }
q37883
Client.get_appliance_event_after_time
train
def get_appliance_event_after_time(self, location_id, since, per_page=None,
                                   page=None, min_power=None):
    """Get appliance events by location Id created/updated after a time.

    Args:
        location_id (string): hexadecimal id of the sensor to query, e.g.
            ``0x0013A20040B65FAD``
        since (string): ISO 8601 start time; only events created or
            updated after it are returned. Maximum value allowed is 1 day
            from the current time.
        min_power (string): The minimum average power (in watts) for
            filtering. Only events with an average power above this value
            will be returned. (default: 400)
        per_page (string, optional): the number of returned results per
            page (min 1, max 500) (default: 10)
        page (string, optional): the page number to return (min 1,
            max 100000) (default: 1)

    Returns:
        list: dictionary objects containing appliance events meeting
        specified criteria
    """
    url = "https://api.neur.io/v1/appliances/events"
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    params = {"locationId": location_id, "since": since}
    optional = {"minPower": min_power, "perPage": per_page, "page": page}
    params.update({key: val for key, val in optional.items() if val})
    url = self.__append_url_params(url, params)
    response = requests.get(url, headers=headers)
    return response.json()
python
{ "resource": "" }
q37884
Client.get_appliance_stats_by_location
train
def get_appliance_stats_by_location(self, location_id, start, end,
                                    granularity=None, per_page=None,
                                    page=None, min_power=None):
    """Get appliance usage data for a location within a time range.

    Stats are generated by fetching appliance events that match the
    supplied criteria and aggregating them based on the requested
    granularity.

    Note:
        This endpoint uses the location's time zone when generating time
        intervals, which matters when that zone observes daylight saving
        time (some days will be 23 or 25 hours long).

    Args:
        location_id (string): hexadecimal id of the sensor to query, e.g.
            ``0x0013A20040B65FAD``
        start (string): ISO 8601 start time for getting the events of
            appliances.
        end (string): ISO 8601 stop time for getting the events of
            appliances. Cannot be larger than 1 month from start time.
        granularity (string): granularity of stats; one of "minutes",
            "hours", "days", "weeks", "months", or "unknown". With
            "unknown", the stats between start and end are returned
            unaggregated. (default: days)
        min_power (string): The minimum average power (in watts) for
            filtering. Only events with an average power above this value
            will be returned. (default: 400)
        per_page (string, optional): the number of returned results per
            page (min 1, max 500) (default: 10)
        page (string, optional): the page number to return (min 1,
            max 100000) (default: 1)

    Returns:
        list: dictionary objects containing appliance stats meeting
        specified criteria
    """
    url = "https://api.neur.io/v1/appliances/stats"
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    params = {"locationId": location_id, "start": start, "end": end}
    optional = {"granularity": granularity, "minPower": min_power,
                "perPage": per_page, "page": page}
    params.update({key: val for key, val in optional.items() if val})
    url = self.__append_url_params(url, params)
    response = requests.get(url, headers=headers)
    return response.json()
python
{ "resource": "" }
q37885
Client.get_samples_live
train
def get_samples_live(self, sensor_id, last=None):
    """Get recent samples, one sample per second for up to the last
    2 minutes.

    Args:
        sensor_id (string): hexadecimal id of the sensor to query, e.g.
            ``0x0013A20040B65FAD``
        last (string): starting range, as ISO8601 timestamp

    Returns:
        list: dictionary objects containing sample data
    """
    url = "https://api.neur.io/v1/samples/live"
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    params = {"sensorId": sensor_id}
    if last:
        params["last"] = last
    url = self.__append_url_params(url, params)
    response = requests.get(url, headers=headers)
    return response.json()
python
{ "resource": "" }
q37886
Client.get_samples_live_last
train
def get_samples_live_last(self, sensor_id):
    """Get the last sample recorded by the sensor.

    Args:
        sensor_id (string): hexadecimal id of the sensor to query, e.g.
            ``0x0013A20040B65FAD``

    Returns:
        list: dictionary objects containing sample data
    """
    url = "https://api.neur.io/v1/samples/live/last"
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    url = self.__append_url_params(url, {"sensorId": sensor_id})
    response = requests.get(url, headers=headers)
    return response.json()
python
{ "resource": "" }
q37887
Client.get_samples
train
def get_samples(self, sensor_id, start, granularity, end=None, frequency=None,
                per_page=None, page=None, full=False):
    """Fetch a sensor's samples for a specified time interval.

    Args:
        sensor_id (string): hexadecimal id of the sensor to query,
            e.g. ``0x0013A20040B65FAD``
        start (string): ISO 8601 start time of sampling; depending on the
            ``granularity`` value, the maximum supported time ranges are:
            1 day for minutes or hours, 1 month for days, 6 months for
            weeks, 1 year for months, and 10 years for years granularity
        granularity (string): granularity of the sampled data; must be one
            of "minutes", "hours", "days", "weeks", "months", or "years"
        end (string, optional): ISO 8601 stop time for sampling; should be
            later than start time (default: the current time)
        frequency (string, optional): frequency of the sampled data (e.g.
            with granularity set to days, a value of 3 yields a sample for
            every third day; should be a multiple of 5 when using minutes
            granularity) (default: 1)
        per_page (string, optional): the number of returned results per
            page (min 1, max 500) (default: 10)
        page (string, optional): the page number to return
            (min 1, max 100000) (default: 1)
        full (bool, optional): include additional information per sample
            (default: False)

    Returns:
        list: dictionary objects containing sample data
    """
    base_url = ("https://api.neur.io/v1/samples/full" if full
                else "https://api.neur.io/v1/samples")

    request_headers = self.__gen_headers()
    request_headers["Content-Type"] = "application/json"

    query = {
        "sensorId": sensor_id,
        "start": start,
        "granularity": granularity
    }
    # Optional parameters are only sent when supplied (truthy), matching
    # the API's defaults otherwise.
    for key, value in (("end", end), ("frequency", frequency),
                       ("perPage", per_page), ("page", page)):
        if value:
            query[key] = value

    endpoint = self.__append_url_params(base_url, query)
    response = requests.get(endpoint, headers=request_headers)
    return response.json()
python
{ "resource": "" }
q37888
Client.get_user_information
train
def get_user_information(self):
    """Fetch the current user's account information, including sensor ID.

    Args:
        None

    Returns:
        dictionary object containing information about the current user
    """
    request_headers = self.__gen_headers()
    request_headers["Content-Type"] = "application/json"

    response = requests.get("https://api.neur.io/v1/users/current",
                            headers=request_headers)
    return response.json()
python
{ "resource": "" }
q37889
api2_formula
train
def api2_formula(req):
    """ A simple `GET`-, URL-based API to OpenFisca, making the assumption
    of computing formulas for a single person.

    Combination
    -----------
    You can compute several formulas at once by combining the paths and
    joining them with `+`. Example:
    ```
    /salaire_super_brut+salaire_net_a_payer?salaire_de_base=1440
    ```
    This will compute both `salaire_super_brut` and `salaire_net_a_payer`
    in a single request.

    Reforms
    -----------
    Reforms can be requested to patch the simulation system. To keep this
    endpoint URL simple, they are requested as a list in a custom HTTP header.
    ```
    X-OpenFisca-Extensions: de_net_a_brut, landais_piketty_saez
    ```
    This header is of course optional.

    URL size limit
    --------------
    Using combination with a lot of parameters may lead to long URLs. If used
    within the browser, make sure the resulting URL is kept
    [under 2047 characters](http://stackoverflow.com/questions/417142) for
    cross-browser compatibility, by splitting combined requests. On a server,
    just test what your library handles.
    """
    # NOTE(review): this controller is Python 2 code (`unicode`,
    # `str.decode` on the URL) — keep that in mind before porting.
    API_VERSION = '2.1.0'
    # Record the request URL for usage tracking/analytics.
    wsgihelpers.track(req.url.decode('utf-8'))
    params = dict(req.GET)
    data = dict()
    try:
        # Optional header listing reform extensions to layer on top of the
        # base tax/benefit system; absent header means the plain system.
        extensions_header = req.headers.get('X-Openfisca-Extensions')
        tax_benefit_system = model.get_cached_composed_reform(
            reform_keys = extensions_header.split(','),
            tax_benefit_system = model.tax_benefit_system,
            ) if extensions_header is not None else model.tax_benefit_system
        params = normalize(params, tax_benefit_system)
        # URL path carries the '+'-joined formula names and the period.
        formula_names = req.urlvars.get('names').split('+')
        data['values'] = dict()
        data['period'] = parse_period(req.urlvars.get('period'))
        simulation = create_simulation(params, data['period'], tax_benefit_system)
        # Compute each requested formula against the single simulation.
        for formula_name in formula_names:
            column = get_column_from_formula_name(formula_name, tax_benefit_system)
            data['values'][formula_name] = compute(column.name, simulation)
    except Exception as error:
        if isinstance(error.args[0], dict):  # we raised it ourselves, in this controller
            error = error.args[0]
        else:
            # Unexpected error: wrap it in the API's error envelope.
            error = dict(
                message = unicode(error),
                code = 500
                )
        data['error'] = error
    finally:
        # NOTE(review): `return` inside `finally` swallows any exception
        # re-raised above — here it is relied on so the handler always
        # responds, with errors reported inside `data['error']`.
        return respond(req, API_VERSION, data, params)
python
{ "resource": "" }
q37890
arithmetic_mean4
train
def arithmetic_mean4(rst, clk, rx_rdy, rx_vld, rx_dat, tx_rdy, tx_vld, tx_dat):
    ''' Calculates the arithmetic mean of every 4 consecutive input numbers

        Input handshake & data
            rx_rdy - (o) Ready
            rx_vld - (i) Valid
            rx_dat - (i) Data

        Output handshake & data
            tx_rdy - (i) Ready
            tx_vld - (o) Valid
            tx_dat - (o) Data

        Implementation: 3-stage pipeline
            stage 0: registers input data
            stage 1: sum each 4 consecutive numbers and produce the sum as a single result
            stage 2: divide the sum by 4
        Each stage is implemented as a separate process controlled by a central
        pipeline control unit via an enable signal. The pipeline control unit
        manages the handshake and synchronizes the operation of the stages.
    '''
    DATA_WIDTH = len(rx_dat)
    NUM_STAGES = 3
    # One enable / one stop bit per pipeline stage, driven by pipeline_control.
    stage_en = Signal(intbv(0)[NUM_STAGES:])
    stop_tx = Signal(intbv(0)[NUM_STAGES:])

    pipe_ctrl = pipeline_control(
        rst = rst,
        clk = clk,
        rx_vld = rx_vld,
        rx_rdy = rx_rdy,
        tx_vld = tx_vld,
        tx_rdy = tx_rdy,
        stage_enable = stage_en,
        stop_tx = stop_tx)

    s0_dat = Signal(intbv(0)[DATA_WIDTH:])

    @always_seq(clk.posedge, reset=rst)
    def stage_0():
        ''' Register input data'''
        if (stage_en[0]):
            s0_dat.next = rx_dat

    # Sum needs 2 extra bits to hold 4 full-range addends without overflow.
    s1_sum = Signal(intbv(0)[DATA_WIDTH+2:])
    s1_cnt = Signal(intbv(0, min=0, max=4))

    @always(clk.posedge)
    def stage_1():
        ''' Sum each 4 consecutive data'''
        if (rst):
            s1_cnt.next = 0
            stop_tx.next[1] = 1
        elif (stage_en[1]):
            # Count input data
            s1_cnt.next = (s1_cnt + 1) % 4
            if (s1_cnt == 0):
                s1_sum.next = s0_dat
            else:
                # NOTE(review): reading `s1_sum.next` on the right-hand side
                # is a MyHDL anti-pattern (normally one reads the current
                # value `s1_sum`); confirm intent before changing — it may
                # matter for convertibility to Verilog/VHDL.
                s1_sum.next = s1_sum.next + s0_dat
            # Produce result only after data 0, 1, 2, and 3 have been summed
            if (s1_cnt == 3):
                stop_tx.next[1] = 0
            else:
                stop_tx.next[1] = 1
    ''' stop_tx[1] concerns the data currently registered in stage 1 - it
        determines whether the data will be sent to the next pipeline stage
        (stop_tx==0) or will be dropped (stop_tx==1).
        The signals stop_rx and stop_tx must be registered '''

    s2_dat = Signal(intbv(0)[DATA_WIDTH:])

    @always_seq(clk.posedge, reset=rst)
    def stage_2():
        ''' Divide by 4'''
        if (stage_en[2]):
            s2_dat.next = s1_sum // 4

    @always_comb
    def comb():
        tx_dat.next = s2_dat

    return instances()
python
{ "resource": "" }
q37891
pipeline_control_stop_tx
train
def pipeline_control_stop_tx():
    ''' Instantiates the arithmetic_mean4 pipeline, feeds it with data and
        drains its output '''
    # NOTE(review): this testbench is Python 2 (`print` statements, `cmp`).
    clk = sim.Clock(val=0, period=10, units="ns")
    rst = sim.ResetSync(clk=clk, val=0, active=1)
    rx_rdy, rx_vld, tx_rdy, tx_vld = [Signal(bool(0)) for _ in range(4)]
    rx_dat = Signal(intbv(0)[8:])
    tx_dat = Signal(intbv(0)[8:])
    clkgen = clk.gen()
    # Device under test: the 3-stage averaging pipeline.
    dut = arithmetic_mean4(rst, clk, rx_rdy, rx_vld, rx_dat, tx_rdy, tx_vld, tx_dat)
    # dut = traceSignals(arithmetic_mean4, rst, clk, rx_rdy, rx_vld, rx_dat, tx_rdy, tx_vld, tx_dat)
    drv = Driver(rst, clk, rx_rdy, rx_vld, rx_dat)
    cap = Capture(rst, clk, tx_rdy, tx_vld, tx_dat)
    # Shared lists recording what was fed in and what came out.
    data_in = []
    data_out = []

    def stim(GAP=0):
        ''' Stimulates the pipeline input '''
        @instance
        def _stim():
            yield rst.pulse(10)
            yield clk.posedge
            # Feed 5 groups of 4 numbers; GAP idle cycles between writes.
            for i in range(5*4):
                yield drv.write(i)
                data_in.append(i)
                for _ in range(GAP):
                    yield clk.posedge
            # Let the pipeline flush before stopping the simulation.
            for _ in range(10):
                yield clk.posedge
            raise StopSimulation
        return _stim

    def drain(GAP=0):
        ''' Drains the pipeline output '''
        @instance
        def _drain():
            yield clk.posedge
            while True:
                yield cap.read()
                data_out.append(cap.d)
                for _ in range(GAP):
                    yield clk.posedge
        return _drain

    # You can play with the gap size at the input and at the output to see
    # how the pipeline responds (see time diagrams)
    Simulation(clkgen, dut, stim(GAP=0), drain(GAP=0)).run()
    print "data_in ({}): {}".format(len(data_in), data_in)
    print "data_out ({}): {}".format(len(data_out), data_out)
    # Expected output: integer mean of each consecutive group of 4 inputs.
    data_out_expected = [sum(data_in[i:i+4])//4 for i in range(0, len(data_in), 4)]
    assert cmp(data_out_expected, data_out)==0, "expected: data_out ({}): {}".format(len(data_out_expected), data_out_expected)
python
{ "resource": "" }
q37892
MongoEmbedded.clean
train
def clean(self, value):
    """Validate the provided dict of values, then wrap them in an
    EmbeddedDocument instance.
    """
    cleaned = super(MongoEmbedded, self).clean(value)
    return self.document_class(**cleaned)
python
{ "resource": "" }
q37893
MongoReference.fetch_object
train
def fetch_object(self, doc_id):
    """Look up the referenced document by its primary key.

    Raises ReferenceNotFoundError when no document matches ``doc_id``.
    """
    model = self.object_class
    try:
        return model.objects.get(pk=doc_id)
    except model.DoesNotExist:
        raise ReferenceNotFoundError
python
{ "resource": "" }
q37894
get_argument_parser
train
def get_argument_parser():
    """Build the command-line argument parser for this script.

    Returns
    -------
    `argparse.ArgumentParser`
        The argument parser.
    """
    parser = cli.get_argument_parser(
        desc='Generate a sample sheet based on a GEO series matrix.')

    file_group = parser.add_argument_group('Input and output files')
    file_group.add_argument(
        '-s', '--series-matrix-file', type=cli.str_type, required=True,
        metavar=cli.file_mv, help='The GEO series matrix file.')
    file_group.add_argument(
        '-o', '--output-file', type=cli.str_type, required=True,
        metavar=cli.file_mv, help='The output file.')
    file_group.add_argument(
        '-e', '--encoding', type=cli.str_type, metavar=cli.str_mv,
        default='UTF-8',
        help='The encoding of the series matrix file. [UTF-8]')

    cli.add_reporting_args(parser)
    return parser
python
{ "resource": "" }
q37895
read_series_matrix
train
def read_series_matrix(path, encoding):
    """Extract sample metadata from a GEO series matrix file.

    Returns a 3-tuple ``(accessions, titles, celfile_urls)``; each element
    is a list of per-sample strings, or ``None`` if the corresponding row
    was absent from the file.
    """
    assert isinstance(path, str)

    accessions = None
    titles = None
    celfile_urls = None

    with misc.smart_open_read(path, mode='rb', try_gzip=True) as fh:
        for row in csv.reader(fh, dialect='excel-tab', encoding=encoding):
            if not row:
                continue
            label = row[0]
            if label == '!Sample_geo_accession':
                accessions = row[1:]
            elif label == '!Sample_title':
                titles = row[1:]
            elif label == '!Sample_supplementary_file' and celfile_urls is None:
                # Keep only the first supplementary-file row.
                celfile_urls = row[1:]
            elif label == '!series_matrix_table_begin':
                # End of the metadata section; stop before the data table.
                break

    return accessions, titles, celfile_urls
python
{ "resource": "" }
q37896
write_sample_sheet
train
def write_sample_sheet(path, accessions, names, celfile_urls, sel=None):
    """Write a tab-separated sample sheet.

    Parameters
    ----------
    path : str
        Output file path.
    accessions, names, celfile_urls : list of str
        Parallel per-sample lists; the CEL file name is derived from the
        last path component of each URL.
    sel : iterable of int, optional
        Indices of the samples to write; all samples when ``None``.
    """
    # Python 3's csv module requires a text-mode file; the previous
    # binary mode ('wb') raised TypeError. newline='' lets the writer's
    # own lineterminator (os.linesep) control the line endings.
    with open(path, 'w', newline='') as ofh:
        writer = csv.writer(ofh, dialect='excel-tab',
                            lineterminator=os.linesep,
                            quoting=csv.QUOTE_NONE)
        # write header
        writer.writerow(['Accession', 'Name', 'CEL file', 'CEL file URL'])
        if sel is None:
            sel = range(len(names))
        for i in sel:
            cel_file = celfile_urls[i].split('/')[-1]
            writer.writerow([accessions[i], names[i], cel_file,
                             celfile_urls[i]])
python
{ "resource": "" }
q37897
create_blueprint
train
def create_blueprint(state):
    """Create blueprint serving JSON schemas.

    :param state: :class:`invenio_jsonschemas.ext.InvenioJSONSchemasState`
        instance used to retrieve the schemas.
    """
    blueprint = Blueprint('invenio_jsonschemas', __name__)

    @blueprint.route('/<path:schema_path>')
    def get_schema(schema_path):
        """Retrieve a schema."""
        try:
            schema_dir = state.get_schema_dir(schema_path)
        except JSONSchemaNotFound:
            abort(404)

        # Query-string flags, falling back to the app's configuration.
        resolved = request.args.get(
            'resolved',
            current_app.config.get('JSONSCHEMAS_RESOLVE_SCHEMA'),
            type=int)
        # Resolving implies replacing the $ref entries as well.
        with_refs = request.args.get(
            'refs',
            current_app.config.get('JSONSCHEMAS_REPLACE_REFS'),
            type=int) or resolved

        if not (resolved or with_refs):
            # Plain file: serve the schema straight from disk.
            return send_from_directory(schema_dir, schema_path)
        schema = state.get_schema(
            schema_path, with_refs=with_refs, resolved=resolved)
        return jsonify(schema)

    return blueprint
python
{ "resource": "" }
q37898
printBlastRecord
train
def printBlastRecord(record):
    """
    Print a BLAST record.

    @param record: A BioPython C{Bio.Blast.Record.Blast} instance.
    """
    skip = ('alignments', 'descriptions', 'reference')
    for key in sorted(record.__dict__):
        if key not in skip:
            print('%s: %r' % (key, record.__dict__[key]))
    print('alignments: (%d in total):' % len(record.alignments))
    for index, alignment in enumerate(record.alignments):
        number = index + 1
        print(' description %d:' % number)
        description = record.descriptions[index]
        for attr in ('accession', 'bits', 'e', 'num_alignments', 'score'):
            print(' %s: %s' % (attr, getattr(description, attr)))
        print(' alignment %d:' % number)
        for attr in ('accession', 'hit_def', 'hit_id', 'length', 'title'):
            print(' %s: %s' % (attr, getattr(alignment, attr)))
        print(' HSPs (%d in total):' % len(alignment.hsps))
        for hspNumber, hsp in enumerate(alignment.hsps, start=1):
            print(' hsp %d:' % hspNumber)
            printHSP(hsp, ' ')
python
{ "resource": "" }
q37899
get_goa_gene_sets
train
def get_goa_gene_sets(go_annotations):
    """Generate a list of gene sets from a collection of GO annotations.

    Each gene set corresponds to all genes annotated with a certain GO term.
    """
    terms_by_id = {}
    genes_by_term = OrderedDict()
    for annotation in go_annotations:
        term = annotation.go_term
        terms_by_id[term.id] = term
        genes_by_term.setdefault(term.id, []).append(annotation.db_symbol)

    gene_sets = []
    # Emit gene sets in sorted GO-term-id order.
    for term_id in sorted(genes_by_term):
        term = terms_by_id[term_id]
        gene_sets.append(GeneSet(
            id=term_id, name=term.name, genes=genes_by_term[term_id],
            source='GO', collection=term.domain_short,
            description=term.definition))

    return GeneSetCollection(gene_sets)
python
{ "resource": "" }