Dataset columns:
  _id               string (length 2-7)
  title             string (length 1-88)
  partition         string (3 classes)
  text              string (length 75-19.8k)
  language          string (1 class)
  meta_information  dict
q12200
get_namespace_keys
train
def get_namespace_keys(app, limit):
    """Get namespace keys."""
    ns_query = datastore.Query('__namespace__', keys_only=True, _app=app)
    return list(ns_query.Run(limit=limit, batch_size=limit))
python
{ "resource": "" }
q12201
NamespaceRange.split_range
train
def split_range(self):
    """Splits the NamespaceRange into two nearly equal-sized ranges.

    Returns:
        If this NamespaceRange contains a single namespace then a list
        containing this NamespaceRange is returned. Otherwise a two-element
        list containing two NamespaceRanges whose total range is identical
        to this NamespaceRange's is returned.
    """
    if self.is_single_namespace:
        return [self]

    mid_point = (_namespace_to_ord(self.namespace_start) +
                 _namespace_to_ord(self.namespace_end)) // 2

    return [NamespaceRange(self.namespace_start,
                           _ord_to_namespace(mid_point),
                           _app=self.app),
            NamespaceRange(_ord_to_namespace(mid_point + 1),
                           self.namespace_end,
                           _app=self.app)]
python
{ "resource": "" }
q12202
NamespaceRange.with_start_after
train
def with_start_after(self, after_namespace):
    """Returns a copy of this NamespaceRange with a new namespace_start.

    Args:
        after_namespace: A namespace string.

    Returns:
        A NamespaceRange object whose namespace_start is the
        lexicographically next namespace after the given namespace string.

    Raises:
        ValueError: if the NamespaceRange includes only a single namespace.
    """
    namespace_start = _ord_to_namespace(
        _namespace_to_ord(after_namespace) + 1)
    return NamespaceRange(namespace_start, self.namespace_end, _app=self.app)
python
{ "resource": "" }
q12203
NamespaceRange.make_datastore_query
train
def make_datastore_query(self, cursor=None):
    """Returns a datastore.Query that generates all namespaces in the range.

    Args:
        cursor: start cursor for the query.

    Returns:
        A datastore.Query instance that generates db.Keys for each namespace
        in the NamespaceRange.
    """
    filters = {}
    filters['__key__ >= '] = _key_for_namespace(
        self.namespace_start, self.app)
    filters['__key__ <= '] = _key_for_namespace(
        self.namespace_end, self.app)

    return datastore.Query('__namespace__',
                           filters=filters,
                           keys_only=True,
                           cursor=cursor,
                           _app=self.app)
python
{ "resource": "" }
q12204
NamespaceRange.normalized_start
train
def normalized_start(self):
    """Returns a NamespaceRange with leading non-existent namespaces removed.

    Returns:
        A copy of this NamespaceRange whose namespace_start is adjusted to
        exclude the portion of the range that contains no actual namespaces
        in the datastore. None is returned if the NamespaceRange contains no
        actual namespaces in the datastore.
    """
    namespaces_after_key = list(self.make_datastore_query().Run(limit=1))

    if not namespaces_after_key:
        return None

    namespace_after_key = namespaces_after_key[0].name() or ''
    return NamespaceRange(namespace_after_key,
                          self.namespace_end,
                          _app=self.app)
python
{ "resource": "" }
q12205
NamespaceRange.to_json_object
train
def to_json_object(self):
    """Returns a dict representation that can be serialized to JSON."""
    obj_dict = dict(namespace_start=self.namespace_start,
                    namespace_end=self.namespace_end)
    if self.app is not None:
        obj_dict['app'] = self.app
    return obj_dict
python
{ "resource": "" }
q12206
NamespaceRange.split
train
def split(cls,
          n,
          contiguous,
          can_query=itertools.chain(itertools.repeat(True, 50),
                                    itertools.repeat(False)).next,
          _app=None):
    # pylint: disable=g-doc-args
    """Splits the complete NamespaceRange into n equally-sized NamespaceRanges.

    Args:
        n: The maximum number of NamespaceRanges to return. Fewer than n
            namespaces may be returned.
        contiguous: If True then the returned NamespaceRanges will cover the
            entire space of possible namespaces (i.e. from MIN_NAMESPACE to
            MAX_NAMESPACE) without gaps. If False then the returned
            NamespaceRanges may exclude namespaces that don't appear in the
            datastore.
        can_query: A function that returns True if split() can query the
            datastore to generate more fair namespace range splits, and
            False otherwise. If not set then split() is allowed to make 50
            datastore queries.

    Returns:
        A list of at most n NamespaceRanges representing a near-equal
        distribution of actual existent datastore namespaces. The returned
        list will be sorted lexicographically.

    Raises:
        ValueError: if n is < 1.
    """
    if n < 1:
        raise ValueError('n must be >= 1')

    ranges = None
    if can_query():
        if not contiguous:
            ns_keys = get_namespace_keys(_app, n + 1)
            if not ns_keys:
                return []
            else:
                if len(ns_keys) <= n:
                    # If we have fewer actual namespaces than the number of
                    # NamespaceRanges to return, then just return the list
                    # of those namespaces.
                    ns_range = []
                    for ns_key in ns_keys:
                        ns_range.append(NamespaceRange(ns_key.name() or '',
                                                       ns_key.name() or '',
                                                       _app=_app))
                    return sorted(
                        ns_range,
                        key=lambda ns_range: ns_range.namespace_start)
                # Use the first key and save the initial normalized_start()
                # call.
                ranges = [NamespaceRange(ns_keys[0].name() or '', _app=_app)]
        else:
            ns_range = NamespaceRange(_app=_app).normalized_start()
            if ns_range is None:
                return [NamespaceRange(_app=_app)]
            ranges = [ns_range]
    else:
        ranges = [NamespaceRange(_app=_app)]

    singles = []
    while ranges and (len(ranges) + len(singles)) < n:
        namespace_range = ranges.pop(0)
        if namespace_range.is_single_namespace:
            singles.append(namespace_range)
        else:
            left, right = namespace_range.split_range()
            if can_query():
                right = right.normalized_start()
            if right is not None:
                ranges.append(right)
            ranges.append(left)
    ns_ranges = sorted(singles + ranges,
                       key=lambda ns_range: ns_range.namespace_start)

    if contiguous:
        if not ns_ranges:
            # This condition is possible if every namespace was deleted
            # after the first call to ns_range.normalized_start().
            return [NamespaceRange(_app=_app)]

        continuous_ns_ranges = []
        for i in range(len(ns_ranges)):
            if i == 0:
                namespace_start = MIN_NAMESPACE
            else:
                namespace_start = ns_ranges[i].namespace_start

            if i == len(ns_ranges) - 1:
                namespace_end = MAX_NAMESPACE
            else:
                namespace_end = _ord_to_namespace(
                    _namespace_to_ord(ns_ranges[i + 1].namespace_start) - 1)
            continuous_ns_ranges.append(NamespaceRange(namespace_start,
                                                       namespace_end,
                                                       _app=_app))
        return continuous_ns_ranges
    else:
        return ns_ranges
python
{ "resource": "" }
q12207
_RecordsPoolBase.append
train
def append(self, data):
    """Append data to a file."""
    data_length = len(data)
    if self._size + data_length > self._flush_size:
        self.flush()

    if not self._exclusive and data_length > _FILE_POOL_MAX_SIZE:
        raise errors.Error(
            "Too big input %s (%s)." % (data_length, _FILE_POOL_MAX_SIZE))
    else:
        self._buffer.append(data)
        self._size += data_length

    if self._size > self._flush_size:
        self.flush()
python
{ "resource": "" }
q12208
GCSRecordsPool._write
train
def _write(self, str_buf):
    """Uses the filehandle to the file in GCS to write to it."""
    self._filehandle.write(str_buf)
    self._buf_size += len(str_buf)
python
{ "resource": "" }
q12209
_GoogleCloudStorageBase._get_tmp_gcs_bucket
train
def _get_tmp_gcs_bucket(cls, writer_spec):
    """Returns bucket used for writing tmp files."""
    if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
        return writer_spec[cls.TMP_BUCKET_NAME_PARAM]
    return cls._get_gcs_bucket(writer_spec)
python
{ "resource": "" }
q12210
_GoogleCloudStorageBase._get_tmp_account_id
train
def _get_tmp_account_id(cls, writer_spec):
    """Returns the account id to use with tmp bucket."""
    # pick tmp id iff tmp bucket is set explicitly
    if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
        return writer_spec.get(cls._TMP_ACCOUNT_ID_PARAM, None)
    return cls._get_account_id(writer_spec)
python
{ "resource": "" }
q12211
_GoogleCloudStorageOutputWriterBase._generate_filename
train
def _generate_filename(cls, writer_spec, name, job_id, num,
                       attempt=None, seg_index=None):
    """Generates a filename for a particular output.

    Args:
        writer_spec: specification dictionary for the output writer.
        name: name of the job.
        job_id: the ID number assigned to the job.
        num: shard number.
        attempt: the shard attempt number.
        seg_index: index of the seg. None means the final output.

    Returns:
        a string containing the filename.

    Raises:
        BadWriterParamsError: if the template contains any errors such as
            invalid syntax or contains unknown substitution placeholders.
    """
    naming_format = cls._TMP_FILE_NAMING_FORMAT
    if seg_index is None:
        naming_format = writer_spec.get(cls.NAMING_FORMAT_PARAM,
                                        cls._DEFAULT_NAMING_FORMAT)

    template = string.Template(naming_format)
    try:
        # Check that template doesn't use undefined mappings and is
        # formatted well.
        if seg_index is None:
            return template.substitute(name=name, id=job_id, num=num)
        else:
            return template.substitute(name=name, id=job_id, num=num,
                                       attempt=attempt, seg=seg_index)
    except ValueError as error:
        raise errors.BadWriterParamsError(
            "Naming template is bad, %s" % error)
    except KeyError as error:
        raise errors.BadWriterParamsError(
            "Naming template '%s' has extra mappings, %s" %
            (naming_format, error))
python
{ "resource": "" }
q12212
_GoogleCloudStorageOutputWriterBase._open_file
train
def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False):
    """Opens a new gcs file for writing."""
    if use_tmp_bucket:
        bucket = cls._get_tmp_gcs_bucket(writer_spec)
        account_id = cls._get_tmp_account_id(writer_spec)
    else:
        bucket = cls._get_gcs_bucket(writer_spec)
        account_id = cls._get_account_id(writer_spec)

    # GoogleCloudStorage format for filenames, initial slash is required
    filename = "/%s/%s" % (bucket, filename_suffix)

    content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None)

    options = {}
    if cls.ACL_PARAM in writer_spec:
        options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM)

    return cloudstorage.open(filename,
                             mode="w",
                             content_type=content_type,
                             options=options,
                             _account_id=account_id)
python
{ "resource": "" }
q12213
_GoogleCloudStorageOutputWriterBase.write
train
def write(self, data):
    """Write data to the GoogleCloudStorage file.

    Args:
        data: string containing the data to be written.
    """
    start_time = time.time()
    self._get_write_buffer().write(data)
    ctx = context.get()
    operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx)
    operation.counters.Increment(
        COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(ctx)
python
{ "resource": "" }
q12214
_GoogleCloudStorageOutputWriter._create
train
def _create(cls, writer_spec, filename_suffix):
    """Helper method that actually creates the file in cloud storage."""
    writer = cls._open_file(writer_spec, filename_suffix)
    return cls(writer, writer_spec=writer_spec)
python
{ "resource": "" }
q12215
GoogleCloudStorageConsistentOutputWriter._create_tmpfile
train
def _create_tmpfile(cls, status):
    """Creates a new random-named tmpfile."""
    # We can't put the tmpfile in the same directory as the output. There
    # are rare circumstances when we leave trash behind and we don't want
    # this trash to be loaded into bigquery and/or used for restore.
    #
    # We use the mapreduce id, shard number, attempt and 128 random bits
    # to make collisions virtually impossible.
    tmpl = string.Template(cls._TMPFILE_PATTERN)
    filename = tmpl.substitute(
        id=status.mapreduce_id, shard=status.shard,
        random=random.getrandbits(cls._RAND_BITS))
    return cls._open_file(status.writer_spec, filename, use_tmp_bucket=True)
python
{ "resource": "" }
q12216
GoogleCloudStorageConsistentOutputWriter._try_to_clean_garbage
train
def _try_to_clean_garbage(self, writer_spec, exclude_list=()):
    """Tries to remove any files created by this shard that aren't needed.

    Args:
        writer_spec: writer_spec for the MR.
        exclude_list: A list of filenames (strings) that should not be
            removed.
    """
    # Try to remove garbage (if any). Note that listbucket is not strongly
    # consistent so something might survive.
    tmpl = string.Template(self._TMPFILE_PREFIX)
    prefix = tmpl.substitute(
        id=self.status.mapreduce_id, shard=self.status.shard)
    bucket = self._get_tmp_gcs_bucket(writer_spec)
    account_id = self._get_tmp_account_id(writer_spec)
    for f in cloudstorage.listbucket("/%s/%s" % (bucket, prefix),
                                     _account_id=account_id):
        if f.filename not in exclude_list:
            self._remove_tmpfile(f.filename, self.status.writer_spec)
python
{ "resource": "" }
q12217
_get_weights
train
def _get_weights(max_length):
    """Get weights for each offset in str of certain max length.

    Args:
        max_length: max length of the strings.

    Returns:
        A list of ints as weights.

    Example:
        If max_length is 2 and alphabet is "ab", then we have order "", "a",
        "aa", "ab", "b", "ba", "bb". So the weight for the first char is 3.
    """
    weights = [1]
    for i in range(1, max_length):
        weights.append(weights[i - 1] * len(_ALPHABET) + 1)
    weights.reverse()
    return weights
python
{ "resource": "" }
q12218
_str_to_ord
train
def _str_to_ord(content, weights):
    """Converts a string to its lexicographical order.

    Args:
        content: the string to convert. Of type str.
        weights: weights from _get_weights.

    Returns:
        an int or long that represents the order of this string. "" has
        order 0.
    """
    ordinal = 0
    for i, c in enumerate(content):
        ordinal += weights[i] * _ALPHABET.index(c) + 1
    return ordinal
python
{ "resource": "" }
q12219
_ord_to_str
train
def _ord_to_str(ordinal, weights):
    """Reverse function of _str_to_ord."""
    chars = []
    for weight in weights:
        if ordinal == 0:
            return "".join(chars)
        ordinal -= 1
        index, ordinal = divmod(ordinal, weight)
        chars.append(_ALPHABET[index])
    return "".join(chars)
python
{ "resource": "" }
q12220
PropertyRange._get_range_from_filters
train
def _get_range_from_filters(cls, filters, model_class):
    """Get property range from filters user provided.

    This method also validates there is one and only one closed range on a
    single property.

    Args:
        filters: user supplied filters. Each filter should be a list or
            tuple of format (<property_name_as_str>,
            <query_operator_as_str>, <value_of_certain_type>). Value type
            should satisfy the property's type.
        model_class: the model class for the entity type to apply filters
            on.

    Returns:
        a tuple of (property, start_filter, end_filter). property is the
        model's field that the range is about. start_filter and end_filter
        define the start and the end of the range. (None, None, None) if no
        range is found.

    Raises:
        BadReaderParamsError: if any filter is invalid in any way.
    """
    if not filters:
        return None, None, None

    range_property = None
    start_val = None
    end_val = None
    start_filter = None
    end_filter = None
    for f in filters:
        prop, op, val = f

        if op in [">", ">=", "<", "<="]:
            if range_property and range_property != prop:
                raise errors.BadReaderParamsError(
                    "Range on only one property is supported.")
            range_property = prop

            if val is None:
                raise errors.BadReaderParamsError(
                    "Range can't be None in filter %s", f)

            if op in [">", ">="]:
                if start_val is not None:
                    raise errors.BadReaderParamsError(
                        "Operation %s is specified more than once.", op)
                start_val = val
                start_filter = f
            else:
                if end_val is not None:
                    raise errors.BadReaderParamsError(
                        "Operation %s is specified more than once.", op)
                end_val = val
                end_filter = f
        elif op != "=":
            raise errors.BadReaderParamsError(
                "Only < <= > >= = are supported as operation. Got %s", op)

    if not range_property:
        return None, None, None
    if start_val is None or end_val is None:
        raise errors.BadReaderParamsError(
            "Filter should contain a complete range on property %s",
            range_property)
    if issubclass(model_class, db.Model):
        property_obj = model_class.properties()[range_property]
    else:
        property_obj = (
            model_class._properties[  # pylint: disable=protected-access
                range_property])
    supported_properties = (
        _DISCRETE_PROPERTY_SPLIT_FUNCTIONS.keys() +
        _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS.keys())
    if not isinstance(property_obj, tuple(supported_properties)):
        raise errors.BadReaderParamsError(
            "Filtered property %s is not supported by sharding.",
            range_property)
    if not start_val < end_val:
        raise errors.BadReaderParamsError(
            "Start value %s should be smaller than end value %s",
            start_val, end_val)

    return property_obj, start_filter, end_filter
python
{ "resource": "" }
q12221
PropertyRange.split
train
def split(self, n):
    """Evenly split this range into contiguous, non-overlapping subranges.

    Args:
        n: number of splits.

    Returns:
        a list of contiguous, non-overlapping sub PropertyRanges. May be
        fewer than n when there are not enough subranges.
    """
    new_range_filters = []
    name = self.start[0]
    prop_cls = self.prop.__class__
    if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS:
        splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
            self.start[2], self.end[2], n,
            self.start[1] == ">=", self.end[1] == "<=")
        start_filter = (name, ">=", splitpoints[0])
        for p in splitpoints[1:]:
            end_filter = (name, "<", p)
            new_range_filters.append([start_filter, end_filter])
            start_filter = (name, ">=", p)
    else:
        splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
            self.start[2], self.end[2], n)
        start_filter = self.start
        for p in splitpoints:
            end_filter = (name, "<", p)
            new_range_filters.append([start_filter, end_filter])
            start_filter = (name, ">=", p)
        new_range_filters.append([start_filter, self.end])

    for f in new_range_filters:
        f.extend(self._equality_filters)

    return [self.__class__(f, self.model_class_path)
            for f in new_range_filters]
python
{ "resource": "" }
q12222
PropertyRange.make_query
train
def make_query(self, ns):
    """Make a query of entities within this range.

    Query options are not supported. They should be specified when the
    query is run.

    Args:
        ns: namespace of this query.

    Returns:
        a db.Query or ndb.Query, depending on the model class's type.
    """
    if issubclass(self.model_class, db.Model):
        query = db.Query(self.model_class, namespace=ns)
        for f in self.filters:
            query.filter("%s %s" % (f[0], f[1]), f[2])
    else:
        query = self.model_class.query(namespace=ns)
        for f in self.filters:
            query = query.filter(ndb.FilterNode(*f))
    return query
python
{ "resource": "" }
q12223
OutputWriter.commit_output
train
def commit_output(cls, shard_ctx, iterator):
    """Saves output references when a shard finishes.

    Inside end_shard(), an output writer can optionally use this method
    to persist some references to the outputs from this shard
    (e.g. a list of filenames).

    Args:
        shard_ctx: map_job_context.ShardContext for this shard.
        iterator: an iterator that yields json serializable references to
            the outputs from this shard. Contents from the iterator can be
            accessed later via map_job.Job.get_outputs.
    """
    # We accept an iterator just in case output references get too big.
    outs = tuple(iterator)
    shard_ctx._state.writer_state["outs"] = outs
python
{ "resource": "" }
q12224
KeyRangesFactory.from_json
train
def from_json(cls, json):
    """Deserialize from json.

    Args:
        json: a dict of json compatible fields.

    Returns:
        a KeyRanges object.

    Raises:
        ValueError: if the json is invalid.
    """
    if json["name"] in _KEYRANGES_CLASSES:
        return _KEYRANGES_CLASSES[json["name"]].from_json(json)
    raise ValueError("Invalid json %s" % json)
python
{ "resource": "" }
q12225
split_into_sentences
train
def split_into_sentences(s):
    """Split text into list of sentences."""
    s = re.sub(r"\s+", " ", s)
    s = re.sub(r"[\\.\\?\\!]", "\n", s)
    return s.split("\n")
python
{ "resource": "" }
q12226
split_into_words
train
def split_into_words(s):
    """Split a sentence into list of words."""
    s = re.sub(r"\W+", " ", s)
    s = re.sub(r"[_0-9]+", " ", s)
    return s.split()
python
{ "resource": "" }
q12227
index_map
train
def index_map(data):
    """Index demo map function."""
    (entry, text_fn) = data
    text = text_fn()

    logging.debug("Got %s", entry.filename)
    for s in split_into_sentences(text):
        for w in split_into_words(s.lower()):
            yield (w, entry.filename)
python
{ "resource": "" }
q12228
phrases_map
train
def phrases_map(data):
    """Phrases demo map function."""
    (entry, text_fn) = data
    text = text_fn()
    filename = entry.filename

    logging.debug("Got %s", filename)
    for s in split_into_sentences(text):
        words = split_into_words(s.lower())
        if len(words) < PHRASE_LENGTH:
            yield (":".join(words), filename)
            continue
        for i in range(0, len(words) - PHRASE_LENGTH):
            yield (":".join(words[i:i + PHRASE_LENGTH]), filename)
python
{ "resource": "" }
q12229
phrases_reduce
train
def phrases_reduce(key, values):
    """Phrases demo reduce function."""
    if len(values) < 10:
        return
    counts = {}
    for filename in values:
        counts[filename] = counts.get(filename, 0) + 1

    words = re.sub(r":", " ", key)
    threshold = len(values) / 2
    for filename, count in counts.items():
        if count > threshold:
            yield "%s:%s\n" % (words, filename)
python
{ "resource": "" }
q12230
FileMetadata.getKeyName
train
def getKeyName(username, date, blob_key):
    """Returns the internal key for a particular item in the database.

    Our items are stored with keys of the form 'user/date/blob_key'
    ('/' is not the real separator, but __SEP is).

    Args:
        username: The given user's e-mail address.
        date: A datetime object representing the date and time that an
            input file was uploaded to this app.
        blob_key: The blob key corresponding to the location of the input
            file in the Blobstore.

    Returns:
        The internal key for the item specified by
        (username, date, blob_key).
    """
    sep = FileMetadata.__SEP
    return str(username + sep + str(date) + sep + blob_key)
python
{ "resource": "" }
q12231
Custodian.from_spec
train
def from_spec(cls, spec):
    """
    Load a Custodian instance where the jobs are specified from a
    structure and a spec dict. This allows simple custom job sequences to
    be constructed quickly via a YAML file.

    Args:
        spec (dict): A dict specifying job. A sample of the dict in YAML
            format for the usual MP workflow is given as follows

            ```
            jobs:
            - jb: custodian.vasp.jobs.VaspJob
              params:
                final: False
                suffix: .relax1
            - jb: custodian.vasp.jobs.VaspJob
              params:
                final: True
                suffix: .relax2
                settings_override:
                  {"file": "CONTCAR",
                   "action": {"_file_copy": {"dest": "POSCAR"}}}
            jobs_common_params:
              vasp_cmd: /opt/vasp
            handlers:
            - hdlr: custodian.vasp.handlers.VaspErrorHandler
            - hdlr: custodian.vasp.handlers.AliasingErrorHandler
            - hdlr: custodian.vasp.handlers.MeshSymmetryHandler
            validators:
            - vldr: custodian.vasp.validators.VasprunXMLValidator
            custodian_params:
              scratch_dir: /tmp
            ```

            The `jobs` key is a list of jobs. Each job is specified via
            "jb": <explicit path>, and all parameters are specified via
            `params` which is a dict. `jobs_common_params` specifies a
            common set of parameters that are passed to all jobs, e.g.,
            vasp_cmd.

    Returns:
        Custodian instance.
    """
    dec = MontyDecoder()

    def load_class(dotpath):
        modname, classname = dotpath.rsplit(".", 1)
        mod = __import__(modname, globals(), locals(), [classname], 0)
        return getattr(mod, classname)

    def process_params(d):
        decoded = {}
        for k, v in d.items():
            if k.startswith("$"):
                if isinstance(v, list):
                    v = [os.path.expandvars(i) for i in v]
                elif isinstance(v, dict):
                    v = {k2: os.path.expandvars(v2)
                         for k2, v2 in v.items()}
                else:
                    v = os.path.expandvars(v)
            decoded[k.strip("$")] = dec.process_decoded(v)
        return decoded

    jobs = []
    common_params = process_params(spec.get("jobs_common_params", {}))

    for d in spec["jobs"]:
        cls_ = load_class(d["jb"])
        params = process_params(d.get("params", {}))
        params.update(common_params)
        jobs.append(cls_(**params))

    handlers = []
    for d in spec.get("handlers", []):
        cls_ = load_class(d["hdlr"])
        params = process_params(d.get("params", {}))
        handlers.append(cls_(**params))

    validators = []
    for d in spec.get("validators", []):
        cls_ = load_class(d["vldr"])
        params = process_params(d.get("params", {}))
        validators.append(cls_(**params))

    custodian_params = process_params(spec.get("custodian_params", {}))

    return cls(jobs=jobs, handlers=handlers, validators=validators,
               **custodian_params)
python
{ "resource": "" }
q12232
Custodian.run
train
def run(self):
    """
    Runs all jobs.

    Returns:
        All errors encountered as a list of list.
        [[error_dicts for job 1], [error_dicts for job 2], ....]

    Raises:
        ValidationError: if a job fails validation
        ReturnCodeError: if the process has a return code different from 0
        NonRecoverableError: if an unrecoverable error occurs
        MaxCorrectionsPerJobError: if max_errors_per_job is reached
        MaxCorrectionsError: if max_errors is reached
        MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
    """
    cwd = os.getcwd()

    with ScratchDir(self.scratch_dir, create_symbolic_link=True,
                    copy_to_current_on_exit=True,
                    copy_from_current_on_enter=True) as temp_dir:
        self.total_errors = 0
        start = datetime.datetime.now()
        logger.info("Run started at {} in {}.".format(start, temp_dir))
        v = sys.version.replace("\n", " ")
        logger.info("Custodian running on Python version {}".format(v))
        logger.info("Hostname: {}, Cluster: {}".format(
            *get_execution_host_info()))

        try:
            # skip jobs until the restart
            for job_n, job in islice(enumerate(self.jobs, 1),
                                     self.restart, None):
                self._run_job(job_n, job)
                # We do a dump of the run log after each job.
                dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
                       indent=4)
                # Checkpoint after each job so that we can recover from
                # the last point and remove old checkpoints
                if self.checkpoint:
                    self.restart = job_n
                    Custodian._save_checkpoint(cwd, job_n)
        except CustodianError as ex:
            logger.error(ex.message)
            if ex.raises:
                raise
        finally:
            # Log the corrections to a json file.
            logger.info("Logging to {}...".format(Custodian.LOG_FILE))
            dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
                   indent=4)
            end = datetime.datetime.now()
            logger.info("Run ended at {}.".format(end))
            run_time = end - start
            logger.info("Run completed. Total time taken = {}."
                        .format(run_time))
            if self.gzipped_output:
                gzip_dir(".")

    # Cleanup checkpoint files (if any) if run is successful.
    Custodian._delete_checkpoints(cwd)

    return self.run_log
python
{ "resource": "" }
q12233
Custodian._do_check
train
def _do_check(self, handlers, terminate_func=None):
    """Checks the specified handlers. Returns True iff errors were caught."""
    corrections = []
    for h in handlers:
        try:
            if h.check():
                if h.max_num_corrections is not None \
                        and h.n_applied_corrections >= h.max_num_corrections:
                    msg = "Maximum number of corrections {} reached " \
                          "for handler {}".format(h.max_num_corrections, h)
                    if h.raise_on_max:
                        self.run_log[-1]["handler"] = h
                        self.run_log[-1]["max_errors_per_handler"] = True
                        raise MaxCorrectionsPerHandlerError(
                            msg, True, h.max_num_corrections, h)
                    else:
                        logger.warning(msg + " Correction not applied.")
                        continue
                if terminate_func is not None and h.is_terminating:
                    logger.info("Terminating job")
                    terminate_func()
                    # make sure we don't terminate twice
                    terminate_func = None
                d = h.correct()
                d["handler"] = h
                logger.error("\n" + pformat(d, indent=2, width=-1))
                corrections.append(d)
                h.n_applied_corrections += 1
        except Exception:
            if not self.skip_over_errors:
                raise
            else:
                import traceback
                logger.error("Bad handler %s " % h)
                logger.error(traceback.format_exc())
                corrections.append(
                    {"errors": ["Bad handler %s " % h],
                     "actions": []})
    self.total_errors += len(corrections)
    self.errors_current_job += len(corrections)
    self.run_log[-1]["corrections"].extend(corrections)
    # We do a dump of the run log after each check.
    dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4)
    return len(corrections) > 0
python
{ "resource": "" }
q12234
FeffModder.apply_actions
train
def apply_actions(self, actions):
    """
    Applies a list of actions to the FEFF Input Set and rewrites modified
    files.

    Args:
        actions [dict]: A list of actions of the form
            {'file': filename, 'action': moddermodification} or
            {'dict': feffinput_key, 'action': moddermodification}
    """
    modified = []
    for a in actions:
        if "dict" in a:
            k = a["dict"]
            modified.append(k)
            self.feffinp[k] = self.modify_object(a["action"],
                                                 self.feffinp[k])
        elif "file" in a:
            self.modify(a["action"], a["file"])
        else:
            raise ValueError("Unrecognized format: {}".format(a))
    if modified:
        feff = self.feffinp
        feff_input = "\n\n".join(str(feff[k]) for k in
                                 ["HEADER", "PARAMETERS", "POTENTIALS",
                                  "ATOMS"] if k in feff)
        for k, v in six.iteritems(feff):
            with open(os.path.join('.', k), "w") as f:
                f.write(str(v))
        with open(os.path.join('.', "feff.inp"), "w") as f:
            f.write(feff_input)
python
{ "resource": "" }
q12235
FileActions.file_modify
train
def file_modify(filename, settings):
    """
    Modifies file access.

    Args:
        filename (str): Filename.
        settings (dict): Can be "mode" or "owners"
    """
    for k, v in settings.items():
        if k == "mode":
            os.chmod(filename, v)
        if k == "owners":
            os.chown(filename, v)
python
{ "resource": "" }
q12236
Modder.modify
train
def modify(self, modification, obj):
    """
    Note that modify makes actual in-place modifications. It does not
    return a copy.

    Args:
        modification (dict): Modification must be
            {action_keyword : settings}. E.g.,
            {'_set': {'Hello': 'Universe', 'Bye': 'World'}}
        obj (dict/str/object): Object to modify depending on actions. For
            example, for DictActions, obj will be a dict to be modified.
            For FileActions, obj will be a string with a full pathname to
            a file.
    """
    for action, settings in modification.items():
        if action in self.supported_actions:
            self.supported_actions[action].__call__(obj, settings)
        elif self.strict:
            raise ValueError("{} is not a supported action!"
                             .format(action))
python
{ "resource": "" }
q12237
backup
train
def backup(filenames, prefix="error"):
    """
    Backup files to a tar.gz file. Used, for example, in backing up the
    files of an errored run before performing corrections.

    Args:
        filenames ([str]): List of files to backup. Supports wildcards,
            e.g., *.*.
        prefix (str): prefix to the files. Defaults to error, which means a
            series of error.1.tar.gz, error.2.tar.gz, ... will be
            generated.
    """
    num = max([0] + [int(f.split(".")[1])
                     for f in glob("{}.*.tar.gz".format(prefix))])
    filename = "{}.{}.tar.gz".format(prefix, num + 1)
    logging.info("Backing up run to {}.".format(filename))
    with tarfile.open(filename, "w:gz") as tar:
        for fname in filenames:
            for f in glob(fname):
                tar.add(f)
python
{ "resource": "" }
q12238
get_execution_host_info
train
def get_execution_host_info():
    """
    Tries to return a tuple describing the execution host.
    Doesn't work for all queueing systems.

    Returns:
        (HOSTNAME, CLUSTER_NAME)
    """
    host = os.environ.get('HOSTNAME', None)
    cluster = os.environ.get('SGE_O_HOST', None)
    if host is None:
        try:
            import socket
            host = host or socket.gethostname()
        except:
            pass
    return host or 'unknown', cluster or 'unknown'
python
{ "resource": "" }
q12239
QCJob.run
train
def run(self):
    """
    Perform the actual QChem run.

    Returns:
        (subprocess.Popen) Used for monitoring.
    """
    qclog = open(self.qclog_file, 'w')
    p = subprocess.Popen(self.current_command, stdout=qclog)
    return p
python
{ "resource": "" }
q12240
VaspJob.setup
train
def setup(self):
    """
    Performs initial setup for VaspJob, including overriding any settings
    and backing up.
    """
    decompress_dir('.')

    if self.backup:
        for f in VASP_INPUT_FILES:
            shutil.copy(f, "{}.orig".format(f))

    if self.auto_npar:
        try:
            incar = Incar.from_file("INCAR")
            # Only optimize NPAR for non-HF and non-RPA calculations.
            if not (incar.get("LHFCALC") or incar.get("LRPA") or
                    incar.get("LEPSILON")):
                if incar.get("IBRION") in [5, 6, 7, 8]:
                    # NPAR should not be set for Hessian matrix
                    # calculations, whether in DFPT or otherwise.
                    del incar["NPAR"]
                else:
                    import multiprocessing
                    # try sge environment variable first
                    # (since multiprocessing counts cores on the current
                    # machine only)
                    ncores = os.environ.get('NSLOTS') or \
                        multiprocessing.cpu_count()
                    ncores = int(ncores)
                    for npar in range(int(math.sqrt(ncores)), ncores):
                        if ncores % npar == 0:
                            incar["NPAR"] = npar
                            break
                incar.write_file("INCAR")
        except:
            pass

    if self.auto_continue:
        if os.path.exists("continue.json"):
            actions = loadfn("continue.json").get("actions")
            logger.info("Continuing previous VaspJob. Actions: {}".format(
                actions))
            backup(VASP_BACKUP_FILES, prefix="prev_run")
            VaspModder().apply_actions(actions)
        else:
            # Default functionality is to copy CONTCAR to POSCAR and set
            # ISTART to 1 in the INCAR, but other actions can be specified
            if self.auto_continue is True:
                actions = [{"file": "CONTCAR",
                            "action": {"_file_copy": {"dest": "POSCAR"}}},
                           {"dict": "INCAR",
                            "action": {"_set": {"ISTART": 1}}}]
            else:
                actions = self.auto_continue
            dumpfn({"actions": actions}, "continue.json")

    if self.settings_override is not None:
        VaspModder().apply_actions(self.settings_override)
python
{ "resource": "" }
q12241
VaspJob.run
train
def run(self):
    """
    Perform the actual VASP run.

    Returns:
        (subprocess.Popen) Used for monitoring.
    """
    cmd = list(self.vasp_cmd)
    if self.auto_gamma:
        vi = VaspInput.from_directory(".")
        kpts = vi["KPOINTS"]
        if kpts.style == Kpoints.supported_modes.Gamma \
                and tuple(kpts.kpts[0]) == (1, 1, 1):
            if self.gamma_vasp_cmd is not None and which(
                    self.gamma_vasp_cmd[-1]):
                cmd = self.gamma_vasp_cmd
            elif which(cmd[-1] + ".gamma"):
                cmd[-1] += ".gamma"
    logger.info("Running {}".format(" ".join(cmd)))
    with open(self.output_file, 'w') as f_std, \
            open(self.stderr_file, "w", buffering=1) as f_err:
        # use line buffering for stderr
        p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)
    return p
python
{ "resource": "" }
q12242
VaspJob.postprocess
train
def postprocess(self):
    """
    Postprocessing includes renaming and gzipping where necessary.
    Also copies the magmom to the incar if necessary.
    """
    for f in VASP_OUTPUT_FILES + [self.output_file]:
        if os.path.exists(f):
            if self.final and self.suffix != "":
                shutil.move(f, "{}{}".format(f, self.suffix))
            elif self.suffix != "":
                shutil.copy(f, "{}{}".format(f, self.suffix))

    if self.copy_magmom and not self.final:
        try:
            outcar = Outcar("OUTCAR")
            magmom = [m['tot'] for m in outcar.magnetization]
            incar = Incar.from_file("INCAR")
            incar['MAGMOM'] = magmom
            incar.write_file("INCAR")
        except:
            logger.error('MAGMOM copy from OUTCAR to INCAR failed')

    # Remove continuation so if a subsequent job is run in
    # the same directory, will not restart this job.
    if os.path.exists("continue.json"):
        os.remove("continue.json")
python
{ "resource": "" }
q12243
VaspJob.double_relaxation_run
train
def double_relaxation_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,
                          half_kpts_first_relax=False, auto_continue=False):
    """
    Returns a list of two jobs corresponding to an AFLOW style double
    relaxation run.

    Args:
        vasp_cmd (str): Command to run vasp as a list of args. For example,
            if you are using mpirun, it can be something like
            ["mpirun", "pvasp.5.2.11"]
        auto_npar (bool): Whether to automatically tune NPAR to be sqrt(
            number of cores) as recommended by VASP for DFT calculations.
            Generally, this results in significant speedups. Defaults to
            True. Set to False for HF, GW and RPA calculations.
        ediffg (float): Force convergence criteria for subsequent runs (
            ignored for the initial run.)
        half_kpts_first_relax (bool): Whether to halve the kpoint grid
            for the first relaxation. Speeds up difficult convergence
            considerably. Defaults to False.

    Returns:
        List of two jobs corresponding to an AFLOW style run.
    """
    incar_update = {"ISTART": 1}
    if ediffg:
        incar_update["EDIFFG"] = ediffg
    settings_overide_1 = None
    settings_overide_2 = [
        {"dict": "INCAR",
         "action": {"_set": incar_update}},
        {"file": "CONTCAR",
         "action": {"_file_copy": {"dest": "POSCAR"}}}]
    if half_kpts_first_relax and os.path.exists("KPOINTS") and \
            os.path.exists("POSCAR"):
        kpts = Kpoints.from_file("KPOINTS")
        orig_kpts_dict = kpts.as_dict()
        # lattice vectors with length < 8 will get >1 KPOINT
        kpts.kpts = np.round(np.maximum(np.array(kpts.kpts) / 2,
                                        1)).astype(int).tolist()
        low_kpts_dict = kpts.as_dict()
        settings_overide_1 = [
            {"dict": "KPOINTS",
             "action": {"_set": low_kpts_dict}}
        ]
        settings_overide_2.append(
            {"dict": "KPOINTS",
             "action": {"_set": orig_kpts_dict}}
        )

    return [VaspJob(vasp_cmd, final=False, suffix=".relax1",
                    auto_npar=auto_npar, auto_continue=auto_continue,
                    settings_override=settings_overide_1),
            VaspJob(vasp_cmd, final=True, backup=False, suffix=".relax2",
                    auto_npar=auto_npar, auto_continue=auto_continue,
                    settings_override=settings_overide_2)]
python
{ "resource": "" }
q12244
VaspJob.metagga_opt_run
train
def metagga_opt_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05,
                    half_kpts_first_relax=False, auto_continue=False):
    """
    Returns a list of three jobs to perform an optimization for any
    metaGGA functional. There is an initial calculation of the GGA
    wavefunction which is fed into the initial metaGGA optimization to
    precondition the electronic structure optimizer. The metaGGA
    optimization is performed using the double relaxation scheme.
    """
    incar = Incar.from_file("INCAR")
    # Defaults to using the SCAN metaGGA
    metaGGA = incar.get("METAGGA", "SCAN")

    # Pre-optimize WAVECAR and structure using regular GGA
    pre_opt_setings = [{"dict": "INCAR",
                        "action": {"_set": {"METAGGA": None,
                                            "LWAVE": True,
                                            "NSW": 0}}}]
    jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar,
                    final=False, suffix=".precondition",
                    settings_override=pre_opt_setings)]

    # Finish with regular double relaxation style run using SCAN
    jobs.extend(VaspJob.double_relaxation_run(
        vasp_cmd, auto_npar=auto_npar, ediffg=ediffg,
        half_kpts_first_relax=half_kpts_first_relax))

    # Ensure the first relaxation doesn't overwrite the original inputs
    jobs[1].backup = False

    # Update double_relaxation job to start from pre-optimized run
    post_opt_settings = [{"dict": "INCAR",
                          "action": {"_set": {"METAGGA": metaGGA,
                                              "ISTART": 1,
                                              "NSW": incar.get("NSW", 99),
                                              "LWAVE": incar.get("LWAVE",
                                                                 False)}}},
                         {"file": "CONTCAR",
                          "action": {"_file_copy": {"dest": "POSCAR"}}}]
    if jobs[1].settings_override:
        post_opt_settings = jobs[1].settings_override + post_opt_settings
    jobs[1].settings_override = post_opt_settings

    return jobs
python
{ "resource": "" }
q12245
VaspJob.full_opt_run
train
def full_opt_run(cls, vasp_cmd, vol_change_tol=0.02, max_steps=10,
                 ediffg=-0.05, half_kpts_first_relax=False,
                 **vasp_job_kwargs):
    """
    Returns a generator of jobs for a full optimization run. Basically,
    this runs an infinite series of geometry optimization jobs until the
    % vol change in a particular optimization is less than vol_change_tol.

    Args:
        vasp_cmd (str): Command to run vasp as a list of args. For example,
            if you are using mpirun, it can be something like
            ["mpirun", "pvasp.5.2.11"]
        vol_change_tol (float): The tolerance at which to stop a run.
            Defaults to 0.02, i.e., 2%.
        max_steps (int): The maximum number of runs. Defaults to 10 (
            highly unlikely that this limit is ever reached).
        ediffg (float): Force convergence criteria for subsequent runs (
            ignored for the initial run.)
        half_kpts_first_relax (bool): Whether to halve the kpoint grid
            for the first relaxation. Speeds up difficult convergence
            considerably. Defaults to False.
        \*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See
            :class:`custodian.vasp.jobs.VaspJob`.

    Returns:
        Generator of jobs.
    """
    for i in range(max_steps):
        if i == 0:
            settings = None
            backup = True
            if half_kpts_first_relax and os.path.exists("KPOINTS") and \
                    os.path.exists("POSCAR"):
                kpts = Kpoints.from_file("KPOINTS")
                orig_kpts_dict = kpts.as_dict()
                kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1).tolist()
                low_kpts_dict = kpts.as_dict()
                settings = [
                    {"dict": "KPOINTS",
                     "action": {"_set": low_kpts_dict}}
                ]
        else:
            backup = False
            initial = Poscar.from_file("POSCAR").structure
            final = Poscar.from_file("CONTCAR").structure
            vol_change = (final.volume - initial.volume) / initial.volume

            logger.info("Vol change = %.1f %%!" % (vol_change * 100))
            if abs(vol_change) < vol_change_tol:
                logger.info("Stopping optimization!")
                break
            else:
                incar_update = {"ISTART": 1}
                if ediffg:
                    incar_update["EDIFFG"] = ediffg
                settings = [
                    {"dict": "INCAR",
                     "action": {"_set": incar_update}},
                    {"file": "CONTCAR",
                     "action": {"_file_copy": {"dest": "POSCAR"}}}]
                if i == 1 and half_kpts_first_relax:
                    settings.append({"dict": "KPOINTS",
                                     "action": {"_set": orig_kpts_dict}})
        logger.info("Generating job = %d!" % (i + 1))
        yield VaspJob(vasp_cmd, final=False, backup=backup,
                      suffix=".relax%d" % (i + 1),
                      settings_override=settings, **vasp_job_kwargs)
python
{ "resource": "" }
q12246
VaspNEBJob.setup
train
def setup(self):
    """
    Performs initial setup for VaspNEBJob, including overriding any
    settings and backing up.
    """
    neb_dirs = self.neb_dirs

    if self.backup:
        # Back up KPOINTS, INCAR, POTCAR
        for f in VASP_NEB_INPUT_FILES:
            shutil.copy(f, "{}.orig".format(f))
        # Back up POSCARs
        for path in neb_dirs:
            poscar = os.path.join(path, "POSCAR")
            shutil.copy(poscar, "{}.orig".format(poscar))

    if self.half_kpts and os.path.exists("KPOINTS"):
        kpts = Kpoints.from_file("KPOINTS")
        kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1)
        kpts.kpts = kpts.kpts.astype(int).tolist()
        if tuple(kpts.kpts[0]) == (1, 1, 1):
            kpt_dic = kpts.as_dict()
            kpt_dic["generation_style"] = 'Gamma'
            kpts = Kpoints.from_dict(kpt_dic)
        kpts.write_file("KPOINTS")

    if self.auto_npar:
        try:
            incar = Incar.from_file("INCAR")
            import multiprocessing
            # Try sge environment variable first
            # (since multiprocessing counts cores on the current
            # machine only)
            ncores = os.environ.get('NSLOTS') or \
                multiprocessing.cpu_count()
            ncores = int(ncores)
            for npar in range(int(math.sqrt(ncores)), ncores):
                if ncores % npar == 0:
                    incar["NPAR"] = npar
                    break
            incar.write_file("INCAR")
        except:
            pass

    if self.auto_continue and \
            os.path.exists("STOPCAR") and \
            not os.access("STOPCAR", os.W_OK):
        # Remove STOPCAR
        os.chmod("STOPCAR", 0o644)
        os.remove("STOPCAR")

        # Copy CONTCAR to POSCAR
        for path in self.neb_sub:
            contcar = os.path.join(path, "CONTCAR")
            poscar = os.path.join(path, "POSCAR")
            shutil.copy(contcar, poscar)

    if self.settings_override is not None:
        VaspModder().apply_actions(self.settings_override)
python
{ "resource": "" }
q12247
VaspNEBJob.postprocess
train
def postprocess(self):
    """
    Postprocessing includes renaming and gzipping where necessary.
    """
    # Add suffix to all sub_dir/{items}
    for path in self.neb_dirs:
        for f in VASP_NEB_OUTPUT_SUB_FILES:
            f = os.path.join(path, f)
            if os.path.exists(f):
                if self.final and self.suffix != "":
                    shutil.move(f, "{}{}".format(f, self.suffix))
                elif self.suffix != "":
                    shutil.copy(f, "{}{}".format(f, self.suffix))

    # Add suffix to all output files
    for f in VASP_NEB_OUTPUT_FILES + [self.output_file]:
        if os.path.exists(f):
            if self.final and self.suffix != "":
                shutil.move(f, "{}{}".format(f, self.suffix))
            elif self.suffix != "":
                shutil.copy(f, "{}{}".format(f, self.suffix))
python
{ "resource": "" }
q12248
NwchemJob.setup
train
def setup(self):
    """
    Performs backup if necessary.
    """
    if self.backup:
        shutil.copy(self.input_file, "{}.orig".format(self.input_file))
python
{ "resource": "" }
q12249
NwchemJob.run
train
def run(self):
    """
    Performs actual nwchem run.
    """
    with zopen(self.output_file, 'w') as fout:
        return subprocess.Popen(self.nwchem_cmd + [self.input_file],
                                stdout=fout)
python
{ "resource": "" }
q12250
valid_GC
train
def valid_GC(x):
    """type function for argparse to check GC values.

    Check if the supplied value for minGC and maxGC is a valid input,
    being between 0 and 1
    """
    x = float(x)
    if x < 0.0 or x > 1.0:
        raise ArgumentTypeError("{} not in range [0.0, 1.0]".format(x))
    return x
python
{ "resource": "" }
q12251
filter_stream
train
def filter_stream(fq, args):
    """Filter a fastq file on stdin.

    Print fastq record to stdout if it passes
    - quality filter (optional)
    - length filter (optional)
    - min/maxGC filter (optional)
    Optionally trim a number of nucleotides from beginning and end.
    Record has to be longer than args.length (default 1) after trimming.
    Use a faster silent quality_check if no filtering on quality is
    required.
    """
    if args.quality:
        quality_check = ave_qual
    else:
        quality_check = silent_quality_check
    minlen = args.length + int(args.headcrop or 0) - (int(args.tailcrop or 0))
    for rec in SeqIO.parse(fq, "fastq"):
        if args.GC_filter:
            gc = (rec.seq.upper().count("C") +
                  rec.seq.upper().count("G")) / len(rec)
        else:
            gc = 0.50  # dummy variable
        if quality_check(rec.letter_annotations["phred_quality"]) > args.quality \
                and minlen <= len(rec) <= args.maxlength \
                and args.minGC <= gc <= args.maxGC:
            print(rec[args.headcrop:args.tailcrop].format("fastq"), end="")
python
{ "resource": "" }
q12252
filter_using_summary
train
def filter_using_summary(fq, args):
    """Use quality scores from albacore summary file for filtering.

    Use the summary file from albacore for a more accurate quality
    estimate. Get the dataframe from nanoget, convert to dictionary.
    """
    data = {entry[0]: entry[1] for entry in process_summary(
        summaryfile=args.summary,
        threads="NA",
        readtype=args.readtype,
        barcoded=False)[["readIDs", "quals"]].itertuples(index=False)}
    try:
        for record in SeqIO.parse(fq, "fastq"):
            if data[record.id] > args.quality \
                    and args.length <= len(record) <= args.maxlength:
                print(record[args.headcrop:args.tailcrop].format("fastq"),
                      end="")
    except KeyError:
        logging.error("mismatch between summary and fastq: "
                      "{} was not found in the summary file.".format(
                          record.id))
        sys.exit("\nERROR: mismatch between sequencing_summary and fastq "
                 "file: {} was not found in the summary file.\n"
                 "Quitting.".format(record.id))
python
{ "resource": "" }
q12253
master_key_required
train
def master_key_required(func):
    '''decorator describing methods that require the master key'''
    def ret(obj, *args, **kw):
        conn = ACCESS_KEYS
        if not (conn and conn.get('master_key')):
            message = '%s requires the master key' % func.__name__
            raise core.ParseError(message)
        func(obj, *args, **kw)
    return ret
python
{ "resource": "" }
q12254
ParseBase.execute
train
def execute(cls, uri, http_verb, extra_headers=None, batch=False,
            _body=None, **kw):
    """
    If batch == False, execute a command with the given parameters and
    return the response JSON.
    If batch == True, return the dictionary that would be used in a batch
    command.
    """
    if batch:
        urlsplitter = urlparse(API_ROOT).netloc
        ret = {"method": http_verb,
               "path": uri.split(urlsplitter, 1)[1]}
        if kw:
            ret["body"] = kw
        return ret

    if not ('app_id' in ACCESS_KEYS and 'rest_key' in ACCESS_KEYS):
        raise core.ParseError('Missing connection credentials')

    app_id = ACCESS_KEYS.get('app_id')
    rest_key = ACCESS_KEYS.get('rest_key')
    master_key = ACCESS_KEYS.get('master_key')

    url = uri if uri.startswith(API_ROOT) else cls.ENDPOINT_ROOT + uri
    if _body is None:
        data = kw and json.dumps(kw, default=date_handler) or "{}"
    else:
        data = _body
    if http_verb == 'GET' and data:
        url += '?%s' % urlencode(kw)
        data = None
    else:
        if cls.__name__ == 'File':
            data = data
        else:
            data = data.encode('utf-8')

    headers = {
        'Content-type': 'application/json',
        'X-Parse-Application-Id': app_id,
        'X-Parse-REST-API-Key': rest_key
    }
    headers.update(extra_headers or {})

    if cls.__name__ == 'File':
        request = Request(url.encode('utf-8'), data, headers)
    else:
        request = Request(url, data, headers)

    if ACCESS_KEYS.get('session_token'):
        request.add_header('X-Parse-Session-Token',
                           ACCESS_KEYS.get('session_token'))
    elif master_key:
        request.add_header('X-Parse-Master-Key', master_key)

    request.get_method = lambda: http_verb

    try:
        response = urlopen(request, timeout=CONNECTION_TIMEOUT)
    except HTTPError as e:
        exc = {
            400: core.ResourceRequestBadRequest,
            401: core.ResourceRequestLoginRequired,
            403: core.ResourceRequestForbidden,
            404: core.ResourceRequestNotFound
        }.get(e.code, core.ParseError)
        raise exc(e.read())

    return json.loads(response.read().decode('utf-8'))
python
{ "resource": "" }
q12255
ParseBatcher.batch
train
def batch(self, methods):
    """
    Given a list of create, update or delete methods to call, call all
    of them in a single batch operation.
    """
    methods = list(methods)  # methods can be an iterator
    if not methods:
        # Also accepts an empty list (or generator), which allows calling
        # batch directly with a (possibly empty) query result.
        return
    queries, callbacks = list(zip(*[m(batch=True) for m in methods]))
    # perform all the operations in one batch
    responses = self.execute("", "POST", requests=queries)
    # perform the callbacks with the response data (updating the existing
    # objects, etc)
    batched_errors = []
    for callback, response in zip(callbacks, responses):
        if "success" in response:
            callback(response["success"])
        else:
            batched_errors.append(response["error"])

    if batched_errors:
        raise core.ParseBatchError(batched_errors)
python
{ "resource": "" }
q12256
Installation.update_channels
train
def update_channels(cls, installation_id, channels_to_add=set(),
                    channels_to_remove=set(), **kw):
    """
    Allow an application to manually subscribe or unsubscribe an
    installation to a certain push channel in a unified operation.

    This is based on:
    https://www.parse.com/docs/rest#installations-updating

    installation_id: the installation id you'd like to add a channel to
    channels_to_add: the name of the channel you'd like to subscribe the
        user to
    channels_to_remove: the name of the channel you'd like to unsubscribe
        the user from
    """
    installation_url = cls._get_installation_url(installation_id)
    current_config = cls.GET(installation_url)
    new_channels = list(set(current_config['channels'])
                        .union(channels_to_add)
                        .difference(channels_to_remove))
    cls.PUT(installation_url, channels=new_channels)
python
{ "resource": "" }
q12257
Queryset._fetch
train
def _fetch(self, count=False):
    """Return a list of objects matching query, or if count == True
    return only the number of objects matching.
    """
    if self._result_cache is not None:
        return len(self._result_cache) if count else self._result_cache
    options = dict(self._options)  # make a local copy
    if self._where:
        # JSON encode WHERE values
        options['where'] = json.dumps(self._where)
    if self._select_related:
        options['include'] = ','.join(self._select_related)
    if count:
        return self._manager._count(**options)

    self._result_cache = self._manager._fetch(**options)
    return self._result_cache
python
{ "resource": "" }
q12258
complex_type
train
def complex_type(name=None):
    '''Decorator for registering complex types'''
    def wrapped(cls):
        ParseType.type_mapping[name or cls.__name__] = cls
        return cls
    return wrapped
python
{ "resource": "" }
q12259
Object.schema
train
def schema(cls):
    """Retrieves the class' schema."""
    root = '/'.join([API_ROOT, 'schemas', cls.__name__])
    schema = cls.GET(root)
    return schema
python
{ "resource": "" }
q12260
Object.schema_delete_field
train
def schema_delete_field(cls, key):
    """Deletes a field."""
    root = '/'.join([API_ROOT, 'schemas', cls.__name__])
    payload = {
        'className': cls.__name__,
        'fields': {
            key: {
                '__op': 'Delete'
            }
        }
    }
    cls.PUT(root, **payload)
python
{ "resource": "" }
q12261
login_required
train
def login_required(func):
    '''decorator describing User methods that need to be logged in'''
    def ret(obj, *args, **kw):
        if not hasattr(obj, 'sessionToken'):
            message = '%s requires a logged-in session' % func.__name__
            raise ResourceRequestLoginRequired(message)
        return func(obj, *args, **kw)
    return ret
python
{ "resource": "" }
q12262
parse
train
def parse(d):
    """Convert iso formatted timestamps found as values in the dict d to
    datetime objects.

    :return: A shallow copy of d with converted timestamps.
    """
    res = {}
    for k, v in iteritems(d):
        if isinstance(v, string_types) and DATETIME_ISO_FORMAT.match(v):
            v = dateutil.parser.parse(v)
        res[k] = v
    return res
python
{ "resource": "" }
q12263
dump
train
def dump(obj, path, **kw):
    """Python 2 + 3 compatible version of json.dump.

    :param obj: The object to be dumped.
    :param path: The path of the JSON file to be written.
    :param kw: Keyword parameters are passed to json.dump
    """
    open_kw = {'mode': 'w'}
    if PY3:  # pragma: no cover
        open_kw['encoding'] = 'utf-8'
    # avoid indented lines ending with ", " on PY2
    if kw.get('indent') and kw.get('separators') is None:
        kw['separators'] = (',', ': ')
    with open(str(path), **open_kw) as fp:
        return json.dump(obj, fp, **kw)
python
{ "resource": "" }
q12264
strip_brackets
train
def strip_brackets(text, brackets=None):
    """Strip brackets and what is inside brackets from text.

    .. note:: If the text contains only one opening bracket, the rest of
        the text will be ignored. This is a feature, not a bug, as we want
        to avoid that this function raises errors too easily.
    """
    res = []
    for c, type_ in _tokens(text, brackets=brackets):
        if type_ == TextType.text:
            res.append(c)
    return ''.join(res).strip()
python
{ "resource": "" }
q12265
split_text_with_context
train
def split_text_with_context(text, separators=WHITESPACE, brackets=None):
    """Splits text at separators outside of brackets.

    :param text:
    :param separators: An iterable of single character tokens.
    :param brackets:
    :return: A `list` of non-empty chunks.

    .. note:: This function leaves content in brackets in the chunks.
    """
    res, chunk = [], []
    for c, type_ in _tokens(text, brackets=brackets):
        if type_ == TextType.text and c in separators:
            res.append(''.join(chunk).strip())
            chunk = []
        else:
            chunk.append(c)
    res.append(''.join(chunk).strip())
    return nfilter(res)
python
{ "resource": "" }
q12266
split_text
train
def split_text(text, separators=re.compile('\s'), brackets=None, strip=False):
    """Split text along the separators unless they appear within brackets.

    :param separators: An iterable of single characters or a compiled
        regex pattern.
    :param brackets: `dict` mapping start tokens to end tokens of what is
        to be recognized as brackets.

    .. note:: This function will also strip content within brackets.
    """
    if not isinstance(separators, PATTERN_TYPE):
        separators = re.compile(
            '[{0}]'.format(''.join('\{0}'.format(c) for c in separators)))
    return nfilter(
        s.strip() if strip else s
        for s in separators.split(strip_brackets(text, brackets=brackets)))
python
{ "resource": "" }
q12267
Entry.get
train
def get(self, key, default=None):
    """Retrieve the first value for a marker or None."""
    for k, v in self:
        if k == key:
            return v
    return default
python
{ "resource": "" }
q12268
SFM.read
train
def read(self, filename, encoding='utf-8', marker_map=None,
         entry_impl=Entry, entry_sep='\n\n', entry_prefix=None,
         keep_empty=False):
    """Extend the list by parsing new entries from a file.

    :param filename:
    :param encoding:
    :param marker_map: A dict used to map marker names.
    :param entry_impl: Subclass of Entry or None
    :param entry_sep:
    :param entry_prefix:
    """
    marker_map = marker_map or {}
    for entry in parse(
            filename, encoding, entry_sep, entry_prefix or entry_sep,
            keep_empty=keep_empty):
        if entry:
            self.append(entry_impl([(marker_map.get(k, k), v)
                                    for k, v in entry]))
python
{ "resource": "" }
q12269
SFM.write
train
def write(self, filename, encoding='utf-8'):
    """Write the list of entries to a file.

    :param filename:
    :param encoding:
    :return:
    """
    with io.open(str(filename), 'w', encoding=encoding) as fp:
        for entry in self:
            fp.write(entry.__unicode__())
            fp.write('\n\n')
python
{ "resource": "" }
q12270
data_url
train
def data_url(content, mimetype=None):
    """
    Returns content encoded as base64 Data URI.

    :param content: bytes or str or Path
    :param mimetype: mimetype for the content
    :return: str object (consisting only of ASCII, though)

    .. seealso:: https://en.wikipedia.org/wiki/Data_URI_scheme
    """
    if isinstance(content, pathlib.Path):
        if not mimetype:
            mimetype = guess_type(content.name)[0]
        with content.open('rb') as fp:
            content = fp.read()
    else:
        if isinstance(content, text_type):
            content = content.encode('utf8')
    return "data:{0};base64,{1}".format(
        mimetype or 'application/octet-stream', b64encode(content).decode())
python
{ "resource": "" }
q12271
to_binary
train
def to_binary(s, encoding='utf8'):
    """Portable cast function.

    In python 2 the ``str`` function which is used to coerce objects to
    bytes does not accept an encoding argument, whereas python 3's
    ``bytes`` function requires one.

    :param s: object to be converted to binary_type
    :return: binary_type instance, representing s.
    """
    if PY3:  # pragma: no cover
        return s if isinstance(s, binary_type) else binary_type(
            s, encoding=encoding)
    return binary_type(s)
python
{ "resource": "" }
q12272
dict_merged
train
def dict_merged(d, _filter=None, **kw):
    """Update dictionary d with the items passed as kw if the value
    passes _filter."""
    def f(s):
        if _filter:
            return _filter(s)
        return s is not None
    d = d or {}
    for k, v in iteritems(kw):
        if f(v):
            d[k] = v
    return d
python
{ "resource": "" }
q12273
xmlchars
train
def xmlchars(text):
    """Not all of UTF-8 is considered valid character data in XML ...

    Thus, this function can be used to remove illegal characters from
    ``text``.
    """
    invalid = list(range(0x9))
    invalid.extend([0xb, 0xc])
    invalid.extend(range(0xe, 0x20))
    return re.sub('|'.join('\\x%0.2X' % i for i in invalid), '', text)
python
{ "resource": "" }
q12274
slug
train
def slug(s, remove_whitespace=True, lowercase=True):
    """Condensed version of s, containing only lowercase alphanumeric
    characters."""
    res = ''.join(c for c in unicodedata.normalize('NFD', s)
                  if unicodedata.category(c) != 'Mn')
    if lowercase:
        res = res.lower()
    for c in string.punctuation:
        res = res.replace(c, '')
    res = re.sub('\s+', '' if remove_whitespace else ' ', res)
    res = res.encode('ascii', 'ignore').decode('ascii')
    assert re.match('[ A-Za-z0-9]*$', res)
    return res
python
{ "resource": "" }
q12275
encoded
train
def encoded(string, encoding='utf-8'):
    """Cast string to binary_type.

    :param string: six.binary_type or six.text_type
    :param encoding: encoding which the object is forced to
    :return: six.binary_type
    """
    assert isinstance(string, string_types) or isinstance(string,
                                                          binary_type)
    if isinstance(string, text_type):
        return string.encode(encoding)
    try:
        # make sure the string can be decoded in the specified encoding ...
        string.decode(encoding)
        return string
    except UnicodeDecodeError:
        # ... if not use latin1 as best guess to decode the string before
        # encoding as specified.
        return string.decode('latin1').encode(encoding)
python
{ "resource": "" }
q12276
readlines
train
def readlines(p,
              encoding=None,
              strip=False,
              comment=None,
              normalize=None,
              linenumbers=False):
    """
    Read a `list` of lines from a text file.

    :param p: File path (or `list` or `tuple` of text)
    :param encoding: Registered codec.
    :param strip: If `True`, strip leading and trailing whitespace.
    :param comment: String used as syntax to mark comment lines. When not
        `None`, commented lines will be stripped. This implies
        `strip=True`.
    :param normalize: 'NFC', 'NFKC', 'NFD', 'NFKD'
    :param linenumbers: return also line numbers.
    :return: `list` of text lines or pairs (`int`, text or `None`).
    """
    if comment:
        strip = True
    if isinstance(p, (list, tuple)):
        res = [l.decode(encoding) if encoding else l for l in p]
    else:
        with Path(p).open(encoding=encoding or 'utf-8') as fp:
            res = fp.readlines()
    if strip:
        res = [l.strip() or None for l in res]
    if comment:
        res = [None if l and l.startswith(comment) else l for l in res]
    if normalize:
        res = [unicodedata.normalize(normalize, l) if l else l for l in res]
    if linenumbers:
        return [(n, l) for n, l in enumerate(res, 1)]
    return [l for l in res if l is not None]
python
{ "resource": "" }
q12277
walk
train
def walk(p, mode='all', **kw):
    """Wrapper for `os.walk`, yielding `Path` objects.

    :param p: root of the directory tree to walk.
    :param mode: 'all|dirs|files', defaulting to 'all'.
    :param kw: Keyword arguments are passed to `os.walk`.
    :return: Generator for the requested Path objects.
    """
    for dirpath, dirnames, filenames in os.walk(as_posix(p), **kw):
        if mode in ('all', 'dirs'):
            for dirname in dirnames:
                yield Path(dirpath).joinpath(dirname)
        if mode in ('all', 'files'):
            for fname in filenames:
                yield Path(dirpath).joinpath(fname)
python
{ "resource": "" }
q12278
Source.bibtex
train
def bibtex(self):
    """Represent the source in BibTeX format.

    :return: string encoding the source in BibTeX syntax.
    """
    m = max(itertools.chain(map(len, self), [0]))
    fields = (" %s = {%s}" % (k.ljust(m), self[k]) for k in self)
    return "@%s{%s,\n%s\n}" % (
        getattr(self.genre, 'value', self.genre), self.id, ",\n".join(fields))
python
{ "resource": "" }
q12279
Client.request
train
def request(self, path, method='GET', params=None, type=REST_TYPE):
    """Builds a request, gets a response and decodes it."""
    response_text = self._get_http_client(type).request(path, method, params)
    if not response_text:
        return response_text
    response_json = json.loads(response_text)
    if 'errors' in response_json:
        raise ErrorException([Error().load(e) for e in response_json['errors']])
    return response_json
python
{ "resource": "" }
q12280
Client.message_create
train
def message_create(self, originator, recipients, body, params=None):
    """Create a new message."""
    if params is None:
        params = {}
    if isinstance(recipients, list):
        recipients = ','.join(recipients)
    params.update({'originator': originator, 'body': body, 'recipients': recipients})
    return Message().load(self.request('messages', 'POST', params))
python
{ "resource": "" }
q12281
Client.voice_message_create
train
def voice_message_create(self, recipients, body, params=None):
    """Create a new voice message."""
    if params is None:
        params = {}
    if isinstance(recipients, list):
        recipients = ','.join(recipients)
    params.update({'recipients': recipients, 'body': body})
    return VoiceMessage().load(self.request('voicemessages', 'POST', params))
python
{ "resource": "" }
q12282
Client.lookup
train
def lookup(self, phonenumber, params=None):
    """Do a new lookup."""
    if params is None:
        params = {}
    return Lookup().load(self.request('lookup/' + str(phonenumber), 'GET', params))
python
{ "resource": "" }
q12283
Client.lookup_hlr
train
def lookup_hlr(self, phonenumber, params=None):
    """Retrieve the information of a specific HLR lookup."""
    if params is None:
        params = {}
    return HLR().load(self.request('lookup/' + str(phonenumber) + '/hlr', 'GET', params))
python
{ "resource": "" }
q12284
Client.verify_create
train
def verify_create(self, recipient, params=None):
    """Create a new verification."""
    if params is None:
        params = {}
    params.update({'recipient': recipient})
    return Verify().load(self.request('verify', 'POST', params))
python
{ "resource": "" }
q12285
Client.verify_verify
train
def verify_verify(self, id, token):
    """Verify the token of a specific verification."""
    return Verify().load(self.request('verify/' + str(id), params={'token': token}))
python
{ "resource": "" }
q12286
BaseList.items
train
def items(self, value):
    """Create typed objects from the dicts."""
    items = []
    for item in value:
        items.append(self.itemType().load(item))
    self._items = items
python
{ "resource": "" }
q12287
HttpClient.request
train
def request(self, path, method='GET', params=None):
    """Builds a request and gets a response."""
    if params is None:
        params = {}
    url = urljoin(self.endpoint, path)
    headers = {
        'Accept': 'application/json',
        'Authorization': 'AccessKey ' + self.access_key,
        'User-Agent': self.user_agent,
        'Content-Type': 'application/json'
    }
    if method == 'DELETE':
        response = requests.delete(url, verify=True, headers=headers, data=json.dumps(params))
    elif method == 'GET':
        response = requests.get(url, verify=True, headers=headers, params=params)
    elif method == 'PATCH':
        response = requests.patch(url, verify=True, headers=headers, data=json.dumps(params))
    elif method == 'POST':
        response = requests.post(url, verify=True, headers=headers, data=json.dumps(params))
    elif method == 'PUT':
        response = requests.put(url, verify=True, headers=headers, data=json.dumps(params))
    else:
        raise ValueError(str(method) + ' is not a supported HTTP method')
    if response.status_code in self.__supported_status_codes:
        response_text = response.text
    else:
        response.raise_for_status()
    return response_text
python
{ "resource": "" }
q12288
cython_debug_files
train
def cython_debug_files():
    """
    Cython extra debug information files
    """
    # Search all subdirectories of sys.path directories for a
    # "cython_debug" directory. Note that sys_path is a variable set by
    # cysignals-CSI. It may differ from sys.path if GDB is run with a
    # different Python interpreter.
    files = []
    for path in sys_path:  # noqa
        pattern = os.path.join(path, '*', 'cython_debug', 'cython_debug_info_*')
        files.extend(glob.glob(pattern))
    return files
python
{ "resource": "" }
q12289
ColorizedPhoXiSensor._colorize
train
def _colorize(self, depth_im, color_im):
    """Colorize a depth image from the PhoXi using a color image from the webcam.

    Parameters
    ----------
    depth_im : DepthImage
        The PhoXi depth image.
    color_im : ColorImage
        Corresponding color image.

    Returns
    -------
    ColorImage
        A colorized image corresponding to the PhoXi depth image.
    """
    # Project the point cloud into the webcam's frame
    target_shape = (depth_im.data.shape[0], depth_im.data.shape[1], 3)
    pc_depth = self._phoxi.ir_intrinsics.deproject(depth_im)
    pc_color = self._T_webcam_world.inverse().dot(self._T_phoxi_world).apply(pc_depth)

    # Sort the points by their distance from the webcam's aperture
    pc_data = pc_color.data.T
    dists = np.linalg.norm(pc_data, axis=1)
    order = np.argsort(dists)
    pc_data = pc_data[order]
    pc_color = PointCloud(pc_data.T, frame=self._webcam.color_intrinsics.frame)
    sorted_dists = dists[order]
    sorted_depths = depth_im.data.flatten()[order]

    # Generate image coordinates for each sorted point
    icds = self._webcam.color_intrinsics.project(pc_color).data.T

    # Create mask for points that are masked by others
    rounded_icds = np.array(icds / 3.0, dtype=np.uint32)
    unique_icds, unique_inds, unique_inv = np.unique(rounded_icds, axis=0,
                                                     return_index=True,
                                                     return_inverse=True)
    icd_depths = sorted_dists[unique_inds]
    min_depths_pp = icd_depths[unique_inv]
    depth_delta_mask = np.abs(min_depths_pp - sorted_dists) < 5e-3

    # Create mask for points with missing depth or that lie outside the image
    valid_mask = np.logical_and(
        np.logical_and(icds[:, 0] >= 0, icds[:, 0] < self._webcam.color_intrinsics.width),
        np.logical_and(icds[:, 1] >= 0, icds[:, 1] < self._webcam.color_intrinsics.height))
    valid_mask = np.logical_and(valid_mask, sorted_depths != 0.0)
    valid_mask = np.logical_and(valid_mask, depth_delta_mask)
    valid_icds = icds[valid_mask]

    colors = color_im.data[valid_icds[:, 1], valid_icds[:, 0], :]
    color_im_data = np.zeros((target_shape[0] * target_shape[1], target_shape[2]),
                             dtype=np.uint8)
    color_im_data[valid_mask] = colors
    color_im_data[order] = color_im_data.copy()
    color_im_data = color_im_data.reshape(target_shape)
    return ColorImage(color_im_data, frame=self._frame)
python
{ "resource": "" }
q12290
RgbdSensorFactory.sensor
train
def sensor(sensor_type, cfg):
    """ Creates a camera sensor of the specified type.

    Parameters
    ----------
    sensor_type : :obj:`str`
        the type of the sensor (real or virtual)
    cfg : :obj:`YamlConfig`
        dictionary of parameters for sensor initialization
    """
    sensor_type = sensor_type.lower()
    if sensor_type == 'kinect2':
        s = Kinect2Sensor(packet_pipeline_mode=cfg['pipeline_mode'],
                          device_num=cfg['device_num'],
                          frame=cfg['frame'])
    elif sensor_type == 'bridged_kinect2':
        s = KinectSensorBridged(quality=cfg['quality'], frame=cfg['frame'])
    elif sensor_type == 'primesense':
        flip_images = True
        if 'flip_images' in cfg.keys():
            flip_images = cfg['flip_images']
        s = PrimesenseSensor(auto_white_balance=cfg['auto_white_balance'],
                             flip_images=flip_images,
                             frame=cfg['frame'])
    elif sensor_type == 'virtual':
        s = VirtualSensor(cfg['image_dir'], frame=cfg['frame'])
    elif sensor_type == 'tensor_dataset':
        s = TensorDatasetVirtualSensor(cfg['dataset_dir'], frame=cfg['frame'])
    elif sensor_type == 'primesense_ros':
        s = PrimesenseSensor_ROS(frame=cfg['frame'])
    elif sensor_type == 'ensenso':
        s = EnsensoSensor(frame=cfg['frame'])
    elif sensor_type == 'phoxi':
        s = PhoXiSensor(frame=cfg['frame'],
                        device_name=cfg['device_name'],
                        size=cfg['size'])
    elif sensor_type == 'webcam':
        s = WebcamSensor(frame=cfg['frame'], device_id=cfg['device_id'])
    elif sensor_type == 'colorized_phoxi':
        s = ColorizedPhoXiSensor(frame=cfg['frame'],
                                 phoxi_config=cfg['phoxi_config'],
                                 webcam_config=cfg['webcam_config'],
                                 calib_dir=cfg['calib_dir'])
    elif sensor_type == 'realsense':
        s = RealSenseSensor(
            cam_id=cfg['cam_id'],
            filter_depth=cfg['filter_depth'],
            frame=cfg['frame'],
        )
    else:
        raise ValueError('RGBD sensor type %s not supported' % (sensor_type))
    return s
python
{ "resource": "" }
q12291
FeatureMatcher.get_point_index
train
def get_point_index(point, all_points, eps=1e-4):
    """ Get the index of a point in an array """
    inds = np.where(np.linalg.norm(point - all_points, axis=1) < eps)
    if inds[0].shape[0] == 0:
        return -1
    return inds[0][0]
python
{ "resource": "" }
q12292
RawDistanceFeatureMatcher.match
train
def match(self, source_obj_features, target_obj_features):
    """ Matches features between two graspable objects based on a full
    distance matrix.

    Parameters
    ----------
    source_obj_features : :obj:`BagOfFeatures`
        bag of the source object's features
    target_obj_features : :obj:`BagOfFeatures`
        bag of the target object's features

    Returns
    -------
    corrs : :obj:`Correspondences`
        the correspondences between source and target
    """
    if not isinstance(source_obj_features, f.BagOfFeatures):
        raise ValueError('Must supply source bag of object features')
    if not isinstance(target_obj_features, f.BagOfFeatures):
        raise ValueError('Must supply target bag of object features')

    # source feature descriptors and keypoints
    source_descriptors = source_obj_features.descriptors
    target_descriptors = target_obj_features.descriptors
    source_keypoints = source_obj_features.keypoints
    target_keypoints = target_obj_features.keypoints

    # calculate distance between this model's descriptors and each of the
    # other model's descriptors
    dists = spatial.distance.cdist(source_descriptors, target_descriptors)

    # calculate the indices of the target model that minimize the distance
    # to the descriptors in this model
    source_closest_descriptors = dists.argmin(axis=1)
    target_closest_descriptors = dists.argmin(axis=0)
    match_indices = []
    source_matched_points = np.zeros((0, 3))
    target_matched_points = np.zeros((0, 3))

    # calculate which points/indices the closest descriptors correspond to
    for i, j in enumerate(source_closest_descriptors):
        # for now, only keep correspondences that are a 2-way match
        if target_closest_descriptors[j] == i:
            match_indices.append(j)
            source_matched_points = np.r_[source_matched_points, source_keypoints[i:i+1, :]]
            target_matched_points = np.r_[target_matched_points, target_keypoints[j:j+1, :]]
        else:
            match_indices.append(-1)
    return Correspondences(match_indices, source_matched_points, target_matched_points)
python
{ "resource": "" }
q12293
PointToPlaneFeatureMatcher.match
train
def match(self, source_points, target_points, source_normals, target_normals):
    """ Matches points between two point-normal sets. Uses the closest ip to
    choose matches, with distance for thresholding only.

    Parameters
    ----------
    source_points : Nx3 :obj:`numpy.ndarray`
        source object points
    target_points : Nx3 :obj:`numpy.ndarray`
        target object points
    source_normals : Nx3 :obj:`numpy.ndarray`
        source object outward-pointing normals
    target_normals : Nx3 :obj:`numpy.ndarray`
        target object outward-pointing normals

    Returns
    -------
    :obj:`Correspondences`
        the correspondences between source and target
    """
    # compute the distances and inner products between the point sets
    dists = ssd.cdist(source_points, target_points, 'euclidean')
    # abs because we don't have correct orientations
    ip = source_normals.dot(target_normals.T)
    source_ip = source_points.dot(target_normals.T)
    target_ip = target_points.dot(target_normals.T)
    target_ip = np.diag(target_ip)
    target_ip = np.tile(target_ip, [source_points.shape[0], 1])
    abs_diff = np.abs(source_ip - target_ip)  # difference in inner products

    # mark invalid correspondences
    invalid_dists = np.where(dists > self.dist_thresh_)
    abs_diff[invalid_dists[0], invalid_dists[1]] = np.inf
    invalid_norms = np.where(ip < self.norm_thresh_)
    abs_diff[invalid_norms[0], invalid_norms[1]] = np.inf

    # choose the closest matches
    match_indices = np.argmin(abs_diff, axis=1)
    match_vals = np.min(abs_diff, axis=1)
    invalid_matches = np.where(match_vals == np.inf)
    match_indices[invalid_matches[0]] = -1
    return NormalCorrespondences(match_indices, source_points, target_points,
                                 source_normals, target_normals)
python
{ "resource": "" }
q12294
RealSenseSensor._config_pipe
train
def _config_pipe(self):
    """Configures the pipeline to stream color and depth.
    """
    self._cfg.enable_device(self.id)

    # configure the color stream
    self._cfg.enable_stream(
        rs.stream.color,
        RealSenseSensor.COLOR_IM_WIDTH,
        RealSenseSensor.COLOR_IM_HEIGHT,
        rs.format.bgr8,
        RealSenseSensor.FPS
    )

    # configure the depth stream
    self._cfg.enable_stream(
        rs.stream.depth,
        RealSenseSensor.DEPTH_IM_WIDTH,
        360 if self._depth_align else RealSenseSensor.DEPTH_IM_HEIGHT,
        rs.format.z16,
        RealSenseSensor.FPS
    )
python
{ "resource": "" }
q12295
RealSenseSensor._set_depth_scale
train
def _set_depth_scale(self):
    """Retrieve the scale of the depth sensor.
    """
    sensor = self._profile.get_device().first_depth_sensor()
    self._depth_scale = sensor.get_depth_scale()
python
{ "resource": "" }
q12296
RealSenseSensor._set_intrinsics
train
def _set_intrinsics(self):
    """Read the intrinsics matrix from the stream.
    """
    strm = self._profile.get_stream(rs.stream.color)
    obj = strm.as_video_stream_profile().get_intrinsics()
    self._intrinsics[0, 0] = obj.fx
    self._intrinsics[1, 1] = obj.fy
    self._intrinsics[0, 2] = obj.ppx
    self._intrinsics[1, 2] = obj.ppy
python
{ "resource": "" }
q12297
RealSenseSensor._read_color_and_depth_image
train
def _read_color_and_depth_image(self):
    """Read a color and depth image from the device.
    """
    frames = self._pipe.wait_for_frames()
    if self._depth_align:
        frames = self._align.process(frames)

    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    if not depth_frame or not color_frame:
        logging.warning('Could not retrieve frames.')
        return None, None

    if self._filter_depth:
        depth_frame = self._filter_depth_frame(depth_frame)

    # convert to numpy arrays
    depth_image = self._to_numpy(depth_frame, np.float32)
    color_image = self._to_numpy(color_frame, np.uint8)

    # convert depth to meters
    depth_image *= self._depth_scale

    # bgr to rgb
    color_image = color_image[..., ::-1]

    depth = DepthImage(depth_image, frame=self._frame)
    color = ColorImage(color_image, frame=self._frame)
    return color, depth
python
{ "resource": "" }
q12298
EnsensoSensor._set_format
train
def _set_format(self, msg):
    """ Set the buffer formatting. """
    num_points = msg.height * msg.width
    self._format = '<' + num_points * 'ffff'
python
{ "resource": "" }
q12299
EnsensoSensor._set_camera_properties
train
def _set_camera_properties(self, msg):
    """ Set the camera intrinsics from an info msg. """
    focal_x = msg.K[0]
    focal_y = msg.K[4]
    center_x = msg.K[2]
    center_y = msg.K[5]
    im_height = msg.height
    im_width = msg.width
    self._camera_intr = CameraIntrinsics(self._frame, focal_x, focal_y,
                                         center_x, center_y,
                                         height=im_height,
                                         width=im_width)
python
{ "resource": "" }