code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
n = 0 for i, c in enumerate(namespace): n += (_LEX_DISTANCE[MAX_NAMESPACE_LENGTH - i - 1] * NAMESPACE_CHARACTERS.index(c) + 1) return n
def _namespace_to_ord(namespace)
Converts a namespace string into an int representing its lexicographic order. >>> _namespace_to_ord('') 0 >>> _namespace_to_ord('_') 1 >>> _namespace_to_ord('__') 2 Args: namespace: A namespace string. Returns: An int representing the lexicographic order of the given namespace string.
10.477764
11.295026
0.927644
if namespace: return db.Key.from_path(metadata.Namespace.KIND_NAME, namespace, _app=app) else: return db.Key.from_path(metadata.Namespace.KIND_NAME, metadata.Namespace.EMPTY_NAMESPACE_ID, _app=app)
def _key_for_namespace(namespace, app)
Return the __namespace__ key for a namespace. Args: namespace: The namespace whose key is requested. app: The id of the application that the key belongs to. Returns: A db.Key representing the namespace.
3.45805
3.55854
0.971761
ns_query = datastore.Query('__namespace__', keys_only=True, _app=app) return list(ns_query.Run(limit=limit, batch_size=limit))
def get_namespace_keys(app, limit)
Get namespace keys.
6.36247
5.295509
1.201484
if self.is_single_namespace: return [self] mid_point = (_namespace_to_ord(self.namespace_start) + _namespace_to_ord(self.namespace_end)) // 2 return [NamespaceRange(self.namespace_start, _ord_to_namespace(mid_point), _app=self.app), NamespaceRange(_ord_to_namespace(mid_point+1), self.namespace_end, _app=self.app)]
def split_range(self)
Splits the NamespaceRange into two nearly equal-sized ranges. Returns: If this NamespaceRange contains a single namespace then a list containing this NamespaceRange is returned. Otherwise a two-element list containing two NamespaceRanges whose total range is identical to this NamespaceRange's is returned.
3.74369
2.95814
1.265556
namespace_start = _ord_to_namespace(_namespace_to_ord(after_namespace) + 1) return NamespaceRange(namespace_start, self.namespace_end, _app=self.app)
def with_start_after(self, after_namespace)
Returns a copy of this NamespaceRange with a new namespace_start. Args: after_namespace: A namespace string. Returns: A NamespaceRange object whose namespace_start is the lexicographically next namespace after the given namespace string. Raises: ValueError: if the NamespaceRange includes only a single namespace.
9.140614
8.637564
1.05824
filters = {} filters['__key__ >= '] = _key_for_namespace( self.namespace_start, self.app) filters['__key__ <= '] = _key_for_namespace( self.namespace_end, self.app) return datastore.Query('__namespace__', filters=filters, keys_only=True, cursor=cursor, _app=self.app)
def make_datastore_query(self, cursor=None)
Returns a datastore.Query that generates all namespaces in the range. Args: cursor: start cursor for the query. Returns: A datastore.Query instance that generates db.Keys for each namespace in the NamespaceRange.
3.90983
3.958642
0.98767
namespaces_after_key = list(self.make_datastore_query().Run(limit=1)) if not namespaces_after_key: return None namespace_after_key = namespaces_after_key[0].name() or '' return NamespaceRange(namespace_after_key, self.namespace_end, _app=self.app)
def normalized_start(self)
Returns a NamespaceRange with leading non-existent namespaces removed. Returns: A copy of this NamespaceRange whose namespace_start is adjusted to exclude the portion of the range that contains no actual namespaces in the datastore. None is returned if the NamespaceRange contains no actual namespaces in the datastore.
8.856381
5.700984
1.553483
obj_dict = dict(namespace_start=self.namespace_start, namespace_end=self.namespace_end) if self.app is not None: obj_dict['app'] = self.app return obj_dict
def to_json_object(self)
Returns a dict representation that can be serialized to JSON.
3.975513
3.504384
1.13444
# pylint: disable=g-doc-args if n < 1: raise ValueError('n must be >= 1') ranges = None if can_query(): if not contiguous: ns_keys = get_namespace_keys(_app, n + 1) if not ns_keys: return [] else: if len(ns_keys) <= n: # If you have less actual namespaces than number of NamespaceRanges # to return, then just return the list of those namespaces. ns_range = [] for ns_key in ns_keys: ns_range.append(NamespaceRange(ns_key.name() or '', ns_key.name() or '', _app=_app)) return sorted(ns_range, key=lambda ns_range: ns_range.namespace_start) # Use the first key and save the initial normalized_start() call. ranges = [NamespaceRange(ns_keys[0].name() or '', _app=_app)] else: ns_range = NamespaceRange(_app=_app).normalized_start() if ns_range is None: return [NamespaceRange(_app=_app)] ranges = [ns_range] else: ranges = [NamespaceRange(_app=_app)] singles = [] while ranges and (len(ranges) + len(singles)) < n: namespace_range = ranges.pop(0) if namespace_range.is_single_namespace: singles.append(namespace_range) else: left, right = namespace_range.split_range() if can_query(): right = right.normalized_start() if right is not None: ranges.append(right) ranges.append(left) ns_ranges = sorted(singles + ranges, key=lambda ns_range: ns_range.namespace_start) if contiguous: if not ns_ranges: # This condition is possible if every namespace was deleted after the # first call to ns_range.normalized_start(). return [NamespaceRange(_app=_app)] continuous_ns_ranges = [] for i in range(len(ns_ranges)): if i == 0: namespace_start = MIN_NAMESPACE else: namespace_start = ns_ranges[i].namespace_start if i == len(ns_ranges) - 1: namespace_end = MAX_NAMESPACE else: namespace_end = _ord_to_namespace( _namespace_to_ord(ns_ranges[i+1].namespace_start) - 1) continuous_ns_ranges.append(NamespaceRange(namespace_start, namespace_end, _app=_app)) return continuous_ns_ranges else: return ns_ranges
def split(cls, n, contiguous, can_query=itertools.chain(itertools.repeat(True, 50), itertools.repeat(False)).next, _app=None)
Splits the complete NamespaceRange into n equally-sized NamespaceRanges. Args: n: The maximum number of NamespaceRanges to return. Fewer than n namespaces may be returned. contiguous: If True then the returned NamespaceRanges will cover the entire space of possible namespaces (i.e. from MIN_NAMESPACE to MAX_NAMESPACE) without gaps. If False then the returned NamespaceRanges may exclude namespaces that don't appear in the datastore. can_query: A function that returns True if split() can query the datastore to generate more fair namespace range splits, and False otherwise. If not set then split() is allowed to make 50 datastore queries. Returns: A list of at most n NamespaceRanges representing a near-equal distribution of actual existent datastore namespaces. The returned list will be sorted lexicographically. Raises: ValueError: if n is < 1.
2.921406
2.733735
1.06865
data_length = len(data) if self._size + data_length > self._flush_size: self.flush() if not self._exclusive and data_length > _FILE_POOL_MAX_SIZE: raise errors.Error( "Too big input %s (%s)." % (data_length, _FILE_POOL_MAX_SIZE)) else: self._buffer.append(data) self._size += data_length if self._size > self._flush_size: self.flush()
def append(self, data)
Append data to a file.
4.002002
3.939983
1.015741
# Write data to in-memory buffer first. buf = cStringIO.StringIO() with records.RecordsWriter(buf) as w: for record in self._buffer: w.write(record) w._pad_block() str_buf = buf.getvalue() buf.close() if not self._exclusive and len(str_buf) > _FILE_POOL_MAX_SIZE: # Shouldn't really happen because of flush size. raise errors.Error( "Buffer too big. Can't write more than %s bytes in one request: " "risk of writes interleaving. Got: %s" % (_FILE_POOL_MAX_SIZE, len(str_buf))) # Write data to file. start_time = time.time() self._write(str_buf) if self._ctx: operation.counters.Increment( COUNTER_IO_WRITE_BYTES, len(str_buf))(self._ctx) operation.counters.Increment( COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(self._ctx) # reset buffer self._buffer = [] self._size = 0 gc.collect()
def flush(self)
Flush pool contents.
4.317591
4.227587
1.02129
self._filehandle.write(str_buf) self._buf_size += len(str_buf)
def _write(self, str_buf)
Writes the given buffer to the GCS file via its filehandle.
4.484193
3.166713
1.416041
super(GCSRecordsPool, self).flush() if force: extra_padding = self._buf_size % self._GCS_BLOCK_SIZE if extra_padding > 0: self._write("\x00" * (self._GCS_BLOCK_SIZE - extra_padding)) self._filehandle.flush()
def flush(self, force=False)
Flush pool contents. Args: force: Inserts additional padding to achieve the minimum block size required for GCS.
5.639147
4.16708
1.353261
if cls.TMP_BUCKET_NAME_PARAM in writer_spec: return writer_spec[cls.TMP_BUCKET_NAME_PARAM] return cls._get_gcs_bucket(writer_spec)
def _get_tmp_gcs_bucket(cls, writer_spec)
Returns bucket used for writing tmp files.
2.870249
2.376379
1.207825
# pick tmp id iff tmp bucket is set explicitly if cls.TMP_BUCKET_NAME_PARAM in writer_spec: return writer_spec.get(cls._TMP_ACCOUNT_ID_PARAM, None) return cls._get_account_id(writer_spec)
def _get_tmp_account_id(cls, writer_spec)
Returns the account id to use with tmp bucket.
6.730409
5.66443
1.188188
naming_format = cls._TMP_FILE_NAMING_FORMAT if seg_index is None: naming_format = writer_spec.get(cls.NAMING_FORMAT_PARAM, cls._DEFAULT_NAMING_FORMAT) template = string.Template(naming_format) try: # Check that template doesn't use undefined mappings and is formatted well if seg_index is None: return template.substitute(name=name, id=job_id, num=num) else: return template.substitute(name=name, id=job_id, num=num, attempt=attempt, seg=seg_index) except ValueError, error: raise errors.BadWriterParamsError("Naming template is bad, %s" % (error)) except KeyError, error: raise errors.BadWriterParamsError("Naming template '%s' has extra " "mappings, %s" % (naming_format, error))
def _generate_filename(cls, writer_spec, name, job_id, num, attempt=None, seg_index=None)
Generates a filename for a particular output. Args: writer_spec: specification dictionary for the output writer. name: name of the job. job_id: the ID number assigned to the job. num: shard number. attempt: the shard attempt number. seg_index: index of the seg. None means the final output. Returns: a string containing the filename. Raises: BadWriterParamsError: if the template contains any errors such as invalid syntax or contains unknown substitution placeholders.
3.656901
3.568588
1.024747
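A minimal sketch of the string.Template substitution that _generate_filename relies on; the naming format string below is an illustrative assumption, not the writer's actual default.
# Illustrative only: the format string here is an assumption, not the
# output writer's real default naming format.
import string

naming_format = "$name/$id/output-$num"
template = string.Template(naming_format)
print(template.substitute(name="wordcount", id="job123", num=0))
# -> wordcount/job123/output-0

# A placeholder missing from the mapping raises KeyError, which the method
# above converts into errors.BadWriterParamsError.
try:
    string.Template("$name/$unknown").substitute(name="wordcount")
except KeyError as err:
    print("missing mapping: %s" % err)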
writer_spec = cls.get_params(mapper_spec, allow_old=False) # Bucket Name is required if cls.BUCKET_NAME_PARAM not in writer_spec: raise errors.BadWriterParamsError( "%s is required for Google Cloud Storage" % cls.BUCKET_NAME_PARAM) try: cloudstorage.validate_bucket_name( writer_spec[cls.BUCKET_NAME_PARAM]) except ValueError, error: raise errors.BadWriterParamsError("Bad bucket name, %s" % (error)) # Validate the naming format does not throw any errors using dummy values cls._generate_filename(writer_spec, "name", "id", 0) cls._generate_filename(writer_spec, "name", "id", 0, 1, 0)
def validate(cls, mapper_spec)
Validate mapper specification. Args: mapper_spec: an instance of model.MapperSpec. Raises: BadWriterParamsError: if the specification is invalid for any reason such as missing the bucket name or providing an invalid bucket name.
4.613594
4.326405
1.066381
if use_tmp_bucket: bucket = cls._get_tmp_gcs_bucket(writer_spec) account_id = cls._get_tmp_account_id(writer_spec) else: bucket = cls._get_gcs_bucket(writer_spec) account_id = cls._get_account_id(writer_spec) # GoogleCloudStorage format for filenames, Initial slash is required filename = "/%s/%s" % (bucket, filename_suffix) content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None) options = {} if cls.ACL_PARAM in writer_spec: options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM) return cloudstorage.open(filename, mode="w", content_type=content_type, options=options, _account_id=account_id)
def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False)
Opens a new gcs file for writing.
2.731224
2.611433
1.045872
start_time = time.time() self._get_write_buffer().write(data) ctx = context.get() operation.counters.Increment(COUNTER_IO_WRITE_BYTES, len(data))(ctx) operation.counters.Increment( COUNTER_IO_WRITE_MSEC, int((time.time() - start_time) * 1000))(ctx)
def write(self, data)
Write data to the GoogleCloudStorage file. Args: data: string containing the data to be written.
4.858087
5.140531
0.945055
writer_spec = cls.get_params(mapper_spec, allow_old=False) if writer_spec.get(cls._NO_DUPLICATE, False) not in (True, False): raise errors.BadWriterParamsError("No duplicate must be a boolean.") super(_GoogleCloudStorageOutputWriter, cls).validate(mapper_spec)
def validate(cls, mapper_spec)
Inherit docs.
7.572923
7.623572
0.993356
writer_spec = cls.get_params(mr_spec.mapper, allow_old=False) seg_index = None if writer_spec.get(cls._NO_DUPLICATE, False): seg_index = 0 # Determine parameters key = cls._generate_filename(writer_spec, mr_spec.name, mr_spec.mapreduce_id, shard_number, shard_attempt, seg_index) return cls._create(writer_spec, key)
def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None)
Inherit docs.
6.818772
6.742283
1.011345
writer = cls._open_file(writer_spec, filename_suffix) return cls(writer, writer_spec=writer_spec)
def _create(cls, writer_spec, filename_suffix)
Helper method that actually creates the file in cloud storage.
4.395921
3.979457
1.104653
writer_spec = cls.get_params(mr_spec.mapper, allow_old=False) # Determine parameters key = cls._generate_filename(writer_spec, mr_spec.name, mr_spec.mapreduce_id, shard_number, shard_attempt) status = _ConsistentStatus() status.writer_spec = writer_spec status.mainfile = cls._open_file(writer_spec, key) status.mapreduce_id = mr_spec.mapreduce_id status.shard = shard_number return cls(status)
def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None)
Inherit docs.
6.04606
5.991301
1.00914
if mainfile.closed: # can happen when finalize fails return account_id = self._get_tmp_account_id(writer_spec) f = cloudstorage_api.open(tmpfile, _account_id=account_id) # both reads and writes are buffered - the number here doesn't matter data = f.read(self._REWRITE_BLOCK_SIZE) while data: mainfile.write(data) data = f.read(self._REWRITE_BLOCK_SIZE) f.close() mainfile.flush()
def _rewrite_tmpfile(self, mainfile, tmpfile, writer_spec)
Copies contents of tmpfile (name) to mainfile (buffer).
5.485805
5.246861
1.04554
# We can't put the tmpfile in the same directory as the output. There are # rare circumstances when we leave trash behind and we don't want this trash # to be loaded into bigquery and/or used for restore. # # We used mapreduce id, shard number and attempt and 128 random bits to make # collisions virtually impossible. tmpl = string.Template(cls._TMPFILE_PATTERN) filename = tmpl.substitute( id=status.mapreduce_id, shard=status.shard, random=random.getrandbits(cls._RAND_BITS)) return cls._open_file(status.writer_spec, filename, use_tmp_bucket=True)
def _create_tmpfile(cls, status)
Creates a new random-named tmpfile.
11.19859
10.434837
1.073193
# Try to remove garbage (if any). Note that listbucket is not strongly # consistent so something might survive. tmpl = string.Template(self._TMPFILE_PREFIX) prefix = tmpl.substitute( id=self.status.mapreduce_id, shard=self.status.shard) bucket = self._get_tmp_gcs_bucket(writer_spec) account_id = self._get_tmp_account_id(writer_spec) for f in cloudstorage.listbucket("/%s/%s" % (bucket, prefix), _account_id=account_id): if f.filename not in exclude_list: self._remove_tmpfile(f.filename, self.status.writer_spec)
def _try_to_clean_garbage(self, writer_spec, exclude_list=())
Tries to remove any files created by this shard that aren't needed. Args: writer_spec: writer_spec for the MR. exclude_list: A list of filenames (strings) that should not be removed.
6.799249
6.30928
1.077658
return cls(state[cls.COUNT], state[cls.STRING_LENGTH])
def from_json(cls, state)
Inherit docs.
18.345787
15.695101
1.168886
params = job_config.input_reader_params count = params[cls.COUNT] string_length = params.get(cls.STRING_LENGTH, cls._DEFAULT_STRING_LENGTH) shard_count = job_config.shard_count count_per_shard = count // shard_count mr_input_readers = [ cls(count_per_shard, string_length) for _ in range(shard_count)] left = count - count_per_shard*shard_count if left > 0: mr_input_readers.append(cls(left, string_length)) return mr_input_readers
def split_input(cls, job_config)
Inherit docs.
3.013722
2.907602
1.036497
super(SampleInputReader, cls).validate(job_config) params = job_config.input_reader_params # Validate count. if cls.COUNT not in params: raise errors.BadReaderParamsError("Must specify %s" % cls.COUNT) if not isinstance(params[cls.COUNT], int): raise errors.BadReaderParamsError("%s should be an int but is %s" % (cls.COUNT, type(params[cls.COUNT]))) if params[cls.COUNT] <= 0: raise errors.BadReaderParamsError("%s should be a positive int" % cls.COUNT) # Validate string length. if cls.STRING_LENGTH in params and not ( isinstance(params[cls.STRING_LENGTH], int) and params[cls.STRING_LENGTH] > 0): raise errors.BadReaderParamsError("%s should be a positive int " "but is %s" % (cls.STRING_LENGTH, params[cls.STRING_LENGTH]))
def validate(cls, job_config)
Inherit docs.
2.102468
2.035703
1.032797
weights = [1] for i in range(1, max_length): weights.append(weights[i-1] * len(_ALPHABET) + 1) weights.reverse() return weights
def _get_weights(max_length)
Get weights for each offset in a string of a certain max length. Args: max_length: max length of the strings. Returns: A list of ints as weights. Example: If max_length is 2 and alphabet is "ab", then we have order "", "a", "aa", "ab", "b", "ba", "bb". So the weight for the first char is 3.
3.336026
3.400343
0.981085
ordinal = 0 for i, c in enumerate(content): ordinal += weights[i] * _ALPHABET.index(c) + 1 return ordinal
def _str_to_ord(content, weights)
Converts a string to its lexicographical order. Args: content: the string to convert. Of type str. weights: weights from _get_weights. Returns: an int or long that represents the order of this string. "" has order 0.
3.611295
4.848166
0.744879
chars = [] for weight in weights: if ordinal == 0: return "".join(chars) ordinal -= 1 index, ordinal = divmod(ordinal, weight) chars.append(_ALPHABET[index]) return "".join(chars)
def _ord_to_str(ordinal, weights)
Reverse function of _str_to_ord.
3.199074
3.003321
1.065179
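A self-contained round-trip of the three helpers above over a tiny assumed alphabet ("ab" with max length 2, matching the docstring example); _ALPHABET is normally a module-level constant that is not shown here.
# _ALPHABET is assumed for illustration; the real module defines its own.
_ALPHABET = "ab"

def _get_weights(max_length):
    weights = [1]
    for i in range(1, max_length):
        weights.append(weights[i - 1] * len(_ALPHABET) + 1)
    weights.reverse()
    return weights

def _str_to_ord(content, weights):
    ordinal = 0
    for i, c in enumerate(content):
        ordinal += weights[i] * _ALPHABET.index(c) + 1
    return ordinal

def _ord_to_str(ordinal, weights):
    chars = []
    for weight in weights:
        if ordinal == 0:
            return "".join(chars)
        ordinal -= 1
        index, ordinal = divmod(ordinal, weight)
        chars.append(_ALPHABET[index])
    return "".join(chars)

weights = _get_weights(2)
print(weights)                      # [3, 1] -> first char weighs 3, as documented
print(_str_to_ord("ba", weights))   # 5: order is "", "a", "aa", "ab", "b", "ba", "bb"
print(_ord_to_str(5, weights))      # "ba" -- the inverse mapping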
if not filters: return None, None, None range_property = None start_val = None end_val = None start_filter = None end_filter = None for f in filters: prop, op, val = f if op in [">", ">=", "<", "<="]: if range_property and range_property != prop: raise errors.BadReaderParamsError( "Range on only one property is supported.") range_property = prop if val is None: raise errors.BadReaderParamsError( "Range can't be None in filter %s", f) if op in [">", ">="]: if start_val is not None: raise errors.BadReaderParamsError( "Operation %s is specified more than once.", op) start_val = val start_filter = f else: if end_val is not None: raise errors.BadReaderParamsError( "Operation %s is specified more than once.", op) end_val = val end_filter = f elif op != "=": raise errors.BadReaderParamsError( "Only < <= > >= = are supported as operation. Got %s", op) if not range_property: return None, None, None if start_val is None or end_val is None: raise errors.BadReaderParamsError( "Filter should contain a complete range on property %s", range_property) if issubclass(model_class, db.Model): property_obj = model_class.properties()[range_property] else: property_obj = ( model_class._properties[ # pylint: disable=protected-access range_property]) supported_properties = ( _DISCRETE_PROPERTY_SPLIT_FUNCTIONS.keys() + _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS.keys()) if not isinstance(property_obj, tuple(supported_properties)): raise errors.BadReaderParamsError( "Filtered property %s is not supported by sharding.", range_property) if not start_val < end_val: raise errors.BadReaderParamsError( "Start value %s should be smaller than end value %s", start_val, end_val) return property_obj, start_filter, end_filter
def _get_range_from_filters(cls, filters, model_class)
Get property range from filters user provided. This method also validates there is one and only one closed range on a single property. Args: filters: user supplied filters. Each filter should be a list or tuple of format (<property_name_as_str>, <query_operator_as_str>, <value_of_certain_type>). Value type should satisfy the property's type. model_class: the model class for the entity type to apply filters on. Returns: a tuple of (property, start_filter, end_filter). property is the model's field that the range is about. start_filter and end_filter define the start and the end of the range. (None, None, None) if no range is found. Raises: BadReaderParamsError: if any filter is invalid in any way.
2.482923
2.313109
1.073414
new_range_filters = [] name = self.start[0] prop_cls = self.prop.__class__ if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS: splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls]( self.start[2], self.end[2], n, self.start[1] == ">=", self.end[1] == "<=") start_filter = (name, ">=", splitpoints[0]) for p in splitpoints[1:]: end_filter = (name, "<", p) new_range_filters.append([start_filter, end_filter]) start_filter = (name, ">=", p) else: splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls]( self.start[2], self.end[2], n) start_filter = self.start for p in splitpoints: end_filter = (name, "<", p) new_range_filters.append([start_filter, end_filter]) start_filter = (name, ">=", p) new_range_filters.append([start_filter, self.end]) for f in new_range_filters: f.extend(self._equality_filters) return [self.__class__(f, self.model_class_path) for f in new_range_filters]
def split(self, n)
Evenly split this range into contiguous, non-overlapping subranges. Args: n: number of splits. Returns: a list of contiguous, non-overlapping sub PropertyRanges. May be fewer than n when there are not enough subranges.
2.449888
2.399543
1.020981
if issubclass(self.model_class, db.Model): query = db.Query(self.model_class, namespace=ns) for f in self.filters: query.filter("%s %s" % (f[0], f[1]), f[2]) else: query = self.model_class.query(namespace=ns) for f in self.filters: query = query.filter(ndb.FilterNode(*f)) return query
def make_query(self, ns)
Make a query of entities within this range. Query options are not supported. They should be specified when the query is run. Args: ns: namespace of this query. Returns: a db.Query or ndb.Query, depending on the model class's type.
2.699275
2.533662
1.065365
if job_config.output_writer_cls != cls: raise errors.BadWriterParamsError( "Expect output writer class %r, got %r." % (cls, job_config.output_writer_cls))
def validate(cls, job_config)
Validates relevant parameters. This method can validate fields which it deems relevant. Args: job_config: an instance of map_job.JobConfig. Raises: errors.BadWriterParamsError: required parameters are missing or invalid.
4.899034
3.895283
1.257684
# We accept an iterator just in case output references get too big. outs = tuple(iterator) shard_ctx._state.writer_state["outs"] = outs
def commit_output(cls, shard_ctx, iterator)
Saves output references when a shard finishes. Inside end_shard(), an output writer can optionally use this method to persist some references to the outputs from this shard (e.g. a list of filenames). Args: shard_ctx: map_job_context.ShardContext for this shard. iterator: an iterator that yields json serializable references to the outputs from this shard. Contents from the iterator can be accessed later via map_job.Job.get_outputs.
19.684826
19.599567
1.00435
pipeline_handlers_map = [] if pipeline: pipeline_handlers_map = pipeline.create_handlers_map(prefix=".*/pipeline") return pipeline_handlers_map + [ # Task queue handlers. # Always suffix by mapreduce_id or shard_id for log analysis purposes. # mapreduce_id or shard_id also presents in headers or payload. (r".*/worker_callback.*", handlers.MapperWorkerCallbackHandler), (r".*/controller_callback.*", handlers.ControllerCallbackHandler), (r".*/kickoffjob_callback.*", handlers.KickOffJobHandler), (r".*/finalizejob_callback.*", handlers.FinalizeJobHandler), # RPC requests with JSON responses # All JSON handlers should have /command/ prefix. (r".*/command/start_job", handlers.StartJobHandler), (r".*/command/cleanup_job", handlers.CleanUpJobHandler), (r".*/command/abort_job", handlers.AbortJobHandler), (r".*/command/list_configs", status.ListConfigsHandler), (r".*/command/list_jobs", status.ListJobsHandler), (r".*/command/get_job_detail", status.GetJobDetailHandler), # UI static files (STATIC_RE, status.ResourceHandler), # Redirect non-file URLs that do not end in status/detail to status page. (r".*", RedirectHandler), ]
def create_handlers_map()
Create new handlers map. Returns: list of (regexp, handler) pairs for WSGIApplication constructor.
5.901572
5.775101
1.021899
if json["name"] in _KEYRANGES_CLASSES: return _KEYRANGES_CLASSES[json["name"]].from_json(json) raise ValueError("Invalid json %s", json)
def from_json(cls, json)
Deserialize from json. Args: json: a dict of json compatible fields. Returns: a KeyRanges object. Raises: ValueError: if the json is invalid.
5.9568
4.677443
1.273516
s = re.sub(r"\s+", " ", s) s = re.sub(r"[\\.\\?\\!]", "\n", s) return s.split("\n")
def split_into_sentences(s)
Split text into list of sentences.
3.562944
3.519218
1.012425
s = re.sub(r"\W+", " ", s) s = re.sub(r"[_0-9]+", " ", s) return s.split()
def split_into_words(s)
Split a sentence into list of words.
2.849825
2.807964
1.014908
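A quick, self-contained illustration of the two demo helpers above on a made-up sentence (the input strings are assumptions).
import re

def split_into_sentences(s):
    s = re.sub(r"\s+", " ", s)
    s = re.sub(r"[\\.\\?\\!]", "\n", s)
    return s.split("\n")

def split_into_words(s):
    s = re.sub(r"\W+", " ", s)
    s = re.sub(r"[_0-9]+", " ", s)
    return s.split()

# Made-up input, just to show the behaviour.
print(split_into_sentences("Hello world. How are you?"))
# ['Hello world', ' How are you', '']
print(split_into_words("hello, world_2!"))
# ['hello', 'world']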
(entry, text_fn) = data text = text_fn() logging.debug("Got %s", entry.filename) for s in split_into_sentences(text): for w in split_into_words(s.lower()): yield (w, entry.filename)
def index_map(data)
Index demo map function.
6.345213
6.76652
0.937737
(entry, text_fn) = data text = text_fn() filename = entry.filename logging.debug("Got %s", filename) for s in split_into_sentences(text): words = split_into_words(s.lower()) if len(words) < PHRASE_LENGTH: yield (":".join(words), filename) continue for i in range(0, len(words) - PHRASE_LENGTH): yield (":".join(words[i:i+PHRASE_LENGTH]), filename)
def phrases_map(data)
Phrases demo map function.
3.644968
3.688737
0.988134
if len(values) < 10: return counts = {} for filename in values: counts[filename] = counts.get(filename, 0) + 1 words = re.sub(r":", " ", key) threshold = len(values) / 2 for filename, count in counts.items(): if count > threshold: yield "%s:%s\n" % (words, filename)
def phrases_reduce(key, values)
Phrases demo reduce function.
3.323651
3.19943
1.038826
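A tiny worked example of the reduce step above with made-up filenames; the key/value data is an assumption, and only files appearing in more than half of the occurrences are emitted.
import re

def phrases_reduce(key, values):
    if len(values) < 10:
        return
    counts = {}
    for filename in values:
        counts[filename] = counts.get(filename, 0) + 1
    words = re.sub(r":", " ", key)
    threshold = len(values) / 2
    for filename, count in counts.items():
        if count > threshold:
            yield "%s:%s\n" % (words, filename)

# Made-up occurrences: "a.txt" dominates, so only it clears the threshold.
values = ["a.txt"] * 8 + ["b.txt"] * 3
print(list(phrases_reduce("to:be:or:not", values)))
# ['to be or not:a.txt\n']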
sep = FileMetadata.__SEP return str(username + sep + str(date) + sep + blob_key)
def getKeyName(username, date, blob_key)
Returns the internal key for a particular item in the database. Our items are stored with keys of the form 'user/date/blob_key' ('/' is not the real separator, but __SEP is). Args: username: The given user's e-mail address. date: A datetime object representing the date and time that an input file was uploaded to this app. blob_key: The blob key corresponding to the location of the input file in the Blobstore. Returns: The internal key for the item specified by (username, date, blob_key).
11.265136
8.202984
1.373297
dec = MontyDecoder() def load_class(dotpath): modname, classname = dotpath.rsplit(".", 1) mod = __import__(modname, globals(), locals(), [classname], 0) return getattr(mod, classname) def process_params(d): decoded = {} for k, v in d.items(): if k.startswith("$"): if isinstance(v, list): v = [os.path.expandvars(i) for i in v] elif isinstance(v, dict): v = {k2: os.path.expandvars(v2) for k2, v2 in v.items()} else: v = os.path.expandvars(v) decoded[k.strip("$")] = dec.process_decoded(v) return decoded jobs = [] common_params = process_params(spec.get("jobs_common_params", {})) for d in spec["jobs"]: cls_ = load_class(d["jb"]) params = process_params(d.get("params", {})) params.update(common_params) jobs.append(cls_(**params)) handlers = [] for d in spec.get("handlers", []): cls_ = load_class(d["hdlr"]) params = process_params(d.get("params", {})) handlers.append(cls_(**params)) validators = [] for d in spec.get("validators", []): cls_ = load_class(d["vldr"]) params = process_params(d.get("params", {})) validators.append(cls_(**params)) custodian_params = process_params(spec.get("custodian_params", {})) return cls(jobs=jobs, handlers=handlers, validators=validators, **custodian_params)
def from_spec(cls, spec)
Load a Custodian instance where the jobs are specified from a structure and a spec dict. This allows simple custom job sequences to be constructed quickly via a YAML file. Args: spec (dict): A dict specifying jobs. A sample of the dict in YAML format for the usual MP workflow is given as follows ``` jobs: - jb: custodian.vasp.jobs.VaspJob params: final: False suffix: .relax1 - jb: custodian.vasp.jobs.VaspJob params: final: True suffix: .relax2 settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}} jobs_common_params: vasp_cmd: /opt/vasp handlers: - hdlr: custodian.vasp.handlers.VaspErrorHandler - hdlr: custodian.vasp.handlers.AliasingErrorHandler - hdlr: custodian.vasp.handlers.MeshSymmetryHandler validators: - vldr: custodian.vasp.validators.VasprunXMLValidator custodian_params: scratch_dir: /tmp ``` The `jobs` key is a list of jobs. Each job is specified via "jb": <explicit path>, and all parameters are specified via `params` which is a dict. `jobs_common_params` specifies a common set of parameters that are passed to all jobs, e.g., vasp_cmd. Returns: Custodian instance.
2.094849
1.711521
1.22397
cwd = os.getcwd() with ScratchDir(self.scratch_dir, create_symbolic_link=True, copy_to_current_on_exit=True, copy_from_current_on_enter=True) as temp_dir: self.total_errors = 0 start = datetime.datetime.now() logger.info("Run started at {} in {}.".format( start, temp_dir)) v = sys.version.replace("\n", " ") logger.info("Custodian running on Python version {}".format(v)) logger.info("Hostname: {}, Cluster: {}".format( *get_execution_host_info())) try: # skip jobs until the restart for job_n, job in islice(enumerate(self.jobs, 1), self.restart, None): self._run_job(job_n, job) # We do a dump of the run log after each job. dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4) # Checkpoint after each job so that we can recover from last # point and remove old checkpoints if self.checkpoint: self.restart = job_n Custodian._save_checkpoint(cwd, job_n) except CustodianError as ex: logger.error(ex.message) if ex.raises: raise finally: # Log the corrections to a json file. logger.info("Logging to {}...".format(Custodian.LOG_FILE)) dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4) end = datetime.datetime.now() logger.info("Run ended at {}.".format(end)) run_time = end - start logger.info("Run completed. Total time taken = {}." .format(run_time)) if self.gzipped_output: gzip_dir(".") # Cleanup checkpoint files (if any) if run is successful. Custodian._delete_checkpoints(cwd) return self.run_log
def run(self)
Runs all jobs. Returns: All errors encountered as a list of lists. [[error_dicts for job 1], [error_dicts for job 2], ....] Raises: ValidationError: if a job fails validation ReturnCodeError: if the process has a return code different from 0 NonRecoverableError: if an unrecoverable error occurs MaxCorrectionsPerJobError: if max_errors_per_job is reached MaxCorrectionsError: if max_errors is reached MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
4.456719
4.455315
1.000315
corrections = [] for h in handlers: try: if h.check(): if h.max_num_corrections is not None \ and h.n_applied_corrections >= h.max_num_corrections: msg = "Maximum number of corrections {} reached " \ "for handler {}".format(h.max_num_corrections, h) if h.raise_on_max: self.run_log[-1]["handler"] = h self.run_log[-1]["max_errors_per_handler"] = True raise MaxCorrectionsPerHandlerError(msg, True, h.max_num_corrections, h) else: logger.warning(msg+" Correction not applied.") continue if terminate_func is not None and h.is_terminating: logger.info("Terminating job") terminate_func() # make sure we don't terminate twice terminate_func = None d = h.correct() d["handler"] = h logger.error("\n" + pformat(d, indent=2, width=-1)) corrections.append(d) h.n_applied_corrections += 1 except Exception: if not self.skip_over_errors: raise else: import traceback logger.error("Bad handler %s " % h) logger.error(traceback.format_exc()) corrections.append( {"errors": ["Bad handler %s " % h], "actions": []}) self.total_errors += len(corrections) self.errors_current_job += len(corrections) self.run_log[-1]["corrections"].extend(corrections) # We do a dump of the run log after each check. dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4) return len(corrections) > 0
def _do_check(self, handlers, terminate_func=None)
Checks the specified handlers. Returns True iff any errors were caught.
3.751977
3.762247
0.99727
modified = [] for a in actions: if "dict" in a: k = a["dict"] modified.append(k) self.feffinp[k] = self.modify_object(a["action"], self.feffinp[k]) elif "file" in a: self.modify(a["action"], a["file"]) else: raise ValueError("Unrecognized format: {}".format(a)) if modified: feff = self.feffinp feff_input = "\n\n".join(str(feff[k]) for k in ["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"] if k in feff) for k, v in six.iteritems(feff): with open(os.path.join('.', k), "w") as f: f.write(str(v)) with open(os.path.join('.', "feff.inp"), "w") as f: f.write(feff_input)
def apply_actions(self, actions)
Applies a list of actions to the FEFF Input Set and rewrites modified files. Args: actions [dict]: A list of actions of the form {'file': filename, 'action': moddermodification} or {'dict': feffinput_key, 'action': moddermodification}
3.407947
2.882182
1.182419
if len(settings) != 1: raise ValueError("Settings must only contain one item with key " "'content'.") for k, v in settings.items(): if k == "content": with open(filename, 'w') as f: f.write(v)
def file_create(filename, settings)
Creates a file. Args: filename (str): Filename. settings (dict): Must be {"content": actual_content}
3.472253
3.114638
1.114818
if len(settings) != 1: raise ValueError("Settings must only contain one item with key " "'dest'.") for k, v in settings.items(): if k == "dest": shutil.move(filename, v)
def file_move(filename, settings)
Moves a file. {'_file_move': {'dest': 'new_file_name'}} Args: filename (str): Filename. settings (dict): Must be {"dest": path of new file}
4.334567
3.925163
1.104302
if len(settings) != 1: raise ValueError("Settings must only contain one item with key " "'mode'.") for k, v in settings.items(): if k == "mode" and v == "actual": try: os.remove(filename) except OSError: #Skip file not found error. pass elif k == "mode" and v == "simulated": print("Simulated removal of {}".format(filename))
def file_delete(filename, settings)
Deletes a file. {'_file_delete': {'mode': "actual"}} Args: filename (str): Filename. settings (dict): Must be {"mode": actual/simulated}. Simulated mode only prints the action without performing it.
4.109392
3.091292
1.329344
for k, v in settings.items(): if k.startswith("dest"): shutil.copyfile(filename, v)
def file_copy(filename, settings)
Copies a file. {'_file_copy': {'dest': 'new_file_name'}} Args: filename (str): Filename. settings (dict): Must be {"dest": path of new file}
4.928189
5.312162
0.927718
for k, v in settings.items(): if k == "mode": os.chmod(filename,v) if k == "owners": os.chown(filename,v)
def file_modify(filename, settings)
Modifies file access. Args: filename (str): Filename. settings (dict): Can be "mode" or "owners".
3.54555
2.757357
1.285851
decompress_dir('.') if self.backup: for f in FEFF_INPUT_FILES: shutil.copy(f, "{}.orig".format(f)) for f in FEFF_BACKUP_FILES: if os.path.isfile(f): shutil.copy(f, "{}.orig".format(f))
def setup(self)
Performs initial setup for FeffJob, including backing up the input files if requested.
5.291708
4.737677
1.116941
with open(self.output_file, "w") as f_std, \ open(self.stderr_file, "w", buffering=1) as f_err: # Use line buffering for stderr # On TSCC, need to run shell command p = subprocess.Popen(self.feff_cmd, stdout=f_std, stderr=f_err, shell=True) return p
def run(self)
Performs the actual FEFF run Returns: (subprocess.Popen) Used for monitoring.
5.885653
4.72782
1.244898
for action, settings in modification.items(): if action in self.supported_actions: self.supported_actions[action].__call__(obj, settings) elif self.strict: raise ValueError("{} is not a supported action!" .format(action))
def modify(self, modification, obj)
Note that modify makes actual in-place modifications. It does not return a copy. Args: modification (dict): Modification must be {action_keyword : settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}} obj (dict/str/object): Object to modify depending on actions. For example, for DictActions, obj will be a dict to be modified. For FileActions, obj will be a string with a full pathname to a file.
4.234822
3.980025
1.064019
d = obj.as_dict() self.modify(modification, d) return obj.from_dict(d)
def modify_object(self, modification, obj)
Modify an object that supports pymatgen's as_dict() and from_dict API. Args: modification (dict): Modification must be {action_keyword : settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}} obj (object): Object to modify
4.770017
4.69803
1.015323
modified = [] for a in actions: if "dict" in a: k = a["dict"] modified.append(k) self.vi[k] = self.modify_object(a["action"], self.vi[k]) elif "file" in a: self.modify(a["action"], a["file"]) else: raise ValueError("Unrecognized format: {}".format(a)) for f in modified: self.vi[f].write_file(f)
def apply_actions(self, actions)
Applies a list of actions to the Vasp Input Set and rewrites modified files. Args: actions [dict]: A list of actions of the form {'file': filename, 'action': moddermodification} or {'dict': vaspinput_key, 'action': moddermodification}
4.229114
2.868114
1.474528
num = max([0] + [int(f.split(".")[1]) for f in glob("{}.*.tar.gz".format(prefix))]) filename = "{}.{}.tar.gz".format(prefix, num + 1) logging.info("Backing up run to {}.".format(filename)) with tarfile.open(filename, "w:gz") as tar: for fname in filenames: for f in glob(fname): tar.add(f)
def backup(filenames, prefix="error")
Backup files to a tar.gz file. Used, for example, in backing up the files of an errored run before performing corrections. Args: filenames ([str]): List of files to backup. Supports wildcards, e.g., *.*. prefix (str): prefix to the files. Defaults to error, which means a series of error.1.tar.gz, error.2.tar.gz, ... will be generated.
2.871451
2.96482
0.968508
host = os.environ.get('HOSTNAME', None) cluster = os.environ.get('SGE_O_HOST', None) if host is None: try: import socket host = host or socket.gethostname() except: pass return host or 'unknown', cluster or 'unknown'
def get_execution_host_info()
Tries to return a tuple describing the execution host. Doesn't work for all queueing systems Returns: (HOSTNAME, CLUSTER_NAME)
3.597482
3.248044
1.107584
qclog = open(self.qclog_file, 'w') p = subprocess.Popen(self.current_command, stdout=qclog) return p
def run(self)
Perform the actual QChem run. Returns: (subprocess.Popen) Used for monitoring.
6.27874
4.571953
1.373317
decompress_dir('.') if self.backup: for f in VASP_INPUT_FILES: shutil.copy(f, "{}.orig".format(f)) if self.auto_npar: try: incar = Incar.from_file("INCAR") # Only optimized NPAR for non-HF and non-RPA calculations. if not (incar.get("LHFCALC") or incar.get("LRPA") or incar.get("LEPSILON")): if incar.get("IBRION") in [5, 6, 7, 8]: # NPAR should not be set for Hessian matrix # calculations, whether in DFPT or otherwise. del incar["NPAR"] else: import multiprocessing # try sge environment variable first # (since multiprocessing counts cores on the current # machine only) ncores = os.environ.get('NSLOTS') or \ multiprocessing.cpu_count() ncores = int(ncores) for npar in range(int(math.sqrt(ncores)), ncores): if ncores % npar == 0: incar["NPAR"] = npar break incar.write_file("INCAR") except: pass if self.auto_continue: if os.path.exists("continue.json"): actions = loadfn("continue.json").get("actions") logger.info("Continuing previous VaspJob. Actions: {}".format(actions)) backup(VASP_BACKUP_FILES, prefix="prev_run") VaspModder().apply_actions(actions) else: # Default functionality is to copy CONTCAR to POSCAR and set # ISTART to 1 in the INCAR, but other actions can be specified if self.auto_continue is True: actions = [{"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}, {"dict": "INCAR", "action": {"_set": {"ISTART": 1}}}] else: actions = self.auto_continue dumpfn({"actions": actions}, "continue.json") if self.settings_override is not None: VaspModder().apply_actions(self.settings_override)
def setup(self)
Performs initial setup for VaspJob, including overriding any settings and backing up.
4.644039
4.450589
1.043466
cmd = list(self.vasp_cmd) if self.auto_gamma: vi = VaspInput.from_directory(".") kpts = vi["KPOINTS"] if kpts.style == Kpoints.supported_modes.Gamma \ and tuple(kpts.kpts[0]) == (1, 1, 1): if self.gamma_vasp_cmd is not None and which( self.gamma_vasp_cmd[-1]): cmd = self.gamma_vasp_cmd elif which(cmd[-1] + ".gamma"): cmd[-1] += ".gamma" logger.info("Running {}".format(" ".join(cmd))) with open(self.output_file, 'w') as f_std, \ open(self.stderr_file, "w", buffering=1) as f_err: # use line buffering for stderr p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err) return p
def run(self)
Perform the actual VASP run. Returns: (subprocess.Popen) Used for monitoring.
3.522165
3.387298
1.039815
for f in VASP_OUTPUT_FILES + [self.output_file]: if os.path.exists(f): if self.final and self.suffix != "": shutil.move(f, "{}{}".format(f, self.suffix)) elif self.suffix != "": shutil.copy(f, "{}{}".format(f, self.suffix)) if self.copy_magmom and not self.final: try: outcar = Outcar("OUTCAR") magmom = [m['tot'] for m in outcar.magnetization] incar = Incar.from_file("INCAR") incar['MAGMOM'] = magmom incar.write_file("INCAR") except: logger.error('MAGMOM copy from OUTCAR to INCAR failed') # Remove continuation so if a subsequent job is run in # the same directory, will not restart this job. if os.path.exists("continue.json"): os.remove("continue.json")
def postprocess(self)
Postprocessing includes renaming and gzipping where necessary. Also copies the magmom to the incar if necessary
3.847858
3.532475
1.089281
incar_update = {"ISTART": 1} if ediffg: incar_update["EDIFFG"] = ediffg settings_overide_1 = None settings_overide_2 = [ {"dict": "INCAR", "action": {"_set": incar_update}}, {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}] if half_kpts_first_relax and os.path.exists("KPOINTS") and \ os.path.exists("POSCAR"): kpts = Kpoints.from_file("KPOINTS") orig_kpts_dict = kpts.as_dict() # lattice vectors with length < 8 will get >1 KPOINT kpts.kpts = np.round(np.maximum(np.array(kpts.kpts) / 2, 1)).astype(int).tolist() low_kpts_dict = kpts.as_dict() settings_overide_1 = [ {"dict": "KPOINTS", "action": {"_set": low_kpts_dict}} ] settings_overide_2.append( {"dict": "KPOINTS", "action": {"_set": orig_kpts_dict}} ) return [VaspJob(vasp_cmd, final=False, suffix=".relax1", auto_npar=auto_npar, auto_continue=auto_continue, settings_override=settings_overide_1), VaspJob(vasp_cmd, final=True, backup=False, suffix=".relax2", auto_npar=auto_npar, auto_continue=auto_continue, settings_override=settings_overide_2)]
def double_relaxation_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05, half_kpts_first_relax=False, auto_continue=False)
Returns a list of two jobs corresponding to an AFLOW style double relaxation run. Args: vasp_cmd (str): Command to run vasp as a list of args. For example, if you are using mpirun, it can be something like ["mpirun", "pvasp.5.2.11"] auto_npar (bool): Whether to automatically tune NPAR to be sqrt( number of cores) as recommended by VASP for DFT calculations. Generally, this results in significant speedups. Defaults to True. Set to False for HF, GW and RPA calculations. ediffg (float): Force convergence criteria for subsequent runs ( ignored for the initial run.) half_kpts_first_relax (bool): Whether to halve the kpoint grid for the first relaxation. Speeds up difficult convergence considerably. Defaults to False. Returns: List of two jobs corresponding to an AFLOW style run.
2.645477
2.681523
0.986558
incar = Incar.from_file("INCAR") # Defaults to using the SCAN metaGGA metaGGA = incar.get("METAGGA", "SCAN") # Pre optimze WAVECAR and structure using regular GGA pre_opt_setings = [{"dict": "INCAR", "action": {"_set": {"METAGGA": None, "LWAVE": True, "NSW": 0}}}] jobs = [VaspJob(vasp_cmd, auto_npar=auto_npar, final=False, suffix=".precondition", settings_override=pre_opt_setings)] # Finish with regular double relaxation style run using SCAN jobs.extend(VaspJob.double_relaxation_run(vasp_cmd, auto_npar=auto_npar, ediffg=ediffg, half_kpts_first_relax=half_kpts_first_relax)) # Ensure the first relaxation doesn't overwrite the original inputs jobs[1].backup = False # Update double_relaxation job to start from pre-optimized run post_opt_settings = [{"dict": "INCAR", "action": {"_set": {"METAGGA": metaGGA, "ISTART": 1, "NSW": incar.get("NSW", 99), "LWAVE": incar.get("LWAVE", False)}}}, {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}] if jobs[1].settings_override: post_opt_settings = jobs[1].settings_override + post_opt_settings jobs[1].settings_override = post_opt_settings return jobs
def metagga_opt_run(cls, vasp_cmd, auto_npar=True, ediffg=-0.05, half_kpts_first_relax=False, auto_continue=False)
Returns a list of three jobs to perform an optimization for any metaGGA functional. There is an initial calculation of the GGA wavefunction which is fed into the initial metaGGA optimization to precondition the electronic structure optimizer. The metaGGA optimization is performed using the double relaxation scheme.
4.268615
4.216098
1.012456
for i in range(max_steps): if i == 0: settings = None backup = True if half_kpts_first_relax and os.path.exists("KPOINTS") and \ os.path.exists("POSCAR"): kpts = Kpoints.from_file("KPOINTS") orig_kpts_dict = kpts.as_dict() kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1).tolist() low_kpts_dict = kpts.as_dict() settings = [ {"dict": "KPOINTS", "action": {"_set": low_kpts_dict}} ] else: backup = False initial = Poscar.from_file("POSCAR").structure final = Poscar.from_file("CONTCAR").structure vol_change = (final.volume - initial.volume) / initial.volume logger.info("Vol change = %.1f %%!" % (vol_change * 100)) if abs(vol_change) < vol_change_tol: logger.info("Stopping optimization!") break else: incar_update = {"ISTART": 1} if ediffg: incar_update["EDIFFG"] = ediffg settings = [ {"dict": "INCAR", "action": {"_set": incar_update}}, {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}] if i == 1 and half_kpts_first_relax: settings.append({"dict": "KPOINTS", "action": {"_set": orig_kpts_dict}}) logger.info("Generating job = %d!" % (i+1)) yield VaspJob(vasp_cmd, final=False, backup=backup, suffix=".relax%d" % (i+1), settings_override=settings, **vasp_job_kwargs)
def full_opt_run(cls, vasp_cmd, vol_change_tol=0.02, max_steps=10, ediffg=-0.05, half_kpts_first_relax=False, **vasp_job_kwargs)
Returns a generator of jobs for a full optimization run. Basically, this runs an infinite series of geometry optimization jobs until the % vol change in a particular optimization is less than vol_change_tol. Args: vasp_cmd (str): Command to run vasp as a list of args. For example, if you are using mpirun, it can be something like ["mpirun", "pvasp.5.2.11"] vol_change_tol (float): The tolerance at which to stop a run. Defaults to 0.02, i.e., 2%. max_steps (int): The maximum number of runs. Defaults to 10 (highly unlikely that this limit is ever reached). ediffg (float): Force convergence criteria for subsequent runs (ignored for the initial run). half_kpts_first_relax (bool): Whether to halve the kpoint grid for the first relaxation. Speeds up difficult convergence considerably. Defaults to False. \*\*vasp_job_kwargs: Passthrough kwargs to VaspJob. See :class:`custodian.vasp.jobs.VaspJob`. Returns: Generator of jobs.
2.806628
2.720504
1.031658
neb_dirs = self.neb_dirs if self.backup: # Back up KPOINTS, INCAR, POTCAR for f in VASP_NEB_INPUT_FILES: shutil.copy(f, "{}.orig".format(f)) # Back up POSCARs for path in neb_dirs: poscar = os.path.join(path, "POSCAR") shutil.copy(poscar, "{}.orig".format(poscar)) if self.half_kpts and os.path.exists("KPOINTS"): kpts = Kpoints.from_file("KPOINTS") kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1) kpts.kpts = kpts.kpts.astype(int).tolist() if tuple(kpts.kpts[0]) == (1, 1, 1): kpt_dic = kpts.as_dict() kpt_dic["generation_style"] = 'Gamma' kpts = Kpoints.from_dict(kpt_dic) kpts.write_file("KPOINTS") if self.auto_npar: try: incar = Incar.from_file("INCAR") import multiprocessing # Try sge environment variable first # (since multiprocessing counts cores on the current # machine only) ncores = os.environ.get('NSLOTS') or multiprocessing.cpu_count() ncores = int(ncores) for npar in range(int(math.sqrt(ncores)), ncores): if ncores % npar == 0: incar["NPAR"] = npar break incar.write_file("INCAR") except: pass if self.auto_continue and \ os.path.exists("STOPCAR") and \ not os.access("STOPCAR", os.W_OK): # Remove STOPCAR os.chmod("STOPCAR", 0o644) os.remove("STOPCAR") # Copy CONTCAR to POSCAR for path in self.neb_sub: contcar = os.path.join(path, "CONTCAR") poscar = os.path.join(path, "POSCAR") shutil.copy(contcar, poscar) if self.settings_override is not None: VaspModder().apply_actions(self.settings_override)
def setup(self)
Performs initial setup for VaspNEBJob, including overriding any settings and backing up.
3.162629
3.046184
1.038227
# Add suffix to all sub_dir/{items} for path in self.neb_dirs: for f in VASP_NEB_OUTPUT_SUB_FILES: f = os.path.join(path, f) if os.path.exists(f): if self.final and self.suffix != "": shutil.move(f, "{}{}".format(f, self.suffix)) elif self.suffix != "": shutil.copy(f, "{}{}".format(f, self.suffix)) # Add suffix to all output files for f in VASP_NEB_OUTPUT_FILES + [self.output_file]: if os.path.exists(f): if self.final and self.suffix != "": shutil.move(f, "{}{}".format(f, self.suffix)) elif self.suffix != "": shutil.copy(f, "{}{}".format(f, self.suffix))
def postprocess(self)
Postprocessing includes renaming and gzipping where necessary.
2.931559
2.784898
1.052663
if self.backup: shutil.copy(self.input_file, "{}.orig".format(self.input_file))
def setup(self)
Performs backup if necessary.
5.906226
4.220378
1.399454
with zopen(self.output_file, 'w') as fout: return subprocess.Popen(self.nwchem_cmd + [self.input_file], stdout=fout)
def run(self)
Performs actual nwchem run.
6.501354
4.009345
1.62155
x = float(x) if x < 0.0 or x > 1.0: raise ArgumentTypeError("{} not in range [0.0, 1.0]".format(x)) return x
def valid_GC(x)
Type function for argparse to check GC values. Checks that the supplied value for minGC or maxGC is valid, i.e., between 0 and 1.
2.561621
2.483165
1.031595
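A hypothetical argparse wiring for valid_GC to show how the type function rejects out-of-range values; the option names are assumptions that mirror the ones used in filter_stream in the next record.
from argparse import ArgumentParser, ArgumentTypeError

def valid_GC(x):
    x = float(x)
    if x < 0.0 or x > 1.0:
        raise ArgumentTypeError("{} not in range [0.0, 1.0]".format(x))
    return x

# Hypothetical parser: option names are assumptions based on the surrounding code.
parser = ArgumentParser()
parser.add_argument("--minGC", type=valid_GC, default=0.0)
parser.add_argument("--maxGC", type=valid_GC, default=1.0)

print(parser.parse_args(["--minGC", "0.4"]))
# Namespace(maxGC=1.0, minGC=0.4)
# parser.parse_args(["--minGC", "1.4"]) would exit with:
# error: argument --minGC: 1.4 not in range [0.0, 1.0]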
if args.quality: quality_check = ave_qual else: quality_check = silent_quality_check minlen = args.length + int(args.headcrop or 0) - (int(args.tailcrop or 0)) for rec in SeqIO.parse(fq, "fastq"): if args.GC_filter: gc = (rec.seq.upper().count("C") + rec.seq.upper().count("G")) / len(rec) else: gc = 0.50 # dummy variable if quality_check(rec.letter_annotations["phred_quality"]) > args.quality \ and minlen <= len(rec) <= args.maxlength \ and args.minGC <= gc <= args.maxGC: print(rec[args.headcrop:args.tailcrop].format("fastq"), end="")
def filter_stream(fq, args)
Filter a fastq file on stdin. Print fastq record to stdout if it passes - quality filter (optional) - length filter (optional) - min/maxGC filter (optional) Optionally trim a number of nucleotides from beginning and end. Record has to be longer than args.length (default 1) after trimming Use a faster silent quality_check if no filtering on quality is required
3.803419
3.247976
1.171012
data = {entry[0]: entry[1] for entry in process_summary( summaryfile=args.summary, threads="NA", readtype=args.readtype, barcoded=False)[ ["readIDs", "quals"]].itertuples(index=False)} try: for record in SeqIO.parse(fq, "fastq"): if data[record.id] > args.quality \ and args.length <= len(record) <= args.maxlength: print(record[args.headcrop:args.tailcrop].format("fastq"), end="") except KeyError: logging.error("mismatch between summary and fastq: \ {} was not found in the summary file.".format(record.id)) sys.exit('\nERROR: mismatch between sequencing_summary and fastq file: \ {} was not found in the summary file.\nQuitting.'.format(record.id))
def filter_using_summary(fq, args)
Use quality scores from the albacore summary file for filtering. The summary file from albacore provides a more accurate quality estimate. Get the dataframe from nanoget and convert it to a dictionary.
5.77005
5.789872
0.996576
'''decorator describing methods that require the master key''' def ret(obj, *args, **kw): conn = ACCESS_KEYS if not (conn and conn.get('master_key')): message = '%s requires the master key' % func.__name__ raise core.ParseError(message) func(obj, *args, **kw) return ret
def master_key_required(func)
decorator describing methods that require the master key
6.594866
5.983007
1.102266
if batch: urlsplitter = urlparse(API_ROOT).netloc ret = {"method": http_verb, "path": uri.split(urlsplitter, 1)[1]} if kw: ret["body"] = kw return ret if not ('app_id' in ACCESS_KEYS and 'rest_key' in ACCESS_KEYS): raise core.ParseError('Missing connection credentials') app_id = ACCESS_KEYS.get('app_id') rest_key = ACCESS_KEYS.get('rest_key') master_key = ACCESS_KEYS.get('master_key') url = uri if uri.startswith(API_ROOT) else cls.ENDPOINT_ROOT + uri if _body is None: data = kw and json.dumps(kw, default=date_handler) or "{}" else: data = _body if http_verb == 'GET' and data: url += '?%s' % urlencode(kw) data = None else: if cls.__name__ == 'File': data = data else: data = data.encode('utf-8') headers = { 'Content-type': 'application/json', 'X-Parse-Application-Id': app_id, 'X-Parse-REST-API-Key': rest_key } headers.update(extra_headers or {}) if cls.__name__ == 'File': request = Request(url.encode('utf-8'), data, headers) else: request = Request(url, data, headers) if ACCESS_KEYS.get('session_token'): request.add_header('X-Parse-Session-Token', ACCESS_KEYS.get('session_token')) elif master_key: request.add_header('X-Parse-Master-Key', master_key) request.get_method = lambda: http_verb try: response = urlopen(request, timeout=CONNECTION_TIMEOUT) except HTTPError as e: exc = { 400: core.ResourceRequestBadRequest, 401: core.ResourceRequestLoginRequired, 403: core.ResourceRequestForbidden, 404: core.ResourceRequestNotFound }.get(e.code, core.ParseError) raise exc(e.read()) return json.loads(response.read().decode('utf-8'))
def execute(cls, uri, http_verb, extra_headers=None, batch=False, _body=None, **kw)
if batch == False, execute a command with the given parameters and return the response JSON. If batch == True, return the dictionary that would be used in a batch command.
2.471925
2.498943
0.989188
methods = list(methods) # methods can be an iterator if not methods: # also accepts an empty list (or generator) - it allows calling batch directly with a query result (possibly empty) return queries, callbacks = list(zip(*[m(batch=True) for m in methods])) # perform all the operations in one batch responses = self.execute("", "POST", requests=queries) # perform the callbacks with the response data (updating the existing # objects, etc) batched_errors = [] for callback, response in zip(callbacks, responses): if "success" in response: callback(response["success"]) else: batched_errors.append(response["error"]) if batched_errors: raise core.ParseBatchError(batched_errors)
def batch(self, methods)
Given a list of create, update or delete methods to call, call all of them in a single batch operation.
8.404237
8.214931
1.023044
installation_url = cls._get_installation_url(installation_id) current_config = cls.GET(installation_url) new_channels = list(set(current_config['channels']).union(channels_to_add).difference(channels_to_remove)) cls.PUT(installation_url, channels=new_channels)
def update_channels(cls, installation_id, channels_to_add=set(), channels_to_remove=set(), **kw)
Allow an application to manually subscribe or unsubscribe an installation to a certain push channel in a unified operation. this is based on: https://www.parse.com/docs/rest#installations-updating installation_id: the installation id you'd like to add a channel to channels_to_add: the name of the channel you'd like to subscribe the user to channels_to_remove: the name of the channel you'd like to unsubscribe the user from
2.883833
3.096701
0.93126
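A usage sketch; Installation is assumed to be the class exposing update_channels, and the installation id is a placeholder.
Installation.update_channels(
    'fd9b4d6e-0000-0000-0000-000000000000',   # placeholder installation id
    channels_to_add={'scores', 'news'},
    channels_to_remove={'beta'},
)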
if self._result_cache is not None:
    return len(self._result_cache) if count else self._result_cache
options = dict(self._options)  # make a local copy
if self._where:
    # JSON encode WHERE values
    options['where'] = json.dumps(self._where)
if self._select_related:
    options['include'] = ','.join(self._select_related)
if count:
    return self._manager._count(**options)

self._result_cache = self._manager._fetch(**options)
return self._result_cache
def _fetch(self, count=False)
Return a list of objects matching the query, or, if count == True, return only the number of matching objects.
3.975986
3.520616
1.129344
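A sketch of the caching behaviour implied by _result_cache; GameScore, its Query manager, and the filter lookup are assumed names in the style of the surrounding library, not part of the record above.
qs = GameScore.Query.filter(score__gte=100)   # lazy: nothing fetched yet
results = list(qs)    # first evaluation calls _fetch() and fills _result_cache
results2 = list(qs)   # served from the cache; no second request is made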
'''Decorator for registering complex types'''
def wrapped(cls):
    ParseType.type_mapping[name or cls.__name__] = cls
    return cls
return wrapped
def complex_type(name=None)
Decorator for registering complex types
6.863429
6.307674
1.088108
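A sketch of registering complex types with and without an explicit name; the class names below are purely illustrative and only assume that ParseType is the base class whose type_mapping the decorator fills.
@complex_type()
class Money(ParseType):          # registered under its own name, 'Money'
    pass

@complex_type('Bytes')
class BinaryBlob(ParseType):     # registered under the explicit name 'Bytes'
    pass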
class_name = str(class_name.lstrip('_'))
types = ParseResource.__subclasses__()
# walk the whole subclass tree looking for a class with a matching name
while types:
    t = types.pop()
    if t.__name__ == class_name:
        return t
    types.extend(t.__subclasses__())
else:
    # no matching subclass declared: create one on the fly
    return type(class_name, (Object,), {})
def factory(cls, class_name)
Find the proper Object subclass matching class_name.

System types like _User are mapped to types without the underscore (parse_rest.user.User). If the user doesn't declare a matching type, a class is created on the fly.
3.797232
3.000932
1.265351
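A sketch of the lookup behaviour just described, assuming factory() is exposed as a classmethod on Object; the class names are placeholders.
class GameScore(Object):      # a user-declared subclass
    pass

assert Object.factory('GameScore') is GameScore   # declared subclass is found
Comment = Object.factory('Comment')               # none declared: built on the fly
assert issubclass(Comment, Object) and Comment.__name__ == 'Comment'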
root = '/'.join([API_ROOT, 'schemas', cls.__name__])
schema = cls.GET(root)
return schema
def schema(cls)
Retrieves the class' schema.
9.960745
7.129465
1.397124
root = '/'.join([API_ROOT, 'schemas', cls.__name__])
payload = {
    'className': cls.__name__,
    'fields': {
        key: {
            '__op': 'Delete'
        }
    }
}
cls.PUT(root, **payload)
def schema_delete_field(cls, key)
Deletes a field.
5.182523
4.807434
1.078023
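A usage sketch for the two schema helpers above; GameScore and 'oldField' are placeholders.
layout = GameScore.schema()                 # GET  .../schemas/GameScore
GameScore.schema_delete_field('oldField')   # PUT with {'__op': 'Delete'} for that column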
payload = {
    key: {
        '__op': 'Increment',
        'amount': amount
    }
}
self.__class__.PUT(self._absolute_url, **payload)
self.__dict__[key] += amount
def increment(self, key, amount=1)
Increment one value in the object. Note that this happens immediately: it does not wait for save() to be called
6.825861
5.523836
1.23571
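A usage sketch for increment; GameScore, the Query.get lookup, and the object id are assumptions made for illustration.
score = GameScore.Query.get(objectId='Ed1nuqPvcm')   # placeholder id
score.increment('score', 5)   # PUT happens immediately; the local attribute is bumped too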
payload = {
    key: {
        '__op': 'Delete'
    }
}
self.__class__.PUT(self._absolute_url, **payload)
del self.__dict__[key]
def remove(self, key)
Clear a column value in the object. Note that this happens immediately: it does not wait for save() to be called.
8.429971
6.493635
1.29819
'''decorator describing User methods that need to be logged in'''
def ret(obj, *args, **kw):
    if not hasattr(obj, 'sessionToken'):
        message = '%s requires a logged-in session' % func.__name__
        raise ResourceRequestLoginRequired(message)
    return func(obj, *args, **kw)
return ret
def login_required(func)
decorator describing User methods that need to be logged in
6.590267
5.180013
1.272249
'''Trigger Parse's password reset process. Return True/False to
indicate success/failure of the request.'''
url = '/'.join([API_ROOT, 'requestPasswordReset'])
try:
    User.POST(url, email=email)
    return True
except ParseError:
    return False
def request_password_reset(email)
Trigger Parse's password reset process. Return True/False to indicate success/failure of the request.
14.047824
4.534979
3.09766
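A short usage sketch; the email address is a placeholder.
if User.request_password_reset('someone@example.com'):
    print('Password reset email requested.')
else:
    print('The reset request failed.')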
res = {}
for k, v in iteritems(d):
    if isinstance(v, string_types) and DATETIME_ISO_FORMAT.match(v):
        v = dateutil.parser.parse(v)
    res[k] = v
return res
def parse(d)
Convert ISO formatted timestamps found as values in the dict d to datetime objects.

:return: A shallow copy of d with converted timestamps.
2.905221
2.738451
1.0609
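A small illustration of the conversion; the dict values are examples only, and the timestamp is assumed to match the module's DATETIME_ISO_FORMAT pattern.
row = {'id': 'x1', 'updated': '2019-04-30T16:30:00+00:00', 'note': 'plain text'}
converted = parse(row)
# converted['updated'] is now a datetime object; the other values and the
# original dict are left untouched (a shallow copy is returned).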
open_kw = {'mode': 'w'}
if PY3:  # pragma: no cover
    open_kw['encoding'] = 'utf-8'
# avoid indented lines ending with ", " on PY2
if kw.get('indent') and kw.get('separators') is None:
    kw['separators'] = (',', ': ')
with open(str(path), **open_kw) as fp:
    return json.dump(obj, fp, **kw)
def dump(obj, path, **kw)
Python 2 + 3 compatible version of json.dump.

:param obj: The object to be dumped.
:param path: The path of the JSON file to be written.
:param kw: Keyword parameters are passed to json.dump
3.79473
4.17942
0.907956
_kw = {}
if PY3:  # pragma: no cover
    _kw['encoding'] = 'utf-8'
with open(str(path), **_kw) as fp:
    return json.load(fp, **kw)
def load(path, **kw)
Python 2 + 3 compatible version of json.load.

:param kw: Keyword parameters are passed to json.load
:return: The Python object read from path.
3.158078
3.174626
0.994787
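A round-trip sketch for the dump/load pair above; the file name is arbitrary.
from pathlib import Path

path = Path('example.json')            # any str()-able path works
dump({'a': [1, 2, 3]}, path, indent=4)
assert load(path) == {'a': [1, 2, 3]}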
res = []
for c, type_ in _tokens(text, brackets=brackets):
    if type_ == TextType.text:
        res.append(c)
return ''.join(res).strip()
def strip_brackets(text, brackets=None)
Strip brackets and what is inside brackets from text.

.. note:: If the text contains only one opening bracket, the rest of the text
   will be ignored. This is a feature, not a bug, as we want to avoid having
   this function raise errors too easily.
5.145308
6.307336
0.815766
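A hedged example, assuming the default bracket inventory includes parentheses; exact whitespace handling depends on the tokenizer.
print(strip_brackets('a dog (Canis familiaris), barking (loudly'))
# bracketed material is dropped; the unclosed bracket swallows the trailing
# text, per the note above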
res, chunk = [], []
for c, type_ in _tokens(text, brackets=brackets):
    if type_ == TextType.text and c in separators:
        res.append(''.join(chunk).strip())
        chunk = []
    else:
        chunk.append(c)
res.append(''.join(chunk).strip())
return nfilter(res)
def split_text_with_context(text, separators=WHITESPACE, brackets=None)
Splits text at separators outside of brackets.

:param text:
:param separators: An iterable of single character tokens.
:param brackets:
:return: A `list` of non-empty chunks.

.. note:: This function leaves content in brackets in the chunks.
3.688356
4.046301
0.911538
if not isinstance(separators, PATTERN_TYPE):
    separators = re.compile(
        '[{0}]'.format(''.join('\{0}'.format(c) for c in separators)))
return nfilter(
    s.strip() if strip else s
    for s in separators.split(strip_brackets(text, brackets=brackets)))
def split_text(text, separators=re.compile('\s'), brackets=None, strip=False)
Split text along the separators unless they appear within brackets.

:param separators: An iterable of single characters or a compiled regex pattern.
:param brackets: `dict` mapping start tokens to end tokens of what is to be
    recognized as brackets.

.. note:: This function will also strip content within brackets.
4.041597
4.880136
0.828173
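A hedged comparison of the two splitters above; the sample string is illustrative and the default brackets are assumed to include parentheses.
s = 'first; second (with; an aside); third'
print(split_text(s, separators=';', strip=True))    # the aside is removed entirely
print(split_text_with_context(s, separators=';'))   # the aside stays inside its chunk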
marker = None
value = []

for line in block.split('\n'):
    line = line.strip()
    if line.startswith('\\_'):
        continue  # we simply ignore SFM header fields
    match = MARKER_PATTERN.match(line)
    if match:
        if marker:
            yield marker, '\n'.join(value)
        marker = match.group('marker')
        value = [line[match.end():]]
    else:
        value.append(line)

if marker:
    yield marker, ('\n'.join(value)).strip()
def marker_split(block)
Yield (marker, value) pairs from a text block (i.e. a list of lines).

:param block: text block consisting of \n separated lines, as will be the case
    for files read using "rU" mode.
:return: generator of (marker, value) pairs.
3.472835
3.635371
0.95529
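A small example block; the markers themselves are invented for illustration.
block = '\\lx house\n\\de a building for living in\nsecond line of the gloss\n\\ps n'
for marker, value in marker_split(block):
    print(marker, '->', value)
# continuation lines without a leading marker are folded into the previous value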
for k, v in self:
    if k == key:
        return v
return default
def get(self, key, default=None)
Retrieve the first value for a marker, or `default` if the marker is not present.
4.00023
3.252173
1.230017
marker_map = marker_map or {}
for entry in parse(
        filename,
        encoding,
        entry_sep,
        entry_prefix or entry_sep,
        keep_empty=keep_empty):
    if entry:
        self.append(entry_impl([(marker_map.get(k, k), v) for k, v in entry]))
def read(self, filename, encoding='utf-8', marker_map=None, entry_impl=Entry, entry_sep='\n\n', entry_prefix=None, keep_empty=False)
Extend the list by parsing new entries from a file.

:param filename:
:param encoding:
:param marker_map: A dict used to map marker names.
:param entry_impl: Subclass of Entry or None
:param entry_sep:
:param entry_prefix:
3.05229
3.970604
0.768722
with io.open(str(filename), 'w', encoding=encoding) as fp:
    for entry in self:
        fp.write(entry.__unicode__())
        fp.write('\n\n')
def write(self, filename, encoding='utf-8')
Write the list of entries to a file.

:param filename:
:param encoding:
:return:
2.978605
3.362438
0.885847
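A hedged round-trip sketch for the read/write pair; SFM is assumed to be the entry-list class these methods belong to, and the file names and the 'lx' marker are placeholders.
lexicon = SFM()
lexicon.read('dictionary.sfm')
print(lexicon[0].get('lx'))          # first \lx value of the first entry, if any
lexicon.write('dictionary_copy.sfm')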
valid = {"": default, "yes": True, "y": True, "no": False, "n": False}
while 1:
    choice = input(question + (" [Y/n] " if default else " [y/N] ")).lower()
    if choice in valid:
        return valid[choice]
    print("Please respond with 'y' or 'n' ")
def confirm(question, default=True)
Ask a yes/no question interactively. :param question: The text of the question to ask. :returns: True if the answer was "yes", False otherwise.
2.230665
2.731605
0.816613
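An interactive sketch: pressing Enter accepts the default shown in brackets; the question text is illustrative.
if confirm('Delete the generated files?', default=False):
    print('deleting...')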
lines = []
for line in self.get(section, option, fallback='').splitlines():
    if re.match(re.escape(whitespace_preserving_prefix) + '\s+', line):
        line = line[len(whitespace_preserving_prefix):]
    lines.append(line)
return '\n'.join(lines)
def gettext(self, section, option, whitespace_preserving_prefix='.')
While configparser supports multiline values, it does this at the expense of stripping leading whitespace for each line in such a value. Sometimes we want to preserve such whitespace, e.g. to be able to put markdown with nested lists into INI files.

We support this by introducing a special prefix, which is prepended to lines starting with whitespace in `settext` and stripped in `gettext`.
2.499864
2.231501
1.120261
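A hedged sketch of the round trip; INI is assumed to be the configparser subclass providing the settext/gettext pair mentioned above, and the exact settext signature is an assumption.
cfg = INI()
cfg.add_section('doc')
cfg.settext('doc', 'description', 'A list:\n  - first item\n  - second item')
print(cfg.gettext('doc', 'description'))   # leading whitespace of the nested
                                           # items survives the round trip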