code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
self._shard_state.counters_map.increment(counter_name, delta)
def increment(self, counter_name, delta=1)
Increment counter value. Args: counter_name: name of the counter as string. delta: increment delta as int.
15.057514
12.848257
1.17195
cursor = self._cursor if self._query is not None: if isinstance(self._query, db.Query): cursor = self._query.cursor() else: cursor = self._query.cursor_after() if cursor is None or isinstance(cursor, basestring): cursor_object = False else: cursor_object = True cursor = cursor.to_websafe_string() return {"property_range": self._property_range.to_json(), "query_spec": self._query_spec.to_json(), "cursor": cursor, "ns_range": self._ns_range.to_json_object(), "name": self.__class__.__name__, "cursor_object": cursor_object}
def to_json(self)
Inherit doc.
3.539522
3.283406
1.078003
obj = cls(property_range.PropertyRange.from_json(json["property_range"]), namespace_range.NamespaceRange.from_json_object(json["ns_range"]), model.QuerySpec.from_json(json["query_spec"])) cursor = json["cursor"] # lint bug. Class method can access protected fields. # pylint: disable=protected-access if cursor and json["cursor_object"]: obj._cursor = datastore_query.Cursor.from_websafe_string(cursor) else: obj._cursor = cursor return obj
def from_json(cls, json)
Inherit doc.
6.556374
6.379225
1.02777
json = {"name": self.__class__.__name__, "num_ranges": len(self._iters)} for i in xrange(len(self._iters)): json_item = self._iters[i].to_json() query_spec = json_item["query_spec"] item_name = json_item["name"] # Delete and move one level up del json_item["query_spec"] del json_item["name"] json[str(i)] = json_item # Store once to save space json["query_spec"] = query_spec json["item_name"] = item_name return json
def to_json(self)
Inherit doc.
4.353416
4.169125
1.044204
num_ranges = int(json["num_ranges"]) query_spec = json["query_spec"] item_name = json["item_name"] p_range_iters = [] for i in xrange(num_ranges): json_item = json[str(i)] # Place query_spec, name back into each iterator json_item["query_spec"] = query_spec json_item["name"] = item_name p_range_iters.append(_PropertyRangeModelIterator.from_json(json_item)) obj = cls(p_range_iters) return obj
def from_json(cls, json)
Inherit doc.
4.497386
4.3466
1.034691
current_iter = None if self._current_iter: current_iter = self._current_iter.to_json() return {"key_ranges": self._key_ranges.to_json(), "query_spec": self._query_spec.to_json(), "current_iter": current_iter, "key_range_iter_cls": self._key_range_iter_cls.__name__, "name": self.__class__.__name__}
def to_json(self)
Inherit doc.
3.522787
3.333812
1.056684
key_range_iter_cls = _KEY_RANGE_ITERATORS[json["key_range_iter_cls"]] obj = cls(key_ranges.KeyRangesFactory.from_json(json["key_ranges"]), model.QuerySpec.from_json(json["query_spec"]), key_range_iter_cls) current_iter = None if json["current_iter"]: current_iter = key_range_iter_cls.from_json(json["current_iter"]) # pylint: disable=protected-access obj._current_iter = current_iter return obj
def from_json(cls, json)
Inherit doc.
3.535228
3.425673
1.031981
cursor = self._get_cursor() cursor_object = False if cursor and isinstance(cursor, datastore_query.Cursor): cursor = cursor.to_websafe_string() cursor_object = True return {"key_range": self._key_range.to_json(), "query_spec": self._query_spec.to_json(), "cursor": cursor, "cursor_object": cursor_object}
def to_json(self)
Serializes all states into json form. Returns: all states in json-compatible map.
3.626827
3.563128
1.017877
obj = cls(key_range.KeyRange.from_json(json["key_range"]), model.QuerySpec.from_json(json["query_spec"])) cursor = json["cursor"] # lint bug. Class method can access protected fields. # pylint: disable=protected-access if cursor and json["cursor_object"]: obj._cursor = datastore_query.Cursor.from_websafe_string(cursor) else: obj._cursor = cursor return obj
def from_json(cls, json)
Reverse of to_json.
6.384538
5.97815
1.067979
checked = set() yaml = _find_mapreduce_yaml(os.path.dirname(status_file), checked) if not yaml: yaml = _find_mapreduce_yaml(os.getcwd(), checked) return yaml
def find_mapreduce_yaml(status_file=__file__)
Traverse directory trees to find mapreduce.yaml file. Begins with the location of status.py and then moves on to check the working directory. Args: status_file: location of status.py, overridable for testing purposes. Returns: the path of mapreduce.yaml file or None if not found.
3.166582
3.667056
0.863522
dir = start while dir not in checked: checked.add(dir) for mr_yaml_name in MR_YAML_NAMES: yaml_path = os.path.join(dir, mr_yaml_name) if os.path.exists(yaml_path): return yaml_path dir = os.path.dirname(dir) return None
def _find_mapreduce_yaml(start, checked)
Traverse the directory tree identified by start until a directory already in checked is encountered or the path of mapreduce.yaml is found. Checked is present both to make loop termination easy to reason about and so that the same directories do not get rechecked. Args: start: the path to start in and work upward from. checked: the set of already-examined directories. Returns: the path of mapreduce.yaml file or None if not found.
2.245196
2.4223
0.926886
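The upward search above is easier to follow with its line structure restored. The sketch below is a standalone restatement of the same walk; the function name and the MR_YAML_NAMES values are assumptions for illustration, since the constant is not shown in this record.

import os

MR_YAML_NAMES = ["mapreduce.yaml", "mapreduce.yml"]  # assumed values; not shown above

def find_yaml_upward(start, checked):
    # Walk from 'start' toward the filesystem root, stopping at any directory
    # already examined (os.path.dirname("/") == "/" guarantees termination).
    current = start
    while current not in checked:
        checked.add(current)
        for name in MR_YAML_NAMES:
            candidate = os.path.join(current, name)
            if os.path.exists(candidate):
                return candidate
        current = os.path.dirname(current)
    return None

print(find_yaml_upward(os.getcwd(), set()))  # path to a mapreduce.yaml above cwd, or None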
try: builder = yaml_object.ObjectBuilder(MapReduceYaml) handler = yaml_builder.BuilderHandler(builder) listener = yaml_listener.EventListener(handler) listener.Parse(contents) mr_info = handler.GetResults() except (ValueError, yaml_errors.EventError), e: raise errors.BadYamlError(e) if len(mr_info) < 1: raise errors.BadYamlError("No configs found in mapreduce.yaml") if len(mr_info) > 1: raise errors.MultipleDocumentsInMrYaml("Found %d YAML documents" % len(mr_info)) jobs = mr_info[0] job_names = set(j.name for j in jobs.mapreduce) if len(jobs.mapreduce) != len(job_names): raise errors.BadYamlError( "Overlapping mapreduce names; names must be unique") return jobs
def parse_mapreduce_yaml(contents)
Parses mapreduce.yaml file contents. Args: contents: mapreduce.yaml file contents. Returns: MapReduceYaml object with all the data from original file. Raises: errors.BadYamlError: when contents is not a valid mapreduce.yaml file.
3.703231
3.696481
1.001826
mr_yaml_path = find_mapreduce_yaml() if not mr_yaml_path: raise errors.MissingYamlError() mr_yaml_file = open(mr_yaml_path) try: return parse(mr_yaml_file.read()) finally: mr_yaml_file.close()
def get_mapreduce_yaml(parse=parse_mapreduce_yaml)
Locates mapreduce.yaml, loads and parses its info. Args: parse: Used for testing. Returns: MapReduceYaml object. Raises: errors.BadYamlError: when contents is not a valid mapreduce.yaml file or the file is missing.
2.614231
2.976456
0.878303
all_configs = [] for config in mapreduce_yaml.mapreduce: out = { "name": config.name, "mapper_input_reader": config.mapper.input_reader, "mapper_handler": config.mapper.handler, } if config.mapper.params_validator: out["mapper_params_validator"] = config.mapper.params_validator if config.mapper.params: param_defaults = {} for param in config.mapper.params: param_defaults[param.name] = param.default or param.value out["mapper_params"] = param_defaults if config.params: param_defaults = {} for param in config.params: param_defaults[param.name] = param.default or param.value out["params"] = param_defaults if config.mapper.output_writer: out["mapper_output_writer"] = config.mapper.output_writer all_configs.append(out) return all_configs
def to_dict(mapreduce_yaml)
Converts a MapReduceYaml file into a JSON-encodable dictionary. For use in user-visible UI and internal methods for interfacing with user code (like param validation). Args: mapreduce_yaml: The Python representation of the mapreduce.yaml document. Returns: A list of configuration dictionaries.
1.950501
1.921273
1.015213
if job_config.input_reader_cls != cls: raise errors.BadReaderParamsError( "Expect input reader class %r, got %r." % (cls, job_config.input_reader_cls))
def validate(cls, job_config)
Validates relevant parameters. This method can validate fields which it deems relevant. Args: job_config: an instance of map_job.JobConfig. Raises: errors.BadReaderParamsError: required parameters are missing or invalid.
4.630408
3.831753
1.208431
ctx = context.get() l = len(records) key_records = [None] * l logging.debug("Parsing") for i in range(l): proto = kv_pb.KeyValue() proto.ParseFromString(records[i]) key_records[i] = (proto.key(), records[i]) logging.debug("Sorting") key_records.sort(cmp=_compare_keys) logging.debug("Writing") mapper_spec = ctx.mapreduce_spec.mapper params = input_readers._get_params(mapper_spec) bucket_name = params.get("bucket_name") filename = (ctx.mapreduce_spec.name + "/" + ctx.mapreduce_id + "/output-" + ctx.shard_id + "-" + str(int(time.time()))) full_filename = "/%s/%s" % (bucket_name, filename) filehandle = cloudstorage.open(full_filename, mode="w") with output_writers.GCSRecordsPool(filehandle, ctx=ctx) as pool: for key_record in key_records: pool.append(key_record[1]) logging.debug("Finalizing") filehandle.close() entity = _OutputFile(key_name=full_filename, parent=_OutputFile.get_root_key(ctx.mapreduce_id)) entity.put()
def _sort_records_map(records)
Map function sorting records. Converts records to KeyValue protos, sorts them by key and writes them into new GCS file. Creates _OutputFile entity to record resulting file name. Args: records: list of records which are serialized KeyValue protos.
3.906863
3.491388
1.119
proto = kv_pb.KeyValues() proto.set_key(key) proto.value_list().extend(values) yield proto.Encode()
def _merge_map(key, values, partial)
A map function used in merge phase. Stores (key, values) into KeyValues proto and yields its serialization. Args: key: values key. values: values themselves. partial: True if more values for this key will follow. False otherwise. Yields: The proto.
7.980831
6.6367
1.20253
proto = kv_pb.KeyValue() proto.ParseFromString(binary_record) yield (proto.key(), proto.value())
def _hashing_map(binary_record)
A map function used in hash phase. Reads KeyValue from binary record. Args: binary_record: The binary record. Yields: The (key, value).
6.959962
5.292609
1.315034
filelists = mapper_spec.params[cls.FILES_PARAM] max_values_count = mapper_spec.params.get(cls.MAX_VALUES_COUNT_PARAM, -1) max_values_size = mapper_spec.params.get(cls.MAX_VALUES_SIZE_PARAM, -1) return [cls([0] * len(files), max_values_count, max_values_size) for files in filelists]
def split_input(cls, mapper_spec)
Split input into multiple shards.
2.994281
2.95544
1.013142
if mapper_spec.input_reader_class() != cls: raise errors.BadReaderParamsError("Input reader class mismatch") params = mapper_spec.params if cls.FILES_PARAM not in params: raise errors.BadReaderParamsError("Missing files parameter.")
def validate(cls, mapper_spec)
Validate reader parameters in mapper_spec.
4.549973
3.529092
1.289276
if mapper_spec.output_writer_class() != cls: raise errors.BadWriterParamsError("Output writer class mismatch") params = output_writers._get_params(mapper_spec) # Bucket Name is required if cls.BUCKET_NAME_PARAM not in params: raise errors.BadWriterParamsError( "%s is required for the _HashingGCSOutputWriter" % cls.BUCKET_NAME_PARAM)
def validate(cls, mapper_spec)
Validates mapper specification. Args: mapper_spec: an instance of model.MapperSpec to validate. Raises: BadWriterParamsError: when the output writer class does not match.
5.498181
4.829538
1.138449
# Use the member variable (since we don't have access to the context) to # flush each pool to minimize the size of each filehandle before we # serialize it. for pool in self._pools: if pool is not None: pool.flush(True) return {"filehandles": pickle.dumps(self._filehandles)}
def to_json(self)
Returns writer state to serialize in json. Returns: A json-izable version of the OutputWriter state.
12.600418
11.832302
1.064917
mapper_spec = mr_spec.mapper params = output_writers._get_params(mapper_spec) bucket_name = params.get(cls.BUCKET_NAME_PARAM) shards = mapper_spec.shard_count filehandles = [] filename = (mr_spec.name + "/" + mr_spec.mapreduce_id + "/shard-" + str(shard_number) + "-bucket-") for i in range(shards): full_filename = "/%s/%s%d" % (bucket_name, filename, i) filehandles.append(cloudstorage.open(full_filename, mode="w")) return cls(filehandles)
def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None)
Inherit docs.
4.134168
4.065105
1.016989
shards = mapreduce_state.mapreduce_spec.mapper.shard_count filenames = [] for _ in range(shards): filenames.append([None] * shards) shard_states = model.ShardState.find_all_by_mapreduce_state(mapreduce_state) for x, shard_state in enumerate(shard_states): shard_filenames = shard_state.writer_state["shard_filenames"] for y in range(shards): filenames[y][x] = shard_filenames[y] return filenames
def get_filenames(cls, mapreduce_state)
See parent class.
2.963938
2.85229
1.039143
filenames = [] for filehandle in self._filehandles: filenames.append(filehandle.name) filehandle.close() shard_state.writer_state = {"shard_filenames": filenames}
def finalize(self, ctx, shard_state)
See parent class.
6.254956
5.468154
1.143888
ctx = context.get() if len(data) != 2: logging.error("Got bad tuple of length %d (2-tuple expected): %s", len(data), data) try: key = str(data[0]) value = str(data[1]) except TypeError: logging.error("Expecting a tuple, but got %s: %s", data.__class__.__name__, data) file_index = key.__hash__() % len(self._filehandles) # Work-around: Since we don't have access to the context in the to_json() # function, but we need to flush each pool before we serialize the # filehandle, we rely on a member variable instead of using context for # pool management. pool = self._pools[file_index] if pool is None: filehandle = self._filehandles[file_index] pool = output_writers.GCSRecordsPool(filehandle=filehandle, ctx=ctx) self._pools[file_index] = pool proto = kv_pb.KeyValue() proto.set_key(key) proto.set_value(value) pool.append(proto.Encode())
def write(self, data)
Write data. Args: data: actual data yielded from handler. Type is writer-specific.
4.924861
4.885136
1.008132
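The write() method above routes each key to one of N output files with key.__hash__() % N, so every occurrence of a key lands in the same file. Below is a minimal stand-alone illustration of that partitioning rule; the function and variable names are mine, not the library's. Note that the library targets the Python 2 runtime, where str hashes were deterministic across processes; Python 3 randomizes them per process unless PYTHONHASHSEED is pinned.

def shard_for_key(key, num_shards):
    # Same routing rule as the writer above: stable hash modulo shard count.
    return hash(key) % num_shards

buckets = [[] for _ in range(4)]
for key, value in [("apple", 1), ("banana", 2), ("apple", 3)]:
    buckets[shard_for_key(key, len(buckets))].append((key, value))

# Both "apple" records end up in the same bucket.
apple_bucket = buckets[shard_for_key("apple", 4)]
assert ("apple", 1) in apple_bucket and ("apple", 3) in apple_bucket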
reader_params = self.input_reader_cls.params_to_json( self.input_reader_params) # TODO(user): Do the same for writer params. return {"input_reader": reader_params, "output_writer": self.output_writer_params}
def _get_mapper_params(self)
Converts self to model.MapperSpec.params.
5.111381
4.570985
1.118223
# pylint: disable=g-import-not-at-top from mapreduce import model return model.MapperSpec( handler_spec=util._obj_to_path(self.mapper), input_reader_spec=util._obj_to_path(self.input_reader_cls), params=self._get_mapper_params(), shard_count=self.shard_count, output_writer_spec=util._obj_to_path(self.output_writer_cls))
def _get_mapper_spec(self)
Converts self to model.MapperSpec.
2.904243
2.597773
1.117974
return {"force_writes": self._force_writes, "done_callback": self.done_callback_url, "user_params": self.user_params, "shard_max_attempts": self.shard_max_attempts, "task_max_attempts": self._task_max_attempts, "task_max_data_processing_attempts": self._task_max_data_processing_attempts, "queue_name": self.queue_name, "base_path": self._base_path, "app_id": self._app, "api_version": self._api_version}
def _get_mr_params(self)
Converts self to model.MapreduceSpec.params.
3.792789
3.279894
1.156376
cfg = cls(_lenient=True) mr_params = cfg._get_mr_params() mr_params["api_version"] = 0 return mr_params
def _get_default_mr_params(cls)
Gets default values for old API.
8.779027
6.49613
1.351424
mapper_spec = mr_spec.mapper # 0 means all the old APIs before api_version is introduced. api_version = mr_spec.params.get("api_version", 0) old_api = api_version == 0 # Deserialize params from json if input_reader/output_writer are new API. input_reader_cls = mapper_spec.input_reader_class() input_reader_params = input_readers._get_params(mapper_spec) if issubclass(input_reader_cls, input_reader.InputReader): input_reader_params = input_reader_cls.params_from_json( input_reader_params) output_writer_cls = mapper_spec.output_writer_class() output_writer_params = output_writers._get_params(mapper_spec) # TODO(user): Call json (de)serialization for writer. # if (output_writer_cls and # issubclass(output_writer_cls, output_writer.OutputWriter)): # output_writer_params = output_writer_cls.params_from_json( # output_writer_params) # We can not always convert MapreduceSpec generated by older API # to JobConfig. Thus, mr framework should use/expose the returned JobConfig # object with caution when a job is started with an old API. # In this case, this method only tries not to blow up and assemble a # JobConfig object as accurate as possible. return cls(_lenient=old_api, job_name=mr_spec.name, job_id=mr_spec.mapreduce_id, # handler_spec from older API may not have map_job.Mapper type. mapper=util.for_name(mapper_spec.handler_spec), input_reader_cls=input_reader_cls, input_reader_params=input_reader_params, output_writer_cls=output_writer_cls, output_writer_params=output_writer_params, shard_count=mapper_spec.shard_count, queue_name=queue_name, user_params=mr_spec.params.get("user_params"), shard_max_attempts=mr_spec.params.get("shard_max_attempts"), done_callback_url=mr_spec.params.get("done_callback"), _force_writes=mr_spec.params.get("force_writes"), _base_path=mr_spec.params["base_path"], _task_max_attempts=mr_spec.params.get("task_max_attempts"), _task_max_data_processing_attempts=( mr_spec.params.get("task_max_data_processing_attempts")), _hooks_cls=util.for_name(mr_spec.hooks_class_name), _app=mr_spec.params.get("app_id"), _api_version=api_version)
def _to_map_job_config(cls, mr_spec, # TODO(user): Remove this parameter after it can be # read from mr_spec. queue_name)
Converts model.MapreduceSpec back to JobConfig. This method allows our internal methods to use JobConfig directly. It also allows us to expose JobConfig as an API during execution, even though it is not saved into the datastore. Args: mr_spec: model.MapreduceSpec. queue_name: queue name. Returns: The JobConfig object for this job.
3.759404
3.741456
1.004797
length = len(data) crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type]) crc = crc32c.crc_update(crc, data) crc = crc32c.crc_finalize(crc) self.__writer.write( struct.pack(_HEADER_FORMAT, _mask_crc(crc), length, record_type)) self.__writer.write(data) self.__position += _HEADER_LENGTH + length
def __write_record(self, record_type, data)
Write single physical record.
3.491799
3.37601
1.034297
block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE if block_remaining < _HEADER_LENGTH: # Header won't fit into remainder self.__writer.write('\x00' * block_remaining) self.__position += block_remaining block_remaining = _BLOCK_SIZE if block_remaining < len(data) + _HEADER_LENGTH: first_chunk = data[:block_remaining - _HEADER_LENGTH] self.__write_record(_RECORD_TYPE_FIRST, first_chunk) data = data[len(first_chunk):] while True: block_remaining = _BLOCK_SIZE - self.__position % _BLOCK_SIZE if block_remaining >= len(data) + _HEADER_LENGTH: self.__write_record(_RECORD_TYPE_LAST, data) break else: chunk = data[:block_remaining - _HEADER_LENGTH] self.__write_record(_RECORD_TYPE_MIDDLE, chunk) data = data[len(chunk):] else: self.__write_record(_RECORD_TYPE_FULL, data)
def write(self, data)
Write single record. Args: data: record data to write as string, byte array or byte sequence.
2.492768
2.520821
0.988872
pad_length = _BLOCK_SIZE - self.__position % _BLOCK_SIZE if pad_length and pad_length != _BLOCK_SIZE: self.__writer.write('\x00' * pad_length) self.__position += pad_length
def _pad_block(self)
Pad block with 0. Pads the current block with 0 bytes. A reader will simply treat these as a corrupted record and skip the block. This method is idempotent.
3.83283
4.226447
0.906868
block_remaining = _BLOCK_SIZE - self.__reader.tell() % _BLOCK_SIZE if block_remaining < _HEADER_LENGTH: return ('', _RECORD_TYPE_NONE) header = self.__reader.read(_HEADER_LENGTH) if len(header) != _HEADER_LENGTH: raise EOFError('Read %s bytes instead of %s' % (len(header), _HEADER_LENGTH)) (masked_crc, length, record_type) = struct.unpack(_HEADER_FORMAT, header) crc = _unmask_crc(masked_crc) if length + _HEADER_LENGTH > block_remaining: # A record can't be bigger than one block. raise errors.InvalidRecordError('Length is too big') data = self.__reader.read(length) if len(data) != length: raise EOFError('Not enough data read. Expected: %s but got %s' % (length, len(data))) if record_type == _RECORD_TYPE_NONE: return ('', record_type) actual_crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type]) actual_crc = crc32c.crc_update(actual_crc, data) actual_crc = crc32c.crc_finalize(actual_crc) if actual_crc != crc: raise errors.InvalidRecordError('Data crc does not match') return (data, record_type)
def __try_read_record(self)
Try reading a record. Returns: (data, record_type) tuple. Raises: EOFError: when end of file was reached. InvalidRecordError: when valid record could not be read.
2.697778
2.555226
1.055789
pad_length = _BLOCK_SIZE - self.__reader.tell() % _BLOCK_SIZE if pad_length and pad_length != _BLOCK_SIZE: data = self.__reader.read(pad_length) if len(data) != pad_length: raise EOFError('Read %d bytes instead of %d' % (len(data), pad_length))
def __sync(self)
Skip reader to the block boundary.
3.46023
2.843129
1.21705
data = None while True: last_offset = self.tell() try: (chunk, record_type) = self.__try_read_record() if record_type == _RECORD_TYPE_NONE: self.__sync() elif record_type == _RECORD_TYPE_FULL: if data is not None: logging.warning( "Ordering corruption: Got FULL record while already " "in a chunk at offset %d", last_offset) return chunk elif record_type == _RECORD_TYPE_FIRST: if data is not None: logging.warning( "Ordering corruption: Got FIRST record while already " "in a chunk at offset %d", last_offset) data = chunk elif record_type == _RECORD_TYPE_MIDDLE: if data is None: logging.warning( "Ordering corruption: Got MIDDLE record before FIRST " "record at offset %d", last_offset) else: data += chunk elif record_type == _RECORD_TYPE_LAST: if data is None: logging.warning( "Ordering corruption: Got LAST record but no chunk is in " "progress at offset %d", last_offset) else: result = data + chunk data = None return result else: raise errors.InvalidRecordError( "Unsupported record type: %s" % record_type) except errors.InvalidRecordError, e: logging.warning("Invalid record encountered at %s (%s). Syncing to " "the next block", last_offset, e) data = None self.__sync()
def read(self)
Reads record from current position in reader. Returns: original bytes stored in a single record.
2.526111
2.515201
1.004338
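The writer and reader records above implement a LevelDB-style framing: fixed-size blocks, each record prefixed by a header of (masked CRC, length, type), with FULL/FIRST/MIDDLE/LAST types used to span block boundaries and zero padding when a header will not fit at the end of a block. The round-trip sketch below illustrates the same framing idea only; it uses made-up constants, zlib.crc32 in place of the library's masked crc32c, and is not the mapreduce implementation.

import io
import struct
import zlib

_BLOCK_SIZE = 64                 # hypothetical small block, for demonstration
_HEADER_FORMAT = "<IHB"          # crc (4 bytes), length (2 bytes), type (1 byte)
_HEADER_LENGTH = struct.calcsize(_HEADER_FORMAT)
_FULL, _FIRST, _MIDDLE, _LAST = 1, 2, 3, 4

def write_record(out, data):
    # Write one logical record, splitting it across blocks when needed.
    remaining = _BLOCK_SIZE - out.tell() % _BLOCK_SIZE
    if remaining < _HEADER_LENGTH:           # header won't fit: pad the block
        out.write(b"\x00" * remaining)
        remaining = _BLOCK_SIZE
    chunks = []
    while len(data) + _HEADER_LENGTH > remaining:
        chunks.append(data[:remaining - _HEADER_LENGTH])
        data = data[remaining - _HEADER_LENGTH:]
        remaining = _BLOCK_SIZE
    chunks.append(data)
    for i, chunk in enumerate(chunks):
        if len(chunks) == 1:
            rtype = _FULL
        elif i == 0:
            rtype = _FIRST
        elif i == len(chunks) - 1:
            rtype = _LAST
        else:
            rtype = _MIDDLE
        crc = zlib.crc32(struct.pack("B", rtype) + chunk) & 0xFFFFFFFF
        out.write(struct.pack(_HEADER_FORMAT, crc, len(chunk), rtype))
        out.write(chunk)

def read_records(buf):
    # Reassemble logical records from a framed byte stream.
    records, partial = [], b""
    while True:
        remaining = _BLOCK_SIZE - buf.tell() % _BLOCK_SIZE
        if remaining < _HEADER_LENGTH:       # trailing pad: skip to the next block
            pad = buf.read(remaining)
            if len(pad) < remaining:
                return records               # end of stream
            continue
        header = buf.read(_HEADER_LENGTH)
        if len(header) < _HEADER_LENGTH:
            return records                   # end of stream
        crc, length, rtype = struct.unpack(_HEADER_FORMAT, header)
        chunk = buf.read(length)
        assert zlib.crc32(struct.pack("B", rtype) + chunk) & 0xFFFFFFFF == crc
        if rtype == _FULL:
            records.append(chunk)
        elif rtype == _FIRST:
            partial = chunk
        elif rtype == _MIDDLE:
            partial += chunk
        else:                                # _LAST
            records.append(partial + chunk)
            partial = b""

buf = io.BytesIO()
for rec in [b"a" * 10, b"b" * 200, b"c"]:
    write_record(buf, rec)
buf.seek(0)
assert read_records(buf) == [b"a" * 10, b"b" * 200, b"c"]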
if shards is None: shards = parameters.config.SHARD_COUNT if base_path is None: base_path = parameters.config.BASE_PATH mapreduce_id = control.start_map( job_name, handler_spec, input_reader_spec, params or {}, mapreduce_parameters={ "done_callback": self.get_callback_url(), "done_callback_method": "GET", "pipeline_id": self.pipeline_id, "base_path": base_path, }, shard_count=shards, output_writer_spec=output_writer_spec, queue_name=self.queue_name, ) self.fill(self.outputs.job_id, mapreduce_id) self.set_status(console_url="%s/detail?mapreduce_id=%s" % ( (base_path, mapreduce_id)))
def run(self, job_name, handler_spec, input_reader_spec, output_writer_spec=None, params=None, shards=None, base_path=None)
Start a mapreduce job. Args: job_name: mapreduce name. Only for display purpose. handler_spec: fully qualified name to your map function/class. input_reader_spec: fully qualified name to input reader class. output_writer_spec: fully qualified name to output writer class. params: a dictionary of parameters for input reader and output writer initialization. shards: number of shards. This provides a guide to mapreduce. The real number of shards is determined by how the input is split.
3.259929
3.45792
0.942743
if self.was_aborted: return mapreduce_id = self.outputs.job_id.value mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id) if mapreduce_state.result_status != model.MapreduceState.RESULT_SUCCESS: self.retry("Job %s had status %s" % ( mapreduce_id, mapreduce_state.result_status)) return mapper_spec = mapreduce_state.mapreduce_spec.mapper outputs = [] output_writer_class = mapper_spec.output_writer_class() if (output_writer_class and mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS): outputs = output_writer_class.get_filenames(mapreduce_state) self.fill(self.outputs.result_status, mapreduce_state.result_status) self.fill(self.outputs.counters, mapreduce_state.counters_map.to_dict()) self.complete(outputs)
def callback(self)
Callback after this async pipeline finishes.
3.001325
2.912655
1.030443
if shard_count is None: shard_count = parameters.config.SHARD_COUNT if mapper_parameters: mapper_parameters = dict(mapper_parameters) # Make sure this old API fill all parameters with default values. mr_params = map_job.JobConfig._get_default_mr_params() if mapreduce_parameters: mr_params.update(mapreduce_parameters) # Override default values if user specified them as arguments. if base_path: mr_params["base_path"] = base_path mr_params["queue_name"] = util.get_queue_name(queue_name) mapper_spec = model.MapperSpec(handler_spec, reader_spec, mapper_parameters, shard_count, output_writer_spec=output_writer_spec) if in_xg_transaction and not db.is_in_transaction(): logging.warning("Expects an opened xg transaction to start mapreduce " "when transactional is True.") return handlers.StartJobHandler._start_map( name, mapper_spec, mr_params, # TODO(user): Now that "queue_name" is part of mr_params. # Remove all the other ways to get queue_name after one release. queue_name=mr_params["queue_name"], eta=eta, countdown=countdown, hooks_class_name=hooks_class_name, _app=_app, in_xg_transaction=in_xg_transaction)
def start_map(name, handler_spec, reader_spec, mapper_parameters, shard_count=None, output_writer_spec=None, mapreduce_parameters=None, base_path=None, queue_name=None, eta=None, countdown=None, hooks_class_name=None, _app=None, in_xg_transaction=False)
Start a new, mapper-only mapreduce. Deprecated! Use map_job.start instead. If a value can be specified both from an explicit argument and from a dictionary, the value from the explicit argument wins. Args: name: mapreduce name. Used only for display purposes. handler_spec: fully qualified name of mapper handler function/class to call. reader_spec: fully qualified name of mapper reader to use. mapper_parameters: dictionary of parameters to pass to mapper. These are mapper-specific and also used for reader/writer initialization. Should have format {"input_reader": {}, "output_writer":{}}. Old deprecated style does not have sub-dictionaries. shard_count: number of shards to create. mapreduce_parameters: dictionary of mapreduce parameters relevant to the whole job. base_path: base path of mapreduce library handler specified in app.yaml. "/mapreduce" by default. queue_name: taskqueue queue name to be used for mapreduce tasks. See util.get_queue_name. eta: absolute time when the MR should execute. May not be specified if 'countdown' is also supplied. This may be timezone-aware or timezone-naive. countdown: time in seconds into the future that this MR should execute. Defaults to zero. hooks_class_name: fully qualified name of a hooks.Hooks subclass. in_xg_transaction: controls what transaction scope to use to start this MR job. If True, there has to be an already opened cross-group transaction scope. MR will use one entity group from it. If False, MR will create an independent transaction to start the job regardless of any existing transaction scopes. Returns: mapreduce id as string.
3.500737
3.687196
0.949431
self.__update_state() if self._state.active: return self.RUNNING else: return self._state.result_status
def get_status(self)
Get status enum. Returns: One of the status enum values.
9.355903
10.72814
0.87209
self.__update_state() return self._state.counters_map.get(counter_name, default)
def get_counter(self, counter_name, default=0)
Get the value of the named counter from this job. When a job is running, counter values won't be very accurate. Args: counter_name: name of the counter as a string. default: default value if the counter doesn't exist. Returns: Value of the named counter as an int.
7.577941
10.53075
0.719601
assert self.SUCCESS == self.get_status() ss = model.ShardState.find_all_by_mapreduce_state(self._state) for s in ss: yield iter(s.writer_state.get("outs", []))
def get_outputs(self)
Get outputs of this job. Should only be called if status is SUCCESS. Yields: Iterators, one for each shard. Each iterator is from the argument of map_job.output_writer.commit_output.
14.807792
10.484276
1.412381
cls.__validate_job_config(job_config) mapper_spec = job_config._get_mapper_spec() # Create mr spec. mapreduce_params = job_config._get_mr_params() mapreduce_spec = model.MapreduceSpec( job_config.job_name, job_config.job_id, mapper_spec.to_json(), mapreduce_params, util._obj_to_path(job_config._hooks_cls)) # Save states and enqueue task. if in_xg_transaction: propagation = db.MANDATORY else: propagation = db.INDEPENDENT state = None @db.transactional(propagation=propagation) def _txn(): state = cls.__create_and_save_state(job_config, mapreduce_spec) cls.__add_kickoff_task(job_config, mapreduce_spec) return state state = _txn() return cls(state)
def submit(cls, job_config, in_xg_transaction=False)
Submit the job to run. Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to use to start this MR job. If True, there has to be an already opened cross-group transaction scope. MR will use one entity group from it. If False, MR will create an independent transaction to start the job regardless of any existing transaction scopes. Returns: a Job instance representing the submitted job.
4.150525
4.168872
0.995599
# Only if the job was not in a terminal state. if self._state.active: self._state = self.__get_state_by_id(self.job_config.job_id)
def __update_state(self)
Fetches the most up-to-date state from the db.
9.202238
7.402293
1.24316
state = model.MapreduceState.get_by_job_id(job_id) if state is None: raise ValueError("Job state for job %s is missing." % job_id) return state
def __get_state_by_id(cls, job_id)
Get job state by id. Args: job_id: job id. Returns: model.MapreduceState for the job. Raises: ValueError: if the job state is missing.
4.243301
3.168998
1.339004
state = model.MapreduceState.create_new(job_config.job_id) state.mapreduce_spec = mapreduce_spec state.active = True state.active_shards = 0 state.app_id = job_config._app config = datastore_rpc.Configuration(force_writes=job_config._force_writes) state.put(config=config) return state
def __create_and_save_state(cls, job_config, mapreduce_spec)
Save map job state to datastore. Save state to datastore so that UI can see it immediately. Args: job_config: map_job.JobConfig. mapreduce_spec: model.MapreduceSpec. Returns: model.MapreduceState for this job.
3.770304
4.114815
0.916275
params = {"mapreduce_id": job_config.job_id} # Task is not named so that it can be added within a transaction. kickoff_task = taskqueue.Task( # TODO(user): Perhaps make this url a computed field of job_config. url=job_config._base_path + "/kickoffjob_callback/" + job_config.job_id, headers=util._get_task_headers(job_config.job_id), params=params) if job_config._hooks_cls: hooks = job_config._hooks_cls(mapreduce_spec) try: hooks.enqueue_kickoff_task(kickoff_task, job_config.queue_name) return except NotImplementedError: pass kickoff_task.add(job_config.queue_name, transactional=True)
def __add_kickoff_task(cls, job_config, mapreduce_spec)
Add kickoff task to taskqueue. Args: job_config: map_job.JobConfig. mapreduce_spec: model.MapreduceSpec.
4.394909
4.612523
0.952821
_json = self.to_json() try: return json.dumps(_json, sort_keys=True, cls=JsonEncoder) except: logging.exception("Could not serialize JSON: %r", _json) raise
def to_json_str(self)
Convert data to json string representation. Returns: json representation as string.
4.195897
4.506641
0.931047
return cls.from_json(json.loads(json_str, cls=JsonDecoder))
def from_json_str(cls, json_str)
Convert json string representation into class instance. Args: json_str: json representation as string. Returns: New instance of the class with data loaded from json string.
4.53682
8.210345
0.552574
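to_json_str and from_json_str above are a thin round-trip layer over each class's to_json/from_json pair. The sketch below shows that contract in miniature, using the standard json module in place of the library's JsonEncoder/JsonDecoder; the Point class and JsonMixin name are illustrative, not the library's.

import json

class JsonMixin(object):
    # Minimal sketch of the serialization contract implied above.
    def to_json_str(self):
        return json.dumps(self.to_json(), sort_keys=True)

    @classmethod
    def from_json_str(cls, json_str):
        return cls.from_json(json.loads(json_str))

class Point(JsonMixin):
    def __init__(self, x, y):
        self.x, self.y = x, y

    def to_json(self):
        return {"x": self.x, "y": self.y}

    @classmethod
    def from_json(cls, d):
        return cls(d["x"], d["y"])

p = Point.from_json_str(Point(1, 2).to_json_str())
assert (p.x, p.y) == (1, 2)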
value = super(JsonProperty, self).get_value_for_datastore(model_instance) if not value: return None json_value = value if not isinstance(value, dict): json_value = value.to_json() if not json_value: return None return datastore_types.Text(json.dumps( json_value, sort_keys=True, cls=JsonEncoder))
def get_value_for_datastore(self, model_instance)
Gets value for datastore. Args: model_instance: instance of the model class. Returns: datastore-compatible value.
2.732436
3.093827
0.88319
if value is None: return None _json = json.loads(value, cls=JsonDecoder) if self.data_type == dict: return _json return self.data_type.from_json(_json)
def make_value_from_datastore(self, value)
Convert value from datastore representation. Args: value: datastore value. Returns: value to store in the model.
5.282518
5.270368
1.002305
if value is not None and not isinstance(value, self.data_type): raise datastore_errors.BadValueError( "Property %s must be convertible to a %s instance (%s)" % (self.name, self.data_type, value)) return super(JsonProperty, self).validate(value)
def validate(self, value)
Validate value. Args: value: model value. Returns: Whether the specified value is valid data type value. Raises: BadValueError: when value is not of self.data_type type.
3.404284
3.406548
0.999335
task = self.to_task() task.add(queue_name, transactional)
def add(self, queue_name, transactional=False)
Add task to the queue.
8.34599
5.757287
1.449639
# Never pass params to taskqueue.Task. Use payload instead. Otherwise, # it's up to a particular taskqueue implementation to generate # payload from params. It could blow up payload size over limit. return taskqueue.Task( url=self.url, payload=self._payload, name=self.name, eta=self.eta, countdown=self.countdown, headers=self._headers)
def to_task(self)
Convert to a taskqueue task.
8.245244
7.078018
1.164909
# TODO(user): Pass mr_id into headers. Otherwise when payload decoding # failed, we can't abort a mr. if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION: raise DeprecationWarning( "Task is generated by an older incompatible version of mapreduce. " "Please kill this job manually") return cls._decode_payload(request.body)
def decode_payload(cls, request)
Decode task payload. HugeTask controls its own payload entirely, including urlencoding; it doesn't depend on any particular web framework. Args: request: a webapp Request instance. Returns: A dict of str to str. The same as the params argument to __init__. Raises: DeprecationWarning: when the task payload was constructed by an older incompatible version of mapreduce.
13.189553
9.198361
1.433902
current_value = self.counters.get(counter_name, 0) new_value = current_value + delta self.counters[counter_name] = new_value return new_value
def increment(self, counter_name, delta)
Increment counter value. Args: counter_name: counter name as String. delta: increment delta as Integer. Returns: new counter value.
2.101744
2.310487
0.909654
for counter_name in counters_map.counters: self.increment(counter_name, counters_map.counters[counter_name])
def add_map(self, counters_map)
Add all counters from the map. For each counter in the passed map, adds its value to the counter in this map. Args: counters_map: CountersMap instance to add.
4.299105
3.846082
1.117788
for counter_name in counters_map.counters: self.increment(counter_name, -counters_map.counters[counter_name])
def sub_map(self, counters_map)
Subtract all counters in the map. For each counter in the passed map, subtracts its value from the counter in this map. Args: counters_map: CountersMap instance to subtract.
4.978383
4.012544
1.240705
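The three methods above (increment, add_map, sub_map) imply a small counters container. The standalone sketch below captures that behaviour; it is a simplified stand-in, not the library's CountersMap implementation.

class CountersMap(object):
    # Simplified counters container mirroring increment/add_map/sub_map above.
    def __init__(self):
        self.counters = {}

    def increment(self, counter_name, delta):
        new_value = self.counters.get(counter_name, 0) + delta
        self.counters[counter_name] = new_value
        return new_value

    def add_map(self, other):
        for name, value in other.counters.items():
            self.increment(name, value)

    def sub_map(self, other):
        for name, value in other.counters.items():
            self.increment(name, -value)

a, b = CountersMap(), CountersMap()
a.increment("rows", 5)
b.increment("rows", 3)
a.add_map(b)
assert a.counters["rows"] == 8
a.sub_map(b)
assert a.counters["rows"] == 5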
result = { "mapper_handler_spec": self.handler_spec, "mapper_input_reader": self.input_reader_spec, "mapper_params": self.params, "mapper_shard_count": self.shard_count } if self.output_writer_spec: result["mapper_output_writer"] = self.output_writer_spec return result
def to_json(self)
Serializes this MapperSpec into a json-izable object.
3.206843
2.591647
1.237377
if self.__hooks is None and self.hooks_class_name is not None: hooks_class = util.for_name(self.hooks_class_name) if not isinstance(hooks_class, type): raise ValueError("hooks_class_name must refer to a class, got %s" % type(hooks_class).__name__) if not issubclass(hooks_class, hooks.Hooks): raise ValueError( "hooks_class_name must refer to a hooks.Hooks subclass") self.__hooks = hooks_class(self) return self.__hooks
def get_hooks(self)
Returns a hooks.Hooks class or None if no hooks class has been set.
2.486602
2.184706
1.138186
mapper_spec = self.mapper.to_json() return { "name": self.name, "mapreduce_id": self.mapreduce_id, "mapper_spec": mapper_spec, "params": self.params, "hooks_class_name": self.hooks_class_name, }
def to_json(self)
Serializes all data in this mapreduce spec into json form. Returns: data in json format.
3.53133
3.444908
1.025087
mapreduce_spec = cls(json["name"], json["mapreduce_id"], json["mapper_spec"], json.get("params"), json.get("hooks_class_name")) return mapreduce_spec
def from_json(cls, json)
Create new MapreduceSpec from the json, encoded by to_json. Args: json: json representation of MapreduceSpec. Returns: an instance of MapreduceSpec with all data deserialized from json.
5.972353
5.887987
1.014328
key = 'GAE-MR-spec: %s' % mr_id spec_json = memcache.get(key) if spec_json: return cls.from_json(spec_json) state = MapreduceState.get_by_job_id(mr_id) spec = state.mapreduce_spec spec_json = spec.to_json() memcache.set(key, spec_json) return spec
def _get_mapreduce_spec(cls, mr_id)
Get Mapreduce spec from mr id.
2.725979
2.518561
1.082356
return db.Key.from_path(cls.kind(), str(mapreduce_id))
def get_key_by_job_id(cls, mapreduce_id)
Retrieves the Key for a Job. Args: mapreduce_id: The job to retrieve. Returns: Datastore Key that can be used to fetch the MapreduceState.
3.573607
7.721068
0.462838
chart = google_chart_api.BarChart() def filter_status(status_to_filter): return [count if status == status_to_filter else 0 for count, status in zip(shards_processed, shards_status)] if shards_status: # Each index will have only one non-zero count, so stack them to color- # code the bars by status # These status values are computed in _update_state_from_shard_states, # in mapreduce/handlers.py. chart.stacked = True chart.AddBars(filter_status("unknown"), color="404040") chart.AddBars(filter_status("success"), color="00ac42") chart.AddBars(filter_status("running"), color="3636a9") chart.AddBars(filter_status("aborted"), color="e29e24") chart.AddBars(filter_status("failed"), color="f6350f") else: chart.AddBars(shards_processed) shard_count = len(shards_processed) if shard_count > 95: # Auto-spacing does not work for large numbers of shards. pixels_per_shard = 700.0 / shard_count bar_thickness = int(pixels_per_shard * .9) chart.style = bar_chart.BarChartStyle(bar_thickness=bar_thickness, bar_gap=0.1, use_fractional_gap_spacing=True) if shards_processed and shard_count <= 95: # Adding labels puts us in danger of exceeding the URL length, only # do it when we have a small amount of data to plot. # Only 16 labels on the whole chart. stride_length = max(1, shard_count / 16) chart.bottom.labels = [] for x in xrange(shard_count): if (x % stride_length == 0 or x == shard_count - 1): chart.bottom.labels.append(x) else: chart.bottom.labels.append("") chart.left.labels = ["0", str(max(shards_processed))] chart.left.min = 0 self.chart_width = min(700, max(300, shard_count * 20)) self.chart_url = chart.display.Url(self.chart_width, 200)
def set_processed_counts(self, shards_processed, shards_status)
Updates a chart url to display processed count for each shard. Args: shards_processed: list of integers with number of processed entities in each shard. shards_status: list of status strings for each shard, parallel to shards_processed.
4.859759
4.98882
0.97413
if not mapreduce_id: mapreduce_id = MapreduceState.new_mapreduce_id() state = MapreduceState(key_name=mapreduce_id, last_poll_time=gettime()) state.set_processed_counts([], []) return state
def create_new(mapreduce_id=None, gettime=datetime.datetime.now)
Create a new MapreduceState. Args: mapreduce_id: Mapreduce id as string. gettime: Used for testing.
4.056563
4.159254
0.97531
self.input_reader = self.initial_input_reader self.slice_id = 0 self.retries += 1 self.output_writer = output_writer self.handler = self.mapreduce_spec.mapper.handler
def reset_for_retry(self, output_writer)
Reset self for shard retry. Args: output_writer: new output writer that contains new output files.
6.379719
6.77839
0.941185
if recovery_slice: self.slice_id += 2 # Restore input reader to the beginning of the slice. self.input_reader = self.input_reader.from_json(self._input_reader_json) else: self.slice_id += 1
def advance_for_next_slice(self, recovery_slice=False)
Advance relevant states for next slice. Args: recovery_slice: True if this slice is running recovery logic. See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery for more info.
5.425477
5.523311
0.982287
result = {"mapreduce_spec": self.mapreduce_spec.to_json_str(), "shard_id": self.shard_id, "slice_id": str(self.slice_id), "input_reader_state": self.input_reader.to_json_str(), "initial_input_reader_state": self.initial_input_reader.to_json_str(), "retries": str(self.retries)} if self.output_writer: result["output_writer_state"] = self.output_writer.to_json_str() serialized_handler = util.try_serialize_handler(self.handler) if serialized_handler: result["serialized_handler"] = serialized_handler return result
def to_dict(self)
Convert state to dictionary to save in task payload.
2.883094
2.760095
1.044564
mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec")) mapper_spec = mapreduce_spec.mapper input_reader_spec_dict = json.loads(request.get("input_reader_state"), cls=json_util.JsonDecoder) input_reader = mapper_spec.input_reader_class().from_json( input_reader_spec_dict) initial_input_reader_spec_dict = json.loads( request.get("initial_input_reader_state"), cls=json_util.JsonDecoder) initial_input_reader = mapper_spec.input_reader_class().from_json( initial_input_reader_spec_dict) output_writer = None if mapper_spec.output_writer_class(): output_writer = mapper_spec.output_writer_class().from_json( json.loads(request.get("output_writer_state", "{}"), cls=json_util.JsonDecoder)) assert isinstance(output_writer, mapper_spec.output_writer_class()), ( "%s.from_json returned an instance of wrong class: %s" % ( mapper_spec.output_writer_class(), output_writer.__class__)) handler = util.try_deserialize_handler(request.get("serialized_handler")) if not handler: handler = mapreduce_spec.mapper.handler return cls(mapreduce_spec.params["base_path"], mapreduce_spec, str(request.get("shard_id")), int(request.get("slice_id")), input_reader, initial_input_reader, output_writer=output_writer, retries=int(request.get("retries")), handler=handler)
def from_request(cls, request)
Create new TransientShardState from webapp request.
2.309356
2.247946
1.027318
self.retries += 1 self.last_work_item = "" self.active = True self.result_status = None self.input_finished = False self.counters_map = CountersMap() self.slice_id = 0 self.slice_start_time = None self.slice_request_id = None self.slice_retries = 0 self.acquired_once = False
def reset_for_retry(self)
Reset self for shard retry.
5.573263
5.323893
1.04684
self.slice_start_time = None self.slice_request_id = None self.slice_retries = 0 self.acquired_once = False if recovery_slice: self.slice_id += 2 else: self.slice_id += 1
def advance_for_next_slice(self, recovery_slice=False)
Advance self for next slice. Args: recovery_slice: True if this slice is running recovery logic. See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery for more info.
3.901522
4.072349
0.958052
for prop in self.properties().values(): setattr(self, prop.name, getattr(other_state, prop.name))
def copy_from(self, other_state)
Copy data from another shard state entity to self.
4.11611
3.898881
1.055716
keys = cls.calculate_keys_by_mapreduce_state(mapreduce_state) i = 0 while i < len(keys): @db.non_transactional def no_tx_get(i): return db.get(keys[i:i+cls._MAX_STATES_IN_MEMORY]) # We need a separate function so that we can mix non-transactional gets # with this generator states = no_tx_get(i) for s in states: i += 1 if s is not None: yield s
def find_all_by_mapreduce_state(cls, mapreduce_state)
Find all shard states for given mapreduce. Args: mapreduce_state: MapreduceState instance Yields: shard states sorted by shard id.
5.390713
5.780981
0.932491
if mapreduce_state is None: return [] keys = [] for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count): shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i) keys.append(cls.get_key_by_shard_id(shard_id)) return keys
def calculate_keys_by_mapreduce_state(cls, mapreduce_state)
Calculate all shard states keys for given mapreduce. Args: mapreduce_state: MapreduceState instance Returns: A list of keys for shard states, sorted by shard id. The corresponding shard states may not exist.
2.812648
2.96787
0.947699
shard_id = cls.shard_id_from_number(mapreduce_id, shard_number) state = cls(key_name=shard_id, mapreduce_id=mapreduce_id) return state
def create_new(cls, mapreduce_id, shard_number)
Create new shard state. Args: mapreduce_id: unique mapreduce id as string. shard_number: shard number for which to create shard state. Returns: new instance of ShardState ready to put into datastore.
3.064193
3.765728
0.813705
return db.Key.from_path(cls.kind(), "%s:%s" % (mapreduce_id, cls._KEY_NAME))
def get_key_by_job_id(cls, mapreduce_id)
Retrieves the Key for a mapreduce ID. Args: mapreduce_id: The job to fetch. Returns: Datastore Key for the command for the given job ID.
4.295803
7.117618
0.603545
cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME), command=cls.ABORT).put(**kwargs)
def abort(cls, mapreduce_id, **kwargs)
Causes a job to abort. Args: mapreduce_id: The job to abort. Not verified as a valid job.
7.284751
11.14943
0.653374
super(DatastoreInputReader, cls).validate(job_config) params = job_config.input_reader_params entity_kind = params[cls.ENTITY_KIND_PARAM] # Check for a "." in the entity kind. if "." in entity_kind: logging.warning( ". detected in entity kind %s specified for reader %s." "Assuming entity kind contains the dot.", entity_kind, cls.__name__) # Validate the filters parameters. if cls.FILTERS_PARAM in params: filters = params[cls.FILTERS_PARAM] for f in filters: if f[1] != "=": raise errors.BadReaderParamsError( "Only equality filters are supported: %s", f)
def validate(cls, job_config)
Inherit docs.
3.906967
3.754467
1.040618
super(TaskQueueHandler, self).initialize(request, response) # Check request is from taskqueue. if "X-AppEngine-QueueName" not in self.request.headers: logging.error(self.request.headers) logging.error("Task queue handler received non-task queue request") self.response.set_status( 403, message="Task queue handler received non-task queue request") return # Check task has not been retried too many times. if self.task_retry_count() + 1 > parameters.config.TASK_MAX_ATTEMPTS: logging.error( "Task %s has been attempted %s times. Dropping it permanently.", self.request.headers["X-AppEngine-TaskName"], self.task_retry_count() + 1) self._drop_gracefully() return try: self._preprocess() self._preprocess_success = True # pylint: disable=bare-except except: self._preprocess_success = False logging.error( "Preprocess task %s failed. Dropping it permanently.", self.request.headers["X-AppEngine-TaskName"]) self._drop_gracefully()
def initialize(self, request, response)
Initialize. 1. call webapp init. 2. check request is indeed from taskqueue. 3. check the task has not been retried too many times. 4. run handler specific processing logic. 5. run error handling logic if processing failed. Args: request: a webapp.Request instance. response: a webapp.Response instance.
2.816096
2.540222
1.108602
self.response.set_status(httplib.SERVICE_UNAVAILABLE, "Retry task") self.response.clear()
def retry_task(self)
Ask taskqueue to retry this task. Even though raising an exception can cause a task retry, it will flood logs with highly visible ERROR logs. Handlers should use this method to perform controlled task retries. Only raise exceptions for errors that deserve ERROR log entries.
6.550064
5.922943
1.10588
path = self.request.path base_path = path[:path.rfind("/")] if not base_path.endswith("/command"): raise BadRequestPathError( "Json handlers should have /command path prefix") return base_path[:base_path.rfind("/")]
def base_path(self)
Base path for all mapreduce-related urls. JSON handlers are mapped to /base_path/command/command_name thus they require special treatment. Raises: BadRequestPathError: if the path does not end with "/command". Returns: The base path.
7.664159
3.920131
1.955077
if self.request.headers.get("X-Requested-With") != "XMLHttpRequest": logging.error("Got JSON request with no X-Requested-With header") self.response.set_status( 403, message="Got JSON request with no X-Requested-With header") return self.json_response.clear() try: self.handle() except errors.MissingYamlError: logging.debug("Could not find 'mapreduce.yaml' file.") self.json_response.clear() self.json_response["error_class"] = "Notice" self.json_response["error_message"] = "Could not find 'mapreduce.yaml'" except Exception, e: logging.exception("Error in JsonHandler, returning exception.") # TODO(user): Include full traceback here for the end-user. self.json_response.clear() self.json_response["error_class"] = e.__class__.__name__ self.json_response["error_message"] = str(e) self.response.headers["Content-Type"] = "text/javascript" try: output = json.dumps(self.json_response, cls=json_util.JsonEncoder) # pylint: disable=broad-except except Exception, e: logging.exception("Could not serialize to JSON") self.response.set_status(500, message="Could not serialize to JSON") return else: self.response.out.write(output)
def _handle_wrapper(self)
The helper method for handling JSON Post and Get requests.
2.765071
2.669392
1.035843
if self._EOF: return "" while self._seg_index <= self._last_seg_index: result = self._read_from_seg(n) if result != "": return result else: self._next_seg() self._EOF = True return ""
def read(self, n)
Read data from file segs. Args: n: max bytes to read. Must be positive. Returns: some bytes. May be smaller than n bytes. "" when no more data is left.
4.935452
3.995227
1.235337
if self._seg: self._seg.close() self._seg_index += 1 if self._seg_index > self._last_seg_index: self._seg = None return filename = self._seg_prefix + str(self._seg_index) stat = cloudstorage.stat(filename) writer = output_writers._GoogleCloudStorageOutputWriter if writer._VALID_LENGTH not in stat.metadata: raise ValueError( "Expect %s in metadata for file %s." % (writer._VALID_LENGTH, filename)) self._seg_valid_length = int(stat.metadata[writer._VALID_LENGTH]) if self._seg_valid_length > stat.st_size: raise ValueError( "Valid length %s is too big for file %s of length %s" % (self._seg_valid_length, filename, stat.st_size)) self._seg = cloudstorage.open(filename)
def _next_seg(self)
Get next seg.
3.213564
3.070625
1.04655
result = self._seg.read(size=n) if result == "": return result offset = self._seg.tell() if offset > self._seg_valid_length: extra = offset - self._seg_valid_length result = result[:-1*extra] self._offset += len(result) return result
def _read_from_seg(self, n)
Read from current seg. Args: n: max number of bytes to read. Returns: valid bytes from the current seg. "" if no more is left.
4.256155
3.93261
1.082272
now_descending = int((_FUTURE_TIME - gettime()) * 100) request_id_hash = os.environ.get("REQUEST_ID_HASH") if not request_id_hash: request_id_hash = str(random.getrandbits(32)) return "%d%s" % (now_descending, request_id_hash)
def _get_descending_key(gettime=time.time)
Returns a key name lexically ordered by time descending. This lets us have a key name for use with Datastore entities which returns rows in time descending order when it is scanned in lexically ascending order, allowing us to bypass index building for descending indexes. Args: gettime: Used for testing. Returns: A string with a time descending key.
3.445218
3.88734
0.886266
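The descending-key trick above subtracts the current time from a fixed far-future timestamp, so newer entries get lexically smaller key names and sort first in an ordinary ascending key scan. _FUTURE_TIME is not shown in this record, so the sketch below picks an arbitrary far-future constant and an illustrative function name purely for demonstration.

_FUTURE_TIME = 2 ** 34  # arbitrary far-future epoch seconds, for illustration only

def descending_key(now_seconds, suffix="abcd1234"):
    # Larger 'now' produces a smaller leading number, so newer keys sort first.
    return "%d%s" % (int((_FUTURE_TIME - now_seconds) * 100), suffix)

earlier = descending_key(1000000000)   # older event
later = descending_key(1000000100)     # newer event
assert later < earlier                 # newer key sorts lexically first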
version = os.environ["CURRENT_VERSION_ID"].split(".")[0] default_host = os.environ["DEFAULT_VERSION_HOSTNAME"] module = os.environ["CURRENT_MODULE_ID"] if os.environ["CURRENT_MODULE_ID"] == "default": return "%s.%s" % (version, default_host) return "%s.%s.%s" % (version, module, default_host)
def _get_task_host()
Get the Host header value for all mr tasks. The task Host header determines which instance this task will be routed to. The current version id format is v7.368834058928280579; the current module id is just the module's name and could be "default"; the default version hostname is app_id.appspot.com. Returns: A complete host name of the format version.module.app_id.appspot.com, or just version.app_id.appspot.com if the module is the default module. The reason is that if an app doesn't have modules enabled and the url is "version.default.app_id", "version" is ignored and "default" is used as the version. If the "default" version doesn't exist, the url is routed to the default version.
3.030124
2.024034
1.497072
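A sketch of the host-name composition described above, written against an explicit dict so it can be exercised without App Engine's real os.environ. The environment values and the task_host name are made up for illustration; only the "CURRENT_VERSION_ID" example value comes from the docstring.

def task_host(environ):
    # Re-statement of the routing rule above against an explicit dict.
    version = environ["CURRENT_VERSION_ID"].split(".")[0]
    default_host = environ["DEFAULT_VERSION_HOSTNAME"]
    module = environ["CURRENT_MODULE_ID"]
    if module == "default":
        return "%s.%s" % (version, default_host)
    return "%s.%s.%s" % (version, module, default_host)

# Hypothetical values purely for illustration.
env = {"CURRENT_VERSION_ID": "v7.368834058928280579",
       "DEFAULT_VERSION_HOSTNAME": "my-app.appspot.com",
       "CURRENT_MODULE_ID": "default"}
assert task_host(env) == "v7.my-app.appspot.com"
env["CURRENT_MODULE_ID"] = "mapreduce"
assert task_host(env) == "v7.mapreduce.my-app.appspot.com"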
if queue_name: return queue_name queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", parameters.config.QUEUE_NAME) if len(queue_name) > 1 and queue_name[0:2] == "__": # We are currently in some special queue. E.g. __cron. return parameters.config.QUEUE_NAME else: return queue_name
def get_queue_name(queue_name)
Determine which queue MR should run on. How to choose the queue: 1. If user provided one, use that. 2. If we are starting a mr from taskqueue, inherit that queue. If it's a special queue, fall back to the default queue. 3. Default queue. If user is using any MR pipeline interface, pipeline.start takes a "queue_name" argument. The pipeline will run on that queue and MR will simply inherit the queue_name. Args: queue_name: queue_name from user. May be None. Returns: The queue name to run on.
4.356284
4.274179
1.019209
secs = td.seconds + td.days * 24 * 3600 if td.microseconds: secs += 1 return secs
def total_seconds(td)
Convert a timedelta to seconds. This is patterned after timedelta.total_seconds, which is only available in Python 2.7. Args: td: a timedelta object. Returns: total seconds within a timedelta. Rounded up to seconds.
4.097479
4.391144
0.933123
resolved_name = for_name(fq_name) if isinstance(resolved_name, (type, types.ClassType)): # create new instance if this is type return resolved_name() elif isinstance(resolved_name, types.MethodType): # bind the method return getattr(resolved_name.im_class(), resolved_name.__name__) else: return resolved_name
def handler_for_name(fq_name)
Resolves and instantiates handler by fully qualified name. First resolves the name using a for_name call. Then, if it resolves to a class, instantiates the class; if it resolves to a method, instantiates the class and binds the method to the instance. Args: fq_name: fully qualified name of something to find. Returns: handler instance which is ready to be called.
4.252308
3.486405
1.219683
if (isinstance(handler, types.InstanceType) or # old style class (isinstance(handler, object) and # new style class not inspect.isfunction(handler) and not inspect.ismethod(handler)) and hasattr(handler, "__call__")): return pickle.dumps(handler) return None
def try_serialize_handler(handler)
Try to serialize map/reduce handler. Args: handler: handler function/instance. Handler can be a function or an instance of a callable class. In the latter case, the handler will be serialized across slices to allow users to save states. Returns: serialized handler string or None.
3.513979
3.709946
0.947178
if isinstance(obj, types.GeneratorType): return True CO_GENERATOR = 0x20 return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and obj.func_code.co_flags & CO_GENERATOR))
def is_generator(obj)
Return true if the object is a generator or a generator function. Generator function objects provide the same attributes as functions. See isfunction.__doc__ for the attribute listing. Adapted from Python 2.6. Args: obj: an object to test. Returns: true if the object is a generator or generator function.
3.249876
3.709716
0.876044
if type(obj) is str: TRUTH_VALUE_SET = ["true", "1", "yes", "t", "on"] return obj.lower() in TRUTH_VALUE_SET else: return bool(obj)
def parse_bool(obj)
Return true if the object represents a truth value, false otherwise. For bool and numeric objects, uses Python's built-in bool function. For str objects, checks string against a list of possible truth values. Args: obj: object to determine boolean value of; expected Returns: Boolean value according to 5.1 of Python docs if object is not a str object. For str objects, return True if str is in TRUTH_VALUE_SET and False otherwise. http://docs.python.org/library/stdtypes.html
3.601803
2.597528
1.386627
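A quick illustration of the truth-value parsing described above; this is a standalone restatement for demonstration, not an import from the library.

def parse_bool(obj):
    # Standalone copy of the logic above for demonstration purposes.
    if isinstance(obj, str):
        return obj.lower() in ("true", "1", "yes", "t", "on")
    return bool(obj)

assert parse_bool("Yes") is True
assert parse_bool("off") is False      # not in the truth set
assert parse_bool(0) is False
assert parse_bool(2.5) is True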
force_writes = parse_bool(mapreduce_spec.params.get("force_writes", "false")) if force_writes: return datastore_rpc.Configuration(force_writes=force_writes) else: # dev server doesn't support force_writes. return datastore_rpc.Configuration()
def create_datastore_write_config(mapreduce_spec)
Creates datastore config to use in write operations. Args: mapreduce_spec: current mapreduce specification as MapreduceSpec. Returns: an instance of datastore_rpc.Configuration to use for all write operations in the mapreduce.
4.418692
3.987028
1.108267
ndb_ctx = ndb.get_context() ndb_ctx.set_cache_policy(lambda key: False) ndb_ctx.set_memcache_policy(lambda key: False)
def _set_ndb_cache_policy()
Tell NDB to never cache anything in memcache or in-process. This ensures that entities fetched from Datastore input_readers via NDB will not bloat up the request memory size and Datastore Puts will avoid doing calls to memcache. Without this you get soft memory limit exits, which hurts overall throughput.
2.540584
2.320629
1.094783
if obj is None: return obj if inspect.isclass(obj) or inspect.isfunction(obj): fetched = getattr(sys.modules[obj.__module__], obj.__name__, None) if fetched is None: raise ValueError( "Object %r must be defined on the top level of a module." % obj) return "%s.%s" % (obj.__module__, obj.__name__) raise TypeError("Unexpected type %s." % type(obj))
def _obj_to_path(obj)
Returns the fully qualified path to the object. Args: obj: obj must be a new style top level class, or a top level function. No inner function or static method. Returns: Fully qualified path to the object. Raises: TypeError: when argument obj has unsupported type. ValueError: when obj can't be discovered on the top level.
3.424649
3.060434
1.119008
items_no_prefix = [] for item in items: if item.startswith(prefix): items_no_prefix.append(item[len(prefix):]) else: items_no_prefix.append(item) return items_no_prefix
def strip_prefix_from_items(prefix, items)
Strips out the prefix from each of the items if it is present. Args: prefix: the string that you wish to strip from the beginning of each of the items. items: a list of strings that may or may not contain the prefix you want to strip out. Returns: items_no_prefix: a copy of the list of items (same order) without the prefix (if present).
1.669446
1.990879
0.838547
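A short usage illustration of the prefix-stripping helper above, restated standalone; the example paths are made up.

def strip_prefix_from_items(prefix, items):
    # Same behaviour as the helper above, shown as a list comprehension.
    return [item[len(prefix):] if item.startswith(prefix) else item for item in items]

assert strip_prefix_from_items("/gs/", ["/gs/a", "b", "/gs/c"]) == ["a", "b", "c"]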
if hooks is not None: try: getattr(hooks, method)(task, queue_name) except NotImplementedError: # Use the default task addition implementation. return False return True return False
def _run_task_hook(hooks, method, task, queue_name)
Invokes hooks.method(task, queue_name). Args: hooks: A hooks.Hooks instance or None. method: The name of the method to invoke on the hooks class e.g. "enqueue_kickoff_task". task: The taskqueue.Task to pass to the hook method. queue_name: The name of the queue to pass to the hook method. Returns: True if the hooks.Hooks instance handled the method, False otherwise.
6.76999
7.244551
0.934494
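The hook-dispatch helper above implements a fallback contract: return True if the hooks object handled the call, False if there were no hooks or the hook declined via NotImplementedError. A self-contained sketch of that contract follows; the Hooks/LoggingHooks classes and the string task are stand-ins, not the library's taskqueue or hooks types.

class Hooks(object):
    # Minimal stand-in for the hooks base class, for illustration only.
    def enqueue_kickoff_task(self, task, queue_name):
        raise NotImplementedError()

class LoggingHooks(Hooks):
    def enqueue_kickoff_task(self, task, queue_name):
        print("custom enqueue of %r on %s" % (task, queue_name))

def _run_task_hook(hooks, method, task, queue_name):
    # Same fallback contract as above: True only if the hook handled the call.
    if hooks is not None:
        try:
            getattr(hooks, method)(task, queue_name)
        except NotImplementedError:
            return False
        return True
    return False

assert _run_task_hook(LoggingHooks(), "enqueue_kickoff_task", "task-1", "default") is True
assert _run_task_hook(Hooks(), "enqueue_kickoff_task", "task-1", "default") is False
assert _run_task_hook(None, "enqueue_kickoff_task", "task-1", "default") is False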