_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q12100
|
HugeTask.add
|
train
|
def add(self, queue_name, transactional=False):
  """Enqueue this task on the given queue.

  Args:
    queue_name: name of the taskqueue queue to add the task to.
    transactional: whether to add the task transactionally.
  """
  self.to_task().add(queue_name, transactional)
|
python
|
{
"resource": ""
}
|
q12101
|
HugeTask.to_task
|
train
|
def to_task(self):
  """Convert to a taskqueue task.

  Returns:
    A new, unenqueued taskqueue.Task carrying this task's url, payload,
    name, eta, countdown and headers.
  """
  # Never pass params to taskqueue.Task. Use payload instead. Otherwise,
  # it's up to a particular taskqueue implementation to generate
  # payload from params. It could blow up payload size over limit.
  return taskqueue.Task(
      url=self.url,
      payload=self._payload,
      name=self.name,
      eta=self.eta,
      countdown=self.countdown,
      headers=self._headers)
|
python
|
{
"resource": ""
}
|
q12102
|
HugeTask.decode_payload
|
train
|
def decode_payload(cls, request):
  """Decode task payload.

  HugeTask controls its own payload entirely including urlencoding.
  It doesn't depend on any particular web framework.

  Args:
    request: a webapp Request instance.

  Returns:
    A dict of str to str. The same as the params argument to __init__.

  Raises:
    DeprecationWarning: When task payload constructed from an older
      incompatible version of mapreduce.
  """
  # TODO(user): Pass mr_id into headers. Otherwise when payload decoding
  # failed, we can't abort a mr.
  version_seen = request.headers.get(cls.PAYLOAD_VERSION_HEADER)
  if version_seen != cls.PAYLOAD_VERSION:
    raise DeprecationWarning(
        "Task is generated by an older incompatible version of mapreduce. "
        "Please kill this job manually")
  return cls._decode_payload(request.body)
|
python
|
{
"resource": ""
}
|
q12103
|
CountersMap.add_map
|
train
|
def add_map(self, counters_map):
  """Add all counters from the map.

  For each counter in the passed map, adds its value to the counter in this
  map.

  Args:
    counters_map: CounterMap instance to add.
  """
  for name, value in counters_map.counters.items():
    self.increment(name, value)
|
python
|
{
"resource": ""
}
|
q12104
|
CountersMap.sub_map
|
train
|
def sub_map(self, counters_map):
  """Subtracts all counters from the map.

  For each counter in the passed map, subtracts its value to the counter in
  this map.

  Args:
    counters_map: CounterMap instance to subtract.
  """
  for name, value in counters_map.counters.items():
    self.increment(name, -value)
|
python
|
{
"resource": ""
}
|
q12105
|
MapperSpec.to_json
|
train
|
def to_json(self):
  """Serializes this MapperSpec into a json-izable object.

  The output writer entry is only present when an output writer spec
  has been configured.
  """
  serialized = dict(
      mapper_handler_spec=self.handler_spec,
      mapper_input_reader=self.input_reader_spec,
      mapper_params=self.params,
      mapper_shard_count=self.shard_count)
  if self.output_writer_spec:
    serialized["mapper_output_writer"] = self.output_writer_spec
  return serialized
|
python
|
{
"resource": ""
}
|
q12106
|
MapreduceSpec.get_hooks
|
train
|
def get_hooks(self):
  """Returns a hooks.Hooks class or None if no hooks class has been set.

  The resolved hooks class is instantiated once and cached in
  self.__hooks; later calls return the cached instance.

  Raises:
    ValueError: if hooks_class_name does not resolve to a hooks.Hooks
      subclass.
  """
  # Lazily resolve and instantiate the hooks class on first use.
  if self.__hooks is None and self.hooks_class_name is not None:
    hooks_class = util.for_name(self.hooks_class_name)
    if not isinstance(hooks_class, type):
      raise ValueError("hooks_class_name must refer to a class, got %s" %
                       type(hooks_class).__name__)
    if not issubclass(hooks_class, hooks.Hooks):
      raise ValueError(
          "hooks_class_name must refer to a hooks.Hooks subclass")
    self.__hooks = hooks_class(self)
  return self.__hooks
|
python
|
{
"resource": ""
}
|
q12107
|
MapreduceSpec.to_json
|
train
|
def to_json(self):
  """Serializes all data in this mapreduce spec into json form.

  Returns:
    data in json format.
  """
  return {
      "name": self.name,
      "mapreduce_id": self.mapreduce_id,
      "mapper_spec": self.mapper.to_json(),
      "params": self.params,
      "hooks_class_name": self.hooks_class_name,
  }
|
python
|
{
"resource": ""
}
|
q12108
|
MapreduceSpec.from_json
|
train
|
def from_json(cls, json):
  """Create new MapreduceSpec from the json, encoded by to_json.

  Args:
    json: json representation of MapreduceSpec.

  Returns:
    an instance of MapreduceSpec with all data deserialized from json.
  """
  # "params" and "hooks_class_name" are optional; missing keys become None.
  return cls(
      json["name"],
      json["mapreduce_id"],
      json["mapper_spec"],
      json.get("params"),
      json.get("hooks_class_name"))
|
python
|
{
"resource": ""
}
|
q12109
|
MapreduceSpec._get_mapreduce_spec
|
train
|
def _get_mapreduce_spec(cls, mr_id):
  """Get Mapreduce spec from mr id.

  Args:
    mr_id: mapreduce job id as string.

  Returns:
    A MapreduceSpec for the job. The json form is cached in memcache under
    a per-job key; on a cache miss the spec is loaded from the job's
    MapreduceState and written back to memcache.
  """
  key = 'GAE-MR-spec: %s' % mr_id
  spec_json = memcache.get(key)
  if spec_json:
    return cls.from_json(spec_json)
  # Cache miss: load from datastore and repopulate memcache.
  state = MapreduceState.get_by_job_id(mr_id)
  spec = state.mapreduce_spec
  spec_json = spec.to_json()
  memcache.set(key, spec_json)
  return spec
|
python
|
{
"resource": ""
}
|
q12110
|
MapreduceState.get_key_by_job_id
|
train
|
def get_key_by_job_id(cls, mapreduce_id):
  """Retrieves the Key for a Job.

  Args:
    mapreduce_id: The job to retrieve.

  Returns:
    Datastore Key that can be used to fetch the MapreduceState.
  """
  # The entity's key_name is simply the stringified mapreduce id.
  return db.Key.from_path(cls.kind(), str(mapreduce_id))
|
python
|
{
"resource": ""
}
|
q12111
|
MapreduceState.set_processed_counts
|
train
|
def set_processed_counts(self, shards_processed, shards_status):
  """Updates a chart url to display processed count for each shard.

  Args:
    shards_processed: list of integers with number of processed entities in
      each shard
    shards_status: list of status strings parallel to shards_processed;
      values used here are "unknown", "success", "running", "aborted" and
      "failed". May be empty, in which case bars are not color-coded.
  """
  chart = google_chart_api.BarChart()

  # For one status, keep only counts of shards in that status; all other
  # positions become 0 so the stacked series line up by shard index.
  def filter_status(status_to_filter):
    return [count if status == status_to_filter else 0
            for count, status in zip(shards_processed, shards_status)]

  if shards_status:
    # Each index will have only one non-zero count, so stack them to color-
    # code the bars by status
    # These status values are computed in _update_state_from_shard_states,
    # in mapreduce/handlers.py.
    chart.stacked = True
    chart.AddBars(filter_status("unknown"), color="404040")
    chart.AddBars(filter_status("success"), color="00ac42")
    chart.AddBars(filter_status("running"), color="3636a9")
    chart.AddBars(filter_status("aborted"), color="e29e24")
    chart.AddBars(filter_status("failed"), color="f6350f")
  else:
    chart.AddBars(shards_processed)

  shard_count = len(shards_processed)
  if shard_count > 95:
    # Auto-spacing does not work for large numbers of shards.
    pixels_per_shard = 700.0 / shard_count
    bar_thickness = int(pixels_per_shard * .9)
    chart.style = bar_chart.BarChartStyle(bar_thickness=bar_thickness,
                                          bar_gap=0.1,
                                          use_fractional_gap_spacing=True)

  if shards_processed and shard_count <= 95:
    # Adding labels puts us in danger of exceeding the URL length, only
    # do it when we have a small amount of data to plot.
    # Only 16 labels on the whole chart.
    stride_length = max(1, shard_count / 16)
    chart.bottom.labels = []
    for x in xrange(shard_count):
      if (x % stride_length == 0 or
          x == shard_count - 1):
        chart.bottom.labels.append(x)
      else:
        chart.bottom.labels.append("")
    chart.left.labels = ["0", str(max(shards_processed))]
    chart.left.min = 0

  # Width scales with shard count but is clamped to [300, 700] pixels.
  self.chart_width = min(700, max(300, shard_count * 20))
  self.chart_url = chart.display.Url(self.chart_width, 200)
|
python
|
{
"resource": ""
}
|
q12112
|
MapreduceState.create_new
|
train
|
def create_new(mapreduce_id=None,
               gettime=datetime.datetime.now):
  """Create a new MapreduceState.

  Args:
    mapreduce_id: Mapreduce id as string. A new id is generated when
      None/empty.
    gettime: Used for testing.

  Returns:
    A new, not-yet-persisted MapreduceState entity with an empty
    processed-counts chart.
  """
  if not mapreduce_id:
    mapreduce_id = MapreduceState.new_mapreduce_id()
  state = MapreduceState(key_name=mapreduce_id,
                         last_poll_time=gettime())
  # Initialize the chart with no shard data.
  state.set_processed_counts([], [])
  return state
|
python
|
{
"resource": ""
}
|
q12113
|
TransientShardState.advance_for_next_slice
|
train
|
def advance_for_next_slice(self, recovery_slice=False):
  """Advance relevant states for the next slice.

  Args:
    recovery_slice: True if this slice is running recovery logic.
      See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
      for more info.
  """
  if not recovery_slice:
    self.slice_id += 1
    return
  # Recovery consumes two slice ids and rewinds the input reader back to
  # the start of the slice.
  self.slice_id += 2
  self.input_reader = self.input_reader.from_json(self._input_reader_json)
|
python
|
{
"resource": ""
}
|
q12114
|
TransientShardState.to_dict
|
train
|
def to_dict(self):
  """Convert state to dictionary to save in task payload.

  Returns:
    A dict of str to str; optional entries ("output_writer_state",
    "serialized_handler") are present only when set/serializable.
  """
  state = {
      "mapreduce_spec": self.mapreduce_spec.to_json_str(),
      "shard_id": self.shard_id,
      "slice_id": str(self.slice_id),
      "input_reader_state": self.input_reader.to_json_str(),
      "initial_input_reader_state": self.initial_input_reader.to_json_str(),
      "retries": str(self.retries),
  }
  if self.output_writer:
    state["output_writer_state"] = self.output_writer.to_json_str()
  serialized = util.try_serialize_handler(self.handler)
  if serialized:
    state["serialized_handler"] = serialized
  return state
|
python
|
{
"resource": ""
}
|
q12115
|
TransientShardState.from_request
|
train
|
def from_request(cls, request):
  """Create new TransientShardState from webapp request.

  Reverses to_dict: each piece of transient state is deserialized from
  the task payload carried in the request parameters.

  Args:
    request: a webapp Request whose parameters were produced by to_dict.

  Returns:
    A new TransientShardState populated from the request.
  """
  mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
  mapper_spec = mapreduce_spec.mapper
  input_reader_spec_dict = json.loads(request.get("input_reader_state"),
                                      cls=json_util.JsonDecoder)
  input_reader = mapper_spec.input_reader_class().from_json(
      input_reader_spec_dict)
  initial_input_reader_spec_dict = json.loads(
      request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
  initial_input_reader = mapper_spec.input_reader_class().from_json(
      initial_input_reader_spec_dict)
  output_writer = None
  if mapper_spec.output_writer_class():
    output_writer = mapper_spec.output_writer_class().from_json(
        json.loads(request.get("output_writer_state", "{}"),
                   cls=json_util.JsonDecoder))
    # Guard against a from_json override returning the wrong type.
    assert isinstance(output_writer, mapper_spec.output_writer_class()), (
        "%s.from_json returned an instance of wrong class: %s" % (
            mapper_spec.output_writer_class(),
            output_writer.__class__))
  handler = util.try_deserialize_handler(request.get("serialized_handler"))
  if not handler:
    # No serialized handler in the payload; fall back to the mapper spec's.
    handler = mapreduce_spec.mapper.handler
  return cls(mapreduce_spec.params["base_path"],
             mapreduce_spec,
             str(request.get("shard_id")),
             int(request.get("slice_id")),
             input_reader,
             initial_input_reader,
             output_writer=output_writer,
             retries=int(request.get("retries")),
             handler=handler)
|
python
|
{
"resource": ""
}
|
q12116
|
ShardState.advance_for_next_slice
|
train
|
def advance_for_next_slice(self, recovery_slice=False):
  """Advance self for next slice.

  Clears all per-slice lease/retry bookkeeping and bumps the slice id.

  Args:
    recovery_slice: True if this slice is running recovery logic.
      See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
      for more info.
  """
  # Reset transient state belonging to the slice that just ended.
  self.slice_start_time = None
  self.slice_request_id = None
  self.slice_retries = 0
  self.acquired_once = False
  # A recovery slice consumes two slice ids.
  self.slice_id += 2 if recovery_slice else 1
|
python
|
{
"resource": ""
}
|
q12117
|
ShardState.copy_from
|
train
|
def copy_from(self, other_state):
  """Copy data from another shard state entity to self."""
  names = [prop.name for prop in self.properties().values()]
  for name in names:
    setattr(self, name, getattr(other_state, name))
|
python
|
{
"resource": ""
}
|
q12118
|
ShardState.find_all_by_mapreduce_state
|
train
|
def find_all_by_mapreduce_state(cls, mapreduce_state):
  """Find all shard states for given mapreduce.

  Args:
    mapreduce_state: MapreduceState instance

  Yields:
    shard states sorted by shard id. Keys with no corresponding entity
    are skipped.
  """
  keys = cls.calculate_keys_by_mapreduce_state(mapreduce_state)
  i = 0
  while i < len(keys):
    # Fetch entities in batches of _MAX_STATES_IN_MEMORY to bound memory.
    @db.non_transactional
    def no_tx_get(i):
      return db.get(keys[i:i+cls._MAX_STATES_IN_MEMORY])
    # A separate decorated function is needed so the non-transactional
    # batched get can be used from within this generator.
    states = no_tx_get(i)
    for s in states:
      i += 1
      if s is not None:
        yield s
|
python
|
{
"resource": ""
}
|
q12119
|
ShardState.calculate_keys_by_mapreduce_state
|
train
|
def calculate_keys_by_mapreduce_state(cls, mapreduce_state):
  """Calculate all shard states keys for given mapreduce.

  Args:
    mapreduce_state: MapreduceState instance

  Returns:
    A list of keys for shard states, sorted by shard id.
    The corresponding shard states may not exist.
  """
  if mapreduce_state is None:
    return []
  job_name = mapreduce_state.key().name()
  shard_count = mapreduce_state.mapreduce_spec.mapper.shard_count
  return [
      cls.get_key_by_shard_id(cls.shard_id_from_number(job_name, i))
      for i in range(shard_count)]
|
python
|
{
"resource": ""
}
|
q12120
|
ShardState.create_new
|
train
|
def create_new(cls, mapreduce_id, shard_number):
  """Create new shard state.

  Args:
    mapreduce_id: unique mapreduce id as string.
    shard_number: shard number for which to create shard state.

  Returns:
    new instance of ShardState ready to put into datastore.
  """
  # The shard id doubles as the entity's key_name.
  return cls(
      key_name=cls.shard_id_from_number(mapreduce_id, shard_number),
      mapreduce_id=mapreduce_id)
|
python
|
{
"resource": ""
}
|
q12121
|
MapreduceControl.get_key_by_job_id
|
train
|
def get_key_by_job_id(cls, mapreduce_id):
  """Retrieves the Key for a mapreduce ID.

  Args:
    mapreduce_id: The job to fetch.

  Returns:
    Datastore Key for the command for the given job ID.
  """
  # Key name is "<mapreduce_id>:<_KEY_NAME>", one command entity per job.
  return db.Key.from_path(cls.kind(), "%s:%s" % (mapreduce_id, cls._KEY_NAME))
|
python
|
{
"resource": ""
}
|
q12122
|
MapreduceControl.abort
|
train
|
def abort(cls, mapreduce_id, **kwargs):
  """Causes a job to abort.

  Args:
    mapreduce_id: The job to abort. Not verified as a valid job.
    **kwargs: passed through to put() (e.g. a datastore write config).
  """
  # Writing the ABORT command entity under the job's key records the
  # abort request; the key matches get_key_by_job_id.
  cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME),
      command=cls.ABORT).put(**kwargs)
|
python
|
{
"resource": ""
}
|
q12123
|
TaskQueueHandler.retry_task
|
train
|
def retry_task(self):
  """Ask taskqueue to retry this task.

  Even though raising an exception can cause a task retry, it
  will flood logs with highly visible ERROR logs. Handlers should use
  this method to perform controlled task retries. Only raise exceptions
  for those that deserve ERROR log entries.
  """
  # Responding with a non-success status asks taskqueue to retry, without
  # the ERROR traceback an exception would produce.
  self.response.set_status(httplib.SERVICE_UNAVAILABLE, "Retry task")
  self.response.clear()
|
python
|
{
"resource": ""
}
|
q12124
|
JsonHandler.base_path
|
train
|
def base_path(self):
  """Base path for all mapreduce-related urls.

  JSON handlers are mapped to /base_path/command/command_name thus they
  require special treatment.

  Raises:
    BadRequestPathError: if the path does not end with "/command".

  Returns:
    The base path.
  """
  path = self.request.path
  # Strip the trailing command_name component.
  command_path = path[:path.rfind("/")]
  if not command_path.endswith("/command"):
    raise BadRequestPathError(
        "Json handlers should have /command path prefix")
  # Strip the "/command" component to obtain the base path.
  return command_path[:command_path.rfind("/")]
|
python
|
{
"resource": ""
}
|
q12125
|
JsonHandler._handle_wrapper
|
train
|
def _handle_wrapper(self):
  """The helper method for handling JSON Post and Get requests.

  Rejects requests missing the X-Requested-With header, delegates to
  self.handle(), and serializes self.json_response as the reply. Any
  exception raised by handle() is reported in the JSON payload instead
  of being propagated.
  """
  # Require the XMLHttpRequest marker header before doing any work.
  if self.request.headers.get("X-Requested-With") != "XMLHttpRequest":
    logging.error("Got JSON request with no X-Requested-With header")
    self.response.set_status(
        403, message="Got JSON request with no X-Requested-With header")
    return
  self.json_response.clear()
  try:
    self.handle()
  except errors.MissingYamlError:
    logging.debug("Could not find 'mapreduce.yaml' file.")
    self.json_response.clear()
    self.json_response["error_class"] = "Notice"
    self.json_response["error_message"] = "Could not find 'mapreduce.yaml'"
  except Exception, e:
    logging.exception("Error in JsonHandler, returning exception.")
    # TODO(user): Include full traceback here for the end-user.
    self.json_response.clear()
    self.json_response["error_class"] = e.__class__.__name__
    self.json_response["error_message"] = str(e)
  self.response.headers["Content-Type"] = "text/javascript"
  try:
    output = json.dumps(self.json_response, cls=json_util.JsonEncoder)
  # pylint: disable=broad-except
  except Exception, e:
    logging.exception("Could not serialize to JSON")
    self.response.set_status(500, message="Could not serialize to JSON")
    return
  else:
    self.response.out.write(output)
|
python
|
{
"resource": ""
}
|
q12126
|
_GCSFileSegReader.read
|
train
|
def read(self, n):
  """Read data from file segs.

  Args:
    n: max bytes to read. Must be positive.

  Returns:
    some bytes. May be smaller than n bytes. "" when no more data is left.
  """
  if self._EOF:
    return ""
  # Walk the segs until one yields data or all are exhausted.
  while self._seg_index <= self._last_seg_index:
    chunk = self._read_from_seg(n)
    if chunk:
      return chunk
    self._next_seg()
  self._EOF = True
  return ""
|
python
|
{
"resource": ""
}
|
q12127
|
_GCSFileSegReader._next_seg
|
train
|
def _next_seg(self):
  """Get next seg.

  Closes the current seg (if any), advances _seg_index, and opens the
  next seg file, reading its valid length from GCS file metadata. Sets
  self._seg to None when all segs are consumed.

  Raises:
    ValueError: if the seg file lacks the valid-length metadata entry, or
      the recorded valid length exceeds the file's actual size.
  """
  if self._seg:
    self._seg.close()
  self._seg_index += 1
  if self._seg_index > self._last_seg_index:
    # No more segs.
    self._seg = None
    return
  # Seg files are named <prefix><index>.
  filename = self._seg_prefix + str(self._seg_index)
  stat = cloudstorage.stat(filename)
  writer = output_writers._GoogleCloudStorageOutputWriter
  if writer._VALID_LENGTH not in stat.metadata:
    raise ValueError(
        "Expect %s in metadata for file %s." %
        (writer._VALID_LENGTH, filename))
  self._seg_valid_length = int(stat.metadata[writer._VALID_LENGTH])
  if self._seg_valid_length > stat.st_size:
    raise ValueError(
        "Valid length %s is too big for file %s of length %s" %
        (self._seg_valid_length, filename, stat.st_size))
  self._seg = cloudstorage.open(filename)
|
python
|
{
"resource": ""
}
|
q12128
|
_GCSFileSegReader._read_from_seg
|
train
|
def _read_from_seg(self, n):
"""Read from current seg.
Args:
n: max number of bytes to read.
Returns:
valid bytes from the current seg. "" if no more is left.
"""
result = self._seg.read(size=n)
if result == "":
return result
offset = self._seg.tell()
if offset > self._seg_valid_length:
extra = offset - self._seg_valid_length
result = result[:-1*extra]
self._offset += len(result)
return result
|
python
|
{
"resource": ""
}
|
q12129
|
_get_descending_key
|
train
|
def _get_descending_key(gettime=time.time):
  """Returns a key name lexically ordered by time descending.

  This lets us have a key name for use with Datastore entities which returns
  rows in time descending order when it is scanned in lexically ascending
  order, allowing us to bypass index building for descending indexes.

  Args:
    gettime: Used for testing.

  Returns:
    A string with a time descending key.
  """
  # Centiseconds remaining until _FUTURE_TIME: later timestamps produce
  # smaller numbers, so a lexically ascending scan yields newest first.
  now_descending = int((_FUTURE_TIME - gettime()) * 100)
  # Disambiguate keys generated in the same centisecond.
  suffix = os.environ.get("REQUEST_ID_HASH") or str(random.getrandbits(32))
  return "%d%s" % (now_descending, suffix)
|
python
|
{
"resource": ""
}
|
q12130
|
_get_task_host
|
train
|
def _get_task_host():
"""Get the Host header value for all mr tasks.
Task Host header determines which instance this task would be routed to.
Current version id format is: v7.368834058928280579
Current module id is just the module's name. It could be "default"
Default version hostname is app_id.appspot.com
Returns:
A complete host name is of format version.module.app_id.appspot.com
If module is the default module, just version.app_id.appspot.com. The reason
is if an app doesn't have modules enabled and the url is
"version.default.app_id", "version" is ignored and "default" is used as
version. If "default" version doesn't exist, the url is routed to the
default version.
"""
version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
default_host = os.environ["DEFAULT_VERSION_HOSTNAME"]
module = os.environ["CURRENT_MODULE_ID"]
if os.environ["CURRENT_MODULE_ID"] == "default":
return "%s.%s" % (version, default_host)
return "%s.%s.%s" % (version, module, default_host)
|
python
|
{
"resource": ""
}
|
q12131
|
get_queue_name
|
train
|
def get_queue_name(queue_name):
  """Determine which queue MR should run on.

  How to choose the queue:
  1. If user provided one, use that.
  2. If we are starting a mr from taskqueue, inherit that queue.
     If it's a special queue, fall back to the default queue.
  3. Default queue.

  If user is using any MR pipeline interface, pipeline.start takes a
  "queue_name" argument. The pipeline will run on that queue and MR will
  simply inherit the queue_name.

  Args:
    queue_name: queue_name from user. Maybe None.

  Returns:
    The queue name to run on.
  """
  if queue_name:
    return queue_name
  inherited = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
                             parameters.config.QUEUE_NAME)
  if inherited.startswith("__"):
    # We are currently in some special queue. E.g. __cron.
    return parameters.config.QUEUE_NAME
  return inherited
|
python
|
{
"resource": ""
}
|
q12132
|
total_seconds
|
train
|
def total_seconds(td):
  """convert a timedelta to seconds.

  This is patterned after timedelta.total_seconds, which is only
  available in python 27.

  Args:
    td: a timedelta object.

  Returns:
    total seconds within a timedelta. Rounded up to seconds.
  """
  whole_secs = td.days * 86400 + td.seconds
  # Round any sub-second remainder up to a full second.
  return whole_secs + 1 if td.microseconds else whole_secs
|
python
|
{
"resource": ""
}
|
q12133
|
handler_for_name
|
train
|
def handler_for_name(fq_name):
  """Resolves and instantiates handler by fully qualified name.

  First resolves the name using for_name call. Then if it resolves to a class,
  instantiates a class, if it resolves to a method - instantiates the class and
  binds method to the instance.

  Args:
    fq_name: fully qualified name of something to find.

  Returns:
    handler instance which is ready to be called.
  """
  resolved_name = for_name(fq_name)
  if isinstance(resolved_name, (type, types.ClassType)):
    # create new instance if this is type
    return resolved_name()
  elif isinstance(resolved_name, types.MethodType):
    # Python 2 unbound method: instantiate its defining class (im_class)
    # and fetch the attribute so the method is bound to that instance.
    return getattr(resolved_name.im_class(), resolved_name.__name__)
  else:
    # Already callable as-is (plain function etc.).
    return resolved_name
|
python
|
{
"resource": ""
}
|
q12134
|
is_generator
|
train
|
def is_generator(obj):
  """Return true if the object is generator or generator function.

  Generator function objects provides same attributes as functions.
  See isfunction.__doc__ for attributes listing.

  Args:
    obj: an object to test.

  Returns:
    true if the object is a generator object or a generator function.
  """
  if isinstance(obj, types.GeneratorType):
    return True
  # inspect.isgeneratorfunction performs the same CO_GENERATOR flag test on
  # functions and methods that this function previously hand-rolled, and it
  # works across Python versions (func_code vs. __code__).
  return inspect.isgeneratorfunction(obj)
|
python
|
{
"resource": ""
}
|
q12135
|
parse_bool
|
train
|
def parse_bool(obj):
  """Return true if the object represents a truth value, false otherwise.

  For bool and numeric objects, uses Python's built-in bool function. For
  str objects, checks string against a list of possible truth values.

  Args:
    obj: object to determine boolean value of; expected

  Returns:
    Boolean value according to 5.1 of Python docs if object is not a str
    object. For str objects, return True if str is in TRUTH_VALUE_SET
    and False otherwise.
    http://docs.python.org/library/stdtypes.html
  """
  # Non-strings fall through to Python's normal truthiness rules.
  if type(obj) is not str:
    return bool(obj)
  return obj.lower() in ("true", "1", "yes", "t", "on")
|
python
|
{
"resource": ""
}
|
q12136
|
create_datastore_write_config
|
train
|
def create_datastore_write_config(mapreduce_spec):
  """Creates datastore config to use in write operations.

  Args:
    mapreduce_spec: current mapreduce specification as MapreduceSpec.

  Returns:
    an instance of datastore_rpc.Configuration to use for all write
    operations in the mapreduce.
  """
  # "force_writes" is carried as a string param; defaults to off.
  force_writes = parse_bool(mapreduce_spec.params.get("force_writes", "false"))
  if force_writes:
    return datastore_rpc.Configuration(force_writes=force_writes)
  else:
    # dev server doesn't support force_writes.
    return datastore_rpc.Configuration()
|
python
|
{
"resource": ""
}
|
q12137
|
_set_ndb_cache_policy
|
train
|
def _set_ndb_cache_policy():
  """Tell NDB to never cache anything in memcache or in-process.

  This ensures that entities fetched from Datastore input_readers via NDB
  will not bloat up the request memory size and Datastore Puts will avoid
  doing calls to memcache. Without this you get soft memory limit exits,
  which hurts overall throughput.
  """
  ndb_ctx = ndb.get_context()
  # Disable both the in-process cache and the memcache layer for all keys.
  ndb_ctx.set_cache_policy(lambda key: False)
  ndb_ctx.set_memcache_policy(lambda key: False)
|
python
|
{
"resource": ""
}
|
q12138
|
_obj_to_path
|
train
|
def _obj_to_path(obj):
"""Returns the fully qualified path to the object.
Args:
obj: obj must be a new style top level class, or a top level function.
No inner function or static method.
Returns:
Fully qualified path to the object.
Raises:
TypeError: when argument obj has unsupported type.
ValueError: when obj can't be discovered on the top level.
"""
if obj is None:
return obj
if inspect.isclass(obj) or inspect.isfunction(obj):
fetched = getattr(sys.modules[obj.__module__], obj.__name__, None)
if fetched is None:
raise ValueError(
"Object %r must be defined on the top level of a module." % obj)
return "%s.%s" % (obj.__module__, obj.__name__)
raise TypeError("Unexpected type %s." % type(obj))
|
python
|
{
"resource": ""
}
|
q12139
|
strip_prefix_from_items
|
train
|
def strip_prefix_from_items(prefix, items):
  """Strips out the prefix from each of the items if it is present.

  Args:
    prefix: the string for that you wish to strip from the beginning of each
      of the items.
    items: a list of strings that may or may not contain the prefix you want
      to strip out.

  Returns:
    items_no_prefix: a copy of the list of items (same order) without the
      prefix (if present).
  """
  prefix_len = len(prefix)
  return [item[prefix_len:] if item.startswith(prefix) else item
          for item in items]
|
python
|
{
"resource": ""
}
|
q12140
|
MapperWorkerCallbackHandler._drop_gracefully
|
train
|
def _drop_gracefully(self):
  """Drop worker task gracefully.

  Set current shard_state to failed. Controller logic will take care of
  other shards and the entire MR.
  """
  shard_id = self.request.headers[util._MR_SHARD_ID_TASK_HEADER]
  mr_id = self.request.headers[util._MR_ID_TASK_HEADER]
  # Fetch shard and job states in one batch get.
  shard_state, mr_state = db.get([
      model.ShardState.get_key_by_shard_id(shard_id),
      model.MapreduceState.get_key_by_job_id(mr_id)])
  # Only mark failure while the shard is still active; otherwise leave the
  # persisted state untouched.
  if shard_state and shard_state.active:
    shard_state.set_for_failure()
    config = util.create_datastore_write_config(mr_state.mapreduce_spec)
    shard_state.put(config=config)
|
python
|
{
"resource": ""
}
|
q12141
|
MapperWorkerCallbackHandler._has_old_request_ended
|
train
|
def _has_old_request_ended(self, shard_state):
  """Whether previous slice retry has ended according to Logs API.

  Args:
    shard_state: shard state.

  Returns:
    True if the request of previous slice retry has ended. False if it has
    not or unknown.
  """
  assert shard_state.slice_start_time is not None
  assert shard_state.slice_request_id is not None
  request_ids = [shard_state.slice_request_id]
  logs = None
  try:
    logs = list(logservice.fetch(request_ids=request_ids))
  except (apiproxy_errors.FeatureNotEnabledError,
          apiproxy_errors.CapabilityDisabledError) as e:
    # Managed VMs do not have access to the logservice API
    # See https://groups.google.com/forum/#!topic/app-engine-managed-vms/r8i65uiFW0w
    logging.warning("Ignoring exception: %s", e)
  # No log entry, or an unfinished one, both count as "unknown" -> False.
  if not logs or not logs[0].finished:
    return False
  return True
|
python
|
{
"resource": ""
}
|
q12142
|
MapperWorkerCallbackHandler._wait_time
|
train
|
def _wait_time(self, shard_state, secs, now=datetime.datetime.now):
  """Time to wait until slice_start_time is secs ago from now.

  Args:
    shard_state: shard state.
    secs: duration in seconds.
    now: a func that gets now.

  Returns:
    0 if no wait. A positive int in seconds otherwise. Always rounded up.
  """
  assert shard_state.slice_start_time is not None
  elapsed = now() - shard_state.slice_start_time
  required = datetime.timedelta(seconds=secs)
  if elapsed >= required:
    return 0
  return util.total_seconds(required - elapsed)
|
python
|
{
"resource": ""
}
|
q12143
|
MapperWorkerCallbackHandler._try_free_lease
|
train
|
def _try_free_lease(self, shard_state, slice_retry=False):
  """Try to free lease.

  A lightweight transaction to update shard_state and unset
  slice_start_time to allow the next retry to happen without blocking.
  We don't care if this fails or not because the lease will expire
  anyway.

  Under normal execution, _save_state_and_schedule_next is the exit point.
  It updates/saves shard state and schedules the next slice or returns.
  Other exit points are:
  1. _are_states_consistent: at the beginning of handle, checks
     if datastore states and the task are in sync.
     If not, raise or return.
  2. _attempt_slice_retry: may raise exception to taskqueue.
  3. _save_state_and_schedule_next: may raise exception when taskqueue/db
     unreachable.

  This handler should try to free the lease on every exceptional exit point.

  Args:
    shard_state: model.ShardState.
    slice_retry: whether to count this as a failed slice execution.
  """
  @db.transactional
  def _tx():
    # Re-fetch inside the transaction so we only mutate fresh data.
    fresh_state = model.ShardState.get_by_shard_id(shard_state.shard_id)
    if fresh_state and fresh_state.active:
      # Free lease.
      fresh_state.slice_start_time = None
      fresh_state.slice_request_id = None
      if slice_retry:
        fresh_state.slice_retries += 1
      fresh_state.put()
  try:
    _tx()
  # pylint: disable=broad-except
  except Exception, e:
    # Best effort only: an expired lease unblocks the next retry anyway.
    logging.warning(e)
    logging.warning(
        "Release lock for shard %s failed. Wait for lease to expire.",
        shard_state.shard_id)
|
python
|
{
"resource": ""
}
|
q12144
|
MapperWorkerCallbackHandler._maintain_LC
|
train
|
def _maintain_LC(self, obj, slice_id, last_slice=False, begin_slice=True,
shard_ctx=None, slice_ctx=None):
"""Makes sure shard life cycle interface are respected.
Args:
obj: the obj that may have implemented _ShardLifeCycle.
slice_id: current slice_id
last_slice: whether this is the last slice.
begin_slice: whether this is the beginning or the end of a slice.
shard_ctx: shard ctx for dependency injection. If None, it will be read
from self.
slice_ctx: slice ctx for dependency injection. If None, it will be read
from self.
"""
if obj is None or not isinstance(obj, shard_life_cycle._ShardLifeCycle):
return
shard_context = shard_ctx or self.shard_context
slice_context = slice_ctx or self.slice_context
if begin_slice:
if slice_id == 0:
obj.begin_shard(shard_context)
obj.begin_slice(slice_context)
else:
obj.end_slice(slice_context)
if last_slice:
obj.end_shard(shard_context)
|
python
|
{
"resource": ""
}
|
q12145
|
MapperWorkerCallbackHandler.__return
|
train
|
def __return(self, shard_state, tstate, task_directive):
  """Handler should always call this as the last statement.

  Args:
    shard_state: model.ShardState for current shard.
    tstate: model.TransientShardState for current shard.
    task_directive: a _TASK_DIRECTIVE enum value describing what to do next.
  """
  task_directive = self._set_state(shard_state, tstate, task_directive)
  self._save_state_and_schedule_next(shard_state, tstate, task_directive)
  # Clear the current mapreduce context now that the slice is done.
  context.Context._set(None)
|
python
|
{
"resource": ""
}
|
q12146
|
MapperWorkerCallbackHandler._process_inputs
|
train
|
def _process_inputs(self,
                    input_reader,
                    shard_state,
                    tstate,
                    ctx):
  """Read inputs, process them, and write out outputs.

  This is the core logic of MapReduce. It reads inputs from input reader,
  invokes user specified mapper function, and writes output with
  output writer. It also updates shard_state accordingly.
  e.g. if shard processing is done, set shard_state.active to False.

  If errors.FailJobError is caught, it will fail this MR job.
  All other exceptions will be logged and raised to taskqueue for retry
  until the number of retries exceeds a limit.

  Args:
    input_reader: input reader.
    shard_state: shard state.
    tstate: transient shard state.
    ctx: mapreduce context.

  Returns:
    Whether this shard has finished processing all its input split.
    (Returns None when the processing limit for this slice is 0.)
  """
  processing_limit = self._processing_limit(tstate.mapreduce_spec)
  if processing_limit == 0:
    return
  finished_shard = True
  # Input reader may not be an iterator. It is only a container.
  iterator = iter(input_reader)
  while True:
    try:
      entity = iterator.next()
    except StopIteration:
      break
    # Reading input got exception. If we assume
    # 1. The input reader have done enough retries.
    # 2. The input reader can still serialize correctly after this exception.
    # 3. The input reader, upon resume, will try to re-read this failed
    #    record.
    # 4. This exception doesn't imply the input reader is permanently stuck.
    # we can serialize current slice immediately to avoid duplicated
    # outputs.
    # TODO(user): Validate these assumptions on all readers. MR should
    # also have a way to detect fake forward progress.

    # Record a human-readable marker of the last item seen, for status UI.
    if isinstance(entity, db.Model):
      shard_state.last_work_item = repr(entity.key())
    elif isinstance(entity, ndb.Model):
      shard_state.last_work_item = repr(entity.key)
    else:
      shard_state.last_work_item = repr(entity)[:100]

    processing_limit -= 1

    # Stop early either when the mapper asks to checkpoint (False return)
    # or when this slice's processing budget is exhausted.
    if not self._process_datum(
        entity, input_reader, ctx, tstate):
      finished_shard = False
      break
    elif processing_limit == 0:
      finished_shard = False
      break

  # Flush context and its pools.
  self.slice_context.incr(
      context.COUNTER_MAPPER_WALLTIME_MS,
      int((self._time() - self._start_time)*1000))

  return finished_shard
|
python
|
{
"resource": ""
}
|
q12147
|
MapperWorkerCallbackHandler._process_datum
|
train
|
def _process_datum(self, data, input_reader, ctx, transient_shard_state):
  """Feed a single datum to the user-supplied mapper handler.

  Args:
    data: a datum to process.
    input_reader: input reader.
    ctx: mapreduce context.
    transient_shard_state: transient shard state.

  Returns:
    True if the scan should continue; False once this slice's wall-clock
    budget is exhausted and the scan should stop.
  """
  if data is not input_readers.ALLOW_CHECKPOINT:
    self.slice_context.incr(context.COUNTER_MAPPER_CALLS)

    handler = transient_shard_state.handler

    if isinstance(handler, map_job.Mapper):
      # New-style mapper: receives the slice context explicitly.
      handler(self.slice_context, data)
    else:
      # Legacy handler: optionally unpack the datum into positional args.
      result = (handler(*data) if input_reader.expand_parameters
                else handler(data))

      if util.is_generator(result):
        for output in result:
          if isinstance(output, operation.Operation):
            # Operations (e.g. counter increments) apply themselves.
            output(ctx)
          else:
            writer = transient_shard_state.output_writer
            if writer:
              writer.write(output)
            else:
              logging.warning(
                  "Handler yielded %s, but no output writer is set.", output)

  # Stop the slice once its time budget is used up.
  return (self._time() - self._start_time <
          parameters.config._SLICE_DURATION_SEC)
|
python
|
{
"resource": ""
}
|
q12148
|
MapperWorkerCallbackHandler._set_state
|
train
|
def _set_state(self, shard_state, tstate, task_directive):
  """Mutate shard_state and tstate as dictated by task_directive.

  Args:
    shard_state: model.ShardState for current shard.
    tstate: model.TransientShardState for current shard.
    task_directive: self._TASK_DIRECTIVE for current shard.

  Returns:
    A _TASK_DIRECTIVE enum.
    PROCEED_TASK if task should proceed normally.
    RETRY_SHARD if shard should be retried.
    RETRY_SLICE if slice should be retried.
    FAIL_TASK if shard should fail.
    RECOVER_SLICE if slice should be recovered.
    ABORT_SHARD if shard should be aborted.
    RETRY_TASK if task should be retried.
    DROP_TASK if task should be dropped.
  """
  directives = self._TASK_DIRECTIVE

  if task_directive in (directives.RETRY_TASK, directives.DROP_TASK):
    # Nothing to record in the states; the task layer handles these.
    return task_directive

  if task_directive == directives.ABORT_SHARD:
    shard_state.set_for_abort()
    return task_directive

  if task_directive == directives.PROCEED_TASK:
    shard_state.advance_for_next_slice()
    tstate.advance_for_next_slice()
    return task_directive

  if task_directive == directives.RECOVER_SLICE:
    tstate.advance_for_next_slice(recovery_slice=True)
    shard_state.advance_for_next_slice(recovery_slice=True)
    return task_directive

  # RETRY_SLICE may escalate to RETRY_SHARD, which may in turn escalate
  # to FAIL_TASK; evaluate the escalation chain in order.
  if task_directive == directives.RETRY_SLICE:
    task_directive = self._attempt_slice_retry(shard_state, tstate)
  if task_directive == directives.RETRY_SHARD:
    task_directive = self._attempt_shard_retry(shard_state, tstate)
  if task_directive == directives.FAIL_TASK:
    shard_state.set_for_failure()
  return task_directive
|
python
|
{
"resource": ""
}
|
q12149
|
MapperWorkerCallbackHandler._save_state_and_schedule_next
|
train
|
def _save_state_and_schedule_next(self, shard_state, tstate, task_directive):
  """Save state and schedule task.

  Save shard state to datastore.
  Schedule next slice if needed.
  Set HTTP response code.
  No modification to any shard_state or tstate.

  Args:
    shard_state: model.ShardState for current shard.
    tstate: model.TransientShardState for current shard.
    task_directive: enum _TASK_DIRECTIVE.

  Returns:
    The task to retry if applicable.
  """
  spec = tstate.mapreduce_spec

  if task_directive == self._TASK_DIRECTIVE.DROP_TASK:
    return
  if task_directive in (self._TASK_DIRECTIVE.RETRY_SLICE,
                        self._TASK_DIRECTIVE.RETRY_TASK):
    # Set HTTP code to 500.
    return self.retry_task()
  elif task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:
    logging.info("Aborting shard %d of job '%s'",
                 shard_state.shard_number, shard_state.mapreduce_id)
    # No follow-up task: the shard stops here.
    task = None
  elif task_directive == self._TASK_DIRECTIVE.FAIL_TASK:
    logging.critical("Shard %s failed permanently.", shard_state.shard_id)
    task = None
  elif task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:
    logging.warning("Shard %s is going to be attempted for the %s time.",
                    shard_state.shard_id,
                    shard_state.retries + 1)
    task = self._state_to_task(tstate, shard_state)
  elif task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:
    logging.warning("Shard %s slice %s is being recovered.",
                    shard_state.shard_id,
                    shard_state.slice_id)
    task = self._state_to_task(tstate, shard_state)
  else:
    assert task_directive == self._TASK_DIRECTIVE.PROCEED_TASK
    countdown = self._get_countdown_for_next_slice(spec)
    task = self._state_to_task(tstate, shard_state, countdown=countdown)

  # Prepare parameters for db transaction and taskqueue.
  queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
                              # For test only.
                              # TODO(user): Remove this.
                              "default")
  config = util.create_datastore_write_config(spec)

  @db.transactional(retries=5)
  def _tx():
    """The Transaction helper."""
    # Re-read the shard state inside the transaction so we only commit
    # against the freshest datastore view.
    fresh_shard_state = model.ShardState.get_by_shard_id(tstate.shard_id)
    if not fresh_shard_state:
      raise db.Rollback()
    if (not fresh_shard_state.active or
        "worker_active_state_collision" in _TEST_INJECTED_FAULTS):
      logging.warning("Shard %s is not active. Possible spurious task "
                      "execution. Dropping this task.", tstate.shard_id)
      logging.warning("Datastore's %s", str(fresh_shard_state))
      logging.warning("Slice's %s", str(shard_state))
      return
    fresh_shard_state.copy_from(shard_state)
    fresh_shard_state.put(config=config)
    # Add task in the same datastore transaction.
    # This way we guarantee taskqueue is never behind datastore states.
    # Old tasks will be dropped.
    # Future task won't run until datastore states catches up.
    if fresh_shard_state.active:
      # Not adding task transactionally.
      # transactional enqueue requires tasks with no name.
      self._add_task(task, spec, queue_name)

  try:
    _tx()
  except (datastore_errors.Error,
          taskqueue.Error,
          runtime.DeadlineExceededError,
          apiproxy_errors.Error), e:
    # Commit failed: release the shard lease so a retry of this slice can
    # reacquire it, then let taskqueue retry the task.
    logging.warning(
        "Can't transactionally continue shard. "
        "Will retry slice %s %s for the %s time.",
        tstate.shard_id,
        tstate.slice_id,
        self.task_retry_count() + 1)
    self._try_free_lease(shard_state)
    raise e
|
python
|
{
"resource": ""
}
|
q12150
|
MapperWorkerCallbackHandler._attempt_slice_recovery
|
train
|
def _attempt_slice_recovery(self, shard_state, tstate):
  """Decide whether this slice retry needs a dedicated recovery slice.

  A slice that already ran may have written output. When the output
  writer knows how to de-duplicate, we dedicate the next slice entirely
  to that recovery logic (no data processing) and swap in a recovered
  writer instance; otherwise the slice is simply re-run.

  Args:
    shard_state: an instance of Model.ShardState.
    tstate: an instance of Model.TransientShardState.

  Returns:
    _TASK_DIRECTIVE.PROCEED_TASK to continue with this retry.
    _TASK_DIRECTIVE.RECOVER_SLICE to recover this slice.
    The next slice will start at the same input as
    this slice but output to a new instance of output writer.
    Combining outputs from all writer instances is up to implementation.
  """
  mapper_spec = tstate.mapreduce_spec.mapper
  writer = tstate.output_writer
  if writer and writer._supports_slice_recovery(mapper_spec):
    # Replace the writer with a recovered instance; the recovery slice
    # will reconcile any duplicated output.
    tstate.output_writer = writer._recover(
        tstate.mapreduce_spec, shard_state.shard_number,
        shard_state.retries + 1)
    return self._TASK_DIRECTIVE.RECOVER_SLICE
  return self._TASK_DIRECTIVE.PROCEED_TASK
|
python
|
{
"resource": ""
}
|
q12151
|
MapperWorkerCallbackHandler._attempt_shard_retry
|
train
|
def _attempt_shard_retry(self, shard_state, tstate):
  """Decide whether the whole shard should be retried.

  May modify shard_state and tstate to prepare for retry or fail.

  Args:
    shard_state: model.ShardState for current shard.
    tstate: model.TransientShardState for current shard.

  Returns:
    A _TASK_DIRECTIVE enum. RETRY_SHARD if shard should be retried.
    FAIL_TASK otherwise.
  """
  shard_attempts = shard_state.retries + 1

  # Attempt budget exhausted: the shard fails permanently.
  if shard_attempts >= parameters.config.SHARD_MAX_ATTEMPTS:
    logging.warning(
        "Shard attempt %s exceeded %s max attempts.",
        shard_attempts, parameters.config.SHARD_MAX_ATTEMPTS)
    return self._TASK_DIRECTIVE.FAIL_TASK
  # A shard retry is only safe when the writer can discard the partial
  # output of the failed attempt.
  if tstate.output_writer and (
      not tstate.output_writer._supports_shard_retry(tstate)):
    logging.warning("Output writer %s does not support shard retry.",
                    tstate.output_writer.__class__.__name__)
    return self._TASK_DIRECTIVE.FAIL_TASK

  shard_state.reset_for_retry()
  logging.warning("Shard %s attempt %s failed with up to %s attempts.",
                  shard_state.shard_id,
                  shard_state.retries,
                  parameters.config.SHARD_MAX_ATTEMPTS)
  fresh_writer = None
  if tstate.output_writer:
    # Start the retried shard with a brand-new writer instance.
    fresh_writer = tstate.output_writer.create(
        tstate.mapreduce_spec, shard_state.shard_number, shard_attempts + 1)
  tstate.reset_for_retry(fresh_writer)
  return self._TASK_DIRECTIVE.RETRY_SHARD
|
python
|
{
"resource": ""
}
|
q12152
|
MapperWorkerCallbackHandler._attempt_slice_retry
|
train
|
def _attempt_slice_retry(self, shard_state, tstate):
  """Attempt to retry this slice.

  May modify shard_state and tstate to prepare for retry or fail.

  Args:
    shard_state: model.ShardState for current shard.
    tstate: model.TransientShardState for current shard.

  Returns:
    A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
    RETRY_SHARD if shard retry should be attempted.
  """
  max_data_attempts = parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS

  # Slice retry budget exhausted: escalate to a shard retry.
  if shard_state.slice_retries + 1 >= max_data_attempts:
    if max_data_attempts > 0:
      logging.warning("Slice attempt %s exceeded %s max attempts.",
                      self.task_retry_count() + 1,
                      max_data_attempts)
    return self._TASK_DIRECTIVE.RETRY_SHARD

  logging.warning(
      "Slice %s %s failed for the %s of up to %s attempts "
      "(%s of %s taskqueue execution attempts). "
      "Will retry now.",
      tstate.shard_id,
      tstate.slice_id,
      shard_state.slice_retries + 1,
      max_data_attempts,
      self.task_retry_count() + 1,
      parameters.config.TASK_MAX_ATTEMPTS)
  # Clear the pending exception info so the retry's log doesn't carry a
  # callstack that includes a frame for this method.
  sys.exc_clear()
  self._try_free_lease(shard_state, slice_retry=True)
  return self._TASK_DIRECTIVE.RETRY_SLICE
|
python
|
{
"resource": ""
}
|
q12153
|
MapperWorkerCallbackHandler._get_countdown_for_next_slice
|
train
|
def _get_countdown_for_next_slice(self, spec):
  """Get countdown for next slice's task.

  When the user sets a processing rate, the next slice is delayed so the
  rate is honored; otherwise it runs immediately.

  Args:
    spec: model.MapreduceSpec

  Returns:
    countdown in int.
  """
  if self._processing_limit(spec) == -1:
    # No rate limit configured: schedule the next slice right away.
    return 0
  remaining = parameters.config._SLICE_DURATION_SEC - (
      self._time() - self._start_time)
  return max(int(remaining), 0)
|
python
|
{
"resource": ""
}
|
q12154
|
MapperWorkerCallbackHandler._state_to_task
|
train
|
def _state_to_task(cls,
                   tstate,
                   shard_state,
                   eta=None,
                   countdown=None):
  """Generate the worker task for the slice described by current states.

  Args:
    tstate: An instance of TransientShardState.
    shard_state: An instance of ShardState.
    eta: Absolute time when the MR should execute. May not be specified
      if 'countdown' is also supplied. This may be timezone-aware or
      timezone-naive.
    countdown: Time in seconds into the future that this MR should execute.
      Defaults to zero.

  Returns:
    A model.HugeTask instance for the slice specified by current states.
  """
  headers = util._get_task_headers(tstate.mapreduce_spec.mapreduce_id)
  # Stamp the shard id so dropped/stale tasks can be attributed.
  headers[util._MR_SHARD_ID_TASK_HEADER] = tstate.shard_id

  # The task name encodes shard, slice and retry count, so duplicate
  # scheduling attempts collapse onto one named task.
  return model.HugeTask(
      url=tstate.base_path + "/worker_callback/" + tstate.shard_id,
      params=tstate.to_dict(),
      name=MapperWorkerCallbackHandler.get_task_name(
          tstate.shard_id,
          tstate.slice_id,
          tstate.retries),
      eta=eta,
      countdown=countdown,
      parent=shard_state,
      headers=headers)
|
python
|
{
"resource": ""
}
|
q12155
|
MapperWorkerCallbackHandler._processing_limit
|
train
|
def _processing_limit(self, spec):
  """Get the limit on the number of map calls allowed by this slice.

  Args:
    spec: a Mapreduce spec.

  Returns:
    The limit as a positive int if specified by user. -1 otherwise.
  """
  rate = float(spec.mapper.params.get("processing_rate", 0))
  if rate <= 0:
    # No (or non-positive) rate configured: unlimited.
    return -1
  # Per-shard share of the job-wide rate over one slice duration.
  return int(math.ceil(
      parameters.config._SLICE_DURATION_SEC * rate /
      int(spec.mapper.shard_count)))
|
python
|
{
"resource": ""
}
|
q12156
|
ControllerCallbackHandler._drop_gracefully
|
train
|
def _drop_gracefully(self):
  """Gracefully drop controller task.

  This method is called when decoding controller task payload failed.
  Upon this we mark ShardState and MapreduceState as failed so all
  tasks can stop.

  Writing to datastore is forced (ignore read-only mode) because we
  want the tasks to stop badly, and if force_writes was False,
  the job would have never been started.
  """
  mr_id = self.request.headers[util._MR_ID_TASK_HEADER]
  state = model.MapreduceState.get_by_job_id(mr_id)
  if not state or not state.active:
    return

  state.active = False
  state.result_status = model.MapreduceState.RESULT_FAILED
  config = util.create_datastore_write_config(state.mapreduce_spec)
  pending = []
  for shard in model.ShardState.find_all_by_mapreduce_state(state):
    if not shard.active:
      continue
    shard.set_for_failure()
    pending.append(shard)
    # Flush in batches to avoid holding too many shard states in memory.
    if len(pending) > model.ShardState._MAX_STATES_IN_MEMORY:
      db.put(pending, config=config)
      pending = []
  db.put(pending, config=config)
  # Put mr_state only after all shard_states are put.
  db.put(state, config=config)
|
python
|
{
"resource": ""
}
|
q12157
|
ControllerCallbackHandler._update_state_from_shard_states
|
train
|
def _update_state_from_shard_states(self, state, shard_states, control):
  """Update mr state by examining shard states.

  Args:
    state: current mapreduce state as MapreduceState.
    shard_states: an iterator over shard states.
    control: model.MapreduceControl entity.
  """
  # Initialize vars.
  state.active_shards, state.aborted_shards, state.failed_shards = 0, 0, 0
  total_shards = 0
  processed_counts = []
  processed_status = []
  state.counters_map.clear()

  # Tally across shard states once.
  for s in shard_states:
    total_shards += 1
    status = 'unknown'
    if s.active:
      state.active_shards += 1
      status = 'running'
    if s.result_status == model.ShardState.RESULT_SUCCESS:
      status = 'success'
    elif s.result_status == model.ShardState.RESULT_ABORTED:
      state.aborted_shards += 1
      status = 'aborted'
    elif s.result_status == model.ShardState.RESULT_FAILED:
      state.failed_shards += 1
      status = 'failed'

    # Update stats in mapreduce state by aggregating stats from shard states.
    state.counters_map.add_map(s.counters_map)
    processed_counts.append(s.counters_map.get(context.COUNTER_MAPPER_CALLS))
    processed_status.append(status)

  state.set_processed_counts(processed_counts, processed_status)
  state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())

  spec = state.mapreduce_spec

  # A missing shard state means something is badly wrong; stop the job.
  if total_shards != spec.mapper.shard_count:
    logging.error("Found %d shard states. Expect %d. "
                  "Issuing abort command to job '%s'",
                  total_shards, spec.mapper.shard_count,
                  spec.mapreduce_id)
    # We issue abort command to allow shards to stop themselves.
    model.MapreduceControl.abort(spec.mapreduce_id)

  # If any shard is active then the mr is active.
  # This way, controller won't prematurely stop before all the shards have.
  state.active = bool(state.active_shards)
  if not control and (state.failed_shards or state.aborted_shards):
    # Issue abort command if there are failed shards.
    model.MapreduceControl.abort(spec.mapreduce_id)

  if not state.active:
    # Set final result status derived from shard states.
    if state.failed_shards or not total_shards:
      state.result_status = model.MapreduceState.RESULT_FAILED
    # It's important failed shards is checked before aborted shards
    # because failed shards will trigger other shards to abort.
    elif state.aborted_shards:
      state.result_status = model.MapreduceState.RESULT_ABORTED
    else:
      state.result_status = model.MapreduceState.RESULT_SUCCESS
    self._finalize_outputs(spec, state)
    self._finalize_job(spec, state)
  else:
    @db.transactional(retries=5)
    def _put_state():
      """The helper for storing the state."""
      fresh_state = model.MapreduceState.get_by_job_id(spec.mapreduce_id)
      # We don't check anything other than active because we are only
      # updating stats. It's OK if they are briefly inconsistent.
      if not fresh_state.active:
        logging.warning(
            "Job %s is not active. Looks like spurious task execution. "
            "Dropping controller task.", spec.mapreduce_id)
        return
      config = util.create_datastore_write_config(spec)
      state.put(config=config)

    _put_state()
|
python
|
{
"resource": ""
}
|
q12158
|
ControllerCallbackHandler._finalize_outputs
|
train
|
def _finalize_outputs(cls, mapreduce_spec, mapreduce_state):
  """Finalize outputs.

  Args:
    mapreduce_spec: an instance of MapreduceSpec.
    mapreduce_state: an instance of MapreduceState.
  """
  writer_class = mapreduce_spec.mapper.output_writer_class()
  job_succeeded = (
      mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS)
  # Only finalize the output writers if the job is successful.
  if writer_class and job_succeeded:
    writer_class.finalize_job(mapreduce_state)
|
python
|
{
"resource": ""
}
|
q12159
|
ControllerCallbackHandler._finalize_job
|
train
|
def _finalize_job(cls, mapreduce_spec, mapreduce_state):
  """Finalize job execution.

  Invokes done callback and save mapreduce state in a transaction,
  and schedule necessary clean ups. This method is idempotent.

  Args:
    mapreduce_spec: an instance of MapreduceSpec
    mapreduce_state: an instance of MapreduceState
  """
  config = util.create_datastore_write_config(mapreduce_spec)
  queue_name = util.get_queue_name(mapreduce_spec.params.get(
      model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE))
  done_callback = mapreduce_spec.params.get(
      model.MapreduceSpec.PARAM_DONE_CALLBACK)
  done_task = None
  if done_callback:
    # Build the user's completion callback task up front so it can be
    # enqueued transactionally with the final state write below.
    done_task = taskqueue.Task(
        url=done_callback,
        headers=util._get_task_headers(mapreduce_spec.mapreduce_id,
                                       util.CALLBACK_MR_ID_TASK_HEADER),
        method=mapreduce_spec.params.get("done_callback_method", "POST"))

  @db.transactional(retries=5)
  def _put_state():
    """Helper to store state."""
    fresh_state = model.MapreduceState.get_by_job_id(
        mapreduce_spec.mapreduce_id)
    # Idempotence guard: a concurrent/previous attempt already finalized.
    if not fresh_state.active:
      logging.warning(
          "Job %s is not active. Looks like spurious task execution. "
          "Dropping task.", mapreduce_spec.mapreduce_id)
      return
    mapreduce_state.put(config=config)
    # Enqueue done_callback if needed.
    if done_task and not _run_task_hook(
        mapreduce_spec.get_hooks(),
        "enqueue_done_task",
        done_task,
        queue_name):
      done_task.add(queue_name, transactional=True)

  _put_state()
  logging.info("Final result for job '%s' is '%s'",
               mapreduce_spec.mapreduce_id, mapreduce_state.result_status)
  cls._clean_up_mr(mapreduce_spec)
|
python
|
{
"resource": ""
}
|
q12160
|
ControllerCallbackHandler.reschedule
|
train
|
def reschedule(cls,
               mapreduce_state,
               mapreduce_spec,
               serial_id,
               queue_name=None):
  """Schedule new update status callback task.

  Args:
    mapreduce_state: mapreduce state as model.MapreduceState
    mapreduce_spec: mapreduce specification as MapreduceSpec.
    serial_id: id of the invocation as int.
    queue_name: The queue to schedule this task on. Will use the current
      queue of execution if not supplied.
  """
  task_name = ControllerCallbackHandler.get_task_name(
      mapreduce_spec, serial_id)
  task_params = ControllerCallbackHandler.controller_parameters(
      mapreduce_spec, serial_id)
  if not queue_name:
    queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")

  controller_callback_task = model.HugeTask(
      url=(mapreduce_spec.params["base_path"] + "/controller_callback/" +
           mapreduce_spec.mapreduce_id),
      name=task_name, params=task_params,
      countdown=parameters.config._CONTROLLER_PERIOD_SEC,
      parent=mapreduce_state,
      headers=util._get_task_headers(mapreduce_spec.mapreduce_id))

  # Give the hooks a chance to enqueue; fall through to a direct add when
  # no hook handled it.
  if not _run_task_hook(mapreduce_spec.get_hooks(),
                        "enqueue_controller_task",
                        controller_callback_task,
                        queue_name):
    try:
      controller_callback_task.add(queue_name)
    # The task is named by (job, serial_id): a duplicate schedule attempt
    # is expected and safe to ignore.
    except (taskqueue.TombstonedTaskError,
            taskqueue.TaskAlreadyExistsError), e:
      logging.warning("Task %r with params %r already exists. %s: %s",
                      task_name, task_params, e.__class__, e)
|
python
|
{
"resource": ""
}
|
q12161
|
KickOffJobHandler.handle
|
train
|
def handle(self):
  """Handles kick off request.

  Splits input into shards, saves the resulting states, schedules one
  worker task per shard and the first controller callback task.
  """
  # Get and verify mr state.
  mr_id = self.request.get("mapreduce_id")
  # Log the mr_id since this is started in an unnamed task
  logging.info("Processing kickoff for job %s", mr_id)
  state = model.MapreduceState.get_by_job_id(mr_id)
  if not self._check_mr_state(state, mr_id):
    return

  # Create input readers.
  readers, serialized_readers_entity = self._get_input_readers(state)
  if readers is None:
    # We don't have any data. Finish map.
    logging.warning("Found no mapper input data to process.")
    state.active = False
    state.result_status = model.MapreduceState.RESULT_SUCCESS
    ControllerCallbackHandler._finalize_job(
        state.mapreduce_spec, state)
    # NOTE(review): other early exits return None; this one returns False.
    # Callers appear to ignore the return value -- confirm before changing.
    return False

  # Create output writers.
  self._setup_output_writer(state)

  # Save states and make sure we use the saved input readers for
  # subsequent operations.
  result = self._save_states(state, serialized_readers_entity)
  if result is None:
    # A previous attempt already saved; re-read the persisted readers so
    # all attempts schedule shards from identical splits.
    readers, _ = self._get_input_readers(state)
  elif not result:
    return

  queue_name = self.request.headers.get("X-AppEngine-QueueName")
  KickOffJobHandler._schedule_shards(state.mapreduce_spec, readers,
                                     queue_name,
                                     state.mapreduce_spec.params["base_path"],
                                     state)

  ControllerCallbackHandler.reschedule(
      state, state.mapreduce_spec, serial_id=0, queue_name=queue_name)
|
python
|
{
"resource": ""
}
|
q12162
|
KickOffJobHandler._drop_gracefully
|
train
|
def _drop_gracefully(self):
  """See parent.

  Called when the kickoff task payload could not be processed: abort any
  tasks already running and mark the job as failed.
  """
  mr_id = self.request.get("mapreduce_id")
  logging.error("Failed to kick off job %s", mr_id)

  state = model.MapreduceState.get_by_job_id(mr_id)
  if not self._check_mr_state(state, mr_id):
    return

  # Issue abort command just in case there are running tasks.
  config = util.create_datastore_write_config(state.mapreduce_spec)
  model.MapreduceControl.abort(mr_id, config=config)

  # Finalize job and invoke callback.
  state.active = False
  state.result_status = model.MapreduceState.RESULT_FAILED
  ControllerCallbackHandler._finalize_job(state.mapreduce_spec, state)
|
python
|
{
"resource": ""
}
|
q12163
|
KickOffJobHandler._get_input_readers
|
train
|
def _get_input_readers(self, state):
  """Get input readers.

  Args:
    state: a MapreduceState model.

  Returns:
    A tuple: (a list of input readers, a model._HugeTaskPayload entity).
    The payload entity contains the json serialized input readers.
    (None, None) when input reader splitting returned no data to process.
  """
  serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY %
                                  state.key().id_or_name())
  serialized_input_readers = model._HugeTaskPayload.get_by_key_name(
      serialized_input_readers_key, parent=state)

  # Initialize input readers.
  input_reader_class = state.mapreduce_spec.mapper.input_reader_class()
  split_param = state.mapreduce_spec.mapper
  if issubclass(input_reader_class, map_job.InputReader):
    # New-style readers split on a JobConfig rather than a MapperSpec.
    split_param = map_job.JobConfig._to_map_job_config(
        state.mapreduce_spec,
        os.environ.get("HTTP_X_APPENGINE_QUEUENAME"))
  if serialized_input_readers is None:
    # First attempt: compute fresh splits.
    readers = input_reader_class.split_input(split_param)
  else:
    # Retry: reuse the previously persisted splits so every attempt
    # schedules identical shards.
    readers = [input_reader_class.from_json_str(_json) for _json in
               json.loads(zlib.decompress(
                   serialized_input_readers.payload))]

  if not readers:
    return None, None

  # Update state and spec with actual shard count.
  state.mapreduce_spec.mapper.shard_count = len(readers)
  state.active_shards = len(readers)

  # Prepare to save serialized input readers.
  if serialized_input_readers is None:
    # Use mr_state as parent so it can be easily cleaned up later.
    serialized_input_readers = model._HugeTaskPayload(
        key_name=serialized_input_readers_key, parent=state)
    readers_json_str = [i.to_json_str() for i in readers]
    serialized_input_readers.payload = zlib.compress(json.dumps(
        readers_json_str))
  return readers, serialized_input_readers
|
python
|
{
"resource": ""
}
|
q12164
|
KickOffJobHandler._save_states
|
train
|
def _save_states(self, state, serialized_readers_entity):
  """Run transaction to save state.

  NOTE(review): the docstring promises a transaction, but no
  @db.transactional decorator is visible in this chunk -- confirm the
  decorator exists in the full source before relying on atomicity here.

  Args:
    state: a model.MapreduceState entity.
    serialized_readers_entity: a model._HugeTaskPayload entity containing
      json serialized input readers.

  Returns:
    False if a fatal error is encountered and this task should be dropped
    immediately. True if transaction is successful. None if a previous
    attempt of this same transaction has already succeeded.
  """
  mr_id = state.key().id_or_name()
  fresh_state = model.MapreduceState.get_by_job_id(mr_id)
  if not self._check_mr_state(fresh_state, mr_id):
    return False
  if fresh_state.active_shards != 0:
    # A previous attempt already committed (shards were counted), so this
    # run is a spurious re-execution.
    logging.warning(
        "Mapreduce %s already has active shards. Looks like spurious task "
        "execution.", mr_id)
    return None
  config = util.create_datastore_write_config(state.mapreduce_spec)
  db.put([state, serialized_readers_entity], config=config)
  return True
|
python
|
{
"resource": ""
}
|
q12165
|
KickOffJobHandler._schedule_shards
|
train
|
def _schedule_shards(cls,
                     spec,
                     readers,
                     queue_name,
                     base_path,
                     mr_state):
  """Prepares shard states and schedules their execution.

  Even though this method does not schedule shard task and save shard state
  transactionally, it's safe for taskqueue to retry this logic because
  the initial shard_state for each shard is the same from any retry.
  This is an important yet reasonable assumption on model.ShardState.

  Args:
    spec: mapreduce specification as MapreduceSpec.
    readers: list of InputReaders describing shard splits.
    queue_name: The queue to run this job on.
    base_path: The base url path of mapreduce callbacks.
    mr_state: The MapReduceState of current job.
  """
  # Create shard states.
  shard_states = []
  for shard_number, input_reader in enumerate(readers):
    shard_state = model.ShardState.create_new(spec.mapreduce_id, shard_number)
    shard_state.shard_description = str(input_reader)
    shard_states.append(shard_state)

  # Retrieves already existing shard states.
  existing_shard_states = db.get(shard.key() for shard in shard_states)
  existing_shard_keys = set(shard.key() for shard in existing_shard_states
                            if shard is not None)

  # Save non existent shard states.
  # Note: we could do this transactionally if necessary.
  db.put((shard for shard in shard_states
          if shard.key() not in existing_shard_keys),
         config=util.create_datastore_write_config(spec))

  # Create output writers.
  writer_class = spec.mapper.output_writer_class()
  writers = [None] * len(readers)
  if writer_class:
    for shard_number, shard_state in enumerate(shard_states):
      writers[shard_number] = writer_class.create(
          mr_state.mapreduce_spec,
          shard_state.shard_number, shard_state.retries + 1,
          mr_state.writer_state)

  # Schedule ALL shard tasks.
  # Since each task is named, _add_task will fall back gracefully if a
  # task already exists.
  for shard_number, (input_reader, output_writer) in enumerate(
      zip(readers, writers)):
    shard_id = model.ShardState.shard_id_from_number(
        spec.mapreduce_id, shard_number)
    task = MapperWorkerCallbackHandler._state_to_task(
        model.TransientShardState(
            base_path, spec, shard_id, 0, input_reader, input_reader,
            output_writer=output_writer,
            handler=spec.mapper.handler),
        shard_states[shard_number])
    MapperWorkerCallbackHandler._add_task(task,
                                          spec,
                                          queue_name)
|
python
|
{
"resource": ""
}
|
q12166
|
KickOffJobHandler._check_mr_state
|
train
|
def _check_mr_state(cls, state, mr_id):
"""Check MapreduceState.
Args:
state: an MapreduceState instance.
mr_id: mapreduce id.
Returns:
True if state is valid. False if not and this task should be dropped.
"""
if state is None:
logging.warning(
"Mapreduce State for job %s is missing. Dropping Task.",
mr_id)
return False
if not state.active:
logging.warning(
"Mapreduce %s is not active. Looks like spurious task "
"execution. Dropping Task.", mr_id)
return False
return True
|
python
|
{
"resource": ""
}
|
q12167
|
StartJobHandler.handle
|
train
|
def handle(self):
  """Handles start request.

  Reads the mapper/job configuration from request form arguments,
  validates it, starts the job and returns the new mapreduce id in the
  JSON response.
  """
  # Mapper spec as form arguments.
  mapreduce_name = self._get_required_param("name")
  mapper_input_reader_spec = self._get_required_param("mapper_input_reader")
  mapper_handler_spec = self._get_required_param("mapper_handler")
  mapper_output_writer_spec = self.request.get("mapper_output_writer")
  mapper_params = self._get_params(
      "mapper_params_validator", "mapper_params.")
  params = self._get_params(
      "params_validator", "params.")

  # Default values.
  mr_params = map_job.JobConfig._get_default_mr_params()
  mr_params.update(params)
  # Historical API: queue_name may arrive under mapper_params.
  if "queue_name" in mapper_params:
    mr_params["queue_name"] = mapper_params["queue_name"]

  # Set some mapper param defaults if not present.
  mapper_params["processing_rate"] = int(mapper_params.get(
      "processing_rate") or parameters.config.PROCESSING_RATE_PER_SEC)

  # Validate the Mapper spec, handler, and input reader.
  mapper_spec = model.MapperSpec(
      mapper_handler_spec,
      mapper_input_reader_spec,
      mapper_params,
      int(mapper_params.get("shard_count", parameters.config.SHARD_COUNT)),
      output_writer_spec=mapper_output_writer_spec)

  mapreduce_id = self._start_map(
      mapreduce_name,
      mapper_spec,
      mr_params,
      queue_name=mr_params["queue_name"],
      _app=mapper_params.get("_app"))
  self.json_response["mapreduce_id"] = mapreduce_id
|
python
|
{
"resource": ""
}
|
q12168
|
StartJobHandler._get_params
|
train
|
def _get_params(self, validator_parameter, name_prefix):
"""Retrieves additional user-supplied params for the job and validates them.
Args:
validator_parameter: name of the request parameter which supplies
validator for this parameter set.
name_prefix: common prefix for all parameter names in the request.
Raises:
Any exception raised by the 'params_validator' request parameter if
the params fail to validate.
Returns:
The user parameters.
"""
params_validator = self.request.get(validator_parameter)
user_params = {}
for key in self.request.arguments():
if key.startswith(name_prefix):
values = self.request.get_all(key)
adjusted_key = key[len(name_prefix):]
if len(values) == 1:
user_params[adjusted_key] = values[0]
else:
user_params[adjusted_key] = values
if params_validator:
resolved_validator = util.for_name(params_validator)
resolved_validator(user_params)
return user_params
|
python
|
{
"resource": ""
}
|
q12169
|
StartJobHandler._get_required_param
|
train
|
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
errors.NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise errors.NotEnoughArgumentsError(param_name + " not specified")
return value
|
python
|
{
"resource": ""
}
|
q12170
|
StartJobHandler._start_map
|
train
|
def _start_map(cls,
               name,
               mapper_spec,
               mapreduce_params,
               queue_name,
               eta=None,
               countdown=None,
               hooks_class_name=None,
               _app=None,
               in_xg_transaction=False):
  # pylint: disable=g-doc-args
  # pylint: disable=g-doc-return-or-yield
  """See control.start_map.

  Requirements for this method:
  1. The request that invokes this method can either be regular or
     from taskqueue. So taskqueue specific headers can not be used.
  2. Each invocation transactionally starts an isolated mapreduce job with
     a unique id. MapreduceState should be immediately available after
     returning. See control.start_map's doc on transactional.
  3. Method should be lightweight.
  """
  # Validate input reader.
  mapper_input_reader_class = mapper_spec.input_reader_class()
  mapper_input_reader_class.validate(mapper_spec)

  # Validate output writer.
  mapper_output_writer_class = mapper_spec.output_writer_class()
  if mapper_output_writer_class:
    mapper_output_writer_class.validate(mapper_spec)

  # Create a new id and mr spec.
  mapreduce_id = model.MapreduceState.new_mapreduce_id()
  mapreduce_spec = model.MapreduceSpec(
      name,
      mapreduce_id,
      mapper_spec.to_json(),
      mapreduce_params,
      hooks_class_name)

  # Validate mapper handler.
  # Resolving the handler may require an active context, so install one
  # temporarily and always restore afterwards.
  ctx = context.Context(mapreduce_spec, None)
  context.Context._set(ctx)
  try:
    # pylint: disable=pointless-statement
    mapper_spec.handler
  finally:
    context.Context._set(None)

  # Save states and enqueue task.
  # MANDATORY joins the caller's cross-group transaction; INDEPENDENT
  # isolates this start from any transaction the caller may be in.
  if in_xg_transaction:
    propagation = db.MANDATORY
  else:
    propagation = db.INDEPENDENT

  @db.transactional(propagation=propagation)
  def _txn():
    cls._create_and_save_state(mapreduce_spec, _app)
    cls._add_kickoff_task(mapreduce_params["base_path"], mapreduce_spec, eta,
                          countdown, queue_name)
  _txn()

  return mapreduce_id
|
python
|
{
"resource": ""
}
|
q12171
|
StartJobHandler._create_and_save_state
|
train
|
def _create_and_save_state(cls, mapreduce_spec, _app):
  """Save mapreduce state to datastore.

  The state is written immediately so the UI can see the job before any
  shards have started.

  Args:
    mapreduce_spec: model.MapreduceSpec,
    _app: app id if specified. None otherwise.

  Returns:
    The saved Mapreduce state.
  """
  mr_state = model.MapreduceState.create_new(mapreduce_spec.mapreduce_id)
  mr_state.mapreduce_spec = mapreduce_spec
  mr_state.active = True
  mr_state.active_shards = 0
  if _app:
    mr_state.app_id = _app
  mr_state.put(config=util.create_datastore_write_config(mapreduce_spec))
  return mr_state
|
python
|
{
"resource": ""
}
|
q12172
|
StartJobHandler._add_kickoff_task
|
train
|
def _add_kickoff_task(cls,
                      base_path,
                      mapreduce_spec,
                      eta,
                      countdown,
                      queue_name):
  """Enqueue the kickoff task for a new mapreduce job.

  The task is deliberately unnamed so it can be enqueued inside a
  transaction.  If the job's hooks implement enqueue_kickoff_task, the
  hook gets the chance to enqueue it instead of the default queue add.
  """
  mr_id = mapreduce_spec.mapreduce_id
  kickoff_task = taskqueue.Task(
      url=base_path + "/kickoffjob_callback/" + mr_id,
      headers=util._get_task_headers(mr_id),
      params={"mapreduce_id": mr_id},
      eta=eta,
      countdown=countdown)
  hooks = mapreduce_spec.get_hooks()
  if hooks is not None:
    try:
      hooks.enqueue_kickoff_task(kickoff_task, queue_name)
    except NotImplementedError:
      # Hook class does not handle kickoff tasks; fall through.
      pass
    else:
      return
  kickoff_task.add(queue_name, transactional=True)
|
python
|
{
"resource": ""
}
|
q12173
|
FinalizeJobHandler.schedule
|
train
|
def schedule(cls, mapreduce_spec):
  """Schedule finalize task.

  Args:
    mapreduce_spec: mapreduce specification as MapreduceSpec.
  """
  task_name = mapreduce_spec.mapreduce_id + "-finalize"
  finalize_task = taskqueue.Task(
      name=task_name,
      url=(mapreduce_spec.params["base_path"] + "/finalizejob_callback/" +
           mapreduce_spec.mapreduce_id),
      params={"mapreduce_id": mapreduce_spec.mapreduce_id},
      headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
  queue_name = util.get_queue_name(None)
  # Give the job's hooks first chance to enqueue; add directly only when
  # no hook handled it.
  if not _run_task_hook(mapreduce_spec.get_hooks(),
                        "enqueue_controller_task",
                        finalize_task,
                        queue_name):
    try:
      finalize_task.add(queue_name)
    except (taskqueue.TombstonedTaskError,
            taskqueue.TaskAlreadyExistsError), e:
      # The task is named, so a retry of this handler may collide with an
      # already-enqueued task; that is benign and only logged.
      logging.warning("Task %r already exists. %s: %s",
                      task_name, e.__class__, e)
|
python
|
{
"resource": ""
}
|
q12174
|
_get_params
|
train
|
def _get_params(mapper_spec, allowed_keys=None, allow_old=True):
  """Obtain input reader parameters.

  Utility function for input readers implementation. Fetches parameters
  from mapreduce specification giving appropriate usage warnings.

  Args:
    mapper_spec: The MapperSpec for the job
    allowed_keys: set of all allowed keys in parameters as strings. If it is not
      None, then parameters are expected to be in a separate "input_reader"
      subdictionary of mapper_spec parameters.
    allow_old: Allow parameters to exist outside of the input_reader
      subdictionary for compatability.

  Returns:
    mapper parameters as dict

  Raises:
    BadReaderParamsError: if parameters are invalid/missing or not allowed.
  """
  if "input_reader" not in mapper_spec.params:
    message = ("Input reader's parameters should be specified in "
               "input_reader subdictionary.")
    if not allow_old or allowed_keys:
      raise errors.BadReaderParamsError(message)
    # Legacy layout: parameters live directly in mapper_spec.params.
    params = mapper_spec.params
    # str() the keys so the dict can later be expanded as **kwargs.
    params = dict((str(n), v) for n, v in params.iteritems())
  else:
    if not isinstance(mapper_spec.params.get("input_reader"), dict):
      raise errors.BadReaderParamsError(
          "Input reader parameters should be a dictionary")
    params = mapper_spec.params.get("input_reader")
    params = dict((str(n), v) for n, v in params.iteritems())
    if allowed_keys:
      params_diff = set(params.keys()) - allowed_keys
      if params_diff:
        raise errors.BadReaderParamsError(
            "Invalid input_reader parameters: %s" % ",".join(params_diff))
  return params
|
python
|
{
"resource": ""
}
|
q12175
|
AbstractDatastoreInputReader._choose_split_points
|
train
|
def _choose_split_points(cls, sorted_keys, shard_count):
"""Returns the best split points given a random set of datastore.Keys."""
assert len(sorted_keys) >= shard_count
index_stride = len(sorted_keys) / float(shard_count)
return [sorted_keys[int(round(index_stride * i))]
for i in range(1, shard_count)]
|
python
|
{
"resource": ""
}
|
q12176
|
_OldAbstractDatastoreInputReader._split_input_from_namespace
|
train
|
def _split_input_from_namespace(cls, app, namespace, entity_kind,
                                shard_count):
  """Helper for _split_input_from_params.

  If there are not enough Entities to make all of the given shards, the
  returned list of KeyRanges will include Nones. The returned list will
  contain KeyRanges ordered lexographically with any Nones appearing at the
  end.

  Args:
    app: the app.
    namespace: the namespace.
    entity_kind: entity kind as string.
    shard_count: the number of shards.

  Returns:
    KeyRange objects.
  """
  raw_entity_kind = cls._get_raw_entity_kind(entity_kind)
  if shard_count == 1:
    # With one shard we don't need to calculate any splitpoints at all.
    return [key_range.KeyRange(namespace=namespace, _app=app)]
  # Sample keys via the __scatter__ property, which is present on a small
  # random subset of entities, and use them as approximate split points.
  ds_query = datastore.Query(kind=raw_entity_kind,
                             namespace=namespace,
                             _app=app,
                             keys_only=True)
  ds_query.Order("__scatter__")
  random_keys = ds_query.Get(shard_count * cls._OVERSAMPLING_FACTOR)
  if not random_keys:
    # There are no entities with scatter property. We have no idea
    # how to split.
    return ([key_range.KeyRange(namespace=namespace, _app=app)] +
            [None] * (shard_count - 1))
  random_keys.sort()
  if len(random_keys) >= shard_count:
    # We've got a lot of scatter values. Sample them down.
    random_keys = cls._choose_split_points(random_keys, shard_count)
  # pylint: disable=redefined-outer-name
  # Build contiguous half-open ranges: (-inf, k0), [k0, k1), ..., [kN, +inf).
  key_ranges = []
  key_ranges.append(key_range.KeyRange(
      key_start=None,
      key_end=random_keys[0],
      direction=key_range.KeyRange.ASC,
      include_start=False,
      include_end=False,
      namespace=namespace,
      _app=app))
  for i in range(0, len(random_keys) - 1):
    key_ranges.append(key_range.KeyRange(
        key_start=random_keys[i],
        key_end=random_keys[i+1],
        direction=key_range.KeyRange.ASC,
        include_start=True,
        include_end=False,
        namespace=namespace,
        _app=app))
  key_ranges.append(key_range.KeyRange(
      key_start=random_keys[-1],
      key_end=None,
      direction=key_range.KeyRange.ASC,
      include_start=True,
      include_end=False,
      namespace=namespace,
      _app=app))
  if len(key_ranges) < shard_count:
    # We need to have as many shards as it was requested. Add some Nones.
    key_ranges += [None] * (shard_count - len(key_ranges))
  return key_ranges
|
python
|
{
"resource": ""
}
|
q12177
|
_OldAbstractDatastoreInputReader._split_input_from_params
|
train
|
def _split_input_from_params(cls, app, namespaces, entity_kind_name,
                             params, shard_count):
  """Return input reader objects. Helper for split_input."""
  # pylint: disable=redefined-outer-name
  key_ranges = []  # KeyRanges for all namespaces
  for namespace in namespaces:
    key_ranges.extend(
        cls._split_input_from_namespace(app,
                                        namespace,
                                        entity_kind_name,
                                        shard_count))

  # Divide the KeyRanges into shard_count shards. The KeyRanges for different
  # namespaces might be very different in size so the assignment of KeyRanges
  # to shards is done round-robin.
  shared_ranges = [[] for _ in range(shard_count)]
  for i, k_range in enumerate(key_ranges):
    shared_ranges[i % shard_count].append(k_range)
  batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))

  # NOTE: the comprehension variable `key_ranges` deliberately rebinds the
  # outer list name; each reader receives one shard's list of ranges, and
  # shards that got no ranges are dropped by the trailing `if`.
  return [cls(entity_kind_name,
              key_ranges=key_ranges,
              ns_range=None,
              batch_size=batch_size)
          for key_ranges in shared_ranges if key_ranges]
|
python
|
{
"resource": ""
}
|
q12178
|
_OldAbstractDatastoreInputReader.split_input
|
train
|
def split_input(cls, mapper_spec):
  """Splits query into shards without fetching query results.

  Tries as best as it can to split the whole query result set into equal
  shards. Due to difficulty of making the perfect split, resulting shards'
  sizes might differ significantly from each other.

  Args:
    mapper_spec: MapperSpec with params containing 'entity_kind'.
      May have 'namespace' in the params as a string containing a single
      namespace. If specified then the input reader will only yield values
      in the given namespace. If 'namespace' is not given then values from
      all namespaces will be yielded. May also have 'batch_size' in the params
      to specify the number of entities to process in each batch.

  Returns:
    A list of InputReader objects. If the query results are empty then the
    empty list will be returned. Otherwise, the list will always have a length
    equal to number_of_shards but may be padded with Nones if there are too
    few results for effective sharding.
  """
  params = _get_params(mapper_spec)
  entity_kind_name = params[cls.ENTITY_KIND_PARAM]
  batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
  shard_count = mapper_spec.shard_count
  namespace = params.get(cls.NAMESPACE_PARAM)
  app = params.get(cls._APP_PARAM)
  filters = params.get(cls.FILTERS_PARAM)

  if namespace is None:
    # It is difficult to efficiently shard large numbers of namespaces because
    # there can be an arbitrary number of them. So the strategy is:
    # 1. if there are a small number of namespaces in the datastore then
    #    generate one KeyRange per namespace per shard and assign each shard a
    #    KeyRange for every namespace. This should lead to nearly perfect
    #    sharding.
    # 2. if there are a large number of namespaces in the datastore then
    #    generate one NamespaceRange per worker. This can lead to very bad
    #    sharding because namespaces can contain very different numbers of
    #    entities and each NamespaceRange may contain very different numbers
    #    of namespaces.
    namespace_query = datastore.Query("__namespace__",
                                      keys_only=True,
                                      _app=app)
    namespace_keys = namespace_query.Get(
        limit=cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)

    if len(namespace_keys) > cls.MAX_NAMESPACES_FOR_KEY_SHARD:
      ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
                                                       contiguous=True,
                                                       _app=app)
      return [cls(entity_kind_name,
                  key_ranges=None,
                  ns_range=ns_range,
                  batch_size=batch_size,
                  filters=filters)
              for ns_range in ns_ranges]
    elif not namespace_keys:
      # NOTE(review): passing shard_count as batch_size looks like a bug —
      # the sibling branches pass batch_size here. Confirm before changing;
      # this branch only runs when the datastore has no namespaces at all.
      return [cls(entity_kind_name,
                  key_ranges=None,
                  ns_range=namespace_range.NamespaceRange(_app=app),
                  batch_size=shard_count,
                  filters=filters)]
    else:
      namespaces = [namespace_key.name() or ""
                    for namespace_key in namespace_keys]
  else:
    namespaces = [namespace]

  readers = cls._split_input_from_params(
      app, namespaces, entity_kind_name, params, shard_count)
  if filters:
    for reader in readers:
      reader._filters = filters
  return readers
|
python
|
{
"resource": ""
}
|
q12179
|
BlobstoreLineInputReader.to_json
|
train
|
def to_json(self):
  """Returns an json-compatible input shard spec for remaining inputs."""
  resume_position = self._blob_reader.tell()
  if self._has_iterated:
    # Back up one byte so the next shard re-reads the boundary byte.
    resume_position -= 1
  return {self.BLOB_KEY_PARAM: self._blob_key,
          self.INITIAL_POSITION_PARAM: resume_position,
          self.END_POSITION_PARAM: self._end_position}
|
python
|
{
"resource": ""
}
|
q12180
|
BlobstoreLineInputReader.from_json
|
train
|
def from_json(cls, json):
  """Instantiates an instance of this InputReader for the given shard spec."""
  blob_key = json[cls.BLOB_KEY_PARAM]
  start = json[cls.INITIAL_POSITION_PARAM]
  end = json[cls.END_POSITION_PARAM]
  return cls(blob_key, start, end)
|
python
|
{
"resource": ""
}
|
q12181
|
BlobstoreLineInputReader.split_input
|
train
|
def split_input(cls, mapper_spec):
  """Returns a list of shard_count input_spec_shards for input_spec.

  Args:
    mapper_spec: The mapper specification to split from. Must contain
        'blob_keys' parameter with one or more blob keys.

  Returns:
    A list of BlobstoreInputReaders corresponding to the specified shards.
  """
  params = _get_params(mapper_spec)
  blob_keys = params[cls.BLOB_KEYS_PARAM]
  if isinstance(blob_keys, basestring):
    # This is a mechanism to allow multiple blob keys (which do not contain
    # commas) in a single string. It may go away.
    blob_keys = blob_keys.split(",")

  blob_sizes = {}
  for blob_key in blob_keys:
    blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
    blob_sizes[blob_key] = blob_info.size

  # Cap the shard count, then spread shards evenly across the blobs.
  shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
  shards_per_blob = shard_count // len(blob_keys)
  if shards_per_blob == 0:
    shards_per_blob = 1

  chunks = []
  for blob_key, blob_size in blob_sizes.items():
    blob_chunk_size = blob_size // shards_per_blob
    for i in xrange(shards_per_blob - 1):
      chunks.append(BlobstoreLineInputReader.from_json(
          {cls.BLOB_KEY_PARAM: blob_key,
           cls.INITIAL_POSITION_PARAM: blob_chunk_size * i,
           cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))
    # The final chunk runs to the true blob size, absorbing any remainder
    # from the integer division above.
    chunks.append(BlobstoreLineInputReader.from_json(
        {cls.BLOB_KEY_PARAM: blob_key,
         cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1),
         cls.END_POSITION_PARAM: blob_size}))
  return chunks
|
python
|
{
"resource": ""
}
|
q12182
|
BlobstoreZipInputReader._read
|
train
|
def _read(self, entry):
  """Read entry content.

  Args:
    entry: zip file entry as zipfile.ZipInfo.

  Returns:
    Entry content as string.
  """
  start_time = time.time()
  content = self._zip.read(entry.filename)

  # Record bytes read and elapsed milliseconds in the job counters when a
  # mapreduce context is available (it is absent in some test setups).
  ctx = context.get()
  if ctx:
    operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
    operation.counters.Increment(
        COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)

  return content
|
python
|
{
"resource": ""
}
|
q12183
|
BlobstoreZipInputReader.split_input
|
train
|
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
  """Returns a list of input shard states for the input spec.

  Args:
    mapper_spec: The MapperSpec for this InputReader. Must contain
        'blob_key' parameter with one blob key.
    _reader: a callable that returns a file-like object for reading blobs.
        Used for dependency injection.

  Returns:
    A list of InputReaders spanning files within the zip.
  """
  params = _get_params(mapper_spec)
  blob_key = params[cls.BLOB_KEY_PARAM]
  zip_input = zipfile.ZipFile(_reader(blob_key))
  zfiles = zip_input.infolist()
  total_size = sum(x.file_size for x in zfiles)
  num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT)
  size_per_shard = total_size // num_shards

  # Break the list of files into sublists, each of approximately
  # size_per_shard bytes.  Shards are contiguous runs of zip entries; a
  # single large entry may make one shard much bigger than the target.
  shard_start_indexes = [0]
  current_shard_size = 0
  for i, fileinfo in enumerate(zfiles):
    current_shard_size += fileinfo.file_size
    if current_shard_size >= size_per_shard:
      shard_start_indexes.append(i + 1)
      current_shard_size = 0

  if shard_start_indexes[-1] != len(zfiles):
    shard_start_indexes.append(len(zfiles))

  # Pair consecutive indexes into [start, end) entry ranges, one per reader.
  return [cls(blob_key, start_index, end_index, _reader)
          for start_index, end_index
          in zip(shard_start_indexes, shard_start_indexes[1:])]
|
python
|
{
"resource": ""
}
|
q12184
|
BlobstoreZipLineInputReader._next_offset
|
train
|
def _next_offset(self):
"""Return the offset of the next line to read."""
if self._filestream:
offset = self._filestream.tell()
if offset:
offset -= 1
else:
offset = self._initial_offset
return offset
|
python
|
{
"resource": ""
}
|
q12185
|
NamespaceInputReader.validate
|
train
|
def validate(cls, mapper_spec):
  """Validates mapper spec.

  Args:
    mapper_spec: The MapperSpec for this InputReader.

  Raises:
    BadReaderParamsError: required parameters are missing or invalid.
  """
  if mapper_spec.input_reader_class() != cls:
    raise BadReaderParamsError("Input reader class mismatch")
  params = _get_params(mapper_spec)
  # batch_size is optional; when present it must parse as a positive int.
  if cls.BATCH_SIZE_PARAM in params:
    try:
      batch_size = int(params[cls.BATCH_SIZE_PARAM])
      if batch_size < 1:
        raise BadReaderParamsError("Bad batch size: %s" % batch_size)
    except ValueError, e:
      # Non-numeric batch_size: surface the int() error as a params error.
      raise BadReaderParamsError("Bad batch size: %s" % e)
|
python
|
{
"resource": ""
}
|
q12186
|
LogInputReader.from_json
|
train
|
def from_json(cls, json):
  """Creates an instance of the InputReader for the given input shard's state.

  Args:
    json: The InputReader state as a dict-like object.

  Returns:
    An instance of the InputReader configured using the given JSON parameters.
  """
  # Strip out unrecognized parameters, as introduced by b/5960884, and
  # force str keys so the dict can be expanded as **kwargs.
  params = {}
  for key, value in json.iteritems():
    if key in cls._PARAMS:
      params[str(key)] = value

  # This is not symmetric with to_json() wrt. PROTOTYPE_REQUEST_PARAM because
  # the constructor parameters need to be JSON-encodable, so the decoding
  # needs to happen there anyways.
  if cls._OFFSET_PARAM in params:
    params[cls._OFFSET_PARAM] = base64.b64decode(params[cls._OFFSET_PARAM])
  return cls(**params)
|
python
|
{
"resource": ""
}
|
q12187
|
LogInputReader.split_input
|
train
|
def split_input(cls, mapper_spec):
  """Returns a list of input readers for the given input specification.

  Args:
    mapper_spec: The MapperSpec for this InputReader.

  Returns:
    A list of InputReaders.
  """
  params = _get_params(mapper_spec)
  shard_count = mapper_spec.shard_count

  # Pick out the overall start and end times and time step per shard.
  start_time = params[cls.START_TIME_PARAM]
  end_time = params[cls.END_TIME_PARAM]
  seconds_per_shard = (end_time - start_time) / shard_count

  # Create a LogInputReader for each shard, modulating the params as we go.
  # Note that params is mutated in place across iterations: each shard's
  # start is the previous shard's end.
  shards = []
  for _ in xrange(shard_count - 1):
    params[cls.END_TIME_PARAM] = (params[cls.START_TIME_PARAM] +
                                  seconds_per_shard)
    shards.append(LogInputReader(**params))
    params[cls.START_TIME_PARAM] = params[cls.END_TIME_PARAM]

  # Create a final shard to complete the time range.
  params[cls.END_TIME_PARAM] = end_time
  return shards + [LogInputReader(**params)]
|
python
|
{
"resource": ""
}
|
q12188
|
LogInputReader.validate
|
train
|
def validate(cls, mapper_spec):
  """Validates the mapper's specification and all necessary parameters.

  Args:
    mapper_spec: The MapperSpec to be used with this InputReader.

  Raises:
    BadReaderParamsError: If the user fails to specify both a starting time
        and an ending time, or if the starting time is later than the ending
        time.
  """
  if mapper_spec.input_reader_class() != cls:
    raise errors.BadReaderParamsError("Input reader class mismatch")

  params = _get_params(mapper_spec, allowed_keys=cls._PARAMS)
  # Exactly one of version_ids / module_versions must be supplied.
  if (cls.VERSION_IDS_PARAM not in params and
      cls.MODULE_VERSIONS_PARAM not in params):
    raise errors.BadReaderParamsError("Must specify a list of version ids or "
                                      "module/version ids for mapper input")
  if (cls.VERSION_IDS_PARAM in params and
      cls.MODULE_VERSIONS_PARAM in params):
    raise errors.BadReaderParamsError("Can not supply both version ids or "
                                      "module/version ids. Use only one.")
  if (cls.START_TIME_PARAM not in params or
      params[cls.START_TIME_PARAM] is None):
    raise errors.BadReaderParamsError("Must specify a starting time for "
                                      "mapper input")
  # Note: validation mutates params — a missing end_time defaults to now().
  if cls.END_TIME_PARAM not in params or params[cls.END_TIME_PARAM] is None:
    params[cls.END_TIME_PARAM] = time.time()

  if params[cls.START_TIME_PARAM] >= params[cls.END_TIME_PARAM]:
    raise errors.BadReaderParamsError("The starting time cannot be later "
                                      "than or the same as the ending time.")

  if cls._PROTOTYPE_REQUEST_PARAM in params:
    try:
      params[cls._PROTOTYPE_REQUEST_PARAM] = log_service_pb.LogReadRequest(
          params[cls._PROTOTYPE_REQUEST_PARAM])
    except (TypeError, ProtocolBuffer.ProtocolBufferDecodeError):
      raise errors.BadReaderParamsError("The prototype request must be "
                                        "parseable as a LogReadRequest.")

  # Pass the parameters to logservice.fetch() to verify any underlying
  # constraints on types or values. This only constructs an iterator, it
  # doesn't trigger any requests for actual log records.
  try:
    logservice.fetch(**params)
  except logservice.InvalidArgumentError, e:
    raise errors.BadReaderParamsError("One or more parameters are not valid "
                                      "inputs to logservice.fetch(): %s" % e)
|
python
|
{
"resource": ""
}
|
q12189
|
_GoogleCloudStorageInputReader._next_file
|
train
|
def _next_file(self):
  """Find next filename.

  self._filenames may need to be expanded via listbucket.

  Returns:
    None if no more file is left. Filename otherwise.
  """
  while True:
    # Drain any in-progress bucket listing first.
    if self._bucket_iter:
      try:
        return self._bucket_iter.next().filename
      except StopIteration:
        self._bucket_iter = None
        self._bucket = None
    if self._index >= len(self._filenames):
      return
    filename = self._filenames[self._index]
    self._index += 1
    # A plain filename is returned directly; a name ending in the delimiter
    # is treated as a "directory" and expanded via listbucket on the next
    # loop iteration.
    if self._delimiter is None or not filename.endswith(self._delimiter):
      return filename
    self._bucket = cloudstorage.listbucket(filename,
                                           delimiter=self._delimiter)
    self._bucket_iter = iter(self._bucket)
|
python
|
{
"resource": ""
}
|
q12190
|
_GoogleCloudStorageInputReader.next
|
train
|
def next(self):
  """Returns the next input from this input reader, a block of bytes.

  Non existent files will be logged and skipped. The file might have been
  removed after input splitting.

  Returns:
    The next input from this input reader in the form of a cloudstorage
    ReadBuffer that supports a File-like interface (read, readline, seek,
    tell, and close). An error may be raised if the file can not be opened.

  Raises:
    StopIteration: The list of files has been exhausted.
  """
  options = {}
  if self._buffer_size:
    options["read_buffer_size"] = self._buffer_size
  if self._account_id:
    options["_account_id"] = self._account_id
  while True:
    filename = self._next_file()
    if filename is None:
      raise StopIteration()
    try:
      start_time = time.time()
      handle = cloudstorage.open(filename, **options)

      # Record open latency in milliseconds when a context is available.
      ctx = context.get()
      if ctx:
        operation.counters.Increment(
            COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)

      return handle
    except cloudstorage.NotFoundError:
      # Fail the job if we're strict on missing input.
      if getattr(self, "_fail_on_missing_input", False):
        raise errors.FailJobError(
            "File missing in GCS, aborting: %s" % filename)
      # Move on otherwise.
      logging.warning("File %s may have been removed. Skipping file.",
                      filename)
|
python
|
{
"resource": ""
}
|
q12191
|
ShardContext.incr
|
train
|
def incr(self, counter_name, delta=1):
  """Changes counter by delta.

  Args:
    counter_name: the name of the counter to change. str.
    delta: int.
  """
  counters = self._state.counters_map
  counters.increment(counter_name, delta)
|
python
|
{
"resource": ""
}
|
q12192
|
ShardContext.counter
|
train
|
def counter(self, counter_name, default=0):
  """Get the current counter value.

  Args:
    counter_name: name of the counter in string.
    default: default value in int if one doesn't exist.

  Returns:
    Current value of the counter.
  """
  counters = self._state.counters_map
  return counters.get(counter_name, default)
|
python
|
{
"resource": ""
}
|
q12193
|
SliceContext.emit
|
train
|
def emit(self, value):
  """Emits a value to output writer.

  Args:
    value: a value of type expected by the output writer.
  """
  writer = self._tstate.output_writer
  if writer:
    writer.write(value)
  else:
    # No writer configured: log and drop the value rather than raising.
    logging.error("emit is called, but no output writer is set.")
|
python
|
{
"resource": ""
}
|
q12194
|
GCSInputReader.next
|
train
|
def next(self):
  """Returns a handler to the next file.

  Non existent files will be logged and skipped. The file might have been
  removed after input splitting.

  Returns:
    The next input from this input reader in the form of a cloudstorage
    ReadBuffer that supports a File-like interface (read, readline, seek,
    tell, and close). An error may be raised if the file can not be opened.

  Raises:
    StopIteration: The list of files has been exhausted.
  """
  options = {}
  if self._buffer_size:
    options["read_buffer_size"] = self._buffer_size
  if self._account_id:
    options["_account_id"] = self._account_id
  while True:
    filename = self._next_file()
    if filename is None:
      raise StopIteration()
    # Skip files rejected by the optional path filter.
    if (self._path_filter and
        not self._path_filter.accept(self._slice_ctx, filename)):
      continue
    try:
      start_time = time.time()
      handle = cloudstorage.open(filename, **options)
      # Bug fix: convert elapsed seconds to milliseconds *before*
      # truncating to int.  The previous int(elapsed) * 1000 truncated
      # sub-second opens to 0 ms; this now matches the equivalent counter
      # update in _GoogleCloudStorageInputReader.next.
      self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
                           int((time.time() - start_time) * 1000))
      self._slice_ctx.incr(self.COUNTER_FILE_READ)
      return handle
    except cloudstorage.NotFoundError:
      logging.warning("File %s may have been removed. Skipping file.",
                      filename)
      self._slice_ctx.incr(self.COUNTER_FILE_MISSING)
|
python
|
{
"resource": ""
}
|
q12195
|
AbstractDatastoreInputReader._get_query_spec
|
train
|
def _get_query_spec(cls, params):
  """Construct a model.QuerySpec from model.MapperSpec."""
  entity_kind = params[cls.ENTITY_KIND_PARAM]
  filters = params.get(cls.FILTERS_PARAM)
  app = params.get(cls._APP_PARAM)
  ns = params.get(cls.NAMESPACE_PARAM)
  return model.QuerySpec(
      # The raw kind is used for the datastore query; the original
      # entity_kind string is preserved as model_class_path below.
      entity_kind=cls._get_raw_entity_kind(entity_kind),
      keys_only=bool(params.get(cls.KEYS_ONLY_PARAM, False)),
      filters=filters,
      batch_size=int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE)),
      model_class_path=entity_kind,
      app=app,
      ns=ns)
|
python
|
{
"resource": ""
}
|
q12196
|
AbstractDatastoreInputReader._to_key_ranges_by_shard
|
train
|
def _to_key_ranges_by_shard(cls, app, namespaces, shard_count, query_spec):
  """Get a list of key_ranges.KeyRanges objects, one for each shard.

  This method uses scatter index to split each namespace into pieces
  and assign those pieces to shards.

  Args:
    app: app_id in str.
    namespaces: a list of namespaces in str.
    shard_count: number of shards to split.
    query_spec: model.QuerySpec.

  Returns:
    a list of key_ranges.KeyRanges objects.
  """
  key_ranges_by_ns = []
  # Split each ns into n splits. If a ns doesn't have enough scatter to
  # split into n, the last few splits are None.
  for namespace in namespaces:
    ranges = cls._split_ns_by_scatter(
        shard_count,
        namespace,
        query_spec.entity_kind,
        app)
    # The nth split of each ns will be assigned to the nth shard.
    # Shuffle so that None are not all by the end.
    random.shuffle(ranges)
    key_ranges_by_ns.append(ranges)

  # KeyRanges from different namespaces might be very different in size.
  # Use round robin to make sure each shard can have at most one split
  # or a None from a ns.
  ranges_by_shard = [[] for _ in range(shard_count)]
  for ranges in key_ranges_by_ns:
    for i, k_range in enumerate(ranges):
      if k_range:
        ranges_by_shard[i].append(k_range)

  # Shards that received no ranges at all are dropped here, so the result
  # may contain fewer than shard_count entries.
  key_ranges_by_shard = []
  for ranges in ranges_by_shard:
    if ranges:
      key_ranges_by_shard.append(key_ranges.KeyRangesFactory.create_from_list(
          ranges))
  return key_ranges_by_shard
|
python
|
{
"resource": ""
}
|
q12197
|
_setup_constants
|
train
|
def _setup_constants(alphabet=NAMESPACE_CHARACTERS,
                     max_length=MAX_NAMESPACE_LENGTH,
                     batch_size=NAMESPACE_BATCH_SIZE):
  """Calculate derived constant values. Only useful for testing.

  Rebinds the module-level namespace constants and recomputes the
  _LEX_DISTANCE table used by _ord_to_namespace/_namespace_to_ord.
  """
  global NAMESPACE_CHARACTERS
  global MAX_NAMESPACE_LENGTH
  # pylint: disable=global-variable-undefined
  global MAX_NAMESPACE
  global _LEX_DISTANCE
  global NAMESPACE_BATCH_SIZE

  NAMESPACE_CHARACTERS = alphabet
  MAX_NAMESPACE_LENGTH = max_length
  MAX_NAMESPACE = NAMESPACE_CHARACTERS[-1] * MAX_NAMESPACE_LENGTH
  NAMESPACE_BATCH_SIZE = batch_size

  # _LEX_DISTANCE will contain the lexical distance between two adjacent
  # characters in NAMESPACE_CHARACTERS at each character index. This is used
  # to calculate the ordinal for each string. Example:
  # NAMESPACE_CHARACTERS = 'ab'
  # MAX_NAMESPACE_LENGTH = 3
  # _LEX_DISTANCE = [1, 3, 7]
  # ''    => 0
  # 'a'   => 1
  # 'aa'  => 2
  # 'aaa' => 3
  # 'aab' => 4 - Distance between 'aaa' and 'aab' is 1.
  # 'ab'  => 5 - Distance between 'aa' and 'ab' is 3.
  # 'aba' => 6
  # 'abb' => 7
  # 'b'   => 8 - Distance between 'a' and 'b' is 7.
  # 'ba'  => 9
  # 'baa' => 10
  # 'bab' => 11
  # ...
  # _namespace_to_ord('bab') = (1 * 7 + 1) + (0 * 3 + 1) + (1 * 1 + 1) = 11
  _LEX_DISTANCE = [1]
  for i in range(1, MAX_NAMESPACE_LENGTH):
    _LEX_DISTANCE.append(
        _LEX_DISTANCE[i-1] * len(NAMESPACE_CHARACTERS) + 1)
  # pylint: disable=undefined-loop-variable
  del i
|
python
|
{
"resource": ""
}
|
q12198
|
_ord_to_namespace
|
train
|
def _ord_to_namespace(n, _max_length=None):
  """Convert a namespace ordinal to a namespace string.

  Converts an int, representing the sequence number of a namespace ordered
  lexographically, into a namespace string.

  >>> _ord_to_namespace(0)
  ''
  >>> _ord_to_namespace(1)
  '-'
  >>> _ord_to_namespace(2)
  '--'
  >>> _ord_to_namespace(3)
  '---'

  Args:
    n: A number representing the lexographical ordering of a namespace.
    _max_length: The maximum namespace length.

  Returns:
    A string representing the nth namespace in lexographical order.
  """
  if _max_length is None:
    _max_length = MAX_NAMESPACE_LENGTH

  length = _LEX_DISTANCE[_max_length - 1]
  if n == 0:
    return ''
  n -= 1
  # Use explicit floor division: the quotient indexes NAMESPACE_CHARACTERS.
  # The previous `n / length` relied on Python 2 integer-division semantics
  # and would yield a float (TypeError on indexing) under Python 3; `//` is
  # byte-identical behavior for ints on Python 2.
  return (NAMESPACE_CHARACTERS[n // length] +
          _ord_to_namespace(n % length, _max_length - 1))
|
python
|
{
"resource": ""
}
|
q12199
|
_namespace_to_ord
|
train
|
def _namespace_to_ord(namespace):
  """Converts a namespace string into an int representing its lexographic order.

  >>> _namespace_to_ord('')
  0
  >>> _namespace_to_ord('_')
  1
  >>> _namespace_to_ord('__')
  2

  (The '_' examples assume '_' is the first character of
  NAMESPACE_CHARACTERS; with a different alphabet the values differ.)

  Args:
    namespace: A namespace string.

  Returns:
    An int representing the lexographical order of the given namespace string.
  """
  n = 0
  for i, c in enumerate(namespace):
    # Each character contributes its alphabet index scaled by the lexical
    # distance at that position, plus 1 for the prefix itself.
    n += (_LEX_DISTANCE[MAX_NAMESPACE_LENGTH - i- 1] *
          NAMESPACE_CHARACTERS.index(c)
          + 1)
  return n
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.