| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
shard_id = self.request.headers[util._MR_SHARD_ID_TASK_HEADER]
mr_id = self.request.headers[util._MR_ID_TASK_HEADER]
shard_state, mr_state = db.get([
model.ShardState.get_key_by_shard_id(shard_id),
model.MapreduceState.get_key_by_job_id(mr_id)])
if shard_state and shard_state.active:
shard_state.set_for_failure()
config = util.create_datastore_write_config(mr_state.mapreduce_spec)
shard_state.put(config=config)
|
def _drop_gracefully(self)
|
Drop worker task gracefully.
Set current shard_state to failed. Controller logic will take care of
other shards and the entire MR.
| 3.875701
| 3.400436
| 1.139766
|
# Controller will tally shard_states and properly handle the situation.
if not shard_state:
logging.warning("State not found for shard %s; Possible spurious task "
"execution. Dropping this task.",
tstate.shard_id)
return self._TASK_DIRECTIVE.DROP_TASK
if not shard_state.active:
logging.warning("Shard %s is not active. Possible spurious task "
"execution. Dropping this task.", tstate.shard_id)
logging.warning(str(shard_state))
return self._TASK_DIRECTIVE.DROP_TASK
# Validate shard retry count.
if shard_state.retries > tstate.retries:
logging.warning(
"Got shard %s from previous shard retry %s. Possible spurious "
"task execution. Dropping this task.",
tstate.shard_id,
tstate.retries)
logging.warning(str(shard_state))
return self._TASK_DIRECTIVE.DROP_TASK
elif shard_state.retries < tstate.retries:
# By the end of last slice, task enqueue succeeded but datastore commit
# failed. That transaction will be retried and adding the same task
# will pass.
logging.warning(
"ShardState for %s is behind slice. Waiting for it to catch up",
shard_state.shard_id)
return self._TASK_DIRECTIVE.RETRY_TASK
# Validate slice id.
# Taskqueue executes old successful tasks.
if shard_state.slice_id > tstate.slice_id:
logging.warning(
"Task %s-%s is behind ShardState %s. Dropping task.Use datastore to set slice_start_time to now.
If failed for any reason, raise error to retry the task (hence all
the previous validation code). The task would die naturally eventually.
Raises:
Rollback: If the shard state is missing.
Returns:
A _TASK_DIRECTIVE enum.
"""
fresh_state = model.ShardState.get_by_shard_id(tstate.shard_id)
if not fresh_state:
logging.warning("ShardState missing.")
raise db.Rollback()
if (fresh_state.active and
fresh_state.slice_id == shard_state.slice_id and
fresh_state.slice_start_time == shard_state.slice_start_time):
shard_state.slice_start_time = datetime.datetime.now()
shard_state.slice_request_id = os.environ.get("REQUEST_LOG_ID")
shard_state.acquired_once = True
shard_state.put(config=config)
return self._TASK_DIRECTIVE.PROCEED_TASK
else:
logging.warning(
"Contention on slice %s-%s execution. Will retry again.",
tstate.shard_id, tstate.slice_id)
# One proposer should win. In case all lost, back off arbitrarily.
time.sleep(random.randrange(1, 5))
return self._TASK_DIRECTIVE.RETRY_TASK
return _tx()
|
def _try_acquire_lease(self, shard_state, tstate)
|
Validate that the datastore state and the task payload are consistent.
If so, attempt to get a lease on this slice's execution.
See model.ShardState doc on slice_start_time.
Args:
shard_state: model.ShardState from datastore.
tstate: model.TransientShardState from taskqueue payload.
Returns:
A _TASK_DIRECTIVE enum. PROCEED_TASK if lock is acquired.
RETRY_TASK if task should be retried, DROP_TASK if task should
be dropped. Only old tasks (compared to datastore state)
will be dropped. Future tasks are retried until they naturally
become old so that the MR never gets stuck.
| 5.12889
| 4.579345
| 1.120005
|
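The lease logic in the row above is optimistic concurrency control: the handler validates the task against the datastore copy of the shard state, then re-reads the state inside a transaction and only takes the lease if nothing changed in between. A minimal in-memory sketch of that compare-then-set idea (no App Engine datastore; `ShardRecord`, the directive strings, and the dict-based store are illustrative stand-ins):

```python
import datetime


class ShardRecord(object):
    """Tiny in-memory stand-in for the persisted shard state."""

    def __init__(self, slice_id):
        self.active = True
        self.slice_id = slice_id
        self.slice_start_time = None


def try_acquire_lease(store, shard_id, expected_slice_id, expected_start_time):
    """Take the slice lease only if the stored state still matches."""
    fresh = store[shard_id]  # re-read inside the "transaction"
    if (fresh.active and
            fresh.slice_id == expected_slice_id and
            fresh.slice_start_time == expected_start_time):
        fresh.slice_start_time = datetime.datetime.now()
        return "proceed_task"
    # Another attempt advanced the slice or took the lease first.
    return "retry_task"


store = {"shard-1": ShardRecord(slice_id=3)}
print(try_acquire_lease(store, "shard-1", 3, None))  # proceed_task
print(try_acquire_lease(store, "shard-1", 3, None))  # retry_task (lease already taken)
```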
assert shard_state.slice_start_time is not None
assert shard_state.slice_request_id is not None
request_ids = [shard_state.slice_request_id]
logs = None
try:
logs = list(logservice.fetch(request_ids=request_ids))
except (apiproxy_errors.FeatureNotEnabledError,
apiproxy_errors.CapabilityDisabledError) as e:
# Managed VMs do not have access to the logservice API
# See https://groups.google.com/forum/#!topic/app-engine-managed-vms/r8i65uiFW0w
logging.warning("Ignoring exception: %s", e)
if not logs or not logs[0].finished:
return False
return True
|
def _has_old_request_ended(self, shard_state)
|
Whether previous slice retry has ended according to Logs API.
Args:
shard_state: shard state.
Returns:
True if the request of the previous slice retry has ended. False if it has
not ended or its status is unknown.
| 5.036707
| 4.763941
| 1.057256
|
assert shard_state.slice_start_time is not None
delta = now() - shard_state.slice_start_time
duration = datetime.timedelta(seconds=secs)
if delta < duration:
return util.total_seconds(duration - delta)
else:
return 0
|
def _wait_time(self, shard_state, secs, now=datetime.datetime.now)
|
Time to wait until slice_start_time is at least secs seconds in the past.
Args:
shard_state: shard state.
secs: duration in seconds.
now: a func that gets now.
Returns:
0 if no wait. A positive int in seconds otherwise. Always rounded up.
| 3.532794
| 3.039907
| 1.162139
|
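The wait computation above is plain date arithmetic: given when the slice lease was taken and a lease duration, return how many seconds of the lease remain. A standalone sketch of the same calculation, with `lease_duration_secs` as an assumed illustrative parameter name:

```python
import datetime


def remaining_lease_secs(slice_start_time, lease_duration_secs,
                         now=datetime.datetime.now):
    """Return seconds until slice_start_time is lease_duration_secs old."""
    elapsed = now() - slice_start_time
    duration = datetime.timedelta(seconds=lease_duration_secs)
    if elapsed < duration:
        return (duration - elapsed).total_seconds()
    return 0


# A lease taken 40 seconds ago with a 60 second duration has ~20 seconds left.
start = datetime.datetime.now() - datetime.timedelta(seconds=40)
print(remaining_lease_secs(start, 60))  # ~20.0
```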
@db.transactional
def _tx():
fresh_state = model.ShardState.get_by_shard_id(shard_state.shard_id)
if fresh_state and fresh_state.active:
# Free lease.
fresh_state.slice_start_time = None
fresh_state.slice_request_id = None
if slice_retry:
fresh_state.slice_retries += 1
fresh_state.put()
try:
_tx()
# pylint: disable=broad-except
except Exception, e:
logging.warning(e)
logging.warning(
"Release lock for shard %s failed. Wait for lease to expire.",
shard_state.shard_id)
|
def _try_free_lease(self, shard_state, slice_retry=False)
|
Try to free lease.
A lightweight transaction to update shard_state and unset
slice_start_time to allow the next retry to happen without blocking.
We don't care if this fails or not because the lease will expire
anyway.
Under normal execution, _save_state_and_schedule_next is the exit point.
It updates/saves shard state and schedules the next slice or returns.
Other exit points are:
1. _are_states_consistent: at the beginning of handle, checks
if datastore states and the task are in sync.
If not, raise or return.
2. _attempt_slice_retry: may raise exception to taskqueue.
3. _save_state_and_schedule_next: may raise exception when taskqueue/db
unreachable.
This handler should try to free the lease on every exceptional exit point.
Args:
shard_state: model.ShardState.
slice_retry: whether to count this as a failed slice execution.
| 3.65593
| 3.442931
| 1.061865
|
if obj is None or not isinstance(obj, shard_life_cycle._ShardLifeCycle):
return
shard_context = shard_ctx or self.shard_context
slice_context = slice_ctx or self.slice_context
if begin_slice:
if slice_id == 0:
obj.begin_shard(shard_context)
obj.begin_slice(slice_context)
else:
obj.end_slice(slice_context)
if last_slice:
obj.end_shard(shard_context)
|
def _maintain_LC(self, obj, slice_id, last_slice=False, begin_slice=True,
shard_ctx=None, slice_ctx=None)
|
Makes sure the shard life-cycle interface is respected.
Args:
obj: the obj that may have implemented _ShardLifeCycle.
slice_id: current slice_id
last_slice: whether this is the last slice.
begin_slice: whether this is the beginning or the end of a slice.
shard_ctx: shard ctx for dependency injection. If None, it will be read
from self.
slice_ctx: slice ctx for dependency injection. If None, it will be read
from self.
| 2.517976
| 2.089126
| 1.205277
|
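The hook ordering described above (begin_shard only around the first slice, end_shard only after the last) is easiest to see in isolation. A minimal sketch with a purely illustrative `Recorder` class standing in for an object that implements the life-cycle interface:

```python
class Recorder(object):
    """Records the life-cycle calls it receives, in order."""

    def __init__(self):
        self.calls = []

    def begin_shard(self, shard_ctx):
        self.calls.append("begin_shard")

    def begin_slice(self, slice_ctx):
        self.calls.append("begin_slice")

    def end_slice(self, slice_ctx):
        self.calls.append("end_slice")

    def end_shard(self, shard_ctx):
        self.calls.append("end_shard")


def maintain_lc(obj, slice_id, last_slice=False, begin_slice=True,
                shard_ctx=None, slice_ctx=None):
    # Mirrors the ordering above: begin_shard wraps the first slice,
    # end_shard wraps the last one.
    if begin_slice:
        if slice_id == 0:
            obj.begin_shard(shard_ctx)
        obj.begin_slice(slice_ctx)
    else:
        obj.end_slice(slice_ctx)
        if last_slice:
            obj.end_shard(shard_ctx)


r = Recorder()
maintain_lc(r, 0)                                      # first slice begins
maintain_lc(r, 0, begin_slice=False)                   # first slice ends
maintain_lc(r, 1, last_slice=True)                     # last slice begins
maintain_lc(r, 1, last_slice=True, begin_slice=False)  # last slice ends
print(r.calls)
# ['begin_shard', 'begin_slice', 'end_slice', 'begin_slice', 'end_slice', 'end_shard']
```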
task_directive = self._set_state(shard_state, tstate, task_directive)
self._save_state_and_schedule_next(shard_state, tstate, task_directive)
context.Context._set(None)
|
def __return(self, shard_state, tstate, task_directive)
|
Handler should always call this as the last statement.
| 5.623734
| 5.078125
| 1.107443
|
processing_limit = self._processing_limit(tstate.mapreduce_spec)
if processing_limit == 0:
return
finished_shard = True
# Input reader may not be an iterator. It is only a container.
iterator = iter(input_reader)
while True:
try:
entity = iterator.next()
except StopIteration:
break
# Reading input got exception. If we assume
# 1. The input reader have done enough retries.
# 2. The input reader can still serialize correctly after this exception.
# 3. The input reader, upon resume, will try to re-read this failed
# record.
# 4. This exception doesn't imply the input reader is permanently stuck.
# we can serialize current slice immediately to avoid duplicated
# outputs.
# TODO(user): Validate these assumptions on all readers. MR should
# also have a way to detect fake forward progress.
if isinstance(entity, db.Model):
shard_state.last_work_item = repr(entity.key())
elif isinstance(entity, ndb.Model):
shard_state.last_work_item = repr(entity.key)
else:
shard_state.last_work_item = repr(entity)[:100]
processing_limit -= 1
if not self._process_datum(
entity, input_reader, ctx, tstate):
finished_shard = False
break
elif processing_limit == 0:
finished_shard = False
break
# Flush context and its pools.
self.slice_context.incr(
context.COUNTER_MAPPER_WALLTIME_MS,
int((self._time() - self._start_time)*1000))
return finished_shard
|
def _process_inputs(self,
input_reader,
shard_state,
tstate,
ctx)
|
Read inputs, process them, and write out outputs.
This is the core logic of MapReduce. It reads inputs from input reader,
invokes user specified mapper function, and writes output with
output writer. It also updates shard_state accordingly.
e.g. if shard processing is done, set shard_state.active to False.
If errors.FailJobError is caught, it will fail this MR job.
All other exceptions will be logged and raised to taskqueue for retry
until the number of retries exceeds a limit.
Args:
input_reader: input reader.
shard_state: shard state.
tstate: transient shard state.
ctx: mapreduce context.
Returns:
Whether this shard has finished processing all its input split.
| 7.070332
| 6.882288
| 1.027323
|
if data is not input_readers.ALLOW_CHECKPOINT:
self.slice_context.incr(context.COUNTER_MAPPER_CALLS)
handler = transient_shard_state.handler
if isinstance(handler, map_job.Mapper):
handler(self.slice_context, data)
else:
if input_reader.expand_parameters:
result = handler(*data)
else:
result = handler(data)
if util.is_generator(result):
for output in result:
if isinstance(output, operation.Operation):
output(ctx)
else:
output_writer = transient_shard_state.output_writer
if not output_writer:
logging.warning(
"Handler yielded %s, but no output writer is set.", output)
else:
output_writer.write(output)
if self._time() - self._start_time >= parameters.config._SLICE_DURATION_SEC:
return False
return True
|
def _process_datum(self, data, input_reader, ctx, transient_shard_state)
|
Process a single data piece.
Call mapper handler on the data.
Args:
data: a datum to process.
input_reader: input reader.
ctx: mapreduce context
transient_shard_state: transient shard state.
Returns:
True if scan should be continued, False if scan should be stopped.
| 5.348098
| 5.178436
| 1.032763
|
if task_directive in (self._TASK_DIRECTIVE.RETRY_TASK,
self._TASK_DIRECTIVE.DROP_TASK):
return task_directive
if task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:
shard_state.set_for_abort()
return task_directive
if task_directive == self._TASK_DIRECTIVE.PROCEED_TASK:
shard_state.advance_for_next_slice()
tstate.advance_for_next_slice()
return task_directive
if task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:
tstate.advance_for_next_slice(recovery_slice=True)
shard_state.advance_for_next_slice(recovery_slice=True)
return task_directive
if task_directive == self._TASK_DIRECTIVE.RETRY_SLICE:
task_directive = self._attempt_slice_retry(shard_state, tstate)
if task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:
task_directive = self._attempt_shard_retry(shard_state, tstate)
if task_directive == self._TASK_DIRECTIVE.FAIL_TASK:
shard_state.set_for_failure()
return task_directive
|
def _set_state(self, shard_state, tstate, task_directive)
|
Set shard_state and tstate based on task_directive.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
task_directive: self._TASK_DIRECTIVE for current shard.
Returns:
A _TASK_DIRECTIVE enum.
PROCEED_TASK if task should proceed normally.
RETRY_SHARD if shard should be retried.
RETRY_SLICE if slice should be retried.
FAIL_TASK if shard should fail.
RECOVER_SLICE if slice should be recovered.
ABORT_SHARD if shard should be aborted.
RETRY_TASK if task should be retried.
DROP_TASK if task should be dropped.
| 2.157817
| 1.708417
| 1.263051
|
spec = tstate.mapreduce_spec
if task_directive == self._TASK_DIRECTIVE.DROP_TASK:
return
if task_directive in (self._TASK_DIRECTIVE.RETRY_SLICE,
self._TASK_DIRECTIVE.RETRY_TASK):
# Set HTTP code to 500.
return self.retry_task()
elif task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:
logging.info("Aborting shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
task = None
elif task_directive == self._TASK_DIRECTIVE.FAIL_TASK:
logging.critical("Shard %s failed permanently.", shard_state.shard_id)
task = None
elif task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:
logging.warning("Shard %s is going to be attempted for the %s time.",
shard_state.shard_id,
shard_state.retries + 1)
task = self._state_to_task(tstate, shard_state)
elif task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:
logging.warning("Shard %s slice %s is being recovered.",
shard_state.shard_id,
shard_state.slice_id)
task = self._state_to_task(tstate, shard_state)
else:
assert task_directive == self._TASK_DIRECTIVE.PROCEED_TASK
countdown = self._get_countdown_for_next_slice(spec)
task = self._state_to_task(tstate, shard_state, countdown=countdown)
# Prepare parameters for db transaction and taskqueue.
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
# For test only.
# TODO(user): Remove this.
"default")
config = util.create_datastore_write_config(spec)
@db.transactional(retries=5)
def _tx():
fresh_shard_state = model.ShardState.get_by_shard_id(tstate.shard_id)
if not fresh_shard_state:
raise db.Rollback()
if (not fresh_shard_state.active or
"worker_active_state_collision" in _TEST_INJECTED_FAULTS):
logging.warning("Shard %s is not active. Possible spurious task "
"execution. Dropping this task.", tstate.shard_id)
logging.warning("Datastore's %s", str(fresh_shard_state))
logging.warning("Slice's %s", str(shard_state))
return
fresh_shard_state.copy_from(shard_state)
fresh_shard_state.put(config=config)
# Add task in the same datastore transaction.
# This way we guarantee taskqueue is never behind datastore states.
# Old tasks will be dropped.
# Future task won't run until datastore states catches up.
if fresh_shard_state.active:
# Not adding task transactionally.
# transactional enqueue requires tasks with no name.
self._add_task(task, spec, queue_name)
try:
_tx()
except (datastore_errors.Error,
taskqueue.Error,
runtime.DeadlineExceededError,
apiproxy_errors.Error), e:
logging.warning(
"Can't transactionally continue shard. "
"Will retry slice %s %s for the %s time.",
tstate.shard_id,
tstate.slice_id,
self.task_retry_count() + 1)
self._try_free_lease(shard_state)
raise e
|
def _save_state_and_schedule_next(self, shard_state, tstate, task_directive)
|
Save state and schedule task.
Save shard state to datastore.
Schedule next slice if needed.
Set HTTP response code.
No modification to any shard_state or tstate.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
task_directive: enum _TASK_DIRECTIVE.
Returns:
The task to retry if applicable.
| 4.054155
| 3.882209
| 1.044291
|
mapper_spec = tstate.mapreduce_spec.mapper
if not (tstate.output_writer and
tstate.output_writer._supports_slice_recovery(mapper_spec)):
return self._TASK_DIRECTIVE.PROCEED_TASK
tstate.output_writer = tstate.output_writer._recover(
tstate.mapreduce_spec, shard_state.shard_number,
shard_state.retries + 1)
return self._TASK_DIRECTIVE.RECOVER_SLICE
|
def _attempt_slice_recovery(self, shard_state, tstate)
|
Recover a slice.
This is run when a slice had been previously attempted and output
may have been written. If an output writer requires slice recovery,
we run that logic to remove output duplicates. Otherwise we just retry
the slice.
If recovery is needed, then the entire slice will be dedicated
to recovery logic. No data processing will take place. Thus we call
the slice "recovery slice". This is needed for correctness:
An output writer instance can be out of sync from its physical
medium only when the slice dies after acquiring the shard lock but before
committing shard state to db. The worst failure case is when
shard state failed to commit after the NAMED task for the next slice was
added. Thus, recovery slice has a special logic to increment current
slice_id n to n+2. If the task for n+1 had been added, it will be dropped
because it is behind shard state.
Args:
shard_state: an instance of Model.ShardState.
tstate: an instance of Model.TransientShardState.
Returns:
_TASK_DIRECTIVE.PROCEED_TASK to continue with this retry.
_TASK_DIRECTIVE.RECOVER_SLICE to recover this slice.
The next slice will start at the same input as
this slice but output to a new instance of output writer.
Combining outputs from all writer instances is up to implementation.
| 5.210247
| 3.54806
| 1.468478
|
shard_attempts = shard_state.retries + 1
if shard_attempts >= parameters.config.SHARD_MAX_ATTEMPTS:
logging.warning(
"Shard attempt %s exceeded %s max attempts.",
shard_attempts, parameters.config.SHARD_MAX_ATTEMPTS)
return self._TASK_DIRECTIVE.FAIL_TASK
if tstate.output_writer and (
not tstate.output_writer._supports_shard_retry(tstate)):
logging.warning("Output writer %s does not support shard retry.",
tstate.output_writer.__class__.__name__)
return self._TASK_DIRECTIVE.FAIL_TASK
shard_state.reset_for_retry()
logging.warning("Shard %s attempt %s failed with up to %s attempts.",
shard_state.shard_id,
shard_state.retries,
parameters.config.SHARD_MAX_ATTEMPTS)
output_writer = None
if tstate.output_writer:
output_writer = tstate.output_writer.create(
tstate.mapreduce_spec, shard_state.shard_number, shard_attempts + 1)
tstate.reset_for_retry(output_writer)
return self._TASK_DIRECTIVE.RETRY_SHARD
|
def _attempt_shard_retry(self, shard_state, tstate)
|
Whether to retry shard.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SHARD if shard should be retried.
FAIL_TASK otherwise.
| 2.919015
| 2.641019
| 1.105261
|
if (shard_state.slice_retries + 1 <
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS):
logging.warning(
"Slice %s %s failed for the %s of up to %s attempts "
"(%s of %s taskqueue execution attempts). "
"Will retry now.",
tstate.shard_id,
tstate.slice_id,
shard_state.slice_retries + 1,
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS,
self.task_retry_count() + 1,
parameters.config.TASK_MAX_ATTEMPTS)
# Clear info related to current exception. Otherwise, the real
# callstack that includes a frame for this method will show up
# in log.
sys.exc_clear()
self._try_free_lease(shard_state, slice_retry=True)
return self._TASK_DIRECTIVE.RETRY_SLICE
if parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS > 0:
logging.warning("Slice attempt %s exceeded %s max attempts.",
self.task_retry_count() + 1,
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS)
return self._TASK_DIRECTIVE.RETRY_SHARD
|
def _attempt_slice_retry(self, shard_state, tstate)
|
Attempt to retry this slice.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
RETRY_SHARD if shard retry should be attempted.
| 4.504534
| 3.929968
| 1.146201
|
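The decision in the row above is a counter comparison: retry the slice in place while data-processing attempts remain, otherwise escalate to a shard retry. A hedged sketch of that decision, with the directive strings and the limit name chosen purely for illustration:

```python
RETRY_SLICE, RETRY_SHARD = "retry_slice", "retry_shard"


def slice_retry_directive(slice_retries, max_data_processing_attempts):
    """Decide whether a failed slice retries in place or escalates."""
    if slice_retries + 1 < max_data_processing_attempts:
        return RETRY_SLICE
    return RETRY_SHARD


print(slice_retry_directive(0, 11))   # retry_slice
print(slice_retry_directive(10, 11))  # retry_shard
```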
countdown = 0
if self._processing_limit(spec) != -1:
countdown = max(
int(parameters.config._SLICE_DURATION_SEC -
(self._time() - self._start_time)), 0)
return countdown
|
def _get_countdown_for_next_slice(self, spec)
|
Get countdown for next slice's task.
When the user sets a processing rate, we set a countdown to delay task execution.
Args:
spec: model.MapreduceSpec
Returns:
countdown in int.
| 7.43118
| 7.608047
| 0.976753
|
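The countdown above is just the unused remainder of the slice budget: if the slice stopped early because it hit its processing limit, the next task is delayed so the configured rate is not exceeded. A minimal sketch, with `slice_duration_sec` standing in for `parameters.config._SLICE_DURATION_SEC`:

```python
def countdown_for_next_slice(slice_duration_sec, start_time, now,
                             rate_limited=True):
    """Seconds to delay the next slice so the slice budget is respected."""
    if not rate_limited:  # no processing rate configured
        return 0
    elapsed = now - start_time
    return max(int(slice_duration_sec - elapsed), 0)


# A 15s slice budget that finished after 6s leaves a 9s countdown.
print(countdown_for_next_slice(15, start_time=100.0, now=106.0))  # 9
```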
base_path = tstate.base_path
task_name = MapperWorkerCallbackHandler.get_task_name(
tstate.shard_id,
tstate.slice_id,
tstate.retries)
headers = util._get_task_headers(tstate.mapreduce_spec.mapreduce_id)
headers[util._MR_SHARD_ID_TASK_HEADER] = tstate.shard_id
worker_task = model.HugeTask(
url=base_path + "/worker_callback/" + tstate.shard_id,
params=tstate.to_dict(),
name=task_name,
eta=eta,
countdown=countdown,
parent=shard_state,
headers=headers)
return worker_task
|
def _state_to_task(cls,
tstate,
shard_state,
eta=None,
countdown=None)
|
Generate task for slice according to current states.
Args:
tstate: An instance of TransientShardState.
shard_state: An instance of ShardState.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
Returns:
A model.HugeTask instance for the slice specified by current states.
| 4.576115
| 4.152748
| 1.101949
|
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_worker_task",
worker_task,
queue_name):
try:
# Not adding transactionally because worker_task has name.
# Named task is not allowed for transactional add.
worker_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r already exists. %s: %s",
worker_task.name,
e.__class__,
e)
|
def _add_task(cls,
worker_task,
mapreduce_spec,
queue_name)
|
Schedule slice scanning by adding it to the task queue.
Args:
worker_task: a model.HugeTask task for slice. This is NOT a taskqueue
task.
mapreduce_spec: an instance of model.MapreduceSpec.
queue_name: Optional queue to run on; uses the current queue of
execution or the default queue if unspecified.
| 5.036812
| 5.670114
| 0.888309
|
processing_rate = float(spec.mapper.params.get("processing_rate", 0))
slice_processing_limit = -1
if processing_rate > 0:
slice_processing_limit = int(math.ceil(
parameters.config._SLICE_DURATION_SEC*processing_rate/
int(spec.mapper.shard_count)))
return slice_processing_limit
|
def _processing_limit(self, spec)
|
Get the limit on the number of map calls allowed by this slice.
Args:
spec: a Mapreduce spec.
Returns:
The limit as a positive int if specified by user. -1 otherwise.
| 5.894589
| 4.658962
| 1.265215
|
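The per-slice limit above spreads the user's overall `processing_rate` (entities per second for the whole job) across shards and across the slice duration. A standalone sketch of the same arithmetic:

```python
import math


def slice_processing_limit(processing_rate, slice_duration_sec, shard_count):
    """Max mapper calls one slice may make; -1 means unlimited."""
    if processing_rate <= 0:
        return -1
    return int(math.ceil(slice_duration_sec * processing_rate / shard_count))


# 100 entities/sec across 8 shards with 15s slices:
# each slice may process ceil(15 * 100 / 8) = 188 entities.
print(slice_processing_limit(100.0, 15, 8))  # 188
```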
queue_name = queue_name or os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
"default")
task = cls._state_to_task(tstate, shard_state, eta, countdown)
cls._add_task(task, tstate.mapreduce_spec, queue_name)
|
def _schedule_slice(cls,
shard_state,
tstate,
queue_name=None,
eta=None,
countdown=None)
|
Schedule slice scanning by adding it to the task queue.
Args:
shard_state: An instance of ShardState.
tstate: An instance of TransientShardState.
queue_name: Optional queue to run on; uses the current queue of
execution or the default queue if unspecified.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
| 3.904112
| 4.911994
| 0.794812
|
mr_id = self.request.headers[util._MR_ID_TASK_HEADER]
state = model.MapreduceState.get_by_job_id(mr_id)
if not state or not state.active:
return
state.active = False
state.result_status = model.MapreduceState.RESULT_FAILED
config = util.create_datastore_write_config(state.mapreduce_spec)
puts = []
for ss in model.ShardState.find_all_by_mapreduce_state(state):
if ss.active:
ss.set_for_failure()
puts.append(ss)
# Avoid having too many shard states in memory.
if len(puts) > model.ShardState._MAX_STATES_IN_MEMORY:
db.put(puts, config=config)
puts = []
db.put(puts, config=config)
# Put mr_state only after all shard_states are put.
db.put(state, config=config)
|
def _drop_gracefully(self)
|
Gracefully drop controller task.
This method is called when decoding controller task payload failed.
Upon this we mark ShardState and MapreduceState as failed so all
tasks can stop.
Writing to datastore is forced (ignoring read-only mode) because we
badly want the tasks to stop, and if force_writes were False,
the job would never have been started.
| 4.221106
| 3.611125
| 1.168917
|
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
state, control = db.get([
model.MapreduceState.get_key_by_job_id(spec.mapreduce_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not state:
logging.warning("State not found for MR '%s'; dropping controller task.",
spec.mapreduce_id)
return
if not state.active:
logging.warning(
"MR %r is not active. Looks like spurious controller task execution.",
spec.mapreduce_id)
self._clean_up_mr(spec)
return
shard_states = model.ShardState.find_all_by_mapreduce_state(state)
self._update_state_from_shard_states(state, shard_states, control)
if state.active:
ControllerCallbackHandler.reschedule(
state, spec, self.serial_id() + 1)
|
def handle(self)
|
Handle request.
| 4.649042
| 4.486186
| 1.036302
|
# Initialize vars.
state.active_shards, state.aborted_shards, state.failed_shards = 0, 0, 0
total_shards = 0
processed_counts = []
processed_status = []
state.counters_map.clear()
# Tally across shard states once.
for s in shard_states:
total_shards += 1
status = 'unknown'
if s.active:
state.active_shards += 1
status = 'running'
if s.result_status == model.ShardState.RESULT_SUCCESS:
status = 'success'
elif s.result_status == model.ShardState.RESULT_ABORTED:
state.aborted_shards += 1
status = 'aborted'
elif s.result_status == model.ShardState.RESULT_FAILED:
state.failed_shards += 1
status = 'failed'
# Update stats in mapreduce state by aggregating stats from shard states.
state.counters_map.add_map(s.counters_map)
processed_counts.append(s.counters_map.get(context.COUNTER_MAPPER_CALLS))
processed_status.append(status)
state.set_processed_counts(processed_counts, processed_status)
state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())
spec = state.mapreduce_spec
if total_shards != spec.mapper.shard_count:
logging.error("Found %d shard states. Expect %d. "
"Issuing abort command to job '%s'",
total_shards, spec.mapper.shard_count,
spec.mapreduce_id)
# We issue abort command to allow shards to stop themselves.
model.MapreduceControl.abort(spec.mapreduce_id)
# If any shard is active then the mr is active.
# This way, controller won't prematurely stop before all the shards have.
state.active = bool(state.active_shards)
if not control and (state.failed_shards or state.aborted_shards):
# Issue abort command if there are failed shards.
model.MapreduceControl.abort(spec.mapreduce_id)
if not state.active:
# Set final result status derived from shard states.
if state.failed_shards or not total_shards:
state.result_status = model.MapreduceState.RESULT_FAILED
# It's important failed shards is checked before aborted shards
# because failed shards will trigger other shards to abort.
elif state.aborted_shards:
state.result_status = model.MapreduceState.RESULT_ABORTED
else:
state.result_status = model.MapreduceState.RESULT_SUCCESS
self._finalize_outputs(spec, state)
self._finalize_job(spec, state)
else:
@db.transactional(retries=5)
def _put_state():
fresh_state = model.MapreduceState.get_by_job_id(spec.mapreduce_id)
# We don't check anything other than active because we are only
# updating stats. It's OK if they are briefly inconsistent.
if not fresh_state.active:
logging.warning(
"Job %s is not active. Looks like spurious task execution. "
"Dropping controller task.", spec.mapreduce_id)
return
config = util.create_datastore_write_config(spec)
state.put(config=config)
_put_state()
|
def _update_state_from_shard_states(self, state, shard_states, control)
|
Update mr state by examining shard states.
Args:
state: current mapreduce state as MapreduceState.
shard_states: an iterator over shard states.
control: model.MapreduceControl entity.
| 3.781816
| 3.54868
| 1.065697
|
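The controller's aggregation above is a single pass over the shard states: count active, aborted, and failed shards and derive the job's overall status, with failed shards taking precedence over aborted ones. A compact sketch over plain dicts (field and status names are illustrative):

```python
def tally_shards(shard_states):
    """Summarize shard statuses the way the controller pass does."""
    summary = {"active": 0, "aborted": 0, "failed": 0, "total": 0}
    for shard in shard_states:
        summary["total"] += 1
        if shard["active"]:
            summary["active"] += 1
        elif shard["result"] == "aborted":
            summary["aborted"] += 1
        elif shard["result"] == "failed":
            summary["failed"] += 1
    if summary["active"]:
        summary["job_status"] = "running"
    elif summary["failed"] or not summary["total"]:
        summary["job_status"] = "failed"   # failed shards win over aborted
    elif summary["aborted"]:
        summary["job_status"] = "aborted"
    else:
        summary["job_status"] = "success"
    return summary


shards = [{"active": False, "result": "success"},
          {"active": False, "result": "failed"}]
print(tally_shards(shards)["job_status"])  # failed
```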
# Only finalize the output writers if the job is successful.
if (mapreduce_spec.mapper.output_writer_class() and
mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS):
mapreduce_spec.mapper.output_writer_class().finalize_job(mapreduce_state)
|
def _finalize_outputs(cls, mapreduce_spec, mapreduce_state)
|
Finalize outputs.
Args:
mapreduce_spec: an instance of MapreduceSpec.
mapreduce_state: an instance of MapreduceState.
| 3.785046
| 4.377091
| 0.86474
|
config = util.create_datastore_write_config(mapreduce_spec)
queue_name = util.get_queue_name(mapreduce_spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE))
done_callback = mapreduce_spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK)
done_task = None
if done_callback:
done_task = taskqueue.Task(
url=done_callback,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id,
util.CALLBACK_MR_ID_TASK_HEADER),
method=mapreduce_spec.params.get("done_callback_method", "POST"))
@db.transactional(retries=5)
def _put_state():
fresh_state = model.MapreduceState.get_by_job_id(
mapreduce_spec.mapreduce_id)
if not fresh_state.active:
logging.warning(
"Job %s is not active. Looks like spurious task execution. "
"Dropping task.", mapreduce_spec.mapreduce_id)
return
mapreduce_state.put(config=config)
# Enqueue done_callback if needed.
if done_task and not _run_task_hook(
mapreduce_spec.get_hooks(),
"enqueue_done_task",
done_task,
queue_name):
done_task.add(queue_name, transactional=True)
_put_state()
logging.info("Final result for job '%s' is '%s'",
mapreduce_spec.mapreduce_id, mapreduce_state.result_status)
cls._clean_up_mr(mapreduce_spec)
|
def _finalize_job(cls, mapreduce_spec, mapreduce_state)
|
Finalize job execution.
Invokes the done callback and saves mapreduce state in a transaction,
and schedules necessary clean ups. This method is idempotent.
Args:
mapreduce_spec: an instance of MapreduceSpec
mapreduce_state: an instance of MapreduceState
| 3.341387
| 3.378464
| 0.989026
|
task_name = ControllerCallbackHandler.get_task_name(
mapreduce_spec, serial_id)
task_params = ControllerCallbackHandler.controller_parameters(
mapreduce_spec, serial_id)
if not queue_name:
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
controller_callback_task = model.HugeTask(
url=(mapreduce_spec.params["base_path"] + "/controller_callback/" +
mapreduce_spec.mapreduce_id),
name=task_name, params=task_params,
countdown=parameters.config._CONTROLLER_PERIOD_SEC,
parent=mapreduce_state,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_controller_task",
controller_callback_task,
queue_name):
try:
controller_callback_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
|
def reschedule(cls,
mapreduce_state,
mapreduce_spec,
serial_id,
queue_name=None)
|
Schedule new update status callback task.
Args:
mapreduce_state: mapreduce state as model.MapreduceState
mapreduce_spec: mapreduce specification as MapreduceSpec.
serial_id: id of the invocation as int.
queue_name: The queue to schedule this task on. Will use the current
queue of execution if not supplied.
| 3.426455
| 3.477554
| 0.985306
|
# Get and verify mr state.
mr_id = self.request.get("mapreduce_id")
# Log the mr_id since this is started in an unnamed task
logging.info("Processing kickoff for job %s", mr_id)
state = model.MapreduceState.get_by_job_id(mr_id)
if not self._check_mr_state(state, mr_id):
return
# Create input readers.
readers, serialized_readers_entity = self._get_input_readers(state)
if readers is None:
# We don't have any data. Finish map.
logging.warning("Found no mapper input data to process.")
state.active = False
state.result_status = model.MapreduceState.RESULT_SUCCESS
ControllerCallbackHandler._finalize_job(
state.mapreduce_spec, state)
return False
# Create output writers.
self._setup_output_writer(state)
# Save states and make sure we use the saved input readers for
# subsequent operations.
result = self._save_states(state, serialized_readers_entity)
if result is None:
readers, _ = self._get_input_readers(state)
elif not result:
return
queue_name = self.request.headers.get("X-AppEngine-QueueName")
KickOffJobHandler._schedule_shards(state.mapreduce_spec, readers,
queue_name,
state.mapreduce_spec.params["base_path"],
state)
ControllerCallbackHandler.reschedule(
state, state.mapreduce_spec, serial_id=0, queue_name=queue_name)
|
def handle(self)
|
Handles kick off request.
| 5.348391
| 5.097697
| 1.049178
|
mr_id = self.request.get("mapreduce_id")
logging.error("Failed to kick off job %s", mr_id)
state = model.MapreduceState.get_by_job_id(mr_id)
if not self._check_mr_state(state, mr_id):
return
# Issue abort command just in case there are running tasks.
config = util.create_datastore_write_config(state.mapreduce_spec)
model.MapreduceControl.abort(mr_id, config=config)
# Finalize job and invoke callback.
state.active = False
state.result_status = model.MapreduceState.RESULT_FAILED
ControllerCallbackHandler._finalize_job(state.mapreduce_spec, state)
|
def _drop_gracefully(self)
|
See parent.
| 5.636039
| 5.50456
| 1.023885
|
serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY %
state.key().id_or_name())
serialized_input_readers = model._HugeTaskPayload.get_by_key_name(
serialized_input_readers_key, parent=state)
# Initialize input readers.
input_reader_class = state.mapreduce_spec.mapper.input_reader_class()
split_param = state.mapreduce_spec.mapper
if issubclass(input_reader_class, map_job.InputReader):
split_param = map_job.JobConfig._to_map_job_config(
state.mapreduce_spec,
os.environ.get("HTTP_X_APPENGINE_QUEUENAME"))
if serialized_input_readers is None:
readers = input_reader_class.split_input(split_param)
else:
readers = [input_reader_class.from_json_str(_json) for _json in
json.loads(zlib.decompress(
serialized_input_readers.payload))]
if not readers:
return None, None
# Update state and spec with actual shard count.
state.mapreduce_spec.mapper.shard_count = len(readers)
state.active_shards = len(readers)
# Prepare to save serialized input readers.
if serialized_input_readers is None:
# Use mr_state as parent so it can be easily cleaned up later.
serialized_input_readers = model._HugeTaskPayload(
key_name=serialized_input_readers_key, parent=state)
readers_json_str = [i.to_json_str() for i in readers]
serialized_input_readers.payload = zlib.compress(json.dumps(
readers_json_str))
return readers, serialized_input_readers
|
def _get_input_readers(self, state)
|
Get input readers.
Args:
state: a MapreduceState model.
Returns:
A tuple: (a list of input readers, a model._HugeTaskPayload entity).
The payload entity contains the json serialized input readers.
(None, None) when input reader splitting returned no data to process.
| 3.357895
| 3.119151
| 1.076541
|
mr_id = state.key().id_or_name()
fresh_state = model.MapreduceState.get_by_job_id(mr_id)
if not self._check_mr_state(fresh_state, mr_id):
return False
if fresh_state.active_shards != 0:
logging.warning(
"Mapreduce %s already has active shards. Looks like spurious task "
"execution.", mr_id)
return None
config = util.create_datastore_write_config(state.mapreduce_spec)
db.put([state, serialized_readers_entity], config=config)
return True
|
def _save_states(self, state, serialized_readers_entity)
|
Run transaction to save state.
Args:
state: a model.MapreduceState entity.
serialized_readers_entity: a model._HugeTaskPayload entity containing
json serialized input readers.
Returns:
False if a fatal error is encountered and this task should be dropped
immediately. True if transaction is successful. None if a previous
attempt of this same transaction has already succeeded.
| 4.712878
| 4.084819
| 1.153754
|
# Create shard states.
shard_states = []
for shard_number, input_reader in enumerate(readers):
shard_state = model.ShardState.create_new(spec.mapreduce_id, shard_number)
shard_state.shard_description = str(input_reader)
shard_states.append(shard_state)
# Retrieves already existing shard states.
existing_shard_states = db.get(shard.key() for shard in shard_states)
existing_shard_keys = set(shard.key() for shard in existing_shard_states
if shard is not None)
# Save non existent shard states.
# Note: we could do this transactionally if necessary.
db.put((shard for shard in shard_states
if shard.key() not in existing_shard_keys),
config=util.create_datastore_write_config(spec))
# Create output writers.
writer_class = spec.mapper.output_writer_class()
writers = [None] * len(readers)
if writer_class:
for shard_number, shard_state in enumerate(shard_states):
writers[shard_number] = writer_class.create(
mr_state.mapreduce_spec,
shard_state.shard_number, shard_state.retries + 1,
mr_state.writer_state)
# Schedule ALL shard tasks.
# Since each task is named, _add_task will fall back gracefully if a
# task already exists.
for shard_number, (input_reader, output_writer) in enumerate(
zip(readers, writers)):
shard_id = model.ShardState.shard_id_from_number(
spec.mapreduce_id, shard_number)
task = MapperWorkerCallbackHandler._state_to_task(
model.TransientShardState(
base_path, spec, shard_id, 0, input_reader, input_reader,
output_writer=output_writer,
handler=spec.mapper.handler),
shard_states[shard_number])
MapperWorkerCallbackHandler._add_task(task,
spec,
queue_name)
|
def _schedule_shards(cls,
spec,
readers,
queue_name,
base_path,
mr_state)
|
Prepares shard states and schedules their execution.
Even though this method does not schedule shard tasks and save shard states
transactionally, it's safe for taskqueue to retry this logic because
the initial shard_state for each shard is the same from any retry.
This is an important yet reasonable assumption on model.ShardState.
Args:
spec: mapreduce specification as MapreduceSpec.
readers: list of InputReaders describing shard splits.
queue_name: The queue to run this job on.
base_path: The base url path of mapreduce callbacks.
mr_state: The MapReduceState of current job.
| 3.764115
| 3.469064
| 1.085052
|
if state is None:
logging.warning(
"Mapreduce State for job %s is missing. Dropping Task.",
mr_id)
return False
if not state.active:
logging.warning(
"Mapreduce %s is not active. Looks like spurious task "
"execution. Dropping Task.", mr_id)
return False
return True
|
def _check_mr_state(cls, state, mr_id)
|
Check MapreduceState.
Args:
state: a MapreduceState instance.
mr_id: mapreduce id.
Returns:
True if state is valid. False if not and this task should be dropped.
| 4.978769
| 4.128577
| 1.205929
|
# Mapper spec as form arguments.
mapreduce_name = self._get_required_param("name")
mapper_input_reader_spec = self._get_required_param("mapper_input_reader")
mapper_handler_spec = self._get_required_param("mapper_handler")
mapper_output_writer_spec = self.request.get("mapper_output_writer")
mapper_params = self._get_params(
"mapper_params_validator", "mapper_params.")
params = self._get_params(
"params_validator", "params.")
# Default values.
mr_params = map_job.JobConfig._get_default_mr_params()
mr_params.update(params)
if "queue_name" in mapper_params:
mr_params["queue_name"] = mapper_params["queue_name"]
# Set some mapper param defaults if not present.
mapper_params["processing_rate"] = int(mapper_params.get(
"processing_rate") or parameters.config.PROCESSING_RATE_PER_SEC)
# Validate the Mapper spec, handler, and input reader.
mapper_spec = model.MapperSpec(
mapper_handler_spec,
mapper_input_reader_spec,
mapper_params,
int(mapper_params.get("shard_count", parameters.config.SHARD_COUNT)),
output_writer_spec=mapper_output_writer_spec)
mapreduce_id = self._start_map(
mapreduce_name,
mapper_spec,
mr_params,
queue_name=mr_params["queue_name"],
_app=mapper_params.get("_app"))
self.json_response["mapreduce_id"] = mapreduce_id
|
def handle(self)
|
Handles start request.
| 3.386479
| 3.272982
| 1.034677
|
params_validator = self.request.get(validator_parameter)
user_params = {}
for key in self.request.arguments():
if key.startswith(name_prefix):
values = self.request.get_all(key)
adjusted_key = key[len(name_prefix):]
if len(values) == 1:
user_params[adjusted_key] = values[0]
else:
user_params[adjusted_key] = values
if params_validator:
resolved_validator = util.for_name(params_validator)
resolved_validator(user_params)
return user_params
|
def _get_params(self, validator_parameter, name_prefix)
|
Retrieves additional user-supplied params for the job and validates them.
Args:
validator_parameter: name of the request parameter which supplies
validator for this parameter set.
name_prefix: common prefix for all parameter names in the request.
Raises:
Any exception raised by the 'params_validator' request parameter if
the params fail to validate.
Returns:
The user parameters.
| 2.523559
| 2.522925
| 1.000251
|
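The parameter extraction above is a prefix-stripping pass over the request arguments. A small sketch against a plain dict standing in for the webapp request object (the argument names in the example are hypothetical):

```python
def extract_prefixed_params(arguments, name_prefix):
    """Collect request arguments whose names start with name_prefix."""
    user_params = {}
    for key, values in arguments.items():
        if not key.startswith(name_prefix):
            continue
        adjusted_key = key[len(name_prefix):]
        # Single-valued arguments are unwrapped, repeated ones kept as lists.
        user_params[adjusted_key] = values[0] if len(values) == 1 else values
    return user_params


args = {
    "name": ["word_count"],
    "mapper_params.entity_kind": ["Document"],
    "mapper_params.filters": ["lang = en", "year = 2014"],
}
print(extract_prefixed_params(args, "mapper_params."))
# {'entity_kind': 'Document', 'filters': ['lang = en', 'year = 2014']}
```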
value = self.request.get(param_name)
if not value:
raise errors.NotEnoughArgumentsError(param_name + " not specified")
return value
|
def _get_required_param(self, param_name)
|
Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
errors.NotEnoughArgumentsError: if parameter is not specified.
| 4.089744
| 3.239428
| 1.26249
|
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
# Validate input reader.
mapper_input_reader_class = mapper_spec.input_reader_class()
mapper_input_reader_class.validate(mapper_spec)
# Validate output writer.
mapper_output_writer_class = mapper_spec.output_writer_class()
if mapper_output_writer_class:
mapper_output_writer_class.validate(mapper_spec)
# Create a new id and mr spec.
mapreduce_id = model.MapreduceState.new_mapreduce_id()
mapreduce_spec = model.MapreduceSpec(
name,
mapreduce_id,
mapper_spec.to_json(),
mapreduce_params,
hooks_class_name)
# Validate mapper handler.
ctx = context.Context(mapreduce_spec, None)
context.Context._set(ctx)
try:
# pylint: disable=pointless-statement
mapper_spec.handler
finally:
context.Context._set(None)
# Save states and enqueue task.
if in_xg_transaction:
propagation = db.MANDATORY
else:
propagation = db.INDEPENDENT
@db.transactional(propagation=propagation)
def _txn():
cls._create_and_save_state(mapreduce_spec, _app)
cls._add_kickoff_task(mapreduce_params["base_path"], mapreduce_spec, eta,
countdown, queue_name)
_txn()
return mapreduce_id
|
def _start_map(cls,
name,
mapper_spec,
mapreduce_params,
queue_name,
eta=None,
countdown=None,
hooks_class_name=None,
_app=None,
in_xg_transaction=False)
|
See control.start_map.
Requirements for this method:
1. The request that invokes this method can either be regular or
from taskqueue. So taskqueue specific headers can not be used.
2. Each invocation transactionally starts an isolated mapreduce job with
a unique id. MapreduceState should be immediately available after
returning. See control.start_map's doc on transactional.
3. Method should be lightweight.
| 3.127779
| 3.177702
| 0.98429
|
state = model.MapreduceState.create_new(mapreduce_spec.mapreduce_id)
state.mapreduce_spec = mapreduce_spec
state.active = True
state.active_shards = 0
if _app:
state.app_id = _app
config = util.create_datastore_write_config(mapreduce_spec)
state.put(config=config)
return state
|
def _create_and_save_state(cls, mapreduce_spec, _app)
|
Save mapreduce state to datastore.
Save state to datastore so that UI can see it immediately.
Args:
mapreduce_spec: model.MapreduceSpec,
_app: app id if specified. None otherwise.
Returns:
The saved Mapreduce state.
| 2.812469
| 3.142813
| 0.894889
|
params = {"mapreduce_id": mapreduce_spec.mapreduce_id}
# Task is not named so that it can be added within a transaction.
kickoff_task = taskqueue.Task(
url=base_path + "/kickoffjob_callback/" + mapreduce_spec.mapreduce_id,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id),
params=params,
eta=eta,
countdown=countdown)
hooks = mapreduce_spec.get_hooks()
if hooks is not None:
try:
hooks.enqueue_kickoff_task(kickoff_task, queue_name)
return
except NotImplementedError:
pass
kickoff_task.add(queue_name, transactional=True)
|
def _add_kickoff_task(cls,
base_path,
mapreduce_spec,
eta,
countdown,
queue_name)
|
Enqueues a new kickoff task.
| 3.32916
| 3.351873
| 0.993224
|
task_name = mapreduce_spec.mapreduce_id + "-finalize"
finalize_task = taskqueue.Task(
name=task_name,
url=(mapreduce_spec.params["base_path"] + "/finalizejob_callback/" +
mapreduce_spec.mapreduce_id),
params={"mapreduce_id": mapreduce_spec.mapreduce_id},
headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
queue_name = util.get_queue_name(None)
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_controller_task",
finalize_task,
queue_name):
try:
finalize_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r already exists. %s: %s",
task_name, e.__class__, e)
|
def schedule(cls, mapreduce_spec)
|
Schedule finalize task.
Args:
mapreduce_spec: mapreduce specification as MapreduceSpec.
| 3.221651
| 3.287891
| 0.979853
|
if "input_reader" not in mapper_spec.params:
message = ("Input reader's parameters should be specified in "
"input_reader subdictionary.")
if not allow_old or allowed_keys:
raise errors.BadReaderParamsError(message)
params = mapper_spec.params
params = dict((str(n), v) for n, v in params.iteritems())
else:
if not isinstance(mapper_spec.params.get("input_reader"), dict):
raise errors.BadReaderParamsError(
"Input reader parameters should be a dictionary")
params = mapper_spec.params.get("input_reader")
params = dict((str(n), v) for n, v in params.iteritems())
if allowed_keys:
params_diff = set(params.keys()) - allowed_keys
if params_diff:
raise errors.BadReaderParamsError(
"Invalid input_reader parameters: %s" % ",".join(params_diff))
return params
|
def _get_params(mapper_spec, allowed_keys=None, allow_old=True)
|
Obtain input reader parameters.
Utility function for input reader implementations. Fetches parameters
from the mapreduce specification, giving appropriate usage warnings.
Args:
mapper_spec: The MapperSpec for the job
allowed_keys: set of all allowed keys in parameters as strings. If it is not
None, then parameters are expected to be in a separate "input_reader"
subdictionary of mapper_spec parameters.
allow_old: Allow parameters to exist outside of the input_reader
subdictionary for compatibility.
Returns:
mapper parameters as dict
Raises:
BadReaderParamsError: if parameters are invalid/missing or not allowed.
| 2.502674
| 2.2342
| 1.120166
|
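The lookup above prefers a nested "input_reader" subdictionary but tolerates old-style flat parameters, and rejects unknown keys when a whitelist is given. A minimal sketch of the same control flow over plain dicts (ValueError stands in for BadReaderParamsError):

```python
def get_reader_params(mapper_params, allowed_keys=None, allow_old=True):
    """Fetch input-reader params, preferring the 'input_reader' subdict."""
    if "input_reader" not in mapper_params:
        if not allow_old or allowed_keys:
            raise ValueError("Input reader's parameters should be specified "
                             "in the input_reader subdictionary.")
        params = dict(mapper_params)
    else:
        params = dict(mapper_params["input_reader"])
    if allowed_keys:
        extra = set(params) - allowed_keys
        if extra:
            raise ValueError("Invalid input_reader parameters: %s"
                             % ",".join(sorted(extra)))
    return params


print(get_reader_params({"input_reader": {"entity_kind": "Doc"}},
                        allowed_keys={"entity_kind", "batch_size"}))
# {'entity_kind': 'Doc'}
```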
if shard_count == 1:
# With one shard we don't need to calculate any split points at all.
return [key_range.KeyRange(namespace=namespace, _app=app)]
ds_query = datastore.Query(kind=raw_entity_kind,
namespace=namespace,
_app=app,
keys_only=True)
ds_query.Order("__scatter__")
oversampling_factor = 32
random_keys = None
if filters:
ds_query_with_filters = copy.copy(ds_query)
for (key, op, value) in filters:
ds_query_with_filters.update({'%s %s' % (key, op): value})
try:
random_keys = ds_query_with_filters.Get(shard_count *
oversampling_factor)
except db.NeedIndexError, why:
logging.warning('Need to add an index for optimal mapreduce-input'
' splitting:\n%s' % why)
# We'll try again without the filter. We hope the filter
# will filter keys uniformly across the key-name space!
if not random_keys:
random_keys = ds_query.Get(shard_count * oversampling_factor)
if not random_keys:
# There are no entities with scatter property. We have no idea
# how to split.
return ([key_range.KeyRange(namespace=namespace, _app=app)] +
[None] * (shard_count - 1))
random_keys.sort()
if len(random_keys) >= shard_count:
# We've got a lot of scatter values. Sample them down.
random_keys = cls._choose_split_points(random_keys, shard_count)
k_ranges = []
k_ranges.append(key_range.KeyRange(
key_start=None,
key_end=random_keys[0],
direction=key_range.KeyRange.ASC,
include_start=False,
include_end=False,
namespace=namespace,
_app=app))
for i in range(0, len(random_keys) - 1):
k_ranges.append(key_range.KeyRange(
key_start=random_keys[i],
key_end=random_keys[i+1],
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
k_ranges.append(key_range.KeyRange(
key_start=random_keys[-1],
key_end=None,
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
if len(k_ranges) < shard_count:
# We need to have as many shards as it was requested. Add some Nones.
k_ranges += [None] * (shard_count - len(k_ranges))
return k_ranges
|
def _split_ns_by_scatter(cls,
shard_count,
namespace,
raw_entity_kind,
filters,
app)
|
Split a namespace by scatter index into key_range.KeyRange.
TODO(user): Power this with key_range.KeyRange.compute_split_points.
Args:
shard_count: number of shards.
namespace: namespace name to split. str.
raw_entity_kind: low level datastore API entity kind.
filters: a list of (property, operator, value) filter tuples, or None.
app: app id in str.
Returns:
A list of key_range.KeyRange objects. If there are not enough entities to
split into the requested shards, the returned list will contain KeyRanges
ordered lexicographically with any Nones appearing at the end.
| 2.789306
| 2.691544
| 1.036322
|
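Once the scatter keys are sampled and sorted, the namespace is carved into half-open ranges: an open-ended range before the first split point, one range between each consecutive pair, and an open-ended range after the last. A sketch of that construction using plain values instead of datastore keys:

```python
def ranges_from_split_points(split_points):
    """Turn sorted split points into (start, end) pairs; None means open-ended."""
    if not split_points:
        return [(None, None)]
    ranges = [(None, split_points[0])]
    for start, end in zip(split_points, split_points[1:]):
        ranges.append((start, end))
    ranges.append((split_points[-1], None))
    return ranges


print(ranges_from_split_points(["g", "n", "t"]))
# [(None, 'g'), ('g', 'n'), ('n', 't'), ('t', None)]
```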
assert len(sorted_keys) >= shard_count
index_stride = len(sorted_keys) / float(shard_count)
return [sorted_keys[int(round(index_stride * i))]
for i in range(1, shard_count)]
|
def _choose_split_points(cls, sorted_keys, shard_count)
|
Returns the best split points given a random set of datastore.Keys.
| 2.933829
| 2.668986
| 1.09923
|
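The sampling above keeps shard_count - 1 evenly spaced keys out of the oversampled, sorted scatter keys. The same index arithmetic in isolation:

```python
def choose_split_points(sorted_keys, shard_count):
    """Pick shard_count - 1 evenly spaced keys out of an oversampled list."""
    assert len(sorted_keys) >= shard_count
    index_stride = len(sorted_keys) / float(shard_count)
    return [sorted_keys[int(round(index_stride * i))]
            for i in range(1, shard_count)]


# 32x oversampling of 4 shards yields 128 keys; keep 3 split points.
keys = list(range(128))
print(choose_split_points(keys, 4))  # [32, 64, 96]
```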
params = _get_params(mapper_spec)
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing input reader parameter 'entity_kind'")
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise BadReaderParamsError("Bad batch size: %s" % e)
if cls.OVERSPLIT_FACTOR_PARAM in params:
try:
oversplit_factor = int(params[cls.OVERSPLIT_FACTOR_PARAM])
if oversplit_factor < 1:
raise BadReaderParamsError("Bad oversplit factor:"
" %s" % oversplit_factor)
except ValueError, e:
raise BadReaderParamsError("Bad oversplit factor: %s" % e)
try:
bool(params.get(cls.KEYS_ONLY_PARAM, False))
except:
raise BadReaderParamsError("keys_only expects a boolean value but got %s",
params[cls.KEYS_ONLY_PARAM])
if cls.NAMESPACE_PARAM in params:
if not isinstance(params[cls.NAMESPACE_PARAM],
(str, unicode, type(None))):
raise BadReaderParamsError(
"Expected a single namespace string")
if cls.NAMESPACES_PARAM in params:
raise BadReaderParamsError("Multiple namespaces are no longer supported")
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if not isinstance(filters, list):
raise BadReaderParamsError("Expected list for filters parameter")
for f in filters:
if not isinstance(f, (tuple, list)):
raise BadReaderParamsError("Filter should be a tuple or list: %s", f)
if len(f) != 3:
raise BadReaderParamsError("Filter should be a 3-tuple: %s", f)
prop, op, _ = f
if not isinstance(prop, basestring):
raise BadReaderParamsError("Property should be string: %s", prop)
if not isinstance(op, basestring):
raise BadReaderParamsError("Operator should be string: %s", op)
|
def validate(cls, mapper_spec)
|
Inherit docs.
| 2.069731
| 2.062214
| 1.003645
|
super(RawDatastoreInputReader, cls).validate(mapper_spec)
params = _get_params(mapper_spec)
entity_kind = params[cls.ENTITY_KIND_PARAM]
if "." in entity_kind:
logging.warning(
". detected in entity kind %s specified for reader %s."
"Assuming entity kind contains the dot.",
entity_kind, cls.__name__)
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
for f in filters:
if f[1] != "=":
raise BadReaderParamsError(
"Only equality filters are supported: %s", f)
|
def validate(cls, mapper_spec)
|
Inherit docs.
| 4.11891
| 4.083391
| 1.008698
|
super(DatastoreInputReader, cls).validate(mapper_spec)
params = _get_params(mapper_spec)
entity_kind = params[cls.ENTITY_KIND_PARAM]
# Fail fast if Model cannot be located.
try:
model_class = util.for_name(entity_kind)
except ImportError, e:
raise BadReaderParamsError("Bad entity kind: %s" % e)
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if issubclass(model_class, db.Model):
cls._validate_filters(filters, model_class)
else:
cls._validate_filters_ndb(filters, model_class)
property_range.PropertyRange(filters, entity_kind)
|
def validate(cls, mapper_spec)
|
Inherit docs.
| 3.584007
| 3.546192
| 1.010664
|
if not filters:
return
properties = model_class._properties
for idx, f in enumerate(filters):
prop, ineq, val = f
if prop not in properties:
raise errors.BadReaderParamsError(
"Property %s is not defined for entity type %s",
prop, model_class._get_kind())
# Attempt to cast the value to a KeyProperty if appropriate.
# This enables filtering against keys.
try:
if (isinstance(val, basestring) and
isinstance(properties[prop],
(ndb.KeyProperty, ndb.ComputedProperty))):
val = ndb.Key(urlsafe=val)
filters[idx] = [prop, ineq, val]
except:
pass
# Validate the value of each filter. We need to know filters have
# valid value to carry out splits.
try:
properties[prop]._do_validate(val)
except db.BadValueError, e:
raise errors.BadReaderParamsError(e)
|
def _validate_filters_ndb(cls, filters, model_class)
|
Validate ndb.Model filters.
| 4.646972
| 4.544138
| 1.02263
|
shard_count = mapper_spec.shard_count
query_spec = cls._get_query_spec(mapper_spec)
if not property_range.should_shard_by_property_range(query_spec.filters):
return super(DatastoreInputReader, cls).split_input(mapper_spec)
# Artificially increase the number of shards to get a more even split.
# For example, if we are creating 7 shards for one week of data based on a
# Day property and the data points tend to be clumped on certain days (say,
# Monday and Wednesday), instead of assigning each shard a single day of
# the week, we will split each day into "oversplit_factor" pieces, and
# assign each shard "oversplit_factor" pieces with "1 / oversplit_factor"
# the work, so that the data from Monday and Wednesday is more evenly
# spread across all shards.
oversplit_factor = query_spec.oversplit_factor
oversplit_shard_count = oversplit_factor * shard_count
p_range = property_range.PropertyRange(query_spec.filters,
query_spec.model_class_path)
p_ranges = p_range.split(oversplit_shard_count)
# User specified a namespace.
if query_spec.ns is not None:
ns_range = namespace_range.NamespaceRange(
namespace_start=query_spec.ns,
namespace_end=query_spec.ns,
_app=query_spec.app)
ns_ranges = [copy.copy(ns_range) for _ in p_ranges]
else:
ns_keys = namespace_range.get_namespace_keys(
query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)
if not ns_keys:
return
# User doesn't specify ns but the number of ns is small.
# We still split by property range.
if len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD:
ns_ranges = [namespace_range.NamespaceRange(_app=query_spec.app)
for _ in p_ranges]
# Lots of namespaces. Split by ns.
else:
ns_ranges = namespace_range.NamespaceRange.split(n=oversplit_shard_count,
contiguous=False,
can_query=lambda: True,
_app=query_spec.app)
p_ranges = [copy.copy(p_range) for _ in ns_ranges]
assert len(p_ranges) == len(ns_ranges)
iters = [
db_iters.RangeIteratorFactory.create_property_range_iterator(
p, ns, query_spec) for p, ns in zip(p_ranges, ns_ranges)]
# Reduce the number of ranges back down to the shard count.
# It's possible that we didn't split into enough shards even
# after oversplitting, in which case we don't need to do anything.
if len(iters) > shard_count:
# We cycle through the iterators and chain them together, e.g.
# if we look at the indices chained together, we get:
# Shard #0 gets 0, num_shards, 2 * num_shards, ...
# Shard #1 gets 1, num_shards + 1, 2 * num_shards + 1, ...
# Shard #2 gets 2, num_shards + 2, 2 * num_shards + 2, ...
# and so on. This should split fairly evenly.
iters = [
db_iters.RangeIteratorFactory.create_multi_property_range_iterator(
[iters[i] for i in xrange(start_index, len(iters), shard_count)]
) for start_index in xrange(shard_count)
]
return [cls(i) for i in iters]
|
def split_input(cls, mapper_spec)
|
Inherit docs.
| 3.890833
| 3.894628
| 0.999025
|
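When oversplitting produces more ranges than shards, the ranges are dealt back out round-robin, so shard k gets indices k, k + num_shards, k + 2 * num_shards, and so on, exactly as the comment describes. That assignment is just strided slicing; a standalone sketch:

```python
def assign_round_robin(items, shard_count):
    """Deal items to shards round-robin: shard k gets items[k::shard_count]."""
    return [items[k::shard_count] for k in range(shard_count)]


# 7 oversplit ranges dealt to 3 shards.
print(assign_round_robin(["r0", "r1", "r2", "r3", "r4", "r5", "r6"], 3))
# [['r0', 'r3', 'r6'], ['r1', 'r4'], ['r2', 'r5']]
```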
while True:
if self._current_key_range is None:
if self._key_ranges:
self._current_key_range = self._key_ranges.pop()
# The most recently popped key_range may be None, so continue here
# to find the next keyrange that's valid.
continue
else:
break
for key, o in self._iter_key_range(
copy.deepcopy(self._current_key_range)):
# The caller must consume yielded values so advancing the KeyRange
# before yielding is safe.
self._current_key_range.advance(key)
yield o
self._current_key_range = None
|
def _iter_key_ranges(self)
|
Iterates over self._key_ranges, delegating to self._iter_key_range().
| 5.026529
| 4.492535
| 1.118863
|
while True:
if self._current_key_range is None:
query = self._ns_range.make_datastore_query()
namespace_result = query.Get(1)
if not namespace_result:
break
namespace = namespace_result[0].name() or ""
self._current_key_range = key_range.KeyRange(
namespace=namespace, _app=self._ns_range.app)
yield ALLOW_CHECKPOINT
for key, o in self._iter_key_range(
copy.deepcopy(self._current_key_range)):
# The caller must consume yielded values so advancing the KeyRange
# before yielding is safe.
self._current_key_range.advance(key)
yield o
if (self._ns_range.is_single_namespace or
self._current_key_range.namespace == self._ns_range.namespace_end):
break
self._ns_range = self._ns_range.with_start_after(
self._current_key_range.namespace)
self._current_key_range = None
|
def _iter_ns_range(self)
|
Iterates over self._ns_range, delegating to self._iter_key_range().
| 4.680722
| 4.401479
| 1.063443
|
raw_entity_kind = cls._get_raw_entity_kind(entity_kind)
if shard_count == 1:
# With one shard we don't need to calculate any splitpoints at all.
return [key_range.KeyRange(namespace=namespace, _app=app)]
ds_query = datastore.Query(kind=raw_entity_kind,
namespace=namespace,
_app=app,
keys_only=True)
ds_query.Order("__scatter__")
random_keys = ds_query.Get(shard_count * cls._OVERSAMPLING_FACTOR)
if not random_keys:
# There are no entities with scatter property. We have no idea
# how to split.
return ([key_range.KeyRange(namespace=namespace, _app=app)] +
[None] * (shard_count - 1))
random_keys.sort()
if len(random_keys) >= shard_count:
# We've got a lot of scatter values. Sample them down.
random_keys = cls._choose_split_points(random_keys, shard_count)
# pylint: disable=redefined-outer-name
key_ranges = []
key_ranges.append(key_range.KeyRange(
key_start=None,
key_end=random_keys[0],
direction=key_range.KeyRange.ASC,
include_start=False,
include_end=False,
namespace=namespace,
_app=app))
for i in range(0, len(random_keys) - 1):
key_ranges.append(key_range.KeyRange(
key_start=random_keys[i],
key_end=random_keys[i+1],
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
key_ranges.append(key_range.KeyRange(
key_start=random_keys[-1],
key_end=None,
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
if len(key_ranges) < shard_count:
# We need to have as many shards as it was requested. Add some Nones.
key_ranges += [None] * (shard_count - len(key_ranges))
return key_ranges
|
def _split_input_from_namespace(cls, app, namespace, entity_kind,
shard_count)
|
Helper for _split_input_from_params.
If there are not enough Entities to make all of the given shards, the
returned list of KeyRanges will include Nones. The returned list will
contain KeyRanges ordered lexicographically with any Nones appearing at the
end.
Args:
app: the app.
namespace: the namespace.
entity_kind: entity kind as string.
shard_count: the number of shards.
Returns:
KeyRange objects.
| 2.386018
| 2.322933
| 1.027158
|
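The scatter-based split above oversamples __scatter__ keys (cls._OVERSAMPLING_FACTOR times the shard count) and then thins the sorted sample down to shard_count - 1 boundaries. A hedged sketch of that thinning step follows; pick_split_points is a hypothetical stand-in for the reader's own _choose_split_points helper, which is not shown in this excerpt.

def pick_split_points(sorted_keys, shard_count):
    # Keep shard_count - 1 evenly spaced keys from an oversampled, sorted
    # list of scatter keys; these become the key range boundaries.
    assert len(sorted_keys) >= shard_count
    index_stride = len(sorted_keys) / float(shard_count)
    return [sorted_keys[int(round(index_stride * i)) - 1]
            for i in range(1, shard_count)]

# 32x oversampling of integer "keys", split into 4 shards.
sample = sorted(range(128))
boundaries = pick_split_points(sample, 4)
assert boundaries == [31, 63, 95]      # 3 boundaries => 4 key ranges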
# pylint: disable=redefined-outer-name
key_ranges = [] # KeyRanges for all namespaces
for namespace in namespaces:
key_ranges.extend(
cls._split_input_from_namespace(app,
namespace,
entity_kind_name,
shard_count))
# Divide the KeyRanges into shard_count shards. The KeyRanges for different
# namespaces might be very different in size so the assignment of KeyRanges
# to shards is done round-robin.
shared_ranges = [[] for _ in range(shard_count)]
for i, k_range in enumerate(key_ranges):
shared_ranges[i % shard_count].append(k_range)
batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
return [cls(entity_kind_name,
key_ranges=key_ranges,
ns_range=None,
batch_size=batch_size)
for key_ranges in shared_ranges if key_ranges]
|
def _split_input_from_params(cls, app, namespaces, entity_kind_name,
params, shard_count)
|
Return input reader objects. Helper for split_input.
| 3.424651
| 3.35904
| 1.019533
|
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec)
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise BadReaderParamsError("Bad batch size: %s" % e)
if cls.NAMESPACE_PARAM in params:
if not isinstance(params[cls.NAMESPACE_PARAM],
(str, unicode, type(None))):
raise BadReaderParamsError(
"Expected a single namespace string")
if cls.NAMESPACES_PARAM in params:
raise BadReaderParamsError("Multiple namespaces are no longer supported")
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if not isinstance(filters, list):
raise BadReaderParamsError("Expected list for filters parameter")
for f in filters:
if not isinstance(f, (tuple, list)):
raise BadReaderParamsError("Filter should be a tuple or list: %s", f)
if len(f) != 3:
raise BadReaderParamsError("Filter should be a 3-tuple: %s", f)
if not isinstance(f[0], basestring):
raise BadReaderParamsError("First element should be string: %s", f)
if f[1] != "=":
raise BadReaderParamsError(
"Only equality filters are supported: %s", f)
|
def validate(cls, mapper_spec)
|
Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
| 2.103568
| 2.02946
| 1.036516
|
params = _get_params(mapper_spec)
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
shard_count = mapper_spec.shard_count
namespace = params.get(cls.NAMESPACE_PARAM)
app = params.get(cls._APP_PARAM)
filters = params.get(cls.FILTERS_PARAM)
if namespace is None:
# It is difficult to efficiently shard large numbers of namespaces because
# there can be an arbitrary number of them. So the strategy is:
# 1. if there are a small number of namespaces in the datastore then
# generate one KeyRange per namespace per shard and assign each shard a
# KeyRange for every namespace. This should lead to nearly perfect
# sharding.
# 2. if there are a large number of namespaces in the datastore then
# generate one NamespaceRange per worker. This can lead to very bad
# sharding because namespaces can contain very different numbers of
# entities and each NamespaceRange may contain very different numbers
# of namespaces.
namespace_query = datastore.Query("__namespace__",
keys_only=True,
_app=app)
namespace_keys = namespace_query.Get(
limit=cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)
if len(namespace_keys) > cls.MAX_NAMESPACES_FOR_KEY_SHARD:
ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
contiguous=True,
_app=app)
return [cls(entity_kind_name,
key_ranges=None,
ns_range=ns_range,
batch_size=batch_size,
filters=filters)
for ns_range in ns_ranges]
elif not namespace_keys:
return [cls(entity_kind_name,
key_ranges=None,
ns_range=namespace_range.NamespaceRange(_app=app),
batch_size=shard_count,
filters=filters)]
else:
namespaces = [namespace_key.name() or ""
for namespace_key in namespace_keys]
else:
namespaces = [namespace]
readers = cls._split_input_from_params(
app, namespaces, entity_kind_name, params, shard_count)
if filters:
for reader in readers:
reader._filters = filters
return readers
|
def split_input(cls, mapper_spec)
|
Splits query into shards without fetching query results.
Tries as best as it can to split the whole query result set into equal
shards. Due to difficulty of making the perfect split, resulting shards'
sizes might differ significantly from each other.
Args:
mapper_spec: MapperSpec with params containing 'entity_kind'.
May have 'namespace' in the params as a string containing a single
namespace. If specified then the input reader will only yield values
in the given namespace. If 'namespace' is not given then values from
all namespaces will be yielded. May also have 'batch_size' in the params
to specify the number of entities to process in each batch.
Returns:
A list of InputReader objects. If the query results are empty then the
empty list will be returned. Otherwise, the list will always have a length
equal to number_of_shards but may be padded with Nones if there are too
few results for effective sharding.
| 3.518174
| 3.437121
| 1.023582
|
if self._key_ranges is None:
key_ranges_json = None
else:
key_ranges_json = []
for k in self._key_ranges:
if k:
key_ranges_json.append(k.to_json())
else:
key_ranges_json.append(None)
if self._ns_range is None:
namespace_range_json = None
else:
namespace_range_json = self._ns_range.to_json_object()
if self._current_key_range is None:
current_key_range_json = None
else:
current_key_range_json = self._current_key_range.to_json()
json_dict = {self.KEY_RANGE_PARAM: key_ranges_json,
self.NAMESPACE_RANGE_PARAM: namespace_range_json,
self.CURRENT_KEY_RANGE_PARAM: current_key_range_json,
self.ENTITY_KIND_PARAM: self._entity_kind,
self.BATCH_SIZE_PARAM: self._batch_size,
self.FILTERS_PARAM: self._filters}
return json_dict
|
def to_json(self)
|
Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
| 1.939985
| 1.908547
| 1.016472
|
if json[cls.KEY_RANGE_PARAM] is None:
# pylint: disable=redefined-outer-name
key_ranges = None
else:
key_ranges = []
for k in json[cls.KEY_RANGE_PARAM]:
if k:
key_ranges.append(key_range.KeyRange.from_json(k))
else:
key_ranges.append(None)
if json[cls.NAMESPACE_RANGE_PARAM] is None:
ns_range = None
else:
ns_range = namespace_range.NamespaceRange.from_json_object(
json[cls.NAMESPACE_RANGE_PARAM])
if json[cls.CURRENT_KEY_RANGE_PARAM] is None:
current_key_range = None
else:
current_key_range = key_range.KeyRange.from_json(
json[cls.CURRENT_KEY_RANGE_PARAM])
return cls(
json[cls.ENTITY_KIND_PARAM],
key_ranges,
ns_range,
json[cls.BATCH_SIZE_PARAM],
current_key_range,
filters=json.get(cls.FILTERS_PARAM))
|
def from_json(cls, json)
|
Create new DatastoreInputReader from the json, encoded by to_json.
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json.
| 2.097698
| 2.052743
| 1.0219
|
self._has_iterated = True
if self._read_before_start:
self._blob_reader.readline()
self._read_before_start = False
start_position = self._blob_reader.tell()
if start_position > self._end_position:
raise StopIteration()
line = self._blob_reader.readline()
if not line:
raise StopIteration()
return start_position, line.rstrip("\n")
|
def next(self)
|
Returns the next input as an (offset, line) tuple.
| 3.615221
| 3.257577
| 1.109788
|
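The next() implementation above uses a standard trick for splitting text by byte offset: every reader except the one that starts at offset 0 discards its possibly partial first line, and keeps reading until the next line would start past its end position, so each line is consumed by exactly one shard. A small self-contained sketch over an in-memory buffer, with io.BytesIO standing in for the blob reader:

import io

def read_lines(data, start, end):
    # Yield (offset, line) pairs for the byte range [start, end],
    # skipping a partial first line unless we start at offset 0.
    stream = io.BytesIO(data)
    stream.seek(start)
    if start > 0:
        stream.readline()          # discard the partial first line
    while True:
        offset = stream.tell()
        if offset > end:
            return
        line = stream.readline()
        if not line:
            return
        yield offset, line.rstrip(b"\n")

data = b"alpha\nbeta\ngamma\ndelta\n"
# Split roughly in half; the two halves together cover every line exactly once.
first = [l for _, l in read_lines(data, 0, 11)]
second = [l for _, l in read_lines(data, 11, len(data))]
assert first + second == [b"alpha", b"beta", b"gamma", b"delta"]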
new_pos = self._blob_reader.tell()
if self._has_iterated:
new_pos -= 1
return {self.BLOB_KEY_PARAM: self._blob_key,
self.INITIAL_POSITION_PARAM: new_pos,
self.END_POSITION_PARAM: self._end_position}
|
def to_json(self)
|
Returns a json-compatible input shard spec for the remaining inputs.
| 6.55948
| 5.451574
| 1.203227
|
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.INITIAL_POSITION_PARAM],
json[cls.END_POSITION_PARAM])
|
def from_json(cls, json)
|
Instantiates an instance of this InputReader for the given shard spec.
| 6.337469
| 5.393517
| 1.175016
|
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key)
|
def validate(cls, mapper_spec)
|
Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
| 2.829176
| 2.653938
| 1.066029
|
params = _get_params(mapper_spec)
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
blob_sizes = {}
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
blob_sizes[blob_key] = blob_info.size
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
shards_per_blob = shard_count // len(blob_keys)
if shards_per_blob == 0:
shards_per_blob = 1
chunks = []
for blob_key, blob_size in blob_sizes.items():
blob_chunk_size = blob_size // shards_per_blob
for i in xrange(shards_per_blob - 1):
chunks.append(BlobstoreLineInputReader.from_json(
{cls.BLOB_KEY_PARAM: blob_key,
cls.INITIAL_POSITION_PARAM: blob_chunk_size * i,
cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))
chunks.append(BlobstoreLineInputReader.from_json(
{cls.BLOB_KEY_PARAM: blob_key,
cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1),
cls.END_POSITION_PARAM: blob_size}))
return chunks
|
def split_input(cls, mapper_spec)
|
Returns a list of shard_count input_spec_shards for input_spec.
Args:
mapper_spec: The mapper specification to split from. Must contain
'blob_keys' parameter with one or more blob keys.
Returns:
A list of BlobstoreInputReaders corresponding to the specified shards.
| 2.214325
| 2.106567
| 1.051153
|
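A sketch of the chunking arithmetic used by split_input above: each blob is cut into shards_per_blob byte ranges of roughly equal size, with the final range extended to the blob's full size so integer division never drops trailing bytes. The name byte_ranges is illustrative, not a library function.

def byte_ranges(blob_size, shards_per_blob):
    # Equal-sized (start, end) chunks; the final chunk ends at blob_size so
    # no trailing bytes are lost to integer division.
    chunk = blob_size // shards_per_blob
    ranges = [(chunk * i, chunk * (i + 1)) for i in range(shards_per_blob - 1)]
    ranges.append((chunk * (shards_per_blob - 1), blob_size))
    return ranges

assert byte_ranges(100, 3) == [(0, 33), (33, 66), (66, 100)]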
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
return (entry, lambda: self._read(entry))
|
def next(self)
|
Returns the next input from this input reader as a (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
| 4.040734
| 3.553072
| 1.137251
|
start_time = time.time()
content = self._zip.read(entry.filename)
ctx = context.get()
if ctx:
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return content
|
def _read(self, entry)
|
Read entry content.
Args:
entry: zip file entry as zipfile.ZipInfo.
Returns:
Entry content as string.
| 4.913154
| 4.29622
| 1.143599
|
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.START_INDEX_PARAM],
json[cls.END_INDEX_PARAM])
|
def from_json(cls, json)
|
Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
| 5.301953
| 5.983547
| 0.886089
|
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_INDEX_PARAM: self._start_index,
self.END_INDEX_PARAM: self._end_index}
|
def to_json(self)
|
Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
| 3.936315
| 3.962543
| 0.993381
|
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEY_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_key' for mapper input")
blob_key = params[cls.BLOB_KEY_PARAM]
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key)
|
def validate(cls, mapper_spec)
|
Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
| 3.048602
| 2.734187
| 1.114994
|
params = _get_params(mapper_spec)
blob_key = params[cls.BLOB_KEY_PARAM]
zip_input = zipfile.ZipFile(_reader(blob_key))
zfiles = zip_input.infolist()
total_size = sum(x.file_size for x in zfiles)
num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT)
size_per_shard = total_size // num_shards
# Break the list of files into sublists, each of approximately
# size_per_shard bytes.
shard_start_indexes = [0]
current_shard_size = 0
for i, fileinfo in enumerate(zfiles):
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
shard_start_indexes.append(i + 1)
current_shard_size = 0
if shard_start_indexes[-1] != len(zfiles):
shard_start_indexes.append(len(zfiles))
return [cls(blob_key, start_index, end_index, _reader)
for start_index, end_index
in zip(shard_start_indexes, shard_start_indexes[1:])]
|
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader)
|
Returns a list of input shard states for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_key' parameter with one blob key.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning files within the zip.
| 2.130667
| 2.114821
| 1.007493
|
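The zip splitter above walks the archive's entries in order and starts a new shard whenever the running total of file_size reaches size_per_shard. A standalone sketch of that greedy cut-point logic, with plain integers standing in for zipfile.ZipInfo.file_size values:

def shard_boundaries(sizes, num_shards):
    # Return index pairs (start, end) over `sizes`, cutting whenever the
    # cumulative size reaches total // num_shards, as split_input does.
    size_per_shard = sum(sizes) // num_shards
    starts = [0]
    current = 0
    for i, size in enumerate(sizes):
        current += size
        if current >= size_per_shard:
            starts.append(i + 1)
            current = 0
    if starts[-1] != len(sizes):
        starts.append(len(sizes))
    return list(zip(starts, starts[1:]))

# Six files, two shards: the cut lands once the running total reaches 60.
assert shard_boundaries([10, 20, 30, 10, 20, 30], 2) == [(0, 3), (3, 6)]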
params = _get_params(mapper_spec)
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
blob_files = {}
total_size = 0
for blob_key in blob_keys:
zip_input = zipfile.ZipFile(_reader(blob_key))
blob_files[blob_key] = zip_input.infolist()
total_size += sum(x.file_size for x in blob_files[blob_key])
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
# We can break on both blob key and file-within-zip boundaries.
# A shard will span at minimum a single blob key, but may only
# handle a few files within a blob.
size_per_shard = total_size // shard_count
readers = []
for blob_key in blob_keys:
bfiles = blob_files[blob_key]
current_shard_size = 0
start_file_index = 0
next_file_index = 0
for fileinfo in bfiles:
next_file_index += 1
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
current_shard_size = 0
start_file_index = next_file_index
if current_shard_size != 0:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
return readers
|
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader)
|
Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_keys' parameter with one or more blob keys.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning the subfiles within the blobs.
There will be at least one reader per blob, but it will otherwise
attempt to keep the expanded size even.
| 2.873805
| 2.843542
| 1.010643
|
if not self._filestream:
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_file_index:
self._end_file_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
value = self._zip.read(entry.filename)
self._filestream = StringIO.StringIO(value)
if self._initial_offset:
self._filestream.seek(self._initial_offset)
self._filestream.readline()
start_position = self._filestream.tell()
line = self._filestream.readline()
if not line:
# Done with this file in the zip. Move on to the next file.
self._filestream.close()
self._filestream = None
self._start_file_index += 1
self._initial_offset = 0
return self.next()
return ((self._blob_key, self._start_file_index, start_position),
line.rstrip("\n"))
|
def next(self)
|
Returns the next line from this input reader as (lineinfo, line) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple describes the source, it is itself
a tuple (blobkey, filenumber, byteoffset).
The second element of the tuple is the line found at that offset.
| 2.934108
| 2.834733
| 1.035056
|
if self._filestream:
offset = self._filestream.tell()
if offset:
offset -= 1
else:
offset = self._initial_offset
return offset
|
def _next_offset(self)
|
Return the offset of the next line to read.
| 5.166189
| 4.294587
| 1.202954
|
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_FILE_INDEX_PARAM: self._start_file_index,
self.END_FILE_INDEX_PARAM: self._end_file_index,
self.OFFSET_PARAM: self._next_offset()}
|
def to_json(self)
|
Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
| 4.158895
| 3.66309
| 1.135352
|
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.START_FILE_INDEX_PARAM],
json[cls.END_FILE_INDEX_PARAM],
json[cls.OFFSET_PARAM],
_reader)
|
def from_json(cls, json, _reader=blobstore.BlobReader)
|
Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
_reader: For dependency injection.
Returns:
An instance of the InputReader configured using the values of json.
| 3.717105
| 4.163477
| 0.892789
|
return {self.NAMESPACE_RANGE_PARAM: self.ns_range.to_json_object(),
self.BATCH_SIZE_PARAM: self._batch_size}
|
def to_json(self)
|
Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
| 11.883227
| 11.536929
| 1.030016
|
return cls(
namespace_range.NamespaceRange.from_json_object(
json[cls.NAMESPACE_RANGE_PARAM]),
json[cls.BATCH_SIZE_PARAM])
|
def from_json(cls, json)
|
Create new DatastoreInputReader from the json, encoded by to_json.
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json.
| 8.494304
| 8.651111
| 0.981874
|
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise BadReaderParamsError("Bad batch size: %s" % e)
|
def validate(cls, mapper_spec)
|
Validates mapper spec.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
| 2.57251
| 2.371127
| 1.084931
|
batch_size = int(_get_params(mapper_spec).get(
cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
shard_count = mapper_spec.shard_count
namespace_ranges = namespace_range.NamespaceRange.split(shard_count,
contiguous=True)
return [NamespaceInputReader(ns_range, batch_size)
for ns_range in namespace_ranges]
|
def split_input(cls, mapper_spec)
|
Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders.
| 4.957789
| 5.8197
| 0.851898
|
# Strip out unrecognized parameters, as introduced by b/5960884.
params = dict((str(k), v) for k, v in json.iteritems()
if k in cls._PARAMS)
# This is not symmetric with to_json() wrt. PROTOTYPE_REQUEST_PARAM because
# the constructor parameters need to be JSON-encodable, so the decoding
# needs to happen there anyways.
if cls._OFFSET_PARAM in params:
params[cls._OFFSET_PARAM] = base64.b64decode(params[cls._OFFSET_PARAM])
return cls(**params)
|
def from_json(cls, json)
|
Creates an instance of the InputReader for the given input shard's state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the given JSON parameters.
| 7.515567
| 7.713594
| 0.974327
|
params = dict(self.__params) # Shallow copy.
if self._PROTOTYPE_REQUEST_PARAM in params:
prototype_request = params[self._PROTOTYPE_REQUEST_PARAM]
params[self._PROTOTYPE_REQUEST_PARAM] = prototype_request.Encode()
if self._OFFSET_PARAM in params:
params[self._OFFSET_PARAM] = base64.b64encode(params[self._OFFSET_PARAM])
return params
|
def to_json(self)
|
Returns an input shard state for the remaining inputs.
Returns:
A JSON serializable version of the remaining input to read.
| 3.857345
| 3.654194
| 1.055594
|
params = _get_params(mapper_spec)
shard_count = mapper_spec.shard_count
# Pick out the overall start and end times and time step per shard.
start_time = params[cls.START_TIME_PARAM]
end_time = params[cls.END_TIME_PARAM]
seconds_per_shard = (end_time - start_time) / shard_count
# Create a LogInputReader for each shard, modulating the params as we go.
shards = []
for _ in xrange(shard_count - 1):
params[cls.END_TIME_PARAM] = (params[cls.START_TIME_PARAM] +
seconds_per_shard)
shards.append(LogInputReader(**params))
params[cls.START_TIME_PARAM] = params[cls.END_TIME_PARAM]
# Create a final shard to complete the time range.
params[cls.END_TIME_PARAM] = end_time
return shards + [LogInputReader(**params)]
|
def split_input(cls, mapper_spec)
|
Returns a list of input readers for the given input specification.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders.
| 2.797067
| 2.934412
| 0.953195
|
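A sketch of the time partitioning performed by split_input above: the [start_time, end_time) window is divided into shard_count slices of equal length, and the final slice is pinned to end_time so rounding never loses the tail of the range. The helper name time_slices is illustrative.

def time_slices(start_time, end_time, shard_count):
    # Equal (start, end) windows; the last window ends exactly at end_time.
    step = (end_time - start_time) / shard_count
    slices = []
    for i in range(shard_count - 1):
        slices.append((start_time + step * i, start_time + step * (i + 1)))
    slices.append((start_time + step * (shard_count - 1), end_time))
    return slices

parts = time_slices(1000.0, 2000.0, 4)
assert parts[0] == (1000.0, 1250.0)
assert parts[-1][1] == 2000.0
assert len(parts) == 4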
if mapper_spec.input_reader_class() != cls:
raise errors.BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec, allowed_keys=cls._PARAMS)
if (cls.VERSION_IDS_PARAM not in params and
cls.MODULE_VERSIONS_PARAM not in params):
raise errors.BadReaderParamsError("Must specify a list of version ids or "
"module/version ids for mapper input")
if (cls.VERSION_IDS_PARAM in params and
cls.MODULE_VERSIONS_PARAM in params):
raise errors.BadReaderParamsError("Can not supply both version ids or "
"module/version ids. Use only one.")
if (cls.START_TIME_PARAM not in params or
params[cls.START_TIME_PARAM] is None):
raise errors.BadReaderParamsError("Must specify a starting time for "
"mapper input")
if cls.END_TIME_PARAM not in params or params[cls.END_TIME_PARAM] is None:
params[cls.END_TIME_PARAM] = time.time()
if params[cls.START_TIME_PARAM] >= params[cls.END_TIME_PARAM]:
raise errors.BadReaderParamsError("The starting time cannot be later "
"than or the same as the ending time.")
if cls._PROTOTYPE_REQUEST_PARAM in params:
try:
params[cls._PROTOTYPE_REQUEST_PARAM] = log_service_pb.LogReadRequest(
params[cls._PROTOTYPE_REQUEST_PARAM])
except (TypeError, ProtocolBuffer.ProtocolBufferDecodeError):
raise errors.BadReaderParamsError("The prototype request must be "
"parseable as a LogReadRequest.")
# Pass the parameters to logservice.fetch() to verify any underlying
# constraints on types or values. This only constructs an iterator, it
# doesn't trigger any requests for actual log records.
try:
logservice.fetch(**params)
except logservice.InvalidArgumentError, e:
raise errors.BadReaderParamsError("One or more parameters are not valid "
"inputs to logservice.fetch(): %s" % e)
|
def validate(cls, mapper_spec)
|
Validates the mapper's specification and all necessary parameters.
Args:
mapper_spec: The MapperSpec to be used with this InputReader.
Raises:
BadReaderParamsError: If the user fails to specify both a starting time
and an ending time, or if the starting time is later than the ending
time.
| 3.142161
| 3.080069
| 1.020159
|
while True:
if self._bucket_iter:
try:
return self._bucket_iter.next().filename
except StopIteration:
self._bucket_iter = None
self._bucket = None
if self._index >= len(self._filenames):
return
filename = self._filenames[self._index]
self._index += 1
if self._delimiter is None or not filename.endswith(self._delimiter):
return filename
self._bucket = cloudstorage.listbucket(filename,
delimiter=self._delimiter)
self._bucket_iter = iter(self._bucket)
|
def _next_file(self)
|
Find next filename.
self._filenames may need to be expanded via listbucket.
Returns:
None if no more files are left. The next filename otherwise.
| 2.63775
| 2.492519
| 1.058267
|
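The _next_file helper above lazily expands any filename that ends with the delimiter by listing the bucket under that prefix. A hedged sketch of the same pattern as a generator; list_prefix is a hypothetical stand-in for cloudstorage.listbucket:

def iter_files(filenames, delimiter, list_prefix):
    # Walk `filenames` in order; any name ending with `delimiter` is treated
    # as a prefix and lazily expanded via `list_prefix`, mirroring _next_file.
    for name in filenames:
        if delimiter and name.endswith(delimiter):
            for expanded in list_prefix(name):
                yield expanded
        else:
            yield name

fake_bucket = {"/b/logs/": ["/b/logs/a", "/b/logs/b"]}
names = list(iter_files(["/b/logs/", "/b/one.txt"], "/",
                        lambda prefix: fake_bucket.get(prefix, [])))
assert names == ["/b/logs/a", "/b/logs/b", "/b/one.txt"]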
reader_spec = cls.get_params(mapper_spec, allow_old=False)
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in reader_spec:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
reader_spec[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadReaderParamsError("Bad bucket name, %s" % (error))
# Object Name(s) are required
if cls.OBJECT_NAMES_PARAM not in reader_spec:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.OBJECT_NAMES_PARAM)
filenames = reader_spec[cls.OBJECT_NAMES_PARAM]
if not isinstance(filenames, list):
raise errors.BadReaderParamsError(
"Object name list is not a list but a %s" %
filenames.__class__.__name__)
for filename in filenames:
if not isinstance(filename, basestring):
raise errors.BadReaderParamsError(
"Object name is not a string but a %s" %
filename.__class__.__name__)
if cls.DELIMITER_PARAM in reader_spec:
delimiter = reader_spec[cls.DELIMITER_PARAM]
if not isinstance(delimiter, basestring):
raise errors.BadReaderParamsError(
"%s is not a string but a %s" %
(cls.DELIMITER_PARAM, type(delimiter)))
|
def validate(cls, mapper_spec)
|
Validate mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec
Raises:
BadReaderParamsError: if the specification is invalid for any reason such
as missing the bucket name or providing an invalid bucket name.
| 1.983232
| 1.940754
| 1.021887
|
reader_spec = cls.get_params(mapper_spec, allow_old=False)
bucket = reader_spec[cls.BUCKET_NAME_PARAM]
filenames = reader_spec[cls.OBJECT_NAMES_PARAM]
delimiter = reader_spec.get(cls.DELIMITER_PARAM)
account_id = reader_spec.get(cls._ACCOUNT_ID_PARAM)
buffer_size = reader_spec.get(cls.BUFFER_SIZE_PARAM)
fail_on_missing_input = reader_spec.get(cls.FAIL_ON_MISSING_INPUT)
# Gather the complete list of files (expanding wildcards)
all_filenames = []
for filename in filenames:
if filename.endswith("*"):
all_filenames.extend(
[file_stat.filename for file_stat in cloudstorage.listbucket(
"/" + bucket + "/" + filename[:-1], delimiter=delimiter,
_account_id=account_id)])
else:
all_filenames.append("/%s/%s" % (bucket, filename))
# Split into shards
readers = []
for shard in range(0, mapper_spec.shard_count):
shard_filenames = all_filenames[shard::mapper_spec.shard_count]
if shard_filenames:
reader = cls(
shard_filenames, buffer_size=buffer_size, _account_id=account_id,
delimiter=delimiter)
reader._fail_on_missing_input = fail_on_missing_input
readers.append(reader)
return readers
|
def split_input(cls, mapper_spec)
|
Returns a list of input readers.
An equal number of input files are assigned to each shard (+/- 1). If there
are fewer files than shards, fewer than the requested number of shards will
be used. Input files are currently never split (although for some formats
could be and may be split in a future implementation).
Args:
mapper_spec: an instance of model.MapperSpec.
Returns:
A list of InputReaders. None when no input data can be found.
| 2.684224
| 2.650529
| 1.012713
|
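The shard assignment in split_input above is plain extended slicing: all_filenames[shard::shard_count] gives shard k every shard_count-th file starting at index k, which keeps the per-shard file counts within one of each other. A tiny illustration:

all_filenames = ["/b/f%d" % i for i in range(7)]
shard_count = 3
shards = [all_filenames[shard::shard_count] for shard in range(shard_count)]
assert shards[0] == ["/b/f0", "/b/f3", "/b/f6"]
assert shards[1] == ["/b/f1", "/b/f4"]
assert shards[2] == ["/b/f2", "/b/f5"]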
options = {}
if self._buffer_size:
options["read_buffer_size"] = self._buffer_size
if self._account_id:
options["_account_id"] = self._account_id
while True:
filename = self._next_file()
if filename is None:
raise StopIteration()
try:
start_time = time.time()
handle = cloudstorage.open(filename, **options)
ctx = context.get()
if ctx:
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return handle
except cloudstorage.NotFoundError:
# Fail the job if we're strict on missing input.
if getattr(self, "_fail_on_missing_input", False):
raise errors.FailJobError(
"File missing in GCS, aborting: %s" % filename)
# Move on otherwise.
logging.warning("File %s may have been removed. Skipping file.",
filename)
|
def next(self)
|
Returns the next input from this input reader, a block of bytes.
Nonexistent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may be raised if the file can not be opened.
Raises:
StopIteration: The list of files has been exhausted.
| 4.854288
| 4.268618
| 1.137204
|
while True:
if not hasattr(self, "_cur_handle") or self._cur_handle is None:
# If there are no more files, StopIteration is raised here
self._cur_handle = super(_GoogleCloudStorageRecordInputReader,
self).next()
if not hasattr(self, "_record_reader") or self._record_reader is None:
self._record_reader = records.RecordsReader(self._cur_handle)
try:
start_time = time.time()
content = self._record_reader.read()
ctx = context.get()
if ctx:
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return content
except EOFError:
self._cur_handle = None
self._record_reader = None
|
def next(self)
|
Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
a LevelDB file.
Raises:
StopIteration: The ordered set records has been exhausted.
| 3.614406
| 3.441854
| 1.050133
|
result = super(_ReducerReader, self).to_json()
result["current_key"] = self.encode_data(self.current_key)
result["current_values"] = self.encode_data(self.current_values)
return result
|
def to_json(self)
|
Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
| 5.095889
| 4.732881
| 1.076699
|
result = super(_ReducerReader, cls).from_json(json)
result.current_key = _ReducerReader.decode_data(json["current_key"])
result.current_values = _ReducerReader.decode_data(json["current_values"])
return result
|
def from_json(cls, json)
|
Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
| 4.685957
| 4.8184
| 0.972513
|
self._state.counters_map.increment(counter_name, delta)
|
def incr(self, counter_name, delta=1)
|
Changes counter by delta.
Args:
counter_name: the name of the counter to change. str.
delta: int.
| 15.465787
| 15.195013
| 1.01782
|
return self._state.counters_map.get(counter_name, default)
|
def counter(self, counter_name, default=0)
|
Get the current counter value.
Args:
counter_name: name of the counter in string.
default: default value in int if one doesn't exist.
Returns:
Current value of the counter.
| 9.667696
| 14.283736
| 0.676832
|
if not self._tstate.output_writer:
logging.error("emit is called, but no output writer is set.")
return
self._tstate.output_writer.write(value)
|
def emit(self, value)
|
Emits a value to output writer.
Args:
value: a value of type expected by the output writer.
| 6.086761
| 5.308145
| 1.146683
|
reader_params = job_config.input_reader_params
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in reader_params:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.BUCKET_NAME_PARAM)
try:
cloudstorage.validate_bucket_name(
reader_params[cls.BUCKET_NAME_PARAM])
except ValueError, error:
raise errors.BadReaderParamsError("Bad bucket name, %s" % (error))
# Object Name(s) are required
if cls.OBJECT_NAMES_PARAM not in reader_params:
raise errors.BadReaderParamsError(
"%s is required for Google Cloud Storage" %
cls.OBJECT_NAMES_PARAM)
filenames = reader_params[cls.OBJECT_NAMES_PARAM]
if not isinstance(filenames, list):
raise errors.BadReaderParamsError(
"Object name list is not a list but a %s" %
filenames.__class__.__name__)
for filename in filenames:
if not isinstance(filename, basestring):
raise errors.BadReaderParamsError(
"Object name is not a string but a %s" %
filename.__class__.__name__)
# Delimiter.
if cls.DELIMITER_PARAM in reader_params:
delimiter = reader_params[cls.DELIMITER_PARAM]
if not isinstance(delimiter, basestring):
raise errors.BadReaderParamsError(
"%s is not a string but a %s" %
(cls.DELIMITER_PARAM, type(delimiter)))
# Buffer size.
if cls.BUFFER_SIZE_PARAM in reader_params:
buffer_size = reader_params[cls.BUFFER_SIZE_PARAM]
if not isinstance(buffer_size, int):
raise errors.BadReaderParamsError(
"%s is not an int but a %s" %
(cls.BUFFER_SIZE_PARAM, type(buffer_size)))
# Path filter.
if cls.PATH_FILTER_PARAM in reader_params:
path_filter = reader_params[cls.PATH_FILTER_PARAM]
if not isinstance(path_filter, PathFilter):
raise errors.BadReaderParamsError(
"%s is not an instance of PathFilter but %s." %
(cls.PATH_FILTER_PARAM, type(path_filter)))
|
def validate(cls, job_config)
|
Validate mapper specification.
Args:
job_config: map_job.JobConfig.
Raises:
BadReaderParamsError: if the specification is invalid for any reason such
as missing the bucket name or providing an invalid bucket name.
| 1.696219
| 1.674413
| 1.013023
|
reader_params = job_config.input_reader_params
bucket = reader_params[cls.BUCKET_NAME_PARAM]
filenames = reader_params[cls.OBJECT_NAMES_PARAM]
delimiter = reader_params.get(cls.DELIMITER_PARAM)
account_id = reader_params.get(cls._ACCOUNT_ID_PARAM)
buffer_size = reader_params.get(cls.BUFFER_SIZE_PARAM)
path_filter = reader_params.get(cls.PATH_FILTER_PARAM)
# Gather the complete list of files (expanding wildcards)
all_filenames = []
for filename in filenames:
if filename.endswith("*"):
all_filenames.extend(
[file_stat.filename for file_stat in cloudstorage.listbucket(
"/" + bucket + "/" + filename[:-1], delimiter=delimiter,
_account_id=account_id)])
else:
all_filenames.append("/%s/%s" % (bucket, filename))
# Split into shards
readers = []
for shard in range(0, job_config.shard_count):
shard_filenames = all_filenames[shard::job_config.shard_count]
if shard_filenames:
readers.append(cls(
shard_filenames, buffer_size=buffer_size, _account_id=account_id,
delimiter=delimiter, path_filter=path_filter))
return readers
|
def split_input(cls, job_config)
|
Returns a list of input readers.
An equal number of input files are assigned to each shard (+/- 1). If there
are fewer files than shards, fewer than the requested number of shards will
be used. Input files are currently never split (although for some formats
could be and may be split in a future implementation).
Args:
job_config: map_job.JobConfig
Returns:
A list of InputReaders. None when no input data can be found.
| 2.599777
| 2.621098
| 0.991866
|
options = {}
if self._buffer_size:
options["read_buffer_size"] = self._buffer_size
if self._account_id:
options["_account_id"] = self._account_id
while True:
filename = self._next_file()
if filename is None:
raise StopIteration()
if (self._path_filter and
not self._path_filter.accept(self._slice_ctx, filename)):
continue
try:
start_time = time.time()
handle = cloudstorage.open(filename, **options)
        self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
                             int((time.time() - start_time) * 1000))
self._slice_ctx.incr(self.COUNTER_FILE_READ)
return handle
except cloudstorage.NotFoundError:
logging.warning("File %s may have been removed. Skipping file.",
filename)
self._slice_ctx.incr(self.COUNTER_FILE_MISSING)
|
def next(self)
|
Returns a handle to the next file.
Nonexistent files will be logged and skipped. The file might have been
removed after input splitting.
Returns:
The next input from this input reader in the form of a cloudstorage
ReadBuffer that supports a File-like interface (read, readline, seek,
tell, and close). An error may be raised if the file can not be opened.
Raises:
StopIteration: The list of files has been exhausted.
| 3.294084
| 2.916275
| 1.129552
|
params_cp = dict(params)
if cls.PATH_FILTER_PARAM in params_cp:
path_filter = params_cp[cls.PATH_FILTER_PARAM]
params_cp[cls.PATH_FILTER_PARAM] = pickle.dumps(path_filter)
return params_cp
|
def params_to_json(cls, params)
|
Inherit docs.
| 3.203947
| 3.089776
| 1.036951
|
while True:
if not hasattr(self, "_cur_handle") or self._cur_handle is None:
# If there are no more files, StopIteration is raised here
self._cur_handle = super(GCSRecordInputReader, self).next()
if not hasattr(self, "_record_reader") or self._record_reader is None:
self._record_reader = records.RecordsReader(self._cur_handle)
try:
start_time = time.time()
content = self._record_reader.read()
self._slice_ctx.incr(self.COUNTER_IO_READ_BYTE, len(content))
        self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
                             int((time.time() - start_time) * 1000))
return content
except EOFError:
self._cur_handle = None
self._record_reader = None
|
def next(self)
|
Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
a LevelDB file.
Raises:
StopIteration: The ordered set records has been exhausted.
| 3.355613
| 3.212868
| 1.044429
|
entity_kind = params[cls.ENTITY_KIND_PARAM]
filters = params.get(cls.FILTERS_PARAM)
app = params.get(cls._APP_PARAM)
ns = params.get(cls.NAMESPACE_PARAM)
return model.QuerySpec(
entity_kind=cls._get_raw_entity_kind(entity_kind),
keys_only=bool(params.get(cls.KEYS_ONLY_PARAM, False)),
filters=filters,
batch_size=int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE)),
model_class_path=entity_kind,
app=app,
ns=ns)
|
def _get_query_spec(cls, params)
|
Construct a model.QuerySpec from model.MapperSpec.
| 2.814272
| 2.58446
| 1.088921
|
shard_count = job_config.shard_count
params = job_config.input_reader_params
query_spec = cls._get_query_spec(params)
namespaces = None
if query_spec.ns is not None:
k_ranges = cls._to_key_ranges_by_shard(
query_spec.app, [query_spec.ns], shard_count, query_spec)
else:
ns_keys = namespace_range.get_namespace_keys(
query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)
# No namespace means the app may have some data but those data are not
# visible yet. Just return.
if not ns_keys:
return
# If the number of ns is small, we shard each ns by key and assign each
# shard a piece of a ns.
elif len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD:
namespaces = [ns_key.name() or "" for ns_key in ns_keys]
k_ranges = cls._to_key_ranges_by_shard(
query_spec.app, namespaces, shard_count, query_spec)
# When number of ns is large, we can only split lexicographically by ns.
else:
ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
contiguous=False,
can_query=lambda: True,
_app=query_spec.app)
k_ranges = [key_ranges.KeyRangesFactory.create_from_ns_range(ns_range)
for ns_range in ns_ranges]
iters = [db_iters.RangeIteratorFactory.create_key_ranges_iterator(
r, query_spec, cls._KEY_RANGE_ITER_CLS) for r in k_ranges]
return [cls(i) for i in iters]
|
def split_input(cls, job_config)
|
Inherit doc.
| 4.379879
| 4.356047
| 1.005471
|
key_ranges_by_ns = []
# Split each ns into n splits. If a ns doesn't have enough scatter to
# split into n, the last few splits are None.
for namespace in namespaces:
ranges = cls._split_ns_by_scatter(
shard_count,
namespace,
query_spec.entity_kind,
app)
# The nth split of each ns will be assigned to the nth shard.
      # Shuffle so that the Nones do not all end up at the end.
random.shuffle(ranges)
key_ranges_by_ns.append(ranges)
# KeyRanges from different namespaces might be very different in size.
# Use round robin to make sure each shard can have at most one split
# or a None from a ns.
ranges_by_shard = [[] for _ in range(shard_count)]
for ranges in key_ranges_by_ns:
for i, k_range in enumerate(ranges):
if k_range:
ranges_by_shard[i].append(k_range)
key_ranges_by_shard = []
for ranges in ranges_by_shard:
if ranges:
key_ranges_by_shard.append(key_ranges.KeyRangesFactory.create_from_list(
ranges))
return key_ranges_by_shard
|
def _to_key_ranges_by_shard(cls, app, namespaces, shard_count, query_spec)
|
Get a list of key_ranges.KeyRanges objects, one for each shard.
This method uses scatter index to split each namespace into pieces
and assign those pieces to shards.
Args:
app: app_id in str.
namespaces: a list of namespaces in str.
shard_count: number of shards to split.
query_spec: model.QuerySpec.
Returns:
a list of key_ranges.KeyRanges objects.
| 4.356113
| 4.262007
| 1.02208
|
if shard_count == 1:
# With one shard we don't need to calculate any split points at all.
return [key_range.KeyRange(namespace=namespace, _app=app)]
ds_query = datastore.Query(kind=raw_entity_kind,
namespace=namespace,
_app=app,
keys_only=True)
ds_query.Order("__scatter__")
oversampling_factor = 32
random_keys = ds_query.Get(shard_count * oversampling_factor)
if not random_keys:
# There are no entities with scatter property. We have no idea
# how to split.
return ([key_range.KeyRange(namespace=namespace, _app=app)] +
[None] * (shard_count - 1))
random_keys.sort()
if len(random_keys) >= shard_count:
# We've got a lot of scatter values. Sample them down.
random_keys = cls._choose_split_points(random_keys, shard_count)
k_ranges = []
k_ranges.append(key_range.KeyRange(
key_start=None,
key_end=random_keys[0],
direction=key_range.KeyRange.ASC,
include_start=False,
include_end=False,
namespace=namespace,
_app=app))
for i in range(0, len(random_keys) - 1):
k_ranges.append(key_range.KeyRange(
key_start=random_keys[i],
key_end=random_keys[i+1],
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
k_ranges.append(key_range.KeyRange(
key_start=random_keys[-1],
key_end=None,
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
if len(k_ranges) < shard_count:
# We need to have as many shards as it was requested. Add some Nones.
k_ranges += [None] * (shard_count - len(k_ranges))
return k_ranges
|
def _split_ns_by_scatter(cls,
shard_count,
namespace,
raw_entity_kind,
app)
|
Split a namespace by scatter index into key_range.KeyRange.
TODO(user): Power this with key_range.KeyRange.compute_split_points.
Args:
shard_count: number of shards.
namespace: namespace name to split. str.
raw_entity_kind: low level datastore API entity kind.
app: app id in str.
Returns:
A list of key_range.KeyRange objects. If there are not enough entities to
splits into requested shards, the returned list will contain KeyRanges
ordered lexicographically with any Nones appearing at the end.
| 2.342886
| 2.233337
| 1.049052
|
super(AbstractDatastoreInputReader, cls).validate(job_config)
params = job_config.input_reader_params
# Check for the required entity kind parameter.
if cls.ENTITY_KIND_PARAM not in params:
raise errors.BadReaderParamsError("Missing input reader parameter "
"'entity_kind'")
# Validate the batch size parameter.
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise errors.BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise errors.BadReaderParamsError("Bad batch size: %s" % e)
# Validate the keys only parameter.
try:
bool(params.get(cls.KEYS_ONLY_PARAM, False))
except:
raise errors.BadReaderParamsError("keys_only expects a boolean value but "
"got %s",
params[cls.KEYS_ONLY_PARAM])
# Validate the namespace parameter.
if cls.NAMESPACE_PARAM in params:
if not isinstance(params[cls.NAMESPACE_PARAM],
(str, unicode, type(None))):
raise errors.BadReaderParamsError("Expected a single namespace string")
# Validate the filters parameter.
if cls.FILTERS_PARAM in params:
filters = params[cls.FILTERS_PARAM]
if not isinstance(filters, list):
raise errors.BadReaderParamsError("Expected list for filters parameter")
for f in filters:
if not isinstance(f, (tuple, list)):
raise errors.BadReaderParamsError("Filter should be a tuple or list: "
"%s", f)
if len(f) != 3:
raise errors.BadReaderParamsError("Filter should be a 3-tuple: %s", f)
prop, op, _ = f
if not isinstance(prop, basestring):
raise errors.BadReaderParamsError("Property should be string: %s",
prop)
if not isinstance(op, basestring):
raise errors.BadReaderParamsError("Operator should be string: %s", op)
|
def validate(cls, job_config)
|
Inherit docs.
| 2.081345
| 2.045616
| 1.017466
|
global NAMESPACE_CHARACTERS
global MAX_NAMESPACE_LENGTH
# pylint: disable=global-variable-undefined
global MAX_NAMESPACE
global _LEX_DISTANCE
global NAMESPACE_BATCH_SIZE
NAMESPACE_CHARACTERS = alphabet
MAX_NAMESPACE_LENGTH = max_length
MAX_NAMESPACE = NAMESPACE_CHARACTERS[-1] * MAX_NAMESPACE_LENGTH
NAMESPACE_BATCH_SIZE = batch_size
# _LEX_DISTANCE will contain the lexical distance between two adjacent
# characters in NAMESPACE_CHARACTERS at each character index. This is used
# to calculate the ordinal for each string. Example:
# NAMESPACE_CHARACTERS = 'ab'
# MAX_NAMESPACE_LENGTH = 3
# _LEX_DISTANCE = [1, 3, 7]
# '' => 0
# 'a' => 1
# 'aa' => 2
# 'aaa' => 3
# 'aab' => 4 - Distance between 'aaa' and 'aab' is 1.
# 'ab' => 5 - Distance between 'aa' and 'ab' is 3.
# 'aba' => 6
# 'abb' => 7
# 'b' => 8 - Distance between 'a' and 'b' is 7.
# 'ba' => 9
# 'baa' => 10
# 'bab' => 11
# ...
# _namespace_to_ord('bab') = (1 * 7 + 1) + (0 * 3 + 1) + (1 * 1 + 1) = 11
_LEX_DISTANCE = [1]
for i in range(1, MAX_NAMESPACE_LENGTH):
_LEX_DISTANCE.append(
_LEX_DISTANCE[i-1] * len(NAMESPACE_CHARACTERS) + 1)
# pylint: disable=undefined-loop-variable
del i
|
def _setup_constants(alphabet=NAMESPACE_CHARACTERS,
max_length=MAX_NAMESPACE_LENGTH,
batch_size=NAMESPACE_BATCH_SIZE)
|
Calculate derived constant values. Only useful for testing.
| 2.505719
| 2.487437
| 1.00735
|
if _max_length is None:
_max_length = MAX_NAMESPACE_LENGTH
length = _LEX_DISTANCE[_max_length - 1]
if n == 0:
return ''
n -= 1
return (NAMESPACE_CHARACTERS[n / length] +
_ord_to_namespace(n % length, _max_length - 1))
|
def _ord_to_namespace(n, _max_length=None)
|
Convert a namespace ordinal to a namespace string.
Converts an int, representing the sequence number of a namespace ordered
lexicographically, into a namespace string.
>>> _ord_to_namespace(0)
''
>>> _ord_to_namespace(1)
'-'
>>> _ord_to_namespace(2)
'--'
>>> _ord_to_namespace(3)
'---'
Args:
n: A number representing the lexicographical ordering of a namespace.
_max_length: The maximum namespace length.
Returns:
A string representing the nth namespace in lexicographical order.
| 4.20196
| 4.483612
| 0.937182
|
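To make the ordinal scheme behind _LEX_DISTANCE and _ord_to_namespace concrete, here is a self-contained re-implementation using the same two-character alphabet as the worked example in the _setup_constants comment. The function names are illustrative; the real module operates on module-level constants rather than passing them as arguments.

def lex_distance(alphabet, max_length):
    # dist[i]: lexical distance between adjacent characters at position i,
    # i.e. how many strings of length <= i + 1 sort between them.
    dist = [1]
    for i in range(1, max_length):
        dist.append(dist[i - 1] * len(alphabet) + 1)
    return dist

def ord_to_namespace(n, alphabet, max_length, dist):
    # Inverse of the ordinal mapping: 0 -> '', 1 -> alphabet[0], ...
    if n == 0:
        return ""
    n -= 1
    length = dist[max_length - 1]
    return (alphabet[n // length] +
            ord_to_namespace(n % length, alphabet, max_length - 1, dist))

dist = lex_distance("ab", 3)
assert dist == [1, 3, 7]
names = [ord_to_namespace(n, "ab", 3, dist) for n in range(12)]
assert names == ["", "a", "aa", "aaa", "aab", "ab", "aba", "abb",
                 "b", "ba", "baa", "bab"]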