_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q13200
|
MTurkService.get_qualification_type_by_name
|
train
|
def get_qualification_type_by_name(self, name):
    """Return a Qualification Type by name. If the provided name matches
    more than one Qualification, check to see if any of the results
    match the provided name exactly. If there's an exact match, return
    that Qualification. Otherwise, raise an exception.

    :param name: the Qualification Type name to search for; matching is
        case-insensitive.
    :returns: a translated qualification dict, or None if no results
        appear within ``self.max_wait_secs``.
    :raises MTurkServiceException: if multiple fuzzy matches are found
        and none matches ``name`` exactly.
    """
    # MTurk search is fuzzy; cap how many candidates we scan for an
    # exact match.
    max_fuzzy_matches_to_check = 100
    # Upper-case both sides so the comparison below is case-insensitive.
    query = name.upper()
    start = time.time()
    args = {
        "Query": query,
        "MustBeRequestable": False,
        "MustBeOwnedByCaller": True,
        "MaxResults": max_fuzzy_matches_to_check,
    }
    results = self.mturk.list_qualification_types(**args)["QualificationTypes"]
    # This loop is largely for tests, because there's some indexing that
    # needs to happen on MTurk for search to work:
    while not results and time.time() - start < self.max_wait_secs:
        time.sleep(1)
        results = self.mturk.list_qualification_types(**args)["QualificationTypes"]
    if not results:
        return None
    qualifications = [self._translate_qtype(r) for r in results]
    if len(qualifications) > 1:
        # Multiple fuzzy matches: accept only an exact name match.
        for qualification in qualifications:
            if qualification["name"].upper() == query:
                return qualification
        raise MTurkServiceException("{} was not a unique name".format(query))
    return qualifications[0]
|
python
|
{
"resource": ""
}
|
q13201
|
MTurkService.assign_qualification
|
train
|
def assign_qualification(self, qualification_id, worker_id, score, notify=False):
    """Score a worker for a specific qualification"""
    response = self.mturk.associate_qualification_with_worker(
        QualificationTypeId=qualification_id,
        WorkerId=worker_id,
        IntegerValue=score,
        SendNotification=notify,
    )
    return self._is_ok(response)
|
python
|
{
"resource": ""
}
|
q13202
|
MTurkService.increment_qualification_score
|
train
|
def increment_qualification_score(self, name, worker_id, notify=False):
    """Add one to a worker's current score on the named qualification.

    A missing score (None) is treated as zero. Returns a dict with the
    qualification type under ``"qtype"`` and the new score under
    ``"score"``.
    """
    current = self.get_current_qualification_score(name, worker_id)
    bumped = (current["score"] or 0) + 1
    self.assign_qualification(current["qtype"]["id"], worker_id, bumped, notify)
    return {"qtype": current["qtype"], "score": bumped}
|
python
|
{
"resource": ""
}
|
q13203
|
MTurkService.get_qualification_score
|
train
|
def get_qualification_score(self, qualification_id, worker_id):
    """Return a worker's qualification score as an integer.

    :param qualification_id: the MTurk QualificationType ID.
    :param worker_id: the MTurk Worker ID.
    :raises WorkerLacksQualification: if the worker was never granted
        this qualification.
    :raises RevokedQualification: if the qualification was revoked.
    :raises MTurkServiceException: for any other MTurk API error.
    """
    try:
        response = self.mturk.get_qualification_score(
            QualificationTypeId=qualification_id, WorkerId=worker_id
        )
    except ClientError as ex:
        # boto3 reports both conditions as ClientError; distinguish them
        # by inspecting the error message text.
        error = str(ex)
        if "does not exist" in error:
            raise WorkerLacksQualification(
                "Worker {} does not have qualification {}.".format(
                    worker_id, qualification_id
                )
            )
        if "operation can be called with a status of: Granted" in error:
            raise RevokedQualification(
                "Worker {} has had qualification {} revoked.".format(
                    worker_id, qualification_id
                )
            )
        raise MTurkServiceException(error)
    return response["Qualification"]["IntegerValue"]
|
python
|
{
"resource": ""
}
|
q13204
|
MTurkService.get_current_qualification_score
|
train
|
def get_current_qualification_score(self, name, worker_id):
    """Look up a worker's current score on the named qualification.

    Returns a dict with the qualification type under ``"qtype"`` and
    the score under ``"score"``; the score is None when the worker
    lacks the qualification or has had it revoked.
    """
    qualification_type = self.get_qualification_type_by_name(name)
    if qualification_type is None:
        raise QualificationNotFoundException(
            'No Qualification exists with name "{}"'.format(name)
        )
    score = None
    try:
        score = self.get_qualification_score(qualification_type["id"], worker_id)
    except (WorkerLacksQualification, RevokedQualification):
        pass
    return {"qtype": qualification_type, "score": score}
|
python
|
{
"resource": ""
}
|
q13205
|
MTurkService.dispose_qualification_type
|
train
|
def dispose_qualification_type(self, qualification_id):
    """Remove a qualification type we created"""
    response = self.mturk.delete_qualification_type(
        QualificationTypeId=qualification_id
    )
    return self._is_ok(response)
|
python
|
{
"resource": ""
}
|
q13206
|
MTurkService.get_workers_with_qualification
|
train
|
def get_workers_with_qualification(self, qualification_id):
    """Get workers with the given qualification.

    Generator: yields one ``{"id": ..., "score": ...}`` dict per worker
    holding the qualification with "Granted" status, following MTurk's
    NextToken pagination until the final page.
    """
    next_token = None
    while True:
        request = {
            "QualificationTypeId": qualification_id,
            "MaxResults": MAX_SUPPORTED_BATCH_SIZE,
            "Status": "Granted",
        }
        if next_token is not None:
            request["NextToken"] = next_token
        response = self.mturk.list_workers_with_qualification_type(**request)
        if response:
            for record in response["Qualifications"]:
                yield {"id": record["WorkerId"], "score": record["IntegerValue"]}
        # A page without a NextToken is the last one.
        if "NextToken" not in response:
            break
        next_token = response["NextToken"]
|
python
|
{
"resource": ""
}
|
q13207
|
MTurkService.create_hit
|
train
|
def create_hit(
    self,
    title,
    description,
    keywords,
    reward,
    duration_hours,
    lifetime_days,
    ad_url,
    notification_url,
    approve_requirement,
    max_assignments,
    us_only,
    blacklist=None,
    annotation=None,
):
    """Create the actual HIT and return a dict with its useful properties.

    :param title: HIT title shown to workers.
    :param description: HIT description text.
    :param keywords: HIT search keywords.
    :param reward: payment per assignment (passed to the HIT type).
    :param duration_hours: assignment duration for the HIT type.
    :param lifetime_days: how long the HIT remains available.
    :param ad_url: recruitment ad URL, wrapped as an ExternalQuestion.
    :param notification_url: endpoint registered for REST notifications.
    :param approve_requirement: passed to build_hit_qualifications —
        presumably a minimum approval-rate requirement; confirm there.
    :param max_assignments: number of assignments offered.
    :param us_only: passed to build_hit_qualifications to restrict locale.
    :param blacklist: optional, passed to build_hit_qualifications.
    :param annotation: optional RequesterAnnotation stored on the HIT.
    :raises MTurkServiceException: if MTurk's response contains no HIT.
    """
    # Fixed iframe height for the ExternalQuestion wrapper.
    frame_height = 600
    mturk_question = self._external_question(ad_url, frame_height)
    qualifications = self.build_hit_qualifications(
        approve_requirement, us_only, blacklist
    )
    # We need a HIT_Type in order to register for REST notifications
    hit_type_id = self.register_hit_type(
        title, description, reward, duration_hours, keywords, qualifications
    )
    self.set_rest_notification(notification_url, hit_type_id)
    params = {
        "HITTypeId": hit_type_id,
        "Question": mturk_question,
        "LifetimeInSeconds": int(
            datetime.timedelta(days=lifetime_days).total_seconds()
        ),
        "MaxAssignments": max_assignments,
        # Idempotency token so a retried request can't create a duplicate.
        "UniqueRequestToken": self._request_token(),
    }
    if annotation:
        params["RequesterAnnotation"] = annotation
    response = self.mturk.create_hit_with_hit_type(**params)
    if "HIT" not in response:
        raise MTurkServiceException("HIT request was invalid for unknown reason.")
    return self._translate_hit(response["HIT"])
|
python
|
{
"resource": ""
}
|
q13208
|
MTurkService.extend_hit
|
train
|
def extend_hit(self, hit_id, number, duration_hours=None):
    """Add assignments — and optionally time — to an existing HIT.

    Returns the updated HIT description.
    """
    self.create_additional_assignments_for_hit(hit_id, number)
    if duration_hours is not None:
        self.update_expiration_for_hit(hit_id, duration_hours)
    return self.get_hit(hit_id)
|
python
|
{
"resource": ""
}
|
q13209
|
MTurkService.expire_hit
|
train
|
def expire_hit(self, hit_id):
    """Force a HIT to expire immediately.

    Expiring moves the HIT to "Reviewable" status so it can then be
    deleted. Returns True on success.
    """
    try:
        self.mturk.update_expiration_for_hit(HITId=hit_id, ExpireAt=0)
    except Exception as err:
        message = "Failed to expire HIT {}: {}".format(hit_id, str(err))
        raise MTurkServiceException(message)
    return True
|
python
|
{
"resource": ""
}
|
q13210
|
MTurkService.grant_bonus
|
train
|
def grant_bonus(self, assignment_id, amount, reason):
    """Grant a bonus to the MTurk Worker.

    Issues a payment of money from your account to a Worker. To be
    eligible for a bonus, the Worker must have submitted results for
    one of your HITs and have had those results approved or rejected.
    This payment happens separately from the reward paid when the
    assignment is approved.
    """
    worker_id = self.get_assignment(assignment_id)["worker_id"]
    # MTurk wants the amount as a string with two decimal places.
    amount_str = "{:.2f}".format(amount)
    try:
        response = self.mturk.send_bonus(
            WorkerId=worker_id,
            BonusAmount=amount_str,
            AssignmentId=assignment_id,
            Reason=reason,
            UniqueRequestToken=self._request_token(),
        )
        return self._is_ok(response)
    except ClientError as ex:
        error = "Failed to pay assignment {} bonus of {}: {}".format(
            assignment_id, amount_str, str(ex)
        )
        raise MTurkServiceException(error)
|
python
|
{
"resource": ""
}
|
q13211
|
MTurkService.get_assignment
|
train
|
def get_assignment(self, assignment_id):
    """Fetch one assignment by ID, reformatted into a dict.

    Returns None when MTurk reports the assignment does not exist;
    any other ClientError propagates unchanged.
    """
    try:
        raw = self.mturk.get_assignment(AssignmentId=assignment_id)
    except ClientError as ex:
        if "does not exist" in str(ex):
            return None
        raise
    return self._translate_assignment(raw["Assignment"])
|
python
|
{
"resource": ""
}
|
q13212
|
initialize_experiment_package
|
train
|
def initialize_experiment_package(path):
    """Make the specified directory importable as the `dallinger_experiment` package.

    :param path: path to the experiment directory.

    Imports the directory as a package and registers it in
    ``sys.modules`` under the name ``dallinger_experiment``. Once that
    module is registered, subsequent calls are no-ops.
    """
    # Create __init__.py if it doesn't exist (needed for Python 2)
    init_py = os.path.join(path, "__init__.py")
    if not os.path.exists(init_py):
        open(init_py, "a").close()
    # Retain already set experiment module
    if sys.modules.get("dallinger_experiment") is not None:
        return
    dirname = os.path.dirname(path)
    basename = os.path.basename(path)
    # Temporarily put the parent directory first on sys.path so the
    # package can be imported by its directory name; popped again below.
    sys.path.insert(0, dirname)
    package = __import__(basename)
    # Guard against picking up a same-named package from elsewhere on
    # sys.path.
    if path not in package.__path__:
        raise Exception("Package was not imported from the requested path!")
    sys.modules["dallinger_experiment"] = package
    package.__package__ = "dallinger_experiment"
    sys.path.pop(0)
|
python
|
{
"resource": ""
}
|
q13213
|
Participant.questions
|
train
|
def questions(self, type=None):
    """Get questions associated with this participant.

    Return a list of questions associated with the participant. If
    specified, ``type`` filters by class (must be a Question subclass).
    """
    question_class = Question if type is None else type
    if not issubclass(question_class, Question):
        raise TypeError("{} is not a valid question type.".format(question_class))
    return question_class.query.filter_by(participant_id=self.id).all()
|
python
|
{
"resource": ""
}
|
q13214
|
Participant.infos
|
train
|
def infos(self, type=None, failed=False):
    """Get all infos created by the participants nodes.

    Collects the infos produced by every node associated with this
    participant (failed and not-failed nodes alike). ``type`` filters
    infos by class; ``failed`` filters infos (not nodes) — False by
    default, True for failed infos only, "all" for both.
    """
    return [
        info
        for node in self.nodes(failed="all")
        for info in node.infos(type=type, failed=failed)
    ]
|
python
|
{
"resource": ""
}
|
q13215
|
Question.json_data
|
train
|
def json_data(self):
    """Return json description of a question."""
    fields = ("number", "type", "participant_id", "question", "response")
    return {name: getattr(self, name) for name in fields}
|
python
|
{
"resource": ""
}
|
q13216
|
Network.nodes
|
train
|
def nodes(self, type=None, failed=False, participant_id=None):
    """Get nodes in the network.

    ``type`` specifies the Node subclass. ``failed`` can be "all",
    False (default) or True. If ``participant_id`` is passed, only
    nodes with that participant_id are returned.
    """
    if type is None:
        type = Node
    if not issubclass(type, Node):
        raise TypeError("{} is not a valid node type.".format(type))
    if failed not in ["all", False, True]:
        raise ValueError("{} is not a valid node failed".format(failed))
    # Build the filter incrementally instead of branching four ways.
    filters = {"network_id": self.id}
    if participant_id is not None:
        filters["participant_id"] = participant_id
    if failed != "all":
        filters["failed"] = failed
    return type.query.filter_by(**filters).all()
|
python
|
{
"resource": ""
}
|
q13217
|
Network.size
|
train
|
def size(self, type=None, failed=False):
    """Count the nodes in the network.

    ``type`` restricts the count to a node class; ``failed`` may be
    True, False, or "all".
    """
    return len(self.nodes(type=type, failed=failed))
|
python
|
{
"resource": ""
}
|
q13218
|
Network.infos
|
train
|
def infos(self, type=None, failed=False):
    """
    Get infos in the network.

    ``type`` specifies the info class (defaults to Info). ``failed``
    { False, True, "all" } specifies the failed state of the infos. To
    get infos from a specific node, see the infos() method in class
    :class:`~dallinger.models.Node`.
    """
    if type is None:
        type = Info
    if failed not in ["all", False, True]:
        raise ValueError("{} is not a valid failed".format(failed))
    filters = {"network_id": self.id}
    if failed != "all":
        filters["failed"] = failed
    return type.query.filter_by(**filters).all()
|
python
|
{
"resource": ""
}
|
q13219
|
Network.transmissions
|
train
|
def transmissions(self, status="all", failed=False):
    """Get transmissions in the network.

    status { "all", "received", "pending" }
    failed { False, True, "all" }
    To get transmissions from a specific vector, see the
    transmissions() method in class Vector.
    """
    if status not in ["all", "pending", "received"]:
        raise ValueError(
            "You cannot get transmission of status {}.".format(status)
            + "Status can only be pending, received or all"
        )
    if failed not in ["all", False, True]:
        raise ValueError("{} is not a valid failed".format(failed))
    # "all" means "don't filter on that column".
    filters = {"network_id": self.id}
    if status != "all":
        filters["status"] = status
    if failed != "all":
        filters["failed"] = failed
    return Transmission.query.filter_by(**filters).all()
|
python
|
{
"resource": ""
}
|
q13220
|
Network.vectors
|
train
|
def vectors(self, failed=False):
    """
    Get vectors in the network.

    failed = { False, True, "all" }
    To get the vectors to/from to a specific node, see Node.vectors().
    """
    if failed not in ["all", False, True]:
        raise ValueError("{} is not a valid vector failed".format(failed))
    filters = {"network_id": self.id}
    if failed != "all":
        filters["failed"] = failed
    return Vector.query.filter_by(**filters).all()
|
python
|
{
"resource": ""
}
|
q13221
|
Node.neighbors
|
train
|
def neighbors(self, type=None, direction="to", failed=None):
    """Get a node's neighbors - nodes that are directly connected to it.

    Type specifies the class of neighbour and must be a subclass of
    Node (default is Node).
    ``direction`` is the direction of the connections and can be "to"
    (default), "from", "either", or "both".
    ``failed`` must not be passed; only not-failed nodes reached via
    not-failed vectors are ever returned (see the error text below).
    """
    # get type
    if type is None:
        type = Node
    if not issubclass(type, Node):
        raise ValueError(
            "{} is not a valid neighbor type,"
            "needs to be a subclass of Node.".format(type)
        )
    # get direction
    if direction not in ["both", "either", "from", "to"]:
        raise ValueError(
            "{} not a valid neighbor connection."
            "Should be both, either, to or from.".format(direction)
        )
    if failed is not None:
        raise ValueError(
            "You should not pass a failed argument to neighbors(). "
            "Neighbors is "
            "unusual in that a failed argument cannot be passed. This is "
            "because there is inherent uncertainty in what it means for a "
            "neighbor to be failed. The neighbors function will only ever "
            "return not-failed nodes connected to you via not-failed "
            "vectors. If you want to do more elaborate queries, for "
            "example, getting not-failed nodes connected to you via failed"
            " vectors, you should do so via sql queries."
        )
    neighbors = []
    # get the neighbours
    if direction == "to":
        # Nodes this node points at, via not-failed vectors only.
        outgoing_vectors = (
            Vector.query.with_entities(Vector.destination_id)
            .filter_by(origin_id=self.id, failed=False)
            .all()
        )
        neighbor_ids = [v.destination_id for v in outgoing_vectors]
        if neighbor_ids:
            neighbors = Node.query.filter(Node.id.in_(neighbor_ids)).all()
            # Filter to the requested class in Python, after the query.
            neighbors = [n for n in neighbors if isinstance(n, type)]
    if direction == "from":
        # Nodes pointing at this node, via not-failed vectors only.
        incoming_vectors = (
            Vector.query.with_entities(Vector.origin_id)
            .filter_by(destination_id=self.id, failed=False)
            .all()
        )
        neighbor_ids = [v.origin_id for v in incoming_vectors]
        if neighbor_ids:
            neighbors = Node.query.filter(Node.id.in_(neighbor_ids)).all()
            neighbors = [n for n in neighbors if isinstance(n, type)]
    if direction == "either":
        # Union of the two directed result sets (order not preserved).
        neighbors = list(
            set(
                self.neighbors(type=type, direction="to")
                + self.neighbors(type=type, direction="from")
            )
        )
    if direction == "both":
        # Intersection: connected in both directions.
        neighbors = list(
            set(self.neighbors(type=type, direction="to"))
            & set(self.neighbors(type=type, direction="from"))
        )
    return neighbors
|
python
|
{
"resource": ""
}
|
q13222
|
Node.infos
|
train
|
def infos(self, type=None, failed=False):
    """Get infos that originate from this node.

    Type must be a subclass of :class:`~dallinger.models.Info`, the
    default is ``Info``. Failed can be True, False or "all".
    """
    if type is None:
        type = Info
    if not issubclass(type, Info):
        raise TypeError(
            "Cannot get infos of type {} as it is not a valid type.".format(type)
        )
    if failed not in ["all", False, True]:
        raise ValueError("{} is not a valid vector failed".format(failed))
    filters = {"origin_id": self.id}
    if failed != "all":
        filters["failed"] = failed
    return type.query.filter_by(**filters).all()
|
python
|
{
"resource": ""
}
|
q13223
|
Node.received_infos
|
train
|
def received_infos(self, type=None, failed=None):
    """Get infos that have been sent to this node.

    Type must be a subclass of info, the default is Info. Only infos
    carried by not-failed, received transmissions are considered;
    ``failed`` must not be passed (see the error text below).
    """
    if failed is not None:
        raise ValueError(
            "You should not pass a failed argument to received_infos. "
            "received_infos is "
            "unusual in that a failed argument cannot be passed. This is "
            "because there is inherent uncertainty in what it means for a "
            "received info to be failed. The received_infos function will "
            "only ever check not-failed transmissions. "
            "If you want to check failed transmissions "
            "you should do so via sql queries."
        )
    info_class = Info if type is None else type
    if not issubclass(info_class, Info):
        raise TypeError(
            "Cannot get infos of type {} as it is not a valid type.".format(
                info_class
            )
        )
    received = (
        Transmission.query.with_entities(Transmission.info_id)
        .filter_by(destination_id=self.id, status="received", failed=False)
        .all()
    )
    ids = [row.info_id for row in received]
    if not ids:
        return []
    return info_class.query.filter(info_class.id.in_(ids)).all()
|
python
|
{
"resource": ""
}
|
q13224
|
Node.transformations
|
train
|
def transformations(self, type=None, failed=False):
    """
    Get Transformations done by this Node.

    type must be a type of Transformation (defaults to Transformation).
    Failed can be True, False or "all".
    """
    if failed not in ["all", False, True]:
        raise ValueError("{} is not a valid transmission failed".format(failed))
    transformation_class = Transformation if type is None else type
    filters = {"node_id": self.id}
    if failed != "all":
        filters["failed"] = failed
    return transformation_class.query.filter_by(**filters).all()
|
python
|
{
"resource": ""
}
|
q13225
|
Node.fail
|
train
|
def fail(self):
    """
    Fail a node, setting its status to "failed".

    Also fails all vectors that connect to or from the node.
    You cannot fail a node that has already failed, but you
    can fail a dead node.
    Set node.failed to True and :attr:`~dallinger.models.Node.time_of_death`
    to now. Instruct all not-failed vectors connected to this node, infos
    made by this node, transmissions to or from this node and
    transformations made by this node to fail.

    :raises AttributeError: if the node has already failed.
    """
    if self.failed is True:
        raise AttributeError("Cannot fail {} - it has already failed.".format(self))
    else:
        self.failed = True
        self.time_of_death = timenow()
        # NOTE(review): presumably recomputes the network's "full"
        # status now that this node no longer counts — confirm.
        self.network.calculate_full()
        # Cascade the failure to everything attached to this node.
        for v in self.vectors():
            v.fail()
        for i in self.infos():
            i.fail()
        for t in self.transmissions(direction="all"):
            t.fail()
        for t in self.transformations():
            t.fail()
|
python
|
{
"resource": ""
}
|
q13226
|
Vector.fail
|
train
|
def fail(self):
    """Fail a vector.

    Marks the vector failed, records the time of death, and cascades
    the failure to every transmission carried by this vector. Raises
    AttributeError if the vector has already failed.
    """
    if self.failed is True:
        raise AttributeError("Cannot fail {} - it has already failed.".format(self))
    self.failed = True
    self.time_of_death = timenow()
    for transmission in self.transmissions():
        transmission.fail()
|
python
|
{
"resource": ""
}
|
q13227
|
Info.json_data
|
train
|
def json_data(self):
    """The json representation of an info."""
    return {
        name: getattr(self, name)
        for name in ("type", "origin_id", "network_id", "contents")
    }
|
python
|
{
"resource": ""
}
|
q13228
|
Info.transformations
|
train
|
def transformations(self, relationship="all"):
    """Get all the transformations of this info.

    Return a list of transformations involving this info. ``relationship``
    can be "parent" (in which case only transformations where the info is
    the ``info_in`` are returned), "child" (in which case only
    transformations where the info is the ``info_out`` are returned) or
    ``all`` (in which case any transformations where the info is the
    ``info_out`` or the ``info_in`` are returned). The default is ``all``.
    Only not-failed transformations are returned in every case.
    """
    if relationship not in ["all", "parent", "child"]:
        raise ValueError(
            "You cannot get transformations of relationship {}".format(relationship)
            + "Relationship can only be parent, child or all."
        )
    if relationship == "all":
        # Not-failed transformations where this info is on either side.
        return Transformation.query.filter(
            and_(
                Transformation.failed == false(),
                or_(
                    Transformation.info_in == self, Transformation.info_out == self
                ),
            )
        ).all()
    if relationship == "parent":
        return Transformation.query.filter_by(
            info_in_id=self.id, failed=False
        ).all()
    if relationship == "child":
        return Transformation.query.filter_by(
            info_out_id=self.id, failed=False
        ).all()
|
python
|
{
"resource": ""
}
|
q13229
|
Transmission.json_data
|
train
|
def json_data(self):
    """The json representation of a transmissions."""
    fields = (
        "vector_id",
        "origin_id",
        "destination_id",
        "info_id",
        "network_id",
        "receive_time",
        "status",
    )
    return {name: getattr(self, name) for name in fields}
|
python
|
{
"resource": ""
}
|
q13230
|
Transformation.json_data
|
train
|
def json_data(self):
    """The json representation of a transformation."""
    return {
        name: getattr(self, name)
        for name in ("info_in_id", "info_out_id", "node_id", "network_id")
    }
|
python
|
{
"resource": ""
}
|
q13231
|
generate_random_id
|
train
|
def generate_random_id(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier string.

    The id is ``size`` characters drawn (with replacement) from
    ``chars``, which defaults to uppercase ASCII letters plus digits.
    """
    picks = [random.choice(chars) for _ in range(size)]
    return "".join(picks)
|
python
|
{
"resource": ""
}
|
q13232
|
report_idle_after
|
train
|
def report_idle_after(seconds):
    """Report_idle_after after certain number of seconds.

    Decorator factory: wraps a function so that if it runs longer than
    ``seconds``, a SIGALRM handler sends an "Idle Experiment." message
    via the configured messenger. The alarm is always cancelled when the
    wrapped function returns or raises.

    NOTE(review): SIGALRM is Unix-only and must be set from the main
    thread — confirm all callers satisfy this.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            def _handle_timeout(signum, frame):
                # Load config lazily; it may not be ready at call time.
                config = get_config()
                if not config.ready:
                    config.load()
                message = {
                    "subject": "Idle Experiment.",
                    "body": idle_template.format(
                        app_id=config.get("id"), minutes_so_far=round(seconds / 60)
                    ),
                }
                log("Reporting problem with idle experiment...")
                get_messenger(config).send(message)
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                # Cancel the pending alarm whether func succeeded or raised.
                signal.alarm(0)
            return result
        return wraps(func)(wrapper)
    return decorator
|
python
|
{
"resource": ""
}
|
q13233
|
verify_id
|
train
|
def verify_id(ctx, param, app):
    """Verify the experiment id.

    Click callback: rejects a missing app id (TypeError) and an id that
    still carries the "dlgr-" prefix (ValueError); otherwise returns the
    id unchanged.
    """
    if app is None:
        raise TypeError("Select an experiment using the --app parameter.")
    if app.startswith("dlgr-"):
        raise ValueError(
            "The --app parameter requires the full "
            "UUID beginning with {}-...".format(app[5:13])
        )
    return app
|
python
|
{
"resource": ""
}
|
q13234
|
verify_package
|
train
|
def verify_package(verbose=True):
    """Perform a series of checks on the current directory to verify that
    it's a valid Dallinger experiment.

    All checks are run (no short-circuit); returns True only if every
    one passes.
    """
    checks = (
        verify_directory,
        verify_experiment_module,
        verify_config,
        verify_no_conflicts,
    )
    results = [check(verbose) for check in checks]
    return all(results)
|
python
|
{
"resource": ""
}
|
q13235
|
require_exp_directory
|
train
|
def require_exp_directory(f):
    """Decorator to verify that a command is run inside a valid Dallinger
    experiment directory.

    Raises click.UsageError before invoking the wrapped command if the
    directory check fails.
    """
    error = "The current directory is not a valid Dallinger experiment."
    @wraps(f)
    def wrapper(**kwargs):
        if verify_directory(kwargs.get("verbose")):
            return f(**kwargs)
        raise click.UsageError(error)
    return wrapper
|
python
|
{
"resource": ""
}
|
q13236
|
dallinger
|
train
|
def dallinger():
    """Dallinger command-line utility.

    Entry point: configures logging from the bundled logging.ini before
    any subcommand runs.
    """
    from logging.config import fileConfig
    logging_ini = os.path.join(os.path.dirname(__file__), "logging.ini")
    fileConfig(logging_ini, disable_existing_loggers=False)
|
python
|
{
"resource": ""
}
|
q13237
|
qualify
|
train
|
def qualify(workers, qualification, value, by_name, notify, sandbox):
    """Assign a qualification to 1 or more workers.

    :param workers: sequence of worker IDs.
    :param qualification: a qualification ID, or a name if ``by_name``.
    :param value: the integer score to assign.
    :param by_name: look up the qualification by name instead of ID.
    :param notify: whether MTurk should notify the workers.
    :param sandbox: target the MTurk sandbox instead of production.
    """
    if not (workers and qualification and value):
        raise click.BadParameter(
            "Must specify a qualification ID, value/score, and at least one worker ID"
        )
    mturk = _mturk_service_from_config(sandbox)
    if by_name:
        # Resolve the human-readable name to a qualification ID.
        result = mturk.get_qualification_type_by_name(qualification)
        if result is None:
            raise click.BadParameter(
                'No qualification with name "{}" exists.'.format(qualification)
            )
        qid = result["id"]
    else:
        qid = qualification
    click.echo(
        "Assigning qualification {} with value {} to {} worker{}...".format(
            qid, value, len(workers), "s" if len(workers) > 1 else ""
        )
    )
    for worker in workers:
        # NOTE(review): relies on mturk.set_qualification_score, which is
        # not among the service methods shown here — confirm it exists.
        if mturk.set_qualification_score(qid, worker, int(value), notify=notify):
            click.echo("{} OK".format(worker))
    # print out the current set of workers with the qualification
    results = list(mturk.get_workers_with_qualification(qid))
    click.echo("{} workers with qualification {}:".format(len(results), qid))
    for score, count in Counter([r["score"] for r in results]).items():
        click.echo("{} with value {}".format(count, score))
|
python
|
{
"resource": ""
}
|
q13238
|
revoke
|
train
|
def revoke(workers, qualification, by_name, reason, sandbox):
    """Revoke a qualification from 1 or more workers.

    :param workers: sequence of worker IDs.
    :param qualification: a qualification ID, or a name if ``by_name``.
    :param by_name: look up the qualification by name instead of ID.
    :param reason: reason text passed to MTurk for the revocation.
    :param sandbox: target the MTurk sandbox instead of production.
    """
    if not (workers and qualification):
        raise click.BadParameter(
            "Must specify a qualification ID or name, and at least one worker ID"
        )
    mturk = _mturk_service_from_config(sandbox)
    if by_name:
        # Resolve the human-readable name to a qualification ID.
        result = mturk.get_qualification_type_by_name(qualification)
        if result is None:
            raise click.BadParameter(
                'No qualification with name "{}" exists.'.format(qualification)
            )
        qid = result["id"]
    else:
        qid = qualification
    # Revocation emails the workers, so require explicit confirmation.
    if not click.confirm(
        '\n\nYou are about to revoke qualification "{}" '
        "for these workers:\n\t{}\n\n"
        "This will send an email to each of them from Amazon MTurk. "
        "Continue?".format(qid, "\n\t".join(workers))
    ):
        click.echo("Aborting...")
        return
    for worker in workers:
        if mturk.revoke_qualification(qid, worker, reason):
            click.echo(
                'Revoked qualification "{}" from worker "{}"'.format(qid, worker)
            )
    # print out the current set of workers with the qualification
    results = list(mturk.get_workers_with_qualification(qid))
    click.echo(
        'There are now {} workers with qualification "{}"'.format(len(results), qid)
    )
|
python
|
{
"resource": ""
}
|
q13239
|
hits
|
train
|
def hits(app, sandbox):
    """List hits for an experiment id."""
    service = _mturk_service_from_config(sandbox)
    hit_list = list(_current_hits(service, app))
    joined_ids = ", ".join(h["id"] for h in hit_list)
    out = Output()
    out.log(
        "Found {} hits for this experiment id: {}".format(len(hit_list), joined_ids)
    )
|
python
|
{
"resource": ""
}
|
q13240
|
expire
|
train
|
def expire(app, sandbox, exit=True):
    """Expire hits for an experiment id.

    :param app: the experiment/application id to look up HITs for.
    :param sandbox: target the MTurk sandbox instead of production.
    :param exit: when True (default), exit the process with status 1 if
        no HIT was successfully expired.
    """
    success = []
    failures = []
    service = _mturk_service_from_config(sandbox)
    hits = _current_hits(service, app)
    for hit in hits:
        hit_id = hit["id"]
        try:
            service.expire_hit(hit_id)
            success.append(hit_id)
        except MTurkServiceException:
            # Keep going; report the failures in bulk below.
            failures.append(hit_id)
    out = Output()
    if success:
        out.log("Expired {} hits: {}".format(len(success), ", ".join(success)))
    if failures:
        out.log(
            "Could not expire {} hits: {}".format(len(failures), ", ".join(failures))
        )
    if not success and not failures:
        out.log("No hits found for this application.")
        if not sandbox:
            # Hint at the most common cause: the HITs live in the sandbox.
            out.log(
                "If this experiment was run in the MTurk sandbox, use: "
                "`dallinger expire --sandbox --app {}`".format(app)
            )
    if exit and not success:
        sys.exit(1)
|
python
|
{
"resource": ""
}
|
q13241
|
destroy
|
train
|
def destroy(ctx, app, expire_hit, sandbox):
    """Tear down an experiment server.

    Optionally expires any outstanding MTurk HITs first, then destroys
    the Heroku app.
    """
    if expire_hit:
        ctx.invoke(expire, app=app, sandbox=sandbox, exit=False)
    heroku_app = HerokuApp(app)
    heroku_app.destroy()
|
python
|
{
"resource": ""
}
|
q13242
|
monitor
|
train
|
def monitor(app):
    """Set up application monitoring.

    Opens the Heroku dashboard, the MTurk HIT manager, the app logs and
    the database, then loops printing an experiment summary every 10
    seconds until ``_keep_running()`` returns False.
    """
    heroku_app = HerokuApp(dallinger_uid=app)
    webbrowser.open(heroku_app.dashboard_url)
    webbrowser.open("https://requester.mturk.com/mturk/manageHITs")
    heroku_app.open_logs()
    # NOTE(review): the `open` command is macOS-specific — confirm the
    # platform assumption.
    check_call(["open", heroku_app.db_uri])
    while _keep_running():
        summary = get_summary(app)
        click.clear()
        click.echo(header)
        click.echo("\nExperiment {}\n".format(app))
        click.echo(summary)
        time.sleep(10)
|
python
|
{
"resource": ""
}
|
q13243
|
bot
|
train
|
def bot(app, debug):
    """Run the experiment bot.

    If ``debug`` is truthy it is used directly as the experiment URL;
    otherwise an ad URL for the deployed Heroku app is constructed with
    freshly generated worker/hit/assignment ids in sandbox mode.
    """
    if debug is None:
        verify_id(None, None, app)
    # NOTE(review): `id` shadows the builtin and appears unused below.
    (id, tmp) = setup_experiment(log)
    if debug:
        url = debug
    else:
        heroku_app = HerokuApp(dallinger_uid=app)
        worker = generate_random_id()
        hit = generate_random_id()
        assignment = generate_random_id()
        ad_url = "{}/ad".format(heroku_app.url)
        ad_parameters = "assignmentId={}&hitId={}&workerId={}&mode=sandbox"
        ad_parameters = ad_parameters.format(assignment, hit, worker)
        url = "{}?{}".format(ad_url, ad_parameters)
    bot = bot_factory(url)
    bot.run_experiment()
|
python
|
{
"resource": ""
}
|
q13244
|
getdrawings
|
train
|
def getdrawings():
    """Get all the drawings.

    Decodes the JSON contents of every Info and returns them as a
    ``drawings`` JSON response.
    """
    sketches = [json.loads(info.contents) for info in Info.query.all()]
    return jsonify(drawings=sketches)
|
python
|
{
"resource": ""
}
|
q13245
|
inject_experiment
|
train
|
def inject_experiment():
    """Inject experiment and environment variables into the template context."""
    exp = Experiment(session)
    return dict(experiment=exp, env=os.environ)
|
python
|
{
"resource": ""
}
|
q13246
|
consent
|
train
|
def consent():
    """Return the consent form. Here for backwards-compatibility with 2.x."""
    config = _config()
    args = request.args
    return render_template(
        "consent.html",
        hit_id=args["hit_id"],
        assignment_id=args["assignment_id"],
        worker_id=args["worker_id"],
        mode=config.get("mode"),
    )
|
python
|
{
"resource": ""
}
|
q13247
|
request_parameter
|
train
|
def request_parameter(parameter, parameter_type=None, default=None, optional=False):
    """Get a parameter from a request.

    parameter is the name of the parameter you are looking for.
    parameter_type is the type the parameter should have: None (return
    as-is), "int", "known_class" (resolved via the experiment's
    ``known_classes``), or "bool" (the literal strings "True"/"False").
    default is the value the parameter takes if it has not been passed;
    optional=True returns None instead of an error when absent.
    If the parameter is not found and no default is specified,
    or if the parameter is found but is of the wrong type,
    then an error Response object is returned instead of the value —
    callers must check for that.
    """
    exp = Experiment(session)
    # get the parameter
    try:
        value = request.values[parameter]
    except KeyError:
        # if it isnt found use the default, or return an error Response
        if default is not None:
            return default
        elif optional:
            return None
        else:
            msg = "{} {} request, {} not specified".format(
                request.url, request.method, parameter
            )
            return error_response(error_type=msg)
    # check the parameter type
    if parameter_type is None:
        # if no parameter_type is required, return the parameter as is
        return value
    elif parameter_type == "int":
        # if int is required, convert to an int
        try:
            value = int(value)
            return value
        except ValueError:
            msg = "{} {} request, non-numeric {}: {}".format(
                request.url, request.method, parameter, value
            )
            return error_response(error_type=msg)
    elif parameter_type == "known_class":
        # if its a known class check against the known classes
        try:
            value = exp.known_classes[value]
            return value
        except KeyError:
            msg = "{} {} request, unknown_class: {} for parameter {}".format(
                request.url, request.method, value, parameter
            )
            return error_response(error_type=msg)
    elif parameter_type == "bool":
        # if its a boolean, convert to a boolean
        if value in ["True", "False"]:
            return value == "True"
        else:
            msg = "{} {} request, non-boolean {}: {}".format(
                request.url, request.method, parameter, value
            )
            return error_response(error_type=msg)
    else:
        msg = "/{} {} request, unknown parameter type: {} for parameter {}".format(
            request.url, request.method, parameter_type, parameter
        )
        return error_response(error_type=msg)
|
python
|
{
"resource": ""
}
|
q13248
|
node_vectors
|
train
|
def node_vectors(node_id):
    """Return the vectors of a node.

    The node id comes from the url. ``direction`` (incoming/outgoing/all)
    and ``failed`` (True/False/all) may be passed as request parameters.
    """
    exp = Experiment(session)

    # Validate the query parameters; each may itself be an error Response.
    direction = request_parameter(parameter="direction", default="all")
    failed = request_parameter(parameter="failed", parameter_type="bool", default=False)
    for value in (direction, failed):
        if type(value) == Response:
            return value

    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/vectors, node does not exist")

    try:
        vectors = node.vectors(direction=direction, failed=failed)
        # Give the experiment a chance to react to the request.
        exp.vector_get_request(node=node, vectors=vectors)
        session.commit()
    except Exception:
        return error_response(
            error_type="/node/vectors GET server error",
            status=403,
            participant=node.participant,
        )

    return success_response(vectors=[vector.__json__() for vector in vectors])
|
python
|
{
"resource": ""
}
|
q13249
|
node_received_infos
|
train
|
def node_received_infos(node_id):
    """Return the infos a node has been sent and has received.

    The node id comes from the url; ``info_type`` may optionally be passed.
    """
    exp = Experiment(session)

    # info_type may come back as an error Response from validation.
    info_type = request_parameter(
        parameter="info_type", parameter_type="known_class", default=models.Info
    )
    if type(info_type) == Response:
        return info_type

    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/node/infos, node {} does not exist".format(node_id)
        )

    infos = node.received_infos(type=info_type)
    try:
        # Give the experiment a chance to react to the request.
        exp.info_get_request(node=node, infos=infos)
        session.commit()
    except Exception:
        return error_response(
            error_type="info_get_request error",
            status=403,
            participant=node.participant,
        )

    return success_response(infos=[info.__json__() for info in infos])
|
python
|
{
"resource": ""
}
|
q13250
|
tracking_event_post
|
train
|
def tracking_event_post(node_id):
    """Queue a TrackingEvent worker job for the given node."""
    details = request_parameter(parameter="details", optional=True)
    if details:
        details = loads(details)

    # The node must exist before we enqueue anything for it.
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info POST, node does not exist")

    db.logger.debug(
        "rq: Queueing %s with for node: %s for worker_function",
        "TrackingEvent",
        node_id,
    )
    q.enqueue(
        worker_function, "TrackingEvent", None, None, node_id=node_id, details=details
    )
    return success_response(details=details)
|
python
|
{
"resource": ""
}
|
q13251
|
info_post
|
train
|
def info_post(node_id):
    """Create an info originating from a node.

    The node id comes from the url; ``contents`` is required and
    ``info_type`` is optional. A custom Info subclass must be registered
    in the experiment's known_classes to be usable here.
    """
    # Validate parameters; each may come back as an error Response.
    contents = request_parameter(parameter="contents")
    info_type = request_parameter(
        parameter="info_type", parameter_type="known_class", default=models.Info
    )
    for value in (contents, info_type):
        if type(value) == Response:
            return value

    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info POST, node does not exist")

    exp = Experiment(session)
    try:
        info = info_type(origin=node, contents=contents)
        assign_properties(info)
        # Give the experiment a chance to react to the new info.
        exp.info_post_request(node=node, info=info)
        session.commit()
    except Exception:
        return error_response(
            error_type="/info POST server error",
            status=403,
            participant=node.participant,
        )

    return success_response(info=info.__json__())
|
python
|
{
"resource": ""
}
|
q13252
|
node_transmissions
|
train
|
def node_transmissions(node_id):
    """Return the transmissions of a node.

    The node id comes from the url; ``direction`` (to/from/all) and
    ``status`` (all/pending/received) may be passed as parameters.
    """
    exp = Experiment(session)

    # Validate parameters; each may come back as an error Response.
    direction = request_parameter(parameter="direction", default="incoming")
    status = request_parameter(parameter="status", default="all")
    for value in (direction, status):
        if type(value) == Response:
            return value

    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/transmissions, node does not exist")

    transmissions = node.transmissions(direction=direction, status=status)
    try:
        if direction in ["incoming", "all"] and status in ["pending", "all"]:
            # Fetching pending incoming transmissions marks them received.
            node.receive()
            session.commit()
        # Give the experiment a chance to react to the request.
        exp.transmission_get_request(node=node, transmissions=transmissions)
        session.commit()
    except Exception:
        return error_response(
            error_type="/node/transmissions GET server error",
            status=403,
            participant=node.participant,
        )

    return success_response(transmissions=[t.__json__() for t in transmissions])
|
python
|
{
"resource": ""
}
|
q13253
|
check_for_duplicate_assignments
|
train
|
def check_for_duplicate_assignments(participant):
    """Queue abandonment for older working participants that share this
    participant's assignment_id.
    """
    same_assignment = models.Participant.query.filter_by(
        assignment_id=participant.assignment_id
    ).all()
    for other in same_assignment:
        if other.id != participant.id and other.status == "working":
            q.enqueue(worker_function, "AssignmentAbandoned", None, other.id)
|
python
|
{
"resource": ""
}
|
q13254
|
worker_failed
|
train
|
def worker_failed():
    """Mark a worker as failed. Used by bots only for now."""
    participant_id = request.args.get("participant_id")

    # The caller must tell us which participant failed.
    if not participant_id:
        return error_response(
            error_type="bad request", error_text="participantId parameter is required"
        )

    try:
        _worker_failed(participant_id)
    except KeyError:
        # No participant with that id exists.
        return error_response(
            error_type="ParticipantId not found: {}".format(participant_id)
        )

    return success_response(
        field="status", data="success", request_type="worker failed"
    )
|
python
|
{
"resource": ""
}
|
q13255
|
HerokuInfo.all_apps
|
train
|
def all_apps(self):
    """Return JSON details for all Heroku apps visible to this account.

    The previous docstring ("Capture a backup of the app") was a
    copy-paste error: this runs ``heroku apps --json`` and returns the
    parsed listing, scoped to the configured team if one is set.
    """
    cmd = ["heroku", "apps", "--json"]
    if self.team:
        cmd.extend(["--team", self.team])
    return json.loads(self._result(cmd))
|
python
|
{
"resource": ""
}
|
q13256
|
HerokuApp.bootstrap
|
train
|
def bootstrap(self):
    """Create the Heroku app and its local git remote.

    Call this once from inside the local repo you intend to deploy.
    """
    cmd = ["heroku", "apps:create", self.name, "--buildpack", "heroku/python"]
    if self.team:
        # Assign the new app to the configured team.
        cmd.extend(["--team", self.team])
    self._run(cmd)

    # Record the host URL and ownership metadata on the app.
    self.set_multiple(
        HOST=self.url, CREATOR=self.login_name(), DALLINGER_UID=self.dallinger_uid
    )
|
python
|
{
"resource": ""
}
|
q13257
|
HerokuApp.addon
|
train
|
def addon(self, name):
    """Provision a Heroku addon for this app."""
    self._run(["heroku", "addons:create", name, "--app", self.name])
|
python
|
{
"resource": ""
}
|
q13258
|
HerokuApp.addon_destroy
|
train
|
def addon_destroy(self, name):
    """Remove an addon from this app, auto-confirming the prompt."""
    cmd = [
        "heroku",
        "addons:destroy",
        name,
        "--app",
        self.name,
        "--confirm",
        self.name,
    ]
    self._run(cmd)
|
python
|
{
"resource": ""
}
|
q13259
|
HerokuApp.buildpack
|
train
|
def buildpack(self, url):
    """Register an extra buildpack for the app by URL."""
    self._run(["heroku", "buildpacks:add", url, "--app", self.name])
|
python
|
{
"resource": ""
}
|
q13260
|
HerokuApp.destroy
|
train
|
def destroy(self):
    """Destroy the app and all its add-ons, returning the command output."""
    cmd = ["heroku", "apps:destroy", "--app", self.name, "--confirm", self.name]
    return self._result(cmd)
|
python
|
{
"resource": ""
}
|
q13261
|
HerokuApp.get
|
train
|
def get(self, key, subcommand="config:get"):
    """Look up an app config value by name via the given subcommand."""
    return self._result(["heroku", subcommand, key, "--app", self.name])
|
python
|
{
"resource": ""
}
|
q13262
|
HerokuApp.pg_wait
|
train
|
def pg_wait(self):
    """Block until the Heroku Postgres database is ready.

    Retries ``heroku pg:wait`` up to 10 times, sleeping 5 seconds between
    attempts, and re-raises the last failure when attempts run out.
    """
    attempts_left = 10
    while attempts_left:
        attempts_left -= 1
        try:
            self._run(["heroku", "pg:wait", "--app", self.name])
        except subprocess.CalledProcessError:
            time.sleep(5)
            if not attempts_left:
                raise
        else:
            # The DB answered; we're done.
            break
|
python
|
{
"resource": ""
}
|
q13263
|
HerokuApp.restore
|
train
|
def restore(self, url):
    """Restore the remote database from a backup at the given URL."""
    cmd = [
        "heroku",
        "pg:backups:restore",
        "{}".format(url),
        "DATABASE_URL",
        "--app",
        self.name,
        "--confirm",
        self.name,
    ]
    self._run(cmd)
|
python
|
{
"resource": ""
}
|
q13264
|
HerokuApp.scale_up_dyno
|
train
|
def scale_up_dyno(self, process, quantity, size):
    """Scale ``process`` up to ``quantity`` dynos of the given ``size``."""
    spec = "{}={}:{}".format(process, quantity, size)
    self._run(["heroku", "ps:scale", spec, "--app", self.name])
|
python
|
{
"resource": ""
}
|
q13265
|
HerokuApp.scale_down_dynos
|
train
|
def scale_down_dynos(self):
    """Turn off web and worker dynos, plus the clock process when active."""
    processes = ["web", "worker"] + (["clock"] if self.clock_is_on else [])
    for process in processes:
        self.scale_down_dyno(process)
|
python
|
{
"resource": ""
}
|
q13266
|
HerokuLocalWrapper.start
|
train
|
def start(self, timeout_secs=60):
    """Start the heroku local subprocess group and verify that
    it has started successfully.

    The subprocess output is checked for a line matching 'success_regex'
    to indicate success. If no match is seen after 'timeout_secs',
    a HerokuTimeoutError is raised. Returns True on successful startup;
    returns None immediately if the process is already running.
    """

    def _handle_timeout(signum, frame):
        # Bug fix: the format string had only one placeholder, so the
        # self._record argument was silently dropped from the message.
        # Include the captured output as clearly intended.
        raise HerokuTimeoutError(
            "Failed to start after {} seconds: {}".format(timeout_secs, self._record)
        )

    if self.is_running:
        self.out.log("Local Heroku is already running.")
        return

    # Arm an alarm so a hung startup raises HerokuTimeoutError.
    signal.signal(signal.SIGALRM, _handle_timeout)
    signal.alarm(timeout_secs)
    self._boot()
    try:
        success = self._verify_startup()
    finally:
        # Always disarm the alarm, even if verification raised.
        signal.alarm(0)
    if not success:
        self.stop(signal.SIGKILL)
        raise HerokuStartupError(
            "Failed to start for unknown reason: {}".format(self._record)
        )
    return True
|
python
|
{
"resource": ""
}
|
q13267
|
HerokuLocalWrapper.stop
|
train
|
def stop(self, signal=None):
    """Stop the heroku local subprocess and all of its children."""
    # NOTE: the parameter deliberately shadows the stdlib ``signal``
    # module within this method; default to the configured int signal.
    sig = signal or self.int_signal
    self.out.log("Cleaning up local Heroku process...")

    process = self._process
    if process is None:
        self.out.log("No local Heroku process was running.")
        return

    self._process = None
    try:
        # Kill the whole process group, not just the parent.
        os.killpg(os.getpgid(process.pid), sig)
    except OSError:
        self.out.log("Local Heroku was already terminated.")
        self.out.log(traceback.format_exc())
    else:
        self.out.log("Local Heroku process terminated.")
|
python
|
{
"resource": ""
}
|
q13268
|
HerokuLocalWrapper.monitor
|
train
|
def monitor(self, listener):
    """Feed subprocess output lines to ``listener`` until it stops us."""
    for line in self._stream():
        self._record.append(line)
        if self.verbose:
            self.out.blather(line)
        # The listener signals MONITOR_STOP to end the relay.
        if listener(line) is self.MONITOR_STOP:
            break
|
python
|
{
"resource": ""
}
|
q13269
|
load
|
train
|
def load():
    """Import and return the active experiment class."""
    initialize_experiment_package(os.getcwd())
    try:
        try:
            from dallinger_experiment import experiment
        except ImportError:
            # Fall back to the legacy module name.
            from dallinger_experiment import dallinger_experiment as experiment

        # Return the first class whose immediate base looks like an
        # Experiment subclass; treat "none found" as an import failure.
        for _, cls in inspect.getmembers(experiment, inspect.isclass):
            if "Experiment" in cls.__bases__[0].__name__:
                return cls
        raise ImportError
    except ImportError:
        logger.error("Could not import experiment.")
        raise
|
python
|
{
"resource": ""
}
|
q13270
|
Experiment.setup
|
train
|
def setup(self):
    """Create the practice and experiment networks on first run."""
    if self.networks():
        # Networks already exist; nothing to do.
        return
    roles = ["practice"] * self.practice_repeats + [
        "experiment"
    ] * self.experiment_repeats
    for role in roles:
        network = self.create_network()
        network.role = role
        self.session.add(network)
    self.session.commit()
|
python
|
{
"resource": ""
}
|
q13271
|
Experiment.networks
|
train
|
def networks(self, role="all", full="all"):
    """Return the experiment's networks, filtered by role and fullness."""
    if full not in ["all", True, False]:
        raise ValueError(
            "full must be boolean or all, it cannot be {}".format(full)
        )
    # Build filter criteria only for the dimensions actually constrained.
    criteria = {}
    if role != "all":
        criteria["role"] = role
    if full != "all":
        criteria["full"] = full
    if criteria:
        return Network.query.filter_by(**criteria).all()
    return Network.query.all()
|
python
|
{
"resource": ""
}
|
q13272
|
Experiment.get_network_for_participant
|
train
|
def get_network_for_participant(self, participant):
    """Find a network for a participant.

    Returns None when no network is available. By default a participant
    joins each network at most once, and practice networks are always
    offered before experiment networks.
    """
    key = participant.id
    available = Network.query.filter_by(full=False).order_by(Network.id).all()
    seen_network_ids = [
        node.network_id
        for node in Node.query.with_entities(Node.network_id)
        .filter_by(participant_id=participant.id)
        .all()
    ]
    legal_networks = [net for net in available if net.id not in seen_network_ids]
    if not legal_networks:
        self.log("No networks available, returning None", key)
        return None

    self.log(
        "{} networks out of {} available".format(
            len(legal_networks), (self.practice_repeats + self.experiment_repeats)
        ),
        key,
    )
    practice_networks = [net for net in legal_networks if net.role == "practice"]
    if practice_networks:
        # Practice networks take priority; use the first one.
        chosen_network = practice_networks[0]
        self.log(
            "Practice networks available."
            "Assigning participant to practice network {}.".format(
                chosen_network.id
            ),
            key,
        )
    else:
        chosen_network = self.choose_network(legal_networks, participant)
        self.log(
            "No practice networks available."
            "Assigning participant to experiment network {}".format(
                chosen_network.id
            ),
            key,
        )
    return chosen_network
|
python
|
{
"resource": ""
}
|
q13273
|
Experiment.recruit
|
train
|
def recruit(self):
    """Close recruitment once every network is full.

    Runs whenever a participant successfully completes the experiment
    (failed participants are replaced automatically); by default no
    additional recruitment happens here.
    """
    if self.networks(full=False):
        # Some networks still have room; keep recruitment open.
        return
    self.log("All networks full: closing recruitment", "-----")
    self.recruiter.close_recruitment()
|
python
|
{
"resource": ""
}
|
q13274
|
Experiment.log_summary
|
train
|
def log_summary(self):
    """Log and return counts of the participants' status codes."""
    rows = Participant.query.with_entities(Participant.status).all()
    counts = Counter(row.status for row in rows)
    sorted_counts = sorted(counts.items(), key=itemgetter(0))
    self.log("Status summary: {}".format(str(sorted_counts)))
    return sorted_counts
|
python
|
{
"resource": ""
}
|
q13275
|
Experiment.save
|
train
|
def save(self, *objects):
    """Persist the given objects to the database.

    Only networks and participants need explicit saving.
    """
    if objects:
        self.session.add_all(objects)
    self.session.commit()
|
python
|
{
"resource": ""
}
|
q13276
|
Experiment.fail_participant
|
train
|
def fail_participant(self, participant):
    """Fail every non-failed node belonging to the participant."""
    live_nodes = Node.query.filter_by(
        participant_id=participant.id, failed=False
    ).all()
    for node in live_nodes:
        node.fail()
|
python
|
{
"resource": ""
}
|
q13277
|
Experiment.run
|
train
|
def run(self, exp_config=None, app_id=None, bot=False, **kwargs):
    """Deploy and run an experiment.

    The exp_config object is either a dictionary or a
    ``localconfig.LocalConfig`` object with parameters
    specific to the experiment run grouped by section.

    Returns the collected experiment data; the runner's status is
    updated at each stage and set to "Errored" on failure.
    """
    import dallinger as dlgr

    app_id = self.make_uuid(app_id)

    # Bot runs force the bot recruiter regardless of other settings.
    if bot:
        kwargs["recruiter"] = "bots"

    self.app_id = app_id
    self.exp_config = exp_config or kwargs

    self.update_status("Starting")
    try:
        if self.exp_config.get("mode") == "debug":
            # Debug mode runs locally via the CLI debug command.
            dlgr.command_line.debug.callback(
                verbose=True, bot=bot, proxy=None, exp_config=self.exp_config
            )
        else:
            # All other modes deploy via the sandbox/shared setup path.
            dlgr.deployment.deploy_sandbox_shared_setup(
                dlgr.command_line.log,
                app=app_id,
                verbose=self.verbose,
                exp_config=self.exp_config,
            )
    except Exception:
        self.update_status("Errored")
        raise
    else:
        self.update_status("Running")
    self._await_completion()
    self.update_status("Retrieving data")
    data = self.retrieve_data()
    self.update_status("Completed")
    return data
|
python
|
{
"resource": ""
}
|
q13278
|
Experiment.collect
|
train
|
def collect(self, app_id, exp_config=None, bot=False, **kwargs):
    """Collect data for the provided experiment id.

    The ``app_id`` parameter must be a valid UUID.
    If an existing data file is found for the UUID it will
    be returned, otherwise - if the UUID is not already registered -
    the experiment will be run and data collected.

    See :meth:`~Experiment.run` method for other parameters.
    """
    try:
        # Happy path: an export already exists for this id.
        results = data_load(app_id)
        self.log(
            "Data found for experiment {}, retrieving.".format(app_id),
            key="Retrieve:",
        )
        return results
    except IOError:
        self.log(
            "Could not fetch data for id: {}, checking registry".format(app_id),
            key="Retrieve:",
        )
    exp_config = exp_config or {}
    if is_registered(app_id):
        # Registered but unfetchable means we lack access, not that the
        # experiment should be re-run.
        raise RuntimeError(
            "The id {} is registered, ".format(app_id)
            + "but you do not have permission to access to the data"
        )
    elif kwargs.get("mode") == "debug" or exp_config.get("mode") == "debug":
        raise RuntimeError("No remote or local data found for id {}".format(app_id))
    try:
        # Only version-4 UUIDs are acceptable as new experiment ids.
        assert isinstance(uuid.UUID(app_id, version=4), uuid.UUID)
    except (ValueError, AssertionError):
        raise ValueError("Invalid UUID supplied {}".format(app_id))
    self.log(
        "{} appears to be a new experiment id, running experiment.".format(app_id),
        key="Retrieve:",
    )
    return self.run(exp_config, app_id, bot, **kwargs)
|
python
|
{
"resource": ""
}
|
q13279
|
Experiment.retrieve_data
|
train
|
def retrieve_data(self):
    """Export and return the data for the running experiment."""
    # Debug runs export from the local database rather than Heroku.
    is_debug = self.exp_config.get("mode") == "debug"
    filename = export(self.app_id, local=is_debug)
    logger.debug("Data exported to %s" % filename)
    return Data(filename)
|
python
|
{
"resource": ""
}
|
q13280
|
Experiment.end_experiment
|
train
|
def end_experiment(self):
    """Tear down a running experiment's Heroku app (no-op in debug mode)."""
    mode = self.exp_config.get("mode")
    if mode != "debug":
        HerokuApp(self.app_id).destroy()
    return True
|
python
|
{
"resource": ""
}
|
q13281
|
Scrubber._ipython_display_
|
train
|
def _ipython_display_(self):
    """Render the scrubber as a Jupyter Notebook widget."""
    # Imported lazily so IPython is only required inside a notebook.
    from IPython.display import display

    self.build_widget()
    display(self.widget())
|
python
|
{
"resource": ""
}
|
q13282
|
from_config
|
train
|
def from_config(config):
    """Return a Recruiter instance based on the configuration.

    Default is HotAirRecruiter in debug mode (unless we're using
    the bot recruiter, which can be used in debug mode)
    and the MTurkRecruiter in other modes.
    """
    debug_mode = config.get("mode") == "debug"
    name = config.get("recruiter", None)

    # Special case 1: replay mode never uses a configured recruiter.
    if config.get("replay"):
        return HotAirRecruiter()

    recruiter = by_name(name) if name is not None else None

    # Special case 2: bot and multi recruiters may run in any mode
    # (debug or not), so they trump everything else:
    if isinstance(recruiter, (BotRecruiter, MultiRecruiter)):
        return recruiter

    # Special case 3: without bots, debug mode ignores any configured
    # recruiter:
    if debug_mode:
        return HotAirRecruiter()

    if recruiter is not None:
        return recruiter

    # A name was configured but by_name produced nothing usable.
    if name and recruiter is None:
        raise NotImplementedError("No such recruiter {}".format(name))

    # Default outside debug mode:
    return MTurkRecruiter()
|
python
|
{
"resource": ""
}
|
q13283
|
CLIRecruiter.recruit
|
train
|
def recruit(self, n=1):
    """Generate experiment ad URLs and print them to the console."""
    logger.info("Recruiting {} CLI participants".format(n))
    template = "{}/ad?recruiter={}&assignmentId={}&hitId={}&workerId={}&mode={}"
    urls = []
    for _ in range(n):
        # Random ids stand in for MTurk's assignment/hit/worker ids.
        ad_url = template.format(
            get_base_url(),
            self.nickname,
            generate_random_id(),
            generate_random_id(),
            generate_random_id(),
            self._get_mode(),
        )
        logger.info("{} {}".format(NEW_RECRUIT_LOG_PREFIX, ad_url))
        urls.append(ad_url)
    return urls
|
python
|
{
"resource": ""
}
|
q13284
|
CLIRecruiter.reward_bonus
|
train
|
def reward_bonus(self, assignment_id, amount, reason):
    """Log the bonus that would be paid for the assignment."""
    message = 'Award ${} for assignment {}, with reason "{}"'.format(
        amount, assignment_id, reason
    )
    logger.info(message)
|
python
|
{
"resource": ""
}
|
q13285
|
SimulatedRecruiter.open_recruitment
|
train
|
def open_recruitment(self, n=1):
    """Open recruitment for ``n`` simulated participants."""
    logger.info("Opening Sim recruitment for {} participants".format(n))
    items = self.recruit(n)
    return {"items": items, "message": "Simulated recruitment only"}
|
python
|
{
"resource": ""
}
|
q13286
|
MTurkRecruiter.open_recruitment
|
train
|
def open_recruitment(self, n=1):
    """Create an MTurk HIT for ``n`` assignments and return its preview URL."""
    logger.info("Opening MTurk recruitment for {} participants".format(n))
    if self.is_in_progress:
        raise MTurkRecruiterException(
            "Tried to open_recruitment on already open recruiter."
        )
    if self.hit_domain is None:
        raise MTurkRecruiterException("Can't run a HIT from localhost")

    self.mturkservice.check_credentials()
    if self.config.get("assign_qualifications"):
        self._create_mturk_qualifications()

    # All HIT attributes come from the experiment configuration.
    hit_info = self.mturkservice.create_hit(
        max_assignments=n,
        title=self.config.get("title"),
        description=self.config.get("description"),
        keywords=self._config_to_list("keywords"),
        reward=self.config.get("base_payment"),
        duration_hours=self.config.get("duration"),
        lifetime_days=self.config.get("lifetime"),
        ad_url=self.ad_url,
        notification_url=self.config.get("notification_url"),
        approve_requirement=self.config.get("approve_requirement"),
        us_only=self.config.get("us_only"),
        blacklist=self._config_to_list("qualification_blacklist"),
        annotation=self.config.get("id"),
    )
    if self.config.get("mode") == "sandbox":
        lookup_url = (
            "https://workersandbox.mturk.com/mturk/preview?groupId={type_id}"
        )
    else:
        lookup_url = "https://worker.mturk.com/mturk/preview?groupId={type_id}"
    return {
        "items": [lookup_url.format(**hit_info)],
        "message": "HIT now published to Amazon Mechanical Turk",
    }
|
python
|
{
"resource": ""
}
|
q13287
|
MTurkRecruiter.recruit
|
train
|
def recruit(self, n=1):
    """Extend the current HIT to recruit ``n`` more participants."""
    logger.info("Recruiting {} MTurk participants".format(n))
    if not self.config.get("auto_recruit"):
        logger.info("auto_recruit is False: recruitment suppressed")
        return
    hit_id = self.current_hit_id()
    if hit_id is None:
        logger.info("no HIT in progress: recruitment aborted")
        return
    duration = self.config.get("duration")
    try:
        return self.mturkservice.extend_hit(
            hit_id, number=n, duration_hours=duration
        )
    except MTurkServiceException as ex:
        # Log and swallow: a failed extension shouldn't crash the caller.
        logger.exception(str(ex))
|
python
|
{
"resource": ""
}
|
q13288
|
MTurkRecruiter.notify_completed
|
train
|
def notify_completed(self, participant):
    """Grant qualification score increments for a finished participant.

    Overrecruited participants are skipped: they haven't actually
    completed the experiment, so leaving them unqualified keeps them
    eligible for future runs. Does nothing if qualifications are off.
    """
    if participant.status == "overrecruited" or not self.qualification_active:
        return
    for name in self.qualifications:
        try:
            self.mturkservice.increment_qualification_score(
                name, participant.worker_id
            )
        except QualificationNotFoundException as ex:
            logger.exception(ex)
|
python
|
{
"resource": ""
}
|
q13289
|
MTurkRecruiter.notify_duration_exceeded
|
train
|
def notify_duration_exceeded(self, participants, reference_time):
    """The participant has exceed the maximum time for the activity,
    defined in the "duration" config value. We need find out the assignment
    status on MTurk and act based on this.
    """
    unsubmitted = []
    for participant in participants:
        summary = ParticipationTime(participant, reference_time, self.config)
        status = self._mturk_status_for(participant)
        # Mirror MTurk's view of the assignment into our local status.
        if status == "Approved":
            participant.status = "approved"
            session.commit()
        elif status == "Rejected":
            participant.status = "rejected"
            session.commit()
        elif status == "Submitted":
            # We missed the original "submitted" notification; replay it
            # and warn the researcher.
            self._resend_submitted_rest_notification_for(participant)
            self._message_researcher(self._resubmitted_msg(summary))
            logger.warning(
                "Error - submitted notification for participant {} missed. "
                "A replacement notification was created and sent, "
                "but proceed with caution.".format(participant.id)
            )
        else:
            # No submission at all: synthesize a notification and collect
            # the participant for the shutdown handling below.
            self._send_notification_missing_rest_notification_for(participant)
            unsubmitted.append(summary)
    if unsubmitted:
        # At least one assignment went missing entirely: stop recruiting.
        self._disable_autorecruit()
        self.close_recruitment()
        pick_one = unsubmitted[0]
        # message the researcher about the one of the participants:
        self._message_researcher(self._cancelled_msg(pick_one))
        # Attempt to force-expire the hit via boto. It's possible
        # that the HIT won't exist if the HIT has been deleted manually.
        try:
            self.mturkservice.expire_hit(pick_one.participant.hit_id)
        except MTurkServiceException as ex:
            logger.exception(ex)
|
python
|
{
"resource": ""
}
|
q13290
|
MTurkRecruiter.reward_bonus
|
train
|
def reward_bonus(self, assignment_id, amount, reason):
    """Reward the Turker for a specified assignment with a bonus."""
    try:
        result = self.mturkservice.grant_bonus(assignment_id, amount, reason)
    except MTurkServiceException as ex:
        # Log and swallow: a failed bonus shouldn't crash the caller.
        logger.exception(str(ex))
    else:
        return result
|
python
|
{
"resource": ""
}
|
q13291
|
MTurkRecruiter._create_mturk_qualifications
|
train
|
def _create_mturk_qualifications(self):
    """Create the MTurk qualification types this run needs.

    The types may already exist, but trying and ignoring duplicates is
    faster than checking first.
    """
    for name, description in self.qualifications.items():
        try:
            self.mturkservice.create_qualification_type(name, description)
        except DuplicateQualificationNameError:
            pass
|
python
|
{
"resource": ""
}
|
q13292
|
BotRecruiter.open_recruitment
|
train
|
def open_recruitment(self, n=1):
    """Start bot recruitment immediately for ``n`` participants."""
    logger.info("Opening Bot recruitment for {} participants".format(n))
    factory = self._get_bot_factory()
    # Build a throwaway bot just to report which class is in use.
    bot_class_name = factory("", "", "").__class__.__name__
    message = "Bot recruitment started using {}".format(bot_class_name)
    return {"items": self.recruit(n), "message": message}
|
python
|
{
"resource": ""
}
|
q13293
|
BotRecruiter.recruit
|
train
|
def recruit(self, n=1):
    """Enqueue ``n`` new bot participants and return their ad URLs."""
    logger.info("Recruiting {} Bot participants".format(n))
    factory = self._get_bot_factory()
    queue = _get_queue()
    urls = []
    for _ in range(n):
        # Random ids stand in for MTurk's worker/hit/assignment ids.
        worker = generate_random_id()
        hit = generate_random_id()
        assignment = generate_random_id()
        ad_parameters = (
            "recruiter={}&assignmentId={}&hitId={}&workerId={}&mode=sandbox"
        )
        ad_parameters = ad_parameters.format(self.nickname, assignment, hit, worker)
        url = "{}/ad?{}".format(get_base_url(), ad_parameters)
        urls.append(url)
        bot = factory(url, assignment_id=assignment, worker_id=worker, hit_id=hit)
        job = queue.enqueue(bot.run_experiment, timeout=self._timeout)
        logger.warning("Created job {} for url {}.".format(job.id, url))
    return urls
|
python
|
{
"resource": ""
}
|
q13294
|
BotRecruiter.notify_duration_exceeded
|
train
|
def notify_duration_exceeded(self, participants, reference_time):
    """Reject bot participants that exceeded the configured duration."""
    for overdue in participants:
        overdue.status = "rejected"
        session.commit()
|
python
|
{
"resource": ""
}
|
q13295
|
MultiRecruiter.parse_spec
|
train
|
def parse_spec(self):
    """Parse the recruiter specification into (name, count) pairs.

    Example: recruiters = bots: 5, mturk: 1
    """
    spec = get_config().get("recruiters")
    return [
        (match.group(1), int(match.group(2)))
        for match in self.SPEC_RE.finditer(spec)
    ]
|
python
|
{
"resource": ""
}
|
q13296
|
MultiRecruiter.recruiters
|
train
|
def recruiters(self, n=1):
    """Iterator that provides recruiters along with the participant
    count to be recruited for up to `n` participants.

    We use the `Recruitment` table in the db to keep track of
    how many recruitments have been requested using each recruiter.
    We'll use the first one from the specification that
    hasn't already reached its quota.
    """
    recruit_count = 0
    while recruit_count <= n:
        # Tally recruitments already requested, keyed by recruiter id.
        counts = dict(
            session.query(Recruitment.recruiter_id, func.count(Recruitment.id))
            .group_by(Recruitment.recruiter_id)
            .all()
        )
        for recruiter_id, target_count in self.spec:
            remaining = 0
            count = counts.get(recruiter_id, 0)
            if count >= target_count:
                # This recruiter quota was reached;
                # move on to the next one.
                counts[recruiter_id] = count - target_count
                continue
            else:
                # Quota is still available; let's use it.
                remaining = target_count - count
                break
        else:
            # Every recruiter in the spec is at quota; stop iterating.
            return
        num_recruits = min(n - recruit_count, remaining)
        # record the recruitments and commit
        for i in range(num_recruits):
            session.add(Recruitment(recruiter_id=recruiter_id))
        session.commit()
        recruit_count += num_recruits
        yield by_name(recruiter_id), num_recruits
|
python
|
{
"resource": ""
}
|
q13297
|
MultiRecruiter.open_recruitment
|
train
|
def open_recruitment(self, n=1):
    """Open recruitment across the configured recruiters.

    Returns the combined experiment URL list plus one message per
    recruiter actually used.
    """
    logger.info("Multi recruitment running for {} participants".format(n))
    recruitments = []
    messages = {}
    remaining = n
    for recruiter, count in self.recruiters(n):
        if not count:
            break
        if recruiter.nickname not in messages:
            # First use of this recruiter: open it and keep its message.
            result = recruiter.open_recruitment(count)
            recruitments.extend(result["items"])
            messages[recruiter.nickname] = result["message"]
        else:
            # Already open: just recruit additional participants.
            recruitments.extend(recruiter.recruit(count))
        remaining -= count
        if remaining <= 0:
            break

    logger.info(
        (
            "Multi-recruited {} out of {} participants, " "using {} recruiters."
        ).format(n - remaining, n, len(messages))
    )
    return {"items": recruitments, "message": "\n".join(messages.values())}
|
python
|
{
"resource": ""
}
|
q13298
|
run_check
|
train
|
def run_check(participants, config, reference_time):
    """Notify recruiters about participants active beyond the allowed time.

    A participant counts as overdue per ParticipationTime (experiment
    duration plus grace period).
    """
    late_by_recruiter = defaultdict(list)
    for participant in participants:
        timeline = ParticipationTime(participant, reference_time, config)
        if timeline.is_overdue:
            print(
                "Error: participant {} with status {} has been playing for too "
                "long - their recruiter will be notified.".format(
                    participant.id, participant.status
                )
            )
            late_by_recruiter[participant.recruiter_id].append(participant)
    for recruiter_id, late in late_by_recruiter.items():
        recruiter = recruiters.by_name(recruiter_id)
        recruiter.notify_duration_exceeded(late, reference_time)
|
python
|
{
"resource": ""
}
|
q13299
|
register
|
train
|
def register(dlgr_id, snapshot=None):
    """Register the experiment using configured services."""
    try:
        config.get("osf_access_token")
    except KeyError:
        # No OSF token configured; nothing to register.
        return
    osf_id = _create_osf_project(dlgr_id)
    _upload_assets_to_OSF(dlgr_id, osf_id)
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.