| code | signature | docstring | loss_without_docstring | loss_with_docstring | factor |
|---|---|---|---|---|---|
| string | string | string | float64 | float64 | float64 |
max_fuzzy_matches_to_check = 100
query = name.upper()
start = time.time()
args = {
"Query": query,
"MustBeRequestable": False,
"MustBeOwnedByCaller": True,
"MaxResults": max_fuzzy_matches_to_check,
}
results = self.mturk.list_qualification_types(**args)["QualificationTypes"]
# This loop is largely for tests, because there's some indexing that
# needs to happen on MTurk for search to work:
while not results and time.time() - start < self.max_wait_secs:
time.sleep(1)
results = self.mturk.list_qualification_types(**args)["QualificationTypes"]
if not results:
return None
qualifications = [self._translate_qtype(r) for r in results]
if len(qualifications) > 1:
for qualification in qualifications:
if qualification["name"].upper() == query:
return qualification
raise MTurkServiceException("{} was not a unique name".format(query))
return qualifications[0]
|
def get_qualification_type_by_name(self, name)
|
Return a Qualification Type by name. If the provided name matches
more than one Qualification, check to see if any of the results
match the provided name exactly. If there's an exact match, return
that Qualification. Otherwise, raise an exception.
| 3.890156
| 3.814928
| 1.019719
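A minimal usage sketch for the lookup above, assuming `service` is an instance of the MTurk service wrapper these methods belong to (the instance name is an assumption):

    # Hypothetical service instance; the match is case-insensitive because
    # both the query and the candidate names are upper-cased before comparison.
    qtype = service.get_qualification_type_by_name("My Qualification")
    if qtype is None:
        print("No matching qualification type.")
    else:
        print(qtype["id"], qtype["name"])  # exact match wins over fuzzy hits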
|
return self._is_ok(
self.mturk.associate_qualification_with_worker(
QualificationTypeId=qualification_id,
WorkerId=worker_id,
IntegerValue=score,
SendNotification=notify,
)
)
|
def assign_qualification(self, qualification_id, worker_id, score, notify=False)
|
Score a worker for a specific qualification
| 2.954963
| 2.907035
| 1.016487
|
result = self.get_current_qualification_score(name, worker_id)
current_score = result["score"] or 0
new_score = current_score + 1
qtype_id = result["qtype"]["id"]
self.assign_qualification(qtype_id, worker_id, new_score, notify)
return {"qtype": result["qtype"], "score": new_score}
|
def increment_qualification_score(self, name, worker_id, notify=False)
|
Increment the current qualification score for a worker, on a
qualification with the provided name.
| 2.746811
| 2.649162
| 1.03686
|
try:
response = self.mturk.get_qualification_score(
QualificationTypeId=qualification_id, WorkerId=worker_id
)
except ClientError as ex:
error = str(ex)
if "does not exist" in error:
raise WorkerLacksQualification(
"Worker {} does not have qualification {}.".format(
worker_id, qualification_id
)
)
if "operation can be called with a status of: Granted" in error:
raise RevokedQualification(
"Worker {} has had qualification {} revoked.".format(
worker_id, qualification_id
)
)
raise MTurkServiceException(error)
return response["Qualification"]["IntegerValue"]
|
def get_qualification_score(self, qualification_id, worker_id)
|
Return a worker's qualification score as an integer.
| 3.032028
| 2.982271
| 1.016684
|
qtype = self.get_qualification_type_by_name(name)
if qtype is None:
raise QualificationNotFoundException(
'No Qualification exists with name "{}"'.format(name)
)
try:
score = self.get_qualification_score(qtype["id"], worker_id)
except (WorkerLacksQualification, RevokedQualification):
score = None
return {"qtype": qtype, "score": score}
|
def get_current_qualification_score(self, name, worker_id)
|
Return the current score for a worker, on a qualification with the
provided name.
| 3.456202
| 3.500112
| 0.987455
|
return self._is_ok(
self.mturk.delete_qualification_type(QualificationTypeId=qualification_id)
)
|
def dispose_qualification_type(self, qualification_id)
|
Remove a qualification type we created
| 4.680242
| 4.633288
| 1.010134
|
done = False
next_token = None
while not done:
if next_token is not None:
response = self.mturk.list_workers_with_qualification_type(
QualificationTypeId=qualification_id,
MaxResults=MAX_SUPPORTED_BATCH_SIZE,
Status="Granted",
NextToken=next_token,
)
else:
response = self.mturk.list_workers_with_qualification_type(
QualificationTypeId=qualification_id,
MaxResults=MAX_SUPPORTED_BATCH_SIZE,
Status="Granted",
)
if response:
for r in response["Qualifications"]:
yield {"id": r["WorkerId"], "score": r["IntegerValue"]}
if "NextToken" in response:
next_token = response["NextToken"]
else:
done = True
|
def get_workers_with_qualification(self, qualification_id)
|
Get workers with the given qualification.
| 2.009912
| 1.984548
| 1.01278
|
frame_height = 600
mturk_question = self._external_question(ad_url, frame_height)
qualifications = self.build_hit_qualifications(
approve_requirement, us_only, blacklist
)
# We need a HIT_Type in order to register for REST notifications
hit_type_id = self.register_hit_type(
title, description, reward, duration_hours, keywords, qualifications
)
self.set_rest_notification(notification_url, hit_type_id)
params = {
"HITTypeId": hit_type_id,
"Question": mturk_question,
"LifetimeInSeconds": int(
datetime.timedelta(days=lifetime_days).total_seconds()
),
"MaxAssignments": max_assignments,
"UniqueRequestToken": self._request_token(),
}
if annotation:
params["RequesterAnnotation"] = annotation
response = self.mturk.create_hit_with_hit_type(**params)
if "HIT" not in response:
raise MTurkServiceException("HIT request was invalid for unknown reason.")
return self._translate_hit(response["HIT"])
|
def create_hit(
self,
title,
description,
keywords,
reward,
duration_hours,
lifetime_days,
ad_url,
notification_url,
approve_requirement,
max_assignments,
us_only,
blacklist=None,
annotation=None,
)
|
Create the actual HIT and return a dict with its useful properties.
| 3.482423
| 3.319433
| 1.049102
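A hedged sketch of a `create_hit` call; every literal value below is illustrative rather than taken from the source:

    hit = service.create_hit(
        title="Short survey",
        description="Answer a few questions about images.",
        keywords="survey, images",
        reward=1.00,
        duration_hours=1,
        lifetime_days=1,
        ad_url="https://example.com/ad",  # hypothetical URLs
        notification_url="https://example.com/notifications",
        approve_requirement=95,
        max_assignments=10,
        us_only=True,
    )
    print(hit)  # translated dict of useful HIT properties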
|
self.create_additional_assignments_for_hit(hit_id, number)
if duration_hours is not None:
self.update_expiration_for_hit(hit_id, duration_hours)
return self.get_hit(hit_id)
|
def extend_hit(self, hit_id, number, duration_hours=None)
|
Extend an existing HIT and return an updated description
| 3.003785
| 2.635296
| 1.139828
|
try:
self.mturk.update_expiration_for_hit(HITId=hit_id, ExpireAt=0)
except Exception as ex:
raise MTurkServiceException(
"Failed to expire HIT {}: {}".format(hit_id, str(ex))
)
return True
|
def expire_hit(self, hit_id)
|
Expire a HIT, which will change its status to "Reviewable",
allowing it to be deleted.
| 3.085814
| 2.763429
| 1.116661
|
assignment = self.get_assignment(assignment_id)
worker_id = assignment["worker_id"]
amount_str = "{:.2f}".format(amount)
try:
return self._is_ok(
self.mturk.send_bonus(
WorkerId=worker_id,
BonusAmount=amount_str,
AssignmentId=assignment_id,
Reason=reason,
UniqueRequestToken=self._request_token(),
)
)
except ClientError as ex:
error = "Failed to pay assignment {} bonus of {}: {}".format(
assignment_id, amount_str, str(ex)
)
raise MTurkServiceException(error)
|
def grant_bonus(self, assignment_id, amount, reason)
|
Grant a bonus to the MTurk Worker.
Issues a payment of money from your account to a Worker. To
be eligible for a bonus, the Worker must have submitted
results for one of your HITs, and have had those results
approved or rejected. This payment happens separately from the
reward you pay to the Worker when you approve the Worker's
assignment.
| 2.95569
| 2.799281
| 1.055875
|
try:
response = self.mturk.get_assignment(AssignmentId=assignment_id)
except ClientError as ex:
if "does not exist" in str(ex):
return None
raise
return self._translate_assignment(response["Assignment"])
|
def get_assignment(self, assignment_id)
|
Get an assignment by ID and reformat the response.
| 2.999177
| 2.966321
| 1.011076
|
try:
return self._is_ok(
self.mturk.approve_assignment(AssignmentId=assignment_id)
)
except ClientError as ex:
assignment = self.get_assignment(assignment_id)
raise MTurkServiceException(
"Failed to approve assignment {}, {}: {}".format(
assignment_id, str(assignment), str(ex)
)
)
|
def approve_assignment(self, assignment_id)
|
Approving an assignment initiates two payments from the
Requester's Amazon.com account:
1. The Worker who submitted the results is paid
the reward specified in the HIT.
2. Amazon Mechanical Turk fees are debited.
| 3.270975
| 3.236529
| 1.010643
|
# Create __init__.py if it doesn't exist (needed for Python 2)
init_py = os.path.join(path, "__init__.py")
if not os.path.exists(init_py):
open(init_py, "a").close()
# Retain already set experiment module
if sys.modules.get("dallinger_experiment") is not None:
return
dirname = os.path.dirname(path)
basename = os.path.basename(path)
sys.path.insert(0, dirname)
package = __import__(basename)
if path not in package.__path__:
raise Exception("Package was not imported from the requested path!")
sys.modules["dallinger_experiment"] = package
package.__package__ = "dallinger_experiment"
sys.path.pop(0)
|
def initialize_experiment_package(path)
|
Make the specified directory importable as the `dallinger_experiment` package.
| 2.984603
| 2.771335
| 1.076955
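A usage sketch, assuming `./my_experiment` is a directory containing an `experiment.py` (the path is an assumption):

    import os

    initialize_experiment_package(os.path.abspath("./my_experiment"))
    # The directory is now importable under the fixed package name:
    from dallinger_experiment import experiment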
|
return {
"type": self.type,
"recruiter": self.recruiter_id,
"assignment_id": self.assignment_id,
"hit_id": self.hit_id,
"mode": self.mode,
"end_time": self.end_time,
"base_pay": self.base_pay,
"bonus": self.bonus,
"status": self.status,
}
|
def json_data(self)
|
Return json description of a participant.
| 2.855474
| 2.614122
| 1.092326
|
if type is None:
type = Question
if not issubclass(type, Question):
raise TypeError("{} is not a valid question type.".format(type))
return type.query.filter_by(participant_id=self.id).all()
|
def questions(self, type=None)
|
Get questions associated with this participant.
Return a list of questions associated with the participant. If
specified, ``type`` filters by class.
| 3.487457
| 3.342402
| 1.043399
|
nodes = self.nodes(failed="all")
infos = []
for n in nodes:
infos.extend(n.infos(type=type, failed=failed))
return infos
|
def infos(self, type=None, failed=False)
|
Get all infos created by the participant's nodes.
Return a list of infos produced by nodes associated with the
participant. If specified, ``type`` filters by class. By default, failed
infos are excluded; to include only failed infos use ``failed=True``,
and for all infos use ``failed="all"``. Note that ``failed`` filters the
infos, not the nodes - infos from all nodes (whether failed or not) can
be returned.
| 4.383187
| 3.696137
| 1.185883
|
return {
"number": self.number,
"type": self.type,
"participant_id": self.participant_id,
"question": self.question,
"response": self.response,
}
|
def json_data(self)
|
Return json description of a question.
| 3.164688
| 2.613711
| 1.210803
|
return {
"type": self.type,
"max_size": self.max_size,
"full": self.full,
"role": self.role,
}
|
def json_data(self)
|
Return json description of a network.
| 4.530262
| 3.968869
| 1.141449
|
if type is None:
type = Node
if not issubclass(type, Node):
raise TypeError("{} is not a valid node type.".format(type))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid node failed".format(failed))
if participant_id is not None:
if failed == "all":
return type.query.filter_by(
network_id=self.id, participant_id=participant_id
).all()
else:
return type.query.filter_by(
network_id=self.id, participant_id=participant_id, failed=failed
).all()
else:
if failed == "all":
return type.query.filter_by(network_id=self.id).all()
else:
return type.query.filter_by(failed=failed, network_id=self.id).all()
|
def nodes(self, type=None, failed=False, participant_id=None)
|
Get nodes in the network.
type specifies the type of Node. Failed can be "all", False
(default) or True. If a participant_id is passed only
nodes with that participant_id will be returned.
| 1.875647
| 1.844598
| 1.016832
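Illustrative calls showing the three filters, assuming `network` and `participant` are persisted model instances (both assumptions here):

    all_alive = network.nodes()                    # not-failed nodes, any type
    everything = network.nodes(failed="all")       # failed and not-failed
    theirs = network.nodes(participant_id=participant.id)  # one participant's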
|
return len(self.nodes(type=type, failed=failed))
|
def size(self, type=None, failed=False)
|
Return the number of nodes in the network.
type specifies the class of node; failed
can be True, False or "all".
| 6.130409
| 5.158339
| 1.188446
|
if type is None:
type = Info
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid failed".format(failed))
if failed == "all":
return type.query.filter_by(network_id=self.id).all()
else:
return type.query.filter_by(network_id=self.id, failed=failed).all()
|
def infos(self, type=None, failed=False)
|
Get infos in the network.
type specifies the type of info (defaults to Info). failed { False,
True, "all" } specifies the failed state of the infos. To get infos
from a specific node, see the infos() method in class
:class:`~dallinger.models.Node`.
| 2.865127
| 2.406192
| 1.190731
|
if status not in ["all", "pending", "received"]:
raise ValueError(
"You cannot get transmission of status {}.".format(status)
+ "Status can only be pending, received or all"
)
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid failed".format(failed))
if status == "all":
if failed == "all":
return Transmission.query.filter_by(network_id=self.id).all()
else:
return Transmission.query.filter_by(
network_id=self.id, failed=failed
).all()
else:
if failed == "all":
return Transmission.query.filter_by(
network_id=self.id, status=status
).all()
else:
return Transmission.query.filter_by(
network_id=self.id, status=status, failed=failed
).all()
|
def transmissions(self, status="all", failed=False)
|
Get transmissions in the network.
status { "all", "received", "pending" }
failed { False, True, "all" }
To get transmissions from a specific vector, see the
transmissions() method in class Vector.
| 1.937237
| 1.825737
| 1.061071
|
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid vector failed".format(failed))
if failed == "all":
return Vector.query.filter_by(network_id=self.id).all()
else:
return Vector.query.filter_by(network_id=self.id, failed=failed).all()
|
def vectors(self, failed=False)
|
Get vectors in the network.
failed = { False, True, "all" }
To get the vectors to/from to a specific node, see Node.vectors().
| 2.941333
| 2.416676
| 1.217099
|
# get type
if type is None:
type = Node
if not issubclass(type, Node):
raise ValueError(
"{} is not a valid neighbor type,"
"needs to be a subclass of Node.".format(type)
)
# get direction
if direction not in ["both", "either", "from", "to"]:
raise ValueError(
"{} not a valid neighbor connection."
"Should be both, either, to or from.".format(direction)
)
if failed is not None:
raise ValueError(
"You should not pass a failed argument to neighbors(). "
"Neighbors is "
"unusual in that a failed argument cannot be passed. This is "
"because there is inherent uncertainty in what it means for a "
"neighbor to be failed. The neighbors function will only ever "
"return not-failed nodes connected to you via not-failed "
"vectors. If you want to do more elaborate queries, for "
"example, getting not-failed nodes connected to you via failed"
" vectors, you should do so via sql queries."
)
neighbors = []
# get the neighbours
if direction == "to":
outgoing_vectors = (
Vector.query.with_entities(Vector.destination_id)
.filter_by(origin_id=self.id, failed=False)
.all()
)
neighbor_ids = [v.destination_id for v in outgoing_vectors]
if neighbor_ids:
neighbors = Node.query.filter(Node.id.in_(neighbor_ids)).all()
neighbors = [n for n in neighbors if isinstance(n, type)]
if direction == "from":
incoming_vectors = (
Vector.query.with_entities(Vector.origin_id)
.filter_by(destination_id=self.id, failed=False)
.all()
)
neighbor_ids = [v.origin_id for v in incoming_vectors]
if neighbor_ids:
neighbors = Node.query.filter(Node.id.in_(neighbor_ids)).all()
neighbors = [n for n in neighbors if isinstance(n, type)]
if direction == "either":
neighbors = list(
set(
self.neighbors(type=type, direction="to")
+ self.neighbors(type=type, direction="from")
)
)
if direction == "both":
neighbors = list(
set(self.neighbors(type=type, direction="to"))
& set(self.neighbors(type=type, direction="from"))
)
return neighbors
|
def neighbors(self, type=None, direction="to", failed=None)
|
Get a node's neighbors - nodes that are directly connected to it.
Type specifies the class of neighbour and must be a subclass of
Node (default is Node).
Connection is the direction of the connections and can be "to"
(default), "from", "either", or "both".
| 2.645552
| 2.561941
| 1.032636
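A short sketch of the direction semantics, assuming `node` is a persisted Node instance:

    targets = node.neighbors()                  # nodes this node connects to
    sources = node.neighbors(direction="from")  # nodes that connect to it
    mutual = node.neighbors(direction="both")   # connected in both directions
    # Passing failed= raises ValueError by design; use SQL for such queries.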
|
if failed is not None:
raise ValueError(
"You should not pass a failed argument to is_connected."
"is_connected is "
"unusual in that a failed argument cannot be passed. This is "
"because there is inherent uncertainty in what it means for a "
"connection to be failed. The is_connected function will only "
"ever check along not-failed vectors. "
"If you want to check along failed vectors "
"you should do so via sql queries."
)
# make whom a list
if isinstance(whom, list):
is_list = True
else:
whom = [whom]
is_list = False
whom_ids = [n.id for n in whom]
# check whom contains only Nodes
for node in whom:
if not isinstance(node, Node):
raise TypeError(
"is_connected cannot parse objects of type {}.".format(type(node))
)
# check direction
if direction not in ["to", "from", "either", "both"]:
raise ValueError(
"{} is not a valid direction for is_connected".format(direction)
)
# get is_connected
connected = []
if direction == "to":
vectors = (
Vector.query.with_entities(Vector.destination_id)
.filter_by(origin_id=self.id, failed=False)
.all()
)
destinations = set([v.destination_id for v in vectors])
for w in whom_ids:
connected.append(w in destinations)
elif direction == "from":
vectors = (
Vector.query.with_entities(Vector.origin_id)
.filter_by(destination_id=self.id, failed=False)
.all()
)
origins = set([v.origin_id for v in vectors])
for w in whom_ids:
connected.append(w in origins)
elif direction in ["either", "both"]:
vectors = (
Vector.query.with_entities(Vector.origin_id, Vector.destination_id)
.filter(
and_(
Vector.failed == false(),
or_(
Vector.destination_id == self.id,
Vector.origin_id == self.id,
),
)
)
.all()
)
destinations = set([v.destination_id for v in vectors])
origins = set([v.origin_id for v in vectors])
if direction == "either":
origins_destinations = destinations.union(origins)
elif direction == "both":
origins_destinations = destinations.intersection(origins)
for w in whom_ids:
connected.append(w in origins_destinations)
if is_list:
return connected
else:
return connected[0]
|
def is_connected(self, whom, direction="to", failed=None)
|
Check whether this node is connected [to/from] whom.
whom can be a list of nodes or a single node.
direction can be "to" (default), "from", "both" or "either".
If whom is a single node this method returns a boolean,
otherwise it returns a list of booleans
| 2.432009
| 2.394733
| 1.015566
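For example, assuming `a`, `b`, and `c` are Node instances in the same network (hypothetical):

    a.is_connected(b)                           # single node -> bool
    a.is_connected([b, c], direction="either")  # list of nodes -> list of bools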
|
if type is None:
type = Info
if not issubclass(type, Info):
raise TypeError(
"Cannot get infos of type {} " "as it is not a valid type.".format(type)
)
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid vector failed".format(failed))
if failed == "all":
return type.query.filter_by(origin_id=self.id).all()
else:
return type.query.filter_by(origin_id=self.id, failed=failed).all()
|
def infos(self, type=None, failed=False)
|
Get infos that originate from this node.
Type must be a subclass of :class:`~dallinger.models.Info`, the default is
``Info``. Failed can be True, False or "all".
| 3.453585
| 2.751143
| 1.255327
|
if failed is not None:
raise ValueError(
"You should not pass a failed argument to received_infos. "
"received_infos is "
"unusual in that a failed argument cannot be passed. This is "
"because there is inherent uncertainty in what it means for a "
"received info to be failed. The received_infos function will "
"only ever check not-failed transmissions. "
"If you want to check failed transmissions "
"you should do so via sql queries."
)
if type is None:
type = Info
if not issubclass(type, Info):
raise TypeError(
"Cannot get infos of type {} " "as it is not a valid type.".format(type)
)
transmissions = (
Transmission.query.with_entities(Transmission.info_id)
.filter_by(destination_id=self.id, status="received", failed=False)
.all()
)
info_ids = [t.info_id for t in transmissions]
if info_ids:
return type.query.filter(type.id.in_(info_ids)).all()
else:
return []
|
def received_infos(self, type=None, failed=None)
|
Get infos that have been sent to this node.
Type must be a subclass of info, the default is Info.
| 3.927725
| 3.707545
| 1.059387
|
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid transmission failed".format(failed))
if type is None:
type = Transformation
if failed == "all":
return type.query.filter_by(node_id=self.id).all()
else:
return type.query.filter_by(node_id=self.id, failed=failed).all()
|
def transformations(self, type=None, failed=False)
|
Get Transformations done by this Node.
type must be a type of Transformation (defaults to Transformation)
Failed can be True, False or "all"
| 3.143134
| 2.483493
| 1.26561
|
if self.failed is True:
raise AttributeError("Cannot fail {} - it has already failed.".format(self))
else:
self.failed = True
self.time_of_death = timenow()
self.network.calculate_full()
for v in self.vectors():
v.fail()
for i in self.infos():
i.fail()
for t in self.transmissions(direction="all"):
t.fail()
for t in self.transformations():
t.fail()
|
def fail(self)
|
Fail a node, setting its status to "failed".
Also fails all vectors that connect to or from the node.
You cannot fail a node that has already failed, but you
can fail a dead node.
Set node.failed to True and :attr:`~dallinger.models.Node.time_of_death`
to now. Instruct all not-failed vectors connected to this node, infos
made by this node, transmissions to or from this node and
transformations made by this node to fail.
| 5.457089
| 3.378973
| 1.615014
|
# check direction
if direction not in ["to", "from", "both"]:
raise ValueError(
"{} is not a valid direction for connect()".format(direction)
)
# make whom a list
whom = self.flatten([whom])
# make the connections
new_vectors = []
if direction in ["to", "both"]:
already_connected_to = self.flatten(
[self.is_connected(direction="to", whom=whom)]
)
for node, connected in zip(whom, already_connected_to):
if connected:
print(
"Warning! {} already connected to {}, "
"instruction to connect will be ignored.".format(self, node)
)
else:
new_vectors.append(Vector(origin=self, destination=node))
if direction in ["from", "both"]:
already_connected_from = self.flatten(
[self.is_connected(direction="from", whom=whom)]
)
for node, connected in zip(whom, already_connected_from):
if connected:
print(
"Warning! {} already connected from {}, "
"instruction to connect will be ignored.".format(self, node)
)
else:
new_vectors.append(Vector(origin=node, destination=self))
return new_vectors
|
def connect(self, whom, direction="to")
|
Create a vector from self to/from whom.
Return a list of newly created vector between the node and whom.
``whom`` can be a specific node or a (nested) list of nodes. Nodes can
only connect with nodes in the same network. In addition nodes cannot
connect with themselves or with Sources. ``direction`` specifies the
direction of the connection it can be "to" (node -> whom), "from" (whom
-> node) or both (node <-> whom). The default is "to".
Whom may be a (nested) list of nodes.
Will raise an error if:
1. whom is not a node or list of nodes
2. whom is/contains a source if direction is to or both
3. whom is/contains self
4. whom is/contains a node in a different network
If self is already connected to/from whom a Warning
is raised and nothing happens.
This method returns a list of the vectors created
(even if there is only one).
| 2.294207
| 2.118244
| 1.08307
|
if lst == []:
return lst
if isinstance(lst[0], list):
return self.flatten(lst[0]) + self.flatten(lst[1:])
return lst[:1] + self.flatten(lst[1:])
|
def flatten(self, lst)
|
Turn a list of lists into a list.
| 1.923964
| 1.809639
| 1.063176
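For example, given the recursion above:

    node.flatten([1, [2, [3, 4]], 5])  # -> [1, 2, 3, 4, 5]
    node.flatten([])                   # -> []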
|
whats = set()
for what in self.flatten([what]):
if what is None:
what = self._what()
if inspect.isclass(what) and issubclass(what, Info):
whats.update(self.infos(type=what))
else:
whats.add(what)
to_whoms = set()
for to_whom in self.flatten([to_whom]):
if to_whom is None:
to_whom = self._to_whom()
if inspect.isclass(to_whom) and issubclass(to_whom, Node):
to_whoms.update(self.neighbors(direction="to", type=to_whom))
else:
to_whoms.add(to_whom)
transmissions = []
vectors = self.vectors(direction="outgoing")
for what in whats:
for to_whom in to_whoms:
try:
vector = [v for v in vectors if v.destination_id == to_whom.id][0]
except IndexError:
raise ValueError(
"{} cannot transmit to {} as it does not have "
"a connection to them".format(self, to_whom)
)
t = Transmission(info=what, vector=vector)
transmissions.append(t)
return transmissions
|
def transmit(self, what=None, to_whom=None)
|
Transmit one or more infos from one node to another.
"what" dictates which infos are sent, it can be:
(1) None (in which case the node's _what method is called).
(2) an Info (in which case the node transmits the info)
(3) a subclass of Info (in which case the node transmits all
its infos of that type)
(4) a list of any combination of the above
"to_whom" dictates which node(s) the infos are sent to, it can be:
(1) None (in which case the node's _to_whom method is called)
(2) a Node (in which case the node transmits to that node)
(3) a subclass of Node (in which case the node transmits to all
nodes of that type it is connected to)
(4) a list of any combination of the above
Will additionally raise an error if:
(1) _what() or _to_whom() returns None or a list containing None.
(2) what is/contains an info that does not originate from the
transmitting node
(3) to_whom is/contains a node that the transmitting node does not
have a not-failed connection with.
| 2.361325
| 2.16099
| 1.092705
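Sketches of the four dispatch forms, assuming `some_info` is an Info this node created, and `Meme` and `Agent` are Info and Node subclasses defined by the experiment (all hypothetical):

    node.transmit()                          # defaults: _what() to _to_whom()
    node.transmit(what=some_info)            # one specific Info
    node.transmit(what=Meme)                 # all of this node's Memes
    node.transmit(what=Meme, to_whom=Agent)  # ...to every connected Agent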
|
if self.failed is True:
raise AttributeError("Cannot fail {} - it has already failed.".format(self))
else:
self.failed = True
self.time_of_death = timenow()
for t in self.transmissions():
t.fail()
|
def fail(self)
|
Fail a vector.
| 5.883403
| 5.81244
| 1.012209
|
return {
"type": self.type,
"origin_id": self.origin_id,
"network_id": self.network_id,
"contents": self.contents,
}
|
def json_data(self)
|
The json representation of an info.
| 3.644925
| 3.225394
| 1.130071
|
if relationship not in ["all", "parent", "child"]:
raise ValueError(
"You cannot get transformations of relationship {}".format(relationship)
+ "Relationship can only be parent, child or all."
)
if relationship == "all":
return Transformation.query.filter(
and_(
Transformation.failed == false(),
or_(
Transformation.info_in == self, Transformation.info_out == self
),
)
).all()
if relationship == "parent":
return Transformation.query.filter_by(
info_in_id=self.id, failed=False
).all()
if relationship == "child":
return Transformation.query.filter_by(
info_out_id=self.id, failed=False
).all()
|
def transformations(self, relationship="all")
|
Get all the transformations of this info.
Return a list of transformations involving this info. ``relationship``
can be "parent" (in which case only transformations where the info is
the ``info_in`` are returned), "child" (in which case only
transformations where the info is the ``info_out`` are returned) or
``all`` (in which case any transformations where the info is the
``info_out`` or the ``info_in`` are returned). The default is ``all``
| 2.657024
| 2.573631
| 1.032403
|
return {
"vector_id": self.vector_id,
"origin_id": self.origin_id,
"destination_id": self.destination_id,
"info_id": self.info_id,
"network_id": self.network_id,
"receive_time": self.receive_time,
"status": self.status,
}
|
def json_data(self)
|
The json representation of a transmission.
| 2.835563
| 2.454016
| 1.155479
|
return {
"info_in_id": self.info_in_id,
"info_out_id": self.info_out_id,
"node_id": self.node_id,
"network_id": self.network_id,
}
|
def json_data(self)
|
The json representation of a transformation.
| 3.168052
| 3.054977
| 1.037013
|
dist = get_distribution("dallinger")
src_base = os.path.join(dist.location, dist.project_name)
return src_base
|
def dallinger_package_path()
|
Return the absolute path of the root directory of the installed
Dallinger package:
>>> utils.dallinger_package_path()
'/Users/janedoe/projects/Dallinger3/dallinger'
| 4.254812
| 7.50984
| 0.566565
|
return "".join(random.choice(chars) for x in range(size))
|
def generate_random_id(size=6, chars=string.ascii_uppercase + string.digits)
|
Generate random id numbers.
| 3.934053
| 2.467069
| 1.594626
|
tempdir = tempfile.mkdtemp()
output_file = os.path.join(tempdir, "stderr")
original_cmd = " ".join(cmd)
p = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE)
t = subprocess.Popen(["tee", output_file], stdin=p.stderr, stdout=out)
t.wait()
p.communicate()
p.stderr.close()
if p.returncode != 0 and not ignore_errors:
with open(output_file, "r") as output:
error = output.read()
message = 'Command: "{}": Error: "{}"'.format(
original_cmd, error.replace("\n", "")
)
shutil.rmtree(tempdir, ignore_errors=True)
raise CommandError(message)
shutil.rmtree(tempdir, ignore_errors=True)
return p.returncode
|
def run_command(cmd, out, ignore_errors=False)
|
We want to both send subprocess output to stdout or another file
descriptor as the subprocess runs, *and* capture the actual exception
message on errors. CalledProcessErrors do not reliably contain the
underlying exception in either the 'message' or 'out' attributes, so
we tee the stderr to a temporary file and if a CalledProcessError is
raised we read its contents to recover stderr.
| 2.329204
| 2.329793
| 0.999747
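A minimal invocation sketch; the commands are illustrative:

    import sys

    # Stream subprocess output to stdout while stderr is tee'd to a temp file:
    run_command(["echo", "hello"], out=sys.stdout)
    # Suppress the CommandError on a non-zero exit:
    run_command(["false"], out=sys.stdout, ignore_errors=True)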
|
if verbose:
if chevrons:
click.echo("\n❯❯ " + msg)
else:
click.echo(msg)
time.sleep(delay)
|
def log(msg, delay=0.5, chevrons=True, verbose=True)
|
Log a message to stdout.
| 3.157955
| 3.106722
| 1.016491
|
if verbose:
if chevrons:
click.secho("\n❯❯ " + msg, err=True, fg="red")
else:
click.secho(msg, err=True, fg="red")
time.sleep(delay)
|
def error(msg, delay=0.5, chevrons=True, verbose=True)
|
Log an error message to stderr.
| 2.531249
| 2.46025
| 1.028858
|
def decorator(func):
def wrapper(*args, **kwargs):
def _handle_timeout(signum, frame):
config = get_config()
if not config.ready:
config.load()
message = {
"subject": "Idle Experiment.",
"body": idle_template.format(
app_id=config.get("id"), minutes_so_far=round(seconds / 60)
),
}
log("Reporting problem with idle experiment...")
get_messenger(config).send(message)
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator
|
def report_idle_after(seconds)
|
Report an idle experiment after a certain number of seconds.
| 4.067362
| 3.937201
| 1.033059
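Decorator usage, sketched under the assumption that the wrapped call runs on a Unix platform (signal.SIGALRM is not available on Windows):

    @report_idle_after(6 * 60 * 60)  # report after six idle hours
    def deploy_and_wait():
        ...  # long-running work; the alarm is cleared when this returns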
|
if app is None:
raise TypeError("Select an experiment using the --app parameter.")
elif app[0:5] == "dlgr-":
raise ValueError(
"The --app parameter requires the full "
"UUID beginning with {}-...".format(app[5:13])
)
return app
|
def verify_id(ctx, param, app)
|
Verify the experiment id.
| 10.365453
| 8.508847
| 1.218197
|
# Check required files
ok = True
mb_to_bytes = 1000 * 1000
expected_files = ["config.txt", "experiment.py"]
for f in expected_files:
if os.path.exists(f):
log("✓ {} is PRESENT".format(f), chevrons=False, verbose=verbose)
else:
log("✗ {} is MISSING".format(f), chevrons=False, verbose=verbose)
ok = False
# Check size
max_size = max_size_mb * mb_to_bytes
size = size_on_copy()
if size > max_size:
size_in_mb = round(size / mb_to_bytes)
log(
"✗ {}MB is TOO BIG (greater than {}MB)".format(size_in_mb, max_size_mb),
chevrons=False,
verbose=verbose,
)
ok = False
return ok
|
def verify_directory(verbose=True, max_size_mb=50)
|
Ensure that the current directory looks like a Dallinger experiment, and
does not appear to have unintended contents that will be copied on
deployment.
| 2.892337
| 2.835531
| 1.020034
|
ok = True
if not os.path.exists("experiment.py"):
return False
# Bootstrap a package in a temp directory and make it importable:
temp_package_name = "TEMP_VERIFICATION_PACKAGE"
tmp = tempfile.mkdtemp()
clone_dir = os.path.join(tmp, temp_package_name)
to_ignore = shutil.ignore_patterns(
os.path.join(".git", "*"), "*.db", "snapshots", "data", "server.log"
)
shutil.copytree(os.getcwd(), clone_dir, ignore=to_ignore)
initialize_experiment_package(clone_dir)
from dallinger_experiment import experiment
if clone_dir not in experiment.__file__:
raise ImportError("Checking the wrong experiment.py... aborting.")
classes = inspect.getmembers(experiment, inspect.isclass)
exps = [c for c in classes if (c[1].__bases__[0].__name__ in "Experiment")]
# Clean up:
for entry in [k for k in sys.modules if temp_package_name in k]:
del sys.modules[entry]
# Run checks:
if len(exps) == 0:
log(
"✗ experiment.py does not define an experiment class.",
delay=0,
chevrons=False,
verbose=verbose,
)
ok = False
elif len(exps) == 1:
log(
"✓ experiment.py defines 1 experiment",
delay=0,
chevrons=False,
verbose=verbose,
)
else:
log(
"✗ experiment.py defines more than one experiment class.",
delay=0,
chevrons=False,
verbose=verbose,
)
ok = False
return ok
|
def verify_experiment_module(verbose)
|
Perform basic sanity checks on experiment.py.
| 3.591529
| 3.500356
| 1.026047
|
ok = True
config = get_config()
if not config.ready:
config.load()
# Check base_payment is correct
try:
base_pay = config.get("base_payment")
except KeyError:
log("✗ No value for base_pay.", delay=0, chevrons=False, verbose=verbose)
else:
dollarFormat = "{:.2f}".format(base_pay)
if base_pay <= 0:
log(
"✗ base_payment must be positive value in config.txt.",
delay=0,
chevrons=False,
verbose=verbose,
)
ok = False
if float(dollarFormat) != float(base_pay):
log(
"✗ base_payment must be in [dollars].[cents] format in config.txt. Try changing "
"{0} to {1}.".format(base_pay, dollarFormat),
delay=0,
chevrons=False,
verbose=verbose,
)
ok = False
return ok
|
def verify_config(verbose=True)
|
Check for common or costly errors in experiment configuration.
| 3.695049
| 3.620938
| 1.020467
|
conflicts = False
reserved_files = [
os.path.join("templates", "complete.html"),
os.path.join("templates", "error.html"),
os.path.join("templates", "error-complete.html"),
os.path.join("templates", "launch.html"),
os.path.join("templates", "thanks.html"),
os.path.join("static", "css", "dallinger.css"),
os.path.join("static", "scripts", "dallinger2.js"),
os.path.join("static", "scripts", "reqwest.min.js"),
os.path.join("static", "scripts", "store+json2.min.js"),
os.path.join("static", "scripts", "tracker.js"),
os.path.join("static", "robots.txt"),
]
for f in reserved_files:
if os.path.exists(f):
log(
"✗ {} OVERWRITES shared frontend files inserted at run-time".format(f),
delay=0,
chevrons=False,
verbose=verbose,
)
conflicts = True
if not conflicts:
log("✓ no file conflicts", delay=0, chevrons=False, verbose=verbose)
return True
|
def verify_no_conflicts(verbose=True)
|
Warn if there are filenames which conflict with those deployed by
Dallinger; always returns True (meaning "OK").
| 3.098656
| 2.878397
| 1.076522
|
results = (
verify_directory(verbose),
verify_experiment_module(verbose),
verify_config(verbose),
verify_no_conflicts(verbose),
)
ok = all(results)
return ok
|
def verify_package(verbose=True)
|
Perform a series of checks on the current directory to verify that
it's a valid Dallinger experiment.
| 6.616908
| 5.684026
| 1.164124
|
error = "The current directory is not a valid Dallinger experiment."
@wraps(f)
def wrapper(**kwargs):
if not verify_directory(kwargs.get("verbose")):
raise click.UsageError(error)
return f(**kwargs)
return wrapper
|
def require_exp_directory(f)
|
Decorator to verify that a command is run inside a valid Dallinger
experiment directory.
| 4.755084
| 3.499052
| 1.358964
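A sketch of decorating a click command; the command itself is hypothetical:

    import click

    @click.command()
    @click.option("--verbose", is_flag=True)
    @require_exp_directory
    def deploy(verbose):
        """Only runs when invoked inside a valid experiment directory."""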
|
from logging.config import fileConfig
fileConfig(
os.path.join(os.path.dirname(__file__), "logging.ini"),
disable_existing_loggers=False,
)
|
def dallinger()
|
Dallinger command-line utility.
| 2.62667
| 2.709758
| 0.969338
|
debugger = DebugDeployment(Output(), verbose, bot, proxy, exp_config)
log(header, chevrons=False)
debugger.run()
|
def debug(verbose, bot, proxy, exp_config=None)
|
Run the experiment locally.
| 18.973341
| 17.097183
| 1.109735
|
if app:
verify_id(None, None, app)
log(header, chevrons=False)
_deploy_in_mode("sandbox", app=app, verbose=verbose, log=log)
|
def sandbox(verbose, app)
|
Deploy app using Heroku to the MTurk Sandbox.
| 13.350412
| 12.626293
| 1.05735
|
if not (workers and qualification and value):
raise click.BadParameter(
"Must specify a qualification ID, value/score, and at least one worker ID"
)
mturk = _mturk_service_from_config(sandbox)
if by_name:
result = mturk.get_qualification_type_by_name(qualification)
if result is None:
raise click.BadParameter(
'No qualification with name "{}" exists.'.format(qualification)
)
qid = result["id"]
else:
qid = qualification
click.echo(
"Assigning qualification {} with value {} to {} worker{}...".format(
qid, value, len(workers), "s" if len(workers) > 1 else ""
)
)
for worker in workers:
if mturk.set_qualification_score(qid, worker, int(value), notify=notify):
click.echo("{} OK".format(worker))
# print out the current set of workers with the qualification
results = list(mturk.get_workers_with_qualification(qid))
click.echo("{} workers with qualification {}:".format(len(results), qid))
for score, count in Counter([r["score"] for r in results]).items():
click.echo("{} with value {}".format(count, score))
|
def qualify(workers, qualification, value, by_name, notify, sandbox)
|
Assign a qualification to 1 or more workers
| 2.845371
| 2.770802
| 1.026912
|
if not (workers and qualification):
raise click.BadParameter(
"Must specify a qualification ID or name, and at least one worker ID"
)
mturk = _mturk_service_from_config(sandbox)
if by_name:
result = mturk.get_qualification_type_by_name(qualification)
if result is None:
raise click.BadParameter(
'No qualification with name "{}" exists.'.format(qualification)
)
qid = result["id"]
else:
qid = qualification
if not click.confirm(
'\n\nYou are about to revoke qualification "{}" '
"for these workers:\n\t{}\n\n"
"This will send an email to each of them from Amazon MTurk. "
"Continue?".format(qid, "\n\t".join(workers))
):
click.echo("Aborting...")
return
for worker in workers:
if mturk.revoke_qualification(qid, worker, reason):
click.echo(
'Revoked qualification "{}" from worker "{}"'.format(qid, worker)
)
# print out the current set of workers with the qualification
results = list(mturk.get_workers_with_qualification(qid))
click.echo(
'There are now {} workers with qualification "{}"'.format(len(results), qid)
)
|
def revoke(workers, qualification, by_name, reason, sandbox)
|
Revoke a qualification from 1 or more workers
| 2.827651
| 2.796517
| 1.011133
|
log("The database backup URL is...")
backup_url = data.backup(app)
log(backup_url)
log("Scaling down the web servers...")
heroku_app = HerokuApp(app)
heroku_app.scale_down_dynos()
log("Removing addons...")
addons = [
"heroku-postgresql",
# "papertrail",
"heroku-redis",
]
for addon in addons:
heroku_app.addon_destroy(addon)
|
def hibernate(app)
|
Pause an experiment and remove costly resources.
| 6.24054
| 6.023995
| 1.035947
|
hit_list = list(_current_hits(_mturk_service_from_config(sandbox), app))
out = Output()
out.log(
"Found {} hits for this experiment id: {}".format(
len(hit_list), ", ".join(h["id"] for h in hit_list)
)
)
|
def hits(app, sandbox)
|
List hits for an experiment id.
| 7.570366
| 6.180076
| 1.224963
|
success = []
failures = []
service = _mturk_service_from_config(sandbox)
hits = _current_hits(service, app)
for hit in hits:
hit_id = hit["id"]
try:
service.expire_hit(hit_id)
success.append(hit_id)
except MTurkServiceException:
failures.append(hit_id)
out = Output()
if success:
out.log("Expired {} hits: {}".format(len(success), ", ".join(success)))
if failures:
out.log(
"Could not expire {} hits: {}".format(len(failures), ", ".join(failures))
)
if not success and not failures:
out.log("No hits found for this application.")
if not sandbox:
out.log(
"If this experiment was run in the MTurk sandbox, use: "
"`dallinger expire --sandbox --app {}`".format(app)
)
if exit and not success:
sys.exit(1)
|
def expire(app, sandbox, exit=True)
|
Expire hits for an experiment id.
| 3.131779
| 2.910696
| 1.075955
|
if expire_hit:
ctx.invoke(expire, app=app, sandbox=sandbox, exit=False)
HerokuApp(app).destroy()
|
def destroy(ctx, app, expire_hit, sandbox)
|
Tear down an experiment server.
| 5.540165
| 5.514644
| 1.004628
|
id = app
config = get_config()
config.load()
bucket = data.user_s3_bucket()
key = bucket.lookup("{}.dump".format(id))
url = key.generate_url(expires_in=300)
heroku_app = HerokuApp(id, output=None, team=None)
heroku_app.addon("heroku-postgresql:{}".format(config.get("database_size")))
time.sleep(60)
heroku_app.pg_wait()
time.sleep(10)
heroku_app.addon("heroku-redis:{}".format(config.get("redis_size")))
heroku_app.restore(url)
# Scale up the dynos.
log("Scaling up the dynos...")
size = config.get("dyno_type")
for process in ["web", "worker"]:
qty = config.get("num_dynos_" + process)
heroku_app.scale_up_dyno(process, qty, size)
if config.get("clock_on"):
heroku_app.scale_up_dyno("clock", 1, size)
|
def awaken(app, databaseurl)
|
Restore the database from a given url.
| 4.731131
| 4.624347
| 1.023092
|
log(header, chevrons=False)
data.export(str(app), local=local, scrub_pii=(not no_scrub))
|
def export(app, local, no_scrub)
|
Export the data.
| 13.476737
| 12.98253
| 1.038067
|
if replay:
exp_config = exp_config or {}
exp_config["replay"] = True
log(header, chevrons=False)
loader = LoaderDeployment(app, Output(), verbose, exp_config)
loader.run()
|
def load(app, verbose, replay, exp_config=None)
|
Import database state from an exported zip file and leave the server
running until stopping the process with <control>-c.
| 8.621885
| 8.310096
| 1.037519
|
heroku_app = HerokuApp(dallinger_uid=app)
webbrowser.open(heroku_app.dashboard_url)
webbrowser.open("https://requester.mturk.com/mturk/manageHITs")
heroku_app.open_logs()
check_call(["open", heroku_app.db_uri])
while _keep_running():
summary = get_summary(app)
click.clear()
click.echo(header)
click.echo("\nExperiment {}\n".format(app))
click.echo(summary)
time.sleep(10)
|
def monitor(app)
|
Set up application monitoring.
| 5.700409
| 5.895464
| 0.966914
|
if debug is None:
verify_id(None, None, app)
(id, tmp) = setup_experiment(log)
if debug:
url = debug
else:
heroku_app = HerokuApp(dallinger_uid=app)
worker = generate_random_id()
hit = generate_random_id()
assignment = generate_random_id()
ad_url = "{}/ad".format(heroku_app.url)
ad_parameters = "assignmentId={}&hitId={}&workerId={}&mode=sandbox"
ad_parameters = ad_parameters.format(assignment, hit, worker)
url = "{}?{}".format(ad_url, ad_parameters)
bot = bot_factory(url)
bot.run_experiment()
|
def bot(app, debug)
|
Run the experiment bot.
| 6.428323
| 5.845476
| 1.099709
|
verbose = True
log(
"Verifying current directory as a Dallinger experiment...",
delay=0,
verbose=verbose,
)
ok = verify_package(verbose=verbose)
if ok:
log("✓ Everything looks good!", delay=0, verbose=verbose)
else:
log("☹ Some problems were found.", delay=0, verbose=verbose)
|
def verify()
|
Verify that app is compatible with Dallinger.
| 6.131752
| 4.860807
| 1.261468
|
infos = Info.query.all()
sketches = [json.loads(info.contents) for info in infos]
return jsonify(drawings=sketches)
|
def getdrawings()
|
Get all the drawings.
| 6.651891
| 6.453125
| 1.030802
|
config = _config()
html = "<html><head></head><body><h1>Dallinger Experiment in progress</h1><dl>"
for item in sorted(config.as_dict().items()):
html += '<dt style="font-weight:bold;margin-top:15px;">{}</dt><dd>{}</dd>'.format(
*item
)
html += "</dl></body></html>"
return html
|
def index()
|
Index route
| 4.098577
| 4.135909
| 0.990974
|
data_out = {}
data_out["status"] = "success"
data_out.update(data)
js = dumps(data_out, default=date_handler)
return Response(js, status=200, mimetype="application/json")
|
def success_response(**data)
|
Return a generic success response.
| 2.843872
| 2.643642
| 1.07574
|
last_exception = sys.exc_info()
if last_exception[0]:
db.logger.error(
"Failure for request: {!r}".format(dict(request.args)),
exc_info=last_exception,
)
data = {"status": "error"}
if simple:
data["message"] = error_text
else:
data["html"] = (
error_page(
error_text=error_text,
error_type=error_type,
participant=participant,
request_data=request_data,
)
.get_data()
.decode("utf-8")
)
return Response(dumps(data), status=status, mimetype="application/json")
|
def error_response(
error_type="Internal server error",
error_text="",
status=400,
participant=None,
simple=False,
request_data="",
)
|
Return a generic server error response.
| 2.70282
| 2.675875
| 1.01007
|
config = _config()
if error_text is None:
error_text = "There has been an error and so you are unable to continue, sorry!"
if participant is not None:
hit_id = (participant.hit_id,)
assignment_id = (participant.assignment_id,)
worker_id = participant.worker_id
participant_id = participant.id
else:
hit_id = request.form.get("hit_id", "")
assignment_id = request.form.get("assignment_id", "")
worker_id = request.form.get("worker_id", "")
participant_id = request.form.get("participant_id", None)
if participant_id:
try:
participant_id = int(participant_id)
except (ValueError, TypeError):
participant_id = None
return make_response(
render_template(
"error.html",
error_text=error_text,
compensate=compensate,
contact_address=config.get("contact_email_on_error"),
error_type=error_type,
hit_id=hit_id,
assignment_id=assignment_id,
worker_id=worker_id,
request_data=request_data,
participant_id=participant_id,
),
500,
)
|
def error_page(
participant=None,
error_text=None,
compensate=True,
error_type="default",
request_data="",
)
|
Render HTML for error page.
| 2.034356
| 1.998284
| 1.018052
|
exp = Experiment(session)
return dict(experiment=exp, env=os.environ)
|
def inject_experiment()
|
Inject experiment and environment variables into the template context.
| 12.973334
| 7.489792
| 1.732135
|
try:
exp = Experiment(db.init_db(drop_all=False))
except Exception as ex:
return error_response(
error_text="Failed to load experiment in /launch: {}".format(str(ex)),
status=500,
simple=True,
)
try:
exp.log("Launching experiment...", "-----")
except IOError as ex:
return error_response(
error_text="IOError writing to experiment log: {}".format(str(ex)),
status=500,
simple=True,
)
try:
recruitment_details = exp.recruiter.open_recruitment(
n=exp.initial_recruitment_size
)
session.commit()
except Exception as e:
return error_response(
error_text="Failed to open recruitment, check experiment server log "
"for details: {}".format(str(e)),
status=500,
simple=True,
)
for task in exp.background_tasks:
try:
gevent.spawn(task)
except Exception:
return error_response(
error_text="Failed to spawn task on launch: {}, ".format(task)
+ "check experiment server log for details",
status=500,
simple=True,
)
if _config().get("replay", False):
try:
task = ReplayBackend(exp)
gevent.spawn(task)
except Exception:
return error_response(
error_text="Failed to launch replay task for experiment."
"check experiment server log for details",
status=500,
simple=True,
)
# If the experiment defines a channel, subscribe the experiment to the
# redis communication channel:
if exp.channel is not None:
try:
from dallinger.experiment_server.sockets import chat_backend
chat_backend.subscribe(exp, exp.channel)
except Exception:
return error_response(
error_text="Failed to subscribe to chat for channel on launch "
+ "{}".format(exp.channel)
+ ", check experiment server log for details",
status=500,
simple=True,
)
message = "\n".join(
(
"Initial recruitment list:\n{}".format(
"\n".join(recruitment_details["items"])
),
"Additional details:\n{}".format(recruitment_details["message"]),
)
)
return success_response(recruitment_msg=message)
|
def launch()
|
Launch the experiment.
| 3.319855
| 3.28479
| 1.010675
|
if participant is None:
return False
status = participant.status
marked_done = participant.end_time is not None
ready_for_external_submission = (
status in ("overrecruited", "working") and marked_done
)
assignment_complete = status in ("submitted", "approved")
return assignment_complete or ready_for_external_submission
|
def should_show_thanks_page_to(participant)
|
In the context of the /ad route, should the participant be shown
the thanks.html page instead of ad.html?
| 6.469396
| 6.352417
| 1.018415
|
if not ("hitId" in request.args and "assignmentId" in request.args):
raise ExperimentError("hit_assign_worker_id_not_set_in_mturk")
config = _config()
# Browser rule validation, if configured:
browser = ValidatesBrowser(config)
if not browser.is_supported(request.user_agent.string):
raise ExperimentError("browser_type_not_allowed")
hit_id = request.args["hitId"]
assignment_id = request.args["assignmentId"]
app_id = config.get("id", "unknown")
mode = config.get("mode")
debug_mode = mode == "debug"
worker_id = request.args.get("workerId")
participant = None
if worker_id is not None:
# First check if this workerId has completed the task before
# under a different assignment (v1):
already_participated = bool(
models.Participant.query.filter(
models.Participant.assignment_id != assignment_id
)
.filter(models.Participant.worker_id == worker_id)
.count()
)
if already_participated and not debug_mode:
raise ExperimentError("already_did_exp_hit")
# Next, check for participants already associated with this very
# assignment, and retain their status, if found:
try:
participant = (
models.Participant.query.filter(models.Participant.hit_id == hit_id)
.filter(models.Participant.assignment_id == assignment_id)
.filter(models.Participant.worker_id == worker_id)
.one()
)
except exc.SQLAlchemyError:
pass
recruiter_name = request.args.get("recruiter")
if recruiter_name:
recruiter = recruiters.by_name(recruiter_name)
else:
recruiter = recruiters.from_config(config)
recruiter_name = recruiter.nickname
if should_show_thanks_page_to(participant):
# They've either done, or they're from a recruiter that requires
# submission of an external form to complete their participation.
return render_template(
"thanks.html",
hitid=hit_id,
assignmentid=assignment_id,
workerid=worker_id,
external_submit_url=recruiter.external_submission_url,
mode=config.get("mode"),
app_id=app_id,
)
if participant and participant.status == "working":
# Once participants have finished the instructions, we do not allow
# them to start the task again.
raise ExperimentError("already_started_exp_mturk")
# Participant has not yet agreed to the consent. They might not
# even have accepted the HIT.
return render_template(
"ad.html",
recruiter=recruiter_name,
hitid=hit_id,
assignmentid=assignment_id,
workerid=worker_id,
mode=config.get("mode"),
app_id=app_id,
)
|
def advertisement()
|
This is the url we give for the ad for our 'external question'. This page
will be called from within Mechanical Turk, with url arguments hitId,
assignmentId, and workerId, and it has to display two different things.
If the worker has not yet accepted the hit:
these arguments will have null values, and we should just show an ad for
the experiment.
If the worker has accepted the hit:
these arguments will have appropriate values, and we should enter the
person in the database and provide a link to the experiment popup.
| 3.634154
| 3.479635
| 1.044407
|
exp = Experiment(session)
state = {
"status": "success",
"summary": exp.log_summary(),
"completed": exp.is_complete(),
}
unfilled_nets = (
models.Network.query.filter(models.Network.full != true())
.with_entities(models.Network.id, models.Network.max_size)
.all()
)
working = (
models.Participant.query.filter_by(status="working")
.with_entities(func.count(models.Participant.id))
.scalar()
)
state["unfilled_networks"] = len(unfilled_nets)
nodes_remaining = 0
required_nodes = 0
if state["unfilled_networks"] == 0:
if working == 0 and state["completed"] is None:
state["completed"] = True
else:
for net in unfilled_nets:
node_count = (
models.Node.query.filter_by(network_id=net.id, failed=False)
.with_entities(func.count(models.Node.id))
.scalar()
)
net_size = net.max_size
required_nodes += net_size
nodes_remaining += net_size - node_count
state["nodes_remaining"] = nodes_remaining
state["required_nodes"] = required_nodes
if state["completed"] is None:
state["completed"] = False
# Regenerate a waiting room message when checking status
# to counter missed messages at the end of the waiting room
nonfailed_count = models.Participant.query.filter(
(models.Participant.status == "working")
| (models.Participant.status == "overrecruited")
| (models.Participant.status == "submitted")
| (models.Participant.status == "approved")
).count()
exp = Experiment(session)
overrecruited = exp.is_overrecruited(nonfailed_count)
if exp.quorum:
quorum = {"q": exp.quorum, "n": nonfailed_count, "overrecruited": overrecruited}
db.queue_message(WAITING_ROOM_CHANNEL, dumps(quorum))
return Response(dumps(state), status=200, mimetype="application/json")
|
def summary()
|
Summarize the participants' status codes.
| 3.106025
| 3.061731
| 1.014467
|
exp = Experiment(session)
try:
value = exp.public_properties[prop]
except KeyError:
abort(404)
return success_response(**{prop: value})
|
def experiment_property(prop)
|
Get a property of the experiment by name.
| 5.197695
| 5.050491
| 1.029146
|
config = _config()
return render_template(
"consent.html",
hit_id=request.args["hit_id"],
assignment_id=request.args["assignment_id"],
worker_id=request.args["worker_id"],
mode=config.get("mode"),
)
|
def consent()
|
Return the consent form. Here for backwards-compatibility with 2.x.
| 3.529752
| 3.553409
| 0.993342
|
exp = Experiment(session)
# get the parameter
try:
value = request.values[parameter]
except KeyError:
# if it isn't found use the default, or return an error Response
if default is not None:
return default
elif optional:
return None
else:
msg = "{} {} request, {} not specified".format(
request.url, request.method, parameter
)
return error_response(error_type=msg)
# check the parameter type
if parameter_type is None:
# if no parameter_type is required, return the parameter as is
return value
elif parameter_type == "int":
# if int is required, convert to an int
try:
value = int(value)
return value
except ValueError:
msg = "{} {} request, non-numeric {}: {}".format(
request.url, request.method, parameter, value
)
return error_response(error_type=msg)
elif parameter_type == "known_class":
# if its a known class check against the known classes
try:
value = exp.known_classes[value]
return value
except KeyError:
msg = "{} {} request, unknown_class: {} for parameter {}".format(
request.url, request.method, value, parameter
)
return error_response(error_type=msg)
elif parameter_type == "bool":
# if its a boolean, convert to a boolean
if value in ["True", "False"]:
return value == "True"
else:
msg = "{} {} request, non-boolean {}: {}".format(
request.url, request.method, parameter, value
)
return error_response(error_type=msg)
else:
msg = "/{} {} request, unknown parameter type: {} for parameter {}".format(
request.url, request.method, parameter_type, parameter
)
return error_response(error_type=msg)
|
def request_parameter(parameter, parameter_type=None, default=None, optional=False)
|
Get a parameter from a request.
parameter is the name of the parameter you are looking for
parameter_type is the type the parameter should have
default is the value the parameter takes if it has not been passed
If the parameter is not found and no default is specified,
or if the parameter is found but is of the wrong type
then a Response object is returned
| 2.255808
| 2.286678
| 0.9865
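A usage sketch from inside a hypothetical Flask route, assuming the module imports Flask's Response as the source does:

    number = request_parameter(parameter="number", parameter_type="int", default=1)
    if isinstance(number, Response):
        return number  # a Response here signals a malformed request
    # otherwise `number` is a plain int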
|
details = request_parameter(parameter="details", optional=True)
if details:
setattr(thing, "details", loads(details))
for p in range(5):
property_name = "property" + str(p + 1)
property = request_parameter(parameter=property_name, optional=True)
if property:
setattr(thing, property_name, property)
session.commit()
|
def assign_properties(thing)
|
Assign properties to an object.
When creating something via a post request (e.g. a node), you can pass the
properties of the object in the request. This function gets those values
from the request and fills in the relevant columns of the table.
| 3.731691
| 4.509402
| 0.827536
|
# Lock the table, triggering multiple simultaneous accesses to fail
try:
session.connection().execute("LOCK TABLE participant IN EXCLUSIVE MODE NOWAIT")
except exc.OperationalError as e:
e.orig = TransactionRollbackError()
raise e
missing = [p for p in (worker_id, hit_id, assignment_id) if p == "undefined"]
if missing:
msg = "/participant POST: required values were 'undefined'"
return error_response(error_type=msg, status=403)
fingerprint_hash = request.args.get("fingerprint_hash")
try:
fingerprint_found = models.Participant.query.filter_by(
fingerprint_hash=fingerprint_hash
).one_or_none()
except MultipleResultsFound:
fingerprint_found = True
if fingerprint_hash and fingerprint_found:
db.logger.warning("Same browser fingerprint detected.")
if mode == "live":
return error_response(
error_type="/participant POST: Same participant dectected.", status=403
)
already_participated = models.Participant.query.filter_by(
worker_id=worker_id
).one_or_none()
if already_participated:
db.logger.warning("Worker has already participated.")
return error_response(
error_type="/participant POST: worker has already participated.", status=403
)
duplicate = models.Participant.query.filter_by(
assignment_id=assignment_id, status="working"
).one_or_none()
if duplicate:
    msg = (
        "assignment_id has been reused while participant {} is still "
        "working; reassigning."
    )
app.logger.warning(msg.format(duplicate.id))
q.enqueue(worker_function, "AssignmentReassigned", None, duplicate.id)
# Count working or beyond participants.
nonfailed_count = (
models.Participant.query.filter(
(models.Participant.status == "working")
| (models.Participant.status == "overrecruited")
| (models.Participant.status == "submitted")
| (models.Participant.status == "approved")
).count()
+ 1
)
recruiter_name = request.args.get("recruiter", "undefined")
if not recruiter_name or recruiter_name == "undefined":
recruiter = recruiters.from_config(_config())
if recruiter:
recruiter_name = recruiter.nickname
# Create the new participant.
participant = models.Participant(
recruiter_id=recruiter_name,
worker_id=worker_id,
assignment_id=assignment_id,
hit_id=hit_id,
mode=mode,
fingerprint_hash=fingerprint_hash,
)
exp = Experiment(session)
overrecruited = exp.is_overrecruited(nonfailed_count)
if overrecruited:
participant.status = "overrecruited"
session.add(participant)
session.flush() # Make sure we know the id for the new row
result = {"participant": participant.__json__()}
# Queue notification to others in waiting room
if exp.quorum:
quorum = {"q": exp.quorum, "n": nonfailed_count, "overrecruited": overrecruited}
db.queue_message(WAITING_ROOM_CHANNEL, dumps(quorum))
result["quorum"] = quorum
# return the data
return success_response(**result)
|
def create_participant(worker_id, hit_id, assignment_id, mode)
|
Create a participant.
This route is hit early on. Any nodes the participant creates will be
defined in reference to the participant object. You must specify the
worker_id, hit_id, assignment_id, and mode in the url.
| 3.629224
| 3.680634
| 0.986032
|
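A hedged example call to create_participant; the URL pattern /participant/<worker_id>/<hit_id>/<assignment_id>/<mode> is inferred from the docstring, not confirmed, and the host and ids are illustrative.

import requests

resp = requests.post(
    "http://localhost:5000/participant/worker-1/hit-1/assignment-1/debug"
)
participant_id = resp.json()["participant"]["id"]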
try:
ppt = models.Participant.query.filter_by(id=participant_id).one()
except NoResultFound:
return error_response(
error_type="/participant GET: no participant found", status=403
)
# return the data
return success_response(participant=ppt.__json__())
|
def get_participant(participant_id)
|
Get the participant with the given id.
| 5.442827
| 5.298127
| 1.027312
|
try:
net = models.Network.query.filter_by(id=network_id).one()
except NoResultFound:
return error_response(error_type="/network GET: no network found", status=403)
# return the data
return success_response(network=net.__json__())
|
def get_network(network_id)
|
Get the network with the given id.
| 4.816476
| 4.839704
| 0.995201
|
# Get the participant.
try:
ppt = models.Participant.query.filter_by(id=participant_id).one()
except NoResultFound:
return error_response(
error_type="/question POST no participant found", status=403
)
question = request_parameter(parameter="question")
response = request_parameter(parameter="response")
number = request_parameter(parameter="number", parameter_type="int")
for x in [question, response, number]:
if isinstance(x, Response):
return x
# Consult the recruiter regarding whether to accept a questionnaire
# from the participant:
rejection = ppt.recruiter.rejects_questionnaire_from(ppt)
if rejection:
return error_response(
error_type="/question POST, status = {}, reason: {}".format(
ppt.status, rejection
),
participant=ppt,
)
try:
# execute the request
models.Question(
participant=ppt, question=question, response=response, number=number
)
session.commit()
except Exception:
return error_response(error_type="/question POST server error", status=403)
# return the data
return success_response()
|
def create_question(participant_id)
|
Send a POST request to the question table.
Questions store information at the participant level, not the node
level.
You should pass the question (string), number (int) and response
(string) as arguments.
| 4.544168
| 4.342163
| 1.046522
|
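Illustrative usage of create_question, assuming the route is /question/<participant_id>; the host and values are invented.

import requests

resp = requests.post(
    "http://localhost:5000/question/1",
    data={"question": "How difficult was the task?",
          "response": "Very",
          "number": 1},
)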
exp = Experiment(session)
# get the parameters
node_type = request_parameter(
parameter="node_type", parameter_type="known_class", default=models.Node
)
connection = request_parameter(parameter="connection", default="to")
failed = request_parameter(parameter="failed", parameter_type="bool", optional=True)
for x in [node_type, connection]:
    if isinstance(x, Response):
return x
# make sure the node exists
node = models.Node.query.get(node_id)
if node is None:
return error_response(
error_type="/node/neighbors, node does not exist",
error_text="/node/{0}/neighbors, node {0} does not exist".format(node_id),
)
# get its neighbors
if failed is not None:
# This will always raise because "failed" is not a supported parameter.
# We just want to pass the exception message back in the response:
try:
node.neighbors(type=node_type, direction=connection, failed=failed)
except Exception as e:
return error_response(error_type="node.neighbors", error_text=str(e))
else:
nodes = node.neighbors(type=node_type, direction=connection)
try:
# ping the experiment
exp.node_get_request(node=node, nodes=nodes)
session.commit()
except Exception:
return error_response(error_type="exp.node_get_request")
return success_response(nodes=[n.__json__() for n in nodes])
|
def node_neighbors(node_id)
|
Send a GET request to the node table.
This calls the neighbors() method of the node
making the request and returns a list of descriptions of
the nodes (even if there is only one).
Required argument: node_id (in the url)
Optional arguments: node_type, connection, failed
After getting the neighbors it also calls
exp.node_get_request()
| 4.119694
| 3.816566
| 1.079424
|
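A sketch of a matching GET request for node_neighbors; the parameter names follow the code above, while the host and ids are invented.

import requests

resp = requests.get(
    "http://localhost:5000/node/5/neighbors",
    params={"node_type": "Agent", "connection": "from"},
)
neighbor_descriptions = resp.json()["nodes"]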
exp = Experiment(session)
# Get the participant.
try:
participant = models.Participant.query.filter_by(id=participant_id).one()
except NoResultFound:
return error_response(error_type="/node POST no participant found", status=403)
# Make sure the participant status is working
if participant.status != "working":
error_type = "/node POST, status = {}".format(participant.status)
return error_response(error_type=error_type, participant=participant)
# execute the request
network = exp.get_network_for_participant(participant=participant)
if network is None:
return Response(dumps({"status": "error"}), status=403)
node = exp.create_node(participant=participant, network=network)
assign_properties(node)
exp.add_node_to_network(node=node, network=network)
# ping the experiment
exp.node_post_request(participant=participant, node=node)
# return the data
return success_response(node=node.__json__())
|
def create_node(participant_id)
|
Send a POST request to the node table.
This makes a new node for the participant, it calls:
1. exp.get_network_for_participant
2. exp.create_node
3. exp.add_node_to_network
4. exp.node_post_request
| 3.991425
| 3.274418
| 1.218972
|
exp = Experiment(session)
# get the parameters
direction = request_parameter(parameter="direction", default="all")
failed = request_parameter(parameter="failed", parameter_type="bool", default=False)
for x in [direction, failed]:
    if isinstance(x, Response):
return x
# execute the request
node = models.Node.query.get(node_id)
if node is None:
return error_response(error_type="/node/vectors, node does not exist")
try:
vectors = node.vectors(direction=direction, failed=failed)
exp.vector_get_request(node=node, vectors=vectors)
session.commit()
except Exception:
return error_response(
error_type="/node/vectors GET server error",
status=403,
participant=node.participant,
)
# return the data
return success_response(vectors=[v.__json__() for v in vectors])
|
def node_vectors(node_id)
|
Get the vectors of a node.
You must specify the node id in the url.
You can pass direction (incoming/outgoing/all) and failed
(True/False).
| 4.821931
| 4.488251
| 1.074345
|
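A hedged sketch of calling node_vectors; note that because request_parameter's bool handling only accepts the literal strings "True" and "False", failed must be passed in that form. Host and ids are illustrative.

import requests

resp = requests.get(
    "http://localhost:5000/node/5/vectors",
    params={"direction": "incoming", "failed": "False"},  # literal "True"/"False"
)
vectors = resp.json()["vectors"]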
exp = Experiment(session)
# get the parameters
info_type = request_parameter(
parameter="info_type", parameter_type="known_class", default=models.Info
)
if isinstance(info_type, Response):
return info_type
# check the node exists
node = models.Node.query.get(node_id)
if node is None:
return error_response(
error_type="/node/infos, node {} does not exist".format(node_id)
)
# execute the request:
infos = node.received_infos(type=info_type)
try:
# ping the experiment
exp.info_get_request(node=node, infos=infos)
session.commit()
except Exception:
return error_response(
error_type="info_get_request error",
status=403,
participant=node.participant,
)
return success_response(infos=[i.__json__() for i in infos])
|
def node_received_infos(node_id)
|
Get all the infos that have been sent to a node and received by it.
You must specify the node id in the url.
You can also pass the info type.
| 4.919893
| 4.941094
| 0.995709
|
details = request_parameter(parameter="details", optional=True)
if details:
details = loads(details)
# check the node exists
node = models.Node.query.get(node_id)
if node is None:
return error_response(error_type="/info POST, node does not exist")
db.logger.debug(
"rq: Queueing %s with for node: %s for worker_function",
"TrackingEvent",
node_id,
)
q.enqueue(
worker_function, "TrackingEvent", None, None, node_id=node_id, details=details
)
return success_response(details=details)
|
def tracking_event_post(node_id)
|
Enqueue a TrackingEvent worker for the specified Node.
| 6.437219
| 6.173476
| 1.042722
|
# get the parameters and validate them
contents = request_parameter(parameter="contents")
info_type = request_parameter(
parameter="info_type", parameter_type="known_class", default=models.Info
)
for x in [contents, info_type]:
    if isinstance(x, Response):
return x
# check the node exists
node = models.Node.query.get(node_id)
if node is None:
return error_response(error_type="/info POST, node does not exist")
exp = Experiment(session)
try:
# execute the request
info = info_type(origin=node, contents=contents)
assign_properties(info)
# ping the experiment
exp.info_post_request(node=node, info=info)
session.commit()
except Exception:
return error_response(
error_type="/info POST server error",
status=403,
participant=node.participant,
)
# return the data
return success_response(info=info.__json__())
|
def info_post(node_id)
|
Create an info.
The node id must be specified in the url.
You must pass contents as an argument.
info_type is an additional optional argument.
If info_type is a custom subclass of Info it must be
added to the known_classes of the experiment class.
| 5.193417
| 4.722436
| 1.099733
|
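Since a custom Info subclass must be registered in the experiment's known_classes before info_post can resolve it, here is a hedged sketch of that registration; the Meme class and MyExperiment are hypothetical, and the import paths are assumed from context.

from dallinger import models
from dallinger.experiment import Experiment

class Meme(models.Info):
    __mapper_args__ = {"polymorphic_identity": "meme"}

class MyExperiment(Experiment):
    def __init__(self, session=None):
        super(MyExperiment, self).__init__(session)
        # Register the custom class under the name clients will pass
        # as the info_type parameter:
        self.known_classes["Meme"] = Meme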
exp = Experiment(session)
# get the parameters
direction = request_parameter(parameter="direction", default="incoming")
status = request_parameter(parameter="status", default="all")
for x in [direction, status]:
    if isinstance(x, Response):
return x
# check the node exists
node = models.Node.query.get(node_id)
if node is None:
return error_response(error_type="/node/transmissions, node does not exist")
# execute the request
transmissions = node.transmissions(direction=direction, status=status)
try:
if direction in ["incoming", "all"] and status in ["pending", "all"]:
node.receive()
session.commit()
# ping the experiment
exp.transmission_get_request(node=node, transmissions=transmissions)
session.commit()
except Exception:
return error_response(
error_type="/node/transmissions GET server error",
status=403,
participant=node.participant,
)
# return the data
return success_response(transmissions=[t.__json__() for t in transmissions])
|
def node_transmissions(node_id)
|
Get all the transmissions of a node.
The node id must be specified in the url.
You can also pass direction (incoming/outgoing/all) or status
(all/pending/received) as arguments.
| 4.466949
| 4.251375
| 1.050707
|
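A sketch of calling node_transmissions; host and ids are invented. One detail worth noting from the code above: fetching pending incoming transmissions also marks them as received on the server via node.receive().

import requests

resp = requests.get(
    "http://localhost:5000/node/5/transmissions",
    params={"direction": "incoming", "status": "pending"},
)
# Per the code above, this request has the side effect of marking the
# pending transmissions as received.
transmissions = resp.json()["transmissions"]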
exp = Experiment(session)
what = request_parameter(parameter="what", optional=True)
to_whom = request_parameter(parameter="to_whom", optional=True)
# check the node exists
node = models.Node.query.get(node_id)
if node is None:
return error_response(error_type="/node/transmit, node does not exist")
# create what
if what is not None:
try:
what = int(what)
what = models.Info.query.get(what)
if what is None:
return error_response(
error_type="/node/transmit POST, info does not exist",
participant=node.participant,
)
except Exception:
try:
what = exp.known_classes[what]
except KeyError:
msg = "/node/transmit POST, {} not in experiment.known_classes"
return error_response(
error_type=msg.format(what), participant=node.participant
)
# create to_whom
if to_whom is not None:
try:
to_whom = int(to_whom)
to_whom = models.Node.query.get(to_whom)
if to_whom is None:
return error_response(
error_type="/node/transmit POST, recipient Node does not exist",
participant=node.participant,
)
except Exception:
try:
to_whom = exp.known_classes[to_whom]
except KeyError:
msg = "/node/transmit POST, {} not in experiment.known_classes"
return error_response(
error_type=msg.format(to_whom), participant=node.participant
)
# execute the request
try:
transmissions = node.transmit(what=what, to_whom=to_whom)
for t in transmissions:
assign_properties(t)
session.commit()
# ping the experiment
exp.transmission_post_request(node=node, transmissions=transmissions)
session.commit()
except Exception:
return error_response(
error_type="/node/transmit POST, server error", participant=node.participant
)
# return the data
return success_response(transmissions=[t.__json__() for t in transmissions])
|
def node_transmit(node_id)
|
Transmit to another node.
The sender's node id must be specified in the url.
As with node.transmit() the key parameters are what and to_whom. However,
the values these accept are more limited than for the back end due to the
necessity of serialization.
If what and to_whom are not specified they will default to None.
Alternatively you can pass an int (e.g. '5') or a class name (e.g. 'Info' or
'Agent'). Passing an int will get that info/node, passing a class name will
pass the class. Note that if the class you are specifying is a custom class
it will need to be added to the dictionary of known_classes in your
experiment code.
You may also pass the values property1, property2, property3, property4,
property5 and details. If passed, these will fill in the relevant values
of the transmissions created with the values you specified.
For example, to transmit all infos of type Meme to the node with id 10:
dallinger.post(
"/node/" + my_node_id + "/transmit",
{what: "Meme",
to_whom: 10}
);
| 2.368203
| 2.210022
| 1.071574
|
exp = Experiment(session)
# get the parameters
transformation_type = request_parameter(
parameter="transformation_type",
parameter_type="known_class",
default=models.Transformation,
)
if isinstance(transformation_type, Response):
return transformation_type
# check the node exists
node = models.Node.query.get(node_id)
if node is None:
return error_response(
error_type="/node/transformations, "
"node {} does not exist".format(node_id)
)
# execute the request
transformations = node.transformations(type=transformation_type)
try:
# ping the experiment
exp.transformation_get_request(node=node, transformations=transformations)
session.commit()
except Exception:
return error_response(
error_type="/node/transformations GET failed", participant=node.participant
)
# return the data
return success_response(transformations=[t.__json__() for t in transformations])
|
def transformation_get(node_id)
|
Get all the transformations of a node.
The node id must be specified in the url.
You can also pass transformation_type.
| 4.633194
| 4.728286
| 0.979889
|
participants = models.Participant.query.filter_by(
assignment_id=participant.assignment_id
).all()
duplicates = [
p for p in participants if (p.id != participant.id and p.status == "working")
]
for d in duplicates:
q.enqueue(worker_function, "AssignmentAbandoned", None, d.id)
|
def check_for_duplicate_assignments(participant)
|
Check that the assignment_id of the participant is unique.
If it isn't, the older participants will be failed.
| 4.61152
| 4.471219
| 1.031379
|
participant_id = request.args.get("participant_id")
if not participant_id:
return error_response(
error_type="bad request", error_text="participantId parameter is required"
)
try:
_worker_complete(participant_id)
except KeyError:
return error_response(
error_type="ParticipantId not found: {}".format(participant_id)
)
return success_response(status="success")
|
def worker_complete()
|
Complete worker.
| 3.740963
| 3.533367
| 1.058753
|
participant_id = request.args.get("participant_id")
if not participant_id:
return error_response(
error_type="bad request", error_text="participantId parameter is required"
)
try:
_worker_failed(participant_id)
except KeyError:
return error_response(
error_type="ParticipantId not found: {}".format(participant_id)
)
return success_response(
field="status", data="success", request_type="worker failed"
)
|
def worker_failed()
|
Fail worker. Used by bots only for now.
| 3.991275
| 3.925212
| 1.01683
|
_config()
try:
db.logger.debug(
"rq: worker_function working on job id: %s", get_current_job().id
)
db.logger.debug(
"rq: Received Queue Length: %d (%s)", len(q), ", ".join(q.job_ids)
)
except AttributeError:
db.logger.debug("Debug worker_function called synchronously")
exp = Experiment(session)
key = "-----"
exp.log(
"Received an {} notification for assignment {}, participant {}".format(
event_type, assignment_id, participant_id
),
key,
)
if event_type == "TrackingEvent":
node = None
if node_id:
node = models.Node.query.get(node_id)
if not node:
participant = None
if participant_id:
# Lookup assignment_id to create notifications
participant = models.Participant.query.get(participant_id)
elif assignment_id:
participants = models.Participant.query.filter_by(
assignment_id=assignment_id
).all()
# if there are one or more participants select the most recent
if participants:
participant = max(participants, key=attrgetter("creation_time"))
participant_id = participant.id
if not participant:
exp.log(
"Warning: No participant associated with this "
"TrackingEvent notification.",
key,
)
return
nodes = participant.nodes()
if not nodes:
exp.log(
"Warning: No node associated with this "
"TrackingEvent notification.",
key,
)
return
node = max(nodes, key=attrgetter("creation_time"))
if not details:
details = {}
info = information.TrackingEvent(origin=node, details=details)
session.add(info)
session.commit()
return
runner_cls = WorkerEvent.for_name(event_type)
if not runner_cls:
exp.log("Event type {} is not supported... ignoring.".format(event_type))
return
if assignment_id is not None:
# save the notification to the notification table
notif = models.Notification(assignment_id=assignment_id, event_type=event_type)
session.add(notif)
session.commit()
# try to identify the participant
participants = models.Participant.query.filter_by(
assignment_id=assignment_id
).all()
# if there are one or more participants select the most recent
if participants:
participant = max(participants, key=attrgetter("creation_time"))
# if there are none print an error
else:
exp.log(
"Warning: No participants associated with this "
"assignment_id. Notification will not be processed.",
key,
)
return None
elif participant_id is not None:
participant = models.Participant.query.filter_by(id=participant_id).all()[0]
else:
raise ValueError(
"Error: worker_function needs either an assignment_id or a "
"participant_id, they cannot both be None"
)
participant_id = participant.id
runner = runner_cls(
participant, assignment_id, exp, session, _config(), datetime.now()
)
runner()
session.commit()
|
def worker_function(
event_type, assignment_id, participant_id, node_id=None, details=None
)
|
Process the notification.
| 3.154542
| 3.109773
| 1.014396
|
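worker_function is normally enqueued via rq, but as the debug branch above suggests it can also be invoked synchronously; a sketch with illustrative ids, following the signature shown above.

worker_function(
    event_type="TrackingEvent",
    assignment_id=None,
    participant_id=1,
    details={"clicked": True},
)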
cmd = ["heroku", "apps", "--json"]
if self.team:
cmd.extend(["--team", self.team])
return json.loads(self._result(cmd))
|
def all_apps(self)
|
Return a list of all the Heroku apps, parsed from JSON.
| 4.736571
| 4.6797
| 1.012153
|
cmd = ["heroku", "apps:create", self.name, "--buildpack", "heroku/python"]
# If a team is specified, assign the app to the team.
if self.team:
cmd.extend(["--team", self.team])
self._run(cmd)
# Set HOST value
self.set_multiple(
HOST=self.url, CREATOR=self.login_name(), DALLINGER_UID=self.dallinger_uid
)
|
def bootstrap(self)
|
Creates the Heroku app and local git remote. Call this once you're
in the local repo you're going to use.
| 7.137854
| 6.06716
| 1.176474
|
cmd = ["heroku", "addons:create", name, "--app", self.name]
self._run(cmd)
|
def addon(self, name)
|
Set up an addon
| 6.399279
| 6.632127
| 0.964891
|
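A usage sketch tying the Heroku helpers above together; it assumes they live on dallinger.heroku.tools.HerokuApp with a dallinger_uid constructor argument, which is an inference from context rather than something this document confirms.

from dallinger.heroku.tools import HerokuApp  # assumed import path

app = HerokuApp(dallinger_uid="my-experiment-uid")  # assumed constructor
app.bootstrap()                             # create the app and set config vars
app.addon("heroku-postgresql:standard-0")   # provision a Postgres add-on
print([a["name"] for a in app.all_apps()])  # list apps visible to the caller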