Columns (name: dtype):
code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
my_plurals = resource_helper.build_plural_mappings({}, RESOURCE_ATTRIBUTE_MAP)
attributes.PLURALS.update(my_plurals)
attr_map = RESOURCE_ATTRIBUTE_MAP
resources = resource_helper.build_resource_info(my_plurals, attr_map, constants.A10_DEVICE_INSTANCE)
return resources
def get_resources(cls)
Returns external resources.
5.639682
5.431263
1.038374
vth = a10_cfg.get_vthunder_config()
initialize_interfaces(vth, device_cfg, client)
initialize_dns(vth, device_cfg, client)
initialize_licensing(vth, device_cfg, client)
initialize_sflow(vth, device_cfg, client)
def initialize_vthunder(a10_cfg, device_cfg, client)
Perform initialization of system-wide settings
2.903374
2.89914
1.00146
if db_session is not None:
    yield db_session
else:
    session = get_session(url, expire_on_commit=False)
    try:
        try:
            yield session
        finally:
            session.commit()
    finally:
        session.close()
def magic_session(db_session=None, url=None)
Either does nothing with the session you already have, or makes one that commits and closes no matter what happens.
2.630589
2.544712
1.033747
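A usage sketch for magic_session's pass-through branch (an assumption: the original function presumably carries contextlib's contextmanager decorator, as its yields imply; FakeSession is a stand-in so the snippet runs without a database):

from contextlib import contextmanager

session_scope = contextmanager(magic_session)

class FakeSession(object):
    def commit(self):
        print("commit")
    def close(self):
        print("close")

existing = FakeSession()
with session_scope(db_session=existing) as s:
    assert s is existing  # pass-through: nothing committed or closed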
for attached_interface in server.interface_list():
    if attached_interface.net_id == network_id:
        if any(map(lambda x: x['ip_address'] in wrong_ips, attached_interface.fixed_ips)):
            continue
        return attached_interface
return server.interface_attach(None, network_id, None)
def _plumb_port(self, server, network_id, wrong_ips)
Look for an existing port on the network; add one if it doesn't exist.
3.509574
3.19108
1.099807
# Get an available IP and mark it as used before someone else does
# If there's no IP, log it and return an error
# If we successfully get an IP, create a port with the specified MAC and device data
# If port creation fails, deallocate the IP
subnet = self.get_subnet(subnet_id)
ip, mask, port_id = self.a10_allocate_ip_from_dhcp_range(subnet, "vlan", mac, port_id)
return ip, mask, port_id
def allocate_ip_for_subnet(self, subnet_id, mac, port_id)
Allocates an IP from the specified subnet and creates a port
8.292347
7.830733
1.058949
subnet_id = subnet["id"]
network_id = subnet["network_id"]
iprange_result = self.get_ipallocationpool_by_subnet_id(subnet_id)
ip_in_use_list = [x.ip_address for x in self.get_ipallocations_by_subnet_id(subnet_id)]
range_begin, range_end = iprange_result.first_ip, iprange_result.last_ip
ip_address = IPHelpers.find_unused_ip(range_begin, range_end, ip_in_use_list)
if not ip_address:
    msg = "Cannot allocate from subnet {0}".format(subnet)
    LOG.error(msg)
    # TODO(mdurrant) - Raise neutron exception
    raise Exception
mark_in_use = {
    "ip_address": ip_address,
    "network_id": network_id,
    "port_id": port_id,
    "subnet_id": subnet["id"]
}
self.create_ipallocation(mark_in_use)
return ip_address, subnet["cidr"], mark_in_use["port_id"]
def a10_allocate_ip_from_dhcp_range(self, subnet, interface_id, mac, port_id)
Search for an available IP address from the unallocated nmodels.IPAllocationPool range. If no addresses are available, an error is raised. Returns the address as a string. The search is conducted by taking the difference of the nmodels.IPAllocationPool set and the current IP allocations.
2.837852
2.804268
1.011976
pool_name = self._pool_name(context, pool_id)
c.client.slb.service_group.update(pool_name, health_monitor="", health_check_disable=True)
def _dissociate(self, c, context, hm, pool_id)
Remove a pool association
6.486665
6.550919
0.990192
self._dissociate(c, context, hm, pool_id)
pools = hm.get("pools", [])
if not any(p for p in pools if p.get("pool_id") != pool_id):
    self._delete_unused(c, context, hm)
def dissociate(self, c, context, hm, pool_id)
Remove a pool association, and the healthmonitor if it's the last one.
3.675251
3.857892
0.952658
pools = hm.get("pools", [])
for pool in pools:
    pool_id = pool.get("pool_id")
    self._dissociate(c, context, hm, pool_id)
self._delete_unused(c, context, hm)
def _delete(self, c, context, hm)
Delete a healthmonitor and ALL its pool associations
3.550033
3.251579
1.091787
if self.fitness is not None:
    raise Exception(
        "You are calculating the fitness of agent {}, ".format(self.id)
        + "but they already have a fitness"
    )
said_blue = self.infos(type=Meme)[0].contents == "blue"
proportion = float(
    max(
        self.network.nodes(type=RogersEnvironment)[0].infos(),
        key=attrgetter("id"),
    ).contents
)
self.proportion = proportion
is_blue = proportion > 0.5
if said_blue is is_blue:
    self.score = 1
else:
    self.score = 0
is_asocial = self.infos(type=LearningGene)[0].contents == "asocial"
e = 2
b = 1
c = 0.3 * b
baseline = c + 0.0001
self.fitness = (baseline + self.score * b - is_asocial * c) ** e
def calculate_fitness(self)
Calculate your fitness.
7.110942
6.91045
1.029013
genes = [i for i in infos if isinstance(i, LearningGene)]
for gene in genes:
    if (
        self.network.role == "experiment"
        and self.generation > 0
        and random.random() < 0.10
    ):
        self.mutate(gene)
    else:
        self.replicate(gene)
def update(self, infos)
Process received infos.
6.241242
6.051014
1.031437
if not self.networks():
    super(Bartlett1932, self).setup()
    for net in self.networks():
        self.models.WarOfTheGhostsSource(network=net)
def setup(self)
Setup the networks. Setup only does anything if there are no networks; this ensures it runs only once, at the start of the experiment. It first calls the same function in the super (see experiments.py in dallinger), then adds a source to each network.
22.965563
10.50423
2.186316
network.add_node(node)
parents = node.neighbors(direction="from")
if len(parents):
    parent = parents[0]
    parent.transmit()
node.receive()
def add_node_to_network(self, node, network)
Add node to the chain and receive transmissions.
7.810977
6.533191
1.195584
if self.networks(full=False):
    self.recruiter.recruit(n=1)
else:
    self.recruiter.close_recruitment()
def recruit(self)
Recruit one participant at a time until all networks are full.
7.25594
4.818219
1.505938
try: logger.info("Entering participate method") ready = WebDriverWait(self.driver, 10).until( EC.element_to_be_clickable((By.ID, "finish-reading")) ) stimulus = self.driver.find_element_by_id("stimulus") story = stimulus.find_element_by_id("story") story_text = story.text logger.info("Stimulus text:") logger.info(story_text) ready.click() submit = WebDriverWait(self.driver, 10).until( EC.element_to_be_clickable((By.ID, "submit-response")) ) textarea = WebDriverWait(self.driver, 10).until( EC.element_to_be_clickable((By.ID, "reproduction")) ) textarea.clear() text = self.transform_text(story_text) logger.info("Transformed text:") logger.info(text) textarea.send_keys(text) submit.click() return True except TimeoutException: return False
def participate(self)
Finish reading and send text
2.310122
2.184143
1.057679
info = self._info_type()(origin=self, contents=self._contents())
return info
def create_information(self)
Create new infos on demand.
20.100086
16.768061
1.198713
if not self.networks():
    super(IteratedDrawing, self).setup()
    for net in self.networks():
        self.models.DrawingSource(network=net)
def setup(self)
Setup the networks. Setup only does anything if there are no networks; this ensures it runs only once, at the start of the experiment. It first calls the same function in the super (see experiments.py in dallinger), then adds a source to each network.
21.231936
8.889158
2.38852
super(RogersExperiment, self).setup()
for net in random.sample(self.networks(role="experiment"), self.catch_repeats):
    net.role = "catch"
for net in self.networks():
    source = self.models.RogersSource(network=net)
    source.create_information()
    net.max_size = net.max_size + 1  # make room for environment node.
    env = self.models.RogersEnvironment(network=net)
    env.proportion = self.color_proportion_for_network(net)
    env.create_information()
def setup(self)
First time setup.
7.249377
6.958251
1.041839
return DiscreteGenerational(
    generations=self.generations,
    generation_size=self.generation_size,
    initial_source=True,
)
def create_network(self)
Create a new network.
12.228286
10.758287
1.136639
return self.models.RogersAgent(network=network, participant=participant)
def create_node(self, network, participant)
Make a new node for participants.
18.304365
15.122136
1.210435
num_approved = len(Participant.query.filter_by(status="approved").all())
current_generation = participant.nodes()[0].generation
if (
    num_approved % self.generation_size == 0
    and (current_generation % 10 + 1) == 0
):
    for e in self.models.RogersEnvironment.query.all():
        e.step()
def submission_successful(self, participant)
Run when a participant submits successfully.
7.413736
6.83863
1.084097
num_approved = len(Participant.query.filter_by(status="approved").all())
end_of_generation = num_approved % self.generation_size == 0
complete = num_approved >= (self.generations * self.generation_size)
if complete:
    self.log("All networks full: closing recruitment", "-----")
    self.recruiter.close_recruitment()
elif end_of_generation:
    self.log("generation finished, recruiting another")
    self.recruiter.recruit(n=self.generation_size)
def recruit(self)
Recruit participants if necessary.
5.273668
4.888122
1.078874
scores = [
    n.score for n in participant.nodes() if n.network.role == "experiment"
]
average = float(sum(scores)) / float(len(scores))
bonus = round(max(0.0, ((average - 0.5) * 2)) * self.bonus_payment, 2)
return bonus
def bonus(self, participant)
Calculate a participant's bonus.
5.536648
5.207116
1.063285
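A quick worked example of the bonus formula above: scores are rescaled so that chance performance (0.5) earns nothing and a perfect score earns the full bonus_payment. The values here are made up:

average = 0.75        # hypothetical mean score across experiment nodes
bonus_payment = 1.00  # hypothetical maximum bonus in dollars
bonus = round(max(0.0, (average - 0.5) * 2) * bonus_payment, 2)
assert bonus == 0.5   # halfway between chance and perfect -> half the bonus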
if self.catch_repeats == 0:
    return True
scores = [n.score for n in participant.nodes() if n.network.role == "catch"]
avg = float(sum(scores)) / float(len(scores))
return avg >= self.min_acceptable_performance
def attention_check(self, participant=None)
Check a participant paid attention.
8.164359
8.138578
1.003168
total_size = 0
exclusions = exclusion_policy()
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
    current_exclusions = exclusions(dirpath, os.listdir(dirpath))
    # Modifying dirnames in-place will prune the subsequent files and
    # directories visited by os.walk. This is only possible when
    # topdown = True
    dirnames[:] = [d for d in dirnames if d not in current_exclusions]
    legit_files = [f for f in filenames if f not in current_exclusions]
    for f in legit_files:
        fp = os.path.join(dirpath, f)
        total_size += os.path.getsize(fp)
return total_size
def size_on_copy(root=".")
Return the size of the experiment directory in bytes, excluding any files and directories which would be excluded on copy.
3.140053
2.932696
1.070705
app_id = config.get("id") dst = os.path.join(tempfile.mkdtemp(), app_id) # Copy local experiment files, minus some shutil.copytree(os.getcwd(), dst, ignore=exclusion_policy()) # Export the loaded configuration config.write(filter_sensitive=True, directory=dst) # Save the experiment id with open(os.path.join(dst, "experiment_id.txt"), "w") as file: file.write(app_id) # Copy Dallinger files dallinger_root = dallinger_package_path() ensure_directory(os.path.join(dst, "static", "scripts")) ensure_directory(os.path.join(dst, "static", "css")) frontend_files = [ os.path.join("static", "css", "dallinger.css"), os.path.join("static", "scripts", "dallinger2.js"), os.path.join("static", "scripts", "reqwest.min.js"), os.path.join("static", "scripts", "require.js"), os.path.join("static", "scripts", "reconnecting-websocket.js"), os.path.join("static", "scripts", "spin.min.js"), os.path.join("static", "scripts", "tracker.js"), os.path.join("static", "scripts", "store+json2.min.js"), os.path.join("templates", "error.html"), os.path.join("templates", "error-complete.html"), os.path.join("templates", "launch.html"), os.path.join("templates", "complete.html"), os.path.join("templates", "questionnaire.html"), os.path.join("templates", "thanks.html"), os.path.join("templates", "waiting.html"), os.path.join("static", "robots.txt"), ] frontend_dirs = [os.path.join("templates", "base")] for filename in frontend_files: src = os.path.join(dallinger_root, "frontend", filename) dst_filepath = os.path.join(dst, filename) if not os.path.exists(dst_filepath): shutil.copy(src, dst_filepath) for filename in frontend_dirs: src = os.path.join(dallinger_root, "frontend", filename) dst_filepath = os.path.join(dst, filename) if not os.path.exists(dst_filepath): shutil.copytree(src, dst_filepath) # Copy Heroku files heroku_files = ["Procfile", "runtime.txt"] for filename in heroku_files: src = os.path.join(dallinger_root, "heroku", filename) shutil.copy(src, os.path.join(dst, filename)) if not config.get("clock_on"): # If the clock process has been disabled, overwrite the Procfile: src = os.path.join(dallinger_root, "heroku", "Procfile_no_clock") shutil.copy(src, os.path.join(dst, "Procfile")) return dst
def assemble_experiment_temp_dir(config)
Create a temp directory from which to run an experiment. The new directory will include: copies of custom experiment files which don't match the exclusion policy; templates and static resources from Dallinger; an export of the loaded configuration; and Heroku-specific files (Procfile, runtime.txt) from Dallinger. Assumes the experiment root directory is the current working directory. Returns the absolute path of the new directory.
2.285271
2.14485
1.065469
# Verify that the Postgres server is running.
try:
    db.check_connection()
except Exception:
    log("There was a problem connecting to the Postgres database!")
    raise

# Check that the demo-specific requirements are satisfied.
try:
    with open("requirements.txt", "r") as f:
        dependencies = [r for r in f.readlines() if r[:3] != "-e "]
except (OSError, IOError):
    dependencies = []
pkg_resources.require(dependencies)

# Generate a unique id for this experiment.
from dallinger.experiment import Experiment

generated_uid = public_id = Experiment.make_uuid(app)

# If the user provided an app name, use it everywhere that's user-facing.
if app:
    public_id = str(app)

log("Experiment id is " + public_id + "")

# Load and update the config
config = get_config()
if not config.ready:
    config.load()
#
if exp_config:
    config.extend(exp_config)
config.extend({"id": six.text_type(generated_uid)})

temp_dir = assemble_experiment_temp_dir(config)
log("Deployment temp directory: {}".format(temp_dir), chevrons=False)

# Zip up the temporary directory and place it in the cwd.
if not debug:
    log("Freezing the experiment package...")
    shutil.make_archive(
        os.path.join(os.getcwd(), "snapshots", public_id + "-code"), "zip", temp_dir
    )
return (public_id, temp_dir)
def setup_experiment(log, debug=True, verbose=False, app=None, exp_config=None)
Checks the experiment's python dependencies, then prepares a temp directory with files merged from the custom experiment and Dallinger. The resulting directory includes all the files necessary to deploy to Heroku.
5.121492
4.859078
1.054005
self.configure()
self.setup()
self.update_dir()
db.init_db(drop_all=True)
self.out.log("Starting up the server...")
config = get_config()
with HerokuLocalWrapper(config, self.out, verbose=self.verbose) as wrapper:
    try:
        self.execute(wrapper)
    except KeyboardInterrupt:
        pass
    finally:
        os.chdir(self.original_dir)
        self.cleanup()
def run(self)
Set up the environment, get a HerokuLocalWrapper instance, and pass it to the concrete class's execute() method.
5.905589
4.216519
1.400584
for regex, handler in self.dispatch.items():
    match = re.search(regex, message)
    if match:
        handler = getattr(self, handler)
        return handler(match)
def notify(self, message)
Callback function which checks lines of output, tries to match against regex defined in subclass's "dispatch" dict, and passes through to a handler on match.
3.860847
2.628002
1.469118
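A minimal, runnable sketch of the dispatch pattern this notify() implements; the regex key and handler name here are made up, not Dallinger's actual dispatch table:

import re

class ExampleListener(object):
    # Hypothetical mapping of output-line regexes to handler method names.
    dispatch = {r"New participant: (\S+)": "new_recruit"}

    def notify(self, message):
        for regex, handler_name in self.dispatch.items():
            match = re.search(regex, message)
            if match:
                return getattr(self, handler_name)(match)

    def new_recruit(self, match):
        return "would open {}".format(match.group(1))

print(ExampleListener().notify("New participant: http://localhost:5000/ad"))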
self.out.log("new recruitment request!") url = match.group(1) if self.proxy_port is not None: self.out.log("Using proxy port {}".format(self.proxy_port)) url = url.replace(str(get_config().get("base_port")), self.proxy_port) new_webbrowser_profile().open(url, new=1, autoraise=True)
def new_recruit(self, match)
Dispatched to by notify(). If a recruitment request has been issued, open a browser window for a new participant (in this case the person doing local debugging).
5.325166
4.607111
1.155858
if self.status_thread is None:
    self.status_thread = threading.Thread(target=self.check_status)
    self.status_thread.start()
def recruitment_closed(self, match)
Recruitment is closed. Start a thread to check the experiment summary.
3.176677
2.349698
1.351951
self.out.log("Recruitment is complete. Waiting for experiment completion...") base_url = get_base_url() status_url = base_url + "/summary" while not self.complete: time.sleep(10) try: resp = requests.get(status_url) exp_data = resp.json() except (ValueError, requests.exceptions.RequestException): self.out.error("Error fetching experiment status.") else: self.out.log("Experiment summary: {}".format(exp_data)) if exp_data.get("completed", False): self.out.log("Experiment completed, all nodes filled.") self.complete = True self.heroku.stop()
def check_status(self)
Check the output of the summary route until the experiment is complete, then we can stop monitoring Heroku subprocess output.
4.183997
3.621221
1.155411
if self.complete:
    return HerokuLocalWrapper.MONITOR_STOP
return super(DebugDeployment, self).notify(message)
def notify(self, message)
Monitor output from heroku process. This overrides the base class's `notify` to make sure that we stop if the status-monitoring thread has determined that the experiment is complete.
33.265018
24.244701
1.372053
db.init_db(drop_all=True)
self.out.log(
    "Ingesting dataset from {}...".format(os.path.basename(self.zip_path))
)
data.ingest_zip(self.zip_path)
base_url = get_base_url()
self.out.log("Server is running on {}. Press Ctrl+C to exit.".format(base_url))
if self.exp_config.get("replay"):
    self.out.log("Launching the experiment...")
    time.sleep(4)
    _handle_launch_data("{}/launch".format(base_url), error=self.out.error)
    heroku.monitor(listener=self.notify)
# Just run until interrupted:
while self.keep_running():
    time.sleep(1)
def execute(self, heroku)
Start the server, load the zip file into the database, then loop until terminated with <control>-c.
6.883092
6.013724
1.144564
self.out.log("replay ready!") url = match.group(1) new_webbrowser_profile().open(url, new=1, autoraise=True)
def start_replay(self, match)
Dispatched to by notify(). If a recruitment request has been issued, open a browser window for a new participant (in this case the person doing local debugging).
10.832315
9.714282
1.115092
app = util.import_app("dallinger.experiment_server.sockets:app")
if self.options.get("mode") == "debug":
    app.debug = True
return app
def load(self)
Return our application to be run.
11.227514
8.529117
1.316375
animal = json.loads(self.contents)
for prop, prop_range in self.properties.items():
    range = prop_range[1] - prop_range[0]
    jittered = animal[prop] + random.gauss(0, 0.1 * range)
    animal[prop] = max(min(jittered, prop_range[1]), prop_range[0])
return json.dumps(animal)
def perturbed_contents(self)
Perturb the given animal.
3.085375
2.714318
1.136704
other_nodes = [n for n in self.nodes() if n.id != node.id]
if len(self.nodes()) > 11:
    parents = [max(other_nodes, key=attrgetter("creation_time"))]
else:
    parents = [n for n in other_nodes if isinstance(n, Source)]
for parent in parents:
    parent.connect(whom=node)
def add_node(self, node)
Add an agent, connecting it to the previous node.
4.520318
4.226645
1.069481
other_nodes = [n for n in self.nodes() if n.id != node.id]
for n in other_nodes:
    if isinstance(n, Source):
        node.connect(direction="from", whom=n)
    else:
        node.connect(direction="both", whom=n)
def add_node(self, node)
Add a node, connecting it to everyone and back.
4.531115
4.050518
1.118651
nodes = [n for n in self.nodes() if not isinstance(n, Source)]
source.connect(whom=nodes)
def add_source(self, source)
Connect the source to all existing other nodes.
9.319481
6.252644
1.490487
nodes = self.nodes()
if len(nodes) > 1:
    first_node = min(nodes, key=attrgetter("creation_time"))
    first_node.connect(direction="both", whom=node)
def add_node(self, node)
Add a node and connect it to the center.
7.149094
6.319424
1.131289
num_agents = len(self.nodes(type=Agent))
curr_generation = int((num_agents - 1) / float(self.generation_size))
node.generation = curr_generation
if curr_generation == 0 and self.initial_source:
    parent = self._select_oldest_source()
else:
    parent = self._select_fit_node_from_generation(
        node_type=type(node), generation=curr_generation - 1
    )
if parent is not None:
    parent.connect(whom=node)
    parent.transmit(to_whom=node)
def add_node(self, node)
Link to the agent from a parent based on the parent's fitness
5.325963
4.758675
1.119211
for predecessor in self._most_recent_predecessors_to(node):
    predecessor.connect(whom=node)
def add_node(self, node)
Add a node, connecting it to all the active nodes.
12.721079
12.506533
1.017155
class_ = getattr(networks, self.network_class)
return class_(max_size=self.quorum)
def create_network(self)
Create a new network by reading the configuration file.
13.409731
10.446974
1.2836
for agent in node.neighbors():
    node.transmit(what=info, to_whom=agent)
def info_post_request(self, node, info)
Run when a request to create an info is complete.
14.238576
13.575434
1.048849
def new_func(*args, **kwargs):
    resp = make_response(func(*args, **kwargs))
    resp.cache_control.no_cache = True
    return resp
return update_wrapper(new_func, func)
def nocache(func)
Stop caching for pages wrapped in nocache decorator.
2.636256
2.391006
1.102572
exclusion_rules = [
    r.strip()
    for r in self.config.get("browser_exclude_rule", "").split(",")
    if r.strip()
]
return exclusion_rules
def exclusions(self)
Return list of browser exclusion rules defined in the Configuration.
5.010116
3.282343
1.526384
user_agent_obj = user_agents.parse(user_agent_string)
browser_ok = True
for rule in self.exclusions:
    if rule in ["mobile", "tablet", "touchcapable", "pc", "bot"]:
        if (
            (rule == "mobile" and user_agent_obj.is_mobile)
            or (rule == "tablet" and user_agent_obj.is_tablet)
            or (rule == "touchcapable" and user_agent_obj.is_touch_capable)
            or (rule == "pc" and user_agent_obj.is_pc)
            or (rule == "bot" and user_agent_obj.is_bot)
        ):
            browser_ok = False
    elif rule in user_agent_string:
        browser_ok = False
return browser_ok
def is_supported(self, user_agent_string)
Check user agent against configured exclusions.
1.985657
1.884445
1.053709
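A small runnable sketch of the classification the exclusion rules rely on; the user_agents package is the real dependency here, while the UA string is just an illustrative iPhone agent:

import user_agents

ua = user_agents.parse(
    "Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) "
    "AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148"
)
print(ua.is_mobile)  # True, so a "mobile" exclusion rule rejects this browser
print(ua.is_bot)     # False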
if not self.networks():
    super(FunctionLearning, self).setup()
    for net in self.networks():
        self.models.SinusoidalFunctionSource(network=net)
def setup(self)
Setup only does anything if there are no networks; this ensures it runs once, at the start of the experiment. It first calls the same function in the super (see experiments.py in dallinger), then adds a source to each network.
19.101791
9.787007
1.95175
return self.models.MCMCPAgent(network=network, participant=participant)
def create_node(self, network, participant)
Create a node for a participant.
25.186207
23.251419
1.083212
infos = participant.infos()
return len([info for info in infos if info.chosen]) * 2 == len(infos)
def data_check(self, participant)
Make sure each trial contains exactly one chosen info.
11.649615
6.163123
1.890213
try:
    while True:
        left = WebDriverWait(self.driver, 10).until(
            EC.element_to_be_clickable((By.ID, "left_button"))
        )
        right = WebDriverWait(self.driver, 10).until(
            EC.element_to_be_clickable((By.ID, "right_button"))
        )
        random.choice((left, right)).click()
        time.sleep(1.0)
except TimeoutException:
    return False
def participate(self)
Click the left or right button at random until the experiment ends.
2.281678
2.19708
1.038505
# Check locally first
cwd = os.getcwd()
data_filename = "{}-data.zip".format(app_id)
path_to_data = os.path.join(cwd, "data", data_filename)
if os.path.exists(path_to_data):
    try:
        Data(path_to_data)
    except IOError:
        from dallinger import logger

        logger.exception(
            "Error reading local data file {}, checking remote.".format(
                path_to_data
            )
        )
    else:
        return path_to_data

# Get remote file instead
path_to_data = os.path.join(tempfile.mkdtemp(), data_filename)
buckets = [user_s3_bucket(), dallinger_s3_bucket()]
for bucket in buckets:
    try:
        bucket.download_file(data_filename, path_to_data)
    except botocore.exceptions.ClientError:
        pass
    else:
        return path_to_data
def find_experiment_export(app_id)
Attempt to find a zipped export of an experiment with the ID provided and return its path. Returns None if not found. Search order: 1. local "data" subdirectory 2. user S3 bucket 3. Dallinger S3 bucket
3.004743
2.565182
1.171357
path_to_data = find_experiment_export(app_id)
if path_to_data is None:
    raise IOError("Dataset {} could not be found.".format(app_id))
return Data(path_to_data)
def load(app_id)
Load the data from wherever it is found.
4.849451
4.498373
1.078046
tmp_dir = tempfile.mkdtemp()
current_dir = os.getcwd()
os.chdir(tmp_dir)
FNULL = open(os.devnull, "w")
heroku_app = HerokuApp(dallinger_uid=id, output=FNULL)
heroku_app.backup_capture()
heroku_app.backup_download()
for filename in os.listdir(tmp_dir):
    if filename.startswith("latest.dump"):
        os.rename(filename, "database.dump")
os.chdir(current_dir)
return os.path.join(tmp_dir, "database.dump")
def dump_database(id)
Dump the database to a temporary directory.
3.534269
3.320498
1.064379
filename = dump_database(id)
key = "{}.dump".format(id)
bucket = user_s3_bucket()
bucket.upload_file(filename, key)
return _generate_s3_url(bucket, key)
def backup(id)
Backup the database to S3.
5.690653
4.706806
1.209026
bucket = registration_s3_bucket()
key = registration_key(id)
obj = bucket.Object(key)
obj.put(Body=url or "missing")
return _generate_s3_url(bucket, key)
def register(id, url=None)
Register a UUID key in the global S3 bucket.
5.132969
4.668585
1.09947
bucket = registration_s3_bucket()
key = registration_key(id)
found_keys = set(obj.key for obj in bucket.objects.filter(Prefix=key))
return key in found_keys
def is_registered(id)
Check if a UUID is already registered
4.444266
4.177183
1.063939
heroku_app = HerokuApp(dallinger_uid=id)
try:
    subprocess.call(["dropdb", heroku_app.name])
except Exception:
    pass
heroku_app.pg_pull()
def copy_heroku_to_local(id)
Copy a Heroku database locally.
7.921005
7.852973
1.008663
if "postgresql://" in dsn or "postgres://" in dsn: conn = psycopg2.connect(dsn=dsn) else: conn = psycopg2.connect(database=dsn, user="dallinger") cur = conn.cursor() for table in table_names: csv_path = os.path.join(path, "{}.csv".format(table)) with open(csv_path, "w") as f: sql = "COPY {} TO STDOUT WITH CSV HEADER".format(table) cur.copy_expert(sql, f) conn.close() if scrub_pii: _scrub_participant_table(path)
def copy_db_to_csv(dsn, path, scrub_pii=False)
Copy a local database to a set of CSV files.
2.293379
2.303096
0.995781
path = os.path.join(path_to_data, "participant.csv")
with open_for_csv(path, "r") as input, open("{}.0".format(path), "w") as output:
    reader = csv.reader(input)
    writer = csv.writer(output)
    headers = next(reader)
    writer.writerow(headers)
    for i, row in enumerate(reader):
        row[headers.index("worker_id")] = row[headers.index("id")]
        row[headers.index("unique_id")] = "{}:{}".format(
            row[headers.index("id")], row[headers.index("assignment_id")]
        )
        writer.writerow(row)
os.rename("{}.0".format(path), path)
def _scrub_participant_table(path_to_data)
Scrub PII from the given participant table.
2.405528
2.361097
1.018818
print("Preparing to export the data...") if local: db_uri = db.db_url else: db_uri = HerokuApp(id).db_uri # Create the data package if it doesn't already exist. subdata_path = os.path.join("data", id, "data") try: os.makedirs(subdata_path) except OSError as e: if e.errno != errno.EEXIST or not os.path.isdir(subdata_path): raise # Copy in the data. copy_db_to_csv(db_uri, subdata_path, scrub_pii=scrub_pii) # Copy the experiment code into a code/ subdirectory. try: shutil.copyfile( os.path.join("snapshots", id + "-code.zip"), os.path.join("data", id, id + "-code.zip"), ) except Exception: pass # Copy in the DATA readme. # open(os.path.join(id, "README.txt"), "a").close() # Save the experiment id. with open(os.path.join("data", id, "experiment_id.md"), "a+") as file: file.write(id) # Zip data src = os.path.join("data", id) dst = os.path.join("data", id + "-data.zip") archive_data(id, src, dst) cwd = os.getcwd() data_filename = "{}-data.zip".format(id) path_to_data = os.path.join(cwd, "data", data_filename) # Backup data on S3 unless run locally if not local: bucket = user_s3_bucket() bucket.upload_file(path_to_data, data_filename) url = _generate_s3_url(bucket, data_filename) # Register experiment UUID with dallinger register(id, url) return path_to_data
def export(id, local=False, scrub_pii=False)
Export data from an experiment.
3.492504
3.44987
1.012358
import_order = [
    "network",
    "participant",
    "node",
    "info",
    "notification",
    "question",
    "transformation",
    "vector",
    "transmission",
]
with ZipFile(path, "r") as archive:
    filenames = archive.namelist()
    for name in import_order:
        filename = [f for f in filenames if name in f][0]
        model_name = name.capitalize()
        model = getattr(models, model_name)
        file = archive.open(filename)
        if six.PY3:
            file = io.TextIOWrapper(file, encoding="utf8", newline="")
        ingest_to_model(file, model, engine)
def ingest_zip(path, engine=None)
Given a path to a zip file created with `export()`, recreate the database with the data stored in the included .csv files.
4.092053
4.035753
1.01395
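A round-trip sketch tying export() and ingest_zip() together. The app id is made up, and this assumes a local Postgres database with populated experiment tables:

zip_path = export("a1b2c3", local=True)  # writes data/a1b2c3-data.zip
ingest_zip(zip_path)                     # rebuild the tables from the zip's CSVs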
if engine is None:
    engine = db.engine
reader = csv.reader(file)
columns = tuple('"{}"'.format(n) for n in next(reader))
postgres_copy.copy_from(
    file, model, engine, columns=columns, format="csv", HEADER=False
)
fix_autoincrement(model.__table__.name)
def ingest_to_model(file, model, engine=None)
Load data from a CSV file handle into storage for a SQLAlchemy model class.
5.425543
5.606939
0.967648
exists = True
try:
    s3.meta.client.head_bucket(Bucket=name)
except botocore.exceptions.ClientError as e:
    error_code = int(e.response["Error"]["Code"])
    if error_code == 404:
        exists = False
    else:
        raise
if not exists:
    s3.create_bucket(Bucket=name)
return s3.Bucket(name)
def _get_or_create_s3_bucket(s3, name)
Get an S3 bucket resource after making sure it exists
1.630086
1.561604
1.043854
s3 = _s3_resource()
if not canonical_user_id:
    canonical_user_id = _get_canonical_aws_user_id(s3)
s3_bucket_name = "dallinger-{}".format(
    hashlib.sha256(canonical_user_id.encode("utf8")).hexdigest()[0:8]
)
return _get_or_create_s3_bucket(s3, s3_bucket_name)
def user_s3_bucket(canonical_user_id=None)
Get the user's S3 bucket.
2.598147
2.584425
1.00531
config = get_config()
if not config.ready:
    config.load()
region = "us-east-1" if dallinger_region else config.get("aws_region")
return boto3.resource(
    "s3",
    region_name=region,
    aws_access_key_id=config.get("aws_access_key_id"),
    aws_secret_access_key=config.get("aws_secret_access_key"),
)
def _s3_resource(dallinger_region=False)
A boto3 S3 resource using the AWS keys in the config.
1.91813
1.838649
1.043228
images = ["owl.png"] # We're selecting from a list of only one item here, but it's a useful # technique to demonstrate: image = random.choice(images) image_path = os.path.join("static", "stimuli", image) uri_encoded_image = "data:image/png;base64," + base64.b64encode( open(image_path, "rb").read() ) return json.dumps({"image": uri_encoded_image, "sketch": ""})
def _contents(self)
Define the contents of new Infos. transmit() -> _what() -> create_information() -> _contents().
6.073249
6.254294
0.971053
parents = from_whom
parent_fs = [p.fitness for p in parents]
parent_probs = [(f / (1.0 * sum(parent_fs))) for f in parent_fs]
rnd = random.random()
temp = 0.0
for i, probability in enumerate(parent_probs):
    temp += probability
    if temp > rnd:
        parent = parents[i]
        break
parent.transmit(what=what, to_whom=to_whom)
def transmit_by_fitness(from_whom, to_whom=None, what=None)
Choose a parent with probability proportional to their fitness.
3.357937
3.207839
1.046791
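A standalone, runnable sketch of the fitness-proportional (roulette-wheel) selection used above, with made-up fitness values:

import random

fitnesses = [1.0, 3.0, 6.0]  # hypothetical parent fitnesses
probs = [f / sum(fitnesses) for f in fitnesses]  # 0.1, 0.3, 0.6

rnd = random.random()
cumulative = 0.0
for i, p in enumerate(probs):
    cumulative += p
    if cumulative > rnd:
        print("chose parent {}".format(i))  # parent 2 wins ~60% of draws
        break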
email_settings = EmailConfig(config)
if config.get("mode") == "debug":
    return DebugMessenger(email_settings)
problems = email_settings.validate()
if problems:
    logger.info(problems + " Will log errors instead of emailing them.")
    return DebugMessenger(email_settings)
return EmailingMessenger(email_settings)
def get_messenger(config)
Return an appropriate Messenger. If we're in debug mode, or email settings aren't set, return a debug version which logs the message instead of attempting to send a real email.
5.345592
4.647294
1.150259
missing = []
for k, v in self._map.items():
    attr = getattr(self, k, False)
    if not attr or attr == CONFIG_PLACEHOLDER:
        missing.append(v)
if missing:
    return "Missing or invalid config values: {}".format(
        ", ".join(sorted(missing))
    )
def validate(self)
Could this config be used to send a real email?
4.542778
3.947673
1.150748
try:
    yield local_session
    if commit:
        local_session.commit()
        logger.debug("DB session auto-committed as requested")
except Exception as e:
    # We log the exception before re-raising it, in case the rollback also
    # fails
    logger.exception(
        "Exception during scoped worker transaction, " "rolling back."
    )
    # This rollback is potentially redundant with the remove call below,
    # depending on how the scoped session is configured, but we'll be
    # explicit here.
    local_session.rollback()
    raise e
finally:
    local_session.remove()
    logger.debug("Session complete, db session closed")
def sessions_scope(local_session, commit=False)
Provide a transactional scope around a series of operations.
6.154405
6.022959
1.021824
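A runnable usage sketch against an in-memory SQLite database, assuming sessions_scope is importable and (as its yield implies) intended for use as a context manager; Item is a throwaway model defined only for this example:

from contextlib import contextmanager

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker

# The original presumably carries @contextmanager; we apply it explicitly.
scope = contextmanager(sessions_scope)

Base = declarative_base()

class Item(Base):
    __tablename__ = "item"
    id = Column(Integer, primary_key=True)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = scoped_session(sessionmaker(bind=engine))

with scope(session, commit=True) as s:
    s.add(Item())  # committed on exit; rolled back if the block raised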
@wraps(func)
def wrapper(*args, **kwargs):
    with sessions_scope(session):
        # The session used in func comes from func's globals, but
        # it will be a proxied thread local var from the session
        # registry, and will therefore be identical to the one returned
        # by the context manager above.
        logger.debug("Running worker %s in scoped DB session", func.__name__)
        return func(*args, **kwargs)
return wrapper
def scoped_session_decorator(func)
Manage contexts and add debugging to db sessions.
9.373376
8.51843
1.100364
try:
    if drop_all:
        Base.metadata.drop_all(bind=bind)
    Base.metadata.create_all(bind=bind)
except OperationalError as err:
    msg = 'password authentication failed for user "dallinger"'
    if msg in err.message:
        sys.stderr.write(db_user_warning)
    raise
return session
def init_db(drop_all=False, bind=engine)
Initialize the database, optionally dropping existing tables.
3.877883
3.907566
0.992404
@wraps(func)
def wrapper(*args, **kw):
    attempts = 100
    session.remove()
    while attempts > 0:
        try:
            session.connection(
                execution_options={"isolation_level": "SERIALIZABLE"}
            )
            result = func(*args, **kw)
            session.commit()
            return result
        except OperationalError as exc:
            session.rollback()
            if isinstance(exc.orig, TransactionRollbackError):
                if attempts > 0:
                    attempts -= 1
                else:
                    raise Exception(
                        "Could not commit serialized transaction "
                        "after 100 attempts."
                    )
            else:
                raise
        finally:
            session.remove()
        time.sleep(random.expovariate(0.5))
return wrapper
def serialized(func)
Run a function within a db transaction using SERIALIZABLE isolation. With this isolation level, committing will fail if this transaction read data that was since modified by another transaction. So we need to handle that case and retry the transaction.
2.851803
2.690357
1.060009
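A hedged usage sketch of the decorator: Counter is a made-up model and `session` stands in for the module-level scoped session the decorator commits, so this is illustrative rather than runnable on its own:

@serialized
def increment(counter_id):
    # Runs at SERIALIZABLE isolation; on a TransactionRollbackError the
    # decorator rolls back and retries, up to 100 attempts.
    row = session.query(Counter).get(counter_id)  # hypothetical model
    row.count += 1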
lag_tolerance_secs = float(request.args.get("tolerance", 0.1))
client = Client(ws, lag_tolerance_secs=lag_tolerance_secs)
client.subscribe(request.args.get("channel"))
gevent.spawn(client.heartbeat)
client.publish()
def chat(ws)
Relay chat messages to and from clients.
5.063245
4.77948
1.059371
self.clients.append(client)
log("Subscribed client {} to channel {}".format(client, self.name))
def subscribe(self, client)
Subscribe a client to the channel.
5.053908
4.116487
1.227724
if client in self.clients:
    self.clients.remove(client)
    log("Unsubscribed client {} from channel {}".format(client, self.name))
def unsubscribe(self, client)
Unsubscribe a client from the channel.
3.498339
2.957947
1.182692
pubsub = redis_conn.pubsub()
name = self.name
if isinstance(name, six.text_type):
    name = name.encode("utf-8")
try:
    pubsub.subscribe([name])
except ConnectionError:
    app.logger.exception("Could not connect to redis.")
log("Listening on channel {}".format(self.name))
for message in pubsub.listen():
    data = message.get("data")
    if message["type"] == "message" and data != "None":
        channel = message["channel"]
        payload = "{}:{}".format(channel.decode("utf-8"), data.decode("utf-8"))
        for client in self.clients:
            gevent.spawn(client.send, payload)
    gevent.sleep(0.001)
def listen(self)
Relay messages from a redis pubsub to all subscribed clients. This is run continuously in a separate greenlet.
3.167812
2.863132
1.106415
if channel_name not in self.channels:
    self.channels[channel_name] = channel = Channel(channel_name)
    channel.start()
self.channels[channel_name].subscribe(client)
def subscribe(self, client, channel_name)
Register a new client to receive messages on a channel.
2.52604
2.417929
1.044712
for channel in self.channels.values():
    channel.unsubscribe(client)
def unsubscribe(self, client)
Unsubscribe a client from all channels.
4.952034
3.795353
1.304762
if isinstance(message, bytes):
    message = message.decode("utf8")
with self.send_lock:
    try:
        self.ws.send(message)
    except socket.error:
        chat_backend.unsubscribe(self)
def send(self, message)
Send a single message to the websocket.
5.0167
4.55906
1.10038
while not self.ws.closed:
    gevent.sleep(HEARTBEAT_DELAY)
    gevent.spawn(self.send, "ping")
def heartbeat(self)
Send a ping to the websocket periodically. This is needed so that Heroku won't close the connection from inactivity.
5.321217
5.201578
1.023
while not self.ws.closed:
    # Sleep to prevent *constant* context-switches.
    gevent.sleep(self.lag_tolerance_secs)
    message = self.ws.receive()
    if message is not None:
        channel_name, data = message.split(":", 1)
        redis_conn.publish(channel_name, data)
def publish(self)
Relay messages from client to redis.
7.283637
5.915899
1.231197
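The wire format the relay assumes is "channel:payload", split on the first colon only. A tiny runnable demonstration (the frame contents are made up):

frame = 'quorum:{"n": 2, "q": 4}'  # hypothetical websocket frame
channel_name, data = frame.split(":", 1)
assert channel_name == "quorum"
assert data == '{"n": 2, "q": 4}'  # colons after the first survive intact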
from dallinger.config import get_config

config = get_config()
if not config.ready:
    config.load()
driver_url = config.get("webdriver_url", None)
driver_type = config.get("webdriver_type")
driver = None
if driver_url:
    capabilities = CAPABILITY_MAP.get(driver_type.lower())
    if capabilities is None:
        raise ValueError(
            "Unsupported remote webdriver_type: {}".format(driver_type)
        )
    driver = webdriver.Remote(
        desired_capabilities=capabilities, command_executor=driver_url
    )
else:
    driver_class = DRIVER_MAP.get(driver_type.lower())
    if driver_class is not None:
        driver = driver_class()
if driver is None:
    raise ValueError("Unsupported webdriver_type: {}".format(driver_type))
driver.set_window_size(1024, 768)
logger.info("Created {} webdriver.".format(driver_type))
return driver
def driver(self)
Returns a Selenium WebDriver instance of the type requested in the configuration.
2.490016
2.334808
1.066476
try:
    self.driver.get(self.URL)
    logger.info("Loaded ad page.")
    begin = WebDriverWait(self.driver, 10).until(
        EC.element_to_be_clickable((By.CLASS_NAME, "btn-primary"))
    )
    begin.click()
    logger.info("Clicked begin experiment button.")
    WebDriverWait(self.driver, 10).until(lambda d: len(d.window_handles) == 2)
    self.driver.switch_to_window(self.driver.window_handles[-1])
    self.driver.set_window_size(1024, 768)
    logger.info("Switched to experiment popup.")
    consent = WebDriverWait(self.driver, 10).until(
        EC.element_to_be_clickable((By.ID, "consent"))
    )
    consent.click()
    logger.info("Clicked consent button.")
    participate = WebDriverWait(self.driver, 10).until(
        EC.element_to_be_clickable((By.CLASS_NAME, "btn-success"))
    )
    participate.click()
    logger.info("Clicked start button.")
    return True
except TimeoutException:
    logger.error("Error during experiment sign up.")
    return False
def sign_up(self)
Accept HIT, give consent and start experiment. This uses Selenium to click through buttons on the ad, consent, and instruction pages.
2.025897
1.879583
1.077843
logger.info("Complete questionnaire.") difficulty = self.driver.find_element_by_id("difficulty") difficulty.value = "4" engagement = self.driver.find_element_by_id("engagement") engagement.value = "3"
def complete_questionnaire(self)
Complete the standard debriefing form. Answers the questions in the base questionnaire.
4.0375
3.836317
1.052442
try: logger.info("Bot player signing off.") feedback = WebDriverWait(self.driver, 20).until( EC.presence_of_element_located((By.ID, "submit-questionnaire")) ) self.complete_questionnaire() feedback.click() logger.info("Clicked submit questionnaire button.") self.driver.switch_to_window(self.driver.window_handles[0]) self.driver.set_window_size(1024, 768) logger.info("Switched back to initial window.") return True except TimeoutException: logger.error("Error during experiment sign off.") return False
def sign_off(self)
Submit questionnaire and finish. This uses Selenium to click the submit button on the questionnaire and return to the original window.
3.523214
3.080219
1.143819
url = self.driver.current_url
p = urllib.parse.urlparse(url)
complete_url = "%s://%s/%s?participant_id=%s"
complete_url = complete_url % (p.scheme, p.netloc, status, self.participant_id)
self.driver.get(complete_url)
logger.info("Forced call to %s: %s" % (status, complete_url))
def complete_experiment(self, status)
Sends worker status ('worker_complete' or 'worker_failed') to the experiment server.
3.251415
3.230241
1.006555
try:
    self.sign_up()
    self.participate()
    if self.sign_off():
        self.complete_experiment("worker_complete")
    else:
        self.complete_experiment("worker_failed")
finally:
    self.driver.quit()
def run_experiment(self)
Sign up, run the ``participate`` method, then sign off and close the driver.
5.394698
3.578844
1.507385
self.sign_up()
self.participate()
if self.sign_off():
    self.complete_experiment("worker_complete")
else:
    self.complete_experiment("worker_failed")
def run_experiment(self)
Runs the phases of interacting with the experiment including signup, participation, signoff, and recording completion.
6.981287
4.057855
1.720438
self.log("Bot player signing up.") self.subscribe_to_quorum_channel() while True: url = ( "{host}/participant/{self.worker_id}/" "{self.hit_id}/{self.assignment_id}/" "debug?fingerprint_hash={hash}&recruiter=bots:{bot_name}".format( host=self.host, self=self, hash=uuid.uuid4().hex, bot_name=self.__class__.__name__, ) ) try: result = requests.post(url) result.raise_for_status() except RequestException: self.stochastic_sleep() continue if result.json()["status"] == "error": self.stochastic_sleep() continue self.on_signup(result.json()) return True
def sign_up(self)
Signs up a participant for the experiment. This is done using a POST request to the /participant/ endpoint.
5.453521
5.033505
1.083444
self.log("Bot player completing experiment. Status: {}".format(status)) while True: url = "{host}/{status}?participant_id={participant_id}".format( host=self.host, participant_id=self.participant_id, status=status ) try: result = requests.get(url) result.raise_for_status() except RequestException: self.stochastic_sleep() continue return result
def complete_experiment(self, status)
Record worker completion status to the experiment server. This is done using a GET request to the /worker_complete or /worker_failed endpoints.
4.449396
4.29546
1.035837
from dallinger.experiment_server.sockets import chat_backend

self.log("Bot subscribing to quorum channel.")
chat_backend.subscribe(self, "quorum")
def subscribe_to_quorum_channel(self)
In case the experiment enforces a quorum, listen for notifications before creating Participant objects.
10.604314
9.090858
1.166481
while True:
    data = {
        "question": "questionnaire",
        "number": 1,
        "response": json.dumps(self.question_responses),
    }
    url = "{host}/question/{self.participant_id}".format(
        host=self.host, self=self
    )
    try:
        result = requests.post(url, data=data)
        result.raise_for_status()
    except RequestException:
        self.stochastic_sleep()
        continue
    return True
def complete_questionnaire(self)
Complete the standard debriefing form. Answers the questions in the base questionnaire.
4.241768
4.432864
0.956891
try:
    return bool(self.mturk.get_account_balance())
except NoCredentialsError:
    raise MTurkServiceException("No AWS credentials set!")
except ClientError:
    raise MTurkServiceException("Invalid AWS credentials!")
except Exception as ex:
    raise MTurkServiceException(
        "Error checking credentials: {}".format(str(ex))
    )
def check_credentials(self)
Verifies key/secret/host combination by making a balance inquiry
3.969579
3.829411
1.036603
ISO8601 = "%Y-%m-%dT%H:%M:%SZ" notification_version = "2006-05-05" API_version = "2014-08-15" data = { "AWSAccessKeyId": self.aws_key, "HITTypeId": hit_type_id, "Notification.1.Active": "True", "Notification.1.Destination": url, "Notification.1.EventType.1": "AssignmentAccepted", "Notification.1.EventType.2": "AssignmentAbandoned", "Notification.1.EventType.3": "AssignmentReturned", "Notification.1.EventType.4": "AssignmentSubmitted", "Notification.1.EventType.5": "HITReviewable", "Notification.1.EventType.6": "HITExpired", "Notification.1.Transport": "REST", "Notification.1.Version": notification_version, "Operation": "SetHITTypeNotification", "SignatureVersion": "1", "Timestamp": time.strftime(ISO8601, time.gmtime()), "Version": API_version, } query_string, signature = self._calc_old_api_signature(data) body = query_string + "&Signature=" + urllib.parse.quote_plus(signature) data["Signature"] = signature headers = { "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8", "Content-Length": str(len(body)), "Host": self.legacy_host, } resp = requests.post("https://" + self.legacy_host, headers=headers, data=body) return "<IsValid>True</IsValid>" in resp.text
def set_rest_notification(self, url, hit_type_id)
Set a REST endpoint to receive notifications about the HIT. The newer AWS MTurk API does not support this feature, which means we cannot use boto3 here. Instead, we make the call manually after assembling a properly signed request.
2.535045
2.350848
1.078353
reward = str(reward)
duration_secs = int(datetime.timedelta(hours=duration_hours).total_seconds())
hit_type = self.mturk.create_hit_type(
    Title=title,
    Description=description,
    Reward=reward,
    AssignmentDurationInSeconds=duration_secs,
    Keywords=",".join(keywords),
    AutoApprovalDelayInSeconds=0,
    QualificationRequirements=qualifications,
)
return hit_type["HITTypeId"]
def register_hit_type( self, title, description, reward, duration_hours, keywords, qualifications )
Register HIT Type for this HIT and return the type's ID, which is required for creating a HIT.
2.323325
2.126208
1.092708
quals = [
    {
        "QualificationTypeId": PERCENTAGE_APPROVED_REQUIREMENT_ID,
        "Comparator": "GreaterThanOrEqualTo",
        "IntegerValues": [approve_requirement],
        "RequiredToPreview": True,
    }
]
if restrict_to_usa:
    quals.append(
        {
            "QualificationTypeId": LOCALE_REQUIREMENT_ID,
            "Comparator": "EqualTo",
            "LocaleValues": [{"Country": "US"}],
            "RequiredToPreview": True,
        }
    )
if blacklist is not None:
    for item in blacklist:
        qtype = self.get_qualification_type_by_name(item)
        if qtype:
            quals.append(
                {
                    "QualificationTypeId": qtype["id"],
                    "Comparator": "DoesNotExist",
                    "RequiredToPreview": True,
                }
            )
return quals
def build_hit_qualifications(self, approve_requirement, restrict_to_usa, blacklist)
Translate restrictions/qualifications to boto Qualifications objects @blacklist is a list of names for Qualifications workers must not already hold in order to see and accept the HIT.
2.101465
2.039411
1.030428
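A hedged sketch combining build_hit_qualifications() with register_hit_type() from the earlier record; `svc` stands in for a configured MTurkService instance, and the title, keywords, and blacklisted qualification name are made up:

quals = svc.build_hit_qualifications(
    approve_requirement=95,        # workers need >= 95% lifetime approval
    restrict_to_usa=True,
    blacklist=["my-prior-study"],  # hypothetical qualification name
)
hit_type_id = svc.register_hit_type(
    title="Short experiment",
    description="A brief web-based study.",
    reward=1.00,
    duration_hours=1.0,
    keywords=["study", "web"],
    qualifications=quals,
)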
try:
    response = self.mturk.create_qualification_type(
        Name=name, Description=description, QualificationTypeStatus=status
    )
except Exception as ex:
    if "already created a QualificationType with this name" in str(ex):
        raise DuplicateQualificationNameError(str(ex))
return self._translate_qtype(response["QualificationType"])
def create_qualification_type(self, name, description, status="Active")
Create a new qualification Workers can be scored for.
3.310073
3.217039
1.028919