code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
self._run( [ "heroku", "addons:destroy", name, "--app", self.name, "--confirm", self.name, ] )
def addon_destroy(self, name)
Destroy an addon
4.221414
4.297292
0.982343
cmd = ["heroku", "buildpacks:add", url, "--app", self.name]
self._run(cmd)
def buildpack(self, url)
Add a buildpack by URL.
5.796165
4.801331
1.2072
output = self.get("DATABASE", subcommand="pg:credentials:url")
match = re.search("(postgres://.*)$", output)
if match is None:
    raise NameError(
        "Could not retrieve the DB URI. Check for error output from "
        "heroku above the stack trace."
    )
return match.group(1)
def db_uri(self)
The connection URL for the remote database. For example: postgres://some-long-uid@ec2-52-7-232-59.compute-1.amazonaws.com:5432/d5fou154it1nvt
10.776885
10.400126
1.036226
result = self._result(
    ["heroku", "apps:destroy", "--app", self.name, "--confirm", self.name]
)
return result
def destroy(self)
Destroy an app and all its add-ons
8.690853
6.959257
1.248819
cmd = ["heroku", subcommand, key, "--app", self.name]
return self._result(cmd)
def get(self, key, subcommand="config:get")
Get an app config value by name
8.677999
8.272381
1.049033
retries = 10
while retries:
    retries = retries - 1
    try:
        self._run(["heroku", "pg:wait", "--app", self.name])
    except subprocess.CalledProcessError:
        time.sleep(5)
        if not retries:
            raise
    else:
        break
def pg_wait(self)
Wait for the DB to be fired up.
3.571554
3.444443
1.036903
self._run( [ "heroku", "pg:backups:restore", "{}".format(url), "DATABASE_URL", "--app", self.name, "--confirm", self.name, ] )
def restore(self, url)
Restore the remote database from the URL of a backup.
5.031587
4.378905
1.149051
self._run( [ "heroku", "ps:scale", "{}={}:{}".format(process, quantity, size), "--app", self.name, ] )
def scale_up_dyno(self, process, quantity, size)
Scale up a dyno.
4.855902
4.703524
1.032397
processes = ["web", "worker"]
if self.clock_is_on:
    processes.append("clock")
for process in processes:
    self.scale_down_dyno(process)
def scale_down_dynos(self)
Turn off web and worker dynos, plus clock process if there is one and it's active.
5.66022
3.422063
1.654037
cmd = [
    "heroku",
    "config:set",
    "{}={}".format(key, quote(str(value))),
    "--app",
    self.name,
]
if self._is_sensitive_key(key):
    self._run_quiet(cmd)
else:
    self._run(cmd)
def set(self, key, value)
Configure an app key/value pair
4.08791
4.074648
1.003255
quiet = False
if not kwargs:
    return
cmd = ["heroku", "config:set"]
for k in sorted(kwargs):
    cmd.append("{}={}".format(k, quote(str(kwargs[k]))))
    if self._is_sensitive_key(k):
        quiet = True
cmd.extend(["--app", self.name])
if quiet:
    self._run_quiet(cmd)
else:
    self._run(cmd)
def set_multiple(self, **kwargs)
Configure multiple app key/value pairs
3.614873
3.488516
1.036221
def _handle_timeout(signum, frame): raise HerokuTimeoutError( "Failed to start after {} seconds.".format(timeout_secs, self._record) ) if self.is_running: self.out.log("Local Heroku is already running.") return signal.signal(signal.SIGALRM, _handle_timeout) signal.alarm(timeout_secs) self._boot() try: success = self._verify_startup() finally: signal.alarm(0) if not success: self.stop(signal.SIGKILL) raise HerokuStartupError( "Failed to start for unknown reason: {}".format(self._record) ) return True
def start(self, timeout_secs=60)
Start the heroku local subprocess group and verify that it has started successfully. The subprocess output is checked for a line matching 'success_regex' to indicate success. If no match is seen after 'timeout_secs', a HerokuTimeoutError is raised.
4.065245
3.642492
1.116061
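The start() record above uses a SIGALRM-based timeout around its boot check. A minimal, self-contained sketch of that pattern, assuming a Unix platform (BootTimeout, wait_with_alarm, and slow_boot are illustrative names, not from the source):

import signal
import time

class BootTimeout(Exception):
    """Raised when the watched operation takes too long."""

def _handle_timeout(signum, frame):
    raise BootTimeout("Failed to start in time.")

def wait_with_alarm(operation, timeout_secs=5):
    # Install the handler and arm the alarm before the potentially slow call.
    signal.signal(signal.SIGALRM, _handle_timeout)
    signal.alarm(timeout_secs)
    try:
        return operation()
    finally:
        signal.alarm(0)  # always disarm, whether we succeeded or raised

def slow_boot():
    time.sleep(1)
    return True

print(wait_with_alarm(slow_boot, timeout_secs=5))  # True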
signal = signal or self.int_signal self.out.log("Cleaning up local Heroku process...") if self._process is None: self.out.log("No local Heroku process was running.") return try: os.killpg(os.getpgid(self._process.pid), signal) self.out.log("Local Heroku process terminated.") except OSError: self.out.log("Local Heroku was already terminated.") self.out.log(traceback.format_exc()) finally: self._process = None
def stop(self, signal=None)
Stop the heroku local subprocess and all of its children.
3.285578
2.707437
1.213538
for line in self._stream():
    self._record.append(line)
    if self.verbose:
        self.out.blather(line)
    if listener(line) is self.MONITOR_STOP:
        return
def monitor(self, listener)
Relay the stream to listener until told to stop.
9.143694
8.344545
1.095769
initialize_experiment_package(os.getcwd()) try: try: from dallinger_experiment import experiment except ImportError: from dallinger_experiment import dallinger_experiment as experiment classes = inspect.getmembers(experiment, inspect.isclass) for name, c in classes: if "Experiment" in c.__bases__[0].__name__: return c else: raise ImportError except ImportError: logger.error("Could not import experiment.") raise
def load()
Load the active experiment.
3.77594
3.604375
1.047599
if not self.networks(): for _ in range(self.practice_repeats): network = self.create_network() network.role = "practice" self.session.add(network) for _ in range(self.experiment_repeats): network = self.create_network() network.role = "experiment" self.session.add(network) self.session.commit()
def setup(self)
Create the networks if they don't already exist.
3.023088
2.48482
1.216623
if full not in ["all", True, False]: raise ValueError( "full must be boolean or all, it cannot be {}".format(full) ) if full == "all": if role == "all": return Network.query.all() else: return Network.query.filter_by(role=role).all() else: if role == "all": return Network.query.filter_by(full=full).all() else: return Network.query.filter( and_(Network.role == role, Network.full == full) ).all()
def networks(self, role="all", full="all")
All the networks in the experiment.
2.026978
1.967461
1.03025
key = participant.id networks_with_space = ( Network.query.filter_by(full=False).order_by(Network.id).all() ) networks_participated_in = [ node.network_id for node in Node.query.with_entities(Node.network_id) .filter_by(participant_id=participant.id) .all() ] legal_networks = [ net for net in networks_with_space if net.id not in networks_participated_in ] if not legal_networks: self.log("No networks available, returning None", key) return None self.log( "{} networks out of {} available".format( len(legal_networks), (self.practice_repeats + self.experiment_repeats) ), key, ) legal_practice_networks = [ net for net in legal_networks if net.role == "practice" ] if legal_practice_networks: chosen_network = legal_practice_networks[0] self.log( "Practice networks available." "Assigning participant to practice network {}.".format( chosen_network.id ), key, ) else: chosen_network = self.choose_network(legal_networks, participant) self.log( "No practice networks available." "Assigning participant to experiment network {}".format( chosen_network.id ), key, ) return chosen_network
def get_network_for_participant(self, participant)
Find a network for a participant. If no networks are available, None will be returned. By default participants can participate only once in each network and participants first complete networks with `role="practice"` before doing all other networks in a random order.
2.74755
2.566994
1.070338
if not self.networks(full=False):
    self.log("All networks full: closing recruitment", "-----")
    self.recruiter.close_recruitment()
def recruit(self)
Recruit participants to the experiment as needed. This method runs whenever a participant successfully completes the experiment (participants who fail to finish successfully are automatically replaced). By default it recruits 1 participant at a time until all networks are full.
13.954972
12.293834
1.13512
participants = Participant.query.with_entities(Participant.status).all()
counts = Counter([p.status for p in participants])
sorted_counts = sorted(counts.items(), key=itemgetter(0))
self.log("Status summary: {}".format(str(sorted_counts)))
return sorted_counts
def log_summary(self)
Log a summary of all the participants' status codes.
3.848844
3.020477
1.27425
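A quick illustration of the Counter/itemgetter combination that log_summary relies on; the status values here are made up for the example:

from collections import Counter
from operator import itemgetter

statuses = ["approved", "working", "approved", "rejected", "approved"]
# Count each status, then sort the (status, count) pairs by status name.
summary = sorted(Counter(statuses).items(), key=itemgetter(0))
print(summary)  # [('approved', 3), ('rejected', 1), ('working', 1)]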
if len(objects) > 0:
    self.session.add_all(objects)
    self.session.commit()
def save(self, *objects)
Add all the objects to the session and commit them. This only needs to be done for networks and participants.
2.865565
2.536275
1.129832
participant_nodes = Node.query.filter_by(
    participant_id=participant.id, failed=False
).all()
for node in participant_nodes:
    node.fail()
def fail_participant(self, participant)
Fail all the nodes of a participant.
4.752991
3.835545
1.239196
import dallinger as dlgr app_id = self.make_uuid(app_id) if bot: kwargs["recruiter"] = "bots" self.app_id = app_id self.exp_config = exp_config or kwargs self.update_status("Starting") try: if self.exp_config.get("mode") == "debug": dlgr.command_line.debug.callback( verbose=True, bot=bot, proxy=None, exp_config=self.exp_config ) else: dlgr.deployment.deploy_sandbox_shared_setup( dlgr.command_line.log, app=app_id, verbose=self.verbose, exp_config=self.exp_config, ) except Exception: self.update_status("Errored") raise else: self.update_status("Running") self._await_completion() self.update_status("Retrieving data") data = self.retrieve_data() self.update_status("Completed") return data
def run(self, exp_config=None, app_id=None, bot=False, **kwargs)
Deploy and run an experiment. The exp_config object is either a dictionary or a ``localconfig.LocalConfig`` object with parameters specific to the experiment run grouped by section.
4.034003
4.22747
0.954236
try: results = data_load(app_id) self.log( "Data found for experiment {}, retrieving.".format(app_id), key="Retrieve:", ) return results except IOError: self.log( "Could not fetch data for id: {}, checking registry".format(app_id), key="Retrieve:", ) exp_config = exp_config or {} if is_registered(app_id): raise RuntimeError( "The id {} is registered, ".format(app_id) + "but you do not have permission to access to the data" ) elif kwargs.get("mode") == "debug" or exp_config.get("mode") == "debug": raise RuntimeError("No remote or local data found for id {}".format(app_id)) try: assert isinstance(uuid.UUID(app_id, version=4), uuid.UUID) except (ValueError, AssertionError): raise ValueError("Invalid UUID supplied {}".format(app_id)) self.log( "{} appears to be a new experiment id, running experiment.".format(app_id), key="Retrieve:", ) return self.run(exp_config, app_id, bot, **kwargs)
def collect(self, app_id, exp_config=None, bot=False, **kwargs)
Collect data for the provided experiment id. The ``app_id`` parameter must be a valid UUID. If an existing data file is found for the UUID it will be returned, otherwise - if the UUID is not already registered - the experiment will be run and data collected. See :meth:`~Experiment.run` method for other parameters.
4.01529
3.732726
1.075699
try: if app_id and isinstance(uuid.UUID(str(app_id), version=4), uuid.UUID): return str(app_id) except (ValueError, AssertionError): pass return str(uuid.UUID(int=random.getrandbits(128)))
def make_uuid(cls, app_id=None)
Generates a new UUID. This is a class method and can be called as `Experiment.make_uuid()`. Takes an optional `app_id` which is converted to a string and, if it is a valid UUID, returned.
2.760738
2.661839
1.037154
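A standalone usage sketch of the validate-or-generate behaviour that make_uuid describes (extracted from the class for illustration):

import random
import uuid

def make_uuid(app_id=None):
    # Echo app_id back if it is already a valid version-4 UUID string...
    try:
        if app_id and isinstance(uuid.UUID(str(app_id), version=4), uuid.UUID):
            return str(app_id)
    except ValueError:
        pass
    # ...otherwise mint a fresh random UUID.
    return str(uuid.UUID(int=random.getrandbits(128)))

print(make_uuid("not-a-uuid"))  # a new random UUID
existing = make_uuid()
print(make_uuid(existing) == existing)  # True: valid UUIDs pass through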
heroku_app = HerokuApp(self.app_id) status_url = "{}/summary".format(heroku_app.url) data = {} try: resp = requests.get(status_url) data = resp.json() except (ValueError, requests.exceptions.RequestException): logger.exception("Error fetching experiment status.") logger.debug("Current application state: {}".format(data)) return data.get("completed", False)
def experiment_completed(self)
Checks the current state of the experiment to see whether it has completed. This makes use of the experiment server `/summary` route, which in turn uses :meth:`~Experiment.is_complete`.
4.093841
3.504622
1.168126
local = False
if self.exp_config.get("mode") == "debug":
    local = True
filename = export(self.app_id, local=local)
logger.debug("Data exported to %s" % filename)
return Data(filename)
def retrieve_data(self)
Retrieves and saves data from a running experiment
7.353685
6.573714
1.11865
if self.exp_config.get("mode") != "debug":
    HerokuApp(self.app_id).destroy()
return True
def end_experiment(self)
Terminates a running experiment
14.892261
15.278617
0.974713
if session is None:
    session = self.session
return session.query(Info).order_by(Info.creation_time)
def events_for_replay(self, session=None, target=None)
Returns an ordered list of "events" for replaying. Experiments may override this method to provide custom replay logic. The "events" returned by this method will be passed to :meth:`~Experiment.replay_event`. The default implementation simply returns all :class:`~dallinger.models.Info` objects in the order they were created.
5.471317
3.167958
1.72708
from IPython.display import display self.build_widget() display(self.widget())
def _ipython_display_(self)
Display Jupyter Notebook widget
9.082321
6.604264
1.375221
debug_mode = config.get("mode") == "debug" name = config.get("recruiter", None) recruiter = None # Special case 1: Don't use a configured recruiter in replay mode if config.get("replay"): return HotAirRecruiter() if name is not None: recruiter = by_name(name) # Special case 2: may run BotRecruiter or MultiRecruiter in any mode # (debug or not), so it trumps everything else: if isinstance(recruiter, (BotRecruiter, MultiRecruiter)): return recruiter # Special case 3: if we're not using bots and we're in debug mode, # ignore any configured recruiter: if debug_mode: return HotAirRecruiter() # Configured recruiter: if recruiter is not None: return recruiter if name and recruiter is None: raise NotImplementedError("No such recruiter {}".format(name)) # Default if we're not in debug mode: return MTurkRecruiter()
def from_config(config)
Return a Recruiter instance based on the configuration. Default is HotAirRecruiter in debug mode (unless we're using the bot recruiter, which can be used in debug mode) and the MTurkRecruiter in other modes.
4.268599
3.555331
1.200619
logger.info("Opening CLI recruitment for {} participants".format(n)) recruitments = self.recruit(n) message = ( 'Search for "{}" in the logs for subsequent recruitment URLs.\n' "Open the logs for this experiment with " '"dallinger logs --app {}"'.format( NEW_RECRUIT_LOG_PREFIX, self.config.get("id") ) ) return {"items": recruitments, "message": message}
def open_recruitment(self, n=1)
Return initial experiment URL list, plus instructions for finding subsequent recruitment events in experiment logs.
8.45282
6.990502
1.209186
logger.info("Recruiting {} CLI participants".format(n)) urls = [] template = "{}/ad?recruiter={}&assignmentId={}&hitId={}&workerId={}&mode={}" for i in range(n): ad_url = template.format( get_base_url(), self.nickname, generate_random_id(), generate_random_id(), generate_random_id(), self._get_mode(), ) logger.info("{} {}".format(NEW_RECRUIT_LOG_PREFIX, ad_url)) urls.append(ad_url) return urls
def recruit(self, n=1)
Generate experiment URLs and print them to the console.
4.724868
4.407323
1.072049
logger.info( 'Award ${} for assignment {}, with reason "{}"'.format( amount, assignment_id, reason ) )
def reward_bonus(self, assignment_id, amount, reason)
Print out bonus info for the assignment
5.988364
6.207296
0.96473
logger.info("Opening HotAir recruitment for {} participants".format(n)) recruitments = self.recruit(n) message = "Recruitment requests will open browser windows automatically." return {"items": recruitments, "message": message}
def open_recruitment(self, n=1)
Return initial experiment URL list, plus instructions for finding subsequent recruitment events in experiment logs.
11.267374
9.977145
1.129318
logger.info("Opening Sim recruitment for {} participants".format(n)) return {"items": self.recruit(n), "message": "Simulated recruitment only"}
def open_recruitment(self, n=1)
Open recruitment.
12.471254
12.4259
1.00365
logger.info("Opening MTurk recruitment for {} participants".format(n)) if self.is_in_progress: raise MTurkRecruiterException( "Tried to open_recruitment on already open recruiter." ) if self.hit_domain is None: raise MTurkRecruiterException("Can't run a HIT from localhost") self.mturkservice.check_credentials() if self.config.get("assign_qualifications"): self._create_mturk_qualifications() hit_request = { "max_assignments": n, "title": self.config.get("title"), "description": self.config.get("description"), "keywords": self._config_to_list("keywords"), "reward": self.config.get("base_payment"), "duration_hours": self.config.get("duration"), "lifetime_days": self.config.get("lifetime"), "ad_url": self.ad_url, "notification_url": self.config.get("notification_url"), "approve_requirement": self.config.get("approve_requirement"), "us_only": self.config.get("us_only"), "blacklist": self._config_to_list("qualification_blacklist"), "annotation": self.config.get("id"), } hit_info = self.mturkservice.create_hit(**hit_request) if self.config.get("mode") == "sandbox": lookup_url = ( "https://workersandbox.mturk.com/mturk/preview?groupId={type_id}" ) else: lookup_url = "https://worker.mturk.com/mturk/preview?groupId={type_id}" return { "items": [lookup_url.format(**hit_info)], "message": "HIT now published to Amazon Mechanical Turk", }
def open_recruitment(self, n=1)
Open a connection to AWS MTurk and create a HIT.
3.189014
3.070113
1.038729
logger.info("Recruiting {} MTurk participants".format(n)) if not self.config.get("auto_recruit"): logger.info("auto_recruit is False: recruitment suppressed") return hit_id = self.current_hit_id() if hit_id is None: logger.info("no HIT in progress: recruitment aborted") return try: return self.mturkservice.extend_hit( hit_id, number=n, duration_hours=self.config.get("duration") ) except MTurkServiceException as ex: logger.exception(str(ex))
def recruit(self, n=1)
Recruit n new participants to an existing HIT
4.139154
3.743442
1.105708
if participant.status == "overrecruited" or not self.qualification_active: return worker_id = participant.worker_id for name in self.qualifications: try: self.mturkservice.increment_qualification_score(name, worker_id) except QualificationNotFoundException as ex: logger.exception(ex)
def notify_completed(self, participant)
Assign a Qualification to the Participant for the experiment ID, and for the configured group_name, if it's been set. Overrecruited participants don't receive qualifications, since they haven't actually completed the experiment. This allows them to remain eligible for future runs.
7.171694
5.061935
1.416789
unsubmitted = [] for participant in participants: summary = ParticipationTime(participant, reference_time, self.config) status = self._mturk_status_for(participant) if status == "Approved": participant.status = "approved" session.commit() elif status == "Rejected": participant.status = "rejected" session.commit() elif status == "Submitted": self._resend_submitted_rest_notification_for(participant) self._message_researcher(self._resubmitted_msg(summary)) logger.warning( "Error - submitted notification for participant {} missed. " "A replacement notification was created and sent, " "but proceed with caution.".format(participant.id) ) else: self._send_notification_missing_rest_notification_for(participant) unsubmitted.append(summary) if unsubmitted: self._disable_autorecruit() self.close_recruitment() pick_one = unsubmitted[0] # message the researcher about the one of the participants: self._message_researcher(self._cancelled_msg(pick_one)) # Attempt to force-expire the hit via boto. It's possible # that the HIT won't exist if the HIT has been deleted manually. try: self.mturkservice.expire_hit(pick_one.participant.hit_id) except MTurkServiceException as ex: logger.exception(ex)
def notify_duration_exceeded(self, participants, reference_time)
The participant has exceeded the maximum time for the activity, defined in the "duration" config value. We need to find out the assignment status on MTurk and act based on this.
6.098315
6.021347
1.012783
try:
    return self.mturkservice.grant_bonus(assignment_id, amount, reason)
except MTurkServiceException as ex:
    logger.exception(str(ex))
def reward_bonus(self, assignment_id, amount, reason)
Reward the Turker for a specified assignment with a bonus.
3.988555
4.143036
0.962713
for name, desc in self.qualifications.items():
    try:
        self.mturkservice.create_qualification_type(name, desc)
    except DuplicateQualificationNameError:
        pass
def _create_mturk_qualifications(self)
Create MTurk Qualifications for the experiment ID, and for group_name if it's been set. Qualifications with these names may already exist, but it's faster to try and fail than to check, then try.
4.115467
3.330851
1.23556
logger.info("Opening Bot recruitment for {} participants".format(n)) factory = self._get_bot_factory() bot_class_name = factory("", "", "").__class__.__name__ return { "items": self.recruit(n), "message": "Bot recruitment started using {}".format(bot_class_name), }
def open_recruitment(self, n=1)
Start recruiting right away.
6.502242
6.215966
1.046055
logger.info("Recruiting {} Bot participants".format(n)) factory = self._get_bot_factory() urls = [] q = _get_queue() for _ in range(n): base_url = get_base_url() worker = generate_random_id() hit = generate_random_id() assignment = generate_random_id() ad_parameters = ( "recruiter={}&assignmentId={}&hitId={}&workerId={}&mode=sandbox" ) ad_parameters = ad_parameters.format(self.nickname, assignment, hit, worker) url = "{}/ad?{}".format(base_url, ad_parameters) urls.append(url) bot = factory(url, assignment_id=assignment, worker_id=worker, hit_id=hit) job = q.enqueue(bot.run_experiment, timeout=self._timeout) logger.warning("Created job {} for url {}.".format(job.id, url)) return urls
def recruit(self, n=1)
Recruit n new participant bots to the queue
4.177013
3.845879
1.086101
for participant in participants:
    participant.status = "rejected"
    session.commit()
def notify_duration_exceeded(self, participants, reference_time)
The bot participant has been working longer than the time defined in the "duration" config value.
8.981602
9.272254
0.968654
recruiters = []
spec = get_config().get("recruiters")
for match in self.SPEC_RE.finditer(spec):
    name = match.group(1)
    count = int(match.group(2))
    recruiters.append((name, count))
return recruiters
def parse_spec(self)
Parse the specification of how to recruit participants. Example: recruiters = bots: 5, mturk: 1
3.77193
3.009506
1.253339
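The spec format shown in the docstring ("recruiters = bots: 5, mturk: 1") can be parsed with a small regex. A sketch under the assumption that SPEC_RE matches "name: count" pairs; the exact pattern used by MultiRecruiter is not shown in the source:

import re

SPEC_RE = re.compile(r"(\w+):\s*(\d+)")  # hypothetical stand-in for the real pattern

def parse_spec(spec):
    # Return (recruiter_name, count) pairs in the order they appear.
    return [(m.group(1), int(m.group(2))) for m in SPEC_RE.finditer(spec)]

print(parse_spec("bots: 5, mturk: 1"))  # [('bots', 5), ('mturk', 1)]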
recruit_count = 0 while recruit_count <= n: counts = dict( session.query(Recruitment.recruiter_id, func.count(Recruitment.id)) .group_by(Recruitment.recruiter_id) .all() ) for recruiter_id, target_count in self.spec: remaining = 0 count = counts.get(recruiter_id, 0) if count >= target_count: # This recruiter quota was reached; # move on to the next one. counts[recruiter_id] = count - target_count continue else: # Quota is still available; let's use it. remaining = target_count - count break else: return num_recruits = min(n - recruit_count, remaining) # record the recruitments and commit for i in range(num_recruits): session.add(Recruitment(recruiter_id=recruiter_id)) session.commit() recruit_count += num_recruits yield by_name(recruiter_id), num_recruits
def recruiters(self, n=1)
Iterator that provides recruiters along with the participant count to be recruited for up to `n` participants. We use the `Recruitment` table in the db to keep track of how many recruitments have been requested using each recruiter. We'll use the first one from the specification that hasn't already reached its quota.
2.918991
2.539033
1.149647
logger.info("Multi recruitment running for {} participants".format(n)) recruitments = [] messages = {} remaining = n for recruiter, count in self.recruiters(n): if not count: break if recruiter.nickname in messages: result = recruiter.recruit(count) recruitments.extend(result) else: result = recruiter.open_recruitment(count) recruitments.extend(result["items"]) messages[recruiter.nickname] = result["message"] remaining -= count if remaining <= 0: break logger.info( ( "Multi-recruited {} out of {} participants, " "using {} recruiters." ).format(n - remaining, n, len(messages)) ) return {"items": recruitments, "message": "\n".join(messages.values())}
def open_recruitment(self, n=1)
Return initial experiment URL list.
3.196442
3.196282
1.00005
recruiters_with_late_participants = defaultdict(list) for p in participants: timeline = ParticipationTime(p, reference_time, config) if timeline.is_overdue: print( "Error: participant {} with status {} has been playing for too " "long - their recruiter will be notified.".format(p.id, p.status) ) recruiters_with_late_participants[p.recruiter_id].append(p) for recruiter_id, participants in recruiters_with_late_participants.items(): recruiter = recruiters.by_name(recruiter_id) recruiter.notify_duration_exceeded(participants, reference_time)
def run_check(participants, config, reference_time)
For each participant, if they've been active for longer than the experiment duration + 2 minutes, we take action.
3.709262
3.64998
1.016242
config = dallinger.config.get_config()
participants = Participant.query.filter_by(status="working").all()
reference_time = datetime.now()
run_check(participants, config, reference_time)
def check_db_for_missing_notifications()
Check the database for missing notifications.
7.263659
6.641154
1.093734
setup_loghandlers(logging_level) self._install_signal_handlers() self.did_perform_work = False self.register_birth() self.log.info( "RQ GEVENT worker (Greenlet pool size={0}) {1!r} started, version {2}".format( self.gevent_pool.size, self.key, VERSION ) ) self.set_state(WorkerStatus.STARTED) try: while True: try: self.check_for_suspension(burst) if self.should_run_maintenance_tasks: self.clean_registries() if self._stop_requested: self.log.info("Stopping on request.") break timeout = None if burst else max(1, self.default_worker_ttl - 60) result = self.dequeue_job_and_maintain_ttl(timeout) if result is None and burst: self.log.info("RQ worker {0!r} done, quitting".format(self.key)) try: # Make sure dependented jobs are enqueued. gevent.wait(self.children) except LoopExit: pass result = self.dequeue_job_and_maintain_ttl(timeout) if result is None: break except StopRequested: break job, queue = result self.execute_job(job, queue) finally: if not self.is_horse: self.register_death() return self.did_perform_work
def _work(self, burst=False, logging_level=logging.INFO)
Starts the work loop. Pops and performs all jobs on the current list of queues. When all queues are empty, block and wait for new jobs to arrive on any of the queues, unless `burst` mode is enabled. The return value indicates whether any jobs were processed.
5.57997
5.59555
0.997216
# If this is a burst worker, there is no need to spawn a greenlet
if burst:
    return self._work(burst, logging_level=logging_level)
self.gevent_worker = gevent.spawn(self._work, burst)
self.gevent_worker.join()
return self.gevent_worker.value
def work(self, burst=False, logging_level=logging.INFO)
Spawn a greenlet so that it can be killed when it is blocked dequeueing a job. :param burst: if it's a burst worker, there is no need to spawn a greenlet
4.800049
3.726657
1.288031
try:
    config.get("osf_access_token")
except KeyError:
    pass
else:
    osf_id = _create_osf_project(dlgr_id)
    _upload_assets_to_OSF(dlgr_id, osf_id)
def register(dlgr_id, snapshot=None)
Register the experiment using configured services.
5.234715
4.897703
1.06881
if not description: description = "Experiment {} registered by Dallinger.".format(dlgr_id) r = requests.post( "{}/nodes/".format(root), data={ "type": "nodes", "category": "project", "title": "Experiment dlgr-{}".format(dlgr_id[0:8]), "description": description, }, headers={"Authorization": "Bearer {}".format(config.get("osf_access_token"))}, ) r.raise_for_status() osf_id = r.json()["data"]["id"] logger.info("Project registered on OSF at http://osf.io/{}".format(osf_id)) return osf_id
def _create_osf_project(dlgr_id, description=None)
Create a project on the OSF.
3.318265
3.117902
1.064262
root = "https://files.osf.io/v1" snapshot_filename = "{}-code.zip".format(dlgr_id) snapshot_path = os.path.join("snapshots", snapshot_filename) r = requests.put( "{}/resources/{}/providers/{}/".format(root, osf_id, provider), params={"kind": "file", "name": snapshot_filename}, headers={ "Authorization": "Bearer {}".format(config.get("osf_access_token")), "Content-Type": "text/plain", }, data=open(snapshot_path, "rb"), ) r.raise_for_status()
def _upload_assets_to_OSF(dlgr_id, osf_id, provider="osfstorage")
Upload experimental assets to the OSF.
2.528982
2.515954
1.005178
client = get_client() task_doc = assemble_generic_doc("task", label=label, deadline=deadline) res = client.update_task(task_id, task_doc) formatted_print(res, simple_text="Success")
def update_task(deadline, label, task_id)
Executor for `globus task update`
6.709635
6.345868
1.057324
client = get_client() role = client.get_endpoint_role(endpoint_id, role_id) formatted_print( role, text_format=FORMAT_TEXT_RECORD, fields=( ("Principal Type", "principal_type"), ("Principal", lookup_principal), ("Role", "role"), ), )
def role_show(endpoint_id, role_id)
Executor for `globus endpoint role show`
4.784029
3.943393
1.213176
ctx = click.get_current_context()
state = ctx.ensure_object(CommandState)
return state.outformat_is_json()
def outformat_is_json()
Only safe to call within a click context.
4.934127
3.678389
1.341383
ctx = click.get_current_context()
state = ctx.ensure_object(CommandState)
return state.outformat_is_unix()
def outformat_is_unix()
Only safe to call within a click context.
5.213897
3.666619
1.421991
ctx = click.get_current_context()
state = ctx.ensure_object(CommandState)
return state.outformat_is_text()
def outformat_is_text()
Only safe to call within a click context.
5.145644
3.61039
1.425232
ctx = click.get_current_context()
state = ctx.ensure_object(CommandState)
return state.jmespath_expr
def get_jmespath_expression()
Only safe to call within a click context.
4.910339
3.502581
1.40192
ctx = click.get_current_context()
state = ctx.ensure_object(CommandState)
return state.verbosity
def verbosity()
Only safe to call within a click context.
4.454687
3.491929
1.275709
ctx = click.get_current_context()
state = ctx.ensure_object(CommandState)
return state.is_verbose()
def is_verbose()
Only safe to call within a click context.
4.38973
3.487298
1.258777
client = get_client() server_doc = client.get_endpoint_server(endpoint_id, server_id) if not server_doc["uri"]: # GCP endpoint server fields = (("ID", "id"),) text_epilog = dedent( .format( endpoint_id ) ) else: def advertised_port_summary(server): def get_range_summary(start, end): return ( "unspecified" if not start and not end else "unrestricted" if start == 1024 and end == 65535 else "{}-{}".format(start, end) ) return "incoming {}, outgoing {}".format( get_range_summary( server["incoming_data_port_start"], server["incoming_data_port_end"] ), get_range_summary( server["outgoing_data_port_start"], server["outgoing_data_port_end"] ), ) fields = ( ("ID", "id"), ("URI", "uri"), ("Subject", "subject"), ("Data Ports", advertised_port_summary), ) text_epilog = None formatted_print( server_doc, text_format=FORMAT_TEXT_RECORD, fields=fields, text_epilog=text_epilog, )
def server_show(endpoint_id, server_id)
Executor for `globus endpoint server show`
3.270857
3.039704
1.076045
principal_type, principal_val = principal client = get_client() if principal_type == "identity": principal_val = maybe_lookup_identity_id(principal_val) if not principal_val: raise click.UsageError( "Identity does not exist. " "Use --provision-identity to auto-provision an identity." ) elif principal_type == "provision-identity": principal_val = maybe_lookup_identity_id(principal_val, provision=True) principal_type = "identity" role_doc = assemble_generic_doc( "role", principal_type=principal_type, principal=principal_val, role=role ) res = client.add_endpoint_role(endpoint_id, role_doc) formatted_print(res, simple_text="ID: {}".format(res["id"]))
def role_create(role, principal, endpoint_id)
Executor for `globus endpoint role create`
3.977675
3.643493
1.09172
client = get_client() if successful_transfers: print_successful_transfers(client, task_id) else: print_task_detail(client, task_id)
def show_task(successful_transfers, task_id)
Executor for `globus task show`
3.312931
3.12391
1.060508
endpoint_id, path = endpoint_plus_path client = get_client() submit_data = {"endpoint_id": str(endpoint_id), "path": path, "name": bookmark_name} res = client.create_bookmark(submit_data) formatted_print(res, simple_text="Bookmark ID: {}".format(res["id"]))
def bookmark_create(endpoint_plus_path, bookmark_name)
Executor for `globus bookmark create`
4.318559
3.81509
1.131968
# raises usage error on shares for us endpoint, server_list = get_endpoint_w_server_list(endpoint_id) if server_list == "S3": # not GCS -- this is an S3 endpoint server_list = {"s3_url": endpoint["s3_url"]} fields = [("S3 URL", "s3_url")] text_format = FORMAT_TEXT_RECORD else: # regular GCS host endpoint fields = ( ("ID", "id"), ("URI", lambda s: (s["uri"] or "none (Globus Connect Personal)")), ) text_format = FORMAT_TEXT_TABLE formatted_print(server_list, text_format=text_format, fields=fields)
def server_list(endpoint_id)
Executor for `globus endpoint server list`
7.692546
7.197505
1.06878
def _process_filterval(prefix, value, default=None): if value: if isinstance(value, six.string_types): return "{}:{}/".format(prefix, value) return "{}:{}/".format(prefix, ",".join(str(x) for x in value)) else: return default or "" # make filter string filter_string = "" filter_string += _process_filterval("task_id", filter_task_id) filter_string += _process_filterval("status", filter_status) filter_string += _process_filterval( "type", filter_type, default="type:TRANSFER,DELETE/" ) # combine data into one list for easier processing if inexact: label_data = ["~" + s for s in filter_label] + [ "!~" + s for s in filter_not_label ] else: label_data = ["=" + s for s in filter_label] + [ "!" + s for s in filter_not_label ] filter_string += _process_filterval("label", label_data) filter_string += _process_filterval( "request_time", [(filter_requested_after or ""), (filter_requested_before or "")], ) filter_string += _process_filterval( "completion_time", [(filter_completed_after or ""), (filter_completed_before or "")], ) client = get_client() task_iterator = client.task_list( num_results=limit, filter=filter_string[:-1] ) # ignore trailing / fields = [ ("Task ID", "task_id"), ("Status", "status"), ("Type", "type"), ("Source Display Name", "source_endpoint_display_name"), ("Dest Display Name", "destination_endpoint_display_name"), ("Label", "label"), ] formatted_print( task_iterator, fields=fields, json_converter=iterable_response_to_dict )
def task_list( limit, filter_task_id, filter_status, filter_type, filter_label, filter_not_label, inexact, filter_requested_after, filter_requested_before, filter_completed_after, filter_completed_before, )
Executor for `globus task-list`
2.706167
2.647745
1.022065
endpoint_id, path = endpoint_plus_path if path is None and (not batch): raise click.UsageError("delete requires either a PATH OR --batch") client = get_client() # attempt to activate unless --skip-activation-check is given if not skip_activation_check: autoactivate(client, endpoint_id, if_expires_in=60) delete_data = DeleteData( client, endpoint_id, label=label, recursive=recursive, ignore_missing=ignore_missing, submission_id=submission_id, deadline=deadline, skip_activation_check=skip_activation_check, interpret_globs=enable_globs, **notify ) if batch: # although this sophisticated structure (like that in transfer) # isn't strictly necessary, it gives us the ability to add options in # the future to these lines with trivial modifications @click.command() @click.argument("path", type=TaskPath(base_dir=path)) def process_batch_line(path): delete_data.add_item(str(path)) shlex_process_stdin(process_batch_line, "Enter paths to delete, line by line.") else: if not star_silent and enable_globs and path.endswith("*"): # not intuitive, but `click.confirm(abort=True)` prints to stdout # unnecessarily, which we don't really want... # only do this check if stderr is a pty if ( err_is_terminal() and term_is_interactive() and not click.confirm( 'Are you sure you want to delete all files matching "{}"?'.format( path ), err=True, ) ): safeprint("Aborted.", write_to_stderr=True) click.get_current_context().exit(1) delete_data.add_item(path) if dry_run: formatted_print(delete_data, response_key="DATA", fields=[("Path", "path")]) # exit safely return res = client.submit_delete(delete_data) formatted_print( res, text_format=FORMAT_TEXT_RECORD, fields=(("Message", "message"), ("Task ID", "task_id")), )
def delete_command( batch, ignore_missing, star_silent, recursive, enable_globs, endpoint_plus_path, label, submission_id, dry_run, deadline, skip_activation_check, notify, )
Executor for `globus delete`
5.207463
5.132414
1.014623
conf = get_config_obj() section = "cli" if "." in parameter: section, parameter = parameter.split(".", 1) # ensure that the section exists if section not in conf: conf[section] = {} # set the value for the given parameter conf[section][parameter] = value # write to disk safeprint("Writing updated config to {}".format(conf.filename)) conf.write()
def set_command(value, parameter)
Executor for `globus config set`
4.167792
3.767138
1.106355
# if input is interactive, print help to stderr if sys.stdin.isatty(): safeprint( ( "{}\n".format(helptext) + "Lines are split with shlex in POSIX mode: " "https://docs.python.org/library/shlex.html#parsing-rules\n" "Terminate input with Ctrl+D or <EOF>\n" ), write_to_stderr=True, ) # use readlines() rather than implicit file read line looping to force # python to properly capture EOF (otherwise, EOF acts as a flush and # things get weird) for line in sys.stdin.readlines(): # get the argument vector: # do a shlex split to handle quoted paths with spaces in them # also lets us have comments with # argv = shlex.split(line, comments=True) if argv: try: process_command.main(args=argv) except SystemExit as e: if e.code != 0: raise
def shlex_process_stdin(process_command, helptext)
Use shlex to process stdin line-by-line. Also prints help text. Requires that @process_command be a Click command object, used for processing single lines of input. helptext is prepended to the standard message printed to interactive sessions.
7.939481
7.696211
1.031609
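The batch-input handling in shlex_process_stdin depends on shlex's POSIX-mode splitting with comments enabled. A tiny illustration of the rules it relies on (quoted paths with spaces survive, '#' starts a comment):

import shlex

line = '"/path/with spaces/file.txt" destination.txt  # trailing comment'
print(shlex.split(line, comments=True))
# ['/path/with spaces/file.txt', 'destination.txt']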
if personal: try: ep_id = LocalGlobusConnectPersonal().endpoint_id except IOError as e: safeprint(e, write_to_stderr=True) click.get_current_context().exit(1) if ep_id is not None: safeprint(ep_id) else: safeprint("No Globus Connect Personal installation found.") click.get_current_context().exit(1)
def local_id(personal)
Executor for `globus endpoint local-id`
4.772597
4.054287
1.177173
def inner_decorator(f): f = click.group(*args, cls=GlobusCommandGroup, **kwargs)(f) f = common_options(f) return f return inner_decorator
def globus_group(*args, **kwargs)
Wrapper over click.group which sets GlobusCommandGroup as the Class Caution! Don't get snake-bitten by this. `globus_group` is a decorator which MUST take arguments. It is not wrapped in our common detect-and-decorate pattern to allow it to be used bare -- that wouldn't work (unnamed groups? weird stuff)
4.5534
2.900715
1.569751
moddata = [] modlist = ( "click", "configobj", "cryptography", "globus_cli", "globus_sdk", "jmespath", "requests", "six", ) if verbosity() < 2: modlist = ("globus_cli", "globus_sdk", "requests") for mod in modlist: cur = [mod] try: loaded_mod = __import__(mod) except ImportError: loaded_mod = None for attr in ("__version__", "__file__", "__path__"): # if loading failed, be sure to pad with error messages if loaded_mod is None: cur.append("[import failed]") continue try: attrval = getattr(loaded_mod, attr) except AttributeError: attrval = "" cur.append(attrval) moddata.append(cur) return moddata
def _get_package_data()
Import a set of important packages and return relevant data about them (name, version, file, and path for each). Imports are done in here to avoid potential for circular imports and other problems, and to make iteration simpler.
3.671669
3.669494
1.000593
latest, current = get_versions() if latest is None: safeprint( ("Installed Version: {0}\n" "Failed to lookup latest version.").format( current ) ) else: safeprint( ("Installed Version: {0}\n" "Latest Version: {1}\n" "\n{2}").format( current, latest, "You are running the latest version of the Globus CLI" if current == latest else ( "You should update your version of the Globus CLI with\n" " globus update" ) if current < latest else "You are running a preview version of the Globus CLI", ) ) # verbose shows more platform and python info # it also includes versions of some CLI dependencies if is_verbose(): moddata = _get_package_data() safeprint("\nVerbose Data\n---") safeprint("platform:") safeprint(" platform: {}".format(platform.platform())) safeprint(" py_implementation: {}".format(platform.python_implementation())) safeprint(" py_version: {}".format(platform.python_version())) safeprint(" sys.executable: {}".format(sys.executable)) safeprint(" site.USER_BASE: {}".format(site.USER_BASE)) safeprint("modules:") for mod, modversion, modfile, modpath in moddata: safeprint(" {}:".format(mod)) safeprint(" __version__: {}".format(modversion)) safeprint(" __file__: {}".format(modfile)) safeprint(" __path__: {}".format(modpath))
def print_version()
Print out the current version, and at least try to fetch the latest from PyPi to print alongside it. It may seem odd that this isn't in globus_cli.version , but it's done this way to separate concerns over printing the version from looking it up.
3.29336
3.145441
1.047026
all_args = [sys.executable, "-m", "pip"] + list(args)
print("> {}".format(" ".join(all_args)))
subprocess.check_call(all_args)
def _call_pip(*args)
Invoke pip *safely* and in the *supported* way: https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program
2.886943
2.520005
1.14561
try:
    subprocess.check_output(
        [sys.executable, "-m", "pip", "--version"], stderr=subprocess.STDOUT
    )
    return True
except subprocess.CalledProcessError:
    return False
def _check_pip_installed()
Invoke `pip --version` and make sure it doesn't error. Use check_output to capture stdout and stderr. Invokes pip in the same manner that we plan to in _call_pip(). Don't bother trying to reuse _call_pip to do this... Finicky and not worth the effort.
2.226978
2.194108
1.014981
client = get_auth_client() # get userinfo from auth. # if we get back an error the user likely needs to log in again try: res = client.oauth2_userinfo() except AuthAPIError: safeprint( "Unable to get user information. Please try " "logging in again.", write_to_stderr=True, ) click.get_current_context().exit(1) print_command_hint( "For information on which identities are in session see\n" " globus session show\n" ) # --linked-identities either displays all usernames or a table if verbose if linked_identities: try: formatted_print( res["identity_set"], fields=[ ("Username", "username"), ("Name", "name"), ("ID", "sub"), ("Email", "email"), ], simple_text=( None if is_verbose() else "\n".join([x["username"] for x in res["identity_set"]]) ), ) except KeyError: safeprint( "Your current login does not have the consents required " "to view your full identity set. Please log in again " "to agree to the required consents.", write_to_stderr=True, ) # Default output is the top level data else: formatted_print( res, text_format=FORMAT_TEXT_RECORD, fields=[ ("Username", "preferred_username"), ("Name", "name"), ("ID", "sub"), ("Email", "email"), ], simple_text=(None if is_verbose() else res["preferred_username"]), )
def whoami_command(linked_identities)
Executor for `globus whoami`
4.463611
4.211948
1.05975
# get the "globus" command as a click.Command root_command = click.get_current_context().find_root().command # build a new context object off of it, with resilient_parsing set so that # no callbacks are invoked ctx = root_command.make_context("globus", list(args), resilient_parsing=True) # walk down multicommands until we've matched on everything and are at a # terminal context that holds all of our completed args while isinstance(ctx.command, click.MultiCommand) and args: # trim out any params that are capturable at this level of the command # tree by resetting the argument list args = ctx.protected_args + ctx.args # if there were no remaining args, stop walking the tree if not args: break # check for a matching command, and if one isn't found stop the # traversal and abort the whole process -- this would mean that a # completed command was entered which doesn't match a known command # there's nothing completion can do in this case unless it implements # sophisticated fuzzy matching command = ctx.command.get_command(ctx, args[0]) if not command: return None # otherwise, grab that command, and build a subcontext to continue the # tree walk else: ctx = command.make_context( args[0], args[1:], parent=ctx, resilient_parsing=True ) # return the context we found return ctx
def get_completion_context(args)
Walk the tree of commands to a terminal command or multicommand, using the Click Context system. Effectively, we'll be using the resilient_parsing mode of commands to stop evaluation, then having them capture their options and arguments, passing us on to the next subcommand. If we walk "off the tree" with a command that we don't recognize, we have a hardstop condition, but otherwise, we walk as far as we can go and that's the location from which we should do our completion work.
6.886456
6.212437
1.108495
ctx = get_completion_context(completed_args) if not ctx: return [] # matching rules, so we can toggle by type and such def match_with_case(n, m): return n.startswith(m) def match_nocase(n, m): return n.lower().startswith(m.lower()) match_func = match_with_case ctx_options = [ p for p in ctx.command.get_params(ctx) if isinstance(p, click.Option) ] last_completed = None if completed_args: last_completed = completed_args[-1] # if the last completed argument matches a Choice option, we're going to # have to expand cur as a choice param matching_choice_opt = None for p in ctx_options: if isinstance(p.type, click.Choice) and last_completed in p.opts: matching_choice_opt = p choices = [] # if we ended on a choice, complete with all of the available values if matching_choice_opt: # catch the case where it's case insensitive, and we need to change our # comparisons / matching later on if isinstance(matching_choice_opt.type, CaseInsensitiveChoice): match_func = match_nocase choices = [ (x, matching_choice_opt.help) for x in matching_choice_opt.type.choices ] # if cur looks like an option, just look for options # but skip if it's quoted text elif cur and cur.startswith("-") and not quoted: for param in ctx_options: # skip hidden options if isinstance(param, HiddenOption): continue for optset in (param.opts, param.secondary_opts): for opt in optset: # only add long-opts, never short opts to completion, # unless the cur appears to be a short opt already if opt.startswith("--") or (len(cur) > 1 and cur[1] != "-"): choices.append((opt, param.help)) # and if it's a multicommand we see, get the list of subcommands elif isinstance(ctx.command, click.MultiCommand) and not quoted: choices = [ (cmdname, ctx.command.get_command(ctx, cmdname).short_help) for cmdname in ctx.command.list_commands(ctx) ] else: pass # now, do final filtering if cur: choices = [(n, h) for (n, h) in choices if match_func(n, cur)] return choices
def get_all_choices(completed_args, cur, quoted)
This is the main completion function. Inputs: - completed_args: a list of already-completed arguments - cur: the current "word in progress" or None - quoted: is cur part of a quoted string?
4.234575
4.176589
1.013884
client = get_client() res = client.endpoint_get_activation_requirements(endpoint_id) def fail(deadline=None): exp_string = "" if deadline is not None: exp_string = " or will expire within {} seconds".format(deadline) message = "The endpoint is not activated{}.\n\n".format( exp_string ) + activation_requirements_help_text(res, endpoint_id) formatted_print(res, simple_text=message) click.get_current_context().exit(1) def success(msg, *format_params): formatted_print(res, simple_text=(msg.format(endpoint_id, *format_params))) click.get_current_context().exit(0) # eternally active endpoints have a special expires_in value if res["expires_in"] == -1: success("{} does not require activation") # autoactivation is not supported and --until was not passed if until is None: # and we are active right now (0s in the future)... if res.active_until(0): success("{} is activated") # or we are not active fail() # autoactivation is not supported and --until was passed if res.active_until(until, relative_time=not absolute_time): success("{} will be active for at least {} seconds", until) else: fail(deadline=until)
def endpoint_is_activated(endpoint_id, until, absolute_time)
Executor for `globus endpoint is-activated`
5.150484
4.950483
1.0404
client = get_client() # get endpoint type, ensure unambiguous. personal = kwargs.pop("personal") server = kwargs.pop("server") shared = kwargs.pop("shared") if personal and (not server) and (not shared): endpoint_type = "personal" elif server and (not personal) and (not shared): endpoint_type = "server" elif shared and (not personal) and (not server): endpoint_type = "shared" else: raise click.UsageError( "Exactly one of --personal, --server, or --shared is required." ) # validate options kwargs["is_globus_connect"] = personal or None validate_endpoint_create_and_update_params(endpoint_type, False, kwargs) # shared endpoint creation if shared: endpoint_id, host_path = shared kwargs["host_endpoint"] = endpoint_id kwargs["host_path"] = host_path ep_doc = assemble_generic_doc("shared_endpoint", **kwargs) autoactivate(client, endpoint_id, if_expires_in=60) res = client.create_shared_endpoint(ep_doc) # non shared endpoint creation else: # omit `is_globus_connect` key if not GCP, otherwise include as `True` ep_doc = assemble_generic_doc("endpoint", **kwargs) res = client.create_endpoint(ep_doc) # output formatted_print( res, fields=(COMMON_FIELDS + GCP_FIELDS if personal else COMMON_FIELDS), text_format=FORMAT_TEXT_RECORD, )
def endpoint_create(**kwargs)
Executor for `globus endpoint create`
3.971598
3.851327
1.031229
client = get_client() server_doc = assemble_generic_doc( "server", subject=subject, port=port, scheme=scheme, hostname=hostname ) # n.b. must be done after assemble_generic_doc(), as that function filters # out `None`s, which we need to be able to set for `'unspecified'` if incoming_data_ports: server_doc.update( incoming_data_port_start=incoming_data_ports[0], incoming_data_port_end=incoming_data_ports[1], ) if outgoing_data_ports: server_doc.update( outgoing_data_port_start=outgoing_data_ports[0], outgoing_data_port_end=outgoing_data_ports[1], ) res = client.update_endpoint_server(endpoint_id, server_id, server_doc) formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def server_update( endpoint_id, server_id, subject, port, scheme, hostname, incoming_data_ports, outgoing_data_ports, )
Executor for `globus endpoint server update`
2.893228
2.712178
1.066754
try:
    config = get_config_obj(file_error=True)
except IOError as e:
    safeprint(e, write_to_stderr=True)
    click.get_current_context().exit(1)
else:
    safeprint(config.filename)
def filename_command()
Executor for `globus config filename`
5.433672
5.111041
1.063124
client = get_client()
ep_iterator = client.my_shared_endpoint_list(endpoint_id)
formatted_print(ep_iterator, fields=ENDPOINT_LIST_FIELDS)
def my_shared_endpoint_list(endpoint_id)
Executor for `globus endpoint my-shared-endpoint-list`
5.700812
5.483083
1.039709
# should start with "u_" if not v.startswith("u_"): return None # usernames have @ , we want to allow `u_foo@example.com` # b32 names never have @ if "@" in v: return None # trim "u_" v = v[2:] # wrong length if len(v) != 26: return None # append padding and uppercase so that b32decode will work v = v.upper() + (6 * "=") # try to decode try: return str(uuid.UUID(bytes=base64.b32decode(v))) # if it fails, I guess it's a username? Not much left to do except ValueError: return None
def _try_b32_decode(v)
Attempt to decode a b32-encoded username which is sometimes generated by internal Globus components. The expectation is that the string is a valid ID, username, or b32-encoded name. Therefore, we can do some simple checking on it. If it does not appear to be formatted correctly, return None.
5.759188
5.729009
1.005268
client = get_auth_client() resolved_values = [_try_b32_decode(v) or v for v in values] # since API doesn't accept mixed ids and usernames, # split input values into separate lists ids = [] usernames = [] for val in resolved_values: try: uuid.UUID(val) ids.append(val) except ValueError: usernames.append(val) # make two calls to get_identities with ids and usernames # then combine the calls into one response results = [] if len(ids): results += client.get_identities(ids=ids)["identities"] if len(usernames): results += client.get_identities(usernames=usernames)["identities"] res = GlobusResponse({"identities": results}) def _custom_text_format(identities): def resolve_identity(value): for identity in identities: if identity["id"] == value: return identity["username"] if identity["username"] == value: return identity["id"] return "NO_SUCH_IDENTITY" # standard output is one resolved identity per line in the same order # as the inputs. A resolved identity is either a username if given a # UUID vice versa, or "NO_SUCH_IDENTITY" if the identity could not be # found for val in resolved_values: safeprint(resolve_identity(val)) formatted_print( res, response_key="identities", fields=[ ("ID", "id"), ("Username", "username"), ("Full Name", "name"), ("Organization", "organization"), ("Email Address", "email"), ], # verbose output is a table. Order not guaranteed, may contain # duplicates text_format=(FORMAT_TEXT_TABLE if is_verbose() else _custom_text_format), )
def get_identities_command(values)
Executor for `globus get-identities`
4.464485
4.327238
1.031717
supported = ["web"] # web activation is always supported. # oauth if res["oauth_server"]: supported.append("oauth") for req in res["DATA"]: # myproxy if ( req["type"] == "myproxy" and req["name"] == "hostname" and req["value"] != "myproxy.globusonline.org" ): supported.append("myproxy") # delegate_proxy if req["type"] == "delegate_proxy" and req["name"] == "public_key": supported.append("delegate_proxy") return supported
def supported_activation_methods(res)
Given an activation_requirements document, returns a list of activation methods supported by this endpoint.
4.95194
4.55265
1.087705
methods = supported_activation_methods(res) lines = [ "This endpoint supports the following activation methods: ", ", ".join(methods).replace("_", " "), "\n", ( "For web activation use:\n" "'globus endpoint activate --web {}'\n".format(ep_id) if "web" in methods else "" ), ( "For myproxy activation use:\n" "'globus endpoint activate --myproxy {}'\n".format(ep_id) if "myproxy" in methods else "" ), ( "For oauth activation use web activation:\n" "'globus endpoint activate --web {}'\n".format(ep_id) if "oauth" in methods else "" ), ( "For delegate proxy activation use:\n" "'globus endpoint activate --delegate-proxy " "X.509_PEM_FILE {}'\n".format(ep_id) if "delegate_proxy" in methods else "" ), ( "Delegate proxy activation requires an additional dependency on " "cryptography. See the docs for details:\n" "https://docs.globus.org/cli/reference/endpoint_activate/\n" if "delegate_proxy" in methods else "" ), ] return "".join(lines)
def activation_requirements_help_text(res, ep_id)
Given an activation requirements document and an endpoint_id returns a string of help text for how to activate the endpoint
3.057982
3.051758
1.002039
kwargs = {} if if_expires_in is not None: kwargs["if_expires_in"] = if_expires_in res = client.endpoint_autoactivate(endpoint_id, **kwargs) if res["code"] == "AutoActivationFailed": message = ( "The endpoint could not be auto-activated and must be " "activated before it can be used.\n\n" + activation_requirements_help_text(res, endpoint_id) ) safeprint(message, write_to_stderr=True) click.get_current_context().exit(1) else: return res
def autoactivate(client, endpoint_id, if_expires_in=None)
Attempts to auto-activate the given endpoint with the given client. If auto-activation fails, parses the returned activation requirements to determine which methods of activation are supported, then tells the user to use 'globus endpoint activate' with the correct option(s).
3.831155
3.308742
1.157888
client = get_client() endpoint = client.get_endpoint(endpoint_id) if endpoint["host_endpoint_id"]: # not GCS -- this is a share endpoint raise click.UsageError( dedent( u ).format(display_name_or_cname(endpoint), **endpoint.data) ) if endpoint["s3_url"]: # not GCS -- legacy S3 endpoint type return (endpoint, "S3") else: return (endpoint, client.endpoint_server_list(endpoint_id))
def get_endpoint_w_server_list(endpoint_id)
A helper for handling endpoint server list lookups, correctly accounting for various endpoint types.
- Raises click.UsageError when used on Shares
- Returns (<get_endpoint_response>, "S3") for S3 endpoints
- Returns (<get_endpoint_response>, <server_list_response>) for all other Endpoints
8.272034
7.082241
1.167997
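A hedged usage sketch showing how a caller typically unpacks the return value described in the docstring; ep_id is a placeholder endpoint UUID.

# Hedged usage sketch; ep_id is a placeholder endpoint UUID.
endpoint, server_info = get_endpoint_w_server_list(ep_id)
if server_info == "S3":
    # legacy S3 endpoints carry no server list, only an s3_url field
    print("S3 endpoint:", endpoint["s3_url"])
else:
    for server in server_info:
        print(server["hostname"], server["port"])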
client = client or get_client()

    def timed_out(waited_time):
        if timeout is None:
            return False
        else:
            return waited_time >= timeout

    def check_completed():
        completed = client.task_wait(
            task_id, timeout=polling_interval, polling_interval=polling_interval
        )
        if completed:
            if heartbeat:
                safeprint("", write_to_stderr=True)
            # meowing tasks wake up!
            if meow:
                safeprint(
                    # NOTE: ASCII cat-art raw string elided from this snippet
                    r"...",
                    write_to_stderr=True,
                )

            # TODO: possibly update TransferClient.task_wait so that we don't
            # need to do an extra fetch to get the task status after completion
            res = client.get_task(task_id)
            formatted_print(res, text_format=FORMAT_SILENT)

            status = res["status"]
            if status == "SUCCEEDED":
                click.get_current_context().exit(0)
            else:
                click.get_current_context().exit(1)

        return completed

    # Tasks start out sleepy
    if meow:
        safeprint(
            # NOTE: ASCII cat-art raw string elided from this snippet
            r"...",
            write_to_stderr=True,
        )

    waited_time = 0
    while not timed_out(waited_time) and not check_completed():
        if heartbeat:
            safeprint(".", write_to_stderr=True, newline=False)
            sys.stderr.flush()

        waited_time += polling_interval

    # add a trailing newline to heartbeats if we fail
    if heartbeat:
        safeprint("", write_to_stderr=True)

    exit_code = 1
    if timed_out(waited_time):
        safeprint(
            "Task has yet to complete after {} seconds".format(timeout),
            write_to_stderr=True,
        )
        exit_code = timeout_exit_code

    # output json if requested, but nothing for text mode
    res = client.get_task(task_id)
    formatted_print(res, text_format=FORMAT_SILENT)

    click.get_current_context().exit(exit_code)
def task_wait_with_io( meow, heartbeat, polling_interval, timeout, task_id, timeout_exit_code, client=None )
Options are the core "task wait" options, including the `--meow` easter egg. This does the core "task wait" loop, including all of the IO. It *does exit* on behalf of the caller. (We can enhance with a `noabort=True` param or somesuch in the future if necessary.)
3.37447
3.484689
0.96837
backoff = random.random() / 100 # 5ms on average for _ in range(self.tries - 1): try: return f(*args, **kwargs) except NetworkError: time.sleep(backoff) backoff *= 2 return f(*args, **kwargs)
def retry(self, f, *args, **kwargs)
Retries the given function self.tries times on NetworkErrors
3.282741
2.785565
1.178483
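A self-contained sketch of the same exponential-backoff behaviour; it substitutes a local stand-in exception for the SDK's NetworkError so the example runs without any Globus dependencies.

import random
import time

class NetworkError(Exception):
    # stand-in for the SDK exception caught by retry(); illustration only
    pass

class RetryingCaller(object):
    def __init__(self, tries=5):
        self.tries = tries

    def retry(self, f, *args, **kwargs):
        backoff = random.random() / 100  # 5ms on average
        for _ in range(self.tries - 1):
            try:
                return f(*args, **kwargs)
            except NetworkError:
                time.sleep(backoff)
                backoff *= 2
        return f(*args, **kwargs)  # last attempt: let exceptions propagate

attempts = {"n": 0}

def flaky():
    # fails twice, then succeeds
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise NetworkError("transient failure")
    return "ok"

print(RetryingCaller(tries=5).retry(flaky))  # prints "ok" after two retries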
endpoint_id = safe_stringify(endpoint_id) self.logger.info( "TransferClient.recursive_operation_ls({}, {}, {})".format( endpoint_id, depth, params ) ) return RecursiveLsResponse(self, endpoint_id, depth, filter_after_first, params)
def recursive_operation_ls( self, endpoint_id, depth=3, filter_after_first=True, **params )
Makes recursive calls to ``GET /operation/endpoint/<endpoint_id>/ls`` Does not preserve access to top level operation_ls fields, but adds a "path" field for every item that represents the full path to that item. :rtype: iterable of :class:`GlobusResponse <globus_sdk.response.GlobusResponse>` **Parameters** ``endpoint_id`` (*string*) The endpoint being recursively ls'ed. If no "path" is given in params, the start path is determined by this endpoint. ``depth`` (*int*) The maximum file depth the recursive ls will go to. ``filter_after_first`` (*bool*) If False, any "filter" in params will only be applied to the first, top level ls, all results beyond that will be unfiltered. ``params`` Parameters that will be passed through as query params. **Examples** >>> tc = globus_sdk.TransferClient(...) >>> for entry in tc.recursive_operation_ls(ep_id, path="/~/project1/"): >>> print(entry["path"], entry["type"]) **External Documentation** See `List Directory Contents \ <https://docs.globus.org/api/transfer/file_operations/#list_directory_contents>`_ in the REST documentation for details, but note that top level data fields are no longer available and an additional per item "path" field is added.
3.950922
4.013278
0.984463
# passthrough conditions: None or already processed
        if value is None or isinstance(value, tuple):
            return value

        # split the value on the first colon, leave the rest intact
        splitval = value.split(":", 1)

        # first element is the endpoint_id
        endpoint_id = click.UUID(splitval[0])

        # get the second element, defaulting to `None` if there was no colon in
        # the original value
        try:
            path = splitval[1]
        except IndexError:
            path = None
        # coerce path="" to path=None
        # means that we treat "endpoint_id" and "endpoint_id:" equivalently
        path = path or None

        if path is None and self.path_required:
            self.fail("The path component is required", param=param)

        return (endpoint_id, path)
def convert(self, value, param, ctx)
ParamType.convert() is the actual processing method that takes a provided parameter and parses it.
6.550363
6.518602
1.004872
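A hedged illustration of the accepted value shapes; the helper below only mirrors the split logic (not click's error handling), and the endpoint UUID is a sample value.

# Standalone mirror of the ENDPOINT_ID[:PATH] split, for illustration only.
import uuid

def split_endpoint_plus_path(value):
    splitval = value.split(":", 1)
    endpoint_id = uuid.UUID(splitval[0])
    path = splitval[1] if len(splitval) > 1 else None
    return (endpoint_id, path or None)  # "" coerces to None

print(split_endpoint_plus_path("ddb59aef-6d04-11e5-ba46-22000b92c6ec:/~/data"))
# (UUID('ddb59aef-6d04-11e5-ba46-22000b92c6ec'), '/~/data')
print(split_endpoint_plus_path("ddb59aef-6d04-11e5-ba46-22000b92c6ec:"))
# (UUID('ddb59aef-6d04-11e5-ba46-22000b92c6ec'), None)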
if bool(all) + bool(task_id) != 1: raise click.UsageError( "You must pass EITHER the special --all flag " "to cancel all in-progress tasks OR a single " "task ID to cancel." ) client = get_client() if all: from sys import maxsize task_ids = [ task_row["task_id"] for task_row in client.task_list( filter="type:TRANSFER,DELETE/status:ACTIVE,INACTIVE", fields="task_id", num_results=maxsize, # FIXME want to ask for "unlimited" set ) ] task_count = len(task_ids) if not task_ids: raise click.ClickException("You have no in-progress tasks.") def cancellation_iterator(): for i in task_ids: yield (i, client.cancel_task(i).data) def json_converter(res): return { "results": [x for i, x in cancellation_iterator()], "task_ids": task_ids, } def _custom_text(res): for (i, (task_id, data)) in enumerate(cancellation_iterator(), start=1): safeprint( u"{} ({} of {}): {}".format(task_id, i, task_count, data["message"]) ) # FIXME: this is kind of an abuse of formatted_print because the # text format and json converter are doing their own thing, not really # interacting with the "response data" (None). Is there a better way of # handling this? formatted_print(None, text_format=_custom_text, json_converter=json_converter) else: res = client.cancel_task(task_id) formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def cancel_task(all, task_id)
Executor for `globus task cancel`
4.900395
4.747299
1.032249
task_wait_with_io( meow, heartbeat, polling_interval, timeout, task_id, timeout_exit_code )
def task_wait(meow, heartbeat, polling_interval, timeout, task_id, timeout_exit_code)
Executor for `globus task wait`
3.481207
3.320317
1.048456
# cannot force a multiple or count option to be single use
    if "multiple" in kwargs or "count" in kwargs:
        raise ValueError(
            "Internal error, one_use_option cannot be used "
            "with multiple or count."
        )

    # cannot force a non-Option Parameter (argument) to be a OneUseOption
    if kwargs.get("cls"):
        raise TypeError(
            "Internal error, one_use_option cannot overwrite "
            "cls {}.".format(kwargs.get("cls"))
        )

    # use our OneUseOption class instead of a normal Option
    kwargs["cls"] = OneUseOption

    # if dealing with a flag, switch to a counting option,
    # then assert the count is not greater than 1 and cast to a bool
    if kwargs.get("is_flag"):
        kwargs["is_flag"] = False  # mutually exclusive with count
        kwargs["count"] = True

    # if not a flag, this option takes an argument(s), switch to a multiple
    # option, assert the len is 1, and treat the first element as the value
    else:
        kwargs["multiple"] = True

    # decorate with the click.option decorator, but with our custom kwargs
    def decorator(f):
        return click.option(*args, **kwargs)(f)

    return decorator
def one_use_option(*args, **kwargs)
Wrapper of the click.option decorator that replaces any instances of the Option class with the custom OneUseOption class
6.366973
5.983884
1.06402
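A hedged usage sketch, assuming the OneUseOption class referenced above is importable from the same module; the command and option names are invented.

# Hypothetical usage of one_use_option on a click command (names invented).
import click

@click.command()
@one_use_option("--label", help="a label that may be passed at most once")
@one_use_option("--fast", is_flag=True, help="a flag usable at most once")
def demo(label, fast):
    click.echo("label={} fast={}".format(label, fast))

# Under the hood, a flag is converted to a counting option (count=True) and
# a valued option to multiple=True; OneUseOption is then expected to reject
# repeats and unwrap the single collected value.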
assert mode in ("uri", "hostname", "hostname_port") def match(server_doc): if mode == "hostname": return server_spec == server_doc["hostname"] elif mode == "hostname_port": return server_spec == "{}:{}".format( server_doc["hostname"], server_doc["port"] ) elif mode == "uri": return server_spec == "{}://{}:{}".format( server_doc["scheme"], server_doc["hostname"], server_doc["port"] ) else: raise NotImplementedError("Unreachable error! Something is very wrong.") return [server_doc for server_doc in server_list if match(server_doc)]
def _spec_to_matches(server_list, server_spec, mode)
mode is one of {uri, hostname, hostname_port}. Returns a list of matching server docs; there should usually be 0 or 1 matches, though multiple matches are possible.
2.32594
2.108274
1.103244
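A small self-contained example of the spec matching above; the server documents below are invented.

# Illustrative server documents (invented values).
server_list = [
    {"scheme": "gsiftp", "hostname": "gridftp.example.org", "port": 2811},
    {"scheme": "gsiftp", "hostname": "other.example.org", "port": 2811},
]

print(_spec_to_matches(server_list, "gridftp.example.org", mode="hostname"))
# -> [{'scheme': 'gsiftp', 'hostname': 'gridftp.example.org', 'port': 2811}]

print(_spec_to_matches(server_list, "gsiftp://other.example.org:2811", mode="uri"))
# -> [{'scheme': 'gsiftp', 'hostname': 'other.example.org', 'port': 2811}]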