Dataset schema:

  column            type    notes
  _id               string  2-7 characters
  title             string  1-88 characters
  partition         string  3 classes
  text              string  75-19.8k characters
  language          string  1 class
  meta_information  dict
q13300
_create_osf_project
train
def _create_osf_project(dlgr_id, description=None):
    """Create a project on the OSF."""
    if not description:
        description = "Experiment {} registered by Dallinger.".format(dlgr_id)
    r = requests.post(
        "{}/nodes/".format(root),
        data={
            "type": "nodes",
            "category": "project",
            "title": "Experiment dlgr-{}".format(dlgr_id[0:8]),
            "description": description,
        },
        headers={"Authorization": "Bearer {}".format(config.get("osf_access_token"))},
    )
    r.raise_for_status()
    osf_id = r.json()["data"]["id"]
    logger.info("Project registered on OSF at http://osf.io/{}".format(osf_id))
    return osf_id
python
{ "resource": "" }
q13301
_upload_assets_to_OSF
train
def _upload_assets_to_OSF(dlgr_id, osf_id, provider="osfstorage"):
    """Upload experimental assets to the OSF."""
    root = "https://files.osf.io/v1"
    snapshot_filename = "{}-code.zip".format(dlgr_id)
    snapshot_path = os.path.join("snapshots", snapshot_filename)
    r = requests.put(
        "{}/resources/{}/providers/{}/".format(root, osf_id, provider),
        params={"kind": "file", "name": snapshot_filename},
        headers={
            "Authorization": "Bearer {}".format(config.get("osf_access_token")),
            "Content-Type": "text/plain",
        },
        data=open(snapshot_path, "rb"),
    )
    r.raise_for_status()
python
{ "resource": "" }
q13302
update_task
train
def update_task(deadline, label, task_id):
    """
    Executor for `globus task update`
    """
    client = get_client()

    task_doc = assemble_generic_doc("task", label=label, deadline=deadline)

    res = client.update_task(task_id, task_doc)
    formatted_print(res, simple_text="Success")
python
{ "resource": "" }
q13303
show_task
train
def show_task(successful_transfers, task_id):
    """
    Executor for `globus task show`
    """
    client = get_client()

    if successful_transfers:
        print_successful_transfers(client, task_id)
    else:
        print_task_detail(client, task_id)
python
{ "resource": "" }
q13304
bookmark_create
train
def bookmark_create(endpoint_plus_path, bookmark_name):
    """
    Executor for `globus bookmark create`
    """
    endpoint_id, path = endpoint_plus_path
    client = get_client()

    submit_data = {
        "endpoint_id": str(endpoint_id),
        "path": path,
        "name": bookmark_name,
    }

    res = client.create_bookmark(submit_data)
    formatted_print(res, simple_text="Bookmark ID: {}".format(res["id"]))
python
{ "resource": "" }
q13305
server_list
train
def server_list(endpoint_id):
    """
    Executor for `globus endpoint server list`
    """
    # raises usage error on shares for us
    endpoint, server_list = get_endpoint_w_server_list(endpoint_id)

    if server_list == "S3":  # not GCS -- this is an S3 endpoint
        server_list = {"s3_url": endpoint["s3_url"]}
        fields = [("S3 URL", "s3_url")]
        text_format = FORMAT_TEXT_RECORD
    else:  # regular GCS host endpoint
        fields = (
            ("ID", "id"),
            ("URI", lambda s: (s["uri"] or "none (Globus Connect Personal)")),
        )
        text_format = FORMAT_TEXT_TABLE

    formatted_print(server_list, text_format=text_format, fields=fields)
python
{ "resource": "" }
q13306
task_list
train
def task_list(
    limit,
    filter_task_id,
    filter_status,
    filter_type,
    filter_label,
    filter_not_label,
    inexact,
    filter_requested_after,
    filter_requested_before,
    filter_completed_after,
    filter_completed_before,
):
    """
    Executor for `globus task-list`
    """

    def _process_filterval(prefix, value, default=None):
        if value:
            if isinstance(value, six.string_types):
                return "{}:{}/".format(prefix, value)
            return "{}:{}/".format(prefix, ",".join(str(x) for x in value))
        else:
            return default or ""

    # make filter string
    filter_string = ""
    filter_string += _process_filterval("task_id", filter_task_id)
    filter_string += _process_filterval("status", filter_status)
    filter_string += _process_filterval(
        "type", filter_type, default="type:TRANSFER,DELETE/"
    )

    # combine data into one list for easier processing
    if inexact:
        label_data = ["~" + s for s in filter_label] + [
            "!~" + s for s in filter_not_label
        ]
    else:
        label_data = ["=" + s for s in filter_label] + [
            "!" + s for s in filter_not_label
        ]
    filter_string += _process_filterval("label", label_data)

    filter_string += _process_filterval(
        "request_time",
        [(filter_requested_after or ""), (filter_requested_before or "")],
    )
    filter_string += _process_filterval(
        "completion_time",
        [(filter_completed_after or ""), (filter_completed_before or "")],
    )

    client = get_client()
    task_iterator = client.task_list(
        num_results=limit, filter=filter_string[:-1]
    )  # ignore trailing /

    fields = [
        ("Task ID", "task_id"),
        ("Status", "status"),
        ("Type", "type"),
        ("Source Display Name", "source_endpoint_display_name"),
        ("Dest Display Name", "destination_endpoint_display_name"),
        ("Label", "label"),
    ]
    formatted_print(
        task_iterator, fields=fields, json_converter=iterable_response_to_dict
    )
python
{ "resource": "" }
q13307
delete_command
train
def delete_command(
    batch,
    ignore_missing,
    star_silent,
    recursive,
    enable_globs,
    endpoint_plus_path,
    label,
    submission_id,
    dry_run,
    deadline,
    skip_activation_check,
    notify,
):
    """
    Executor for `globus delete`
    """
    endpoint_id, path = endpoint_plus_path
    if path is None and (not batch):
        raise click.UsageError("delete requires either a PATH OR --batch")

    client = get_client()

    # attempt to activate unless --skip-activation-check is given
    if not skip_activation_check:
        autoactivate(client, endpoint_id, if_expires_in=60)

    delete_data = DeleteData(
        client,
        endpoint_id,
        label=label,
        recursive=recursive,
        ignore_missing=ignore_missing,
        submission_id=submission_id,
        deadline=deadline,
        skip_activation_check=skip_activation_check,
        interpret_globs=enable_globs,
        **notify
    )

    if batch:
        # although this sophisticated structure (like that in transfer)
        # isn't strictly necessary, it gives us the ability to add options in
        # the future to these lines with trivial modifications
        @click.command()
        @click.argument("path", type=TaskPath(base_dir=path))
        def process_batch_line(path):
            """
            Parse a line of batch input and add it to the delete submission
            item.
            """
            delete_data.add_item(str(path))

        shlex_process_stdin(process_batch_line, "Enter paths to delete, line by line.")
    else:
        if not star_silent and enable_globs and path.endswith("*"):
            # not intuitive, but `click.confirm(abort=True)` prints to stdout
            # unnecessarily, which we don't really want...
            # only do this check if stderr is a pty
            if (
                err_is_terminal()
                and term_is_interactive()
                and not click.confirm(
                    'Are you sure you want to delete all files matching "{}"?'.format(
                        path
                    ),
                    err=True,
                )
            ):
                safeprint("Aborted.", write_to_stderr=True)
                click.get_current_context().exit(1)
        delete_data.add_item(path)

    if dry_run:
        formatted_print(delete_data, response_key="DATA", fields=[("Path", "path")])
        # exit safely
        return

    res = client.submit_delete(delete_data)
    formatted_print(
        res,
        text_format=FORMAT_TEXT_RECORD,
        fields=(("Message", "message"), ("Task ID", "task_id")),
    )
python
{ "resource": "" }
q13308
set_command
train
def set_command(value, parameter):
    """
    Executor for `globus config set`
    """
    conf = get_config_obj()

    section = "cli"
    if "." in parameter:
        section, parameter = parameter.split(".", 1)

    # ensure that the section exists
    if section not in conf:
        conf[section] = {}
    # set the value for the given parameter
    conf[section][parameter] = value

    # write to disk
    safeprint("Writing updated config to {}".format(conf.filename))
    conf.write()
python
{ "resource": "" }
q13309
shlex_process_stdin
train
def shlex_process_stdin(process_command, helptext):
    """
    Use shlex to process stdin line-by-line.
    Also prints help text.

    Requires that @process_command be a Click command object, used for
    processing single lines of input. helptext is prepended to the standard
    message printed to interactive sessions.
    """
    # if input is interactive, print help to stderr
    if sys.stdin.isatty():
        safeprint(
            (
                "{}\n".format(helptext)
                + "Lines are split with shlex in POSIX mode: "
                "https://docs.python.org/library/shlex.html#parsing-rules\n"
                "Terminate input with Ctrl+D or <EOF>\n"
            ),
            write_to_stderr=True,
        )

    # use readlines() rather than implicit file read line looping to force
    # python to properly capture EOF (otherwise, EOF acts as a flush and
    # things get weird)
    for line in sys.stdin.readlines():
        # get the argument vector:
        # do a shlex split to handle quoted paths with spaces in them
        # also lets us have comments with #
        argv = shlex.split(line, comments=True)
        if argv:
            try:
                process_command.main(args=argv)
            except SystemExit as e:
                if e.code != 0:
                    raise
python
{ "resource": "" }
q13310
local_id
train
def local_id(personal):
    """
    Executor for `globus endpoint local-id`
    """
    if personal:
        try:
            ep_id = LocalGlobusConnectPersonal().endpoint_id
        except IOError as e:
            safeprint(e, write_to_stderr=True)
            click.get_current_context().exit(1)

        if ep_id is not None:
            safeprint(ep_id)
        else:
            safeprint("No Globus Connect Personal installation found.")
            click.get_current_context().exit(1)
python
{ "resource": "" }
q13311
globus_group
train
def globus_group(*args, **kwargs):
    """
    Wrapper over click.group which sets GlobusCommandGroup as the Class

    Caution!
    Don't get snake-bitten by this. `globus_group` is a decorator which MUST
    take arguments. It is not wrapped in our common detect-and-decorate
    pattern to allow it to be used bare -- that wouldn't work (unnamed groups?
    weird stuff)
    """

    def inner_decorator(f):
        f = click.group(*args, cls=GlobusCommandGroup, **kwargs)(f)
        f = common_options(f)
        return f

    return inner_decorator
python
{ "resource": "" }
q13312
_get_package_data
train
def _get_package_data():
    """
    Import a set of important packages and return relevant data about them in
    a list of rows.
    Imports are done in here to avoid potential for circular imports and other
    problems, and to make iteration simpler.
    """
    moddata = []
    modlist = (
        "click",
        "configobj",
        "cryptography",
        "globus_cli",
        "globus_sdk",
        "jmespath",
        "requests",
        "six",
    )
    if verbosity() < 2:
        modlist = ("globus_cli", "globus_sdk", "requests")

    for mod in modlist:
        cur = [mod]
        try:
            loaded_mod = __import__(mod)
        except ImportError:
            loaded_mod = None

        for attr in ("__version__", "__file__", "__path__"):
            # if loading failed, be sure to pad with error messages
            if loaded_mod is None:
                cur.append("[import failed]")
                continue
            try:
                attrval = getattr(loaded_mod, attr)
            except AttributeError:
                attrval = ""
            cur.append(attrval)
        moddata.append(cur)

    return moddata
python
{ "resource": "" }
q13313
print_version
train
def print_version():
    """
    Print out the current version, and at least try to fetch the latest from
    PyPi to print alongside it.

    It may seem odd that this isn't in globus_cli.version , but it's done this
    way to separate concerns over printing the version from looking it up.
    """
    latest, current = get_versions()
    if latest is None:
        safeprint(
            ("Installed Version: {0}\n" "Failed to lookup latest version.").format(
                current
            )
        )
    else:
        safeprint(
            ("Installed Version: {0}\n" "Latest Version: {1}\n" "\n{2}").format(
                current,
                latest,
                "You are running the latest version of the Globus CLI"
                if current == latest
                else (
                    "You should update your version of the Globus CLI with\n"
                    "  globus update"
                )
                if current < latest
                else "You are running a preview version of the Globus CLI",
            )
        )

    # verbose shows more platform and python info
    # it also includes versions of some CLI dependencies
    if is_verbose():
        moddata = _get_package_data()

        safeprint("\nVerbose Data\n---")

        safeprint("platform:")
        safeprint("  platform: {}".format(platform.platform()))
        safeprint("  py_implementation: {}".format(platform.python_implementation()))
        safeprint("  py_version: {}".format(platform.python_version()))
        safeprint("  sys.executable: {}".format(sys.executable))
        safeprint("  site.USER_BASE: {}".format(site.USER_BASE))

        safeprint("modules:")
        for mod, modversion, modfile, modpath in moddata:
            safeprint("  {}:".format(mod))
            safeprint("    __version__: {}".format(modversion))
            safeprint("    __file__: {}".format(modfile))
            safeprint("    __path__: {}".format(modpath))
python
{ "resource": "" }
q13314
update_command
train
def update_command(yes, development, development_version):
    """
    Executor for `globus update`
    """
    # enforce that pip MUST be installed
    # Why not just include it in the setup.py requirements? Mostly weak
    # reasons, but it shouldn't matter much.
    # - if someone has installed the CLI without pip, then they haven't
    #   followed our install instructions, so it's mostly a non-issue
    # - we don't want to have `pip install -U globus-cli` upgrade pip -- that's
    #   a little bit invasive and easy to do by accident on modern versions of
    #   pip where `--upgrade-strategy` defaults to `eager`
    # - we may want to do distributions in the future with dependencies baked
    #   into a package, but we'd never want to do that with pip. More changes
    #   would be needed to support that use-case, which we've discussed, but
    #   not depending directly on pip gives us a better escape hatch
    # - if we depend on pip, we need to start thinking about what versions we
    #   support. In point of fact, that becomes an issue as soon as we add
    #   this command, but not being explicit about it lets us punt for now
    #   (maybe indefinitely) on figuring out version requirements. All of that
    #   is to say: not including it is bad, and from that badness we reap the
    #   rewards of procrastination and non-explicit requirements
    # - Advanced usage, like `pip install -t` can produce an installed version
    #   of the CLI which can't import its installing `pip`. If we depend on
    #   pip, anyone doing this is forced to get two copies of pip, which seems
    #   kind of nasty (even if "they're asking for it")
    if not _check_pip_installed():
        safeprint("`globus update` requires pip. Please install pip and try again")
        click.get_current_context().exit(1)

    # --development-version implies --development
    development = development or (development_version is not None)

    # if we're running with `--development`, then the target version is a
    # tarball from GitHub, and we can skip out on the safety checks
    if development:
        # default to master
        development_version = development_version or "master"
        target_version = (
            "https://github.com/globus/globus-cli/archive/{}"
            ".tar.gz#egg=globus-cli"
        ).format(development_version)
    else:
        # lookup version from PyPi, abort if we can't get it
        latest, current = get_versions()
        if latest is None:
            safeprint("Failed to lookup latest version. Aborting.")
            click.get_current_context().exit(1)

        # in the case where we're already up to date, do nothing and exit
        if current == latest:
            safeprint(
                "You are already running the latest version: {}".format(current)
            )
            return
        # if we're up to date (or ahead, meaning a dev version was installed)
        # then prompt before continuing, respecting `--yes`
        else:
            safeprint(
                (
                    "You are already running version {0}\n"
                    "The latest version is {1}"
                ).format(current, latest)
            )
            if not yes and (
                not click.confirm("Continue with the upgrade?", default=True)
            ):
                click.get_current_context().exit(1)

        # if we make it through to here, it means we didn't hit any safe (or
        # unsafe) abort conditions, so set the target version for upgrade to
        # the latest
        target_version = "globus-cli=={}".format(latest)

    # print verbose warning/help message, to guide less fortunate souls who
    # hit Ctrl+C at a foolish time, lose connectivity, or don't invoke with
    # `sudo` on a global install of the CLI
    safeprint(
        (
            "The Globus CLI will now update itself.\n"
            "In the event that an error occurs or the update is interrupted, "
            "we recommend uninstalling and reinstalling the CLI.\n"
            "Update Target: {}\n"
        ).format(target_version)
    )

    # register the upgrade activity as an atexit function
    # this ensures that most library teardown (other than whatever libs might
    # jam into atexit themselves...) has already run, and therefore protects
    # us against most potential bugs resulting from upgrading click while a
    # click command is running
    #
    # NOTE: there is a risk that we will see bugs on upgrade if the act of
    # doing a pip upgrade install changes state on disk and we (or a lib we
    # use) rely on that via pkg_resources, lazy/deferred imports, or good
    # old-fashioned direct inspection of `__file__` and the like DURING an
    # atexit method. Anything outside of atexit methods remains safe!
    @atexit.register
    def do_upgrade():
        install_args = ["install", "--upgrade", target_version]
        if IS_USER_INSTALL:
            install_args.insert(1, "--user")
        _call_pip(*install_args)
python
{ "resource": "" }
q13315
whoami_command
train
def whoami_command(linked_identities):
    """
    Executor for `globus whoami`
    """
    client = get_auth_client()

    # get userinfo from auth.
    # if we get back an error the user likely needs to log in again
    try:
        res = client.oauth2_userinfo()
    except AuthAPIError:
        safeprint(
            "Unable to get user information. Please try logging in again.",
            write_to_stderr=True,
        )
        click.get_current_context().exit(1)

    print_command_hint(
        "For information on which identities are in session see\n"
        "  globus session show\n"
    )

    # --linked-identities either displays all usernames or a table if verbose
    if linked_identities:
        try:
            formatted_print(
                res["identity_set"],
                fields=[
                    ("Username", "username"),
                    ("Name", "name"),
                    ("ID", "sub"),
                    ("Email", "email"),
                ],
                simple_text=(
                    None
                    if is_verbose()
                    else "\n".join([x["username"] for x in res["identity_set"]])
                ),
            )
        except KeyError:
            safeprint(
                "Your current login does not have the consents required "
                "to view your full identity set. Please log in again "
                "to agree to the required consents.",
                write_to_stderr=True,
            )

    # Default output is the top level data
    else:
        formatted_print(
            res,
            text_format=FORMAT_TEXT_RECORD,
            fields=[
                ("Username", "preferred_username"),
                ("Name", "name"),
                ("ID", "sub"),
                ("Email", "email"),
            ],
            simple_text=(None if is_verbose() else res["preferred_username"]),
        )
python
{ "resource": "" }
q13316
get_completion_context
train
def get_completion_context(args):
    """
    Walk the tree of commands to a terminal command or multicommand, using the
    Click Context system.
    Effectively, we'll be using the resilient_parsing mode of commands to stop
    evaluation, then having them capture their options and arguments, passing
    us on to the next subcommand.
    If we walk "off the tree" with a command that we don't recognize, we have
    a hardstop condition, but otherwise, we walk as far as we can go and
    that's the location from which we should do our completion work.
    """
    # get the "globus" command as a click.Command
    root_command = click.get_current_context().find_root().command
    # build a new context object off of it, with resilient_parsing set so that
    # no callbacks are invoked
    ctx = root_command.make_context("globus", list(args), resilient_parsing=True)

    # walk down multicommands until we've matched on everything and are at a
    # terminal context that holds all of our completed args
    while isinstance(ctx.command, click.MultiCommand) and args:
        # trim out any params that are capturable at this level of the command
        # tree by resetting the argument list
        args = ctx.protected_args + ctx.args
        # if there were no remaining args, stop walking the tree
        if not args:
            break

        # check for a matching command, and if one isn't found stop the
        # traversal and abort the whole process -- this would mean that a
        # completed command was entered which doesn't match a known command
        # there's nothing completion can do in this case unless it implements
        # sophisticated fuzzy matching
        command = ctx.command.get_command(ctx, args[0])
        if not command:
            return None

        # otherwise, grab that command, and build a subcontext to continue the
        # tree walk
        else:
            ctx = command.make_context(
                args[0], args[1:], parent=ctx, resilient_parsing=True
            )

    # return the context we found
    return ctx
python
{ "resource": "" }
q13317
endpoint_is_activated
train
def endpoint_is_activated(endpoint_id, until, absolute_time):
    """
    Executor for `globus endpoint is-activated`
    """
    client = get_client()
    res = client.endpoint_get_activation_requirements(endpoint_id)

    def fail(deadline=None):
        exp_string = ""
        if deadline is not None:
            exp_string = " or will expire within {} seconds".format(deadline)
        message = "The endpoint is not activated{}.\n\n".format(
            exp_string
        ) + activation_requirements_help_text(res, endpoint_id)
        formatted_print(res, simple_text=message)
        click.get_current_context().exit(1)

    def success(msg, *format_params):
        formatted_print(res, simple_text=(msg.format(endpoint_id, *format_params)))
        click.get_current_context().exit(0)

    # eternally active endpoints have a special expires_in value
    if res["expires_in"] == -1:
        success("{} does not require activation")

    # autoactivation is not supported and --until was not passed
    if until is None:
        # and we are active right now (0s in the future)...
        if res.active_until(0):
            success("{} is activated")
        # or we are not active
        fail()

    # autoactivation is not supported and --until was passed
    if res.active_until(until, relative_time=not absolute_time):
        success("{} will be active for at least {} seconds", until)
    else:
        fail(deadline=until)
python
{ "resource": "" }
q13318
endpoint_create
train
def endpoint_create(**kwargs):
    """
    Executor for `globus endpoint create`
    """
    client = get_client()

    # get endpoint type, ensure unambiguous.
    personal = kwargs.pop("personal")
    server = kwargs.pop("server")
    shared = kwargs.pop("shared")

    if personal and (not server) and (not shared):
        endpoint_type = "personal"
    elif server and (not personal) and (not shared):
        endpoint_type = "server"
    elif shared and (not personal) and (not server):
        endpoint_type = "shared"
    else:
        raise click.UsageError(
            "Exactly one of --personal, --server, or --shared is required."
        )

    # validate options
    kwargs["is_globus_connect"] = personal or None
    validate_endpoint_create_and_update_params(endpoint_type, False, kwargs)

    # shared endpoint creation
    if shared:
        endpoint_id, host_path = shared
        kwargs["host_endpoint"] = endpoint_id
        kwargs["host_path"] = host_path

        ep_doc = assemble_generic_doc("shared_endpoint", **kwargs)
        autoactivate(client, endpoint_id, if_expires_in=60)
        res = client.create_shared_endpoint(ep_doc)

    # non shared endpoint creation
    else:
        # omit `is_globus_connect` key if not GCP, otherwise include as `True`
        ep_doc = assemble_generic_doc("endpoint", **kwargs)
        res = client.create_endpoint(ep_doc)

    # output
    formatted_print(
        res,
        fields=(COMMON_FIELDS + GCP_FIELDS if personal else COMMON_FIELDS),
        text_format=FORMAT_TEXT_RECORD,
    )
python
{ "resource": "" }
q13319
server_update
train
def server_update(
    endpoint_id,
    server_id,
    subject,
    port,
    scheme,
    hostname,
    incoming_data_ports,
    outgoing_data_ports,
):
    """
    Executor for `globus endpoint server update`
    """
    client = get_client()

    server_doc = assemble_generic_doc(
        "server", subject=subject, port=port, scheme=scheme, hostname=hostname
    )
    # n.b. must be done after assemble_generic_doc(), as that function filters
    # out `None`s, which we need to be able to set for `'unspecified'`
    if incoming_data_ports:
        server_doc.update(
            incoming_data_port_start=incoming_data_ports[0],
            incoming_data_port_end=incoming_data_ports[1],
        )
    if outgoing_data_ports:
        server_doc.update(
            outgoing_data_port_start=outgoing_data_ports[0],
            outgoing_data_port_end=outgoing_data_ports[1],
        )

    res = client.update_endpoint_server(endpoint_id, server_id, server_doc)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13320
filename_command
train
def filename_command():
    """
    Executor for `globus config filename`
    """
    try:
        config = get_config_obj(file_error=True)
    except IOError as e:
        safeprint(e, write_to_stderr=True)
        click.get_current_context().exit(1)
    else:
        safeprint(config.filename)
python
{ "resource": "" }
q13321
my_shared_endpoint_list
train
def my_shared_endpoint_list(endpoint_id):
    """
    Executor for `globus endpoint my-shared-endpoint-list`
    """
    client = get_client()

    ep_iterator = client.my_shared_endpoint_list(endpoint_id)

    formatted_print(ep_iterator, fields=ENDPOINT_LIST_FIELDS)
python
{ "resource": "" }
q13322
_try_b32_decode
train
def _try_b32_decode(v):
    """
    Attempt to decode a b32-encoded username which is sometimes generated by
    internal Globus components.
    The expectation is that the string is a valid ID, username, or b32-encoded
    name. Therefore, we can do some simple checking on it.

    If it does not appear to be formatted correctly, return None.
    """
    # should start with "u_"
    if not v.startswith("u_"):
        return None
    # usernames have @ , we want to allow `u_foo@example.com`
    # b32 names never have @
    if "@" in v:
        return None

    # trim "u_"
    v = v[2:]
    # wrong length
    if len(v) != 26:
        return None

    # append padding and uppercase so that b32decode will work
    v = v.upper() + (6 * "=")

    # try to decode
    try:
        return str(uuid.UUID(bytes=base64.b32decode(v)))
    # if it fails, I guess it's a username? Not much left to do
    except ValueError:
        return None
python
{ "resource": "" }
q13323
get_identities_command
train
def get_identities_command(values):
    """
    Executor for `globus get-identities`
    """
    client = get_auth_client()

    resolved_values = [_try_b32_decode(v) or v for v in values]

    # since API doesn't accept mixed ids and usernames,
    # split input values into separate lists
    ids = []
    usernames = []
    for val in resolved_values:
        try:
            uuid.UUID(val)
            ids.append(val)
        except ValueError:
            usernames.append(val)

    # make two calls to get_identities with ids and usernames
    # then combine the calls into one response
    results = []
    if len(ids):
        results += client.get_identities(ids=ids)["identities"]
    if len(usernames):
        results += client.get_identities(usernames=usernames)["identities"]
    res = GlobusResponse({"identities": results})

    def _custom_text_format(identities):
        """
        Non-verbose text output is customized
        """

        def resolve_identity(value):
            """
            helper to deal with variable inputs and uncertain response order
            """
            for identity in identities:
                if identity["id"] == value:
                    return identity["username"]
                if identity["username"] == value:
                    return identity["id"]
            return "NO_SUCH_IDENTITY"

        # standard output is one resolved identity per line in the same order
        # as the inputs. A resolved identity is either a username (if given a
        # UUID) or a UUID (if given a username), or "NO_SUCH_IDENTITY" if the
        # identity could not be found
        for val in resolved_values:
            safeprint(resolve_identity(val))

    formatted_print(
        res,
        response_key="identities",
        fields=[
            ("ID", "id"),
            ("Username", "username"),
            ("Full Name", "name"),
            ("Organization", "organization"),
            ("Email Address", "email"),
        ],
        # verbose output is a table. Order not guaranteed, may contain
        # duplicates
        text_format=(FORMAT_TEXT_TABLE if is_verbose() else _custom_text_format),
    )
python
{ "resource": "" }
q13324
supported_activation_methods
train
def supported_activation_methods(res):
    """
    Given an activation_requirements document
    returns a list of activation methods supported by this endpoint.
    """
    supported = ["web"]  # web activation is always supported.

    # oauth
    if res["oauth_server"]:
        supported.append("oauth")

    for req in res["DATA"]:
        # myproxy
        if (
            req["type"] == "myproxy"
            and req["name"] == "hostname"
            and req["value"] != "myproxy.globusonline.org"
        ):
            supported.append("myproxy")

        # delegate_proxy
        if req["type"] == "delegate_proxy" and req["name"] == "public_key":
            supported.append("delegate_proxy")

    return supported
python
{ "resource": "" }
q13325
activation_requirements_help_text
train
def activation_requirements_help_text(res, ep_id):
    """
    Given an activation requirements document and an endpoint_id
    returns a string of help text for how to activate the endpoint
    """
    methods = supported_activation_methods(res)

    lines = [
        "This endpoint supports the following activation methods: ",
        ", ".join(methods).replace("_", " "),
        "\n",
        (
            "For web activation use:\n"
            "'globus endpoint activate --web {}'\n".format(ep_id)
            if "web" in methods
            else ""
        ),
        (
            "For myproxy activation use:\n"
            "'globus endpoint activate --myproxy {}'\n".format(ep_id)
            if "myproxy" in methods
            else ""
        ),
        (
            "For oauth activation use web activation:\n"
            "'globus endpoint activate --web {}'\n".format(ep_id)
            if "oauth" in methods
            else ""
        ),
        (
            "For delegate proxy activation use:\n"
            "'globus endpoint activate --delegate-proxy "
            "X.509_PEM_FILE {}'\n".format(ep_id)
            if "delegate_proxy" in methods
            else ""
        ),
        (
            "Delegate proxy activation requires an additional dependency on "
            "cryptography. See the docs for details:\n"
            "https://docs.globus.org/cli/reference/endpoint_activate/\n"
            if "delegate_proxy" in methods
            else ""
        ),
    ]

    return "".join(lines)
python
{ "resource": "" }
q13326
get_endpoint_w_server_list
train
def get_endpoint_w_server_list(endpoint_id):
    """
    A helper for handling endpoint server list lookups correctly accounting
    for various endpoint types.

    - Raises click.UsageError when used on Shares
    - Returns (<get_endpoint_response>, "S3") for S3 endpoints
    - Returns (<get_endpoint_response>, <server_list_response>) for all other
      Endpoints
    """
    client = get_client()

    endpoint = client.get_endpoint(endpoint_id)

    if endpoint["host_endpoint_id"]:  # not GCS -- this is a share endpoint
        raise click.UsageError(
            dedent(
                u"""\
                {id} ({0}) is a share and does not have servers.

                To see details of the share, use
                    globus endpoint show {id}

                To list the servers on the share's host endpoint, use
                    globus endpoint server list {host_endpoint_id}
                """
            ).format(display_name_or_cname(endpoint), **endpoint.data)
        )

    if endpoint["s3_url"]:  # not GCS -- legacy S3 endpoint type
        return (endpoint, "S3")

    else:
        return (endpoint, client.endpoint_server_list(endpoint_id))
python
{ "resource": "" }
q13327
RetryingTransferClient.retry
train
def retry(self, f, *args, **kwargs):
    """
    Retries the given function self.tries times on NetworkErrors
    """
    backoff = random.random() / 100  # 5ms on average
    for _ in range(self.tries - 1):
        try:
            return f(*args, **kwargs)
        except NetworkError:
            time.sleep(backoff)
            backoff *= 2
    return f(*args, **kwargs)
python
{ "resource": "" }
q13328
cancel_task
train
def cancel_task(all, task_id):
    """
    Executor for `globus task cancel`
    """
    if bool(all) + bool(task_id) != 1:
        raise click.UsageError(
            "You must pass EITHER the special --all flag "
            "to cancel all in-progress tasks OR a single "
            "task ID to cancel."
        )

    client = get_client()

    if all:
        from sys import maxsize

        task_ids = [
            task_row["task_id"]
            for task_row in client.task_list(
                filter="type:TRANSFER,DELETE/status:ACTIVE,INACTIVE",
                fields="task_id",
                num_results=maxsize,  # FIXME want to ask for "unlimited" set
            )
        ]
        task_count = len(task_ids)

        if not task_ids:
            raise click.ClickException("You have no in-progress tasks.")

        def cancellation_iterator():
            for i in task_ids:
                yield (i, client.cancel_task(i).data)

        def json_converter(res):
            return {
                "results": [x for i, x in cancellation_iterator()],
                "task_ids": task_ids,
            }

        def _custom_text(res):
            for (i, (task_id, data)) in enumerate(cancellation_iterator(), start=1):
                safeprint(
                    u"{} ({} of {}): {}".format(
                        task_id, i, task_count, data["message"]
                    )
                )

        # FIXME: this is kind of an abuse of formatted_print because the
        # text format and json converter are doing their own thing, not really
        # interacting with the "response data" (None). Is there a better way
        # of handling this?
        formatted_print(None, text_format=_custom_text, json_converter=json_converter)

    else:
        res = client.cancel_task(task_id)
        formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13329
task_wait
train
def task_wait(meow, heartbeat, polling_interval, timeout, task_id, timeout_exit_code):
    """
    Executor for `globus task wait`
    """
    task_wait_with_io(
        meow, heartbeat, polling_interval, timeout, task_id, timeout_exit_code
    )
python
{ "resource": "" }
q13330
one_use_option
train
def one_use_option(*args, **kwargs):
    """
    Wrapper of the click.option decorator that replaces any instances
    of the Option class with the custom OneUseOption class
    """
    # cannot force a multiple or count option to be single use
    if "multiple" in kwargs or "count" in kwargs:
        raise ValueError(
            "Internal error, one_use_option cannot be used with multiple or count."
        )

    # cannot force a non Option Parameter (argument) to be a OneUseOption
    if kwargs.get("cls"):
        raise TypeError(
            "Internal error, one_use_option cannot overwrite "
            "cls {}.".format(kwargs.get("cls"))
        )

    # use our OneUseOption class instead of a normal Option
    kwargs["cls"] = OneUseOption

    # if dealing with a flag, switch to a counting option,
    # and then assert if the count is not greater than 1 and cast to a bool
    if kwargs.get("is_flag"):
        kwargs["is_flag"] = False  # mutually exclusive with count
        kwargs["count"] = True
    # if not a flag, this option takes an argument(s), switch to a multiple
    # option, assert the len is 1, and treat the first element as the value
    else:
        kwargs["multiple"] = True

    # decorate with the click.option decorator, but with our custom kwargs
    def decorator(f):
        return click.option(*args, **kwargs)(f)

    return decorator
python
{ "resource": "" }
q13331
remove_command
train
def remove_command(parameter):
    """
    Executor for `globus config remove`
    """
    conf = get_config_obj()

    section = "cli"
    if "." in parameter:
        section, parameter = parameter.split(".", 1)

    # ensure that the section exists
    if section not in conf:
        conf[section] = {}
    # remove the value for the given parameter
    del conf[section][parameter]

    # write to disk
    safeprint("Writing updated config to {}".format(conf.filename))
    conf.write()
python
{ "resource": "" }
q13332
show_command
train
def show_command(endpoint_id, rule_id):
    """
    Executor for `globus endpoint permission show`
    """
    client = get_client()

    rule = client.get_endpoint_acl_rule(endpoint_id, rule_id)
    formatted_print(
        rule,
        text_format=FORMAT_TEXT_RECORD,
        fields=(
            ("Rule ID", "id"),
            ("Permissions", "permissions"),
            ("Shared With", _shared_with_keyfunc),
            ("Path", "path"),
        ),
    )
python
{ "resource": "" }
q13333
bookmark_rename
train
def bookmark_rename(bookmark_id_or_name, new_bookmark_name):
    """
    Executor for `globus bookmark rename`
    """
    client = get_client()
    bookmark_id = resolve_id_or_name(client, bookmark_id_or_name)["id"]

    submit_data = {"name": new_bookmark_name}
    res = client.update_bookmark(bookmark_id, submit_data)
    formatted_print(res, simple_text="Success")
python
{ "resource": "" }
q13334
show_command
train
def show_command(parameter):
    """
    Executor for `globus config show`
    """
    section = "cli"
    if "." in parameter:
        section, parameter = parameter.split(".", 1)

    value = lookup_option(parameter, section=section)

    if value is None:
        safeprint("{} not set".format(parameter))
    else:
        safeprint("{} = {}".format(parameter, value))
python
{ "resource": "" }
q13335
rename_command
train
def rename_command(source, destination):
    """
    Executor for `globus rename`
    """
    source_ep, source_path = source
    dest_ep, dest_path = destination

    if source_ep != dest_ep:
        raise click.UsageError(
            (
                "rename requires that the source and dest "
                "endpoints are the same, {} != {}"
            ).format(source_ep, dest_ep)
        )
    endpoint_id = source_ep

    client = get_client()
    autoactivate(client, endpoint_id, if_expires_in=60)

    res = client.operation_rename(endpoint_id, oldpath=source_path, newpath=dest_path)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13336
endpoint_show
train
def endpoint_show(endpoint_id):
    """
    Executor for `globus endpoint show`
    """
    client = get_client()

    res = client.get_endpoint(endpoint_id)

    formatted_print(
        res,
        text_format=FORMAT_TEXT_RECORD,
        fields=GCP_FIELDS if res["is_globus_connect"] else STANDARD_FIELDS,
    )
python
{ "resource": "" }
q13337
update_command
train
def update_command(permissions, rule_id, endpoint_id):
    """
    Executor for `globus endpoint permission update`
    """
    client = get_client()

    rule_data = assemble_generic_doc("access", permissions=permissions)

    res = client.update_endpoint_acl_rule(endpoint_id, rule_id, rule_data)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13338
PrintableErrorField._format_value
train
def _format_value(self, val):
    """
    formats a value to be good for textmode printing
    val must be unicode
    """
    name = self.name + ":"
    if not self.multiline or "\n" not in val:
        val = u"{0} {1}".format(name.ljust(self._text_prefix_len), val)
    else:
        spacer = "\n" + " " * (self._text_prefix_len + 1)
        val = u"{0}{1}{2}".format(name, spacer, spacer.join(val.split("\n")))
    return val
python
{ "resource": "" }
q13339
delete_command
train
def delete_command(endpoint_id, rule_id):
    """
    Executor for `globus endpoint permission delete`
    """
    client = get_client()

    res = client.delete_endpoint_acl_rule(endpoint_id, rule_id)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13340
transfer_command
train
def transfer_command(
    batch,
    sync_level,
    recursive,
    destination,
    source,
    label,
    preserve_mtime,
    verify_checksum,
    encrypt,
    submission_id,
    dry_run,
    delete,
    deadline,
    skip_activation_check,
    notify,
    perf_cc,
    perf_p,
    perf_pp,
    perf_udt,
):
    """
    Executor for `globus transfer`
    """
    source_endpoint, cmd_source_path = source
    dest_endpoint, cmd_dest_path = destination

    if recursive and batch:
        raise click.UsageError(
            (
                "You cannot use --recursive in addition to --batch. "
                "Instead, use --recursive on lines of --batch input "
                "which need it"
            )
        )

    if (cmd_source_path is None or cmd_dest_path is None) and (not batch):
        raise click.UsageError(
            "transfer requires either SOURCE_PATH and DEST_PATH or --batch"
        )

    # because python can't handle multiple **kwargs expansions in a single
    # call, we need to get a little bit clever
    # both the performance options (of which there are a few), and the
    # notification options (also there are a few) have elements which should
    # be omitted in some cases
    # notify comes to us clean, perf opts need more care
    # put them together into a dict before passing to TransferData
    kwargs = {}
    perf_opts = dict(
        (k, v)
        for (k, v) in dict(
            perf_cc=perf_cc, perf_p=perf_p, perf_pp=perf_pp, perf_udt=perf_udt
        ).items()
        if v is not None
    )
    kwargs.update(perf_opts)
    kwargs.update(notify)

    client = get_client()
    transfer_data = TransferData(
        client,
        source_endpoint,
        dest_endpoint,
        label=label,
        sync_level=sync_level,
        verify_checksum=verify_checksum,
        preserve_timestamp=preserve_mtime,
        encrypt_data=encrypt,
        submission_id=submission_id,
        delete_destination_extra=delete,
        deadline=deadline,
        skip_activation_check=skip_activation_check,
        **kwargs
    )

    if batch:

        @click.command()
        @click.option("--recursive", "-r", is_flag=True)
        @click.argument("source_path", type=TaskPath(base_dir=cmd_source_path))
        @click.argument("dest_path", type=TaskPath(base_dir=cmd_dest_path))
        def process_batch_line(dest_path, source_path, recursive):
            """
            Parse a line of batch input and turn it into a transfer submission
            item.
            """
            transfer_data.add_item(
                str(source_path), str(dest_path), recursive=recursive
            )

        shlex_process_stdin(
            process_batch_line,
            (
                "Enter transfers, line by line, as\n\n"
                "    [--recursive] SOURCE_PATH DEST_PATH\n"
            ),
        )
    else:
        transfer_data.add_item(cmd_source_path, cmd_dest_path, recursive=recursive)

    if dry_run:
        formatted_print(
            transfer_data,
            response_key="DATA",
            fields=(
                ("Source Path", "source_path"),
                ("Dest Path", "destination_path"),
                ("Recursive", "recursive"),
            ),
        )
        # exit safely
        return

    # autoactivate after parsing all args and putting things together
    # skip this if skip-activation-check is given
    if not skip_activation_check:
        autoactivate(client, source_endpoint, if_expires_in=60)
        autoactivate(client, dest_endpoint, if_expires_in=60)

    res = client.submit_transfer(transfer_data)
    formatted_print(
        res,
        text_format=FORMAT_TEXT_RECORD,
        fields=(("Message", "message"), ("Task ID", "task_id")),
    )
python
{ "resource": "" }
q13341
endpoint_delete
train
def endpoint_delete(endpoint_id):
    """
    Executor for `globus endpoint delete`
    """
    client = get_client()
    res = client.delete_endpoint(endpoint_id)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13342
bookmark_delete
train
def bookmark_delete(bookmark_id_or_name):
    """
    Executor for `globus bookmark delete`
    """
    client = get_client()
    bookmark_id = resolve_id_or_name(client, bookmark_id_or_name)["id"]

    res = client.delete_bookmark(bookmark_id)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13343
mkdir_command
train
def mkdir_command(endpoint_plus_path):
    """
    Executor for `globus mkdir`
    """
    endpoint_id, path = endpoint_plus_path

    client = get_client()
    autoactivate(client, endpoint_id, if_expires_in=60)

    res = client.operation_mkdir(endpoint_id, path=path)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13344
init_command
train
def init_command(default_output_format, default_myproxy_username):
    """
    Executor for `globus config init`
    """
    # now handle the output format, requires a little bit more care
    # first, prompt if it isn't given, but be clear that we have a sensible
    # default if they don't set it
    # then, make sure that if it is given, it's a valid format (discard
    # otherwise)
    # finally, set it only if given and valid
    if not default_output_format:
        safeprint(
            textwrap.fill(
                'This must be one of "json" or "text". Other values will be '
                "ignored. ENTER to skip."
            )
        )
        default_output_format = (
            click.prompt(
                "Default CLI output format (cli.output_format)", default="text"
            )
            .strip()
            .lower()
        )
        if default_output_format not in ("json", "text"):
            default_output_format = None

    if not default_myproxy_username:
        safeprint(textwrap.fill("ENTER to skip."))
        default_myproxy_username = click.prompt(
            "Default myproxy username (cli.default_myproxy_username)",
            default="",
            show_default=False,
        ).strip()

    # write to disk
    safeprint(
        "\n\nWriting updated config to {0}".format(os.path.expanduser("~/.globus.cfg"))
    )
    write_option(OUTPUT_FORMAT_OPTNAME, default_output_format)
    write_option(MYPROXY_USERNAME_OPTNAME, default_myproxy_username)
python
{ "resource": "" }
q13345
detect_and_decorate
train
def detect_and_decorate(decorator, args, kwargs):
    """
    Helper for applying a decorator when it is applied directly, and also
    applying it when it is given arguments and then applied to a function.
    """
    # special behavior when invoked with only one non-keyword argument: act as
    # a normal decorator, decorating and returning that argument with
    # click.option
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        return decorator(args[0])

    # if we're not doing that, we should see no positional args
    # the alternative behavior is to fall through and discard *args, but this
    # will probably confuse someone in the future when their arguments are
    # silently discarded
    elif len(args) != 0:
        raise ValueError("this decorator cannot take positional args")

    # final case: got 0 or more kwargs, no positionals
    # do the function-which-returns-a-decorator dance to produce a
    # new decorator based on the arguments given
    else:

        def inner_decorator(f):
            return decorator(f, **kwargs)

        return inner_decorator
python
{ "resource": "" }
q13346
list_command
train
def list_command(endpoint_id):
    """
    Executor for `globus endpoint permission list`
    """
    client = get_client()

    rules = client.endpoint_acl_list(endpoint_id)

    resolved_ids = LazyIdentityMap(
        x["principal"] for x in rules if x["principal_type"] == "identity"
    )

    def principal_str(rule):
        principal = rule["principal"]
        if rule["principal_type"] == "identity":
            username = resolved_ids.get(principal)
            return username or principal
        elif rule["principal_type"] == "group":
            return (u"https://app.globus.org/groups/{}").format(principal)
        else:
            principal = rule["principal_type"]

        return principal

    formatted_print(
        rules,
        fields=[
            ("Rule ID", "id"),
            ("Permissions", "permissions"),
            ("Shared With", principal_str),
            ("Path", "path"),
        ],
    )
python
{ "resource": "" }
q13347
bookmark_list
train
def bookmark_list():
    """
    Executor for `globus bookmark list`
    """
    client = get_client()

    bookmark_iterator = client.bookmark_list()

    def get_ep_name(item):
        ep_id = item["endpoint_id"]
        try:
            ep_doc = client.get_endpoint(ep_id)
            return display_name_or_cname(ep_doc)
        except TransferAPIError as err:
            if err.code == "EndpointDeleted":
                return "[DELETED ENDPOINT]"
            else:
                raise err

    formatted_print(
        bookmark_iterator,
        fields=[
            ("Name", "name"),
            ("Bookmark ID", "id"),
            ("Endpoint ID", "endpoint_id"),
            ("Endpoint Name", get_ep_name),
            ("Path", "path"),
        ],
        response_key="DATA",
        json_converter=iterable_response_to_dict,
    )
python
{ "resource": "" }
q13348
rm_command
train
def rm_command(
    ignore_missing,
    star_silent,
    recursive,
    enable_globs,
    endpoint_plus_path,
    label,
    submission_id,
    dry_run,
    deadline,
    skip_activation_check,
    notify,
    meow,
    heartbeat,
    polling_interval,
    timeout,
    timeout_exit_code,
):
    """
    Executor for `globus rm`
    """
    endpoint_id, path = endpoint_plus_path

    client = get_client()

    # attempt to activate unless --skip-activation-check is given
    if not skip_activation_check:
        autoactivate(client, endpoint_id, if_expires_in=60)

    delete_data = DeleteData(
        client,
        endpoint_id,
        label=label,
        recursive=recursive,
        ignore_missing=ignore_missing,
        submission_id=submission_id,
        deadline=deadline,
        skip_activation_check=skip_activation_check,
        interpret_globs=enable_globs,
        **notify
    )

    if not star_silent and enable_globs and path.endswith("*"):
        # not intuitive, but `click.confirm(abort=True)` prints to stdout
        # unnecessarily, which we don't really want...
        # only do this check if stderr is a pty
        if (
            err_is_terminal()
            and term_is_interactive()
            and not click.confirm(
                'Are you sure you want to delete all files matching "{}"?'.format(
                    path
                ),
                err=True,
            )
        ):
            safeprint("Aborted.", write_to_stderr=True)
            click.get_current_context().exit(1)
    delete_data.add_item(path)

    if dry_run:
        formatted_print(delete_data, response_key="DATA", fields=[("Path", "path")])
        # exit safely
        return

    # Print task submission to stderr so that `-Fjson` is still correctly
    # respected, as it will be by `task wait`
    res = client.submit_delete(delete_data)
    task_id = res["task_id"]
    safeprint(
        'Delete task submitted under ID "{}"'.format(task_id), write_to_stderr=True
    )

    # do a `task wait` equivalent, including printing and correct exit status
    task_wait_with_io(
        meow,
        heartbeat,
        polling_interval,
        timeout,
        task_id,
        timeout_exit_code,
        client=client,
    )
python
{ "resource": "" }
q13349
do_link_auth_flow
train
def do_link_auth_flow(session_params=None, force_new_client=False):
    """
    Prompts the user with a link to authenticate with globus auth
    and authorize the CLI to act on their behalf.
    """
    session_params = session_params or {}

    # get the ConfidentialApp client object
    auth_client = internal_auth_client(
        requires_instance=True, force_new_client=force_new_client
    )

    # start the Confidential App Grant flow
    auth_client.oauth2_start_flow(
        redirect_uri=auth_client.base_url + "v2/web/auth-code",
        refresh_tokens=True,
        requested_scopes=SCOPES,
    )

    # prompt
    additional_params = {"prompt": "login"}
    additional_params.update(session_params)
    linkprompt = "Please authenticate with Globus here"
    safeprint(
        "{0}:\n{1}\n{2}\n{1}\n".format(
            linkprompt,
            "-" * len(linkprompt),
            auth_client.oauth2_get_authorize_url(additional_params=additional_params),
        )
    )

    # come back with auth code
    auth_code = click.prompt("Enter the resulting Authorization Code here").strip()

    # finish auth flow
    exchange_code_and_store_config(auth_client, auth_code)
    return True
python
{ "resource": "" }
q13350
exchange_code_and_store_config
train
def exchange_code_and_store_config(auth_client, auth_code):
    """
    Finishes auth flow after code is gotten from command line or local server.
    Exchanges code for tokens and gets user info from auth.
    Stores tokens and user info in config.
    """
    # do a token exchange with the given code
    tkn = auth_client.oauth2_exchange_code_for_tokens(auth_code)
    tkn = tkn.by_resource_server

    # extract access tokens from final response
    transfer_at = tkn["transfer.api.globus.org"]["access_token"]
    transfer_at_expires = tkn["transfer.api.globus.org"]["expires_at_seconds"]
    transfer_rt = tkn["transfer.api.globus.org"]["refresh_token"]
    auth_at = tkn["auth.globus.org"]["access_token"]
    auth_at_expires = tkn["auth.globus.org"]["expires_at_seconds"]
    auth_rt = tkn["auth.globus.org"]["refresh_token"]

    # revoke any existing tokens
    for token_opt in (
        TRANSFER_RT_OPTNAME,
        TRANSFER_AT_OPTNAME,
        AUTH_RT_OPTNAME,
        AUTH_AT_OPTNAME,
    ):
        token = lookup_option(token_opt)
        if token:
            auth_client.oauth2_revoke_token(token)

    # write new tokens to config
    write_option(TRANSFER_RT_OPTNAME, transfer_rt)
    write_option(TRANSFER_AT_OPTNAME, transfer_at)
    write_option(TRANSFER_AT_EXPIRES_OPTNAME, transfer_at_expires)
    write_option(AUTH_RT_OPTNAME, auth_rt)
    write_option(AUTH_AT_OPTNAME, auth_at)
    write_option(AUTH_AT_EXPIRES_OPTNAME, auth_at_expires)
python
{ "resource": "" }
q13351
endpoint_search
train
def endpoint_search(filter_fulltext, filter_owner_id, filter_scope):
    """
    Executor for `globus endpoint search`
    """
    if filter_scope == "all" and not filter_fulltext:
        raise click.UsageError(
            "When searching all endpoints (--filter-scope=all, the default), "
            "a full-text search filter is required. Other scopes (e.g. "
            "--filter-scope=recently-used) may be used without specifying "
            "an additional filter."
        )

    client = get_client()

    owner_id = filter_owner_id
    if owner_id:
        owner_id = maybe_lookup_identity_id(owner_id)

    search_iterator = client.endpoint_search(
        filter_fulltext=filter_fulltext,
        filter_scope=filter_scope,
        filter_owner_id=owner_id,
    )

    formatted_print(
        search_iterator,
        fields=ENDPOINT_LIST_FIELDS,
        json_converter=iterable_response_to_dict,
    )
python
{ "resource": "" }
q13352
endpoint_update
train
def endpoint_update(**kwargs):
    """
    Executor for `globus endpoint update`
    """
    # validate params. Requires a get call to check the endpoint type
    client = get_client()
    endpoint_id = kwargs.pop("endpoint_id")
    get_res = client.get_endpoint(endpoint_id)

    if get_res["host_endpoint_id"]:
        endpoint_type = "shared"
    elif get_res["is_globus_connect"]:
        endpoint_type = "personal"
    elif get_res["s3_url"]:
        endpoint_type = "s3"
    else:
        endpoint_type = "server"
    validate_endpoint_create_and_update_params(
        endpoint_type, get_res["subscription_id"], kwargs
    )

    # make the update
    ep_doc = assemble_generic_doc("endpoint", **kwargs)
    res = client.update_endpoint(endpoint_id, ep_doc)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13353
safeprint
train
def safeprint(message, write_to_stderr=False, newline=True):
    """
    Wrapper around click.echo used to encapsulate its functionality.
    Also protects against EPIPE during click.echo calls, as this can happen
    normally in piped commands when the consumer closes before the producer.
    """
    try:
        click.echo(message, nl=newline, err=write_to_stderr)
    except IOError as err:
        # compare with `==`, not `is`: identity comparison of ints is a
        # CPython implementation detail and not guaranteed to hold
        if err.errno == errno.EPIPE:
            pass
        else:
            raise
python
{ "resource": "" }
q13354
common_options
train
def common_options(*args, **kwargs):
    """
    This is a multi-purpose decorator for applying a "base" set of options
    shared by all commands.
    It can be applied either directly, or given keyword arguments.

    Usage:

    >>> @common_options
    >>> def mycommand(abc, xyz):
    >>>     ...

    or

    >>> @common_options(no_format_option=True)
    >>> def mycommand(abc, xyz):
    >>>     ...
    """

    def decorate(f, **kwargs):
        """
        Work of actually decorating a function -- wrapped in here because we
        want to dispatch depending on how `common_options` is invoked
        """
        f = version_option(f)
        f = debug_option(f)
        f = verbose_option(f)
        f = click.help_option("-h", "--help")(f)

        # if the format option is being allowed, it needs to be applied to `f`
        if not kwargs.get("no_format_option"):
            f = format_option(f)

        # if the --map-http-status option is being allowed, ...
        if not kwargs.get("no_map_http_status_option"):
            f = map_http_status_option(f)

        return f

    return detect_and_decorate(decorate, args, kwargs)
python
{ "resource": "" }
q13355
validate_endpoint_create_and_update_params
train
def validate_endpoint_create_and_update_params(endpoint_type, managed, params):
    """
    Given an endpoint type of "shared", "server", or "personal" and option
    values, confirms the option values are valid for the given endpoint.
    """
    # options only allowed for GCS endpoints
    if endpoint_type != "server":
        # catch params with two option flags
        if params["public"] is False:
            raise click.UsageError(
                "Option --private only allowed for Globus Connect Server endpoints"
            )
        # catch any params only usable with GCS
        for option in [
            "public",
            "myproxy_dn",
            "myproxy_server",
            "oauth_server",
            "location",
            "network_use",
            "max_concurrency",
            "preferred_concurrency",
            "max_parallelism",
            "preferred_parallelism",
        ]:
            if params[option] is not None:
                raise click.UsageError(
                    "Option --{} can only be used with Globus Connect Server "
                    "endpoints".format(option.replace("_", "-"))
                )

    # if the endpoint was not previously managed, and is not being passed
    # a subscription id, it cannot use managed endpoint only fields
    if (not managed) and not (params["subscription_id"] or params["managed"]):
        for option in [
            "network_use",
            "max_concurrency",
            "preferred_concurrency",
            "max_parallelism",
            "preferred_parallelism",
        ]:
            if params[option] is not None:
                raise click.UsageError(
                    "Option --{} can only be used with managed "
                    "endpoints".format(option.replace("_", "-"))
                )

    # because the Transfer service doesn't do network use level updates in a
    # patchy way, *both* endpoint `POST`s *and* `PUT`s must either use
    # - `network_use='custom'` with *every* other parameter specified (which
    #   is validated by the service), or
    # - a preset/absent `network_use` with *no* other parameter specified
    #   (which is *not* validated by the service; in this case, Transfer will
    #   accept but ignore the other parameters if given, leading to user
    #   confusion if we don't do this validation check)
    custom_network_use_params = (
        "max_concurrency",
        "preferred_concurrency",
        "max_parallelism",
        "preferred_parallelism",
    )
    if params["network_use"] != "custom":
        for option in custom_network_use_params:
            if params[option] is not None:
                raise click.UsageError(
                    "The {} options require you use --network-use=custom.".format(
                        "/".join(
                            "--" + option.replace("_", "-")
                            for option in custom_network_use_params
                        )
                    )
                )

    # make sure --(no-)managed and --subscription-id are mutually exclusive
    # if --managed given, pass DEFAULT as the subscription_id
    # if --no-managed given, pass None
    managed_flag = params.get("managed")
    if managed_flag is not None:
        params.pop("managed")
        if managed_flag:
            params["subscription_id"] = params.get("subscription_id") or "DEFAULT"
        else:
            if params.get("subscription_id"):
                raise click.UsageError(
                    "Cannot specify --subscription-id and use the --no-managed "
                    "option."
                )
            params["subscription_id"] = EXPLICIT_NULL

    # make sure --no-default-directory and --default-directory are mutually
    # exclusive
    # if --no-default-directory given, pass an EXPLICIT_NULL as the default
    # directory
    if params.get("no_default_directory"):
        if params.get("default_directory"):
            raise click.UsageError(
                "--no-default-directory and --default-directory are mutually "
                "exclusive."
            )
        else:
            params["default_directory"] = EXPLICIT_NULL
    params.pop("no_default_directory")
python
{ "resource": "" }
q13356
task_id_arg
train
def task_id_arg(*args, **kwargs):
    """
    This is the `TASK_ID` argument consumed by many Transfer Task operations.
    It accepts a toggle on whether or not it is required.

    Usage:

    >>> @task_id_arg
    >>> def command_func(task_id):
    >>>     ...

    or

    >>> @task_id_arg(required=False)
    >>> def command_func(task_id):
    >>>     ...

    By default, the task ID is made required; pass `required=False` to the
    decorator arguments to make it optional.
    """

    def inner_decorator(f, required=True):
        f = click.argument("TASK_ID", required=required)(f)
        return f

    return detect_and_decorate(inner_decorator, args, kwargs)
python
{ "resource": "" }
q13357
task_submission_options
train
def task_submission_options(f):
    """
    Options shared by both transfer and delete task submission
    """

    def notify_opt_callback(ctx, param, value):
        """
        Parse --notify
        - "" is the same as "off"
        - parse by lowercase, comma-split, strip spaces
        - "off,x" is invalid for any x
        - "on,x" is valid for any valid x (other than "off")
        - "failed", "succeeded", "inactive" are normal vals

        In code, produces True, False, or a set
        """
        # if no value was set, don't set any explicit options
        # the API default is "everything on"
        if value is None:
            return {}

        value = value.lower()
        value = [x.strip() for x in value.split(",")]
        # [""] is what you'll get if value is "" to start with
        # special-case it into "off", which helps avoid surprising scripts
        # which take a notification settings as inputs and build --notify
        if value == [""]:
            value = ["off"]

        off = "off" in value
        on = "on" in value
        # set-ize it -- duplicates are fine
        vals = set([x for x in value if x not in ("off", "on")])

        if (vals or on) and off:
            raise click.UsageError('--notify cannot accept "off" and another value')

        allowed_vals = set(("on", "succeeded", "failed", "inactive"))
        if not vals <= allowed_vals:
            raise click.UsageError(
                "--notify received at least one invalid value among {}".format(
                    list(vals)
                )
            )

        # return the notification options to send!
        # on means don't set anything (default)
        if on:
            return {}
        # off means turn off everything
        if off:
            return {
                "notify_on_succeeded": False,
                "notify_on_failed": False,
                "notify_on_inactive": False,
            }
        # otherwise, return the exact set of values seen
        else:
            return {
                "notify_on_succeeded": "succeeded" in vals,
                "notify_on_failed": "failed" in vals,
                "notify_on_inactive": "inactive" in vals,
            }

    f = click.option(
        "--dry-run",
        is_flag=True,
        help="Don't actually submit the task, print submission data instead",
    )(f)
    f = click.option(
        "--notify",
        callback=notify_opt_callback,
        help=(
            "Comma separated list of task events which notify by email. "
            "'on' and 'off' may be used to enable or disable notifications "
            "for all event types. Otherwise, use 'succeeded', 'failed', or "
            "'inactive'"
        ),
    )(f)
    f = click.option(
        "--submission-id",
        help=(
            "Task submission ID, as generated by `globus task "
            "generate-submission-id`. Used for safe resubmission in the "
            "presence of network failures."
        ),
    )(f)
    f = click.option("--label", default=None, help="Set a label for this task.")(f)
    f = click.option(
        "--deadline",
        default=None,
        type=ISOTimeType(),
        help="Set a deadline for this to be canceled if not completed by.",
    )(f)
    f = click.option(
        "--skip-activation-check",
        is_flag=True,
        help="Submit the task even if the endpoint(s) aren't currently activated.",
    )(f)

    return f
python
{ "resource": "" }
q13358
delete_and_rm_options
train
def delete_and_rm_options(*args, **kwargs):
    """
    Options which apply both to `globus delete` and `globus rm`
    """

    def inner_decorator(f, supports_batch=True, default_enable_globs=False):
        f = click.option(
            "--recursive", "-r", is_flag=True, help="Recursively delete dirs"
        )(f)
        f = click.option(
            "--ignore-missing",
            "-f",
            is_flag=True,
            help="Don't throw errors if the file or dir is absent",
        )(f)
        f = click.option(
            "--star-silent",
            "--unsafe",
            "star_silent",
            is_flag=True,
            help=(
                'Don\'t prompt when the trailing character is a "*".'
                + (" Implicit in --batch" if supports_batch else "")
            ),
        )(f)
        f = click.option(
            "--enable-globs/--no-enable-globs",
            is_flag=True,
            default=default_enable_globs,
            show_default=True,
            help=(
                "Enable expansion of *, ?, and [ ] characters in the last "
                "component of file paths, unless they are escaped with "
                "a preceding backslash, \\"
            ),
        )(f)
        if supports_batch:
            f = click.option(
                "--batch",
                is_flag=True,
                help=(
                    "Accept a batch of paths on stdin (i.e. run in "
                    "batch mode). Uses ENDPOINT_ID as passed on the "
                    "commandline. Any commandline PATH given will be used "
                    "as a prefix to all paths given"
                ),
            )(f)

        return f

    return detect_and_decorate(inner_decorator, args, kwargs)
python
{ "resource": "" }
q13359
server_add_and_update_opts
train
def server_add_and_update_opts(*args, **kwargs):
    """
    Shared collection of options for `globus endpoint server add` and
    `globus endpoint server update`.
    Accepts a toggle to know if it's being used as `add` or `update`.

    Usage:

    >>> @server_add_and_update_opts
    >>> def command_func(subject, port, scheme, hostname):
    >>>     ...

    or

    >>> @server_add_and_update_opts(add=True)
    >>> def command_func(subject, port, scheme, hostname):
    >>>     ...
    """

    def port_range_callback(ctx, param, value):
        if not value:
            return None

        value = value.lower().strip()
        if value == "unspecified":
            return None, None

        if value == "unrestricted":
            return 1024, 65535

        try:
            lower, upper = map(int, value.split("-"))
        except ValueError:  # too many/few values from split or non-integer(s)
            raise click.BadParameter(
                "must specify as 'unspecified', "
                "'unrestricted', or as range separated "
                "by a hyphen (e.g. '50000-51000')"
            )

        if not 1024 <= lower <= 65535 or not 1024 <= upper <= 65535:
            raise click.BadParameter("must be within the 1024-65535 range")

        return (lower, upper) if lower <= upper else (upper, lower)

    def inner_decorator(f, add=False):
        f = click.option("--hostname", required=add, help="Server Hostname.")(f)

        default_scheme = "gsiftp" if add else None
        f = click.option(
            "--scheme",
            help="Scheme for the Server.",
            type=CaseInsensitiveChoice(("gsiftp", "ftp")),
            default=default_scheme,
            show_default=add,
        )(f)

        default_port = 2811 if add else None
        f = click.option(
            "--port",
            help="Port for Globus control channel connections.",
            type=int,
            default=default_port,
            show_default=add,
        )(f)

        f = click.option(
            "--subject",
            help=(
                "Subject of the X509 Certificate of the server. When "
                "unspecified, the CN must match the server hostname."
            ),
        )(f)

        for adjective, our_preposition, their_preposition in [
            ("incoming", "to", "from"),
            ("outgoing", "from", "to"),
        ]:
            f = click.option(
                "--{}-data-ports".format(adjective),
                callback=port_range_callback,
                help="Indicate to firewall administrators at other sites how to "
                "allow {} traffic {} this server {} their own. Specify as "
                "either 'unspecified', 'unrestricted', or as range of "
                "ports separated by a hyphen (e.g. '50000-51000') within "
                "the 1024-65535 range.".format(
                    adjective, our_preposition, their_preposition
                ),
            )(f)

        return f

    return detect_and_decorate(inner_decorator, args, kwargs)
python
{ "resource": "" }
q13360
endpoint_deactivate
train
def endpoint_deactivate(endpoint_id): """ Executor for `globus endpoint deactivate` """ client = get_client() res = client.endpoint_deactivate(endpoint_id) formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13361
LazyIdentityMap._lookup_identity_names
train
def _lookup_identity_names(self):
    """
    Batch resolve identities to usernames, populating self._resolved_map,
    a dict mapping IDs to usernames
    """
    id_batch_size = 100

    # fetch in batches of id_batch_size, store in a dict
    ac = get_auth_client()
    self._resolved_map = {}
    for i in range(0, len(self.identity_ids), id_batch_size):
        chunk = self.identity_ids[i : i + id_batch_size]
        resolved_result = ac.get_identities(ids=chunk)
        for x in resolved_result["identities"]:
            self._resolved_map[x["id"]] = x["username"]
python
{ "resource": "" }
q13362
internal_auth_client
train
def internal_auth_client(requires_instance=False, force_new_client=False):
    """
    Looks up the values for this CLI's Instance Client in config

    If none exists and requires_instance is True or force_new_client is True,
    registers a new Instance Client with Globus Auth

    If none exists and requires_instance is False, defaults to a Native Client
    for backwards compatibility

    Returns either a NativeAppAuthClient or a ConfidentialAppAuthClient
    """
    client_id = lookup_option(CLIENT_ID_OPTNAME)
    client_secret = lookup_option(CLIENT_SECRET_OPTNAME)
    template_id = lookup_option(TEMPLATE_ID_OPTNAME) or DEFAULT_TEMPLATE_ID
    template_client = internal_native_client()
    existing = client_id and client_secret

    # if we are forcing a new client, delete any existing client
    if force_new_client and existing:
        existing_client = globus_sdk.ConfidentialAppAuthClient(client_id, client_secret)
        try:
            existing_client.delete("/v2/api/clients/{}".format(client_id))

        # if the client secret has been invalidated or the client has
        # already been removed, we continue on
        except globus_sdk.exc.AuthAPIError:
            pass

    # if we require a new client to be made
    if force_new_client or (requires_instance and not existing):
        # register a new instance client with auth
        body = {"client": {"template_id": template_id, "name": "Globus CLI"}}
        res = template_client.post("/v2/api/clients", json_body=body)

        # get values and write to config
        credential_data = res["included"]["client_credential"]
        client_id = credential_data["client"]
        client_secret = credential_data["secret"]
        write_option(CLIENT_ID_OPTNAME, client_id)
        write_option(CLIENT_SECRET_OPTNAME, client_secret)

        return globus_sdk.ConfidentialAppAuthClient(
            client_id, client_secret, app_name="Globus CLI"
        )

    # if we already have a client, just return it
    elif existing:
        return globus_sdk.ConfidentialAppAuthClient(
            client_id, client_secret, app_name="Globus CLI"
        )

    # fall back to a native client to not break old logins
    # TODO: eventually remove this behavior
    else:
        return template_client
python
{ "resource": "" }
q13363
exit_with_mapped_status
train
def exit_with_mapped_status(http_status): """ Given an HTTP Status, exit with either an error status of 1 or the status mapped by what we were given. """ # get the mapping by looking up the state and getting the mapping attr mapping = click.get_current_context().ensure_object(CommandState).http_status_map # if there is a mapped exit code, exit with that. Otherwise, exit 1 if http_status in mapping: sys.exit(mapping[http_status]) else: sys.exit(1)
python
{ "resource": "" }
q13364
session_hook
train
def session_hook(exception):
    """
    Expects an exception with an authorization_parameters field in its raw_json
    """
    safeprint(
        "The resource you are trying to access requires you to "
        "re-authenticate with specific identities."
    )

    params = exception.raw_json["authorization_parameters"]
    message = params.get("session_message")
    if message:
        safeprint("message: {}".format(message))

    identities = params.get("session_required_identities")
    if identities:
        id_str = " ".join(identities)
        safeprint(
            "Please run\n\n"
            "    globus session update {}\n\n"
            "to re-authenticate with the required identities".format(id_str)
        )
    else:
        safeprint(
            'Please use "globus session update" to re-authenticate '
            "with specific identities"
        )

    exit_with_mapped_status(exception.http_status)
python
{ "resource": "" }
q13365
custom_except_hook
train
def custom_except_hook(exc_info):
    """
    A custom excepthook to present python errors produced by the CLI.
    We don't want to show end users big scary stacktraces if they aren't python
    programmers, so slim it down to some basic info. We keep a "DEBUGMODE" env
    variable kicking around to let us turn on stacktraces if we ever need them.

    Additionally, does global suppression of EPIPE errors, which often occur
    when a python command is piped to a consumer like `head` which closes its
    input stream before python has sent all of its output.

    DANGER: There is a (small) risk that this will bite us if there are EPIPE
    errors produced within the Globus SDK. We should keep an eye on this
    possibility, as it may demand more sophisticated handling of EPIPE.
    Possible TODO item to reduce this risk: inspect the exception and only hide
    EPIPE if it comes from within the globus_cli package.
    """
    exception_type, exception, traceback = exc_info

    # check if we're in debug mode, and run the real excepthook if we are
    ctx = click.get_current_context()
    state = ctx.ensure_object(CommandState)
    if state.debug:
        sys.excepthook(exception_type, exception, traceback)

    # we're not in debug mode, do custom handling
    else:
        # if it's a click exception, re-raise as original -- Click's main
        # execution context will handle pretty-printing
        if isinstance(exception, click.ClickException):
            reraise(exception_type, exception, traceback)

        # catch any session errors to give helpful instructions
        # on how to use globus session update
        elif (
            isinstance(exception, exc.GlobusAPIError)
            and exception.raw_json
            and "authorization_parameters" in exception.raw_json
        ):
            session_hook(exception)

        # handle the Globus-raised errors with our special hooks
        # these will present the output (on stderr) as JSON
        elif isinstance(exception, exc.TransferAPIError):
            if exception.code == "ClientError.AuthenticationFailed":
                authentication_hook(exception)
            else:
                transferapi_hook(exception)

        elif isinstance(exception, exc.AuthAPIError):
            if exception.code == "UNAUTHORIZED":
                authentication_hook(exception)
            # invalid_grant occurs when the user's refresh tokens are not valid
            elif exception.message == "invalid_grant":
                invalidrefresh_hook(exception)
            else:
                authapi_hook(exception)

        elif isinstance(exception, exc.GlobusAPIError):
            globusapi_hook(exception)

        # specific checks fell through -- now check if it's any kind of
        # GlobusError
        elif isinstance(exception, exc.GlobusError):
            globus_generic_hook(exception)

        # not a GlobusError, not a ClickException -- something like ValueError
        # or NotImplementedError bubbled all the way up here: just print it
        # out, basically
        else:
            safeprint(u"{}: {}".format(exception_type.__name__, exception))
            sys.exit(1)
python
{ "resource": "" }
q13366
task_event_list
train
def task_event_list(task_id, limit, filter_errors, filter_non_errors):
    """
    Executor for `globus task-event-list`
    """
    client = get_client()

    # cannot filter by both errors and non-errors
    if filter_errors and filter_non_errors:
        raise click.UsageError("Cannot filter by both errors and non-errors")
    elif filter_errors:
        filter_string = "is_error:1"
    elif filter_non_errors:
        filter_string = "is_error:0"
    else:
        filter_string = ""

    event_iterator = client.task_event_list(
        task_id, num_results=limit, filter=filter_string
    )

    def squashed_json_details(x):
        is_json = False
        try:
            loaded = json.loads(x["details"])
            is_json = True
        except ValueError:
            loaded = x["details"]

        if is_json:
            return json.dumps(loaded, separators=(",", ":"), sort_keys=True)
        else:
            return loaded.replace("\n", "\\n")

    formatted_print(
        event_iterator,
        fields=(
            ("Time", "time"),
            ("Code", "code"),
            ("Is Error", "is_error"),
            ("Details", squashed_json_details),
        ),
        json_converter=iterable_response_to_dict,
    )
python
{ "resource": "" }
q13367
role_delete
train
def role_delete(role_id, endpoint_id): """ Executor for `globus endpoint role delete` """ client = get_client() res = client.delete_endpoint_role(endpoint_id, role_id) formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
python
{ "resource": "" }
q13368
ls_command
train
def ls_command(
    endpoint_plus_path,
    recursive_depth_limit,
    recursive,
    long_output,
    show_hidden,
    filter_val,
):
    """
    Executor for `globus ls`
    """
    endpoint_id, path = endpoint_plus_path

    # do autoactivation before the `ls` call so that recursive invocations
    # won't do this repeatedly, and won't have to instantiate new clients
    client = get_client()
    autoactivate(client, endpoint_id, if_expires_in=60)

    # create the query parameters to send to operation_ls
    ls_params = {"show_hidden": int(show_hidden)}
    if path:
        ls_params["path"] = path
    if filter_val:
        # this char has special meaning in the LS API's filter clause
        # can't be part of the pattern (but we don't support globbing across
        # dir structures anyway)
        if "/" in filter_val:
            raise click.UsageError('--filter cannot contain "/"')
        # format into a simple filter clause which operates on filenames
        ls_params["filter"] = "name:{}".format(filter_val)

    # get the `ls` result
    if recursive:
        # NOTE:
        # --recursive and --filter have an interplay that some users may find
        # surprising
        # if we're asked to change or "improve" the behavior in the future, we
        # could do so with "type:dir" or "type:file" filters added in, and
        # potentially work out some viable behavior based on what people want
        res = client.recursive_operation_ls(
            endpoint_id, depth=recursive_depth_limit, **ls_params
        )
    else:
        res = client.operation_ls(endpoint_id, **ls_params)

    def cleaned_item_name(item):
        return item["name"] + ("/" if item["type"] == "dir" else "")

    # and then print it, per formatting rules
    formatted_print(
        res,
        fields=[
            ("Permissions", "permissions"),
            ("User", "user"),
            ("Group", "group"),
            ("Size", "size"),
            ("Last Modified", "last_modified"),
            ("File Type", "type"),
            ("Filename", cleaned_item_name),
        ],
        simple_text=(
            None
            if long_output or is_verbose() or not outformat_is_text()
            else "\n".join(cleaned_item_name(x) for x in res)
        ),
        json_converter=iterable_response_to_dict,
    )
python
{ "resource": "" }
q13369
generate_submission_id
train
def generate_submission_id(): """ Executor for `globus task generate-submission-id` """ client = get_client() res = client.get_submission_id() formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="value")
python
{ "resource": "" }
q13370
create_command
train
def create_command( principal, permissions, endpoint_plus_path, notify_email, notify_message ): """ Executor for `globus endpoint permission create` """ if not principal: raise click.UsageError("A security principal is required for this command") endpoint_id, path = endpoint_plus_path principal_type, principal_val = principal client = get_client() if principal_type == "identity": principal_val = maybe_lookup_identity_id(principal_val) if not principal_val: raise click.UsageError( "Identity does not exist. " "Use --provision-identity to auto-provision an identity." ) elif principal_type == "provision-identity": principal_val = maybe_lookup_identity_id(principal_val, provision=True) principal_type = "identity" if not notify_email: notify_message = None rule_data = assemble_generic_doc( "access", permissions=permissions, principal=principal_val, principal_type=principal_type, path=path, notify_email=notify_email, notify_message=notify_message, ) res = client.add_endpoint_acl_rule(endpoint_id, rule_data) formatted_print( res, text_format=FORMAT_TEXT_RECORD, fields=[("Message", "message"), ("Rule ID", "access_id")], )
python
{ "resource": "" }
q13371
bookmark_show
train
def bookmark_show(bookmark_id_or_name): """ Executor for `globus bookmark show` """ client = get_client() res = resolve_id_or_name(client, bookmark_id_or_name) formatted_print( res, text_format=FORMAT_TEXT_RECORD, fields=( ("ID", "id"), ("Name", "name"), ("Endpoint ID", "endpoint_id"), ("Path", "path"), ), simple_text=( # standard output is endpoint:path format "{}:{}".format(res["endpoint_id"], res["path"]) # verbose output includes all fields if not is_verbose() else None ), )
python
{ "resource": "" }
q13372
role_list
train
def role_list(endpoint_id):
    """
    Executor for `globus endpoint role list`
    """
    client = get_client()
    roles = client.endpoint_role_list(endpoint_id)

    resolved_ids = LazyIdentityMap(
        x["principal"] for x in roles if x["principal_type"] == "identity"
    )

    def principal_str(role):
        principal = role["principal"]
        if role["principal_type"] == "identity":
            username = resolved_ids.get(principal)
            return username or principal
        elif role["principal_type"] == "group":
            return (u"https://app.globus.org/groups/{}").format(principal)
        else:
            return principal

    formatted_print(
        roles,
        fields=[
            ("Principal Type", "principal_type"),
            ("Role ID", "id"),
            ("Principal", principal_str),
            ("Role", "role"),
        ],
    )
python
{ "resource": "" }
q13373
fill_delegate_proxy_activation_requirements
train
def fill_delegate_proxy_activation_requirements(
    requirements_data, cred_file, lifetime_hours=12
):
    """
    Given the activation requirements for an endpoint and a filename for
    X.509 credentials, extracts the public key from the activation
    requirements, uses the key and the credentials to make a proxy credential,
    and returns the requirements data with the proxy chain filled in.
    """
    # get the public key from the activation requirements
    for data in requirements_data["DATA"]:
        if data["type"] == "delegate_proxy" and data["name"] == "public_key":
            public_key = data["value"]
            break
    else:
        raise ValueError(
            (
                "No public_key found in activation requirements, this endpoint "
                "does not support Delegate Proxy activation."
            )
        )

    # get user credentials from the user credential file
    with open(cred_file) as f:
        issuer_cred = f.read()

    # create the proxy credentials
    proxy = create_proxy_credentials(issuer_cred, public_key, lifetime_hours)

    # return the activation requirements document with the proxy_chain filled
    for data in requirements_data["DATA"]:
        if data["type"] == "delegate_proxy" and data["name"] == "proxy_chain":
            data["value"] = proxy
            return requirements_data
    else:
        raise ValueError(
            (
                "No proxy_chain found in activation requirements, this endpoint "
                "does not support Delegate Proxy activation."
            )
        )
python
{ "resource": "" }
q13374
create_proxy_credentials
train
def create_proxy_credentials(issuer_cred, public_key, lifetime_hours): """ Given an issuer credentials PEM file in the form of a string, a public_key string from an activation requirements document, and an int for the proxy lifetime, returns credentials as a unicode string in PEM format containing a new proxy certificate and an extended proxy chain. """ # parse the issuer credential loaded_cert, loaded_private_key, issuer_chain = parse_issuer_cred(issuer_cred) # load the public_key into a cryptography object loaded_public_key = serialization.load_pem_public_key( public_key.encode("ascii"), backend=default_backend() ) # check that the issuer certificate is not an old proxy # and is using the keyUsage section as required confirm_not_old_proxy(loaded_cert) validate_key_usage(loaded_cert) # create the proxy cert cryptography object new_cert = create_proxy_cert( loaded_cert, loaded_private_key, loaded_public_key, lifetime_hours ) # extend the proxy chain as a unicode string extended_chain = loaded_cert.public_bytes(serialization.Encoding.PEM).decode( "ascii" ) + six.u(issuer_chain) # return in PEM format as a unicode string return ( new_cert.public_bytes(serialization.Encoding.PEM).decode("ascii") + extended_chain )
python
{ "resource": "" }
q13375
create_proxy_cert
train
def create_proxy_cert(
    loaded_cert, loaded_private_key, loaded_public_key, lifetime_hours
):
    """
    Given cryptography objects for an issuing certificate, a public_key,
    a private_key, and an int for lifetime in hours, creates a proxy
    cert from the issuer and public key signed by the private key.
    """
    builder = x509.CertificateBuilder()

    # create a serial number for the new proxy
    # Under RFC 3820 there are many ways to generate the serial number. However
    # making the number unpredictable has security benefits, e.g. it can make
    # this style of attack more difficult:
    # http://www.win.tue.nl/hashclash/rogue-ca
    serial = struct.unpack("<Q", os.urandom(8))[0]
    builder = builder.serial_number(serial)

    # set the new proxy as valid from now until lifetime_hours have passed
    builder = builder.not_valid_before(datetime.datetime.utcnow())
    builder = builder.not_valid_after(
        datetime.datetime.utcnow() + datetime.timedelta(hours=lifetime_hours)
    )

    # set the public key of the new proxy to the given public key
    builder = builder.public_key(loaded_public_key)

    # set the issuer of the new cert to the subject of the issuing cert
    builder = builder.issuer_name(loaded_cert.subject)

    # set the new proxy's subject:
    # append a CommonName to the issuing cert's subject,
    # with the serial as the value of the CN
    new_attribute = x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, six.u(str(serial)))
    subject_attributes = list(loaded_cert.subject)
    subject_attributes.append(new_attribute)
    builder = builder.subject_name(x509.Name(subject_attributes))

    # add proxyCertInfo extension to the new proxy (we opt not to add keyUsage)
    # For RFC proxies the effective usage is defined as the intersection
    # of the usage of each cert in the chain. See section 4.2 of RFC 3820.
    # the constants 'oid' and 'value' were obtained by examining output from
    # a call to the OpenSSL function:
    # X509V3_EXT_conf(NULL, ctx, name, value)
    # ctx set by X509V3_set_nconf(&ctx, NCONF_new(NULL))
    # name = "proxyCertInfo"
    # value = "critical,language:Inherit all"
    oid = x509.ObjectIdentifier("1.3.6.1.5.5.7.1.14")
    value = b"0\x0c0\n\x06\x08+\x06\x01\x05\x05\x07\x15\x01"
    extension = x509.extensions.UnrecognizedExtension(oid, value)
    builder = builder.add_extension(extension, critical=True)

    # sign the new proxy with the issuer's private key
    new_certificate = builder.sign(
        private_key=loaded_private_key,
        algorithm=hashes.SHA256(),
        backend=default_backend(),
    )

    # return the new proxy as a cryptography object
    return new_certificate
python
{ "resource": "" }
q13376
confirm_not_old_proxy
train
def confirm_not_old_proxy(loaded_cert):
    """
    Given a cryptography object for the issuer cert, checks if the cert is
    an "old proxy" and raises an error if so.
    """
    # Examine the last CommonName to see if it looks like an old proxy.
    last_cn = loaded_cert.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)[
        -1
    ]

    # if the last CN is 'proxy' or 'limited proxy' we are in an old proxy
    if last_cn.value in ("proxy", "limited proxy"):
        raise ValueError(
            "Proxy certificate is in an outdated format "
            "that is no longer supported"
        )
python
{ "resource": "" }
q13377
task_pause_info
train
def task_pause_info(task_id): """ Executor for `globus task pause-info` """ client = get_client() res = client.task_pause_info(task_id) def _custom_text_format(res): explicit_pauses = [ field for field in EXPLICIT_PAUSE_MSG_FIELDS # n.b. some keys are absent for completed tasks if res.get(field[1]) ] effective_pause_rules = res["pause_rules"] if not explicit_pauses and not effective_pause_rules: safeprint("Task {} is not paused.".format(task_id)) click.get_current_context().exit(0) if explicit_pauses: formatted_print( res, fields=explicit_pauses, text_format=FORMAT_TEXT_RECORD, text_preamble="This task has been explicitly paused.\n", text_epilog="\n" if effective_pause_rules else None, ) if effective_pause_rules: formatted_print( effective_pause_rules, fields=PAUSE_RULE_DISPLAY_FIELDS, text_preamble=( "The following pause rules are effective on this task:\n" ), ) formatted_print(res, text_format=_custom_text_format)
python
{ "resource": "" }
q13378
InstanaRecorder.run
train
def run(self):
    """ Spawn a background thread to periodically report queued spans """
    self.timer = t.Thread(target=self.report_spans)
    self.timer.daemon = True
    self.timer.name = "Instana Span Reporting"
    self.timer.start()
python
{ "resource": "" }
q13379
InstanaRecorder.report_spans
train
def report_spans(self): """ Periodically report the queued spans """ logger.debug("Span reporting thread is now alive") def span_work(): queue_size = self.queue.qsize() if queue_size > 0 and instana.singletons.agent.can_send(): response = instana.singletons.agent.report_traces(self.queued_spans()) if response: logger.debug("reported %d spans" % queue_size) return True every(2, span_work, "Span Reporting")
python
{ "resource": "" }
q13380
InstanaRecorder.queued_spans
train
def queued_spans(self): """ Get all of the spans in the queue """ spans = [] while True: try: s = self.queue.get(False) except queue.Empty: break else: spans.append(s) return spans
python
{ "resource": "" }
q13381
InstanaRecorder.record_span
train
def record_span(self, span):
    """
    Convert the passed BasicSpan into a JsonSpan and add it to the span queue
    """
    if instana.singletons.agent.can_send() or "INSTANA_TEST" in os.environ:
        json_span = None

        if span.operation_name in self.registered_spans:
            json_span = self.build_registered_span(span)
        else:
            json_span = self.build_sdk_span(span)

        self.queue.put(json_span)
python
{ "resource": "" }
q13382
InstanaRecorder.build_sdk_span
train
def build_sdk_span(self, span):
    """ Takes a BasicSpan and converts it into an SDK-type JsonSpan """
    custom_data = CustomData(tags=span.tags, logs=self.collect_logs(span))

    sdk_data = SDKData(name=span.operation_name, custom=custom_data,
                       Type=self.get_span_kind_as_string(span))

    if "arguments" in span.tags:
        sdk_data.arguments = span.tags["arguments"]

    if "return" in span.tags:
        sdk_data.Return = span.tags["return"]

    data = Data(service=instana.singletons.agent.sensor.options.service_name,
                sdk=sdk_data)

    entity_from = {'e': instana.singletons.agent.from_.pid,
                   'h': instana.singletons.agent.from_.agentUuid}

    json_span = JsonSpan(
        t=span.context.trace_id,
        p=span.parent_id,
        s=span.context.span_id,
        ts=int(round(span.start_time * 1000)),
        d=int(round(span.duration * 1000)),
        k=self.get_span_kind_as_int(span),
        n="sdk",
        f=entity_from,
        data=data)

    error = span.tags.pop("error", False)
    ec = span.tags.pop("ec", None)
    if error and ec:
        json_span.error = error
        json_span.ec = ec

    return json_span
python
{ "resource": "" }
q13383
InstanaRecorder.get_span_kind_as_string
train
def get_span_kind_as_string(self, span): """ Will retrieve the `span.kind` tag and return the appropriate string value for the Instana backend or None if the tag is set to something we don't recognize. :param span: The span to search for the `span.kind` tag :return: String """ kind = None if "span.kind" in span.tags: if span.tags["span.kind"] in self.entry_kind: kind = "entry" elif span.tags["span.kind"] in self.exit_kind: kind = "exit" else: kind = "intermediate" return kind
python
{ "resource": "" }
q13384
InstanaRecorder.get_span_kind_as_int
train
def get_span_kind_as_int(self, span): """ Will retrieve the `span.kind` tag and return the appropriate integer value for the Instana backend or None if the tag is set to something we don't recognize. :param span: The span to search for the `span.kind` tag :return: Integer """ kind = None if "span.kind" in span.tags: if span.tags["span.kind"] in self.entry_kind: kind = 1 elif span.tags["span.kind"] in self.exit_kind: kind = 2 else: kind = 3 return kind
python
{ "resource": "" }
q13385
Meter.run
train
def run(self): """ Spawns the metric reporting thread """ self.thr = threading.Thread(target=self.collect_and_report) self.thr.daemon = True self.thr.name = "Instana Metric Collection" self.thr.start()
python
{ "resource": "" }
q13386
Meter.reset
train
def reset(self):
    """ Reset the state as new """
    self.last_usage = None
    self.last_collect = None
    self.last_metrics = None
    self.snapshot_countdown = 0
    self.run()
python
{ "resource": "" }
q13387
Meter.collect_and_report
train
def collect_and_report(self):
    """
    Target function for the metric reporting thread.  This is a simple
    loop to collect and report entity data every 1 second.
    """
    logger.debug("Metric reporting thread is now alive")

    def metric_work():
        self.process()
        if self.agent.is_timed_out():
            logger.warning("Host agent offline for >1 min.  Going to sit in a corner...")
            self.agent.reset()
            return False
        return True

    every(1, metric_work, "Metrics Collection")
python
{ "resource": "" }
q13388
Meter.process
train
def process(self):
    """ Collects, processes & reports metrics """
    if self.agent.machine.fsm.current == "wait4init":
        # Test the host agent if we're ready to send data
        if self.agent.is_agent_ready():
            self.agent.machine.fsm.ready()
        else:
            return

    if self.agent.can_send():
        self.snapshot_countdown = self.snapshot_countdown - 1
        ss = None
        cm = self.collect_metrics()

        if self.snapshot_countdown < 1:
            logger.debug("Sending process snapshot data")
            self.snapshot_countdown = self.SNAPSHOT_PERIOD
            ss = self.collect_snapshot()
            md = copy.deepcopy(cm).delta_data(None)
        else:
            md = copy.deepcopy(cm).delta_data(self.last_metrics)

        ed = EntityData(pid=self.agent.from_.pid, snapshot=ss, metrics=md)

        response = self.agent.report_data(ed)
        if response:
            if response.status_code == 200 and len(response.content) > 2:
                # The host agent returned something indicating that it has a
                # request for us that we need to process.
                self.handle_agent_tasks(json.loads(response.content)[0])
        self.last_metrics = cm.__dict__
python
{ "resource": "" }
q13389
Meter.collect_snapshot
train
def collect_snapshot(self):
    """ Collects snapshot-related information about this process and environment """
    try:
        if "INSTANA_SERVICE_NAME" in os.environ:
            appname = os.environ["INSTANA_SERVICE_NAME"]
        elif "FLASK_APP" in os.environ:
            appname = os.environ["FLASK_APP"]
        elif "DJANGO_SETTINGS_MODULE" in os.environ:
            appname = os.environ["DJANGO_SETTINGS_MODULE"].split('.')[0]
        elif os.path.basename(sys.argv[0]) == '' and sys.stdout.isatty():
            appname = "Interactive Console"
        else:
            if os.path.basename(sys.argv[0]) == '':
                appname = os.path.basename(sys.executable)
            else:
                appname = os.path.basename(sys.argv[0])

        s = Snapshot(name=appname,
                     version=platform.version(),
                     f=platform.python_implementation(),
                     a=platform.architecture()[0],
                     djmw=self.djmw)
        s.version = sys.version
        s.versions = self.collect_modules()
    except Exception as e:
        logger.debug(str(e))
    else:
        return s
python
{ "resource": "" }
q13390
Meter.collect_modules
train
def collect_modules(self): """ Collect up the list of modules in use """ try: res = {} m = sys.modules for k in m: # Don't report submodules (e.g. django.x, django.y, django.z) # Skip modules that begin with underscore if ('.' in k) or k[0] == '_': continue if m[k]: try: d = m[k].__dict__ if "version" in d and d["version"]: res[k] = self.jsonable(d["version"]) elif "__version__" in d and d["__version__"]: res[k] = self.jsonable(d["__version__"]) else: res[k] = get_distribution(k).version except DistributionNotFound: pass except Exception: logger.debug("collect_modules: could not process module: %s" % k) except Exception: logger.debug("collect_modules", exc_info=True) else: return res
python
{ "resource": "" }
q13391
Agent.start
train
def start(self, e): """ Starts the agent and required threads """ logger.debug("Spawning metric & trace reporting threads") self.sensor.meter.run() instana.singletons.tracer.recorder.run()
python
{ "resource": "" }
q13392
Agent.handle_fork
train
def handle_fork(self): """ Forks happen. Here we handle them. """ self.reset() self.sensor.handle_fork() instana.singletons.tracer.handle_fork()
python
{ "resource": "" }
q13393
Agent.announce
train
def announce(self, discovery):
    """
    With the passed in Discovery class, attempt to announce to the host agent.
    """
    try:
        url = self.__discovery_url()
        logger.debug("making announce request to %s" % (url))
        response = None
        response = self.client.put(url,
                                   data=self.to_json(discovery),
                                   headers={"Content-Type": "application/json"},
                                   timeout=0.8)

        if response.status_code == 200:
            self.last_seen = datetime.now()
    except (requests.ConnectTimeout, requests.ConnectionError):
        logger.debug("announce", exc_info=True)
    finally:
        return response
python
{ "resource": "" }
q13394
Agent.is_agent_ready
train
def is_agent_ready(self):
    """
    Used after making a successful announce to test when the agent is
    ready to accept data.
    """
    try:
        response = self.client.head(self.__data_url(), timeout=0.8)

        if response.status_code == 200:
            return True
        return False
    except (requests.ConnectTimeout, requests.ConnectionError):
        logger.debug("is_agent_ready: host agent connection error")
python
{ "resource": "" }
q13395
Agent.task_response
train
def task_response(self, message_id, data): """ When the host agent passes us a task and we do it, this function is used to respond with the results of the task. """ try: response = None payload = json.dumps(data) logger.debug("Task response is %s: %s" % (self.__response_url(message_id), payload)) response = self.client.post(self.__response_url(message_id), data=payload, headers={"Content-Type": "application/json"}, timeout=0.8) except (requests.ConnectTimeout, requests.ConnectionError): logger.debug("task_response", exc_info=True) except Exception: logger.debug("task_response Exception", exc_info=True) finally: return response
python
{ "resource": "" }
q13396
Agent.__discovery_url
train
def __discovery_url(self): """ URL for announcing to the host agent """ port = self.sensor.options.agent_port if port == 0: port = AGENT_DEFAULT_PORT return "http://%s:%s/%s" % (self.host, port, AGENT_DISCOVERY_PATH)
python
{ "resource": "" }
q13397
Agent.__data_url
train
def __data_url(self): """ URL for posting metrics to the host agent. Only valid when announced. """ path = AGENT_DATA_PATH % self.from_.pid return "http://%s:%s/%s" % (self.host, self.port, path)
python
{ "resource": "" }
q13398
Agent.__traces_url
train
def __traces_url(self): """ URL for posting traces to the host agent. Only valid when announced. """ path = AGENT_TRACES_PATH % self.from_.pid return "http://%s:%s/%s" % (self.host, self.port, path)
python
{ "resource": "" }
q13399
Agent.__response_url
train
def __response_url(self, message_id): """ URL for responding to agent requests. """ if self.from_.pid != 0: path = AGENT_RESPONSE_PATH % (self.from_.pid, message_id) return "http://%s:%s/%s" % (self.host, self.port, path)
python
{ "resource": "" }