code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
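Each record below lists the six fields in that order: code, signature, docstring, loss_without_docstring, loss_with_docstring, factor. The factor value appears to be the ratio loss_without_docstring / loss_with_docstring (e.g. 5.828697 / 5.532363 ≈ 1.053564 for the first record). A minimal sketch of that check, assuming the records are loaded as Python dicts keyed by the field names above (the helper name is hypothetical):

def factor_matches(record, tol=1e-4):
    # recompute the ratio of the two losses and compare to the stored factor
    ratio = record["loss_without_docstring"] / record["loss_with_docstring"]
    return abs(ratio - record["factor"]) < tol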
client = get_client()
mode = _detect_mode(server)

# list (even if not necessary) in order to make errors more consistent when
# mode='id'
endpoint, server_list = get_endpoint_w_server_list(endpoint_id)

if server_list == "S3":
    raise click.UsageError("You cannot delete servers from S3 endpoints.")

# we don't *have to* raise an error in the GCP case, since the API would
# deny it too, but doing so makes our errors a little bit more consistent
# with deletes against S3 endpoints and shares
if endpoint["is_globus_connect"]:
    raise click.UsageError(
        "You cannot delete servers from Globus Connect Personal endpoints"
    )

if mode != "id":
    matches = _spec_to_matches(server_list, server, mode)
    if not matches:
        raise click.UsageError('No server was found matching "{}"'.format(server))
    elif len(matches) > 1:
        raise click.UsageError(
            dedent().format(server, [x["id"] for x in matches])
        )
    else:
        server = matches[0]["id"]

response = client.delete_endpoint_server(endpoint_id, server)
formatted_print(response, text_format=FORMAT_TEXT_RAW, response_key="message")
def server_delete(endpoint_id, server)
Executor for `globus endpoint server delete`
5.828697
5.532363
1.053564
conf = get_config_obj()

section = "cli"
if "." in parameter:
    section, parameter = parameter.split(".", 1)

# ensure that the section exists
if section not in conf:
    conf[section] = {}
# remove the value for the given parameter
del conf[section][parameter]

# write to disk
safeprint("Writing updated config to {}".format(conf.filename))
conf.write()
def remove_command(parameter)
Executor for `globus config remove`
4.392763
3.951379
1.111704
client = get_client()
rule = client.get_endpoint_acl_rule(endpoint_id, rule_id)
formatted_print(
    rule,
    text_format=FORMAT_TEXT_RECORD,
    fields=(
        ("Rule ID", "id"),
        ("Permissions", "permissions"),
        ("Shared With", _shared_with_keyfunc),
        ("Path", "path"),
    ),
)
def show_command(endpoint_id, rule_id)
Executor for `globus endpoint permission show`
4.302183
3.662919
1.174523
client = get_client()
bookmark_id = resolve_id_or_name(client, bookmark_id_or_name)["id"]
submit_data = {"name": new_bookmark_name}
res = client.update_bookmark(bookmark_id, submit_data)
formatted_print(res, simple_text="Success")
def bookmark_rename(bookmark_id_or_name, new_bookmark_name)
Executor for `globus bookmark rename`
4.062651
3.949166
1.028736
section = "cli" if "." in parameter: section, parameter = parameter.split(".", 1) value = lookup_option(parameter, section=section) if value is None: safeprint("{} not set".format(parameter)) else: safeprint("{} = {}".format(parameter, value))
def show_command(parameter)
Executor for `globus config show`
3.808914
3.520519
1.081918
source_ep, source_path = source
dest_ep, dest_path = destination

if source_ep != dest_ep:
    raise click.UsageError(
        (
            "rename requires that the source and dest "
            "endpoints are the same, {} != {}"
        ).format(source_ep, dest_ep)
    )
endpoint_id = source_ep

client = get_client()
autoactivate(client, endpoint_id, if_expires_in=60)

res = client.operation_rename(endpoint_id, oldpath=source_path, newpath=dest_path)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def rename_command(source, destination)
Executor for `globus rename`
3.935797
3.551842
1.1081
client = get_client()
res = client.get_endpoint(endpoint_id)
formatted_print(
    res,
    text_format=FORMAT_TEXT_RECORD,
    fields=GCP_FIELDS if res["is_globus_connect"] else STANDARD_FIELDS,
)
def endpoint_show(endpoint_id)
Executor for `globus endpoint show`
6.923378
4.67963
1.479471
client = get_client()
rule_data = assemble_generic_doc("access", permissions=permissions)
res = client.update_endpoint_acl_rule(endpoint_id, rule_id, rule_data)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def update_command(permissions, rule_id, endpoint_id)
Executor for `globus endpoint permission update`
5.476789
5.062035
1.081934
name = self.name + ":"
if not self.multiline or "\n" not in val:
    val = u"{0} {1}".format(name.ljust(self._text_prefix_len), val)
else:
    spacer = "\n" + " " * (self._text_prefix_len + 1)
    val = u"{0}{1}{2}".format(name, spacer, spacer.join(val.split("\n")))
return val
def _format_value(self, val)
Formats a value for text-mode printing; val must be unicode.
3.370336
3.204581
1.051724
client = get_client()
res = client.delete_endpoint_acl_rule(endpoint_id, rule_id)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def delete_command(endpoint_id, rule_id)
Executor for `globus endpoint permission delete`
3.805846
3.190028
1.193045
source_endpoint, cmd_source_path = source dest_endpoint, cmd_dest_path = destination if recursive and batch: raise click.UsageError( ( "You cannot use --recursive in addition to --batch. " "Instead, use --recursive on lines of --batch input " "which need it" ) ) if (cmd_source_path is None or cmd_dest_path is None) and (not batch): raise click.UsageError( ("transfer requires either SOURCE_PATH and DEST_PATH or " "--batch") ) # because python can't handle multiple **kwargs expansions in a single # call, we need to get a little bit clever # both the performance options (of which there are a few), and the # notification options (also there are a few) have elements which should be # omitted in some cases # notify comes to us clean, perf opts need more care # put them together into a dict before passing to TransferData kwargs = {} perf_opts = dict( (k, v) for (k, v) in dict( perf_cc=perf_cc, perf_p=perf_p, perf_pp=perf_pp, perf_udt=perf_udt ).items() if v is not None ) kwargs.update(perf_opts) kwargs.update(notify) client = get_client() transfer_data = TransferData( client, source_endpoint, dest_endpoint, label=label, sync_level=sync_level, verify_checksum=verify_checksum, preserve_timestamp=preserve_mtime, encrypt_data=encrypt, submission_id=submission_id, delete_destination_extra=delete, deadline=deadline, skip_activation_check=skip_activation_check, **kwargs ) if batch: @click.command() @click.option("--recursive", "-r", is_flag=True) @click.argument("source_path", type=TaskPath(base_dir=cmd_source_path)) @click.argument("dest_path", type=TaskPath(base_dir=cmd_dest_path)) def process_batch_line(dest_path, source_path, recursive): transfer_data.add_item( str(source_path), str(dest_path), recursive=recursive ) shlex_process_stdin( process_batch_line, ( "Enter transfers, line by line, as\n\n" " [--recursive] SOURCE_PATH DEST_PATH\n" ), ) else: transfer_data.add_item(cmd_source_path, cmd_dest_path, recursive=recursive) if dry_run: formatted_print( transfer_data, response_key="DATA", fields=( ("Source Path", "source_path"), ("Dest Path", "destination_path"), ("Recursive", "recursive"), ), ) # exit safely return # autoactivate after parsing all args and putting things together # skip this if skip-activation-check is given if not skip_activation_check: autoactivate(client, source_endpoint, if_expires_in=60) autoactivate(client, dest_endpoint, if_expires_in=60) res = client.submit_transfer(transfer_data) formatted_print( res, text_format=FORMAT_TEXT_RECORD, fields=(("Message", "message"), ("Task ID", "task_id")), )
def transfer_command( batch, sync_level, recursive, destination, source, label, preserve_mtime, verify_checksum, encrypt, submission_id, dry_run, delete, deadline, skip_activation_check, notify, perf_cc, perf_p, perf_pp, perf_udt, )
Executor for `globus transfer`
3.754602
3.7242
1.008163
client = get_client()
res = client.delete_endpoint(endpoint_id)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def endpoint_delete(endpoint_id)
Executor for `globus endpoint delete`
4.525231
3.342355
1.353905
client = get_client()
bookmark_id = resolve_id_or_name(client, bookmark_id_or_name)["id"]
res = client.delete_bookmark(bookmark_id)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def bookmark_delete(bookmark_id_or_name)
Executor for `globus bookmark delete`
3.429518
3.013771
1.137949
endpoint_id, path = endpoint_plus_path

client = get_client()
autoactivate(client, endpoint_id, if_expires_in=60)

res = client.operation_mkdir(endpoint_id, path=path)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def mkdir_command(endpoint_plus_path)
Executor for `globus mkdir`
5.261966
4.469487
1.177309
# now handle the output format, requires a little bit more care # first, prompt if it isn't given, but be clear that we have a sensible # default if they don't set it # then, make sure that if it is given, it's a valid format (discard # otherwise) # finally, set it only if given and valid if not default_output_format: safeprint( textwrap.fill( 'This must be one of "json" or "text". Other values will be ' "ignored. ENTER to skip." ) ) default_output_format = ( click.prompt( "Default CLI output format (cli.output_format)", default="text" ) .strip() .lower() ) if default_output_format not in ("json", "text"): default_output_format = None if not default_myproxy_username: safeprint(textwrap.fill("ENTER to skip.")) default_myproxy_username = click.prompt( "Default myproxy username (cli.default_myproxy_username)", default="", show_default=False, ).strip() # write to disk safeprint( "\n\nWriting updated config to {0}".format(os.path.expanduser("~/.globus.cfg")) ) write_option(OUTPUT_FORMAT_OPTNAME, default_output_format) write_option(MYPROXY_USERNAME_OPTNAME, default_myproxy_username)
def init_command(default_output_format, default_myproxy_username)
Executor for `globus config init`
4.357028
4.199057
1.037621
# special behavior when invoked with only one non-keyword argument: act as
# a normal decorator, decorating and returning that argument with
# click.option
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
    return decorator(args[0])
# if we're not doing that, we should see no positional args
# the alternative behavior is to fall through and discard *args, but this
# will probably confuse someone in the future when their arguments are
# silently discarded
elif len(args) != 0:
    raise ValueError("this decorator cannot take positional args")
# final case: got 0 or more kwargs, no positionals
# do the function-which-returns-a-decorator dance to produce a
# new decorator based on the arguments given
else:

    def inner_decorator(f):
        return decorator(f, **kwargs)

    return inner_decorator
def detect_and_decorate(decorator, args, kwargs)
Helper for applying a decorator when it is applied directly, and also applying it when it is given arguments and then applied to a function.
8.158013
8.340841
0.97808
client = get_client() rules = client.endpoint_acl_list(endpoint_id) resolved_ids = LazyIdentityMap( x["principal"] for x in rules if x["principal_type"] == "identity" ) def principal_str(rule): principal = rule["principal"] if rule["principal_type"] == "identity": username = resolved_ids.get(principal) return username or principal elif rule["principal_type"] == "group": return (u"https://app.globus.org/groups/{}").format(principal) else: principal = rule["principal_type"] return principal formatted_print( rules, fields=[ ("Rule ID", "id"), ("Permissions", "permissions"), ("Shared With", principal_str), ("Path", "path"), ], )
def list_command(endpoint_id)
Executor for `globus endpoint permission list`
3.937603
3.60465
1.092368
client = get_client() bookmark_iterator = client.bookmark_list() def get_ep_name(item): ep_id = item["endpoint_id"] try: ep_doc = client.get_endpoint(ep_id) return display_name_or_cname(ep_doc) except TransferAPIError as err: if err.code == "EndpointDeleted": return "[DELETED ENDPOINT]" else: raise err formatted_print( bookmark_iterator, fields=[ ("Name", "name"), ("Bookmark ID", "id"), ("Endpoint ID", "endpoint_id"), ("Endpoint Name", get_ep_name), ("Path", "path"), ], response_key="DATA", json_converter=iterable_response_to_dict, )
def bookmark_list()
Executor for `globus bookmark list`
4.30067
3.959746
1.086098
endpoint_id, path = endpoint_plus_path client = get_client() # attempt to activate unless --skip-activation-check is given if not skip_activation_check: autoactivate(client, endpoint_id, if_expires_in=60) delete_data = DeleteData( client, endpoint_id, label=label, recursive=recursive, ignore_missing=ignore_missing, submission_id=submission_id, deadline=deadline, skip_activation_check=skip_activation_check, interpret_globs=enable_globs, **notify ) if not star_silent and enable_globs and path.endswith("*"): # not intuitive, but `click.confirm(abort=True)` prints to stdout # unnecessarily, which we don't really want... # only do this check if stderr is a pty if ( err_is_terminal() and term_is_interactive() and not click.confirm( 'Are you sure you want to delete all files matching "{}"?'.format(path), err=True, ) ): safeprint("Aborted.", write_to_stderr=True) click.get_current_context().exit(1) delete_data.add_item(path) if dry_run: formatted_print(delete_data, response_key="DATA", fields=[("Path", "path")]) # exit safely return # Print task submission to stderr so that `-Fjson` is still correctly # respected, as it will be by `task wait` res = client.submit_delete(delete_data) task_id = res["task_id"] safeprint( 'Delete task submitted under ID "{}"'.format(task_id), write_to_stderr=True ) # do a `task wait` equivalent, including printing and correct exit status task_wait_with_io( meow, heartbeat, polling_interval, timeout, task_id, timeout_exit_code, client=client, )
def rm_command( ignore_missing, star_silent, recursive, enable_globs, endpoint_plus_path, label, submission_id, dry_run, deadline, skip_activation_check, notify, meow, heartbeat, polling_interval, timeout, timeout_exit_code, )
Executor for `globus rm`
5.542229
5.382156
1.029741
session_params = session_params or {} # get the ConfidentialApp client object auth_client = internal_auth_client( requires_instance=True, force_new_client=force_new_client ) # start the Confidential App Grant flow auth_client.oauth2_start_flow( redirect_uri=auth_client.base_url + "v2/web/auth-code", refresh_tokens=True, requested_scopes=SCOPES, ) # prompt additional_params = {"prompt": "login"} additional_params.update(session_params) linkprompt = "Please authenticate with Globus here" safeprint( "{0}:\n{1}\n{2}\n{1}\n".format( linkprompt, "-" * len(linkprompt), auth_client.oauth2_get_authorize_url(additional_params=additional_params), ) ) # come back with auth code auth_code = click.prompt("Enter the resulting Authorization Code here").strip() # finish auth flow exchange_code_and_store_config(auth_client, auth_code) return True
def do_link_auth_flow(session_params=None, force_new_client=False)
Prompts the user with a link to authenticate with Globus Auth and authorize the CLI to act on their behalf.
4.60871
4.138063
1.113736
session_params = session_params or {} # start local server and create matching redirect_uri with start_local_server(listen=("127.0.0.1", 0)) as server: _, port = server.socket.getsockname() redirect_uri = "http://localhost:{}".format(port) # get the ConfidentialApp client object and start a flow auth_client = internal_auth_client( requires_instance=True, force_new_client=force_new_client ) auth_client.oauth2_start_flow( refresh_tokens=True, redirect_uri=redirect_uri, requested_scopes=SCOPES ) additional_params = {"prompt": "login"} additional_params.update(session_params) url = auth_client.oauth2_get_authorize_url(additional_params=additional_params) # open web-browser for user to log in, get auth code webbrowser.open(url, new=1) auth_code = server.wait_for_code() if isinstance(auth_code, LocalServerError): safeprint("Authorization failed: {}".format(auth_code), write_to_stderr=True) click.get_current_context().exit(1) elif isinstance(auth_code, Exception): safeprint( "Authorization failed with unexpected error:\n{}".format(auth_code), write_to_stderr=True, ) click.get_current_context().exit(1) # finish auth flow and return true exchange_code_and_store_config(auth_client, auth_code) return True
def do_local_server_auth_flow(session_params=None, force_new_client=False)
Starts a local http server, opens a browser to have the user authenticate, and gets the code redirected to the server (no copy and pasting required)
3.404349
3.364815
1.011749
# do a token exchange with the given code tkn = auth_client.oauth2_exchange_code_for_tokens(auth_code) tkn = tkn.by_resource_server # extract access tokens from final response transfer_at = tkn["transfer.api.globus.org"]["access_token"] transfer_at_expires = tkn["transfer.api.globus.org"]["expires_at_seconds"] transfer_rt = tkn["transfer.api.globus.org"]["refresh_token"] auth_at = tkn["auth.globus.org"]["access_token"] auth_at_expires = tkn["auth.globus.org"]["expires_at_seconds"] auth_rt = tkn["auth.globus.org"]["refresh_token"] # revoke any existing tokens for token_opt in ( TRANSFER_RT_OPTNAME, TRANSFER_AT_OPTNAME, AUTH_RT_OPTNAME, AUTH_AT_OPTNAME, ): token = lookup_option(token_opt) if token: auth_client.oauth2_revoke_token(token) # write new tokens to config write_option(TRANSFER_RT_OPTNAME, transfer_rt) write_option(TRANSFER_AT_OPTNAME, transfer_at) write_option(TRANSFER_AT_EXPIRES_OPTNAME, transfer_at_expires) write_option(AUTH_RT_OPTNAME, auth_rt) write_option(AUTH_AT_OPTNAME, auth_at) write_option(AUTH_AT_EXPIRES_OPTNAME, auth_at_expires)
def exchange_code_and_store_config(auth_client, auth_code)
Finishes the auth flow after a code is obtained from the command line or local server. Exchanges the code for tokens and gets user info from Auth. Stores tokens and user info in config.
2.057098
2.091945
0.983342
if filter_scope == "all" and not filter_fulltext:
    raise click.UsageError(
        "When searching all endpoints (--filter-scope=all, the default), "
        "a full-text search filter is required. Other scopes (e.g. "
        "--filter-scope=recently-used) may be used without specifying "
        "an additional filter."
    )

client = get_client()

owner_id = filter_owner_id
if owner_id:
    owner_id = maybe_lookup_identity_id(owner_id)

search_iterator = client.endpoint_search(
    filter_fulltext=filter_fulltext,
    filter_scope=filter_scope,
    filter_owner_id=owner_id,
)
formatted_print(
    search_iterator,
    fields=ENDPOINT_LIST_FIELDS,
    json_converter=iterable_response_to_dict,
)
def endpoint_search(filter_fulltext, filter_owner_id, filter_scope)
Executor for `globus endpoint search`
3.856616
3.703491
1.041346
# validate params. Requires a get call to check the endpoint type
client = get_client()
endpoint_id = kwargs.pop("endpoint_id")
get_res = client.get_endpoint(endpoint_id)

if get_res["host_endpoint_id"]:
    endpoint_type = "shared"
elif get_res["is_globus_connect"]:
    endpoint_type = "personal"
elif get_res["s3_url"]:
    endpoint_type = "s3"
else:
    endpoint_type = "server"
validate_endpoint_create_and_update_params(
    endpoint_type, get_res["subscription_id"], kwargs
)

# make the update
ep_doc = assemble_generic_doc("endpoint", **kwargs)
res = client.update_endpoint(endpoint_id, ep_doc)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def endpoint_update(**kwargs)
Executor for `globus endpoint update`
4.558435
4.116077
1.107471
try:
    click.echo(message, nl=newline, err=write_to_stderr)
except IOError as err:
    if err.errno is errno.EPIPE:
        pass
    else:
        raise
def safeprint(message, write_to_stderr=False, newline=True)
Wrapper around click.echo used to encapsulate its functionality. Also protects against EPIPE during click.echo calls, as this can happen normally in piped commands when the consumer closes before the producer.
2.590398
2.103129
1.231688
# if the key is a string, then the "keyfunc" is just a basic lookup
# operation -- return that
if isinstance(k, six.string_types):

    def lookup(x):
        return x[k]

    return lookup
# otherwise, the key must be a function which is executed on the item
# to produce a value -- return it verbatim
return k
def _key_to_keyfunc(k)
We allow for 'keys' which are functions that map columns onto value types -- they may do formatting or inspect multiple values on the object. In order to support this, wrap string keys in a simple function that does the natural lookup operation, but return any functions we receive as they are.
6.577641
6.028026
1.091177
def _assert_fields(): if fields is None: raise ValueError( "Internal Error! Output format requires fields; none given. " "You can workaround this error by using `--format JSON`" ) def _print_as_json(): print_json_response( json_converter(response_data) if json_converter else response_data ) def _print_as_unix(): print_unix_response( json_converter(response_data) if json_converter else response_data ) def _print_as_text(): # if we're given simple text, print that and exit if simple_text is not None: safeprint(simple_text) return # if there's a preamble, print it beofre any other text if text_preamble is not None: safeprint(text_preamble) # if there's a response key, key into it data = response_data if response_key is None else response_data[response_key] # do the various kinds of printing if text_format == FORMAT_TEXT_TABLE: _assert_fields() print_table(data, fields) elif text_format == FORMAT_TEXT_RECORD: _assert_fields() colon_formatted_print(data, fields) elif text_format == FORMAT_TEXT_RAW: safeprint(data) elif text_format == FORMAT_TEXT_CUSTOM: _custom_text_formatter(data) # if there's an epilog, print it after any text if text_epilog is not None: safeprint(text_epilog) if isinstance(text_format, six.string_types): text_format = text_format _custom_text_formatter = None else: _custom_text_formatter = text_format text_format = FORMAT_TEXT_CUSTOM if outformat_is_json(): _print_as_json() elif outformat_is_unix(): _print_as_unix() else: # silent does nothing if text_format == FORMAT_SILENT: return _print_as_text()
def formatted_print( response_data, simple_text=None, text_preamble=None, text_epilog=None, text_format=FORMAT_TEXT_TABLE, json_converter=None, fields=None, response_key=None, )
A generic output formatter. Consumes the following pieces of data: ``response_data`` is a dict or GlobusResponse object. It contains either an API response or synthesized data for printing. ``simple_text`` is a text override -- normal printing is skipped and this string is printed instead (text output only) ``text_preamble`` is text which prints before normal printing (text output only) ``text_epilog`` is text which prints after normal printing (text output only) ``text_format`` is one of the FORMAT_TEXT_* constants OR a callable which takes ``response_data`` and prints output. Note that when a callable is given, it does the actual printing ``json_converter`` is a callable that does preprocessing of JSON output. It must take ``response_data`` and produce another dict or dict-like object (json/unix output only) ``fields`` is an iterable of (fieldname, keyfunc) tuples. When keyfunc is a string, it is implicitly converted to `lambda x: x[keyfunc]` (text output only) ``response_key`` is a key into the data to print. When used with table printing, it must get an iterable out, and when used with raw printing, it gets a string. Necessary for certain formats like text table (text output only)
2.864578
2.799264
1.023333
def decorate(f, **kwargs):
    f = version_option(f)
    f = debug_option(f)
    f = verbose_option(f)
    f = click.help_option("-h", "--help")(f)

    # if the format option is being allowed, it needs to be applied to `f`
    if not kwargs.get("no_format_option"):
        f = format_option(f)

    # if the --map-http-status option is being allowed, ...
    if not kwargs.get("no_map_http_status_option"):
        f = map_http_status_option(f)

    return f

return detect_and_decorate(decorate, args, kwargs)
def common_options(*args, **kwargs)
This is a multi-purpose decorator for applying a "base" set of options shared by all commands. It can be applied either directly, or given keyword arguments. Usage: >>> @common_options >>> def mycommand(abc, xyz): >>> ... or >>> @common_options(no_format_option=True) >>> def mycommand(abc, xyz): >>> ...
4.170046
4.051369
1.029293
def decorate(f, **kwargs):
    metavar = kwargs.get("metavar", "ENDPOINT_ID")
    f = click.argument("endpoint_id", metavar=metavar, type=click.UUID)(f)
    return f

return detect_and_decorate(decorate, args, kwargs)
def endpoint_id_arg(*args, **kwargs)
This is the `ENDPOINT_ID` argument consumed by many Transfer endpoint related operations. It accepts alternate metavars for cases when another name is desirable (e.g. `SHARE_ID`, `HOST_ENDPOINT_ID`), but can also be applied as a direct decorator if no specialized metavar is being passed. Usage: >>> @endpoint_id_arg >>> def command_func(endpoint_id): >>> ... or >>> @endpoint_id_arg(metavar='HOST_ENDPOINT_ID') >>> def command_func(endpoint_id): >>> ...
4.671589
5.509002
0.847992
# options only allowed for GCS endpoints if endpoint_type != "server": # catch params with two option flags if params["public"] is False: raise click.UsageError( "Option --private only allowed " "for Globus Connect Server endpoints" ) # catch any params only usable with GCS for option in [ "public", "myproxy_dn", "myproxy_server", "oauth_server", "location", "network_use", "max_concurrency", "preferred_concurrency", "max_parallelism", "preferred_parallelism", ]: if params[option] is not None: raise click.UsageError( ( "Option --{} can only be used with Globus Connect Server " "endpoints".format(option.replace("_", "-")) ) ) # if the endpoint was not previously managed, and is not being passed # a subscription id, it cannot use managed endpoint only fields if (not managed) and not (params["subscription_id"] or params["managed"]): for option in [ "network_use", "max_concurrency", "preferred_concurrency", "max_parallelism", "preferred_parallelism", ]: if params[option] is not None: raise click.UsageError( ( "Option --{} can only be used with managed " "endpoints".format(option.replace("_", "-")) ) ) # because the Transfer service doesn't do network use level updates in a # patchy way, *both* endpoint `POST`s *and* `PUT`s must either use # - `network_use='custom'` with *every* other parameter specified (which # is validated by the service), or # - a preset/absent `network_use` with *no* other parameter specified # (which is *not* validated by the service; in this case, Transfer will # accept but ignore the others parameters if given, leading to user # confusion if we don't do this validation check) custom_network_use_params = ( "max_concurrency", "preferred_concurrency", "max_parallelism", "preferred_parallelism", ) if params["network_use"] != "custom": for option in custom_network_use_params: if params[option] is not None: raise click.UsageError( "The {} options require you use --network-use=custom.".format( "/".join( "--" + option.replace("_", "-") for option in custom_network_use_params ) ) ) # make sure --(no-)managed and --subscription-id are mutually exclusive # if --managed given pass DEFAULT as the subscription_id # if --no-managed given, pass None managed_flag = params.get("managed") if managed_flag is not None: params.pop("managed") if managed_flag: params["subscription_id"] = params.get("subscription_id") or "DEFAULT" else: if params.get("subscription_id"): raise click.UsageError( "Cannot specify --subscription-id and " "use the --no-managed option." ) params["subscription_id"] = EXPLICIT_NULL # make sure --no-default-directory are mutually exclusive # if --no-managed given, pass an EXPLICIT_NULL as the default directory if params.get("no_default_directory"): if params.get("default_directory"): raise click.UsageError( "--no-default-directory and --default-directory are mutually " "exclusive." ) else: params["default_directory"] = EXPLICIT_NULL params.pop("no_default_directory")
def validate_endpoint_create_and_update_params(endpoint_type, managed, params)
Given an endpoint type of "shared", "server", or "personal" and option values, confirms that the option values are valid for the given endpoint type.
4.043339
4.027906
1.003831
def inner_decorator(f, required=True):
    f = click.argument("TASK_ID", required=required)(f)
    return f

return detect_and_decorate(inner_decorator, args, kwargs)
def task_id_arg(*args, **kwargs)
This is the `TASK_ID` argument consumed by many Transfer Task operations. It accepts a toggle on whether or not it is required. Usage: >>> @task_id_option >>> def command_func(task_id): >>> ... or >>> @task_id_option(required=False) >>> def command_func(task_id): >>> ... By default, the task ID is made required; pass `required=False` to the decorator arguments to make it optional.
6.990679
6.66918
1.048207
def notify_opt_callback(ctx, param, value): # if no value was set, don't set any explicit options # the API default is "everything on" if value is None: return {} value = value.lower() value = [x.strip() for x in value.split(",")] # [""] is what you'll get if value is "" to start with # special-case it into "off", which helps avoid surprising scripts # which take a notification settings as inputs and build --notify if value == [""]: value = ["off"] off = "off" in value on = "on" in value # set-ize it -- duplicates are fine vals = set([x for x in value if x not in ("off", "on")]) if (vals or on) and off: raise click.UsageError('--notify cannot accept "off" and another value') allowed_vals = set(("on", "succeeded", "failed", "inactive")) if not vals <= allowed_vals: raise click.UsageError( "--notify received at least one invalid value among {}".format( list(vals) ) ) # return the notification options to send! # on means don't set anything (default) if on: return {} # off means turn off everything if off: return { "notify_on_succeeded": False, "notify_on_failed": False, "notify_on_inactive": False, } # otherwise, return the exact set of values seen else: return { "notify_on_succeeded": "succeeded" in vals, "notify_on_failed": "failed" in vals, "notify_on_inactive": "inactive" in vals, } f = click.option( "--dry-run", is_flag=True, help=("Don't actually submit the task, print submission " "data instead"), )(f) f = click.option( "--notify", callback=notify_opt_callback, help=( "Comma separated list of task events which notify by email. " "'on' and 'off' may be used to enable or disable notifications " "for all event types. Otherwise, use 'succeeded', 'failed', or " "'inactive'" ), )(f) f = click.option( "--submission-id", help=( "Task submission ID, as generated by `globus task " "generate-submission-id`. Used for safe resubmission in the " "presence of network failures." ), )(f) f = click.option("--label", default=None, help="Set a label for this task.")(f) f = click.option( "--deadline", default=None, type=ISOTimeType(), help="Set a deadline for this to be canceled if not completed by.", )(f) f = click.option( "--skip-activation-check", is_flag=True, help=("Submit the task even if the endpoint(s) " "aren't currently activated."), )(f) return f
def task_submission_options(f)
Options shared by both transfer and delete task submission
4.466328
4.450453
1.003567
def inner_decorator(f, supports_batch=True, default_enable_globs=False): f = click.option( "--recursive", "-r", is_flag=True, help="Recursively delete dirs" )(f) f = click.option( "--ignore-missing", "-f", is_flag=True, help="Don't throw errors if the file or dir is absent", )(f) f = click.option( "--star-silent", "--unsafe", "star_silent", is_flag=True, help=( 'Don\'t prompt when the trailing character is a "*".' + (" Implicit in --batch" if supports_batch else "") ), )(f) f = click.option( "--enable-globs/--no-enable-globs", is_flag=True, default=default_enable_globs, show_default=True, help=( "Enable expansion of *, ?, and [ ] characters in the last " "component of file paths, unless they are escaped with " "a preceeding backslash, \\" ), )(f) if supports_batch: f = click.option( "--batch", is_flag=True, help=( "Accept a batch of paths on stdin (i.e. run in " "batchmode). Uses ENDPOINT_ID as passed on the " "commandline. Any commandline PATH given will be used " "as a prefix to all paths given" ), )(f) return f return detect_and_decorate(inner_decorator, args, kwargs)
def delete_and_rm_options(*args, **kwargs)
Options which apply both to `globus delete` and `globus rm`
4.593109
4.54295
1.011041
def port_range_callback(ctx, param, value): if not value: return None value = value.lower().strip() if value == "unspecified": return None, None if value == "unrestricted": return 1024, 65535 try: lower, upper = map(int, value.split("-")) except ValueError: # too many/few values from split or non-integer(s) raise click.BadParameter( "must specify as 'unspecified', " "'unrestricted', or as range separated " "by a hyphen (e.g. '50000-51000')" ) if not 1024 <= lower <= 65535 or not 1024 <= upper <= 65535: raise click.BadParameter("must be within the 1024-65535 range") return (lower, upper) if lower <= upper else (upper, lower) def inner_decorator(f, add=False): f = click.option("--hostname", required=add, help="Server Hostname.")(f) default_scheme = "gsiftp" if add else None f = click.option( "--scheme", help="Scheme for the Server.", type=CaseInsensitiveChoice(("gsiftp", "ftp")), default=default_scheme, show_default=add, )(f) default_port = 2811 if add else None f = click.option( "--port", help="Port for Globus control channel connections.", type=int, default=default_port, show_default=add, )(f) f = click.option( "--subject", help=( "Subject of the X509 Certificate of the server. When " "unspecified, the CN must match the server hostname." ), )(f) for adjective, our_preposition, their_preposition in [ ("incoming", "to", "from"), ("outgoing", "from", "to"), ]: f = click.option( "--{}-data-ports".format(adjective), callback=port_range_callback, help="Indicate to firewall administrators at other sites how to " "allow {} traffic {} this server {} their own. Specify as " "either 'unspecified', 'unrestricted', or as range of " "ports separated by a hyphen (e.g. '50000-51000') within " "the 1024-65535 range.".format( adjective, our_preposition, their_preposition ), )(f) return f return detect_and_decorate(inner_decorator, args, kwargs)
def server_add_and_update_opts(*args, **kwargs)
shared collection of options for `globus transfer endpoint server add` and `globus transfer endpoint server update`. Accepts a toggle to know if it's being used as `add` or `update`. usage: >>> @server_add_and_update_opts >>> def command_func(subject, port, scheme, hostname): >>> ... or >>> @server_add_and_update_opts(add=True) >>> def command_func(subject, port, scheme, hostname): >>> ...
3.291533
3.215663
1.023594
client = get_client()
res = client.endpoint_deactivate(endpoint_id)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def endpoint_deactivate(endpoint_id)
Executor for `globus endpoint deactivate`
4.090173
3.183329
1.284873
def callback(ctx, param, value):
    # copied from click.decorators.version_option
    # no idea what resilient_parsing means, but...
    if not value or ctx.resilient_parsing:
        return

    print_version()
    ctx.exit(0)

return click.option(
    "--version",
    is_flag=True,
    expose_value=False,
    is_eager=True,
    callback=callback,
    cls=HiddenOption,
)(f)
def version_option(f)
Largely a custom clone of click.version_option -- almost identical, but prints our special output.
3.578222
3.206261
1.116011
# import in the func (rather than top-level scope) so that at setup time,
# `requests` isn't required -- otherwise, setuptools will fail to run
# because requests isn't installed yet.
import requests

try:
    version_data = requests.get(
        "https://pypi.python.org/pypi/globus-cli/json"
    ).json()
    latest = max(LooseVersion(v) for v in version_data["releases"])
    return latest, LooseVersion(__version__)
# if the fetch from pypi fails
except requests.RequestException:
    return None, LooseVersion(__version__)
def get_versions()
Wrap in a function to ensure that we don't run this every time a CLI command runs (yuck!) Also protects import of `requests` from issues when grabbed by setuptools. More on that inline
5.950274
5.707717
1.042496
id_batch_size = 100

# fetch in batches of 100, store in a dict
ac = get_auth_client()
self._resolved_map = {}
for i in range(0, len(self.identity_ids), id_batch_size):
    chunk = self.identity_ids[i : i + id_batch_size]
    resolved_result = ac.get_identities(ids=chunk)
    for x in resolved_result["identities"]:
        self._resolved_map[x["id"]] = x["username"]
def _lookup_identity_names(self)
Batch resolve identities to usernames. Returns a dict mapping IDs to Usernames
3.784033
3.433033
1.102242
# deny rwx to Group and World -- don't bother storing the returned old mask
# value, since we'll never restore it in the CLI anyway
# do this on every call to ensure that we're always consistent about it
os.umask(0o077)

# FIXME: DRY violation with config_commands.helpers
conf = get_config_obj(system=system)

# add the section if absent
if section not in conf:
    conf[section] = {}

conf[section][option] = value
conf.write()
def write_option(option, value, section="cli", system=False)
Write an option to disk -- doesn't handle config reloading
13.236044
12.638718
1.047262
client_id = lookup_option(CLIENT_ID_OPTNAME) client_secret = lookup_option(CLIENT_SECRET_OPTNAME) template_id = lookup_option(TEMPLATE_ID_OPTNAME) or DEFAULT_TEMPLATE_ID template_client = internal_native_client() existing = client_id and client_secret # if we are forcing a new client, delete any existing client if force_new_client and existing: existing_client = globus_sdk.ConfidentialAppAuthClient(client_id, client_secret) try: existing_client.delete("/v2/api/clients/{}".format(client_id)) # if the client secret has been invalidated or the client has # already been removed, we continue on except globus_sdk.exc.AuthAPIError: pass # if we require a new client to be made if force_new_client or (requires_instance and not existing): # register a new instance client with auth body = {"client": {"template_id": template_id, "name": "Globus CLI"}} res = template_client.post("/v2/api/clients", json_body=body) # get values and write to config credential_data = res["included"]["client_credential"] client_id = credential_data["client"] client_secret = credential_data["secret"] write_option(CLIENT_ID_OPTNAME, client_id) write_option(CLIENT_SECRET_OPTNAME, client_secret) return globus_sdk.ConfidentialAppAuthClient( client_id, client_secret, app_name="Globus CLI" ) # if we already have a client, just return it elif existing: return globus_sdk.ConfidentialAppAuthClient( client_id, client_secret, app_name="Globus CLI" ) # fall-back to a native client to not break old logins # TOOD: eventually remove this behavior else: return template_client
def internal_auth_client(requires_instance=False, force_new_client=False)
Looks up the values for this CLI's Instance Client in config. If none exists and requires_instance is True or force_new_client is True, registers a new Instance Client with Globus Auth. If none exists and requires_instance is False, defaults to a Native Client for backwards compatibility. Returns either a NativeAppAuthClient or a ConfidentialAppAuthClient.
3.214451
3.214765
0.999902
# get the mapping by looking up the state and getting the mapping attr
mapping = click.get_current_context().ensure_object(CommandState).http_status_map

# if there is a mapped exit code, exit with that. Otherwise, exit 1
if http_status in mapping:
    sys.exit(mapping[http_status])
else:
    sys.exit(1)
def exit_with_mapped_status(http_status)
Given an HTTP Status, exit with either an error status of 1 or the status mapped by what we were given.
5.880183
5.285963
1.112415
safeprint(
    "The resource you are trying to access requires you to "
    "re-authenticate with specific identities."
)

params = exception.raw_json["authorization_parameters"]
message = params.get("session_message")
if message:
    safeprint("message: {}".format(message))

identities = params.get("session_required_identities")
if identities:
    id_str = " ".join(identities)
    safeprint(
        "Please run\n\n"
        "    globus session update {}\n\n"
        "to re-authenticate with the required identities".format(id_str)
    )
else:
    safeprint(
        'Please use "globus session update" to re-authenticate '
        "with specific identities"
    )

exit_with_mapped_status(exception.http_status)
def session_hook(exception)
Expects an exception with an authorization_parameters field in its raw_json
5.260465
4.456117
1.180504
exception_type, exception, traceback = exc_info # check if we're in debug mode, and run the real excepthook if we are ctx = click.get_current_context() state = ctx.ensure_object(CommandState) if state.debug: sys.excepthook(exception_type, exception, traceback) # we're not in debug mode, do custom handling else: # if it's a click exception, re-raise as original -- Click's main # execution context will handle pretty-printing if isinstance(exception, click.ClickException): reraise(exception_type, exception, traceback) # catch any session errors to give helpful instructions # on how to use globus session update elif ( isinstance(exception, exc.GlobusAPIError) and exception.raw_json and "authorization_parameters" in exception.raw_json ): session_hook(exception) # handle the Globus-raised errors with our special hooks # these will present the output (on stderr) as JSON elif isinstance(exception, exc.TransferAPIError): if exception.code == "ClientError.AuthenticationFailed": authentication_hook(exception) else: transferapi_hook(exception) elif isinstance(exception, exc.AuthAPIError): if exception.code == "UNAUTHORIZED": authentication_hook(exception) # invalid_grant occurs when the users refresh tokens are not valid elif exception.message == "invalid_grant": invalidrefresh_hook(exception) else: authapi_hook(exception) elif isinstance(exception, exc.GlobusAPIError): globusapi_hook(exception) # specific checks fell through -- now check if it's any kind of # GlobusError elif isinstance(exception, exc.GlobusError): globus_generic_hook(exception) # not a GlobusError, not a ClickException -- something like ValueError # or NotImplementedError bubbled all the way up here: just print it # out, basically else: safeprint(u"{}: {}".format(exception_type.__name__, exception)) sys.exit(1)
def custom_except_hook(exc_info)
A custom excepthook to present python errors produced by the CLI. We don't want to show end users big scary stacktraces if they aren't python programmers, so slim it down to some basic info. We keep a "DEBUGMODE" env variable kicking around to let us turn on stacktraces if we ever need them. Additionally, does global suppression of EPIPE errors, which often occur when a python command is piped to a consumer like `head` which closes its input stream before python has sent all of its output. DANGER: There is a (small) risk that this will bite us if there are EPIPE errors produced within the Globus SDK. We should keep an eye on this possibility, as it may demand more sophisticated handling of EPIPE. Possible TODO item to reduce this risk: inspect the exception and only hide EPIPE if it comes from within the globus_cli package.
5.271544
5.082068
1.037283
client = get_client() # cannot filter by both errors and non errors if filter_errors and filter_non_errors: raise click.UsageError("Cannot filter by both errors and non errors") elif filter_errors: filter_string = "is_error:1" elif filter_non_errors: filter_string = "is_error:0" else: filter_string = "" event_iterator = client.task_event_list( task_id, num_results=limit, filter=filter_string ) def squashed_json_details(x): is_json = False try: loaded = json.loads(x["details"]) is_json = True except ValueError: loaded = x["details"] if is_json: return json.dumps(loaded, separators=(",", ":"), sort_keys=True) else: return loaded.replace("\n", "\\n") formatted_print( event_iterator, fields=( ("Time", "time"), ("Code", "code"), ("Is Error", "is_error"), ("Details", squashed_json_details), ), json_converter=iterable_response_to_dict, )
def task_event_list(task_id, limit, filter_errors, filter_non_errors)
Executor for `globus task-event-list`
2.874465
2.788492
1.030831
client = get_client()
res = client.delete_endpoint_role(endpoint_id, role_id)
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
def role_delete(role_id, endpoint_id)
Executor for `globus endpoint role delete`
4.187709
3.10657
1.348017
endpoint_id, path = endpoint_plus_path # do autoactivation before the `ls` call so that recursive invocations # won't do this repeatedly, and won't have to instantiate new clients client = get_client() autoactivate(client, endpoint_id, if_expires_in=60) # create the query paramaters to send to operation_ls ls_params = {"show_hidden": int(show_hidden)} if path: ls_params["path"] = path if filter_val: # this char has special meaning in the LS API's filter clause # can't be part of the pattern (but we don't support globbing across # dir structures anyway) if "/" in filter_val: raise click.UsageError('--filter cannot contain "/"') # format into a simple filter clause which operates on filenames ls_params["filter"] = "name:{}".format(filter_val) # get the `ls` result if recursive: # NOTE: # --recursive and --filter have an interplay that some users may find # surprising # if we're asked to change or "improve" the behavior in the future, we # could do so with "type:dir" or "type:file" filters added in, and # potentially work out some viable behavior based on what people want res = client.recursive_operation_ls( endpoint_id, depth=recursive_depth_limit, **ls_params ) else: res = client.operation_ls(endpoint_id, **ls_params) def cleaned_item_name(item): return item["name"] + ("/" if item["type"] == "dir" else "") # and then print it, per formatting rules formatted_print( res, fields=[ ("Permissions", "permissions"), ("User", "user"), ("Group", "group"), ("Size", "size"), ("Last Modified", "last_modified"), ("File Type", "type"), ("Filename", cleaned_item_name), ], simple_text=( None if long_output or is_verbose() or not outformat_is_text() else "\n".join(cleaned_item_name(x) for x in res) ), json_converter=iterable_response_to_dict, )
def ls_command( endpoint_plus_path, recursive_depth_limit, recursive, long_output, show_hidden, filter_val, )
Executor for `globus ls`
6.279738
6.214505
1.010497
client = get_client()
res = client.get_submission_id()
formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="value")
def generate_submission_id()
Executor for `globus task generate-submission-id`
7.339444
5.120077
1.433463
if not principal: raise click.UsageError("A security principal is required for this command") endpoint_id, path = endpoint_plus_path principal_type, principal_val = principal client = get_client() if principal_type == "identity": principal_val = maybe_lookup_identity_id(principal_val) if not principal_val: raise click.UsageError( "Identity does not exist. " "Use --provision-identity to auto-provision an identity." ) elif principal_type == "provision-identity": principal_val = maybe_lookup_identity_id(principal_val, provision=True) principal_type = "identity" if not notify_email: notify_message = None rule_data = assemble_generic_doc( "access", permissions=permissions, principal=principal_val, principal_type=principal_type, path=path, notify_email=notify_email, notify_message=notify_message, ) res = client.add_endpoint_acl_rule(endpoint_id, rule_data) formatted_print( res, text_format=FORMAT_TEXT_RECORD, fields=[("Message", "message"), ("Rule ID", "access_id")], )
def create_command( principal, permissions, endpoint_plus_path, notify_email, notify_message )
Executor for `globus endpoint permission create`
3.262208
3.053898
1.068211
# BFS is not done until the queue is empty while self.queue: logger.debug( ( "recursive_operation_ls BFS queue not empty, " "getting next path now." ) ) # rate limit based on number of ls calls we have made self.ls_count += 1 if self.ls_count % SLEEP_FREQUENCY == 0: logger.debug( ( "recursive_operation_ls sleeping {} seconds to " "rate limit itself.".format(SLEEP_LEN) ) ) time.sleep(SLEEP_LEN) # get path and current depth from the queue abs_path, rel_path, depth = self.queue.pop() # set the target path to the popped absolute path if it exists if abs_path: self.ls_params["path"] = abs_path # if filter_after_first is False, stop filtering after the first # ls call has been made if not self.filter_after_first: if self.filtering: self.filtering = False else: try: self.ls_params.pop("filter") except KeyError: pass # do the operation_ls with the updated params res = self.client.operation_ls(self.endpoint_id, **self.ls_params) res_data = res["DATA"] # if we aren't at the depth limit, add dir entries to the queue. # including the dir's name in the absolute and relative paths # and increase the depth by one. # data is reversed to maintain any "orderby" ordering if depth < self.max_depth: self.queue.extend( [ ( res["path"] + item["name"], (rel_path + "/" if rel_path else "") + item["name"], depth + 1, ) for item in reversed(res_data) if item["type"] == "dir" ] ) # for each item in the response data update the item's name with # the relative path popped from the queue, and yield the item for item in res_data: item["name"] = (rel_path + "/" if rel_path else "") + item["name"] yield GlobusResponse(item)
def iterable_func(self)
An internal function which has generator semantics. Defined using the `yield` syntax. Used to grab the first element during class initialization, and subsequently on calls to `next()` to get the remaining elements. We rely on the implicit StopIteration built into this type of function to propagate through the final `next()` call.
4.587988
4.708798
0.974344
client = get_client()
res = resolve_id_or_name(client, bookmark_id_or_name)
formatted_print(
    res,
    text_format=FORMAT_TEXT_RECORD,
    fields=(
        ("ID", "id"),
        ("Name", "name"),
        ("Endpoint ID", "endpoint_id"),
        ("Path", "path"),
    ),
    simple_text=(
        # standard output is endpoint:path format
        "{}:{}".format(res["endpoint_id"], res["path"])
        # verbose output includes all fields
        if not is_verbose()
        else None
    ),
)
def bookmark_show(bookmark_id_or_name)
Executor for `globus bookmark show`
4.405262
3.961389
1.11205
client = get_client() roles = client.endpoint_role_list(endpoint_id) resolved_ids = LazyIdentityMap( x["principal"] for x in roles if x["principal_type"] == "identity" ) def principal_str(role): principal = role["principal"] if role["principal_type"] == "identity": username = resolved_ids.get(principal) return username or principal elif role["principal_type"] == "group": return (u"https://app.globus.org/groups/{}").format(principal) else: return principal formatted_print( roles, fields=[ ("Principal Type", "principal_type"), ("Role ID", "id"), ("Principal", principal_str), ("Role", "role"), ], )
def role_list(endpoint_id)
Executor for `globus endpoint role list`
3.461578
3.060762
1.130953
# get the public key from the activation requirements for data in requirements_data["DATA"]: if data["type"] == "delegate_proxy" and data["name"] == "public_key": public_key = data["value"] break else: raise ValueError( ( "No public_key found in activation requirements, this endpoint " "does not support Delegate Proxy activation." ) ) # get user credentials from user credential file" with open(cred_file) as f: issuer_cred = f.read() # create the proxy credentials proxy = create_proxy_credentials(issuer_cred, public_key, lifetime_hours) # return the activation requirements document with the proxy_chain filled for data in requirements_data["DATA"]: if data["type"] == "delegate_proxy" and data["name"] == "proxy_chain": data["value"] = proxy return requirements_data else: raise ValueError( ( "No proxy_chain found in activation requirements, this endpoint " "does not support Delegate Proxy activation." ) )
def fill_delegate_proxy_activation_requirements( requirements_data, cred_file, lifetime_hours=12 )
Given the activation requirements for an endpoint and a filename for X.509 credentials, extracts the public key from the activation requirements, uses the key and the credentials to make a proxy credential, and returns the requirements data with the proxy chain filled in.
3.016012
2.630142
1.146711
# parse the issuer credential loaded_cert, loaded_private_key, issuer_chain = parse_issuer_cred(issuer_cred) # load the public_key into a cryptography object loaded_public_key = serialization.load_pem_public_key( public_key.encode("ascii"), backend=default_backend() ) # check that the issuer certificate is not an old proxy # and is using the keyUsage section as required confirm_not_old_proxy(loaded_cert) validate_key_usage(loaded_cert) # create the proxy cert cryptography object new_cert = create_proxy_cert( loaded_cert, loaded_private_key, loaded_public_key, lifetime_hours ) # extend the proxy chain as a unicode string extended_chain = loaded_cert.public_bytes(serialization.Encoding.PEM).decode( "ascii" ) + six.u(issuer_chain) # return in PEM format as a unicode string return ( new_cert.public_bytes(serialization.Encoding.PEM).decode("ascii") + extended_chain )
def create_proxy_credentials(issuer_cred, public_key, lifetime_hours)
Given an issuer credentials PEM file in the form of a string, a public_key string from an activation requirements document, and an int for the proxy lifetime, returns credentials as a unicode string in PEM format containing a new proxy certificate and an extended proxy chain.
3.885947
3.430442
1.132783
# get each section of the PEM file sections = re.findall( "-----BEGIN.*?-----.*?-----END.*?-----", issuer_cred, flags=re.DOTALL ) try: issuer_cert = sections[0] issuer_private_key = sections[1] issuer_chain_certs = sections[2:] except IndexError: raise ValueError( "Unable to parse PEM data in credentials, " "make sure the X.509 file is in PEM format and " "consists of the issuer cert, issuer private key, " "and proxy chain (if any) in that order." ) # then validate that each section of data can be decoded as expected try: loaded_cert = x509.load_pem_x509_certificate( six.b(issuer_cert), default_backend() ) loaded_private_key = serialization.load_pem_private_key( six.b(issuer_private_key), password=None, backend=default_backend() ) for chain_cert in issuer_chain_certs: x509.load_pem_x509_certificate(six.b(chain_cert), default_backend()) issuer_chain = "".join(issuer_chain_certs) except ValueError: raise ValueError( "Failed to decode PEM data in credentials. Make sure " "the X.509 file consists of the issuer cert, " "issuer private key, and proxy chain (if any) " "in that order." ) # return loaded cryptography objects and the issuer chain return loaded_cert, loaded_private_key, issuer_chain
def parse_issuer_cred(issuer_cred)
Given an X509 PEM file in the form of a string, parses it into sections by the PEM delimiters of -----BEGIN <label>----- and -----END <label>-----. Confirms the sections can be decoded in the proxy credential order of: issuer cert, issuer private key, proxy chain of 0 or more certs. Returns the issuer cert and private key as loaded cryptography objects and the proxy chain as a potentially empty string.
2.611737
2.258517
1.156395
builder = x509.CertificateBuilder() # create a serial number for the new proxy # Under RFC 3820 there are many ways to generate the serial number. However # making the number unpredictable has security benefits, e.g. it can make # this style of attack more difficult: # http://www.win.tue.nl/hashclash/rogue-ca serial = struct.unpack("<Q", os.urandom(8))[0] builder = builder.serial_number(serial) # set the new proxy as valid from now until lifetime_hours have passed builder = builder.not_valid_before(datetime.datetime.utcnow()) builder = builder.not_valid_after( datetime.datetime.utcnow() + datetime.timedelta(hours=lifetime_hours) ) # set the public key of the new proxy to the given public key builder = builder.public_key(loaded_public_key) # set the issuer of the new cert to the subject of the issuing cert builder = builder.issuer_name(loaded_cert.subject) # set the new proxy's subject # append a CommonName to the new proxy's subject # with the serial as the value of the CN new_atribute = x509.NameAttribute(x509.oid.NameOID.COMMON_NAME, six.u(str(serial))) subject_attributes = list(loaded_cert.subject) subject_attributes.append(new_atribute) builder = builder.subject_name(x509.Name(subject_attributes)) # add proxyCertInfo extension to the new proxy (We opt not to add keyUsage) # For RFC proxies the effective usage is defined as the intersection # of the usage of each cert in the chain. See section 4.2 of RFC 3820. # the constants 'oid' and 'value' are gotten from # examining output from a call to the open ssl function: # X509V3_EXT_conf(NULL, ctx, name, value) # ctx set by X509V3_set_nconf(&ctx, NCONF_new(NULL)) # name = "proxyCertInfo" # value = "critical,language:Inherit all" oid = x509.ObjectIdentifier("1.3.6.1.5.5.7.1.14") value = b"0\x0c0\n\x06\x08+\x06\x01\x05\x05\x07\x15\x01" extension = x509.extensions.UnrecognizedExtension(oid, value) builder = builder.add_extension(extension, critical=True) # sign the new proxy with the issuer's private key new_certificate = builder.sign( private_key=loaded_private_key, algorithm=hashes.SHA256(), backend=default_backend(), ) # return the new proxy as a cryptography object return new_certificate
def create_proxy_cert( loaded_cert, loaded_private_key, loaded_public_key, lifetime_hours )
Given cryptography objects for an issuing certificate, a public_key, a private_key, and an int for lifetime in hours, creates a proxy cert from the issuer and public key signed by the private key.
4.562558
4.502545
1.013329
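The serial number above is simply eight random bytes read as an unsigned little-endian 64-bit integer, and its string form also becomes the value of the appended CommonName. A minimal sketch of that step:

import os
import struct

serial = struct.unpack("<Q", os.urandom(8))[0]  # unpredictable value in 0..2**64-1
print(serial, str(serial))  # the string form is what ends up in the proxy's CN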
# Examine the last CommonName to see if it looks like an old proxy. last_cn = loaded_cert.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)[ -1 ] # if the last CN is 'proxy' or 'limited proxy' we are in an old proxy if last_cn.value in ("proxy", "limited proxy"): raise ValueError( "Proxy certificate is in an outdated format " "that is no longer supported" )
def confirm_not_old_proxy(loaded_cert)
Given a cryptography object for the issuer cert, checks whether the cert is an "old proxy" and raises an error if so.
4.252095
3.903697
1.089248
try: key_usage = loaded_cert.extensions.get_extension_for_oid( x509.oid.ExtensionOID.KEY_USAGE ) if not key_usage.value.digital_signature: raise ValueError( "Certificate is using the keyUsage extension, but has " "not asserted the Digital Signature bit." ) except x509.ExtensionNotFound: # keyUsage extension not used return
def validate_key_usage(loaded_cert)
Given a cryptography object for the issuer cert, checks that, if the keyUsage extension is present, the Digital Signature bit has been asserted. (As specified in RFC 3820 section 3.1.)
3.104031
2.286453
1.357575
client = get_client() res = client.task_pause_info(task_id) def _custom_text_format(res): explicit_pauses = [ field for field in EXPLICIT_PAUSE_MSG_FIELDS # n.b. some keys are absent for completed tasks if res.get(field[1]) ] effective_pause_rules = res["pause_rules"] if not explicit_pauses and not effective_pause_rules: safeprint("Task {} is not paused.".format(task_id)) click.get_current_context().exit(0) if explicit_pauses: formatted_print( res, fields=explicit_pauses, text_format=FORMAT_TEXT_RECORD, text_preamble="This task has been explicitly paused.\n", text_epilog="\n" if effective_pause_rules else None, ) if effective_pause_rules: formatted_print( effective_pause_rules, fields=PAUSE_RULE_DISPLAY_FIELDS, text_preamble=( "The following pause rules are effective on this task:\n" ), ) formatted_print(res, text_format=_custom_text_format)
def task_pause_info(task_id)
Executor for `globus task pause-info`
3.701973
3.568372
1.03744
self.timer = t.Thread(target=self.report_spans) self.timer.daemon = True self.timer.name = "Instana Span Reporting" self.timer.start()
def run(self)
Spawn a background thread to periodically report queued spans
7.128895
3.587037
1.987405
logger.debug("Span reporting thread is now alive") def span_work(): queue_size = self.queue.qsize() if queue_size > 0 and instana.singletons.agent.can_send(): response = instana.singletons.agent.report_traces(self.queued_spans()) if response: logger.debug("reported %d spans" % queue_size) return True every(2, span_work, "Span Reporting")
def report_spans(self)
Periodically report the queued spans
7.538554
7.020245
1.073831
spans = [] while True: try: s = self.queue.get(False) except queue.Empty: break else: spans.append(s) return spans
def queued_spans(self)
Get all of the spans in the queue
2.898771
2.637657
1.098995
if instana.singletons.agent.can_send() or "INSTANA_TEST" in os.environ: json_span = None if span.operation_name in self.registered_spans: json_span = self.build_registered_span(span) else: json_span = self.build_sdk_span(span) self.queue.put(json_span)
def record_span(self, span)
Convert the passed BasicSpan into a JsonSpan and add it to the span queue
5.634379
4.907694
1.148071
custom_data = CustomData(tags=span.tags, logs=self.collect_logs(span)) sdk_data = SDKData(name=span.operation_name, custom=custom_data, Type=self.get_span_kind_as_string(span)) if "arguments" in span.tags: sdk_data.arguments = span.tags["arguments"] if "return" in span.tags: sdk_data.Return = span.tags["return"] data = Data(service=instana.singletons.agent.sensor.options.service_name, sdk=sdk_data) entity_from = {'e': instana.singletons.agent.from_.pid, 'h': instana.singletons.agent.from_.agentUuid} json_span = JsonSpan( t=span.context.trace_id, p=span.parent_id, s=span.context.span_id, ts=int(round(span.start_time * 1000)), d=int(round(span.duration * 1000)), k=self.get_span_kind_as_int(span), n="sdk", f=entity_from, data=data) error = span.tags.pop("error", False) ec = span.tags.pop("ec", None) if error and ec: json_span.error = error json_span.ec = ec return json_span
def build_sdk_span(self, span)
Takes a BasicSpan and converts it into an SDK-type JsonSpan
3.953432
3.830538
1.032083
kind = None if "span.kind" in span.tags: if span.tags["span.kind"] in self.entry_kind: kind = "entry" elif span.tags["span.kind"] in self.exit_kind: kind = "exit" else: kind = "intermediate" return kind
def get_span_kind_as_string(self, span)
Will retrieve the `span.kind` tag and return the appropriate string value for the Instana backend; returns None if the tag is not set, and "intermediate" if it is set to something we don't recognize. :param span: The span to search for the `span.kind` tag :return: String
2.731447
3.093182
0.883054
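A standalone sketch of the mapping above; the entry_kind / exit_kind membership lists are assumptions here (the usual OpenTracing server/consumer vs. client/producer convention), not taken from the snippet:

ENTRY_KIND = ("entry", "server", "consumer")  # assumed membership lists
EXIT_KIND = ("exit", "client", "producer")

def kind_to_string(tags):
    if "span.kind" not in tags:
        return None
    if tags["span.kind"] in ENTRY_KIND:
        return "entry"
    if tags["span.kind"] in EXIT_KIND:
        return "exit"
    return "intermediate"

print(kind_to_string({"span.kind": "server"}))  # entry
print(kind_to_string({"span.kind": "client"}))  # exit
print(kind_to_string({}))                       # None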
kind = None if "span.kind" in span.tags: if span.tags["span.kind"] in self.entry_kind: kind = 1 elif span.tags["span.kind"] in self.exit_kind: kind = 2 else: kind = 3 return kind
def get_span_kind_as_int(self, span)
Will retrieve the `span.kind` tag and return the appropriate integer value for the Instana backend; returns None if the tag is not set, and 3 (intermediate) if it is set to something we don't recognize. :param span: The span to search for the `span.kind` tag :return: Integer
2.706776
3.123036
0.866713
pid = None

if os.path.exists("/proc/"):
    sched_file = "/proc/%d/sched" % os.getpid()

    if os.path.isfile(sched_file):
        try:
            with open(sched_file) as sched:
                line = sched.readline()
            g = re.search(r'\((\d+),', line)
            if g is not None and len(g.groups()) == 1:
                pid = int(g.groups()[0])
        except Exception:
            logger.debug("parsing sched file failed", exc_info=True)

if pid is None:
    pid = os.getpid()

return pid
def __get_real_pid(self)
Attempts to determine the true process ID by querying the /proc/<pid>/sched file. This works on systems with a proc filesystem. Otherwise it falls back to os.getpid().
2.671402
2.380981
1.121975
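The sched-file trick above relies on the first line of /proc/<pid>/sched carrying the namespaced PID in parentheses. A sketch against a fabricated line:

import re

line = "gunicorn (17, #threads: 4)"  # hypothetical first line of /proc/<pid>/sched
m = re.search(r'\((\d+),', line)
if m is not None:
    print(int(m.group(1)))  # 17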
host = AGENT_DEFAULT_HOST port = AGENT_DEFAULT_PORT if "INSTANA_AGENT_HOST" in os.environ: host = os.environ["INSTANA_AGENT_HOST"] if "INSTANA_AGENT_PORT" in os.environ: port = int(os.environ["INSTANA_AGENT_PORT"]) elif "INSTANA_AGENT_IP" in os.environ: # Deprecated: INSTANA_AGENT_IP environment variable # To be removed in a future version host = os.environ["INSTANA_AGENT_IP"] if "INSTANA_AGENT_PORT" in os.environ: port = int(os.environ["INSTANA_AGENT_PORT"]) elif self.agent.sensor.options.agent_host != "": host = self.agent.sensor.options.agent_host if self.agent.sensor.options.agent_port != 0: port = self.agent.sensor.options.agent_port return host, port
def __get_agent_host_port(self)
Iterates over the various ways the host and port of the Instana host agent may be configured: defaults, environment variables, sensor options...
1.985195
1.762099
1.126608
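A condensed sketch of the same precedence (environment variables first, then sensor options, then defaults); the default host and port values here are assumptions, not taken from the snippet:

import os

AGENT_DEFAULT_HOST = "localhost"  # assumed default
AGENT_DEFAULT_PORT = 42699        # assumed default

host = os.environ.get("INSTANA_AGENT_HOST") or os.environ.get("INSTANA_AGENT_IP") or AGENT_DEFAULT_HOST
port = int(os.environ.get("INSTANA_AGENT_PORT", AGENT_DEFAULT_PORT))
print(host, port)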
self.thr = threading.Thread(target=self.collect_and_report) self.thr.daemon = True self.thr.name = "Instana Metric Collection" self.thr.start()
def run(self)
Spawns the metric reporting thread
5.084456
3.756227
1.353607
self.last_usage = None self.last_collect = None self.last_metrics = None self.snapshot_countdown = 0 self.run()
def reset(self)
Reset the state as new
9.668193
9.731617
0.993483
logger.debug("Metric reporting thread is now alive") def metric_work(): self.process() if self.agent.is_timed_out(): logger.warn("Host agent offline for >1 min. Going to sit in a corner...") self.agent.reset() return False return True every(1, metric_work, "Metrics Collection")
def collect_and_report(self)
Target function for the metric reporting thread. This is a simple loop to collect and report entity data every 1 second.
15.33654
12.268646
1.25006
if self.agent.machine.fsm.current == "wait4init":
    # Test whether the host agent is ready to accept data
    if self.agent.is_agent_ready():
        self.agent.machine.fsm.ready()
    else:
        return

if self.agent.can_send():
    self.snapshot_countdown = self.snapshot_countdown - 1
    ss = None
    cm = self.collect_metrics()

    if self.snapshot_countdown < 1:
        logger.debug("Sending process snapshot data")
        self.snapshot_countdown = self.SNAPSHOT_PERIOD
        ss = self.collect_snapshot()
        md = copy.deepcopy(cm).delta_data(None)
    else:
        md = copy.deepcopy(cm).delta_data(self.last_metrics)

    ed = EntityData(pid=self.agent.from_.pid, snapshot=ss, metrics=md)
    response = self.agent.report_data(ed)

    if response:
        if response.status_code == 200 and len(response.content) > 2:
            # The host agent returned something indicating that it has a request
            # for us that we need to process.
            self.handle_agent_tasks(json.loads(response.content)[0])

    self.last_metrics = cm.__dict__
def process(self)
Collects, processes & reports metrics
6.545646
6.515701
1.004596
logger.debug("Received agent request with messageId: %s" % task["messageId"]) if "action" in task: if task["action"] == "python.source": payload = get_py_source(task["args"]["file"]) else: message = "Unrecognized action: %s. An newer Instana package may be required " \ "for this. Current version: %s" % (task["action"], package_version()) payload = {"error": message} else: payload = {"error": "Instana Python: No action specified in request."} self.agent.task_response(task["messageId"], payload)
def handle_agent_tasks(self, task)
When a request is received from the host agent, it is sent here for handling & processing.
6.123086
5.948083
1.029422
try:
    if "INSTANA_SERVICE_NAME" in os.environ:
        appname = os.environ["INSTANA_SERVICE_NAME"]
    elif "FLASK_APP" in os.environ:
        appname = os.environ["FLASK_APP"]
    elif "DJANGO_SETTINGS_MODULE" in os.environ:
        appname = os.environ["DJANGO_SETTINGS_MODULE"].split('.')[0]
    elif os.path.basename(sys.argv[0]) == '' and sys.stdout.isatty():
        appname = "Interactive Console"
    else:
        if os.path.basename(sys.argv[0]) == '':
            appname = os.path.basename(sys.executable)
        else:
            appname = os.path.basename(sys.argv[0])

    s = Snapshot(name=appname,
                 version=platform.version(),
                 f=platform.python_implementation(),
                 a=platform.architecture()[0],
                 djmw=self.djmw)
    s.version = sys.version
    s.versions = self.collect_modules()
except Exception as e:
    # e.message does not exist on Python 3; str(e) works on both
    logger.debug(str(e))
else:
    return s
def collect_snapshot(self)
Collects snapshot related information to this process and environment
2.757502
2.70367
1.019911
try: res = {} m = sys.modules for k in m: # Don't report submodules (e.g. django.x, django.y, django.z) # Skip modules that begin with underscore if ('.' in k) or k[0] == '_': continue if m[k]: try: d = m[k].__dict__ if "version" in d and d["version"]: res[k] = self.jsonable(d["version"]) elif "__version__" in d and d["__version__"]: res[k] = self.jsonable(d["__version__"]) else: res[k] = get_distribution(k).version except DistributionNotFound: pass except Exception: logger.debug("collect_modules: could not process module: %s" % k) except Exception: logger.debug("collect_modules", exc_info=True) else: return res
def collect_modules(self)
Collect up the list of modules in use
3.340011
3.262331
1.023811
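The pkg_resources fallback used above can be exercised on its own; a minimal sketch:

from pkg_resources import DistributionNotFound, get_distribution

try:
    print(get_distribution("requests").version)  # e.g. '2.31.0' if requests is installed
except DistributionNotFound:
    print("requests is not installed as a distribution")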
try: g = None u = resource.getrusage(resource.RUSAGE_SELF) if gc_.isenabled(): c = list(gc_.get_count()) th = list(gc_.get_threshold()) g = GC(collect0=c[0] if not self.last_collect else c[0] - self.last_collect[0], collect1=c[1] if not self.last_collect else c[ 1] - self.last_collect[1], collect2=c[2] if not self.last_collect else c[ 2] - self.last_collect[2], threshold0=th[0], threshold1=th[1], threshold2=th[2]) thr = threading.enumerate() daemon_threads = [tr.daemon is True for tr in thr].count(True) alive_threads = [tr.daemon is False for tr in thr].count(True) dummy_threads = [type(tr) is threading._DummyThread for tr in thr].count(True) m = Metrics(ru_utime=u[0] if not self.last_usage else u[0] - self.last_usage[0], ru_stime=u[1] if not self.last_usage else u[1] - self.last_usage[1], ru_maxrss=u[2], ru_ixrss=u[3], ru_idrss=u[4], ru_isrss=u[5], ru_minflt=u[6] if not self.last_usage else u[6] - self.last_usage[6], ru_majflt=u[7] if not self.last_usage else u[7] - self.last_usage[7], ru_nswap=u[8] if not self.last_usage else u[8] - self.last_usage[8], ru_inblock=u[9] if not self.last_usage else u[9] - self.last_usage[9], ru_oublock=u[10] if not self.last_usage else u[10] - self.last_usage[10], ru_msgsnd=u[11] if not self.last_usage else u[11] - self.last_usage[11], ru_msgrcv=u[12] if not self.last_usage else u[12] - self.last_usage[12], ru_nsignals=u[13] if not self.last_usage else u[13] - self.last_usage[13], ru_nvcs=u[14] if not self.last_usage else u[14] - self.last_usage[14], ru_nivcsw=u[15] if not self.last_usage else u[15] - self.last_usage[15], alive_threads=alive_threads, dummy_threads=dummy_threads, daemon_threads=daemon_threads, gc=g) self.last_usage = u if gc_.isenabled(): self.last_collect = c return m except: logger.debug("collect_metrics", exc_info=True)
def collect_metrics(self)
Collect up and return various metrics
1.698228
1.680798
1.01037
logger.debug("Spawning metric & trace reporting threads") self.sensor.meter.run() instana.singletons.tracer.recorder.run()
def start(self, e)
Starts the agent and required threads
39.417225
35.812862
1.100644
self.reset() self.sensor.handle_fork() instana.singletons.tracer.handle_fork()
def handle_fork(self)
Forks happen. Here we handle them.
21.714096
19.621658
1.106639
try: rv = False url = "http://%s:%s/" % (host, port) response = self.client.get(url, timeout=0.8) server_header = response.headers["Server"] if server_header == AGENT_HEADER: logger.debug("Host agent found on %s:%d" % (host, port)) rv = True else: logger.debug("...something is listening on %s:%d but it's not the Instana Agent: %s" % (host, port, server_header)) except (requests.ConnectTimeout, requests.ConnectionError): logger.debug("No host agent listening on %s:%d" % (host, port)) rv = False finally: return rv
def is_agent_listening(self, host, port)
Check if the Instana Agent is listening on <host> and <port>.
3.288184
3.020079
1.088774
try:
    url = self.__discovery_url()
    logger.debug("making announce request to %s" % (url))
    response = None
    response = self.client.put(url,
                               data=self.to_json(discovery),
                               headers={"Content-Type": "application/json"},
                               timeout=0.8)

    if response.status_code == 200:
        self.last_seen = datetime.now()
except (requests.ConnectTimeout, requests.ConnectionError):
    logger.debug("announce", exc_info=True)
finally:
    return response
def announce(self, discovery)
With the passed in Discovery class, attempt to announce to the host agent.
3.77399
3.717904
1.015085
try:
    response = self.client.head(self.__data_url(), timeout=0.8)

    if response.status_code == 200:
        return True
    return False
except (requests.ConnectTimeout, requests.ConnectionError):
    logger.debug("is_agent_ready: host agent connection error")
def is_agent_ready(self)
Used after making a successful announce to test when the agent is ready to accept data.
5.407058
4.923094
1.098305
try:
    response = None
    response = self.client.post(self.__data_url(),
                                data=self.to_json(entity_data),
                                headers={"Content-Type": "application/json"},
                                timeout=0.8)

    # logger.warn("report_data: response.status_code is %s" % response.status_code)

    if response.status_code == 200:
        self.last_seen = datetime.now()
except (requests.ConnectTimeout, requests.ConnectionError):
    logger.debug("report_data: host agent connection error")
finally:
    return response
def report_data(self, entity_data)
Used to report entity data (metrics & snapshot) to the host agent.
4.00562
3.494295
1.146331
try:
    response = None
    response = self.client.post(self.__traces_url(),
                                data=self.to_json(spans),
                                headers={"Content-Type": "application/json"},
                                timeout=0.8)

    # logger.warn("report_traces: response.status_code is %s" % response.status_code)

    if response.status_code == 200:
        self.last_seen = datetime.now()
except (requests.ConnectTimeout, requests.ConnectionError):
    logger.debug("report_traces: host agent connection error")
finally:
    return response
def report_traces(self, spans)
Used to report span data (traces) to the host agent.
3.953918
3.530027
1.120081
try: response = None payload = json.dumps(data) logger.debug("Task response is %s: %s" % (self.__response_url(message_id), payload)) response = self.client.post(self.__response_url(message_id), data=payload, headers={"Content-Type": "application/json"}, timeout=0.8) except (requests.ConnectTimeout, requests.ConnectionError): logger.debug("task_response", exc_info=True) except Exception: logger.debug("task_response Exception", exc_info=True) finally: return response
def task_response(self, message_id, data)
When the host agent passes us a task and we do it, this function is used to respond with the results of the task.
2.862347
2.940212
0.973517
port = self.sensor.options.agent_port if port == 0: port = AGENT_DEFAULT_PORT return "http://%s:%s/%s" % (self.host, port, AGENT_DISCOVERY_PATH)
def __discovery_url(self)
URL for announcing to the host agent
4.453141
3.86194
1.153084
path = AGENT_DATA_PATH % self.from_.pid return "http://%s:%s/%s" % (self.host, self.port, path)
def __data_url(self)
URL for posting metrics to the host agent. Only valid when announced.
7.677797
6.068283
1.265234
path = AGENT_TRACES_PATH % self.from_.pid return "http://%s:%s/%s" % (self.host, self.port, path)
def __traces_url(self)
URL for posting traces to the host agent. Only valid when announced.
6.608247
5.946089
1.11136
if self.from_.pid != 0: path = AGENT_RESPONSE_PATH % (self.from_.pid, message_id) return "http://%s:%s/%s" % (self.host, self.port, path)
def __response_url(self, message_id)
URL for responding to agent requests.
5.504188
4.701364
1.170764
global _current_pid pid = os.getpid() if _current_pid != pid: _current_pid = pid _rnd.seed(int(1000000 * time.time()) ^ pid) id = format(_rnd.randint(0, 18446744073709551615), '02x') if len(id) < 16: id = id.zfill(16) return id
def generate_id()
Generate a 64bit base 16 ID for use as a Span or Trace ID
2.873039
2.610588
1.100533
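The formatting step above just guarantees a fixed-width, 16-character lower-case hex string; a minimal sketch using the standard random module instead of the seeded module-level generator:

import random

raw = random.randint(0, 2 ** 64 - 1)    # same upper bound as 18446744073709551615
span_id = format(raw, '02x').zfill(16)  # hex-encode, then left-pad to 16 characters
print(span_id, len(span_id))            # always 16 characters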
if not isinstance(header, string_types):
    return BAD_ID

try:
    # Test that header is truly a hexadecimal value before we try to convert
    int(header, 16)

    length = len(header)
    if length < 16:
        # Left pad ID with zeros
        header = header.zfill(16)
    elif length > 16:
        # Phase 0: Discard everything but the last 16 characters (8 bytes)
        header = header[-16:]
    return header
except ValueError:
    return BAD_ID
def header_to_id(header)
We can receive headers in the following formats: 1. unsigned base 16 hex string of variable length 2. [eventual] :param header: the header to analyze, validate and convert (if needed) :return: a valid ID to be used internal to the tracer
5.255786
5.22285
1.006306
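Usage sketch of the two normalisation rules above (left-pad short IDs with zeros, keep only the last 16 characters of long ones), assuming header_to_id from the snippet is in scope:

print(header_to_id("abc"))                   # '0000000000000abc' (zero-padded)
print(header_to_id("0123456789abcdef0123"))  # '456789abcdef0123' (last 16 chars kept)
print(header_to_id(1234))                    # BAD_ID (not a string)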
try:
    return json.dumps(obj, default=lambda obj: {k.lower(): v for k, v in obj.__dict__.items()},
                      sort_keys=False, separators=(',', ':')).encode()
except Exception as e:
    # pass the values as logging arguments rather than extra positional args
    logger.info("to_json: %s %s", e, obj)
def to_json(obj)
Convert obj to json. Used mostly to convert the classes in json_span.py until we switch to nested dicts (or something better) :param obj: the object to serialize to json :return: json string
3.371022
3.423267
0.984738
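A minimal sketch of the lower-cased __dict__ serialisation used above, with a throwaway class standing in for the json_span.py types:

import json

class Example(object):  # hypothetical stand-in for a json_span.py class
    def __init__(self):
        self.Name = "proxy"
        self.Count = 3

encoded = json.dumps(Example(),
                     default=lambda obj: {k.lower(): v for k, v in obj.__dict__.items()},
                     sort_keys=False, separators=(',', ':')).encode()
print(encoded)  # b'{"name":"proxy","count":3}'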
version = "" try: version = pkg_resources.get_distribution('instana').version except pkg_resources.DistributionNotFound: version = 'unknown' finally: return version
def package_version()
Determine the version of this package. :return: String representing known version
3.23316
4.014108
0.805449
path = None try: if qp is None: return '' if type(kwlist) is not list: logger.debug("strip_secrets: bad keyword list") return qp # If there are no key=values, then just return if not '=' in qp: return qp if '?' in qp: path, query = qp.split('?') else: query = qp params = parse.parse_qsl(query, keep_blank_values=True) redacted = ['<redacted>'] if matcher == 'equals-ignore-case': for keyword in kwlist: for index, kv in enumerate(params): if kv[0].lower() == keyword.lower(): params[index] = (kv[0], redacted) elif matcher == 'equals': for keyword in kwlist: for index, kv in enumerate(params): if kv[0] == keyword: params[index] = (kv[0], redacted) elif matcher == 'contains-ignore-case': for keyword in kwlist: for index, kv in enumerate(params): if keyword.lower() in kv[0].lower(): params[index] = (kv[0], redacted) elif matcher == 'contains': for keyword in kwlist: for index, kv in enumerate(params): if keyword in kv[0]: params[index] = (kv[0], redacted) elif matcher == 'regex': for regexp in kwlist: for index, kv in enumerate(params): if re.match(regexp, kv[0]): params[index] = (kv[0], redacted) else: logger.debug("strip_secrets: unknown matcher") return qp if sys.version_info < (3, 0): result = urllib.urlencode(params, doseq=True) else: result = parse.urlencode(params, doseq=True) query = parse.unquote(result) if path: query = path + '?' + query return query except: logger.debug("strip_secrets", exc_info=True)
def strip_secrets(qp, matcher, kwlist)
This function will scrub the secrets from a query param string based on the passed in matcher and kwlist. blah=1&secret=password&valid=true will result in blah=1&secret=<redacted>&valid=true You can even pass in path query combinations: /signup?blah=1&secret=password&valid=true will result in /signup?blah=1&secret=<redacted>&valid=true :param qp: a string representing the query params in URL form (unencoded) :param matcher: the matcher to use :param kwlist: the list of keywords to match :return: a scrubbed query param string
1.904169
1.827124
1.042167
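Usage sketch mirroring the docstring's own example, assuming strip_secrets from the snippet is in scope:

print(strip_secrets("blah=1&secret=password&valid=true", "equals", ["secret"]))
# blah=1&secret=<redacted>&valid=true
print(strip_secrets("/signup?blah=1&TOKEN=abc", "contains-ignore-case", ["token"]))
# /signup?blah=1&TOKEN=<redacted>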
try:
    # The first line is the header line
    # We look for the line where the Destination is 00000000 - that is the default route
    # The Gateway IP is encoded backwards in hex.
    with open("/proc/self/net/route") as routes:
        for line in routes:
            parts = line.split('\t')
            if '00000000' == parts[1]:
                hip = parts[2]
                if hip is not None and len(hip) == 8:
                    # Reverse order, convert hex to int
                    return "%i.%i.%i.%i" % (int(hip[6:8], 16), int(hip[4:6], 16),
                                            int(hip[2:4], 16), int(hip[0:2], 16))
except Exception:
    logger.warn("get_default_gateway: ", exc_info=True)
def get_default_gateway()
Attempts to read /proc/self/net/route to determine the default gateway in use. :return: String - the IP address of the default gateway, or None if not found/possible/non-existent
3.913753
3.583852
1.092052
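A worked example of the byte-reversed hex decoding used above, with a fabricated gateway column:

hip = "0102A8C0"  # hypothetical gateway field from /proc/self/net/route
print("%i.%i.%i.%i" % (int(hip[6:8], 16), int(hip[4:6], 16),
                       int(hip[2:4], 16), int(hip[0:2], 16)))  # 192.168.2.1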
try: response = None pysource = "" if regexp_py.search(file) is None: response = {"error": "Only Python source files are allowed. (*.py)"} else: with open(file, 'r') as pyfile: pysource = pyfile.read() response = {"data": pysource} except Exception as e: response = {"error": str(e)} finally: return response
def get_py_source(file)
Retrieves and returns the source code for any Python files requested by the UI via the host agent @param file [String] The fully qualified path to a file
3.655565
3.784013
0.966055
next_time = time.time() + delay while True: time.sleep(max(0, next_time - time.time())) try: if task() is False: break except Exception: logger.debug("Problem while executing repetitive task: %s" % name, exc_info=True) # skip tasks if we are behind schedule: next_time += (time.time() - next_time) // delay * delay + delay
def every(delay, task, name)
Executes a task every `delay` seconds :param delay: the delay in seconds :param task: the method to run. The method should return False if you want the loop to stop. :return: None
3.778513
4.068224
0.928787
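Usage sketch, assuming every from the snippet above is in scope; the task stops the loop by returning False:

state = {"runs": 0}

def demo_task():
    state["runs"] += 1
    print("run %d" % state["runs"])
    return state["runs"] < 3  # returning False after the third run stops the loop

every(1, demo_task, "demo task")  # blocks for roughly three seconds, then returns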
app = iWSGIMiddleware(app, *args, **kw) return app
def make_middleware(app=None, *args, **kw)
Given an app, return that app wrapped in iWSGIMiddleware
6.124726
3.24954
1.884798
try: eum_file = open(os.path.dirname(__file__) + '/eum.js') eum_src = Template(eum_file.read()) # Prepare the standard required IDs ids = {} ids['meta_kvs'] = '' parent_span = tracer.active_span if trace_id or parent_span: ids['trace_id'] = trace_id or parent_span.trace_id else: # No trace_id passed in and tracer doesn't show an active span so # return nothing, nada & zip. return '' if eum_api_key: ids['eum_api_key'] = eum_api_key else: ids['eum_api_key'] = global_eum_api_key # Process passed in EUM 'meta' key/values for key, value in meta.items(): ids['meta_kvs'] += ("'ineum('meta', '%s', '%s');'" % (key, value)) return eum_src.substitute(ids) except Exception as e: logger.debug(e) return ''
def eum_snippet(trace_id=None, eum_api_key=None, meta={})
Return an EUM snippet for use in views, templates and layouts that reports client side metrics to Instana that will automagically be linked to the current trace. @param trace_id [optional] the trace ID to insert into the EUM string @param eum_api_key [optional] the EUM API key from your Instana dashboard @param meta [optional] optional additional KVs you want reported with the EUM metrics @return string
4.632429
4.414193
1.04944
"Taken from BasicTracer so we can override generate_id calls to ours" start_time = time.time() if start_time is None else start_time # See if we have a parent_ctx in `references` parent_ctx = None if child_of is not None: parent_ctx = ( child_of if isinstance(child_of, ot.SpanContext) else child_of.context) elif references is not None and len(references) > 0: # TODO only the first reference is currently used parent_ctx = references[0].referenced_context # retrieve the active SpanContext if not ignore_active_span and parent_ctx is None: scope = self.scope_manager.active if scope is not None: parent_ctx = scope.span.context # Assemble the child ctx gid = generate_id() ctx = SpanContext(span_id=gid) if parent_ctx is not None: if parent_ctx._baggage is not None: ctx._baggage = parent_ctx._baggage.copy() ctx.trace_id = parent_ctx.trace_id ctx.sampled = parent_ctx.sampled else: ctx.trace_id = gid ctx.sampled = self.sampler.sampled(ctx.trace_id) # Tie it all together span = InstanaSpan(self, operation_name=operation_name, context=ctx, parent_id=(None if parent_ctx is None else parent_ctx.span_id), tags=tags, start_time=start_time) if operation_name in self.recorder.exit_spans: self.__add_stack(span) elif operation_name in self.recorder.entry_spans: # For entry spans, add only a backtrace fingerprint self.__add_stack(span, limit=2) return span
def start_span(self, operation_name=None, child_of=None, references=None, tags=None, start_time=None, ignore_active_span=False)
Taken from BasicTracer so we can override generate_id calls to ours
3.398655
2.954326
1.150399
span.stack = [] frame_count = 0 tb = traceback.extract_stack() tb.reverse() for frame in tb: if limit is not None and frame_count >= limit: break # Exclude Instana frames unless we're in dev mode if "INSTANA_DEV" not in os.environ: if re_tracer_frame.search(frame[0]) is not None: continue if re_with_stan_frame.search(frame[2]) is not None: continue span.stack.append({ "c": frame[0], "n": frame[1], "m": frame[2] }) if limit is not None: frame_count += 1
def __add_stack(self, span, limit=None)
Adds a backtrace to this span
3.832941
3.592273
1.066996
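The frame indices used above ('c' = filename, 'n' = line number, 'm' = function name) follow the classic traceback tuple layout; a minimal sketch:

import traceback

tb = traceback.extract_stack()
tb.reverse()
frame = tb[0]                        # the innermost frame
print(frame[0], frame[1], frame[2])  # filename, line number, function name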
if "INSTANA_DEV" in os.environ: print("==============================================================") print("Instana: Running flask hook") print("==============================================================") wrapt.wrap_function_wrapper('flask', 'Flask.__init__', wrapper)
def hook(module)
Hook method to install the Instana middleware into Flask
7.172227
5.352134
1.340069