_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def detect_alias_config_change(self):
    """
    Check whether the alias configuration has changed since the last run.

    Returns:
        True if the configuration file content differs from the stored hash
        (the stored hash is updated as a side effect); False otherwise, or
        when the configuration has a parse error.
    """
    # Skip the check entirely when the config cannot be parsed.
    if self.parse_error():
        return False

    current_hash = hashlib.sha1(self.alias_config_str.encode('utf-8')).hexdigest()
    if current_hash == self.alias_config_hash:
        return False

    # Remember the new hash so subsequent runs see the file as unchanged.
    self.alias_config_hash = current_hash
    return True
def transform(self, args):
    """
    Transform any aliases in args to their respective commands.

    Args:
        args: A list of space-delimited command input extracted directly from the console.

    Returns:
        A list of transformed commands according to the alias configuration file.
    """
    if self.parse_error():
        # Write an empty hash so next run will check the config file against the entire command table again
        AliasManager.write_alias_config_hash(empty_hash=True)
        return args
    # Only load the entire command table if it detects changes in the alias config
    if self.detect_alias_config_change():
        self.load_full_command_table()
        self.collided_alias = AliasManager.build_collision_table(self.alias_table.sections())
        build_tab_completion_table(self.alias_table)
    else:
        self.load_collided_alias()
    transformed_commands = []
    # Start counting at 1 so indices line up with command-tree levels.
    alias_iter = enumerate(args, 1)
    for alias_index, alias in alias_iter:
        # An alias that collides with a reserved command at this position must not be expanded.
        is_collided_alias = alias in self.collided_alias and alias_index in self.collided_alias[alias]
        # Check if the current alias is a named argument
        # index - 2 because alias_iter starts counting at index 1
        is_named_arg = alias_index > 1 and args[alias_index - 2].startswith('-')
        is_named_arg_flag = alias.startswith('-')
        # Never expand aliases inside an alias remove/export invocation.
        excluded_commands = is_alias_command(['remove', 'export'], transformed_commands)
        if not alias or is_collided_alias or is_named_arg or is_named_arg_flag or excluded_commands:
            transformed_commands.append(alias)
            continue
        full_alias = self.get_full_alias(alias)
        if self.alias_table.has_option(full_alias, 'command'):
            cmd_derived_from_alias = self.alias_table.get(full_alias, 'command')
            telemetry.set_alias_hit(full_alias)
        else:
            # Not an alias: pass the token through unchanged.
            transformed_commands.append(alias)
            continue
        pos_args_table = build_pos_args_table(full_alias, args, alias_index)
        if pos_args_table:
            logger.debug(POS_ARG_DEBUG_MSG, full_alias, cmd_derived_from_alias, pos_args_table)
            transformed_commands += render_template(cmd_derived_from_alias, pos_args_table)
            # Skip the next arg(s) because they have been already consumed as a positional argument above
            for pos_arg in pos_args_table:  # pylint: disable=unused-variable
                next(alias_iter)
        else:
            logger.debug(DEBUG_MSG, full_alias, cmd_derived_from_alias)
            transformed_commands += shlex.split(cmd_derived_from_alias)
    return self.post_transform(transformed_commands)
def get_full_alias(self, query):
    """
    Look up the full alias section name that matches a search query.

    Args:
        query: The alias name to search for (without placeholders).

    Returns:
        The matching section name from the alias table (including any
        placeholders), or '' when no section matches.
    """
    if query in self.alias_table.sections():
        return query
    # Fall back to matching only the first word of each section so that
    # aliases carrying positional-argument placeholders are still found.
    for section in self.alias_table.sections():
        if section.split()[0] == query:
            return section
    return ''
def load_full_command_table(self):
    """
    Perform a full load of the command table so all reserved command
    words are known and cached.
    """
    # Default to a no-op loader when the caller did not supply one.
    loader = self.kwargs.get('load_cmd_tbl_func', lambda _: {})
    cache_reserved_commands(loader)
    telemetry.set_full_command_table_loaded()
def post_transform(self, args):
    """
    Inject environment variables and persist alias state after
    transforming aliases to commands.

    Args:
        args: A list of args to post-transform.

    Returns:
        The args with environment variables expanded, except for the value
        following '-c'/'--command' in an alias 'create' command.
    """
    # Drop a leading 'az' so the CLI does not receive it twice.
    if args and args[0] == 'az':
        args = args[1:]

    processed = []
    for index, arg in enumerate(args):
        # The command string of 'az alias create -c ...' must stay verbatim.
        if is_alias_command(['create'], args) and index > 0 and args[index - 1] in ['-c', '--command']:
            processed.append(arg)
        else:
            processed.append(os.path.expandvars(arg))

    AliasManager.write_alias_config_hash(self.alias_config_hash)
    AliasManager.write_collided_alias(self.collided_alias)
    return processed
def build_collision_table(aliases, levels=COLLISION_CHECK_LEVEL_DEPTH):
    """
    Build a table of aliases that collide with reserved commands.

    The result maps a collided alias word to the command-tree levels at
    which the collision occurs. For example {'account': [1, 2]} means
    'account' is reserved at level 1 (az account ...) and at level 2
    (az storage account ...).

    Args:
        aliases: Iterable of alias section names from the configuration.
        levels: How many levels of the command tree to traverse.

    Returns:
        A defaultdict(list) mapping collided alias words to level lists.
    """
    collided = defaultdict(list)
    for alias in aliases:
        # Aliases cannot contain spaces except for positional-argument
        # placeholders, so only the first word can collide.
        head = alias.split()[0]
        for level in range(1, levels + 1):
            # Match 'head' appearing as the level-th word of a reserved command.
            pattern = re.compile(r'^{}{}($|\s)'.format(r'([a-z\-]*\s)' * (level - 1), head.lower()))
            # NOTE: evaluate the match first -- indexing 'collided[head]'
            # would insert an empty entry into the defaultdict.
            if any(pattern.match(cmd) for cmd in azext_alias.cached_reserved_commands) \
                    and level not in collided[head]:
                collided[head].append(level)
    telemetry.set_collided_aliases(list(collided.keys()))
    return collided
def write_alias_config_hash(alias_config_hash='', empty_hash=False):
    """
    Persist the alias configuration hash to the alias hash file.

    Args:
        alias_config_hash: The hash string to write.
        empty_hash: When True, write an empty string instead; an empty hash
            forces a full command-table load on the next run.
    """
    content = '' if empty_hash else alias_config_hash
    with open(GLOBAL_ALIAS_HASH_PATH, 'w') as hash_file:
        hash_file.write(content)
def write_collided_alias(collided_alias_dict):
    """
    Write the collided alias table to the collided alias file as JSON.

    Args:
        collided_alias_dict: Mapping of collided alias words to level lists.
    """
    # Mode 'w' both creates the file if missing and truncates any existing
    # content, so the previous exists()/'r+'-vs-'w+'/truncate() dance (and
    # its check-then-open race) is unnecessary.
    with open(GLOBAL_COLLIDED_ALIAS_PATH, 'w') as collided_alias_file:
        collided_alias_file.write(json.dumps(collided_alias_dict))
def process_exception_message(exception):
    """
    Flatten an exception message into a single-line summary.

    Args:
        exception: The exception to process.

    Returns:
        The exception message with tabs collapsed to spaces, real and
        escaped newlines removed, and 'section' reworded as 'alias'.
    """
    message = str(exception)
    message = message.replace('\t', ' ').replace('\n', '').replace('\\n', '')
    # Users think in terms of aliases, not configparser 'sections'.
    return message.replace('section', 'alias')
def get_network_resource_property_entry(resource, prop):
    """ Factory method for creating get functions. """
    def get_func(cmd, resource_group_name, resource_name, item_name):
        # Resolve the sub-resource collection, then find the item by name
        # (case-insensitively).
        client = getattr(network_client_factory(cmd.cli_ctx), resource)
        items = getattr(client.get(resource_group_name, resource_name), prop)
        match = next((x for x in items if x.name.lower() == item_name.lower()), None)
        if not match:
            raise CLIError("Item '{}' does not exist on {} '{}'".format(
                item_name, resource, resource_name))
        return match
    # Register the generated function on this module under a unique name.
    func_name = 'get_network_resource_property_entry_{}_{}'.format(resource, prop)
    setattr(sys.modules[__name__], func_name, get_func)
    return func_name
def transform_file_directory_result(cli_ctx):
    """
    Create a transformer for file and directory listing results.

    The transformer tags each entry with a 'type' of 'file' or 'dir' and
    strips file content, so files and directories present a uniform view
    in listing output.
    """
    def transformer(result):
        t_file, t_dir = get_sdk(cli_ctx, CUSTOM_DATA_STORAGE, 'File', 'Directory', mod='file.models')
        transformed = []
        for entry in result:
            if isinstance(entry, t_file):
                # Content is not useful in a listing and bloats the output.
                delattr(entry, 'content')
                setattr(entry, 'type', 'file')
            elif isinstance(entry, t_dir):
                setattr(entry, 'type', 'dir')
            transformed.append(entry)
        return transformed
    return transformer
def create_subscription_in_enrollment_account(
        self, enrollment_account_name, body, custom_headers=None, raw=False, polling=True, **operation_config):
    """Creates an Azure subscription.

    :param enrollment_account_name: The name of the enrollment account to
     which the subscription will be billed.
    :type enrollment_account_name: str
    :param body: The subscription creation parameters.
    :type body:
     ~azure.mgmt.subscription.models.SubscriptionCreationParameters
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns
     SubscriptionCreationResult or
     ClientRawResponse<SubscriptionCreationResult> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.subscription.models.SubscriptionCreationResult]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.subscription.models.SubscriptionCreationResult]]
    :raises:
     :class:`ErrorResponseException<azure.mgmt.subscription.models.ErrorResponseException>`
    """
    # Kick off the long-running operation; raw=True so the poller receives
    # the initial HTTP response rather than a deserialized body.
    raw_result = self._create_subscription_in_enrollment_account_initial(
        enrollment_account_name=enrollment_account_name,
        body=body,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Headers of interest returned alongside the final LRO result.
        header_dict = {
            'Location': 'str',
            'Retry-After': 'str',
        }
        deserialized = self._deserialize('SubscriptionCreationResult', response)
        if raw:
            # Caller asked for the raw response wrapper; attach the headers.
            client_raw_response = ClientRawResponse(deserialized, response)
            client_raw_response.add_headers(header_dict)
            return client_raw_response
        return deserialized

    # A per-call timeout overrides the client-wide default when supplied.
    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: ARM polling by default, none, or a
    # caller-supplied polling object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
def transform_gateway(result):
    """Transform a gateway list to table output. """
    # Column order here determines the table layout.
    columns = (
        ('Name', 'name'),
        ('ResourceGroup', 'resourceGroup'),
        ('Location', 'location'),
        ('ProvisioningState', 'provisioningState'),
        ('Status', 'status'),
        ('PublicIP', 'ipAddress'),
    )
    return OrderedDict((header, result.get(key)) for header, key in columns)
def collect_blobs(blob_service, container, pattern=None):
    """
    List blobs in the given container whose paths match the given pattern.

    Args:
        blob_service: The blob service client used for listing.
        container: Name of the container to enumerate.
        pattern: Optional glob pattern; a pattern without wildcards is
            treated as an exact blob name.

    Returns:
        A list of matching blob names.

    Raises:
        ValueError: If blob_service or container is missing.
    """
    if not blob_service:
        raise ValueError('missing parameter blob_service')
    if not container:
        raise ValueError('missing parameter container')

    # A wildcard-free pattern is an exact name: just test for existence.
    if not _pattern_has_wildcards(pattern):
        return [pattern] if blob_service.exists(container, pattern) else []

    matched = []
    for blob in blob_service.list_blobs(container):
        # On Python 2 blob names may be 'unicode'; encode them to utf-8.
        try:
            name = blob.name.encode('utf-8') if isinstance(blob.name, unicode) else blob.name
        except NameError:
            # Python 3: 'unicode' does not exist; names are already str.
            name = blob.name
        if not pattern or _match_path(name, pattern):
            matched.append(name)
    return matched
def glob_files_locally(folder_path, pattern):
    """glob files in local folder based on the given pattern"""
    # Anchor the pattern to the folder so matching runs on full paths.
    full_pattern = os.path.join(folder_path, pattern.lstrip('/')) if pattern else None
    prefix_len = len(folder_path) + 1
    for root, _, filenames in os.walk(folder_path):
        for filename in filenames:
            full_path = os.path.join(root, filename)
            if not full_pattern or _match_path(full_path, full_pattern):
                # Yield (absolute path, path relative to the folder).
                yield (full_path, full_path[prefix_len:])
def glob_files_remotely(cmd, client, share_name, pattern):
    """glob the files in remote file share based on the given pattern"""
    from collections import deque
    t_dir, t_file = cmd.get_models('file.models#Directory', 'file.models#File')
    # Breadth-first walk of the share, starting at its root.
    pending_dirs = deque([""])
    while pending_dirs:
        directory = pending_dirs.pop()
        for entry in client.list_directories_and_files(share_name, directory):
            if isinstance(entry, t_file):
                if not pattern or _match_path(os.path.join(directory, entry.name), pattern):
                    yield directory, entry.name
            elif isinstance(entry, t_dir):
                # Queue sub-directories for a later pass.
                pending_dirs.appendleft(os.path.join(directory, entry.name))
def _register_data_plane_account_arguments(self, command_name):
    """ Add parameters required to create a storage client """
    from azure.cli.core.commands.parameters import get_resource_name_completion_list
    from ._validators import validate_client_parameters
    command = self.command_loader.command_table.get(command_name, None)
    if not command:
        # Command not registered in the table; nothing to augment.
        return
    # All four arguments are grouped together in --help output.
    group_name = 'Storage Account'
    command.add_argument('account_name', '--account-name', required=False, default=None,
                         arg_group=group_name,
                         completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'),
                         help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be '
                         'used in conjunction with either storage account key or a SAS token. If neither are '
                         'present, the command will try to query the storage account key using the '
                         'authenticated Azure account. If a large number of storage commands are executed the '
                         'API quota may be hit')
    command.add_argument('account_key', '--account-key', required=False, default=None,
                         arg_group=group_name,
                         help='Storage account key. Must be used in conjunction with storage account name. '
                         'Environment variable: AZURE_STORAGE_KEY')
    # The validator on --connection-string resolves the final credentials
    # for the whole group once argument parsing completes.
    command.add_argument('connection_string', '--connection-string', required=False, default=None,
                         validator=validate_client_parameters, arg_group=group_name,
                         help='Storage account connection string. Environment variable: '
                         'AZURE_STORAGE_CONNECTION_STRING')
    command.add_argument('sas_token', '--sas-token', required=False, default=None,
                         arg_group=group_name,
                         help='A Shared Access Signature (SAS). Must be used in conjunction with storage account '
                         'name. Environment variable: AZURE_STORAGE_SAS_TOKEN')
def get_k8s_upgrades_completion_list(cmd, prefix, namespace, **kwargs):  # pylint: disable=unused-argument
    """Return Kubernetes versions available for upgrading an existing cluster."""
    resource_group = getattr(namespace, 'resource_group_name', None)
    name = getattr(namespace, 'name', None)
    # Both identifiers are required to query the cluster's upgrade profile.
    if resource_group and name:
        return get_k8s_upgrades(cmd.cli_ctx, resource_group, name)
    return None
def get_k8s_versions_completion_list(cmd, prefix, namespace, **kwargs):  # pylint: disable=unused-argument
    """Return Kubernetes versions available for provisioning a new cluster."""
    location = _get_location(cmd.cli_ctx, namespace)
    # Versions are region-specific; without a location there is nothing to list.
    if not location:
        return None
    return get_k8s_versions(cmd.cli_ctx, location)
def get_k8s_versions(cli_ctx, location):
    """Return a list of Kubernetes versions available for a new cluster."""
    from ._client_factory import cf_container_services
    from jmespath import search  # pylint: disable=import-error
    orchestrators = cf_container_services(cli_ctx).list_orchestrators(
        location, resource_type='managedClusters').as_dict()
    # Collect every "orchestrator_version" field into a single flat list.
    return search('orchestrators[*].orchestrator_version', orchestrators)
def get_vm_size_completion_list(cmd, prefix, namespace, **kwargs):  # pylint: disable=unused-argument
    """Return the intersection of the VM sizes allowed by the ACS SDK with those returned by the Compute Service."""
    location = _get_location(cmd.cli_ctx, namespace)
    compute_sizes = set(r.name for r in get_vm_sizes(cmd.cli_ctx, location))
    sdk_sizes = set(c.value for c in ContainerServiceVMSizeTypes)
    return compute_sizes & sdk_sizes
def normalize_placeholders(arg, inject_quotes=False):
    """
    Normalize placeholder names so the template can be fed to Jinja.

    Jinja rejects purely numeric placeholder names, so numbered placeholders
    get a leading underscore ('{{ 1 }}' -> '{{_1}}'). Optionally, every
    placeholder expression is wrapped in double quotes so spaces inside
    positional arguments survive later shlex splitting.

    Args:
        arg: The string to process.
        inject_quotes: True to surround placeholders with a pair of quotes.

    Returns:
        The processed string.
    """
    for numbered in re.findall(r'{{\s*\d+\s*}}', arg):
        digits = re.search(r'\d+', numbered).group()
        arg = arg.replace(numbered, '{{_' + digits + '}}')
    if inject_quotes:
        return arg.replace('{{', '"{{').replace('}}', '}}"')
    return arg
def build_pos_args_table(full_alias, args, start_index):
    """
    Map placeholder names to the positional argument values supplied.

    Args:
        full_alias: The full alias (including any placeholders).
        args: The arguments that the user inputs in the terminal.
        start_index: The index at which positional arguments begin.

    Returns:
        A dict of placeholder name -> positional argument value.

    Raises:
        CLIError: When fewer positional arguments were supplied than the
            alias declares placeholders.
    """
    placeholders = get_placeholders(full_alias, check_duplicates=True)
    supplied = args[start_index: start_index + len(placeholders)]
    if len(placeholders) != len(supplied):
        raise CLIError(INSUFFICIENT_POS_ARG_ERROR.format(
            full_alias,
            len(placeholders),
            '' if len(placeholders) == 1 else 's',
            len(supplied)))
    # Escape '"' because placeholder expressions are later wrapped in "".
    escaped = [value.replace('"', '\\"') for value in supplied]
    return dict(zip(placeholders, escaped))
def render_template(cmd_derived_from_alias, pos_args_table):
    """
    Render cmd_derived_from_alias as a Jinja template with pos_args_table as the arguments.

    Args:
        cmd_derived_from_alias: The string to be injected with positional arguments.
        pos_args_table: The dictionary used for rendering.

    Returns:
        A processed string with positional arguments injected.

    Raises:
        CLIError: When the template has a compile-time or runtime error.
    """
    try:
        cmd_derived_from_alias = normalize_placeholders(cmd_derived_from_alias, inject_quotes=True)
        template = jinja.Template(cmd_derived_from_alias)
        # Shlex.split allows us to split a string by spaces while preserving quoted substrings
        # (positional arguments in this case)
        rendered = shlex.split(template.render(pos_args_table))
        # Manually check if there is any runtime error (such as index out of range)
        # since Jinja template engine only checks for compile time error.
        # Only check for runtime errors if there is an empty string in rendered.
        if '' in rendered:
            check_runtime_errors(cmd_derived_from_alias, pos_args_table)
        return rendered
    except Exception as exception:
        # Exception raised from runtime error
        if isinstance(exception, CLIError):
            raise
        # The template has some sort of compile time errors
        split_exception_message = str(exception).split()
        # Check if the error message provides the index of the erroneous character
        error_index = split_exception_message[-1]
        if error_index.isdigit():
            split_exception_message.insert(-1, 'index')
            error_msg = RENDER_TEMPLATE_ERROR.format(' '.join(split_exception_message), cmd_derived_from_alias)
            # Calculate where to put an arrow (^) char so that it is exactly below the erroneous character
            # e.g. ... "{{a.split('|)}}"
            #                        ^
            error_msg += '\n{}^'.format(' ' * (len(error_msg) - len(cmd_derived_from_alias) + int(error_index) - 1))
        else:
            # Strip the quote-injection artifacts before showing the message.
            exception_str = str(exception).replace('"{{', '}}').replace('}}"', '}}')
            error_msg = RENDER_TEMPLATE_ERROR.format(cmd_derived_from_alias, exception_str)
        raise CLIError(error_msg)
def float_to_decimal(f):
    """
    Convert a floating point number to a Decimal with no loss of
    information. Intended for Python 2.6 where casting float to Decimal
    does not work.
    """
    num, den = f.as_integer_ratio()
    numerator, denominator = Decimal(num), Decimal(den)
    ctx = Context(prec=60)
    # Keep doubling the precision until the division is exact.
    while True:
        ctx.flags[Inexact] = False
        result = ctx.divide(numerator, denominator)
        if not ctx.flags[Inexact]:
            return result
        ctx.prec *= 2
def rule_variable(field_type, label=None, options=None):
    """ Decorator to make a function into a rule variable

    Args:
        field_type: A BaseType subclass describing the variable's type.
        label: Human-readable label; defaults to a prettified function name.
        options: Optional list of valid values for select-style fields.

    Raises:
        AssertionError: If field_type is not a BaseType subclass.
    """
    options = options or []
    def wrapper(func):
        # isinstance(..., type) also accepts classes built with a custom
        # metaclass, which the previous `type(field_type) == type` check
        # incorrectly rejected; issubclass still enforces the BaseType bound.
        if not (isinstance(field_type, type) and issubclass(field_type, BaseType)):
            raise AssertionError("{0} is not instance of BaseType in"\
                    " rule_variable field_type".format(field_type))
        func.field_type = field_type
        func.is_rule_variable = True
        func.label = label \
            or fn_name_to_pretty_label(func.__name__)
        func.options = options
        return func
    return wrapper
def type_operator(input_type, label=None,
                  assert_type_for_arguments=True):
    """ Decorator to make a function into a type operator.

    - assert_type_for_arguments - if True this patches the operator function
      so that arguments passed to it will have _assert_valid_value_and_cast
      called on them to make type errors explicit.
    """
    def wrapper(func):
        func.is_operator = True
        func.label = label or fn_name_to_pretty_label(func.__name__)
        func.input_type = input_type

        @wraps(func)
        def inner(self, *args, **kwargs):
            if not assert_type_for_arguments:
                return func(self, *args, **kwargs)
            # Validate/cast every positional and keyword argument first so
            # type mismatches surface as explicit errors.
            cast_args = [self._assert_valid_value_and_cast(a) for a in args]
            cast_kwargs = dict((k, self._assert_valid_value_and_cast(v))
                               for k, v in kwargs.items())
            return func(self, *cast_args, **cast_kwargs)
        return inner
    return wrapper
def rule_action(label=None, params=None):
    """ Decorator to make a function into a rule action
    """
    def wrapper(func):
        resolved_params = params
        if isinstance(params, dict):
            # Expand the {name: fieldType} shorthand into the full
            # list-of-dicts form expected downstream.
            resolved_params = [dict(label=fn_name_to_pretty_label(name),
                                    name=name,
                                    fieldType=field_type)
                               for name, field_type in params.items()]
        _validate_action_parameters(func, resolved_params)
        func.is_rule_action = True
        func.label = label or fn_name_to_pretty_label(func.__name__)
        func.params = resolved_params
        return func
    return wrapper
def check_condition(condition, defined_variables):
    """ Checks a single rule condition - the condition will be made up of
    variables, values, and the comparison operator. The defined_variables
    object must have a variable defined for any variables in this condition.
    """
    operator_type = _get_variable_value(defined_variables, condition['name'])
    return _do_operator_comparison(operator_type,
                                   condition['operator'],
                                   condition['value'])
def _do_operator_comparison(operator_type, operator_name, comparison_value):
    """ Finds the method on the given operator_type and compares it to the
    given comparison_value.

    operator_type should be an instance of operators.BaseType
    comparison_value is whatever python type to compare to
    returns a bool
    """
    def fallback(*args, **kwargs):
        # Reached only when operator_name is not defined on operator_type.
        raise AssertionError("Operator {0} does not exist for type {1}".format(
            operator_name, operator_type.__class__.__name__))
    method = getattr(operator_type, operator_name, fallback)
    # Operators declared with FIELD_NO_INPUT take no comparison argument.
    if getattr(method, 'input_type', '') == FIELD_NO_INPUT:
        return method()
    return method(comparison_value)
def build_attrs(self, *args, **kwargs):
    """Disable automatic corrections and completions."""
    attrs = super(CaptchaAnswerInput, self).build_attrs(*args, **kwargs)
    # Browsers must not alter or suggest the captcha answer.
    attrs.update({
        'autocapitalize': 'off',
        'autocomplete': 'off',
        'autocorrect': 'off',
        'spellcheck': 'false',
    })
    return attrs
def fetch_captcha_store(self, name, value, attrs=None, generator=None):
    """
    Fetches a new CaptchaStore
    This has to be called inside render
    """
    # Fail early with a helpful message when captcha.urls is not wired up.
    try:
        reverse('captcha-image', args=('dummy',))
    except NoReverseMatch:
        raise ImproperlyConfigured('Make sure you\'ve included captcha.urls as explained in the INSTALLATION section on http://readthedocs.org/docs/django-simple-captcha/en/latest/usage.html#installation')

    # Either reuse a pre-generated challenge from the pool or create a new one.
    if settings.CAPTCHA_GET_FROM_POOL:
        key = CaptchaStore.pick()
    else:
        key = CaptchaStore.generate_key(generator)

    # these can be used by format_output and render
    self._value = [key, u('')]
    self._key = key
    self.id_ = self.build_attrs(attrs).get('id', None)
def get_context(self, name, value, attrs):
    """Add captcha specific variables to context."""
    context = super(CaptchaTextInput, self).get_context(name, value, attrs)
    # Expose the challenge media URLs to the widget template.
    context.update({'image': self.image_url(), 'audio': self.audio_url()})
    return context
def _direct_render(self, name, attrs):
    """Render the widget the old way - using field_template or output_format."""
    widget_id = attrs.get('id')
    if self.id_prefix:
        widget_id = u'%s_%s' % (self.id_prefix, attrs.get('id'))
    context = {
        'image': self.image_url(),
        'name': name,
        'key': self._key,
        'id': widget_id,
        'audio': self.audio_url(),
    }
    # Pre-render the three partials consumed by format_output.
    self.image_and_audio = render_to_string(settings.CAPTCHA_IMAGE_TEMPLATE, context)
    self.hidden_field = render_to_string(settings.CAPTCHA_HIDDEN_FIELD_TEMPLATE, context)
    self.text_field = render_to_string(settings.CAPTCHA_TEXT_FIELD_TEMPLATE, context)
    return self.format_output(None)
def captcha_refresh(request):
    """ Return json with new captcha for ajax refresh request """
    # Refreshing only makes sense from the AJAX widget; reject anything else.
    if not request.is_ajax():
        raise Http404
    new_key = CaptchaStore.pick()
    payload = {
        'key': new_key,
        'image_url': captcha_image_url(new_key),
        'audio_url': captcha_audio_url(new_key) if settings.CAPTCHA_FLITE_PATH else None,
    }
    return HttpResponse(json.dumps(payload), content_type='application/json')
q32635 | ZohoWebClient._add_zoho_token | train | def _add_zoho_token(
self, uri, http_method="GET", body=None, headers=None, token_placement=None
):
"""Add a zoho token to the request uri, body or authorization header. follows bearer pattern"""
headers = self.prepare_zoho_headers(self.access_token, headers)
return uri, headers, body | python | {
"resource": ""
} |
def prepare_zoho_headers(token, headers=None):
    """Add a `Zoho Token`_ to the request URI.
    Recommended method of passing bearer tokens.
    Authorization: Zoho-oauthtoken h480djs93hd8
    .. _`Zoho-oauthtoken Token`: custom zoho token
    """
    # Note: a caller-supplied (non-empty) dict is mutated in place.
    headers = headers or {}
    # Zoho uses its own scheme name in place of 'Bearer'.
    auth_value = "{token_header} {token}".format(
        token_header=ZOHO_TOKEN_HEADER, token=token
    )
    headers["Authorization"] = auth_value
    return headers
def timestamp_from_datetime(dt):
    """
    Given a datetime, in UTC, return a float that represents the timestamp for
    that datetime.
    http://stackoverflow.com/questions/8777753/converting-datetime-date-to-utc-timestamp-in-python#8778548
    """
    dt = dt.replace(tzinfo=utc)
    # Python 3.3+ has datetime.timestamp(); older versions fall back to
    # manual arithmetic against the Unix epoch.
    if hasattr(dt, "timestamp") and callable(dt.timestamp):
        return dt.timestamp()
    epoch = datetime(1970, 1, 1, tzinfo=utc)
    return (dt - epoch).total_seconds()
def abs_timedelta(delta):
    """Returns an "absolute" value for a timedelta, always representing a
    time distance."""
    if delta.days >= 0:
        return delta
    # Negative delta: mirror it around "now" to get a positive distance.
    now = _now()
    return now - (now + delta)
def naturaltime(value, future=False, months=True):
    """Given a datetime or a number of seconds, return a natural representation
    of that time in a resolution that makes sense. This is more or less
    compatible with Django's ``naturaltime`` filter. ``future`` is ignored for
    datetimes, where the tense is always figured out based on the current time.
    If an integer is passed, the return value will be past tense by default,
    unless ``future`` is set to True."""
    now = _now()
    date, delta = date_and_delta(value)
    if date is None:
        # Input could not be interpreted as a date; echo it back unchanged.
        return value
    # determine tense by value only if datetime/timedelta were passed
    if isinstance(value, (datetime, timedelta)):
        future = date > now
    ago = _('%s from now') if future else _('%s ago')
    delta = naturaldelta(delta, months)
    # Sub-resolution distances collapse to plain "now".
    if delta == _("a moment"):
        return _("now")
    return ago % delta
def naturalday(value, format='%b %d'):
    """For date values that are tomorrow, today or yesterday compared to
    present day returns representing string. Otherwise, returns a string
    formatted according to ``format``."""
    try:
        value = date(value.year, value.month, value.day)
    except AttributeError:
        # Passed value wasn't date-ish
        return value
    except (OverflowError, ValueError):
        # Date arguments out of range
        return value
    offset = (value - date.today()).days
    if offset == 0:
        return _('today')
    if offset == 1:
        return _('tomorrow')
    if offset == -1:
        return _('yesterday')
    return value.strftime(format)
def naturaldate(value):
    """Like naturalday, but will append a year for dates that are a year
    ago or more."""
    try:
        value = date(value.year, value.month, value.day)
    except AttributeError:
        # Passed value wasn't date-ish
        return value
    except (OverflowError, ValueError):
        # Date arguments out of range
        return value
    # A year or more away: include the year in the fallback format.
    if abs_timedelta(value - date.today()).days >= 365:
        return naturalday(value, '%b %d %Y')
    return naturalday(value)
def activate(locale, path=None):
    """Set 'locale' as current locale. Search for locale in directory 'path'
    @param locale: language name, eg 'en_GB'"""
    path = path if path is not None else _DEFAULT_LOCALE_PATH
    # Load (and cache) the catalog only on first activation of this locale.
    if locale not in _TRANSLATIONS:
        _TRANSLATIONS[locale] = gettext_module.translation('humanize', path, [locale])
    _CURRENT.locale = locale
    return _TRANSLATIONS[locale]
def pgettext(msgctxt, message):
    """'Particular gettext' function.

    It works with 'msgctxt' .po modifiers and allow duplicate keys with
    different translations.

    Python 2 don't have support for this GNU gettext function, so we
    reimplement it. It works by joining msgctx and msgid by '4' byte."""
    # GNU gettext joins context and id with an EOT (\x04) byte.
    contextual_key = msgctxt + '\x04' + message
    translated = get_translation().gettext(contextual_key)
    if translated == contextual_key:
        # No contextual translation found: fall back to the bare message.
        return message
    return translated
"resource": ""
} |
def intcomma(value):
    """Converts an integer to a string containing commas every three digits.

    For example, 3000 becomes '3,000' and 45000 becomes '45,000'. To maintain
    some compatibility with Django's intcomma, this function also accepts
    floats. Non-numeric input is returned unchanged.
    """
    try:
        if isinstance(value, compat.string_types):
            float(value.replace(',', ''))
        else:
            float(value)
    except (TypeError, ValueError):
        # Not a number: return the input untouched.
        return value
    orig = str(value)
    # Bug fix: use raw strings for the pattern and replacement. "\d" and
    # "\g" are invalid string escapes and raise warnings on Python 3.6+.
    new = re.sub(r"^(-?\d+)(\d{3})", r'\g<1>,\g<2>', orig)
    if orig == new:
        return new
    # Recurse until every group of three digits has its separator.
    return intcomma(new)
"resource": ""
} |
def apnumber(value):
    """For numbers 1-9, returns the number spelled out. Otherwise, returns the
    number. This follows Associated Press style. This always returns a string
    unless the value was not int-able, unlike the Django filter."""
    try:
        number = int(value)
    except (TypeError, ValueError):
        # Not convertible to int: return the original value as-is.
        return value
    if number < 1 or number > 9:
        return str(number)
    words = (_('one'), _('two'), _('three'), _('four'), _('five'),
             _('six'), _('seven'), _('eight'), _('nine'))
    return words[number - 1]
"resource": ""
} |
def keywords(text):
    """get the top 10 keywords and their frequency scores

    ignores blacklisted words in stopWords,
    counts the number of occurrences of each word,
    and scores each keyword by boosted relative frequency.
    Returns a dict mapping keyword -> score.
    """
    words = split_words(text)
    if not words:
        # Robustness fix: empty input used to raise ZeroDivisionError below.
        return {}
    num_words = len(words)  # word count before removing blacklisted words
    freq = Counter(word for word in words if word not in stopWords)
    top = freq.most_common(min(10, len(freq)))  # keep at most the top 10
    # Score: relative frequency boosted by 1.5, with a +1 floor
    # (same formula as the original: (count / num_words) * 1.5 + 1).
    return {word: count * 1.5 / num_words + 1 for word, count in top}
"resource": ""
} |
def sentence_position(i, size):
    """different sentence positions indicate different
    probability of being an important sentence"""
    normalized = i * 1.0 / size
    # Importance weight for each tenth of the document, first to last.
    boundaries = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
    weights = (0.17, 0.23, 0.14, 0.08, 0.05, 0.04, 0.06, 0.04, 0.04, 0.15)
    lower = 0
    for upper, weight in zip(boundaries, weights):
        if lower < normalized <= upper:
            return weight
        lower = upper
    # Position 0 or outside (0, 1] carries no weight.
    return 0
"resource": ""
} |
def split_title(self, title, splitter):
    """Split the title on `splitter` and keep the best (longest) part,
    cleaned through TITLE_REPLACEMENTS."""
    pieces = splitter.split(title)
    # max() returns the first of equally-long pieces, matching a
    # strictly-greater left-to-right scan.
    best_piece = max(pieces, key=len)
    return TITLE_REPLACEMENTS.replaceAll(best_piece).strip()
"resource": ""
} |
def is_boostable(self, node):
    """Decide whether a node deserves a score boost.

    The first paragraph is often just an image caption, so only boost a
    node when one of the next few sibling paragraphs carries substantial
    text (measured by stopword count).
    """
    minimum_stopword_count = 5
    max_stepsaway_from_node = 3

    paragraphs_checked = 0
    for sibling in self.walk_siblings(node):
        # Only <p> siblings count, both for stepping and for scoring.
        if self.parser.getTag(sibling) != "p":
            continue
        if paragraphs_checked >= max_stepsaway_from_node:
            return False
        sibling_text = self.parser.getText(sibling)
        stats = self.stopwords_class(language=self.language).get_stopword_count(sibling_text)
        if stats.get_stopword_count() > minimum_stopword_count:
            return True
        paragraphs_checked += 1
    return False
"resource": ""
} |
def get_siblings_content(self, current_sibling, baselinescore_siblings_para):
    """\
    adds any siblings that may have a decent score to this node

    Returns a list of <p> elements worth keeping, None when the sibling
    has no nested paragraphs, or a single-element list when the sibling
    itself is a non-empty paragraph.
    """
    if current_sibling.tag == 'p' and len(self.parser.getText(current_sibling)) > 0:
        e0 = current_sibling
        if e0.tail:
            # Copy before clearing the tail text so the original tree
            # is left untouched.
            e0 = deepcopy(e0)
            e0.tail = ''
        return [e0]
    else:
        # Not a non-empty <p> itself: look for paragraphs nested below it.
        potential_paragraphs = self.parser.getElementsByTag(current_sibling, tag='p')
        if potential_paragraphs is None:
            return None
        else:
            ps = []
            for first_paragraph in potential_paragraphs:
                text = self.parser.getText(first_paragraph)
                if len(text) > 0:
                    word_stats = self.stopwords_class(language=self.language).get_stopword_count(text)
                    paragraph_score = word_stats.get_stopword_count()
                    sibling_baseline_score = float(.30)
                    high_link_density = self.is_highlink_density(first_paragraph)
                    score = float(baselinescore_siblings_para * sibling_baseline_score)
                    # Keep paragraphs that beat 30% of the sibling baseline
                    # score and are not mostly links.
                    if score < paragraph_score and not high_link_density:
                        p = self.parser.createElement(tag='p', text=text, tail=None)
                        ps.append(p)
            return ps
"resource": ""
} |
def connection_made(self, transport):
    """Used to signal `asyncio.Protocol` of a successful connection."""
    self._raw_transport = transport
    if isinstance(transport, asyncio.SubprocessTransport):
        # For subprocess transports, write through the child's stdin pipe.
        self._transport = transport.get_pipe_transport(0)
    else:
        self._transport = transport
"resource": ""
} |
def data_received(self, data):
    """Used to signal `asyncio.Protocol` of incoming data."""
    handler = self._on_data
    if handler:
        handler(data)
    else:
        # No consumer attached yet: buffer until one is registered.
        self._queued_data.append(data)
"resource": ""
} |
def pipe_data_received(self, fd, data):
    """Used to signal `asyncio.SubprocessProtocol` of incoming data."""
    STDERR_FD = 2
    if fd == STDERR_FD:
        # Child stderr goes straight to the stderr handler.
        self._on_stderr(data)
        return
    consumer = self._on_data
    if consumer:
        consumer(data)
    else:
        # No consumer attached yet: buffer until one is registered.
        self._queued_data.append(data)
"resource": ""
} |
def format_exc_skip(skip, limit=None):
    """Like traceback.format_exc but allow skipping the first frames."""
    exc_type, exc_value, tb = sys.exc_info()
    # Walk past the frames the caller wants hidden.
    for _ in range(skip):
        tb = tb.tb_next
    formatted = ''.join(format_exception(exc_type, exc_value, tb, limit))
    return formatted.rstrip()
"resource": ""
} |
def request(self, method, args, response_cb):
    """Send a msgpack-rpc request to Nvim.

    A msgpack-rpc with method `method` and argument `args` is sent to
    Nvim. The `response_cb` function is called with when the response
    is available.
    """
    request_id = self._next_request_id
    self._next_request_id += 1
    # msgpack-rpc request message layout: [type=0, id, method, params].
    self._msgpack_stream.send([0, request_id, method, args])
    # Remember the callback so the response dispatcher can find it.
    self._pending_requests[request_id] = response_cb
"resource": ""
} |
def send(self, value, error=False):
    """Send the response.

    If `error` is True, it will be sent as an error.
    """
    # msgpack-rpc response message layout: [type=1, id, error, result].
    err_field, result_field = (value, None) if error else (None, value)
    resp = [1, self._request_id, err_field, result_field]
    debug('sending response to request %d: %s', self._request_id, resp)
    self._msgpack_stream.send(resp)
"resource": ""
} |
def start_host(session=None):
    """Promote the current process into python plugin host for Nvim.

    Start msgpack-rpc event loop for `session`, listening for Nvim requests
    and notifications. It registers Nvim commands for loading/unloading
    python plugins.

    The sys.stdout and sys.stderr streams are redirected to Nvim through
    `session`. That means print statements probably won't work as expected
    while this function doesn't return.

    This function is normally called at program startup and could have been
    defined as a separate executable. It is exposed as a library function for
    testing purposes only.
    """
    plugins = []
    # Collect plugin paths from argv: plain .py files and package
    # directories that contain an __init__.py.
    for arg in sys.argv:
        _, ext = os.path.splitext(arg)
        if ext == '.py':
            plugins.append(arg)
        elif os.path.isdir(arg):
            init = os.path.join(arg, '__init__.py')
            if os.path.isfile(init):
                plugins.append(arg)

    # This is a special case to support the old workaround of
    # adding an empty .py file to make a package directory
    # visible, and it should be removed soon.
    for path in list(plugins):
        dup = path + ".py"
        if os.path.isdir(path) and dup in plugins:
            plugins.remove(dup)

    # Special case: the legacy scripthost receives a single relative filename
    # while the rplugin host will receive absolute paths.
    if plugins == ["script_host.py"]:
        name = "script"
    else:
        name = "rplugin"

    setup_logging(name)

    if not session:
        session = stdio_session()
    nvim = Nvim.from_session(session)

    # Refuse to run against Nvim builds older than API level 1 (< 0.1.6).
    if nvim.version.api_level < 1:
        sys.stderr.write("This version of pynvim "
                         "requires nvim 0.1.6 or later")
        sys.exit(1)

    host = Host(nvim)
    host.start(plugins)
"resource": ""
} |
def attach(session_type, address=None, port=None,
           path=None, argv=None, decode=None):
    """Provide a nicer interface to create python api sessions.

    Usage examples:

        nvim = attach('tcp', address=<address>, port=<port>)
        nvim = attach('socket', path=<path>)
        nvim = attach('child', argv=<argv>)
        nvim = attach('stdio')

    When the session is not needed anymore, it is recommended to
    explicitly close it with nvim.close(), or to use the session as a
    context manager:

        with attach('socket', path=thepath) as nvim:
            print(nvim.funcs.getpid())
            print(nvim.current.line)

    which closes the session automatically, even on error.
    """
    # Only the selected factory runs; unknown types leave session unset.
    session = None
    if session_type == 'tcp':
        session = tcp_session(address, port)
    elif session_type == 'socket':
        session = socket_session(path)
    elif session_type == 'stdio':
        session = stdio_session()
    elif session_type == 'child':
        session = child_session(argv)

    if not session:
        raise Exception('Unknown session type "%s"' % session_type)
    if decode is None:
        # Decode binary strings automatically on Python 3 by default.
        decode = IS_PYTHON3
    return Nvim.from_session(session).with_decode(decode)
"resource": ""
} |
def setup_logging(name):
    """Setup logging according to environment variables.

    NVIM_PYTHON_LOG_FILE enables file logging (a per-interpreter,
    per-host-name suffix is appended); NVIM_PYTHON_LOG_LEVEL sets the
    level (defaults to INFO). Without the file variable this is a no-op.
    """
    logger = logging.getLogger(__name__)
    if 'NVIM_PYTHON_LOG_FILE' not in os.environ:
        return

    prefix = os.environ['NVIM_PYTHON_LOG_FILE'].strip()
    logfile = '{}_py{}_{}'.format(prefix, sys.version_info[0], name)
    handler = logging.FileHandler(logfile, 'w', 'utf-8')
    handler.formatter = logging.Formatter(
        '%(asctime)s [%(levelname)s @ '
        '%(filename)s:%(funcName)s:%(lineno)s] %(process)s - %(message)s')
    logging.root.addHandler(handler)

    level = logging.INFO
    env_level = os.environ.get('NVIM_PYTHON_LOG_LEVEL')
    if env_level is not None:
        # Accept level names ('DEBUG', ...); ignore unknown values.
        resolved = getattr(logging, env_level.strip(), level)
        if isinstance(resolved, int):
            level = resolved
    logger.setLevel(level)
"resource": ""
} |
def main(argv=sys.argv[1:]):
    """Parse the command line and modify logging statements in FILE.

    Returns 0 on success and -1 on failure.
    """
    usage = 'usage: %prog [options] FILE\n\n' + __doc__
    parser = OptionParser(usage)

    # options
    parser.add_option("-f", "--force",
                      action='store_true', default=False,
                      help="make changes even if they cannot undone before saving the new file")
    parser.add_option("-m", "--min_level",
                      default='NONE',
                      help="minimum level of logging statements to modify [default: no minimum]")
    parser.add_option("-M", "--max_level",
                      default='NONE',
                      help="maximum level of logging statements to modify [default: no maximum]")
    parser.add_option("-o", "--output-file",
                      default=None,
                      help="where to output the result [default: overwrite the input file]")
    parser.add_option("-r", "--restore",
                      action='store_true', default=False,
                      help="restore logging statements previously commented out and replaced with pass statements")
    parser.add_option("-v", "--verbose",
                      action='store_true', default=False,
                      help="print informational messages about changes made")
    (options, args) = parser.parse_args(argv)
    if len(args) != 1:
        parser.error("expected 1 argument but got %d arguments: %s" % (len(args), ' '.join(args)))
    input_fn = args[0]
    if not options.output_file:
        options.output_file = input_fn

    # validate min/max level
    LEVEL_CHOICES = LEVELS + ['NONE']
    min_level_value = 0 if options.min_level == 'NONE' else get_level_value(options.min_level)
    # Bug fix: check the *computed* value. options.min_level is a string
    # defaulting to 'NONE', so the old "options.min_level is None" test
    # could never fire and invalid levels slipped through.
    if min_level_value is None:
        parser.error("min level must be an integer or one of these values: %s" % ', '.join(LEVEL_CHOICES))
    # Bug fix: sys.maxint was removed in Python 3; sys.maxsize exists on
    # Python 2.6+ as well, so it is a portable "no maximum" sentinel.
    max_level_value = sys.maxsize if options.max_level == 'NONE' else get_level_value(options.max_level)
    if max_level_value is None:
        parser.error("max level must be an integer or one of these values: %s" % ', '.join(LEVEL_CHOICES))

    if options.verbose:
        logging.getLogger().setLevel(logging.INFO)

    try:
        return modify_logging(input_fn, options.output_file,
                              min_level_value, max_level_value,
                              options.restore, options.force)
    except IOError as e:
        logging.error(str(e))
        return -1
"resource": ""
} |
def comment_lines(lines):
    """Comment out the given list of lines and return them. The hash mark will
    be inserted before the first non-whitespace character on each line."""
    def _comment(line):
        leading_ws, body = RE_LINE_SPLITTER_COMMENT.match(line).groups()[:2]
        return leading_ws + '#' + body
    return ''.join(_comment(line) for line in lines)
"resource": ""
} |
def uncomment_lines(lines):
    """Uncomment the given list of lines and return them. The first hash mark
    following any amount of whitespace will be removed on each line."""
    def _uncomment(line):
        leading_ws, body = RE_LINE_SPLITTER_UNCOMMENT.match(line).groups()[:2]
        return leading_ws + body
    return ''.join(_uncomment(line) for line in lines)
"resource": ""
} |
def get_level_value(level):
    """Returns the logging value associated with a particular level name. The
    argument must be present in LEVELS_DICT or be an integer constant.
    Otherwise None will be returned."""
    # Integral constants also work: they are the level value.
    try:
        return int(level)
    except ValueError:
        pass
    # Fall back to a (case-insensitive) level-name lookup.
    try:
        return LEVELS_DICT[level.upper()]
    except KeyError:
        logging.warning("level '%s' cannot be translated to a level value (not present in LEVELS_DICT)" % level)
        return None
"resource": ""
} |
def get_logging_level(logging_stmt, commented_out=False):
    """Determines the level of logging in a given logging statement. The string
    representing this level is returned. False is returned if the method is
    not a logging statement and thus has no level. None is returned if a level
    should have been found but wasn't."""
    # Commented-out statements need the regex variant that skips the '#'.
    regexp = RE_LOGGING_START_IN_COMMENT if commented_out else RE_LOGGING_START
    ret = regexp.match(logging_stmt)
    _, method_name, _, first_arg = ret.groups()
    if method_name not in LOGGING_METHODS_OF_INTEREST:
        logging.debug('skipping uninteresting logging call: %s' % method_name)
        return False
    # For logging.info(...), logging.debug(...), etc. the method name is
    # itself the level; only logging.log(...) hides it in the first argument.
    if method_name != 'log':
        return method_name

    # if the method name did not specify the level, we must have a first_arg to extract the level from
    if not first_arg:
        logging.warning("logging.log statement found but we couldn't extract the first argument")
        return None

    # extract the level of logging from the first argument to the log() call
    level = first_arg_to_level_name(first_arg)
    if level is None:
        logging.warning("arg does not contain any known level '%s'\n" % first_arg)
        return None
    return level
"resource": ""
} |
def level_is_between(level, min_level_value, max_level_value):
    """Returns True if level is between the specified min or max, inclusive."""
    value = get_level_value(level)
    # Unknown level names resolve to None and never match.
    return value is not None and min_level_value <= value <= max_level_value
"resource": ""
} |
def split_call(lines, open_paren_line=0):
    """Returns a 2-tuple where the first element is the list of lines from the
    first open paren in lines to the matching closed paren. The second element
    is all remaining lines in a list.

    Raises Exception if the designated line has no open parenthesis or if
    the parentheses never balance.
    """
    num_open = 0
    num_closed = 0
    for i, line in enumerate(lines):
        c = line.count('(')
        num_open += c
        if not c and i == open_paren_line:
            # Bug fix: corrected the message typo ("Exception" -> "Expected").
            raise Exception('Expected open parenthesis in line %d but there is not one there: %s' % (i, str(lines)))
        num_closed += line.count(')')
        if num_open == num_closed:
            return (lines[:i + 1], lines[i + 1:])
    print(''.join(lines))
    raise Exception('parenthesis are mismatched (%d open, %d closed found)' % (num_open, num_closed))
"resource": ""
} |
def modify_logging(input_fn, output_fn, min_level_value, max_level_value, restore, force):
    """Modifies logging statements in the specified file.

    input_fn / output_fn: source file and destination (may be the same).
    min_level_value / max_level_value: inclusive numeric level range.
    restore: True re-enables previously disabled statements; False disables.
    force: write the result even if the edit cannot be round-tripped.
    Returns 0 on success, -1 when the change is irreversible and not forced.
    """
    # read in all the lines
    logging.info('reading in %s' % input_fn)
    fh = open(input_fn, 'r')
    lines = fh.readlines()
    fh.close()
    original_contents = ''.join(lines)

    # Pick the transformation and its inverse for the round-trip check.
    if restore:
        forwards = restore_logging
        backwards = disable_logging
    else:
        forwards = disable_logging
        backwards = restore_logging

    # apply the requested action
    new_contents = forwards(lines, min_level_value, max_level_value)

    # quietly check to see if we can undo what we just did (if not, the text
    # contains something we cannot translate [bug or limitation with this code])
    logging.disable(logging.CRITICAL)
    new_contents_undone = backwards(new_contents.splitlines(True), min_level_value, max_level_value)
    logging.disable(logging.DEBUG)
    if original_contents != new_contents_undone:
        base_str = 'We are unable to revert this action as expected'
        if force:
            logging.warning(base_str + " but -f was specified so we'll do it anyway.")
        else:
            logging.error(base_str + ', so we will not do it in the first place. Pass -f to override this and make the change anyway.')
            return -1

    logging.info('writing the new contents to %s' % output_fn)
    fh = open(output_fn, 'w')
    fh.write(new_contents)
    fh.close()
    logging.info('done!')
    return 0
"resource": ""
} |
def check_level(logging_stmt, logging_stmt_is_commented_out, min_level_value, max_level_value):
    """Extracts the level of the logging statement and returns True if the
    level falls between min and max_level_value, inclusive. If the level
    cannot be extracted, then a warning is logged."""
    level = get_logging_level(logging_stmt, logging_stmt_is_commented_out)
    if level is False:
        # Not a logging call of interest at all: stay silent.
        return False
    if level is None:
        logging.warning('skipping logging statement because the level could not be extracted: %s' % logging_stmt.strip())
        return False
    if level_is_between(level, min_level_value, max_level_value):
        return True
    logging.debug('keep this one as is (not in the specified level range): %s' % logging_stmt.strip())
    return False
"resource": ""
} |
def disable_logging(lines, min_level_value, max_level_value):
    """Disables logging statements in these lines whose logging level falls
    between the specified minimum and maximum levels.

    Returns the rewritten file contents as a single string. Disabled
    statements are commented out and preceded by a `pass` placeholder so
    the enclosing block stays syntactically valid.
    """
    output = ''
    # Consume `lines` from the front; multi-line logging calls are taken
    # off in one chunk by split_call.
    while lines:
        line = lines[0]
        ret = RE_LOGGING_START.match(line)
        if not ret:
            # no logging statement here, so just leave the line as-is and keep going
            output += line
            lines = lines[1:]
        else:
            # a logging call has started: find all the lines it includes and those it does not
            logging_lines, remaining_lines = split_call(lines)
            lines = remaining_lines
            logging_stmt = ''.join(logging_lines)

            # replace the logging statement if its level falls b/w min and max
            if not check_level(logging_stmt, False, min_level_value, max_level_value):
                output += logging_stmt
            else:
                # comment out this logging statement and replace it with pass
                prefix_ws = ret.group(1)
                pass_stmt = prefix_ws + PASS_LINE_CONTENTS
                commented_out_logging_lines = comment_lines(logging_lines)
                new_lines = pass_stmt + commented_out_logging_lines
                logging.info('replacing:\n%s\nwith this:\n%s' % (logging_stmt.rstrip(), new_lines.rstrip()))
                output += new_lines
    return output
"resource": ""
} |
def check_async(async_, kwargs, default):
    """Return a value of 'async' in kwargs or default when async_ is None.

    This helper function exists for backward compatibility (See #274).
    It shows a warning message when 'async' in kwargs is used to note users.
    """
    if async_ is not None:
        return async_
    if 'async' not in kwargs:
        return default
    # Legacy spelling: warn, then consume the keyword so it is not
    # forwarded any further.
    warnings.warn(
        '"async" attribute is deprecated. Use "async_" instead.',
        DeprecationWarning,
    )
    return kwargs.pop('async')
"resource": ""
} |
def plugin(cls):
    """Tag a class as a plugin.

    This decorator is required to make the class methods discoverable by the
    plugin_load method of the host.
    """
    cls._nvim_plugin = True

    # Methods in a plugin-decorated class receive the nvim object through
    # the class initializer, so clear the _nvim_bind flag that the rpc
    # decorators set by default.
    def _is_rpc_bound(member):
        return hasattr(member, '_nvim_bind')

    for _, member in inspect.getmembers(cls, _is_rpc_bound):
        if IS_PYTHON3:
            member._nvim_bind = False
        else:
            member.im_func._nvim_bind = False
    return cls
"resource": ""
} |
def rpc_export(rpc_method_name, sync=False):
    """Export a function or plugin method as a msgpack-rpc request handler."""
    def decorator(func):
        # Attach the rpc metadata the host inspects at load time.
        func._nvim_rpc_method_name = rpc_method_name
        func._nvim_rpc_sync = sync
        func._nvim_bind = True
        func._nvim_prefix_plugin_path = False
        return func
    return decorator
"resource": ""
} |
def command(name, nargs=0, complete=None, range=None, count=None, bang=False,
            register=False, sync=False, allow_nested=False, eval=None):
    """Tag a function or plugin method as a Nvim command handler."""
    def decorator(func):
        func._nvim_rpc_method_name = 'command:{}'.format(name)
        func._nvim_rpc_sync = sync
        func._nvim_bind = True
        func._nvim_prefix_plugin_path = True

        # Build the :command options; 'range' and 'count' are mutually
        # exclusive and 'range' takes precedence.
        opts = {}
        if range is not None:
            opts['range'] = '' if range is True else str(range)
        elif count is not None:
            opts['count'] = count
        if bang:
            opts['bang'] = ''
        if register:
            opts['register'] = ''
        if nargs:
            opts['nargs'] = nargs
        if complete:
            opts['complete'] = complete
        if eval:
            opts['eval'] = eval

        rpc_sync = 'urgent' if (not sync and allow_nested) else sync
        func._nvim_rpc_spec = {
            'type': 'command',
            'name': name,
            'sync': rpc_sync,
            'opts': opts,
        }
        return func
    return decorator
"resource": ""
} |
def autocmd(name, pattern='*', sync=False, allow_nested=False, eval=None):
    """Tag a function or plugin method as a Nvim autocommand handler."""
    def decorator(func):
        func._nvim_rpc_method_name = 'autocmd:{}:{}'.format(name, pattern)
        func._nvim_rpc_sync = sync
        func._nvim_bind = True
        func._nvim_prefix_plugin_path = True

        opts = {'pattern': pattern}
        if eval:
            opts['eval'] = eval

        rpc_sync = 'urgent' if (not sync and allow_nested) else sync
        func._nvim_rpc_spec = {
            'type': 'autocmd',
            'name': name,
            'sync': rpc_sync,
            'opts': opts,
        }
        return func
    return decorator
"resource": ""
} |
def function(name, range=False, sync=False, allow_nested=False, eval=None):
    """Tag a function or plugin method as a Nvim function handler."""
    def decorator(func):
        func._nvim_rpc_method_name = 'function:{}'.format(name)
        func._nvim_rpc_sync = sync
        func._nvim_bind = True
        func._nvim_prefix_plugin_path = True

        opts = {}
        if range:
            opts['range'] = '' if range is True else str(range)
        if eval:
            opts['eval'] = eval

        rpc_sync = 'urgent' if (not sync and allow_nested) else sync
        func._nvim_rpc_spec = {
            'type': 'function',
            'name': name,
            'sync': rpc_sync,
            'opts': opts,
        }
        return func
    return decorator
"resource": ""
} |
def from_session(cls, session):
    """Create a new Nvim instance for a Session instance.

    This method must be called to create the first Nvim instance, since it
    queries Nvim metadata for type information and sets a SessionHook for
    creating specialized objects from Nvim remote handles.
    """
    session.error_wrapper = lambda e: NvimError(e[1])
    channel_id, metadata = session.request(b'vim_get_api_info')

    if IS_PYTHON3:
        # decode all metadata strings for python3
        metadata = walk(decode_if_bytes, metadata)

    # Map the msgpack extension type ids reported by Nvim to their
    # python wrapper classes.
    types = {
        metadata['types']['Buffer']['id']: Buffer,
        metadata['types']['Window']['id']: Window,
        metadata['types']['Tabpage']['id']: Tabpage,
    }

    return cls(session, channel_id, metadata, types)
"resource": ""
} |
def from_nvim(cls, nvim):
    """Create a new Nvim instance from an existing instance.

    The session, channel, metadata and error callback are shared; only
    the wrapper object is new.
    """
    return cls(nvim._session, nvim.channel_id, nvim.metadata,
               nvim.types, nvim._decode, nvim._err_cb)
"resource": ""
} |
def request(self, name, *args, **kwargs):
    r"""Send an API request or notification to nvim.

    It is rarely needed to call this function directly, as most API
    functions have python wrapper functions. The `api` object can
    be also be used to call API functions as methods:

        vim.api.err_write('ERROR\n', async_=True)
        vim.current.buffer.api.get_mark('.')

    is equivalent to

        vim.request('nvim_err_write', 'ERROR\n', async_=True)
        vim.request('nvim_buf_get_mark', vim.current.buffer, '.')

    Normally a blocking request will be sent. If the `async_` flag is
    present and True, a asynchronous notification is sent instead. This
    will never block, and the return value or error is ignored.
    """
    if (self._session._loop_thread is not None
            and threading.current_thread() != self._session._loop_thread):
        # Requests are only valid from the event-loop thread; report the
        # offending call site through the error callback, then fail.
        msg = ("Request from non-main thread.\n"
               "Requests from different threads should be wrapped "
               "with nvim.async_call(cb, ...) \n{}\n"
               .format('\n'.join(format_stack(None, 5)[:-1])))
        self.async_call(self._err_cb, msg)
        raise NvimError("request from non-main thread")

    decode = kwargs.pop('decode', self._decode)
    # Convert python wrapper objects to msgpack handles on the way out,
    # and convert handles in the response back to wrappers.
    args = walk(self._to_nvim, args)
    res = self._session.request(name, *args, **kwargs)
    return walk(self._from_nvim, res, decode=decode)
"resource": ""
} |
def with_decode(self, decode=True):
    """Return a new Nvim instance with the given string-decode setting.

    The new instance shares this one's session, channel id, metadata and
    types; only the `decode` flag differs.
    """
    return Nvim(self._session, self.channel_id,
                self.metadata, self.types, decode, self._err_cb)
"resource": ""
} |
def ui_attach(self, width, height, rgb=None, **kwargs):
    """Register as a remote UI.

    After this method is called, the client will receive redraw
    notifications.
    """
    # Remaining keyword arguments become UI options; rgb is folded in
    # only when explicitly given.
    options = dict(kwargs)
    if rgb is not None:
        options['rgb'] = rgb
    return self.request('nvim_ui_attach', width, height, options)
"resource": ""
} |
def call(self, name, *args, **kwargs):
    """Call a vimscript function.

    `args` are passed as the function's argument list; keyword arguments
    (e.g. ``async_=True``) are forwarded to `request`.
    """
    return self.request('nvim_call_function', name, args, **kwargs)
"resource": ""
} |
def exec_lua(self, code, *args, **kwargs):
    """Execute lua code.

    Additional parameters are available as `...` inside the lua chunk.
    Only statements are executed. To evaluate an expression, prefix it
    with `return`: `return my_function(...)`

    There is a shorthand syntax to call lua functions with arguments:

        nvim.lua.func(1,2)
        nvim.lua.mymod.myfunction(data, async_=True)

    is equivalent to

        nvim.exec_lua("return func(...)", 1, 2)
        nvim.exec_lua("mymod.myfunction(...)", data, async_=True)

    Note that with `async_=True` there is no return value.
    """
    # `args` become the lua varargs; kwargs (e.g. async_) go to `request`.
    return self.request('nvim_execute_lua', code, args, **kwargs)
"resource": ""
} |
def feedkeys(self, keys, options='', escape_csi=True):
    """Push `keys` to Nvim user input buffer.

    Options can be a string with the following character flags:

    - 'm': Remap keys. This is default.
    - 'n': Do not remap keys.
    - 't': Handle keys as if typed; otherwise they are handled as if coming
      from a mapping. This matters for undo, opening folds, etc.

    `escape_csi` is forwarded to nvim_feedkeys unchanged.
    """
    return self.request('nvim_feedkeys', keys, options, escape_csi)
"resource": ""
} |
def replace_termcodes(self, string, from_part=False, do_lt=True,
                      special=True):
    r"""Replace any terminal code strings by byte sequences.

    The returned sequences are Nvim's internal representation of keys,
    for example:

        <esc> -> '\x1b'
        <cr>  -> '\r'
        <c-l> -> '\x0c'
        <up>  -> '\x80ku'

    The returned sequences can be used as input to `feedkeys`.

    `from_part`, `do_lt` and `special` are forwarded to
    nvim_replace_termcodes unchanged.
    """
    return self.request('nvim_replace_termcodes', string,
                        from_part, do_lt, special)
"resource": ""
} |
def err_write(self, msg, **kwargs):
    r"""Print `msg` as an error message.

    The message is buffered (won't display) until linefeed ("\n").
    Keyword arguments (e.g. ``async_``) are forwarded to `request`.
    """
    if self._thread_invalid():
        # special case: if a non-main thread writes to stderr
        # i.e. due to an uncaught exception, pass it through
        # without raising an additional exception.
        self.async_call(self.err_write, msg, **kwargs)
        return
    return self.request('nvim_err_write', msg, **kwargs)
"resource": ""
} |
def async_call(self, fn, *args, **kwargs):
    """Schedule `fn` to be called by the event loop soon.

    This function is thread-safe, and is the only way code not
    on the main thread could interact with nvim api objects.

    This function can also be called in a synchronous
    event handler, just before it returns, to defer execution
    that shouldn't block neovim.
    """
    # Capture the caller's stack now so an error raised when the callback
    # eventually runs can point back at the scheduling site.
    call_point = ''.join(format_stack(None, 5)[:-1])

    def handler():
        try:
            fn(*args, **kwargs)
        except Exception as err:
            msg = ("error caught while executing async callback:\n"
                   "{!r}\n{}\n \nthe call was requested at\n{}"
                   .format(err, format_exc_skip(1), call_point))
            self._err_cb(msg)
            raise
    self._session.threadsafe_call(handler)
"resource": ""
} |
def append(self, lines, index=-1):
    """Append a string or list of lines to the buffer."""
    # A single string/bytes value is treated as a one-line append.
    payload = [lines] if isinstance(lines, (basestring, bytes)) else lines
    return self.request('nvim_buf_set_lines', index, index, True, payload)
"resource": ""
} |
def add_highlight(self, hl_group, line, col_start=0,
                  col_end=-1, src_id=-1, async_=None,
                  **kwargs):
    """Add a highlight to the buffer."""
    # src_id == 0 asks Nvim to allocate a new source id, which requires
    # a synchronous round trip; any other id defaults to async.
    use_async = check_async(async_, kwargs, src_id != 0)
    return self.request('nvim_buf_add_highlight', src_id, hl_group, line,
                        col_start, col_end, async_=use_async)
"resource": ""
} |
def clear_highlight(self, src_id, line_start=0, line_end=-1, async_=None,
                    **kwargs):
    """Clear highlights from the buffer."""
    # Clearing never needs a result, so it defaults to async.
    use_async = check_async(async_, kwargs, True)
    self.request('nvim_buf_clear_highlight', src_id, line_start, line_end,
                 async_=use_async)
"resource": ""
} |
def update_highlights(self, src_id, hls, clear_start=0, clear_end=-1,
                      clear=False, async_=True):
    """Add or update highlights in batch to avoid unnecessary redraws.

    A `src_id` must have been allocated prior to use of this function.
    Use for instance `nvim.new_highlight_source()` to get a src_id for
    your plugin.

    `hls` should be a list of highlight items. Each item should be a
    list or tuple on the form `("GroupName", linenr, col_start, col_end)`
    or `("GroupName", linenr)` to highlight an entire line.

    By default existing highlights are preserved. Specify a line range
    with clear_start and clear_end to replace highlights in this range.
    As a shorthand, use clear=True to clear the entire buffer before
    adding the new highlights.
    """
    # clear=True with an explicit clear_start=None means "from line 0".
    start = 0 if (clear and clear_start is None) else clear_start
    helper = self._session._get_lua_private()
    helper.update_highlights(self, src_id, hls, start, clear_end,
                             async_=async_)
"resource": ""
} |
def start(self, plugins):
    """Start listening for msgpack-rpc requests and notifications."""
    # Plugin loading is deferred until the loop's setup callback runs.
    on_setup = lambda: self._load(plugins)
    self.nvim.run_loop(self._on_request, self._on_notification, on_setup,
                       err_cb=self._on_async_err)
"resource": ""
} |
def _on_request(self, name, args):
    """Handle a msgpack-rpc request."""
    # On Python 3 the method name arrives as bytes; normalize to str.
    key = decode_if_bytes(name) if IS_PYTHON3 else name
    fn = self._request_handlers.get(key, None)
    if not fn:
        msg = self._missing_handler_error(key, 'request')
        error(msg)
        # Requests must be answered: surface the failure to the caller.
        raise ErrorResponse(msg)
    debug('calling request handler for "%s", args: "%s"', key, args)
    result = fn(*args)
    debug("request handler for '%s %s' returns: %s", key, args, result)
    return result
"resource": ""
} |
def _on_notification(self, name, args):
    """Handle a msgpack-rpc notification."""
    # On Python 3 the method name arrives as bytes; normalize to str.
    key = decode_if_bytes(name) if IS_PYTHON3 else name
    fn = self._notification_handlers.get(key, None)
    if not fn:
        msg = self._missing_handler_error(key, 'notification')
        error(msg)
        # Notifications have no reply channel, so report asynchronously.
        self._on_async_err(msg + "\n")
        return
    debug('calling notification handler for "%s", args: "%s"', key, args)
    fn(*args)
"resource": ""
} |
def decode_if_bytes(obj, mode=True):
    """Decode obj if it is bytes."""
    # mode=True is a sentinel meaning "use the module-wide error policy".
    if mode is True:
        mode = unicode_errors_default
    if not isinstance(obj, bytes):
        return obj
    return obj.decode("utf-8", errors=mode)
"resource": ""
} |
def request(self, name, *args, **kwargs):
    """Wrapper for nvim.request."""
    # Remote objects inject themselves as the first API argument.
    session = self._session
    return session.request(name, self, *args, **kwargs)
"resource": ""
} |
def send(self, msg):
    """Queue `msg` for sending to Nvim."""
    debug('sent %s', msg)
    # Serialize to msgpack bytes before handing off to the event loop.
    packed = self._packer.pack(msg)
    self.loop.send(packed)
"resource": ""
} |
def run(self, message_cb):
    """Run the event loop to receive messages from Nvim.

    While the event loop is running, `message_cb` will be called
    whenever a message has been successfully parsed from the input
    stream.
    """
    # Install the callback only for the lifetime of the loop run.
    self._message_cb = message_cb
    self.loop.run(self._on_data)
    # Loop finished: detach the callback again.
    self._message_cb = None
"resource": ""
} |
def threadsafe_call(self, fn, *args, **kwargs):
    """Wrapper around `AsyncSession.threadsafe_call`.

    Schedules `fn(*args, **kwargs)` to run inside a fresh greenlet on
    the event loop thread. Exceptions raised by `fn` are logged via
    `warn` rather than propagated, since there is no caller to receive
    them.
    """
    def handler():
        try:
            fn(*args, **kwargs)
        except Exception:
            # Fixed typo in the log message ("excecuting" -> "executing").
            warn("error caught while executing async callback\n%s\n",
                 format_exc())

    def greenlet_wrapper():
        # Run the handler on its own greenlet so it may yield on
        # nested requests.
        gr = greenlet.greenlet(handler)
        gr.switch()

    self._async_session.threadsafe_call(greenlet_wrapper)
"resource": ""
} |
def request(self, method, *args, **kwargs):
    """Send a msgpack-rpc request and block until a response is received.

    If the event loop is running, this method must have been called by a
    request or notification handler running on a greenlet. In that case,
    send the request and yield to the parent greenlet until a response is
    available.

    When the event loop is not running, it will perform a blocking request
    like this:
    - Send the request
    - Run the loop until the response is available
    - Put requests/notifications received while waiting into a queue

    If the `async_` flag is present and True, an asynchronous notification
    is sent instead. This will never block, and the return value or error
    is ignored.
    """
    async_ = check_async(kwargs.pop('async_', None), kwargs, False)
    if async_:
        # Fire-and-forget: errors and return values are discarded.
        self._async_session.notify(method, args)
        return
    if kwargs:
        raise ValueError("request got unsupported keyword argument(s): {}"
                         .format(', '.join(kwargs.keys())))

    if self._is_running:
        # Called from a handler greenlet: yield until the response arrives.
        v = self._yielding_request(method, args)
    else:
        # Loop not running: block here, queuing messages received meanwhile.
        v = self._blocking_request(method, args)
    if not v:
        # Nvim went away while we were waiting.
        raise IOError('EOF')
    err, rv = v
    if err:
        # Fixed a stray leading apostrophe in the log message.
        info("Received error: %s", err)
        raise self.error_wrapper(err)
    return rv
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.