_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def with_path(self, path, *, encoded=False):
    """Return a new URL with the path component replaced.

    The query and fragment are cleared. Unless ``encoded`` is true, the
    path is percent-quoted first; for absolute URLs it is also
    normalized. A non-empty path is forced to start with a slash.
    """
    if not encoded:
        path = self._PATH_QUOTER(path)
        if self.is_absolute():
            path = self._normalize_path(path)
    if path and not path.startswith("/"):
        path = "/" + path
    return URL(
        self._val._replace(path=path, query="", fragment=""),
        encoded=True,
    )
"resource": ""
} |
def with_query(self, *args, **kwargs):
    """Return a new URL with the query part replaced.

    Accepts a str, any Mapping (e.g. dict, multidict.MultiDict
    instances) or a sequence of (key, value) pairs, auto-encoding the
    argument when needed. An arbitrary number of keyword arguments is
    supported as well. Passing None clears the query.
    """
    # N.B. doesn't cleanup query/fragment
    new_query = self._get_str_query(*args, **kwargs)
    replaced = self._val._replace(path=self._val.path, query=new_query)
    return URL(replaced, encoded=True)
"resource": ""
} |
def update_query(self, *args, **kwargs):
    """Return a new URL whose query part is merged with the given one."""
    incoming = MultiDict(
        parse_qsl(self._get_str_query(*args, **kwargs),
                  keep_blank_values=True)
    )
    merged = MultiDict(self.query)
    merged.update(incoming)
    return URL(
        self._val._replace(query=self._get_str_query(merged)),
        encoded=True,
    )
"resource": ""
} |
def with_fragment(self, fragment):
    """Return a new URL with the fragment replaced.

    The fragment is auto-encoded when needed; passing None clears it.
    Raises TypeError for any non-str, non-None fragment.
    """
    # N.B. doesn't cleanup query/fragment
    if fragment is None:
        quoted = self._FRAGMENT_QUOTER("")
    elif isinstance(fragment, str):
        quoted = self._FRAGMENT_QUOTER(fragment)
    else:
        raise TypeError("Invalid fragment type")
    return URL(self._val._replace(fragment=quoted), encoded=True)
"resource": ""
} |
def human_repr(self):
    """Return a decoded, human readable string form of the URL."""
    netloc = self._make_netloc(
        self.user, self.password, self.host, self._val.port, encode=False
    )
    parts = SplitResult(
        self.scheme, netloc, self.path, self.query_string, self.fragment
    )
    return urlunsplit(parts)
"resource": ""
} |
def all_arch_srcarch_kconfigs():
    """
    Generates a Kconfig instance for every architecture in the kernel.
    """
    # Base environment expected by the kernel Kconfig files
    os.environ.update({
        "srctree": ".",
        "HOSTCC": "gcc",
        "HOSTCXX": "g++",
        "CC": "gcc",
        "LD": "ld",
    })
    for arch, srcarch in all_arch_srcarch_pairs():
        print(" Processing " + arch)
        os.environ["ARCH"] = arch
        os.environ["SRCARCH"] = srcarch
        # um (User Mode Linux) uses a different base Kconfig file
        top_kconfig = "arch/x86/um/Kconfig" if arch == "um" else "Kconfig"
        yield Kconfig(top_kconfig, warn=False)
"resource": ""
} |
def menuconfig(kconf):
    """
    Launches the configuration interface, returning after the user exits.

    kconf:
      Kconfig instance to be configured
    """
    # Module-level state shared with the curses UI helpers
    global _kconf
    global _conf_filename
    global _conf_changed
    global _minconf_filename
    global _show_all

    _kconf = kconf

    # Load existing configuration and set _conf_changed True if it is outdated
    _conf_changed = _load_config()

    # Filename to save configuration to
    _conf_filename = standard_config_filename()

    # Filename to save minimal configuration to
    _minconf_filename = "defconfig"

    # Any visible items in the top menu?
    _show_all = False
    if not _shown_nodes(kconf.top_node):
        # Nothing visible. Start in show-all mode and try again.
        _show_all = True
        if not _shown_nodes(kconf.top_node):
            # Give up. The implementation relies on always having a selected
            # node.
            print("Empty configuration -- nothing to configure.\n"
                  "Check that environment variables are set properly.")
            return

    # Disable warnings. They get mangled in curses mode, and we deal with
    # errors ourselves.
    kconf.disable_warnings()

    # Make curses use the locale settings specified in the environment
    locale.setlocale(locale.LC_ALL, "")

    # Try to fix Unicode issues on systems with bad defaults
    if _CONVERT_C_LC_CTYPE_TO_UTF8:
        _convert_c_lc_ctype_to_utf8()

    # Get rid of the delay between pressing ESC and jumping to the parent menu,
    # unless the user has set ESCDELAY (see ncurses(3)). This makes the UI much
    # smoother to work with.
    #
    # Note: This is strictly pretty iffy, since escape codes for e.g. cursor
    # keys start with ESC, but I've never seen it cause problems in practice
    # (probably because it's unlikely that the escape code for a key would get
    # split up across read()s, at least with a terminal emulator). Please
    # report if you run into issues. Some suitable small default value could be
    # used here instead in that case. Maybe it's silly to not put in the
    # smallest imperceptible delay here already, though I don't like guessing.
    #
    # (From a quick glance at the ncurses source code, ESCDELAY might only be
    # relevant for mouse events there, so maybe escapes are assumed to arrive
    # in one piece already...)
    os.environ.setdefault("ESCDELAY", "0")

    # Enter curses mode. _menuconfig() returns a string to print on exit, after
    # curses has been de-initialized.
    print(curses.wrapper(_menuconfig))
"resource": ""
} |
def print_menuconfig_nodes(node, indent):
    """
    Prints a tree of all the menu entries rooted at 'node', indenting
    child menu entries one level deeper.
    """
    while node:
        text = node_str(node)
        if text:
            indent_print(text, indent)
        if node.list:
            # Recurse into this node's children at a deeper indent
            print_menuconfig_nodes(node.list, indent + 8)
        node = node.next
"resource": ""
} |
def print_menuconfig(kconf):
    """
    Prints all menu entries for the configuration, with a banner holding
    the expanded mainmenu text at the top.
    """
    # kconf.mainmenu_text is kconf.top_node.prompt[0] with variable
    # references expanded
    print("\n======== {} ========\n".format(kconf.mainmenu_text))

    print_menuconfig_nodes(kconf.top_node.list, 0)
    print("")
"resource": ""
} |
def expr_str(expr, sc_expr_str_fn=standard_sc_expr_str):
    """
    Returns the string representation of the expression 'expr', as in a
    Kconfig file. Passing subexpressions of expressions works as
    expected.

    sc_expr_str_fn (default: standard_sc_expr_str):
      Called for every symbol/choice (hence "sc") appearing in the
      expression, with the symbol/choice as the argument, and expected
      to return the string to use for it. This can be used e.g. to turn
      symbols/choices into links when generating documentation, or for
      printing the value of each symbol/choice after it. Note that
      quoted values are represented as constants symbols
      (Symbol.is_constant == True).
    """
    if type(expr) is not tuple:
        # Bare symbol/choice
        return sc_expr_str_fn(expr)

    op = expr[0]

    if op is AND:
        return "{} && {}".format(_parenthesize(expr[1], OR, sc_expr_str_fn),
                                 _parenthesize(expr[2], OR, sc_expr_str_fn))

    if op is OR:
        # This turns A && B || C && D into "(A && B) || (C && D)", which is
        # redundant, but more readable
        return "{} || {}".format(_parenthesize(expr[1], AND, sc_expr_str_fn),
                                 _parenthesize(expr[2], AND, sc_expr_str_fn))

    if op is NOT:
        operand = expr[1]
        if type(operand) is tuple:
            return "!({})".format(expr_str(operand, sc_expr_str_fn))
        return "!" + sc_expr_str_fn(operand)  # Symbol

    # Relation
    #
    # Relation operands are always symbols (quoted strings are constant
    # symbols)
    return "{} {} {}".format(sc_expr_str_fn(expr[1]), _REL_TO_STR[op],
                             sc_expr_str_fn(expr[2]))
"resource": ""
} |
def standard_kconfig():
    """
    Helper for tools. Loads and returns the Kconfig instance for the
    top-level Kconfig file given as the first command-line argument, or
    for "Kconfig" when no command-line arguments are passed.

    Exits with sys.exit() (which raises a SystemExit exception) and
    prints a usage note to stderr if more than one command-line argument
    is passed.
    """
    if len(sys.argv) > 2:
        sys.exit("usage: {} [Kconfig]".format(sys.argv[0]))

    kconfig_file = sys.argv[1] if len(sys.argv) == 2 else "Kconfig"

    # Only show backtraces for unexpected exceptions
    try:
        return Kconfig(kconfig_file)
    except (IOError, KconfigError) as e:
        # Some long exception messages have extra newlines for better
        # formatting when reported as an unhandled exception. Strip them here.
        sys.exit(str(e).strip())
"resource": ""
} |
def write_config(self, filename=None,
                 header="# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\n",
                 save_old=True, verbose=True):
    r"""
    Writes out symbol values in the .config format. The format matches the
    C implementation, including ordering.

    Symbols appear in the same order in generated .config files as they do
    in the Kconfig files. For symbols defined in multiple locations, a
    single assignment is written out corresponding to the first location
    where the symbol is defined.

    See the 'Intro to symbol values' section in the module docstring to
    understand which symbols get written out.

    filename (default: None):
      Filename to save configuration to (a string).

      If None (the default), the filename in the environment variable
      KCONFIG_CONFIG is used if set, and ".config" otherwise. See
      standard_config_filename().

    header (default: "# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\n"):
      Text that will be inserted verbatim at the beginning of the file. You
      would usually want each line to start with '#' to make it a comment,
      and include a final terminating newline.

    save_old (default: True):
      If True and <filename> already exists, a copy of it will be saved to
      .<filename>.old in the same directory before the new configuration is
      written. The leading dot is added only if the filename doesn't
      already start with a dot.

      Errors are silently ignored if .<filename>.old cannot be written
      (e.g. due to being a directory).

    verbose (default: True):
      If True and filename is None (automatically infer configuration
      file), a message will be printed to stdout telling which file got
      written. This is meant to reduce boilerplate in tools.
    """
    if filename is None:
        filename = standard_config_filename()
    else:
        # An explicit filename was passed: suppress the info message
        verbose = False

    if save_old:
        _save_old(filename)

    with self._open(filename, "w") as f:
        f.write(header)

        # node_iter(unique_syms=True) walks menu nodes in Kconfig-file
        # order, visiting each symbol only at its first definition
        for node in self.node_iter(unique_syms=True):
            item = node.item

            if item.__class__ is Symbol:
                f.write(item.config_string)

            # Write "#\n# <prompt>\n#" headers for visible menus and for
            # comments whose dependencies are satisfied
            elif expr_value(node.dep) and \
                 ((item is MENU and expr_value(node.visibility)) or
                  item is COMMENT):
                f.write("\n#\n# {}\n#\n".format(node.prompt[0]))

    if verbose:
        print("Configuration written to '{}'".format(filename))
"resource": ""
} |
def write_min_config(self, filename,
                     header="# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\n"):
    """
    Writes out a "minimal" configuration file, omitting symbols whose value
    matches their default value. The format matches the one produced by
    'make savedefconfig'.

    The resulting configuration file is incomplete, but a complete
    configuration can be derived from it by loading it. Minimal
    configuration files can serve as a more manageable configuration format
    compared to a "full" .config file, especially when configurations files
    are merged or edited by hand.

    filename:
      Self-explanatory.

    header (default: "# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\n"):
      Text that will be inserted verbatim at the beginning of the file. You
      would usually want each line to start with '#' to make it a comment,
      and include a final terminating newline.
    """
    with self._open(filename, "w") as f:
        f.write(header)

        for sym in self.unique_defined_syms:
            # Skip symbols that cannot be changed. Only check
            # non-choice symbols, as selects don't affect choice
            # symbols.
            if not sym.choice and \
               sym.visibility <= expr_value(sym.rev_dep):
                continue

            # Skip symbols whose value matches their default
            if sym.str_value == sym._str_default():
                continue

            # Skip symbols that would be selected by default in a
            # choice, unless the choice is optional or the symbol type
            # isn't bool (it might be possible to set the choice mode
            # to n or the symbol to m in those cases).
            if sym.choice and \
               not sym.choice.is_optional and \
               sym.choice._get_selection_from_defaults() is sym and \
               sym.orig_type is BOOL and \
               sym.tri_value == 2:
                continue

            f.write(sym.config_string)
"resource": ""
} |
def eval_string(self, s):
    """
    Returns the tristate value of the expression 's', represented as 0,
    1, and 2 for n, m, and y, respectively. Raises KconfigError if
    syntax errors are detected in 's'. Warns if undefined symbols are
    referenced.

    As an example, if FOO and BAR are tristate symbols at least one of
    which has the value y, then config.eval_string("y && (FOO || BAR)")
    returns 2 (y).

    To get the string value of non-bool/tristate symbols, use
    Symbol.str_value. eval_string() always returns a tristate value, and
    all non-bool/tristate symbols have the tristate value 0 (n).

    The expression parsing is consistent with how parsing works for
    conditional ('if ...') expressions in the configuration, and matches
    the C implementation. m is rewritten to 'm && MODULES', so
    eval_string("m") will return 0 (n) unless modules are enabled.
    """
    # The parser is optimized to be fast when parsing Kconfig files
    # (where an expression can never appear at the beginning of a line).
    # We have to monkey-patch things a bit here to reuse it.
    self._filename = None

    # Don't include the "if " from below to avoid giving confusing error
    # messages
    self._line = s

    self._tokens = self._tokenize("if " + s)
    self._tokens_i = 1  # Skip the 'if' token

    return expr_value(self._expect_expr_and_eol())
"resource": ""
} |
def set_value(self, value):
    """
    Sets the user value of the symbol.

    Equal in effect to assigning the value to the symbol within a .config
    file. For bool and tristate symbols, use the 'assignable' attribute to
    check which values can currently be assigned. Setting values outside
    'assignable' will cause Symbol.user_value to differ from
    Symbol.str/tri_value (be truncated down or up).

    Setting a choice symbol to 2 (y) sets Choice.user_selection to the
    choice symbol in addition to setting Symbol.user_value.
    Choice.user_selection is considered when the choice is in y mode (the
    "normal" mode).

    Other symbols that depend (possibly indirectly) on this symbol are
    automatically recalculated to reflect the assigned value.

    value:
      The user value to give to the symbol. For bool and tristate symbols,
      n/m/y can be specified either as 0/1/2 (the usual format for tristate
      values in Kconfiglib) or as one of the strings "n"/"m"/"y". For other
      symbol types, pass a string.

      Values that are invalid for the type (such as "foo" or 1 (m) for a
      BOOL or "0x123" for an INT) are ignored and won't be stored in
      Symbol.user_value. Kconfiglib will print a warning by default for
      invalid assignments, and set_value() will return False.

    Returns True if the value is valid for the type of the symbol, and
    False otherwise. This only looks at the form of the value. For BOOL and
    TRISTATE symbols, check the Symbol.assignable attribute to see what
    values are currently in range and would actually be reflected in the
    value of the symbol. For other symbol types, check whether the
    visibility is non-n.
    """
    # If the new user value matches the old, nothing changes, and we can
    # save some work.
    #
    # This optimization is skipped for choice symbols: Setting a choice
    # symbol's user value to y might change the state of the choice, so it
    # wouldn't be safe (symbol user values always match the values set in a
    # .config file or via set_value(), and are never implicitly updated).
    if value == self.user_value and not self.choice:
        self._was_set = True
        return True

    # Check if the value is valid for our type
    if not (self.orig_type is BOOL and value in (2, 0, "y", "n") or
            self.orig_type is TRISTATE and value in (2, 1, 0, "y", "m", "n") or
            (value.__class__ is str and
             (self.orig_type is STRING or
              self.orig_type is INT and _is_base_n(value, 10) or
              self.orig_type is HEX and _is_base_n(value, 16)
              and int(value, 16) >= 0))):

        # Display tristate values as n, m, y in the warning
        self.kconfig._warn(
            "the value {} is invalid for {}, which has type {} -- "
            "assignment ignored"
            .format(TRI_TO_STR[value] if value in (0, 1, 2) else
                    "'{}'".format(value),
                    _name_and_loc(self), TYPE_TO_STR[self.orig_type]))

        return False

    # Normalize "y"/"m"/"n" strings to their tristate integer form for
    # bool/tristate symbols
    if self.orig_type in _BOOL_TRISTATE and value in ("y", "m", "n"):
        value = STR_TO_TRI[value]

    self.user_value = value
    self._was_set = True

    if self.choice and value == 2:
        # Setting a choice symbol to y makes it the user selection of the
        # choice. Like for symbol user values, the user selection is not
        # guaranteed to match the actual selection of the choice, as
        # dependencies come into play.
        self.choice.user_selection = self
        self.choice._was_set = True
        self.choice._rec_invalidate()
    else:
        self._rec_invalidate_if_has_prompt()

    return True
"resource": ""
} |
def tokenize(s):
    r"""Yield the subparts of string ``s`` split by unescaped '.' chars.

    Unlike a plain ``.split()``, the string is interpreted: '.' and '\'
    carry meaning, and either can be included in a token by quoting it
    with '\'. So dots and backslashes can be contained in a token:

        >>> print('\n'.join(tokenize(r'foo.dot<\.>.slash<\\>')))
        foo
        dot<.>
        slash<\>

    Empty keys are supported (``'foo..bar'`` -> ``['foo', '', 'bar']``),
    an empty string yields a single empty token, and ``None`` yields
    nothing at all.
    """
    if s is None:
        return
    # finditer on a '*' pattern also produces an empty match right after
    # every non-empty token (at each separator position); those
    # superfluous empties must be skipped.
    parts = (re.sub(r'\\(\\|\.)', r'\1', match.group(0))
             for match in re.finditer(r'((\\.|[^.\\])*)', s))
    for part in parts:
        yield part
        if part:
            # Discard the empty match that trails a non-empty token
            next(parts)
"resource": ""
} |
def aget(dct, key):
    r"""Fetch a value nested deep inside ``dct`` along the iterable ``key``.

    Accessing leaf values is quite straightforward:

        >>> dct = {'a': {'x': 1, 'b': {'c': 2}}}
        >>> aget(dct, ('a', 'x'))
        1
        >>> aget(dct, ('a', 'b', 'c'))
        2

    If key is empty, it returns unchanged the ``dct`` value.

        >>> aget({'x': 1}, ())
        {'x': 1}
    """
    remaining = iter(key)
    try:
        head = next(remaining)
    except StopIteration:
        # Path exhausted: the current value is the answer.
        return dct

    if isinstance(dct, list):
        try:
            idx = int(head)
        except ValueError:
            raise IndexNotIntegerError(
                "non-integer index %r provided on a list." % head)
        try:
            value = dct[idx]
        except IndexError:
            raise IndexOutOfRange(
                "index %d is out of range (%d elements in list)."
                % (idx, len(dct)))
        return aget(value, remaining)

    try:
        value = dct[head]
    except KeyError:
        ## Replace with a more informative KeyError
        raise MissingKeyError(
            "missing key %r in dict." % (head, ))
    except Exception:
        leaf_detail = (" (leaf value is %r)" % dct) \
            if len(repr(dct)) < 15 else ""
        raise NonDictLikeTypeError(
            "can't query subvalue %r of a leaf%s." % (head, leaf_detail))
    return aget(value, remaining)
"resource": ""
} |
def die(msg, errlvl=1, prefix="Error: "):
    """Write a short prefixed message to stderr and exit with ``errlvl``."""
    stderr("{}{}\n".format(prefix, msg))
    sys.exit(errlvl)
"resource": ""
} |
def type_name(value):
    """Return the pseudo-YAML type name of the given value."""
    if isinstance(value, EncapsulatedNode):
        return type(value).__name__
    if isinstance(value, dict):
        return "struct"
    if isinstance(value, (tuple, list)):
        return "sequence"
    return type(value).__name__
"resource": ""
} |
def do(stream, action, key, default=None, dump=yaml_dump,
       loader=ShyamlSafeLoader):
    """Yield string representations of the target value in stream YAML.

    The key is used for traversal of the YAML structure to target
    the value that will be dumped.

    :param stream: file like input yaml content
    :param action: string identifying one of the possible supported actions
    :param key: string dotted expression to traverse yaml input
    :param default: optional default value in case of missing end value when
        traversing input yaml. (default is ``None``)
    :param dump: callable that will be given python objet to dump in yaml
        (default is ``yaml_dump``)
    :param loader: PyYAML's *Loader subclass to parse YAML
        (default is ShyamlSafeLoader)
    :return: generator of string representation of target value per
        YAML docs in the given stream.
    :raises ActionTypeError: when there's a type mismatch between the
        action selected and the type of the targetted value.
        (ie: action 'key-values' on non-struct)
    :raises InvalidAction: when selected action is not a recognised valid
        action identifier.
    :raises InvalidPath: upon inexistent content when traversing YAML
        input following the key specification.
    """
    seen_any_doc = False
    for document in yaml.load_all(stream, Loader=loader):
        seen_any_doc = True
        target = traverse(document, key, default=default)
        yield act(action, target, dump=dump)
    if not seen_any_doc:
        ## An empty stream is considered equivalent to a single document
        ## holding the ``null`` value.
        target = traverse(None, key, default=default)
        yield act(action, target, dump=dump)
"resource": ""
} |
def encode(s):
    """Encode a folder name using IMAP modified UTF-7 encoding.

    Despite the function's name, the output is still a unicode string.
    """
    if not isinstance(s, text_type):
        return s

    out = []
    pending = []  # run of non-printable chars awaiting a &...- escape

    def flush_pending():
        # Emit any buffered chars as a single modified-UTF-7 escape run
        if pending:
            out.extend(['&', modified_utf7(''.join(pending)), '-'])
            del pending[:]

    for ch in s:
        if ord(ch) in PRINTABLE:
            flush_pending()
            out.append(ch)
        elif ch == '&':
            # A literal ampersand is encoded as "&-"
            flush_pending()
            out.append('&-')
        else:
            pending.append(ch)
    flush_pending()

    return ''.join(out)
"resource": ""
} |
def decode(s):
    """Decode a folder name from IMAP modified UTF-7 encoding to unicode.

    Despite the function's name, the input may still be a unicode
    string. If the input is bytes, it's first decoded to unicode.
    """
    if isinstance(s, binary_type):
        s = s.decode('latin-1')
    if not isinstance(s, text_type):
        return s

    out = []
    run = []  # '&' plus the chars of an in-progress escape run
    for ch in s:
        if run:
            if ch == '-':
                if len(run) == 1:
                    # "&-" decodes to a literal ampersand
                    out.append('&')
                else:
                    out.append(modified_deutf7(''.join(run[1:])))
                run = []
            else:
                run.append(ch)
        elif ch == '&':
            run = ['&']
        else:
            out.append(ch)
    if run:
        # Unterminated escape run at end of input: decode what we have
        out.append(modified_deutf7(''.join(run[1:])))
    return ''.join(out)
"resource": ""
} |
def autoclean(input_dataframe, drop_nans=False, copy=False, encoder=None,
              encoder_kwargs=None, ignore_update_check=False):
    """Performs a series of automated data cleaning transformations on the provided data set

    Parameters
    ----------
    input_dataframe: pandas.DataFrame
        Data set to clean
    drop_nans: bool
        Drop all rows that have a NaN in any column (default: False)
    copy: bool
        Make a copy of the data set (default: False)
    encoder: category_encoders transformer
        The a valid category_encoders transformer which is passed an inferred cols list. Default (None: LabelEncoder)
    encoder_kwargs: category_encoders
        The a valid sklearn transformer to encode categorical features. Default (None)
    ignore_update_check: bool
        Do not check for the latest version of datacleaner

    Returns
    ----------
    output_dataframe: pandas.DataFrame
        Cleaned data set
    """
    # Version-check runs at most once per process
    global update_checked
    if ignore_update_check:
        update_checked = True

    if not update_checked:
        update_check('datacleaner', __version__)
        update_checked = True

    # NOTE: unless copy=True, the caller's DataFrame is mutated in place
    if copy:
        input_dataframe = input_dataframe.copy()

    if drop_nans:
        input_dataframe.dropna(inplace=True)

    if encoder_kwargs is None:
        encoder_kwargs = {}

    for column in input_dataframe.columns.values:
        # Replace NaNs with the median or mode of the column depending on the column type
        try:
            # median() raises TypeError on non-numeric columns
            input_dataframe[column].fillna(input_dataframe[column].median(), inplace=True)
        except TypeError:
            most_frequent = input_dataframe[column].mode()
            # If the mode can't be computed, use the nearest valid value
            # See https://github.com/rhiever/datacleaner/issues/8
            if len(most_frequent) > 0:
                input_dataframe[column].fillna(input_dataframe[column].mode()[0], inplace=True)
            else:
                # Backward fill first, then forward fill to cover trailing NaNs
                input_dataframe[column].fillna(method='bfill', inplace=True)
                input_dataframe[column].fillna(method='ffill', inplace=True)

        # Encode all strings with numerical equivalents
        if str(input_dataframe[column].values.dtype) == 'object':
            if encoder is not None:
                column_encoder = encoder(**encoder_kwargs).fit(input_dataframe[column].values)
            else:
                column_encoder = LabelEncoder().fit(input_dataframe[column].values)

            input_dataframe[column] = column_encoder.transform(input_dataframe[column].values)

    return input_dataframe
"resource": ""
} |
def autoclean_cv(training_dataframe, testing_dataframe, drop_nans=False, copy=False,
                 encoder=None, encoder_kwargs=None, ignore_update_check=False):
    """Performs a series of automated data cleaning transformations on the provided training and testing data sets

    Unlike `autoclean()`, this function takes cross-validation into account by learning the data transformations
    from only the training set, then applying those transformations to both the training and testing set.
    By doing so, this function will prevent information leak from the training set into the testing set.

    Parameters
    ----------
    training_dataframe: pandas.DataFrame
        Training data set
    testing_dataframe: pandas.DataFrame
        Testing data set
    drop_nans: bool
        Drop all rows that have a NaN in any column (default: False)
    copy: bool
        Make a copy of the data set (default: False)
    encoder: category_encoders transformer
        The a valid category_encoders transformer which is passed an inferred cols list. Default (None: LabelEncoder)
    encoder_kwargs: category_encoders
        The a valid sklearn transformer to encode categorical features. Default (None)
    ignore_update_check: bool
        Do not check for the latest version of datacleaner

    Returns
    ----------
    output_training_dataframe: pandas.DataFrame
        Cleaned training data set
    output_testing_dataframe: pandas.DataFrame
        Cleaned testing data set
    """
    # Version-check runs at most once per process
    global update_checked
    if ignore_update_check:
        update_checked = True

    if not update_checked:
        update_check('datacleaner', __version__)
        update_checked = True

    # Both frames must share the same column set for the fitted
    # transformations to be transferable
    if set(training_dataframe.columns.values) != set(testing_dataframe.columns.values):
        raise ValueError('The training and testing DataFrames do not have the same columns. '
                         'Make sure that you are providing the same columns.')

    # NOTE: unless copy=True, the caller's DataFrames are mutated in place
    if copy:
        training_dataframe = training_dataframe.copy()
        testing_dataframe = testing_dataframe.copy()

    if drop_nans:
        training_dataframe.dropna(inplace=True)
        testing_dataframe.dropna(inplace=True)

    if encoder_kwargs is None:
        encoder_kwargs = {}

    for column in training_dataframe.columns.values:
        # Replace NaNs with the median or mode of the column depending on the column type.
        # Statistics are computed from the training set only to avoid leakage.
        try:
            # median() raises TypeError on non-numeric columns
            column_median = training_dataframe[column].median()
            training_dataframe[column].fillna(column_median, inplace=True)
            testing_dataframe[column].fillna(column_median, inplace=True)
        except TypeError:
            column_mode = training_dataframe[column].mode()[0]
            training_dataframe[column].fillna(column_mode, inplace=True)
            testing_dataframe[column].fillna(column_mode, inplace=True)

        # Encode all strings with numerical equivalents; the encoder is
        # fitted on the training set only
        if str(training_dataframe[column].values.dtype) == 'object':
            if encoder is not None:
                column_encoder = encoder(**encoder_kwargs).fit(training_dataframe[column].values)
            else:
                column_encoder = LabelEncoder().fit(training_dataframe[column].values)

            training_dataframe[column] = column_encoder.transform(training_dataframe[column].values)
            testing_dataframe[column] = column_encoder.transform(testing_dataframe[column].values)

    return training_dataframe, testing_dataframe
"resource": ""
} |
def get_foreign_keys(cls):
    """Map foreign-key field names to the models they refer to.

    Used to pre-process the data for load_bulk.
    """
    foreign_keys = {}
    for field in cls._meta.fields:
        if field.get_internal_type() != 'ForeignKey':
            continue
        if field.name == 'parent':
            # 'parent' is the tree link itself, not user data
            continue
        if django.VERSION >= (1, 9):
            foreign_keys[field.name] = field.remote_field.model
        else:
            foreign_keys[field.name] = field.rel.to
    return foreign_keys
"resource": ""
} |
def _process_foreign_keys(cls, foreign_keys, node_data):
    """Replace foreign-key ids in ``node_data`` with model instances.

    For each foreign key, load the actual object so load_bulk doesn't
    fail trying to load an int where django expects a model instance.
    """
    for name, model in foreign_keys.items():
        if name in node_data:
            node_data[name] = model.objects.get(pk=node_data[name])
"resource": ""
} |
def delete(self):
    """Remove this node together with all of its descendants."""
    model = self.__class__
    model.objects.filter(pk=self.pk).delete()
"resource": ""
} |
def get_annotated_list_qs(cls, qs):
    """
    Gets an annotated list from a queryset.
    """
    annotated = []
    info = {}
    start_depth = prev_depth = None
    for node in qs:
        depth = node.get_depth()
        if start_depth is None:
            start_depth = depth
        # Truthy when this node opens a level deeper than its predecessor
        is_open = (depth and (prev_depth is None or depth > prev_depth))
        if prev_depth is not None and depth < prev_depth:
            # Moving back up: record how many levels the previous entry closes
            info['close'] = list(range(0, prev_depth - depth))
        info = {'open': is_open, 'close': [], 'level': depth - start_depth}
        annotated.append((node, info))
        prev_depth = depth
    if start_depth and start_depth > 0:
        info['close'] = list(range(0, prev_depth - start_depth + 1))
    return annotated
"resource": ""
} |
def get_annotated_list(cls, parent=None, max_depth=None):
    """
    Gets an annotated list from a tree branch.

    :param parent:
        The node whose descendants will be annotated. The node itself
        will be included in the list. If not given, the entire tree
        will be annotated.

    :param max_depth:
        Optionally limit to specified depth

    :returns: the annotated list produced by get_annotated_list_qs()
    """
    # The original version initialized result/info/start_depth/prev_depth
    # locals here that were never used (dead code copied from
    # get_annotated_list_qs); all the work happens in the helper.
    qs = cls.get_tree(parent)
    if max_depth:
        qs = qs.filter(depth__lte=max_depth)
    return cls.get_annotated_list_qs(qs)
"resource": ""
} |
def get_urls(self):
    """
    Adds a url to move nodes to this admin
    """
    base_urls = super(TreeAdmin, self).get_urls()

    # The javascript i18n catalog view changed in Django 1.10
    if django.VERSION < (1, 10):
        from django.views.i18n import javascript_catalog
        jsi18n_url = url(r'^jsi18n/$', javascript_catalog,
                         {'packages': ('treebeard',)})
    else:
        from django.views.i18n import JavaScriptCatalog
        jsi18n_url = url(r'^jsi18n/$',
                         JavaScriptCatalog.as_view(packages=['treebeard']),
                         name='javascript-catalog')

    extra_urls = [
        url('^move/$', self.admin_site.admin_view(self.move_node), ),
        jsi18n_url,
    ]
    return extra_urls + base_urls
"resource": ""
} |
q36230 | movenodeform_factory | train | def movenodeform_factory(model, form=MoveNodeForm, fields=None, exclude=None,
formfield_callback=None, widgets=None):
"""Dynamically build a MoveNodeForm subclass with the proper Meta.
:param Node model:
The subclass of :py:class:`Node` that will be handled
by the form.
:param form:
The form class that will be used as a base. By
default, :py:class:`MoveNodeForm` will be used.
:return: A :py:class:`MoveNodeForm` subclass
"""
_exclude = _get_exclude_for_model(model, exclude)
return django_modelform_factory(
model, form, fields, _exclude, formfield_callback, widgets) | python | {
"resource": ""
} |
q36231 | MoveNodeForm._clean_cleaned_data | train | def _clean_cleaned_data(self):
""" delete auxilary fields not belonging to node model """
reference_node_id = 0
if '_ref_node_id' in self.cleaned_data:
reference_node_id = self.cleaned_data['_ref_node_id']
del self.cleaned_data['_ref_node_id']
position_type = self.cleaned_data['_position']
del self.cleaned_data['_position']
return position_type, reference_node_id | python | {
"resource": ""
} |
q36232 | MoveNodeForm.add_subtree | train | def add_subtree(cls, for_node, node, options):
""" Recursively build options tree. """
if cls.is_loop_safe(for_node, node):
options.append(
(node.pk,
mark_safe(cls.mk_indent(node.get_depth()) + escape(node))))
for subnode in node.get_children():
cls.add_subtree(for_node, subnode, options) | python | {
"resource": ""
} |
q36233 | MoveNodeForm.mk_dropdown_tree | train | def mk_dropdown_tree(cls, model, for_node=None):
""" Creates a tree-like list of choices """
options = [(0, _('-- root --'))]
for node in model.get_root_nodes():
cls.add_subtree(for_node, node, options)
return options | python | {
"resource": ""
} |
q36234 | MP_MoveHandler.sanity_updates_after_move | train | def sanity_updates_after_move(self, oldpath, newpath):
"""
Updates the list of sql statements needed after moving nodes.
1. :attr:`depth` updates *ONLY* needed by mysql databases (*sigh*)
2. update the number of children of parent nodes
"""
if (
self.node_cls.get_database_vendor('write') == 'mysql' and
len(oldpath) != len(newpath)
):
# no words can describe how dumb mysql is
# we must update the depth of the branch in a different query
self.stmts.append(
self.get_mysql_update_depth_in_branch(newpath))
oldparentpath = self.node_cls._get_parent_path_from_path(oldpath)
newparentpath = self.node_cls._get_parent_path_from_path(newpath)
if (
(not oldparentpath and newparentpath) or
(oldparentpath and not newparentpath) or
(oldparentpath != newparentpath)
):
# node changed parent, updating count
if oldparentpath:
self.stmts.append(
self.get_sql_update_numchild(oldparentpath, 'dec'))
if newparentpath:
self.stmts.append(
self.get_sql_update_numchild(newparentpath, 'inc')) | python | {
"resource": ""
} |
q36235 | MP_Node.fix_tree | train | def fix_tree(cls, destructive=False):
"""
Solves some problems that can appear when transactions are not used and
a piece of code breaks, leaving the tree in an inconsistent state.
The problems this method solves are:
1. Nodes with an incorrect ``depth`` or ``numchild`` values due to
incorrect code and lack of database transactions.
2. "Holes" in the tree. This is normal if you move/delete nodes a
lot. Holes in a tree don't affect performance,
3. Incorrect ordering of nodes when ``node_order_by`` is enabled.
Ordering is enforced on *node insertion*, so if an attribute in
``node_order_by`` is modified after the node is inserted, the
tree ordering will be inconsistent.
:param destructive:
A boolean value. If True, a more agressive fix_tree method will be
attempted. If False (the default), it will use a safe (and fast!)
fix approach, but it will only solve the ``depth`` and
``numchild`` nodes, it won't fix the tree holes or broken path
ordering.
.. warning::
Currently what the ``destructive`` method does is:
1. Backup the tree with :meth:`dump_data`
2. Remove all nodes in the tree.
3. Restore the tree with :meth:`load_data`
So, even when the primary keys of your nodes will be preserved,
this method isn't foreign-key friendly. That needs complex
in-place tree reordering, not available at the moment (hint:
patches are welcome).
"""
cls = get_result_class(cls)
vendor = cls.get_database_vendor('write')
if destructive:
dump = cls.dump_bulk(None, True)
cls.objects.all().delete()
cls.load_bulk(dump, None, True)
else:
cursor = cls._get_database_cursor('write')
# fix the depth field
# we need the WHERE to speed up postgres
sql = (
"UPDATE %s "
"SET depth=" + sql_length("path", vendor=vendor) + "/%%s "
"WHERE depth!=" + sql_length("path", vendor=vendor) + "/%%s"
) % (connection.ops.quote_name(cls._meta.db_table), )
vals = [cls.steplen, cls.steplen]
cursor.execute(sql, vals)
# fix the numchild field
vals = ['_' * cls.steplen]
# the cake and sql portability are a lie
if cls.get_database_vendor('read') == 'mysql':
sql = (
"SELECT tbn1.path, tbn1.numchild, ("
"SELECT COUNT(1) "
"FROM %(table)s AS tbn2 "
"WHERE tbn2.path LIKE " +
sql_concat("tbn1.path", "%%s", vendor=vendor) + ") AS real_numchild "
"FROM %(table)s AS tbn1 "
"HAVING tbn1.numchild != real_numchild"
) % {'table': connection.ops.quote_name(cls._meta.db_table)}
else:
subquery = "(SELECT COUNT(1) FROM %(table)s AS tbn2"\
" WHERE tbn2.path LIKE " + sql_concat("tbn1.path", "%%s", vendor=vendor) + ")"
sql = ("SELECT tbn1.path, tbn1.numchild, " + subquery +
" FROM %(table)s AS tbn1 WHERE tbn1.numchild != " +
subquery)
sql = sql % {
'table': connection.ops.quote_name(cls._meta.db_table)}
# we include the subquery twice
vals *= 2
cursor.execute(sql, vals)
sql = "UPDATE %(table)s "\
"SET numchild=%%s "\
"WHERE path=%%s" % {
'table': connection.ops.quote_name(cls._meta.db_table)}
for node_data in cursor.fetchall():
vals = [node_data[2], node_data[0]]
cursor.execute(sql, vals) | python | {
"resource": ""
} |
q36236 | MP_Node._get_path | train | def _get_path(cls, path, depth, newstep):
"""
Builds a path given some values
:param path: the base path
:param depth: the depth of the node
:param newstep: the value (integer) of the new step
"""
parentpath = cls._get_basepath(path, depth - 1)
key = cls._int2str(newstep)
return '{0}{1}{2}'.format(
parentpath,
cls.alphabet[0] * (cls.steplen - len(key)),
key
) | python | {
"resource": ""
} |
q36237 | int2str | train | def int2str(num, radix=10, alphabet=BASE85):
"""helper function for quick base conversions from integers to strings"""
return NumConv(radix, alphabet).int2str(num) | python | {
"resource": ""
} |
q36238 | str2int | train | def str2int(num, radix=10, alphabet=BASE85):
"""helper function for quick base conversions from strings to integers"""
return NumConv(radix, alphabet).str2int(num) | python | {
"resource": ""
} |
q36239 | NumConv.int2str | train | def int2str(self, num):
"""Converts an integer into a string.
:param num: A numeric value to be converted to another base as a
string.
:rtype: string
:raise TypeError: when *num* isn't an integer
:raise ValueError: when *num* isn't positive
"""
if int(num) != num:
raise TypeError('number must be an integer')
if num < 0:
raise ValueError('number must be positive')
radix, alphabet = self.radix, self.alphabet
if radix in (8, 10, 16) and \
alphabet[:radix].lower() == BASE85[:radix].lower():
return ({8: '%o', 10: '%d', 16: '%x'}[radix] % num).upper()
ret = ''
while True:
ret = alphabet[num % radix] + ret
if num < radix:
break
num //= radix
return ret | python | {
"resource": ""
} |
q36240 | NumConv.str2int | train | def str2int(self, num):
"""Converts a string into an integer.
If possible, the built-in python conversion will be used for speed
purposes.
:param num: A string that will be converted to an integer.
:rtype: integer
:raise ValueError: when *num* is invalid
"""
radix, alphabet = self.radix, self.alphabet
if radix <= 36 and alphabet[:radix].lower() == BASE85[:radix].lower():
return int(num, radix)
ret = 0
lalphabet = alphabet[:radix]
for char in num:
if char not in lalphabet:
raise ValueError("invalid literal for radix2int() with radix "
"%d: '%s'" % (radix, num))
ret = ret * radix + self.cached_map[char]
return ret | python | {
"resource": ""
} |
q36241 | AL_NodeManager.get_queryset | train | def get_queryset(self):
"""Sets the custom queryset as the default."""
if self.model.node_order_by:
order_by = ['parent'] + list(self.model.node_order_by)
else:
order_by = ['parent', 'sib_order']
return super(AL_NodeManager, self).get_queryset().order_by(*order_by) | python | {
"resource": ""
} |
q36242 | JSONFieldBase.pre_init | train | def pre_init(self, value, obj):
"""Convert a string value to JSON only if it needs to be deserialized.
SubfieldBase metaclass has been modified to call this method instead of
to_python so that we can check the obj state and determine if it needs to be
deserialized"""
try:
if obj._state.adding:
# Make sure the primary key actually exists on the object before
# checking if it's empty. This is a special case for South datamigrations
# see: https://github.com/bradjasper/django-jsonfield/issues/52
if getattr(obj, "pk", None) is not None:
if isinstance(value, six.string_types):
try:
return json.loads(value, **self.load_kwargs)
except ValueError:
raise ValidationError(_("Enter valid JSON"))
except AttributeError:
# south fake meta class doesn't create proper attributes
# see this:
# https://github.com/bradjasper/django-jsonfield/issues/52
pass
return value | python | {
"resource": ""
} |
q36243 | get_model | train | def get_model(method):
"""Convert string to model class."""
@wraps(method)
def wrapper(migrator, model, *args, **kwargs):
if isinstance(model, str):
return method(migrator, migrator.orm[model], *args, **kwargs)
return method(migrator, model, *args, **kwargs)
return wrapper | python | {
"resource": ""
} |
q36244 | SchemaMigrator.from_database | train | def from_database(cls, database):
"""Initialize migrator by db."""
if isinstance(database, PostgresqlDatabase):
return PostgresqlMigrator(database)
if isinstance(database, SqliteDatabase):
return SqliteMigrator(database)
if isinstance(database, MySQLDatabase):
return MySQLMigrator(database)
return super(SchemaMigrator, cls).from_database(database) | python | {
"resource": ""
} |
q36245 | SchemaMigrator.change_column | train | def change_column(self, table, column_name, field):
"""Change column."""
operations = [self.alter_change_column(table, column_name, field)]
if not field.null:
operations.extend([self.add_not_null(table, column_name)])
return operations | python | {
"resource": ""
} |
q36246 | SchemaMigrator.alter_add_column | train | def alter_add_column(self, table, column_name, field, **kwargs):
"""Fix fieldname for ForeignKeys."""
name = field.name
op = super(SchemaMigrator, self).alter_add_column(table, column_name, field, **kwargs)
if isinstance(field, pw.ForeignKeyField):
field.name = name
return op | python | {
"resource": ""
} |
q36247 | Migrator.run | train | def run(self):
"""Run operations."""
for op in self.ops:
if isinstance(op, Operation):
LOGGER.info("%s %s", op.method, op.args)
op.run()
else:
op()
self.clean() | python | {
"resource": ""
} |
q36248 | Migrator.python | train | def python(self, func, *args, **kwargs):
"""Run python code."""
self.ops.append(lambda: func(*args, **kwargs)) | python | {
"resource": ""
} |
q36249 | Migrator.sql | train | def sql(self, sql, *params):
"""Execure raw SQL."""
self.ops.append(self.migrator.sql(sql, *params)) | python | {
"resource": ""
} |
q36250 | Migrator.create_table | train | def create_table(self, model):
"""Create model and table in database.
>> migrator.create_table(model)
"""
self.orm[model._meta.table_name] = model
model._meta.database = self.database
self.ops.append(model.create_table)
return model | python | {
"resource": ""
} |
q36251 | Migrator.drop_table | train | def drop_table(self, model, cascade=True):
"""Drop model and table from database.
>> migrator.drop_table(model, cascade=True)
"""
del self.orm[model._meta.table_name]
self.ops.append(self.migrator.drop_table(model, cascade)) | python | {
"resource": ""
} |
q36252 | Migrator.add_columns | train | def add_columns(self, model, **fields):
"""Create new fields."""
for name, field in fields.items():
model._meta.add_field(name, field)
self.ops.append(self.migrator.add_column(
model._meta.table_name, field.column_name, field))
if field.unique:
self.ops.append(self.migrator.add_index(
model._meta.table_name, (field.column_name,), unique=True))
return model | python | {
"resource": ""
} |
q36253 | Migrator.change_columns | train | def change_columns(self, model, **fields):
"""Change fields."""
for name, field in fields.items():
old_field = model._meta.fields.get(name, field)
old_column_name = old_field and old_field.column_name
model._meta.add_field(name, field)
if isinstance(old_field, pw.ForeignKeyField):
self.ops.append(self.migrator.drop_foreign_key_constraint(
model._meta.table_name, old_column_name))
if old_column_name != field.column_name:
self.ops.append(
self.migrator.rename_column(
model._meta.table_name, old_column_name, field.column_name))
if isinstance(field, pw.ForeignKeyField):
on_delete = field.on_delete if field.on_delete else 'RESTRICT'
on_update = field.on_update if field.on_update else 'RESTRICT'
self.ops.append(self.migrator.add_foreign_key_constraint(
model._meta.table_name, field.column_name,
field.rel_model._meta.table_name, field.rel_field.name,
on_delete, on_update))
continue
self.ops.append(self.migrator.change_column(
model._meta.table_name, field.column_name, field))
if field.unique == old_field.unique:
continue
if field.unique:
index = (field.column_name,), field.unique
self.ops.append(self.migrator.add_index(model._meta.table_name, *index))
model._meta.indexes.append(index)
else:
index = (field.column_name,), old_field.unique
self.ops.append(self.migrator.drop_index(model._meta.table_name, *index))
model._meta.indexes.remove(index)
return model | python | {
"resource": ""
} |
q36254 | Migrator.drop_columns | train | def drop_columns(self, model, *names, **kwargs):
"""Remove fields from model."""
fields = [field for field in model._meta.fields.values() if field.name in names]
cascade = kwargs.pop('cascade', True)
for field in fields:
self.__del_field__(model, field)
if field.unique:
index_name = make_index_name(model._meta.table_name, [field.column_name])
self.ops.append(self.migrator.drop_index(model._meta.table_name, index_name))
self.ops.append(
self.migrator.drop_column(
model._meta.table_name, field.column_name, cascade=cascade))
return model | python | {
"resource": ""
} |
q36255 | Migrator.rename_column | train | def rename_column(self, model, old_name, new_name):
"""Rename field in model."""
field = model._meta.fields[old_name]
if isinstance(field, pw.ForeignKeyField):
old_name = field.column_name
self.__del_field__(model, field)
field.name = field.column_name = new_name
model._meta.add_field(new_name, field)
if isinstance(field, pw.ForeignKeyField):
field.column_name = new_name = field.column_name + '_id'
self.ops.append(self.migrator.rename_column(model._meta.table_name, old_name, new_name))
return model | python | {
"resource": ""
} |
q36256 | Migrator.rename_table | train | def rename_table(self, model, new_name):
"""Rename table in database."""
del self.orm[model._meta.table_name]
model._meta.table_name = new_name
self.orm[model._meta.table_name] = model
self.ops.append(self.migrator.rename_table(model._meta.table_name, new_name))
return model | python | {
"resource": ""
} |
q36257 | Migrator.add_index | train | def add_index(self, model, *columns, **kwargs):
"""Create indexes."""
unique = kwargs.pop('unique', False)
model._meta.indexes.append((columns, unique))
columns_ = []
for col in columns:
field = model._meta.fields.get(col)
if len(columns) == 1:
field.unique = unique
field.index = not unique
if isinstance(field, pw.ForeignKeyField):
col = col + '_id'
columns_.append(col)
self.ops.append(self.migrator.add_index(model._meta.table_name, columns_, unique=unique))
return model | python | {
"resource": ""
} |
q36258 | Migrator.drop_index | train | def drop_index(self, model, *columns):
"""Drop indexes."""
columns_ = []
for col in columns:
field = model._meta.fields.get(col)
if not field:
continue
if len(columns) == 1:
field.unique = field.index = False
if isinstance(field, pw.ForeignKeyField):
col = col + '_id'
columns_.append(col)
index_name = make_index_name(model._meta.table_name, columns_)
model._meta.indexes = [(cols, _) for (cols, _) in model._meta.indexes if columns != cols]
self.ops.append(self.migrator.drop_index(model._meta.table_name, index_name))
return model | python | {
"resource": ""
} |
q36259 | Migrator.add_not_null | train | def add_not_null(self, model, *names):
"""Add not null."""
for name in names:
field = model._meta.fields[name]
field.null = False
self.ops.append(self.migrator.add_not_null(model._meta.table_name, field.column_name))
return model | python | {
"resource": ""
} |
q36260 | Migrator.drop_not_null | train | def drop_not_null(self, model, *names):
"""Drop not null."""
for name in names:
field = model._meta.fields[name]
field.null = True
self.ops.append(self.migrator.drop_not_null(model._meta.table_name, field.column_name))
return model | python | {
"resource": ""
} |
q36261 | Migrator.add_default | train | def add_default(self, model, name, default):
"""Add default."""
field = model._meta.fields[name]
model._meta.defaults[field] = field.default = default
self.ops.append(self.migrator.apply_default(model._meta.table_name, name, field))
return model | python | {
"resource": ""
} |
q36262 | migrate | train | def migrate(name=None, database=None, directory=None, verbose=None, fake=False):
"""Migrate database."""
router = get_router(directory, database, verbose)
migrations = router.run(name, fake=fake)
if migrations:
click.echo('Migrations completed: %s' % ', '.join(migrations)) | python | {
"resource": ""
} |
q36263 | rollback | train | def rollback(name, database=None, directory=None, verbose=None):
"""Rollback a migration with given name."""
router = get_router(directory, database, verbose)
router.rollback(name) | python | {
"resource": ""
} |
q36264 | load_models | train | def load_models(module):
"""Load models from given module."""
modules = _import_submodules(module)
return {m for module in modules for m in filter(
_check_model, (getattr(module, name) for name in dir(module))
)} | python | {
"resource": ""
} |
q36265 | _check_model | train | def _check_model(obj, models=None):
"""Checks object if it's a peewee model and unique."""
return isinstance(obj, type) and issubclass(obj, pw.Model) and hasattr(obj, '_meta') | python | {
"resource": ""
} |
q36266 | compile_migrations | train | def compile_migrations(migrator, models, reverse=False):
"""Compile migrations for given models."""
source = migrator.orm.values()
if reverse:
source, models = models, source
migrations = diff_many(models, source, migrator, reverse=reverse)
if not migrations:
return False
migrations = NEWLINE + NEWLINE.join('\n\n'.join(migrations).split('\n'))
return CLEAN_RE.sub('\n', migrations) | python | {
"resource": ""
} |
q36267 | BaseRouter.model | train | def model(self):
"""Initialize and cache MigrationHistory model."""
MigrateHistory._meta.database = self.database
MigrateHistory._meta.table_name = self.migrate_table
MigrateHistory._meta.schema = self.schema
MigrateHistory.create_table(True)
return MigrateHistory | python | {
"resource": ""
} |
q36268 | BaseRouter.done | train | def done(self):
"""Scan migrations in database."""
return [mm.name for mm in self.model.select().order_by(self.model.id)] | python | {
"resource": ""
} |
q36269 | BaseRouter.diff | train | def diff(self):
"""Calculate difference between fs and db."""
done = set(self.done)
return [name for name in self.todo if name not in done] | python | {
"resource": ""
} |
q36270 | BaseRouter.migrator | train | def migrator(self):
"""Create migrator and setup it with fake migrations."""
migrator = Migrator(self.database)
for name in self.done:
self.run_one(name, migrator)
return migrator | python | {
"resource": ""
} |
q36271 | Router.todo | train | def todo(self):
"""Scan migrations in file system."""
if not os.path.exists(self.migrate_dir):
self.logger.warn('Migration directory: %s does not exist.', self.migrate_dir)
os.makedirs(self.migrate_dir)
return sorted(f[:-3] for f in os.listdir(self.migrate_dir) if self.filemask.match(f)) | python | {
"resource": ""
} |
q36272 | Router.read | train | def read(self, name):
"""Read migration from file."""
call_params = dict()
if os.name == 'nt' and sys.version_info >= (3, 0):
# if system is windows - force utf-8 encoding
call_params['encoding'] = 'utf-8'
with open(os.path.join(self.migrate_dir, name + '.py'), **call_params) as f:
code = f.read()
scope = {}
exec_in(code, scope)
return scope.get('migrate', VOID), scope.get('rollback', VOID) | python | {
"resource": ""
} |
q36273 | Router.clear | train | def clear(self):
"""Remove migrations from fs."""
super(Router, self).clear()
for name in self.todo:
filename = os.path.join(self.migrate_dir, name + '.py')
os.remove(filename) | python | {
"resource": ""
} |
q36274 | diff_one | train | def diff_one(model1, model2, **kwargs):
"""Find difference between given peewee models."""
changes = []
fields1 = model1._meta.fields
fields2 = model2._meta.fields
# Add fields
names1 = set(fields1) - set(fields2)
if names1:
fields = [fields1[name] for name in names1]
changes.append(create_fields(model1, *fields, **kwargs))
# Drop fields
names2 = set(fields2) - set(fields1)
if names2:
changes.append(drop_fields(model1, *names2))
# Change fields
fields_ = []
nulls_ = []
indexes_ = []
for name in set(fields1) - names1 - names2:
field1, field2 = fields1[name], fields2[name]
diff = compare_fields(field1, field2)
null = diff.pop('null', None)
index = diff.pop('index', None)
if diff:
fields_.append(field1)
if null is not None:
nulls_.append((name, null))
if index is not None:
indexes_.append((name, index[0], index[1]))
if fields_:
changes.append(change_fields(model1, *fields_, **kwargs))
for name, null in nulls_:
changes.append(change_not_null(model1, name, null))
for name, index, unique in indexes_:
if index is True or unique is True:
if fields2[name].unique or fields2[name].index:
changes.append(drop_index(model1, name))
changes.append(add_index(model1, name, unique))
else:
changes.append(drop_index(model1, name))
return changes | python | {
"resource": ""
} |
q36275 | diff_many | train | def diff_many(models1, models2, migrator=None, reverse=False):
"""Calculate changes for migrations from models2 to models1."""
models1 = pw.sort_models(models1)
models2 = pw.sort_models(models2)
if reverse:
models1 = reversed(models1)
models2 = reversed(models2)
models1 = OrderedDict([(m._meta.name, m) for m in models1])
models2 = OrderedDict([(m._meta.name, m) for m in models2])
changes = []
for name, model1 in models1.items():
if name not in models2:
continue
changes += diff_one(model1, models2[name], migrator=migrator)
# Add models
for name in [m for m in models1 if m not in models2]:
changes.append(create_model(models1[name], migrator=migrator))
# Remove models
for name in [m for m in models2 if m not in models1]:
changes.append(remove_model(models2[name]))
return changes | python | {
"resource": ""
} |
q36276 | watch | train | def watch(path: Union[Path, str], **kwargs):
"""
Watch a directory and yield a set of changes whenever files change in that directory or its subdirectories.
"""
loop = asyncio.new_event_loop()
try:
_awatch = awatch(path, loop=loop, **kwargs)
while True:
try:
yield loop.run_until_complete(_awatch.__anext__())
except StopAsyncIteration:
break
except KeyboardInterrupt:
logger.debug('KeyboardInterrupt, exiting')
finally:
loop.close() | python | {
"resource": ""
} |
q36277 | new_event | train | def new_event(event):
"""
Wrap a raw gRPC event in a friendlier containing class.
This picks the appropriate class from one of PutEvent or DeleteEvent and
returns a new instance.
"""
op_name = event.EventType.DESCRIPTOR.values_by_number[event.type].name
if op_name == 'PUT':
cls = PutEvent
elif op_name == 'DELETE':
cls = DeleteEvent
else:
raise Exception('Invalid op_name')
return cls(event) | python | {
"resource": ""
} |
q36278 | Lock.is_acquired | train | def is_acquired(self):
"""Check if this lock is currently acquired."""
uuid, _ = self.etcd_client.get(self.key)
if uuid is None:
return False
return uuid == self.uuid | python | {
"resource": ""
} |
q36279 | lease_to_id | train | def lease_to_id(lease):
"""Figure out if the argument is a Lease object, or the lease ID."""
lease_id = 0
if hasattr(lease, 'id'):
lease_id = lease.id
else:
try:
lease_id = int(lease)
except TypeError:
pass
return lease_id | python | {
"resource": ""
} |
q36280 | Etcd3Client.put | train | def put(self, key, value, lease=None, prev_kv=False):
"""
Save a value to etcd.
Example usage:
.. code-block:: python
>>> import etcd3
>>> etcd = etcd3.client()
>>> etcd.put('/thing/key', 'hello world')
:param key: key in etcd to set
:param value: value to set key to
:type value: bytes
:param lease: Lease to associate with this key.
:type lease: either :class:`.Lease`, or int (ID of lease)
:param prev_kv: return the previous key-value pair
:type prev_kv: bool
:returns: a response containing a header and the prev_kv
:rtype: :class:`.rpc_pb2.PutResponse`
"""
put_request = self._build_put_request(key, value, lease=lease,
prev_kv=prev_kv)
return self.kvstub.Put(
put_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
) | python | {
"resource": ""
} |
q36281 | Etcd3Client.put_if_not_exists | train | def put_if_not_exists(self, key, value, lease=None):
"""
Atomically puts a value only if the key previously had no value.
This is the etcdv3 equivalent to setting a key with the etcdv2
parameter prevExist=false.
:param key: key in etcd to put
:param value: value to be written to key
:type value: bytes
:param lease: Lease to associate with this key.
:type lease: either :class:`.Lease`, or int (ID of lease)
:returns: state of transaction, ``True`` if the put was successful,
``False`` otherwise
:rtype: bool
"""
status, _ = self.transaction(
compare=[self.transactions.create(key) == '0'],
success=[self.transactions.put(key, value, lease=lease)],
failure=[],
)
return status | python | {
"resource": ""
} |
q36282 | Etcd3Client.delete | train | def delete(self, key, prev_kv=False, return_response=False):
"""
Delete a single key in etcd.
:param key: key in etcd to delete
:param prev_kv: return the deleted key-value pair
:type prev_kv: bool
:param return_response: return the full response
:type return_response: bool
:returns: True if the key has been deleted when
``return_response`` is False and a response containing
a header, the number of deleted keys and prev_kvs when
``return_response`` is True
"""
delete_request = self._build_delete_request(key, prev_kv=prev_kv)
delete_response = self.kvstub.DeleteRange(
delete_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
)
if return_response:
return delete_response
return delete_response.deleted >= 1 | python | {
"resource": ""
} |
q36283 | Etcd3Client.status | train | def status(self):
"""Get the status of the responding member."""
status_request = etcdrpc.StatusRequest()
status_response = self.maintenancestub.Status(
status_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
)
for m in self.members:
if m.id == status_response.leader:
leader = m
break
else:
# raise exception?
leader = None
return Status(status_response.version,
status_response.dbSize,
leader,
status_response.raftIndex,
status_response.raftTerm) | python | {
"resource": ""
} |
q36284 | Etcd3Client.add_watch_callback | train | def add_watch_callback(self, *args, **kwargs):
"""
Watch a key or range of keys and call a callback on every event.
If timeout was declared during the client initialization and
the watch cannot be created during that time the method raises
a ``WatchTimedOut`` exception.
:param key: key to watch
:param callback: callback function
:returns: watch_id. Later it could be used for cancelling watch.
"""
try:
return self.watcher.add_callback(*args, **kwargs)
except queue.Empty:
raise exceptions.WatchTimedOut() | python | {
"resource": ""
} |
q36285 | Etcd3Client.watch_prefix | train | def watch_prefix(self, key_prefix, **kwargs):
"""Watches a range of keys with a prefix."""
kwargs['range_end'] = \
utils.increment_last_byte(utils.to_bytes(key_prefix))
return self.watch(key_prefix, **kwargs) | python | {
"resource": ""
} |
q36286 | Etcd3Client.watch_prefix_once | train | def watch_prefix_once(self, key_prefix, timeout=None, **kwargs):
"""
Watches a range of keys with a prefix and stops after the first event.
If the timeout was specified and event didn't arrived method
will raise ``WatchTimedOut`` exception.
"""
kwargs['range_end'] = \
utils.increment_last_byte(utils.to_bytes(key_prefix))
return self.watch_once(key_prefix, timeout=timeout, **kwargs) | python | {
"resource": ""
} |
q36287 | Etcd3Client._ops_to_requests | train | def _ops_to_requests(self, ops):
"""
Return a list of grpc requests.
Returns list from an input list of etcd3.transactions.{Put, Get,
Delete, Txn} objects.
"""
request_ops = []
for op in ops:
if isinstance(op, transactions.Put):
request = self._build_put_request(op.key, op.value,
op.lease, op.prev_kv)
request_op = etcdrpc.RequestOp(request_put=request)
request_ops.append(request_op)
elif isinstance(op, transactions.Get):
request = self._build_get_range_request(op.key, op.range_end)
request_op = etcdrpc.RequestOp(request_range=request)
request_ops.append(request_op)
elif isinstance(op, transactions.Delete):
request = self._build_delete_request(op.key, op.range_end,
op.prev_kv)
request_op = etcdrpc.RequestOp(request_delete_range=request)
request_ops.append(request_op)
elif isinstance(op, transactions.Txn):
compare = [c.build_message() for c in op.compare]
success_ops = self._ops_to_requests(op.success)
failure_ops = self._ops_to_requests(op.failure)
request = etcdrpc.TxnRequest(compare=compare,
success=success_ops,
failure=failure_ops)
request_op = etcdrpc.RequestOp(request_txn=request)
request_ops.append(request_op)
else:
raise Exception(
'Unknown request class {}'.format(op.__class__))
return request_ops | python | {
"resource": ""
} |
q36288 | Etcd3Client.transaction | train | def transaction(self, compare, success=None, failure=None):
"""
Perform a transaction.
Example usage:
.. code-block:: python
etcd.transaction(
compare=[
etcd.transactions.value('/doot/testing') == 'doot',
etcd.transactions.version('/doot/testing') > 0,
],
success=[
etcd.transactions.put('/doot/testing', 'success'),
],
failure=[
etcd.transactions.put('/doot/testing', 'failure'),
]
)
:param compare: A list of comparisons to make
:param success: A list of operations to perform if all the comparisons
are true
:param failure: A list of operations to perform if any of the
comparisons are false
:return: A tuple of (operation status, responses)
"""
compare = [c.build_message() for c in compare]
success_ops = self._ops_to_requests(success)
failure_ops = self._ops_to_requests(failure)
transaction_request = etcdrpc.TxnRequest(compare=compare,
success=success_ops,
failure=failure_ops)
txn_response = self.kvstub.Txn(
transaction_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
)
responses = []
for response in txn_response.responses:
response_type = response.WhichOneof('response')
if response_type in ['response_put', 'response_delete_range',
'response_txn']:
responses.append(response)
elif response_type == 'response_range':
range_kvs = []
for kv in response.response_range.kvs:
range_kvs.append((kv.value,
KVMetadata(kv, txn_response.header)))
responses.append(range_kvs)
return txn_response.succeeded, responses | python | {
"resource": ""
} |
q36289 | Etcd3Client.lease | train | def lease(self, ttl, lease_id=None):
"""
Create a new lease.
All keys attached to this lease will be expired and deleted if the
lease expires. A lease can be sent keep alive messages to refresh the
ttl.
:param ttl: Requested time to live
:param lease_id: Requested ID for the lease
:returns: new lease
:rtype: :class:`.Lease`
"""
lease_grant_request = etcdrpc.LeaseGrantRequest(TTL=ttl, ID=lease_id)
lease_grant_response = self.leasestub.LeaseGrant(
lease_grant_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
)
return leases.Lease(lease_id=lease_grant_response.ID,
ttl=lease_grant_response.TTL,
etcd_client=self) | python | {
"resource": ""
} |
q36290 | Etcd3Client.revoke_lease | train | def revoke_lease(self, lease_id):
"""
Revoke a lease.
:param lease_id: ID of the lease to revoke.
"""
lease_revoke_request = etcdrpc.LeaseRevokeRequest(ID=lease_id)
self.leasestub.LeaseRevoke(
lease_revoke_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
) | python | {
"resource": ""
} |
q36291 | Etcd3Client.lock | train | def lock(self, name, ttl=60):
"""
Create a new lock.
:param name: name of the lock
:type name: string or bytes
:param ttl: length of time for the lock to live for in seconds. The
lock will be released after this time elapses, unless
refreshed
:type ttl: int
:returns: new lock
:rtype: :class:`.Lock`
"""
return locks.Lock(name, ttl=ttl, etcd_client=self) | python | {
"resource": ""
} |
q36292 | Etcd3Client.add_member | train | def add_member(self, urls):
"""
Add a member into the cluster.
:returns: new member
:rtype: :class:`.Member`
"""
member_add_request = etcdrpc.MemberAddRequest(peerURLs=urls)
member_add_response = self.clusterstub.MemberAdd(
member_add_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
)
member = member_add_response.member
return etcd3.members.Member(member.ID,
member.name,
member.peerURLs,
member.clientURLs,
etcd_client=self) | python | {
"resource": ""
} |
q36293 | Etcd3Client.remove_member | train | def remove_member(self, member_id):
"""
Remove an existing member from the cluster.
:param member_id: ID of the member to remove
"""
member_rm_request = etcdrpc.MemberRemoveRequest(ID=member_id)
self.clusterstub.MemberRemove(
member_rm_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
) | python | {
"resource": ""
} |
q36294 | Etcd3Client.update_member | train | def update_member(self, member_id, peer_urls):
"""
Update the configuration of an existing member in the cluster.
:param member_id: ID of the member to update
:param peer_urls: new list of peer urls the member will use to
communicate with the cluster
"""
member_update_request = etcdrpc.MemberUpdateRequest(ID=member_id,
peerURLs=peer_urls)
self.clusterstub.MemberUpdate(
member_update_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
) | python | {
"resource": ""
} |
q36295 | Etcd3Client.members | train | def members(self):
"""
List of all members associated with the cluster.
:type: sequence of :class:`.Member`
"""
member_list_request = etcdrpc.MemberListRequest()
member_list_response = self.clusterstub.MemberList(
member_list_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
)
for member in member_list_response.members:
yield etcd3.members.Member(member.ID,
member.name,
member.peerURLs,
member.clientURLs,
etcd_client=self) | python | {
"resource": ""
} |
q36296 | Etcd3Client.compact | train | def compact(self, revision, physical=False):
"""
Compact the event history in etcd up to a given revision.
All superseded keys with a revision less than the compaction revision
will be removed.
:param revision: revision for the compaction operation
:param physical: if set to True, the request will wait until the
compaction is physically applied to the local database
such that compacted entries are totally removed from
the backend database
"""
compact_request = etcdrpc.CompactionRequest(revision=revision,
physical=physical)
self.kvstub.Compact(
compact_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
) | python | {
"resource": ""
} |
q36297 | Etcd3Client.defragment | train | def defragment(self):
"""Defragment a member's backend database to recover storage space."""
defrag_request = etcdrpc.DefragmentRequest()
self.maintenancestub.Defragment(
defrag_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
) | python | {
"resource": ""
} |
q36298 | Etcd3Client.hash | train | def hash(self):
"""
Return the hash of the local KV state.
:returns: kv state hash
:rtype: int
"""
hash_request = etcdrpc.HashRequest()
return self.maintenancestub.Hash(hash_request).hash | python | {
"resource": ""
} |
q36299 | Etcd3Client.create_alarm | train | def create_alarm(self, member_id=0):
"""Create an alarm.
If no member id is given, the alarm is activated for all the
members of the cluster. Only the `no space` alarm can be raised.
:param member_id: The cluster member id to create an alarm to.
If 0, the alarm is created for all the members
of the cluster.
:returns: list of :class:`.Alarm`
"""
alarm_request = self._build_alarm_request('activate',
member_id,
'no space')
alarm_response = self.maintenancestub.Alarm(
alarm_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
)
return [Alarm(alarm.alarm, alarm.memberID)
for alarm in alarm_response.alarms] | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.