| id (int32, 0-252k) | repo (string, 7-55) | path (string, 4-127) | func_name (string, 1-88) | original_string (string, 75-19.8k) | language (string, 1 class) | code (string, 75-19.8k) | code_tokens (list) | docstring (string, 3-17.3k) | docstring_tokens (list) | sha (string, 40) | url (string, 87-242) |
|---|---|---|---|---|---|---|---|---|---|---|---|
20,500
|
pandas-dev/pandas
|
pandas/core/strings.py
|
str_contains
|
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
"""
Test if pattern or regex is contained within a string of a Series or Index.
Return boolean Series or Index based on whether a given pattern or regex is
contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series or Index of boolean values
A Series or Index of boolean values indicating whether the
given pattern is contained within the string of each element
of the Series or Index.
See Also
--------
match : Analogous, but stricter, relying on re.match instead of re.search.
Series.str.startswith : Test if the start of each string element matches a
pattern.
Series.str.endswith : Same as startswith, but tests the end of string.
Examples
--------
Returning a Series of booleans using only a literal pattern.
>>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 NaN
dtype: object
Returning an Index of booleans using only a literal pattern.
>>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN])
>>> ind.str.contains('23', regex=False)
Index([False, False, False, True, nan], dtype='object')
Specifying case sensitivity using `case`.
>>> s1.str.contains('oG', case=True, regex=True)
0 False
1 False
2 False
3 False
4 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN` replaces NaN values
with `False`. If the Series or Index does not contain NaN values,
the resulting dtype will be `bool`; otherwise it will be `object`.
>>> s1.str.contains('og', na=False, regex=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
Returning 'house' or 'dog' when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 NaN
dtype: object
Ignoring case sensitivity using `flags` with regex.
>>> import re
>>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)
0 False
1 False
2 True
3 False
4 NaN
dtype: object
Returning any digit using regular expression.
>>> s1.str.contains('\\d', regex=True)
0 False
1 False
2 False
3 True
4 NaN
dtype: object
Ensure `pat` is not a literal pattern when `regex` is set to True.
Note that in the following example one might expect only `s2[1]` and `s2[3]` to
return `True`. However, '.0' as a regex matches any character
followed by a 0.
>>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
"""
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0:
warnings.warn("This pattern has match groups. To actually get the"
" groups, use str.extract.", UserWarning,
stacklevel=3)
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
upper_pat = pat.upper()
f = lambda x: upper_pat in x
uppered = _na_map(lambda x: x.upper(), arr)
return _na_map(f, uppered, na, dtype=bool)
return _na_map(f, arr, na, dtype=bool)
|
python
|
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
"""
Test if pattern or regex is contained within a string of a Series or Index.
Return boolean Series or Index based on whether a given pattern or regex is
contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series or Index of boolean values
A Series or Index of boolean values indicating whether the
given pattern is contained within the string of each element
of the Series or Index.
See Also
--------
match : Analogous, but stricter, relying on re.match instead of re.search.
Series.str.startswith : Test if the start of each string element matches a
pattern.
Series.str.endswith : Same as startswith, but tests the end of string.
Examples
--------
Returning a Series of booleans using only a literal pattern.
>>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 NaN
dtype: object
Returning an Index of booleans using only a literal pattern.
>>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN])
>>> ind.str.contains('23', regex=False)
Index([False, False, False, True, nan], dtype='object')
Specifying case sensitivity using `case`.
>>> s1.str.contains('oG', case=True, regex=True)
0 False
1 False
2 False
3 False
4 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN` replaces NaN values
with `False`. If the Series or Index does not contain NaN values,
the resulting dtype will be `bool`; otherwise it will be `object`.
>>> s1.str.contains('og', na=False, regex=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
Returning 'house' or 'dog' when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 NaN
dtype: object
Ignoring case sensitivity using `flags` with regex.
>>> import re
>>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)
0 False
1 False
2 True
3 False
4 NaN
dtype: object
Returning any digit using regular expression.
>>> s1.str.contains('\\d', regex=True)
0 False
1 False
2 False
3 True
4 NaN
dtype: object
Ensure `pat` is not a literal pattern when `regex` is set to True.
Note that in the following example one might expect only `s2[1]` and `s2[3]` to
return `True`. However, '.0' as a regex matches any character
followed by a 0.
>>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
"""
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0:
warnings.warn("This pattern has match groups. To actually get the"
" groups, use str.extract.", UserWarning,
stacklevel=3)
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
upper_pat = pat.upper()
f = lambda x: upper_pat in x
uppered = _na_map(lambda x: x.upper(), arr)
return _na_map(f, uppered, na, dtype=bool)
return _na_map(f, arr, na, dtype=bool)
|
[
"def",
"str_contains",
"(",
"arr",
",",
"pat",
",",
"case",
"=",
"True",
",",
"flags",
"=",
"0",
",",
"na",
"=",
"np",
".",
"nan",
",",
"regex",
"=",
"True",
")",
":",
"if",
"regex",
":",
"if",
"not",
"case",
":",
"flags",
"|=",
"re",
".",
"IGNORECASE",
"regex",
"=",
"re",
".",
"compile",
"(",
"pat",
",",
"flags",
"=",
"flags",
")",
"if",
"regex",
".",
"groups",
">",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"This pattern has match groups. To actually get the\"",
"\" groups, use str.extract.\"",
",",
"UserWarning",
",",
"stacklevel",
"=",
"3",
")",
"f",
"=",
"lambda",
"x",
":",
"bool",
"(",
"regex",
".",
"search",
"(",
"x",
")",
")",
"else",
":",
"if",
"case",
":",
"f",
"=",
"lambda",
"x",
":",
"pat",
"in",
"x",
"else",
":",
"upper_pat",
"=",
"pat",
".",
"upper",
"(",
")",
"f",
"=",
"lambda",
"x",
":",
"upper_pat",
"in",
"x",
"uppered",
"=",
"_na_map",
"(",
"lambda",
"x",
":",
"x",
".",
"upper",
"(",
")",
",",
"arr",
")",
"return",
"_na_map",
"(",
"f",
",",
"uppered",
",",
"na",
",",
"dtype",
"=",
"bool",
")",
"return",
"_na_map",
"(",
"f",
",",
"arr",
",",
"na",
",",
"dtype",
"=",
"bool",
")"
] |
Test if pattern or regex is contained within a string of a Series or Index.
Return boolean Series or Index based on whether a given pattern or regex is
contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series or Index of boolean values
A Series or Index of boolean values indicating whether the
given pattern is contained within the string of each element
of the Series or Index.
See Also
--------
match : Analogous, but stricter, relying on re.match instead of re.search.
Series.str.startswith : Test if the start of each string element matches a
pattern.
Series.str.endswith : Same as startswith, but tests the end of string.
Examples
--------
Returning a Series of booleans using only a literal pattern.
>>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 NaN
dtype: object
Returning an Index of booleans using only a literal pattern.
>>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN])
>>> ind.str.contains('23', regex=False)
Index([False, False, False, True, nan], dtype='object')
Specifying case sensitivity using `case`.
>>> s1.str.contains('oG', case=True, regex=True)
0 False
1 False
2 False
3 False
4 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN` replaces NaN values
with `False`. If the Series or Index does not contain NaN values,
the resulting dtype will be `bool`; otherwise it will be `object`.
>>> s1.str.contains('og', na=False, regex=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
Returning 'house' or 'dog' when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 NaN
dtype: object
Ignoring case sensitivity using `flags` with regex.
>>> import re
>>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)
0 False
1 False
2 True
3 False
4 NaN
dtype: object
Returning any digit using regular expression.
>>> s1.str.contains('\\d', regex=True)
0 False
1 False
2 False
3 True
4 NaN
dtype: object
Ensure `pat` is not a literal pattern when `regex` is set to True.
Note that in the following example one might expect only `s2[1]` and `s2[3]` to
return `True`. However, '.0' as a regex matches any character
followed by a 0.
>>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
|
[
"Test",
"if",
"pattern",
"or",
"regex",
"is",
"contained",
"within",
"a",
"string",
"of",
"a",
"Series",
"or",
"Index",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L167-L310
|
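The implementation above warns when the compiled pattern contains capture groups and points users to `str.extract`. A minimal sketch of that behavior, assuming only the public `Series.str` API:

import warnings
import pandas as pd

s = pd.Series(['Mouse', 'dog', 'house and parrot'])

# A capture group in the pattern still yields booleans, but triggers the
# UserWarning raised in str_contains above.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    result = s.str.contains('(og)')
print(result.tolist())                                            # [False, True, False]
print(any(issubclass(w.category, UserWarning) for w in caught))   # True

# str.extract is the suggested alternative: it returns the captured text.
print(s.str.extract('(og)', expand=False).tolist())               # [nan, 'og', nan]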
20,501
|
pandas-dev/pandas
|
pandas/core/strings.py
|
str_startswith
|
def str_startswith(arr, pat, na=np.nan):
"""
Test if the start of each string element matches a pattern.
Equivalent to :meth:`str.startswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the start of each string element.
See Also
--------
str.startswith : Python standard library string method.
Series.str.endswith : Same as startswith, but tests the end of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])
>>> s
0 bat
1 Bear
2 cat
3 NaN
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.startswith('b', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr, na, dtype=bool)
|
python
|
def str_startswith(arr, pat, na=np.nan):
"""
Test if the start of each string element matches a pattern.
Equivalent to :meth:`str.startswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the start of each string element.
See Also
--------
str.startswith : Python standard library string method.
Series.str.endswith : Same as startswith, but tests the end of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])
>>> s
0 bat
1 Bear
2 cat
3 NaN
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.startswith('b', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr, na, dtype=bool)
|
[
"def",
"str_startswith",
"(",
"arr",
",",
"pat",
",",
"na",
"=",
"np",
".",
"nan",
")",
":",
"f",
"=",
"lambda",
"x",
":",
"x",
".",
"startswith",
"(",
"pat",
")",
"return",
"_na_map",
"(",
"f",
",",
"arr",
",",
"na",
",",
"dtype",
"=",
"bool",
")"
] |
Test if the start of each string element matches a pattern.
Equivalent to :meth:`str.startswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the start of each string element.
See Also
--------
str.startswith : Python standard library string method.
Series.str.endswith : Same as startswith, but tests the end of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])
>>> s
0 bat
1 Bear
2 cat
3 NaN
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.startswith('b', na=False)
0 True
1 False
2 False
3 False
dtype: bool
|
[
"Test",
"if",
"the",
"start",
"of",
"each",
"string",
"element",
"matches",
"a",
"pattern",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L313-L365
|
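Since `pat` is matched literally here (no regex support), an anchored regex through `contains` or `match` is the pattern-based counterpart. A short comparison, assuming the public `Series.str` API:

import numpy as np
import pandas as pd

s = pd.Series(['bat', 'Bear', 'cat', np.nan])

# Literal prefix test -- regular expressions are not interpreted.
print(s.str.startswith('b', na=False).tolist())     # [True, False, False, False]

# Pattern-based prefix tests: an anchored regex via contains, or match.
print(s.str.contains('^[bB]', na=False).tolist())   # [True, True, False, False]
print(s.str.match('[bB]', na=False).tolist())       # [True, True, False, False]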
20,502
|
pandas-dev/pandas
|
pandas/core/strings.py
|
str_endswith
|
def str_endswith(arr, pat, na=np.nan):
"""
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na, dtype=bool)
|
python
|
def str_endswith(arr, pat, na=np.nan):
"""
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na, dtype=bool)
|
[
"def",
"str_endswith",
"(",
"arr",
",",
"pat",
",",
"na",
"=",
"np",
".",
"nan",
")",
":",
"f",
"=",
"lambda",
"x",
":",
"x",
".",
"endswith",
"(",
"pat",
")",
"return",
"_na_map",
"(",
"f",
",",
"arr",
",",
"na",
",",
"dtype",
"=",
"bool",
")"
] |
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 False
3 False
dtype: bool
|
[
"Test",
"if",
"the",
"end",
"of",
"each",
"string",
"element",
"matches",
"a",
"pattern",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L368-L420
|
20,503
|
pandas-dev/pandas
|
pandas/core/strings.py
|
str_repeat
|
def str_repeat(arr, repeats):
"""
Duplicate each string in the Series or Index.
Parameters
----------
repeats : int or sequence of int
Same value for all (int) or different value per (sequence).
Returns
-------
Series or Index of object
Series or Index of repeated string objects specified by
input parameter repeats.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
Single int repeats string in Series
>>> s.str.repeat(repeats=2)
0 aa
1 bb
2 cc
dtype: object
Sequence of int repeats corresponding string in Series
>>> s.str.repeat(repeats=[1, 2, 3])
0 a
1 bb
2 ccc
dtype: object
"""
if is_scalar(repeats):
def scalar_rep(x):
try:
return bytes.__mul__(x, repeats)
except TypeError:
return str.__mul__(x, repeats)
return _na_map(scalar_rep, arr)
else:
def rep(x, r):
try:
return bytes.__mul__(x, r)
except TypeError:
return str.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
result = libops.vec_binop(com.values_from_object(arr), repeats, rep)
return result
|
python
|
def str_repeat(arr, repeats):
"""
Duplicate each string in the Series or Index.
Parameters
----------
repeats : int or sequence of int
Same value for all (int) or different value per (sequence).
Returns
-------
Series or Index of object
Series or Index of repeated string objects specified by
input parameter repeats.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
Single int repeats string in Series
>>> s.str.repeat(repeats=2)
0 aa
1 bb
2 cc
dtype: object
Sequence of int repeats corresponding string in Series
>>> s.str.repeat(repeats=[1, 2, 3])
0 a
1 bb
2 ccc
dtype: object
"""
if is_scalar(repeats):
def scalar_rep(x):
try:
return bytes.__mul__(x, repeats)
except TypeError:
return str.__mul__(x, repeats)
return _na_map(scalar_rep, arr)
else:
def rep(x, r):
try:
return bytes.__mul__(x, r)
except TypeError:
return str.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
result = libops.vec_binop(com.values_from_object(arr), repeats, rep)
return result
|
[
"def",
"str_repeat",
"(",
"arr",
",",
"repeats",
")",
":",
"if",
"is_scalar",
"(",
"repeats",
")",
":",
"def",
"scalar_rep",
"(",
"x",
")",
":",
"try",
":",
"return",
"bytes",
".",
"__mul__",
"(",
"x",
",",
"repeats",
")",
"except",
"TypeError",
":",
"return",
"str",
".",
"__mul__",
"(",
"x",
",",
"repeats",
")",
"return",
"_na_map",
"(",
"scalar_rep",
",",
"arr",
")",
"else",
":",
"def",
"rep",
"(",
"x",
",",
"r",
")",
":",
"try",
":",
"return",
"bytes",
".",
"__mul__",
"(",
"x",
",",
"r",
")",
"except",
"TypeError",
":",
"return",
"str",
".",
"__mul__",
"(",
"x",
",",
"r",
")",
"repeats",
"=",
"np",
".",
"asarray",
"(",
"repeats",
",",
"dtype",
"=",
"object",
")",
"result",
"=",
"libops",
".",
"vec_binop",
"(",
"com",
".",
"values_from_object",
"(",
"arr",
")",
",",
"repeats",
",",
"rep",
")",
"return",
"result"
] |
Duplicate each string in the Series or Index.
Parameters
----------
repeats : int or sequence of int
Same value for all (int) or different value per (sequence).
Returns
-------
Series or Index of object
Series or Index of repeated string objects specified by
input parameter repeats.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
Single int repeats string in Series
>>> s.str.repeat(repeats=2)
0 aa
1 bb
2 cc
dtype: object
Sequence of int repeats corresponding string in Series
>>> s.str.repeat(repeats=[1, 2, 3])
0 a
1 bb
2 ccc
dtype: object
|
[
"Duplicate",
"each",
"string",
"in",
"the",
"Series",
"or",
"Index",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L581-L639
|
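The scalar branch above repeats each element by calling the unbound `bytes.__mul__` and falling back to `str.__mul__` when that raises `TypeError`, so both bytes and str elements are handled by one helper. A self-contained illustration of that dispatch:

# Calling the unbound __mul__ of the wrong type raises TypeError, so bytes
# is tried first and str is used as the fallback -- mirroring scalar_rep/rep.
def _rep(x, r):
    try:
        return bytes.__mul__(x, r)
    except TypeError:
        return str.__mul__(x, r)

print(_rep('ab', 3))    # 'ababab'  (str path)
print(_rep(b'ab', 3))   # b'ababab' (bytes path)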
20,504
|
pandas-dev/pandas
|
pandas/core/strings.py
|
str_match
|
def str_match(arr, pat, case=True, flags=0, na=np.nan):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
Returns
-------
Series/array of boolean values
See Also
--------
contains : Analogous, but less strict, relying on re.search instead of
re.match.
extract : Extract matched groups.
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
dtype = bool
f = lambda x: bool(regex.match(x))
return _na_map(f, arr, na, dtype=dtype)
|
python
|
def str_match(arr, pat, case=True, flags=0, na=np.nan):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
Returns
-------
Series/array of boolean values
See Also
--------
contains : Analogous, but less strict, relying on re.search instead of
re.match.
extract : Extract matched groups.
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
dtype = bool
f = lambda x: bool(regex.match(x))
return _na_map(f, arr, na, dtype=dtype)
|
[
"def",
"str_match",
"(",
"arr",
",",
"pat",
",",
"case",
"=",
"True",
",",
"flags",
"=",
"0",
",",
"na",
"=",
"np",
".",
"nan",
")",
":",
"if",
"not",
"case",
":",
"flags",
"|=",
"re",
".",
"IGNORECASE",
"regex",
"=",
"re",
".",
"compile",
"(",
"pat",
",",
"flags",
"=",
"flags",
")",
"dtype",
"=",
"bool",
"f",
"=",
"lambda",
"x",
":",
"bool",
"(",
"regex",
".",
"match",
"(",
"x",
")",
")",
"return",
"_na_map",
"(",
"f",
",",
"arr",
",",
"na",
",",
"dtype",
"=",
"dtype",
")"
] |
Determine if each string matches a regular expression.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
Returns
-------
Series/array of boolean values
See Also
--------
contains : Analogous, but less strict, relying on re.search instead of
re.match.
extract : Extract matched groups.
|
[
"Determine",
"if",
"each",
"string",
"matches",
"a",
"regular",
"expression",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L642-L675
|
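Unlike the surrounding docstrings, this one carries no examples. A short comparison of `match` (anchored via `re.match`) against `contains` (unanchored via `re.search`), assuming the public `Series.str` API:

import numpy as np
import pandas as pd

s = pd.Series(['dog', 'hot dog', 'Dog house', np.nan])

# match requires the pattern to hit at the start of each string;
# contains accepts a hit anywhere.
print(s.str.match('dog', na=False).tolist())               # [True, False, False, False]
print(s.str.contains('dog', na=False).tolist())            # [True, True, False, False]
print(s.str.match('dog', case=False, na=False).tolist())   # [True, False, True, False]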
20,505
|
pandas-dev/pandas
|
pandas/core/strings.py
|
_groups_or_na_fun
|
def _groups_or_na_fun(regex):
"""Used in both extract_noexpand and extract_frame"""
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, str):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
|
python
|
def _groups_or_na_fun(regex):
"""Used in both extract_noexpand and extract_frame"""
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, str):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
|
[
"def",
"_groups_or_na_fun",
"(",
"regex",
")",
":",
"if",
"regex",
".",
"groups",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"pattern contains no capture groups\"",
")",
"empty_row",
"=",
"[",
"np",
".",
"nan",
"]",
"*",
"regex",
".",
"groups",
"def",
"f",
"(",
"x",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"return",
"empty_row",
"m",
"=",
"regex",
".",
"search",
"(",
"x",
")",
"if",
"m",
":",
"return",
"[",
"np",
".",
"nan",
"if",
"item",
"is",
"None",
"else",
"item",
"for",
"item",
"in",
"m",
".",
"groups",
"(",
")",
"]",
"else",
":",
"return",
"empty_row",
"return",
"f"
] |
Used in both extract_noexpand and extract_frame
|
[
"Used",
"in",
"both",
"extract_noexpand",
"and",
"extract_frame"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L685-L699
|
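A standalone sketch of the row extractor built by this helper, using an illustrative pattern: non-strings and non-matches map to a row of NaN, while a match yields one value per capture group.

import re
import numpy as np

regex = re.compile(r'(?P<letter>[ab])(?P<digit>\d)')
empty_row = [np.nan] * regex.groups

def row(x):
    # Same shape of logic as the closure f above.
    if not isinstance(x, str):
        return empty_row
    m = regex.search(x)
    return [np.nan if g is None else g for g in m.groups()] if m else empty_row

print(row('a1'))     # ['a', '1']
print(row('xyz'))    # [nan, nan]
print(row(np.nan))   # [nan, nan]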
20,506
|
pandas-dev/pandas
|
pandas/core/strings.py
|
str_extract
|
def str_extract(arr, pat, flags=0, expand=True):
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the
first match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
modify regular expression matching for things like case,
spaces, etc. For more details, see :mod:`re`.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
.. versionadded:: 0.18.0
Returns
-------
DataFrame or Series or Index
A DataFrame with one row for each subject string, and one
column for each group. Any capture group names in regular
expression pat will be used for column names; otherwise
capture group numbers will be used. The dtype of each result
column is always object, even when no match is found. If
``expand=False`` and pat has only one capture group, then
return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : Returns all matches (not just the first match).
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = pd.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract(r'([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
"""
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand:
return _str_extract_frame(arr._orig, pat, flags=flags)
else:
result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)
return arr._wrap_result(result, name=name, expand=expand)
|
python
|
def str_extract(arr, pat, flags=0, expand=True):
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the
first match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
modify regular expression matching for things like case,
spaces, etc. For more details, see :mod:`re`.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
.. versionadded:: 0.18.0
Returns
-------
DataFrame or Series or Index
A DataFrame with one row for each subject string, and one
column for each group. Any capture group names in regular
expression pat will be used for column names; otherwise
capture group numbers will be used. The dtype of each result
column is always object, even when no match is found. If
``expand=False`` and pat has only one capture group, then
return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : Returns all matches (not just the first match).
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = pd.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract(r'([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
"""
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand:
return _str_extract_frame(arr._orig, pat, flags=flags)
else:
result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)
return arr._wrap_result(result, name=name, expand=expand)
|
[
"def",
"str_extract",
"(",
"arr",
",",
"pat",
",",
"flags",
"=",
"0",
",",
"expand",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"expand",
",",
"bool",
")",
":",
"raise",
"ValueError",
"(",
"\"expand must be True or False\"",
")",
"if",
"expand",
":",
"return",
"_str_extract_frame",
"(",
"arr",
".",
"_orig",
",",
"pat",
",",
"flags",
"=",
"flags",
")",
"else",
":",
"result",
",",
"name",
"=",
"_str_extract_noexpand",
"(",
"arr",
".",
"_parent",
",",
"pat",
",",
"flags",
"=",
"flags",
")",
"return",
"arr",
".",
"_wrap_result",
"(",
"result",
",",
"name",
"=",
"name",
",",
"expand",
"=",
"expand",
")"
] |
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the
first match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
modify regular expression matching for things like case,
spaces, etc. For more details, see :mod:`re`.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
.. versionadded:: 0.18.0
Returns
-------
DataFrame or Series or Index
A DataFrame with one row for each subject string, and one
column for each group. Any capture group names in regular
expression pat will be used for column names; otherwise
capture group numbers will be used. The dtype of each result
column is always object, even when no match is found. If
``expand=False`` and pat has only one capture group, then
return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : Returns all matches (not just the first match).
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = pd.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract(r'([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
|
[
"r",
"Extract",
"capture",
"groups",
"in",
"the",
"regex",
"pat",
"as",
"columns",
"in",
"a",
"DataFrame",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L762-L851
|
20,507
|
pandas-dev/pandas
|
pandas/core/strings.py
|
str_slice
|
def str_slice(arr, start=None, stop=None, step=None):
"""
Slice substrings from each element in the Series or Index.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series or Index of object
Series or Index from sliced substring from original string object.
See Also
--------
Series.str.slice_replace : Replace a slice with a string.
Series.str.get : Return element at position.
Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
being the position.
Examples
--------
>>> s = pd.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
Equivalent behaviour to:
>>> s.str[0:5:3]
0 kl
1 f
2 cm
dtype: object
"""
obj = slice(start, stop, step)
f = lambda x: x[obj]
return _na_map(f, arr)
|
python
|
def str_slice(arr, start=None, stop=None, step=None):
"""
Slice substrings from each element in the Series or Index.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series or Index of object
Series or Index from sliced substring from original string object.
See Also
--------
Series.str.slice_replace : Replace a slice with a string.
Series.str.get : Return element at position.
Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
being the position.
Examples
--------
>>> s = pd.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
Equivalent behaviour to:
>>> s.str[0:5:3]
0 kl
1 f
2 cm
dtype: object
"""
obj = slice(start, stop, step)
f = lambda x: x[obj]
return _na_map(f, arr)
|
[
"def",
"str_slice",
"(",
"arr",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"obj",
"=",
"slice",
"(",
"start",
",",
"stop",
",",
"step",
")",
"f",
"=",
"lambda",
"x",
":",
"x",
"[",
"obj",
"]",
"return",
"_na_map",
"(",
"f",
",",
"arr",
")"
] |
Slice substrings from each element in the Series or Index.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series or Index of object
Series or Index from sliced substring from original string object.
See Also
--------
Series.str.slice_replace : Replace a slice with a string.
Series.str.get : Return element at position.
Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
being the position.
Examples
--------
>>> s = pd.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
Equivalent behaviour to:
>>> s.str[0:5:3]
0 kl
1 f
2 cm
dtype: object
|
[
"Slice",
"substrings",
"from",
"each",
"element",
"in",
"the",
"Series",
"or",
"Index",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L1345-L1413
|
20,508
|
pandas-dev/pandas
|
pandas/core/strings.py
|
str_slice_replace
|
def str_slice_replace(arr, start=None, stop=None, repl=None):
"""
Replace a positional slice of a string with another value.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified (None), the sliced region
is replaced with an empty string.
Returns
-------
Series or Index
Same type as the original object.
See Also
--------
Series.str.slice : Just slicing without replacement.
Examples
--------
>>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the end of the
string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the start of the string to `stop` is replaced
with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start` to `stop` is
replaced with `repl`. Everything before or after `start` and `stop` is
included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
if repl is None:
repl = ''
def f(x):
if x[start:stop] == '':
local_stop = start
else:
local_stop = stop
y = ''
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return _na_map(f, arr)
|
python
|
def str_slice_replace(arr, start=None, stop=None, repl=None):
"""
Replace a positional slice of a string with another value.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified (None), the sliced region
is replaced with an empty string.
Returns
-------
Series or Index
Same type as the original object.
See Also
--------
Series.str.slice : Just slicing without replacement.
Examples
--------
>>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the end of the
string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the start of the string to `stop` is replaced
with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start` to `stop` is
replaced with `repl`. Everything before or after `start` and `stop` is
included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
if repl is None:
repl = ''
def f(x):
if x[start:stop] == '':
local_stop = start
else:
local_stop = stop
y = ''
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return _na_map(f, arr)
|
[
"def",
"str_slice_replace",
"(",
"arr",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"repl",
"=",
"None",
")",
":",
"if",
"repl",
"is",
"None",
":",
"repl",
"=",
"''",
"def",
"f",
"(",
"x",
")",
":",
"if",
"x",
"[",
"start",
":",
"stop",
"]",
"==",
"''",
":",
"local_stop",
"=",
"start",
"else",
":",
"local_stop",
"=",
"stop",
"y",
"=",
"''",
"if",
"start",
"is",
"not",
"None",
":",
"y",
"+=",
"x",
"[",
":",
"start",
"]",
"y",
"+=",
"repl",
"if",
"stop",
"is",
"not",
"None",
":",
"y",
"+=",
"x",
"[",
"local_stop",
":",
"]",
"return",
"y",
"return",
"_na_map",
"(",
"f",
",",
"arr",
")"
] |
Replace a positional slice of a string with another value.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified (None), the sliced region
is replaced with an empty string.
Returns
-------
Series or Index
Same type as the original object.
See Also
--------
Series.str.slice : Just slicing without replacement.
Examples
--------
>>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the end of the
string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the start of the string to `stop` is replaced
with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start` to `stop` is
replaced with `repl`. Everything before or after `start` and `stop` is
included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
|
[
"Replace",
"a",
"positional",
"slice",
"of",
"a",
"string",
"with",
"another",
"value",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L1416-L1504
|
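The `local_stop` guard above handles the case where the requested slice is empty: the replacement is then inserted at `start` instead of splicing at `stop`, so no text is duplicated. A small demonstration (outputs assume the implementation shown above):

import pandas as pd

s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])

# stop < start makes the slice empty; without the guard, 'abc' would become
# 'abXbc' (x[:2] + 'X' + x[1:]), duplicating the 'b'.
print(s.str.slice_replace(start=2, stop=1, repl='X').tolist())
# ['aX', 'abX', 'abXc', 'abXdc', 'abXcde']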
20,509
|
pandas-dev/pandas
|
pandas/core/strings.py
|
str_get
|
def str_get(arr, i):
"""
Extract element from each component at specified position.
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series or Index
Examples
--------
>>> s = pd.Series(["String",
... (1, 2, 3),
... ["a", "b", "c"],
... 123,
... -456,
... {1: "Hello", "2": "World"}])
>>> s
0 String
1 (1, 2, 3)
2 [a, b, c]
3 123
4 -456
5 {1: 'Hello', '2': 'World'}
dtype: object
>>> s.str.get(1)
0 t
1 2
2 b
3 NaN
4 NaN
5 Hello
dtype: object
>>> s.str.get(-1)
0 g
1 3
2 c
3 NaN
4 NaN
5 None
dtype: object
"""
def f(x):
if isinstance(x, dict):
return x.get(i)
elif len(x) > i >= -len(x):
return x[i]
return np.nan
return _na_map(f, arr)
|
python
|
def str_get(arr, i):
"""
Extract element from each component at specified position.
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series or Index
Examples
--------
>>> s = pd.Series(["String",
... (1, 2, 3),
... ["a", "b", "c"],
... 123,
... -456,
... {1: "Hello", "2": "World"}])
>>> s
0 String
1 (1, 2, 3)
2 [a, b, c]
3 123
4 -456
5 {1: 'Hello', '2': 'World'}
dtype: object
>>> s.str.get(1)
0 t
1 2
2 b
3 NaN
4 NaN
5 Hello
dtype: object
>>> s.str.get(-1)
0 g
1 3
2 c
3 NaN
4 NaN
5 None
dtype: object
"""
def f(x):
if isinstance(x, dict):
return x.get(i)
elif len(x) > i >= -len(x):
return x[i]
return np.nan
return _na_map(f, arr)
|
[
"def",
"str_get",
"(",
"arr",
",",
"i",
")",
":",
"def",
"f",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"dict",
")",
":",
"return",
"x",
".",
"get",
"(",
"i",
")",
"elif",
"len",
"(",
"x",
")",
">",
"i",
">=",
"-",
"len",
"(",
"x",
")",
":",
"return",
"x",
"[",
"i",
"]",
"return",
"np",
".",
"nan",
"return",
"_na_map",
"(",
"f",
",",
"arr",
")"
] |
Extract element from each component at specified position.
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series or Index
Examples
--------
>>> s = pd.Series(["String",
... (1, 2, 3),
... ["a", "b", "c"],
... 123,
... -456,
... {1: "Hello", "2": "World"}])
>>> s
0 String
1 (1, 2, 3)
2 [a, b, c]
3 123
4 -456
5 {1: 'Hello', '2': 'World'}
dtype: object
>>> s.str.get(1)
0 t
1 2
2 b
3 NaN
4 NaN
5 Hello
dtype: object
>>> s.str.get(-1)
0 g
1 3
2 c
3 NaN
4 NaN
5 None
dtype: object
|
[
"Extract",
"element",
"from",
"each",
"component",
"at",
"specified",
"position",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L1616-L1673
|
20,510
|
pandas-dev/pandas
|
pandas/core/groupby/base.py
|
GroupByMixin._dispatch
|
def _dispatch(name, *args, **kwargs):
"""
Dispatch to apply.
"""
def outer(self, *args, **kwargs):
def f(x):
x = self._shallow_copy(x, groupby=self._groupby)
return getattr(x, name)(*args, **kwargs)
return self._groupby.apply(f)
outer.__name__ = name
return outer
|
python
|
def _dispatch(name, *args, **kwargs):
"""
Dispatch to apply.
"""
def outer(self, *args, **kwargs):
def f(x):
x = self._shallow_copy(x, groupby=self._groupby)
return getattr(x, name)(*args, **kwargs)
return self._groupby.apply(f)
outer.__name__ = name
return outer
|
[
"def",
"_dispatch",
"(",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"outer",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"f",
"(",
"x",
")",
":",
"x",
"=",
"self",
".",
"_shallow_copy",
"(",
"x",
",",
"groupby",
"=",
"self",
".",
"_groupby",
")",
"return",
"getattr",
"(",
"x",
",",
"name",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"_groupby",
".",
"apply",
"(",
"f",
")",
"outer",
".",
"__name__",
"=",
"name",
"return",
"outer"
] |
Dispatch to apply.
|
[
"Dispatch",
"to",
"apply",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/base.py#L20-L31
|
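The helper builds a forwarding method from a method name so that many delegating methods can be generated without writing each one by hand. A generic sketch of the same pattern, independent of the pandas groupby internals:

def make_forwarder(name):
    # Equivalent in spirit to _dispatch: close over the method name and
    # forward the call via getattr.
    def outer(self, *args, **kwargs):
        return getattr(self._target, name)(*args, **kwargs)
    outer.__name__ = name
    return outer

class Wrapper:
    def __init__(self, target):
        self._target = target
    upper = make_forwarder('upper')
    split = make_forwarder('split')

w = Wrapper('a b c')
print(w.upper())      # 'A B C'
print(w.split(' '))   # ['a', 'b', 'c']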
20,511
|
pandas-dev/pandas
|
pandas/compat/__init__.py
|
to_str
|
def to_str(s):
"""
Convert bytes and non-string into Python 3 str
"""
if isinstance(s, bytes):
s = s.decode('utf-8')
elif not isinstance(s, str):
s = str(s)
return s
|
python
|
def to_str(s):
"""
Convert bytes and non-string into Python 3 str
"""
if isinstance(s, bytes):
s = s.decode('utf-8')
elif not isinstance(s, str):
s = str(s)
return s
|
[
"def",
"to_str",
"(",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"bytes",
")",
":",
"s",
"=",
"s",
".",
"decode",
"(",
"'utf-8'",
")",
"elif",
"not",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"s",
"=",
"str",
"(",
"s",
")",
"return",
"s"
] |
Convert bytes and non-string into Python 3 str
|
[
"Convert",
"bytes",
"and",
"non",
"-",
"string",
"into",
"Python",
"3",
"str"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/__init__.py#L44-L52
|
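`pandas.compat.to_str` is internal, so the snippet below simply copies it for a standalone illustration of its three paths (bytes, non-string, str):

def to_str(s):
    # Copied from the function above for demonstration purposes.
    if isinstance(s, bytes):
        s = s.decode('utf-8')
    elif not isinstance(s, str):
        s = str(s)
    return s

print(to_str(b'caf\xc3\xa9'))   # 'café'  -- bytes are decoded as UTF-8
print(to_str(42))               # '42'    -- non-strings go through str()
print(to_str('abc'))            # 'abc'   -- str passes through unchanged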
20,512
|
pandas-dev/pandas
|
pandas/core/apply.py
|
frame_apply
|
def frame_apply(obj, func, axis=0, broadcast=None,
raw=False, reduce=None, result_type=None,
ignore_failures=False,
args=None, kwds=None):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(obj, func, broadcast=broadcast,
raw=raw, reduce=reduce, result_type=result_type,
ignore_failures=ignore_failures,
args=args, kwds=kwds)
|
python
|
def frame_apply(obj, func, axis=0, broadcast=None,
raw=False, reduce=None, result_type=None,
ignore_failures=False,
args=None, kwds=None):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(obj, func, broadcast=broadcast,
raw=raw, reduce=reduce, result_type=result_type,
ignore_failures=ignore_failures,
args=args, kwds=kwds)
|
[
"def",
"frame_apply",
"(",
"obj",
",",
"func",
",",
"axis",
"=",
"0",
",",
"broadcast",
"=",
"None",
",",
"raw",
"=",
"False",
",",
"reduce",
"=",
"None",
",",
"result_type",
"=",
"None",
",",
"ignore_failures",
"=",
"False",
",",
"args",
"=",
"None",
",",
"kwds",
"=",
"None",
")",
":",
"axis",
"=",
"obj",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"axis",
"==",
"0",
":",
"klass",
"=",
"FrameRowApply",
"elif",
"axis",
"==",
"1",
":",
"klass",
"=",
"FrameColumnApply",
"return",
"klass",
"(",
"obj",
",",
"func",
",",
"broadcast",
"=",
"broadcast",
",",
"raw",
"=",
"raw",
",",
"reduce",
"=",
"reduce",
",",
"result_type",
"=",
"result_type",
",",
"ignore_failures",
"=",
"ignore_failures",
",",
"args",
"=",
"args",
",",
"kwds",
"=",
"kwds",
")"
] |
construct and return a row or column based frame apply object
|
[
"construct",
"and",
"return",
"a",
"row",
"or",
"column",
"based",
"frame",
"apply",
"object"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/apply.py#L16-L31
|
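In this version of pandas, `DataFrame.apply` routes through this factory: `axis=0` selects the `FrameRowApply` path (function per column) and `axis=1` the `FrameColumnApply` path (function per row). A short example:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [10, 20, 30]})

# axis=0: the function sees each column; axis=1: it sees each row.
print(df.apply(lambda col: col.sum(), axis=0).tolist())  # [6, 60]
print(df.apply(lambda row: row.sum(), axis=1).tolist())  # [11, 22, 33]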
20,513
|
pandas-dev/pandas
|
pandas/core/apply.py
|
FrameApply.get_result
|
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis,
*self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, str):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = inspect.getfullargspec(func)
if 'axis' in sig.args:
self.kwds['axis'] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all='ignore'):
results = self.obj._data.apply('apply', func=self.f)
return self.obj._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
# broadcasting
if self.result_type == 'broadcast':
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
|
python
|
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis,
*self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, str):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = inspect.getfullargspec(func)
if 'axis' in sig.args:
self.kwds['axis'] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all='ignore'):
results = self.obj._data.apply('apply', func=self.f)
return self.obj._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
# broadcasting
if self.result_type == 'broadcast':
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
|
[
"def",
"get_result",
"(",
"self",
")",
":",
"# dispatch to agg",
"if",
"is_list_like",
"(",
"self",
".",
"f",
")",
"or",
"is_dict_like",
"(",
"self",
".",
"f",
")",
":",
"return",
"self",
".",
"obj",
".",
"aggregate",
"(",
"self",
".",
"f",
",",
"axis",
"=",
"self",
".",
"axis",
",",
"*",
"self",
".",
"args",
",",
"*",
"*",
"self",
".",
"kwds",
")",
"# all empty",
"if",
"len",
"(",
"self",
".",
"columns",
")",
"==",
"0",
"and",
"len",
"(",
"self",
".",
"index",
")",
"==",
"0",
":",
"return",
"self",
".",
"apply_empty_result",
"(",
")",
"# string dispatch",
"if",
"isinstance",
"(",
"self",
".",
"f",
",",
"str",
")",
":",
"# Support for `frame.transform('method')`",
"# Some methods (shift, etc.) require the axis argument, others",
"# don't, so inspect and insert if necessary.",
"func",
"=",
"getattr",
"(",
"self",
".",
"obj",
",",
"self",
".",
"f",
")",
"sig",
"=",
"inspect",
".",
"getfullargspec",
"(",
"func",
")",
"if",
"'axis'",
"in",
"sig",
".",
"args",
":",
"self",
".",
"kwds",
"[",
"'axis'",
"]",
"=",
"self",
".",
"axis",
"return",
"func",
"(",
"*",
"self",
".",
"args",
",",
"*",
"*",
"self",
".",
"kwds",
")",
"# ufunc",
"elif",
"isinstance",
"(",
"self",
".",
"f",
",",
"np",
".",
"ufunc",
")",
":",
"with",
"np",
".",
"errstate",
"(",
"all",
"=",
"'ignore'",
")",
":",
"results",
"=",
"self",
".",
"obj",
".",
"_data",
".",
"apply",
"(",
"'apply'",
",",
"func",
"=",
"self",
".",
"f",
")",
"return",
"self",
".",
"obj",
".",
"_constructor",
"(",
"data",
"=",
"results",
",",
"index",
"=",
"self",
".",
"index",
",",
"columns",
"=",
"self",
".",
"columns",
",",
"copy",
"=",
"False",
")",
"# broadcasting",
"if",
"self",
".",
"result_type",
"==",
"'broadcast'",
":",
"return",
"self",
".",
"apply_broadcast",
"(",
")",
"# one axis empty",
"elif",
"not",
"all",
"(",
"self",
".",
"obj",
".",
"shape",
")",
":",
"return",
"self",
".",
"apply_empty_result",
"(",
")",
"# raw",
"elif",
"self",
".",
"raw",
"and",
"not",
"self",
".",
"obj",
".",
"_is_mixed_type",
":",
"return",
"self",
".",
"apply_raw",
"(",
")",
"return",
"self",
".",
"apply_standard",
"(",
")"
] |
compute the results
|
[
"compute",
"the",
"results"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/apply.py#L108-L150
|
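The dispatch order above can be exercised from the public `DataFrame.apply`: a string is looked up as a method on the frame, a NumPy ufunc is applied blockwise, and any other callable falls through to the standard path. A sketch (outputs assume the behavior shown above):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, 4.0], 'b': [9.0, 16.0]})

print(df.apply('sum').tolist())                    # [5.0, 25.0]  -- string dispatch
print(df.apply(np.sqrt).values.tolist())           # [[1.0, 3.0], [2.0, 4.0]]  -- ufunc
print(df.apply(lambda col: col.max()).tolist())    # [4.0, 16.0]  -- standard path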
20,514
|
pandas-dev/pandas
|
pandas/core/apply.py
|
FrameApply.apply_empty_result
|
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0.
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ['reduce', None]:
return self.obj.copy()
# we may need to infer
reduce = self.result_type == 'reduce'
from pandas import Series
if not reduce:
EMPTY_SERIES = Series([])
try:
r = self.f(EMPTY_SERIES, *self.args, **self.kwds)
reduce = not isinstance(r, Series)
except Exception:
pass
if reduce:
return self.obj._constructor_sliced(np.nan, index=self.agg_axis)
else:
return self.obj.copy()
|
python
|
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0.
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ['reduce', None]:
return self.obj.copy()
# we may need to infer
reduce = self.result_type == 'reduce'
from pandas import Series
if not reduce:
EMPTY_SERIES = Series([])
try:
r = self.f(EMPTY_SERIES, *self.args, **self.kwds)
reduce = not isinstance(r, Series)
except Exception:
pass
if reduce:
return self.obj._constructor_sliced(np.nan, index=self.agg_axis)
else:
return self.obj.copy()
|
[
"def",
"apply_empty_result",
"(",
"self",
")",
":",
"# we are not asked to reduce or infer reduction",
"# so just return a copy of the existing object",
"if",
"self",
".",
"result_type",
"not",
"in",
"[",
"'reduce'",
",",
"None",
"]",
":",
"return",
"self",
".",
"obj",
".",
"copy",
"(",
")",
"# we may need to infer",
"reduce",
"=",
"self",
".",
"result_type",
"==",
"'reduce'",
"from",
"pandas",
"import",
"Series",
"if",
"not",
"reduce",
":",
"EMPTY_SERIES",
"=",
"Series",
"(",
"[",
"]",
")",
"try",
":",
"r",
"=",
"self",
".",
"f",
"(",
"EMPTY_SERIES",
",",
"*",
"self",
".",
"args",
",",
"*",
"*",
"self",
".",
"kwds",
")",
"reduce",
"=",
"not",
"isinstance",
"(",
"r",
",",
"Series",
")",
"except",
"Exception",
":",
"pass",
"if",
"reduce",
":",
"return",
"self",
".",
"obj",
".",
"_constructor_sliced",
"(",
"np",
".",
"nan",
",",
"index",
"=",
"self",
".",
"agg_axis",
")",
"else",
":",
"return",
"self",
".",
"obj",
".",
"copy",
"(",
")"
] |
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/apply.py#L152-L181
|
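A hedged sketch of the probe implemented by `FrameApply.apply_empty_result` above; the column names are hypothetical and the comment only restates what the code in this entry does:

import numpy as np
import pandas as pd

empty = pd.DataFrame(columns=['a', 'b'])   # zero rows, so one axis is empty
res = empty.apply(np.sum)
# np.sum on an empty Series returns a scalar rather than a Series, so the
# function is classified as a reduction and, per the code above, a NaN-filled
# Series indexed by the aggregation axis (the columns 'a' and 'b') comes back.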
20,515
|
pandas-dev/pandas
|
pandas/core/apply.py
|
FrameApply.apply_raw
|
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = reduction.reduce(self.values, self.f, axis=self.axis)
except Exception:
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result,
index=self.index,
columns=self.columns)
else:
return self.obj._constructor_sliced(result,
index=self.agg_axis)
|
python
|
apply to the values as a numpy array
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/apply.py#L183-L198
|
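A short sketch of the path handled by `FrameApply.apply_raw` above: `raw=True` hands each slice to the function as a plain NumPy array (column names are hypothetical):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, 2.0], 'b': [3.0, 4.0]})
col_sums = df.apply(np.sum, raw=True)          # each column passed as an ndarray
row_sums = df.apply(np.sum, axis=1, raw=True)  # each row passed as an ndarray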
20,516
|
pandas-dev/pandas
|
pandas/core/apply.py
|
FrameRowApply.wrap_results_for_axis
|
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
try:
result.index = self.res_columns
except ValueError:
pass
try:
result.columns = self.res_index
except ValueError:
pass
return result
|
python
|
return the results for the rows
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/apply.py#L335-L352
|
20,517
|
pandas-dev/pandas
|
pandas/core/apply.py
|
FrameColumnApply.wrap_results_for_axis
|
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == 'expand':
result = self.infer_to_same_shape()
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
from pandas import Series
result = Series(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
|
python
|
return the results for the columns
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/apply.py#L377-L395
|
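A minimal sketch of the `result_type='expand'` branch of `FrameColumnApply.wrap_results_for_axis` above (the column name is hypothetical):

import pandas as pd

df = pd.DataFrame({'x': [1, 2, 3]})
# Each row returns a list; 'expand' turns those lists into new columns
# via the infer_to_same_shape path.
expanded = df.apply(lambda row: [row['x'], row['x'] ** 2], axis=1, result_type='expand')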
20,518
|
pandas-dev/pandas
|
pandas/core/apply.py
|
FrameColumnApply.infer_to_same_shape
|
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
|
python
|
infer the results to the same shape as the input object
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/apply.py#L397-L410
|
20,519
|
fastai/fastai
|
fastai/vision/models/xception.py
|
xception
|
def xception(c, k=8, n_middle=8):
"Preview version of Xception network. Not tested yet - use at own risk. No pretrained model yet."
layers = [
conv(3, k*4, 3, 2),
conv(k*4, k*8, 3),
ConvSkip(k*8, k*16, act=False),
ConvSkip(k*16, k*32),
ConvSkip(k*32, k*91),
]
for i in range(n_middle): layers.append(middle_flow(k*91))
layers += [
ConvSkip(k*91,k*128),
sep_conv(k*128,k*192,act=False),
sep_conv(k*192,k*256),
nn.ReLU(),
nn.AdaptiveAvgPool2d(1),
Flatten(),
nn.Linear(k*256,c)
]
return nn.Sequential(*layers)
|
python
|
Preview version of Xception network. Not tested yet - use at own risk. No pretrained model yet.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/models/xception.py#L41-L60
|
20,520
|
fastai/fastai
|
old/fastai/nlp.py
|
LanguageModelData.get_model
|
def get_model(self, opt_fn, emb_sz, n_hid, n_layers, **kwargs):
""" Method returns a RNN_Learner object, that wraps an instance of the RNN_Encoder module.
Args:
opt_fn (Optimizer): the torch optimizer function to use
emb_sz (int): embedding size
n_hid (int): number of hidden inputs
n_layers (int): number of hidden layers
kwargs: other arguments
Returns:
An instance of the RNN_Learner class.
"""
m = get_language_model(self.nt, emb_sz, n_hid, n_layers, self.pad_idx, **kwargs)
model = SingleModel(to_gpu(m))
return RNN_Learner(self, model, opt_fn=opt_fn)
|
python
|
Method returns a RNN_Learner object, that wraps an instance of the RNN_Encoder module.
Args:
opt_fn (Optimizer): the torch optimizer function to use
emb_sz (int): embedding size
n_hid (int): number of hidden inputs
n_layers (int): number of hidden layers
kwargs: other arguments
Returns:
An instance of the RNN_Learner class.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/nlp.py#L263-L279
|
20,521
|
fastai/fastai
|
old/fastai/nlp.py
|
LanguageModelData.from_text_files
|
def from_text_files(cls, path, field, train, validation, test=None, bs=64, bptt=70, **kwargs):
""" Method used to instantiate a LanguageModelData object that can be used for a
supported nlp task.
Args:
path (str): the absolute path in which temporary model data will be saved
field (Field): torchtext field
train (str): file location of the training data
validation (str): file location of the validation data
test (str): file location of the testing data
bs (int): batch size to use
bptt (int): back propagation through time hyper-parameter
kwargs: other arguments
Returns:
a LanguageModelData instance, which most importantly, provides us the datasets for training,
validation, and testing
Note:
The train, validation, and test path can be pointed to any file (or folder) that contains a valid
text corpus.
"""
trn_ds, val_ds, test_ds = ConcatTextDataset.splits(
path, text_field=field, train=train, validation=validation, test=test)
return cls(path, field, trn_ds, val_ds, test_ds, bs, bptt, **kwargs)
|
python
|
Method used to instantiate a LanguageModelData object that can be used for a
supported nlp task.
Args:
path (str): the absolute path in which temporary model data will be saved
field (Field): torchtext field
train (str): file location of the training data
validation (str): file location of the validation data
test (str): file location of the testing data
bs (int): batch size to use
bptt (int): back propagation through time hyper-parameter
kwargs: other arguments
Returns:
a LanguageModelData instance, which most importantly, provides us the datasets for training,
validation, and testing
Note:
The train, validation, and test path can be pointed to any file (or folder) that contains a valid
text corpus.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/nlp.py#L288-L313
|
20,522
|
fastai/fastai
|
fastai/data_block.py
|
get_files
|
def get_files(path:PathOrStr, extensions:Collection[str]=None, recurse:bool=False,
include:Optional[Collection[str]]=None)->FilePathList:
"Return list of files in `path` that have a suffix in `extensions`; optionally `recurse`."
if recurse:
res = []
for i,(p,d,f) in enumerate(os.walk(path)):
# skip hidden dirs
if include is not None and i==0: d[:] = [o for o in d if o in include]
else: d[:] = [o for o in d if not o.startswith('.')]
res += _get_files(path, p, f, extensions)
return res
else:
f = [o.name for o in os.scandir(path) if o.is_file()]
return _get_files(path, path, f, extensions)
|
python
|
Return list of files in `path` that have a suffix in `extensions`; optionally `recurse`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L23-L36
|
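A usage sketch for `get_files` above; the directory layout, folder names, and extension filter are hypothetical:

from pathlib import Path
from fastai.data_block import get_files

# Recurse only into the 'train' and 'valid' top-level folders, keeping .jpg files.
fnames = get_files(Path('data/pets'), extensions=['.jpg'], recurse=True,
                   include=['train', 'valid'])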
20,523
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.process
|
def process(self, processor:PreProcessors=None):
"Apply `processor` or `self.processor` to `self`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: p.process(self)
return self
|
python
|
Apply `processor` or `self.processor` to `self`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L71-L76
|
20,524
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.process_one
|
def process_one(self, item:ItemBase, processor:PreProcessors=None):
"Apply `processor` or `self.processor` to `item`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: item = p.process_one(item)
return item
|
python
|
Apply `processor` or `self.processor` to `item`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L78-L83
|
20,525
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.reconstruct
|
def reconstruct(self, t:Tensor, x:Tensor=None):
"Reconstruct one of the underlying item for its data `t`."
return self[0].reconstruct(t,x) if has_arg(self[0].reconstruct, 'x') else self[0].reconstruct(t)
|
python
|
Reconstruct one of the underlying items from its data `t`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L89-L91
|
20,526
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.from_folder
|
def from_folder(cls, path:PathOrStr, extensions:Collection[str]=None, recurse:bool=True,
include:Optional[Collection[str]]=None, processor:PreProcessors=None, **kwargs)->'ItemList':
"""Create an `ItemList` in `path` from the filenames that have a suffix in `extensions`.
`recurse` determines if we search subfolders."""
path = Path(path)
return cls(get_files(path, extensions, recurse=recurse, include=include), path=path, processor=processor, **kwargs)
|
python
|
Create an `ItemList` in `path` from the filenames that have a suffix in `extensions`.
`recurse` determines if we search subfolders.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L113-L118
|
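A typical data block chain built on `from_folder` above, shown as a hedged sketch: it assumes fastai's vision `ImageList` subclass and a hypothetical 'data/pets' folder with one sub-folder per class:

from fastai.vision import ImageList

data = (ImageList.from_folder('data/pets')
        .split_by_rand_pct(0.2, seed=42)   # random 80/20 split
        .label_from_folder()               # class taken from the parent folder name
        .databunch(bs=32))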
20,527
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.from_df
|
def from_df(cls, df:DataFrame, path:PathOrStr='.', cols:IntsOrStrs=0, processor:PreProcessors=None, **kwargs)->'ItemList':
"Create an `ItemList` in `path` from the inputs in the `cols` of `df`."
inputs = df.iloc[:,df_names_to_idx(cols, df)]
assert inputs.isna().sum().sum() == 0, f"You have NaN values in column(s) {cols} of your dataframe, please fix it."
res = cls(items=_maybe_squeeze(inputs.values), path=path, inner_df=df, processor=processor, **kwargs)
return res
|
python
|
Create an `ItemList` in `path` from the inputs in the `cols` of `df`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L121-L126
|
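A small sketch for `from_df` above; the dataframe and its columns are hypothetical:

import pandas as pd
from fastai.data_block import ItemList

df = pd.DataFrame({'name': ['a.png', 'b.png'], 'label': [0, 1]})
items = ItemList.from_df(df, path='.', cols='name')   # inputs come from the 'name' column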
20,528
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.use_partial_data
|
def use_partial_data(self, sample_pct:float=0.01, seed:int=None)->'ItemList':
"Use only a sample of `sample_pct`of the full dataset and an optional `seed`."
if seed is not None: np.random.seed(seed)
rand_idx = np.random.permutation(range_of(self))
cut = int(sample_pct * len(self))
return self[rand_idx[:cut]]
|
python
|
Use only a sample of `sample_pct` of the full dataset and an optional `seed`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L138-L143
|
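Continuing the hypothetical `items` list from the previous sketch, `use_partial_data` keeps a small random slice for quick experiments; the seed makes the permutation reproducible:

small = items.use_partial_data(sample_pct=0.01, seed=42)   # roughly 1% of the items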
20,529
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.to_text
|
def to_text(self, fn:str):
"Save `self.items` to `fn` in `self.path`."
with open(self.path/fn, 'w') as f: f.writelines([f'{o}\n' for o in self._relative_item_paths()])
|
python
|
Save `self.items` to `fn` in `self.path`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L145-L147
|
20,530
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.filter_by_func
|
def filter_by_func(self, func:Callable)->'ItemList':
"Only keep elements for which `func` returns `True`."
self.items = array([o for o in self.items if func(o)])
return self
|
python
|
Only keep elements for which `func` returns `True`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L149-L152
|
20,531
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.filter_by_folder
|
def filter_by_folder(self, include=None, exclude=None):
"Only keep filenames in `include` folder or reject the ones in `exclude`."
include,exclude = listify(include),listify(exclude)
def _inner(o):
if isinstance(o, Path): n = o.relative_to(self.path).parts[0]
else: n = o.split(os.path.sep)[len(str(self.path).split(os.path.sep))]
if include and not n in include: return False
if exclude and n in exclude: return False
return True
return self.filter_by_func(_inner)
|
python
|
Only keep filenames in `include` folder or reject the ones in `exclude`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L154-L163
|
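A hedged sketch for `filter_by_folder` above, with hypothetical folder names:

from fastai.vision import ImageList

items = (ImageList.from_folder('data/pets')
         .filter_by_folder(include=['train', 'valid'], exclude=['test']))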
20,532
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.filter_by_rand
|
def filter_by_rand(self, p:float, seed:int=None):
"Keep random sample of `items` with probability `p` and an optional `seed`."
if seed is not None: np.random.seed(seed)
return self.filter_by_func(lambda o: rand_bool(p))
|
python
|
Keep random sample of `items` with probability `p` and an optional `seed`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L165-L168
|
20,533
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.split_none
|
def split_none(self):
"Don't split the data and create an empty validation set."
val = self[[]]
val.ignore_empty = True
return self._split(self.path, self, val)
|
python
|
Don't split the data and create an empty validation set.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L174-L178
|
20,534
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.split_by_list
|
def split_by_list(self, train, valid):
"Split the data between `train` and `valid`."
return self._split(self.path, train, valid)
|
python
|
Split the data between `train` and `valid`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L180-L182
|
20,535
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.split_by_idxs
|
def split_by_idxs(self, train_idx, valid_idx):
"Split the data between `train_idx` and `valid_idx`."
return self.split_by_list(self[train_idx], self[valid_idx])
|
python
|
Split the data between `train_idx` and `valid_idx`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L184-L186
|
20,536
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.split_by_idx
|
def split_by_idx(self, valid_idx:Collection[int])->'ItemLists':
"Split the data according to the indexes in `valid_idx`."
#train_idx = [i for i in range_of(self.items) if i not in valid_idx]
train_idx = np.setdiff1d(arange_of(self.items), valid_idx)
return self.split_by_idxs(train_idx, valid_idx)
|
python
|
Split the data according to the indexes in `valid_idx`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L188-L192
|
20,537
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.split_by_rand_pct
|
def split_by_rand_pct(self, valid_pct:float=0.2, seed:int=None)->'ItemLists':
"Split the items randomly by putting `valid_pct` in the validation set, optional `seed` can be passed."
if valid_pct==0.: return self.split_none()
if seed is not None: np.random.seed(seed)
rand_idx = np.random.permutation(range_of(self))
cut = int(valid_pct * len(self))
return self.split_by_idx(rand_idx[:cut])
|
python
|
Split the items randomly by putting `valid_pct` in the validation set, optional `seed` can be passed.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L206-L212
|
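A short sketch for `split_by_rand_pct` above, again on a hypothetical `items` list; 20% of the items go to the validation set and the seed keeps the split identical across runs:

split_lists = items.split_by_rand_pct(valid_pct=0.2, seed=7)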
20,538
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.split_by_files
|
def split_by_files(self, valid_names:'ItemList')->'ItemLists':
"Split the data by using the names in `valid_names` for validation."
if isinstance(self.items[0], Path): return self.split_by_valid_func(lambda o: o.name in valid_names)
else: return self.split_by_valid_func(lambda o: os.path.basename(o) in valid_names)
|
python
|
Split the data by using the names in `valid_names` for validation.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L230-L233
|
20,539
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.split_by_fname_file
|
def split_by_fname_file(self, fname:PathOrStr, path:PathOrStr=None)->'ItemLists':
"Split the data by using the names in `fname` for the validation set. `path` will override `self.path`."
path = Path(ifnone(path, self.path))
valid_names = loadtxt_str(path/fname)
return self.split_by_files(valid_names)
|
python
|
Split the data by using the names in `fname` for the validation set. `path` will override `self.path`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L235-L239
|
20,540
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.split_from_df
|
def split_from_df(self, col:IntsOrStrs=2):
"Split the data from the `col` in the dataframe in `self.inner_df`."
valid_idx = np.where(self.inner_df.iloc[:,df_names_to_idx(col, self.inner_df)])[0]
return self.split_by_idx(valid_idx)
|
python
|
Split the data from the `col` in the dataframe in `self.inner_df`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L241-L244
|
20,541
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.get_label_cls
|
def get_label_cls(self, labels, label_cls:Callable=None, label_delim:str=None, **kwargs):
"Return `label_cls` or guess one from the first element of `labels`."
if label_cls is not None: return label_cls
if self.label_cls is not None: return self.label_cls
if label_delim is not None: return MultiCategoryList
it = index_row(labels,0)
if isinstance(it, (float, np.float32)): return FloatList
if isinstance(try_int(it), (str, Integral)): return CategoryList
if isinstance(it, Collection): return MultiCategoryList
return ItemList
|
python
|
Return `label_cls` or guess one from the first element of `labels`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L246-L255
|
20,542
|
fastai/fastai
|
fastai/data_block.py
|
ItemList._label_from_list
|
def _label_from_list(self, labels:Iterator, label_cls:Callable=None, from_item_lists:bool=False, **kwargs)->'LabelList':
"Label `self.items` with `labels`."
if not from_item_lists:
raise Exception("Your data isn't split, if you don't want a validation set, please use `split_none`.")
labels = array(labels, dtype=object)
label_cls = self.get_label_cls(labels, label_cls=label_cls, **kwargs)
y = label_cls(labels, path=self.path, **kwargs)
res = self._label_list(x=self, y=y)
return res
|
python
|
Label `self.items` with `labels`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L257-L265
|
20,543
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.label_from_df
|
def label_from_df(self, cols:IntsOrStrs=1, label_cls:Callable=None, **kwargs):
"Label `self.items` from the values in `cols` in `self.inner_df`."
labels = self.inner_df.iloc[:,df_names_to_idx(cols, self.inner_df)]
assert labels.isna().sum().sum() == 0, f"You have NaN values in column(s) {cols} of your dataframe, please fix it."
if is_listy(cols) and len(cols) > 1 and (label_cls is None or label_cls == MultiCategoryList):
new_kwargs,label_cls = dict(one_hot=True, classes= cols),MultiCategoryList
kwargs = {**new_kwargs, **kwargs}
return self._label_from_list(_maybe_squeeze(labels), label_cls=label_cls, **kwargs)
|
python
|
Label `self.items` from the values in `cols` in `self.inner_df`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L267-L274
|
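Two hedged sketches for `label_from_df` above, assuming `src` is a hypothetical, already split ItemList whose `inner_df` holds 'label' and 'tags' columns:

labelled = src.label_from_df(cols='label')                  # single column -> CategoryList is guessed
labelled = src.label_from_df(cols='tags', label_delim=' ')  # delimited tags -> MultiCategoryList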
20,544
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.label_const
|
def label_const(self, const:Any=0, label_cls:Callable=None, **kwargs)->'LabelList':
"Label every item with `const`."
return self.label_from_func(func=lambda o: const, label_cls=label_cls, **kwargs)
|
python
|
Label every item with `const`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L276-L278
|
20,545
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.label_empty
|
def label_empty(self, **kwargs):
"Label every item with an `EmptyLabel`."
kwargs['label_cls'] = EmptyLabelList
return self.label_from_func(func=lambda o: 0., **kwargs)
|
python
|
Label every item with an `EmptyLabel`.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L280-L283
|
20,546
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.label_from_func
|
def label_from_func(self, func:Callable, label_cls:Callable=None, **kwargs)->'LabelList':
"Apply `func` to every input to get its label."
return self._label_from_list([func(o) for o in self.items], label_cls=label_cls, **kwargs)
|
python
|
Apply `func` to every input to get its label.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L285-L287
|
20,547
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.label_from_folder
|
def label_from_folder(self, label_cls:Callable=None, **kwargs)->'LabelList':
"Give a label to each filename depending on its folder."
return self.label_from_func(func=lambda o: (o.parts if isinstance(o, Path) else o.split(os.path.sep))[-2],
label_cls=label_cls, **kwargs)
|
python
|
Give a label to each filename depending on its folder.
|
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L289-L292
|
20,548
|
fastai/fastai
|
fastai/data_block.py
|
ItemList.label_from_re
|
def label_from_re(self, pat:str, full_path:bool=False, label_cls:Callable=None, **kwargs)->'LabelList':
"Apply the re in `pat` to determine the label of every filename. If `full_path`, search in the full name."
pat = re.compile(pat)
def _inner(o):
s = str((os.path.join(self.path,o) if full_path else o).as_posix())
res = pat.search(s)
assert res,f'Failed to find "{pat}" in "{s}"'
return res.group(1)
return self.label_from_func(_inner, label_cls=label_cls, **kwargs)
|
python
|
def label_from_re(self, pat:str, full_path:bool=False, label_cls:Callable=None, **kwargs)->'LabelList':
"Apply the re in `pat` to determine the label of every filename. If `full_path`, search in the full name."
pat = re.compile(pat)
def _inner(o):
s = str((os.path.join(self.path,o) if full_path else o).as_posix())
res = pat.search(s)
assert res,f'Failed to find "{pat}" in "{s}"'
return res.group(1)
return self.label_from_func(_inner, label_cls=label_cls, **kwargs)
|
[
"def",
"label_from_re",
"(",
"self",
",",
"pat",
":",
"str",
",",
"full_path",
":",
"bool",
"=",
"False",
",",
"label_cls",
":",
"Callable",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"'LabelList'",
":",
"pat",
"=",
"re",
".",
"compile",
"(",
"pat",
")",
"def",
"_inner",
"(",
"o",
")",
":",
"s",
"=",
"str",
"(",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"o",
")",
"if",
"full_path",
"else",
"o",
")",
".",
"as_posix",
"(",
")",
")",
"res",
"=",
"pat",
".",
"search",
"(",
"s",
")",
"assert",
"res",
",",
"f'Failed to find \"{pat}\" in \"{s}\"'",
"return",
"res",
".",
"group",
"(",
"1",
")",
"return",
"self",
".",
"label_from_func",
"(",
"_inner",
",",
"label_cls",
"=",
"label_cls",
",",
"*",
"*",
"kwargs",
")"
] |
Apply the re in `pat` to determine the label of every filename. If `full_path`, search in the full name.
|
[
"Apply",
"the",
"re",
"in",
"pat",
"to",
"determine",
"the",
"label",
"of",
"every",
"filename",
".",
"If",
"full_path",
"search",
"in",
"the",
"full",
"name",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L294-L302
|
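`label_from_re` above extracts the label as `group(1)` of a regex matched against the posix-style filename. A self-contained sketch with a hypothetical pattern and pet-style filenames (neither taken from the dataset):

import re
from pathlib import Path

fnames = [Path("images/great_pyrenees_173.jpg"), Path("images/beagle_32.jpg")]
pat = re.compile(r'/([^/]+)_\d+.jpg$')   # hypothetical pattern: label precedes a trailing number

def label_of(fname):
    # Same rule as the record above: search the posix path, return group(1).
    res = pat.search(str(fname.as_posix()))
    assert res, f'Failed to find "{pat}" in "{fname}"'
    return res.group(1)

print([label_of(f) for f in fnames])   # -> ['great_pyrenees', 'beagle']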
20,549
|
fastai/fastai
|
fastai/data_block.py
|
MultiCategoryProcessor.generate_classes
|
def generate_classes(self, items):
"Generate classes from `items` by taking the sorted unique values."
classes = set()
for c in items: classes = classes.union(set(c))
classes = list(classes)
classes.sort()
return classes
|
python
|
def generate_classes(self, items):
"Generate classes from `items` by taking the sorted unique values."
classes = set()
for c in items: classes = classes.union(set(c))
classes = list(classes)
classes.sort()
return classes
|
[
"def",
"generate_classes",
"(",
"self",
",",
"items",
")",
":",
"classes",
"=",
"set",
"(",
")",
"for",
"c",
"in",
"items",
":",
"classes",
"=",
"classes",
".",
"union",
"(",
"set",
"(",
"c",
")",
")",
"classes",
"=",
"list",
"(",
"classes",
")",
"classes",
".",
"sort",
"(",
")",
"return",
"classes"
] |
Generate classes from `items` by taking the sorted unique values.
|
[
"Generate",
"classes",
"from",
"items",
"by",
"taking",
"the",
"sorted",
"unique",
"values",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L388-L394
|
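`generate_classes` above turns a collection of per-item label lists into one sorted vocabulary. A short worked example with made-up tags:

def generate_classes(items):
    # Union all per-item label sets, then sort (same as the record above).
    classes = set()
    for c in items: classes = classes.union(set(c))
    return sorted(classes)

items = [["cat", "indoor"], ["dog"], ["cat", "outdoor"]]   # hypothetical multi-label targets
print(generate_classes(items))   # -> ['cat', 'dog', 'indoor', 'outdoor']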
20,550
|
fastai/fastai
|
fastai/data_block.py
|
ItemLists.label_from_lists
|
def label_from_lists(self, train_labels:Iterator, valid_labels:Iterator, label_cls:Callable=None, **kwargs)->'LabelList':
"Use the labels in `train_labels` and `valid_labels` to label the data. `label_cls` will overwrite the default."
label_cls = self.train.get_label_cls(train_labels, label_cls)
self.train = self.train._label_list(x=self.train, y=label_cls(train_labels, **kwargs))
self.valid = self.valid._label_list(x=self.valid, y=self.train.y.new(valid_labels, **kwargs))
self.__class__ = LabelLists
self.process()
return self
|
python
|
def label_from_lists(self, train_labels:Iterator, valid_labels:Iterator, label_cls:Callable=None, **kwargs)->'LabelList':
"Use the labels in `train_labels` and `valid_labels` to label the data. `label_cls` will overwrite the default."
label_cls = self.train.get_label_cls(train_labels, label_cls)
self.train = self.train._label_list(x=self.train, y=label_cls(train_labels, **kwargs))
self.valid = self.valid._label_list(x=self.valid, y=self.train.y.new(valid_labels, **kwargs))
self.__class__ = LabelLists
self.process()
return self
|
[
"def",
"label_from_lists",
"(",
"self",
",",
"train_labels",
":",
"Iterator",
",",
"valid_labels",
":",
"Iterator",
",",
"label_cls",
":",
"Callable",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"'LabelList'",
":",
"label_cls",
"=",
"self",
".",
"train",
".",
"get_label_cls",
"(",
"train_labels",
",",
"label_cls",
")",
"self",
".",
"train",
"=",
"self",
".",
"train",
".",
"_label_list",
"(",
"x",
"=",
"self",
".",
"train",
",",
"y",
"=",
"label_cls",
"(",
"train_labels",
",",
"*",
"*",
"kwargs",
")",
")",
"self",
".",
"valid",
"=",
"self",
".",
"valid",
".",
"_label_list",
"(",
"x",
"=",
"self",
".",
"valid",
",",
"y",
"=",
"self",
".",
"train",
".",
"y",
".",
"new",
"(",
"valid_labels",
",",
"*",
"*",
"kwargs",
")",
")",
"self",
".",
"__class__",
"=",
"LabelLists",
"self",
".",
"process",
"(",
")",
"return",
"self"
] |
Use the labels in `train_labels` and `valid_labels` to label the data. `label_cls` will overwrite the default.
|
[
"Use",
"the",
"labels",
"in",
"train_labels",
"and",
"valid_labels",
"to",
"label",
"the",
"data",
".",
"label_cls",
"will",
"overwrite",
"the",
"default",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L480-L487
|
20,551
|
fastai/fastai
|
fastai/data_block.py
|
ItemLists.transform
|
def transform(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs):
"Set `tfms` to be applied to the xs of the train and validation set."
if not tfms: tfms=(None,None)
assert is_listy(tfms) and len(tfms) == 2, "Please pass a list of two lists of transforms (train and valid)."
self.train.transform(tfms[0], **kwargs)
self.valid.transform(tfms[1], **kwargs)
if self.test: self.test.transform(tfms[1], **kwargs)
return self
|
python
|
def transform(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs):
"Set `tfms` to be applied to the xs of the train and validation set."
if not tfms: tfms=(None,None)
assert is_listy(tfms) and len(tfms) == 2, "Please pass a list of two lists of transforms (train and valid)."
self.train.transform(tfms[0], **kwargs)
self.valid.transform(tfms[1], **kwargs)
if self.test: self.test.transform(tfms[1], **kwargs)
return self
|
[
"def",
"transform",
"(",
"self",
",",
"tfms",
":",
"Optional",
"[",
"Tuple",
"[",
"TfmList",
",",
"TfmList",
"]",
"]",
"=",
"(",
"None",
",",
"None",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"tfms",
":",
"tfms",
"=",
"(",
"None",
",",
"None",
")",
"assert",
"is_listy",
"(",
"tfms",
")",
"and",
"len",
"(",
"tfms",
")",
"==",
"2",
",",
"\"Please pass a list of two lists of transforms (train and valid).\"",
"self",
".",
"train",
".",
"transform",
"(",
"tfms",
"[",
"0",
"]",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"valid",
".",
"transform",
"(",
"tfms",
"[",
"1",
"]",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"test",
":",
"self",
".",
"test",
".",
"transform",
"(",
"tfms",
"[",
"1",
"]",
",",
"*",
"*",
"kwargs",
")",
"return",
"self"
] |
Set `tfms` to be applied to the xs of the train and validation set.
|
[
"Set",
"tfms",
"to",
"be",
"applied",
"to",
"the",
"xs",
"of",
"the",
"train",
"and",
"validation",
"set",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L489-L496
|
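`ItemLists.transform` above expects `tfms` to be a pair: index 0 is applied to the training set, index 1 to the validation set and (if present) the test set. A plain-Python sketch of that dispatch, with a dictionary standing in for the fastai item lists:

def transform(data, tfms=(None, None)):
    # tfms must be a 2-tuple: (train transforms, valid transforms), as asserted above.
    if not tfms: tfms = (None, None)
    assert isinstance(tfms, (list, tuple)) and len(tfms) == 2, \
        "Please pass a list of two lists of transforms (train and valid)."
    data["train_tfms"], data["valid_tfms"] = tfms[0], tfms[1]
    if "test" in data: data["test_tfms"] = tfms[1]   # valid transforms are reused for test
    return data

d = transform({"train": "train set", "valid": "valid set", "test": "test set"},
              tfms=(["flip", "rotate"], []))
print(d["train_tfms"], d["valid_tfms"], d["test_tfms"])   # -> ['flip', 'rotate'] [] []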
20,552
|
fastai/fastai
|
fastai/data_block.py
|
ItemLists.transform_y
|
def transform_y(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs):
"Set `tfms` to be applied to the ys of the train and validation set."
if not tfms: tfms=(None,None)
self.train.transform_y(tfms[0], **kwargs)
self.valid.transform_y(tfms[1], **kwargs)
if self.test: self.test.transform_y(tfms[1], **kwargs)
return self
|
python
|
def transform_y(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs):
"Set `tfms` to be applied to the ys of the train and validation set."
if not tfms: tfms=(None,None)
self.train.transform_y(tfms[0], **kwargs)
self.valid.transform_y(tfms[1], **kwargs)
if self.test: self.test.transform_y(tfms[1], **kwargs)
return self
|
[
"def",
"transform_y",
"(",
"self",
",",
"tfms",
":",
"Optional",
"[",
"Tuple",
"[",
"TfmList",
",",
"TfmList",
"]",
"]",
"=",
"(",
"None",
",",
"None",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"tfms",
":",
"tfms",
"=",
"(",
"None",
",",
"None",
")",
"self",
".",
"train",
".",
"transform_y",
"(",
"tfms",
"[",
"0",
"]",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"valid",
".",
"transform_y",
"(",
"tfms",
"[",
"1",
"]",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"test",
":",
"self",
".",
"test",
".",
"transform_y",
"(",
"tfms",
"[",
"1",
"]",
",",
"*",
"*",
"kwargs",
")",
"return",
"self"
] |
Set `tfms` to be applied to the ys of the train and validation set.
|
[
"Set",
"tfms",
"to",
"be",
"applied",
"to",
"the",
"ys",
"of",
"the",
"train",
"and",
"validation",
"set",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L498-L504
|
20,553
|
fastai/fastai
|
fastai/data_block.py
|
LabelLists.get_processors
|
def get_processors(self):
"Read the default class processors if none have been set."
procs_x,procs_y = listify(self.train.x._processor),listify(self.train.y._processor)
xp = ifnone(self.train.x.processor, [p(ds=self.train.x) for p in procs_x])
yp = ifnone(self.train.y.processor, [p(ds=self.train.y) for p in procs_y])
return xp,yp
|
python
|
def get_processors(self):
"Read the default class processors if none have been set."
procs_x,procs_y = listify(self.train.x._processor),listify(self.train.y._processor)
xp = ifnone(self.train.x.processor, [p(ds=self.train.x) for p in procs_x])
yp = ifnone(self.train.y.processor, [p(ds=self.train.y) for p in procs_y])
return xp,yp
|
[
"def",
"get_processors",
"(",
"self",
")",
":",
"procs_x",
",",
"procs_y",
"=",
"listify",
"(",
"self",
".",
"train",
".",
"x",
".",
"_processor",
")",
",",
"listify",
"(",
"self",
".",
"train",
".",
"y",
".",
"_processor",
")",
"xp",
"=",
"ifnone",
"(",
"self",
".",
"train",
".",
"x",
".",
"processor",
",",
"[",
"p",
"(",
"ds",
"=",
"self",
".",
"train",
".",
"x",
")",
"for",
"p",
"in",
"procs_x",
"]",
")",
"yp",
"=",
"ifnone",
"(",
"self",
".",
"train",
".",
"y",
".",
"processor",
",",
"[",
"p",
"(",
"ds",
"=",
"self",
".",
"train",
".",
"y",
")",
"for",
"p",
"in",
"procs_y",
"]",
")",
"return",
"xp",
",",
"yp"
] |
Read the default class processors if none have been set.
|
[
"Read",
"the",
"default",
"class",
"processors",
"if",
"none",
"have",
"been",
"set",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L512-L517
|
20,554
|
fastai/fastai
|
fastai/data_block.py
|
LabelLists.process
|
def process(self):
"Process the inner datasets."
xp,yp = self.get_processors()
for ds,n in zip(self.lists, ['train','valid','test']): ds.process(xp, yp, name=n)
#progress_bar clear the outputs so in some case warnings issued during processing disappear.
for ds in self.lists:
if getattr(ds, 'warn', False): warn(ds.warn)
return self
|
python
|
def process(self):
"Process the inner datasets."
xp,yp = self.get_processors()
for ds,n in zip(self.lists, ['train','valid','test']): ds.process(xp, yp, name=n)
#progress_bar clear the outputs so in some case warnings issued during processing disappear.
for ds in self.lists:
if getattr(ds, 'warn', False): warn(ds.warn)
return self
|
[
"def",
"process",
"(",
"self",
")",
":",
"xp",
",",
"yp",
"=",
"self",
".",
"get_processors",
"(",
")",
"for",
"ds",
",",
"n",
"in",
"zip",
"(",
"self",
".",
"lists",
",",
"[",
"'train'",
",",
"'valid'",
",",
"'test'",
"]",
")",
":",
"ds",
".",
"process",
"(",
"xp",
",",
"yp",
",",
"name",
"=",
"n",
")",
"#progress_bar clear the outputs so in some case warnings issued during processing disappear.",
"for",
"ds",
"in",
"self",
".",
"lists",
":",
"if",
"getattr",
"(",
"ds",
",",
"'warn'",
",",
"False",
")",
":",
"warn",
"(",
"ds",
".",
"warn",
")",
"return",
"self"
] |
Process the inner datasets.
|
[
"Process",
"the",
"inner",
"datasets",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L519-L526
|
20,555
|
fastai/fastai
|
fastai/data_block.py
|
LabelLists.databunch
|
def databunch(self, path:PathOrStr=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus,
dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=data_collate,
no_check:bool=False, **kwargs)->'DataBunch':
"Create an `DataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `DataBunch.create`."
path = Path(ifnone(path, self.path))
data = self.x._bunch.create(self.train, self.valid, test_ds=self.test, path=path, bs=bs, val_bs=val_bs,
num_workers=num_workers, device=device, collate_fn=collate_fn, no_check=no_check, **kwargs)
if getattr(self, 'normalize', False):#In case a normalization was serialized
norm = self.normalize
data.normalize((norm['mean'], norm['std']), do_x=norm['do_x'], do_y=norm['do_y'])
data.label_list = self
return data
|
python
|
def databunch(self, path:PathOrStr=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus,
dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=data_collate,
no_check:bool=False, **kwargs)->'DataBunch':
"Create an `DataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `DataBunch.create`."
path = Path(ifnone(path, self.path))
data = self.x._bunch.create(self.train, self.valid, test_ds=self.test, path=path, bs=bs, val_bs=val_bs,
num_workers=num_workers, device=device, collate_fn=collate_fn, no_check=no_check, **kwargs)
if getattr(self, 'normalize', False):#In case a normalization was serialized
norm = self.normalize
data.normalize((norm['mean'], norm['std']), do_x=norm['do_x'], do_y=norm['do_y'])
data.label_list = self
return data
|
[
"def",
"databunch",
"(",
"self",
",",
"path",
":",
"PathOrStr",
"=",
"None",
",",
"bs",
":",
"int",
"=",
"64",
",",
"val_bs",
":",
"int",
"=",
"None",
",",
"num_workers",
":",
"int",
"=",
"defaults",
".",
"cpus",
",",
"dl_tfms",
":",
"Optional",
"[",
"Collection",
"[",
"Callable",
"]",
"]",
"=",
"None",
",",
"device",
":",
"torch",
".",
"device",
"=",
"None",
",",
"collate_fn",
":",
"Callable",
"=",
"data_collate",
",",
"no_check",
":",
"bool",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"->",
"'DataBunch'",
":",
"path",
"=",
"Path",
"(",
"ifnone",
"(",
"path",
",",
"self",
".",
"path",
")",
")",
"data",
"=",
"self",
".",
"x",
".",
"_bunch",
".",
"create",
"(",
"self",
".",
"train",
",",
"self",
".",
"valid",
",",
"test_ds",
"=",
"self",
".",
"test",
",",
"path",
"=",
"path",
",",
"bs",
"=",
"bs",
",",
"val_bs",
"=",
"val_bs",
",",
"num_workers",
"=",
"num_workers",
",",
"device",
"=",
"device",
",",
"collate_fn",
"=",
"collate_fn",
",",
"no_check",
"=",
"no_check",
",",
"*",
"*",
"kwargs",
")",
"if",
"getattr",
"(",
"self",
",",
"'normalize'",
",",
"False",
")",
":",
"#In case a normalization was serialized",
"norm",
"=",
"self",
".",
"normalize",
"data",
".",
"normalize",
"(",
"(",
"norm",
"[",
"'mean'",
"]",
",",
"norm",
"[",
"'std'",
"]",
")",
",",
"do_x",
"=",
"norm",
"[",
"'do_x'",
"]",
",",
"do_y",
"=",
"norm",
"[",
"'do_y'",
"]",
")",
"data",
".",
"label_list",
"=",
"self",
"return",
"data"
] |
Create an `DataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `DataBunch.create`.
|
[
"Create",
"an",
"DataBunch",
"from",
"self",
"path",
"will",
"override",
"self",
".",
"path",
"kwargs",
"are",
"passed",
"to",
"DataBunch",
".",
"create",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L532-L543
|
20,556
|
fastai/fastai
|
fastai/data_block.py
|
LabelLists.load_state
|
def load_state(cls, path:PathOrStr, state:dict):
"Create a `LabelLists` with empty sets from the serialized `state`."
path = Path(path)
train_ds = LabelList.load_state(path, state)
valid_ds = LabelList.load_state(path, state)
return LabelLists(path, train=train_ds, valid=valid_ds)
|
python
|
def load_state(cls, path:PathOrStr, state:dict):
"Create a `LabelLists` with empty sets from the serialized `state`."
path = Path(path)
train_ds = LabelList.load_state(path, state)
valid_ds = LabelList.load_state(path, state)
return LabelLists(path, train=train_ds, valid=valid_ds)
|
[
"def",
"load_state",
"(",
"cls",
",",
"path",
":",
"PathOrStr",
",",
"state",
":",
"dict",
")",
":",
"path",
"=",
"Path",
"(",
"path",
")",
"train_ds",
"=",
"LabelList",
".",
"load_state",
"(",
"path",
",",
"state",
")",
"valid_ds",
"=",
"LabelList",
".",
"load_state",
"(",
"path",
",",
"state",
")",
"return",
"LabelLists",
"(",
"path",
",",
"train",
"=",
"train_ds",
",",
"valid",
"=",
"valid_ds",
")"
] |
Create a `LabelLists` with empty sets from the serialized `state`.
|
[
"Create",
"a",
"LabelLists",
"with",
"empty",
"sets",
"from",
"the",
"serialized",
"state",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L562-L567
|
20,557
|
fastai/fastai
|
fastai/data_block.py
|
LabelList.set_item
|
def set_item(self,item):
"For inference, will briefly replace the dataset with one that only contains `item`."
self.item = self.x.process_one(item)
yield None
self.item = None
|
python
|
def set_item(self,item):
"For inference, will briefly replace the dataset with one that only contains `item`."
self.item = self.x.process_one(item)
yield None
self.item = None
|
[
"def",
"set_item",
"(",
"self",
",",
"item",
")",
":",
"self",
".",
"item",
"=",
"self",
".",
"x",
".",
"process_one",
"(",
"item",
")",
"yield",
"None",
"self",
".",
"item",
"=",
"None"
] |
For inference, will briefly replace the dataset with one that only contains `item`.
|
[
"For",
"inference",
"will",
"briefly",
"replace",
"the",
"dataset",
"with",
"one",
"that",
"only",
"contains",
"item",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L596-L600
|
20,558
|
fastai/fastai
|
fastai/data_block.py
|
LabelList.to_df
|
def to_df(self)->None:
"Create `pd.DataFrame` containing `items` from `self.x` and `self.y`."
return pd.DataFrame(dict(x=self.x._relative_item_paths(), y=[str(o) for o in self.y]))
|
python
|
def to_df(self)->None:
"Create `pd.DataFrame` containing `items` from `self.x` and `self.y`."
return pd.DataFrame(dict(x=self.x._relative_item_paths(), y=[str(o) for o in self.y]))
|
[
"def",
"to_df",
"(",
"self",
")",
"->",
"None",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"dict",
"(",
"x",
"=",
"self",
".",
"x",
".",
"_relative_item_paths",
"(",
")",
",",
"y",
"=",
"[",
"str",
"(",
"o",
")",
"for",
"o",
"in",
"self",
".",
"y",
"]",
")",
")"
] |
Create `pd.DataFrame` containing `items` from `self.x` and `self.y`.
|
[
"Create",
"pd",
".",
"DataFrame",
"containing",
"items",
"from",
"self",
".",
"x",
"and",
"self",
".",
"y",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L646-L648
|
20,559
|
fastai/fastai
|
fastai/data_block.py
|
LabelList.get_state
|
def get_state(self, **kwargs):
"Return the minimal state for export."
state = {'x_cls':self.x.__class__, 'x_proc':self.x.processor,
'y_cls':self.y.__class__, 'y_proc':self.y.processor,
'tfms':self.tfms, 'tfm_y':self.tfm_y, 'tfmargs':self.tfmargs}
if hasattr(self, 'tfms_y'): state['tfms_y'] = self.tfms_y
if hasattr(self, 'tfmargs_y'): state['tfmargs_y'] = self.tfmargs_y
return {**state, **kwargs}
|
python
|
def get_state(self, **kwargs):
"Return the minimal state for export."
state = {'x_cls':self.x.__class__, 'x_proc':self.x.processor,
'y_cls':self.y.__class__, 'y_proc':self.y.processor,
'tfms':self.tfms, 'tfm_y':self.tfm_y, 'tfmargs':self.tfmargs}
if hasattr(self, 'tfms_y'): state['tfms_y'] = self.tfms_y
if hasattr(self, 'tfmargs_y'): state['tfmargs_y'] = self.tfmargs_y
return {**state, **kwargs}
|
[
"def",
"get_state",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"state",
"=",
"{",
"'x_cls'",
":",
"self",
".",
"x",
".",
"__class__",
",",
"'x_proc'",
":",
"self",
".",
"x",
".",
"processor",
",",
"'y_cls'",
":",
"self",
".",
"y",
".",
"__class__",
",",
"'y_proc'",
":",
"self",
".",
"y",
".",
"processor",
",",
"'tfms'",
":",
"self",
".",
"tfms",
",",
"'tfm_y'",
":",
"self",
".",
"tfm_y",
",",
"'tfmargs'",
":",
"self",
".",
"tfmargs",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'tfms_y'",
")",
":",
"state",
"[",
"'tfms_y'",
"]",
"=",
"self",
".",
"tfms_y",
"if",
"hasattr",
"(",
"self",
",",
"'tfmargs_y'",
")",
":",
"state",
"[",
"'tfmargs_y'",
"]",
"=",
"self",
".",
"tfmargs_y",
"return",
"{",
"*",
"*",
"state",
",",
"*",
"*",
"kwargs",
"}"
] |
Return the minimal state for export.
|
[
"Return",
"the",
"minimal",
"state",
"for",
"export",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L654-L661
|
20,560
|
fastai/fastai
|
fastai/data_block.py
|
LabelList.export
|
def export(self, fn:PathOrStr, **kwargs):
"Export the minimal state and save it in `fn` to load an empty version for inference."
pickle.dump(self.get_state(**kwargs), open(fn, 'wb'))
|
python
|
def export(self, fn:PathOrStr, **kwargs):
"Export the minimal state and save it in `fn` to load an empty version for inference."
pickle.dump(self.get_state(**kwargs), open(fn, 'wb'))
|
[
"def",
"export",
"(",
"self",
",",
"fn",
":",
"PathOrStr",
",",
"*",
"*",
"kwargs",
")",
":",
"pickle",
".",
"dump",
"(",
"self",
".",
"get_state",
"(",
"*",
"*",
"kwargs",
")",
",",
"open",
"(",
"fn",
",",
"'wb'",
")",
")"
] |
Export the minimal state and save it in `fn` to load an empty version for inference.
|
[
"Export",
"the",
"minimal",
"state",
"and",
"save",
"it",
"in",
"fn",
"to",
"load",
"an",
"empty",
"version",
"for",
"inference",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L663-L665
|
20,561
|
fastai/fastai
|
fastai/data_block.py
|
LabelList.load_empty
|
def load_empty(cls, path:PathOrStr, fn:PathOrStr):
"Load the state in `fn` to create an empty `LabelList` for inference."
return cls.load_state(path, pickle.load(open(Path(path)/fn, 'rb')))
|
python
|
def load_empty(cls, path:PathOrStr, fn:PathOrStr):
"Load the state in `fn` to create an empty `LabelList` for inference."
return cls.load_state(path, pickle.load(open(Path(path)/fn, 'rb')))
|
[
"def",
"load_empty",
"(",
"cls",
",",
"path",
":",
"PathOrStr",
",",
"fn",
":",
"PathOrStr",
")",
":",
"return",
"cls",
".",
"load_state",
"(",
"path",
",",
"pickle",
".",
"load",
"(",
"open",
"(",
"Path",
"(",
"path",
")",
"/",
"fn",
",",
"'rb'",
")",
")",
")"
] |
Load the state in `fn` to create an empty `LabelList` for inference.
|
[
"Load",
"the",
"state",
"in",
"fn",
"to",
"create",
"an",
"empty",
"LabelList",
"for",
"inference",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L668-L670
|
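The `export` / `load_empty` pair above is, at bottom, "pickle a minimal state dict, unpickle it later to rebuild empty lists for inference". A generic sketch of that round trip; the state dict below is a stand-in, not fastai's real `get_state()` output:

import pickle, tempfile
from pathlib import Path

state = {"classes": ["cat", "dog"], "tfms": None}   # stand-in for LabelList.get_state()
with tempfile.TemporaryDirectory() as d:
    fn = Path(d)/"export.pkl"
    pickle.dump(state, open(fn, "wb"))        # what export() does with the state
    restored = pickle.load(open(fn, "rb"))    # what load_empty() reads back via load_state()
print(restored["classes"])                     # -> ['cat', 'dog']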
20,562
|
fastai/fastai
|
fastai/data_block.py
|
LabelList.load_state
|
def load_state(cls, path:PathOrStr, state:dict) -> 'LabelList':
"Create a `LabelList` from `state`."
x = state['x_cls']([], path=path, processor=state['x_proc'], ignore_empty=True)
y = state['y_cls']([], path=path, processor=state['y_proc'], ignore_empty=True)
res = cls(x, y, tfms=state['tfms'], tfm_y=state['tfm_y'], **state['tfmargs']).process()
if state.get('tfms_y', False): res.tfms_y = state['tfms_y']
if state.get('tfmargs_y', False): res.tfmargs_y = state['tfmargs_y']
if state.get('normalize', False): res.normalize = state['normalize']
return res
|
python
|
def load_state(cls, path:PathOrStr, state:dict) -> 'LabelList':
"Create a `LabelList` from `state`."
x = state['x_cls']([], path=path, processor=state['x_proc'], ignore_empty=True)
y = state['y_cls']([], path=path, processor=state['y_proc'], ignore_empty=True)
res = cls(x, y, tfms=state['tfms'], tfm_y=state['tfm_y'], **state['tfmargs']).process()
if state.get('tfms_y', False): res.tfms_y = state['tfms_y']
if state.get('tfmargs_y', False): res.tfmargs_y = state['tfmargs_y']
if state.get('normalize', False): res.normalize = state['normalize']
return res
|
[
"def",
"load_state",
"(",
"cls",
",",
"path",
":",
"PathOrStr",
",",
"state",
":",
"dict",
")",
"->",
"'LabelList'",
":",
"x",
"=",
"state",
"[",
"'x_cls'",
"]",
"(",
"[",
"]",
",",
"path",
"=",
"path",
",",
"processor",
"=",
"state",
"[",
"'x_proc'",
"]",
",",
"ignore_empty",
"=",
"True",
")",
"y",
"=",
"state",
"[",
"'y_cls'",
"]",
"(",
"[",
"]",
",",
"path",
"=",
"path",
",",
"processor",
"=",
"state",
"[",
"'y_proc'",
"]",
",",
"ignore_empty",
"=",
"True",
")",
"res",
"=",
"cls",
"(",
"x",
",",
"y",
",",
"tfms",
"=",
"state",
"[",
"'tfms'",
"]",
",",
"tfm_y",
"=",
"state",
"[",
"'tfm_y'",
"]",
",",
"*",
"*",
"state",
"[",
"'tfmargs'",
"]",
")",
".",
"process",
"(",
")",
"if",
"state",
".",
"get",
"(",
"'tfms_y'",
",",
"False",
")",
":",
"res",
".",
"tfms_y",
"=",
"state",
"[",
"'tfms_y'",
"]",
"if",
"state",
".",
"get",
"(",
"'tfmargs_y'",
",",
"False",
")",
":",
"res",
".",
"tfmargs_y",
"=",
"state",
"[",
"'tfmargs_y'",
"]",
"if",
"state",
".",
"get",
"(",
"'normalize'",
",",
"False",
")",
":",
"res",
".",
"normalize",
"=",
"state",
"[",
"'normalize'",
"]",
"return",
"res"
] |
Create a `LabelList` from `state`.
|
[
"Create",
"a",
"LabelList",
"from",
"state",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L673-L681
|
20,563
|
fastai/fastai
|
fastai/data_block.py
|
LabelList.process
|
def process(self, xp:PreProcessor=None, yp:PreProcessor=None, name:str=None):
"Launch the processing on `self.x` and `self.y` with `xp` and `yp`."
self.y.process(yp)
if getattr(self.y, 'filter_missing_y', False):
filt = array([o is None for o in self.y.items])
if filt.sum()>0:
#Warnings are given later since progress_bar might make them disappear.
self.warn = f"You are labelling your items with {self.y.__class__.__name__}.\n"
self.warn += f"Your {name} set contained the following unknown labels, the corresponding items have been discarded.\n"
for p in self.y.processor:
if len(getattr(p, 'warns', [])) > 0:
warnings = list(set(p.warns))
self.warn += ', '.join(warnings[:5])
if len(warnings) > 5: self.warn += "..."
p.warns = []
self.x,self.y = self.x[~filt],self.y[~filt]
self.x.process(xp)
return self
|
python
|
def process(self, xp:PreProcessor=None, yp:PreProcessor=None, name:str=None):
"Launch the processing on `self.x` and `self.y` with `xp` and `yp`."
self.y.process(yp)
if getattr(self.y, 'filter_missing_y', False):
filt = array([o is None for o in self.y.items])
if filt.sum()>0:
#Warnings are given later since progress_bar might make them disappear.
self.warn = f"You are labelling your items with {self.y.__class__.__name__}.\n"
self.warn += f"Your {name} set contained the following unknown labels, the corresponding items have been discarded.\n"
for p in self.y.processor:
if len(getattr(p, 'warns', [])) > 0:
warnings = list(set(p.warns))
self.warn += ', '.join(warnings[:5])
if len(warnings) > 5: self.warn += "..."
p.warns = []
self.x,self.y = self.x[~filt],self.y[~filt]
self.x.process(xp)
return self
|
[
"def",
"process",
"(",
"self",
",",
"xp",
":",
"PreProcessor",
"=",
"None",
",",
"yp",
":",
"PreProcessor",
"=",
"None",
",",
"name",
":",
"str",
"=",
"None",
")",
":",
"self",
".",
"y",
".",
"process",
"(",
"yp",
")",
"if",
"getattr",
"(",
"self",
".",
"y",
",",
"'filter_missing_y'",
",",
"False",
")",
":",
"filt",
"=",
"array",
"(",
"[",
"o",
"is",
"None",
"for",
"o",
"in",
"self",
".",
"y",
".",
"items",
"]",
")",
"if",
"filt",
".",
"sum",
"(",
")",
">",
"0",
":",
"#Warnings are given later since progress_bar might make them disappear.",
"self",
".",
"warn",
"=",
"f\"You are labelling your items with {self.y.__class__.__name__}.\\n\"",
"self",
".",
"warn",
"+=",
"f\"Your {name} set contained the following unknown labels, the corresponding items have been discarded.\\n\"",
"for",
"p",
"in",
"self",
".",
"y",
".",
"processor",
":",
"if",
"len",
"(",
"getattr",
"(",
"p",
",",
"'warns'",
",",
"[",
"]",
")",
")",
">",
"0",
":",
"warnings",
"=",
"list",
"(",
"set",
"(",
"p",
".",
"warns",
")",
")",
"self",
".",
"warn",
"+=",
"', '",
".",
"join",
"(",
"warnings",
"[",
":",
"5",
"]",
")",
"if",
"len",
"(",
"warnings",
")",
">",
"5",
":",
"self",
".",
"warn",
"+=",
"\"...\"",
"p",
".",
"warns",
"=",
"[",
"]",
"self",
".",
"x",
",",
"self",
".",
"y",
"=",
"self",
".",
"x",
"[",
"~",
"filt",
"]",
",",
"self",
".",
"y",
"[",
"~",
"filt",
"]",
"self",
".",
"x",
".",
"process",
"(",
"xp",
")",
"return",
"self"
] |
Launch the processing on `self.x` and `self.y` with `xp` and `yp`.
|
[
"Launch",
"the",
"processing",
"on",
"self",
".",
"x",
"and",
"self",
".",
"y",
"with",
"xp",
"and",
"yp",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L683-L700
|
20,564
|
fastai/fastai
|
fastai/data_block.py
|
LabelList.transform
|
def transform(self, tfms:TfmList, tfm_y:bool=None, **kwargs):
"Set the `tfms` and `tfm_y` value to be applied to the inputs and targets."
_check_kwargs(self.x, tfms, **kwargs)
if tfm_y is None: tfm_y = self.tfm_y
if tfm_y: _check_kwargs(self.y, tfms, **kwargs)
self.tfms, self.tfmargs = tfms,kwargs
self.tfm_y, self.tfmargs_y = tfm_y,kwargs
self.tfms_y = None if tfms is None else list(filter(lambda t: t.use_on_y, listify(tfms)))
return self
|
python
|
def transform(self, tfms:TfmList, tfm_y:bool=None, **kwargs):
"Set the `tfms` and `tfm_y` value to be applied to the inputs and targets."
_check_kwargs(self.x, tfms, **kwargs)
if tfm_y is None: tfm_y = self.tfm_y
if tfm_y: _check_kwargs(self.y, tfms, **kwargs)
self.tfms, self.tfmargs = tfms,kwargs
self.tfm_y, self.tfmargs_y = tfm_y,kwargs
self.tfms_y = None if tfms is None else list(filter(lambda t: t.use_on_y, listify(tfms)))
return self
|
[
"def",
"transform",
"(",
"self",
",",
"tfms",
":",
"TfmList",
",",
"tfm_y",
":",
"bool",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"_check_kwargs",
"(",
"self",
".",
"x",
",",
"tfms",
",",
"*",
"*",
"kwargs",
")",
"if",
"tfm_y",
"is",
"None",
":",
"tfm_y",
"=",
"self",
".",
"tfm_y",
"if",
"tfm_y",
":",
"_check_kwargs",
"(",
"self",
".",
"y",
",",
"tfms",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"tfms",
",",
"self",
".",
"tfmargs",
"=",
"tfms",
",",
"kwargs",
"self",
".",
"tfm_y",
",",
"self",
".",
"tfmargs_y",
"=",
"tfm_y",
",",
"kwargs",
"self",
".",
"tfms_y",
"=",
"None",
"if",
"tfms",
"is",
"None",
"else",
"list",
"(",
"filter",
"(",
"lambda",
"t",
":",
"t",
".",
"use_on_y",
",",
"listify",
"(",
"tfms",
")",
")",
")",
"return",
"self"
] |
Set the `tfms` and `tfm_y` value to be applied to the inputs and targets.
|
[
"Set",
"the",
"tfms",
"and",
"tfm_y",
"value",
"to",
"be",
"applied",
"to",
"the",
"inputs",
"and",
"targets",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L707-L715
|
20,565
|
fastai/fastai
|
fastai/data_block.py
|
LabelList.transform_y
|
def transform_y(self, tfms:TfmList=None, **kwargs):
"Set `tfms` to be applied to the targets only."
_check_kwargs(self.y, tfms, **kwargs)
self.tfm_y=True
if tfms is None:
self.tfms_y = list(filter(lambda t: t.use_on_y, listify(self.tfms)))
self.tfmargs_y = {**self.tfmargs, **kwargs}
else:
tfms = list(filter(lambda t: t.use_on_y, tfms))
self.tfms_y,self.tfmargs_y = tfms,kwargs
return self
|
python
|
def transform_y(self, tfms:TfmList=None, **kwargs):
"Set `tfms` to be applied to the targets only."
_check_kwargs(self.y, tfms, **kwargs)
self.tfm_y=True
if tfms is None:
self.tfms_y = list(filter(lambda t: t.use_on_y, listify(self.tfms)))
self.tfmargs_y = {**self.tfmargs, **kwargs}
else:
tfms = list(filter(lambda t: t.use_on_y, tfms))
self.tfms_y,self.tfmargs_y = tfms,kwargs
return self
|
[
"def",
"transform_y",
"(",
"self",
",",
"tfms",
":",
"TfmList",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"_check_kwargs",
"(",
"self",
".",
"y",
",",
"tfms",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"tfm_y",
"=",
"True",
"if",
"tfms",
"is",
"None",
":",
"self",
".",
"tfms_y",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"t",
":",
"t",
".",
"use_on_y",
",",
"listify",
"(",
"self",
".",
"tfms",
")",
")",
")",
"self",
".",
"tfmargs_y",
"=",
"{",
"*",
"*",
"self",
".",
"tfmargs",
",",
"*",
"*",
"kwargs",
"}",
"else",
":",
"tfms",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"t",
":",
"t",
".",
"use_on_y",
",",
"tfms",
")",
")",
"self",
".",
"tfms_y",
",",
"self",
".",
"tfmargs_y",
"=",
"tfms",
",",
"kwargs",
"return",
"self"
] |
Set `tfms` to be applied to the targets only.
|
[
"Set",
"tfms",
"to",
"be",
"applied",
"to",
"the",
"targets",
"only",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L717-L727
|
20,566
|
fastai/fastai
|
fastai/utils/collect_env.py
|
get_env
|
def get_env(name):
"Return env var value if it's defined and not an empty string, or return Unknown"
res = os.environ.get(name,'')
return res if len(res) else "Unknown"
|
python
|
def get_env(name):
"Return env var value if it's defined and not an empty string, or return Unknown"
res = os.environ.get(name,'')
return res if len(res) else "Unknown"
|
[
"def",
"get_env",
"(",
"name",
")",
":",
"res",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"name",
",",
"''",
")",
"return",
"res",
"if",
"len",
"(",
"res",
")",
"else",
"\"Unknown\""
] |
Return env var value if it's defined and not an empty string, or return Unknown
|
[
"Return",
"env",
"var",
"value",
"if",
"it",
"s",
"defined",
"and",
"not",
"an",
"empty",
"string",
"or",
"return",
"Unknown"
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/utils/collect_env.py#L11-L14
|
20,567
|
fastai/fastai
|
fastai/utils/collect_env.py
|
pypi_module_version_is_available
|
def pypi_module_version_is_available(module, version):
"Check whether module==version is available on pypi"
# returns True/False (or None if failed to execute the check)
# using a hack that when passing "module==" w/ no version number to pip
# it "fails" and returns all the available versions in stderr
try:
cmd = f"pip install {module}=="
result = subprocess.run(cmd.split(), shell=False, check=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
print(f"Error: {e}")
return None
else:
if result.returncode == 1 and result.stderr:
output = result.stderr.decode('utf-8')
return True if version in output else False
else:
print(f"Some error in {cmd}")
return None
|
python
|
def pypi_module_version_is_available(module, version):
"Check whether module==version is available on pypi"
# returns True/False (or None if failed to execute the check)
# using a hack that when passing "module==" w/ no version number to pip
# it "fails" and returns all the available versions in stderr
try:
cmd = f"pip install {module}=="
result = subprocess.run(cmd.split(), shell=False, check=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
print(f"Error: {e}")
return None
else:
if result.returncode == 1 and result.stderr:
output = result.stderr.decode('utf-8')
return True if version in output else False
else:
print(f"Some error in {cmd}")
return None
|
[
"def",
"pypi_module_version_is_available",
"(",
"module",
",",
"version",
")",
":",
"# returns True/False (or None if failed to execute the check)",
"# using a hack that when passing \"module==\" w/ no version number to pip",
"# it \"fails\" and returns all the available versions in stderr",
"try",
":",
"cmd",
"=",
"f\"pip install {module}==\"",
"result",
"=",
"subprocess",
".",
"run",
"(",
"cmd",
".",
"split",
"(",
")",
",",
"shell",
"=",
"False",
",",
"check",
"=",
"False",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"f\"Error: {e}\"",
")",
"return",
"None",
"else",
":",
"if",
"result",
".",
"returncode",
"==",
"1",
"and",
"result",
".",
"stderr",
":",
"output",
"=",
"result",
".",
"stderr",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"True",
"if",
"version",
"in",
"output",
"else",
"False",
"else",
":",
"print",
"(",
"f\"Some error in {cmd}\"",
")",
"return",
"None"
] |
Check whether module==version is available on pypi
|
[
"Check",
"whether",
"module",
"==",
"version",
"is",
"available",
"on",
"pypi"
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/utils/collect_env.py#L129-L148
|
20,568
|
fastai/fastai
|
fastai/callback.py
|
annealing_linear
|
def annealing_linear(start:Number, end:Number, pct:float)->Number:
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start + pct * (end-start)
|
python
|
def annealing_linear(start:Number, end:Number, pct:float)->Number:
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start + pct * (end-start)
|
[
"def",
"annealing_linear",
"(",
"start",
":",
"Number",
",",
"end",
":",
"Number",
",",
"pct",
":",
"float",
")",
"->",
"Number",
":",
"return",
"start",
"+",
"pct",
"*",
"(",
"end",
"-",
"start",
")"
] |
Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0.
|
[
"Linearly",
"anneal",
"from",
"start",
"to",
"end",
"as",
"pct",
"goes",
"from",
"0",
".",
"0",
"to",
"1",
".",
"0",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L358-L360
|
20,569
|
fastai/fastai
|
fastai/callback.py
|
annealing_exp
|
def annealing_exp(start:Number, end:Number, pct:float)->Number:
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct
|
python
|
def annealing_exp(start:Number, end:Number, pct:float)->Number:
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct
|
[
"def",
"annealing_exp",
"(",
"start",
":",
"Number",
",",
"end",
":",
"Number",
",",
"pct",
":",
"float",
")",
"->",
"Number",
":",
"return",
"start",
"*",
"(",
"end",
"/",
"start",
")",
"**",
"pct"
] |
Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0.
|
[
"Exponentially",
"anneal",
"from",
"start",
"to",
"end",
"as",
"pct",
"goes",
"from",
"0",
".",
"0",
"to",
"1",
".",
"0",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L361-L363
|
20,570
|
fastai/fastai
|
fastai/callback.py
|
annealing_cos
|
def annealing_cos(start:Number, end:Number, pct:float)->Number:
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
|
python
|
def annealing_cos(start:Number, end:Number, pct:float)->Number:
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
|
[
"def",
"annealing_cos",
"(",
"start",
":",
"Number",
",",
"end",
":",
"Number",
",",
"pct",
":",
"float",
")",
"->",
"Number",
":",
"cos_out",
"=",
"np",
".",
"cos",
"(",
"np",
".",
"pi",
"*",
"pct",
")",
"+",
"1",
"return",
"end",
"+",
"(",
"start",
"-",
"end",
")",
"/",
"2",
"*",
"cos_out"
] |
Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0.
|
[
"Cosine",
"anneal",
"from",
"start",
"to",
"end",
"as",
"pct",
"goes",
"from",
"0",
".",
"0",
"to",
"1",
".",
"0",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L364-L367
|
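The three annealing records above (`annealing_linear`, `annealing_exp`, `annealing_cos`) share the signature `(start, end, pct)` with `pct` running from 0.0 to 1.0. Copying their bodies verbatim (minus the type annotations) makes it easy to check that they agree at the endpoints and differ in between:

import numpy as np

def annealing_linear(start, end, pct): return start + pct * (end - start)
def annealing_exp(start, end, pct):    return start * (end / start) ** pct
def annealing_cos(start, end, pct):    return end + (start - end) / 2 * (np.cos(np.pi * pct) + 1)

for pct in (0.0, 0.5, 1.0):                            # start=1.0, end=0.1
    print(pct,
          round(annealing_linear(1.0, 0.1, pct), 4),   # 1.0, 0.55,   0.1
          round(annealing_exp(1.0, 0.1, pct), 4),      # 1.0, 0.3162, 0.1
          round(annealing_cos(1.0, 0.1, pct), 4))      # 1.0, 0.55,   0.1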
20,571
|
fastai/fastai
|
fastai/callback.py
|
do_annealing_poly
|
def do_annealing_poly(start:Number, end:Number, pct:float, degree:Number)->Number:
"Helper function for `anneal_poly`."
return end + (start-end) * (1-pct)**degree
|
python
|
def do_annealing_poly(start:Number, end:Number, pct:float, degree:Number)->Number:
"Helper function for `anneal_poly`."
return end + (start-end) * (1-pct)**degree
|
[
"def",
"do_annealing_poly",
"(",
"start",
":",
"Number",
",",
"end",
":",
"Number",
",",
"pct",
":",
"float",
",",
"degree",
":",
"Number",
")",
"->",
"Number",
":",
"return",
"end",
"+",
"(",
"start",
"-",
"end",
")",
"*",
"(",
"1",
"-",
"pct",
")",
"**",
"degree"
] |
Helper function for `anneal_poly`.
|
[
"Helper",
"function",
"for",
"anneal_poly",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L369-L371
|
20,572
|
fastai/fastai
|
fastai/callback.py
|
OptimWrapper.create
|
def create(cls, opt_func:Union[type,Callable], lr:Union[float,Tuple,List], layer_groups:ModuleList, wd:Floats=0.,
true_wd:bool=False, bn_wd:bool=True)->optim.Optimizer:
"Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
split_params = split_no_wd_params(layer_groups)
opt = opt_func([{'params': p, 'lr':0} for p in split_params])
opt = cls(opt, wd=wd, true_wd=true_wd, bn_wd=bn_wd)
opt.lr,opt.opt_func = listify(lr, layer_groups),opt_func
return opt
|
python
|
def create(cls, opt_func:Union[type,Callable], lr:Union[float,Tuple,List], layer_groups:ModuleList, wd:Floats=0.,
true_wd:bool=False, bn_wd:bool=True)->optim.Optimizer:
"Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
split_params = split_no_wd_params(layer_groups)
opt = opt_func([{'params': p, 'lr':0} for p in split_params])
opt = cls(opt, wd=wd, true_wd=true_wd, bn_wd=bn_wd)
opt.lr,opt.opt_func = listify(lr, layer_groups),opt_func
return opt
|
[
"def",
"create",
"(",
"cls",
",",
"opt_func",
":",
"Union",
"[",
"type",
",",
"Callable",
"]",
",",
"lr",
":",
"Union",
"[",
"float",
",",
"Tuple",
",",
"List",
"]",
",",
"layer_groups",
":",
"ModuleList",
",",
"wd",
":",
"Floats",
"=",
"0.",
",",
"true_wd",
":",
"bool",
"=",
"False",
",",
"bn_wd",
":",
"bool",
"=",
"True",
")",
"->",
"optim",
".",
"Optimizer",
":",
"split_params",
"=",
"split_no_wd_params",
"(",
"layer_groups",
")",
"opt",
"=",
"opt_func",
"(",
"[",
"{",
"'params'",
":",
"p",
",",
"'lr'",
":",
"0",
"}",
"for",
"p",
"in",
"split_params",
"]",
")",
"opt",
"=",
"cls",
"(",
"opt",
",",
"wd",
"=",
"wd",
",",
"true_wd",
"=",
"true_wd",
",",
"bn_wd",
"=",
"bn_wd",
")",
"opt",
".",
"lr",
",",
"opt",
".",
"opt_func",
"=",
"listify",
"(",
"lr",
",",
"layer_groups",
")",
",",
"opt_func",
"return",
"opt"
] |
Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`.
|
[
"Create",
"an",
"optim",
".",
"Optimizer",
"from",
"opt_func",
"with",
"lr",
".",
"Set",
"lr",
"on",
"layer_groups",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L20-L27
|
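`OptimWrapper.create` above builds two optimizer param groups per layer group (one for weight-decayed parameters, one for bias/batchnorm parameters via `split_no_wd_params`), which is why the other `OptimWrapper` records below index `param_groups[::2]` and `param_groups[1::2]`. A rough PyTorch sketch of that layout, splitting by tensor dimensionality instead of fastai's layer-type rule (an approximation, not the library's actual split):

from torch import nn, optim

layer_group  = nn.Sequential(nn.Linear(4, 4), nn.BatchNorm1d(4))
wd_params    = [p for p in layer_group.parameters() if p.ndim >= 2]   # weights -> decayed
no_wd_params = [p for p in layer_group.parameters() if p.ndim < 2]    # biases, batchnorm -> not decayed

opt = optim.SGD([{'params': wd_params,    'lr': 0.0},    # even-indexed group
                 {'params': no_wd_params, 'lr': 0.0}],   # odd-indexed group
                lr=0.0)
print([len(g['params']) for g in opt.param_groups])   # -> [1, 3] for this single layer group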
20,573
|
fastai/fastai
|
fastai/callback.py
|
OptimWrapper.step
|
def step(self)->None:
"Set weight decay and step optimizer."
# weight decay outside of optimizer step (AdamW)
if self.true_wd:
for lr,wd,pg1,pg2 in zip(self._lr,self._wd,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
for p in pg1['params']: p.data.mul_(1 - wd*lr)
if self.bn_wd:
for p in pg2['params']: p.data.mul_(1 - wd*lr)
self.set_val('weight_decay', listify(0, self._wd))
self.opt.step()
|
python
|
def step(self)->None:
"Set weight decay and step optimizer."
# weight decay outside of optimizer step (AdamW)
if self.true_wd:
for lr,wd,pg1,pg2 in zip(self._lr,self._wd,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
for p in pg1['params']: p.data.mul_(1 - wd*lr)
if self.bn_wd:
for p in pg2['params']: p.data.mul_(1 - wd*lr)
self.set_val('weight_decay', listify(0, self._wd))
self.opt.step()
|
[
"def",
"step",
"(",
"self",
")",
"->",
"None",
":",
"# weight decay outside of optimizer step (AdamW)",
"if",
"self",
".",
"true_wd",
":",
"for",
"lr",
",",
"wd",
",",
"pg1",
",",
"pg2",
"in",
"zip",
"(",
"self",
".",
"_lr",
",",
"self",
".",
"_wd",
",",
"self",
".",
"opt",
".",
"param_groups",
"[",
":",
":",
"2",
"]",
",",
"self",
".",
"opt",
".",
"param_groups",
"[",
"1",
":",
":",
"2",
"]",
")",
":",
"for",
"p",
"in",
"pg1",
"[",
"'params'",
"]",
":",
"p",
".",
"data",
".",
"mul_",
"(",
"1",
"-",
"wd",
"*",
"lr",
")",
"if",
"self",
".",
"bn_wd",
":",
"for",
"p",
"in",
"pg2",
"[",
"'params'",
"]",
":",
"p",
".",
"data",
".",
"mul_",
"(",
"1",
"-",
"wd",
"*",
"lr",
")",
"self",
".",
"set_val",
"(",
"'weight_decay'",
",",
"listify",
"(",
"0",
",",
"self",
".",
"_wd",
")",
")",
"self",
".",
"opt",
".",
"step",
"(",
")"
] |
Set weight decay and step optimizer.
|
[
"Set",
"weight",
"decay",
"and",
"step",
"optimizer",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L48-L57
|
20,574
|
fastai/fastai
|
fastai/callback.py
|
OptimWrapper.wd
|
def wd(self, val:float)->None:
"Set weight decay."
if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
self._wd = listify(val, self._wd)
|
python
|
def wd(self, val:float)->None:
"Set weight decay."
if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
self._wd = listify(val, self._wd)
|
[
"def",
"wd",
"(",
"self",
",",
"val",
":",
"float",
")",
"->",
"None",
":",
"if",
"not",
"self",
".",
"true_wd",
":",
"self",
".",
"set_val",
"(",
"'weight_decay'",
",",
"listify",
"(",
"val",
",",
"self",
".",
"_wd",
")",
",",
"bn_groups",
"=",
"self",
".",
"bn_wd",
")",
"self",
".",
"_wd",
"=",
"listify",
"(",
"val",
",",
"self",
".",
"_wd",
")"
] |
Set weight decay.
|
[
"Set",
"weight",
"decay",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L104-L107
|
20,575
|
fastai/fastai
|
fastai/callback.py
|
OptimWrapper.read_defaults
|
def read_defaults(self)->None:
"Read the values inside the optimizer for the hyper-parameters."
self._beta = None
if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
if 'betas' in self.opt_keys: self._mom,self._beta = self.read_val('betas')
if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
reserved_names = ['params', 'lr', 'momentum', 'alpha', 'betas', 'weight_decay']
stat_names = [n for n in self.opt_keys if n not in reserved_names]
self._stats = {n:self.read_val(n) for n in stat_names}
|
python
|
def read_defaults(self)->None:
"Read the values inside the optimizer for the hyper-parameters."
self._beta = None
if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
if 'betas' in self.opt_keys: self._mom,self._beta = self.read_val('betas')
if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
reserved_names = ['params', 'lr', 'momentum', 'alpha', 'betas', 'weight_decay']
stat_names = [n for n in self.opt_keys if n not in reserved_names]
self._stats = {n:self.read_val(n) for n in stat_names}
|
[
"def",
"read_defaults",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"_beta",
"=",
"None",
"if",
"'lr'",
"in",
"self",
".",
"opt_keys",
":",
"self",
".",
"_lr",
"=",
"self",
".",
"read_val",
"(",
"'lr'",
")",
"if",
"'momentum'",
"in",
"self",
".",
"opt_keys",
":",
"self",
".",
"_mom",
"=",
"self",
".",
"read_val",
"(",
"'momentum'",
")",
"if",
"'alpha'",
"in",
"self",
".",
"opt_keys",
":",
"self",
".",
"_beta",
"=",
"self",
".",
"read_val",
"(",
"'alpha'",
")",
"if",
"'betas'",
"in",
"self",
".",
"opt_keys",
":",
"self",
".",
"_mom",
",",
"self",
".",
"_beta",
"=",
"self",
".",
"read_val",
"(",
"'betas'",
")",
"if",
"'weight_decay'",
"in",
"self",
".",
"opt_keys",
":",
"self",
".",
"_wd",
"=",
"self",
".",
"read_val",
"(",
"'weight_decay'",
")",
"reserved_names",
"=",
"[",
"'params'",
",",
"'lr'",
",",
"'momentum'",
",",
"'alpha'",
",",
"'betas'",
",",
"'weight_decay'",
"]",
"stat_names",
"=",
"[",
"n",
"for",
"n",
"in",
"self",
".",
"opt_keys",
"if",
"n",
"not",
"in",
"reserved_names",
"]",
"self",
".",
"_stats",
"=",
"{",
"n",
":",
"self",
".",
"read_val",
"(",
"n",
")",
"for",
"n",
"in",
"stat_names",
"}"
] |
Read the values inside the optimizer for the hyper-parameters.
|
[
"Read",
"the",
"values",
"inside",
"the",
"optimizer",
"for",
"the",
"hyper",
"-",
"parameters",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L110-L120
|
20,576
|
fastai/fastai
|
fastai/callback.py
|
OptimWrapper.set_val
|
def set_val(self, key:str, val:Any, bn_groups:bool=True)->Any:
"Set `val` inside the optimizer dictionary at `key`."
if is_tuple(val): val = [(v1,v2) for v1,v2 in zip(*val)]
for v,pg1,pg2 in zip(val,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
pg1[key] = v
if bn_groups: pg2[key] = v
return val
|
python
|
def set_val(self, key:str, val:Any, bn_groups:bool=True)->Any:
"Set `val` inside the optimizer dictionary at `key`."
if is_tuple(val): val = [(v1,v2) for v1,v2 in zip(*val)]
for v,pg1,pg2 in zip(val,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
pg1[key] = v
if bn_groups: pg2[key] = v
return val
|
[
"def",
"set_val",
"(",
"self",
",",
"key",
":",
"str",
",",
"val",
":",
"Any",
",",
"bn_groups",
":",
"bool",
"=",
"True",
")",
"->",
"Any",
":",
"if",
"is_tuple",
"(",
"val",
")",
":",
"val",
"=",
"[",
"(",
"v1",
",",
"v2",
")",
"for",
"v1",
",",
"v2",
"in",
"zip",
"(",
"*",
"val",
")",
"]",
"for",
"v",
",",
"pg1",
",",
"pg2",
"in",
"zip",
"(",
"val",
",",
"self",
".",
"opt",
".",
"param_groups",
"[",
":",
":",
"2",
"]",
",",
"self",
".",
"opt",
".",
"param_groups",
"[",
"1",
":",
":",
"2",
"]",
")",
":",
"pg1",
"[",
"key",
"]",
"=",
"v",
"if",
"bn_groups",
":",
"pg2",
"[",
"key",
"]",
"=",
"v",
"return",
"val"
] |
Set `val` inside the optimizer dictionary at `key`.
|
[
"Set",
"val",
"inside",
"the",
"optimizer",
"dictionary",
"at",
"key",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L132-L138
|
20,577
|
fastai/fastai
|
fastai/callback.py
|
OptimWrapper.read_val
|
def read_val(self, key:str) -> Union[List[float],Tuple[List[float],List[float]]]:
"Read a hyperparameter `key` in the optimizer dictionary."
val = [pg[key] for pg in self.opt.param_groups[::2]]
if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
return val
|
python
|
def read_val(self, key:str) -> Union[List[float],Tuple[List[float],List[float]]]:
"Read a hyperparameter `key` in the optimizer dictionary."
val = [pg[key] for pg in self.opt.param_groups[::2]]
if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
return val
|
[
"def",
"read_val",
"(",
"self",
",",
"key",
":",
"str",
")",
"->",
"Union",
"[",
"List",
"[",
"float",
"]",
",",
"Tuple",
"[",
"List",
"[",
"float",
"]",
",",
"List",
"[",
"float",
"]",
"]",
"]",
":",
"val",
"=",
"[",
"pg",
"[",
"key",
"]",
"for",
"pg",
"in",
"self",
".",
"opt",
".",
"param_groups",
"[",
":",
":",
"2",
"]",
"]",
"if",
"is_tuple",
"(",
"val",
"[",
"0",
"]",
")",
":",
"val",
"=",
"[",
"o",
"[",
"0",
"]",
"for",
"o",
"in",
"val",
"]",
",",
"[",
"o",
"[",
"1",
"]",
"for",
"o",
"in",
"val",
"]",
"return",
"val"
] |
Read a hyperparameter `key` in the optimizer dictionary.
|
[
"Read",
"a",
"hyperparameter",
"key",
"in",
"the",
"optimizer",
"dictionary",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L140-L144
|
20,578
|
fastai/fastai
|
fastai/callback.py
|
OptimWrapper.get_state
|
def get_state(self):
"Return the inner state minus the layer groups."
return {'opt_state':self.opt.state_dict(), 'lr':self._lr, 'wd':self._wd, 'beta':self._beta, 'mom':self._mom,
'opt_func':self.opt_func, 'true_wd':self.true_wd, 'bn_wd':self.bn_wd}
|
python
|
def get_state(self):
"Return the inner state minus the layer groups."
return {'opt_state':self.opt.state_dict(), 'lr':self._lr, 'wd':self._wd, 'beta':self._beta, 'mom':self._mom,
'opt_func':self.opt_func, 'true_wd':self.true_wd, 'bn_wd':self.bn_wd}
|
[
"def",
"get_state",
"(",
"self",
")",
":",
"return",
"{",
"'opt_state'",
":",
"self",
".",
"opt",
".",
"state_dict",
"(",
")",
",",
"'lr'",
":",
"self",
".",
"_lr",
",",
"'wd'",
":",
"self",
".",
"_wd",
",",
"'beta'",
":",
"self",
".",
"_beta",
",",
"'mom'",
":",
"self",
".",
"_mom",
",",
"'opt_func'",
":",
"self",
".",
"opt_func",
",",
"'true_wd'",
":",
"self",
".",
"true_wd",
",",
"'bn_wd'",
":",
"self",
".",
"bn_wd",
"}"
] |
Return the inner state minus the layer groups.
|
[
"Return",
"the",
"inner",
"state",
"minus",
"the",
"layer",
"groups",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L146-L149
|
20,579
|
fastai/fastai
|
fastai/callback.py
|
Callback.get_state
|
def get_state(self, minimal:bool=True):
"Return the inner state of the `Callback`, `minimal` or not."
to_remove = ['exclude', 'not_min'] + getattr(self, 'exclude', []).copy()
if minimal: to_remove += getattr(self, 'not_min', []).copy()
return {k:v for k,v in self.__dict__.items() if k not in to_remove}
|
python
|
def get_state(self, minimal:bool=True):
"Return the inner state of the `Callback`, `minimal` or not."
to_remove = ['exclude', 'not_min'] + getattr(self, 'exclude', []).copy()
if minimal: to_remove += getattr(self, 'not_min', []).copy()
return {k:v for k,v in self.__dict__.items() if k not in to_remove}
|
[
"def",
"get_state",
"(",
"self",
",",
"minimal",
":",
"bool",
"=",
"True",
")",
":",
"to_remove",
"=",
"[",
"'exclude'",
",",
"'not_min'",
"]",
"+",
"getattr",
"(",
"self",
",",
"'exclude'",
",",
"[",
"]",
")",
".",
"copy",
"(",
")",
"if",
"minimal",
":",
"to_remove",
"+=",
"getattr",
"(",
"self",
",",
"'not_min'",
",",
"[",
"]",
")",
".",
"copy",
"(",
")",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"to_remove",
"}"
] |
Return the inner state of the `Callback`, `minimal` or not.
|
[
"Return",
"the",
"inner",
"state",
"of",
"the",
"Callback",
"minimal",
"or",
"not",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L196-L200
|
20,580
|
fastai/fastai
|
fastai/callback.py
|
SmoothenValue.add_value
|
def add_value(self, val:float)->None:
"Add `val` to calculate updated smoothed value."
self.n += 1
self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
self.smooth = self.mov_avg / (1 - self.beta ** self.n)
|
python
|
def add_value(self, val:float)->None:
"Add `val` to calculate updated smoothed value."
self.n += 1
self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
self.smooth = self.mov_avg / (1 - self.beta ** self.n)
|
[
"def",
"add_value",
"(",
"self",
",",
"val",
":",
"float",
")",
"->",
"None",
":",
"self",
".",
"n",
"+=",
"1",
"self",
".",
"mov_avg",
"=",
"self",
".",
"beta",
"*",
"self",
".",
"mov_avg",
"+",
"(",
"1",
"-",
"self",
".",
"beta",
")",
"*",
"val",
"self",
".",
"smooth",
"=",
"self",
".",
"mov_avg",
"/",
"(",
"1",
"-",
"self",
".",
"beta",
"**",
"self",
".",
"n",
")"
] |
Add `val` to calculate updated smoothed value.
|
[
"Add",
"val",
"to",
"calculate",
"updated",
"smoothed",
"value",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L213-L217
|
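`SmoothenValue.add_value` above is a debiased exponential moving average (the same `1 - beta**n` correction used by Adam). A tiny self-contained version shows that the correction keeps early estimates honest; the `__init__` below is paraphrased, only `add_value` is taken from the record:

class SmoothenValue:
    def __init__(self, beta):
        self.beta, self.n, self.mov_avg = beta, 0, 0.0
    def add_value(self, val):
        # Same update as the record above.
        self.n += 1
        self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
        self.smooth = self.mov_avg / (1 - self.beta ** self.n)

s = SmoothenValue(beta=0.9)
for v in (2.0, 2.0, 2.0):
    s.add_value(v)
    print(round(s.smooth, 4))   # 2.0 each time; without the correction the first estimate would be 0.2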
20,581
|
fastai/fastai
|
fastai/callback.py
|
AverageMetric.on_batch_end
|
def on_batch_end(self, last_output, last_target, **kwargs):
"Update metric computation with `last_output` and `last_target`."
if not is_listy(last_target): last_target=[last_target]
self.count += last_target[0].size(0)
val = self.func(last_output, *last_target)
if self.world:
val = val.clone()
dist.all_reduce(val, op=dist.ReduceOp.SUM)
val /= self.world
self.val += last_target[0].size(0) * val.detach().cpu()
|
python
|
def on_batch_end(self, last_output, last_target, **kwargs):
"Update metric computation with `last_output` and `last_target`."
if not is_listy(last_target): last_target=[last_target]
self.count += last_target[0].size(0)
val = self.func(last_output, *last_target)
if self.world:
val = val.clone()
dist.all_reduce(val, op=dist.ReduceOp.SUM)
val /= self.world
self.val += last_target[0].size(0) * val.detach().cpu()
|
[
"def",
"on_batch_end",
"(",
"self",
",",
"last_output",
",",
"last_target",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"is_listy",
"(",
"last_target",
")",
":",
"last_target",
"=",
"[",
"last_target",
"]",
"self",
".",
"count",
"+=",
"last_target",
"[",
"0",
"]",
".",
"size",
"(",
"0",
")",
"val",
"=",
"self",
".",
"func",
"(",
"last_output",
",",
"*",
"last_target",
")",
"if",
"self",
".",
"world",
":",
"val",
"=",
"val",
".",
"clone",
"(",
")",
"dist",
".",
"all_reduce",
"(",
"val",
",",
"op",
"=",
"dist",
".",
"ReduceOp",
".",
"SUM",
")",
"val",
"/=",
"self",
".",
"world",
"self",
".",
"val",
"+=",
"last_target",
"[",
"0",
"]",
".",
"size",
"(",
"0",
")",
"*",
"val",
".",
"detach",
"(",
")",
".",
"cpu",
"(",
")"
] |
Update metric computation with `last_output` and `last_target`.
|
[
"Update",
"metric",
"computation",
"with",
"last_output",
"and",
"last_target",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L340-L349
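A toy, dependency-free sketch of the batch-size weighting used above, omitting the distributed all_reduce branch:

batches = [(32, 0.75), (32, 0.80), (16, 0.50)]    # (batch size, per-batch metric value)
val = sum(bs * m for bs, m in batches)            # the on_batch_end accumulation
count = sum(bs for bs, _ in batches)
print(val / count)                                # 0.72, the value on_epoch_end reports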
|
20,582
|
fastai/fastai
|
fastai/callback.py
|
AverageMetric.on_epoch_end
|
def on_epoch_end(self, last_metrics, **kwargs):
"Set the final result in `last_metrics`."
return add_metrics(last_metrics, self.val/self.count)
|
python
|
def on_epoch_end(self, last_metrics, **kwargs):
"Set the final result in `last_metrics`."
return add_metrics(last_metrics, self.val/self.count)
|
[
"def",
"on_epoch_end",
"(",
"self",
",",
"last_metrics",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"add_metrics",
"(",
"last_metrics",
",",
"self",
".",
"val",
"/",
"self",
".",
"count",
")"
] |
Set the final result in `last_metrics`.
|
[
"Set",
"the",
"final",
"result",
"in",
"last_metrics",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L351-L353
|
20,583
|
fastai/fastai
|
fastai/callback.py
|
Scheduler.step
|
def step(self)->Number:
"Return next value along annealed schedule."
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter)
|
python
|
def step(self)->Number:
"Return next value along annealed schedule."
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter)
|
[
"def",
"step",
"(",
"self",
")",
"->",
"Number",
":",
"self",
".",
"n",
"+=",
"1",
"return",
"self",
".",
"func",
"(",
"self",
".",
"start",
",",
"self",
".",
"end",
",",
"self",
".",
"n",
"/",
"self",
".",
"n_iter",
")"
] |
Return next value along annealed schedule.
|
[
"Return",
"next",
"value",
"along",
"annealed",
"schedule",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L387-L390
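A runnable sketch of how step walks an annealing function from start to end; the cosine function below is written inline with the (start, end, pct) signature the scheduler expects, rather than imported from fastai:

import math

def annealing_cos(start, end, pct):
    # cosine interpolation from start to end as pct goes 0 -> 1
    return end + (start - end) / 2 * (math.cos(math.pi * pct) + 1)

start, end, n_iter, n = 1e-3, 1e-5, 4, 0
for _ in range(n_iter):
    n += 1                                        # mirrors self.n += 1
    print(annealing_cos(start, end, n / n_iter))  # ~8.5e-4, 5.05e-4, 1.55e-4, 1e-5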
|
20,584
|
fastai/fastai
|
fastai/callbacks/one_cycle.py
|
OneCycleScheduler.steps
|
def steps(self, *steps_cfg:StartOptEnd):
"Build anneal schedule for all of the parameters."
return [Scheduler(step, n_iter, func=func)
for (step,(n_iter,func)) in zip(steps_cfg, self.phases)]
|
python
|
def steps(self, *steps_cfg:StartOptEnd):
"Build anneal schedule for all of the parameters."
return [Scheduler(step, n_iter, func=func)
for (step,(n_iter,func)) in zip(steps_cfg, self.phases)]
|
[
"def",
"steps",
"(",
"self",
",",
"*",
"steps_cfg",
":",
"StartOptEnd",
")",
":",
"return",
"[",
"Scheduler",
"(",
"step",
",",
"n_iter",
",",
"func",
"=",
"func",
")",
"for",
"(",
"step",
",",
"(",
"n_iter",
",",
"func",
")",
")",
"in",
"zip",
"(",
"steps_cfg",
",",
"self",
".",
"phases",
")",
"]"
] |
Build anneal schedule for all of the parameters.
|
[
"Build",
"anneal",
"schedule",
"for",
"all",
"of",
"the",
"parameters",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/one_cycle.py#L19-L22
|
20,585
|
fastai/fastai
|
fastai/callbacks/one_cycle.py
|
OneCycleScheduler.on_train_begin
|
def on_train_begin(self, n_epochs:int, epoch:int, **kwargs:Any)->None:
"Initialize our optimization params based on our annealing schedule."
res = {'epoch':self.start_epoch} if self.start_epoch is not None else None
self.start_epoch = ifnone(self.start_epoch, epoch)
self.tot_epochs = ifnone(self.tot_epochs, n_epochs)
n = len(self.learn.data.train_dl) * self.tot_epochs
a1 = int(n * self.pct_start)
a2 = n-a1
self.phases = ((a1, annealing_cos), (a2, annealing_cos))
low_lr = self.lr_max/self.div_factor
self.lr_scheds = self.steps((low_lr, self.lr_max), (self.lr_max, self.lr_max/self.final_div))
self.mom_scheds = self.steps(self.moms, (self.moms[1], self.moms[0]))
self.opt = self.learn.opt
self.opt.lr,self.opt.mom = self.lr_scheds[0].start,self.mom_scheds[0].start
self.idx_s = 0
return res
|
python
|
def on_train_begin(self, n_epochs:int, epoch:int, **kwargs:Any)->None:
"Initialize our optimization params based on our annealing schedule."
res = {'epoch':self.start_epoch} if self.start_epoch is not None else None
self.start_epoch = ifnone(self.start_epoch, epoch)
self.tot_epochs = ifnone(self.tot_epochs, n_epochs)
n = len(self.learn.data.train_dl) * self.tot_epochs
a1 = int(n * self.pct_start)
a2 = n-a1
self.phases = ((a1, annealing_cos), (a2, annealing_cos))
low_lr = self.lr_max/self.div_factor
self.lr_scheds = self.steps((low_lr, self.lr_max), (self.lr_max, self.lr_max/self.final_div))
self.mom_scheds = self.steps(self.moms, (self.moms[1], self.moms[0]))
self.opt = self.learn.opt
self.opt.lr,self.opt.mom = self.lr_scheds[0].start,self.mom_scheds[0].start
self.idx_s = 0
return res
|
[
"def",
"on_train_begin",
"(",
"self",
",",
"n_epochs",
":",
"int",
",",
"epoch",
":",
"int",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"None",
":",
"res",
"=",
"{",
"'epoch'",
":",
"self",
".",
"start_epoch",
"}",
"if",
"self",
".",
"start_epoch",
"is",
"not",
"None",
"else",
"None",
"self",
".",
"start_epoch",
"=",
"ifnone",
"(",
"self",
".",
"start_epoch",
",",
"epoch",
")",
"self",
".",
"tot_epochs",
"=",
"ifnone",
"(",
"self",
".",
"tot_epochs",
",",
"n_epochs",
")",
"n",
"=",
"len",
"(",
"self",
".",
"learn",
".",
"data",
".",
"train_dl",
")",
"*",
"self",
".",
"tot_epochs",
"a1",
"=",
"int",
"(",
"n",
"*",
"self",
".",
"pct_start",
")",
"a2",
"=",
"n",
"-",
"a1",
"self",
".",
"phases",
"=",
"(",
"(",
"a1",
",",
"annealing_cos",
")",
",",
"(",
"a2",
",",
"annealing_cos",
")",
")",
"low_lr",
"=",
"self",
".",
"lr_max",
"/",
"self",
".",
"div_factor",
"self",
".",
"lr_scheds",
"=",
"self",
".",
"steps",
"(",
"(",
"low_lr",
",",
"self",
".",
"lr_max",
")",
",",
"(",
"self",
".",
"lr_max",
",",
"self",
".",
"lr_max",
"/",
"self",
".",
"final_div",
")",
")",
"self",
".",
"mom_scheds",
"=",
"self",
".",
"steps",
"(",
"self",
".",
"moms",
",",
"(",
"self",
".",
"moms",
"[",
"1",
"]",
",",
"self",
".",
"moms",
"[",
"0",
"]",
")",
")",
"self",
".",
"opt",
"=",
"self",
".",
"learn",
".",
"opt",
"self",
".",
"opt",
".",
"lr",
",",
"self",
".",
"opt",
".",
"mom",
"=",
"self",
".",
"lr_scheds",
"[",
"0",
"]",
".",
"start",
",",
"self",
".",
"mom_scheds",
"[",
"0",
"]",
".",
"start",
"self",
".",
"idx_s",
"=",
"0",
"return",
"res"
] |
Initialize our optimization params based on our annealing schedule.
|
[
"Initialize",
"our",
"optimization",
"params",
"based",
"on",
"our",
"annealing",
"schedule",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/one_cycle.py#L24-L39
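A worked example of the phase arithmetic above, with illustrative numbers (100 batches per epoch, 4 epochs, pct_start=0.3):

batches_per_epoch, tot_epochs, pct_start = 100, 4, 0.3
n = batches_per_epoch * tot_epochs   # 400 total steps, len(train_dl) * tot_epochs
a1 = int(n * pct_start)              # 120 steps ramping lr up (low_lr -> lr_max)
a2 = n - a1                          # 280 steps annealing lr down (lr_max -> lr_max/final_div)
print(n, a1, a2)                     # 400 120 280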
|
20,586
|
fastai/fastai
|
fastai/callbacks/one_cycle.py
|
OneCycleScheduler.on_batch_end
|
def on_batch_end(self, train, **kwargs:Any)->None:
"Take one step forward on the annealing schedule for the optim params."
if train:
if self.idx_s >= len(self.lr_scheds): return {'stop_training': True, 'stop_epoch': True}
self.opt.lr = self.lr_scheds[self.idx_s].step()
self.opt.mom = self.mom_scheds[self.idx_s].step()
# when the current schedule is complete we move onto the next
# schedule. (in 1-cycle there are two schedules)
if self.lr_scheds[self.idx_s].is_done:
self.idx_s += 1
|
python
|
def on_batch_end(self, train, **kwargs:Any)->None:
"Take one step forward on the annealing schedule for the optim params."
if train:
if self.idx_s >= len(self.lr_scheds): return {'stop_training': True, 'stop_epoch': True}
self.opt.lr = self.lr_scheds[self.idx_s].step()
self.opt.mom = self.mom_scheds[self.idx_s].step()
# when the current schedule is complete we move onto the next
# schedule. (in 1-cycle there are two schedules)
if self.lr_scheds[self.idx_s].is_done:
self.idx_s += 1
|
[
"def",
"on_batch_end",
"(",
"self",
",",
"train",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"None",
":",
"if",
"train",
":",
"if",
"self",
".",
"idx_s",
">=",
"len",
"(",
"self",
".",
"lr_scheds",
")",
":",
"return",
"{",
"'stop_training'",
":",
"True",
",",
"'stop_epoch'",
":",
"True",
"}",
"self",
".",
"opt",
".",
"lr",
"=",
"self",
".",
"lr_scheds",
"[",
"self",
".",
"idx_s",
"]",
".",
"step",
"(",
")",
"self",
".",
"opt",
".",
"mom",
"=",
"self",
".",
"mom_scheds",
"[",
"self",
".",
"idx_s",
"]",
".",
"step",
"(",
")",
"# when the current schedule is complete we move onto the next",
"# schedule. (in 1-cycle there are two schedules)",
"if",
"self",
".",
"lr_scheds",
"[",
"self",
".",
"idx_s",
"]",
".",
"is_done",
":",
"self",
".",
"idx_s",
"+=",
"1"
] |
Take one step forward on the annealing schedule for the optim params.
|
[
"Take",
"one",
"step",
"forward",
"on",
"the",
"annealing",
"schedule",
"for",
"the",
"optim",
"params",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/one_cycle.py#L45-L54
|
20,587
|
fastai/fastai
|
fastai/vision/gan.py
|
basic_critic
|
def basic_critic(in_size:int, n_channels:int, n_features:int=64, n_extra_layers:int=0, **conv_kwargs):
"A basic critic for images `n_channels` x `in_size` x `in_size`."
layers = [conv_layer(n_channels, n_features, 4, 2, 1, leaky=0.2, norm_type=None, **conv_kwargs)]#norm_type=None?
cur_size, cur_ftrs = in_size//2, n_features
layers.append(nn.Sequential(*[conv_layer(cur_ftrs, cur_ftrs, 3, 1, leaky=0.2, **conv_kwargs) for _ in range(n_extra_layers)]))
while cur_size > 4:
layers.append(conv_layer(cur_ftrs, cur_ftrs*2, 4, 2, 1, leaky=0.2, **conv_kwargs))
cur_ftrs *= 2 ; cur_size //= 2
layers += [conv2d(cur_ftrs, 1, 4, padding=0), AvgFlatten()]
return nn.Sequential(*layers)
|
python
|
def basic_critic(in_size:int, n_channels:int, n_features:int=64, n_extra_layers:int=0, **conv_kwargs):
"A basic critic for images `n_channels` x `in_size` x `in_size`."
layers = [conv_layer(n_channels, n_features, 4, 2, 1, leaky=0.2, norm_type=None, **conv_kwargs)]#norm_type=None?
cur_size, cur_ftrs = in_size//2, n_features
layers.append(nn.Sequential(*[conv_layer(cur_ftrs, cur_ftrs, 3, 1, leaky=0.2, **conv_kwargs) for _ in range(n_extra_layers)]))
while cur_size > 4:
layers.append(conv_layer(cur_ftrs, cur_ftrs*2, 4, 2, 1, leaky=0.2, **conv_kwargs))
cur_ftrs *= 2 ; cur_size //= 2
layers += [conv2d(cur_ftrs, 1, 4, padding=0), AvgFlatten()]
return nn.Sequential(*layers)
|
[
"def",
"basic_critic",
"(",
"in_size",
":",
"int",
",",
"n_channels",
":",
"int",
",",
"n_features",
":",
"int",
"=",
"64",
",",
"n_extra_layers",
":",
"int",
"=",
"0",
",",
"*",
"*",
"conv_kwargs",
")",
":",
"layers",
"=",
"[",
"conv_layer",
"(",
"n_channels",
",",
"n_features",
",",
"4",
",",
"2",
",",
"1",
",",
"leaky",
"=",
"0.2",
",",
"norm_type",
"=",
"None",
",",
"*",
"*",
"conv_kwargs",
")",
"]",
"#norm_type=None?",
"cur_size",
",",
"cur_ftrs",
"=",
"in_size",
"//",
"2",
",",
"n_features",
"layers",
".",
"append",
"(",
"nn",
".",
"Sequential",
"(",
"*",
"[",
"conv_layer",
"(",
"cur_ftrs",
",",
"cur_ftrs",
",",
"3",
",",
"1",
",",
"leaky",
"=",
"0.2",
",",
"*",
"*",
"conv_kwargs",
")",
"for",
"_",
"in",
"range",
"(",
"n_extra_layers",
")",
"]",
")",
")",
"while",
"cur_size",
">",
"4",
":",
"layers",
".",
"append",
"(",
"conv_layer",
"(",
"cur_ftrs",
",",
"cur_ftrs",
"*",
"2",
",",
"4",
",",
"2",
",",
"1",
",",
"leaky",
"=",
"0.2",
",",
"*",
"*",
"conv_kwargs",
")",
")",
"cur_ftrs",
"*=",
"2",
"cur_size",
"//=",
"2",
"layers",
"+=",
"[",
"conv2d",
"(",
"cur_ftrs",
",",
"1",
",",
"4",
",",
"padding",
"=",
"0",
")",
",",
"AvgFlatten",
"(",
")",
"]",
"return",
"nn",
".",
"Sequential",
"(",
"*",
"layers",
")"
] |
A basic critic for images `n_channels` x `in_size` x `in_size`.
|
[
"A",
"basic",
"critic",
"for",
"images",
"n_channels",
"x",
"in_size",
"x",
"in_size",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L17-L26
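The while-loop above keeps halving the spatial size and doubling the channel count until a 4x4 map remains; an illustrative trace with in_size=64 and n_features=64:

in_size, n_features = 64, 64
cur_size, cur_ftrs, convs = in_size // 2, n_features, []
while cur_size > 4:
    convs.append((cur_ftrs, cur_ftrs * 2))   # stride-2 conv: channels double, spatial size halves
    cur_ftrs *= 2; cur_size //= 2
print(convs)      # [(64, 128), (128, 256), (256, 512)]
print(cur_size)   # 4, so the final 4x4 conv maps 512 channels to a single score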
|
20,588
|
fastai/fastai
|
fastai/vision/gan.py
|
basic_generator
|
def basic_generator(in_size:int, n_channels:int, noise_sz:int=100, n_features:int=64, n_extra_layers=0, **conv_kwargs):
"A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`."
cur_size, cur_ftrs = 4, n_features//2
while cur_size < in_size: cur_size *= 2; cur_ftrs *= 2
layers = [conv_layer(noise_sz, cur_ftrs, 4, 1, transpose=True, **conv_kwargs)]
cur_size = 4
while cur_size < in_size // 2:
layers.append(conv_layer(cur_ftrs, cur_ftrs//2, 4, 2, 1, transpose=True, **conv_kwargs))
cur_ftrs //= 2; cur_size *= 2
layers += [conv_layer(cur_ftrs, cur_ftrs, 3, 1, 1, transpose=True, **conv_kwargs) for _ in range(n_extra_layers)]
layers += [conv2d_trans(cur_ftrs, n_channels, 4, 2, 1, bias=False), nn.Tanh()]
return nn.Sequential(*layers)
|
python
|
def basic_generator(in_size:int, n_channels:int, noise_sz:int=100, n_features:int=64, n_extra_layers=0, **conv_kwargs):
"A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`."
cur_size, cur_ftrs = 4, n_features//2
while cur_size < in_size: cur_size *= 2; cur_ftrs *= 2
layers = [conv_layer(noise_sz, cur_ftrs, 4, 1, transpose=True, **conv_kwargs)]
cur_size = 4
while cur_size < in_size // 2:
layers.append(conv_layer(cur_ftrs, cur_ftrs//2, 4, 2, 1, transpose=True, **conv_kwargs))
cur_ftrs //= 2; cur_size *= 2
layers += [conv_layer(cur_ftrs, cur_ftrs, 3, 1, 1, transpose=True, **conv_kwargs) for _ in range(n_extra_layers)]
layers += [conv2d_trans(cur_ftrs, n_channels, 4, 2, 1, bias=False), nn.Tanh()]
return nn.Sequential(*layers)
|
[
"def",
"basic_generator",
"(",
"in_size",
":",
"int",
",",
"n_channels",
":",
"int",
",",
"noise_sz",
":",
"int",
"=",
"100",
",",
"n_features",
":",
"int",
"=",
"64",
",",
"n_extra_layers",
"=",
"0",
",",
"*",
"*",
"conv_kwargs",
")",
":",
"cur_size",
",",
"cur_ftrs",
"=",
"4",
",",
"n_features",
"//",
"2",
"while",
"cur_size",
"<",
"in_size",
":",
"cur_size",
"*=",
"2",
"cur_ftrs",
"*=",
"2",
"layers",
"=",
"[",
"conv_layer",
"(",
"noise_sz",
",",
"cur_ftrs",
",",
"4",
",",
"1",
",",
"transpose",
"=",
"True",
",",
"*",
"*",
"conv_kwargs",
")",
"]",
"cur_size",
"=",
"4",
"while",
"cur_size",
"<",
"in_size",
"//",
"2",
":",
"layers",
".",
"append",
"(",
"conv_layer",
"(",
"cur_ftrs",
",",
"cur_ftrs",
"//",
"2",
",",
"4",
",",
"2",
",",
"1",
",",
"transpose",
"=",
"True",
",",
"*",
"*",
"conv_kwargs",
")",
")",
"cur_ftrs",
"//=",
"2",
"cur_size",
"*=",
"2",
"layers",
"+=",
"[",
"conv_layer",
"(",
"cur_ftrs",
",",
"cur_ftrs",
",",
"3",
",",
"1",
",",
"1",
",",
"transpose",
"=",
"True",
",",
"*",
"*",
"conv_kwargs",
")",
"for",
"_",
"in",
"range",
"(",
"n_extra_layers",
")",
"]",
"layers",
"+=",
"[",
"conv2d_trans",
"(",
"cur_ftrs",
",",
"n_channels",
",",
"4",
",",
"2",
",",
"1",
",",
"bias",
"=",
"False",
")",
",",
"nn",
".",
"Tanh",
"(",
")",
"]",
"return",
"nn",
".",
"Sequential",
"(",
"*",
"layers",
")"
] |
A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`.
|
[
"A",
"basic",
"generator",
"from",
"noise_sz",
"to",
"images",
"n_channels",
"x",
"in_size",
"x",
"in_size",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L28-L39
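The mirror-image trace for the generator above (illustrative: in_size=64, n_features=64), showing how the channel count is first grown to match the output size and then halved at each upsampling step:

in_size, n_features = 64, 64
cur_size, cur_ftrs = 4, n_features // 2
while cur_size < in_size: cur_size *= 2; cur_ftrs *= 2
print(cur_ftrs)   # 512 channels for the first 4x4 map produced from the noise vector
cur_size, ups = 4, []
while cur_size < in_size // 2:
    ups.append((cur_ftrs, cur_ftrs // 2))    # transposed conv: channels halve, spatial size doubles
    cur_ftrs //= 2; cur_size *= 2
print(ups)        # [(512, 256), (256, 128), (128, 64)]; a final transposed conv then emits n_channels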
|
20,589
|
fastai/fastai
|
fastai/vision/gan.py
|
gan_loss_from_func
|
def gan_loss_from_func(loss_gen, loss_crit, weights_gen:Tuple[float,float]=None):
"Define loss functions for a GAN from `loss_gen` and `loss_crit`."
def _loss_G(fake_pred, output, target, weights_gen=weights_gen):
ones = fake_pred.new_ones(fake_pred.shape[0])
weights_gen = ifnone(weights_gen, (1.,1.))
return weights_gen[0] * loss_crit(fake_pred, ones) + weights_gen[1] * loss_gen(output, target)
def _loss_C(real_pred, fake_pred):
ones = real_pred.new_ones (real_pred.shape[0])
zeros = fake_pred.new_zeros(fake_pred.shape[0])
return (loss_crit(real_pred, ones) + loss_crit(fake_pred, zeros)) / 2
return _loss_G, _loss_C
|
python
|
def gan_loss_from_func(loss_gen, loss_crit, weights_gen:Tuple[float,float]=None):
"Define loss functions for a GAN from `loss_gen` and `loss_crit`."
def _loss_G(fake_pred, output, target, weights_gen=weights_gen):
ones = fake_pred.new_ones(fake_pred.shape[0])
weights_gen = ifnone(weights_gen, (1.,1.))
return weights_gen[0] * loss_crit(fake_pred, ones) + weights_gen[1] * loss_gen(output, target)
def _loss_C(real_pred, fake_pred):
ones = real_pred.new_ones (real_pred.shape[0])
zeros = fake_pred.new_zeros(fake_pred.shape[0])
return (loss_crit(real_pred, ones) + loss_crit(fake_pred, zeros)) / 2
return _loss_G, _loss_C
|
[
"def",
"gan_loss_from_func",
"(",
"loss_gen",
",",
"loss_crit",
",",
"weights_gen",
":",
"Tuple",
"[",
"float",
",",
"float",
"]",
"=",
"None",
")",
":",
"def",
"_loss_G",
"(",
"fake_pred",
",",
"output",
",",
"target",
",",
"weights_gen",
"=",
"weights_gen",
")",
":",
"ones",
"=",
"fake_pred",
".",
"new_ones",
"(",
"fake_pred",
".",
"shape",
"[",
"0",
"]",
")",
"weights_gen",
"=",
"ifnone",
"(",
"weights_gen",
",",
"(",
"1.",
",",
"1.",
")",
")",
"return",
"weights_gen",
"[",
"0",
"]",
"*",
"loss_crit",
"(",
"fake_pred",
",",
"ones",
")",
"+",
"weights_gen",
"[",
"1",
"]",
"*",
"loss_gen",
"(",
"output",
",",
"target",
")",
"def",
"_loss_C",
"(",
"real_pred",
",",
"fake_pred",
")",
":",
"ones",
"=",
"real_pred",
".",
"new_ones",
"(",
"real_pred",
".",
"shape",
"[",
"0",
"]",
")",
"zeros",
"=",
"fake_pred",
".",
"new_zeros",
"(",
"fake_pred",
".",
"shape",
"[",
"0",
"]",
")",
"return",
"(",
"loss_crit",
"(",
"real_pred",
",",
"ones",
")",
"+",
"loss_crit",
"(",
"fake_pred",
",",
"zeros",
")",
")",
"/",
"2",
"return",
"_loss_G",
",",
"_loss_C"
] |
Define loss functions for a GAN from `loss_gen` and `loss_crit`.
|
[
"Define",
"loss",
"functions",
"for",
"a",
"GAN",
"from",
"loss_gen",
"and",
"loss_crit",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L191-L203
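A dependency-free sketch of the two closures built above; the toy squared-error and L1 functions stand in for whatever loss_crit and loss_gen the caller actually passes:

def toy_loss_crit(pred, target):           # stand-in for e.g. BCE-with-logits
    return sum((p - t) ** 2 for p, t in zip(pred, target)) / len(pred)

def toy_loss_gen(output, target):          # stand-in for e.g. a pixel L1 loss
    return sum(abs(o - t) for o, t in zip(output, target)) / len(output)

weights_gen = (1., 1.)                     # the ifnone default above
fake_pred, real_pred = [0.3, 0.6], [0.9, 0.8]
output, target = [0.2, 0.4], [0.25, 0.35]
ones, zeros = [1.0] * 2, [0.0] * 2

loss_G = weights_gen[0] * toy_loss_crit(fake_pred, ones) + weights_gen[1] * toy_loss_gen(output, target)
loss_C = (toy_loss_crit(real_pred, ones) + toy_loss_crit(fake_pred, zeros)) / 2
print(loss_G, loss_C)                      # 0.375 0.125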
|
20,590
|
fastai/fastai
|
fastai/vision/gan.py
|
gan_critic
|
def gan_critic(n_channels:int=3, nf:int=128, n_blocks:int=3, p:int=0.15):
"Critic to train a `GAN`."
layers = [
_conv(n_channels, nf, ks=4, stride=2),
nn.Dropout2d(p/2),
res_block(nf, dense=True,**_conv_args)]
nf *= 2 # after dense block
for i in range(n_blocks):
layers += [
nn.Dropout2d(p),
_conv(nf, nf*2, ks=4, stride=2, self_attention=(i==0))]
nf *= 2
layers += [
_conv(nf, 1, ks=4, bias=False, padding=0, use_activ=False),
Flatten()]
return nn.Sequential(*layers)
|
python
|
def gan_critic(n_channels:int=3, nf:int=128, n_blocks:int=3, p:int=0.15):
"Critic to train a `GAN`."
layers = [
_conv(n_channels, nf, ks=4, stride=2),
nn.Dropout2d(p/2),
res_block(nf, dense=True,**_conv_args)]
nf *= 2 # after dense block
for i in range(n_blocks):
layers += [
nn.Dropout2d(p),
_conv(nf, nf*2, ks=4, stride=2, self_attention=(i==0))]
nf *= 2
layers += [
_conv(nf, 1, ks=4, bias=False, padding=0, use_activ=False),
Flatten()]
return nn.Sequential(*layers)
|
[
"def",
"gan_critic",
"(",
"n_channels",
":",
"int",
"=",
"3",
",",
"nf",
":",
"int",
"=",
"128",
",",
"n_blocks",
":",
"int",
"=",
"3",
",",
"p",
":",
"int",
"=",
"0.15",
")",
":",
"layers",
"=",
"[",
"_conv",
"(",
"n_channels",
",",
"nf",
",",
"ks",
"=",
"4",
",",
"stride",
"=",
"2",
")",
",",
"nn",
".",
"Dropout2d",
"(",
"p",
"/",
"2",
")",
",",
"res_block",
"(",
"nf",
",",
"dense",
"=",
"True",
",",
"*",
"*",
"_conv_args",
")",
"]",
"nf",
"*=",
"2",
"# after dense block",
"for",
"i",
"in",
"range",
"(",
"n_blocks",
")",
":",
"layers",
"+=",
"[",
"nn",
".",
"Dropout2d",
"(",
"p",
")",
",",
"_conv",
"(",
"nf",
",",
"nf",
"*",
"2",
",",
"ks",
"=",
"4",
",",
"stride",
"=",
"2",
",",
"self_attention",
"=",
"(",
"i",
"==",
"0",
")",
")",
"]",
"nf",
"*=",
"2",
"layers",
"+=",
"[",
"_conv",
"(",
"nf",
",",
"1",
",",
"ks",
"=",
"4",
",",
"bias",
"=",
"False",
",",
"padding",
"=",
"0",
",",
"use_activ",
"=",
"False",
")",
",",
"Flatten",
"(",
")",
"]",
"return",
"nn",
".",
"Sequential",
"(",
"*",
"layers",
")"
] |
Critic to train a `GAN`.
|
[
"Critic",
"to",
"train",
"a",
"GAN",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L261-L276
|
20,591
|
fastai/fastai
|
fastai/vision/gan.py
|
accuracy_thresh_expand
|
def accuracy_thresh_expand(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True)->Rank0Tensor:
"Compute accuracy after expanding `y_true` to the size of `y_pred`."
if sigmoid: y_pred = y_pred.sigmoid()
return ((y_pred>thresh)==y_true[:,None].expand_as(y_pred).byte()).float().mean()
|
python
|
def accuracy_thresh_expand(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True)->Rank0Tensor:
"Compute accuracy after expanding `y_true` to the size of `y_pred`."
if sigmoid: y_pred = y_pred.sigmoid()
return ((y_pred>thresh)==y_true[:,None].expand_as(y_pred).byte()).float().mean()
|
[
"def",
"accuracy_thresh_expand",
"(",
"y_pred",
":",
"Tensor",
",",
"y_true",
":",
"Tensor",
",",
"thresh",
":",
"float",
"=",
"0.5",
",",
"sigmoid",
":",
"bool",
"=",
"True",
")",
"->",
"Rank0Tensor",
":",
"if",
"sigmoid",
":",
"y_pred",
"=",
"y_pred",
".",
"sigmoid",
"(",
")",
"return",
"(",
"(",
"y_pred",
">",
"thresh",
")",
"==",
"y_true",
"[",
":",
",",
"None",
"]",
".",
"expand_as",
"(",
"y_pred",
")",
".",
"byte",
"(",
")",
")",
".",
"float",
"(",
")",
".",
"mean",
"(",
")"
] |
Compute accuracy after expanding `y_true` to the size of `y_pred`.
|
[
"Compute",
"accuracy",
"after",
"expanding",
"y_true",
"to",
"the",
"size",
"of",
"y_pred",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L301-L304
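A plain-Python illustration of the expand-and-compare logic above: each row's single target is compared against every thresholded prediction in that row (toy numbers, predictions assumed already sigmoid-activated):

y_pred = [[0.9, 0.2], [0.7, 0.8]]   # shape (2, 2), values in [0, 1]
y_true = [1, 0]                     # one target per row, broadcast across the row
thresh = 0.5
hits = [int((p > thresh) == bool(t)) for row, t in zip(y_pred, y_true) for p in row]
print(sum(hits) / len(hits))        # 0.25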
|
20,592
|
fastai/fastai
|
fastai/vision/gan.py
|
GANModule.switch
|
def switch(self, gen_mode:bool=None):
"Put the model in generator mode if `gen_mode`, in critic mode otherwise."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
|
python
|
def switch(self, gen_mode:bool=None):
"Put the model in generator mode if `gen_mode`, in critic mode otherwise."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
|
[
"def",
"switch",
"(",
"self",
",",
"gen_mode",
":",
"bool",
"=",
"None",
")",
":",
"self",
".",
"gen_mode",
"=",
"(",
"not",
"self",
".",
"gen_mode",
")",
"if",
"gen_mode",
"is",
"None",
"else",
"gen_mode"
] |
Put the model in generator mode if `gen_mode`, in critic mode otherwise.
|
[
"Put",
"the",
"model",
"in",
"generator",
"mode",
"if",
"gen_mode",
"in",
"critic",
"mode",
"otherwise",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L51-L53
|
20,593
|
fastai/fastai
|
fastai/vision/gan.py
|
GANLoss.generator
|
def generator(self, output, target):
"Evaluate the `output` with the critic then uses `self.loss_funcG` to combine it with `target`."
fake_pred = self.gan_model.critic(output)
return self.loss_funcG(fake_pred, target, output)
|
python
|
def generator(self, output, target):
"Evaluate the `output` with the critic then uses `self.loss_funcG` to combine it with `target`."
fake_pred = self.gan_model.critic(output)
return self.loss_funcG(fake_pred, target, output)
|
[
"def",
"generator",
"(",
"self",
",",
"output",
",",
"target",
")",
":",
"fake_pred",
"=",
"self",
".",
"gan_model",
".",
"critic",
"(",
"output",
")",
"return",
"self",
".",
"loss_funcG",
"(",
"fake_pred",
",",
"target",
",",
"output",
")"
] |
Evaluate the `output` with the critic then uses `self.loss_funcG` to combine it with `target`.
|
[
"Evaluate",
"the",
"output",
"with",
"the",
"critic",
"then",
"uses",
"self",
".",
"loss_funcG",
"to",
"combine",
"it",
"with",
"target",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L61-L64
|
20,594
|
fastai/fastai
|
fastai/vision/gan.py
|
GANLoss.critic
|
def critic(self, real_pred, input):
"Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.loss_funcD`."
fake = self.gan_model.generator(input.requires_grad_(False)).requires_grad_(True)
fake_pred = self.gan_model.critic(fake)
return self.loss_funcC(real_pred, fake_pred)
|
python
|
def critic(self, real_pred, input):
"Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.loss_funcD`."
fake = self.gan_model.generator(input.requires_grad_(False)).requires_grad_(True)
fake_pred = self.gan_model.critic(fake)
return self.loss_funcC(real_pred, fake_pred)
|
[
"def",
"critic",
"(",
"self",
",",
"real_pred",
",",
"input",
")",
":",
"fake",
"=",
"self",
".",
"gan_model",
".",
"generator",
"(",
"input",
".",
"requires_grad_",
"(",
"False",
")",
")",
".",
"requires_grad_",
"(",
"True",
")",
"fake_pred",
"=",
"self",
".",
"gan_model",
".",
"critic",
"(",
"fake",
")",
"return",
"self",
".",
"loss_funcC",
"(",
"real_pred",
",",
"fake_pred",
")"
] |
Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.loss_funcD`.
|
[
"Create",
"some",
"fake_pred",
"with",
"the",
"generator",
"from",
"input",
"and",
"compare",
"them",
"to",
"real_pred",
"in",
"self",
".",
"loss_funcD",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L66-L70
|
20,595
|
fastai/fastai
|
fastai/vision/gan.py
|
GANTrainer.on_train_begin
|
def on_train_begin(self, **kwargs):
"Create the optimizers for the generator and critic if necessary, initialize smootheners."
if not getattr(self,'opt_gen',None):
self.opt_gen = self.opt.new([nn.Sequential(*flatten_model(self.generator))])
else: self.opt_gen.lr,self.opt_gen.wd = self.opt.lr,self.opt.wd
if not getattr(self,'opt_critic',None):
self.opt_critic = self.opt.new([nn.Sequential(*flatten_model(self.critic))])
else: self.opt_critic.lr,self.opt_critic.wd = self.opt.lr,self.opt.wd
self.gen_mode = self.gen_first
self.switch(self.gen_mode)
self.closses,self.glosses = [],[]
self.smoothenerG,self.smoothenerC = SmoothenValue(self.beta),SmoothenValue(self.beta)
#self.recorder.no_val=True
self.recorder.add_metric_names(['gen_loss', 'disc_loss'])
self.imgs,self.titles = [],[]
|
python
|
def on_train_begin(self, **kwargs):
"Create the optimizers for the generator and critic if necessary, initialize smootheners."
if not getattr(self,'opt_gen',None):
self.opt_gen = self.opt.new([nn.Sequential(*flatten_model(self.generator))])
else: self.opt_gen.lr,self.opt_gen.wd = self.opt.lr,self.opt.wd
if not getattr(self,'opt_critic',None):
self.opt_critic = self.opt.new([nn.Sequential(*flatten_model(self.critic))])
else: self.opt_critic.lr,self.opt_critic.wd = self.opt.lr,self.opt.wd
self.gen_mode = self.gen_first
self.switch(self.gen_mode)
self.closses,self.glosses = [],[]
self.smoothenerG,self.smoothenerC = SmoothenValue(self.beta),SmoothenValue(self.beta)
#self.recorder.no_val=True
self.recorder.add_metric_names(['gen_loss', 'disc_loss'])
self.imgs,self.titles = [],[]
|
[
"def",
"on_train_begin",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"getattr",
"(",
"self",
",",
"'opt_gen'",
",",
"None",
")",
":",
"self",
".",
"opt_gen",
"=",
"self",
".",
"opt",
".",
"new",
"(",
"[",
"nn",
".",
"Sequential",
"(",
"*",
"flatten_model",
"(",
"self",
".",
"generator",
")",
")",
"]",
")",
"else",
":",
"self",
".",
"opt_gen",
".",
"lr",
",",
"self",
".",
"opt_gen",
".",
"wd",
"=",
"self",
".",
"opt",
".",
"lr",
",",
"self",
".",
"opt",
".",
"wd",
"if",
"not",
"getattr",
"(",
"self",
",",
"'opt_critic'",
",",
"None",
")",
":",
"self",
".",
"opt_critic",
"=",
"self",
".",
"opt",
".",
"new",
"(",
"[",
"nn",
".",
"Sequential",
"(",
"*",
"flatten_model",
"(",
"self",
".",
"critic",
")",
")",
"]",
")",
"else",
":",
"self",
".",
"opt_critic",
".",
"lr",
",",
"self",
".",
"opt_critic",
".",
"wd",
"=",
"self",
".",
"opt",
".",
"lr",
",",
"self",
".",
"opt",
".",
"wd",
"self",
".",
"gen_mode",
"=",
"self",
".",
"gen_first",
"self",
".",
"switch",
"(",
"self",
".",
"gen_mode",
")",
"self",
".",
"closses",
",",
"self",
".",
"glosses",
"=",
"[",
"]",
",",
"[",
"]",
"self",
".",
"smoothenerG",
",",
"self",
".",
"smoothenerC",
"=",
"SmoothenValue",
"(",
"self",
".",
"beta",
")",
",",
"SmoothenValue",
"(",
"self",
".",
"beta",
")",
"#self.recorder.no_val=True",
"self",
".",
"recorder",
".",
"add_metric_names",
"(",
"[",
"'gen_loss'",
",",
"'disc_loss'",
"]",
")",
"self",
".",
"imgs",
",",
"self",
".",
"titles",
"=",
"[",
"]",
",",
"[",
"]"
] |
Create the optimizers for the generator and critic if necessary, initialize smootheners.
|
[
"Create",
"the",
"optimizers",
"for",
"the",
"generator",
"and",
"critic",
"if",
"necessary",
"initialize",
"smootheners",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L90-L104
|
20,596
|
fastai/fastai
|
fastai/vision/gan.py
|
GANTrainer.on_batch_begin
|
def on_batch_begin(self, last_input, last_target, **kwargs):
"Clamp the weights with `self.clip` if it's not None, return the correct input."
if self.clip is not None:
for p in self.critic.parameters(): p.data.clamp_(-self.clip, self.clip)
return {'last_input':last_input,'last_target':last_target} if self.gen_mode else {'last_input':last_target,'last_target':last_input}
|
python
|
def on_batch_begin(self, last_input, last_target, **kwargs):
"Clamp the weights with `self.clip` if it's not None, return the correct input."
if self.clip is not None:
for p in self.critic.parameters(): p.data.clamp_(-self.clip, self.clip)
return {'last_input':last_input,'last_target':last_target} if self.gen_mode else {'last_input':last_target,'last_target':last_input}
|
[
"def",
"on_batch_begin",
"(",
"self",
",",
"last_input",
",",
"last_target",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"clip",
"is",
"not",
"None",
":",
"for",
"p",
"in",
"self",
".",
"critic",
".",
"parameters",
"(",
")",
":",
"p",
".",
"data",
".",
"clamp_",
"(",
"-",
"self",
".",
"clip",
",",
"self",
".",
"clip",
")",
"return",
"{",
"'last_input'",
":",
"last_input",
",",
"'last_target'",
":",
"last_target",
"}",
"if",
"self",
".",
"gen_mode",
"else",
"{",
"'last_input'",
":",
"last_target",
",",
"'last_target'",
":",
"last_input",
"}"
] |
Clamp the weights with `self.clip` if it's not None, return the correct input.
|
[
"Clamp",
"the",
"weights",
"with",
"self",
".",
"clip",
"if",
"it",
"s",
"not",
"None",
"return",
"the",
"correct",
"input",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L110-L114
|
20,597
|
fastai/fastai
|
fastai/vision/gan.py
|
GANTrainer.on_backward_begin
|
def on_backward_begin(self, last_loss, last_output, **kwargs):
"Record `last_loss` in the proper list."
last_loss = last_loss.detach().cpu()
if self.gen_mode:
self.smoothenerG.add_value(last_loss)
self.glosses.append(self.smoothenerG.smooth)
self.last_gen = last_output.detach().cpu()
else:
self.smoothenerC.add_value(last_loss)
self.closses.append(self.smoothenerC.smooth)
|
python
|
def on_backward_begin(self, last_loss, last_output, **kwargs):
"Record `last_loss` in the proper list."
last_loss = last_loss.detach().cpu()
if self.gen_mode:
self.smoothenerG.add_value(last_loss)
self.glosses.append(self.smoothenerG.smooth)
self.last_gen = last_output.detach().cpu()
else:
self.smoothenerC.add_value(last_loss)
self.closses.append(self.smoothenerC.smooth)
|
[
"def",
"on_backward_begin",
"(",
"self",
",",
"last_loss",
",",
"last_output",
",",
"*",
"*",
"kwargs",
")",
":",
"last_loss",
"=",
"last_loss",
".",
"detach",
"(",
")",
".",
"cpu",
"(",
")",
"if",
"self",
".",
"gen_mode",
":",
"self",
".",
"smoothenerG",
".",
"add_value",
"(",
"last_loss",
")",
"self",
".",
"glosses",
".",
"append",
"(",
"self",
".",
"smoothenerG",
".",
"smooth",
")",
"self",
".",
"last_gen",
"=",
"last_output",
".",
"detach",
"(",
")",
".",
"cpu",
"(",
")",
"else",
":",
"self",
".",
"smoothenerC",
".",
"add_value",
"(",
"last_loss",
")",
"self",
".",
"closses",
".",
"append",
"(",
"self",
".",
"smoothenerC",
".",
"smooth",
")"
] |
Record `last_loss` in the proper list.
|
[
"Record",
"last_loss",
"in",
"the",
"proper",
"list",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L116-L125
|
20,598
|
fastai/fastai
|
fastai/vision/gan.py
|
GANTrainer.on_epoch_end
|
def on_epoch_end(self, pbar, epoch, last_metrics, **kwargs):
"Put the various losses in the recorder and show a sample image."
if not hasattr(self, 'last_gen') or not self.show_img: return
data = self.learn.data
img = self.last_gen[0]
norm = getattr(data,'norm',False)
if norm and norm.keywords.get('do_y',False): img = data.denorm(img)
img = data.train_ds.y.reconstruct(img)
self.imgs.append(img)
self.titles.append(f'Epoch {epoch}')
pbar.show_imgs(self.imgs, self.titles)
return add_metrics(last_metrics, [getattr(self.smoothenerG,'smooth',None),getattr(self.smoothenerC,'smooth',None)])
|
python
|
def on_epoch_end(self, pbar, epoch, last_metrics, **kwargs):
"Put the various losses in the recorder and show a sample image."
if not hasattr(self, 'last_gen') or not self.show_img: return
data = self.learn.data
img = self.last_gen[0]
norm = getattr(data,'norm',False)
if norm and norm.keywords.get('do_y',False): img = data.denorm(img)
img = data.train_ds.y.reconstruct(img)
self.imgs.append(img)
self.titles.append(f'Epoch {epoch}')
pbar.show_imgs(self.imgs, self.titles)
return add_metrics(last_metrics, [getattr(self.smoothenerG,'smooth',None),getattr(self.smoothenerC,'smooth',None)])
|
[
"def",
"on_epoch_end",
"(",
"self",
",",
"pbar",
",",
"epoch",
",",
"last_metrics",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'last_gen'",
")",
"or",
"not",
"self",
".",
"show_img",
":",
"return",
"data",
"=",
"self",
".",
"learn",
".",
"data",
"img",
"=",
"self",
".",
"last_gen",
"[",
"0",
"]",
"norm",
"=",
"getattr",
"(",
"data",
",",
"'norm'",
",",
"False",
")",
"if",
"norm",
"and",
"norm",
".",
"keywords",
".",
"get",
"(",
"'do_y'",
",",
"False",
")",
":",
"img",
"=",
"data",
".",
"denorm",
"(",
"img",
")",
"img",
"=",
"data",
".",
"train_ds",
".",
"y",
".",
"reconstruct",
"(",
"img",
")",
"self",
".",
"imgs",
".",
"append",
"(",
"img",
")",
"self",
".",
"titles",
".",
"append",
"(",
"f'Epoch {epoch}'",
")",
"pbar",
".",
"show_imgs",
"(",
"self",
".",
"imgs",
",",
"self",
".",
"titles",
")",
"return",
"add_metrics",
"(",
"last_metrics",
",",
"[",
"getattr",
"(",
"self",
".",
"smoothenerG",
",",
"'smooth'",
",",
"None",
")",
",",
"getattr",
"(",
"self",
".",
"smoothenerC",
",",
"'smooth'",
",",
"None",
")",
"]",
")"
] |
Put the various losses in the recorder and show a sample image.
|
[
"Put",
"the",
"various",
"losses",
"in",
"the",
"recorder",
"and",
"show",
"a",
"sample",
"image",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L131-L142
|
20,599
|
fastai/fastai
|
fastai/vision/gan.py
|
GANTrainer.switch
|
def switch(self, gen_mode:bool=None):
"Switch the model, if `gen_mode` is provided, in the desired mode."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
self.opt.opt = self.opt_gen.opt if self.gen_mode else self.opt_critic.opt
self._set_trainable()
self.model.switch(gen_mode)
self.loss_func.switch(gen_mode)
|
python
|
def switch(self, gen_mode:bool=None):
"Switch the model, if `gen_mode` is provided, in the desired mode."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
self.opt.opt = self.opt_gen.opt if self.gen_mode else self.opt_critic.opt
self._set_trainable()
self.model.switch(gen_mode)
self.loss_func.switch(gen_mode)
|
[
"def",
"switch",
"(",
"self",
",",
"gen_mode",
":",
"bool",
"=",
"None",
")",
":",
"self",
".",
"gen_mode",
"=",
"(",
"not",
"self",
".",
"gen_mode",
")",
"if",
"gen_mode",
"is",
"None",
"else",
"gen_mode",
"self",
".",
"opt",
".",
"opt",
"=",
"self",
".",
"opt_gen",
".",
"opt",
"if",
"self",
".",
"gen_mode",
"else",
"self",
".",
"opt_critic",
".",
"opt",
"self",
".",
"_set_trainable",
"(",
")",
"self",
".",
"model",
".",
"switch",
"(",
"gen_mode",
")",
"self",
".",
"loss_func",
".",
"switch",
"(",
"gen_mode",
")"
] |
Switch the model, if `gen_mode` is provided, in the desired mode.
|
[
"Switch",
"the",
"model",
"if",
"gen_mode",
"is",
"provided",
"in",
"the",
"desired",
"mode",
"."
] |
9fb84a5cdefe5a766cdb792b8f5d8971737b7e67
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L144-L150
|