| partition stringclasses 3 values | func_name stringlengths 1–134 | docstring stringlengths 1–46.9k | path stringlengths 4–223 | original_string stringlengths 75–104k | code stringlengths 75–104k | docstring_tokens listlengths 1–1.97k | repo stringlengths 7–55 | language stringclasses 1 value | url stringlengths 87–315 | code_tokens listlengths 19–28.4k | sha stringlengths 40 |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
tokenize
|
Convert a single string into a list of substrings
split along punctuation and word boundaries. Keep
whitespace intact by always attaching it to the
previous token.
Arguments:
----------
text : str
normalize_ascii : bool, perform some replacements
on non-ascii characters to canonicalize the
string (defaults to True).
Returns:
--------
list<str>, list of substring tokens.
|
ciseau/word_tokenizer.py
|
def tokenize(text, normalize_ascii=True):
"""
Convert a single string into a list of substrings
split along punctuation and word boundaries. Keep
whitespace intact by always attaching it to the
previous token.
Arguments:
----------
text : str
normalize_ascii : bool, perform some replacements
on non-ascii characters to canonicalize the
string (defaults to True).
Returns:
--------
list<str>, list of substring tokens.
"""
# 1. If there's no punctuation, return immediately
if no_punctuation.match(text):
return [text]
# 2. let's standardize the input text to ascii (if desired)
# Note: this will no longer respect input-to-output character positions
if normalize_ascii:
# normalize these greco-roman characters to ascii:
text = text.replace(u"œ", "oe").replace(u"æ", "ae")
# normalize dashes:
text = repeated_dash_converter.sub("-", text)
# 3. let's construct an integer array of the possible split locations:
split_locations = [UNDECIDED] * len(text)
regexes = (
pure_whitespace,
left_quote_shifter,
left_quote_converter,
left_single_quote_converter,
remaining_quote_converter,
# regex can't fix this -> regex ca n't fix this
english_nots,
# you'll dig this -> you 'll dig this
english_contractions,
# the rhino's horns -> the rhino 's horns
english_specific_appendages,
# qu'a tu fais au rhino -> qu ' a tu fais au rhino,
french_appendages
)
# 4. Mark end locations for specific regular expressions:
for regex in regexes:
mark_regex(regex, text, split_locations)
begin_end_regexes = (
multi_single_quote_finder,
right_single_quote_converter,
# use dashes as the breakpoint:
# the rhino--truck -> the rhino -- truck
simple_dash_finder if normalize_ascii else advanced_dash_finder,
numerical_expression,
url_file_finder,
shifted_ellipses,
# the #rhino! -> the # rhino ! ;
# the rino[sic] -> the rino [ sic ]
shifted_standard_punctuation
)
# 5. Mark begin and end locations for other regular expressions:
for regex in begin_end_regexes:
mark_begin_end_regex(regex, text, split_locations)
# 6. Remove splitting on exceptional uses of periods:
# I'm with Mr. -> I 'm with Mr. , I'm with Mister. -> I 'm with Mister .
protect_shorthand(text, split_locations)
if normalize_ascii:
text = dash_converter.sub("-", text)
# 7. Return the split string using the integer list:
return list(split_with_locations(text, split_locations))
|
def tokenize(text, normalize_ascii=True):
"""
Convert a single string into a list of substrings
split along punctuation and word boundaries. Keep
whitespace intact by always attaching it to the
previous token.
Arguments:
----------
text : str
normalize_ascii : bool, perform some replacements
on non-ascii characters to canonicalize the
string (defaults to True).
Returns:
--------
list<str>, list of substring tokens.
"""
# 1. If there's no punctuation, return immediately
if no_punctuation.match(text):
return [text]
# 2. let's standardize the input text to ascii (if desired)
# Note: this will no longer respect input-to-output character positions
if normalize_ascii:
# normalize these greco-roman characters to ascii:
text = text.replace(u"œ", "oe").replace(u"æ", "ae")
# normalize dashes:
text = repeated_dash_converter.sub("-", text)
# 3. let's construct an integer array of the possible split locations:
split_locations = [UNDECIDED] * len(text)
regexes = (
pure_whitespace,
left_quote_shifter,
left_quote_converter,
left_single_quote_converter,
remaining_quote_converter,
# regex can't fix this -> regex ca n't fix this
english_nots,
# you'll dig this -> you 'll dig this
english_contractions,
# the rhino's horns -> the rhino 's horns
english_specific_appendages,
# qu'a tu fais au rhino -> qu ' a tu fais au rhino,
french_appendages
)
# 4. Mark end locations for specific regular expressions:
for regex in regexes:
mark_regex(regex, text, split_locations)
begin_end_regexes = (
multi_single_quote_finder,
right_single_quote_converter,
# use dashes as the breakpoint:
# the rhino--truck -> the rhino -- truck
simple_dash_finder if normalize_ascii else advanced_dash_finder,
numerical_expression,
url_file_finder,
shifted_ellipses,
# the #rhino! -> the # rhino ! ;
# the rino[sic] -> the rino [ sic ]
shifted_standard_punctuation
)
# 5. Mark begin and end locations for other regular expressions:
for regex in begin_end_regexes:
mark_begin_end_regex(regex, text, split_locations)
# 6. Remove splitting on exceptional uses of periods:
# I'm with Mr. -> I 'm with Mr. , I'm with Mister. -> I 'm with Mister .
protect_shorthand(text, split_locations)
if normalize_ascii:
text = dash_converter.sub("-", text)
# 7. Return the split string using the integer list:
return list(split_with_locations(text, split_locations))
|
[
"Convert",
"a",
"single",
"string",
"into",
"a",
"list",
"of",
"substrings",
"split",
"along",
"punctuation",
"and",
"word",
"boundaries",
".",
"Keep",
"whitespace",
"intact",
"by",
"always",
"attaching",
"it",
"to",
"the",
"previous",
"token",
"."
] |
JonathanRaiman/ciseau
|
python
|
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/word_tokenizer.py#L185-L260
|
[
"def",
"tokenize",
"(",
"text",
",",
"normalize_ascii",
"=",
"True",
")",
":",
"# 1. If there's no punctuation, return immediately",
"if",
"no_punctuation",
".",
"match",
"(",
"text",
")",
":",
"return",
"[",
"text",
"]",
"# 2. let's standardize the input text to ascii (if desired)",
"# Note: this will no longer respect input-to-output character positions",
"if",
"normalize_ascii",
":",
"# normalize these greco-roman characters to ascii:",
"text",
"=",
"text",
".",
"replace",
"(",
"u\"œ\",",
" ",
"oe\")",
".",
"r",
"eplace(",
"u",
"\"æ\", ",
"\"",
"e\")",
"",
"# normalize dashes:",
"text",
"=",
"repeated_dash_converter",
".",
"sub",
"(",
"\"-\"",
",",
"text",
")",
"# 3. let's construct an integer array of the possible split locations:",
"split_locations",
"=",
"[",
"UNDECIDED",
"]",
"*",
"len",
"(",
"text",
")",
"regexes",
"=",
"(",
"pure_whitespace",
",",
"left_quote_shifter",
",",
"left_quote_converter",
",",
"left_single_quote_converter",
",",
"remaining_quote_converter",
",",
"# regex can't fix this -> regex ca n't fix this",
"english_nots",
",",
"# you'll dig this -> you 'll dig this",
"english_contractions",
",",
"# the rhino's horns -> the rhino 's horns",
"english_specific_appendages",
",",
"# qu'a tu fais au rhino -> qu ' a tu fais au rhino,",
"french_appendages",
")",
"# 4. Mark end locations for specific regular expressions:",
"for",
"regex",
"in",
"regexes",
":",
"mark_regex",
"(",
"regex",
",",
"text",
",",
"split_locations",
")",
"begin_end_regexes",
"=",
"(",
"multi_single_quote_finder",
",",
"right_single_quote_converter",
",",
"# use dashes as the breakpoint:",
"# the rhino--truck -> the rhino -- truck",
"simple_dash_finder",
"if",
"normalize_ascii",
"else",
"advanced_dash_finder",
",",
"numerical_expression",
",",
"url_file_finder",
",",
"shifted_ellipses",
",",
"# the #rhino! -> the # rhino ! ;",
"# the rino[sic] -> the rino [ sic ]",
"shifted_standard_punctuation",
")",
"# 5. Mark begin and end locations for other regular expressions:",
"for",
"regex",
"in",
"begin_end_regexes",
":",
"mark_begin_end_regex",
"(",
"regex",
",",
"text",
",",
"split_locations",
")",
"# 6. Remove splitting on exceptional uses of periods:",
"# I'm with Mr. -> I 'm with Mr. , I'm with Mister. -> I 'm with Mister .",
"protect_shorthand",
"(",
"text",
",",
"split_locations",
")",
"if",
"normalize_ascii",
":",
"text",
"=",
"dash_converter",
".",
"sub",
"(",
"\"-\"",
",",
"text",
")",
"# 7. Return the split string using the integer list:",
"return",
"list",
"(",
"split_with_locations",
"(",
"text",
",",
"split_locations",
")",
")"
] |
f72d1c82d85eeb3d3ac9fac17690041725402175
|
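A minimal usage sketch for the `tokenize` record above, importing from the path shown in the record. The sample sentence and the sketched output are illustrative assumptions, not taken from the dataset:

```python
# Hypothetical usage of the tokenize record above; input and output
# shown here are illustrative assumptions.
from ciseau.word_tokenizer import tokenize

tokens = tokenize("Can't we visit the rhino today?")
print(tokens)
# Per the docstring, whitespace stays attached to the previous token,
# so expect something like:
# ["Ca", "n't ", "we ", "visit ", "the ", "rhino ", "today", "?"]
```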
test
|
main
|
Main command line interface.
|
keyrings/cryptfile/convert.py
|
def main(argv=None):
"""Main command line interface."""
if argv is None:
argv = sys.argv[1:]
cli = CommandLineTool()
try:
return cli.run(argv)
except KeyboardInterrupt:
print('Canceled')
return 3
|
def main(argv=None):
"""Main command line interface."""
if argv is None:
argv = sys.argv[1:]
cli = CommandLineTool()
try:
return cli.run(argv)
except KeyboardInterrupt:
print('Canceled')
return 3
|
[
"Main",
"command",
"line",
"interface",
"."
] |
frispete/keyrings.cryptfile
|
python
|
https://github.com/frispete/keyrings.cryptfile/blob/cfa80d4848a5c3c0aeee41a954b2b120c80e69b2/keyrings/cryptfile/convert.py#L132-L142
|
[
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"if",
"argv",
"is",
"None",
":",
"argv",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"cli",
"=",
"CommandLineTool",
"(",
")",
"try",
":",
"return",
"cli",
".",
"run",
"(",
"argv",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"'Canceled'",
")",
"return",
"3"
] |
cfa80d4848a5c3c0aeee41a954b2b120c80e69b2
|
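The `main` record above follows the common `argv=None` entry-point pattern; a conventional way to wire it up (the guard below is an assumption about the surrounding script, not part of the record):

```python
import sys

if __name__ == '__main__':
    # Exit code comes from cli.run(argv), or 3 after a KeyboardInterrupt.
    sys.exit(main())
```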
test
|
ArgonAESEncryption._create_cipher
|
Create the cipher object to encrypt or decrypt a payload.
|
keyrings/cryptfile/cryptfile.py
|
def _create_cipher(self, password, salt, nonce = None):
"""
Create the cipher object to encrypt or decrypt a payload.
"""
from argon2.low_level import hash_secret_raw, Type
from Crypto.Cipher import AES
aesmode = self._get_mode(self.aesmode)
if aesmode is None: # pragma: no cover
raise ValueError('invalid AES mode: %s' % self.aesmode)
key = hash_secret_raw(
secret = password.encode(self.password_encoding),
salt = salt,
time_cost = self.time_cost,
memory_cost = self.memory_cost,
parallelism = self.parallelism,
hash_len = 16,
type = Type.ID)
return AES.new(key, aesmode, nonce)
|
def _create_cipher(self, password, salt, nonce = None):
"""
Create the cipher object to encrypt or decrypt a payload.
"""
from argon2.low_level import hash_secret_raw, Type
from Crypto.Cipher import AES
aesmode = self._get_mode(self.aesmode)
if aesmode is None: # pragma: no cover
raise ValueError('invalid AES mode: %s' % self.aesmode)
key = hash_secret_raw(
secret = password.encode(self.password_encoding),
salt = salt,
time_cost = self.time_cost,
memory_cost = self.memory_cost,
parallelism = self.parallelism,
hash_len = 16,
type = Type.ID)
return AES.new(key, aesmode, nonce)
|
[
"Create",
"the",
"cipher",
"object",
"to",
"encrypt",
"or",
"decrypt",
"a",
"payload",
"."
] |
frispete/keyrings.cryptfile
|
python
|
https://github.com/frispete/keyrings.cryptfile/blob/cfa80d4848a5c3c0aeee41a954b2b120c80e69b2/keyrings/cryptfile/cryptfile.py#L38-L58
|
[
"def",
"_create_cipher",
"(",
"self",
",",
"password",
",",
"salt",
",",
"nonce",
"=",
"None",
")",
":",
"from",
"argon2",
".",
"low_level",
"import",
"hash_secret_raw",
",",
"Type",
"from",
"Crypto",
".",
"Cipher",
"import",
"AES",
"aesmode",
"=",
"self",
".",
"_get_mode",
"(",
"self",
".",
"aesmode",
")",
"if",
"aesmode",
"is",
"None",
":",
"# pragma: no cover",
"raise",
"ValueError",
"(",
"'invalid AES mode: %s'",
"%",
"self",
".",
"aesmode",
")",
"key",
"=",
"hash_secret_raw",
"(",
"secret",
"=",
"password",
".",
"encode",
"(",
"self",
".",
"password_encoding",
")",
",",
"salt",
"=",
"salt",
",",
"time_cost",
"=",
"self",
".",
"time_cost",
",",
"memory_cost",
"=",
"self",
".",
"memory_cost",
",",
"parallelism",
"=",
"self",
".",
"parallelism",
",",
"hash_len",
"=",
"16",
",",
"type",
"=",
"Type",
".",
"ID",
")",
"return",
"AES",
".",
"new",
"(",
"key",
",",
"aesmode",
",",
"nonce",
")"
] |
cfa80d4848a5c3c0aeee41a954b2b120c80e69b2
|
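A standalone sketch of the key derivation and AEAD construction used in `_create_cipher` above. The password, Argon2 cost parameters, 16-byte salt/nonce sizes, and the choice of GCM are assumptions for illustration; the keyring class supplies its own values:

```python
import os
from argon2.low_level import hash_secret_raw, Type
from Crypto.Cipher import AES

salt, nonce = os.urandom(16), os.urandom(16)
key = hash_secret_raw(
    secret=b"correct horse",   # assumed password
    salt=salt,
    time_cost=15,              # assumed Argon2 costs; tune per host
    memory_cost=2**16,
    parallelism=2,
    hash_len=16,               # 16 bytes -> AES-128 key, as in the record
    type=Type.ID)
cipher = AES.new(key, AES.MODE_GCM, nonce=nonce)
ciphertext, tag = cipher.encrypt_and_digest(b"secret payload")
# Decryption recreates the cipher from the same key/nonce and
# verifies the authentication tag:
plain = AES.new(key, AES.MODE_GCM, nonce=nonce).decrypt_and_verify(ciphertext, tag)
assert plain == b"secret payload"
```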
test
|
ArgonAESEncryption._get_mode
|
Return the AES mode, or a list of valid AES modes, if mode == None
|
keyrings/cryptfile/cryptfile.py
|
def _get_mode(mode = None):
"""
Return the AES mode, or a list of valid AES modes, if mode == None
"""
from Crypto.Cipher import AES
AESModeMap = {
'CCM': AES.MODE_CCM,
'EAX': AES.MODE_EAX,
'GCM': AES.MODE_GCM,
'OCB': AES.MODE_OCB,
}
if mode is None:
return AESModeMap.keys()
return AESModeMap.get(mode)
|
def _get_mode(mode = None):
"""
Return the AES mode, or a list of valid AES modes, if mode == None
"""
from Crypto.Cipher import AES
AESModeMap = {
'CCM': AES.MODE_CCM,
'EAX': AES.MODE_EAX,
'GCM': AES.MODE_GCM,
'OCB': AES.MODE_OCB,
}
if mode is None:
return AESModeMap.keys()
return AESModeMap.get(mode)
|
[
"Return",
"the",
"AES",
"mode",
"or",
"a",
"list",
"of",
"valid",
"AES",
"modes",
"if",
"mode",
"==",
"None"
] |
frispete/keyrings.cryptfile
|
python
|
https://github.com/frispete/keyrings.cryptfile/blob/cfa80d4848a5c3c0aeee41a954b2b120c80e69b2/keyrings/cryptfile/cryptfile.py#L61-L76
|
[
"def",
"_get_mode",
"(",
"mode",
"=",
"None",
")",
":",
"from",
"Crypto",
".",
"Cipher",
"import",
"AES",
"AESModeMap",
"=",
"{",
"'CCM'",
":",
"AES",
".",
"MODE_CCM",
",",
"'EAX'",
":",
"AES",
".",
"MODE_EAX",
",",
"'GCM'",
":",
"AES",
".",
"MODE_GCM",
",",
"'OCB'",
":",
"AES",
".",
"MODE_OCB",
",",
"}",
"if",
"mode",
"is",
"None",
":",
"return",
"AESModeMap",
".",
"keys",
"(",
")",
"return",
"AESModeMap",
".",
"get",
"(",
"mode",
")"
] |
cfa80d4848a5c3c0aeee41a954b2b120c80e69b2
|
test
|
CryptFileKeyring.priority
|
Applicable for all platforms where the schemes that are integrated
with your environment do not fit.
|
keyrings/cryptfile/cryptfile.py
|
def priority(self):
"""
Applicable for all platforms where the schemes that are integrated
with your environment do not fit.
"""
try:
__import__('argon2.low_level')
except ImportError: # pragma: no cover
raise RuntimeError("argon2_cffi package required")
try:
__import__('Crypto.Cipher.AES')
except ImportError: # pragma: no cover
raise RuntimeError("PyCryptodome package required")
if not json: # pragma: no cover
raise RuntimeError("JSON implementation such as simplejson "
"required.")
return 2.5
|
def priority(self):
"""
Applicable for all platforms where the schemes that are integrated
with your environment do not fit.
"""
try:
__import__('argon2.low_level')
except ImportError: # pragma: no cover
raise RuntimeError("argon2_cffi package required")
try:
__import__('Crypto.Cipher.AES')
except ImportError: # pragma: no cover
raise RuntimeError("PyCryptodome package required")
if not json: # pragma: no cover
raise RuntimeError("JSON implementation such as simplejson "
"required.")
return 2.5
|
[
"Applicable",
"for",
"all",
"platforms",
"where",
"the",
"schemes",
"that",
"are",
"integrated",
"with",
"your",
"environment",
"does",
"not",
"fit",
"."
] |
frispete/keyrings.cryptfile
|
python
|
https://github.com/frispete/keyrings.cryptfile/blob/cfa80d4848a5c3c0aeee41a954b2b120c80e69b2/keyrings/cryptfile/cryptfile.py#L90-L106
|
[
"def",
"priority",
"(",
"self",
")",
":",
"try",
":",
"__import__",
"(",
"'argon2.low_level'",
")",
"except",
"ImportError",
":",
"# pragma: no cover",
"raise",
"RuntimeError",
"(",
"\"argon2_cffi package required\"",
")",
"try",
":",
"__import__",
"(",
"'Crypto.Cipher.AES'",
")",
"except",
"ImportError",
":",
"# pragma: no cover",
"raise",
"RuntimeError",
"(",
"\"PyCryptodome package required\"",
")",
"if",
"not",
"json",
":",
"# pragma: no cover",
"raise",
"RuntimeError",
"(",
"\"JSON implementation such as simplejson \"",
"\"required.\"",
")",
"return",
"2.5"
] |
cfa80d4848a5c3c0aeee41a954b2b120c80e69b2
|
test
|
CryptFileKeyring._check_scheme
|
check for a valid scheme
raise AttributeError if missing
raise ValueError if not valid
|
keyrings/cryptfile/cryptfile.py
|
def _check_scheme(self, config):
"""
check for a valid scheme
raise AttributeError if missing
raise ValueError if not valid
"""
try:
scheme = config.get(
escape_for_ini('keyring-setting'),
escape_for_ini('scheme'),
)
except (configparser.NoSectionError, configparser.NoOptionError):
raise AttributeError("Encryption scheme missing")
# extract AES mode
aesmode = scheme[-3:]
if aesmode not in self._get_mode():
raise ValueError("Encryption scheme invalid: %s" % (aesmode))
# setup AES mode
self.aesmode = aesmode
# remove pointless crypto module name
if scheme.startswith('PyCryptodome '):
scheme = scheme[13:]
# check other scheme properties
if scheme != self.scheme:
raise ValueError("Encryption scheme mismatch "
"(exp.: %s, found: %s)" % (self.scheme, scheme))
|
def _check_scheme(self, config):
"""
check for a valid scheme
raise AttributeError if missing
raise ValueError if not valid
"""
try:
scheme = config.get(
escape_for_ini('keyring-setting'),
escape_for_ini('scheme'),
)
except (configparser.NoSectionError, configparser.NoOptionError):
raise AttributeError("Encryption scheme missing")
# extract AES mode
aesmode = scheme[-3:]
if aesmode not in self._get_mode():
raise ValueError("Encryption scheme invalid: %s" % (aesmode))
# setup AES mode
self.aesmode = aesmode
# remove pointless crypto module name
if scheme.startswith('PyCryptodome '):
scheme = scheme[13:]
# check other scheme properties
if scheme != self.scheme:
raise ValueError("Encryption scheme mismatch "
"(exp.: %s, found: %s)" % (self.scheme, scheme))
|
[
"check",
"for",
"a",
"valid",
"scheme"
] |
frispete/keyrings.cryptfile
|
python
|
https://github.com/frispete/keyrings.cryptfile/blob/cfa80d4848a5c3c0aeee41a954b2b120c80e69b2/keyrings/cryptfile/cryptfile.py#L132-L162
|
[
"def",
"_check_scheme",
"(",
"self",
",",
"config",
")",
":",
"try",
":",
"scheme",
"=",
"config",
".",
"get",
"(",
"escape_for_ini",
"(",
"'keyring-setting'",
")",
",",
"escape_for_ini",
"(",
"'scheme'",
")",
",",
")",
"except",
"(",
"configparser",
".",
"NoSectionError",
",",
"configparser",
".",
"NoOptionError",
")",
":",
"raise",
"AttributeError",
"(",
"\"Encryption scheme missing\"",
")",
"# extract AES mode",
"aesmode",
"=",
"scheme",
"[",
"-",
"3",
":",
"]",
"if",
"aesmode",
"not",
"in",
"self",
".",
"_get_mode",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Encryption scheme invalid: %s\"",
"%",
"(",
"aesmode",
")",
")",
"# setup AES mode",
"self",
".",
"aesmode",
"=",
"aesmode",
"# remove pointless crypto module name",
"if",
"scheme",
".",
"startswith",
"(",
"'PyCryptodome '",
")",
":",
"scheme",
"=",
"scheme",
"[",
"13",
":",
"]",
"# check other scheme properties",
"if",
"scheme",
"!=",
"self",
".",
"scheme",
":",
"raise",
"ValueError",
"(",
"\"Encryption scheme mismatch \"",
"\"(exp.: %s, found: %s)\"",
"%",
"(",
"self",
".",
"scheme",
",",
"scheme",
")",
")"
] |
cfa80d4848a5c3c0aeee41a954b2b120c80e69b2
|
test
|
startLogging
|
Starts the global Twisted logger subsystem, optionally with
stdout and/or a file specified in the config file
|
examples/subscriber.py
|
def startLogging(console=True, filepath=None):
'''
Starts the global Twisted logger subsystem, optionally with
stdout and/or a file specified in the config file
'''
global logLevelFilterPredicate
observers = []
if console:
observers.append( FilteringLogObserver(observer=textFileLogObserver(sys.stdout),
predicates=[logLevelFilterPredicate] ))
if filepath is not None and filepath != "":
observers.append( FilteringLogObserver(observer=textFileLogObserver(open(filepath,'a')),
predicates=[logLevelFilterPredicate] ))
globalLogBeginner.beginLoggingTo(observers)
|
def startLogging(console=True, filepath=None):
'''
Starts the global Twisted logger subsystem, optionally with
stdout and/or a file specified in the config file
'''
global logLevelFilterPredicate
observers = []
if console:
observers.append( FilteringLogObserver(observer=textFileLogObserver(sys.stdout),
predicates=[logLevelFilterPredicate] ))
if filepath is not None and filepath != "":
observers.append( FilteringLogObserver(observer=textFileLogObserver(open(filepath,'a')),
predicates=[logLevelFilterPredicate] ))
globalLogBeginner.beginLoggingTo(observers)
|
[
"Starts",
"the",
"global",
"Twisted",
"logger",
"subsystem",
"with",
"maybe",
"stdout",
"and",
"/",
"or",
"a",
"file",
"specified",
"in",
"the",
"config",
"file"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/examples/subscriber.py#L27-L42
|
[
"def",
"startLogging",
"(",
"console",
"=",
"True",
",",
"filepath",
"=",
"None",
")",
":",
"global",
"logLevelFilterPredicate",
"observers",
"=",
"[",
"]",
"if",
"console",
":",
"observers",
".",
"append",
"(",
"FilteringLogObserver",
"(",
"observer",
"=",
"textFileLogObserver",
"(",
"sys",
".",
"stdout",
")",
",",
"predicates",
"=",
"[",
"logLevelFilterPredicate",
"]",
")",
")",
"if",
"filepath",
"is",
"not",
"None",
"and",
"filepath",
"!=",
"\"\"",
":",
"observers",
".",
"append",
"(",
"FilteringLogObserver",
"(",
"observer",
"=",
"textFileLogObserver",
"(",
"open",
"(",
"filepath",
",",
"'a'",
")",
")",
",",
"predicates",
"=",
"[",
"logLevelFilterPredicate",
"]",
")",
")",
"globalLogBeginner",
".",
"beginLoggingTo",
"(",
"observers",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
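`startLogging` above references a module-global `logLevelFilterPredicate` that the extracted lines do not show. A minimal compatible setup, assuming the predicate is defined as in typical `twisted.logger` usage (the full example script defines its own):

```python
from twisted.logger import Logger, LogLevel, LogLevelFilterPredicate

# Assumed module global; the example script's other imports
# (sys, FilteringLogObserver, textFileLogObserver, globalLogBeginner)
# are also taken as given.
logLevelFilterPredicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.info)
log = Logger(namespace='subscriber')

startLogging(console=True)      # begin logging to stdout only
log.info("logging started")
```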
test
|
setLogLevel
|
Set a new log level for a given namespace
LevelStr is: 'critical', 'error', 'warn', 'info', 'debug'
|
examples/subscriber.py
|
def setLogLevel(namespace=None, levelStr='info'):
'''
Set a new log level for a given namespace
LevelStr is: 'critical', 'error', 'warn', 'info', 'debug'
'''
level = LogLevel.levelWithName(levelStr)
logLevelFilterPredicate.setLogLevelForNamespace(namespace=namespace, level=level)
|
def setLogLevel(namespace=None, levelStr='info'):
'''
Set a new log level for a given namespace
LevelStr is: 'critical', 'error', 'warn', 'info', 'debug'
'''
level = LogLevel.levelWithName(levelStr)
logLevelFilterPredicate.setLogLevelForNamespace(namespace=namespace, level=level)
|
[
"Set",
"a",
"new",
"log",
"level",
"for",
"a",
"given",
"namespace",
"LevelStr",
"is",
":",
"critical",
"error",
"warn",
"info",
"debug"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/examples/subscriber.py#L45-L51
|
[
"def",
"setLogLevel",
"(",
"namespace",
"=",
"None",
",",
"levelStr",
"=",
"'info'",
")",
":",
"level",
"=",
"LogLevel",
".",
"levelWithName",
"(",
"levelStr",
")",
"logLevelFilterPredicate",
".",
"setLogLevelForNamespace",
"(",
"namespace",
"=",
"namespace",
",",
"level",
"=",
"level",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
MQTTService.connectToBroker
|
Connect to MQTT broker
|
examples/subscriber.py
|
def connectToBroker(self, protocol):
'''
Connect to MQTT broker
'''
self.protocol = protocol
self.protocol.onPublish = self.onPublish
self.protocol.onDisconnection = self.onDisconnection
self.protocol.setWindowSize(3)
try:
yield self.protocol.connect("TwistedMQTT-subs", keepalive=60)
yield self.subscribe()
except Exception as e:
log.error("Connecting to {broker} raised {excp!s}",
broker=BROKER, excp=e)
else:
log.info("Connected and subscribed to {broker}", broker=BROKER)
|
def connectToBroker(self, protocol):
'''
Connect to MQTT broker
'''
self.protocol = protocol
self.protocol.onPublish = self.onPublish
self.protocol.onDisconnection = self.onDisconnection
self.protocol.setWindowSize(3)
try:
yield self.protocol.connect("TwistedMQTT-subs", keepalive=60)
yield self.subscribe()
except Exception as e:
log.error("Connecting to {broker} raised {excp!s}",
broker=BROKER, excp=e)
else:
log.info("Connected and subscribed to {broker}", broker=BROKER)
|
[
"Connect",
"to",
"MQTT",
"broker"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/examples/subscriber.py#L72-L87
|
[
"def",
"connectToBroker",
"(",
"self",
",",
"protocol",
")",
":",
"self",
".",
"protocol",
"=",
"protocol",
"self",
".",
"protocol",
".",
"onPublish",
"=",
"self",
".",
"onPublish",
"self",
".",
"protocol",
".",
"onDisconnection",
"=",
"self",
".",
"onDisconnection",
"self",
".",
"protocol",
".",
"setWindowSize",
"(",
"3",
")",
"try",
":",
"yield",
"self",
".",
"protocol",
".",
"connect",
"(",
"\"TwistedMQTT-subs\"",
",",
"keepalive",
"=",
"60",
")",
"yield",
"self",
".",
"subscribe",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Connecting to {broker} raised {excp!s}\"",
",",
"broker",
"=",
"BROKER",
",",
"excp",
"=",
"e",
")",
"else",
":",
"log",
".",
"info",
"(",
"\"Connected and subscribed to {broker}\"",
",",
"broker",
"=",
"BROKER",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
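The `yield`/`try`/`except` pattern in `connectToBroker` above only works inside a generator-based coroutine; the extracted lines omit the decorator, which in Twisted is presumably `defer.inlineCallbacks`:

```python
from twisted.internet import defer

@defer.inlineCallbacks            # presumed decorator, outside the extracted range
def connectToBroker(self, protocol):
    yield protocol.connect("TwistedMQTT-subs", keepalive=60)
```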
test
|
MQTTService.onPublish
|
Callback receiving messages from the publisher
|
examples/subscriber.py
|
def onPublish(self, topic, payload, qos, dup, retain, msgId):
'''
Callback receiving messages from the publisher
'''
log.debug("msg={payload}", payload=payload)
|
def onPublish(self, topic, payload, qos, dup, retain, msgId):
'''
Callback receiving messages from the publisher
'''
log.debug("msg={payload}", payload=payload)
|
[
"Callback",
"Receiving",
"messages",
"from",
"publisher"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/examples/subscriber.py#L117-L121
|
[
"def",
"onPublish",
"(",
"self",
",",
"topic",
",",
"payload",
",",
"qos",
",",
"dup",
",",
"retain",
",",
"msgId",
")",
":",
"log",
".",
"debug",
"(",
"\"msg={payload}\"",
",",
"payload",
"=",
"payload",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
MQTTService.onDisconnection
|
get notified of disconnections
and get a deferred for a new protocol object (next retry)
|
examples/subscriber.py
|
def onDisconnection(self, reason):
'''
get notified of disconnections
and get a deferred for a new protocol object (next retry)
'''
log.debug("<Connection was lost !> <reason={r}>", r=reason)
self.whenConnected().addCallback(self.connectToBroker)
|
def onDisconnection(self, reason):
'''
get notified of disconnections
and get a deferred for a new protocol object (next retry)
'''
log.debug("<Connection was lost !> <reason={r}>", r=reason)
self.whenConnected().addCallback(self.connectToBroker)
|
[
"get",
"notfied",
"of",
"disconnections",
"and",
"get",
"a",
"deferred",
"for",
"a",
"new",
"protocol",
"object",
"(",
"next",
"retry",
")"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/examples/subscriber.py#L124-L130
|
[
"def",
"onDisconnection",
"(",
"self",
",",
"reason",
")",
":",
"log",
".",
"debug",
"(",
"\"<Connection was lost !> <reason={r}>\"",
",",
"r",
"=",
"reason",
")",
"self",
".",
"whenConnected",
"(",
")",
".",
"addCallback",
"(",
"self",
".",
"connectToBroker",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
MQTTService.connectToBroker
|
Connect to MQTT broker
|
examples/pubsubs.py
|
def connectToBroker(self, protocol):
'''
Connect to MQTT broker
'''
self.protocol = protocol
self.protocol.onPublish = self.onPublish
self.protocol.onDisconnection = self.onDisconnection
self.protocol.setWindowSize(3)
self.task = task.LoopingCall(self.publish)
self.task.start(5.0, now=False)
try:
yield self.protocol.connect("TwistedMQTT-pubsubs", keepalive=60)
yield self.subscribe()
except Exception as e:
log.error("Connecting to {broker} raised {excp!s}",
broker=BROKER, excp=e)
else:
log.info("Connected and subscribed to {broker}", broker=BROKER)
|
def connectToBroker(self, protocol):
'''
Connect to MQTT broker
'''
self.protocol = protocol
self.protocol.onPublish = self.onPublish
self.protocol.onDisconnection = self.onDisconnection
self.protocol.setWindowSize(3)
self.task = task.LoopingCall(self.publish)
self.task.start(5.0, now=False)
try:
yield self.protocol.connect("TwistedMQTT-pubsubs", keepalive=60)
yield self.subscribe()
except Exception as e:
log.error("Connecting to {broker} raised {excp!s}",
broker=BROKER, excp=e)
else:
log.info("Connected and subscribed to {broker}", broker=BROKER)
|
[
"Connect",
"to",
"MQTT",
"broker"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/examples/pubsubs.py#L72-L89
|
[
"def",
"connectToBroker",
"(",
"self",
",",
"protocol",
")",
":",
"self",
".",
"protocol",
"=",
"protocol",
"self",
".",
"protocol",
".",
"onPublish",
"=",
"self",
".",
"onPublish",
"self",
".",
"protocol",
".",
"onDisconnection",
"=",
"self",
".",
"onDisconnection",
"self",
".",
"protocol",
".",
"setWindowSize",
"(",
"3",
")",
"self",
".",
"task",
"=",
"task",
".",
"LoopingCall",
"(",
"self",
".",
"publish",
")",
"self",
".",
"task",
".",
"start",
"(",
"5.0",
",",
"now",
"=",
"False",
")",
"try",
":",
"yield",
"self",
".",
"protocol",
".",
"connect",
"(",
"\"TwistedMQTT-pubsubs\"",
",",
"keepalive",
"=",
"60",
")",
"yield",
"self",
".",
"subscribe",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Connecting to {broker} raised {excp!s}\"",
",",
"broker",
"=",
"BROKER",
",",
"excp",
"=",
"e",
")",
"else",
":",
"log",
".",
"info",
"(",
"\"Connected and subscribed to {broker}\"",
",",
"broker",
"=",
"BROKER",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
MQTTFactory.makeId
|
Produce ids for Protocol packets, outliving their sessions
|
mqtt/client/factory.py
|
def makeId(self):
'''Produce ids for Protocol packets, outliving their sessions'''
self.id = (self.id + 1) % 65536
self.id = self.id or 1 # avoid id 0
return self.id
|
def makeId(self):
'''Produce ids for Protocol packets, outliving their sessions'''
self.id = (self.id + 1) % 65536
self.id = self.id or 1 # avoid id 0
return self.id
|
[
"Produce",
"ids",
"for",
"Protocol",
"packets",
"outliving",
"their",
"sessions"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/client/factory.py#L116-L120
|
[
"def",
"makeId",
"(",
"self",
")",
":",
"self",
".",
"id",
"=",
"(",
"self",
".",
"id",
"+",
"1",
")",
"%",
"65536",
"self",
".",
"id",
"=",
"self",
".",
"id",
"or",
"1",
"# avoid id 0",
"return",
"self",
".",
"id"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
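A quick check of the `makeId` arithmetic above: ids cycle through 1..65535 and skip 0, which MQTT reserves as invalid for packet ids:

```python
_id = 65534
for _ in range(3):
    _id = (_id + 1) % 65536
    _id = _id or 1        # avoid id 0, mirroring makeId()
    print(_id)            # prints 65535, then 1, then 2
```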
test
|
BaseState.connect
|
Send a CONNECT control packet.
|
mqtt/client/base.py
|
def connect(self, request):
'''
Send a CONNECT control packet.
'''
state = self.__class__.__name__
return defer.fail(MQTTStateError("Unexpected connect() operation", state))
|
def connect(self, request):
'''
Send a CONNECT control packet.
'''
state = self.__class__.__name__
return defer.fail(MQTTStateError("Unexpected connect() operation", state))
|
[
"Send",
"a",
"CONNECT",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/client/base.py#L100-L105
|
[
"def",
"connect",
"(",
"self",
",",
"request",
")",
":",
"state",
"=",
"self",
".",
"__class__",
".",
"__name__",
"return",
"defer",
".",
"fail",
"(",
"MQTTStateError",
"(",
"\"Unexpected connect() operation\"",
",",
"state",
")",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
BaseState.handleCONNACK
|
Handles CONNACK packet from the server
|
mqtt/client/base.py
|
def handleCONNACK(self, response):
'''
Handles CONNACK packet from the server
'''
state = self.__class__.__name__
log.error("Unexpected {packet:7} packet received in {log_source}", packet="CONNACK")
|
def handleCONNACK(self, response):
'''
Handles CONNACK packet from the server
'''
state = self.__class__.__name__
log.error("Unexpected {packet:7} packet received in {log_source}", packet="CONNACK")
|
[
"Handles",
"CONNACK",
"packet",
"from",
"the",
"server"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/client/base.py#L138-L143
|
[
"def",
"handleCONNACK",
"(",
"self",
",",
"response",
")",
":",
"state",
"=",
"self",
".",
"__class__",
".",
"__name__",
"log",
".",
"error",
"(",
"\"Unexpected {packet:7} packet received in {log_source}\"",
",",
"packet",
"=",
"\"CONNACK\"",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
IMQTTClientControl.connect
|
Abstract
========
Send a CONNECT control packet.
Description
===========
After a Network Connection is established by a Client to a Server,
the first Packet sent from the Client to the Server MUST be a CONNECT
Packet [MQTT-3.1.0-1].
A Client can only send the CONNECT Packet once over a
Network Connection. The Server MUST process a second CONNECT Packet
sent from a Client as a protocol violation and disconnect the Client.
If the Client does not receive a CONNACK Packet from the Server within
a reasonable amount of time, the Client SHOULD close the Network
Connection. A "reasonable" amount of time depends on the type of
application and the communications infrastructure.
Signature
=========
@param clientId: client ID for the connection (UTF-8 string)
@param keepalive: connection keepalive period in seconds.
@param willTopic: last will topic (UTF-8 string)
@param willMessage: last will message (UTF-8 string)
@param willQoS: last will QoS level.
@param willRetain: last will retain flag.
@param cleanStart: session clean flag.
@return: a Deferred whose callback will be called with a tuple
C{(returnCode, sessionFlag)} when the connection completes.
The Deferred errback with a C{MQTTError} exception will be called
if no connection ack is received from the server within a keepalive
period. If no keepalive is used, a max of 10 seconds is used.
|
mqtt/client/interfaces.py
|
def connect(clientId, keepalive=0, willTopic=None,
willMessage=None, willQoS=0, willRetain=False,
username=None, password=None, cleanStart=True, version=mqtt.v311):
'''
Abstract
========
Send a CONNECT control packet.
Description
===========
After a Network Connection is established by a Client to a Server,
the first Packet sent from the Client to the Server MUST be a CONNECT
Packet [MQTT-3.1.0-1].
A Client can only send the CONNECT Packet once over a
Network Connection. The Server MUST process a second CONNECT Packet
sent from a Client as a protocol violation and disconnect the Client.
If the Client does not receive a CONNACK Packet from the Server within
a reasonable amount of time, the Client SHOULD close the Network
Connection. A "reasonable" amount of time depends on the type of
application and the communications infrastructure.
Signature
=========
@param clientId: client ID for the connection (UTF-8 string)
@param keepalive: connection keepalive period in seconds.
@param willTopic: last will topic (UTF-8 string)
@param willMessage: last will message (UTF-8 string)
@param willQoS: last will QoS level.
@param willRetain: last will retain flag.
@param cleanStart: session clean flag.
@return: a Deferred whose callback will be called with a tuple
C{(returnCode, sessionFlag)} when the connection completes.
The Deferred errback with a C{MQTTError} exception will be called
if no connection ack is received from the server within a keepalive
period. If no keepalive is used, a max of 10 seconds is used.
'''
|
def connect(clientId, keepalive=0, willTopic=None,
willMessage=None, willQoS=0, willRetain=False,
username=None, password=None, cleanStart=True, version=mqtt.v311):
'''
Abstract
========
Send a CONNECT control packet.
Description
===========
After a Network Connection is established by a Client to a Server,
the first Packet sent from the Client to the Server MUST be a CONNECT
Packet [MQTT-3.1.0-1].
A Client can only send the CONNECT Packet once over a
Network Connection. The Server MUST process a second CONNECT Packet
sent from a Client as a protocol violation and disconnect the Client.
If the Client does not receive a CONNACK Packet from the Server within
a reasonable amount of time, the Client SHOULD close the Network
Connection. A "reasonable" amount of time depends on the type of
application and the communications infrastructure.
Signature
=========
@param clientId: client ID for the connection (UTF-8 string)
@param keepalive: connection keepalive period in seconds.
@param willTopic: last will topic (UTF-8 string)
@param willMessage: last will message (UTF-8 string)
@param willQoS: last will QoS level.
@param willRetain: last will retain flag.
@param cleanStart: session clean flag.
@return: a Deferred whose callback will be called with a tuple
C{(returnCode, sessionFlag)} when the connection completes.
The Deferred errback with a C{MQTTError} exception will be called
if no connection ack is received from the server within a keepalive
period. If no keepalive is used, a max of 10 seconds is used.
'''
|
[
"Abstract",
"========"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/client/interfaces.py#L44-L84
|
[
"def",
"connect",
"(",
"clientId",
",",
"keepalive",
"=",
"0",
",",
"willTopic",
"=",
"None",
",",
"willMessage",
"=",
"None",
",",
"willQoS",
"=",
"0",
",",
"willRetain",
"=",
"False",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"cleanStart",
"=",
"True",
",",
"version",
"=",
"mqtt",
".",
"v311",
")",
":"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
encodeString
|
Encode a UTF-8 string into MQTT format.
Returns a bytearray
|
mqtt/pdu.py
|
def encodeString(string):
'''
Encode a UTF-8 string into MQTT format.
Returns a bytearray
'''
encoded = bytearray(2)
encoded.extend(bytearray(string, encoding='utf-8'))
l = len(encoded)-2
if(l > 65535):
raise StringValueError(l)
encoded[0] = l >> 8
encoded[1] = l & 0xFF
return encoded
|
def encodeString(string):
'''
Encode a UTF-8 string into MQTT format.
Returns a bytearray
'''
encoded = bytearray(2)
encoded.extend(bytearray(string, encoding='utf-8'))
l = len(encoded)-2
if(l > 65535):
raise StringValueError(l)
encoded[0] = l >> 8
encoded[1] = l & 0xFF
return encoded
|
[
"Encode",
"an",
"UTF",
"-",
"8",
"string",
"into",
"MQTT",
"format",
".",
"Returns",
"a",
"bytearray"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L51-L63
|
[
"def",
"encodeString",
"(",
"string",
")",
":",
"encoded",
"=",
"bytearray",
"(",
"2",
")",
"encoded",
".",
"extend",
"(",
"bytearray",
"(",
"string",
",",
"encoding",
"=",
"'utf-8'",
")",
")",
"l",
"=",
"len",
"(",
"encoded",
")",
"-",
"2",
"if",
"(",
"l",
">",
"65535",
")",
":",
"raise",
"StringValueError",
"(",
"l",
")",
"encoded",
"[",
"0",
"]",
"=",
"l",
">>",
"8",
"encoded",
"[",
"1",
"]",
"=",
"l",
"&",
"0xFF",
"return",
"encoded"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
decodeString
|
Decodes a UTF-8 string from an encoded MQTT bytearray.
Returns the decoded string and remaining bytearray to be parsed
|
mqtt/pdu.py
|
def decodeString(encoded):
'''
Decodes a UTF-8 string from an encoded MQTT bytearray.
Returns the decoded string and remaining bytearray to be parsed
'''
length = encoded[0]*256 + encoded[1]
return (encoded[2:2+length].decode('utf-8'), encoded[2+length:])
|
def decodeString(encoded):
'''
Decodes a UTF-8 string from an encoded MQTT bytearray.
Returns the decoded string and remaining bytearray to be parsed
'''
length = encoded[0]*256 + encoded[1]
return (encoded[2:2+length].decode('utf-8'), encoded[2+length:])
|
[
"Decodes",
"an",
"UTF",
"-",
"8",
"string",
"from",
"an",
"encoded",
"MQTT",
"bytearray",
".",
"Returns",
"the",
"decoded",
"string",
"and",
"renaining",
"bytearray",
"to",
"be",
"parsed"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L65-L71
|
[
"def",
"decodeString",
"(",
"encoded",
")",
":",
"length",
"=",
"encoded",
"[",
"0",
"]",
"*",
"256",
"+",
"encoded",
"[",
"1",
"]",
"return",
"(",
"encoded",
"[",
"2",
":",
"2",
"+",
"length",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"encoded",
"[",
"2",
"+",
"length",
":",
"]",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
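A roundtrip check of the two string codecs above, with values computed from the definitions as shown: `encodeString` prefixes the UTF-8 bytes with a two-byte big-endian length, and `decodeString` strips it off again:

```python
encoded = encodeString("hello")
assert encoded == bytearray(b'\x00\x05hello')   # length prefix 0x0005 + UTF-8 bytes
text, rest = decodeString(encoded)
assert text == "hello" and len(rest) == 0       # rest feeds further parsing
```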
test
|
encode16Int
|
Encodes a 16 bit unsigned integer into MQTT format.
Returns a bytearray
|
mqtt/pdu.py
|
def encode16Int(value):
'''
Encodes a 16 bit unsigned integer into MQTT format.
Returns a bytearray
'''
value = int(value)
encoded = bytearray(2)
encoded[0] = value >> 8
encoded[1] = value & 0xFF
return encoded
|
def encode16Int(value):
'''
Encodes a 16 bit unsigned integer into MQTT format.
Returns a bytearray
'''
value = int(value)
encoded = bytearray(2)
encoded[0] = value >> 8
encoded[1] = value & 0xFF
return encoded
|
[
"Encodes",
"a",
"16",
"bit",
"unsigned",
"integer",
"into",
"MQTT",
"format",
".",
"Returns",
"a",
"bytearray"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L74-L83
|
[
"def",
"encode16Int",
"(",
"value",
")",
":",
"value",
"=",
"int",
"(",
"value",
")",
"encoded",
"=",
"bytearray",
"(",
"2",
")",
"encoded",
"[",
"0",
"]",
"=",
"value",
">>",
"8",
"encoded",
"[",
"1",
"]",
"=",
"value",
"&",
"0xFF",
"return",
"encoded"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
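`encode16Int` above emits the value big-endian in two bytes; note it does not range-check, so callers must keep values under 65536:

```python
assert encode16Int(1024) == bytearray(b'\x04\x00')
assert encode16Int(0xFFFF) == bytearray(b'\xff\xff')
```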
test
|
encodeLength
|
Encodes value into a multibyte sequence defined by MQTT protocol.
Used to encode packet length fields.
|
mqtt/pdu.py
|
def encodeLength(value):
'''
Encodes value into a multibyte sequence defined by MQTT protocol.
Used to encode packet length fields.
'''
encoded = bytearray()
while True:
digit = value % 128
value //= 128
if value > 0:
digit |= 128
encoded.append(digit)
if value <= 0:
break
return encoded
|
def encodeLength(value):
'''
Encodes value into a multibyte sequence defined by MQTT protocol.
Used to encode packet length fields.
'''
encoded = bytearray()
while True:
digit = value % 128
value //= 128
if value > 0:
digit |= 128
encoded.append(digit)
if value <= 0:
break
return encoded
|
[
"Encodes",
"value",
"into",
"a",
"multibyte",
"sequence",
"defined",
"by",
"MQTT",
"protocol",
".",
"Used",
"to",
"encode",
"packet",
"length",
"fields",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L92-L106
|
[
"def",
"encodeLength",
"(",
"value",
")",
":",
"encoded",
"=",
"bytearray",
"(",
")",
"while",
"True",
":",
"digit",
"=",
"value",
"%",
"128",
"value",
"//=",
"128",
"if",
"value",
">",
"0",
":",
"digit",
"|=",
"128",
"encoded",
".",
"append",
"(",
"digit",
")",
"if",
"value",
"<=",
"0",
":",
"break",
"return",
"encoded"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
decodeLength
|
Decodes a variable length value defined in the MQTT protocol.
This value typically represents remaining field lengths
|
mqtt/pdu.py
|
def decodeLength(encoded):
'''
Decodes a variable length value defined in the MQTT protocol.
This value typically represents remaining field lengths
'''
value = 0
multiplier = 1
for i in encoded:
value += (i & 0x7F) * multiplier
multiplier *= 0x80
if (i & 0x80) != 0x80:
break
return value
|
def decodeLength(encoded):
'''
Decodes a variable length value defined in the MQTT protocol.
This value typically represents remaining field lengths
'''
value = 0
multiplier = 1
for i in encoded:
value += (i & 0x7F) * multiplier
multiplier *= 0x80
if (i & 0x80) != 0x80:
break
return value
|
[
"Decodes",
"a",
"variable",
"length",
"value",
"defined",
"in",
"the",
"MQTT",
"protocol",
".",
"This",
"value",
"typically",
"represents",
"remaining",
"field",
"lengths"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L109-L121
|
[
"def",
"decodeLength",
"(",
"encoded",
")",
":",
"value",
"=",
"0",
"multiplier",
"=",
"1",
"for",
"i",
"in",
"encoded",
":",
"value",
"+=",
"(",
"i",
"&",
"0x7F",
")",
"*",
"multiplier",
"multiplier",
"*=",
"0x80",
"if",
"(",
"i",
"&",
"0x80",
")",
"!=",
"0x80",
":",
"break",
"return",
"value"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
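`encodeLength`/`decodeLength` above implement MQTT's variable-length "remaining length" integer: each byte carries 7 payload bits, and the high bit marks a continuation byte. A worked roundtrip:

```python
assert encodeLength(321) == bytearray([0xC1, 0x02])   # 321 = 65 + 2*128
assert decodeLength(bytearray([0xC1, 0x02])) == 321
assert encodeLength(0) == bytearray([0x00])           # zero still emits one byte
```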
test
|
DISCONNECT.encode
|
Encode and store a DISCONNECT control packet.
|
mqtt/pdu.py
|
def encode(self):
'''
Encode and store a DISCONNECT control packet.
'''
header = bytearray(2)
header[0] = 0xE0
self.encoded = header
return str(header) if PY2 else bytes(header)
|
def encode(self):
'''
Encode and store a DISCONNECT control packet.
'''
header = bytearray(2)
header[0] = 0xE0
self.encoded = header
return str(header) if PY2 else bytes(header)
|
[
"Encode",
"and",
"store",
"a",
"DISCONNECT",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L133-L140
|
[
"def",
"encode",
"(",
"self",
")",
":",
"header",
"=",
"bytearray",
"(",
"2",
")",
"header",
"[",
"0",
"]",
"=",
"0xE0",
"self",
".",
"encoded",
"=",
"header",
"return",
"str",
"(",
"header",
")",
"if",
"PY2",
"else",
"bytes",
"(",
"header",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
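The `DISCONNECT.encode` record above leans on `bytearray(2)` pre-filling the second byte with zero, which doubles as the packet's remaining-length field. Assuming a no-argument constructor (and the module's `PY2` flag set for Python 3), the wire image is two bytes:

```python
assert DISCONNECT().encode() == b'\xe0\x00'   # fixed header 0xE0, remaining length 0
```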
test
|
CONNECT.encode
|
Encode and store a CONNECT control packet.
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes.
@raise e: C{ValueError} if encoded username string exceeds 65535 bytes.
|
mqtt/pdu.py
|
def encode(self):
'''
Encode and store a CONNECT control packet.
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes.
@raise e: C{ValueError} if encoded username string exceeds 65535 bytes.
'''
header = bytearray(1)
varHeader = bytearray()
payload = bytearray()
header[0] = 0x10 # packet code
# ---- Variable header encoding section -----
varHeader.extend(encodeString(self.version['tag']))
varHeader.append(self.version['level']) # protocol Level
flags = (self.cleanStart << 1)
if self.willTopic is not None and self.willMessage is not None:
flags |= 0x04 | (self.willRetain << 5) | (self.willQoS << 3)
if self.username is not None:
flags |= 0x80
if self.password is not None:
flags |= 0x40
varHeader.append(flags)
varHeader.extend(encode16Int(self.keepalive))
# ------ Payload encoding section ----
payload.extend(encodeString(self.clientId))
if self.willTopic is not None and self.willMessage is not None:
payload.extend(encodeString(self.willTopic))
payload.extend(encodeString(self.willMessage))
if self.username is not None:
payload.extend(encodeString(self.username))
if self.password is not None:
payload.extend(encode16Int(len(self.password)))
payload.extend(bytearray(self.password, encoding='ascii', errors='ignore'))
# ---- Build the packet once all lengths are known ----
header.extend(encodeLength(len(varHeader) + len(payload)))
header.extend(varHeader)
header.extend(payload)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
def encode(self):
'''
Encode and store a CONNECT control packet.
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes.
@raise e: C{ValueError} if encoded username string exceeds 65535 bytes.
'''
header = bytearray(1)
varHeader = bytearray()
payload = bytearray()
header[0] = 0x10 # packet code
# ---- Variable header encoding section -----
varHeader.extend(encodeString(self.version['tag']))
varHeader.append(self.version['level']) # protocol Level
flags = (self.cleanStart << 1)
if self.willTopic is not None and self.willMessage is not None:
flags |= 0x04 | (self.willRetain << 5) | (self.willQoS << 3)
if self.username is not None:
flags |= 0x80
if self.password is not None:
flags |= 0x40
varHeader.append(flags)
varHeader.extend(encode16Int(self.keepalive))
# ------ Payload encoding section ----
payload.extend(encodeString(self.clientId))
if self.willTopic is not None and self.willMessage is not None:
payload.extend(encodeString(self.willTopic))
payload.extend(encodeString(self.willMessage))
if self.username is not None:
payload.extend(encodeString(self.username))
if self.password is not None:
payload.extend(encode16Int(len(self.password)))
payload.extend(bytearray(self.password, encoding='ascii', errors='ignore'))
# ---- Build the packet once all lengths are known ----
header.extend(encodeLength(len(varHeader) + len(payload)))
header.extend(varHeader)
header.extend(payload)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
[
"Encode",
"and",
"store",
"a",
"CONNECT",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L211-L249
|
[
"def",
"encode",
"(",
"self",
")",
":",
"header",
"=",
"bytearray",
"(",
"1",
")",
"varHeader",
"=",
"bytearray",
"(",
")",
"payload",
"=",
"bytearray",
"(",
")",
"header",
"[",
"0",
"]",
"=",
"0x10",
"# packet code",
"# ---- Variable header encoding section -----",
"varHeader",
".",
"extend",
"(",
"encodeString",
"(",
"self",
".",
"version",
"[",
"'tag'",
"]",
")",
")",
"varHeader",
".",
"append",
"(",
"self",
".",
"version",
"[",
"'level'",
"]",
")",
"# protocol Level",
"flags",
"=",
"(",
"self",
".",
"cleanStart",
"<<",
"1",
")",
"if",
"self",
".",
"willTopic",
"is",
"not",
"None",
"and",
"self",
".",
"willMessage",
"is",
"not",
"None",
":",
"flags",
"|=",
"0x04",
"|",
"(",
"self",
".",
"willRetain",
"<<",
"5",
")",
"|",
"(",
"self",
".",
"willQoS",
"<<",
"3",
")",
"if",
"self",
".",
"username",
"is",
"not",
"None",
":",
"flags",
"|=",
"0x80",
"if",
"self",
".",
"password",
"is",
"not",
"None",
":",
"flags",
"|=",
"0x40",
"varHeader",
".",
"append",
"(",
"flags",
")",
"varHeader",
".",
"extend",
"(",
"encode16Int",
"(",
"self",
".",
"keepalive",
")",
")",
"# ------ Payload encoding section ----",
"payload",
".",
"extend",
"(",
"encodeString",
"(",
"self",
".",
"clientId",
")",
")",
"if",
"self",
".",
"willTopic",
"is",
"not",
"None",
"and",
"self",
".",
"willMessage",
"is",
"not",
"None",
":",
"payload",
".",
"extend",
"(",
"encodeString",
"(",
"self",
".",
"willTopic",
")",
")",
"payload",
".",
"extend",
"(",
"encodeString",
"(",
"self",
".",
"willMessage",
")",
")",
"if",
"self",
".",
"username",
"is",
"not",
"None",
":",
"payload",
".",
"extend",
"(",
"encodeString",
"(",
"self",
".",
"username",
")",
")",
"if",
"self",
".",
"password",
"is",
"not",
"None",
":",
"payload",
".",
"extend",
"(",
"encode16Int",
"(",
"len",
"(",
"self",
".",
"password",
")",
")",
")",
"payload",
".",
"extend",
"(",
"bytearray",
"(",
"self",
".",
"password",
",",
"encoding",
"=",
"'ascii'",
",",
"errors",
"=",
"'ignore'",
")",
")",
"# ---- Build the packet once all lengths are known ----",
"header",
".",
"extend",
"(",
"encodeLength",
"(",
"len",
"(",
"varHeader",
")",
"+",
"len",
"(",
"payload",
")",
")",
")",
"header",
".",
"extend",
"(",
"varHeader",
")",
"header",
".",
"extend",
"(",
"payload",
")",
"self",
".",
"encoded",
"=",
"header",
"return",
"str",
"(",
"header",
")",
"if",
"PY2",
"else",
"bytes",
"(",
"header",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
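A worked example of the flags byte assembled in `CONNECT.encode` above, for a clean-start session with a QoS-1 will plus username and password (values chosen for illustration):

```python
cleanStart, willQoS, willRetain = True, 1, False
flags = (cleanStart << 1)                           # clean session -> 0x02
flags |= 0x04 | (willRetain << 5) | (willQoS << 3)  # will flag + QoS 1 -> 0x0E
flags |= 0x80                                       # username present
flags |= 0x40                                       # password present
assert flags == 0xCE
```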
test
|
CONNECT.decode
|
Decode a CONNECT control packet.
|
mqtt/pdu.py
|
def decode(self, packet):
'''
Decode a CONNECT control packet.
'''
self.encoded = packet
# Strip the fixed header plus variable length field
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
# Variable Header
version_str, packet_remaining = decodeString(packet_remaining)
version_id = int(packet_remaining[0])
if version_id == v31['level']:
self.version = v31
else:
self.version = v311
flags = packet_remaining[1]
self.cleanStart = (flags & 0x02) != 0
willFlag = (flags & 0x04) != 0
willQoS = (flags >> 3) & 0x03
willRetain = (flags & 0x20) != 0
userFlag = (flags & 0x80) != 0
passFlag = (flags & 0x40) != 0
packet_remaining = packet_remaining[2:]
self.keepalive = decode16Int(packet_remaining)
# Payload
packet_remaining = packet_remaining[2:]
self.clientId, packet_remaining = decodeString(packet_remaining)
if willFlag:
self.willRetain = willRetain
self.willQoS = willQoS
self.willTopic, packet_remaining = decodeString(packet_remaining)
self.willMessage, packet_remaining = decodeString(packet_remaining)
if userFlag:
self.username, packet_remaining = decodeString(packet_remaining)
if passFlag:
l = decode16Int(packet_remaining)
self.password = packet_remaining[2:2+l]
|
def decode(self, packet):
'''
Decode a CONNECT control packet.
'''
self.encoded = packet
# Strip the fixed header plus variable length field
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
# Variable Header
version_str, packet_remaining = decodeString(packet_remaining)
version_id = int(packet_remaining[0])
if version_id == v31['level']:
self.version = v31
else:
self.version = v311
flags = packet_remaining[1]
self.cleanStart = (flags & 0x02) != 0
willFlag = (flags & 0x04) != 0
willQoS = (flags >> 3) & 0x03
willRetain = (flags & 0x20) != 0
userFlag = (flags & 0x80) != 0
passFlag = (flags & 0x40) != 0
packet_remaining = packet_remaining[2:]
self.keepalive = decode16Int(packet_remaining)
# Payload
packet_remaining = packet_remaining[2:]
self.clientId, packet_remaining = decodeString(packet_remaining)
if willFlag:
self.willRetain = willRetain
self.willQoS = willQoS
self.willTopic, packet_remaining = decodeString(packet_remaining)
self.willMessage, packet_remaining = decodeString(packet_remaining)
if userFlag:
self.username, packet_remaining = decodeString(packet_remaining)
if passFlag:
l = decode16Int(packet_remaining)
self.password = packet_remaining[2:2+l]
|
[
"Decode",
"a",
"CONNECT",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L251-L289
|
[
"def",
"decode",
"(",
"self",
",",
"packet",
")",
":",
"self",
".",
"encoded",
"=",
"packet",
"# Strip the fixed header plus variable length field",
"lenLen",
"=",
"1",
"while",
"packet",
"[",
"lenLen",
"]",
"&",
"0x80",
":",
"lenLen",
"+=",
"1",
"packet_remaining",
"=",
"packet",
"[",
"lenLen",
"+",
"1",
":",
"]",
"# Variable Header",
"version_str",
",",
"packet_remaining",
"=",
"decodeString",
"(",
"packet_remaining",
")",
"version_id",
"=",
"int",
"(",
"packet_remaining",
"[",
"0",
"]",
")",
"if",
"version_id",
"==",
"v31",
"[",
"'level'",
"]",
":",
"self",
".",
"version",
"=",
"v31",
"else",
":",
"self",
".",
"version",
"=",
"v311",
"flags",
"=",
"packet_remaining",
"[",
"1",
"]",
"self",
".",
"cleanStart",
"=",
"(",
"flags",
"&",
"0x02",
")",
"!=",
"0",
"willFlag",
"=",
"(",
"flags",
"&",
"0x04",
")",
"!=",
"0",
"willQoS",
"=",
"(",
"flags",
">>",
"3",
")",
"&",
"0x03",
"willRetain",
"=",
"(",
"flags",
"&",
"0x20",
")",
"!=",
"0",
"userFlag",
"=",
"(",
"flags",
"&",
"0x80",
")",
"!=",
"0",
"passFlag",
"=",
"(",
"flags",
"&",
"0x40",
")",
"!=",
"0",
"packet_remaining",
"=",
"packet_remaining",
"[",
"2",
":",
"]",
"self",
".",
"keepalive",
"=",
"decode16Int",
"(",
"packet_remaining",
")",
"# Payload",
"packet_remaining",
"=",
"packet_remaining",
"[",
"2",
":",
"]",
"self",
".",
"clientId",
",",
"packet_remaining",
"=",
"decodeString",
"(",
"packet_remaining",
")",
"if",
"willFlag",
":",
"self",
".",
"willRetain",
"=",
"willRetain",
"self",
".",
"willQoS",
"=",
"willQoS",
"self",
".",
"willTopic",
",",
"packet_remaining",
"=",
"decodeString",
"(",
"packet_remaining",
")",
"self",
".",
"willMessage",
",",
"packet_remaining",
"=",
"decodeString",
"(",
"packet_remaining",
")",
"if",
"userFlag",
":",
"self",
".",
"username",
",",
"packet_remaining",
"=",
"decodeString",
"(",
"packet_remaining",
")",
"if",
"passFlag",
":",
"l",
"=",
"decode16Int",
"(",
"packet_remaining",
")",
"self",
".",
"password",
"=",
"packet_remaining",
"[",
"2",
":",
"2",
"+",
"l",
"]"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
CONNACK.encode
|
Encode and store a CONNACK control packet.
|
mqtt/pdu.py
|
def encode(self):
'''
Encode and store a CONNACK control packet.
'''
header = bytearray(1)
varHeader = bytearray(2)
header[0] = 0x20
varHeader[0] = self.session
varHeader[1] = self.resultCode
header.extend(encodeLength(len(varHeader)))
header.extend(varHeader)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
def encode(self):
'''
Encode and store a CONNACK control packet.
'''
header = bytearray(1)
varHeader = bytearray(2)
header[0] = 0x20
varHeader[0] = self.session
varHeader[1] = self.resultCode
header.extend(encodeLength(len(varHeader)))
header.extend(varHeader)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
[
"Encode",
"and",
"store",
"a",
"CONNACK",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L302-L315
|
[
"def",
"encode",
"(",
"self",
")",
":",
"header",
"=",
"bytearray",
"(",
"1",
")",
"varHeader",
"=",
"bytearray",
"(",
"2",
")",
"header",
"[",
"0",
"]",
"=",
"0x20",
"varHeader",
"[",
"0",
"]",
"=",
"self",
".",
"session",
"varHeader",
"[",
"1",
"]",
"=",
"self",
".",
"resultCode",
"header",
".",
"extend",
"(",
"encodeLength",
"(",
"len",
"(",
"varHeader",
")",
")",
")",
"header",
".",
"extend",
"(",
"varHeader",
")",
"self",
".",
"encoded",
"=",
"header",
"return",
"str",
"(",
"header",
")",
"if",
"PY2",
"else",
"bytes",
"(",
"header",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
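Concretely, with session=0x01 and resultCode=0x00 the encode above produces the four bytes 20 02 01 00: the CONNACK type byte, a one-byte Remaining Length of 2, then the two variable-header bytes. A stand-alone re-derivation, assuming the length stays under 128 so encodeLength is a single byte:

def connack_bytes(session, result_code):
    header = bytearray([0x20])                    # packet type CONNACK
    var_header = bytearray([session, result_code])
    header.append(len(var_header))                # encodeLength(2) -> b'\x02'
    header.extend(var_header)
    return bytes(header)

assert connack_bytes(0x01, 0x00) == b'\x20\x02\x01\x00'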
|
test
|
CONNACK.decode
|
Decode a CONNACK control packet.
|
mqtt/pdu.py
|
def decode(self, packet):
'''
Decode a CONNACK control packet.
'''
self.encoded = packet
# Strip the fixed header plus variable length field
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.session = (packet_remaining[0] & 0x01) == 0x01
self.resultCode = int(packet_remaining[1])
|
def decode(self, packet):
'''
Decode a CONNACK control packet.
'''
self.encoded = packet
# Strip the fixed header plus variable length field
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.session = (packet_remaining[0] & 0x01) == 0x01
self.resultCode = int(packet_remaining[1])
|
[
"Decode",
"a",
"CONNACK",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L317-L328
|
[
"def",
"decode",
"(",
"self",
",",
"packet",
")",
":",
"self",
".",
"encoded",
"=",
"packet",
"# Strip the fixed header plus variable length field",
"lenLen",
"=",
"1",
"while",
"packet",
"[",
"lenLen",
"]",
"&",
"0x80",
":",
"lenLen",
"+=",
"1",
"packet_remaining",
"=",
"packet",
"[",
"lenLen",
"+",
"1",
":",
"]",
"self",
".",
"session",
"=",
"(",
"packet_remaining",
"[",
"0",
"]",
"&",
"0x01",
")",
"==",
"0x01",
"self",
".",
"resultCode",
"=",
"int",
"(",
"packet_remaining",
"[",
"1",
"]",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
SUBSCRIBE.decode
|
Decode a SUBSCRIBE control packet.
|
mqtt/pdu.py
|
def decode(self, packet):
'''
Decode a SUBSCRIBE control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.msgId = decode16Int(packet_remaining[0:2])
self.topics = []
packet_remaining = packet_remaining[2:]
while len(packet_remaining):
topic, packet_remaining = decodeString(packet_remaining)
            qos = int(packet_remaining[0]) & 0x03
            self.topics.append((topic, qos))
packet_remaining = packet_remaining[1:]
|
def decode(self, packet):
'''
Decode a SUBSCRIBE control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.msgId = decode16Int(packet_remaining[0:2])
self.topics = []
packet_remaining = packet_remaining[2:]
while len(packet_remaining):
topic, packet_remaining = decodeString(packet_remaining)
            qos = int(packet_remaining[0]) & 0x03
            self.topics.append((topic, qos))
packet_remaining = packet_remaining[1:]
|
[
"Decode",
"a",
"SUBSCRIBE",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L357-L373
|
[
"def",
"decode",
"(",
"self",
",",
"packet",
")",
":",
"self",
".",
"encoded",
"=",
"packet",
"lenLen",
"=",
"1",
"while",
"packet",
"[",
"lenLen",
"]",
"&",
"0x80",
":",
"lenLen",
"+=",
"1",
"packet_remaining",
"=",
"packet",
"[",
"lenLen",
"+",
"1",
":",
"]",
"self",
".",
"msgId",
"=",
"decode16Int",
"(",
"packet_remaining",
"[",
"0",
":",
"2",
"]",
")",
"self",
".",
"topics",
"=",
"[",
"]",
"packet_remaining",
"=",
"packet_remaining",
"[",
"2",
":",
"]",
"while",
"len",
"(",
"packet_remaining",
")",
":",
"topic",
",",
"packet_remaining",
"=",
"decodeString",
"(",
"packet_remaining",
")",
"qos",
"=",
"int",
"(",
"packet_remaining",
"[",
"0",
"]",
")",
"&",
"0x03",
"self",
".",
"topics",
".",
"append",
"(",
"(",
"topic",
",",
"qos",
")",
")",
"packet_remaining",
"=",
"packet_remaining",
"[",
"1",
":",
"]"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
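The SUBSCRIBE payload parsed above is a 16-bit msgId followed by repeated (length-prefixed UTF-8 topic, one-byte requested QoS) records until the bytes run out. A hedged sketch of that walk over raw bytes, with struct standing in for decode16Int and decodeString:

import struct

def parse_subscribe_payload(payload):
    # payload: msgId (2 bytes) then repeated [len:2][topic:len][qos:1] records.
    msg_id = struct.unpack('>H', payload[0:2])[0]
    topics, rest = [], payload[2:]
    while rest:
        (n,) = struct.unpack('>H', rest[0:2])
        topic = rest[2:2 + n].decode('utf-8')
        qos = rest[2 + n] & 0x03
        topics.append((topic, qos))
        rest = rest[3 + n:]
    return msg_id, topics

payload = b'\x00\x05' + b'\x00\x03a/b' + b'\x01'
assert parse_subscribe_payload(payload) == (5, [('a/b', 1)])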
|
test
|
SUBACK.encode
|
Encode and store a SUBACK control packet.
|
mqtt/pdu.py
|
def encode(self):
'''
Encode and store a SUBACK control packet.
'''
header = bytearray(1)
payload = bytearray()
varHeader = encode16Int(self.msgId)
header[0] = 0x90
for code in self.granted:
payload.append(code[0] | (0x80 if code[1] == True else 0x00))
header.extend(encodeLength(len(varHeader) + len(payload)))
header.extend(varHeader)
header.extend(payload)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
def encode(self):
'''
Encode and store a SUBACK control packet.
'''
header = bytearray(1)
payload = bytearray()
varHeader = encode16Int(self.msgId)
header[0] = 0x90
for code in self.granted:
payload.append(code[0] | (0x80 if code[1] == True else 0x00))
header.extend(encodeLength(len(varHeader) + len(payload)))
header.extend(varHeader)
header.extend(payload)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
[
"Encode",
"and",
"store",
"a",
"SUBACK",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L387-L401
|
[
"def",
"encode",
"(",
"self",
")",
":",
"header",
"=",
"bytearray",
"(",
"1",
")",
"payload",
"=",
"bytearray",
"(",
")",
"varHeader",
"=",
"encode16Int",
"(",
"self",
".",
"msgId",
")",
"header",
"[",
"0",
"]",
"=",
"0x90",
"for",
"code",
"in",
"self",
".",
"granted",
":",
"payload",
".",
"append",
"(",
"code",
"[",
"0",
"]",
"|",
"(",
"0x80",
"if",
"code",
"[",
"1",
"]",
"==",
"True",
"else",
"0x00",
")",
")",
"header",
".",
"extend",
"(",
"encodeLength",
"(",
"len",
"(",
"varHeader",
")",
"+",
"len",
"(",
"payload",
")",
")",
")",
"header",
".",
"extend",
"(",
"varHeader",
")",
"header",
".",
"extend",
"(",
"payload",
")",
"self",
".",
"encoded",
"=",
"header",
"return",
"str",
"(",
"header",
")",
"if",
"PY2",
"else",
"bytes",
"(",
"header",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
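Each entry of self.granted above is read as a (return code, failure flag) pair, with 0x80 ORed into the byte when the flag is set. A worked illustration with assumed inputs:

granted = [(1, False), (2, True)]   # (granted QoS, failure?) pairs, assumed shape
payload = bytearray(code | (0x80 if failed else 0x00) for code, failed in granted)
assert payload == bytearray([0x01, 0x82])   # 0x82 = QoS 2 with the failure bit set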
|
test
|
UNSUBSCRIBE.encode
|
Encode and store an UNSUBSCRIBE control packet
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes
|
mqtt/pdu.py
|
def encode(self):
'''
        Encode and store an UNSUBSCRIBE control packet
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes
'''
header = bytearray(1)
payload = bytearray()
varHeader = encode16Int(self.msgId)
header[0] = 0xA2 # packet with QoS=1
for topic in self.topics:
payload.extend(encodeString(topic)) # topic name
header.extend(encodeLength(len(varHeader) + len(payload)))
header.extend(varHeader)
header.extend(payload)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
def encode(self):
'''
        Encode and store an UNSUBSCRIBE control packet
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes
'''
header = bytearray(1)
payload = bytearray()
varHeader = encode16Int(self.msgId)
header[0] = 0xA2 # packet with QoS=1
for topic in self.topics:
payload.extend(encodeString(topic)) # topic name
header.extend(encodeLength(len(varHeader) + len(payload)))
header.extend(varHeader)
header.extend(payload)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
[
"Encode",
"and",
"store",
"an",
"UNSUBCRIBE",
"control",
"packet"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L430-L445
|
[
"def",
"encode",
"(",
"self",
")",
":",
"header",
"=",
"bytearray",
"(",
"1",
")",
"payload",
"=",
"bytearray",
"(",
")",
"varHeader",
"=",
"encode16Int",
"(",
"self",
".",
"msgId",
")",
"header",
"[",
"0",
"]",
"=",
"0xA2",
"# packet with QoS=1",
"for",
"topic",
"in",
"self",
".",
"topics",
":",
"payload",
".",
"extend",
"(",
"encodeString",
"(",
"topic",
")",
")",
"# topic name",
"header",
".",
"extend",
"(",
"encodeLength",
"(",
"len",
"(",
"varHeader",
")",
"+",
"len",
"(",
"payload",
")",
")",
")",
"header",
".",
"extend",
"(",
"varHeader",
")",
"header",
".",
"extend",
"(",
"payload",
")",
"self",
".",
"encoded",
"=",
"header",
"return",
"str",
"(",
"header",
")",
"if",
"PY2",
"else",
"bytes",
"(",
"header",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
UNSUBSCRIBE.decode
|
Decode an UNSUBACK control packet.
|
mqtt/pdu.py
|
def decode(self, packet):
'''
        Decode an UNSUBACK control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.msgId = decode16Int(packet_remaining[0:2])
self.topics = []
packet_remaining = packet_remaining[2:]
while len(packet_remaining):
l = decode16Int(packet_remaining[0:2])
topic = packet_remaining[2:2+l].decode(encoding='utf-8')
self.topics.append(topic)
packet_remaining = packet_remaining[2+l:]
|
def decode(self, packet):
'''
        Decode an UNSUBACK control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.msgId = decode16Int(packet_remaining[0:2])
self.topics = []
packet_remaining = packet_remaining[2:]
while len(packet_remaining):
l = decode16Int(packet_remaining[0:2])
topic = packet_remaining[2:2+l].decode(encoding='utf-8')
self.topics.append(topic)
packet_remaining = packet_remaining[2+l:]
|
[
"Decode",
"a",
"UNSUBACK",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L447-L463
|
[
"def",
"decode",
"(",
"self",
",",
"packet",
")",
":",
"self",
".",
"encoded",
"=",
"packet",
"lenLen",
"=",
"1",
"while",
"packet",
"[",
"lenLen",
"]",
"&",
"0x80",
":",
"lenLen",
"+=",
"1",
"packet_remaining",
"=",
"packet",
"[",
"lenLen",
"+",
"1",
":",
"]",
"self",
".",
"msgId",
"=",
"decode16Int",
"(",
"packet_remaining",
"[",
"0",
":",
"2",
"]",
")",
"self",
".",
"topics",
"=",
"[",
"]",
"packet_remaining",
"=",
"packet_remaining",
"[",
"2",
":",
"]",
"while",
"len",
"(",
"packet_remaining",
")",
":",
"l",
"=",
"decode16Int",
"(",
"packet_remaining",
"[",
"0",
":",
"2",
"]",
")",
"topic",
"=",
"packet_remaining",
"[",
"2",
":",
"2",
"+",
"l",
"]",
".",
"decode",
"(",
"encoding",
"=",
"'utf-8'",
")",
"self",
".",
"topics",
".",
"append",
"(",
"topic",
")",
"packet_remaining",
"=",
"packet_remaining",
"[",
"2",
"+",
"l",
":",
"]"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
UNSUBACK.encode
|
Encode and store an UNSUBACK control packet
|
mqtt/pdu.py
|
def encode(self):
'''
Encode and store an UNSUBACK control packet
'''
header = bytearray(1)
varHeader = encode16Int(self.msgId)
header[0] = 0xB0
header.extend(encodeLength(len(varHeader)))
header.extend(varHeader)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
def encode(self):
'''
Encode and store an UNSUBACK control packet
'''
header = bytearray(1)
varHeader = encode16Int(self.msgId)
header[0] = 0xB0
header.extend(encodeLength(len(varHeader)))
header.extend(varHeader)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
[
"Encode",
"and",
"store",
"an",
"UNSUBACK",
"control",
"packet"
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L474-L484
|
[
"def",
"encode",
"(",
"self",
")",
":",
"header",
"=",
"bytearray",
"(",
"1",
")",
"varHeader",
"=",
"encode16Int",
"(",
"self",
".",
"msgId",
")",
"header",
"[",
"0",
"]",
"=",
"0xB0",
"header",
".",
"extend",
"(",
"encodeLength",
"(",
"len",
"(",
"varHeader",
")",
")",
")",
"header",
".",
"extend",
"(",
"varHeader",
")",
"self",
".",
"encoded",
"=",
"header",
"return",
"str",
"(",
"header",
")",
"if",
"PY2",
"else",
"bytes",
"(",
"header",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
PUBLISH.encode
|
Encode and store a PUBLISH control packet.
@raise e: C{ValueError} if encoded topic string exceeds 65535 bytes.
@raise e: C{ValueError} if encoded packet size exceeds 268435455 bytes.
@raise e: C{TypeError} if C{data} is not a string, bytearray, int, boolean or float.
|
mqtt/pdu.py
|
def encode(self):
'''
Encode and store a PUBLISH control packet.
@raise e: C{ValueError} if encoded topic string exceeds 65535 bytes.
@raise e: C{ValueError} if encoded packet size exceeds 268435455 bytes.
@raise e: C{TypeError} if C{data} is not a string, bytearray, int, boolean or float.
'''
header = bytearray(1)
varHeader = bytearray()
payload = bytearray()
if self.qos:
header[0] = 0x30 | self.retain | (self.qos << 1) | (self.dup << 3)
varHeader.extend(encodeString(self.topic)) # topic name
varHeader.extend(encode16Int(self.msgId)) # msgId should not be None
else:
header[0] = 0x30 | self.retain
varHeader.extend(encodeString(self.topic)) # topic name
if isinstance(self.payload, bytearray):
payload.extend(self.payload)
elif isinstance(self.payload, str):
payload.extend(bytearray(self.payload, encoding='utf-8'))
else:
raise PayloadTypeError(type(self.payload))
totalLen = len(varHeader) + len(payload)
if totalLen > 268435455:
raise PayloadValueError(totalLen)
header.extend(encodeLength(totalLen))
header.extend(varHeader)
header.extend(payload)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
def encode(self):
'''
Encode and store a PUBLISH control packet.
@raise e: C{ValueError} if encoded topic string exceeds 65535 bytes.
@raise e: C{ValueError} if encoded packet size exceeds 268435455 bytes.
@raise e: C{TypeError} if C{data} is not a string, bytearray, int, boolean or float.
'''
header = bytearray(1)
varHeader = bytearray()
payload = bytearray()
if self.qos:
header[0] = 0x30 | self.retain | (self.qos << 1) | (self.dup << 3)
varHeader.extend(encodeString(self.topic)) # topic name
varHeader.extend(encode16Int(self.msgId)) # msgId should not be None
else:
header[0] = 0x30 | self.retain
varHeader.extend(encodeString(self.topic)) # topic name
if isinstance(self.payload, bytearray):
payload.extend(self.payload)
elif isinstance(self.payload, str):
payload.extend(bytearray(self.payload, encoding='utf-8'))
else:
raise PayloadTypeError(type(self.payload))
totalLen = len(varHeader) + len(payload)
if totalLen > 268435455:
raise PayloadValueError(totalLen)
header.extend(encodeLength(totalLen))
header.extend(varHeader)
header.extend(payload)
self.encoded = header
return str(header) if PY2 else bytes(header)
|
[
"Encode",
"and",
"store",
"a",
"PUBLISH",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L511-L542
|
[
"def",
"encode",
"(",
"self",
")",
":",
"header",
"=",
"bytearray",
"(",
"1",
")",
"varHeader",
"=",
"bytearray",
"(",
")",
"payload",
"=",
"bytearray",
"(",
")",
"if",
"self",
".",
"qos",
":",
"header",
"[",
"0",
"]",
"=",
"0x30",
"|",
"self",
".",
"retain",
"|",
"(",
"self",
".",
"qos",
"<<",
"1",
")",
"|",
"(",
"self",
".",
"dup",
"<<",
"3",
")",
"varHeader",
".",
"extend",
"(",
"encodeString",
"(",
"self",
".",
"topic",
")",
")",
"# topic name",
"varHeader",
".",
"extend",
"(",
"encode16Int",
"(",
"self",
".",
"msgId",
")",
")",
"# msgId should not be None",
"else",
":",
"header",
"[",
"0",
"]",
"=",
"0x30",
"|",
"self",
".",
"retain",
"varHeader",
".",
"extend",
"(",
"encodeString",
"(",
"self",
".",
"topic",
")",
")",
"# topic name",
"if",
"isinstance",
"(",
"self",
".",
"payload",
",",
"bytearray",
")",
":",
"payload",
".",
"extend",
"(",
"self",
".",
"payload",
")",
"elif",
"isinstance",
"(",
"self",
".",
"payload",
",",
"str",
")",
":",
"payload",
".",
"extend",
"(",
"bytearray",
"(",
"self",
".",
"payload",
",",
"encoding",
"=",
"'utf-8'",
")",
")",
"else",
":",
"raise",
"PayloadTypeError",
"(",
"type",
"(",
"self",
".",
"payload",
")",
")",
"totalLen",
"=",
"len",
"(",
"varHeader",
")",
"+",
"len",
"(",
"payload",
")",
"if",
"totalLen",
">",
"268435455",
":",
"raise",
"PayloadValueError",
"(",
"totalLen",
")",
"header",
".",
"extend",
"(",
"encodeLength",
"(",
"totalLen",
")",
")",
"header",
".",
"extend",
"(",
"varHeader",
")",
"header",
".",
"extend",
"(",
"payload",
")",
"self",
".",
"encoded",
"=",
"header",
"return",
"str",
"(",
"header",
")",
"if",
"PY2",
"else",
"bytes",
"(",
"header",
")"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
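The first fixed-header byte built above packs four fields: packet type 0x30 in the high nibble, DUP at bit 3, QoS at bits 1-2, RETAIN at bit 0. A worked check of that bit arithmetic:

def publish_header_byte(dup, qos, retain):
    # 0011 (PUBLISH) | DUP | QoS (2 bits) | RETAIN
    return 0x30 | retain | (qos << 1) | (dup << 3)

assert publish_header_byte(dup=0, qos=1, retain=0) == 0x32
assert publish_header_byte(dup=1, qos=2, retain=1) == 0x3D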
|
test
|
PUBLISH.decode
|
Decode a PUBLISH control packet.
|
mqtt/pdu.py
|
def decode(self, packet):
'''
Decode a PUBLISH control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.dup = (packet[0] & 0x08) == 0x08
self.qos = (packet[0] & 0x06) >> 1
self.retain = (packet[0] & 0x01) == 0x01
self.topic, _ = decodeString(packet_remaining)
topicLen = decode16Int(packet_remaining)
if self.qos:
            self.msgId = decode16Int(packet_remaining[topicLen+2:topicLen+4])
self.payload = packet_remaining[topicLen+4:]
else:
self.msgId = None
self.payload = packet_remaining[topicLen+2:]
|
def decode(self, packet):
'''
Decode a PUBLISH control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.dup = (packet[0] & 0x08) == 0x08
self.qos = (packet[0] & 0x06) >> 1
self.retain = (packet[0] & 0x01) == 0x01
self.topic, _ = decodeString(packet_remaining)
topicLen = decode16Int(packet_remaining)
if self.qos:
            self.msgId = decode16Int(packet_remaining[topicLen+2:topicLen+4])
self.payload = packet_remaining[topicLen+4:]
else:
self.msgId = None
self.payload = packet_remaining[topicLen+2:]
|
[
"Decode",
"a",
"PUBLISH",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L544-L563
|
[
"def",
"decode",
"(",
"self",
",",
"packet",
")",
":",
"self",
".",
"encoded",
"=",
"packet",
"lenLen",
"=",
"1",
"while",
"packet",
"[",
"lenLen",
"]",
"&",
"0x80",
":",
"lenLen",
"+=",
"1",
"packet_remaining",
"=",
"packet",
"[",
"lenLen",
"+",
"1",
":",
"]",
"self",
".",
"dup",
"=",
"(",
"packet",
"[",
"0",
"]",
"&",
"0x08",
")",
"==",
"0x08",
"self",
".",
"qos",
"=",
"(",
"packet",
"[",
"0",
"]",
"&",
"0x06",
")",
">>",
"1",
"self",
".",
"retain",
"=",
"(",
"packet",
"[",
"0",
"]",
"&",
"0x01",
")",
"==",
"0x01",
"self",
".",
"topic",
",",
"_",
"=",
"decodeString",
"(",
"packet_remaining",
")",
"topicLen",
"=",
"decode16Int",
"(",
"packet_remaining",
")",
"if",
"self",
".",
"qos",
":",
"self",
".",
"msgId",
"=",
"decode16Int",
"(",
"packet_remaining",
"[",
"topicLen",
"+",
"2",
":",
"topicLen",
"+",
"4",
"]",
")",
"self",
".",
"payload",
"=",
"packet_remaining",
"[",
"topicLen",
"+",
"4",
":",
"]",
"else",
":",
"self",
".",
"msgId",
"=",
"None",
"self",
".",
"payload",
"=",
"packet_remaining",
"[",
"topicLen",
"+",
"2",
":",
"]"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
PUBREL.decode
|
Decode a PUBREL control packet.
|
mqtt/pdu.py
|
def decode(self, packet):
'''
Decode a PUBREL control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.msgId = decode16Int(packet_remaining)
self.dup = (packet[0] & 0x08) == 0x08
|
def decode(self, packet):
'''
Decode a PUBREL control packet.
'''
self.encoded = packet
lenLen = 1
while packet[lenLen] & 0x80:
lenLen += 1
packet_remaining = packet[lenLen+1:]
self.msgId = decode16Int(packet_remaining)
self.dup = (packet[0] & 0x08) == 0x08
|
[
"Decode",
"a",
"PUBREL",
"control",
"packet",
"."
] |
astrorafael/twisted-mqtt
|
python
|
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L651-L661
|
[
"def",
"decode",
"(",
"self",
",",
"packet",
")",
":",
"self",
".",
"encoded",
"=",
"packet",
"lenLen",
"=",
"1",
"while",
"packet",
"[",
"lenLen",
"]",
"&",
"0x80",
":",
"lenLen",
"+=",
"1",
"packet_remaining",
"=",
"packet",
"[",
"lenLen",
"+",
"1",
":",
"]",
"self",
".",
"msgId",
"=",
"decode16Int",
"(",
"packet_remaining",
")",
"self",
".",
"dup",
"=",
"(",
"packet",
"[",
"0",
"]",
"&",
"0x08",
")",
"==",
"0x08"
] |
5b322f7c2b82a502b1e1b70703ae45f1f668d07d
|
test
|
API.get_url
|
Return the URL for a method call.
:param method (optional): `str` method name.
:returns: `str` URL.
|
vklancer/api.py
|
def get_url(self, method=None, **kwargs):
"""Return url for call method.
:param method (optional): `str` method name.
:returns: `str` URL.
"""
kwargs.setdefault('v', self.__version)
if self.__token is not None:
kwargs.setdefault('access_token', self.__token)
return 'https://api.vk.com/method/{}?{}'.format(
method or self.__method, urlencode(kwargs)
)
|
def get_url(self, method=None, **kwargs):
"""Return url for call method.
:param method (optional): `str` method name.
:returns: `str` URL.
"""
kwargs.setdefault('v', self.__version)
if self.__token is not None:
kwargs.setdefault('access_token', self.__token)
return 'https://api.vk.com/method/{}?{}'.format(
method or self.__method, urlencode(kwargs)
)
|
[
"Return",
"url",
"for",
"call",
"method",
"."
] |
bindlock/vklancer
|
python
|
https://github.com/bindlock/vklancer/blob/10151c3856bc6f46a1f446ae4d605d46aace3669/vklancer/api.py#L26-L39
|
[
"def",
"get_url",
"(",
"self",
",",
"method",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'v'",
",",
"self",
".",
"__version",
")",
"if",
"self",
".",
"__token",
"is",
"not",
"None",
":",
"kwargs",
".",
"setdefault",
"(",
"'access_token'",
",",
"self",
".",
"__token",
")",
"return",
"'https://api.vk.com/method/{}?{}'",
".",
"format",
"(",
"method",
"or",
"self",
".",
"__method",
",",
"urlencode",
"(",
"kwargs",
")",
")"
] |
10151c3856bc6f46a1f446ae4d605d46aace3669
|
test
|
API.request
|
Send a request to the API.
:param method: `str` method name.
:returns: `dict` response.
|
vklancer/api.py
|
def request(self, method, **kwargs):
"""
        Send a request to the API.
:param method: `str` method name.
:returns: `dict` response.
"""
kwargs.setdefault('v', self.__version)
if self.__token is not None:
kwargs.setdefault('access_token', self.__token)
return requests.get(self.get_url(method, **kwargs)).json()
|
def request(self, method, **kwargs):
"""
        Send a request to the API.
:param method: `str` method name.
:returns: `dict` response.
"""
kwargs.setdefault('v', self.__version)
if self.__token is not None:
kwargs.setdefault('access_token', self.__token)
return requests.get(self.get_url(method, **kwargs)).json()
|
[
"Send",
"request",
"to",
"API",
"."
] |
bindlock/vklancer
|
python
|
https://github.com/bindlock/vklancer/blob/10151c3856bc6f46a1f446ae4d605d46aace3669/vklancer/api.py#L41-L53
|
[
"def",
"request",
"(",
"self",
",",
"method",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'v'",
",",
"self",
".",
"__version",
")",
"if",
"self",
".",
"__token",
"is",
"not",
"None",
":",
"kwargs",
".",
"setdefault",
"(",
"'access_token'",
",",
"self",
".",
"__token",
")",
"return",
"requests",
".",
"get",
"(",
"self",
".",
"get_url",
"(",
"method",
",",
"*",
"*",
"kwargs",
")",
")",
".",
"json",
"(",
")"
] |
10151c3856bc6f46a1f446ae4d605d46aace3669
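A hedged usage sketch for the two methods above. The bare API() construction and the 'users.get' method name are illustrative; the real constructor arguments are not shown in this excerpt:

from vklancer.api import API

api = API()                                      # construction details assumed
url = api.get_url('users.get', user_ids=1)
# -> 'https://api.vk.com/method/users.get?user_ids=1&v=...'
response = api.request('users.get', user_ids=1)  # dict decoded from the JSON body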
|
test
|
authentication
|
Authentication on vk.com.
:param login: login on vk.com.
:param password: password on vk.com.
:returns: `requests.Session` session with cookies.
|
vklancer/utils.py
|
def authentication(login, password):
"""
Authentication on vk.com.
:param login: login on vk.com.
:param password: password on vk.com.
:returns: `requests.Session` session with cookies.
"""
session = requests.Session()
response = session.get('https://m.vk.com')
url = re.search(r'action="([^\"]+)"', response.text).group(1)
data = {'email': login, 'pass': password}
response = session.post(url, data=data)
return session
|
def authentication(login, password):
"""
Authentication on vk.com.
:param login: login on vk.com.
:param password: password on vk.com.
:returns: `requests.Session` session with cookies.
"""
session = requests.Session()
response = session.get('https://m.vk.com')
url = re.search(r'action="([^\"]+)"', response.text).group(1)
data = {'email': login, 'pass': password}
response = session.post(url, data=data)
return session
|
[
"Authentication",
"on",
"vk",
".",
"com",
"."
] |
bindlock/vklancer
|
python
|
https://github.com/bindlock/vklancer/blob/10151c3856bc6f46a1f446ae4d605d46aace3669/vklancer/utils.py#L8-L21
|
[
"def",
"authentication",
"(",
"login",
",",
"password",
")",
":",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"response",
"=",
"session",
".",
"get",
"(",
"'https://m.vk.com'",
")",
"url",
"=",
"re",
".",
"search",
"(",
"r'action=\"([^\\\"]+)\"'",
",",
"response",
".",
"text",
")",
".",
"group",
"(",
"1",
")",
"data",
"=",
"{",
"'email'",
":",
"login",
",",
"'pass'",
":",
"password",
"}",
"response",
"=",
"session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"data",
")",
"return",
"session"
] |
10151c3856bc6f46a1f446ae4d605d46aace3669
|
test
|
oauth
|
OAuth on vk.com.
:param login: login on vk.com.
:param password: password on vk.com.
:param app_id: vk.com application id (default: 4729418).
:param scope: allowed actions (default: 2097151 (all)).
:returns: OAuth2 access token or None.
|
vklancer/utils.py
|
def oauth(login, password, app_id=4729418, scope=2097151):
"""
OAuth on vk.com.
:param login: login on vk.com.
:param password: password on vk.com.
:param app_id: vk.com application id (default: 4729418).
:param scope: allowed actions (default: 2097151 (all)).
:returns: OAuth2 access token or None.
"""
session = authentication(login, password)
data = {
'response_type': 'token',
'client_id': app_id,
'scope': scope,
'display': 'mobile',
}
response = session.post('https://oauth.vk.com/authorize', data=data)
if 'access_token' not in response.url:
url = re.search(r'action="([^\"]+)"', response.text).group(1)
response = session.get(url)
try:
return re.search(r'access_token=([^\&]+)', response.url).group(1)
    except AttributeError:
return None
|
def oauth(login, password, app_id=4729418, scope=2097151):
"""
OAuth on vk.com.
:param login: login on vk.com.
:param password: password on vk.com.
:param app_id: vk.com application id (default: 4729418).
:param scope: allowed actions (default: 2097151 (all)).
:returns: OAuth2 access token or None.
"""
session = authentication(login, password)
data = {
'response_type': 'token',
'client_id': app_id,
'scope': scope,
'display': 'mobile',
}
response = session.post('https://oauth.vk.com/authorize', data=data)
if 'access_token' not in response.url:
url = re.search(r'action="([^\"]+)"', response.text).group(1)
response = session.get(url)
try:
return re.search(r'access_token=([^\&]+)', response.url).group(1)
    except AttributeError:
return None
|
[
"OAuth",
"on",
"vk",
".",
"com",
"."
] |
bindlock/vklancer
|
python
|
https://github.com/bindlock/vklancer/blob/10151c3856bc6f46a1f446ae4d605d46aace3669/vklancer/utils.py#L24-L50
|
[
"def",
"oauth",
"(",
"login",
",",
"password",
",",
"app_id",
"=",
"4729418",
",",
"scope",
"=",
"2097151",
")",
":",
"session",
"=",
"authentication",
"(",
"login",
",",
"password",
")",
"data",
"=",
"{",
"'response_type'",
":",
"'token'",
",",
"'client_id'",
":",
"app_id",
",",
"'scope'",
":",
"scope",
",",
"'display'",
":",
"'mobile'",
",",
"}",
"response",
"=",
"session",
".",
"post",
"(",
"'https://oauth.vk.com/authorize'",
",",
"data",
"=",
"data",
")",
"if",
"'access_token'",
"not",
"in",
"response",
".",
"url",
":",
"url",
"=",
"re",
".",
"search",
"(",
"r'action=\"([^\\\"]+)\"'",
",",
"response",
".",
"text",
")",
".",
"group",
"(",
"1",
")",
"response",
"=",
"session",
".",
"get",
"(",
"url",
")",
"try",
":",
"return",
"re",
".",
"search",
"(",
"r'access_token=([^\\&]+)'",
",",
"response",
".",
"url",
")",
".",
"group",
"(",
"1",
")",
"except",
":",
"return",
"None"
] |
10151c3856bc6f46a1f446ae4d605d46aace3669
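A usage sketch for oauth, assuming live vk.com credentials; per the code above it returns None whenever the access token cannot be scraped out of the redirect URL:

from vklancer.utils import oauth

token = oauth('user@example.com', 'secret')      # placeholder credentials
if token is None:
    raise SystemExit('authentication or authorization failed')
print('access token:', token)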
|
test
|
File.create_from_array
|
create a block from array like objects
The operation is well defined only if array is at most 2d.
Parameters
----------
array : array_like,
array shall have a scalar dtype.
blockname : string
name of the block
Nfile : int or None
number of physical files. if None, 32M items per file
is used.
memorylimit : int
number of bytes to use for the buffering. relevant only if
indexing on array returns a copy (e.g. IO or dask array)
|
bigfile/__init__.py
|
def create_from_array(self, blockname, array, Nfile=None, memorylimit=1024 * 1024 * 256):
""" create a block from array like objects
The operation is well defined only if array is at most 2d.
Parameters
----------
array : array_like,
array shall have a scalar dtype.
blockname : string
name of the block
Nfile : int or None
number of physical files. if None, 32M items per file
is used.
memorylimit : int
number of bytes to use for the buffering. relevant only if
indexing on array returns a copy (e.g. IO or dask array)
"""
size = len(array)
# sane value -- 32 million items per physical file
sizeperfile = 32 * 1024 * 1024
if Nfile is None:
Nfile = (size + sizeperfile - 1) // sizeperfile
dtype = numpy.dtype((array.dtype, array.shape[1:]))
itemsize = dtype.itemsize
# we will do some chunking
# write memorylimit bytes at most (256M bytes)
# round to 1024 items
itemlimit = memorylimit // dtype.itemsize // 1024 * 1024
with self.create(blockname, dtype, size, Nfile) as b:
for i in range(0, len(array), itemlimit):
b.write(i, numpy.array(array[i:i+itemlimit]))
return self.open(blockname)
|
def create_from_array(self, blockname, array, Nfile=None, memorylimit=1024 * 1024 * 256):
""" create a block from array like objects
The operation is well defined only if array is at most 2d.
Parameters
----------
array : array_like,
array shall have a scalar dtype.
blockname : string
name of the block
Nfile : int or None
number of physical files. if None, 32M items per file
is used.
memorylimit : int
number of bytes to use for the buffering. relevant only if
indexing on array returns a copy (e.g. IO or dask array)
"""
size = len(array)
# sane value -- 32 million items per physical file
sizeperfile = 32 * 1024 * 1024
if Nfile is None:
Nfile = (size + sizeperfile - 1) // sizeperfile
dtype = numpy.dtype((array.dtype, array.shape[1:]))
itemsize = dtype.itemsize
# we will do some chunking
# write memorylimit bytes at most (256M bytes)
# round to 1024 items
itemlimit = memorylimit // dtype.itemsize // 1024 * 1024
with self.create(blockname, dtype, size, Nfile) as b:
for i in range(0, len(array), itemlimit):
b.write(i, numpy.array(array[i:i+itemlimit]))
return self.open(blockname)
|
[
"create",
"a",
"block",
"from",
"array",
"like",
"objects",
"The",
"operation",
"is",
"well",
"defined",
"only",
"if",
"array",
"is",
"at",
"most",
"2d",
"."
] |
rainwoodman/bigfile
|
python
|
https://github.com/rainwoodman/bigfile/blob/1a2d05977fc8edebd8ddf9e81fdb97648596266d/bigfile/__init__.py#L96-L135
|
[
"def",
"create_from_array",
"(",
"self",
",",
"blockname",
",",
"array",
",",
"Nfile",
"=",
"None",
",",
"memorylimit",
"=",
"1024",
"*",
"1024",
"*",
"256",
")",
":",
"size",
"=",
"len",
"(",
"array",
")",
"# sane value -- 32 million items per physical file",
"sizeperfile",
"=",
"32",
"*",
"1024",
"*",
"1024",
"if",
"Nfile",
"is",
"None",
":",
"Nfile",
"=",
"(",
"size",
"+",
"sizeperfile",
"-",
"1",
")",
"//",
"sizeperfile",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"(",
"array",
".",
"dtype",
",",
"array",
".",
"shape",
"[",
"1",
":",
"]",
")",
")",
"itemsize",
"=",
"dtype",
".",
"itemsize",
"# we will do some chunking",
"# write memorylimit bytes at most (256M bytes)",
"# round to 1024 items",
"itemlimit",
"=",
"memorylimit",
"//",
"dtype",
".",
"itemsize",
"//",
"1024",
"*",
"1024",
"with",
"self",
".",
"create",
"(",
"blockname",
",",
"dtype",
",",
"size",
",",
"Nfile",
")",
"as",
"b",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"array",
")",
",",
"itemlimit",
")",
":",
"b",
".",
"write",
"(",
"i",
",",
"numpy",
".",
"array",
"(",
"array",
"[",
"i",
":",
"i",
"+",
"itemlimit",
"]",
")",
")",
"return",
"self",
".",
"open",
"(",
"blockname",
")"
] |
1a2d05977fc8edebd8ddf9e81fdb97648596266d
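The chunk-size arithmetic above first floors the byte budget to whole items and then rounds down to a multiple of 1024 items; the left-to-right order of `memorylimit // dtype.itemsize // 1024 * 1024` is what makes the final `* 1024` a rounding step rather than a scaling one. A worked check with assumed numbers:

memorylimit = 1024 * 1024 * 256               # the 256M-byte default budget
itemsize = 24                                 # e.g. one (3,) float64 row
itemlimit = memorylimit // itemsize // 1024 * 1024
assert itemlimit == 11184128                  # a multiple of 1024 items
assert itemlimit * itemsize <= memorylimit    # never exceeds the budget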
|
test
|
FileMPI.refresh
|
Refresh the list of blocks from the disk, collectively
|
bigfile/__init__.py
|
def refresh(self):
""" Refresh the list of blocks to the disk, collectively """
if self.comm.rank == 0:
self._blocks = self.list_blocks()
else:
self._blocks = None
self._blocks = self.comm.bcast(self._blocks)
|
def refresh(self):
""" Refresh the list of blocks to the disk, collectively """
if self.comm.rank == 0:
self._blocks = self.list_blocks()
else:
self._blocks = None
self._blocks = self.comm.bcast(self._blocks)
|
[
"Refresh",
"the",
"list",
"of",
"blocks",
"to",
"the",
"disk",
"collectively"
] |
rainwoodman/bigfile
|
python
|
https://github.com/rainwoodman/bigfile/blob/1a2d05977fc8edebd8ddf9e81fdb97648596266d/bigfile/__init__.py#L199-L205
|
[
"def",
"refresh",
"(",
"self",
")",
":",
"if",
"self",
".",
"comm",
".",
"rank",
"==",
"0",
":",
"self",
".",
"_blocks",
"=",
"self",
".",
"list_blocks",
"(",
")",
"else",
":",
"self",
".",
"_blocks",
"=",
"None",
"self",
".",
"_blocks",
"=",
"self",
".",
"comm",
".",
"bcast",
"(",
"self",
".",
"_blocks",
")"
] |
1a2d05977fc8edebd8ddf9e81fdb97648596266d
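The refresh pattern above, read on rank 0 and broadcast to every other rank, is the standard way to keep a collective view consistent while touching the filesystem only once. A minimal mpi4py analogue of the same idiom, with os.listdir standing in for list_blocks:

from mpi4py import MPI
import os

comm = MPI.COMM_WORLD
blocks = sorted(os.listdir('.')) if comm.rank == 0 else None
blocks = comm.bcast(blocks)   # every rank now holds rank 0's listing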
|
test
|
FileMPI.create_from_array
|
create a block from array like objects
The operation is well defined only if array is at most 2d.
Parameters
----------
array : array_like,
array shall have a scalar dtype.
blockname : string
name of the block
Nfile : int or None
number of physical files. if None, 32M items per file
is used.
memorylimit : int
number of bytes to use for the buffering. relevant only if
indexing on array returns a copy (e.g. IO or dask array)
|
bigfile/__init__.py
|
def create_from_array(self, blockname, array, Nfile=None, memorylimit=1024 * 1024 * 256):
""" create a block from array like objects
The operation is well defined only if array is at most 2d.
Parameters
----------
array : array_like,
array shall have a scalar dtype.
blockname : string
name of the block
Nfile : int or None
number of physical files. if None, 32M items per file
is used.
memorylimit : int
number of bytes to use for the buffering. relevant only if
indexing on array returns a copy (e.g. IO or dask array)
"""
size = self.comm.allreduce(len(array))
# sane value -- 32 million items per physical file
sizeperfile = 32 * 1024 * 1024
if Nfile is None:
Nfile = (size + sizeperfile - 1) // sizeperfile
offset = sum(self.comm.allgather(len(array))[:self.comm.rank])
dtype = numpy.dtype((array.dtype, array.shape[1:]))
itemsize = dtype.itemsize
# we will do some chunking
# write memorylimit bytes at most (256M bytes)
# round to 1024 items
itemlimit = memorylimit // dtype.itemsize // 1024 * 1024
with self.create(blockname, dtype, size, Nfile) as b:
for i in range(0, len(array), itemlimit):
b.write(offset + i, numpy.array(array[i:i+itemlimit]))
return self.open(blockname)
|
def create_from_array(self, blockname, array, Nfile=None, memorylimit=1024 * 1024 * 256):
""" create a block from array like objects
The operation is well defined only if array is at most 2d.
Parameters
----------
array : array_like,
array shall have a scalar dtype.
blockname : string
name of the block
Nfile : int or None
number of physical files. if None, 32M items per file
is used.
memorylimit : int
number of bytes to use for the buffering. relevant only if
indexing on array returns a copy (e.g. IO or dask array)
"""
size = self.comm.allreduce(len(array))
# sane value -- 32 million items per physical file
sizeperfile = 32 * 1024 * 1024
if Nfile is None:
Nfile = (size + sizeperfile - 1) // sizeperfile
offset = sum(self.comm.allgather(len(array))[:self.comm.rank])
dtype = numpy.dtype((array.dtype, array.shape[1:]))
itemsize = dtype.itemsize
# we will do some chunking
# write memorylimit bytes at most (256M bytes)
# round to 1024 items
itemlimit = memorylimit // dtype.itemsize // 1024 * 1024
with self.create(blockname, dtype, size, Nfile) as b:
for i in range(0, len(array), itemlimit):
b.write(offset + i, numpy.array(array[i:i+itemlimit]))
return self.open(blockname)
|
[
"create",
"a",
"block",
"from",
"array",
"like",
"objects",
"The",
"operation",
"is",
"well",
"defined",
"only",
"if",
"array",
"is",
"at",
"most",
"2d",
"."
] |
rainwoodman/bigfile
|
python
|
https://github.com/rainwoodman/bigfile/blob/1a2d05977fc8edebd8ddf9e81fdb97648596266d/bigfile/__init__.py#L221-L261
|
[
"def",
"create_from_array",
"(",
"self",
",",
"blockname",
",",
"array",
",",
"Nfile",
"=",
"None",
",",
"memorylimit",
"=",
"1024",
"*",
"1024",
"*",
"256",
")",
":",
"size",
"=",
"self",
".",
"comm",
".",
"allreduce",
"(",
"len",
"(",
"array",
")",
")",
"# sane value -- 32 million items per physical file",
"sizeperfile",
"=",
"32",
"*",
"1024",
"*",
"1024",
"if",
"Nfile",
"is",
"None",
":",
"Nfile",
"=",
"(",
"size",
"+",
"sizeperfile",
"-",
"1",
")",
"//",
"sizeperfile",
"offset",
"=",
"sum",
"(",
"self",
".",
"comm",
".",
"allgather",
"(",
"len",
"(",
"array",
")",
")",
"[",
":",
"self",
".",
"comm",
".",
"rank",
"]",
")",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"(",
"array",
".",
"dtype",
",",
"array",
".",
"shape",
"[",
"1",
":",
"]",
")",
")",
"itemsize",
"=",
"dtype",
".",
"itemsize",
"# we will do some chunking",
"# write memorylimit bytes at most (256M bytes)",
"# round to 1024 items",
"itemlimit",
"=",
"memorylimit",
"//",
"dtype",
".",
"itemsize",
"//",
"1024",
"*",
"1024",
"with",
"self",
".",
"create",
"(",
"blockname",
",",
"dtype",
",",
"size",
",",
"Nfile",
")",
"as",
"b",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"array",
")",
",",
"itemlimit",
")",
":",
"b",
".",
"write",
"(",
"offset",
"+",
"i",
",",
"numpy",
".",
"array",
"(",
"array",
"[",
"i",
":",
"i",
"+",
"itemlimit",
"]",
")",
")",
"return",
"self",
".",
"open",
"(",
"blockname",
")"
] |
1a2d05977fc8edebd8ddf9e81fdb97648596266d
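The offset line above is an exclusive prefix sum: each rank adds up the array lengths gathered from all lower ranks, so the per-rank writes land back to back in the block. A single-process illustration of that arithmetic with assumed lengths:

lengths = [10, 7, 12, 5]                      # len(array) on ranks 0..3, assumed
offsets = [sum(lengths[:r]) for r in range(len(lengths))]
assert offsets == [0, 10, 17, 29]             # rank r writes starting at offsets[r]
assert offsets[-1] + lengths[-1] == sum(lengths)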
|
test
|
maybebool
|
If `value` is a string type, attempts to convert it to a boolean
if it looks like it might be one, otherwise returns the value
unchanged. The difference between this and
:func:`pyramid.settings.asbool` is how non-bools are handled: this
returns the original value, whereas `asbool` returns False.
|
pyramid_webassets/__init__.py
|
def maybebool(value):
'''
If `value` is a string type, attempts to convert it to a boolean
if it looks like it might be one, otherwise returns the value
unchanged. The difference between this and
:func:`pyramid.settings.asbool` is how non-bools are handled: this
returns the original value, whereas `asbool` returns False.
'''
if isinstance(value, six.string_types) and value.lower() in booly:
return asbool(value) # pragma: no cover
return value
|
def maybebool(value):
'''
If `value` is a string type, attempts to convert it to a boolean
if it looks like it might be one, otherwise returns the value
unchanged. The difference between this and
:func:`pyramid.settings.asbool` is how non-bools are handled: this
returns the original value, whereas `asbool` returns False.
'''
if isinstance(value, six.string_types) and value.lower() in booly:
return asbool(value) # pragma: no cover
return value
|
[
"If",
"value",
"is",
"a",
"string",
"type",
"attempts",
"to",
"convert",
"it",
"to",
"a",
"boolean",
"if",
"it",
"looks",
"like",
"it",
"might",
"be",
"one",
"otherwise",
"returns",
"the",
"value",
"unchanged",
".",
"The",
"difference",
"between",
"this",
"and",
":",
"func",
":",
"pyramid",
".",
"settings",
".",
"asbool",
"is",
"how",
"non",
"-",
"bools",
"are",
"handled",
":",
"this",
"returns",
"the",
"original",
"value",
"whereas",
"asbool",
"returns",
"False",
"."
] |
sontek/pyramid_webassets
|
python
|
https://github.com/sontek/pyramid_webassets/blob/d81a8f0c55aa49181ced4650fc88d434bbf94e62/pyramid_webassets/__init__.py#L24-L34
|
[
"def",
"maybebool",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
"and",
"value",
".",
"lower",
"(",
")",
"in",
"booly",
":",
"return",
"asbool",
"(",
"value",
")",
"# pragma: no cover",
"return",
"value"
] |
d81a8f0c55aa49181ced4650fc88d434bbf94e62
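Usage of maybebool, showing the contrast with asbool that the docstring describes; this assumes the module-level booly set contains the usual true/false spellings:

from pyramid_webassets import maybebool

assert maybebool('true') is True              # looks boolean: converted
assert maybebool('false') is False
assert maybebool('lavender') == 'lavender'    # anything else passes through
# pyramid.settings.asbool('lavender') would return False instead.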
|
test
|
get_webassets_env_from_settings
|
This function will take all webassets.* parameters, and
call the ``Environment()`` constructor with kwargs passed in.
The only two parameters that are not passed as keywords are:
* base_dir
* base_url
which are passed in positionally.
Read the ``WebAssets`` docs for ``Environment`` for more details.
|
pyramid_webassets/__init__.py
|
def get_webassets_env_from_settings(settings, prefix='webassets'):
"""This function will take all webassets.* parameters, and
call the ``Environment()`` constructor with kwargs passed in.
The only two parameters that are not passed as keywords are:
* base_dir
* base_url
which are passed in positionally.
Read the ``WebAssets`` docs for ``Environment`` for more details.
"""
# Make a dictionary of the webassets.* elements...
kwargs = {} # assets settings
cut_prefix = len(prefix) + 1
for k in settings:
if k.startswith(prefix):
val = settings[k]
if isinstance(val, six.string_types):
if val.lower() in auto_booly:
val = asbool(val)
elif val.lower().startswith('json:') and k[cut_prefix:] != 'manifest':
val = json.loads(val[5:])
kwargs[k[cut_prefix:]] = val
if 'base_dir' not in kwargs:
raise Exception("You need to provide webassets.base_dir in your configuration")
if 'base_url' not in kwargs:
raise Exception("You need to provide webassets.base_url in your configuration")
asset_dir = kwargs.pop('base_dir')
asset_url = kwargs.pop('base_url')
if ':' in asset_dir:
try:
resolved_dir = AssetResolver(None).resolve(asset_dir).abspath()
except ImportError:
pass
else:
# Store the original asset spec to use later
kwargs['asset_base'] = asset_dir
asset_dir = resolved_dir
if not asset_url.startswith('/'):
if six.moves.urllib.parse.urlparse(asset_url).scheme == '':
asset_url = '/' + asset_url
if 'debug' in kwargs:
kwargs['debug'] = maybebool(kwargs['debug'])
if 'cache' in kwargs:
cache = kwargs['cache'] = maybebool(kwargs['cache'])
if cache and isinstance(cache, six.string_types) and not path.isdir(cache):
makedirs(cache)
# 'updater' is just passed in...
if 'auto_build' in kwargs:
kwargs['auto_build'] = maybebool(kwargs['auto_build'])
if 'jst_compiler' in kwargs:
kwargs['JST_COMPILER'] = kwargs.pop('jst_compiler')
if 'jst_namespace' in kwargs:
kwargs['JST_NAMESPACE'] = kwargs.pop('jst_namespace')
if 'manifest' in kwargs:
kwargs['manifest'] = maybebool(kwargs['manifest'])
if 'url_expire' in kwargs:
kwargs['url_expire'] = maybebool(kwargs['url_expire'])
if 'static_view' in kwargs:
kwargs['static_view'] = asbool(kwargs['static_view'])
else:
kwargs['static_view'] = False
if 'cache_max_age' in kwargs:
kwargs['cache_max_age'] = int(kwargs.pop('cache_max_age'))
else:
kwargs['cache_max_age'] = None
if 'load_path' in kwargs:
# force load_path to be an array and split on whitespace
if not isinstance(kwargs['load_path'], list):
kwargs['load_path'] = kwargs['load_path'].split()
paths = kwargs.pop('paths', None)
if 'bundles' in kwargs:
if isinstance(kwargs['bundles'], six.string_types):
kwargs['bundles'] = kwargs['bundles'].split()
bundles = kwargs.pop('bundles', None)
assets_env = Environment(asset_dir, asset_url, **kwargs)
if paths is not None:
for map_path, map_url in json.loads(paths).items():
assets_env.append_path(map_path, map_url)
def yaml_stream(fname, mode):
if path.exists(fname):
return open(fname, mode)
else:
return assets_env.resolver.resolver.resolve(fname).stream()
if isinstance(bundles, list):
fnames = reversed(bundles)
fin = fileinput.input(fnames, openhook=yaml_stream)
with closing(fin):
lines = [text(line).rstrip() for line in fin]
yamlin = six.StringIO('\n'.join(lines))
loader = YAMLLoader(yamlin)
result = loader.load_bundles()
assets_env.register(result)
elif isinstance(bundles, dict):
assets_env.register(bundles)
return assets_env
|
def get_webassets_env_from_settings(settings, prefix='webassets'):
"""This function will take all webassets.* parameters, and
call the ``Environment()`` constructor with kwargs passed in.
The only two parameters that are not passed as keywords are:
* base_dir
* base_url
which are passed in positionally.
Read the ``WebAssets`` docs for ``Environment`` for more details.
"""
# Make a dictionary of the webassets.* elements...
kwargs = {} # assets settings
cut_prefix = len(prefix) + 1
for k in settings:
if k.startswith(prefix):
val = settings[k]
if isinstance(val, six.string_types):
if val.lower() in auto_booly:
val = asbool(val)
elif val.lower().startswith('json:') and k[cut_prefix:] != 'manifest':
val = json.loads(val[5:])
kwargs[k[cut_prefix:]] = val
if 'base_dir' not in kwargs:
raise Exception("You need to provide webassets.base_dir in your configuration")
if 'base_url' not in kwargs:
raise Exception("You need to provide webassets.base_url in your configuration")
asset_dir = kwargs.pop('base_dir')
asset_url = kwargs.pop('base_url')
if ':' in asset_dir:
try:
resolved_dir = AssetResolver(None).resolve(asset_dir).abspath()
except ImportError:
pass
else:
# Store the original asset spec to use later
kwargs['asset_base'] = asset_dir
asset_dir = resolved_dir
if not asset_url.startswith('/'):
if six.moves.urllib.parse.urlparse(asset_url).scheme == '':
asset_url = '/' + asset_url
if 'debug' in kwargs:
kwargs['debug'] = maybebool(kwargs['debug'])
if 'cache' in kwargs:
cache = kwargs['cache'] = maybebool(kwargs['cache'])
if cache and isinstance(cache, six.string_types) and not path.isdir(cache):
makedirs(cache)
# 'updater' is just passed in...
if 'auto_build' in kwargs:
kwargs['auto_build'] = maybebool(kwargs['auto_build'])
if 'jst_compiler' in kwargs:
kwargs['JST_COMPILER'] = kwargs.pop('jst_compiler')
if 'jst_namespace' in kwargs:
kwargs['JST_NAMESPACE'] = kwargs.pop('jst_namespace')
if 'manifest' in kwargs:
kwargs['manifest'] = maybebool(kwargs['manifest'])
if 'url_expire' in kwargs:
kwargs['url_expire'] = maybebool(kwargs['url_expire'])
if 'static_view' in kwargs:
kwargs['static_view'] = asbool(kwargs['static_view'])
else:
kwargs['static_view'] = False
if 'cache_max_age' in kwargs:
kwargs['cache_max_age'] = int(kwargs.pop('cache_max_age'))
else:
kwargs['cache_max_age'] = None
if 'load_path' in kwargs:
# force load_path to be an array and split on whitespace
if not isinstance(kwargs['load_path'], list):
kwargs['load_path'] = kwargs['load_path'].split()
paths = kwargs.pop('paths', None)
if 'bundles' in kwargs:
if isinstance(kwargs['bundles'], six.string_types):
kwargs['bundles'] = kwargs['bundles'].split()
bundles = kwargs.pop('bundles', None)
assets_env = Environment(asset_dir, asset_url, **kwargs)
if paths is not None:
for map_path, map_url in json.loads(paths).items():
assets_env.append_path(map_path, map_url)
def yaml_stream(fname, mode):
if path.exists(fname):
return open(fname, mode)
else:
return assets_env.resolver.resolver.resolve(fname).stream()
if isinstance(bundles, list):
fnames = reversed(bundles)
fin = fileinput.input(fnames, openhook=yaml_stream)
with closing(fin):
lines = [text(line).rstrip() for line in fin]
yamlin = six.StringIO('\n'.join(lines))
loader = YAMLLoader(yamlin)
result = loader.load_bundles()
assets_env.register(result)
elif isinstance(bundles, dict):
assets_env.register(bundles)
return assets_env
|
[
"This",
"function",
"will",
"take",
"all",
"webassets",
".",
"*",
"parameters",
"and",
"call",
"the",
"Environment",
"()",
"constructor",
"with",
"kwargs",
"passed",
"in",
"."
] |
sontek/pyramid_webassets
|
python
|
https://github.com/sontek/pyramid_webassets/blob/d81a8f0c55aa49181ced4650fc88d434bbf94e62/pyramid_webassets/__init__.py#L207-L328
|
[
"def",
"get_webassets_env_from_settings",
"(",
"settings",
",",
"prefix",
"=",
"'webassets'",
")",
":",
"# Make a dictionary of the webassets.* elements...",
"kwargs",
"=",
"{",
"}",
"# assets settings",
"cut_prefix",
"=",
"len",
"(",
"prefix",
")",
"+",
"1",
"for",
"k",
"in",
"settings",
":",
"if",
"k",
".",
"startswith",
"(",
"prefix",
")",
":",
"val",
"=",
"settings",
"[",
"k",
"]",
"if",
"isinstance",
"(",
"val",
",",
"six",
".",
"string_types",
")",
":",
"if",
"val",
".",
"lower",
"(",
")",
"in",
"auto_booly",
":",
"val",
"=",
"asbool",
"(",
"val",
")",
"elif",
"val",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'json:'",
")",
"and",
"k",
"[",
"cut_prefix",
":",
"]",
"!=",
"'manifest'",
":",
"val",
"=",
"json",
".",
"loads",
"(",
"val",
"[",
"5",
":",
"]",
")",
"kwargs",
"[",
"k",
"[",
"cut_prefix",
":",
"]",
"]",
"=",
"val",
"if",
"'base_dir'",
"not",
"in",
"kwargs",
":",
"raise",
"Exception",
"(",
"\"You need to provide webassets.base_dir in your configuration\"",
")",
"if",
"'base_url'",
"not",
"in",
"kwargs",
":",
"raise",
"Exception",
"(",
"\"You need to provide webassets.base_url in your configuration\"",
")",
"asset_dir",
"=",
"kwargs",
".",
"pop",
"(",
"'base_dir'",
")",
"asset_url",
"=",
"kwargs",
".",
"pop",
"(",
"'base_url'",
")",
"if",
"':'",
"in",
"asset_dir",
":",
"try",
":",
"resolved_dir",
"=",
"AssetResolver",
"(",
"None",
")",
".",
"resolve",
"(",
"asset_dir",
")",
".",
"abspath",
"(",
")",
"except",
"ImportError",
":",
"pass",
"else",
":",
"# Store the original asset spec to use later",
"kwargs",
"[",
"'asset_base'",
"]",
"=",
"asset_dir",
"asset_dir",
"=",
"resolved_dir",
"if",
"not",
"asset_url",
".",
"startswith",
"(",
"'/'",
")",
":",
"if",
"six",
".",
"moves",
".",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"asset_url",
")",
".",
"scheme",
"==",
"''",
":",
"asset_url",
"=",
"'/'",
"+",
"asset_url",
"if",
"'debug'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'debug'",
"]",
"=",
"maybebool",
"(",
"kwargs",
"[",
"'debug'",
"]",
")",
"if",
"'cache'",
"in",
"kwargs",
":",
"cache",
"=",
"kwargs",
"[",
"'cache'",
"]",
"=",
"maybebool",
"(",
"kwargs",
"[",
"'cache'",
"]",
")",
"if",
"cache",
"and",
"isinstance",
"(",
"cache",
",",
"six",
".",
"string_types",
")",
"and",
"not",
"path",
".",
"isdir",
"(",
"cache",
")",
":",
"makedirs",
"(",
"cache",
")",
"# 'updater' is just passed in...",
"if",
"'auto_build'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'auto_build'",
"]",
"=",
"maybebool",
"(",
"kwargs",
"[",
"'auto_build'",
"]",
")",
"if",
"'jst_compiler'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'JST_COMPILER'",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"'jst_compiler'",
")",
"if",
"'jst_namespace'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'JST_NAMESPACE'",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"'jst_namespace'",
")",
"if",
"'manifest'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'manifest'",
"]",
"=",
"maybebool",
"(",
"kwargs",
"[",
"'manifest'",
"]",
")",
"if",
"'url_expire'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'url_expire'",
"]",
"=",
"maybebool",
"(",
"kwargs",
"[",
"'url_expire'",
"]",
")",
"if",
"'static_view'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'static_view'",
"]",
"=",
"asbool",
"(",
"kwargs",
"[",
"'static_view'",
"]",
")",
"else",
":",
"kwargs",
"[",
"'static_view'",
"]",
"=",
"False",
"if",
"'cache_max_age'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'cache_max_age'",
"]",
"=",
"int",
"(",
"kwargs",
".",
"pop",
"(",
"'cache_max_age'",
")",
")",
"else",
":",
"kwargs",
"[",
"'cache_max_age'",
"]",
"=",
"None",
"if",
"'load_path'",
"in",
"kwargs",
":",
"# force load_path to be an array and split on whitespace",
"if",
"not",
"isinstance",
"(",
"kwargs",
"[",
"'load_path'",
"]",
",",
"list",
")",
":",
"kwargs",
"[",
"'load_path'",
"]",
"=",
"kwargs",
"[",
"'load_path'",
"]",
".",
"split",
"(",
")",
"paths",
"=",
"kwargs",
".",
"pop",
"(",
"'paths'",
",",
"None",
")",
"if",
"'bundles'",
"in",
"kwargs",
":",
"if",
"isinstance",
"(",
"kwargs",
"[",
"'bundles'",
"]",
",",
"six",
".",
"string_types",
")",
":",
"kwargs",
"[",
"'bundles'",
"]",
"=",
"kwargs",
"[",
"'bundles'",
"]",
".",
"split",
"(",
")",
"bundles",
"=",
"kwargs",
".",
"pop",
"(",
"'bundles'",
",",
"None",
")",
"assets_env",
"=",
"Environment",
"(",
"asset_dir",
",",
"asset_url",
",",
"*",
"*",
"kwargs",
")",
"if",
"paths",
"is",
"not",
"None",
":",
"for",
"map_path",
",",
"map_url",
"in",
"json",
".",
"loads",
"(",
"paths",
")",
".",
"items",
"(",
")",
":",
"assets_env",
".",
"append_path",
"(",
"map_path",
",",
"map_url",
")",
"def",
"yaml_stream",
"(",
"fname",
",",
"mode",
")",
":",
"if",
"path",
".",
"exists",
"(",
"fname",
")",
":",
"return",
"open",
"(",
"fname",
",",
"mode",
")",
"else",
":",
"return",
"assets_env",
".",
"resolver",
".",
"resolver",
".",
"resolve",
"(",
"fname",
")",
".",
"stream",
"(",
")",
"if",
"isinstance",
"(",
"bundles",
",",
"list",
")",
":",
"fnames",
"=",
"reversed",
"(",
"bundles",
")",
"fin",
"=",
"fileinput",
".",
"input",
"(",
"fnames",
",",
"openhook",
"=",
"yaml_stream",
")",
"with",
"closing",
"(",
"fin",
")",
":",
"lines",
"=",
"[",
"text",
"(",
"line",
")",
".",
"rstrip",
"(",
")",
"for",
"line",
"in",
"fin",
"]",
"yamlin",
"=",
"six",
".",
"StringIO",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
")",
")",
"loader",
"=",
"YAMLLoader",
"(",
"yamlin",
")",
"result",
"=",
"loader",
".",
"load_bundles",
"(",
")",
"assets_env",
".",
"register",
"(",
"result",
")",
"elif",
"isinstance",
"(",
"bundles",
",",
"dict",
")",
":",
"assets_env",
".",
"register",
"(",
"bundles",
")",
"return",
"assets_env"
] |
d81a8f0c55aa49181ced4650fc88d434bbf94e62
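A hedged usage sketch for the settings parser above. base_dir and base_url are the two mandatory keys; the concrete paths here are placeholders, and the string 'true' is coerced to a boolean on the way in by the auto_booly check:

from pyramid_webassets import get_webassets_env_from_settings

settings = {
    'webassets.base_dir': '/srv/app/static',  # placeholder paths
    'webassets.base_url': '/static',
    'webassets.debug': 'true',                # becomes the boolean True
    'webassets.cache': 'false',
}
env = get_webassets_env_from_settings(settings)  # a configured webassets Environment
# Omitting base_dir or base_url raises the Exception seen above.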
|
test
|
classifier.format_data
|
Function for converting a dict to an array suitable for sklearn.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
scale : bool
Whether or not to scale the data. Should always be
`True`, unless used by `classifier.fitting_data`
where a scaler hasn't been created yet.
Returns
-------
A data array suitable for use with `sklearn.cluster`.
|
latools/filtering/classifier_obj.py
|
def format_data(self, data, scale=True):
"""
Function for converting a dict to an array suitable for sklearn.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
scale : bool
Whether or not to scale the data. Should always be
`True`, unless used by `classifier.fitting_data`
where a scaler hasn't been created yet.
Returns
-------
A data array suitable for use with `sklearn.cluster`.
"""
if len(self.analytes) == 1:
# if single analyte
d = nominal_values(data[self.analytes[0]])
ds = np.array(list(zip(d, np.zeros(len(d)))))
else:
# package multiple analytes
d = [nominal_values(data[a]) for a in self.analytes]
ds = np.vstack(d).T
# identify all nan values
finite = np.isfinite(ds).sum(1) == ds.shape[1]
# remember which values are sampled
sampled = np.arange(data[self.analytes[0]].size)[finite]
# remove all nan values
ds = ds[finite]
if scale:
ds = self.scaler.transform(ds)
return ds, sampled
|
def format_data(self, data, scale=True):
"""
Function for converting a dict to an array suitable for sklearn.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
scale : bool
Whether or not to scale the data. Should always be
`True`, unless used by `classifier.fitting_data`
where a scaler hasn't been created yet.
Returns
-------
A data array suitable for use with `sklearn.cluster`.
"""
if len(self.analytes) == 1:
# if single analyte
d = nominal_values(data[self.analytes[0]])
ds = np.array(list(zip(d, np.zeros(len(d)))))
else:
# package multiple analytes
d = [nominal_values(data[a]) for a in self.analytes]
ds = np.vstack(d).T
# identify all nan values
finite = np.isfinite(ds).sum(1) == ds.shape[1]
# remember which values are sampled
sampled = np.arange(data[self.analytes[0]].size)[finite]
# remove all nan values
ds = ds[finite]
if scale:
ds = self.scaler.transform(ds)
return ds, sampled
|
[
"Function",
"for",
"converting",
"a",
"dict",
"to",
"an",
"array",
"suitable",
"for",
"sklearn",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/classifier_obj.py#L28-L65
|
[
"def",
"format_data",
"(",
"self",
",",
"data",
",",
"scale",
"=",
"True",
")",
":",
"if",
"len",
"(",
"self",
".",
"analytes",
")",
"==",
"1",
":",
"# if single analyte",
"d",
"=",
"nominal_values",
"(",
"data",
"[",
"self",
".",
"analytes",
"[",
"0",
"]",
"]",
")",
"ds",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"zip",
"(",
"d",
",",
"np",
".",
"zeros",
"(",
"len",
"(",
"d",
")",
")",
")",
")",
")",
"else",
":",
"# package multiple analytes",
"d",
"=",
"[",
"nominal_values",
"(",
"data",
"[",
"a",
"]",
")",
"for",
"a",
"in",
"self",
".",
"analytes",
"]",
"ds",
"=",
"np",
".",
"vstack",
"(",
"d",
")",
".",
"T",
"# identify all nan values",
"finite",
"=",
"np",
".",
"isfinite",
"(",
"ds",
")",
".",
"sum",
"(",
"1",
")",
"==",
"ds",
".",
"shape",
"[",
"1",
"]",
"# remember which values are sampled",
"sampled",
"=",
"np",
".",
"arange",
"(",
"data",
"[",
"self",
".",
"analytes",
"[",
"0",
"]",
"]",
".",
"size",
")",
"[",
"finite",
"]",
"# remove all nan values",
"ds",
"=",
"ds",
"[",
"finite",
"]",
"if",
"scale",
":",
"ds",
"=",
"self",
".",
"scaler",
".",
"transform",
"(",
"ds",
")",
"return",
"ds",
",",
"sampled"
] |
cd25a650cfee318152f234d992708511f7047fbe
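The core of format_data, restated as a stand-alone numpy sketch: stack one column per analyte, keep only rows where every column is finite, and record which input indices survived. The two-analyte dict below is assumed; nominal_values is omitted because these arrays carry no uncertainties:

import numpy as np

data = {'Mg': np.array([1.0, np.nan, 3.0]), 'Sr': np.array([4.0, 5.0, 6.0])}
ds = np.vstack([data[a] for a in ('Mg', 'Sr')]).T    # shape (3, 2)
finite = np.isfinite(ds).sum(1) == ds.shape[1]       # rows with no nan
sampled = np.arange(data['Mg'].size)[finite]         # surviving input indices
assert sampled.tolist() == [0, 2]
assert ds[finite].shape == (2, 2)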
|
test
|
classifier.fitting_data
|
Function to format data for cluster fitting.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
Returns
-------
A data array for initial cluster fitting.
|
latools/filtering/classifier_obj.py
|
def fitting_data(self, data):
"""
Function to format data for cluster fitting.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
Returns
-------
A data array for initial cluster fitting.
"""
ds_fit, _ = self.format_data(data, scale=False)
# define scaler
self.scaler = preprocessing.StandardScaler().fit(ds_fit)
# scale data and return
return self.scaler.transform(ds_fit)
|
def fitting_data(self, data):
"""
Function to format data for cluster fitting.
Parameters
----------
data : dict
A dict of data, containing all elements of
`analytes` as items.
Returns
-------
A data array for initial cluster fitting.
"""
ds_fit, _ = self.format_data(data, scale=False)
# define scaler
self.scaler = preprocessing.StandardScaler().fit(ds_fit)
# scale data and return
return self.scaler.transform(ds_fit)
|
[
"Function",
"to",
"format",
"data",
"for",
"cluster",
"fitting",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/classifier_obj.py#L67-L87
|
[
"def",
"fitting_data",
"(",
"self",
",",
"data",
")",
":",
"ds_fit",
",",
"_",
"=",
"self",
".",
"format_data",
"(",
"data",
",",
"scale",
"=",
"False",
")",
"# define scaler",
"self",
".",
"scaler",
"=",
"preprocessing",
".",
"StandardScaler",
"(",
")",
".",
"fit",
"(",
"ds_fit",
")",
"# scale data and return",
"return",
"self",
".",
"scaler",
".",
"transform",
"(",
"ds_fit",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
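For context, a standalone sketch of the scaler-definition step above, assuming scikit-learn is available (the array values are hypothetical):

import numpy as np
from sklearn import preprocessing

ds_fit = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
scaler = preprocessing.StandardScaler().fit(ds_fit)  # stores per-column mean and std
print(scaler.transform(ds_fit).mean(axis=0))         # ~[0. 0.]: columns are centred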
test
|
classifier.fit_kmeans
|
Fit KMeans clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
n_clusters : int
The number of clusters in the data.
**kwargs
passed to `sklearn.cluster.KMeans`.
Returns
-------
Fitted `sklearn.cluster.KMeans` object.
|
latools/filtering/classifier_obj.py
|
def fit_kmeans(self, data, n_clusters, **kwargs):
"""
Fit KMeans clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
n_clusters : int
The number of clusters in the data.
**kwargs
passed to `sklearn.cluster.KMeans`.
Returns
-------
Fitted `sklearn.cluster.KMeans` object.
"""
km = cl.KMeans(n_clusters=n_clusters, **kwargs)
km.fit(data)
return km
|
def fit_kmeans(self, data, n_clusters, **kwargs):
"""
Fit KMeans clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
n_clusters : int
The number of clusters in the data.
**kwargs
passed to `sklearn.cluster.KMeans`.
Returns
-------
Fitted `sklearn.cluster.KMeans` object.
"""
km = cl.KMeans(n_clusters=n_clusters, **kwargs)
km.fit(data)
return km
|
[
"Fit",
"KMeans",
"clustering",
"algorithm",
"to",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/classifier_obj.py#L89-L108
|
[
"def",
"fit_kmeans",
"(",
"self",
",",
"data",
",",
"n_clusters",
",",
"*",
"*",
"kwargs",
")",
":",
"km",
"=",
"cl",
".",
"KMeans",
"(",
"n_clusters",
"=",
"n_clusters",
",",
"*",
"*",
"kwargs",
")",
"km",
".",
"fit",
"(",
"data",
")",
"return",
"km"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
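A standalone sketch of the same KMeans call on synthetic data (the blob positions are arbitrary), with sklearn.cluster bound to cl as in the module above:

import numpy as np
from sklearn import cluster as cl

rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0, 0.5, (50, 2)),   # two well-separated blobs
                  rng.normal(5, 0.5, (50, 2))])
km = cl.KMeans(n_clusters=2, n_init=10)
km.fit(data)
print(km.cluster_centers_)  # one centre near (0, 0), one near (5, 5)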
test
|
classifier.fit_meanshift
|
Fit MeanShift clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
bandwidth : float
The bandwidth value used during clustering.
If None, determined automatically. Note:
the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs
passed to `sklearn.cluster.MeanShift`.
Returns
-------
Fitted `sklearn.cluster.MeanShift` object.
|
latools/filtering/classifier_obj.py
|
def fit_meanshift(self, data, bandwidth=None, bin_seeding=False, **kwargs):
"""
Fit MeanShift clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
bandwidth : float
The bandwidth value used during clustering.
If None, determined automatically. Note:
the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs
passed to `sklearn.cluster.MeanShift`.
Returns
-------
Fitted `sklearn.cluster.MeanShift` object.
"""
if bandwidth is None:
bandwidth = cl.estimate_bandwidth(data)
ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=bin_seeding, **kwargs)
ms.fit(data)
return ms
|
def fit_meanshift(self, data, bandwidth=None, bin_seeding=False, **kwargs):
"""
Fit MeanShift clustering algorithm to data.
Parameters
----------
data : array-like
A dataset formatted by `classifier.fitting_data`.
bandwidth : float
The bandwidth value used during clustering.
If None, determined automatically. Note:
the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs
passed to `sklearn.cluster.MeanShift`.
Returns
-------
Fitted `sklearn.cluster.MeanShift` object.
"""
if bandwidth is None:
bandwidth = cl.estimate_bandwidth(data)
ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=bin_seeding, **kwargs)
ms.fit(data)
return ms
|
[
"Fit",
"MeanShift",
"clustering",
"algorithm",
"to",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/classifier_obj.py#L110-L137
|
[
"def",
"fit_meanshift",
"(",
"self",
",",
"data",
",",
"bandwidth",
"=",
"None",
",",
"bin_seeding",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"bandwidth",
"is",
"None",
":",
"bandwidth",
"=",
"cl",
".",
"estimate_bandwidth",
"(",
"data",
")",
"ms",
"=",
"cl",
".",
"MeanShift",
"(",
"bandwidth",
"=",
"bandwidth",
",",
"bin_seeding",
"=",
"bin_seeding",
")",
"ms",
".",
"fit",
"(",
"data",
")",
"return",
"ms"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
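The equivalent standalone MeanShift calls, including the automatic bandwidth estimation used when bandwidth is None (synthetic data, sketch only):

import numpy as np
from sklearn import cluster as cl

rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0, 0.3, (50, 2)),
                  rng.normal(4, 0.3, (50, 2))])
bandwidth = cl.estimate_bandwidth(data)  # same default used when bandwidth is None
ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(data)
print(len(ms.cluster_centers_))  # expect 2 for this synthetic case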
test
|
classifier.fit
|
fit classifiers from large dataset.
Parameters
----------
data : dict
A dict of data for clustering. Must contain
items with the same name as analytes used for
clustering.
method : str
A string defining the clustering method used. Can be:
* 'kmeans' : K-Means clustering algorithm
* 'meanshift' : Meanshift algorithm
n_clusters : int
*K-Means only*. The number of clusters to identify
bandwidth : float
*Meanshift only.*
The bandwidth value used during clustering.
If None, determined automatically. Note:
the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
*Meanshift only.*
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs :
passed to the selected clustering function (`fit_kmeans` or `fit_meanshift`).
Returns
-------
None
|
latools/filtering/classifier_obj.py
|
def fit(self, data, method='kmeans', **kwargs):
"""
fit classifiers from large dataset.
Parameters
----------
data : dict
A dict of data for clustering. Must contain
items with the same name as analytes used for
clustering.
method : str
A string defining the clustering method used. Can be:
* 'kmeans' : K-Means clustering algorithm
* 'meanshift' : Meanshift algorithm
n_clusters : int
*K-Means only*. The number of clusters to identify
bandwidth : float
*Meanshift only.*
The bandwidth value used during clustering.
If None, determined automatically. Note:
the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
*Meanshift only.*
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs :
passed to the selected clustering function (`fit_kmeans` or `fit_meanshift`).
Returns
-------
None
"""
self.method = method
ds_fit = self.fitting_data(data)
mdict = {'kmeans': self.fit_kmeans,
'meanshift': self.fit_meanshift}
clust = mdict[method]
self.classifier = clust(data=ds_fit, **kwargs)
# sort cluster centers by value of first column, to avoid random variation.
c0 = self.classifier.cluster_centers_.T[self.sort_by]
self.classifier.cluster_centers_ = self.classifier.cluster_centers_[np.argsort(c0)]
# recalculate the labels, so it's consistent with cluster centers
self.classifier.labels_ = self.classifier.predict(ds_fit)
self.classifier.ulabels_ = np.unique(self.classifier.labels_)
return
|
def fit(self, data, method='kmeans', **kwargs):
"""
fit classifiers from large dataset.
Parameters
----------
data : dict
A dict of data for clustering. Must contain
items with the same name as analytes used for
clustering.
method : str
A string defining the clustering method used. Can be:
* 'kmeans' : K-Means clustering algorithm
* 'meanshift' : Meanshift algorithm
n_clusters : int
*K-Means only*. The number of clusters to identify
bandwidth : float
*Meanshift only.*
The bandwidth value used during clustering.
If None, determined automatically. Note:
the data are scaled before clustering, so
this is not in the same units as the data.
bin_seeding : bool
*Meanshift only.*
Whether or not to use 'bin_seeding'. See
documentation for `sklearn.cluster.MeanShift`.
**kwargs :
passed to the selected clustering function (`fit_kmeans` or `fit_meanshift`).
Returns
-------
None
"""
self.method = method
ds_fit = self.fitting_data(data)
mdict = {'kmeans': self.fit_kmeans,
'meanshift': self.fit_meanshift}
clust = mdict[method]
self.classifier = clust(data=ds_fit, **kwargs)
# sort cluster centers by value of first column, to avoid random variation.
c0 = self.classifier.cluster_centers_.T[self.sort_by]
self.classifier.cluster_centers_ = self.classifier.cluster_centers_[np.argsort(c0)]
# recalculate the labels, so it's consistent with cluster centers
self.classifier.labels_ = self.classifier.predict(ds_fit)
self.classifier.ulabels_ = np.unique(self.classifier.labels_)
return
|
[
"fit",
"classifiers",
"from",
"large",
"dataset",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/classifier_obj.py#L139-L190
|
[
"def",
"fit",
"(",
"self",
",",
"data",
",",
"method",
"=",
"'kmeans'",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"method",
"=",
"method",
"ds_fit",
"=",
"self",
".",
"fitting_data",
"(",
"data",
")",
"mdict",
"=",
"{",
"'kmeans'",
":",
"self",
".",
"fit_kmeans",
",",
"'meanshift'",
":",
"self",
".",
"fit_meanshift",
"}",
"clust",
"=",
"mdict",
"[",
"method",
"]",
"self",
".",
"classifier",
"=",
"clust",
"(",
"data",
"=",
"ds_fit",
",",
"*",
"*",
"kwargs",
")",
"# sort cluster centers by value of first column, to avoid random variation.",
"c0",
"=",
"self",
".",
"classifier",
".",
"cluster_centers_",
".",
"T",
"[",
"self",
".",
"sort_by",
"]",
"self",
".",
"classifier",
".",
"cluster_centers_",
"=",
"self",
".",
"classifier",
".",
"cluster_centers_",
"[",
"np",
".",
"argsort",
"(",
"c0",
")",
"]",
"# recalculate the labels, so it's consistent with cluster centers",
"self",
".",
"classifier",
".",
"labels_",
"=",
"self",
".",
"classifier",
".",
"predict",
"(",
"ds_fit",
")",
"self",
".",
"classifier",
".",
"ulabels_",
"=",
"np",
".",
"unique",
"(",
"self",
".",
"classifier",
".",
"labels_",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
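The centre-sorting step at the end of fit, shown standalone (the centre coordinates are hypothetical; sort_by is the index of the feature used for ordering):

import numpy as np

centers = np.array([[5.0, 1.0],   # hypothetical fitted cluster centres
                    [0.0, 2.0]])
sort_by = 0
c0 = centers.T[sort_by]           # values of the sorting feature
print(centers[np.argsort(c0)])    # [[0. 2.] [5. 1.]]: cluster 0 is the low-value group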
test
|
classifier.predict
|
Label new data with cluster identities.
Parameters
----------
data : dict
A data dict containing the same analytes used to
fit the classifier.
Returns
-------
array of clusters the same length as the data.
|
latools/filtering/classifier_obj.py
|
def predict(self, data):
"""
Label new data with cluster identities.
Parameters
----------
data : dict
A data dict containing the same analytes used to
fit the classifier.
Returns
-------
array of clusters the same length as the data.
"""
size = data[self.analytes[0]].size
ds, sampled = self.format_data(data)
# predict clusters
cs = self.classifier.predict(ds)
# map clusters to original index
clusters = self.map_clusters(size, sampled, cs)
return clusters
|
def predict(self, data):
"""
Label new data with cluster identities.
Parameters
----------
data : dict
A data dict containing the same analytes used to
fit the classifier.
Returns
-------
array of clusters the same length as the data.
"""
size = data[self.analytes[0]].size
ds, sampled = self.format_data(data)
# predict clusters
cs = self.classifier.predict(ds)
# map clusters to original index
clusters = self.map_clusters(size, sampled, cs)
return clusters
|
[
"Label",
"new",
"data",
"with",
"cluster",
"identities",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/classifier_obj.py#L192-L218
|
[
"def",
"predict",
"(",
"self",
",",
"data",
")",
":",
"size",
"=",
"data",
"[",
"self",
".",
"analytes",
"[",
"0",
"]",
"]",
".",
"size",
"ds",
",",
"sampled",
"=",
"self",
".",
"format_data",
"(",
"data",
")",
"# predict clusters",
"cs",
"=",
"self",
".",
"classifier",
".",
"predict",
"(",
"ds",
")",
"# map clusters to original index",
"clusters",
"=",
"self",
".",
"map_clusters",
"(",
"size",
",",
"sampled",
",",
"cs",
")",
"return",
"clusters"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
classifier.map_clusters
|
Translate cluster identity back to original data size.
Parameters
----------
size : int
size of original dataset
sampled : array-like
integer array describing location of finite values
in original data.
clusters : array-like
integer array of cluster identities
Returns
-------
list of cluster identities the same length as original
data. Where original data are non-finite, returns -2.
|
latools/filtering/classifier_obj.py
|
def map_clusters(self, size, sampled, clusters):
"""
Translate cluster identity back to original data size.
Parameters
----------
size : int
size of original dataset
sampled : array-like
integer array describing location of finite values
in original data.
clusters : array-like
integer array of cluster identities
Returns
-------
list of cluster identities the same length as original
data. Where original data are non-finite, returns -2.
"""
ids = np.zeros(size, dtype=int)
ids[:] = -2
ids[sampled] = clusters
return ids
|
def map_clusters(self, size, sampled, clusters):
"""
Translate cluster identity back to original data size.
Parameters
----------
size : int
size of original dataset
sampled : array-like
integer array describing location of finite values
in original data.
clusters : array-like
integer array of cluster identities
Returns
-------
list of cluster identities the same length as original
data. Where original data are non-finite, returns -2.
"""
ids = np.zeros(size, dtype=int)
ids[:] = -2
ids[sampled] = clusters
return ids
|
[
"Translate",
"cluster",
"identity",
"back",
"to",
"original",
"data",
"size",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/classifier_obj.py#L220-L245
|
[
"def",
"map_clusters",
"(",
"self",
",",
"size",
",",
"sampled",
",",
"clusters",
")",
":",
"ids",
"=",
"np",
".",
"zeros",
"(",
"size",
",",
"dtype",
"=",
"int",
")",
"ids",
"[",
":",
"]",
"=",
"-",
"2",
"ids",
"[",
"sampled",
"]",
"=",
"clusters",
"return",
"ids"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
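A worked sketch of the index mapping (values hypothetical):

import numpy as np

size = 6                           # length of the original record
sampled = np.array([0, 1, 3, 4])   # indices that were finite
clusters = np.array([0, 0, 1, 1])  # labels predicted for finite rows only
ids = np.full(size, -2)            # -2 marks non-finite rows
ids[sampled] = clusters
print(ids)                         # [ 0  0 -2  1  1 -2]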
test
|
classifier.sort_clusters
|
Sort clusters by the concentration of a particular analyte.
Parameters
----------
data : dict
A dataset containing sort_by as a key.
cs : array-like
An array of clusters, the same length as values of data.
sort_by : str
analyte to sort the clusters by
Returns
-------
array of clusters, sorted by mean value of sort_by analyte.
|
latools/filtering/classifier_obj.py
|
def sort_clusters(self, data, cs, sort_by):
"""
Sort clusters by the concentration of a particular analyte.
Parameters
----------
data : dict
A dataset containing sort_by as a key.
cs : array-like
An array of clusters, the same length as values of data.
sort_by : str
analyte to sort the clusters by
Returns
-------
array of clusters, sorted by mean value of sort_by analyte.
"""
# label the clusters according to their contents
sdat = data[sort_by]
means = []
nclusts = np.arange(cs.max() + 1)
for c in nclusts:
means.append(np.nanmean(sdat[cs == c]))
# create ranks
means = np.array(means)
rank = np.zeros(means.size)
rank[np.argsort(means)] = np.arange(means.size)
csn = cs.copy()
for c, o in zip(nclusts, rank):
csn[cs == c] = o
return csn
|
def sort_clusters(self, data, cs, sort_by):
"""
Sort clusters by the concentration of a particular analyte.
Parameters
----------
data : dict
A dataset containing sort_by as a key.
cs : array-like
An array of clusters, the same length as values of data.
sort_by : str
analyte to sort the clusters by
Returns
-------
array of clusters, sorted by mean value of sort_by analyte.
"""
# label the clusters according to their contents
sdat = data[sort_by]
means = []
nclusts = np.arange(cs.max() + 1)
for c in nclusts:
means.append(np.nanmean(sdat[cs == c]))
# create ranks
means = np.array(means)
rank = np.zeros(means.size)
rank[np.argsort(means)] = np.arange(means.size)
csn = cs.copy()
for c, o in zip(nclusts, rank):
csn[cs == c] = o
return csn
|
[
"Sort",
"clusters",
"by",
"the",
"concentration",
"of",
"a",
"particular",
"analyte",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/classifier_obj.py#L247-L281
|
[
"def",
"sort_clusters",
"(",
"self",
",",
"data",
",",
"cs",
",",
"sort_by",
")",
":",
"# label the clusters according to their contents",
"sdat",
"=",
"data",
"[",
"sort_by",
"]",
"means",
"=",
"[",
"]",
"nclusts",
"=",
"np",
".",
"arange",
"(",
"cs",
".",
"max",
"(",
")",
"+",
"1",
")",
"for",
"c",
"in",
"nclusts",
":",
"means",
".",
"append",
"(",
"np",
".",
"nanmean",
"(",
"sdat",
"[",
"cs",
"==",
"c",
"]",
")",
")",
"# create ranks",
"means",
"=",
"np",
".",
"array",
"(",
"means",
")",
"rank",
"=",
"np",
".",
"zeros",
"(",
"means",
".",
"size",
")",
"rank",
"[",
"np",
".",
"argsort",
"(",
"means",
")",
"]",
"=",
"np",
".",
"arange",
"(",
"means",
".",
"size",
")",
"csn",
"=",
"cs",
".",
"copy",
"(",
")",
"for",
"c",
",",
"o",
"in",
"zip",
"(",
"nclusts",
",",
"rank",
")",
":",
"csn",
"[",
"cs",
"==",
"c",
"]",
"=",
"o",
"return",
"csn"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
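The rank-remapping logic, traced on a small hypothetical example:

import numpy as np

cs = np.array([1, 1, 0, 0, 2, 2])           # raw cluster labels
sdat = np.array([5., 5., 9., 9., 1., 1.])   # hypothetical sort_by analyte
means = np.array([np.nanmean(sdat[cs == c]) for c in range(cs.max() + 1)])
rank = np.zeros(means.size)
rank[np.argsort(means)] = np.arange(means.size)  # rank of each cluster's mean
csn = cs.copy()
for c, o in enumerate(rank):
    csn[cs == c] = o
print(csn)  # [1 1 2 2 0 0]: cluster numbers now increase with mean concentration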
test
|
get_date
|
Return a datetime object from a string, with optional time format.
Parameters
----------
datetime : str
Date-time as string in any sensible format.
time_format : datetime str (optional)
String describing the datetime format. If missing uses
dateutil.parser to guess time format.
|
latools/helpers/helpers.py
|
def get_date(datetime, time_format=None):
"""
Return a datetime object from a string, with optional time format.
Parameters
----------
datetime : str
Date-time as string in any sensible format.
time_format : datetime str (optional)
String describing the datetime format. If missing uses
dateutil.parser to guess time format.
"""
if time_format is None:
t = du.parser.parse(datetime)
else:
t = dt.datetime.strptime(datetime, time_format)
return t
|
def get_date(datetime, time_format=None):
"""
Return a datetime object from a string, with optional time format.
Parameters
----------
datetime : str
Date-time as string in any sensible format.
time_format : datetime str (optional)
String describing the datetime format. If missing uses
dateutil.parser to guess time format.
"""
if time_format is None:
t = du.parser.parse(datetime)
else:
t = dt.datetime.strptime(datetime, time_format)
return t
|
[
"Return",
"a",
"datetime",
"oject",
"from",
"a",
"string",
"with",
"optional",
"time",
"format",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L29-L45
|
[
"def",
"get_date",
"(",
"datetime",
",",
"time_format",
"=",
"None",
")",
":",
"if",
"time_format",
"is",
"None",
":",
"t",
"=",
"du",
".",
"parser",
".",
"parse",
"(",
"datetime",
")",
"else",
":",
"t",
"=",
"dt",
".",
"datetime",
".",
"strftime",
"(",
"datetime",
",",
"time_format",
")",
"return",
"t"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
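With the strptime fix above, the two branches behave as follows (standalone sketch; the module binds datetime as dt and uses dateutil's parser):

import datetime as dt
from dateutil import parser

print(parser.parse('2019-03-01 12:30'))                # format guessed by dateutil
print(dt.datetime.strptime('01/03/2019', '%d/%m/%Y'))  # format given explicitly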
test
|
get_total_n_points
|
Returns the total number of data points in values of dict.
Parameters
---------
d : dict
|
latools/helpers/helpers.py
|
def get_total_n_points(d):
"""
Returns the total number of data points in values of dict.
Parameters
---------
d : dict
"""
n = 0
for di in d.values():
n += len(di)
return n
|
def get_total_n_points(d):
"""
Returns the total number of data points in values of dict.
Parameters
---------
d : dict
"""
n = 0
for di in d.values():
n += len(di)
return n
|
[
"Returns",
"the",
"total",
"number",
"of",
"data",
"points",
"in",
"values",
"of",
"dict",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L47-L58
|
[
"def",
"get_total_n_points",
"(",
"d",
")",
":",
"n",
"=",
"0",
"for",
"di",
"in",
"d",
".",
"values",
"(",
")",
":",
"n",
"+=",
"len",
"(",
"di",
")",
"return",
"n"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
get_total_time_span
|
Returns total length of analysis.
|
latools/helpers/helpers.py
|
def get_total_time_span(d):
"""
Returns total length of analysis.
"""
tmax = 0
for di in d.values():
if di.uTime.max() > tmax:
tmax = di.uTime.max()
return tmax
|
def get_total_time_span(d):
"""
Returns total length of analysis.
"""
tmax = 0
for di in d.values():
if di.uTime.max() > tmax:
tmax = di.uTime.max()
return tmax
|
[
"Returns",
"total",
"length",
"of",
"analysis",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L60-L70
|
[
"def",
"get_total_time_span",
"(",
"d",
")",
":",
"tmax",
"=",
"0",
"for",
"di",
"in",
"d",
".",
"values",
"(",
")",
":",
"if",
"di",
".",
"uTime",
".",
"max",
"(",
")",
">",
"tmax",
":",
"tmax",
"=",
"di",
".",
"uTime",
".",
"max",
"(",
")",
"return",
"tmax"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
unitpicker
|
Determines the most appropriate plotting unit for data.
Parameters
----------
a : float or array-like
number to optimise. If array like, the 25% quantile is optimised.
llim : float
minimum allowable value in scaled data.
denominator : str (optional)
name of the denominator analyte, used to label ratio units.
focus_stage : str (optional)
analysis stage of the data ('rawdata', 'despiked', 'bkgsub',
'ratios' or 'calibrated'); determines which unit labels are used.
Returns
-------
(float, str)
(multiplier, unit)
|
latools/helpers/helpers.py
|
def unitpicker(a, llim=0.1, denominator=None, focus_stage=None):
"""
Determines the most appropriate plotting unit for data.
Parameters
----------
a : float or array-like
number to optimise. If array like, the 25% quantile is optimised.
llim : float
minimum allowable value in scaled data.
denominator : str (optional)
name of the denominator analyte, used to label ratio units.
focus_stage : str (optional)
analysis stage of the data ('rawdata', 'despiked', 'bkgsub',
'ratios' or 'calibrated'); determines which unit labels are used.
Returns
-------
(float, str)
(multiplier, unit)
"""
if not isinstance(a, (int, float)):
a = nominal_values(a)
a = np.percentile(a[~np.isnan(a)], 25)
if denominator is not None:
pd = pretty_element(denominator)
else:
pd = ''
if focus_stage == 'calibrated':
udict = {0: 'mol/mol ' + pd,
1: 'mmol/mol ' + pd,
2: '$\mu$mol/mol ' + pd,
3: 'nmol/mol ' + pd,
4: 'pmol/mol ' + pd,
5: 'fmol/mol ' + pd}
elif focus_stage == 'ratios':
udict = {0: 'counts/count ' + pd,
1: '$10^{-3}$ counts/count ' + pd,
2: '$10^{-6}$ counts/count ' + pd,
3: '$10^{-9}$ counts/count ' + pd,
4: '$10^{-12}$ counts/count ' + pd,
5: '$10^{-15}$ counts/count ' + pd}
elif focus_stage in ('rawdata', 'despiked', 'bkgsub'):
udict = {0: 'counts',
1: '$10^{-3}$ counts',
2: '$10^{-6}$ counts',
3: '$10^{-9}$ counts',
4: '$10^{-12}$ counts',
5: '$10^{-15}$ counts'}
else:
udict = {0: '', 1: '', 2: '', 3: '', 4: '', 5: ''}
a = abs(a)
n = 0
if a < llim:
while a < llim:
a *= 1000
n += 1
return float(1000**n), udict[n]
|
def unitpicker(a, llim=0.1, denominator=None, focus_stage=None):
"""
Determines the most appropriate plotting unit for data.
Parameters
----------
a : float or array-like
number to optimise. If array like, the 25% quantile is optimised.
llim : float
minimum allowable value in scaled data.
denominator : str (optional)
name of the denominator analyte, used to label ratio units.
focus_stage : str (optional)
analysis stage of the data ('rawdata', 'despiked', 'bkgsub',
'ratios' or 'calibrated'); determines which unit labels are used.
Returns
-------
(float, str)
(multiplier, unit)
"""
if not isinstance(a, (int, float)):
a = nominal_values(a)
a = np.percentile(a[~np.isnan(a)], 25)
if denominator is not None:
pd = pretty_element(denominator)
else:
pd = ''
if focus_stage == 'calibrated':
udict = {0: 'mol/mol ' + pd,
1: 'mmol/mol ' + pd,
2: '$\mu$mol/mol ' + pd,
3: 'nmol/mol ' + pd,
4: 'pmol/mol ' + pd,
5: 'fmol/mol ' + pd}
elif focus_stage == 'ratios':
udict = {0: 'counts/count ' + pd,
1: '$10^{-3}$ counts/count ' + pd,
2: '$10^{-6}$ counts/count ' + pd,
3: '$10^{-9}$ counts/count ' + pd,
4: '$10^{-12}$ counts/count ' + pd,
5: '$10^{-15}$ counts/count ' + pd}
elif focus_stage in ('rawdata', 'despiked', 'bkgsub'):
udict = {0: 'counts',
1: '$10^{-3}$ counts',
2: '$10^{-6}$ counts',
3: '$10^{-9}$ counts',
4: '$10^{-12}$ counts',
5: '$10^{-15}$ counts'}
else:
udict = {0: '', 1: '', 2: '', 3: '', 4: '', 5: ''}
a = abs(a)
n = 0
if a < llim:
while a < llim:
a *= 1000
n += 1
return float(1000**n), udict[n]
|
[
"Determines",
"the",
"most",
"appropriate",
"plotting",
"unit",
"for",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L72-L128
|
[
"def",
"unitpicker",
"(",
"a",
",",
"llim",
"=",
"0.1",
",",
"denominator",
"=",
"None",
",",
"focus_stage",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"a",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"a",
"=",
"nominal_values",
"(",
"a",
")",
"a",
"=",
"np",
".",
"percentile",
"(",
"a",
"[",
"~",
"np",
".",
"isnan",
"(",
"a",
")",
"]",
",",
"25",
")",
"if",
"denominator",
"is",
"not",
"None",
":",
"pd",
"=",
"pretty_element",
"(",
"denominator",
")",
"else",
":",
"pd",
"=",
"''",
"if",
"focus_stage",
"==",
"'calibrated'",
":",
"udict",
"=",
"{",
"0",
":",
"'mol/mol '",
"+",
"pd",
",",
"1",
":",
"'mmol/mol '",
"+",
"pd",
",",
"2",
":",
"'$\\mu$mol/mol '",
"+",
"pd",
",",
"3",
":",
"'nmol/mol '",
"+",
"pd",
",",
"4",
":",
"'pmol/mol '",
"+",
"pd",
",",
"5",
":",
"'fmol/mol '",
"+",
"pd",
"}",
"elif",
"focus_stage",
"==",
"'ratios'",
":",
"udict",
"=",
"{",
"0",
":",
"'counts/count '",
"+",
"pd",
",",
"1",
":",
"'$10^{-3}$ counts/count '",
"+",
"pd",
",",
"2",
":",
"'$10^{-6}$ counts/count '",
"+",
"pd",
",",
"3",
":",
"'$10^{-9}$ counts/count '",
"+",
"pd",
",",
"4",
":",
"'$10^{-12}$ counts/count '",
"+",
"pd",
",",
"5",
":",
"'$10^{-15}$ counts/count '",
"+",
"pd",
"}",
"elif",
"focus_stage",
"in",
"(",
"'rawdata'",
",",
"'despiked'",
",",
"'bkgsub'",
")",
":",
"udict",
"=",
"udict",
"=",
"{",
"0",
":",
"'counts'",
",",
"1",
":",
"'$10^{-3}$ counts'",
",",
"2",
":",
"'$10^{-6}$ counts'",
",",
"3",
":",
"'$10^{-9}$ counts'",
",",
"4",
":",
"'$10^{-12}$ counts'",
",",
"5",
":",
"'$10^{-15}$ counts'",
"}",
"else",
":",
"udict",
"=",
"{",
"0",
":",
"''",
",",
"1",
":",
"''",
",",
"2",
":",
"''",
",",
"3",
":",
"''",
",",
"4",
":",
"''",
",",
"5",
":",
"''",
"}",
"a",
"=",
"abs",
"(",
"a",
")",
"n",
"=",
"0",
"if",
"a",
"<",
"llim",
":",
"while",
"a",
"<",
"llim",
":",
"a",
"*=",
"1000",
"n",
"+=",
"1",
"return",
"float",
"(",
"1000",
"**",
"n",
")",
",",
"udict",
"[",
"n",
"]"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
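The multiplier search reduces to a simple loop; a sketch with a hypothetical input value:

# sketch of the scaling loop for a = 4e-5 with the default llim = 0.1
a, n = abs(4e-5), 0
while a < 0.1:
    a *= 1000
    n += 1
print(1000 ** n)  # 1000000, i.e. plot in micro-units (udict[2])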
test
|
pretty_element
|
Returns formatted element name.
Parameters
----------
s : str
of format [A-Z][a-z]?[0-9]+
Returns
-------
str
LaTeX formatted string with superscript numbers.
|
latools/helpers/helpers.py
|
def pretty_element(s):
"""
Returns formatted element name.
Parameters
----------
s : str
of format [A-Z][a-z]?[0-9]+
Returns
-------
str
LaTeX formatted string with superscript numbers.
"""
el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]
m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]
return '$^{' + m + '}$' + el
|
def pretty_element(s):
"""
Returns formatted element name.
Parameters
----------
s : str
of format [A-Z][a-z]?[0-9]+
Returns
-------
str
LaTeX formatted string with superscript numbers.
"""
el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]
m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]
return '$^{' + m + '}$' + el
|
[
"Returns",
"formatted",
"element",
"name",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L130-L147
|
[
"def",
"pretty_element",
"(",
"s",
")",
":",
"el",
"=",
"re",
".",
"match",
"(",
"'.*?([A-z]{1,3}).*?'",
",",
"s",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"m",
"=",
"re",
".",
"match",
"(",
"'.*?([0-9]{1,3}).*?'",
",",
"s",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"return",
"'$^{'",
"+",
"m",
"+",
"'}$'",
"+",
"el"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
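Traced on a hypothetical analyte name:

import re

s = 'Mg24'
el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]  # 'Mg'
m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]   # '24'
print('$^{' + m + '}$' + el)                        # $^{24}$Mg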
test
|
analyte_2_namemass
|
Converts analytes in format '27Al' to 'Al27'.
Parameters
----------
s : str
of format [0-9]{1,3}[A-z]{1,3}
Returns
-------
str
Name in format [A-z]{1,3}[0-9]{1,3}
|
latools/helpers/helpers.py
|
def analyte_2_namemass(s):
"""
Converts analytes in format '27Al' to 'Al27'.
Parameters
----------
s : str
of format [0-9]{1,3}[A-z]{1,3}
Returns
-------
str
Name in format [A-z]{1,3}[0-9]{1,3}
"""
el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]
m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]
return el + m
|
def analyte_2_namemass(s):
"""
Converts analytes in format '27Al' to 'Al27'.
Parameters
----------
s : str
of format [0-9]{1,3}[A-z]{1,3}
Returns
-------
str
Name in format [A-z]{1,3}[0-9]{1,3}
"""
el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]
m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]
return el + m
|
[
"Converts",
"analytes",
"in",
"format",
"27Al",
"to",
"Al27",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L149-L166
|
[
"def",
"analyte_2_namemass",
"(",
"s",
")",
":",
"el",
"=",
"re",
".",
"match",
"(",
"'.*?([A-z]{1,3}).*?'",
",",
"s",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"m",
"=",
"re",
".",
"match",
"(",
"'.*?([0-9]{1,3}).*?'",
",",
"s",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"return",
"el",
"+",
"m"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
analyte_2_massname
|
Converts analytes in format 'Al27' to '27Al'.
Parameters
----------
s : str
of format [A-z]{1,3}[0-9]{1,3}
Returns
-------
str
Name in format [0-9]{1,3}[A-z]{1,3}
|
latools/helpers/helpers.py
|
def analyte_2_massname(s):
"""
Converts analytes in format 'Al27' to '27Al'.
Parameters
----------
s : str
of format [A-z]{1,3}[0-9]{1,3}
Returns
-------
str
Name in format [0-9]{1,3}[A-z]{1,3}
"""
el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]
m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]
return m + el
|
def analyte_2_massname(s):
"""
Converts analytes in format 'Al27' to '27Al'.
Parameters
----------
s : str
of format [A-z]{1,3}[0-9]{1,3}
Returns
-------
str
Name in format [0-9]{1,3}[A-z]{1,3}
"""
el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]
m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]
return m + el
|
[
"Converts",
"analytes",
"in",
"format",
"Al27",
"to",
"27Al",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L168-L185
|
[
"def",
"analyte_2_massname",
"(",
"s",
")",
":",
"el",
"=",
"re",
".",
"match",
"(",
"'.*?([A-z]{1,3}).*?'",
",",
"s",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"m",
"=",
"re",
".",
"match",
"(",
"'.*?([0-9]{1,3}).*?'",
",",
"s",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"return",
"m",
"+",
"el"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
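The two converters share the same element/mass extraction; a round-trip sketch (_parts is a hypothetical helper, not part of the module):

import re

def _parts(s):
    # hypothetical helper: extract element symbol and mass number
    el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]
    m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]
    return el, m

el, m = _parts('27Al')
print(el + m)  # 'Al27' -- the analyte_2_namemass form
print(m + el)  # '27Al' -- the analyte_2_massname form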
test
|
collate_data
|
Copy all csvs in nested directory to single directory.
Function to copy all csvs from a directory, and place
them in a new directory.
Parameters
----------
in_dir : str
Input directory containing csv files in subfolders
extension : str
The extension that identifies your data files.
Defaults to '.csv'.
out_dir : str
Destination directory
Returns
-------
None
|
latools/helpers/helpers.py
|
def collate_data(in_dir, extension='.csv', out_dir=None):
"""
Copy all csvs in nested directory to single directory.
Function to copy all csvs from a directory, and place
them in a new directory.
Parameters
----------
in_dir : str
Input directory containing csv files in subfolders
extension : str
The extension that identifies your data files.
Defaults to '.csv'.
out_dir : str
Destination directory
Returns
-------
None
"""
if out_dir is None:
out_dir = './' + re.search('^\.(.*)', extension).groups(0)[0]
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
for p, d, fs in os.walk(in_dir):
for f in fs:
if extension in f:
shutil.copy(p + '/' + f, out_dir + '/' + f)
return
|
def collate_data(in_dir, extension='.csv', out_dir=None):
"""
Copy all csvs in nested directory to single directory.
Function to copy all csvs from a directory, and place
them in a new directory.
Parameters
----------
in_dir : str
Input directory containing csv files in subfolders
extension : str
The extension that identifies your data files.
Defaults to '.csv'.
out_dir : str
Destination directory
Returns
-------
None
"""
if out_dir is None:
out_dir = './' + re.search('^\.(.*)', extension).groups(0)[0]
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
for p, d, fs in os.walk(in_dir):
for f in fs:
if extension in f:
shutil.copy(p + '/' + f, out_dir + '/' + f)
return
|
[
"Copy",
"all",
"csvs",
"in",
"nested",
"directroy",
"to",
"single",
"directory",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L187-L218
|
[
"def",
"collate_data",
"(",
"in_dir",
",",
"extension",
"=",
"'.csv'",
",",
"out_dir",
"=",
"None",
")",
":",
"if",
"out_dir",
"is",
"None",
":",
"out_dir",
"=",
"'./'",
"+",
"re",
".",
"search",
"(",
"'^\\.(.*)'",
",",
"extension",
")",
".",
"groups",
"(",
"0",
")",
"[",
"0",
"]",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"out_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"out_dir",
")",
"for",
"p",
",",
"d",
",",
"fs",
"in",
"os",
".",
"walk",
"(",
"in_dir",
")",
":",
"for",
"f",
"in",
"fs",
":",
"if",
"extension",
"in",
"f",
":",
"shutil",
".",
"copy",
"(",
"p",
"+",
"'/'",
"+",
"f",
",",
"out_dir",
"+",
"'/'",
"+",
"f",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
bool_2_indices
|
Convert boolean array into a 2D array of (start, stop) pairs.
|
latools/helpers/helpers.py
|
def bool_2_indices(a):
"""
Convert boolean array into a 2D array of (start, stop) pairs.
"""
if any(a):
lims = []
lims.append(np.where(a[:-1] != a[1:])[0])
if a[0]:
lims.append([0])
if a[-1]:
lims.append([len(a) - 1])
lims = np.concatenate(lims)
lims.sort()
return np.reshape(lims, (lims.size // 2, 2))
else:
return None
|
def bool_2_indices(a):
"""
Convert boolean array into a 2D array of (start, stop) pairs.
"""
if any(a):
lims = []
lims.append(np.where(a[:-1] != a[1:])[0])
if a[0]:
lims.append([0])
if a[-1]:
lims.append([len(a) - 1])
lims = np.concatenate(lims)
lims.sort()
return np.reshape(lims, (lims.size // 2, 2))
else:
return None
|
[
"Convert",
"boolean",
"array",
"into",
"a",
"2D",
"array",
"of",
"(",
"start",
"stop",
")",
"pairs",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L220-L237
|
[
"def",
"bool_2_indices",
"(",
"a",
")",
":",
"if",
"any",
"(",
"a",
")",
":",
"lims",
"=",
"[",
"]",
"lims",
".",
"append",
"(",
"np",
".",
"where",
"(",
"a",
"[",
":",
"-",
"1",
"]",
"!=",
"a",
"[",
"1",
":",
"]",
")",
"[",
"0",
"]",
")",
"if",
"a",
"[",
"0",
"]",
":",
"lims",
".",
"append",
"(",
"[",
"0",
"]",
")",
"if",
"a",
"[",
"-",
"1",
"]",
":",
"lims",
".",
"append",
"(",
"[",
"len",
"(",
"a",
")",
"-",
"1",
"]",
")",
"lims",
"=",
"np",
".",
"concatenate",
"(",
"lims",
")",
"lims",
".",
"sort",
"(",
")",
"return",
"np",
".",
"reshape",
"(",
"lims",
",",
"(",
"lims",
".",
"size",
"//",
"2",
",",
"2",
")",
")",
"else",
":",
"return",
"None"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
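Usage sketch, assuming the function is importable from the module path given above:

import numpy as np
from latools.helpers.helpers import bool_2_indices

a = np.array([False, True, True, False, True])
print(bool_2_indices(a))
# [[0 2]
#  [3 4]] -- one row of transition indices bracketing each True run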
test
|
enumerate_bool
|
Consecutively numbers contiguous booleans in array.
i.e. a boolean sequence, and the resulting numbering:
T F T T T F T F F F T T F
0 - 1 1 1 - 2 - - - 3 3 -
where ' - ' marks False values, which are not numbered.
Parameters
----------
bool_array : array_like
Array of booleans.
nstart : int
The number of the first boolean group.
|
latools/helpers/helpers.py
|
def enumerate_bool(bool_array, nstart=0):
"""
Consecutively numbers contiguous booleans in array.
i.e. a boolean sequence, and the resulting numbering:
T F T T T F T F F F T T F
0 - 1 1 1 - 2 - - - 3 3 -
where ' - ' marks False values, which are not numbered.
Parameters
----------
bool_array : array_like
Array of booleans.
nstart : int
The number of the first boolean group.
"""
ind = bool_2_indices(bool_array)
ns = np.full(bool_array.size, nstart, dtype=int)
for n, lims in enumerate(ind):
ns[lims[0]:lims[-1] + 1] = nstart + n + 1
return ns
|
def enumerate_bool(bool_array, nstart=0):
"""
Consecutively numbers contiguous booleans in array.
i.e. a boolean sequence, and the resulting numbering:
T F T T T F T F F F T T F
0 - 1 1 1 - 2 - - - 3 3 -
where ' - ' marks False values, which are not numbered.
Parameters
----------
bool_array : array_like
Array of booleans.
nstart : int
The number of the first boolean group.
"""
ind = bool_2_indices(bool_array)
ns = np.full(bool_array.size, nstart, dtype=int)
for n, lims in enumerate(ind):
ns[lims[0]:lims[-1] + 1] = nstart + n + 1
return ns
|
[
"Consecutively",
"numbers",
"contiguous",
"booleans",
"in",
"array",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L239-L260
|
[
"def",
"enumerate_bool",
"(",
"bool_array",
",",
"nstart",
"=",
"0",
")",
":",
"ind",
"=",
"bool_2_indices",
"(",
"bool_array",
")",
"ns",
"=",
"np",
".",
"full",
"(",
"bool_array",
".",
"size",
",",
"nstart",
",",
"dtype",
"=",
"int",
")",
"for",
"n",
",",
"lims",
"in",
"enumerate",
"(",
"ind",
")",
":",
"ns",
"[",
"lims",
"[",
"0",
"]",
":",
"lims",
"[",
"-",
"1",
"]",
"+",
"1",
"]",
"=",
"nstart",
"+",
"n",
"+",
"1",
"return",
"ns"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
tuples_2_bool
|
Generate boolean array from list of limit tuples.
Parameters
----------
tuples : array_like
[n, 2] array of (start, end) values
x : array_like
x scale the tuples are mapped to
Returns
-------
array_like
boolean array, True where x is between each pair of tuples.
|
latools/helpers/helpers.py
|
def tuples_2_bool(tuples, x):
"""
Generate boolean array from list of limit tuples.
Parameters
----------
tuples : array_like
[n, 2] array of (start, end) values
x : array_like
x scale the tuples are mapped to
Returns
-------
array_like
boolean array, True where x is between each pair of tuples.
"""
if np.ndim(tuples) == 1:
tuples = [tuples]
out = np.zeros(x.size, dtype=bool)
for l, u in tuples:
out[(x > l) & (x < u)] = True
return out
|
def tuples_2_bool(tuples, x):
"""
Generate boolean array from list of limit tuples.
Parameters
----------
tuples : array_like
[2, n] array of (start, end) values
x : array_like
x scale the tuples are mapped to
Returns
-------
array_like
boolean array, True where x is between each pair of tuples.
"""
if np.ndim(tuples) == 1:
tuples = [tuples]
out = np.zeros(x.size, dtype=bool)
for l, u in tuples:
out[(x > l) & (x < u)] = True
return out
|
[
"Generate",
"boolean",
"array",
"from",
"list",
"of",
"limit",
"tuples",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L262-L284
|
[
"def",
"tuples_2_bool",
"(",
"tuples",
",",
"x",
")",
":",
"if",
"np",
".",
"ndim",
"(",
"tuples",
")",
"==",
"1",
":",
"tuples",
"=",
"[",
"tuples",
"]",
"out",
"=",
"np",
".",
"zeros",
"(",
"x",
".",
"size",
",",
"dtype",
"=",
"bool",
")",
"for",
"l",
",",
"u",
"in",
"tuples",
":",
"out",
"[",
"(",
"x",
">",
"l",
")",
"&",
"(",
"x",
"<",
"u",
")",
"]",
"=",
"True",
"return",
"out"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
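Usage sketch, assuming the function is importable from the module path given above:

import numpy as np
from latools.helpers.helpers import tuples_2_bool

x = np.arange(10)
print(tuples_2_bool([(2, 5), (7, 9)], x))
# [False False False  True  True False False False  True False]
# limits are exclusive: x == 2, 5, 7 and 9 remain False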
test
|
rolling_window
|
Returns an (n, window) rolling - window array of the data.
Parameters
----------
a : array_like
Array to calculate the rolling window of
window : int
The width of the rolling window.
pad : same as dtype(a), str or None
Value used to fill the extra rows at the ends, or one of
'ends', 'mean_ends' or 'repeat_ends' to pad from the data itself.
Returns
-------
array_like
An array of shape (n, window), where n is len(a) - window + 1
if pad is None, or len(a) if pad is not None.
|
latools/helpers/helpers.py
|
def rolling_window(a, window, pad=None):
"""
Returns an (n, window) rolling - window array of the data.
Parameters
----------
a : array_like
Array to calculate the rolling window of
window : int
The width of the rolling window.
pad : same as dtype(a), str or None
Value used to fill the extra rows at the ends, or one of
'ends', 'mean_ends' or 'repeat_ends' to pad from the data itself.
Returns
-------
array_like
An array of shape (n, window), where n is len(a) - window + 1
if pad is None, or len(a) if pad is not None.
"""
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1], )
out = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
# pad shape
if window % 2 == 0:
npre = window // 2 - 1
npost = window // 2
else:
npre = npost = window // 2
if isinstance(pad, str):
if pad == 'ends':
prepad = np.full((npre, window), a[0])
postpad = np.full((npost, window), a[-1])
elif pad == 'mean_ends':
prepad = np.full((npre, window), np.mean(a[:(window // 2)]))
postpad = np.full((npost, window), np.mean(a[-(window // 2):]))
elif pad == 'repeat_ends':
prepad = np.full((npre, window), out[0])
postpad = np.full((npost, window), out[-1])
else:
raise ValueError("If pad is a string, it must be either 'ends', 'mean_ends' or 'repeat_ends'.")
return np.concatenate((prepad, out, postpad))
elif pad is not None:
pre_blankpad = np.empty(((npre, window)))
pre_blankpad[:] = pad
post_blankpad = np.empty(((npost, window)))
post_blankpad[:] = pad
return np.concatenate([pre_blankpad, out, post_blankpad])
else:
return out
|
def rolling_window(a, window, pad=None):
"""
Returns an (n, window) rolling - window array of the data.
Parameters
----------
a : array_like
Array to calculate the rolling window of
window : int
The width of the rolling window.
pad : same as dtype(a), str or None
Value used to fill the extra rows at the ends, or one of
'ends', 'mean_ends' or 'repeat_ends' to pad from the data itself.
Returns
-------
array_like
An array of shape (n, window), where n is len(a) - window + 1
if pad is None, or len(a) if pad is not None.
"""
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1], )
out = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
# pad shape
if window % 2 == 0:
npre = window // 2 - 1
npost = window // 2
else:
npre = npost = window // 2
if isinstance(pad, str):
if pad == 'ends':
prepad = np.full((npre, window), a[0])
postpad = np.full((npost, window), a[-1])
elif pad == 'mean_ends':
prepad = np.full((npre, window), np.mean(a[:(window // 2)]))
postpad = np.full((npost, window), np.mean(a[-(window // 2):]))
elif pad == 'repeat_ends':
prepad = np.full((npre, window), out[0])
postpad = np.full((npost, window), out[-1])
else:
raise ValueError("If pad is a string, it must be either 'ends', 'mean_ends' or 'repeat_ends'.")
return np.concatenate((prepad, out, postpad))
elif pad is not None:
pre_blankpad = np.empty(((npre, window)))
pre_blankpad[:] = pad
post_blankpad = np.empty(((npost, window)))
post_blankpad[:] = pad
return np.concatenate([pre_blankpad, out, post_blankpad])
else:
return out
|
[
"Returns",
"(",
"win",
"len",
"(",
"a",
"))",
"rolling",
"-",
"window",
"array",
"of",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L328-L377
|
[
"def",
"rolling_window",
"(",
"a",
",",
"window",
",",
"pad",
"=",
"None",
")",
":",
"shape",
"=",
"a",
".",
"shape",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"a",
".",
"shape",
"[",
"-",
"1",
"]",
"-",
"window",
"+",
"1",
",",
"window",
")",
"strides",
"=",
"a",
".",
"strides",
"+",
"(",
"a",
".",
"strides",
"[",
"-",
"1",
"]",
",",
")",
"out",
"=",
"np",
".",
"lib",
".",
"stride_tricks",
".",
"as_strided",
"(",
"a",
",",
"shape",
"=",
"shape",
",",
"strides",
"=",
"strides",
")",
"# pad shape",
"if",
"window",
"%",
"2",
"==",
"0",
":",
"npre",
"=",
"window",
"//",
"2",
"-",
"1",
"npost",
"=",
"window",
"//",
"2",
"else",
":",
"npre",
"=",
"npost",
"=",
"window",
"//",
"2",
"if",
"isinstance",
"(",
"pad",
",",
"str",
")",
":",
"if",
"pad",
"==",
"'ends'",
":",
"prepad",
"=",
"np",
".",
"full",
"(",
"(",
"npre",
",",
"window",
")",
",",
"a",
"[",
"0",
"]",
")",
"postpad",
"=",
"np",
".",
"full",
"(",
"(",
"npost",
",",
"window",
")",
",",
"a",
"[",
"-",
"1",
"]",
")",
"elif",
"pad",
"==",
"'mean_ends'",
":",
"prepad",
"=",
"np",
".",
"full",
"(",
"(",
"npre",
",",
"window",
")",
",",
"np",
".",
"mean",
"(",
"a",
"[",
":",
"(",
"window",
"//",
"2",
")",
"]",
")",
")",
"postpad",
"=",
"np",
".",
"full",
"(",
"(",
"npost",
",",
"window",
")",
",",
"np",
".",
"mean",
"(",
"a",
"[",
"-",
"(",
"window",
"//",
"2",
")",
":",
"]",
")",
")",
"elif",
"pad",
"==",
"'repeat_ends'",
":",
"prepad",
"=",
"np",
".",
"full",
"(",
"(",
"npre",
",",
"window",
")",
",",
"out",
"[",
"0",
"]",
")",
"postpad",
"=",
"np",
".",
"full",
"(",
"(",
"npost",
",",
"window",
")",
",",
"out",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"If pad is a string, it must be either 'ends', 'mean_ends' or 'repeat_ends'.\"",
")",
"return",
"np",
".",
"concatenate",
"(",
"(",
"prepad",
",",
"out",
",",
"postpad",
")",
")",
"elif",
"pad",
"is",
"not",
"None",
":",
"pre_blankpad",
"=",
"np",
".",
"empty",
"(",
"(",
"(",
"npre",
",",
"window",
")",
")",
")",
"pre_blankpad",
"[",
":",
"]",
"=",
"pad",
"post_blankpad",
"=",
"np",
".",
"empty",
"(",
"(",
"(",
"npost",
",",
"window",
")",
")",
")",
"post_blankpad",
"[",
":",
"]",
"=",
"pad",
"return",
"np",
".",
"concatenate",
"(",
"[",
"pre_blankpad",
",",
"out",
",",
"post_blankpad",
"]",
")",
"else",
":",
"return",
"out"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
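Usage sketch, assuming the function is importable from the module path given above:

import numpy as np
from latools.helpers.helpers import rolling_window

a = np.arange(6, dtype=float)
print(rolling_window(a, 3))                    # 4 rows: [0 1 2], [1 2 3], [2 3 4], [3 4 5]
print(rolling_window(a, 3, pad=np.nan).shape)  # (6, 3): NaN rows restore the input length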
test
|
fastsmooth
|
Returns rolling - window smooth of a.
Function to efficiently calculate the rolling mean of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to smooth.
win : int
The width of the rolling window.
Returns
-------
array_like
The rolling mean of a, the same length as a.
|
latools/helpers/helpers.py
|
def fastsmooth(a, win=11):
"""
Returns rolling - window smooth of a.
Function to efficiently calculate the rolling mean of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to smooth.
win : int
The width of the rolling window.
Returns
-------
array_like
The rolling mean of a, the same length as a.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
win += 1 # add 1 to window if it is even.
kernel = np.ones(win) / win
npad = int((win - 1) / 2)
spad = np.full(npad + 1, np.mean(a[:(npad + 1)]))
epad = np.full(npad - 1, np.mean(a[-(npad - 1):]))
return np.concatenate([spad, np.convolve(a, kernel, 'valid'), epad])
|
def fastsmooth(a, win=11):
"""
Returns rolling - window smooth of a.
Function to efficiently calculate the rolling mean of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to smooth.
win : int
The width of the rolling window.
Returns
-------
array_like
The rolling mean of a, the same length as a.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
win += 1 # add 1 to window if it is even.
kernel = np.ones(win) / win
npad = int((win - 1) / 2)
spad = np.full(npad + 1, np.mean(a[:(npad + 1)]))
epad = np.full(npad - 1, np.mean(a[-(npad - 1):]))
return np.concatenate([spad, np.convolve(a, kernel, 'valid'), epad])
|
[
"Returns",
"rolling",
"-",
"window",
"smooth",
"of",
"a",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L379-L406
|
[
"def",
"fastsmooth",
"(",
"a",
",",
"win",
"=",
"11",
")",
":",
"# check to see if 'window' is odd (even does not work)",
"if",
"win",
"%",
"2",
"==",
"0",
":",
"win",
"+=",
"1",
"# add 1 to window if it is even.",
"kernel",
"=",
"np",
".",
"ones",
"(",
"win",
")",
"/",
"win",
"npad",
"=",
"int",
"(",
"(",
"win",
"-",
"1",
")",
"/",
"2",
")",
"spad",
"=",
"np",
".",
"full",
"(",
"npad",
"+",
"1",
",",
"np",
".",
"mean",
"(",
"a",
"[",
":",
"(",
"npad",
"+",
"1",
")",
"]",
")",
")",
"epad",
"=",
"np",
".",
"full",
"(",
"npad",
"-",
"1",
",",
"np",
".",
"mean",
"(",
"a",
"[",
"-",
"(",
"npad",
"-",
"1",
")",
":",
"]",
")",
")",
"return",
"np",
".",
"concatenate",
"(",
"[",
"spad",
",",
"np",
".",
"convolve",
"(",
"a",
",",
"kernel",
",",
"'valid'",
")",
",",
"epad",
"]",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
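Usage sketch, assuming the function is importable from the module path given above:

import numpy as np
from latools.helpers.helpers import fastsmooth

rng = np.random.default_rng(0)
a = np.sin(np.linspace(0, 3, 100)) + rng.normal(0, 0.1, 100)
s = fastsmooth(a, win=11)
print(a.size, s.size)  # 100 100: end-padding keeps the output length equal to the input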
test
|
fastgrad
|
Returns rolling - window gradient of a.
Function to efficiently calculate the rolling gradient of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
Gradient of a, assuming a constant integer x - scale.
|
latools/helpers/helpers.py
|
def fastgrad(a, win=11):
"""
Returns rolling - window gradient of a.
Function to efficiently calculate the rolling gradient of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
Gradient of a, assuming a constant integer x - scale.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
win += 1 # add 1 to window if it is even.
# trick for efficient 'rolling' computation in numpy
# shape = a.shape[:-1] + (a.shape[-1] - win + 1, win)
# strides = a.strides + (a.strides[-1], )
# wins = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
wins = rolling_window(a, win, 'ends')
# apply rolling gradient to data
a = map(lambda x: np.polyfit(np.arange(win), x, 1)[0], wins)
return np.array(list(a))
|
def fastgrad(a, win=11):
"""
Returns rolling - window gradient of a.
Function to efficiently calculate the rolling gradient of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
Gradient of a, assuming a constant integer x - scale.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
win += 1 # add 1 to window if it is even.
# trick for efficient 'rolling' computation in numpy
# shape = a.shape[:-1] + (a.shape[-1] - win + 1, win)
# strides = a.strides + (a.strides[-1], )
# wins = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
wins = rolling_window(a, win, 'ends')
# apply rolling gradient to data
a = map(lambda x: np.polyfit(np.arange(win), x, 1)[0], wins)
return np.array(list(a))
|
[
"Returns",
"rolling",
"-",
"window",
"gradient",
"of",
"a",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L408-L439
|
[
"def",
"fastgrad",
"(",
"a",
",",
"win",
"=",
"11",
")",
":",
"# check to see if 'window' is odd (even does not work)",
"if",
"win",
"%",
"2",
"==",
"0",
":",
"win",
"+=",
"1",
"# subtract 1 from window if it is even.",
"# trick for efficient 'rolling' computation in numpy",
"# shape = a.shape[:-1] + (a.shape[-1] - win + 1, win)",
"# strides = a.strides + (a.strides[-1], )",
"# wins = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)",
"wins",
"=",
"rolling_window",
"(",
"a",
",",
"win",
",",
"'ends'",
")",
"# apply rolling gradient to data",
"a",
"=",
"map",
"(",
"lambda",
"x",
":",
"np",
".",
"polyfit",
"(",
"np",
".",
"arange",
"(",
"win",
")",
",",
"x",
",",
"1",
")",
"[",
"0",
"]",
",",
"wins",
")",
"return",
"np",
".",
"array",
"(",
"list",
"(",
"a",
")",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
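Usage sketch on an exactly linear signal, assuming the function is importable from the module path given above:

import numpy as np
from latools.helpers.helpers import fastgrad

a = 2.0 * np.arange(20, dtype=float)  # exact slope of 2 everywhere
g = fastgrad(a, win=5)
print(np.allclose(g[2:-2], 2.0))      # True: interior slope recovered; padded ends differ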
test
|
calc_grads
|
Calculate gradients of values in dat.
Parameters
----------
x : array like
Independent variable for items in dat.
dat : dict
{key: dependent_variable} pairs
keys : str or array-like
Which keys in dict to calculate the gradient of.
win : int
The size of the rolling window for gradient calculation
Returns
-------
dict of gradients
|
latools/helpers/helpers.py
|
def calc_grads(x, dat, keys=None, win=5):
"""
Calculate gradients of values in dat.
Parameters
----------
x : array like
Independent variable for items in dat.
dat : dict
{key: dependent_variable} pairs
keys : str or array-like
Which keys in dict to calculate the gradient of.
win : int
The size of the rolling window for gradient calculation
Returns
-------
dict of gradients
"""
if keys is None:
keys = dat.keys()
def grad(xy):
if (~np.isnan(xy)).all():
try:
return np.polyfit(xy[0], xy[1], 1)[0]
except ValueError:
return np.nan
else:
return np.nan
xs = rolling_window(x, win, pad='repeat_ends')
grads = Bunch()
for k in keys:
d = nominal_values(rolling_window(dat[k], win, pad='repeat_ends'))
grads[k] = np.array(list(map(grad, zip(xs, d))))
return grads
|
def calc_grads(x, dat, keys=None, win=5):
"""
Calculate gradients of values in dat.
Parameters
----------
x : array like
Independent variable for items in dat.
dat : dict
{key: dependent_variable} pairs
keys : str or array-like
Which keys in dict to calculate the gradient of.
win : int
The size of the rolling window for gradient calculation
Returns
-------
dict of gradients
"""
if keys is None:
keys = dat.keys()
def grad(xy):
if (~np.isnan(xy)).all():
try:
return np.polyfit(xy[0], xy[1], 1)[0]
except ValueError:
return np.nan
else:
return np.nan
xs = rolling_window(x, win, pad='repeat_ends')
grads = Bunch()
for k in keys:
d = nominal_values(rolling_window(dat[k], win, pad='repeat_ends'))
grads[k] = np.array(list(map(grad, zip(xs, d))))
return grads
|
[
"Calculate",
"gradients",
"of",
"values",
"in",
"dat",
".",
"Parameters",
"----------",
"x",
":",
"array",
"like",
"Independent",
"variable",
"for",
"items",
"in",
"dat",
".",
"dat",
":",
"dict",
"{",
"key",
":",
"dependent_variable",
"}",
"pairs",
"keys",
":",
"str",
"or",
"array",
"-",
"like",
"Which",
"keys",
"in",
"dict",
"to",
"calculate",
"the",
"gradient",
"of",
".",
"win",
":",
"int",
"The",
"side",
"of",
"the",
"rolling",
"window",
"for",
"gradient",
"calculation"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L441-L479
|
[
"def",
"calc_grads",
"(",
"x",
",",
"dat",
",",
"keys",
"=",
"None",
",",
"win",
"=",
"5",
")",
":",
"if",
"keys",
"is",
"None",
":",
"keys",
"=",
"dat",
".",
"keys",
"(",
")",
"def",
"grad",
"(",
"xy",
")",
":",
"if",
"(",
"~",
"np",
".",
"isnan",
"(",
"xy",
")",
")",
".",
"all",
"(",
")",
":",
"try",
":",
"return",
"np",
".",
"polyfit",
"(",
"xy",
"[",
"0",
"]",
",",
"xy",
"[",
"1",
"]",
",",
"1",
")",
"[",
"0",
"]",
"except",
"ValueError",
":",
"return",
"np",
".",
"nan",
"else",
":",
"return",
"np",
".",
"nan",
"xs",
"=",
"rolling_window",
"(",
"x",
",",
"win",
",",
"pad",
"=",
"'repeat_ends'",
")",
"grads",
"=",
"Bunch",
"(",
")",
"for",
"k",
"in",
"keys",
":",
"d",
"=",
"nominal_values",
"(",
"rolling_window",
"(",
"dat",
"[",
"k",
"]",
",",
"win",
",",
"pad",
"=",
"'repeat_ends'",
")",
")",
"grads",
"[",
"k",
"]",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"grad",
",",
"zip",
"(",
"xs",
",",
"d",
")",
")",
")",
")",
"return",
"grads"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
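Usage sketch on exact linear signals (the analyte names are hypothetical), assuming the function is importable from the module path given above:

import numpy as np
from latools.helpers.helpers import calc_grads

x = np.arange(10, dtype=float)
dat = {'Mg24': 3 * x, 'Al27': -2 * x}      # exact linear signals
grads = calc_grads(x, dat, win=5)
print(grads['Mg24'][4], grads['Al27'][4])  # ~3.0 -2.0: slopes recovered in the interior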
test
|
findmins
|
Function to find local minima.
Parameters
----------
x, y : array_like
1D arrays of the independent (x) and dependent (y) variables.
Returns
-------
array_like
Array of points in x where y has a local minimum.
|
latools/helpers/helpers.py
|
def findmins(x, y):
""" Function to find local minima.
Parameters
----------
x, y : array_like
1D arrays of the independent (x) and dependent (y) variables.
Returns
-------
array_like
Array of points in x where y has a local minimum.
"""
return x[np.r_[False, y[1:] < y[:-1]] & np.r_[y[:-1] < y[1:], False]]
|
def findmins(x, y):
""" Function to find local minima.
Parameters
----------
x, y : array_like
1D arrays of the independent (x) and dependent (y) variables.
Returns
-------
array_like
Array of points in x where y has a local minimum.
"""
return x[np.r_[False, y[1:] < y[:-1]] & np.r_[y[:-1] < y[1:], False]]
|
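A worked illustration of the shifted-comparison mask findmins builds: a point is a local minimum when it is below both neighbours, and the False padding keeps the endpoints from qualifying. The toy arrays are invented for demonstration.

import numpy as np

x = np.array([0, 1, 2, 3, 4, 5])
y = np.array([3, 1, 2, 0, 2, 1])
falling = np.r_[False, y[1:] < y[:-1]]  # True where y[i] < y[i-1]
rising = np.r_[y[:-1] < y[1:], False]   # True where y[i] < y[i+1]
print(x[falling & rising])              # -> [1 3]; the end point 5 is excluded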
[
"Function",
"to",
"find",
"local",
"minima",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L481-L494
|
[
"def",
"findmins",
"(",
"x",
",",
"y",
")",
":",
"return",
"x",
"[",
"np",
".",
"r_",
"[",
"False",
",",
"y",
"[",
"1",
":",
"]",
"<",
"y",
"[",
":",
"-",
"1",
"]",
"]",
"&",
"np",
".",
"r_",
"[",
"y",
"[",
":",
"-",
"1",
"]",
"<",
"y",
"[",
"1",
":",
"]",
",",
"False",
"]",
"]"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
stack_keys
|
Combine elements of ddict into an array of shape (len(ddict[key]), len(keys)).
Useful for preparing data for sklearn.
Parameters
----------
ddict : dict
A dict containing arrays or lists to be stacked.
Must be of equal length.
keys : list or str
The keys of dict to stack. Must be present in ddict.
extra : list (optional)
A list of additional arrays to stack. Elements of extra
must be the same length as arrays in ddict.
Extras are inserted as the first columns of output.
|
latools/helpers/helpers.py
|
def stack_keys(ddict, keys, extra=None):
"""
Combine elements of ddict into an array of shape (len(ddict[key]), len(keys)).
Useful for preparing data for sklearn.
Parameters
----------
ddict : dict
A dict containing arrays or lists to be stacked.
Must be of equal length.
keys : list or str
The keys of dict to stack. Must be present in ddict.
extra : list (optional)
A list of additional arrays to stack. Elements of extra
must be the same length as arrays in ddict.
Extras are inserted as the first columns of output.
"""
if isinstance(keys, str):
d = [ddict[keys]]
else:
d = [ddict[k] for k in keys]
if extra is not None:
d = extra + d
return np.vstack(d).T
|
def stack_keys(ddict, keys, extra=None):
"""
Combine elements of ddict into an array of shape (len(ddict[key]), len(keys)).
Useful for preparing data for sklearn.
Parameters
----------
ddict : dict
A dict containing arrays or lists to be stacked.
Must be of equal length.
keys : list or str
The keys of dict to stack. Must be present in ddict.
extra : list (optional)
A list of additional arrays to stack. Elements of extra
must be the same length as arrays in ddict.
Extras are inserted as the first columns of output.
"""
if isinstance(keys, str):
d = [ddict[keys]]
else:
d = [ddict[k] for k in keys]
if extra is not None:
d = extra + d
return np.vstack(d).T
|
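A short usage sketch of the stacking pattern stack_keys implements; the example dict, key order and extra column are hypothetical.

import numpy as np

ddict = {'Mg': np.array([1., 2., 3.]), 'Sr': np.array([4., 5., 6.])}
extra = [np.array([0.1, 0.2, 0.3])]
# extras are inserted as the first columns, then the requested keys
X = np.vstack(extra + [ddict[k] for k in ['Mg', 'Sr']]).T
print(X.shape)  # (3, 3), i.e. sklearn's (n_samples, n_features) layout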
[
"Combine",
"elements",
"of",
"ddict",
"into",
"an",
"array",
"of",
"shape",
"(",
"len",
"(",
"ddict",
"[",
"key",
"]",
")",
"len",
"(",
"keys",
"))",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/helpers.py#L496-L520
|
[
"def",
"stack_keys",
"(",
"ddict",
",",
"keys",
",",
"extra",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"keys",
",",
"str",
")",
":",
"d",
"=",
"[",
"ddict",
"[",
"keys",
"]",
"]",
"else",
":",
"d",
"=",
"[",
"ddict",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"]",
"if",
"extra",
"is",
"not",
"None",
":",
"d",
"=",
"extra",
"+",
"d",
"return",
"np",
".",
"vstack",
"(",
"d",
")",
".",
"T"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
cluster_meanshift
|
Identify clusters using Meanshift algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
bandwidth : float or None
If None, bandwidth is estimated automatically using
    sklearn.cluster.estimate_bandwidth
bin_seeding : bool
Setting this option to True will speed up the algorithm.
See sklearn documentation for full description.
Returns
-------
tuple
    Array of cluster labels, plus a placeholder list ([np.nan]).
|
latools/filtering/clustering.py
|
def cluster_meanshift(data, bandwidth=None, bin_seeding=False, **kwargs):
"""
Identify clusters using Meanshift algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
bandwidth : float or None
If None, bandwidth is estimated automatically using
        sklearn.cluster.estimate_bandwidth
bin_seeding : bool
Setting this option to True will speed up the algorithm.
See sklearn documentation for full description.
Returns
-------
    tuple
        Array of cluster labels, plus a placeholder list ([np.nan]).
"""
if bandwidth is None:
bandwidth = cl.estimate_bandwidth(data)
ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=bin_seeding, **kwargs)
ms.fit(data)
labels = ms.labels_
return labels, [np.nan]
|
def cluster_meanshift(data, bandwidth=None, bin_seeding=False, **kwargs):
"""
Identify clusters using Meanshift algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
bandwidth : float or None
If None, bandwidth is estimated automatically using
        sklearn.cluster.estimate_bandwidth
bin_seeding : bool
Setting this option to True will speed up the algorithm.
See sklearn documentation for full description.
Returns
-------
    tuple
        Array of cluster labels, plus a placeholder list ([np.nan]).
"""
if bandwidth is None:
bandwidth = cl.estimate_bandwidth(data)
ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=bin_seeding, **kwargs)
ms.fit(data)
labels = ms.labels_
return labels, [np.nan]
|
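A hedged usage sketch: in latools, cl is sklearn.cluster, imported explicitly here; the two-blob toy data is invented for illustration.

import numpy as np
import sklearn.cluster as cl

rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0, 0.2, (50, 2)),
                  rng.normal(3, 0.2, (50, 2))])
bandwidth = cl.estimate_bandwidth(data)
ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=True)
print(np.unique(ms.fit(data).labels_))  # typically [0 1] for this toy data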
[
"Identify",
"clusters",
"using",
"Meanshift",
"algorithm",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/clustering.py#L5-L33
|
[
"def",
"cluster_meanshift",
"(",
"data",
",",
"bandwidth",
"=",
"None",
",",
"bin_seeding",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"bandwidth",
"is",
"None",
":",
"bandwidth",
"=",
"cl",
".",
"estimate_bandwidth",
"(",
"data",
")",
"ms",
"=",
"cl",
".",
"MeanShift",
"(",
"bandwidth",
"=",
"bandwidth",
",",
"bin_seeding",
"=",
"bin_seeding",
",",
"*",
"*",
"kwargs",
")",
"ms",
".",
"fit",
"(",
"data",
")",
"labels",
"=",
"ms",
".",
"labels_",
"return",
"labels",
",",
"[",
"np",
".",
"nan",
"]"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
cluster_kmeans
|
Identify clusters using K - Means algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
n_clusters : int
The number of clusters expected in the data.
Returns
-------
tuple
    Array of cluster labels, plus a placeholder list ([np.nan]).
|
latools/filtering/clustering.py
|
def cluster_kmeans(data, n_clusters, **kwargs):
"""
Identify clusters using K - Means algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
n_clusters : int
The number of clusters expected in the data.
Returns
-------
    tuple
        Array of cluster labels, plus a placeholder list ([np.nan]).
"""
km = cl.KMeans(n_clusters, **kwargs)
kmf = km.fit(data)
labels = kmf.labels_
return labels, [np.nan]
|
def cluster_kmeans(data, n_clusters, **kwargs):
"""
Identify clusters using K - Means algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
n_clusters : int
The number of clusters expected in the data.
Returns
-------
    tuple
        Array of cluster labels, plus a placeholder list ([np.nan]).
"""
km = cl.KMeans(n_clusters, **kwargs)
kmf = km.fit(data)
labels = kmf.labels_
return labels, [np.nan]
|
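Companion usage sketch for the K-Means wrapper, again with cl standing for sklearn.cluster and made-up toy data.

import numpy as np
import sklearn.cluster as cl

rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0, 0.2, (50, 2)),
                  rng.normal(3, 0.2, (50, 2))])
labels = cl.KMeans(n_clusters=2, n_init=10).fit(data).labels_
print(np.bincount(labels))  # roughly [50 50]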
[
"Identify",
"clusters",
"using",
"K",
"-",
"Means",
"algorithm",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/clustering.py#L35-L56
|
[
"def",
"cluster_kmeans",
"(",
"data",
",",
"n_clusters",
",",
"*",
"*",
"kwargs",
")",
":",
"km",
"=",
"cl",
".",
"KMeans",
"(",
"n_clusters",
",",
"*",
"*",
"kwargs",
")",
"kmf",
"=",
"km",
".",
"fit",
"(",
"data",
")",
"labels",
"=",
"kmf",
".",
"labels_",
"return",
"labels",
",",
"[",
"np",
".",
"nan",
"]"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
cluster_DBSCAN
|
Identify clusters using DBSCAN algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
eps : float
    The maximum 'distance' points can be apart for them to be in the
same cluster. Defaults to 0.3. Note: If the data are normalised
(they should be for DBSCAN) this is in terms of total sample
variance. Normalised data have a mean of 0 and a variance of 1.
min_samples : int
The minimum number of samples within distance `eps` required
to be considered as an independent cluster.
n_clusters : int
The number of clusters expected. If specified, `eps` will be
incrementally reduced until the expected number of clusters is
found.
maxiter : int
The maximum number of iterations DBSCAN will run.
Returns
-------
tuple
    Array of cluster labels, plus a boolean core-sample mask.
|
latools/filtering/clustering.py
|
def cluster_DBSCAN(data, eps=None, min_samples=None,
n_clusters=None, maxiter=200, **kwargs):
"""
Identify clusters using DBSCAN algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
eps : float
        The maximum 'distance' points can be apart for them to be in the
same cluster. Defaults to 0.3. Note: If the data are normalised
(they should be for DBSCAN) this is in terms of total sample
variance. Normalised data have a mean of 0 and a variance of 1.
min_samples : int
The minimum number of samples within distance `eps` required
to be considered as an independent cluster.
n_clusters : int
The number of clusters expected. If specified, `eps` will be
incrementally reduced until the expected number of clusters is
found.
maxiter : int
The maximum number of iterations DBSCAN will run.
Returns
-------
    tuple
        Array of cluster labels, plus a boolean core-sample mask.
"""
if n_clusters is None:
if eps is None:
eps = 0.3
db = cl.DBSCAN(eps=eps, min_samples=min_samples, **kwargs).fit(data)
else:
clusters = 0
eps_temp = 1 / .95
niter = 0
while clusters < n_clusters:
clusters_last = clusters
eps_temp *= 0.95
db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)
clusters = (len(set(db.labels_)) -
(1 if -1 in db.labels_ else 0))
if clusters < clusters_last:
eps_temp *= 1 / 0.95
db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)
clusters = (len(set(db.labels_)) -
(1 if -1 in db.labels_ else 0))
warnings.warn(('\n\n***Unable to find {:.0f} clusters in '
'data. Found {:.0f} with an eps of {:.2e}'
'').format(n_clusters, clusters, eps_temp))
break
niter += 1
if niter == maxiter:
warnings.warn(('\n\n***Maximum iterations ({:.0f}) reached'
                               ', {:.0f} clusters not found.\nDecrease '
'min_samples or n_clusters (or increase '
'maxiter).').format(maxiter, n_clusters))
break
labels = db.labels_
core_samples_mask = np.zeros_like(labels)
core_samples_mask[db.core_sample_indices_] = True
return labels, core_samples_mask
|
def cluster_DBSCAN(data, eps=None, min_samples=None,
n_clusters=None, maxiter=200, **kwargs):
"""
Identify clusters using DBSCAN algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
eps : float
        The maximum 'distance' points can be apart for them to be in the
same cluster. Defaults to 0.3. Note: If the data are normalised
(they should be for DBSCAN) this is in terms of total sample
variance. Normalised data have a mean of 0 and a variance of 1.
min_samples : int
The minimum number of samples within distance `eps` required
to be considered as an independent cluster.
n_clusters : int
The number of clusters expected. If specified, `eps` will be
incrementally reduced until the expected number of clusters is
found.
maxiter : int
The maximum number of iterations DBSCAN will run.
Returns
-------
    tuple
        Array of cluster labels, plus a boolean core-sample mask.
"""
if n_clusters is None:
if eps is None:
eps = 0.3
db = cl.DBSCAN(eps=eps, min_samples=min_samples, **kwargs).fit(data)
else:
clusters = 0
eps_temp = 1 / .95
niter = 0
while clusters < n_clusters:
clusters_last = clusters
eps_temp *= 0.95
db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)
clusters = (len(set(db.labels_)) -
(1 if -1 in db.labels_ else 0))
if clusters < clusters_last:
eps_temp *= 1 / 0.95
db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)
clusters = (len(set(db.labels_)) -
(1 if -1 in db.labels_ else 0))
warnings.warn(('\n\n***Unable to find {:.0f} clusters in '
'data. Found {:.0f} with an eps of {:.2e}'
'').format(n_clusters, clusters, eps_temp))
break
niter += 1
if niter == maxiter:
warnings.warn(('\n\n***Maximum iterations ({:.0f}) reached'
                               ', {:.0f} clusters not found.\nDecrease '
'min_samples or n_clusters (or increase '
'maxiter).').format(maxiter, n_clusters))
break
labels = db.labels_
core_samples_mask = np.zeros_like(labels)
core_samples_mask[db.core_sample_indices_] = True
return labels, core_samples_mask
|
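A simplified sketch of the eps-shrinking search cluster_DBSCAN performs when n_clusters is given: start at eps = 1/0.95, tighten by 5% per step, and stop once enough clusters appear (noise points, labelled -1, are not counted). The stopping guard and toy data are illustrative additions, not the latools implementation.

import numpy as np
import sklearn.cluster as cl

def n_found(labels):
    return len(set(labels)) - (1 if -1 in labels else 0)

rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0, 0.1, (50, 2)),
                  rng.normal(2, 0.1, (50, 2))])
eps, target = 1 / 0.95, 2
while True:
    eps *= 0.95
    labels = cl.DBSCAN(eps=eps, min_samples=5).fit(data).labels_
    if n_found(labels) >= target or eps < 1e-3:  # guard against endless shrinking
        break
print(round(eps, 3), n_found(labels))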
[
"Identify",
"clusters",
"using",
"DBSCAN",
"algorithm",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/clustering.py#L58-L123
|
[
"def",
"cluster_DBSCAN",
"(",
"data",
",",
"eps",
"=",
"None",
",",
"min_samples",
"=",
"None",
",",
"n_clusters",
"=",
"None",
",",
"maxiter",
"=",
"200",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"n_clusters",
"is",
"None",
":",
"if",
"eps",
"is",
"None",
":",
"eps",
"=",
"0.3",
"db",
"=",
"cl",
".",
"DBSCAN",
"(",
"eps",
"=",
"eps",
",",
"min_samples",
"=",
"min_samples",
",",
"*",
"*",
"kwargs",
")",
".",
"fit",
"(",
"data",
")",
"else",
":",
"clusters",
"=",
"0",
"eps_temp",
"=",
"1",
"/",
".95",
"niter",
"=",
"0",
"while",
"clusters",
"<",
"n_clusters",
":",
"clusters_last",
"=",
"clusters",
"eps_temp",
"*=",
"0.95",
"db",
"=",
"cl",
".",
"DBSCAN",
"(",
"eps",
"=",
"eps_temp",
",",
"min_samples",
"=",
"min_samples",
",",
"*",
"*",
"kwargs",
")",
".",
"fit",
"(",
"data",
")",
"clusters",
"=",
"(",
"len",
"(",
"set",
"(",
"db",
".",
"labels_",
")",
")",
"-",
"(",
"1",
"if",
"-",
"1",
"in",
"db",
".",
"labels_",
"else",
"0",
")",
")",
"if",
"clusters",
"<",
"clusters_last",
":",
"eps_temp",
"*=",
"1",
"/",
"0.95",
"db",
"=",
"cl",
".",
"DBSCAN",
"(",
"eps",
"=",
"eps_temp",
",",
"min_samples",
"=",
"min_samples",
",",
"*",
"*",
"kwargs",
")",
".",
"fit",
"(",
"data",
")",
"clusters",
"=",
"(",
"len",
"(",
"set",
"(",
"db",
".",
"labels_",
")",
")",
"-",
"(",
"1",
"if",
"-",
"1",
"in",
"db",
".",
"labels_",
"else",
"0",
")",
")",
"warnings",
".",
"warn",
"(",
"(",
"'\\n\\n***Unable to find {:.0f} clusters in '",
"'data. Found {:.0f} with an eps of {:.2e}'",
"''",
")",
".",
"format",
"(",
"n_clusters",
",",
"clusters",
",",
"eps_temp",
")",
")",
"break",
"niter",
"+=",
"1",
"if",
"niter",
"==",
"maxiter",
":",
"warnings",
".",
"warn",
"(",
"(",
"'\\n\\n***Maximum iterations ({:.0f}) reached'",
"', {:.0f} clusters not found.\\nDeacrease '",
"'min_samples or n_clusters (or increase '",
"'maxiter).'",
")",
".",
"format",
"(",
"maxiter",
",",
"n_clusters",
")",
")",
"break",
"labels",
"=",
"db",
".",
"labels_",
"core_samples_mask",
"=",
"np",
".",
"zeros_like",
"(",
"labels",
")",
"core_samples_mask",
"[",
"db",
".",
"core_sample_indices_",
"]",
"=",
"True",
"return",
"labels",
",",
"core_samples_mask"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
get_defined_srms
|
Returns list of SRMS defined in the SRM database
|
latools/helpers/srm.py
|
def get_defined_srms(srm_file):
"""
Returns list of SRMS defined in the SRM database
"""
srms = read_table(srm_file)
return np.asanyarray(srms.index.unique())
|
def get_defined_srms(srm_file):
"""
Returns list of SRMS defined in the SRM database
"""
srms = read_table(srm_file)
return np.asanyarray(srms.index.unique())
|
[
"Returns",
"list",
"of",
"SRMS",
"defined",
"in",
"the",
"SRM",
"database"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/srm.py#L22-L27
|
[
"def",
"get_defined_srms",
"(",
"srm_file",
")",
":",
"srms",
"=",
"read_table",
"(",
"srm_file",
")",
"return",
"np",
".",
"asanyarray",
"(",
"srms",
".",
"index",
".",
"unique",
"(",
")",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
read_configuration
|
Read LAtools configuration file, and return parameters as dict.
|
latools/helpers/config.py
|
def read_configuration(config='DEFAULT'):
"""
Read LAtools configuration file, and return parameters as dict.
"""
# read configuration file
_, conf = read_latoolscfg()
# if 'DEFAULT', check which is the default configuration
if config == 'DEFAULT':
config = conf['DEFAULT']['config']
# grab the chosen configuration
conf = dict(conf[config])
# update config name with chosen
conf['config'] = config
return conf
|
def read_configuration(config='DEFAULT'):
"""
Read LAtools configuration file, and return parameters as dict.
"""
# read configuration file
_, conf = read_latoolscfg()
# if 'DEFAULT', check which is the default configuration
if config == 'DEFAULT':
config = conf['DEFAULT']['config']
# grab the chosen configuration
conf = dict(conf[config])
# update config name with chosen
conf['config'] = config
return conf
|
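A self-contained sketch of the DEFAULT-indirection pattern read_configuration relies on: the [DEFAULT] section stores the *name* of the active section. The section names and values here are made up.

import configparser

cf = configparser.ConfigParser()
cf.read_string("""
[DEFAULT]
config = mylab
[mylab]
srmfile = resources/SRM.csv
""")
config = cf['DEFAULT']['config']  # -> 'mylab'
conf = dict(cf[config])           # section values (DEFAULT keys included)
conf['config'] = config
print(conf)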
[
"Read",
"LAtools",
"configuration",
"file",
"and",
"return",
"parameters",
"as",
"dict",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/config.py#L13-L27
|
[
"def",
"read_configuration",
"(",
"config",
"=",
"'DEFAULT'",
")",
":",
"# read configuration file",
"_",
",",
"conf",
"=",
"read_latoolscfg",
"(",
")",
"# if 'DEFAULT', check which is the default configuration",
"if",
"config",
"==",
"'DEFAULT'",
":",
"config",
"=",
"conf",
"[",
"'DEFAULT'",
"]",
"[",
"'config'",
"]",
"# grab the chosen configuration",
"conf",
"=",
"dict",
"(",
"conf",
"[",
"config",
"]",
")",
"# update config name with chosen",
"conf",
"[",
"'config'",
"]",
"=",
"config",
"return",
"conf"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
read_latoolscfg
|
Reads configuration, returns a ConfigParser object.
Distinct from read_configuration, which returns a dict.
|
latools/helpers/config.py
|
def read_latoolscfg():
"""
Reads configuration, returns a ConfigParser object.
Distinct from read_configuration, which returns a dict.
"""
config_file = pkgrs.resource_filename('latools', 'latools.cfg')
cf = configparser.ConfigParser()
cf.read(config_file)
return config_file, cf
|
def read_latoolscfg():
"""
Reads configuration, returns a ConfigParser object.
Distinct from read_configuration, which returns a dict.
"""
config_file = pkgrs.resource_filename('latools', 'latools.cfg')
cf = configparser.ConfigParser()
cf.read(config_file)
return config_file, cf
|
[
"Reads",
"configuration",
"returns",
"a",
"ConfigParser",
"object",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/config.py#L30-L39
|
[
"def",
"read_latoolscfg",
"(",
")",
":",
"config_file",
"=",
"pkgrs",
".",
"resource_filename",
"(",
"'latools'",
",",
"'latools.cfg'",
")",
"cf",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"cf",
".",
"read",
"(",
"config_file",
")",
"return",
"config_file",
",",
"cf"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
print_all
|
Prints all currently defined configurations.
|
latools/helpers/config.py
|
def print_all():
"""
Prints all currently defined configurations.
"""
# read configuration file
_, conf = read_latoolscfg()
default = conf['DEFAULT']['config']
pstr = '\nCurrently defined LAtools configurations:\n\n'
for s in conf.sections():
if s == default:
pstr += s + ' [DEFAULT]\n'
elif s == 'REPRODUCE':
pstr += s + ' [DO NOT ALTER]\n'
else:
pstr += s + '\n'
for k, v in conf[s].items():
if k != 'config':
if v[:9] == 'resources':
v = pkgrs.resource_filename('latools', v)
pstr += ' ' + k + ': ' + v + '\n'
pstr += '\n'
print(pstr)
return
|
def print_all():
"""
Prints all currently defined configurations.
"""
# read configuration file
_, conf = read_latoolscfg()
default = conf['DEFAULT']['config']
pstr = '\nCurrently defined LAtools configurations:\n\n'
for s in conf.sections():
if s == default:
pstr += s + ' [DEFAULT]\n'
elif s == 'REPRODUCE':
pstr += s + ' [DO NOT ALTER]\n'
else:
pstr += s + '\n'
for k, v in conf[s].items():
if k != 'config':
if v[:9] == 'resources':
v = pkgrs.resource_filename('latools', v)
pstr += ' ' + k + ': ' + v + '\n'
pstr += '\n'
print(pstr)
return
|
[
"Prints",
"all",
"currently",
"defined",
"configurations",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/config.py#L50-L76
|
[
"def",
"print_all",
"(",
")",
":",
"# read configuration file",
"_",
",",
"conf",
"=",
"read_latoolscfg",
"(",
")",
"default",
"=",
"conf",
"[",
"'DEFAULT'",
"]",
"[",
"'config'",
"]",
"pstr",
"=",
"'\\nCurrently defined LAtools configurations:\\n\\n'",
"for",
"s",
"in",
"conf",
".",
"sections",
"(",
")",
":",
"if",
"s",
"==",
"default",
":",
"pstr",
"+=",
"s",
"+",
"' [DEFAULT]\\n'",
"elif",
"s",
"==",
"'REPRODUCE'",
":",
"pstr",
"+=",
"s",
"+",
"' [DO NOT ALTER]\\n'",
"else",
":",
"pstr",
"+=",
"s",
"+",
"'\\n'",
"for",
"k",
",",
"v",
"in",
"conf",
"[",
"s",
"]",
".",
"items",
"(",
")",
":",
"if",
"k",
"!=",
"'config'",
":",
"if",
"v",
"[",
":",
"9",
"]",
"==",
"'resources'",
":",
"v",
"=",
"pkgrs",
".",
"resource_filename",
"(",
"'latools'",
",",
"v",
")",
"pstr",
"+=",
"' '",
"+",
"k",
"+",
"': '",
"+",
"v",
"+",
"'\\n'",
"pstr",
"+=",
"'\\n'",
"print",
"(",
"pstr",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
copy_SRM_file
|
Creates a copy of the default SRM table at the specified location.
Parameters
----------
destination : str
    The save location for the SRM file. If no location is specified,
saves it as 'LAtools_[config]_SRMTable.csv' in the current working
directory.
config : str
It's possible to set up different configurations with different
SRM files. This specifies the name of the configuration that you
want to copy the SRM file from. If not specified, the 'DEFAULT'
configuration is used.
|
latools/helpers/config.py
|
def copy_SRM_file(destination=None, config='DEFAULT'):
"""
Creates a copy of the default SRM table at the specified location.
Parameters
----------
destination : str
        The save location for the SRM file. If no location is specified,
saves it as 'LAtools_[config]_SRMTable.csv' in the current working
directory.
config : str
It's possible to set up different configurations with different
SRM files. This specifies the name of the configuration that you
want to copy the SRM file from. If not specified, the 'DEFAULT'
configuration is used.
"""
# find SRM file from configuration
conf = read_configuration()
src = pkgrs.resource_filename('latools', conf['srmfile'])
# work out destination path (if not given)
if destination is None:
destination = './LAtools_' + conf['config'] + '_SRMTable.csv'
if os.path.isdir(destination):
destination += 'LAtools_' + conf['config'] + '_SRMTable.csv'
copyfile(src, destination)
print(src + ' \n copied to:\n ' + destination)
return
|
def copy_SRM_file(destination=None, config='DEFAULT'):
"""
Creates a copy of the default SRM table at the specified location.
Parameters
----------
destination : str
        The save location for the SRM file. If no location is specified,
saves it as 'LAtools_[config]_SRMTable.csv' in the current working
directory.
config : str
It's possible to set up different configurations with different
SRM files. This specifies the name of the configuration that you
want to copy the SRM file from. If not specified, the 'DEFAULT'
configuration is used.
"""
# find SRM file from configuration
conf = read_configuration()
src = pkgrs.resource_filename('latools', conf['srmfile'])
# work out destination path (if not given)
if destination is None:
destination = './LAtools_' + conf['config'] + '_SRMTable.csv'
if os.path.isdir(destination):
destination += 'LAtools_' + conf['config'] + '_SRMTable.csv'
copyfile(src, destination)
print(src + ' \n copied to:\n ' + destination)
return
|
[
"Creates",
"a",
"copy",
"of",
"the",
"default",
"SRM",
"table",
"at",
"the",
"specified",
"location",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/config.py#L78-L109
|
[
"def",
"copy_SRM_file",
"(",
"destination",
"=",
"None",
",",
"config",
"=",
"'DEFAULT'",
")",
":",
"# find SRM file from configuration ",
"conf",
"=",
"read_configuration",
"(",
")",
"src",
"=",
"pkgrs",
".",
"resource_filename",
"(",
"'latools'",
",",
"conf",
"[",
"'srmfile'",
"]",
")",
"# work out destination path (if not given)",
"if",
"destination",
"is",
"None",
":",
"destination",
"=",
"'./LAtools_'",
"+",
"conf",
"[",
"'config'",
"]",
"+",
"'_SRMTable.csv'",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"destination",
")",
":",
"destination",
"+=",
"'LAtools_'",
"+",
"conf",
"[",
"'config'",
"]",
"+",
"'_SRMTable.csv'",
"copyfile",
"(",
"src",
",",
"destination",
")",
"print",
"(",
"src",
"+",
"' \\n copied to:\\n '",
"+",
"destination",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
create
|
Adds a new configuration to latools.cfg.
Parameters
----------
config_name : str
The name of the new configuration. This should be descriptive
(e.g. UC Davis Foram Group)
srmfile : str (optional)
The location of the srm file used for calibration.
dataformat : str (optional)
The location of the dataformat definition to use.
base_on : str
The name of the existing configuration to base the new one on.
    If either srmfile or dataformat is not specified, the new
config will copy this information from the base_on config.
make_default : bool
Whether or not to make the new configuration the default
for future analyses. Default = False.
Returns
-------
None
|
latools/helpers/config.py
|
def create(config_name, srmfile=None, dataformat=None, base_on='DEFAULT', make_default=False):
"""
Adds a new configuration to latools.cfg.
Parameters
----------
config_name : str
The name of the new configuration. This should be descriptive
(e.g. UC Davis Foram Group)
srmfile : str (optional)
The location of the srm file used for calibration.
dataformat : str (optional)
The location of the dataformat definition to use.
base_on : str
The name of the existing configuration to base the new one on.
        If either srmfile or dataformat is not specified, the new
config will copy this information from the base_on config.
make_default : bool
Whether or not to make the new configuration the default
for future analyses. Default = False.
Returns
-------
None
"""
base_config = read_configuration(base_on)
# read config file
config_file, cf = read_latoolscfg()
# if config doesn't already exist, create it.
if config_name not in cf.sections():
cf.add_section(config_name)
# set parameter values
if dataformat is None:
dataformat = base_config['dataformat']
cf.set(config_name, 'dataformat', dataformat)
if srmfile is None:
srmfile = base_config['srmfile']
cf.set(config_name, 'srmfile', srmfile)
# make the parameter set default, if requested
if make_default:
cf.set('DEFAULT', 'config', config_name)
with open(config_file, 'w') as f:
cf.write(f)
return
|
def create(config_name, srmfile=None, dataformat=None, base_on='DEFAULT', make_default=False):
"""
Adds a new configuration to latools.cfg.
Parameters
----------
config_name : str
The name of the new configuration. This should be descriptive
(e.g. UC Davis Foram Group)
srmfile : str (optional)
The location of the srm file used for calibration.
dataformat : str (optional)
The location of the dataformat definition to use.
base_on : str
The name of the existing configuration to base the new one on.
        If either srmfile or dataformat is not specified, the new
config will copy this information from the base_on config.
make_default : bool
Whether or not to make the new configuration the default
for future analyses. Default = False.
Returns
-------
None
"""
base_config = read_configuration(base_on)
# read config file
config_file, cf = read_latoolscfg()
# if config doesn't already exist, create it.
if config_name not in cf.sections():
cf.add_section(config_name)
# set parameter values
if dataformat is None:
dataformat = base_config['dataformat']
cf.set(config_name, 'dataformat', dataformat)
if srmfile is None:
srmfile = base_config['srmfile']
cf.set(config_name, 'srmfile', srmfile)
# make the parameter set default, if requested
if make_default:
cf.set('DEFAULT', 'config', config_name)
with open(config_file, 'w') as f:
cf.write(f)
return
|
[
"Adds",
"a",
"new",
"configuration",
"to",
"latools",
".",
"cfg",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/config.py#L111-L161
|
[
"def",
"create",
"(",
"config_name",
",",
"srmfile",
"=",
"None",
",",
"dataformat",
"=",
"None",
",",
"base_on",
"=",
"'DEFAULT'",
",",
"make_default",
"=",
"False",
")",
":",
"base_config",
"=",
"read_configuration",
"(",
"base_on",
")",
"# read config file",
"config_file",
",",
"cf",
"=",
"read_latoolscfg",
"(",
")",
"# if config doesn't already exist, create it.",
"if",
"config_name",
"not",
"in",
"cf",
".",
"sections",
"(",
")",
":",
"cf",
".",
"add_section",
"(",
"config_name",
")",
"# set parameter values",
"if",
"dataformat",
"is",
"None",
":",
"dataformat",
"=",
"base_config",
"[",
"'dataformat'",
"]",
"cf",
".",
"set",
"(",
"config_name",
",",
"'dataformat'",
",",
"dataformat",
")",
"if",
"srmfile",
"is",
"None",
":",
"srmfile",
"=",
"base_config",
"[",
"'srmfile'",
"]",
"cf",
".",
"set",
"(",
"config_name",
",",
"'srmfile'",
",",
"srmfile",
")",
"# make the parameter set default, if requested",
"if",
"make_default",
":",
"cf",
".",
"set",
"(",
"'DEFAULT'",
",",
"'config'",
",",
"config_name",
")",
"with",
"open",
"(",
"config_file",
",",
"'w'",
")",
"as",
"f",
":",
"cf",
".",
"write",
"(",
"f",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
change_default
|
Change the default configuration.
|
latools/helpers/config.py
|
def change_default(config):
"""
Change the default configuration.
"""
config_file, cf = read_latoolscfg()
if config not in cf.sections():
raise ValueError("\n'{:s}' is not a defined configuration.".format(config))
if config == 'REPRODUCE':
pstr = ('Are you SURE you want to set REPRODUCE as your default configuration?\n' +
' ... this is an odd thing to be doing.')
else:
pstr = ('Are you sure you want to change the default configuration from {:s}'.format(cf['DEFAULT']['config']) +
                ' to {:s}?'.format(config))
response = input(pstr + '\n> [N/y]: ')
if response.lower() == 'y':
cf.set('DEFAULT', 'config', config)
with open(config_file, 'w') as f:
cf.write(f)
print(' Default changed!')
else:
print(' Done nothing.')
|
def change_default(config):
"""
Change the default configuration.
"""
config_file, cf = read_latoolscfg()
if config not in cf.sections():
raise ValueError("\n'{:s}' is not a defined configuration.".format(config))
if config == 'REPRODUCE':
pstr = ('Are you SURE you want to set REPRODUCE as your default configuration?\n' +
' ... this is an odd thing to be doing.')
else:
pstr = ('Are you sure you want to change the default configuration from {:s}'.format(cf['DEFAULT']['config']) +
                ' to {:s}?'.format(config))
response = input(pstr + '\n> [N/y]: ')
if response.lower() == 'y':
cf.set('DEFAULT', 'config', config)
with open(config_file, 'w') as f:
cf.write(f)
print(' Default changed!')
else:
print(' Done nothing.')
|
[
"Change",
"the",
"default",
"configuration",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/config.py#L209-L233
|
[
"def",
"change_default",
"(",
"config",
")",
":",
"config_file",
",",
"cf",
"=",
"read_latoolscfg",
"(",
")",
"if",
"config",
"not",
"in",
"cf",
".",
"sections",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"\\n'{:s}' is not a defined configuration.\"",
".",
"format",
"(",
"config",
")",
")",
"if",
"config",
"==",
"'REPRODUCE'",
":",
"pstr",
"=",
"(",
"'Are you SURE you want to set REPRODUCE as your default configuration?\\n'",
"+",
"' ... this is an odd thing to be doing.'",
")",
"else",
":",
"pstr",
"=",
"(",
"'Are you sure you want to change the default configuration from {:s}'",
".",
"format",
"(",
"cf",
"[",
"'DEFAULT'",
"]",
"[",
"'config'",
"]",
")",
"+",
"'to {:s}?'",
".",
"format",
"(",
"config",
")",
")",
"response",
"=",
"input",
"(",
"pstr",
"+",
"'\\n> [N/y]: '",
")",
"if",
"response",
".",
"lower",
"(",
")",
"==",
"'y'",
":",
"cf",
".",
"set",
"(",
"'DEFAULT'",
",",
"'config'",
",",
"config",
")",
"with",
"open",
"(",
"config_file",
",",
"'w'",
")",
"as",
"f",
":",
"cf",
".",
"write",
"(",
"f",
")",
"print",
"(",
"' Default changed!'",
")",
"else",
":",
"print",
"(",
"' Done nothing.'",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
threshold
|
Return boolean arrays where a >= and < threshold.
Parameters
----------
values : array-like
Array of real values.
threshold : float
Threshold value
Returns
-------
(below, above) : tuple of boolean arrays
|
latools/filtering/filters.py
|
def threshold(values, threshold):
"""
Return boolean arrays where a >= and < threshold.
Parameters
----------
values : array-like
Array of real values.
threshold : float
Threshold value
Returns
-------
    (below, above) : tuple of boolean arrays
"""
values = nominal_values(values)
return (values < threshold, values >= threshold)
|
def threshold(values, threshold):
"""
Return boolean arrays where a >= and < threshold.
Parameters
----------
values : array-like
Array of real values.
threshold : float
Threshold value
Returns
-------
    (below, above) : tuple of boolean arrays
"""
values = nominal_values(values)
return (values < threshold, values >= threshold)
|
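A trivial usage sketch; nominal_values (an uncertainties-package helper used by latools) strips error terms first, so plain floats pass through unchanged here.

import numpy as np

values = np.array([0.5, 1.5, 2.5])
below, above = values < 1.0, values >= 1.0
print(below, above)  # [ True False False] [False  True  True]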
[
"Return",
"boolean",
"arrays",
"where",
"a",
">",
"=",
"and",
"<",
"threshold",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/filters.py#L7-L23
|
[
"def",
"threshold",
"(",
"values",
",",
"threshold",
")",
":",
"values",
"=",
"nominal_values",
"(",
"values",
")",
"return",
"(",
"values",
"<",
"threshold",
",",
"values",
">=",
"threshold",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
exclude_downhole
|
Exclude all data after the first excluded portion.
This makes sense for spot measurements where, because
of the signal mixing inherent in LA-ICPMS, once a
contaminant is ablated, it will always be present to
some degree in signals from further down the ablation
pit.
Parameters
----------
filt : boolean array
threshold : int
Returns
-------
filter : boolean array
|
latools/filtering/filters.py
|
def exclude_downhole(filt, threshold=2):
"""
Exclude all data after the first excluded portion.
This makes sense for spot measurements where, because
of the signal mixing inherent in LA-ICPMS, once a
contaminant is ablated, it will always be present to
some degree in signals from further down the ablation
pit.
Parameters
----------
filt : boolean array
threshold : int
Returns
-------
filter : boolean array
"""
cfilt = filt.copy()
inds = bool_2_indices(~filt)
rem = (np.diff(inds) >= threshold)[:, 0]
if any(rem):
if inds[rem].shape[0] > 1:
limit = inds[rem][1, 0]
cfilt[limit:] = False
return cfilt
|
def exclude_downhole(filt, threshold=2):
"""
Exclude all data after the first excluded portion.
This makes sense for spot measurements where, because
of the signal mixing inherent in LA-ICPMS, once a
contaminant is ablated, it will always be present to
some degree in signals from further down the ablation
pit.
Parameters
----------
filt : boolean array
threshold : int
Returns
-------
filter : boolean array
"""
cfilt = filt.copy()
inds = bool_2_indices(~filt)
rem = (np.diff(inds) >= threshold)[:, 0]
if any(rem):
if inds[rem].shape[0] > 1:
limit = inds[rem][1, 0]
cfilt[limit:] = False
return cfilt
|
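A toy restatement of the documented downhole idea: once an excluded (False) run of at least `threshold` points is met, everything after it is excluded too. This is a direct loop rather than the bool_2_indices bookkeeping above, and may differ from the helper's exact choice of which run triggers the cut.

import numpy as np

def exclude_downhole_simple(filt, threshold=2):
    out = filt.copy()
    run = 0
    for i, ok in enumerate(filt):
        run = 0 if ok else run + 1
        if run >= threshold:
            out[i - run + 1:] = False  # cut from the start of the long False run
            break
    return out

f = np.array([True, True, False, False, True, True])
print(exclude_downhole_simple(f))  # [ True  True False False False False]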
[
"Exclude",
"all",
"data",
"after",
"the",
"first",
"excluded",
"portion",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/filters.py#L26-L56
|
[
"def",
"exclude_downhole",
"(",
"filt",
",",
"threshold",
"=",
"2",
")",
":",
"cfilt",
"=",
"filt",
".",
"copy",
"(",
")",
"inds",
"=",
"bool_2_indices",
"(",
"~",
"filt",
")",
"rem",
"=",
"(",
"np",
".",
"diff",
"(",
"inds",
")",
">=",
"threshold",
")",
"[",
":",
",",
"0",
"]",
"if",
"any",
"(",
"rem",
")",
":",
"if",
"inds",
"[",
"rem",
"]",
".",
"shape",
"[",
"0",
"]",
">",
"1",
":",
"limit",
"=",
"inds",
"[",
"rem",
"]",
"[",
"1",
",",
"0",
"]",
"cfilt",
"[",
"limit",
":",
"]",
"=",
"False",
"return",
"cfilt"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
defrag
|
'Defragment' a filter.
Parameters
----------
filt : boolean array
A filter
threshold : int
Consecutive values equal to or below this threshold
length are considered fragments, and will be removed.
mode : str
    Whether to change False fragments to True ('include')
or True fragments to False ('exclude')
Returns
-------
defragmented filter : boolean array
|
latools/filtering/filters.py
|
def defrag(filt, threshold=3, mode='include'):
"""
'Defragment' a filter.
Parameters
----------
filt : boolean array
A filter
threshold : int
Consecutive values equal to or below this threshold
length are considered fragments, and will be removed.
mode : str
        Whether to change False fragments to True ('include')
or True fragments to False ('exclude')
Returns
-------
defragmented filter : boolean array
"""
if bool_2_indices(filt) is None:
return filt
if mode == 'include':
inds = bool_2_indices(~filt) + 1
rep = True
if mode == 'exclude':
inds = bool_2_indices(filt) + 1
rep = False
rem = (np.diff(inds) <= threshold)[:, 0]
cfilt = filt.copy()
if any(rem):
for lo, hi in inds[rem]:
cfilt[lo:hi] = rep
return cfilt
|
def defrag(filt, threshold=3, mode='include'):
"""
'Defragment' a filter.
Parameters
----------
filt : boolean array
A filter
threshold : int
Consecutive values equal to or below this threshold
length are considered fragments, and will be removed.
mode : str
        Whether to change False fragments to True ('include')
or True fragments to False ('exclude')
Returns
-------
defragmented filter : boolean array
"""
if bool_2_indices(filt) is None:
return filt
if mode == 'include':
inds = bool_2_indices(~filt) + 1
rep = True
if mode == 'exclude':
inds = bool_2_indices(filt) + 1
rep = False
rem = (np.diff(inds) <= threshold)[:, 0]
cfilt = filt.copy()
if any(rem):
for lo, hi in inds[rem]:
cfilt[lo:hi] = rep
return cfilt
|
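A worked sketch of mode='include' defragmentation: False runs no longer than `threshold` are filled with True. This direct loop stands in for the bool_2_indices arithmetic and treats runs at the array edges the same way, which may differ from the helper's exact behaviour.

import numpy as np

def defrag_include(filt, threshold=3):
    out = filt.copy()
    i, n = 0, len(filt)
    while i < n:
        if not out[i]:
            j = i
            while j < n and not out[j]:
                j += 1
            if j - i <= threshold:   # short gap -> fill it in
                out[i:j] = True
            i = j
        else:
            i += 1
    return out

f = np.array([True, False, True, True, False, False, False, False, True])
print(defrag_include(f))  # 1-point gap filled, 4-point gap kept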
[
"Defragment",
"a",
"filter",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/filters.py#L58-L94
|
[
"def",
"defrag",
"(",
"filt",
",",
"threshold",
"=",
"3",
",",
"mode",
"=",
"'include'",
")",
":",
"if",
"bool_2_indices",
"(",
"filt",
")",
"is",
"None",
":",
"return",
"filt",
"if",
"mode",
"==",
"'include'",
":",
"inds",
"=",
"bool_2_indices",
"(",
"~",
"filt",
")",
"+",
"1",
"rep",
"=",
"True",
"if",
"mode",
"==",
"'exclude'",
":",
"inds",
"=",
"bool_2_indices",
"(",
"filt",
")",
"+",
"1",
"rep",
"=",
"False",
"rem",
"=",
"(",
"np",
".",
"diff",
"(",
"inds",
")",
"<=",
"threshold",
")",
"[",
":",
",",
"0",
"]",
"cfilt",
"=",
"filt",
".",
"copy",
"(",
")",
"if",
"any",
"(",
"rem",
")",
":",
"for",
"lo",
",",
"hi",
"in",
"inds",
"[",
"rem",
"]",
":",
"cfilt",
"[",
"lo",
":",
"hi",
"]",
"=",
"rep",
"return",
"cfilt"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
trim
|
Remove points from the start and end of True regions.
Parameters
----------
start, end : int
The number of points to remove from the start and end of
the specified filter.
ind : boolean array
Which filter to trim. If True, applies to currently active
filters.
|
latools/filtering/filters.py
|
def trim(ind, start=1, end=0):
"""
Remove points from the start and end of True regions.
Parameters
----------
start, end : int
The number of points to remove from the start and end of
the specified filter.
ind : boolean array
Which filter to trim. If True, applies to currently active
filters.
"""
return np.roll(ind, start) & np.roll(ind, -end)
|
def trim(ind, start=1, end=0):
"""
Remove points from the start and end of True regions.
Parameters
----------
start, end : int
The number of points to remove from the start and end of
the specified filter.
ind : boolean array
Which filter to trim. If True, applies to currently active
filters.
"""
return np.roll(ind, start) & np.roll(ind, -end)
|
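An illustration of the np.roll trick in trim: shifting the mask right by `start` and left by `end`, then ANDing, shaves points off each end of every True region. Note that np.roll wraps around, so a True region touching the array boundary can leak across; the toy mask avoids that.

import numpy as np

ind = np.array([False, True, True, True, True, False])
print(np.roll(ind, 1) & np.roll(ind, -1))  # start=1, end=1
# -> [False False  True  True False False]: one point shaved from each end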
[
"Remove",
"points",
"from",
"the",
"start",
"and",
"end",
"of",
"True",
"regions",
".",
"Parameters",
"----------",
"start",
"end",
":",
"int",
"The",
"number",
"of",
"points",
"to",
"remove",
"from",
"the",
"start",
"and",
"end",
"of",
"the",
"specified",
"filter",
".",
"ind",
":",
"boolean",
"array",
"Which",
"filter",
"to",
"trim",
".",
"If",
"True",
"applies",
"to",
"currently",
"active",
"filters",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/filters.py#L96-L110
|
[
"def",
"trim",
"(",
"ind",
",",
"start",
"=",
"1",
",",
"end",
"=",
"0",
")",
":",
"return",
"np",
".",
"roll",
"(",
"ind",
",",
"start",
")",
"&",
"np",
".",
"roll",
"(",
"ind",
",",
"-",
"end",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.setfocus
|
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
focus : str
The name of the analysis stage desired.
Returns
-------
None
|
latools/D_obj.py
|
def setfocus(self, focus):
"""
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
focus : str
The name of the analysis stage desired.
Returns
-------
None
"""
self.focus = self.data[focus]
self.focus_stage = focus
self.__dict__.update(self.focus)
|
def setfocus(self, focus):
"""
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
focus : str
The name of the analysis stage desired.
Returns
-------
None
"""
self.focus = self.data[focus]
self.focus_stage = focus
self.__dict__.update(self.focus)
|
[
"Set",
"the",
"focus",
"attribute",
"of",
"the",
"data",
"file",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L155-L191
|
[
"def",
"setfocus",
"(",
"self",
",",
"focus",
")",
":",
"self",
".",
"focus",
"=",
"self",
".",
"data",
"[",
"focus",
"]",
"self",
".",
"focus_stage",
"=",
"focus",
"self",
".",
"__dict__",
".",
"update",
"(",
"self",
".",
"focus",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.despike
|
Applies expdecay_despiker and noise_despiker to data.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expocoef`.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
that data are excluded.
maxiter : int
    The max number of times that the filter is applied.
Returns
-------
None
|
latools/D_obj.py
|
def despike(self, expdecay_despiker=True, exponent=None,
noise_despiker=True, win=3, nlim=12., maxiter=3):
"""
Applies expdecay_despiker and noise_despiker to data.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expocoef`.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
that data are excluded.
maxiter : int
        The max number of times that the filter is applied.
Returns
-------
None
"""
if not hasattr(self, 'despiked'):
self.data['despiked'] = Bunch()
out = {}
for a, v in self.focus.items():
if 'time' not in a.lower():
sig = v.copy() # copy data
if expdecay_despiker:
if exponent is not None:
sig = proc.expdecay_despike(sig, exponent, self.tstep, maxiter)
else:
warnings.warn('exponent is None - either provide exponent, or run at `analyse`\nlevel to automatically calculate it.')
if noise_despiker:
sig = proc.noise_despike(sig, int(win), nlim, maxiter)
out[a] = sig
self.data['despiked'].update(out)
# recalculate total counts
self.data['total_counts'] = sum(self.data['despiked'].values())
self.setfocus('despiked')
return
|
def despike(self, expdecay_despiker=True, exponent=None,
noise_despiker=True, win=3, nlim=12., maxiter=3):
"""
Applies expdecay_despiker and noise_despiker to data.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expocoef`.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
that data are excluded.
maxiter : int
        The max number of times that the filter is applied.
Returns
-------
None
"""
if not hasattr(self, 'despiked'):
self.data['despiked'] = Bunch()
out = {}
for a, v in self.focus.items():
if 'time' not in a.lower():
sig = v.copy() # copy data
if expdecay_despiker:
if exponent is not None:
sig = proc.expdecay_despike(sig, exponent, self.tstep, maxiter)
else:
warnings.warn('exponent is None - either provide exponent, or run at `analyse`\nlevel to automatically calculate it.')
if noise_despiker:
sig = proc.noise_despike(sig, int(win), nlim, maxiter)
out[a] = sig
self.data['despiked'].update(out)
# recalculate total counts
self.data['total_counts'] = sum(self.data['despiked'].values())
self.setfocus('despiked')
return
|
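A hedged sketch of a rolling standard-deviation despiker in the spirit of the noise_despiker step; proc.noise_despike in latools differs in detail (windowing, iteration), and simple_noise_despike is an invented name.

import numpy as np

def simple_noise_despike(sig, win=3, nlim=12.0):
    sig = sig.astype(float).copy()
    half = win // 2
    for i in range(half, len(sig) - half):
        neighbours = np.r_[sig[i - half:i], sig[i + 1:i + half + 1]]
        mu, sd = neighbours.mean(), neighbours.std()
        if sig[i] > mu + nlim * sd:  # spike: replace with local mean
            sig[i] = mu
    return sig

s = np.ones(20)
s[10] = 1e6
print(simple_noise_despike(s)[10])  # -> 1.0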
[
"Applies",
"expdecay_despiker",
"and",
"noise_despiker",
"to",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L196-L245
|
[
"def",
"despike",
"(",
"self",
",",
"expdecay_despiker",
"=",
"True",
",",
"exponent",
"=",
"None",
",",
"noise_despiker",
"=",
"True",
",",
"win",
"=",
"3",
",",
"nlim",
"=",
"12.",
",",
"maxiter",
"=",
"3",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'despiked'",
")",
":",
"self",
".",
"data",
"[",
"'despiked'",
"]",
"=",
"Bunch",
"(",
")",
"out",
"=",
"{",
"}",
"for",
"a",
",",
"v",
"in",
"self",
".",
"focus",
".",
"items",
"(",
")",
":",
"if",
"'time'",
"not",
"in",
"a",
".",
"lower",
"(",
")",
":",
"sig",
"=",
"v",
".",
"copy",
"(",
")",
"# copy data",
"if",
"expdecay_despiker",
":",
"if",
"exponent",
"is",
"not",
"None",
":",
"sig",
"=",
"proc",
".",
"expdecay_despike",
"(",
"sig",
",",
"exponent",
",",
"self",
".",
"tstep",
",",
"maxiter",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"'exponent is None - either provide exponent, or run at `analyse`\\nlevel to automatically calculate it.'",
")",
"if",
"noise_despiker",
":",
"sig",
"=",
"proc",
".",
"noise_despike",
"(",
"sig",
",",
"int",
"(",
"win",
")",
",",
"nlim",
",",
"maxiter",
")",
"out",
"[",
"a",
"]",
"=",
"sig",
"self",
".",
"data",
"[",
"'despiked'",
"]",
".",
"update",
"(",
"out",
")",
"# recalculate total counts",
"self",
".",
"data",
"[",
"'total_counts'",
"]",
"=",
"sum",
"(",
"self",
".",
"data",
"[",
"'despiked'",
"]",
".",
"values",
"(",
")",
")",
"self",
".",
"setfocus",
"(",
"'despiked'",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.autorange
|
Automatically separates signal and background data regions.
Automatically detect signal and background regions in the laser
data, based on the behaviour of a single analyte. The analyte used
should be abundant and homogeneous in the sample.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
'signal' and 'background'. The minima between these two distributions
is taken as a rough threshold to identify signal and background
regions. Any point where the trace crosses this thrshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
width at a point where the gaussian intensity is at `conf` times the
gaussian maximum. These gaussians are fit to subsets of the data
centered around the transition regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
analyte : str
The analyte that autorange should consider. For best results,
choose an analyte that is present homogeneously in high
concentrations.
gwin : int
The smoothing window used for calculating the first derivative.
Must be odd.
win : int
Determines the width (c +/- win) of the transition data subsets.
on_mult and off_mult : tuple, len=2
Factors to control the width of the excluded transition regions.
    A region n times the full-width-half-maximum of the transition
    gradient will be removed either side of the transition center.
    `on_mult` and `off_mult` refer to the laser-on and laser-off
    transitions, respectively. See manual for full explanation.
    Defaults to (1, 1) and (1, 1.5).
Returns
-------
Outputs added as instance attributes. Returns None.
bkg, sig, trn : iterable, bool
    Boolean arrays identifying background, signal and transition
regions
bkgrng, sigrng and trnrng : iterable
(min, max) pairs identifying the boundaries of contiguous
True regions in the boolean arrays.
|
latools/D_obj.py
|
def autorange(self, analyte='total_counts', gwin=5, swin=3, win=30,
on_mult=[1., 1.], off_mult=[1., 1.5],
ploterrs=True, transform='log', **kwargs):
"""
Automatically separates signal and background data regions.
Automatically detect signal and background regions in the laser
data, based on the behaviour of a single analyte. The analyte used
    should be abundant and homogeneous in the sample.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
    'signal' and 'background'. The minimum between these two distributions
    is taken as a rough threshold to identify signal and background
    regions. Any point where the trace crosses this threshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
    width at a point where the gaussian intensity is at `conf` times the
gaussian maximum. These gaussians are fit to subsets of the data
    centered around the transition regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
analyte : str
The analyte that autorange should consider. For best results,
choose an analyte that is present homogeneously in high
concentrations.
gwin : int
The smoothing window used for calculating the first derivative.
Must be odd.
win : int
Determines the width (c +/- win) of the transition data subsets.
on_mult and off_mult : tuple, len=2
Factors to control the width of the excluded transition regions.
        A region n times the full-width-half-maximum of the transition
        gradient will be removed either side of the transition center.
        `on_mult` and `off_mult` refer to the laser-on and laser-off
        transitions, respectively. See manual for full explanation.
        Defaults to (1, 1) and (1, 1.5).
Returns
-------
Outputs added as instance attributes. Returns None.
bkg, sig, trn : iterable, bool
        Boolean arrays identifying background, signal and transition
regions
bkgrng, sigrng and trnrng : iterable
(min, max) pairs identifying the boundaries of contiguous
True regions in the boolean arrays.
"""
if analyte is None:
# sig = self.focus[self.internal_standard]
sig = self.data['total_counts']
elif analyte == 'total_counts':
sig = self.data['total_counts']
elif analyte in self.analytes:
sig = self.focus[analyte]
else:
raise ValueError('Invalid analyte.')
(self.bkg, self.sig,
self.trn, failed) = proc.autorange(self.Time, sig, gwin=gwin, swin=swin, win=win,
on_mult=on_mult, off_mult=off_mult,
transform=transform)
self.mkrngs()
errs_to_plot = False
if len(failed) > 0:
errs_to_plot = True
plotlines = []
for f in failed:
if f != self.Time[-1]:
plotlines.append(f)
# warnings.warn(("\n\nSample {:s}: ".format(self.sample) +
# "Transition identification at " +
# "{:.1f} failed.".format(f) +
# "\n **This is not necessarily a problem**"
# "\nBut please check the data plots and make sure " +
# "everything is OK.\n"))
if ploterrs and errs_to_plot and len(plotlines) > 0:
f, ax = self.tplot(ranges=True)
for pl in plotlines:
ax.axvline(pl, c='r', alpha=0.6, lw=3, ls='dashed')
return f, plotlines
else:
return
|
def autorange(self, analyte='total_counts', gwin=5, swin=3, win=30,
on_mult=[1., 1.], off_mult=[1., 1.5],
ploterrs=True, transform='log', **kwargs):
"""
Automatically separates signal and background data regions.
Automatically detect signal and background regions in the laser
data, based on the behaviour of a single analyte. The analyte used
    should be abundant and homogeneous in the sample.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
    'signal' and 'background'. The minimum between these two distributions
    is taken as a rough threshold to identify signal and background
    regions. Any point where the trace crosses this threshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
    width at a point where the gaussian intensity is at `conf` times the
gaussian maximum. These gaussians are fit to subsets of the data
centered around the transition regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
analyte : str
The analyte that autorange should consider. For best results,
choose an analyte that is present homogeneously in high
concentrations.
gwin : int
The smoothing window used for calculating the first derivative.
Must be odd.
win : int
Determines the width (c +/- win) of the transition data subsets.
on_mult and off_mult : tuple, len=2
Factors to control the width of the excluded transition regions.
A region n times the full-width-half-maximum of the transition
gradient will be removed either side of the transition center.
`on_mult` and `off_mult` refer to the laser-on and laser-off
transitions, respectively. See manual for full explanation.
Defaults to (1, 1) and (1, 1.5).
Returns
-------
Outputs added as instance attributes. Returns None.
bkg, sig, trn : iterable, bool
Boolean arrays identifying background, signal and transition
regions.
bkgrng, sigrng and trnrng : iterable
(min, max) pairs identifying the boundaries of contiguous
True regions in the boolean arrays.
"""
if analyte is None:
# sig = self.focus[self.internal_standard]
sig = self.data['total_counts']
elif analyte == 'total_counts':
sig = self.data['total_counts']
elif analyte in self.analytes:
sig = self.focus[analyte]
else:
raise ValueError('Invalid analyte.')
(self.bkg, self.sig,
self.trn, failed) = proc.autorange(self.Time, sig, gwin=gwin, swin=swin, win=win,
on_mult=on_mult, off_mult=off_mult,
transform=transform)
self.mkrngs()
errs_to_plot = False
if len(failed) > 0:
errs_to_plot = True
plotlines = []
for f in failed:
if f != self.Time[-1]:
plotlines.append(f)
# warnings.warn(("\n\nSample {:s}: ".format(self.sample) +
# "Transition identification at " +
# "{:.1f} failed.".format(f) +
# "\n **This is not necessarily a problem**"
# "\nBut please check the data plots and make sure " +
# "everything is OK.\n"))
if ploterrs and errs_to_plot and len(plotlines) > 0:
f, ax = self.tplot(ranges=True)
for pl in plotlines:
ax.axvline(pl, c='r', alpha=0.6, lw=3, ls='dashed')
return f, plotlines
else:
return
|
[
"Automatically",
"separates",
"signal",
"and",
"background",
"data",
"regions",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L248-L346
|
[
"def",
"autorange",
"(",
"self",
",",
"analyte",
"=",
"'total_counts'",
",",
"gwin",
"=",
"5",
",",
"swin",
"=",
"3",
",",
"win",
"=",
"30",
",",
"on_mult",
"=",
"[",
"1.",
",",
"1.",
"]",
",",
"off_mult",
"=",
"[",
"1.",
",",
"1.5",
"]",
",",
"ploterrs",
"=",
"True",
",",
"transform",
"=",
"'log'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"analyte",
"is",
"None",
":",
"# sig = self.focus[self.internal_standard]",
"sig",
"=",
"self",
".",
"data",
"[",
"'total_counts'",
"]",
"elif",
"analyte",
"==",
"'total_counts'",
":",
"sig",
"=",
"self",
".",
"data",
"[",
"'total_counts'",
"]",
"elif",
"analyte",
"in",
"self",
".",
"analytes",
":",
"sig",
"=",
"self",
".",
"focus",
"[",
"analyte",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid analyte.'",
")",
"(",
"self",
".",
"bkg",
",",
"self",
".",
"sig",
",",
"self",
".",
"trn",
",",
"failed",
")",
"=",
"proc",
".",
"autorange",
"(",
"self",
".",
"Time",
",",
"sig",
",",
"gwin",
"=",
"gwin",
",",
"swin",
"=",
"swin",
",",
"win",
"=",
"win",
",",
"on_mult",
"=",
"on_mult",
",",
"off_mult",
"=",
"off_mult",
",",
"transform",
"=",
"transform",
")",
"self",
".",
"mkrngs",
"(",
")",
"errs_to_plot",
"=",
"False",
"if",
"len",
"(",
"failed",
")",
">",
"0",
":",
"errs_to_plot",
"=",
"True",
"plotlines",
"=",
"[",
"]",
"for",
"f",
"in",
"failed",
":",
"if",
"f",
"!=",
"self",
".",
"Time",
"[",
"-",
"1",
"]",
":",
"plotlines",
".",
"append",
"(",
"f",
")",
"# warnings.warn((\"\\n\\nSample {:s}: \".format(self.sample) +",
"# \"Transition identification at \" +",
"# \"{:.1f} failed.\".format(f) +",
"# \"\\n **This is not necessarily a problem**\"",
"# \"\\nBut please check the data plots and make sure \" +",
"# \"everything is OK.\\n\"))",
"if",
"ploterrs",
"and",
"errs_to_plot",
"and",
"len",
"(",
"plotlines",
")",
">",
"0",
":",
"f",
",",
"ax",
"=",
"self",
".",
"tplot",
"(",
"ranges",
"=",
"True",
")",
"for",
"pl",
"in",
"plotlines",
":",
"ax",
".",
"axvline",
"(",
"pl",
",",
"c",
"=",
"'r'",
",",
"alpha",
"=",
"0.6",
",",
"lw",
"=",
"3",
",",
"ls",
"=",
"'dashed'",
")",
"return",
"f",
",",
"plotlines",
"else",
":",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
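The autorange record above walks through a two-step algorithm (KDE thresholding, then transition removal). Below is a minimal, self-contained sketch of the Step 1 idea only, assuming numpy and scipy; it is not latools' own proc.autorange, and the synthetic data, function name and variable names are invented for illustration.

import numpy as np
from scipy.stats import gaussian_kde

def rough_threshold(sig):
    # Step 1 of the docstring above: KDE of all the data, then take the
    # first density minimum between the two modes as a rough threshold.
    lsig = np.log10(sig[sig > 0])           # log transform, as transform='log'
    kde = gaussian_kde(lsig)
    x = np.linspace(lsig.min(), lsig.max(), 500)
    d = kde(x)
    interior = (d[1:-1] < d[:-2]) & (d[1:-1] < d[2:])   # local density minima
    mins = np.where(interior)[0] + 1
    return 10 ** x[mins[0]] if mins.size else None

rng = np.random.default_rng(0)
sig = np.concatenate([rng.lognormal(1, 0.3, 500),    # 'background' population
                      rng.lognormal(6, 0.3, 500)])   # 'signal' population
thresh = rough_threshold(sig)
# points where the trace crosses the threshold are the 'transitions'
crossings = np.where(np.diff((sig > thresh).astype(int)) != 0)[0]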
test
|
D.autorange_plot
|
Plot a detailed autorange report for this sample.
|
latools/D_obj.py
|
def autorange_plot(self, analyte='total_counts', gwin=7, swin=None, win=20,
on_mult=[1.5, 1.], off_mult=[1., 1.5],
transform='log'):
"""
Plot a detailed autorange report for this sample.
"""
if analyte is None:
# sig = self.focus[self.internal_standard]
sig = self.data['total_counts']
elif analyte == 'total_counts':
sig = self.data['total_counts']
elif analyte in self.analytes:
sig = self.focus[analyte]
else:
raise ValueError('Invalid analyte.')
if transform == 'log':
sig = np.log10(sig)
fig, axs = plot.autorange_plot(t=self.Time, sig=sig, gwin=gwin,
swin=swin, win=win, on_mult=on_mult,
off_mult=off_mult)
return fig, axs
|
def autorange_plot(self, analyte='total_counts', gwin=7, swin=None, win=20,
on_mult=[1.5, 1.], off_mult=[1., 1.5],
transform='log'):
"""
Plot a detailed autorange report for this sample.
"""
if analyte is None:
# sig = self.focus[self.internal_standard]
sig = self.data['total_counts']
elif analyte == 'total_counts':
sig = self.data['total_counts']
elif analyte in self.analytes:
sig = self.focus[analyte]
else:
raise ValueError('Invalid analyte.')
if transform == 'log':
sig = np.log10(sig)
fig, axs = plot.autorange_plot(t=self.Time, sig=sig, gwin=gwin,
swin=swin, win=win, on_mult=on_mult,
off_mult=off_mult)
return fig, axs
|
[
"Plot",
"a",
"detailed",
"autorange",
"report",
"for",
"this",
"sample",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L348-L371
|
[
"def",
"autorange_plot",
"(",
"self",
",",
"analyte",
"=",
"'total_counts'",
",",
"gwin",
"=",
"7",
",",
"swin",
"=",
"None",
",",
"win",
"=",
"20",
",",
"on_mult",
"=",
"[",
"1.5",
",",
"1.",
"]",
",",
"off_mult",
"=",
"[",
"1.",
",",
"1.5",
"]",
",",
"transform",
"=",
"'log'",
")",
":",
"if",
"analyte",
"is",
"None",
":",
"# sig = self.focus[self.internal_standard]",
"sig",
"=",
"self",
".",
"data",
"[",
"'total_counts'",
"]",
"elif",
"analyte",
"==",
"'total_counts'",
":",
"sig",
"=",
"self",
".",
"data",
"[",
"'total_counts'",
"]",
"elif",
"analyte",
"in",
"self",
".",
"analytes",
":",
"sig",
"=",
"self",
".",
"focus",
"[",
"analyte",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid analyte.'",
")",
"if",
"transform",
"==",
"'log'",
":",
"sig",
"=",
"np",
".",
"log10",
"(",
"sig",
")",
"fig",
",",
"axs",
"=",
"plot",
".",
"autorange_plot",
"(",
"t",
"=",
"self",
".",
"Time",
",",
"sig",
"=",
"sig",
",",
"gwin",
"=",
"gwin",
",",
"swin",
"=",
"swin",
",",
"win",
"=",
"win",
",",
"on_mult",
"=",
"on_mult",
",",
"off_mult",
"=",
"off_mult",
")",
"return",
"fig",
",",
"axs"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
test
|
D.mkrngs
|
Transform boolean arrays into list of limit pairs.
Gets Time limits of signal/background boolean arrays and stores them as
sigrng and bkgrng arrays. These arrays can be saved by 'save_ranges' in
the analyse object.
|
latools/D_obj.py
|
def mkrngs(self):
"""
Transform boolean arrays into list of limit pairs.
Gets Time limits of signal/background boolean arrays and stores them as
sigrng and bkgrng arrays. These arrays can be saved by 'save_ranges' in
the analyse object.
"""
bbool = bool_2_indices(self.bkg)
if bbool is not None:
self.bkgrng = self.Time[bbool]
else:
self.bkgrng = [[np.nan, np.nan]]
sbool = bool_2_indices(self.sig)
if sbool is not None:
self.sigrng = self.Time[sbool]
else:
self.sigrng = [[np.nan, np.nan]]
tbool = bool_2_indices(self.trn)
if tbool is not None:
self.trnrng = self.Time[tbool]
else:
self.trnrng = [[np.nan, np.nan]]
self.ns = np.zeros(self.Time.size)
n = 1
for i in range(len(self.sig) - 1):
if self.sig[i]:
self.ns[i] = n
if self.sig[i] and ~self.sig[i + 1]:
n += 1
self.n = int(max(self.ns)) # record number of traces
return
|
def mkrngs(self):
"""
Transform boolean arrays into list of limit pairs.
Gets Time limits of signal/background boolean arrays and stores them as
sigrng and bkgrng arrays. These arrays can be saved by 'save_ranges' in
the analyse object.
"""
bbool = bool_2_indices(self.bkg)
if bbool is not None:
self.bkgrng = self.Time[bbool]
else:
self.bkgrng = [[np.nan, np.nan]]
sbool = bool_2_indices(self.sig)
if sbool is not None:
self.sigrng = self.Time[sbool]
else:
self.sigrng = [[np.nan, np.nan]]
tbool = bool_2_indices(self.trn)
if tbool is not None:
self.trnrng = self.Time[tbool]
else:
self.trnrng = [[np.nan, np.nan]]
self.ns = np.zeros(self.Time.size)
n = 1
for i in range(len(self.sig) - 1):
if self.sig[i]:
self.ns[i] = n
if self.sig[i] and ~self.sig[i + 1]:
n += 1
self.n = int(max(self.ns)) # record number of traces
return
|
[
"Transform",
"boolean",
"arrays",
"into",
"list",
"of",
"limit",
"pairs",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L373-L406
|
[
"def",
"mkrngs",
"(",
"self",
")",
":",
"bbool",
"=",
"bool_2_indices",
"(",
"self",
".",
"bkg",
")",
"if",
"bbool",
"is",
"not",
"None",
":",
"self",
".",
"bkgrng",
"=",
"self",
".",
"Time",
"[",
"bbool",
"]",
"else",
":",
"self",
".",
"bkgrng",
"=",
"[",
"[",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"]",
"]",
"sbool",
"=",
"bool_2_indices",
"(",
"self",
".",
"sig",
")",
"if",
"sbool",
"is",
"not",
"None",
":",
"self",
".",
"sigrng",
"=",
"self",
".",
"Time",
"[",
"sbool",
"]",
"else",
":",
"self",
".",
"sigrng",
"=",
"[",
"[",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"]",
"]",
"tbool",
"=",
"bool_2_indices",
"(",
"self",
".",
"trn",
")",
"if",
"tbool",
"is",
"not",
"None",
":",
"self",
".",
"trnrng",
"=",
"self",
".",
"Time",
"[",
"tbool",
"]",
"else",
":",
"self",
".",
"trnrng",
"=",
"[",
"[",
"np",
".",
"nan",
",",
"np",
".",
"nan",
"]",
"]",
"self",
".",
"ns",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"Time",
".",
"size",
")",
"n",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"sig",
")",
"-",
"1",
")",
":",
"if",
"self",
".",
"sig",
"[",
"i",
"]",
":",
"self",
".",
"ns",
"[",
"i",
"]",
"=",
"n",
"if",
"self",
".",
"sig",
"[",
"i",
"]",
"and",
"~",
"self",
".",
"sig",
"[",
"i",
"+",
"1",
"]",
":",
"n",
"+=",
"1",
"self",
".",
"n",
"=",
"int",
"(",
"max",
"(",
"self",
".",
"ns",
")",
")",
"# record number of traces",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
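mkrngs above depends on a bool_2_indices helper that is not shown in this record. A hedged sketch of what such a mask-to-(start, end)-pairs conversion could look like, with an invented name:

import numpy as np

def bool_2_indices_sketch(mask):
    # pair up the rising and falling edges of a boolean mask
    edges = np.diff(mask.astype(int))
    starts = np.where(edges == 1)[0] + 1
    ends = np.where(edges == -1)[0]
    if mask[0]:
        starts = np.r_[0, starts]
    if mask[-1]:
        ends = np.r_[ends, mask.size - 1]
    return np.column_stack([starts, ends])

sig = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0], dtype=bool)
print(bool_2_indices_sketch(sig))   # [[1 3] [6 7]]
# indexing a Time array with these pairs yields the (min, max) limit
# pairs that mkrngs stores as sigrng / bkgrng / trnrng.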
test
|
D.bkg_subtract
|
Subtract provided background from signal (focus stage).
Results are saved in a new 'bkgsub' focus stage
Returns
-------
None
|
latools/D_obj.py
|
def bkg_subtract(self, analyte, bkg, ind=None, focus_stage='despiked'):
"""
Subtract provided background from signal (focus stage).
Results are saved in a new 'bkgsub' focus stage
Returns
-------
None
"""
if 'bkgsub' not in self.data.keys():
self.data['bkgsub'] = Bunch()
self.data['bkgsub'][analyte] = self.data[focus_stage][analyte] - bkg
if ind is not None:
self.data['bkgsub'][analyte][ind] = np.nan
return
|
def bkg_subtract(self, analyte, bkg, ind=None, focus_stage='despiked'):
"""
Subtract provided background from signal (focus stage).
Results are saved in a new 'bkgsub' focus stage
Returns
-------
None
"""
if 'bkgsub' not in self.data.keys():
self.data['bkgsub'] = Bunch()
self.data['bkgsub'][analyte] = self.data[focus_stage][analyte] - bkg
if ind is not None:
self.data['bkgsub'][analyte][ind] = np.nan
return
|
[
"Subtract",
"provided",
"background",
"from",
"signal",
"(",
"focus",
"stage",
")",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L409-L427
|
[
"def",
"bkg_subtract",
"(",
"self",
",",
"analyte",
",",
"bkg",
",",
"ind",
"=",
"None",
",",
"focus_stage",
"=",
"'despiked'",
")",
":",
"if",
"'bkgsub'",
"not",
"in",
"self",
".",
"data",
".",
"keys",
"(",
")",
":",
"self",
".",
"data",
"[",
"'bkgsub'",
"]",
"=",
"Bunch",
"(",
")",
"self",
".",
"data",
"[",
"'bkgsub'",
"]",
"[",
"analyte",
"]",
"=",
"self",
".",
"data",
"[",
"focus_stage",
"]",
"[",
"analyte",
"]",
"-",
"bkg",
"if",
"ind",
"is",
"not",
"None",
":",
"self",
".",
"data",
"[",
"'bkgsub'",
"]",
"[",
"analyte",
"]",
"[",
"ind",
"]",
"=",
"np",
".",
"nan",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
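The body of bkg_subtract above reduces to elementwise subtraction plus optional NaN-masking of excluded points. A standalone illustration with invented values:

import numpy as np

counts = np.array([100., 120., 5000., 5100., 90.])   # raw analyte counts
bkg = np.array([95., 98., 100., 102., 96.])          # estimated background
bkgsub = counts - bkg                                # per-point subtraction
ind = np.array([False, False, False, False, True])   # points to exclude
bkgsub[ind] = np.nan                                 # excluded points -> NaN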
test
|
D.correct_spectral_interference
|
Correct spectral interference.
Subtract interference counts from target_analyte, based on the
intensity of a source_analyte and a known fractional contribution (f).
Correction takes the form:
target_analyte -= source_analyte * f
Only operates on background-corrected data ('bkgsub').
To undo a correction,
rerun `self.bkg_subtract()`.
Parameters
----------
target_analyte : str
The name of the analyte to modify.
source_analyte : str
The name of the analyte to base the correction on.
f : float
The fraction of the intensity of the source_analyte to
subtract from the target_analyte. Correction is:
target_analyte - source_analyte * f
Returns
-------
None
|
latools/D_obj.py
|
def correct_spectral_interference(self, target_analyte, source_analyte, f):
"""
Correct spectral interference.
Subtract interference counts from target_analyte, based on the
intensity of a source_analyte and a known fractional contribution (f).
Correction takes the form:
target_analyte -= source_analyte * f
Only operates on background-corrected data ('bkgsub').
To undo a correction,
rerun `self.bkg_subtract()`.
Parameters
----------
target_analyte : str
The name of the analyte to modify.
source_analyte : str
The name of the analyte to base the correction on.
f : float
The fraction of the intensity of the source_analyte to
subtract from the target_analyte. Correction is:
target_analyte - source_analyte * f
Returns
-------
None
"""
if target_analyte not in self.analytes:
raise ValueError('target_analyte: {:} not in available analytes ({:})'.format(target_analyte, ', '.join(self.analytes)))
if source_analyte not in self.analytes:
raise ValueError('source_analyte: {:} not in available analytes ({:})'.format(source_analyte, ', '.join(self.analytes)))
self.data['bkgsub'][target_analyte] -= self.data['bkgsub'][source_analyte] * f
|
def correct_spectral_interference(self, target_analyte, source_analyte, f):
"""
Correct spectral interference.
Subtract interference counts from target_analyte, based on the
intensity of a source_analyte and a known fractional contribution (f).
Correction takes the form:
target_analyte -= source_analyte * f
Only operates on background-corrected data ('bkgsub').
To undo a correction,
rerun `self.bkg_subtract()`.
Parameters
----------
target_analyte : str
The name of the analyte to modify.
source_analyte : str
The name of the analyte to base the correction on.
f : float
The fraction of the intensity of the source_analyte to
subtract from the target_analyte. Correction is:
target_analyte - source_analyte * f
Returns
-------
None
"""
if target_analyte not in self.analytes:
raise ValueError('target_analyte: {:} not in available analytes ({:})'.format(target_analyte, ', '.join(self.analytes)))
if source_analyte not in self.analytes:
raise ValueError('source_analyte: {:} not in available analytes ({:})'.format(source_analyte, ', '.join(self.analytes)))
self.data['bkgsub'][target_analyte] -= self.data['bkgsub'][source_analyte] * f
|
[
"Correct",
"spectral",
"interference",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L430-L467
|
[
"def",
"correct_spectral_interference",
"(",
"self",
",",
"target_analyte",
",",
"source_analyte",
",",
"f",
")",
":",
"if",
"target_analyte",
"not",
"in",
"self",
".",
"analytes",
":",
"raise",
"ValueError",
"(",
"'target_analyte: {:} not in available analytes ({:})'",
".",
"format",
"(",
"target_analyte",
",",
"', '",
".",
"join",
"(",
"self",
".",
"analytes",
")",
")",
")",
"if",
"source_analyte",
"not",
"in",
"self",
".",
"analytes",
":",
"raise",
"ValueError",
"(",
"'source_analyte: {:} not in available analytes ({:})'",
".",
"format",
"(",
"source_analyte",
",",
"', '",
".",
"join",
"(",
"self",
".",
"analytes",
")",
")",
")",
"self",
".",
"data",
"[",
"'bkgsub'",
"]",
"[",
"target_analyte",
"]",
"-=",
"self",
".",
"data",
"[",
"'bkgsub'",
"]",
"[",
"source_analyte",
"]",
"*",
"f"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
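correct_spectral_interference above applies the stated formula, target_analyte -= source_analyte * f, to the 'bkgsub' data. The same arithmetic on invented arrays (names hypothetical):

import numpy as np

target = np.array([2.0e5, 2.1e5, 1.9e5])   # counts on the affected analyte
source = np.array([1.0e4, 1.2e4, 0.9e4])   # counts on the interfering analyte
f = 0.05                                    # known fractional contribution
target_corrected = target - source * f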
test
|
D.ratio
|
Divide all analytes by a specified internal_standard analyte.
Parameters
----------
internal_standard : str
The analyte used as the internal_standard.
Returns
-------
None
|
latools/D_obj.py
|
def ratio(self, internal_standard=None):
"""
Divide all analytes by a specified internal_standard analyte.
Parameters
----------
internal_standard : str
The analyte used as the internal_standard.
Returns
-------
None
"""
if internal_standard is not None:
self.internal_standard = internal_standard
self.data['ratios'] = Bunch()
for a in self.analytes:
self.data['ratios'][a] = (self.data['bkgsub'][a] /
self.data['bkgsub'][self.internal_standard])
self.setfocus('ratios')
return
|
def ratio(self, internal_standard=None):
"""
Divide all analytes by a specified internal_standard analyte.
Parameters
----------
internal_standard : str
The analyte used as the internal_standard.
Returns
-------
None
"""
if internal_standard is not None:
self.internal_standard = internal_standard
self.data['ratios'] = Bunch()
for a in self.analytes:
self.data['ratios'][a] = (self.data['bkgsub'][a] /
self.data['bkgsub'][self.internal_standard])
self.setfocus('ratios')
return
|
[
"Divide",
"all",
"analytes",
"by",
"a",
"specified",
"internal_standard",
"analyte",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L470-L491
|
[
"def",
"ratio",
"(",
"self",
",",
"internal_standard",
"=",
"None",
")",
":",
"if",
"internal_standard",
"is",
"not",
"None",
":",
"self",
".",
"internal_standard",
"=",
"internal_standard",
"self",
".",
"data",
"[",
"'ratios'",
"]",
"=",
"Bunch",
"(",
")",
"for",
"a",
"in",
"self",
".",
"analytes",
":",
"self",
".",
"data",
"[",
"'ratios'",
"]",
"[",
"a",
"]",
"=",
"(",
"self",
".",
"data",
"[",
"'bkgsub'",
"]",
"[",
"a",
"]",
"/",
"self",
".",
"data",
"[",
"'bkgsub'",
"]",
"[",
"self",
".",
"internal_standard",
"]",
")",
"self",
".",
"setfocus",
"(",
"'ratios'",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
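ratio above divides every analyte by the internal standard. The same loop with plain dicts standing in for the Bunch containers (analyte names illustrative):

import numpy as np

bkgsub = {'Mg24': np.array([1.0, 2.0, 3.0]),
          'Ca43': np.array([10.0, 10.0, 10.0])}
internal_standard = 'Ca43'
ratios = {a: bkgsub[a] / bkgsub[internal_standard] for a in bkgsub}
# ratios['Ca43'] is identically 1.0; every other entry is analyte/Ca43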
test
|
D.calibrate
|
Apply calibration to data.
The `calib_ps` dict must be calculated at the `analyse` level,
and passed to this calibrate function.
Parameters
----------
calib_ps : dict
A dict of calibration values to apply to each analyte.
Returns
-------
None
|
latools/D_obj.py
|
def calibrate(self, calib_ps, analytes=None):
"""
Apply calibration to data.
The `calib_ps` dict must be calculated at the `analyse` level,
and passed to this calibrate function.
Parameters
----------
calib_ps : dict
A dict of calibration values to apply to each analyte.
Returns
-------
None
"""
# can have calibration function stored in self and pass *coefs?
if analytes is None:
analytes = self.analytes
if 'calibrated' not in self.data.keys():
self.data['calibrated'] = Bunch()
for a in analytes:
m = calib_ps[a]['m'].new(self.uTime)
if 'c' in calib_ps[a]:
c = calib_ps[a]['c'].new(self.uTime)
else:
c = 0
self.data['calibrated'][a] = self.data['ratios'][a] * m + c
if self.internal_standard not in analytes:
self.data['calibrated'][self.internal_standard] = \
np.empty(len(self.data['ratios'][self.internal_standard]))
self.setfocus('calibrated')
return
|
def calibrate(self, calib_ps, analytes=None):
"""
Apply calibration to data.
The `calib_ps` dict must be calculated at the `analyse` level,
and passed to this calibrate function.
Parameters
----------
calib_ps : dict
A dict of calibration values to apply to each analyte.
Returns
-------
None
"""
# can have calibration function stored in self and pass *coefs?
if analytes is None:
analytes = self.analytes
if 'calibrated' not in self.data.keys():
self.data['calibrated'] = Bunch()
for a in analytes:
m = calib_ps[a]['m'].new(self.uTime)
if 'c' in calib_ps[a]:
c = calib_ps[a]['c'].new(self.uTime)
else:
c = 0
self.data['calibrated'][a] = self.data['ratios'][a] * m + c
if self.internal_standard not in analytes:
self.data['calibrated'][self.internal_standard] = \
np.empty(len(self.data['ratios'][self.internal_standard]))
self.setfocus('calibrated')
return
|
[
"Apply",
"calibration",
"to",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L494-L532
|
[
"def",
"calibrate",
"(",
"self",
",",
"calib_ps",
",",
"analytes",
"=",
"None",
")",
":",
"# can have calibration function stored in self and pass *coefs?",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"if",
"'calibrated'",
"not",
"in",
"self",
".",
"data",
".",
"keys",
"(",
")",
":",
"self",
".",
"data",
"[",
"'calibrated'",
"]",
"=",
"Bunch",
"(",
")",
"for",
"a",
"in",
"analytes",
":",
"m",
"=",
"calib_ps",
"[",
"a",
"]",
"[",
"'m'",
"]",
".",
"new",
"(",
"self",
".",
"uTime",
")",
"if",
"'c'",
"in",
"calib_ps",
"[",
"a",
"]",
":",
"c",
"=",
"calib_ps",
"[",
"a",
"]",
"[",
"'c'",
"]",
".",
"new",
"(",
"self",
".",
"uTime",
")",
"else",
":",
"c",
"=",
"0",
"self",
".",
"data",
"[",
"'calibrated'",
"]",
"[",
"a",
"]",
"=",
"self",
".",
"data",
"[",
"'ratios'",
"]",
"[",
"a",
"]",
"*",
"m",
"+",
"c",
"if",
"self",
".",
"internal_standard",
"not",
"in",
"analytes",
":",
"self",
".",
"data",
"[",
"'calibrated'",
"]",
"[",
"self",
".",
"internal_standard",
"]",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"self",
".",
"data",
"[",
"'ratios'",
"]",
"[",
"self",
".",
"internal_standard",
"]",
")",
")",
"self",
".",
"setfocus",
"(",
"'calibrated'",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
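calibrate above evaluates a time-varying slope via calib_ps[a]['m'].new(self.uTime), an object whose API is not shown in this record. In the sketch below a plain linear interpolation stands in for that object; all values are invented.

import numpy as np

uTime = np.array([0.0, 10.0, 20.0])        # analysis time
ratios = np.array([0.5, 0.6, 0.55])        # analyte/internal-standard ratios
# stand-in for calib_ps[a]['m'].new(uTime): a drift-corrected slope
m = np.interp(uTime, [0.0, 20.0], [2.0, 2.2])
c = 0.0                                    # no intercept term fitted
calibrated = ratios * m + c                # y = ratio * m + c, as above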
test
|
D.sample_stats
|
Calculate sample statistics
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Parameters
----------
analytes : array_like
List of analytes to calculate the statistic on
filt : bool or str
The filter to apply to the data when calculating sample statistics.
bool: True applies filter specified in filt.switches.
str: logical string specifying a particular filter
stat_fns : dict
Dict of {name: function} pairs. Functions that take a single
array_like input, and return a single statistic. Functions should
be able to cope with NaN values.
eachtrace : bool
True: per-ablation statistics
False: whole sample statistics
Returns
-------
None
|
latools/D_obj.py
|
def sample_stats(self, analytes=None, filt=True,
stat_fns={},
eachtrace=True):
"""
Calculate sample statistics
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Parameters
----------
analytes : array_like
List of analytes to calculate the statistic on
filt : bool or str
The filter to apply to the data when calculating sample statistics.
bool: True applies filter specified in filt.switches.
str: logical string specifying a particular filter
stat_fns : dict
Dict of {name: function} pairs. Functions that take a single
array_like input, and return a single statistic. Functions should
be able to cope with NaN values.
eachtrace : bool
True: per-ablation statistics
False: whole sample statistics
Returns
-------
None
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
self.stats = Bunch()
self.stats['analytes'] = analytes
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
for n, f in stat_fns.items():
self.stats[n] = []
for a in analytes:
ind = self.filt.grab_filt(filt, a)
dat = nominal_values(self.focus[a])
if eachtrace:
sts = []
for t in np.arange(self.n) + 1:
sts.append(f(dat[ind & (self.ns == t)]))
self.stats[n].append(sts)
else:
self.stats[n].append(f(dat[ind]))
self.stats[n] = np.array(self.stats[n])
return
|
def sample_stats(self, analytes=None, filt=True,
stat_fns={},
eachtrace=True):
"""
Calculate sample statistics
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Parameters
----------
analytes : array_like
List of analytes to calculate the statistic on
filt : bool or str
The filter to apply to the data when calculating sample statistics.
bool: True applies filter specified in filt.switches.
str: logical string specifying a particular filter
stat_fns : dict
Dict of {name: function} pairs. Functions that take a single
array_like input, and return a single statistic. Functions should
be able to cope with NaN values.
eachtrace : bool
True: per-ablation statistics
False: whole sample statistics
Returns
-------
None
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
self.stats = Bunch()
self.stats['analytes'] = analytes
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
for n, f in stat_fns.items():
self.stats[n] = []
for a in analytes:
ind = self.filt.grab_filt(filt, a)
dat = nominal_values(self.focus[a])
if eachtrace:
sts = []
for t in np.arange(self.n) + 1:
sts.append(f(dat[ind & (self.ns == t)]))
self.stats[n].append(sts)
else:
self.stats[n].append(f(dat[ind]))
self.stats[n] = np.array(self.stats[n])
return
|
[
"Calculate",
"sample",
"statistics"
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L536-L590
|
[
"def",
"sample_stats",
"(",
"self",
",",
"analytes",
"=",
"None",
",",
"filt",
"=",
"True",
",",
"stat_fns",
"=",
"{",
"}",
",",
"eachtrace",
"=",
"True",
")",
":",
"if",
"analytes",
"is",
"None",
":",
"analytes",
"=",
"self",
".",
"analytes",
"elif",
"isinstance",
"(",
"analytes",
",",
"str",
")",
":",
"analytes",
"=",
"[",
"analytes",
"]",
"self",
".",
"stats",
"=",
"Bunch",
"(",
")",
"self",
".",
"stats",
"[",
"'analytes'",
"]",
"=",
"analytes",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
",",
"category",
"=",
"RuntimeWarning",
")",
"for",
"n",
",",
"f",
"in",
"stat_fns",
".",
"items",
"(",
")",
":",
"self",
".",
"stats",
"[",
"n",
"]",
"=",
"[",
"]",
"for",
"a",
"in",
"analytes",
":",
"ind",
"=",
"self",
".",
"filt",
".",
"grab_filt",
"(",
"filt",
",",
"a",
")",
"dat",
"=",
"nominal_values",
"(",
"self",
".",
"focus",
"[",
"a",
"]",
")",
"if",
"eachtrace",
":",
"sts",
"=",
"[",
"]",
"for",
"t",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"n",
")",
"+",
"1",
":",
"sts",
".",
"append",
"(",
"f",
"(",
"dat",
"[",
"ind",
"&",
"(",
"self",
".",
"ns",
"==",
"t",
")",
"]",
")",
")",
"self",
".",
"stats",
"[",
"n",
"]",
".",
"append",
"(",
"sts",
")",
"else",
":",
"self",
".",
"stats",
"[",
"n",
"]",
".",
"append",
"(",
"f",
"(",
"dat",
"[",
"ind",
"]",
")",
")",
"self",
".",
"stats",
"[",
"n",
"]",
"=",
"np",
".",
"array",
"(",
"self",
".",
"stats",
"[",
"n",
"]",
")",
"return"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
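sample_stats above loops NaN-tolerant statistic functions over per-ablation subsets selected by ind & (ns == t). The same pattern, self-contained and with invented data:

import numpy as np

dat = np.array([1.0, 2.0, np.nan, 4.0, 5.0, 6.0])
ns = np.array([1, 1, 1, 2, 2, 2])                      # ablation number per point
ind = np.array([True, True, True, True, False, True])  # filter to apply
stat_fns = {'mean': np.nanmean, 'std': np.nanstd}      # NaN-aware statistics
stats = {name: np.array([fn(dat[ind & (ns == t)]) for t in np.unique(ns)])
         for name, fn in stat_fns.items()}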
test
|
D.ablation_times
|
Function for calculating the ablation time for each
ablation.
Returns
-------
dict of times for each ablation.
|
latools/D_obj.py
|
def ablation_times(self):
"""
Function for calculating the ablation time for each
ablation.
Returns
-------
dict of times for each ablation.
"""
ats = {}
for n in np.arange(self.n) + 1:
t = self.Time[self.ns == n]
ats[n - 1] = t.max() - t.min()
return ats
|
def ablation_times(self):
"""
Function for calculating the ablation time for each
ablation.
Returns
-------
dict of times for each ablation.
"""
ats = {}
for n in np.arange(self.n) + 1:
t = self.Time[self.ns == n]
ats[n - 1] = t.max() - t.min()
return ats
|
[
"Function",
"for",
"calculating",
"the",
"ablation",
"time",
"for",
"each",
"ablation",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L593-L606
|
[
"def",
"ablation_times",
"(",
"self",
")",
":",
"ats",
"=",
"{",
"}",
"for",
"n",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"n",
")",
"+",
"1",
":",
"t",
"=",
"self",
".",
"Time",
"[",
"self",
".",
"ns",
"==",
"n",
"]",
"ats",
"[",
"n",
"-",
"1",
"]",
"=",
"t",
".",
"max",
"(",
")",
"-",
"t",
".",
"min",
"(",
")",
"return",
"ats"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
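ablation_times above is a max-minus-min of Time per ablation number. A worked miniature (values invented):

import numpy as np

Time = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
ns = np.array([1, 1, 0, 2, 2, 2])     # 0 marks non-signal points
ats = {n - 1: Time[ns == n].max() - Time[ns == n].min()
       for n in np.arange(ns.max()) + 1}
# ats == {0: 1.0, 1: 2.0}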
test
|
D.filter_threshold
|
Apply threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
The analyte to apply the threshold filter to.
threshold : float
The threshold value.
Returns
-------
None
|
latools/D_obj.py
|
def filter_threshold(self, analyte, threshold):
"""
Apply threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
The analyte to apply the threshold filter to.
threshold : float
The threshold value.
Returns
-------
None
"""
params = locals()
del(params['self'])
# generate filter
below, above = filters.threshold(self.focus[analyte], threshold)
setn = self.filt.maxset + 1
self.filt.add(analyte + '_thresh_below',
below,
'Keep below {:.3e} '.format(threshold) + analyte,
params, setn=setn)
self.filt.add(analyte + '_thresh_above',
above,
'Keep above {:.3e} '.format(threshold) + analyte,
params, setn=setn)
|
def filter_threshold(self, analyte, threshold):
"""
Apply threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
The analyte to apply the threshold filter to.
threshold : float
The threshold value.
Returns
-------
None
"""
params = locals()
del(params['self'])
# generate filter
below, above = filters.threshold(self.focus[analyte], threshold)
setn = self.filt.maxset + 1
self.filt.add(analyte + '_thresh_below',
below,
'Keep below {:.3e} '.format(threshold) + analyte,
params, setn=setn)
self.filt.add(analyte + '_thresh_above',
above,
'Keep above {:.3e} '.format(threshold) + analyte,
params, setn=setn)
|
[
"Apply",
"threshold",
"filter",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L610-L650
|
[
"def",
"filter_threshold",
"(",
"self",
",",
"analyte",
",",
"threshold",
")",
":",
"params",
"=",
"locals",
"(",
")",
"del",
"(",
"params",
"[",
"'self'",
"]",
")",
"# generate filter",
"below",
",",
"above",
"=",
"filters",
".",
"threshold",
"(",
"self",
".",
"focus",
"[",
"analyte",
"]",
",",
"threshold",
")",
"setn",
"=",
"self",
".",
"filt",
".",
"maxset",
"+",
"1",
"self",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_thresh_below'",
",",
"below",
",",
"'Keep below {:.3e} '",
".",
"format",
"(",
"threshold",
")",
"+",
"analyte",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"self",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_thresh_above'",
",",
"above",
",",
"'Keep above {:.3e} '",
".",
"format",
"(",
"threshold",
")",
"+",
"analyte",
",",
"params",
",",
"setn",
"=",
"setn",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
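filters.threshold itself is not shown in this record; the docstring implies it returns complementary below/above masks. A sketch under that assumption (the boundary handling is a guess):

import numpy as np

def threshold_sketch(vals, threshold):
    # two complementary boolean filters, as described in the record above
    below = vals <= threshold
    above = vals >= threshold
    return below, above

below, above = threshold_sketch(np.array([0.1, 0.5, 0.9, 1.5]), 1.0)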
test
|
D.filter_gradient_threshold
|
Apply gradient threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
The analyte to apply the gradient threshold filter to.
threshold : float
The gradient threshold value.
win : int
Window used to calculate gradients (n points)
recalc : bool
Whether or not to re-calculate the gradients.
Returns
-------
None
|
latools/D_obj.py
|
def filter_gradient_threshold(self, analyte, win, threshold, recalc=True):
"""
Apply gradient threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
The analyte to apply the gradient threshold filter to.
threshold : float
The gradient threshold value.
win : int
Window used to calculate gradients (n points)
recalc : bool
Whether or not to re-calculate the gradients.
Returns
-------
None
"""
params = locals()
del(params['self'])
# calculate absolute gradient
if recalc or not self.grads_calced:
self.grads = calc_grads(self.Time, self.focus,
[analyte], win)
self.grads_calced = True
below, above = filters.threshold(abs(self.grads[analyte]), threshold)
setn = self.filt.maxset + 1
self.filt.add(analyte + '_gthresh_below',
below,
'Keep gradient below {:.3e} '.format(threshold) + analyte,
params, setn=setn)
self.filt.add(analyte + '_gthresh_above',
above,
'Keep gradient above {:.3e} '.format(threshold) + analyte,
params, setn=setn)
|
def filter_gradient_threshold(self, analyte, win, threshold, recalc=True):
"""
Apply gradient threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
The analyte to apply the gradient threshold filter to.
threshold : float
The gradient threshold value.
win : int
Window used to calculate gradients (n points)
recalc : bool
Whether or not to re-calculate the gradients.
Returns
-------
None
"""
params = locals()
del(params['self'])
# calculate absolute gradient
if recalc or not self.grads_calced:
self.grads = calc_grads(self.Time, self.focus,
[analyte], win)
self.grads_calced = True
below, above = filters.threshold(abs(self.grads[analyte]), threshold)
setn = self.filt.maxset + 1
self.filt.add(analyte + '_gthresh_below',
below,
'Keep gradient below {:.3e} '.format(threshold) + analyte,
params, setn=setn)
self.filt.add(analyte + '_gthresh_above',
above,
'Keep gradient above {:.3e} '.format(threshold) + analyte,
params, setn=setn)
|
[
"Apply",
"gradient",
"threshold",
"filter",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L653-L702
|
[
"def",
"filter_gradient_threshold",
"(",
"self",
",",
"analyte",
",",
"win",
",",
"threshold",
",",
"recalc",
"=",
"True",
")",
":",
"params",
"=",
"locals",
"(",
")",
"del",
"(",
"params",
"[",
"'self'",
"]",
")",
"# calculate absolute gradient",
"if",
"recalc",
"or",
"not",
"self",
".",
"grads_calced",
":",
"self",
".",
"grads",
"=",
"calc_grads",
"(",
"self",
".",
"Time",
",",
"self",
".",
"focus",
",",
"[",
"analyte",
"]",
",",
"win",
")",
"self",
".",
"grads_calced",
"=",
"True",
"below",
",",
"above",
"=",
"filters",
".",
"threshold",
"(",
"abs",
"(",
"self",
".",
"grads",
"[",
"analyte",
"]",
")",
",",
"threshold",
")",
"setn",
"=",
"self",
".",
"filt",
".",
"maxset",
"+",
"1",
"self",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_gthresh_below'",
",",
"below",
",",
"'Keep gradient below {:.3e} '",
".",
"format",
"(",
"threshold",
")",
"+",
"analyte",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"self",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_gthresh_above'",
",",
"above",
",",
"'Keep gradient above {:.3e} '",
".",
"format",
"(",
"threshold",
")",
"+",
"analyte",
",",
"params",
",",
"setn",
"=",
"setn",
")"
] |
cd25a650cfee318152f234d992708511f7047fbe
|
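filter_gradient_threshold above thresholds the absolute value of a windowed gradient from calc_grads, which is not shown in this record. np.gradient is used as a simple stand-in below; the window-based fit is omitted.

import numpy as np

t = np.linspace(0, 10, 101)
y = np.sin(t)
grad = np.gradient(y, t)           # simple stand-in for calc_grads
below = np.abs(grad) <= 0.5        # 'keep gradient below' mask
above = np.abs(grad) >= 0.5        # 'keep gradient above' mask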
test
|
D.filter_clustering
|
Applies an n - dimensional clustering filter to the data.
Available Clustering Algorithms
* 'meanshift': The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
* 'kmeans': The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
* 'DBSCAN': The `sklearn.cluster.DBSCAN` algorithm. Automatically
determines the number and characteristics of clusters
within the data based on the 'connectivity' of the
data (i.e. how far apart each data point is in a
multi-dimensional parameter space). Requires you to
set `eps`, the minimum distance a point must be from
another point to be considered in the same cluster,
and `min_samples`, the minimum number of points that
must be within the minimum distance for it to be
considered a cluster. It may also be run in automatic
mode by specifying `n_clusters` alongside
`min_samples`, where eps is decreased until the
desired number of clusters is obtained.
For more information on these algorithms, refer to the
documentation.
Parameters
----------
analytes : str
The analyte(s) that the filter applies to.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
normalise : bool
Whether or not to normalise the data to zero mean and unit
variance. Recommended if clustering based on more than 1 analyte.
Uses `sklearn.preprocessing.scale`.
method : str
Which clustering algorithm to use (see above).
include_time : bool
Whether or not to include the Time variable in the clustering
analysis. Useful if you're looking for spatially continuous
clusters in your data, i.e. this will identify each spot in your
analysis as an individual cluster.
sort : bool, str or array-like
Whether or not to label the resulting clusters according to their
contents. If used, the cluster with the lowest values will be
labelled from 0, in order of increasing cluster mean value.
The sorting rules depend on the value of 'sort', which can be the name
of a single analyte (str), a list of several analyte names (array-like)
or True (bool), to specify all analytes used to calculate the cluster.
min_data : int
The minimum number of data points that should be considered by
the filter. Default = 10.
**kwargs
Parameters passed to the clustering algorithm specified by
`method`.
Meanshift Parameters
--------------------
bandwidth : str or float
The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
------------------
n_clusters : int
The number of clusters expected in the data.
DBSCAN Parameters
-----------------
eps : float
The minimum 'distance' points must be apart for them to be in the
same cluster. Defaults to 0.3. Note: If the data are normalised
(they should be for DBSCAN) this is in terms of total sample
variance. Normalised data have a mean of 0 and a variance of 1.
min_samples : int
The minimum number of samples within distance `eps` required
to be considered as an independent cluster.
n_clusters : int
The number of clusters expected. If specified, `eps` will be
incrementally reduced until the expected number of clusters is
found.
maxiter : int
The maximum number of iterations DBSCAN will run.
Returns
-------
None
|
latools/D_obj.py
|
def filter_clustering(self, analytes, filt=False, normalise=True,
method='meanshift', include_time=False,
sort=None, min_data=10, **kwargs):
"""
Applies an n - dimensional clustering filter to the data.
Available Clustering Algorithms
* 'meanshift': The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
* 'kmeans': The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
* 'DBSCAN': The `sklearn.cluster.DBSCAN` algorithm. Automatically
determines the number and characteristics of clusters
within the data based on the 'connectivity' of the
data (i.e. how far apart each data point is in a
multi-dimensional parameter space). Requires you to
set `eps`, the minimum distance a point must be from
another point to be considered in the same cluster,
and `min_samples`, the minimum number of points that
must be within the minimum distance for it to be
considered a cluster. It may also be run in automatic
mode by specifying `n_clusters` alongside
`min_samples`, where eps is decreased until the
desired number of clusters is obtained.
For more information on these algorithms, refer to the
documentation.
Parameters
----------
analytes : str
The analyte(s) that the filter applies to.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
normalise : bool
Whether or not to normalise the data to zero mean and unit
variance. Recommended if clustering based on more than 1 analyte.
Uses `sklearn.preprocessing.scale`.
method : str
Which clustering algorithm to use (see above).
include_time : bool
Whether or not to include the Time variable in the clustering
analysis. Useful if you're looking for spatially continuous
clusters in your data, i.e. this will identify each spot in your
analysis as an individual cluster.
sort : bool, str or array-like
Whether or not to label the resulting clusters according to their
contents. If used, the cluster with the lowest values will be
labelled from 0, in order of increasing cluster mean value.
The sorting rules depend on the value of 'sort', which can be the name
of a single analyte (str), a list of several analyte names (array-like)
or True (bool), to specify all analytes used to calculate the cluster.
min_data : int
The minimum number of data points that should be considered by
the filter. Default = 10.
**kwargs
Parameters passed to the clustering algorithm specified by
`method`.
Meanshift Parameters
--------------------
bandwidth : str or float
The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
------------------
n_clusters : int
The number of clusters expected in the data.
DBSCAN Parameters
-----------------
eps : float
The minimum 'distance' points must be apart for them to be in the
same cluster. Defaults to 0.3. Note: If the data are normalised
(they should be for DBSCAN) this is in terms of total sample
variance. Normalised data have a mean of 0 and a variance of 1.
min_samples : int
The minimum number of samples within distance `eps` required
to be considered as an independent cluster.
n_clusters : int
The number of clusters expected. If specified, `eps` will be
incrementally reduced until the expected number of clusters is
found.
maxiter : int
The maximum number of iterations DBSCAN will run.
Returns
-------
None
"""
params = locals()
del(params['self'])
# convert string to list, if single analyte
if isinstance(analytes, str):
analytes = [analytes]
setn = self.filt.maxset + 1
# generate filter
vals = np.vstack(nominal_values(list(self.focus.values())))
if filt is not None:
ind = (self.filt.grab_filt(filt, analytes) &
np.apply_along_axis(all, 0, ~np.isnan(vals)))
else:
ind = np.apply_along_axis(all, 0, ~np.isnan(vals))
if sum(ind) > min_data:
# get indices for data passed to clustering
sampled = np.arange(self.Time.size)[ind]
# generate data for clustering
if include_time:
extra = self.Time
else:
extra = None
# get data as array
ds = stack_keys(self.focus, analytes, extra)
# apply filter, and get nominal values
ds = nominal_values(ds[ind, :])
if normalise | (len(analytes) > 1):
ds = preprocessing.scale(ds)
method_key = {'kmeans': clustering.cluster_kmeans,
# 'DBSCAN': clustering.cluster_DBSCAN,
'meanshift': clustering.cluster_meanshift}
cfun = method_key[method]
labels, core_samples_mask = cfun(ds, **kwargs)
# return labels, and if DBSCAN core_sample_mask
labels_unique = np.unique(labels)
# label the clusters according to their contents
if (sort is not None) & (sort is not False):
if isinstance(sort, str):
sort = [sort]
sanalytes = analytes
# make boolean filter to select analytes
if sort is True:
sortk = np.array([True] * len(sanalytes))
else:
sortk = np.array([s in sort for s in sanalytes])
# create per-point mean based on selected analytes.
sd = np.apply_along_axis(sum, 1, ds[:, sortk])
# calculate per-cluster means
avs = [np.nanmean(sd[labels == lab]) for lab in labels_unique]
# re-order the cluster labels based on their means
order = [x[0] for x in sorted(enumerate(avs), key=lambda x:x[1])]
sdict = dict(zip(order, labels_unique))
else:
sdict = dict(zip(labels_unique, labels_unique))
filts = {}
for ind, lab in sdict.items():
filts[lab] = labels == ind
# only applies to DBSCAN results.
if not all(np.isnan(core_samples_mask)):
filts['core'] = core_samples_mask
resized = {}
for k, v in filts.items():
resized[k] = np.zeros(self.Time.size, dtype=bool)
resized[k][sampled] = v
namebase = '-'.join(analytes) + '_cluster-' + method
info = '-'.join(analytes) + ' cluster filter.'
if method == 'DBSCAN':
for k, v in resized.items():
if isinstance(k, str):
name = namebase + '_core'
elif k < 0:
name = namebase + '_noise'
else:
name = namebase + '_{:.0f}'.format(k)
self.filt.add(name, v, info=info, params=params, setn=setn)
else:
for k, v in resized.items():
name = namebase + '_{:.0f}'.format(k)
self.filt.add(name, v, info=info, params=params, setn=setn)
else:
# if there are no data
name = '-'.join(analytes) + '_cluster-' + method + '_0'
info = '-'.join(analytes) + ' cluster filter failed.'
self.filt.add(name, np.zeros(self.Time.size, dtype=bool),
info=info, params=params, setn=setn)
return
|
def filter_clustering(self, analytes, filt=False, normalise=True,
method='meanshift', include_time=False,
sort=None, min_data=10, **kwargs):
"""
Applies an n - dimensional clustering filter to the data.
Available Clustering Algorithms
* 'meanshift': The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
* 'kmeans': The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
* 'DBSCAN': The `sklearn.cluster.DBSCAN` algorithm. Automatically
determines the number and characteristics of clusters
within the data based on the 'connectivity' of the
data (i.e. how far apart each data point is in a
multi-dimensional parameter space). Requires you to
set `eps`, the minimum distance a point must be from
another point to be considered in the same cluster,
and `min_samples`, the minimum number of points that
must be within the minimum distance for it to be
considered a cluster. It may also be run in automatic
mode by specifying `n_clusters` alongside
`min_samples`, where eps is decreased until the
desired number of clusters is obtained.
For more information on these algorithms, refer to the
documentation.
Parameters
----------
analytes : str
The analyte(s) that the filter applies to.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
normalise : bool
Whether or not to normalise the data to zero mean and unit
variance. Recommended if clustering based on more than 1 analyte.
Uses `sklearn.preprocessing.scale`.
method : str
Which clustering algorithm to use (see above).
include_time : bool
Whether or not to include the Time variable in the clustering
analysis. Useful if you're looking for spatially continuous
clusters in your data, i.e. this will identify each spot in your
analysis as an individual cluster.
sort : bool, str or array-like
Whether or not to label the resulting clusters according to their
contents. If used, the cluster with the lowest values will be
labelled from 0, in order of increasing cluster mean value.
The sorting rules depend on the value of 'sort', which can be the name
of a single analyte (str), a list of several analyte names (array-like)
or True (bool), to specify all analytes used to calculate the cluster.
min_data : int
The minimum number of data points that should be considered by
the filter. Default = 10.
**kwargs
Parameters passed to the clustering algorithm specified by
`method`.
Meanshift Parameters
--------------------
bandwidth : str or float
The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
------------------
n_clusters : int
The number of clusters expected in the data.
DBSCAN Parameters
-----------------
eps : float
The minimum 'distance' points must be apart for them to be in the
same cluster. Defaults to 0.3. Note: If the data are normalised
(they should be for DBSCAN) this is in terms of total sample
variance. Normalised data have a mean of 0 and a variance of 1.
min_samples : int
The minimum number of samples within distance `eps` required
to be considered as an independent cluster.
n_clusters : int
The number of clusters expected. If specified, `eps` will be
incrementally reduced until the expected number of clusters is
found.
maxiter : int
The maximum number of iterations DBSCAN will run.
Returns
-------
None
"""
params = locals()
del(params['self'])
# convert string to list, if single analyte
if isinstance(analytes, str):
analytes = [analytes]
setn = self.filt.maxset + 1
# generate filter
vals = np.vstack(nominal_values(list(self.focus.values())))
if filt is not None:
ind = (self.filt.grab_filt(filt, analytes) &
np.apply_along_axis(all, 0, ~np.isnan(vals)))
else:
ind = np.apply_along_axis(all, 0, ~np.isnan(vals))
if sum(ind) > min_data:
# get indices for data passed to clustering
sampled = np.arange(self.Time.size)[ind]
# generate data for clustering
if include_time:
extra = self.Time
else:
extra = None
# get data as array
ds = stack_keys(self.focus, analytes, extra)
# apply filter, and get nominal values
ds = nominal_values(ds[ind, :])
if normalise | (len(analytes) > 1):
ds = preprocessing.scale(ds)
method_key = {'kmeans': clustering.cluster_kmeans,
# 'DBSCAN': clustering.cluster_DBSCAN,
'meanshift': clustering.cluster_meanshift}
cfun = method_key[method]
labels, core_samples_mask = cfun(ds, **kwargs)
# return labels, and if DBSCAN core_sample_mask
labels_unique = np.unique(labels)
# label the clusters according to their contents
if (sort is not None) & (sort is not False):
if isinstance(sort, str):
sort = [sort]
sanalytes = analytes
# make boolean filter to select analytes
if sort is True:
sortk = np.array([True] * len(sanalytes))
else:
sortk = np.array([s in sort for s in sanalytes])
# create per-point mean based on selected analytes.
sd = np.apply_along_axis(sum, 1, ds[:, sortk])
# calculate per-cluster means
avs = [np.nanmean(sd[labels == lab]) for lab in labels_unique]
# re-order the cluster labels based on their means
order = [x[0] for x in sorted(enumerate(avs), key=lambda x:x[1])]
sdict = dict(zip(order, labels_unique))
else:
sdict = dict(zip(labels_unique, labels_unique))
filts = {}
for ind, lab in sdict.items():
filts[lab] = labels == ind
# only applies to DBSCAN results.
if not all(np.isnan(core_samples_mask)):
filts['core'] = core_samples_mask
resized = {}
for k, v in filts.items():
resized[k] = np.zeros(self.Time.size, dtype=bool)
resized[k][sampled] = v
namebase = '-'.join(analytes) + '_cluster-' + method
info = '-'.join(analytes) + ' cluster filter.'
if method == 'DBSCAN':
for k, v in resized.items():
if isinstance(k, str):
name = namebase + '_core'
elif k < 0:
name = namebase + '_noise'
else:
name = namebase + '_{:.0f}'.format(k)
self.filt.add(name, v, info=info, params=params, setn=setn)
else:
for k, v in resized.items():
name = namebase + '_{:.0f}'.format(k)
self.filt.add(name, v, info=info, params=params, setn=setn)
else:
# if there are no data
name = '-'.join(analytes) + '_cluster-' + method + '_0'
info = '-'.join(analytes) + ' cluster filter failed.'
self.filt.add(name, np.zeros(self.Time.size, dtype=bool),
info=info, params=params, setn=setn)
return
|
[
"Applies",
"an",
"n",
"-",
"dimensional",
"clustering",
"filter",
"to",
"the",
"data",
"."
] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L705-L912
|
[…code_tokens elided: verbatim token-for-token duplicate of the filter_clustering source shown above…] |
cd25a650cfee318152f234d992708511f7047fbe
|
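For reference, a minimal usage sketch of the clustering filter documented in this record. This is an assumption-laden illustration, not part of the dataset: `d` stands for a hypothetical D instance with ratio data in `d.focus`, and the analyte names are made up.

# Hypothetical example: two-analyte meanshift cluster filter.
# `d` is an illustrative D instance; 'Al27' and 'Mg24' are placeholder analytes.
d.filter_clustering(analytes=['Al27', 'Mg24'], filt=False,
                    normalise=True, method='meanshift', min_data=10)
# Each identified cluster becomes a boolean filter stored in d.filt,
# named following the code above, e.g. 'Al27-Mg24_cluster-meanshift_0'.
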
test
|
D.calc_correlation
|
Calculate local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
|
latools/D_obj.py
|
def calc_correlation(self, x_analyte, y_analyte, window=15, filt=True, recalc=True):
"""
Calculate local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
"""
label = '{:}_{:}_{:.0f}'.format(x_analyte, y_analyte, window)
if label in self.correlations and not recalc:
return
# make window odd
if window % 2 != 1:
window += 1
# get filter
ind = self.filt.grab_filt(filt, [x_analyte, y_analyte])
x = nominal_values(self.focus[x_analyte])
x[~ind] = np.nan
xr = rolling_window(x, window, pad=np.nan)
y = nominal_values(self.focus[y_analyte])
y[~ind] = np.nan
yr = rolling_window(y, window, pad=np.nan)
r, p = zip(*map(nan_pearsonr, xr, yr))
r = np.array(r)
p = np.array(p)
# save correlation info
self.correlations[label] = r, p
return
|
def calc_correlation(self, x_analyte, y_analyte, window=15, filt=True, recalc=True):
"""
Calculate local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
"""
label = '{:}_{:}_{:.0f}'.format(x_analyte, y_analyte, window)
if label in self.correlations and not recalc:
return
# make window odd
if window % 2 != 1:
window += 1
# get filter
ind = self.filt.grab_filt(filt, [x_analyte, y_analyte])
x = nominal_values(self.focus[x_analyte])
x[~ind] = np.nan
xr = rolling_window(x, window, pad=np.nan)
y = nominal_values(self.focus[y_analyte])
y[~ind] = np.nan
yr = rolling_window(y, window, pad=np.nan)
r, p = zip(*map(nan_pearsonr, xr, yr))
r = np.array(r)
p = np.array(p)
# save correlation info
self.correlations[label] = r, p
return
|
["Calculate", "local", "correlation", "between", "two", "analytes", "."] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L915-L963
|
[…code_tokens elided: verbatim token-for-token duplicate of the calc_correlation source shown above…] |
cd25a650cfee318152f234d992708511f7047fbe
|
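A minimal usage sketch for this record's function (again assuming a hypothetical D instance `d`; analyte names are illustrative):

# Hypothetical example: rolling Pearson correlation over a 15-point window.
d.calc_correlation('Al27', 'Mg24', window=15, filt=True)
# Results are cached under a '{x}_{y}_{window:.0f}' label, per the code above.
r, p = d.correlations['Al27_Mg24_15']
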
test
|
D.filter_correlation
|
Calculate correlation filter.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
Note: the absolute Pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
The significance level below which data are excluded.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
|
latools/D_obj.py
|
def filter_correlation(self, x_analyte, y_analyte, window=15,
r_threshold=0.9, p_threshold=0.05, filt=True, recalc=False):
"""
Calculate correlation filter.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
Note: the absolute Pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
The significance level below which data are excluded.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
"""
# make window odd
if window % 2 != 1:
window += 1
params = locals()
del(params['self'])
setn = self.filt.maxset + 1
label = '{:}_{:}_{:.0f}'.format(x_analyte, y_analyte, window)
self.calc_correlation(x_analyte, y_analyte, window, filt, recalc)
r, p = self.correlations[label]
cfilt = (abs(r) > r_threshold) & (p < p_threshold)
cfilt = ~cfilt
name = x_analyte + '_' + y_analyte + '_corr'
self.filt.add(name=name,
filt=cfilt,
info=(x_analyte + ' vs. ' + y_analyte +
' correlation filter.'),
params=params, setn=setn)
self.filt.off(filt=name)
self.filt.on(analyte=y_analyte, filt=name)
return
|
def filter_correlation(self, x_analyte, y_analyte, window=15,
r_threshold=0.9, p_threshold=0.05, filt=True, recalc=False):
"""
Calculate correlation filter.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
Note: the absolute Pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
The significance level below which data are excluded.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
"""
# make window odd
if window % 2 != 1:
window += 1
params = locals()
del(params['self'])
setn = self.filt.maxset + 1
label = '{:}_{:}_{:.0f}'.format(x_analyte, y_analyte, window)
self.calc_correlation(x_analyte, y_analyte, window, filt, recalc)
r, p = self.correlations[label]
cfilt = (abs(r) > r_threshold) & (p < p_threshold)
cfilt = ~cfilt
name = x_analyte + '_' + y_analyte + '_corr'
self.filt.add(name=name,
filt=cfilt,
info=(x_analyte + ' vs. ' + y_analyte +
' correlation filter.'),
params=params, setn=setn)
self.filt.off(filt=name)
self.filt.on(analyte=y_analyte, filt=name)
return
|
["Calculate", "correlation", "filter", "."] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L966-L1021
|
[…code_tokens elided: verbatim token-for-token duplicate of the filter_correlation source shown above…] |
cd25a650cfee318152f234d992708511f7047fbe
|
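A minimal usage sketch (same hypothetical `d` and illustrative analyte names as above):

# Hypothetical example: exclude points where the analytes are strongly,
# significantly correlated (|r| > 0.9 and p < 0.05).
d.filter_correlation('Al27', 'Mg24', window=15,
                     r_threshold=0.9, p_threshold=0.05)
# Creates an 'Al27_Mg24_corr' filter, switched on for the y analyte only,
# mirroring the filt.off()/filt.on() calls in the source above.
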
test
|
D.correlation_plot
|
Plot the local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
fig, axs : figure and axes objects
|
latools/D_obj.py
|
def correlation_plot(self, x_analyte, y_analyte, window=15, filt=True, recalc=False):
"""
Plot the local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
fig, axs : figure and axes objects
"""
label = '{:}_{:}_{:.0f}'.format(x_analyte, y_analyte, window)
self.calc_correlation(x_analyte, y_analyte, window, filt, recalc)
r, p = self.correlations[label]
fig, axs = plt.subplots(3, 1, figsize=[7, 5], sharex=True)
# plot analytes
ax = axs[0]
ax.plot(self.Time, nominal_values(self.focus[x_analyte]), color=self.cmap[x_analyte], label=x_analyte)
ax.plot(self.Time, nominal_values(self.focus[y_analyte]), color=self.cmap[y_analyte], label=y_analyte)
ax.set_yscale('log')
ax.legend()
ax.set_ylabel('Signals')
# plot r
ax = axs[1]
ax.plot(self.Time, r)
ax.set_ylabel('Pearson R')
# plot p
ax = axs[2]
ax.plot(self.Time, p)
ax.set_ylabel('Significance Level (p)')
fig.tight_layout()
return fig, axs
|
def correlation_plot(self, x_analyte, y_analyte, window=15, filt=True, recalc=False):
"""
Plot the local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
fig, axs : figure and axes objects
"""
label = '{:}_{:}_{:.0f}'.format(x_analyte, y_analyte, window)
self.calc_correlation(x_analyte, y_analyte, window, filt, recalc)
r, p = self.correlations[label]
fig, axs = plt.subplots(3, 1, figsize=[7, 5], sharex=True)
# plot analytes
ax = axs[0]
ax.plot(self.Time, nominal_values(self.focus[x_analyte]), color=self.cmap[x_analyte], label=x_analyte)
ax.plot(self.Time, nominal_values(self.focus[y_analyte]), color=self.cmap[y_analyte], label=y_analyte)
ax.set_yscale('log')
ax.legend()
ax.set_ylabel('Signals')
# plot r
ax = axs[1]
ax.plot(self.Time, r)
ax.set_ylabel('Pearson R')
# plot p
ax = axs[2]
ax.plot(self.Time, p)
ax.set_ylabel('Significance Level (p)')
fig.tight_layout()
return fig, axs
|
["Plot", "the", "local", "correlation", "between", "two", "analytes", "."] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1024-L1073
|
[…code_tokens elided: verbatim token-for-token duplicate of the correlation_plot source shown above…] |
cd25a650cfee318152f234d992708511f7047fbe
|
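A minimal usage sketch (illustrative names; assumes matplotlib is available, since the function calls plt internally):

# Hypothetical example: three stacked panels - signals, Pearson R, p.
fig, axs = d.correlation_plot('Al27', 'Mg24', window=15)
fig.savefig('Al27_Mg24_corr.png')  # fig and axs are standard matplotlib objects
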
test
|
D.filter_new
|
Make new filter from combination of other filters.
Parameters
----------
name : str
The name of the new filter. Should be unique.
filt_str : str
A logical combination of partial strings which will create
the new filter. For example, 'Albelow & Mnbelow' will combine
all filters that partially match 'Albelow' with those that
partially match 'Mnbelow' using the 'AND' logical operator.
Returns
-------
None
|
latools/D_obj.py
|
def filter_new(self, name, filt_str):
"""
Make new filter from combination of other filters.
Parameters
----------
name : str
The name of the new filter. Should be unique.
filt_str : str
A logical combination of partial strings which will create
the new filter. For example, 'Albelow & Mnbelow' will combine
all filters that partially match 'Albelow' with those that
partially match 'Mnbelow' using the 'AND' logical operator.
Returns
-------
None
"""
filt = self.filt.grab_filt(filt=filt_str)
self.filt.add(name, filt, info=filt_str)
return
|
def filter_new(self, name, filt_str):
"""
Make new filter from combination of other filters.
Parameters
----------
name : str
The name of the new filter. Should be unique.
filt_str : str
A logical combination of partial strings which will create
the new filter. For example, 'Albelow & Mnbelow' will combine
all filters that partially match 'Albelow' with those that
partially match 'Mnbelow' using the 'AND' logical operator.
Returns
-------
None
"""
filt = self.filt.grab_filt(filt=filt_str)
self.filt.add(name, filt, info=filt_str)
return
|
["Make", "new", "filter", "from", "combination", "of", "other", "filters", "."] |
oscarbranson/latools
|
python
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L1076-L1096
|
["def", "filter_new", "(", "self", ",", "name", ",", "filt_str", ")", ":", "filt", "=", "self", ".", "filt", ".", "grab_filt", "(", "filt", "=", "filt_str", ")", "self", ".", "filt", ".", "add", "(", "name", ",", "filt", ",", "info", "=", "filt_str", ")", "return"] |
cd25a650cfee318152f234d992708511f7047fbe
|
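A minimal usage sketch, following the docstring's own 'Albelow & Mnbelow' example (filter names are placeholders for previously created filters):

# Hypothetical example: AND-combine two existing threshold filters.
d.filter_new('Al-Mn_low', 'Albelow & Mnbelow')
# The combined boolean filter is added to d.filt under the name 'Al-Mn_low'.
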