id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | code (string, 75-19.8k chars) | language (1 class: python) | docstring (string, 3-17.3k chars) | sha (40 chars) | url (87-242 chars) |
|---|---|---|---|---|---|---|---|---|
26,300 | tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder._id_to_subword | def _id_to_subword(self, subword_id):
"""Converts a subword integer ID to a subword string."""
if subword_id < 0 or subword_id >= (self.vocab_size - 1):
raise ValueError("Received id %d which is invalid. Ids must be within "
"[0, %d)." % (subword_id + 1, self.vocab_size))
if 0 <= subword_id < len(self._subwords):
# Subword
return self._subwords[subword_id]
else:
# Byte
offset = len(self._subwords)
subword_id -= offset
bytestr = bytes(bytearray([subword_id]))
return bytestr | python | Converts a subword integer ID to a subword string. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L174-L188 |
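A note on the `_id_to_subword` row above: ids below `len(self._subwords)` index the subword list, and the remainder fall through to single raw bytes. A minimal standalone sketch of that split, with a hypothetical four-entry subword list (the vocabulary here is invented for illustration, not taken from a real encoder):

```python
# Sketch of the id -> subword/byte split used by _id_to_subword above.
subwords = ["the_", "quick", "bro", "wn_"]  # hypothetical subword list

def id_to_subword(subword_id):
    if 0 <= subword_id < len(subwords):
        return subwords[subword_id]                         # subword range
    return bytes(bytearray([subword_id - len(subwords)]))   # byte fallback

print(id_to_subword(1))  # 'quick'
print(id_to_subword(4))  # b'\x00' -- the first byte id after the subwords
```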
26,301 | tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder._token_to_subwords | def _token_to_subwords(self, token):
"""Greedily split token into subwords."""
subwords = []
start = 0
while start < len(token):
subword = None
for end in range(
min(len(token), start + self._max_subword_len), start, -1):
candidate = token[start:end]
if (candidate in self._subword_to_id or
candidate == _UNDERSCORE_REPLACEMENT):
subword = candidate
subwords.append(subword)
start = end
break
# No subword match found. Consume a single (unicode) character.
if subword is None:
subwords.append(token[start])
start += 1
return subwords | python | Greedily split token into subwords. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L190-L211 |
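The `_token_to_subwords` row above implements greedy longest-match-first splitting: at each position it tries the longest candidate no longer than `_max_subword_len`, and consumes a single character when nothing matches. A standalone sketch with a toy vocabulary (the vocab and input token are assumptions for illustration):

```python
# Greedy longest-match-first subword split, as in _token_to_subwords above.
vocab = {"un", "believ", "able"}        # toy vocabulary
max_len = max(len(s) for s in vocab)

def split(token):
    out, start = [], 0
    while start < len(token):
        for end in range(min(len(token), start + max_len), start, -1):
            if token[start:end] in vocab:   # longest match wins
                out.append(token[start:end])
                start = end
                break
        else:                               # no match: consume one character
            out.append(token[start])
            start += 1
    return out

print(split("unbelievable!"))  # ['un', 'believ', 'able', '!']
```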
26,302 | tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder._init_from_list | def _init_from_list(self, subwords):
"""Initializes the encoder from a list of subwords."""
subwords = [tf.compat.as_text(s) for s in subwords if s]
self._subwords = subwords
# Note that internally everything is 0-indexed. Padding is dealt with at the
# end of encode and the beginning of decode.
self._subword_to_id = {s: i for i, s in enumerate(subwords)}
# We remember the maximum length of any subword to avoid having to
# check arbitrarily long strings.
self._max_subword_len = max(
len(_UNDERSCORE_REPLACEMENT), max([len(s) for s in subwords] or [1]))
# Initialize the cache
self._cache_size = 2**20
self._token_to_ids_cache = [(None, None)] * self._cache_size
# Setup tokenizer
# Reserved tokens are all tokens that are mixed alphanum and non-alphanum.
reserved_tokens = set([_UNDERSCORE_REPLACEMENT])
for t in self._subwords:
if text_encoder.is_mixed_alphanum(t):
reserved_tokens.add(t)
self._tokenizer = text_encoder.Tokenizer(
alphanum_only=False, reserved_tokens=reserved_tokens) | python | Initializes the encoder from a list of subwords. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L213-L237 |
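`_init_from_list` above sizes a fixed list of `(key, value)` pairs as a token cache. The usual way such a list is consulted (presumably in the encoder's token-to-ids path, which is not part of this dump) is as a direct-mapped cache: a key hashes to one slot, and a colliding key simply overwrites it. A sketch of that pattern under those assumptions:

```python
# Direct-mapped cache: one slot per hash bucket, collisions overwrite.
CACHE_SIZE = 2 ** 10
cache = [(None, None)] * CACHE_SIZE

def cached(token, compute):
    slot = hash(token) % CACHE_SIZE
    key, value = cache[slot]
    if key == token:                 # hit: reuse the stored value
        return value
    value = compute(token)           # miss: compute and overwrite the slot
    cache[slot] = (token, value)
    return value

print(cached("hello", len))  # 5, computed
print(cached("hello", len))  # 5, served from the cache slot
```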
26,303 | tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder.save_to_file | def save_to_file(self, filename_prefix):
"""Save the vocabulary to a file."""
# Wrap in single quotes to make it easier to see the full subword when
# it has spaces and make it easier to search with ctrl+f.
filename = self._filename(filename_prefix)
lines = ["'%s'" % s for s in self._subwords]
self._write_lines_to_file(filename, lines) | python | Save the vocabulary to a file. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L243-L249 |
26,304 | tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder.load_from_file | def load_from_file(cls, filename_prefix):
"""Extracts list of subwords from file."""
filename = cls._filename(filename_prefix)
lines, _ = cls._read_lines_from_file(filename)
# Strip wrapping single quotes
vocab_list = [line[1:-1] for line in lines]
return cls(vocab_list=vocab_list) | python | Extracts list of subwords from file. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L252-L258 |
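`save_to_file` and `load_from_file` above form a round trip: each subword is written wrapped in single quotes (making whitespace visible and searching easier), and loading strips the first and last character of each line. A minimal sketch of that pairing, with plain file I/O standing in for the class's `_write_lines_to_file`/`_read_lines_from_file` helpers:

```python
# Round trip of the quote-wrapping used by save_to_file / load_from_file.
subwords = ["the_", "a b", "xyz"]

with open("vocab.subwords", "w") as f:
    f.write("\n".join("'%s'" % s for s in subwords))   # wrap in single quotes

with open("vocab.subwords") as f:
    loaded = [line[1:-1] for line in f.read().split("\n")]  # strip the quotes

assert loaded == subwords
```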
26,305 | tensorflow/datasets | tensorflow_datasets/core/features/text/subword_text_encoder.py | SubwordTextEncoder.build_from_corpus | def build_from_corpus(cls,
corpus_generator,
target_vocab_size,
max_subword_length=20,
max_corpus_chars=None,
reserved_tokens=None):
"""Builds a `SubwordTextEncoder` based on the `corpus_generator`.
Args:
corpus_generator: generator yielding `str`, from which subwords will be
constructed.
target_vocab_size: `int`, approximate size of the vocabulary to create.
max_subword_length: `int`, maximum length of a subword. Note that memory
and compute scale quadratically in the length of the longest token.
max_corpus_chars: `int`, the maximum number of characters to consume from
`corpus_generator` for the purposes of building the subword vocabulary.
reserved_tokens: `list<str>`, list of tokens that will always be treated
as whole tokens and not split up. Note that these must contain a mix of
alphanumeric and non-alphanumeric characters (e.g. "<EOS>") and not end
in an underscore.
Returns:
`SubwordTextEncoder`.
"""
reserved_tokens = reserved_tokens or []
_validate_build_arguments(
max_subword_length=max_subword_length,
reserved_tokens=reserved_tokens,
target_vocab_size=target_vocab_size)
token_counts = _token_counts_from_generator(
generator=corpus_generator,
max_chars=max_corpus_chars,
reserved_tokens=reserved_tokens)
# Binary search on the minimum token count to build a vocabulary with
# approximately the right size
def _binary_search(min_token_count, max_token_count):
"""Binary search min_token_count to build SubwordTextEncoder vocab."""
candidate_min = (min_token_count + max_token_count) // 2
logging.info("SubwordTextEncoder build: trying min_token_count %d",
candidate_min)
encoder = cls._build_from_token_counts(
token_counts=token_counts,
min_token_count=candidate_min,
reserved_tokens=reserved_tokens,
num_iterations=4,
max_subword_length=max_subword_length)
vocab_size = encoder.vocab_size
# Being within 1% of the target vocab size is ok
target_achieved = (
abs(vocab_size - target_vocab_size) * 100 < target_vocab_size)
if (target_achieved or min_token_count >= max_token_count or
candidate_min <= 1):
# Search complete
return encoder
# Recurse
if vocab_size > target_vocab_size:
next_encoder = _binary_search(candidate_min + 1, max_token_count)
else:
next_encoder = _binary_search(min_token_count, candidate_min - 1)
# Return the one that's closest to the target_vocab_size
if (abs(vocab_size - target_vocab_size) <
abs(next_encoder.vocab_size - target_vocab_size)):
return encoder
else:
return next_encoder
# Get min and max token counts.
min_token_count = max(min(token_counts.values()), 1)
max_token_count = max(token_counts.values())
# Another option could be to do a binary search over *ranks* of the tokens.
return _binary_search(min_token_count, max_token_count) | python | Builds a `SubwordTextEncoder` based on the `corpus_generator`.
Args:
corpus_generator: generator yielding `str`, from which subwords will be
constructed.
target_vocab_size: `int`, approximate size of the vocabulary to create.
max_subword_length: `int`, maximum length of a subword. Note that memory
and compute scale quadratically in the length of the longest token.
max_corpus_chars: `int`, the maximum number of characters to consume from
`corpus_generator` for the purposes of building the subword vocabulary.
reserved_tokens: `list<str>`, list of tokens that will always be treated
as whole tokens and not split up. Note that these must contain a mix of
alphanumeric and non-alphanumeric characters (e.g. "<EOS>") and not end
in an underscore.
Returns:
`SubwordTextEncoder`. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/subword_text_encoder.py#L261-L336 |
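`build_from_corpus` above binary-searches `min_token_count`: raising the threshold drops rare tokens and shrinks the vocabulary, so the recursion probes the midpoint, walks toward the target, and keeps whichever candidate lands closest (anything within 1% of the target ends the search early). A standalone sketch of that search, where a cheap made-up `vocab_size_for` stands in for the expensive `_build_from_token_counts` call:

```python
# Binary search over a count threshold to approach a target vocab size.
def vocab_size_for(threshold):
    return 100_000 // threshold   # assumed monotone stand-in, not real data

def search(lo, hi, target):
    mid = (lo + hi) // 2
    size = vocab_size_for(mid)
    if abs(size - target) * 100 < target or lo >= hi or mid <= 1:
        return mid, size          # within 1% of target, or search exhausted
    if size > target:             # vocab too big: raise the threshold
        best = search(mid + 1, hi, target)
    else:                         # vocab too small: lower the threshold
        best = search(lo, mid - 1, target)
    return min((mid, size), best, key=lambda t: abs(t[1] - target))

print(search(1, 100_000, 8_000))  # (13, 7692) for this toy size curve
```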
26,306 | tensorflow/datasets | tensorflow_datasets/structured/higgs.py | Higgs._generate_examples | def _generate_examples(self, file_path):
"""Generate features given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features, per row.
"""
fieldnames = [
'class_label', 'lepton_pT', 'lepton_eta', 'lepton_phi',
'missing_energy_magnitude', 'missing_energy_phi', 'jet_1_pt',
'jet_1_eta', 'jet_1_phi', 'jet_1_b-tag', 'jet_2_pt', 'jet_2_eta',
'jet_2_phi', 'jet_2_b-tag', 'jet_3_pt', 'jet_3_eta', 'jet_3_phi',
'jet_3_b-tag', 'jet_4_pt', 'jet_4_eta', 'jet_4_phi', 'jet_4_b-tag',
'm_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb'
]
with tf.io.gfile.GFile(file_path) as csvfile:
reader = csv.DictReader(csvfile, fieldnames=fieldnames)
for row in reader:
yield row | python | Generate features given the path to the csv file.
Args:
file_path: path where the csv file is stored
Yields:
The features, per row. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/structured/higgs.py#L122-L144 |
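Since the HIGGS csv has no header row, the `_generate_examples` row above passes the column names explicitly and lets `csv.DictReader` map each line to a dict. A minimal standalone sketch of that pattern (the three columns and values are invented):

```python
import csv
import io

# No header row in the file, so fieldnames are supplied explicitly.
data = io.StringIO("1.0,0.87,-0.51\n0.0,0.33,1.20\n")
reader = csv.DictReader(
    data, fieldnames=["class_label", "lepton_pT", "lepton_eta"])
for row in reader:
    print(row["class_label"], row["lepton_pT"])
# 1.0 0.87
# 0.0 0.33
```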
26,307 | tensorflow/datasets | tensorflow_datasets/image/cats_vs_dogs.py | CatsVsDogs._generate_examples | def _generate_examples(self, archive):
"""Generate Cats vs Dogs images and labels given a directory path."""
num_skipped = 0
for fname, fobj in archive:
res = _NAME_RE.match(fname)
if not res: # README file, ...
continue
label = res.group(1).lower()
if tf.compat.as_bytes("JFIF") not in fobj.peek(10):
num_skipped += 1
continue
yield {
"image": fobj,
"image/filename": fname,
"label": label,
}
if num_skipped != _NUM_CORRUPT_IMAGES:
raise ValueError("Expected %d corrupt images, but found %d" % (
_NUM_CORRUPT_IMAGES, num_skipped))
logging.warning("%d images were corrupted and were skipped", num_skipped) | python | Generate Cats vs Dogs images and labels given a directory path. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cats_vs_dogs.py#L87-L107 |
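The corruption check in the cats-vs-dogs row above relies on valid JPEGs in this archive carrying the ASCII marker `JFIF` near the start of the file, and on `peek(10)` returning a buffered prefix without consuming it. A small sketch of the same probe on a buffered reader (the byte string is a fabricated JPEG-like header):

```python
import io

# Fabricated JPEG-like prefix: SOI marker, APP0 segment, then "JFIF".
fobj = io.BufferedReader(io.BytesIO(b"\xff\xd8\xff\xe0\x00\x10JFIF\x00rest"))
head = fobj.peek(10)            # inspect the buffer without advancing
print(b"JFIF" in head)          # True
print(fobj.read(2))             # b'\xff\xd8' -- peek left the position at 0
```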
26,308 | tensorflow/datasets | tensorflow_datasets/image/smallnorb.py | _load_chunk | def _load_chunk(dat_path, cat_path, info_path):
"""Loads a data chunk as specified by the paths.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Returns:
Tuple with the dat, cat, info_arrays.
"""
dat_array = read_binary_matrix(dat_path)
# Even if the image is gray scale, we need to add an extra channel dimension
# to be compatible with tfds.features.Image.
dat_array = np.expand_dims(dat_array, -1)
cat_array = read_binary_matrix(cat_path)
info_array = read_binary_matrix(info_path)
info_array = np.copy(info_array) # Make read-only buffer array writable.
# Azimuth values are 0, 2, 4, .., 34. We divide by 2 to get proper labels.
info_array[:, 2] = info_array[:, 2] / 2
return dat_array, cat_array, info_array | python | Loads a data chunk as specified by the paths.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Returns:
Tuple with the dat, cat, info_arrays. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L141-L164 |
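Two details of `_load_chunk` above are worth seeing in isolation: `np.expand_dims(..., -1)` appends the trailing channel axis that `tfds.features.Image` expects, and dividing the azimuth column by 2 maps the raw values 0, 2, ..., 34 onto contiguous labels 0..17. A small numpy sketch (the shapes are assumed, not the real chunk sizes):

```python
import numpy as np

images = np.zeros((4, 2, 96, 96), dtype=np.uint8)  # assumed (N, 2 cameras, H, W)
images = np.expand_dims(images, -1)                # append the channel axis
print(images.shape)                                # (4, 2, 96, 96, 1)

azimuth = np.array([0, 2, 4, 34])
print(azimuth // 2)                                # [ 0  1  2 17]
```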
26,309 | tensorflow/datasets | tensorflow_datasets/image/smallnorb.py | read_binary_matrix | def read_binary_matrix(filename):
"""Reads and returns binary formatted matrix stored in filename.
The file format is described on the data set page:
https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/
Args:
filename: String with path to the file.
Returns:
Numpy array contained in the file.
"""
with tf.io.gfile.GFile(filename, "rb") as f:
s = f.read()
# Data is stored in little-endian byte order.
int32_dtype = np.dtype("int32").newbyteorder("<")
# The first 4 bytes contain a magic code that specifies the data type.
magic = int(np.frombuffer(s, dtype=int32_dtype, count=1))
if magic == 507333717:
data_dtype = np.dtype("uint8") # uint8 does not have a byte order.
elif magic == 507333716:
data_dtype = np.dtype("int32").newbyteorder("<")
else:
raise ValueError("Invalid magic value for data type!")
# The second 4 bytes contain an int32 with the number of dimensions of the
# stored array.
ndim = int(np.frombuffer(s, dtype=int32_dtype, count=1, offset=4))
# The next ndim x 4 bytes contain the shape of the array in int32.
dims = np.frombuffer(s, dtype=int32_dtype, count=ndim, offset=8)
# If the array has less than three dimensions, three int32 are still used to
# save the shape info (remaining int32 are simply set to 1). The shape info
# hence uses max(3, ndim) int32s, i.e. max(3, ndim) * 4 bytes.
bytes_used_for_shape_info = max(3, ndim) * 4
# The remaining bytes are the array.
data = np.frombuffer(
s, dtype=data_dtype, offset=8 + bytes_used_for_shape_info)
return data.reshape(tuple(dims)) | python | Reads and returns binary formatted matrix stored in filename.
The file format is described on the data set page:
https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/
Args:
filename: String with path to the file.
Returns:
Numpy array contained in the file. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L167-L209 |
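`read_binary_matrix` above can be exercised end to end by synthesizing a buffer in the same layout: a little-endian int32 magic, an int32 `ndim`, the dims padded to at least three int32s with 1s, then the raw data. A round-trip sketch using the uint8 magic constant from the code (the array contents are arbitrary):

```python
import numpy as np

arr = np.arange(6, dtype=np.uint8).reshape(2, 3)
le32 = np.dtype("int32").newbyteorder("<")

# Serialize: magic, ndim, dims padded to 3 entries, then the raw bytes.
header = np.array([507333717, arr.ndim, *arr.shape, 1], dtype=le32)
buf = header.tobytes() + arr.tobytes()

# Parse it back exactly the way read_binary_matrix does.
ndim = int(np.frombuffer(buf, dtype=le32, count=1, offset=4))
dims = np.frombuffer(buf, dtype=le32, count=ndim, offset=8)
data = np.frombuffer(buf, dtype=np.uint8, offset=8 + max(3, ndim) * 4)
print(data.reshape(tuple(dims)))  # [[0 1 2]
                                  #  [3 4 5]]
```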
26,310 | tensorflow/datasets | tensorflow_datasets/image/smallnorb.py | Smallnorb._generate_examples | def _generate_examples(self, dat_path, cat_path, info_path):
"""Generate examples for the Smallnorb dataset.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Yields:
Dictionaries with images and the different labels.
"""
dat_arr, cat_arr, info_arr = _load_chunk(dat_path, cat_path, info_path)
for image, category, info_vec in moves.zip(dat_arr, cat_arr, info_arr):
yield {
"image": image[0],
"image2": image[1],
"label_category": category,
"instance": info_vec[0],
"label_elevation": info_vec[1],
"label_azimuth": info_vec[2],
"label_lighting": info_vec[3],
} | python | Generate examples for the Smallnorb dataset.
Args:
dat_path: Path to dat file of the chunk.
cat_path: Path to cat file of the chunk.
info_path: Path to info file of the chunk.
Yields:
Dictionaries with images and the different labels. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/smallnorb.py#L116-L138 |
26,311 | tensorflow/datasets | tensorflow_datasets/core/dataset_utils.py | build_dataset | def build_dataset(instruction_dicts,
dataset_from_file_fn,
shuffle_files=False,
parallel_reads=64):
"""Constructs a `tf.data.Dataset` from TFRecord files.
Args:
instruction_dicts: `list` of {'filepath':, 'mask':, 'mask_offset':}
containing the information about which files and which examples to use.
The boolean mask will be repeated and zipped with the examples from
filepath.
dataset_from_file_fn: function returning a `tf.data.Dataset` given a
filename.
shuffle_files: `bool`, Whether to shuffle the input filenames.
parallel_reads: `int`, how many files to read in parallel.
Returns:
`tf.data.Dataset`
"""
# First case: All examples are taken (No value skipped)
if _no_examples_skipped(instruction_dicts):
# Only use the filenames as instruction
instruction_ds = tf.data.Dataset.from_tensor_slices([
d["filepath"] for d in instruction_dicts
])
build_ds_from_instruction = dataset_from_file_fn
# Second case: Use the instructions to read the examples
else:
instruction_ds = _build_instruction_ds(instruction_dicts)
build_ds_from_instruction = functools.partial(
_build_ds_from_instruction,
ds_from_file_fn=dataset_from_file_fn,
)
# If shuffle is True, we shuffle the instructions/shards
if shuffle_files:
instruction_ds = instruction_ds.shuffle(len(instruction_dicts))
# Use interleave to parallel read files and decode records
ds = instruction_ds.interleave(
build_ds_from_instruction,
cycle_length=parallel_reads,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds | python | Constructs a `tf.data.Dataset` from TFRecord files.
Args:
instruction_dicts: `list` of {'filepath':, 'mask':, 'mask_offset':}
containing the information about which files and which examples to use.
The boolean mask will be repeated and zipped with the examples from
filepath.
dataset_from_file_fn: function returning a `tf.data.Dataset` given a
filename.
shuffle_files: `bool`, Whether to shuffle the input filenames.
parallel_reads: `int`, how many files to read in parallel.
Returns:
`tf.data.Dataset` | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L32-L76 |
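The fast path in `build_dataset` above turns the filenames into a dataset and fans out with `interleave`, which keeps `cycle_length` per-file datasets open at once and mixes their elements. A toy sketch where each "file" is simulated by a dataset repeating its own name three times:

```python
import tensorflow as tf

def ds_from_file(fname):            # stand-in for dataset_from_file_fn
    return tf.data.Dataset.from_tensors(fname).repeat(3)

files = tf.data.Dataset.from_tensor_slices(["shard0", "shard1", "shard2"])
ds = files.interleave(
    ds_from_file,
    cycle_length=2,                 # two "files" open concurrently
    num_parallel_calls=tf.data.experimental.AUTOTUNE)
print([r.numpy() for r in ds])
# [b'shard0', b'shard1', b'shard0', b'shard1', b'shard0', b'shard1',
#  b'shard2', b'shard2', b'shard2']
```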
26,312 | tensorflow/datasets | tensorflow_datasets/core/dataset_utils.py | _build_instruction_ds | def _build_instruction_ds(instructions):
"""Create a dataset containing individual instruction for each shard.
Each instruction is a dict:
```
{
"filepath": tf.Tensor(shape=(), dtype=tf.string),
"mask_offset": tf.Tensor(shape=(), dtype=tf.int64),
"mask": tf.Tensor(shape=(100,), dtype=tf.bool),
}
```
Args:
instructions: `list[dict]`, the list of instruction dict
Returns:
instruction_ds: The dataset containing the instruction. The dataset size is
the number of shards.
"""
# Transpose the list[dict] into dict[list]
tensor_inputs = {
# offset_mask need to be converted to int64 explicitly
k: np.array(vals, dtype=np.int64) if k == "mask_offset" else list(vals)
for k, vals in utils.zip_dict(*instructions)
}
return tf.data.Dataset.from_tensor_slices(tensor_inputs) | python | Create a dataset containing individual instruction for each shard.
Each instruction is a dict:
```
{
"filepath": tf.Tensor(shape=(), dtype=tf.string),
"mask_offset": tf.Tensor(shape=(), dtype=tf.int64),
"mask": tf.Tensor(shape=(100,), dtype=tf.bool),
}
```
Args:
instructions: `list[dict]`, the list of instruction dict
Returns:
instruction_ds: The dataset containing the instruction. The dataset size is
the number of shards. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L84-L109 |
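`_build_instruction_ds` above transposes the `list[dict]` of instructions into a `dict[list]` so `from_tensor_slices` can slice it back into one element per shard. `utils.zip_dict` is a TFDS-internal helper, so the sketch below uses a plain dict comprehension for the transpose; the instruction values are invented:

```python
import numpy as np
import tensorflow as tf

instructions = [
    {"filepath": "shard0", "mask_offset": 0,  "mask": [True] * 100},
    {"filepath": "shard1", "mask_offset": 25, "mask": [True] * 100},
]

# Transpose list[dict] -> dict[list]; mask_offset becomes int64 explicitly.
tensor_inputs = {
    k: (np.array([d[k] for d in instructions], dtype=np.int64)
        if k == "mask_offset" else [d[k] for d in instructions])
    for k in instructions[0]
}
ds = tf.data.Dataset.from_tensor_slices(tensor_inputs)
for el in ds:
    print(el["filepath"].numpy(), el["mask_offset"].numpy())
# b'shard0' 0
# b'shard1' 25
```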
26,313 | tensorflow/datasets | tensorflow_datasets/core/dataset_utils.py | _build_mask_ds | def _build_mask_ds(mask, mask_offset):
"""Build the mask dataset to indicate which element to skip.
Args:
mask: `tf.Tensor`, binary mask to apply to all following elements. This
mask should have a length 100.
mask_offset: `tf.Tensor`, integer specifying by how much the mask
should be shifted for the first element.
Returns:
mask_ds: `tf.data.Dataset`, a dataset returning False for examples to skip
and True for examples to keep.
"""
mask_ds = tf.data.Dataset.from_tensor_slices(mask)
mask_ds = mask_ds.repeat()
mask_ds = mask_ds.skip(mask_offset)
return mask_ds | python | Build the mask dataset to indicate which element to skip.
Args:
mask: `tf.Tensor`, binary mask to apply to all following elements. This
mask should have a length 100.
mask_offset: `tf.Tensor`, integer specifying by how much the mask
should be shifted for the first element.
Returns:
mask_ds: `tf.data.Dataset`, a dataset returning False for examples to skip
and True for examples to keep. | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L112-L128 |
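`_build_mask_ds` above repeats a fixed-length mask forever and skips `mask_offset` elements so the mask stream lines up with a shard whose first example sits mid-mask. The same mechanics with a toy length-4 mask and an offset of 3:

```python
import tensorflow as tf

mask = tf.constant([True, False, True, True])
mask_ds = tf.data.Dataset.from_tensor_slices(mask).repeat().skip(3)
print([bool(m) for m in mask_ds.take(6)])
# [True, True, False, True, True, True]
```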
26,314 | tensorflow/datasets | tensorflow_datasets/core/dataset_utils.py | _build_ds_from_instruction | def _build_ds_from_instruction(instruction, ds_from_file_fn):
"""Map an instruction to a real datasets for one particular shard.
Args:
instruction: A `dict` of `tf.Tensor` containing the instruction to load
the particular shard (filename, mask,...)
ds_from_file_fn: `fct`, function which returns the dataset associated to
the filename
Returns:
dataset: `tf.data.Dataset`, The shard loaded from the instruction
"""
# Create the example and mask ds for this particular shard
examples_ds = ds_from_file_fn(instruction["filepath"])
mask_ds = _build_mask_ds(
mask_offset=instruction["mask_offset"],
mask=instruction["mask"],
)
# Zip the mask and real examples
ds = tf.data.Dataset.zip((examples_ds, mask_ds))
# Filter according to the mask (only keep True)
ds = ds.filter(lambda example, mask: mask)
# Only keep the examples
ds = ds.map(lambda example, mask: example)
return ds | python | Map an instruction to a real dataset for one particular shard.
Args:
instruction: A `dict` of `tf.Tensor` containing the instruction to load
the particular shard (filename, mask,...)
ds_from_file_fn: `fct`, function which returns the dataset associated to
the filename
Returns:
dataset: `tf.data.Dataset`, The shard loaded from the instruction | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L131-L156 |
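The zip-filter-map chain in `_build_ds_from_instruction` above is how per-example masking works: zip pairs each example with its mask bit, filter keeps only the `True` pairs, and map discards the bit again. A toy sketch:

```python
import tensorflow as tf

examples = tf.data.Dataset.from_tensor_slices([10, 11, 12, 13])
mask = tf.data.Dataset.from_tensor_slices([True, False, True, False])

ds = tf.data.Dataset.zip((examples, mask))
ds = ds.filter(lambda example, keep: keep)    # drop masked-out examples
ds = ds.map(lambda example, keep: example)    # strip the mask bit
print([int(x) for x in ds])                   # [10, 12]
```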
26,315 | tensorflow/datasets | tensorflow_datasets/core/dataset_utils.py | as_numpy | def as_numpy(dataset, graph=None):
"""Converts a `tf.data.Dataset` to an iterable of NumPy arrays.
`as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.
Args:
dataset: a possibly nested structure of `tf.data.Dataset`s and/or
`tf.Tensor`s.
graph: `tf.Graph`, optional, explicitly set the graph to use.
Returns:
A structure matching `dataset` where `tf.data.Dataset`s are converted to
generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.
"""
nested_ds = dataset
del dataset
# Flatten
flat_ds = tf.nest.flatten(nested_ds)
flat_np = []
# Type check for Tensors and Datasets
for ds_el in flat_ds:
types = [type(el) for el in flat_ds]
types = tf.nest.pack_sequence_as(nested_ds, types)
if not (isinstance(ds_el, tf.Tensor) or tf_compat.is_dataset(ds_el)):
raise ValueError("Arguments to as_numpy must be tf.Tensors or "
"tf.data.Datasets. Got: %s" % types)
if tf.executing_eagerly():
# Eager mode
for ds_el in flat_ds:
if isinstance(ds_el, tf.Tensor):
np_el = ds_el.numpy()
elif tf_compat.is_dataset(ds_el):
np_el = _eager_dataset_iterator(ds_el)
else:
assert False
flat_np.append(np_el)
else:
# Graph mode
# First create iterators for datasets
with utils.maybe_with_graph(graph, create_if_none=False):
ds_iters = [
tf.compat.v1.data.make_one_shot_iterator(ds_el).get_next()
for ds_el in flat_ds if tf_compat.is_dataset(ds_el)
]
ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]
# Then create numpy arrays for tensors
with utils.nogpu_session(graph) as sess: # Shared session for tf.Tensor
# Calling sess.run once so that randomness is shared.
np_arrays = sess.run([tensor for tensor in flat_ds
if not tf_compat.is_dataset(tensor)])
# Merge the dataset iterators and np arrays
iter_ds = iter(ds_iters)
iter_array = iter(np_arrays)
flat_np = [
next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)
for ds_el in flat_ds
]
# Nest
return tf.nest.pack_sequence_as(nested_ds, flat_np) | python | def as_numpy(dataset, graph=None):
"""Converts a `tf.data.Dataset` to an iterable of NumPy arrays.
`as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.
Args:
dataset: a possibly nested structure of `tf.data.Dataset`s and/or
`tf.Tensor`s.
graph: `tf.Graph`, optional, explicitly set the graph to use.
Returns:
A structure matching `dataset` where `tf.data.Dataset`s are converted to
generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.
"""
nested_ds = dataset
del dataset
# Flatten
flat_ds = tf.nest.flatten(nested_ds)
flat_np = []
# Type check for Tensors and Datasets
for ds_el in flat_ds:
types = [type(el) for el in flat_ds]
types = tf.nest.pack_sequence_as(nested_ds, types)
if not (isinstance(ds_el, tf.Tensor) or tf_compat.is_dataset(ds_el)):
raise ValueError("Arguments to as_numpy must be tf.Tensors or "
"tf.data.Datasets. Got: %s" % types)
if tf.executing_eagerly():
# Eager mode
for ds_el in flat_ds:
if isinstance(ds_el, tf.Tensor):
np_el = ds_el.numpy()
elif tf_compat.is_dataset(ds_el):
np_el = _eager_dataset_iterator(ds_el)
else:
assert False
flat_np.append(np_el)
else:
# Graph mode
# First create iterators for datasets
with utils.maybe_with_graph(graph, create_if_none=False):
ds_iters = [
tf.compat.v1.data.make_one_shot_iterator(ds_el).get_next()
for ds_el in flat_ds if tf_compat.is_dataset(ds_el)
]
ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]
# Then create numpy arrays for tensors
with utils.nogpu_session(graph) as sess: # Shared session for tf.Tensor
# Calling sess.run once so that randomness is shared.
np_arrays = sess.run([tensor for tensor in flat_ds
if not tf_compat.is_dataset(tensor)])
# Merge the dataset iterators and np arrays
iter_ds = iter(ds_iters)
iter_array = iter(np_arrays)
flat_np = [
next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)
for ds_el in flat_ds
]
# Nest
return tf.nest.pack_sequence_as(nested_ds, flat_np) | [
"def",
"as_numpy",
"(",
"dataset",
",",
"graph",
"=",
"None",
")",
":",
"nested_ds",
"=",
"dataset",
"del",
"dataset",
"# Flatten",
"flat_ds",
"=",
"tf",
".",
"nest",
".",
"flatten",
"(",
"nested_ds",
")",
"flat_np",
"=",
"[",
"]",
"# Type check for Tensors and Datasets",
"for",
"ds_el",
"in",
"flat_ds",
":",
"types",
"=",
"[",
"type",
"(",
"el",
")",
"for",
"el",
"in",
"flat_ds",
"]",
"types",
"=",
"tf",
".",
"nest",
".",
"pack_sequence_as",
"(",
"nested_ds",
",",
"types",
")",
"if",
"not",
"(",
"isinstance",
"(",
"ds_el",
",",
"tf",
".",
"Tensor",
")",
"or",
"tf_compat",
".",
"is_dataset",
"(",
"ds_el",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Arguments to as_numpy must be tf.Tensors or \"",
"\"tf.data.Datasets. Got: %s\"",
"%",
"types",
")",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"# Eager mode",
"for",
"ds_el",
"in",
"flat_ds",
":",
"if",
"isinstance",
"(",
"ds_el",
",",
"tf",
".",
"Tensor",
")",
":",
"np_el",
"=",
"ds_el",
".",
"numpy",
"(",
")",
"elif",
"tf_compat",
".",
"is_dataset",
"(",
"ds_el",
")",
":",
"np_el",
"=",
"_eager_dataset_iterator",
"(",
"ds_el",
")",
"else",
":",
"assert",
"False",
"flat_np",
".",
"append",
"(",
"np_el",
")",
"else",
":",
"# Graph mode",
"# First create iterators for datasets",
"with",
"utils",
".",
"maybe_with_graph",
"(",
"graph",
",",
"create_if_none",
"=",
"False",
")",
":",
"ds_iters",
"=",
"[",
"tf",
".",
"compat",
".",
"v1",
".",
"data",
".",
"make_one_shot_iterator",
"(",
"ds_el",
")",
".",
"get_next",
"(",
")",
"for",
"ds_el",
"in",
"flat_ds",
"if",
"tf_compat",
".",
"is_dataset",
"(",
"ds_el",
")",
"]",
"ds_iters",
"=",
"[",
"_graph_dataset_iterator",
"(",
"ds_iter",
",",
"graph",
")",
"for",
"ds_iter",
"in",
"ds_iters",
"]",
"# Then create numpy arrays for tensors",
"with",
"utils",
".",
"nogpu_session",
"(",
"graph",
")",
"as",
"sess",
":",
"# Shared session for tf.Tensor",
"# Calling sess.run once so that randomness is shared.",
"np_arrays",
"=",
"sess",
".",
"run",
"(",
"[",
"tensor",
"for",
"tensor",
"in",
"flat_ds",
"if",
"not",
"tf_compat",
".",
"is_dataset",
"(",
"tensor",
")",
"]",
")",
"# Merge the dataset iterators and np arrays",
"iter_ds",
"=",
"iter",
"(",
"ds_iters",
")",
"iter_array",
"=",
"iter",
"(",
"np_arrays",
")",
"flat_np",
"=",
"[",
"next",
"(",
"iter_ds",
")",
"if",
"tf_compat",
".",
"is_dataset",
"(",
"ds_el",
")",
"else",
"next",
"(",
"iter_array",
")",
"for",
"ds_el",
"in",
"flat_ds",
"]",
"# Nest",
"return",
"tf",
".",
"nest",
".",
"pack_sequence_as",
"(",
"nested_ds",
",",
"flat_np",
")"
] | Converts a `tf.data.Dataset` to an iterable of NumPy arrays.
`as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.
Args:
dataset: a possibly nested structure of `tf.data.Dataset`s and/or
`tf.Tensor`s.
graph: `tf.Graph`, optional, explicitly set the graph to use.
Returns:
A structure matching `dataset` where `tf.data.Dataset`s are converted to
generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays. | [
"Converts",
"a",
"tf",
".",
"data",
".",
"Dataset",
"to",
"an",
"iterable",
"of",
"NumPy",
"arrays",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L176-L242 |
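A minimal usage sketch for the `as_numpy` record above — `tfds.as_numpy` is the public entry point this helper backs, and the toy dataset below is an assumption for illustration, not part of this dump:

```python
import tensorflow as tf
import tensorflow_datasets as tfds

# Tiny in-memory dataset (hypothetical example data).
ds = tf.data.Dataset.from_tensor_slices({"x": [1, 2, 3]})

# In eager mode, as_numpy wraps each tf.data.Dataset in a NumPy generator.
for example in tfds.as_numpy(ds):
    print(example["x"])  # plain NumPy values: 1, 2, 3
```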
26,316 | tensorflow/datasets | tensorflow_datasets/image/shapes3d.py | _load_data | def _load_data(filepath):
"""Loads the images and latent values into Numpy arrays."""
with h5py.File(filepath, "r") as h5dataset:
image_array = np.array(h5dataset["images"])
# The 'label' data set in the hdf5 file actually contains the float values
# and not the class labels.
values_array = np.array(h5dataset["labels"])
return image_array, values_array | python | def _load_data(filepath):
"""Loads the images and latent values into Numpy arrays."""
with h5py.File(filepath, "r") as h5dataset:
image_array = np.array(h5dataset["images"])
# The 'label' data set in the hdf5 file actually contains the float values
# and not the class labels.
values_array = np.array(h5dataset["labels"])
return image_array, values_array | [
"def",
"_load_data",
"(",
"filepath",
")",
":",
"with",
"h5py",
".",
"File",
"(",
"filepath",
",",
"\"r\"",
")",
"as",
"h5dataset",
":",
"image_array",
"=",
"np",
".",
"array",
"(",
"h5dataset",
"[",
"\"images\"",
"]",
")",
"# The 'label' data set in the hdf5 file actually contains the float values",
"# and not the class labels.",
"values_array",
"=",
"np",
".",
"array",
"(",
"h5dataset",
"[",
"\"labels\"",
"]",
")",
"return",
"image_array",
",",
"values_array"
] | Loads the images and latent values into Numpy arrays. | [
"Loads",
"the",
"images",
"and",
"latent",
"values",
"into",
"Numpy",
"arrays",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/shapes3d.py#L151-L158 |
26,317 | tensorflow/datasets | tensorflow_datasets/image/shapes3d.py | _discretize | def _discretize(a):
"""Discretizes array values to class labels."""
arr = np.asarray(a)
index = np.argsort(arr)
inverse_index = np.zeros(arr.size, dtype=np.intp)
inverse_index[index] = np.arange(arr.size, dtype=np.intp)
arr = arr[index]
obs = np.r_[True, arr[1:] != arr[:-1]]
return obs.cumsum()[inverse_index] - 1 | python | def _discretize(a):
"""Discretizes array values to class labels."""
arr = np.asarray(a)
index = np.argsort(arr)
inverse_index = np.zeros(arr.size, dtype=np.intp)
inverse_index[index] = np.arange(arr.size, dtype=np.intp)
arr = arr[index]
obs = np.r_[True, arr[1:] != arr[:-1]]
return obs.cumsum()[inverse_index] - 1 | [
"def",
"_discretize",
"(",
"a",
")",
":",
"arr",
"=",
"np",
".",
"asarray",
"(",
"a",
")",
"index",
"=",
"np",
".",
"argsort",
"(",
"arr",
")",
"inverse_index",
"=",
"np",
".",
"zeros",
"(",
"arr",
".",
"size",
",",
"dtype",
"=",
"np",
".",
"intp",
")",
"inverse_index",
"[",
"index",
"]",
"=",
"np",
".",
"arange",
"(",
"arr",
".",
"size",
",",
"dtype",
"=",
"np",
".",
"intp",
")",
"arr",
"=",
"arr",
"[",
"index",
"]",
"obs",
"=",
"np",
".",
"r_",
"[",
"True",
",",
"arr",
"[",
"1",
":",
"]",
"!=",
"arr",
"[",
":",
"-",
"1",
"]",
"]",
"return",
"obs",
".",
"cumsum",
"(",
")",
"[",
"inverse_index",
"]",
"-",
"1"
] | Discretizes array values to class labels. | [
"Discretizes",
"array",
"values",
"to",
"class",
"labels",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/shapes3d.py#L163-L171 |
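A worked example of the discretization above, reusing the function body verbatim from the record (the input values are made up):

```python
import numpy as np

def _discretize(a):  # copied from the record above
    arr = np.asarray(a)
    index = np.argsort(arr)
    inverse_index = np.zeros(arr.size, dtype=np.intp)
    inverse_index[index] = np.arange(arr.size, dtype=np.intp)
    arr = arr[index]
    obs = np.r_[True, arr[1:] != arr[:-1]]
    return obs.cumsum()[inverse_index] - 1

# Equal floats share one label; labels follow sort order, not input order.
print(_discretize([0.3, 0.1, 0.3, 0.7]))  # -> [1 0 1 2]
```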
26,318 | tensorflow/datasets | tensorflow_datasets/image/shapes3d.py | Shapes3d._generate_examples | def _generate_examples(self, filepath):
"""Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file will be slow with a single file. Instead, we first load everything
# into memory before yielding the samples.
image_array, values_array = _load_data(filepath)
# We need to calculate the class labels from the float values in the file.
labels_array = np.zeros_like(values_array, dtype=np.int64)
for i in range(values_array.shape[1]):
labels_array[:, i] = _discretize(values_array[:, i]) # pylint: disable=unsupported-assignment-operation
for image, labels, values in moves.zip(image_array, labels_array,
values_array):
yield {
"image": image,
"label_floor_hue": labels[0],
"label_wall_hue": labels[1],
"label_object_hue": labels[2],
"label_scale": labels[3],
"label_shape": labels[4],
"label_orientation": labels[5],
"value_floor_hue": values[0],
"value_wall_hue": values[1],
"value_object_hue": values[2],
"value_scale": values[3],
"value_shape": values[4],
"value_orientation": values[5],
} | python | def _generate_examples(self, filepath):
"""Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file will be slow with a single file. Instead, we first load everything
# into memory before yielding the samples.
image_array, values_array = _load_data(filepath)
# We need to calculate the class labels from the float values in the file.
labels_array = np.zeros_like(values_array, dtype=np.int64)
for i in range(values_array.shape[1]):
labels_array[:, i] = _discretize(values_array[:, i]) # pylint: disable=unsupported-assignment-operation
for image, labels, values in moves.zip(image_array, labels_array,
values_array):
yield {
"image": image,
"label_floor_hue": labels[0],
"label_wall_hue": labels[1],
"label_object_hue": labels[2],
"label_scale": labels[3],
"label_shape": labels[4],
"label_orientation": labels[5],
"value_floor_hue": values[0],
"value_wall_hue": values[1],
"value_object_hue": values[2],
"value_scale": values[3],
"value_shape": values[4],
"value_orientation": values[5],
} | [
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"# Simultaneously iterating through the different data sets in the hdf5",
"# file will be slow with a single file. Instead, we first load everything",
"# into memory before yielding the samples.",
"image_array",
",",
"values_array",
"=",
"_load_data",
"(",
"filepath",
")",
"# We need to calculate the class labels from the float values in the file.",
"labels_array",
"=",
"np",
".",
"zeros_like",
"(",
"values_array",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"for",
"i",
"in",
"range",
"(",
"values_array",
".",
"shape",
"[",
"1",
"]",
")",
":",
"labels_array",
"[",
":",
",",
"i",
"]",
"=",
"_discretize",
"(",
"values_array",
"[",
":",
",",
"i",
"]",
")",
"# pylint: disable=unsupported-assignment-operation",
"for",
"image",
",",
"labels",
",",
"values",
"in",
"moves",
".",
"zip",
"(",
"image_array",
",",
"labels_array",
",",
"values_array",
")",
":",
"yield",
"{",
"\"image\"",
":",
"image",
",",
"\"label_floor_hue\"",
":",
"labels",
"[",
"0",
"]",
",",
"\"label_wall_hue\"",
":",
"labels",
"[",
"1",
"]",
",",
"\"label_object_hue\"",
":",
"labels",
"[",
"2",
"]",
",",
"\"label_scale\"",
":",
"labels",
"[",
"3",
"]",
",",
"\"label_shape\"",
":",
"labels",
"[",
"4",
"]",
",",
"\"label_orientation\"",
":",
"labels",
"[",
"5",
"]",
",",
"\"value_floor_hue\"",
":",
"values",
"[",
"0",
"]",
",",
"\"value_wall_hue\"",
":",
"values",
"[",
"1",
"]",
",",
"\"value_object_hue\"",
":",
"values",
"[",
"2",
"]",
",",
"\"value_scale\"",
":",
"values",
"[",
"3",
"]",
",",
"\"value_shape\"",
":",
"values",
"[",
"4",
"]",
",",
"\"value_orientation\"",
":",
"values",
"[",
"5",
"]",
",",
"}"
] | Generate examples for the Shapes3d dataset.
Args:
filepath: path to the Shapes3d hdf5 file.
Yields:
Dictionaries with images and the different labels. | [
"Generate",
"examples",
"for",
"the",
"Shapes3d",
"dataset",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/shapes3d.py#L113-L148 |
26,319 | tensorflow/datasets | tensorflow_datasets/text/wikipedia.py | _parse_and_clean_wikicode | def _parse_and_clean_wikicode(raw_content):
"""Strips formatting and unwanted sections from raw page content."""
wikicode = tfds.core.lazy_imports.mwparserfromhell.parse(raw_content)
# Filters for references, tables, and file/image links.
re_rm_wikilink = re.compile(
"^(?:File|Image|Media):", flags=re.IGNORECASE | re.UNICODE)
def rm_wikilink(obj):
return bool(re_rm_wikilink.match(six.text_type(obj.title)))
def rm_tag(obj):
return six.text_type(obj.tag) in {"ref", "table"}
def rm_template(obj):
return obj.name.lower() in {
"reflist", "notelist", "notelist-ua", "notelist-lr", "notelist-ur",
"notelist-lg"}
def try_remove_obj(obj, section):
try:
section.remove(obj)
except ValueError:
# For unknown reasons, objects are sometimes not found.
pass
section_text = []
# Filter individual sections to clean.
for section in wikicode.get_sections(
flat=True, include_lead=True, include_headings=True):
for obj in section.ifilter_wikilinks(matches=rm_wikilink, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_templates(matches=rm_template, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_tags(matches=rm_tag, recursive=True):
try_remove_obj(obj, section)
section_text.append(section.strip_code().strip())
return "\n\n".join(section_text) | python | def _parse_and_clean_wikicode(raw_content):
"""Strips formatting and unwanted sections from raw page content."""
wikicode = tfds.core.lazy_imports.mwparserfromhell.parse(raw_content)
# Filters for references, tables, and file/image links.
re_rm_wikilink = re.compile(
"^(?:File|Image|Media):", flags=re.IGNORECASE | re.UNICODE)
def rm_wikilink(obj):
return bool(re_rm_wikilink.match(six.text_type(obj.title)))
def rm_tag(obj):
return six.text_type(obj.tag) in {"ref", "table"}
def rm_template(obj):
return obj.name.lower() in {
"reflist", "notelist", "notelist-ua", "notelist-lr", "notelist-ur",
"notelist-lg"}
def try_remove_obj(obj, section):
try:
section.remove(obj)
except ValueError:
# For unknown reasons, objects are sometimes not found.
pass
section_text = []
# Filter individual sections to clean.
for section in wikicode.get_sections(
flat=True, include_lead=True, include_headings=True):
for obj in section.ifilter_wikilinks(matches=rm_wikilink, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_templates(matches=rm_template, recursive=True):
try_remove_obj(obj, section)
for obj in section.ifilter_tags(matches=rm_tag, recursive=True):
try_remove_obj(obj, section)
section_text.append(section.strip_code().strip())
return "\n\n".join(section_text) | [
"def",
"_parse_and_clean_wikicode",
"(",
"raw_content",
")",
":",
"wikicode",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"mwparserfromhell",
".",
"parse",
"(",
"raw_content",
")",
"# Filters for references, tables, and file/image links.",
"re_rm_wikilink",
"=",
"re",
".",
"compile",
"(",
"\"^(?:File|Image|Media):\"",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
"|",
"re",
".",
"UNICODE",
")",
"def",
"rm_wikilink",
"(",
"obj",
")",
":",
"return",
"bool",
"(",
"re_rm_wikilink",
".",
"match",
"(",
"six",
".",
"text_type",
"(",
"obj",
".",
"title",
")",
")",
")",
"def",
"rm_tag",
"(",
"obj",
")",
":",
"return",
"six",
".",
"text_type",
"(",
"obj",
".",
"tag",
")",
"in",
"{",
"\"ref\"",
",",
"\"table\"",
"}",
"def",
"rm_template",
"(",
"obj",
")",
":",
"return",
"obj",
".",
"name",
".",
"lower",
"(",
")",
"in",
"{",
"\"reflist\"",
",",
"\"notelist\"",
",",
"\"notelist-ua\"",
",",
"\"notelist-lr\"",
",",
"\"notelist-ur\"",
",",
"\"notelist-lg\"",
"}",
"def",
"try_remove_obj",
"(",
"obj",
",",
"section",
")",
":",
"try",
":",
"section",
".",
"remove",
"(",
"obj",
")",
"except",
"ValueError",
":",
"# For unknown reasons, objects are sometimes not found.",
"pass",
"section_text",
"=",
"[",
"]",
"# Filter individual sections to clean.",
"for",
"section",
"in",
"wikicode",
".",
"get_sections",
"(",
"flat",
"=",
"True",
",",
"include_lead",
"=",
"True",
",",
"include_headings",
"=",
"True",
")",
":",
"for",
"obj",
"in",
"section",
".",
"ifilter_wikilinks",
"(",
"matches",
"=",
"rm_wikilink",
",",
"recursive",
"=",
"True",
")",
":",
"try_remove_obj",
"(",
"obj",
",",
"section",
")",
"for",
"obj",
"in",
"section",
".",
"ifilter_templates",
"(",
"matches",
"=",
"rm_template",
",",
"recursive",
"=",
"True",
")",
":",
"try_remove_obj",
"(",
"obj",
",",
"section",
")",
"for",
"obj",
"in",
"section",
".",
"ifilter_tags",
"(",
"matches",
"=",
"rm_tag",
",",
"recursive",
"=",
"True",
")",
":",
"try_remove_obj",
"(",
"obj",
",",
"section",
")",
"section_text",
".",
"append",
"(",
"section",
".",
"strip_code",
"(",
")",
".",
"strip",
"(",
")",
")",
"return",
"\"\\n\\n\"",
".",
"join",
"(",
"section_text",
")"
] | Strips formatting and unwanted sections from raw page content. | [
"Strips",
"formatting",
"and",
"unwanted",
"sections",
"from",
"raw",
"page",
"content",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/wikipedia.py#L234-L269 |
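A minimal sketch of the parser call this record relies on, assuming `mwparserfromhell` is installed; the sample wikitext is made up:

```python
import mwparserfromhell

raw = "'''Foo''' is a [[bar|link]] with a {{reflist}} template."
wikicode = mwparserfromhell.parse(raw)
# strip_code() keeps displayable text and drops markup; the record applies it
# per section after filtering out reference/table/file nodes.
print(wikicode.strip_code().strip())
```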
26,320 | tensorflow/datasets | tensorflow_datasets/scripts/download_and_prepare.py | download_and_prepare | def download_and_prepare(builder):
"""Generate data for a given dataset."""
print("download_and_prepare for dataset {}...".format(builder.info.full_name))
dl_config = download_config()
if isinstance(builder, tfds.core.BeamBasedBuilder):
beam = tfds.core.lazy_imports.apache_beam
# TODO(b/129149715): Restore compute stats. Currently skipped because not
  # supported by beam.
dl_config.compute_stats = tfds.download.ComputeStatsMode.SKIP
dl_config.beam_options = beam.options.pipeline_options.PipelineOptions()
builder.download_and_prepare(
download_dir=FLAGS.download_dir,
download_config=dl_config,
)
termcolor.cprint(str(builder.info.as_proto), attrs=["bold"])
if FLAGS.debug:
dataset = builder.as_dataset(split=tfds.Split.TRAIN)
pdb.set_trace()
del dataset | python | def download_and_prepare(builder):
"""Generate data for a given dataset."""
print("download_and_prepare for dataset {}...".format(builder.info.full_name))
dl_config = download_config()
if isinstance(builder, tfds.core.BeamBasedBuilder):
beam = tfds.core.lazy_imports.apache_beam
# TODO(b/129149715): Restore compute stats. Currently skipped because not
  # supported by beam.
dl_config.compute_stats = tfds.download.ComputeStatsMode.SKIP
dl_config.beam_options = beam.options.pipeline_options.PipelineOptions()
builder.download_and_prepare(
download_dir=FLAGS.download_dir,
download_config=dl_config,
)
termcolor.cprint(str(builder.info.as_proto), attrs=["bold"])
if FLAGS.debug:
dataset = builder.as_dataset(split=tfds.Split.TRAIN)
pdb.set_trace()
del dataset | [
"def",
"download_and_prepare",
"(",
"builder",
")",
":",
"print",
"(",
"\"download_and_prepare for dataset {}...\"",
".",
"format",
"(",
"builder",
".",
"info",
".",
"full_name",
")",
")",
"dl_config",
"=",
"download_config",
"(",
")",
"if",
"isinstance",
"(",
"builder",
",",
"tfds",
".",
"core",
".",
"BeamBasedBuilder",
")",
":",
"beam",
"=",
"tfds",
".",
"core",
".",
"lazy_imports",
".",
"apache_beam",
"# TODO(b/129149715): Restore compute stats. Currently skipped because not",
"# beam supported.",
"dl_config",
".",
"compute_stats",
"=",
"tfds",
".",
"download",
".",
"ComputeStatsMode",
".",
"SKIP",
"dl_config",
".",
"beam_options",
"=",
"beam",
".",
"options",
".",
"pipeline_options",
".",
"PipelineOptions",
"(",
")",
"builder",
".",
"download_and_prepare",
"(",
"download_dir",
"=",
"FLAGS",
".",
"download_dir",
",",
"download_config",
"=",
"dl_config",
",",
")",
"termcolor",
".",
"cprint",
"(",
"str",
"(",
"builder",
".",
"info",
".",
"as_proto",
")",
",",
"attrs",
"=",
"[",
"\"bold\"",
"]",
")",
"if",
"FLAGS",
".",
"debug",
":",
"dataset",
"=",
"builder",
".",
"as_dataset",
"(",
"split",
"=",
"tfds",
".",
"Split",
".",
"TRAIN",
")",
"pdb",
".",
"set_trace",
"(",
")",
"del",
"dataset"
] | Generate data for a given dataset. | [
"Generate",
"data",
"for",
"a",
"given",
"dataset",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/scripts/download_and_prepare.py#L113-L135 |
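A hedged sketch of the builder workflow this script drives (the dataset name and download path are placeholders):

```python
import tensorflow_datasets as tfds

builder = tfds.builder("mnist")                       # any registered dataset
builder.download_and_prepare(download_dir="/tmp/dl")  # mirrors the call above
ds = builder.as_dataset(split=tfds.Split.TRAIN)       # what --debug inspects
```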
26,321 | tensorflow/datasets | tensorflow_datasets/image/cifar.py | Cifar10._generate_examples | def _generate_examples(self, filepaths):
"""Generate CIFAR examples as dicts.
Shared across CIFAR-{10, 100}. Uses self._cifar_info as
configuration.
Args:
filepaths (list[str]): The files to use to generate the data.
Yields:
The cifar examples, as defined in the dataset info features.
"""
label_keys = self._cifar_info.label_keys
for path in filepaths:
for labels, np_image in _load_data(path, len(label_keys)):
row = dict(zip(label_keys, labels))
row["image"] = np_image
yield row | python | def _generate_examples(self, filepaths):
"""Generate CIFAR examples as dicts.
Shared across CIFAR-{10, 100}. Uses self._cifar_info as
configuration.
Args:
filepaths (list[str]): The files to use to generate the data.
Yields:
The cifar examples, as defined in the dataset info features.
"""
label_keys = self._cifar_info.label_keys
for path in filepaths:
for labels, np_image in _load_data(path, len(label_keys)):
row = dict(zip(label_keys, labels))
row["image"] = np_image
yield row | [
"def",
"_generate_examples",
"(",
"self",
",",
"filepaths",
")",
":",
"label_keys",
"=",
"self",
".",
"_cifar_info",
".",
"label_keys",
"for",
"path",
"in",
"filepaths",
":",
"for",
"labels",
",",
"np_image",
"in",
"_load_data",
"(",
"path",
",",
"len",
"(",
"label_keys",
")",
")",
":",
"row",
"=",
"dict",
"(",
"zip",
"(",
"label_keys",
",",
"labels",
")",
")",
"row",
"[",
"\"image\"",
"]",
"=",
"np_image",
"yield",
"row"
] | Generate CIFAR examples as dicts.
Shared across CIFAR-{10, 100}. Uses self._cifar_info as
configuration.
Args:
filepaths (list[str]): The files to use to generate the data.
Yields:
The cifar examples, as defined in the dataset info features. | [
"Generate",
"CIFAR",
"examples",
"as",
"dicts",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar.py#L110-L127 |
26,322 | tensorflow/datasets | tensorflow_datasets/core/api_utils.py | disallow_positional_args | def disallow_positional_args(wrapped=None, allowed=None):
"""Requires function to be called using keyword arguments."""
# See
# https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments
# for decorator pattern.
if wrapped is None:
return functools.partial(disallow_positional_args, allowed=allowed)
@wrapt.decorator
def disallow_positional_args_dec(fn, instance, args, kwargs):
ismethod = instance is not None
_check_no_positional(fn, args, ismethod, allowed=allowed)
_check_required(fn, kwargs)
return fn(*args, **kwargs)
return disallow_positional_args_dec(wrapped) | python | def disallow_positional_args(wrapped=None, allowed=None):
"""Requires function to be called using keyword arguments."""
# See
# https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments
# for decorator pattern.
if wrapped is None:
return functools.partial(disallow_positional_args, allowed=allowed)
@wrapt.decorator
def disallow_positional_args_dec(fn, instance, args, kwargs):
ismethod = instance is not None
_check_no_positional(fn, args, ismethod, allowed=allowed)
_check_required(fn, kwargs)
return fn(*args, **kwargs)
return disallow_positional_args_dec(wrapped) | [
"def",
"disallow_positional_args",
"(",
"wrapped",
"=",
"None",
",",
"allowed",
"=",
"None",
")",
":",
"# See",
"# https://wrapt.readthedocs.io/en/latest/decorators.html#decorators-with-optional-arguments",
"# for decorator pattern.",
"if",
"wrapped",
"is",
"None",
":",
"return",
"functools",
".",
"partial",
"(",
"disallow_positional_args",
",",
"allowed",
"=",
"allowed",
")",
"@",
"wrapt",
".",
"decorator",
"def",
"disallow_positional_args_dec",
"(",
"fn",
",",
"instance",
",",
"args",
",",
"kwargs",
")",
":",
"ismethod",
"=",
"instance",
"is",
"not",
"None",
"_check_no_positional",
"(",
"fn",
",",
"args",
",",
"ismethod",
",",
"allowed",
"=",
"allowed",
")",
"_check_required",
"(",
"fn",
",",
"kwargs",
")",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"disallow_positional_args_dec",
"(",
"wrapped",
")"
] | Requires function to be called using keyword arguments. | [
"Requires",
"function",
"to",
"be",
"called",
"using",
"keyword",
"arguments",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/api_utils.py#L39-L54 |
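A usage sketch for the decorator above; the decorated function is hypothetical, and the exact exception raised comes from `_check_no_positional`, which this record does not show:

```python
from tensorflow_datasets.core.api_utils import disallow_positional_args  # module path from the record

@disallow_positional_args
def make_model(name=None, depth=None):  # hypothetical function
    return name, depth

make_model(name="resnet", depth=50)  # fine: keyword arguments only
# make_model("resnet", 50)           # would be rejected by _check_no_positional
```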
26,323 | tensorflow/datasets | tensorflow_datasets/core/api_utils.py | _required_args | def _required_args(fn):
"""Returns arguments of fn with default=REQUIRED_ARG."""
spec = getargspec(fn)
if not spec.defaults:
return []
arg_names = spec.args[-len(spec.defaults):]
return [name for name, val in zip(arg_names, spec.defaults)
if val is REQUIRED_ARG] | python | def _required_args(fn):
"""Returns arguments of fn with default=REQUIRED_ARG."""
spec = getargspec(fn)
if not spec.defaults:
return []
arg_names = spec.args[-len(spec.defaults):]
return [name for name, val in zip(arg_names, spec.defaults)
if val is REQUIRED_ARG] | [
"def",
"_required_args",
"(",
"fn",
")",
":",
"spec",
"=",
"getargspec",
"(",
"fn",
")",
"if",
"not",
"spec",
".",
"defaults",
":",
"return",
"[",
"]",
"arg_names",
"=",
"spec",
".",
"args",
"[",
"-",
"len",
"(",
"spec",
".",
"defaults",
")",
":",
"]",
"return",
"[",
"name",
"for",
"name",
",",
"val",
"in",
"zip",
"(",
"arg_names",
",",
"spec",
".",
"defaults",
")",
"if",
"val",
"is",
"REQUIRED_ARG",
"]"
] | Returns arguments of fn with default=REQUIRED_ARG. | [
"Returns",
"arguments",
"of",
"fn",
"with",
"default",
"=",
"REQUIRED_ARG",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/api_utils.py#L67-L75 |
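A worked example for `_required_args`; `REQUIRED_ARG` is the module's sentinel default, defined elsewhere in api_utils:

```python
from tensorflow_datasets.core.api_utils import REQUIRED_ARG, _required_args

def fn(a, b=REQUIRED_ARG, c=1):  # hypothetical function
    return a, b, c

print(_required_args(fn))  # -> ['b']: only defaults that are the sentinel count
```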
26,324 | tensorflow/datasets | tensorflow_datasets/core/utils/gcs_utils.py | download_gcs_file | def download_gcs_file(path, out_fname=None, prefix_filter=None):
"""Download a file from GCS, optionally to a file."""
url = posixpath.join(GCS_BUCKET, path)
if prefix_filter:
url += "?prefix=%s" % prefix_filter
stream = bool(out_fname)
resp = requests.get(url, stream=stream)
if not resp.ok:
raise ValueError("GCS bucket inaccessible")
if out_fname:
with tf.io.gfile.GFile(out_fname, "wb") as f:
for chunk in resp.iter_content(1024):
f.write(chunk)
else:
return resp.content | python | def download_gcs_file(path, out_fname=None, prefix_filter=None):
"""Download a file from GCS, optionally to a file."""
url = posixpath.join(GCS_BUCKET, path)
if prefix_filter:
url += "?prefix=%s" % prefix_filter
stream = bool(out_fname)
resp = requests.get(url, stream=stream)
if not resp.ok:
raise ValueError("GCS bucket inaccessible")
if out_fname:
with tf.io.gfile.GFile(out_fname, "wb") as f:
for chunk in resp.iter_content(1024):
f.write(chunk)
else:
return resp.content | [
"def",
"download_gcs_file",
"(",
"path",
",",
"out_fname",
"=",
"None",
",",
"prefix_filter",
"=",
"None",
")",
":",
"url",
"=",
"posixpath",
".",
"join",
"(",
"GCS_BUCKET",
",",
"path",
")",
"if",
"prefix_filter",
":",
"url",
"+=",
"\"?prefix=%s\"",
"%",
"prefix_filter",
"stream",
"=",
"bool",
"(",
"out_fname",
")",
"resp",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"stream",
")",
"if",
"not",
"resp",
".",
"ok",
":",
"raise",
"ValueError",
"(",
"\"GCS bucket inaccessible\"",
")",
"if",
"out_fname",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"out_fname",
",",
"\"wb\"",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"resp",
".",
"iter_content",
"(",
"1024",
")",
":",
"f",
".",
"write",
"(",
"chunk",
")",
"else",
":",
"return",
"resp",
".",
"content"
] | Download a file from GCS, optionally to a file. | [
"Download",
"a",
"file",
"from",
"GCS",
"optionally",
"to",
"a",
"file",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/gcs_utils.py#L34-L48 |
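A hedged usage sketch for `download_gcs_file`; the object path is illustrative, and `GCS_BUCKET` comes from the module above:

```python
from tensorflow_datasets.core.utils.gcs_utils import download_gcs_file

# In-memory fetch: returns the raw response bytes.
data = download_gcs_file("datasets/mnist/1.0.0/dataset_info.json")

# Streamed to disk in 1024-byte chunks, as the function body shows.
download_gcs_file("datasets/mnist/1.0.0/dataset_info.json",
                  out_fname="/tmp/dataset_info.json")
```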
26,325 | tensorflow/datasets | tensorflow_datasets/core/utils/gcs_utils.py | gcs_files | def gcs_files(prefix_filter=None):
"""List all files in GCS bucket."""
top_level_xml_str = download_gcs_file("", prefix_filter=prefix_filter)
xml_root = ElementTree.fromstring(top_level_xml_str)
filenames = [el[0].text for el in xml_root if el.tag.endswith("Contents")]
return filenames | python | def gcs_files(prefix_filter=None):
"""List all files in GCS bucket."""
top_level_xml_str = download_gcs_file("", prefix_filter=prefix_filter)
xml_root = ElementTree.fromstring(top_level_xml_str)
filenames = [el[0].text for el in xml_root if el.tag.endswith("Contents")]
return filenames | [
"def",
"gcs_files",
"(",
"prefix_filter",
"=",
"None",
")",
":",
"top_level_xml_str",
"=",
"download_gcs_file",
"(",
"\"\"",
",",
"prefix_filter",
"=",
"prefix_filter",
")",
"xml_root",
"=",
"ElementTree",
".",
"fromstring",
"(",
"top_level_xml_str",
")",
"filenames",
"=",
"[",
"el",
"[",
"0",
"]",
".",
"text",
"for",
"el",
"in",
"xml_root",
"if",
"el",
".",
"tag",
".",
"endswith",
"(",
"\"Contents\"",
")",
"]",
"return",
"filenames"
] | List all files in GCS bucket. | [
"List",
"all",
"files",
"in",
"GCS",
"bucket",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/gcs_utils.py#L52-L57 |
26,326 | tensorflow/datasets | tensorflow_datasets/core/utils/gcs_utils.py | gcs_dataset_info_files | def gcs_dataset_info_files(dataset_dir):
"""Return paths to GCS files in the given dataset directory."""
prefix = posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir, "")
# Filter for this dataset
filenames = [el for el in gcs_files(prefix_filter=prefix)
if el.startswith(prefix) and len(el) > len(prefix)]
return filenames | python | def gcs_dataset_info_files(dataset_dir):
"""Return paths to GCS files in the given dataset directory."""
prefix = posixpath.join(GCS_DATASET_INFO_DIR, dataset_dir, "")
# Filter for this dataset
filenames = [el for el in gcs_files(prefix_filter=prefix)
if el.startswith(prefix) and len(el) > len(prefix)]
return filenames | [
"def",
"gcs_dataset_info_files",
"(",
"dataset_dir",
")",
":",
"prefix",
"=",
"posixpath",
".",
"join",
"(",
"GCS_DATASET_INFO_DIR",
",",
"dataset_dir",
",",
"\"\"",
")",
"# Filter for this dataset",
"filenames",
"=",
"[",
"el",
"for",
"el",
"in",
"gcs_files",
"(",
"prefix_filter",
"=",
"prefix",
")",
"if",
"el",
".",
"startswith",
"(",
"prefix",
")",
"and",
"len",
"(",
"el",
")",
">",
"len",
"(",
"prefix",
")",
"]",
"return",
"filenames"
] | Return paths to GCS files in the given dataset directory. | [
"Return",
"paths",
"to",
"GCS",
"files",
"in",
"the",
"given",
"dataset",
"directory",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/gcs_utils.py#L60-L66 |
26,327 | tensorflow/datasets | tensorflow_datasets/core/download/kaggle.py | _run_kaggle_command | def _run_kaggle_command(command_args, competition_name):
"""Run kaggle command with subprocess."""
try:
output = sp.check_output(command_args)
return tf.compat.as_text(output)
except sp.CalledProcessError as err:
output = err.output
_log_command_output(output, error=True)
if output.startswith(b"404"):
logging.error(_NOT_FOUND_ERR_MSG, competition_name)
raise
logging.error(_ERR_MSG, competition_name)
raise | python | def _run_kaggle_command(command_args, competition_name):
"""Run kaggle command with subprocess."""
try:
output = sp.check_output(command_args)
return tf.compat.as_text(output)
except sp.CalledProcessError as err:
output = err.output
_log_command_output(output, error=True)
if output.startswith(b"404"):
logging.error(_NOT_FOUND_ERR_MSG, competition_name)
raise
logging.error(_ERR_MSG, competition_name)
raise | [
"def",
"_run_kaggle_command",
"(",
"command_args",
",",
"competition_name",
")",
":",
"try",
":",
"output",
"=",
"sp",
".",
"check_output",
"(",
"command_args",
")",
"return",
"tf",
".",
"compat",
".",
"as_text",
"(",
"output",
")",
"except",
"sp",
".",
"CalledProcessError",
"as",
"err",
":",
"output",
"=",
"err",
".",
"output",
"_log_command_output",
"(",
"output",
",",
"error",
"=",
"True",
")",
"if",
"output",
".",
"startswith",
"(",
"b\"404\"",
")",
":",
"logging",
".",
"error",
"(",
"_NOT_FOUND_ERR_MSG",
",",
"competition_name",
")",
"raise",
"logging",
".",
"error",
"(",
"_ERR_MSG",
",",
"competition_name",
")",
"raise"
] | Run kaggle command with subprocess. | [
"Run",
"kaggle",
"command",
"with",
"subprocess",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/kaggle.py#L138-L150 |
26,328 | tensorflow/datasets | tensorflow_datasets/core/download/kaggle.py | KaggleCompetitionDownloader.competition_files | def competition_files(self):
"""List of competition files."""
command = [
"kaggle",
"datasets" if "/" in self._competition_name else "competitions",
"files",
"-v",
self._competition_name,
]
output = _run_kaggle_command(command, self._competition_name)
return sorted([
line.split(",")[0] for line in output.split("\n")[1:] if line
]) | python | def competition_files(self):
"""List of competition files."""
command = [
"kaggle",
"datasets" if "/" in self._competition_name else "competitions",
"files",
"-v",
self._competition_name,
]
output = _run_kaggle_command(command, self._competition_name)
return sorted([
line.split(",")[0] for line in output.split("\n")[1:] if line
]) | [
"def",
"competition_files",
"(",
"self",
")",
":",
"command",
"=",
"[",
"\"kaggle\"",
",",
"\"datasets\"",
"if",
"\"/\"",
"in",
"self",
".",
"_competition_name",
"else",
"\"competitions\"",
",",
"\"files\"",
",",
"\"-v\"",
",",
"self",
".",
"_competition_name",
",",
"]",
"output",
"=",
"_run_kaggle_command",
"(",
"command",
",",
"self",
".",
"_competition_name",
")",
"return",
"sorted",
"(",
"[",
"line",
".",
"split",
"(",
"\",\"",
")",
"[",
"0",
"]",
"for",
"line",
"in",
"output",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"1",
":",
"]",
"if",
"line",
"]",
")"
] | List of competition files. | [
"List",
"of",
"competition",
"files",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/kaggle.py#L96-L108 |
26,329 | tensorflow/datasets | tensorflow_datasets/core/download/kaggle.py | KaggleCompetitionDownloader.download_file | def download_file(self, fname, output_dir):
"""Downloads competition file to output_dir."""
if fname not in self.competition_files: # pylint: disable=unsupported-membership-test
raise ValueError("%s is not one of the competition's "
"files: %s" % (fname, self.competition_files))
command = [
"kaggle",
"competitions",
"download",
"--file",
fname,
"--path",
output_dir,
"-c",
self._competition_name,
]
_run_kaggle_command(command, self._competition_name)
return os.path.join(output_dir, fname) | python | def download_file(self, fname, output_dir):
"""Downloads competition file to output_dir."""
if fname not in self.competition_files: # pylint: disable=unsupported-membership-test
raise ValueError("%s is not one of the competition's "
"files: %s" % (fname, self.competition_files))
command = [
"kaggle",
"competitions",
"download",
"--file",
fname,
"--path",
output_dir,
"-c",
self._competition_name,
]
_run_kaggle_command(command, self._competition_name)
return os.path.join(output_dir, fname) | [
"def",
"download_file",
"(",
"self",
",",
"fname",
",",
"output_dir",
")",
":",
"if",
"fname",
"not",
"in",
"self",
".",
"competition_files",
":",
"# pylint: disable=unsupported-membership-test",
"raise",
"ValueError",
"(",
"\"%s is not one of the competition's \"",
"\"files: %s\"",
"%",
"(",
"fname",
",",
"self",
".",
"competition_files",
")",
")",
"command",
"=",
"[",
"\"kaggle\"",
",",
"\"competitions\"",
",",
"\"download\"",
",",
"\"--file\"",
",",
"fname",
",",
"\"--path\"",
",",
"output_dir",
",",
"\"-c\"",
",",
"self",
".",
"_competition_name",
",",
"]",
"_run_kaggle_command",
"(",
"command",
",",
"self",
".",
"_competition_name",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"fname",
")"
] | Downloads competition file to output_dir. | [
"Downloads",
"competition",
"file",
"to",
"output_dir",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/kaggle.py#L118-L135 |
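A sketch of driving the downloader; the competition and file names are made up, and the constructor signature is assumed from the class name rather than shown in this dump:

```python
from tensorflow_datasets.core.download.kaggle import KaggleCompetitionDownloader

downloader = KaggleCompetitionDownloader("titanic")  # constructor signature assumed
print(downloader.competition_files)                  # e.g. ['train.csv', ...]
path = downloader.download_file("train.csv", "/tmp/kaggle")
```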
26,330 | tensorflow/datasets | tensorflow_datasets/image/flowers.py | TFFlowers._generate_examples | def _generate_examples(self, images_dir_path):
"""Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label.
"""
parent_dir = tf.io.gfile.listdir(images_dir_path)[0]
walk_dir = os.path.join(images_dir_path, parent_dir)
dirs = tf.io.gfile.listdir(walk_dir)
for d in dirs:
if tf.io.gfile.isdir(os.path.join(walk_dir, d)):
for full_path, _, fname in tf.io.gfile.walk(os.path.join(walk_dir, d)):
for image_file in fname:
if image_file.endswith(".jpg"):
image_path = os.path.join(full_path, image_file)
yield {
"image": image_path,
"label": d.lower(),
} | python | def _generate_examples(self, images_dir_path):
"""Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label.
"""
parent_dir = tf.io.gfile.listdir(images_dir_path)[0]
walk_dir = os.path.join(images_dir_path, parent_dir)
dirs = tf.io.gfile.listdir(walk_dir)
for d in dirs:
if tf.io.gfile.isdir(os.path.join(walk_dir, d)):
for full_path, _, fname in tf.io.gfile.walk(os.path.join(walk_dir, d)):
for image_file in fname:
if image_file.endswith(".jpg"):
image_path = os.path.join(full_path, image_file)
yield {
"image": image_path,
"label": d.lower(),
} | [
"def",
"_generate_examples",
"(",
"self",
",",
"images_dir_path",
")",
":",
"parent_dir",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"images_dir_path",
")",
"[",
"0",
"]",
"walk_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"images_dir_path",
",",
"parent_dir",
")",
"dirs",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"walk_dir",
")",
"for",
"d",
"in",
"dirs",
":",
"if",
"tf",
".",
"io",
".",
"gfile",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"walk_dir",
",",
"d",
")",
")",
":",
"for",
"full_path",
",",
"_",
",",
"fname",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"walk",
"(",
"os",
".",
"path",
".",
"join",
"(",
"walk_dir",
",",
"d",
")",
")",
":",
"for",
"image_file",
"in",
"fname",
":",
"if",
"image_file",
".",
"endswith",
"(",
"\".jpg\"",
")",
":",
"image_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"full_path",
",",
"image_file",
")",
"yield",
"{",
"\"image\"",
":",
"image_path",
",",
"\"label\"",
":",
"d",
".",
"lower",
"(",
")",
",",
"}"
] | Generate flower images and labels given the image directory path.
Args:
images_dir_path: path to the directory where the images are stored.
Yields:
The image path and its corresponding label. | [
"Generate",
"flower",
"images",
"and",
"labels",
"given",
"the",
"image",
"directory",
"path",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/flowers.py#L71-L93 |
26,331 | tensorflow/datasets | tensorflow_datasets/core/download/checksums.py | _get_path | def _get_path(dataset_name):
"""Returns path to where checksums are stored for a given dataset."""
path = _checksum_paths().get(dataset_name, None)
if path:
return path
  msg = ('No checksums file could be found for dataset %s. Please create one in '
'one of: %s') % (dataset_name, ', '.join(_CHECKSUM_DIRS))
raise AssertionError(msg) | python | def _get_path(dataset_name):
"""Returns path to where checksums are stored for a given dataset."""
path = _checksum_paths().get(dataset_name, None)
if path:
return path
  msg = ('No checksums file could be found for dataset %s. Please create one in '
'one of: %s') % (dataset_name, ', '.join(_CHECKSUM_DIRS))
raise AssertionError(msg) | [
"def",
"_get_path",
"(",
"dataset_name",
")",
":",
"path",
"=",
"_checksum_paths",
"(",
")",
".",
"get",
"(",
"dataset_name",
",",
"None",
")",
"if",
"path",
":",
"return",
"path",
"msg",
"=",
"(",
"'No checksums file could be find for dataset %s. Please create one in '",
"'one of: %s'",
")",
"%",
"(",
"dataset_name",
",",
"', '",
".",
"join",
"(",
"_CHECKSUM_DIRS",
")",
")",
"raise",
"AssertionError",
"(",
"msg",
")"
] | Returns path to where checksums are stored for a given dataset. | [
"Returns",
"path",
"to",
"where",
"checksums",
"are",
"stored",
"for",
"a",
"given",
"dataset",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L59-L66 |
26,332 | tensorflow/datasets | tensorflow_datasets/core/download/checksums.py | store_checksums | def store_checksums(dataset_name, sizes_checksums):
"""Store given checksums and sizes for specific dataset.
Content of file is never discarded, only updated. This is to ensure that if
process is killed right after first download finishes, checksums registered
during previous runs aren't lost.
It is the responsibility of the caller not to call function multiple times in
parallel for a given dataset.
Only original file content is updated. This means the entire set of new sizes
and checksums must be given at every call.
Args:
dataset_name: string.
sizes_checksums: dict, {url: (size_in_bytes, checksum)}.
"""
path = _get_path(dataset_name)
original_data = _get_sizes_checksums(path)
new_data = original_data.copy()
new_data.update(sizes_checksums)
if original_data == new_data:
return
with tf.io.gfile.GFile(path, 'w') as f:
for url, (size, checksum) in sorted(new_data.items()):
f.write('%s %s %s\n' % (url, size, checksum)) | python | def store_checksums(dataset_name, sizes_checksums):
"""Store given checksums and sizes for specific dataset.
Content of file is never discarded, only updated. This is to ensure that if
process is killed right after first download finishes, checksums registered
during previous runs aren't lost.
It is the responsibility of the caller not to call function multiple times in
parallel for a given dataset.
Only original file content is updated. This means the entire set of new sizes
and checksums must be given at every call.
Args:
dataset_name: string.
sizes_checksums: dict, {url: (size_in_bytes, checksum)}.
"""
path = _get_path(dataset_name)
original_data = _get_sizes_checksums(path)
new_data = original_data.copy()
new_data.update(sizes_checksums)
if original_data == new_data:
return
with tf.io.gfile.GFile(path, 'w') as f:
for url, (size, checksum) in sorted(new_data.items()):
f.write('%s %s %s\n' % (url, size, checksum)) | [
"def",
"store_checksums",
"(",
"dataset_name",
",",
"sizes_checksums",
")",
":",
"path",
"=",
"_get_path",
"(",
"dataset_name",
")",
"original_data",
"=",
"_get_sizes_checksums",
"(",
"path",
")",
"new_data",
"=",
"original_data",
".",
"copy",
"(",
")",
"new_data",
".",
"update",
"(",
"sizes_checksums",
")",
"if",
"original_data",
"==",
"new_data",
":",
"return",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"url",
",",
"(",
"size",
",",
"checksum",
")",
"in",
"sorted",
"(",
"new_data",
".",
"items",
"(",
")",
")",
":",
"f",
".",
"write",
"(",
"'%s %s %s\\n'",
"%",
"(",
"url",
",",
"size",
",",
"checksum",
")",
")"
] | Store given checksums and sizes for specific dataset.
Content of file is never discarded, only updated. This is to ensure that if
process is killed right after first download finishes, checksums registered
during previous runs aren't lost.
It is the responsibility of the caller not to call function multiple times in
parallel for a given dataset.
Only original file content is updated. This means the entire set of new sizes
and checksums must be given at every call.
Args:
dataset_name: string.
sizes_checksums: dict, {url: (size_in_bytes, checksum)}. | [
"Store",
"given",
"checksums",
"and",
"sizes",
"for",
"specific",
"dataset",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L102-L127 |
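A sketch of the on-disk format `store_checksums` produces: one space-separated `url size checksum` triple per line, sorted by url (the values below are made up):

```python
from tensorflow_datasets.core.download.checksums import store_checksums

sizes_checksums = {
    "https://example.com/b.zip": (2048, "feed..."),  # made-up size/checksum
    "https://example.com/a.zip": (1024, "dead..."),
}
store_checksums("my_dataset", sizes_checksums)  # needs a checksums file registered
# Resulting file lines, written sorted by url:
#   https://example.com/a.zip 1024 dead...
#   https://example.com/b.zip 2048 feed...
```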
26,333 | tensorflow/datasets | tensorflow_datasets/core/download/resource.py | _sanitize_url | def _sanitize_url(url, max_length):
"""Sanitize and shorten url to fit in max_length.
Function is stable: same input MUST ALWAYS give same result, across changes
in code as well. Different URLs might give same result.
As much as possible, the extension should be kept.
Heuristics are applied to only keep useful info from url.
1- Drop generic [sub]domains.
'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...'
'storage.googleapis.com/foo/...' -> 'foo/...'
'drive.google.com/bar/...' -> 'bar/...'
'github.com/baz/...' -> 'baz/...'
2- Remove leading '0's from url components:
'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords'
3- Truncate each component of url until total size fits or each component is
left with 4 chars (or total size is <= limit):
'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords'
(here truncate components to 4 chars per component max)
-> 'Move_64x6_png/trai-4-of-10.tfrecords'
4- Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def'
Args:
url: string, url to sanitize and shorten.
max_length: int, max length of result.
Returns:
(string, string): sanitized and shorted url, file extension.
"""
url = urllib.parse.urlparse(url)
netloc = url.netloc
for prefix in _NETLOC_COMMON_PREFIXES:
if netloc.startswith(prefix):
netloc = netloc[len(prefix):]
for suffix in _NETLOC_COMMON_SUFFIXES:
if netloc.endswith(suffix):
netloc = netloc[:-len(suffix)]
url = '%s%s%s%s' % (netloc, url.path, url.params, url.query)
# Get the extension:
for ext in _KNOWN_EXTENSIONS:
if url.endswith(ext):
extension = ext
url = url[:-len(extension)]
break
else:
url, extension = os.path.splitext(url)
max_length -= len(extension)
# Replace non authorized chars (including '/') by '_':
url = re.sub(r'[^a-zA-Z0-9\.\-_]+', '_', url)
# Remove parts with no info:
for common_part in _URL_COMMON_PARTS:
url = url.replace(common_part, '_')
url = url.strip('_')
# Remove leading zeros in groups of numbers:
url = re.sub('(?<![0-9])0+(?=[0-9])', '', url)
# Decrease max size of URL components:
c_size = max(len(c) for c in re.split(r'[\.\-_]', url))
while c_size > 4 and len(url) > max_length:
c_size -= 1
url = re.sub(r'[^\.\-_]{4,}', lambda match: match.group(0)[:c_size], url)
return url[:max_length], extension | python | def _sanitize_url(url, max_length):
"""Sanitize and shorten url to fit in max_length.
Function is stable: same input MUST ALWAYS give same result, across changes
in code as well. Different URLs might give same result.
As much as possible, the extension should be kept.
Heuristics are applied to only keep useful info from url.
1- Drop generic [sub]domains.
'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...'
'storage.googleapis.com/foo/...' -> 'foo/...'
'drive.google.com/bar/...' -> 'bar/...'
'github.com/baz/...' -> 'baz/...'
2- Remove leading '0's from url components:
'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords'
3- Truncate each component of url until total size fits or each component is
left with 4 chars (or total size is <= limit):
'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords'
(here truncate components to 4 chars per component max)
-> 'Move_64x6_png/trai-4-of-10.tfrecords'
4- Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def'
Args:
url: string, url to sanitize and shorten.
max_length: int, max length of result.
Returns:
(string, string): sanitized and shortened url, file extension.
"""
url = urllib.parse.urlparse(url)
netloc = url.netloc
for prefix in _NETLOC_COMMON_PREFIXES:
if netloc.startswith(prefix):
netloc = netloc[len(prefix):]
for suffix in _NETLOC_COMMON_SUFFIXES:
if netloc.endswith(suffix):
netloc = netloc[:-len(suffix)]
url = '%s%s%s%s' % (netloc, url.path, url.params, url.query)
# Get the extension:
for ext in _KNOWN_EXTENSIONS:
if url.endswith(ext):
extension = ext
url = url[:-len(extension)]
break
else:
url, extension = os.path.splitext(url)
max_length -= len(extension)
# Replace non authorized chars (including '/') by '_':
url = re.sub(r'[^a-zA-Z0-9\.\-_]+', '_', url)
# Remove parts with no info:
for common_part in _URL_COMMON_PARTS:
url = url.replace(common_part, '_')
url = url.strip('_')
# Remove leading zeros in groups of numbers:
url = re.sub('(?<![0-9])0+(?=[0-9])', '', url)
# Decrease max size of URL components:
c_size = max(len(c) for c in re.split(r'[\.\-_]', url))
while c_size > 4 and len(url) > max_length:
c_size -= 1
url = re.sub(r'[^\.\-_]{4,}', lambda match: match.group(0)[:c_size], url)
return url[:max_length], extension | [
"def",
"_sanitize_url",
"(",
"url",
",",
"max_length",
")",
":",
"url",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
"netloc",
"=",
"url",
".",
"netloc",
"for",
"prefix",
"in",
"_NETLOC_COMMON_PREFIXES",
":",
"if",
"netloc",
".",
"startswith",
"(",
"prefix",
")",
":",
"netloc",
"=",
"netloc",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"for",
"suffix",
"in",
"_NETLOC_COMMON_SUFFIXES",
":",
"if",
"netloc",
".",
"endswith",
"(",
"suffix",
")",
":",
"netloc",
"=",
"netloc",
"[",
":",
"-",
"len",
"(",
"suffix",
")",
"]",
"url",
"=",
"'%s%s%s%s'",
"%",
"(",
"netloc",
",",
"url",
".",
"path",
",",
"url",
".",
"params",
",",
"url",
".",
"query",
")",
"# Get the extension:",
"for",
"ext",
"in",
"_KNOWN_EXTENSIONS",
":",
"if",
"url",
".",
"endswith",
"(",
"ext",
")",
":",
"extension",
"=",
"ext",
"url",
"=",
"url",
"[",
":",
"-",
"len",
"(",
"extension",
")",
"]",
"break",
"else",
":",
"url",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"url",
")",
"max_length",
"-=",
"len",
"(",
"extension",
")",
"# Replace non authorized chars (including '/') by '_':",
"url",
"=",
"re",
".",
"sub",
"(",
"r'[^a-zA-Z0-9\\.\\-_]+'",
",",
"'_'",
",",
"url",
")",
"# Remove parts with no info:",
"for",
"common_part",
"in",
"_URL_COMMON_PARTS",
":",
"url",
"=",
"url",
".",
"replace",
"(",
"common_part",
",",
"'_'",
")",
"url",
"=",
"url",
".",
"strip",
"(",
"'_'",
")",
"# Remove leading zeros in groups of numbers:",
"url",
"=",
"re",
".",
"sub",
"(",
"'(?<![0-9])0+(?=[0-9])'",
",",
"''",
",",
"url",
")",
"# Decrease max size of URL components:",
"c_size",
"=",
"max",
"(",
"len",
"(",
"c",
")",
"for",
"c",
"in",
"re",
".",
"split",
"(",
"r'[\\.\\-_]'",
",",
"url",
")",
")",
"while",
"c_size",
">",
"4",
"and",
"len",
"(",
"url",
")",
">",
"max_length",
":",
"c_size",
"-=",
"1",
"url",
"=",
"re",
".",
"sub",
"(",
"r'[^\\.\\-_]{4,}'",
",",
"lambda",
"match",
":",
"match",
".",
"group",
"(",
"0",
")",
"[",
":",
"c_size",
"]",
",",
"url",
")",
"return",
"url",
"[",
":",
"max_length",
"]",
",",
"extension"
] | Sanitize and shorten url to fit in max_length.
Function is stable: same input MUST ALWAYS give same result, across changes
in code as well. Different URLs might give same result.
As much as possible, the extension should be kept.
Heuristics are applied to only keep useful info from url.
1- Drop generic [sub]domains.
'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...'
'storage.googleapis.com/foo/...' -> 'foo/...'
'drive.google.com/bar/...' -> 'bar/...'
'github.com/baz/...' -> 'baz/...'
2- Remove leading '0's from url components:
'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords'
3- Truncate each component of url until total size fits or each component is
left with 4 chars (or total size is <= limit):
'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords'
(here truncate components to 4 chars per component max)
-> 'Move_64x6_png/trai-4-of-10.tfrecords'
4- Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def'
Args:
url: string, url to sanitize and shorten.
max_length: int, max length of result.
Returns:
(string, string): sanitized and shortened url, file extension. | [
"Sanitize",
"and",
"shorten",
"url",
"to",
"fit",
"in",
"max_length",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L102-L166 |
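A worked example combining the docstring's own heuristics; the exact result can vary with `_KNOWN_EXTENSIONS` and `_URL_COMMON_PARTS`, which this record does not show:

```python
from tensorflow_datasets.core.download.resource import _sanitize_url

url = "http://www.cs.toronto.edu/foo/train-00004-of-00010.tfrecords"
print(_sanitize_url(url, max_length=50))
# roughly ('cs.toronto.edu_foo_train-4-of-10', '.tfrecords'):
# 'www.' dropped, '/' replaced by '_', leading zeros removed, extension kept.
```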
26,334 | tensorflow/datasets | tensorflow_datasets/core/download/resource.py | get_dl_dirname | def get_dl_dirname(url):
"""Returns name of temp dir for given url."""
checksum = hashlib.sha256(tf.compat.as_bytes(url)).hexdigest()
return get_dl_fname(url, checksum) | python | def get_dl_dirname(url):
"""Returns name of temp dir for given url."""
checksum = hashlib.sha256(tf.compat.as_bytes(url)).hexdigest()
return get_dl_fname(url, checksum) | [
"def",
"get_dl_dirname",
"(",
"url",
")",
":",
"checksum",
"=",
"hashlib",
".",
"sha256",
"(",
"tf",
".",
"compat",
".",
"as_bytes",
"(",
"url",
")",
")",
".",
"hexdigest",
"(",
")",
"return",
"get_dl_fname",
"(",
"url",
",",
"checksum",
")"
] | Returns name of temp dir for given url. | [
"Returns",
"name",
"of",
"temp",
"dir",
"for",
"given",
"url",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L193-L196 |
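The directory name is derived from a SHA-256 of the full URL; a minimal illustration of the hashing step (`get_dl_fname` itself is defined elsewhere in this module):

```python
import hashlib

url = "https://example.com/data.zip"
checksum = hashlib.sha256(url.encode("utf-8")).hexdigest()
# get_dl_fname(url, checksum) then combines a sanitized url with this digest.
```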
26,335 | tensorflow/datasets | tensorflow_datasets/core/download/resource.py | _read_info | def _read_info(info_path):
"""Returns info dict or None."""
if not tf.io.gfile.exists(info_path):
return None
with tf.io.gfile.GFile(info_path) as info_f:
return json.load(info_f) | python | def _read_info(info_path):
"""Returns info dict or None."""
if not tf.io.gfile.exists(info_path):
return None
with tf.io.gfile.GFile(info_path) as info_f:
return json.load(info_f) | [
"def",
"_read_info",
"(",
"info_path",
")",
":",
"if",
"not",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"info_path",
")",
":",
"return",
"None",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"info_path",
")",
"as",
"info_f",
":",
"return",
"json",
".",
"load",
"(",
"info_f",
")"
] | Returns info dict or None. | [
"Returns",
"info",
"dict",
"or",
"None",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L204-L209 |
26,336 | tensorflow/datasets | tensorflow_datasets/core/download/resource.py | write_info_file | def write_info_file(resource, path, dataset_name, original_fname):
"""Write the INFO file next to local file.
Although the method is synchronized, there is still a risk two processes
running at the same time overlap here. Risk accepted, since potentially lost
data (`dataset_name`) is only for human consumption.
Args:
resource: resource for which to write the INFO file.
path: path of downloaded file.
dataset_name: name of the dataset for which the file was downloaded.
original_fname: name of file as downloaded.
"""
info_path = _get_info_path(path)
info = _read_info(info_path) or {}
urls = set(info.get('urls', []) + [resource.url])
dataset_names = info.get('dataset_names', [])
if dataset_name:
dataset_names.append(dataset_name)
if 'original_fname' in info and info['original_fname'] != original_fname:
raise AssertionError(
'`original_fname` "%s" stored in %s does NOT match "%s".' % (
info['original_fname'], info_path, original_fname))
info = dict(urls=list(urls), dataset_names=list(set(dataset_names)),
original_fname=original_fname)
with py_utils.atomic_write(info_path, 'w') as info_f:
json.dump(info, info_f, sort_keys=True) | python | def write_info_file(resource, path, dataset_name, original_fname):
"""Write the INFO file next to local file.
Although the method is synchronized, there is still a risk two processes
running at the same time overlap here. Risk accepted, since potentially lost
data (`dataset_name`) is only for human consumption.
Args:
resource: resource for which to write the INFO file.
path: path of downloaded file.
dataset_name: name of the dataset for which the file was downloaded.
original_fname: name of file as downloaded.
"""
info_path = _get_info_path(path)
info = _read_info(info_path) or {}
urls = set(info.get('urls', []) + [resource.url])
dataset_names = info.get('dataset_names', [])
if dataset_name:
dataset_names.append(dataset_name)
if 'original_fname' in info and info['original_fname'] != original_fname:
raise AssertionError(
'`original_fname` "%s" stored in %s does NOT match "%s".' % (
info['original_fname'], info_path, original_fname))
info = dict(urls=list(urls), dataset_names=list(set(dataset_names)),
original_fname=original_fname)
with py_utils.atomic_write(info_path, 'w') as info_f:
json.dump(info, info_f, sort_keys=True) | [
"def",
"write_info_file",
"(",
"resource",
",",
"path",
",",
"dataset_name",
",",
"original_fname",
")",
":",
"info_path",
"=",
"_get_info_path",
"(",
"path",
")",
"info",
"=",
"_read_info",
"(",
"info_path",
")",
"or",
"{",
"}",
"urls",
"=",
"set",
"(",
"info",
".",
"get",
"(",
"'urls'",
",",
"[",
"]",
")",
"+",
"[",
"resource",
".",
"url",
"]",
")",
"dataset_names",
"=",
"info",
".",
"get",
"(",
"'dataset_names'",
",",
"[",
"]",
")",
"if",
"dataset_name",
":",
"dataset_names",
".",
"append",
"(",
"dataset_name",
")",
"if",
"'original_fname'",
"in",
"info",
"and",
"info",
"[",
"'original_fname'",
"]",
"!=",
"original_fname",
":",
"raise",
"AssertionError",
"(",
"'`original_fname` \"%s\" stored in %s does NOT match \"%s\".'",
"%",
"(",
"info",
"[",
"'original_fname'",
"]",
",",
"info_path",
",",
"original_fname",
")",
")",
"info",
"=",
"dict",
"(",
"urls",
"=",
"list",
"(",
"urls",
")",
",",
"dataset_names",
"=",
"list",
"(",
"set",
"(",
"dataset_names",
")",
")",
",",
"original_fname",
"=",
"original_fname",
")",
"with",
"py_utils",
".",
"atomic_write",
"(",
"info_path",
",",
"'w'",
")",
"as",
"info_f",
":",
"json",
".",
"dump",
"(",
"info",
",",
"info_f",
",",
"sort_keys",
"=",
"True",
")"
] | Write the INFO file next to local file.
Although the method is synchronized, there is still a risk two processes
running at the same time overlap here. Risk accepted, since potentially lost
data (`dataset_name`) is only for human consumption.
Args:
resource: resource for which to write the INFO file.
path: path of downloaded file.
dataset_name: name of the dataset for which the file was downloaded.
original_fname: name of file as downloaded. | [
"Write",
"the",
"INFO",
"file",
"next",
"to",
"local",
"file",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L214-L240 |
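A sketch of the INFO sidecar this function writes next to a download; the keys match the `info` dict in the record, while the values are illustrative:

```python
info = {
    "dataset_names": ["mnist"],
    "original_fname": "mnist.npz",
    "urls": ["https://storage.googleapis.com/example/mnist.npz"],
}
# json.dump(info, info_f, sort_keys=True) serializes it atomically to the
# path returned by _get_info_path(path), next to the downloaded file.
```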
26,337 | tensorflow/datasets | tensorflow_datasets/core/download/resource.py | get_extract_method | def get_extract_method(path):
"""Returns `ExtractMethod` to use on resource at path. Cannot be None."""
info_path = _get_info_path(path)
info = _read_info(info_path)
fname = info.get('original_fname', path) if info else path
return _guess_extract_method(fname) | python | def get_extract_method(path):
"""Returns `ExtractMethod` to use on resource at path. Cannot be None."""
info_path = _get_info_path(path)
info = _read_info(info_path)
fname = info.get('original_fname', path) if info else path
return _guess_extract_method(fname) | [
"def",
"get_extract_method",
"(",
"path",
")",
":",
"info_path",
"=",
"_get_info_path",
"(",
"path",
")",
"info",
"=",
"_read_info",
"(",
"info_path",
")",
"fname",
"=",
"info",
".",
"get",
"(",
"'original_fname'",
",",
"path",
")",
"if",
"info",
"else",
"path",
"return",
"_guess_extract_method",
"(",
"fname",
")"
] | Returns `ExtractMethod` to use on resource at path. Cannot be None. | [
"Returns",
"ExtractMethod",
"to",
"use",
"on",
"resource",
"at",
"path",
".",
"Cannot",
"be",
"None",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L243-L248 |
26,338 | tensorflow/datasets | tensorflow_datasets/core/download/resource.py | Resource.exists_locally | def exists_locally(cls, path):
"""Returns whether the resource exists locally, at `resource.path`."""
# If INFO file doesn't exist, consider resource does NOT exist, as it would
# prevent guessing the `extract_method`.
return (tf.io.gfile.exists(path) and
tf.io.gfile.exists(_get_info_path(path))) | python | def exists_locally(cls, path):
"""Returns whether the resource exists locally, at `resource.path`."""
# If INFO file doesn't exist, consider resource does NOT exist, as it would
# prevent guessing the `extract_method`.
return (tf.io.gfile.exists(path) and
tf.io.gfile.exists(_get_info_path(path))) | [
"def",
"exists_locally",
"(",
"cls",
",",
"path",
")",
":",
"# If INFO file doesn't exist, consider resource does NOT exist, as it would",
"# prevent guessing the `extract_method`.",
"return",
"(",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"path",
")",
"and",
"tf",
".",
"io",
".",
"gfile",
".",
"exists",
"(",
"_get_info_path",
"(",
"path",
")",
")",
")"
] | Returns whether the resource exists locally, at `resource.path`. | [
"Returns",
"whether",
"the",
"resource",
"exists",
"locally",
"at",
"resource",
".",
"path",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L273-L278 |
26,340 | tensorflow/datasets | tensorflow_datasets/core/features/text_feature.py | Text.maybe_build_from_corpus | def maybe_build_from_corpus(self, corpus_generator, **kwargs):
"""Call SubwordTextEncoder.build_from_corpus if encoder_cls is such."""
if self._encoder_cls is not text_lib.SubwordTextEncoder:
return
if self.encoder:
return
vocab_size = self._encoder_config.vocab_size
self.encoder = text_lib.SubwordTextEncoder.build_from_corpus(
corpus_generator=corpus_generator,
target_vocab_size=vocab_size,
**kwargs) | python | def maybe_build_from_corpus(self, corpus_generator, **kwargs):
"""Call SubwordTextEncoder.build_from_corpus if encoder_cls is such."""
if self._encoder_cls is not text_lib.SubwordTextEncoder:
return
if self.encoder:
return
vocab_size = self._encoder_config.vocab_size
self.encoder = text_lib.SubwordTextEncoder.build_from_corpus(
corpus_generator=corpus_generator,
target_vocab_size=vocab_size,
**kwargs) | [
"def",
"maybe_build_from_corpus",
"(",
"self",
",",
"corpus_generator",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_encoder_cls",
"is",
"not",
"text_lib",
".",
"SubwordTextEncoder",
":",
"return",
"if",
"self",
".",
"encoder",
":",
"return",
"vocab_size",
"=",
"self",
".",
"_encoder_config",
".",
"vocab_size",
"self",
".",
"encoder",
"=",
"text_lib",
".",
"SubwordTextEncoder",
".",
"build_from_corpus",
"(",
"corpus_generator",
"=",
"corpus_generator",
",",
"target_vocab_size",
"=",
"vocab_size",
",",
"*",
"*",
"kwargs",
")"
] | Call SubwordTextEncoder.build_from_corpus if encoder_cls is such. | [
"Call",
"SubwordTextEncoder",
".",
"build_from_corpus",
"if",
"encoder_cls",
"is",
"such",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text_feature.py#L137-L148 |
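A usage sketch of the lazy-build contract above: the vocabulary is built at most once, because a second call returns early once `self.encoder` is set. The corpus sentences and vocab size below are illustrative; the config API assumed here is `tfds.features.text.TextEncoderConfig` from the same library.

import tensorflow_datasets as tfds

text_feature = tfds.features.Text(
    encoder_config=tfds.features.text.TextEncoderConfig(
        encoder_cls=tfds.features.text.SubwordTextEncoder,
        vocab_size=2**13))
# First call builds the subword vocabulary from the generator;
# subsequent calls are no-ops because `self.encoder` is then set.
text_feature.maybe_build_from_corpus(
    s for s in ["hello world", "goodbye world"])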
26,340 | tensorflow/datasets | tensorflow_datasets/core/naming.py | sharded_filenames | def sharded_filenames(filename_prefix, num_shards):
"""Sharded filenames given prefix and number of shards."""
shard_suffix = "%05d-of-%05d"
return [
"%s-%s" % (filename_prefix, shard_suffix % (i, num_shards))
for i in range(num_shards)
] | python | def sharded_filenames(filename_prefix, num_shards):
"""Sharded filenames given prefix and number of shards."""
shard_suffix = "%05d-of-%05d"
return [
"%s-%s" % (filename_prefix, shard_suffix % (i, num_shards))
for i in range(num_shards)
] | [
"def",
"sharded_filenames",
"(",
"filename_prefix",
",",
"num_shards",
")",
":",
"shard_suffix",
"=",
"\"%05d-of-%05d\"",
"return",
"[",
"\"%s-%s\"",
"%",
"(",
"filename_prefix",
",",
"shard_suffix",
"%",
"(",
"i",
",",
"num_shards",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_shards",
")",
"]"
] | Sharded filenames given prefix and number of shards. | [
"Sharded",
"filenames",
"given",
"prefix",
"and",
"number",
"of",
"shards",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/naming.py#L52-L58 |
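A worked example, derived directly from the `%05d-of-%05d` format string above:

sharded_filenames("mnist-train.tfrecord", 2)
# -> ['mnist-train.tfrecord-00000-of-00002',
#     'mnist-train.tfrecord-00001-of-00002']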
26,341 | tensorflow/datasets | tensorflow_datasets/image/omniglot.py | _walk_omniglot_dir | def _walk_omniglot_dir(directory):
"""Walk an Omniglot directory and yield examples."""
directory = os.path.join(directory, tf.io.gfile.listdir(directory)[0])
alphabets = sorted(tf.io.gfile.listdir(directory))
for alphabet in alphabets:
alphabet_dir = os.path.join(directory, alphabet)
characters = sorted(tf.io.gfile.listdir(alphabet_dir))
for character in characters:
character_id = int(character[len("character"):]) - 1
character_dir = os.path.join(alphabet_dir, character)
images = tf.io.gfile.listdir(character_dir)
for image in images:
label, _ = image.split("_")
label = int(label) - 1
image_path = os.path.join(character_dir, image)
yield alphabet, character_id, label, image_path | python | def _walk_omniglot_dir(directory):
"""Walk an Omniglot directory and yield examples."""
directory = os.path.join(directory, tf.io.gfile.listdir(directory)[0])
alphabets = sorted(tf.io.gfile.listdir(directory))
for alphabet in alphabets:
alphabet_dir = os.path.join(directory, alphabet)
characters = sorted(tf.io.gfile.listdir(alphabet_dir))
for character in characters:
character_id = int(character[len("character"):]) - 1
character_dir = os.path.join(alphabet_dir, character)
images = tf.io.gfile.listdir(character_dir)
for image in images:
label, _ = image.split("_")
label = int(label) - 1
image_path = os.path.join(character_dir, image)
yield alphabet, character_id, label, image_path | [
"def",
"_walk_omniglot_dir",
"(",
"directory",
")",
":",
"directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"directory",
")",
"[",
"0",
"]",
")",
"alphabets",
"=",
"sorted",
"(",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"directory",
")",
")",
"for",
"alphabet",
"in",
"alphabets",
":",
"alphabet_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"alphabet",
")",
"characters",
"=",
"sorted",
"(",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"alphabet_dir",
")",
")",
"for",
"character",
"in",
"characters",
":",
"character_id",
"=",
"int",
"(",
"character",
"[",
"len",
"(",
"\"character\"",
")",
":",
"]",
")",
"-",
"1",
"character_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"alphabet_dir",
",",
"character",
")",
"images",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"character_dir",
")",
"for",
"image",
"in",
"images",
":",
"label",
",",
"_",
"=",
"image",
".",
"split",
"(",
"\"_\"",
")",
"label",
"=",
"int",
"(",
"label",
")",
"-",
"1",
"image_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"character_dir",
",",
"image",
")",
"yield",
"alphabet",
",",
"character_id",
",",
"label",
",",
"image_path"
] | Walk an Omniglot directory and yield examples. | [
"Walk",
"an",
"Omniglot",
"directory",
"and",
"yield",
"examples",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/omniglot.py#L128-L143 |
26,342 | tensorflow/datasets | tensorflow_datasets/image/omniglot.py | _get_names | def _get_names(dirs):
"""Get alphabet and label names, union across all dirs."""
alphabets = set()
label_names = {}
for d in dirs:
for example in _walk_omniglot_dir(d):
alphabet, alphabet_char_id, label, _ = example
alphabets.add(alphabet)
label_name = "%s_%d" % (alphabet, alphabet_char_id)
if label in label_names:
assert label_names[label] == label_name
else:
label_names[label] = label_name
label_names = [label_names[k] for k in sorted(label_names)]
return alphabets, label_names | python | def _get_names(dirs):
"""Get alphabet and label names, union across all dirs."""
alphabets = set()
label_names = {}
for d in dirs:
for example in _walk_omniglot_dir(d):
alphabet, alphabet_char_id, label, _ = example
alphabets.add(alphabet)
label_name = "%s_%d" % (alphabet, alphabet_char_id)
if label in label_names:
assert label_names[label] == label_name
else:
label_names[label] = label_name
label_names = [label_names[k] for k in sorted(label_names)]
return alphabets, label_names | [
"def",
"_get_names",
"(",
"dirs",
")",
":",
"alphabets",
"=",
"set",
"(",
")",
"label_names",
"=",
"{",
"}",
"for",
"d",
"in",
"dirs",
":",
"for",
"example",
"in",
"_walk_omniglot_dir",
"(",
"d",
")",
":",
"alphabet",
",",
"alphabet_char_id",
",",
"label",
",",
"_",
"=",
"example",
"alphabets",
".",
"add",
"(",
"alphabet",
")",
"label_name",
"=",
"\"%s_%d\"",
"%",
"(",
"alphabet",
",",
"alphabet_char_id",
")",
"if",
"label",
"in",
"label_names",
":",
"assert",
"label_names",
"[",
"label",
"]",
"==",
"label_name",
"else",
":",
"label_names",
"[",
"label",
"]",
"=",
"label_name",
"label_names",
"=",
"[",
"label_names",
"[",
"k",
"]",
"for",
"k",
"in",
"sorted",
"(",
"label_names",
")",
"]",
"return",
"alphabets",
",",
"label_names"
] | Get alphabet and label names, union across all dirs. | [
"Get",
"alphabet",
"and",
"label",
"names",
"union",
"across",
"all",
"dirs",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/omniglot.py#L146-L160 |
26,343 | tensorflow/datasets | tensorflow_datasets/core/units.py | size_str | def size_str(size_in_bytes):
"""Returns a human readable size string.
If size_in_bytes is None, then returns "?? GiB".
For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string.
"""
if not size_in_bytes:
return "?? GiB"
size_in_bytes = float(size_in_bytes)
for (name, size_bytes) in _NAME_LIST:
value = size_in_bytes / size_bytes
if value >= 1.0:
return "{:.2f} {}".format(value, name)
return "{} {}".format(int(size_in_bytes), "bytes") | python | def size_str(size_in_bytes):
"""Returns a human readable size string.
If size_in_bytes is None, then returns "?? GiB".
For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string.
"""
if not size_in_bytes:
return "?? GiB"
size_in_bytes = float(size_in_bytes)
for (name, size_bytes) in _NAME_LIST:
value = size_in_bytes / size_bytes
if value >= 1.0:
return "{:.2f} {}".format(value, name)
return "{} {}".format(int(size_in_bytes), "bytes") | [
"def",
"size_str",
"(",
"size_in_bytes",
")",
":",
"if",
"not",
"size_in_bytes",
":",
"return",
"\"?? GiB\"",
"size_in_bytes",
"=",
"float",
"(",
"size_in_bytes",
")",
"for",
"(",
"name",
",",
"size_bytes",
")",
"in",
"_NAME_LIST",
":",
"value",
"=",
"size_in_bytes",
"/",
"size_bytes",
"if",
"value",
">=",
"1.0",
":",
"return",
"\"{:.2f} {}\"",
".",
"format",
"(",
"value",
",",
"name",
")",
"return",
"\"{} {}\"",
".",
"format",
"(",
"int",
"(",
"size_in_bytes",
")",
",",
"\"bytes\"",
")"
] | Returns a human readable size string.
If size_in_bytes is None, then returns "?? GiB".
For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string. | [
"Returns",
"a",
"human",
"readable",
"size",
"string",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/units.py#L34-L53 |
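A few concrete evaluations of the fall-through logic above, assuming `_NAME_LIST` pairs unit names with their byte sizes in decreasing order, e.g. `("GiB", 1024**3)`, `("MiB", 1024**2)`, and so on:

size_str(None)           # -> "?? GiB"   (falsy input takes the early return)
size_str(1.5 * 1024**3)  # -> "1.50 GiB"
size_str(500)            # -> "500 bytes" (smaller than every listed unit)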
26,344 | tensorflow/datasets | tensorflow_datasets/core/download/downloader.py | _Downloader.tqdm | def tqdm(self):
"""Add a progress bar for the current download."""
async_tqdm = utils.async_tqdm
with async_tqdm(total=0, desc='Dl Completed...', unit=' url') as pbar_url:
with async_tqdm(total=0, desc='Dl Size...', unit=' MiB') as pbar_dl_size:
self._pbar_url = pbar_url
self._pbar_dl_size = pbar_dl_size
yield | python | def tqdm(self):
"""Add a progress bar for the current download."""
async_tqdm = utils.async_tqdm
with async_tqdm(total=0, desc='Dl Completed...', unit=' url') as pbar_url:
with async_tqdm(total=0, desc='Dl Size...', unit=' MiB') as pbar_dl_size:
self._pbar_url = pbar_url
self._pbar_dl_size = pbar_dl_size
yield | [
"def",
"tqdm",
"(",
"self",
")",
":",
"async_tqdm",
"=",
"utils",
".",
"async_tqdm",
"with",
"async_tqdm",
"(",
"total",
"=",
"0",
",",
"desc",
"=",
"'Dl Completed...'",
",",
"unit",
"=",
"' url'",
")",
"as",
"pbar_url",
":",
"with",
"async_tqdm",
"(",
"total",
"=",
"0",
",",
"desc",
"=",
"'Dl Size...'",
",",
"unit",
"=",
"' MiB'",
")",
"as",
"pbar_dl_size",
":",
"self",
".",
"_pbar_url",
"=",
"pbar_url",
"self",
".",
"_pbar_dl_size",
"=",
"pbar_dl_size",
"yield"
] | Add a progress bar for the current download. | [
"Add",
"a",
"progress",
"bar",
"for",
"the",
"current",
"download",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L84-L91 |
26,345 | tensorflow/datasets | tensorflow_datasets/core/download/downloader.py | _Downloader.download | def download(self, url, destination_path):
"""Download url to given path.
Returns Promise -> sha256 of downloaded file.
Args:
url: address of resource to download.
destination_path: `str`, path to directory where to download the resource.
Returns:
Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).
"""
self._pbar_url.update_total(1)
future = self._executor.submit(self._sync_download, url, destination_path)
return promise.Promise.resolve(future) | python | def download(self, url, destination_path):
"""Download url to given path.
Returns Promise -> sha256 of downloaded file.
Args:
url: address of resource to download.
destination_path: `str`, path to directory where to download the resource.
Returns:
Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).
"""
self._pbar_url.update_total(1)
future = self._executor.submit(self._sync_download, url, destination_path)
return promise.Promise.resolve(future) | [
"def",
"download",
"(",
"self",
",",
"url",
",",
"destination_path",
")",
":",
"self",
".",
"_pbar_url",
".",
"update_total",
"(",
"1",
")",
"future",
"=",
"self",
".",
"_executor",
".",
"submit",
"(",
"self",
".",
"_sync_download",
",",
"url",
",",
"destination_path",
")",
"return",
"promise",
".",
"Promise",
".",
"resolve",
"(",
"future",
")"
] | Download url to given path.
Returns Promise -> sha256 of downloaded file.
Args:
url: address of resource to download.
destination_path: `str`, path to directory where to download the resource.
Returns:
Promise obj -> (`str`, int): (downloaded object checksum, size in bytes). | [
"Download",
"url",
"to",
"given",
"path",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L93-L107 |
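The noteworthy detail above is bridging a `concurrent.futures` future into a `promise.Promise`, so callers can chain `.then(...)` callbacks onto a thread-pool task. A minimal sketch of that bridge with the same `promise` package; the blocking `.get()` call is assumed from that package's API rather than from this module.

from concurrent import futures

import promise

executor = futures.ThreadPoolExecutor(max_workers=2)
future = executor.submit(lambda: 7 * 7)
# Promise.resolve accepts a future and settles when the future completes.
p = promise.Promise.resolve(future)
print(p.then(lambda v: v + 1).get())  # -> 50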
26,346 | tensorflow/datasets | tensorflow_datasets/core/download/downloader.py | _Downloader._sync_kaggle_download | def _sync_kaggle_download(self, kaggle_url, destination_path):
"""Download with Kaggle API."""
kaggle_file = kaggle.KaggleFile.from_url(kaggle_url)
downloader = self.kaggle_downloader(kaggle_file.competition)
filepath = downloader.download_file(kaggle_file.filename, destination_path)
dl_size = tf.io.gfile.stat(filepath).length
checksum = self._checksumer()
with tf.io.gfile.GFile(filepath, 'rb') as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), dl_size | python | def _sync_kaggle_download(self, kaggle_url, destination_path):
"""Download with Kaggle API."""
kaggle_file = kaggle.KaggleFile.from_url(kaggle_url)
downloader = self.kaggle_downloader(kaggle_file.competition)
filepath = downloader.download_file(kaggle_file.filename, destination_path)
dl_size = tf.io.gfile.stat(filepath).length
checksum = self._checksumer()
with tf.io.gfile.GFile(filepath, 'rb') as f:
while True:
block = f.read(io.DEFAULT_BUFFER_SIZE)
if not block:
break
checksum.update(block)
return checksum.hexdigest(), dl_size | [
"def",
"_sync_kaggle_download",
"(",
"self",
",",
"kaggle_url",
",",
"destination_path",
")",
":",
"kaggle_file",
"=",
"kaggle",
".",
"KaggleFile",
".",
"from_url",
"(",
"kaggle_url",
")",
"downloader",
"=",
"self",
".",
"kaggle_downloader",
"(",
"kaggle_file",
".",
"competition",
")",
"filepath",
"=",
"downloader",
".",
"download_file",
"(",
"kaggle_file",
".",
"filename",
",",
"destination_path",
")",
"dl_size",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"stat",
"(",
"filepath",
")",
".",
"length",
"checksum",
"=",
"self",
".",
"_checksumer",
"(",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
",",
"'rb'",
")",
"as",
"f",
":",
"while",
"True",
":",
"block",
"=",
"f",
".",
"read",
"(",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
"if",
"not",
"block",
":",
"break",
"checksum",
".",
"update",
"(",
"block",
")",
"return",
"checksum",
".",
"hexdigest",
"(",
")",
",",
"dl_size"
] | Download with Kaggle API. | [
"Download",
"with",
"Kaggle",
"API",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L109-L123 |
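Stripped of the Kaggle specifics, the read loop above is the standard constant-memory way to checksum a file of arbitrary size. A sketch with `hashlib.sha256` standing in for `self._checksumer`, which is configured elsewhere in this module:

import hashlib
import io

def file_sha256(path):
  checksum = hashlib.sha256()
  with open(path, 'rb') as f:
    while True:
      block = f.read(io.DEFAULT_BUFFER_SIZE)
      if not block:  # EOF
        break
      checksum.update(block)
  return checksum.hexdigest()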
26,347 | tensorflow/datasets | tensorflow_datasets/core/download/downloader.py | _Downloader._get_drive_url | def _get_drive_url(self, url, session):
"""Returns url, possibly with confirmation token."""
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError(
'Failed to get url %s. HTTP code: %d.' % (url, response.status_code))
for k, v in response.cookies.items():
if k.startswith('download_warning'):
return url + '&confirm=' + v # v is the confirm token
# No token found, let's try with original URL:
return url | python | def _get_drive_url(self, url, session):
"""Returns url, possibly with confirmation token."""
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError(
'Failed to get url %s. HTTP code: %d.' % (url, response.status_code))
for k, v in response.cookies.items():
if k.startswith('download_warning'):
return url + '&confirm=' + v # v is the confirm token
# No token found, let's try with original URL:
return url | [
"def",
"_get_drive_url",
"(",
"self",
",",
"url",
",",
"session",
")",
":",
"response",
"=",
"session",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"DownloadError",
"(",
"'Failed to get url %s. HTTP code: %d.'",
"%",
"(",
"url",
",",
"response",
".",
"status_code",
")",
")",
"for",
"k",
",",
"v",
"in",
"response",
".",
"cookies",
".",
"items",
"(",
")",
":",
"if",
"k",
".",
"startswith",
"(",
"'download_warning'",
")",
":",
"return",
"url",
"+",
"'&confirm='",
"+",
"v",
"# v is the confirm token",
"# No token found, let's try with original URL:",
"return",
"url"
] | Returns url, possibly with confirmation token. | [
"Returns",
"url",
"possibly",
"with",
"confirmation",
"token",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L125-L135 |
26,348 | tensorflow/datasets | tensorflow_datasets/core/download/downloader.py | _Downloader._sync_download | def _sync_download(self, url, destination_path):
"""Synchronous version of `download` method."""
proxies = {
'http': os.environ.get('TFDS_HTTP_PROXY', None),
'https': os.environ.get('TFDS_HTTPS_PROXY', None),
'ftp': os.environ.get('TFDS_FTP_PROXY', None)
}
if kaggle.KaggleFile.is_kaggle_url(url):
if proxies['http']:
os.environ['KAGGLE_PROXY'] = proxies['http']
return self._sync_kaggle_download(url, destination_path)
try:
# If url is on a filesystem that gfile understands, use copy. Otherwise,
# use requests.
if not url.startswith('http'):
return self._sync_file_copy(url, destination_path)
except tf.errors.UnimplementedError:
pass
session = requests.Session()
session.proxies = proxies
if _DRIVE_URL.match(url):
url = self._get_drive_url(url, session)
use_urllib = url.startswith('ftp')
if use_urllib:
if proxies['ftp']:
proxy = urllib.request.ProxyHandler({'ftp': proxies['ftp']})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener) # pylint: disable=too-many-function-args
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
else:
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError('Failed to get url %s. HTTP code: %d.' %
(url, response.status_code))
fname = _get_filename(response)
path = os.path.join(destination_path, fname)
size = 0
size_mb = 0
unit_mb = units.MiB
self._pbar_dl_size.update_total(
int(response.headers.get('Content-length', 0)) // unit_mb)
with tf.io.gfile.GFile(path, 'wb') as file_:
checksum = self._checksumer()
if use_urllib:
iterator = iter(lambda: response.read(io.DEFAULT_BUFFER_SIZE), b'')
else:
iterator = response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE)
for block in iterator:
size += len(block)
# Update the progress bar
size_mb += len(block)
if size_mb > unit_mb:
self._pbar_dl_size.update(size_mb // unit_mb)
size_mb %= unit_mb
checksum.update(block)
file_.write(block)
self._pbar_url.update(1)
return checksum.hexdigest(), size | python | def _sync_download(self, url, destination_path):
"""Synchronous version of `download` method."""
proxies = {
'http': os.environ.get('TFDS_HTTP_PROXY', None),
'https': os.environ.get('TFDS_HTTPS_PROXY', None),
'ftp': os.environ.get('TFDS_FTP_PROXY', None)
}
if kaggle.KaggleFile.is_kaggle_url(url):
if proxies['http']:
os.environ['KAGGLE_PROXY'] = proxies['http']
return self._sync_kaggle_download(url, destination_path)
try:
# If url is on a filesystem that gfile understands, use copy. Otherwise,
# use requests.
if not url.startswith('http'):
return self._sync_file_copy(url, destination_path)
except tf.errors.UnimplementedError:
pass
session = requests.Session()
session.proxies = proxies
if _DRIVE_URL.match(url):
url = self._get_drive_url(url, session)
use_urllib = url.startswith('ftp')
if use_urllib:
if proxies['ftp']:
proxy = urllib.request.ProxyHandler({'ftp': proxies['ftp']})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener) # pylint: disable=too-many-function-args
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
else:
response = session.get(url, stream=True)
if response.status_code != 200:
raise DownloadError('Failed to get url %s. HTTP code: %d.' %
(url, response.status_code))
fname = _get_filename(response)
path = os.path.join(destination_path, fname)
size = 0
size_mb = 0
unit_mb = units.MiB
self._pbar_dl_size.update_total(
int(response.headers.get('Content-length', 0)) // unit_mb)
with tf.io.gfile.GFile(path, 'wb') as file_:
checksum = self._checksumer()
if use_urllib:
iterator = iter(lambda: response.read(io.DEFAULT_BUFFER_SIZE), b'')
else:
iterator = response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE)
for block in iterator:
size += len(block)
# Update the progress bar
size_mb += len(block)
if size_mb > unit_mb:
self._pbar_dl_size.update(size_mb // unit_mb)
size_mb %= unit_mb
checksum.update(block)
file_.write(block)
self._pbar_url.update(1)
return checksum.hexdigest(), size | [
"def",
"_sync_download",
"(",
"self",
",",
"url",
",",
"destination_path",
")",
":",
"proxies",
"=",
"{",
"'http'",
":",
"os",
".",
"environ",
".",
"get",
"(",
"'TFDS_HTTP_PROXY'",
",",
"None",
")",
",",
"'https'",
":",
"os",
".",
"environ",
".",
"get",
"(",
"'TFDS_HTTPS_PROXY'",
",",
"None",
")",
",",
"'ftp'",
":",
"os",
".",
"environ",
".",
"get",
"(",
"'TFDS_FTP_PROXY'",
",",
"None",
")",
"}",
"if",
"kaggle",
".",
"KaggleFile",
".",
"is_kaggle_url",
"(",
"url",
")",
":",
"if",
"proxies",
"[",
"'http'",
"]",
":",
"os",
".",
"environ",
"[",
"'KAGGLE_PROXY'",
"]",
"=",
"proxies",
"[",
"'http'",
"]",
"return",
"self",
".",
"_sync_kaggle_download",
"(",
"url",
",",
"destination_path",
")",
"try",
":",
"# If url is on a filesystem that gfile understands, use copy. Otherwise,",
"# use requests.",
"if",
"not",
"url",
".",
"startswith",
"(",
"'http'",
")",
":",
"return",
"self",
".",
"_sync_file_copy",
"(",
"url",
",",
"destination_path",
")",
"except",
"tf",
".",
"errors",
".",
"UnimplementedError",
":",
"pass",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"session",
".",
"proxies",
"=",
"proxies",
"if",
"_DRIVE_URL",
".",
"match",
"(",
"url",
")",
":",
"url",
"=",
"self",
".",
"_get_drive_url",
"(",
"url",
",",
"session",
")",
"use_urllib",
"=",
"url",
".",
"startswith",
"(",
"'ftp'",
")",
"if",
"use_urllib",
":",
"if",
"proxies",
"[",
"'ftp'",
"]",
":",
"proxy",
"=",
"urllib",
".",
"request",
".",
"ProxyHandler",
"(",
"{",
"'ftp'",
":",
"proxies",
"[",
"'ftp'",
"]",
"}",
")",
"opener",
"=",
"urllib",
".",
"request",
".",
"build_opener",
"(",
"proxy",
")",
"urllib",
".",
"request",
".",
"install_opener",
"(",
"opener",
")",
"# pylint: disable=too-many-function-args",
"request",
"=",
"urllib",
".",
"request",
".",
"Request",
"(",
"url",
")",
"response",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"request",
")",
"else",
":",
"response",
"=",
"session",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"DownloadError",
"(",
"'Failed to get url %s. HTTP code: %d.'",
"%",
"(",
"url",
",",
"response",
".",
"status_code",
")",
")",
"fname",
"=",
"_get_filename",
"(",
"response",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination_path",
",",
"fname",
")",
"size",
"=",
"0",
"size_mb",
"=",
"0",
"unit_mb",
"=",
"units",
".",
"MiB",
"self",
".",
"_pbar_dl_size",
".",
"update_total",
"(",
"int",
"(",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-length'",
",",
"0",
")",
")",
"//",
"unit_mb",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"path",
",",
"'wb'",
")",
"as",
"file_",
":",
"checksum",
"=",
"self",
".",
"_checksumer",
"(",
")",
"if",
"use_urllib",
":",
"iterator",
"=",
"iter",
"(",
"lambda",
":",
"response",
".",
"read",
"(",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
",",
"b''",
")",
"else",
":",
"iterator",
"=",
"response",
".",
"iter_content",
"(",
"chunk_size",
"=",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
"for",
"block",
"in",
"iterator",
":",
"size",
"+=",
"len",
"(",
"block",
")",
"# Update the progress bar",
"size_mb",
"+=",
"len",
"(",
"block",
")",
"if",
"size_mb",
">",
"unit_mb",
":",
"self",
".",
"_pbar_dl_size",
".",
"update",
"(",
"size_mb",
"//",
"unit_mb",
")",
"size_mb",
"%=",
"unit_mb",
"checksum",
".",
"update",
"(",
"block",
")",
"file_",
".",
"write",
"(",
"block",
")",
"self",
".",
"_pbar_url",
".",
"update",
"(",
"1",
")",
"return",
"checksum",
".",
"hexdigest",
"(",
")",
",",
"size"
] | Synchronous version of `download` method. | [
"Synchronous",
"version",
"of",
"download",
"method",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L144-L208 |
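One subtle piece above is the progress-bar bookkeeping: byte counts accumulate in `size_mb` and are flushed to the bar one whole MiB at a time, with the remainder carried forward. The same accounting in isolation, on illustrative block sizes:

MIB = 2 ** 20

def mib_updates(block_lengths):
  """Yields the number of whole MiB to report after each block."""
  pending = 0
  for n in block_lengths:
    pending += n
    if pending > MIB:
      yield pending // MIB  # report the whole MiB...
      pending %= MIB        # ...and carry the remainder forward
    else:
      yield 0

print(list(mib_updates([500000, 600000, 2000000])))  # -> [0, 1, 1]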
26,349 | tensorflow/datasets | tensorflow_datasets/image/diabetic_retinopathy_detection.py | DiabeticRetinopathyDetection._generate_examples | def _generate_examples(self, images_dir_path, csv_path=None, csv_usage=None):
"""Yields Example instances from given CSV.
Args:
images_dir_path: path to dir in which images are stored.
csv_path: optional, path to csv file with two columns: name of image and
label. If not provided, just scan image directory, don't set labels.
csv_usage: optional, subset of examples from the csv file to use based on
the "Usage" column from the csv.
"""
if csv_path:
with tf.io.gfile.GFile(csv_path) as csv_f:
reader = csv.DictReader(csv_f)
data = [(row["image"], int(row["level"]))
for row in reader
if csv_usage is None or row["Usage"] == csv_usage]
else:
data = [(fname[:-5], -1)
for fname in tf.io.gfile.listdir(images_dir_path)
if fname.endswith(".jpeg")]
for name, label in data:
yield {
"name": name,
"image": _resize_image_if_necessary(
tf.io.gfile.GFile("%s/%s.jpeg" % (images_dir_path, name),
mode="rb"),
target_pixels=self.builder_config.target_pixels),
"label": label,
} | python | def _generate_examples(self, images_dir_path, csv_path=None, csv_usage=None):
"""Yields Example instances from given CSV.
Args:
images_dir_path: path to dir in which images are stored.
csv_path: optional, path to csv file with two columns: name of image and
label. If not provided, just scan image directory, don't set labels.
csv_usage: optional, subset of examples from the csv file to use based on
the "Usage" column from the csv.
"""
if csv_path:
with tf.io.gfile.GFile(csv_path) as csv_f:
reader = csv.DictReader(csv_f)
data = [(row["image"], int(row["level"]))
for row in reader
if csv_usage is None or row["Usage"] == csv_usage]
else:
data = [(fname[:-5], -1)
for fname in tf.io.gfile.listdir(images_dir_path)
if fname.endswith(".jpeg")]
for name, label in data:
yield {
"name": name,
"image": _resize_image_if_necessary(
tf.io.gfile.GFile("%s/%s.jpeg" % (images_dir_path, name),
mode="rb"),
target_pixels=self.builder_config.target_pixels),
"label": label,
} | [
"def",
"_generate_examples",
"(",
"self",
",",
"images_dir_path",
",",
"csv_path",
"=",
"None",
",",
"csv_usage",
"=",
"None",
")",
":",
"if",
"csv_path",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"csv_path",
")",
"as",
"csv_f",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"csv_f",
")",
"data",
"=",
"[",
"(",
"row",
"[",
"\"image\"",
"]",
",",
"int",
"(",
"row",
"[",
"\"level\"",
"]",
")",
")",
"for",
"row",
"in",
"reader",
"if",
"csv_usage",
"is",
"None",
"or",
"row",
"[",
"\"Usage\"",
"]",
"==",
"csv_usage",
"]",
"else",
":",
"data",
"=",
"[",
"(",
"fname",
"[",
":",
"-",
"5",
"]",
",",
"-",
"1",
")",
"for",
"fname",
"in",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"images_dir_path",
")",
"if",
"fname",
".",
"endswith",
"(",
"\".jpeg\"",
")",
"]",
"for",
"name",
",",
"label",
"in",
"data",
":",
"yield",
"{",
"\"name\"",
":",
"name",
",",
"\"image\"",
":",
"_resize_image_if_necessary",
"(",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"\"%s/%s.jpeg\"",
"%",
"(",
"images_dir_path",
",",
"name",
")",
",",
"mode",
"=",
"\"rb\"",
")",
",",
"target_pixels",
"=",
"self",
".",
"builder_config",
".",
"target_pixels",
")",
",",
"\"label\"",
":",
"label",
",",
"}"
] | Yields Example instances from given CSV.
Args:
images_dir_path: path to dir in which images are stored.
csv_path: optional, path to csv file with two columns: name of image and
label. If not provided, just scan image directory, don't set labels.
csv_usage: optional, subset of examples from the csv file to use based on
the "Usage" column from the csv. | [
"Yields",
"Example",
"instances",
"from",
"given",
"CSV",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/diabetic_retinopathy_detection.py#L150-L178 |
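The CSV branch above leans on `csv.DictReader`, which keys each row by the header line, so the `Usage` filter is a plain dictionary lookup. The same logic on an in-memory CSV with illustrative contents:

import csv
import io

csv_text = u"image,level,Usage\nimg_a,0,Public\nimg_b,3,Private\n"
reader = csv.DictReader(io.StringIO(csv_text))
data = [(row["image"], int(row["level"]))
        for row in reader
        if row["Usage"] == "Private"]
print(data)  # -> [('img_b', 3)]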
26,350 | tensorflow/datasets | tensorflow_datasets/core/dataset_builder.py | FileAdapterBuilder._slice_split_info_to_instruction_dicts | def _slice_split_info_to_instruction_dicts(self, list_sliced_split_info):
"""Return the list of files and reading mask of the files to read."""
instruction_dicts = []
for sliced_split_info in list_sliced_split_info:
mask = splits_lib.slice_to_percent_mask(sliced_split_info.slice_value)
# Compute filenames from the given split
filepaths = list(sorted(self._build_split_filenames(
split_info_list=[sliced_split_info.split_info],
)))
# Compute the offsets
if sliced_split_info.split_info.num_examples:
shard_id2num_examples = splits_lib.get_shard_id2num_examples(
sliced_split_info.split_info.num_shards,
sliced_split_info.split_info.num_examples,
)
mask_offsets = splits_lib.compute_mask_offsets(shard_id2num_examples)
else:
logging.warning(
"Statistics not present in the dataset. TFDS is not able to load "
"the total number of examples, so using the subsplit API may not "
"provide precise subsplits."
)
mask_offsets = [0] * len(filepaths)
for filepath, mask_offset in zip(filepaths, mask_offsets):
instruction_dicts.append({
"filepath": filepath,
"mask": mask,
"mask_offset": mask_offset,
})
return instruction_dicts | python | def _slice_split_info_to_instruction_dicts(self, list_sliced_split_info):
"""Return the list of files and reading mask of the files to read."""
instruction_dicts = []
for sliced_split_info in list_sliced_split_info:
mask = splits_lib.slice_to_percent_mask(sliced_split_info.slice_value)
# Compute filenames from the given split
filepaths = list(sorted(self._build_split_filenames(
split_info_list=[sliced_split_info.split_info],
)))
# Compute the offsets
if sliced_split_info.split_info.num_examples:
shard_id2num_examples = splits_lib.get_shard_id2num_examples(
sliced_split_info.split_info.num_shards,
sliced_split_info.split_info.num_examples,
)
mask_offsets = splits_lib.compute_mask_offsets(shard_id2num_examples)
else:
logging.warning(
"Statistics not present in the dataset. TFDS is not able to load "
"the total number of examples, so using the subsplit API may not "
"provide precise subsplits."
)
mask_offsets = [0] * len(filepaths)
for filepath, mask_offset in zip(filepaths, mask_offsets):
instruction_dicts.append({
"filepath": filepath,
"mask": mask,
"mask_offset": mask_offset,
})
return instruction_dicts | [
"def",
"_slice_split_info_to_instruction_dicts",
"(",
"self",
",",
"list_sliced_split_info",
")",
":",
"instruction_dicts",
"=",
"[",
"]",
"for",
"sliced_split_info",
"in",
"list_sliced_split_info",
":",
"mask",
"=",
"splits_lib",
".",
"slice_to_percent_mask",
"(",
"sliced_split_info",
".",
"slice_value",
")",
"# Compute filenames from the given split",
"filepaths",
"=",
"list",
"(",
"sorted",
"(",
"self",
".",
"_build_split_filenames",
"(",
"split_info_list",
"=",
"[",
"sliced_split_info",
".",
"split_info",
"]",
",",
")",
")",
")",
"# Compute the offsets",
"if",
"sliced_split_info",
".",
"split_info",
".",
"num_examples",
":",
"shard_id2num_examples",
"=",
"splits_lib",
".",
"get_shard_id2num_examples",
"(",
"sliced_split_info",
".",
"split_info",
".",
"num_shards",
",",
"sliced_split_info",
".",
"split_info",
".",
"num_examples",
",",
")",
"mask_offsets",
"=",
"splits_lib",
".",
"compute_mask_offsets",
"(",
"shard_id2num_examples",
")",
"else",
":",
"logging",
".",
"warning",
"(",
"\"Statistics not present in the dataset. TFDS is not able to load \"",
"\"the total number of examples, so using the subsplit API may not \"",
"\"provide precise subsplits.\"",
")",
"mask_offsets",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"filepaths",
")",
"for",
"filepath",
",",
"mask_offset",
"in",
"zip",
"(",
"filepaths",
",",
"mask_offsets",
")",
":",
"instruction_dicts",
".",
"append",
"(",
"{",
"\"filepath\"",
":",
"filepath",
",",
"\"mask\"",
":",
"mask",
",",
"\"mask_offset\"",
":",
"mask_offset",
",",
"}",
")",
"return",
"instruction_dicts"
] | Return the list of files and reading mask of the files to read. | [
"Return",
"the",
"list",
"of",
"files",
"and",
"reading",
"mask",
"of",
"the",
"files",
"to",
"read",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_builder.py#L707-L739 |
26,351 | tensorflow/datasets | tensorflow_datasets/core/dataset_builder.py | FileAdapterBuilder._build_split_filenames | def _build_split_filenames(self, split_info_list):
"""Construct the split filenames associated with the split info.
The filenames correspond to the pre-processed dataset files present in
the root directory of the dataset.
Args:
split_info_list: (list[SplitInfo]) List of splits from which to generate
the filenames
Returns:
filenames: (list[str]) The list of filename paths corresponding to the
split info objects
"""
filenames = []
for split_info in split_info_list:
filenames.extend(naming.filepaths_for_dataset_split(
dataset_name=self.name,
split=split_info.name,
num_shards=split_info.num_shards,
data_dir=self._data_dir,
filetype_suffix=self._file_format_adapter.filetype_suffix,
))
return filenames | python | def _build_split_filenames(self, split_info_list):
"""Construct the split filenames associated with the split info.
The filenames correspond to the pre-processed dataset files present in
the root directory of the dataset.
Args:
split_info_list: (list[SplitInfo]) List of splits from which to generate
the filenames
Returns:
filenames: (list[str]) The list of filename paths corresponding to the
split info objects
"""
filenames = []
for split_info in split_info_list:
filenames.extend(naming.filepaths_for_dataset_split(
dataset_name=self.name,
split=split_info.name,
num_shards=split_info.num_shards,
data_dir=self._data_dir,
filetype_suffix=self._file_format_adapter.filetype_suffix,
))
return filenames | [
"def",
"_build_split_filenames",
"(",
"self",
",",
"split_info_list",
")",
":",
"filenames",
"=",
"[",
"]",
"for",
"split_info",
"in",
"split_info_list",
":",
"filenames",
".",
"extend",
"(",
"naming",
".",
"filepaths_for_dataset_split",
"(",
"dataset_name",
"=",
"self",
".",
"name",
",",
"split",
"=",
"split_info",
".",
"name",
",",
"num_shards",
"=",
"split_info",
".",
"num_shards",
",",
"data_dir",
"=",
"self",
".",
"_data_dir",
",",
"filetype_suffix",
"=",
"self",
".",
"_file_format_adapter",
".",
"filetype_suffix",
",",
")",
")",
"return",
"filenames"
] | Construct the split filenames associated with the split info.
The filenames correspond to the pre-processed dataset files present in
the root directory of the dataset.
Args:
split_info_list: (list[SplitInfo]) List of splits from which to generate
the filenames
Returns:
filenames: (list[str]) The list of filename paths corresponding to the
split info objects | [
"Construct",
"the",
"split",
"filenames",
"associated",
"with",
"the",
"split",
"info",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_builder.py#L741-L765 |
26,352 | tensorflow/datasets | tensorflow_datasets/video/moving_mnist.py | MovingMnist._generate_examples | def _generate_examples(self, data_path):
"""Generate MovingMnist sequences.
Args:
data_path (str): Path to the data file
Yields:
20 x 64 x 64 x 1 uint8 numpy arrays
"""
with tf.io.gfile.GFile(data_path, "rb") as fp:
images = np.load(fp)
images = np.transpose(images, (1, 0, 2, 3))
images = np.expand_dims(images, axis=-1)
for sequence in images:
yield dict(image_sequence=sequence) | python | def _generate_examples(self, data_path):
"""Generate MovingMnist sequences.
Args:
data_path (str): Path to the data file
Yields:
20 x 64 x 64 x 1 uint8 numpy arrays
"""
with tf.io.gfile.GFile(data_path, "rb") as fp:
images = np.load(fp)
images = np.transpose(images, (1, 0, 2, 3))
images = np.expand_dims(images, axis=-1)
for sequence in images:
yield dict(image_sequence=sequence) | [
"def",
"_generate_examples",
"(",
"self",
",",
"data_path",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"data_path",
",",
"\"rb\"",
")",
"as",
"fp",
":",
"images",
"=",
"np",
".",
"load",
"(",
"fp",
")",
"images",
"=",
"np",
".",
"transpose",
"(",
"images",
",",
"(",
"1",
",",
"0",
",",
"2",
",",
"3",
")",
")",
"images",
"=",
"np",
".",
"expand_dims",
"(",
"images",
",",
"axis",
"=",
"-",
"1",
")",
"for",
"sequence",
"in",
"images",
":",
"yield",
"dict",
"(",
"image_sequence",
"=",
"sequence",
")"
] | Generate MovingMnist sequences.
Args:
data_path (str): Path to the data file
Yields:
20 x 64 x 64 x 1 uint8 numpy arrays | [
"Generate",
"MovingMnist",
"sequences",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/moving_mnist.py#L85-L99 |
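The two numpy calls above convert the on-disk layout, which is time-major with shape (20, num_sequences, 64, 64), into one row per sequence with a trailing channel axis. The shape arithmetic, checked on dummy data:

import numpy as np

images = np.zeros((20, 3, 64, 64))           # (time, num_sequences, h, w)
images = np.transpose(images, (1, 0, 2, 3))  # -> (3, 20, 64, 64)
images = np.expand_dims(images, axis=-1)     # -> (3, 20, 64, 64, 1)
assert images.shape == (3, 20, 64, 64, 1)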
26,353 | tensorflow/datasets | tensorflow_datasets/video/starcraft.py | StarcraftVideo._parse_single_video | def _parse_single_video(self, example_proto):
"""Parses single video from the input tfrecords.
Args:
example_proto: tfExample proto with a single video.
Returns:
uint8 tensor with the decoded video frames.
"""
context_features = {
"game_duration_loops": tf.io.FixedLenFeature([1], tf.int64),
"game_duration_seconds": tf.io.FixedLenFeature([1], tf.float32),
"n_steps": tf.io.FixedLenFeature([1], tf.int64),
"screen_size": tf.io.FixedLenFeature([2], tf.int64),
}
sequence_features = {
"rgb_screen": tf.io.FixedLenSequenceFeature([], tf.string),
}
_, seq_feat = tf.io.parse_single_sequence_example(
example_proto,
context_features=context_features,
sequence_features=sequence_features)
video_frames = tf.map_fn(
tf.image.decode_png, seq_feat["rgb_screen"], dtype=tf.uint8)
return video_frames | python | def _parse_single_video(self, example_proto):
"""Parses single video from the input tfrecords.
Args:
example_proto: tfExample proto with a single video.
Returns:
uint8 tensor with the decoded video frames.
"""
context_features = {
"game_duration_loops": tf.io.FixedLenFeature([1], tf.int64),
"game_duration_seconds": tf.io.FixedLenFeature([1], tf.float32),
"n_steps": tf.io.FixedLenFeature([1], tf.int64),
"screen_size": tf.io.FixedLenFeature([2], tf.int64),
}
sequence_features = {
"rgb_screen": tf.io.FixedLenSequenceFeature([], tf.string),
}
_, seq_feat = tf.io.parse_single_sequence_example(
example_proto,
context_features=context_features,
sequence_features=sequence_features)
video_frames = tf.map_fn(
tf.image.decode_png, seq_feat["rgb_screen"], dtype=tf.uint8)
return video_frames | [
"def",
"_parse_single_video",
"(",
"self",
",",
"example_proto",
")",
":",
"context_features",
"=",
"{",
"\"game_duration_loops\"",
":",
"tf",
".",
"io",
".",
"FixedLenFeature",
"(",
"[",
"1",
"]",
",",
"tf",
".",
"int64",
")",
",",
"\"game_duration_seconds\"",
":",
"tf",
".",
"io",
".",
"FixedLenFeature",
"(",
"[",
"1",
"]",
",",
"tf",
".",
"float32",
")",
",",
"\"n_steps\"",
":",
"tf",
".",
"io",
".",
"FixedLenFeature",
"(",
"[",
"1",
"]",
",",
"tf",
".",
"int64",
")",
",",
"\"screen_size\"",
":",
"tf",
".",
"io",
".",
"FixedLenFeature",
"(",
"[",
"2",
"]",
",",
"tf",
".",
"int64",
")",
",",
"}",
"sequence_features",
"=",
"{",
"\"rgb_screen\"",
":",
"tf",
".",
"io",
".",
"FixedLenSequenceFeature",
"(",
"[",
"]",
",",
"tf",
".",
"string",
")",
",",
"}",
"_",
",",
"seq_feat",
"=",
"tf",
".",
"io",
".",
"parse_single_sequence_example",
"(",
"example_proto",
",",
"context_features",
"=",
"context_features",
",",
"sequence_features",
"=",
"sequence_features",
")",
"video_frames",
"=",
"tf",
".",
"map_fn",
"(",
"tf",
".",
"image",
".",
"decode_png",
",",
"seq_feat",
"[",
"\"rgb_screen\"",
"]",
",",
"dtype",
"=",
"tf",
".",
"uint8",
")",
"return",
"video_frames"
] | Parses single video from the input tfrecords.
Args:
example_proto: tfExample proto with a single video.
Returns:
uint8 tensor with the decoded video frames. | [
"Parses",
"single",
"video",
"from",
"the",
"input",
"tfrecords",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/starcraft.py#L181-L208 |
26,354 | tensorflow/datasets | tensorflow_datasets/image/dsprites.py | Dsprites._generate_examples | def _generate_examples(self, filepath):
"""Generates examples for the dSprites data set.
Args:
filepath: path to the dSprites hdf5 file.
Yields:
Dictionaries with images, latent classes, and latent values.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file is >100x slower and the data set is small (26.7MB). Hence, we first
# load everything into memory before yielding the samples.
image_array, class_array, values_array = _load_data(filepath)
for image, classes, values in moves.zip(image_array, class_array,
values_array):
yield dict(
image=np.expand_dims(image, -1),
label_shape=classes[1],
label_scale=classes[2],
label_orientation=classes[3],
label_x_position=classes[4],
label_y_position=classes[5],
value_shape=values[1],
value_scale=values[2],
value_orientation=values[3],
value_x_position=values[4],
value_y_position=values[5]) | python | def _generate_examples(self, filepath):
"""Generates examples for the dSprites data set.
Args:
filepath: path to the dSprites hdf5 file.
Yields:
Dictionaries with images, latent classes, and latent values.
"""
# Simultaneously iterating through the different data sets in the hdf5
# file is >100x slower and the data set is small (26.7MB). Hence, we first
# load everything into memory before yielding the samples.
image_array, class_array, values_array = _load_data(filepath)
for image, classes, values in moves.zip(image_array, class_array,
values_array):
yield dict(
image=np.expand_dims(image, -1),
label_shape=classes[1],
label_scale=classes[2],
label_orientation=classes[3],
label_x_position=classes[4],
label_y_position=classes[5],
value_shape=values[1],
value_scale=values[2],
value_orientation=values[3],
value_x_position=values[4],
value_y_position=values[5]) | [
"def",
"_generate_examples",
"(",
"self",
",",
"filepath",
")",
":",
"# Simultaneously iterating through the different data sets in the hdf5",
"# file is >100x slower and the data set is small (26.7MB). Hence, we first",
"# load everything into memory before yielding the samples.",
"image_array",
",",
"class_array",
",",
"values_array",
"=",
"_load_data",
"(",
"filepath",
")",
"for",
"image",
",",
"classes",
",",
"values",
"in",
"moves",
".",
"zip",
"(",
"image_array",
",",
"class_array",
",",
"values_array",
")",
":",
"yield",
"dict",
"(",
"image",
"=",
"np",
".",
"expand_dims",
"(",
"image",
",",
"-",
"1",
")",
",",
"label_shape",
"=",
"classes",
"[",
"1",
"]",
",",
"label_scale",
"=",
"classes",
"[",
"2",
"]",
",",
"label_orientation",
"=",
"classes",
"[",
"3",
"]",
",",
"label_x_position",
"=",
"classes",
"[",
"4",
"]",
",",
"label_y_position",
"=",
"classes",
"[",
"5",
"]",
",",
"value_shape",
"=",
"values",
"[",
"1",
"]",
",",
"value_scale",
"=",
"values",
"[",
"2",
"]",
",",
"value_orientation",
"=",
"values",
"[",
"3",
"]",
",",
"value_x_position",
"=",
"values",
"[",
"4",
"]",
",",
"value_y_position",
"=",
"values",
"[",
"5",
"]",
")"
] | Generates examples for the dSprites data set.
Args:
filepath: path to the dSprites hdf5 file.
Yields:
Dictionaries with images, latent classes, and latent values. | [
"Generates",
"examples",
"for",
"the",
"dSprites",
"data",
"set",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/dsprites.py#L117-L143 |
26,355 | tensorflow/datasets | tensorflow_datasets/image/open_images.py | _load_objects | def _load_objects(csv_paths, csv_positions, prefix):
"""Returns objects listed within given CSV files."""
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_paths, csv_positions, prefix)
objects = collections.defaultdict(list)
for i, labels_path in enumerate(csv_paths):
with tf.io.gfile.GFile(labels_path) as csv_f:
if csv_positions[i] > 0:
csv_f.seek(csv_positions[i])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for image_id, source, label, confidence in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[i] = csv_f.tell()
image_id = int(image_id, 16)
current_obj = _Object(label, int(float(confidence) * 10), source)
objects[image_id].append(current_obj)
return dict(objects) | python | def _load_objects(csv_paths, csv_positions, prefix):
"""Returns objects listed within given CSV files."""
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_paths, csv_positions, prefix)
objects = collections.defaultdict(list)
for i, labels_path in enumerate(csv_paths):
with tf.io.gfile.GFile(labels_path) as csv_f:
if csv_positions[i] > 0:
csv_f.seek(csv_positions[i])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for image_id, source, label, confidence in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[i] = csv_f.tell()
image_id = int(image_id, 16)
current_obj = _Object(label, int(float(confidence) * 10), source)
objects[image_id].append(current_obj)
return dict(objects) | [
"def",
"_load_objects",
"(",
"csv_paths",
",",
"csv_positions",
",",
"prefix",
")",
":",
"logging",
".",
"info",
"(",
"'Loading CSVs %s from positions %s with prefix %s'",
",",
"csv_paths",
",",
"csv_positions",
",",
"prefix",
")",
"objects",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"i",
",",
"labels_path",
"in",
"enumerate",
"(",
"csv_paths",
")",
":",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"labels_path",
")",
"as",
"csv_f",
":",
"if",
"csv_positions",
"[",
"i",
"]",
">",
"0",
":",
"csv_f",
".",
"seek",
"(",
"csv_positions",
"[",
"i",
"]",
")",
"else",
":",
"csv_f",
".",
"readline",
"(",
")",
"# Drop headers",
"reader",
"=",
"csv",
".",
"reader",
"(",
"csv_f",
")",
"for",
"image_id",
",",
"source",
",",
"label",
",",
"confidence",
"in",
"reader",
":",
"if",
"prefix",
"and",
"image_id",
"[",
"0",
"]",
"!=",
"prefix",
":",
"break",
"csv_positions",
"[",
"i",
"]",
"=",
"csv_f",
".",
"tell",
"(",
")",
"image_id",
"=",
"int",
"(",
"image_id",
",",
"16",
")",
"current_obj",
"=",
"_Object",
"(",
"label",
",",
"int",
"(",
"float",
"(",
"confidence",
")",
"*",
"10",
")",
",",
"source",
")",
"objects",
"[",
"image_id",
"]",
".",
"append",
"(",
"current_obj",
")",
"return",
"dict",
"(",
"objects",
")"
] | Returns objects listed within given CSV files. | [
"Returns",
"objects",
"listed",
"within",
"given",
"CSV",
"files",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L322-L341 |
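The `csv_positions` in/out argument above makes the scan resumable and prefix-bucketed: the caller passes the byte offset where the previous pass stopped, the function seeks there, and `tell()` records progress after every consumed row, so the first row of the next prefix is deliberately left unrecorded and re-read on the following pass. A stripped-down sketch of that cursor pattern; the helper name and header handling mirror the function above but are otherwise illustrative.

def read_prefix(f, positions, i, prefix):
  """Resume reading at positions[i]; consume rows while they match prefix."""
  if positions[i] > 0:
    f.seek(positions[i])
  else:
    f.readline()  # drop the header on the first pass
  rows = []
  while True:
    line = f.readline()
    if not line or not line.startswith(prefix):
      break  # boundary row is not recorded, so the next pass re-reads it
    positions[i] = f.tell()
    rows.append(line.rstrip())
  return rows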
26,356 | tensorflow/datasets | tensorflow_datasets/image/open_images.py | _load_bboxes | def _load_bboxes(csv_path, csv_positions, prefix):
"""Returns bounding boxes listed within given CSV file."""
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_path, csv_positions, prefix)
boxes = collections.defaultdict(list)
with tf.io.gfile.GFile(csv_path) as csv_f:
if csv_positions[0] > 0:
csv_f.seek(csv_positions[0])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for (image_id, source, label, confidence, xmin, xmax, ymin, ymax,
is_occluded, is_truncated, is_group_of, is_depiction, is_inside,
) in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[0] = csv_f.tell()
image_id = int(image_id, 16)
del confidence # always 1 in bounding boxes.
current_row = _Bbox(
label, source, tfds.features.BBox(
float(ymin), float(xmin), float(ymax), float(xmax)),
int(is_occluded), int(is_truncated),
int(is_group_of), int(is_depiction), int(is_inside))
boxes[image_id].append(current_row)
return dict(boxes) | python | def _load_bboxes(csv_path, csv_positions, prefix):
"""Returns bounding boxes listed within given CSV file."""
logging.info('Loading CSVs %s from positions %s with prefix %s',
csv_path, csv_positions, prefix)
boxes = collections.defaultdict(list)
with tf.io.gfile.GFile(csv_path) as csv_f:
if csv_positions[0] > 0:
csv_f.seek(csv_positions[0])
else:
csv_f.readline() # Drop headers
reader = csv.reader(csv_f)
for (image_id, source, label, confidence, xmin, xmax, ymin, ymax,
is_occluded, is_truncated, is_group_of, is_depiction, is_inside,
) in reader:
if prefix and image_id[0] != prefix:
break
csv_positions[0] = csv_f.tell()
image_id = int(image_id, 16)
del confidence # always 1 in bounding boxes.
current_row = _Bbox(
label, source, tfds.features.BBox(
float(ymin), float(xmin), float(ymax), float(xmax)),
int(is_occluded), int(is_truncated),
int(is_group_of), int(is_depiction), int(is_inside))
boxes[image_id].append(current_row)
return dict(boxes) | [
"def",
"_load_bboxes",
"(",
"csv_path",
",",
"csv_positions",
",",
"prefix",
")",
":",
"logging",
".",
"info",
"(",
"'Loading CSVs %s from positions %s with prefix %s'",
",",
"csv_path",
",",
"csv_positions",
",",
"prefix",
")",
"boxes",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"with",
"tf",
".",
"io",
".",
"gfile",
".",
"GFile",
"(",
"csv_path",
")",
"as",
"csv_f",
":",
"if",
"csv_positions",
"[",
"0",
"]",
">",
"0",
":",
"csv_f",
".",
"seek",
"(",
"csv_positions",
"[",
"0",
"]",
")",
"else",
":",
"csv_f",
".",
"readline",
"(",
")",
"# Drop headers",
"reader",
"=",
"csv",
".",
"reader",
"(",
"csv_f",
")",
"for",
"(",
"image_id",
",",
"source",
",",
"label",
",",
"confidence",
",",
"xmin",
",",
"xmax",
",",
"ymin",
",",
"ymax",
",",
"is_occluded",
",",
"is_truncated",
",",
"is_group_of",
",",
"is_depiction",
",",
"is_inside",
",",
")",
"in",
"reader",
":",
"if",
"prefix",
"and",
"image_id",
"[",
"0",
"]",
"!=",
"prefix",
":",
"break",
"csv_positions",
"[",
"0",
"]",
"=",
"csv_f",
".",
"tell",
"(",
")",
"image_id",
"=",
"int",
"(",
"image_id",
",",
"16",
")",
"del",
"confidence",
"# always 1 in bounding boxes.",
"current_row",
"=",
"_Bbox",
"(",
"label",
",",
"source",
",",
"tfds",
".",
"features",
".",
"BBox",
"(",
"float",
"(",
"ymin",
")",
",",
"float",
"(",
"xmin",
")",
",",
"float",
"(",
"ymax",
")",
",",
"float",
"(",
"xmax",
")",
")",
",",
"int",
"(",
"is_occluded",
")",
",",
"int",
"(",
"is_truncated",
")",
",",
"int",
"(",
"is_group_of",
")",
",",
"int",
"(",
"is_depiction",
")",
",",
"int",
"(",
"is_inside",
")",
")",
"boxes",
"[",
"image_id",
"]",
".",
"append",
"(",
"current_row",
")",
"return",
"dict",
"(",
"boxes",
")"
] | Returns bounding boxes listed within given CSV file. | [
"Returns",
"bounding",
"boxes",
"listed",
"within",
"given",
"CSV",
"file",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L344-L369 |
26,357 | tensorflow/datasets | tensorflow_datasets/text/imdb.py | IMDBReviews._generate_examples | def _generate_examples(self, archive, directory):
"""Generate IMDB examples."""
reg = re.compile(os.path.join("^%s" % directory, "(?P<label>neg|pos)", ""))
for path, imdb_f in archive:
res = reg.match(path)
if not res:
continue
text = imdb_f.read().strip()
yield {
"text": text,
"label": res.groupdict()["label"],
} | python | def _generate_examples(self, archive, directory):
"""Generate IMDB examples."""
reg = re.compile(os.path.join("^%s" % directory, "(?P<label>neg|pos)", ""))
for path, imdb_f in archive:
res = reg.match(path)
if not res:
continue
text = imdb_f.read().strip()
yield {
"text": text,
"label": res.groupdict()["label"],
} | [
"def",
"_generate_examples",
"(",
"self",
",",
"archive",
",",
"directory",
")",
":",
"reg",
"=",
"re",
".",
"compile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"^%s\"",
"%",
"directory",
",",
"\"(?P<label>neg|pos)\"",
",",
"\"\"",
")",
")",
"for",
"path",
",",
"imdb_f",
"in",
"archive",
":",
"res",
"=",
"reg",
".",
"match",
"(",
"path",
")",
"if",
"not",
"res",
":",
"continue",
"text",
"=",
"imdb_f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"yield",
"{",
"\"text\"",
":",
"text",
",",
"\"label\"",
":",
"res",
".",
"groupdict",
"(",
")",
"[",
"\"label\"",
"]",
",",
"}"
] | Generate IMDB examples. | [
"Generate",
"IMDB",
"examples",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/imdb.py#L146-L157 |
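The generator above filters archive members by path: only files under `<directory>/neg/` or `<directory>/pos/` produce examples, and the label falls out of the regex's named group. A quick illustration of that path filter with hypothetical paths (`directory` would normally come from the split setup):

```python
import os
import re

directory = "aclImdb/train"  # hypothetical extraction root
reg = re.compile(os.path.join("^%s" % directory, "(?P<label>neg|pos)", ""))

for path in ["aclImdb/train/pos/0_9.txt", "aclImdb/train/unsup/1_0.txt"]:
    res = reg.match(path)
    print(path, "->", res.groupdict()["label"] if res else "skipped")
```

Note that `os.path.join` only yields the intended pattern with POSIX separators, which is what paths inside the tar archive use.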
26,358 | tensorflow/datasets | tensorflow_datasets/text/cnn_dailymail.py | _get_url_hashes | def _get_url_hashes(path):
"""Get hashes of urls in file."""
urls = _read_text_file(path)
def url_hash(u):
h = hashlib.sha1()
try:
u = u.encode('utf-8')
except UnicodeDecodeError:
logging.error('Cannot hash url: %s', u)
h.update(u)
return h.hexdigest()
return {url_hash(u): True for u in urls} | python | def _get_url_hashes(path):
"""Get hashes of urls in file."""
urls = _read_text_file(path)
def url_hash(u):
h = hashlib.sha1()
try:
u = u.encode('utf-8')
except UnicodeDecodeError:
logging.error('Cannot hash url: %s', u)
h.update(u)
return h.hexdigest()
return {url_hash(u): True for u in urls} | [
"def",
"_get_url_hashes",
"(",
"path",
")",
":",
"urls",
"=",
"_read_text_file",
"(",
"path",
")",
"def",
"url_hash",
"(",
"u",
")",
":",
"h",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"try",
":",
"u",
"=",
"u",
".",
"encode",
"(",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
":",
"logging",
".",
"error",
"(",
"'Cannot hash url: %s'",
",",
"u",
")",
"h",
".",
"update",
"(",
"u",
")",
"return",
"h",
".",
"hexdigest",
"(",
")",
"return",
"{",
"url_hash",
"(",
"u",
")",
":",
"True",
"for",
"u",
"in",
"urls",
"}"
] | Get hashes of urls in file. | [
"Get",
"hashes",
"of",
"urls",
"in",
"file",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L97-L108 |
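Two small observations on `_get_url_hashes`: the `{hash: True}` dict is used purely for membership, and catching `UnicodeDecodeError` around `u.encode('utf-8')` reads like a Python 2 leftover, since in Python 3 `str.encode` raises `UnicodeEncodeError` instead. A set-based equivalent, assuming `urls` is an iterable of text:

```python
import hashlib

def url_hashes(urls):
    """SHA-1 hexdigests of the given URLs, as a set."""
    return {hashlib.sha1(u.encode("utf-8")).hexdigest() for u in urls}

print(url_hashes(["http://www.cnn.com/some-article"]))
```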
26,359 | tensorflow/datasets | tensorflow_datasets/text/cnn_dailymail.py | _find_files | def _find_files(dl_paths, publisher, url_dict):
"""Find files corresponding to urls."""
if publisher == 'cnn':
top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
elif publisher == 'dm':
top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
else:
logging.fatal('Unsupported publisher: %s', publisher)
files = tf.io.gfile.listdir(top_dir)
ret_files = []
for p in files:
basename = os.path.basename(p)
if basename[0:basename.find('.story')] in url_dict:
ret_files.append(os.path.join(top_dir, p))
return ret_files | python | def _find_files(dl_paths, publisher, url_dict):
"""Find files corresponding to urls."""
if publisher == 'cnn':
top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
elif publisher == 'dm':
top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
else:
logging.fatal('Unsupported publisher: %s', publisher)
files = tf.io.gfile.listdir(top_dir)
ret_files = []
for p in files:
basename = os.path.basename(p)
if basename[0:basename.find('.story')] in url_dict:
ret_files.append(os.path.join(top_dir, p))
return ret_files | [
"def",
"_find_files",
"(",
"dl_paths",
",",
"publisher",
",",
"url_dict",
")",
":",
"if",
"publisher",
"==",
"'cnn'",
":",
"top_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dl_paths",
"[",
"'cnn_stories'",
"]",
",",
"'cnn'",
",",
"'stories'",
")",
"elif",
"publisher",
"==",
"'dm'",
":",
"top_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dl_paths",
"[",
"'dm_stories'",
"]",
",",
"'dailymail'",
",",
"'stories'",
")",
"else",
":",
"logging",
".",
"fatal",
"(",
"'Unsupported publisher: %s'",
",",
"publisher",
")",
"files",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"listdir",
"(",
"top_dir",
")",
"ret_files",
"=",
"[",
"]",
"for",
"p",
"in",
"files",
":",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"p",
")",
"if",
"basename",
"[",
"0",
":",
"basename",
".",
"find",
"(",
"'.story'",
")",
"]",
"in",
"url_dict",
":",
"ret_files",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"top_dir",
",",
"p",
")",
")",
"return",
"ret_files"
] | Find files corresponding to urls. | [
"Find",
"files",
"corresponding",
"to",
"urls",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L111-L126 |
26,360 | tensorflow/datasets | tensorflow_datasets/text/cnn_dailymail.py | _subset_filenames | def _subset_filenames(dl_paths, split):
"""Get filenames for a particular split."""
assert isinstance(dl_paths, dict), dl_paths
# Get filenames for a split.
if split == tfds.Split.TRAIN:
urls = _get_url_hashes(dl_paths['train_urls'])
elif split == tfds.Split.VALIDATION:
urls = _get_url_hashes(dl_paths['val_urls'])
elif split == tfds.Split.TEST:
urls = _get_url_hashes(dl_paths['test_urls'])
else:
logging.fatal('Unsupported split: %s', split)
cnn = _find_files(dl_paths, 'cnn', urls)
dm = _find_files(dl_paths, 'dm', urls)
return cnn + dm | python | def _subset_filenames(dl_paths, split):
"""Get filenames for a particular split."""
assert isinstance(dl_paths, dict), dl_paths
# Get filenames for a split.
if split == tfds.Split.TRAIN:
urls = _get_url_hashes(dl_paths['train_urls'])
elif split == tfds.Split.VALIDATION:
urls = _get_url_hashes(dl_paths['val_urls'])
elif split == tfds.Split.TEST:
urls = _get_url_hashes(dl_paths['test_urls'])
else:
logging.fatal('Unsupported split: %s', split)
cnn = _find_files(dl_paths, 'cnn', urls)
dm = _find_files(dl_paths, 'dm', urls)
return cnn + dm | [
"def",
"_subset_filenames",
"(",
"dl_paths",
",",
"split",
")",
":",
"assert",
"isinstance",
"(",
"dl_paths",
",",
"dict",
")",
",",
"dl_paths",
"# Get filenames for a split.",
"if",
"split",
"==",
"tfds",
".",
"Split",
".",
"TRAIN",
":",
"urls",
"=",
"_get_url_hashes",
"(",
"dl_paths",
"[",
"'train_urls'",
"]",
")",
"elif",
"split",
"==",
"tfds",
".",
"Split",
".",
"VALIDATION",
":",
"urls",
"=",
"_get_url_hashes",
"(",
"dl_paths",
"[",
"'val_urls'",
"]",
")",
"elif",
"split",
"==",
"tfds",
".",
"Split",
".",
"TEST",
":",
"urls",
"=",
"_get_url_hashes",
"(",
"dl_paths",
"[",
"'test_urls'",
"]",
")",
"else",
":",
"logging",
".",
"fatal",
"(",
"'Unsupported split: %s'",
",",
"split",
")",
"cnn",
"=",
"_find_files",
"(",
"dl_paths",
",",
"'cnn'",
",",
"urls",
")",
"dm",
"=",
"_find_files",
"(",
"dl_paths",
",",
"'dm'",
",",
"urls",
")",
"return",
"cnn",
"+",
"dm"
] | Get filenames for a particular split. | [
"Get",
"filenames",
"for",
"a",
"particular",
"split",
"."
] | 46ceb0cf7b4690f38ecbbc689e4d659a903d08dc | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L129-L143 |
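The split plumbing in these helpers works because story files are named after the SHA-1 of their source URL (as `_get_url_hashes` and `_find_files` together imply), so hashing the split's URL list and intersecting with on-disk basenames selects exactly that split's stories. A small self-contained demonstration of that join, with synthetic file names:

```python
import hashlib

def url_hash(u):
    return hashlib.sha1(u.encode("utf-8")).hexdigest()

url_dict = {url_hash("http://www.cnn.com/a"): True}
files = [url_hash("http://www.cnn.com/a") + ".story", "other.story"]
kept = [f for f in files if f[0:f.find(".story")] in url_dict]
print(kept)  # only the story whose hashed URL belongs to the split survives
```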
26,361 | s0md3v/Photon | plugins/exporter.py | exporter | def exporter(directory, method, datasets):
"""Export the results."""
if method.lower() == 'json':
# Convert json_dict to a JSON styled string
json_string = json.dumps(datasets, indent=4)
savefile = open('{}/exported.json'.format(directory), 'w+')
savefile.write(json_string)
savefile.close()
if method.lower() == 'csv':
with open('{}/exported.csv'.format(directory), 'w+') as csvfile:
csv_writer = csv.writer(
csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for key, values in datasets.items():
if values is None:
csv_writer.writerow([key])
else:
csv_writer.writerow([key] + values)
csvfile.close() | python | def exporter(directory, method, datasets):
"""Export the results."""
if method.lower() == 'json':
# Convert json_dict to a JSON styled string
json_string = json.dumps(datasets, indent=4)
savefile = open('{}/exported.json'.format(directory), 'w+')
savefile.write(json_string)
savefile.close()
if method.lower() == 'csv':
with open('{}/exported.csv'.format(directory), 'w+') as csvfile:
csv_writer = csv.writer(
csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for key, values in datasets.items():
if values is None:
csv_writer.writerow([key])
else:
csv_writer.writerow([key] + values)
csvfile.close() | [
"def",
"exporter",
"(",
"directory",
",",
"method",
",",
"datasets",
")",
":",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"'json'",
":",
"# Convert json_dict to a JSON styled string",
"json_string",
"=",
"json",
".",
"dumps",
"(",
"datasets",
",",
"indent",
"=",
"4",
")",
"savefile",
"=",
"open",
"(",
"'{}/exported.json'",
".",
"format",
"(",
"directory",
")",
",",
"'w+'",
")",
"savefile",
".",
"write",
"(",
"json_string",
")",
"savefile",
".",
"close",
"(",
")",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"'csv'",
":",
"with",
"open",
"(",
"'{}/exported.csv'",
".",
"format",
"(",
"directory",
")",
",",
"'w+'",
")",
"as",
"csvfile",
":",
"csv_writer",
"=",
"csv",
".",
"writer",
"(",
"csvfile",
",",
"delimiter",
"=",
"','",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
")",
"for",
"key",
",",
"values",
"in",
"datasets",
".",
"items",
"(",
")",
":",
"if",
"values",
"is",
"None",
":",
"csv_writer",
".",
"writerow",
"(",
"[",
"key",
"]",
")",
"else",
":",
"csv_writer",
".",
"writerow",
"(",
"[",
"key",
"]",
"+",
"values",
")",
"csvfile",
".",
"close",
"(",
")"
] | Export the results. | [
"Export",
"the",
"results",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/exporter.py#L6-L24 |
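A context-managed variant of the same export logic, as a sketch: the JSON branch above manages its file handle manually, and the CSV branch calls `csvfile.close()` inside the `with` block, where closing is already handled. Neither is wrong, but the idiom below keeps both paths uniform:

```python
import csv
import json

def export(directory, method, datasets):
    """Same behavior, with both file handles context-managed."""
    if method.lower() == "json":
        with open("{}/exported.json".format(directory), "w+") as f:
            json.dump(datasets, f, indent=4)
    elif method.lower() == "csv":
        with open("{}/exported.csv".format(directory), "w+", newline="") as f:
            writer = csv.writer(f, delimiter=",", quoting=csv.QUOTE_MINIMAL)
            for key, values in datasets.items():
                writer.writerow([key] if values is None else [key] + values)
```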
26,362 | s0md3v/Photon | plugins/wayback.py | time_machine | def time_machine(host, mode):
"""Query archive.org."""
now = datetime.datetime.now()
to = str(now.year) + str(now.day) + str(now.month)
if now.month > 6:
fro = str(now.year) + str(now.day) + str(now.month - 6)
else:
fro = str(now.year - 1) + str(now.day) + str(now.month + 6)
url = "http://web.archive.org/cdx/search?url=%s&matchType=%s&collapse=urlkey&fl=original&filter=mimetype:text/html&filter=statuscode:200&output=json&from=%s&to=%s" % (host, mode, fro, to)
response = get(url).text
parsed = json.loads(response)[1:]
urls = []
for item in parsed:
urls.append(item[0])
return urls | python | def time_machine(host, mode):
"""Query archive.org."""
now = datetime.datetime.now()
to = str(now.year) + str(now.day) + str(now.month)
if now.month > 6:
fro = str(now.year) + str(now.day) + str(now.month - 6)
else:
fro = str(now.year - 1) + str(now.day) + str(now.month + 6)
url = "http://web.archive.org/cdx/search?url=%s&matchType=%s&collapse=urlkey&fl=original&filter=mimetype:text/html&filter=statuscode:200&output=json&from=%s&to=%s" % (host, mode, fro, to)
response = get(url).text
parsed = json.loads(response)[1:]
urls = []
for item in parsed:
urls.append(item[0])
return urls | [
"def",
"time_machine",
"(",
"host",
",",
"mode",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"to",
"=",
"str",
"(",
"now",
".",
"year",
")",
"+",
"str",
"(",
"now",
".",
"day",
")",
"+",
"str",
"(",
"now",
".",
"month",
")",
"if",
"now",
".",
"month",
">",
"6",
":",
"fro",
"=",
"str",
"(",
"now",
".",
"year",
")",
"+",
"str",
"(",
"now",
".",
"day",
")",
"+",
"str",
"(",
"now",
".",
"month",
"-",
"6",
")",
"else",
":",
"fro",
"=",
"str",
"(",
"now",
".",
"year",
"-",
"1",
")",
"+",
"str",
"(",
"now",
".",
"day",
")",
"+",
"str",
"(",
"now",
".",
"month",
"+",
"6",
")",
"url",
"=",
"\"http://web.archive.org/cdx/search?url=%s&matchType=%s&collapse=urlkey&fl=original&filter=mimetype:text/html&filter=statuscode:200&output=json&from=%s&to=%s\"",
"%",
"(",
"host",
",",
"mode",
",",
"fro",
",",
"to",
")",
"response",
"=",
"get",
"(",
"url",
")",
".",
"text",
"parsed",
"=",
"json",
".",
"loads",
"(",
"response",
")",
"[",
"1",
":",
"]",
"urls",
"=",
"[",
"]",
"for",
"item",
"in",
"parsed",
":",
"urls",
".",
"append",
"(",
"item",
"[",
"0",
"]",
")",
"return",
"urls"
] | Query archive.org. | [
"Query",
"archive",
".",
"org",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/wayback.py#L8-L22 |
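The `fro`/`to` strings in `time_machine` concatenate year, day, then month without zero padding, while the CDX API's `from`/`to` parameters expect `YYYYMMDD` timestamps, so the window can come out scrambled (January 5, 2024 yields `202451`, for example). A `strftime`-based sketch that produces the intended layout, clamping the day to avoid invalid dates such as February 31:

```python
import datetime

def cdx_window(months=6):
    """Return (from, to) as YYYYMMDD strings covering ~`months` months."""
    now = datetime.datetime.now()
    to = now.strftime("%Y%m%d")
    year, month = now.year, now.month - months
    if month <= 0:
        year, month = year - 1, month + 12
    fro = datetime.date(year, month, min(now.day, 28)).strftime("%Y%m%d")
    return fro, to

print(cdx_window())
```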
26,363 | s0md3v/Photon | core/zap.py | zap | def zap(input_url, archive, domain, host, internal, robots, proxies):
"""Extract links from robots.txt and sitemap.xml."""
if archive:
print('%s Fetching URLs from archive.org' % run)
if False:
archived_urls = time_machine(domain, 'domain')
else:
archived_urls = time_machine(host, 'host')
print('%s Retrieved %i URLs from archive.org' % (
good, len(archived_urls) - 1))
for url in archived_urls:
verb('Internal page', url)
internal.add(url)
# Makes request to robots.txt
response = requests.get(input_url + '/robots.txt',
proxies=random.choice(proxies)).text
# Making sure robots.txt isn't some fancy 404 page
if '<body' not in response:
# If you know it, you know it
matches = re.findall(r'Allow: (.*)|Disallow: (.*)', response)
if matches:
# Iterating over the matches, match is a tuple here
for match in matches:
# One item in match will always be empty so will combine both
# items
match = ''.join(match)
# If the URL doesn't use a wildcard
if '*' not in match:
url = input_url + match
# Add the URL to internal list for crawling
internal.add(url)
# Add the URL to robots list
robots.add(url)
print('%s URLs retrieved from robots.txt: %s' % (good, len(robots)))
# Makes request to sitemap.xml
response = requests.get(input_url + '/sitemap.xml',
proxies=random.choice(proxies)).text
# Making sure sitemap.xml isn't some fancy 404 page
if '<body' not in response:
matches = xml_parser(response)
if matches: # if there are any matches
print('%s URLs retrieved from sitemap.xml: %s' % (
good, len(matches)))
for match in matches:
verb('Internal page', match)
# Cleaning up the URL and adding it to the internal list for
# crawling
internal.add(match) | python | def zap(input_url, archive, domain, host, internal, robots, proxies):
"""Extract links from robots.txt and sitemap.xml."""
if archive:
print('%s Fetching URLs from archive.org' % run)
if False:
archived_urls = time_machine(domain, 'domain')
else:
archived_urls = time_machine(host, 'host')
print('%s Retrieved %i URLs from archive.org' % (
good, len(archived_urls) - 1))
for url in archived_urls:
verb('Internal page', url)
internal.add(url)
# Makes request to robots.txt
response = requests.get(input_url + '/robots.txt',
proxies=random.choice(proxies)).text
# Making sure robots.txt isn't some fancy 404 page
if '<body' not in response:
# If you know it, you know it
matches = re.findall(r'Allow: (.*)|Disallow: (.*)', response)
if matches:
# Iterating over the matches, match is a tuple here
for match in matches:
# One item in match will always be empty so will combine both
# items
match = ''.join(match)
# If the URL doesn't use a wildcard
if '*' not in match:
url = input_url + match
# Add the URL to internal list for crawling
internal.add(url)
# Add the URL to robots list
robots.add(url)
print('%s URLs retrieved from robots.txt: %s' % (good, len(robots)))
# Makes request to sitemap.xml
response = requests.get(input_url + '/sitemap.xml',
proxies=random.choice(proxies)).text
# Making sure sitemap.xml isn't some fancy 404 page
if '<body' not in response:
matches = xml_parser(response)
if matches: # if there are any matches
print('%s URLs retrieved from sitemap.xml: %s' % (
good, len(matches)))
for match in matches:
verb('Internal page', match)
# Cleaning up the URL and adding it to the internal list for
# crawling
internal.add(match) | [
"def",
"zap",
"(",
"input_url",
",",
"archive",
",",
"domain",
",",
"host",
",",
"internal",
",",
"robots",
",",
"proxies",
")",
":",
"if",
"archive",
":",
"print",
"(",
"'%s Fetching URLs from archive.org'",
"%",
"run",
")",
"if",
"False",
":",
"archived_urls",
"=",
"time_machine",
"(",
"domain",
",",
"'domain'",
")",
"else",
":",
"archived_urls",
"=",
"time_machine",
"(",
"host",
",",
"'host'",
")",
"print",
"(",
"'%s Retrieved %i URLs from archive.org'",
"%",
"(",
"good",
",",
"len",
"(",
"archived_urls",
")",
"-",
"1",
")",
")",
"for",
"url",
"in",
"archived_urls",
":",
"verb",
"(",
"'Internal page'",
",",
"url",
")",
"internal",
".",
"add",
"(",
"url",
")",
"# Makes request to robots.txt",
"response",
"=",
"requests",
".",
"get",
"(",
"input_url",
"+",
"'/robots.txt'",
",",
"proxies",
"=",
"random",
".",
"choice",
"(",
"proxies",
")",
")",
".",
"text",
"# Making sure robots.txt isn't some fancy 404 page",
"if",
"'<body'",
"not",
"in",
"response",
":",
"# If you know it, you know it",
"matches",
"=",
"re",
".",
"findall",
"(",
"r'Allow: (.*)|Disallow: (.*)'",
",",
"response",
")",
"if",
"matches",
":",
"# Iterating over the matches, match is a tuple here",
"for",
"match",
"in",
"matches",
":",
"# One item in match will always be empty so will combine both",
"# items",
"match",
"=",
"''",
".",
"join",
"(",
"match",
")",
"# If the URL doesn't use a wildcard",
"if",
"'*'",
"not",
"in",
"match",
":",
"url",
"=",
"input_url",
"+",
"match",
"# Add the URL to internal list for crawling",
"internal",
".",
"add",
"(",
"url",
")",
"# Add the URL to robots list",
"robots",
".",
"add",
"(",
"url",
")",
"print",
"(",
"'%s URLs retrieved from robots.txt: %s'",
"%",
"(",
"good",
",",
"len",
"(",
"robots",
")",
")",
")",
"# Makes request to sitemap.xml",
"response",
"=",
"requests",
".",
"get",
"(",
"input_url",
"+",
"'/sitemap.xml'",
",",
"proxies",
"=",
"random",
".",
"choice",
"(",
"proxies",
")",
")",
".",
"text",
"# Making sure robots.txt isn't some fancy 404 page",
"if",
"'<body'",
"not",
"in",
"response",
":",
"matches",
"=",
"xml_parser",
"(",
"response",
")",
"if",
"matches",
":",
"# if there are any matches",
"print",
"(",
"'%s URLs retrieved from sitemap.xml: %s'",
"%",
"(",
"good",
",",
"len",
"(",
"matches",
")",
")",
")",
"for",
"match",
"in",
"matches",
":",
"verb",
"(",
"'Internal page'",
",",
"match",
")",
"# Cleaning up the URL and adding it to the internal list for",
"# crawling",
"internal",
".",
"add",
"(",
"match",
")"
] | Extract links from robots.txt and sitemap.xml. | [
"Extract",
"links",
"from",
"robots",
".",
"txt",
"and",
"sitemap",
".",
"xml",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/zap.py#L10-L57 |
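Note that the `if False:` guard above permanently disables the domain-wide archive lookup, so only the host-scoped branch ever runs, and robots rules containing `*` are deliberately skipped rather than expanded. The rule extraction itself can be tried standalone:

```python
import re

robots_txt = """User-agent: *
Disallow: /admin
Allow: /public
Disallow: /tmp/*
"""
matches = re.findall(r'Allow: (.*)|Disallow: (.*)', robots_txt)
# One tuple slot is always empty, so joining recovers the path.
paths = {''.join(m) for m in matches if '*' not in ''.join(m)}
print(paths)  # wildcard rules are dropped; the rest become crawl seeds
```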
26,364 | s0md3v/Photon | core/requester.py | requester | def requester(
url,
main_url=None,
delay=0,
cook=None,
headers=None,
timeout=10,
host=None,
proxies=[None],
user_agents=[None],
failed=None,
processed=None
):
"""Handle the requests and return the response body."""
cook = cook or set()
headers = headers or set()
user_agents = user_agents or ['Photon']
failed = failed or set()
processed = processed or set()
# Mark the URL as crawled
processed.add(url)
# Pause/sleep the program for specified time
time.sleep(delay)
def make_request(url):
"""Default request"""
final_headers = headers or {
'Host': host,
# Selecting a random user-agent
'User-Agent': random.choice(user_agents),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip',
'DNT': '1',
'Connection': 'close',
}
try:
response = SESSION.get(
url,
cookies=cook,
headers=final_headers,
verify=False,
timeout=timeout,
stream=True,
proxies=random.choice(proxies)
)
except TooManyRedirects:
return 'dummy'
if 'text/html' in response.headers['content-type'] or \
'text/plain' in response.headers['content-type']:
if response.status_code != 404:
return response.text
else:
response.close()
failed.add(url)
return 'dummy'
else:
response.close()
return 'dummy'
return make_request(url) | python | def requester(
url,
main_url=None,
delay=0,
cook=None,
headers=None,
timeout=10,
host=None,
proxies=[None],
user_agents=[None],
failed=None,
processed=None
):
"""Handle the requests and return the response body."""
cook = cook or set()
headers = headers or set()
user_agents = user_agents or ['Photon']
failed = failed or set()
processed = processed or set()
# Mark the URL as crawled
processed.add(url)
# Pause/sleep the program for specified time
time.sleep(delay)
def make_request(url):
"""Default request"""
final_headers = headers or {
'Host': host,
# Selecting a random user-agent
'User-Agent': random.choice(user_agents),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip',
'DNT': '1',
'Connection': 'close',
}
try:
response = SESSION.get(
url,
cookies=cook,
headers=final_headers,
verify=False,
timeout=timeout,
stream=True,
proxies=random.choice(proxies)
)
except TooManyRedirects:
return 'dummy'
if 'text/html' in response.headers['content-type'] or \
'text/plain' in response.headers['content-type']:
if response.status_code != 404:
return response.text
else:
response.close()
failed.add(url)
return 'dummy'
else:
response.close()
return 'dummy'
return make_request(url) | [
"def",
"requester",
"(",
"url",
",",
"main_url",
"=",
"None",
",",
"delay",
"=",
"0",
",",
"cook",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"timeout",
"=",
"10",
",",
"host",
"=",
"None",
",",
"proxies",
"=",
"[",
"None",
"]",
",",
"user_agents",
"=",
"[",
"None",
"]",
",",
"failed",
"=",
"None",
",",
"processed",
"=",
"None",
")",
":",
"cook",
"=",
"cook",
"or",
"set",
"(",
")",
"headers",
"=",
"headers",
"or",
"set",
"(",
")",
"user_agents",
"=",
"user_agents",
"or",
"[",
"'Photon'",
"]",
"failed",
"=",
"failed",
"or",
"set",
"(",
")",
"processed",
"=",
"processed",
"or",
"set",
"(",
")",
"# Mark the URL as crawled",
"processed",
".",
"add",
"(",
"url",
")",
"# Pause/sleep the program for specified time",
"time",
".",
"sleep",
"(",
"delay",
")",
"def",
"make_request",
"(",
"url",
")",
":",
"\"\"\"Default request\"\"\"",
"final_headers",
"=",
"headers",
"or",
"{",
"'Host'",
":",
"host",
",",
"# Selecting a random user-agent",
"'User-Agent'",
":",
"random",
".",
"choice",
"(",
"user_agents",
")",
",",
"'Accept'",
":",
"'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'",
",",
"'Accept-Language'",
":",
"'en-US,en;q=0.5'",
",",
"'Accept-Encoding'",
":",
"'gzip'",
",",
"'DNT'",
":",
"'1'",
",",
"'Connection'",
":",
"'close'",
",",
"}",
"try",
":",
"response",
"=",
"SESSION",
".",
"get",
"(",
"url",
",",
"cookies",
"=",
"cook",
",",
"headers",
"=",
"final_headers",
",",
"verify",
"=",
"False",
",",
"timeout",
"=",
"timeout",
",",
"stream",
"=",
"True",
",",
"proxies",
"=",
"random",
".",
"choice",
"(",
"proxies",
")",
")",
"except",
"TooManyRedirects",
":",
"return",
"'dummy'",
"if",
"'text/html'",
"in",
"response",
".",
"headers",
"[",
"'content-type'",
"]",
"or",
"'text/plain'",
"in",
"response",
".",
"headers",
"[",
"'content-type'",
"]",
":",
"if",
"response",
".",
"status_code",
"!=",
"'404'",
":",
"return",
"response",
".",
"text",
"else",
":",
"response",
".",
"close",
"(",
")",
"failed",
".",
"add",
"(",
"url",
")",
"return",
"'dummy'",
"else",
":",
"response",
".",
"close",
"(",
")",
"return",
"'dummy'",
"return",
"make_request",
"(",
"url",
")"
] | Handle the requests and return the response body. | [
"Handle",
"the",
"requests",
"and",
"return",
"the",
"response",
"body",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/requester.py#L11-L72 |
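The content gate in `make_request` only returns a body for HTML or plain-text responses with a non-404 status; everything else collapses to the `'dummy'` sentinel. A stub-driven illustration of that gate (the fake response class exists purely for the example):

```python
class FakeResponse:
    status_code = 404
    headers = {"content-type": "text/html"}

def is_useful(response):
    """Mirror of the gate in make_request; status_code is an int."""
    ctype = response.headers.get("content-type", "")
    return (("text/html" in ctype or "text/plain" in ctype)
            and response.status_code != 404)

print(is_useful(FakeResponse()))  # False: 404 pages are filtered out
```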
26,365 | s0md3v/Photon | photon.py | intel_extractor | def intel_extractor(url, response):
"""Extract intel from the response body."""
for rintel in rintels:
res = re.sub(r'<(script).*?</\1>(?s)', '', response)
res = re.sub(r'<[^<]+?>', '', res)
matches = rintel[0].findall(res)
if matches:
for match in matches:
verb('Intel', match)
bad_intel.add((match, rintel[1], url)) | python | def intel_extractor(url, response):
"""Extract intel from the response body."""
for rintel in rintels:
res = re.sub(r'<(script).*?</\1>(?s)', '', response)
res = re.sub(r'<[^<]+?>', '', res)
matches = rintel[0].findall(res)
if matches:
for match in matches:
verb('Intel', match)
bad_intel.add((match, rintel[1], url)) | [
"def",
"intel_extractor",
"(",
"url",
",",
"response",
")",
":",
"for",
"rintel",
"in",
"rintels",
":",
"res",
"=",
"re",
".",
"sub",
"(",
"r'<(script).*?</\\1>(?s)'",
",",
"''",
",",
"response",
")",
"res",
"=",
"re",
".",
"sub",
"(",
"r'<[^<]+?>'",
",",
"''",
",",
"res",
")",
"matches",
"=",
"rintel",
"[",
"0",
"]",
".",
"findall",
"(",
"res",
")",
"if",
"matches",
":",
"for",
"match",
"in",
"matches",
":",
"verb",
"(",
"'Intel'",
",",
"match",
")",
"bad_intel",
".",
"add",
"(",
"(",
"match",
",",
"rintel",
"[",
"1",
"]",
",",
"url",
")",
")"
] | Extract intel from the response body. | [
"Extract",
"intel",
"from",
"the",
"response",
"body",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L208-L217 |
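One portability caveat for `intel_extractor`: the pattern places the `(?s)` flag at the end, and Python 3.11+ rejects global inline flags that are not at the start of a pattern. The same stripping logic with a leading flag:

```python
import re

html = "<p>a@b.com</p><script>var x = 'c@d.com';</script>"
text = re.sub(r"(?s)<(script).*?</\1>", "", html)  # drop script blocks
text = re.sub(r"<[^<]+?>", "", text)               # drop remaining tags
print(text)  # 'a@b.com': script contents no longer pollute intel matches
```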
26,366 | s0md3v/Photon | photon.py | js_extractor | def js_extractor(response):
"""Extract js files from the response body"""
# Extract .js files
matches = rscript.findall(response)
for match in matches:
match = match[2].replace('\'', '').replace('"', '')
verb('JS file', match)
bad_scripts.add(match) | python | def js_extractor(response):
"""Extract js files from the response body"""
# Extract .js files
matches = rscript.findall(response)
for match in matches:
match = match[2].replace('\'', '').replace('"', '')
verb('JS file', match)
bad_scripts.add(match) | [
"def",
"js_extractor",
"(",
"response",
")",
":",
"# Extract .js files\r",
"matches",
"=",
"rscript",
".",
"findall",
"(",
"response",
")",
"for",
"match",
"in",
"matches",
":",
"match",
"=",
"match",
"[",
"2",
"]",
".",
"replace",
"(",
"'\\''",
",",
"''",
")",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
"verb",
"(",
"'JS file'",
",",
"match",
")",
"bad_scripts",
".",
"add",
"(",
"match",
")"
] | Extract js files from the response body | [
"Extract",
"js",
"files",
"from",
"the",
"response",
"body"
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L220-L227 |
26,367 | s0md3v/Photon | photon.py | extractor | def extractor(url):
"""Extract details from the response body."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
if clone:
mirror(url, response)
matches = rhref.findall(response)
for link in matches:
# Remove everything after a "#" to deal with in-page anchors
link = link[1].replace('\'', '').replace('"', '').split('#')[0]
# Checks if the URLs should be crawled
if is_link(link, processed, files):
if link[:4] == 'http':
if link.startswith(main_url):
verb('Internal page', link)
internal.add(link)
else:
verb('External page', link)
external.add(link)
elif link[:2] == '//':
if link.split('/')[2].startswith(host):
verb('Internal page', link)
internal.add(schema + '://' + link)
else:
verb('External page', link)
external.add(link)
elif link[:1] == '/':
verb('Internal page', link)
internal.add(remove_file(url) + link)
else:
verb('Internal page', link)
usable_url = remove_file(url)
if usable_url.endswith('/'):
internal.add(usable_url + link)
elif link.startswith('/'):
internal.add(usable_url + link)
else:
internal.add(usable_url + '/' + link)
if not only_urls:
intel_extractor(url, response)
js_extractor(response)
if args.regex and not supress_regex:
regxy(args.regex, response, supress_regex, custom)
if api:
matches = rentropy.findall(response)
for match in matches:
if entropy(match) >= 4:
verb('Key', match)
keys.add(url + ': ' + match) | python | def extractor(url):
"""Extract details from the response body."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
if clone:
mirror(url, response)
matches = rhref.findall(response)
for link in matches:
# Remove everything after a "#" to deal with in-page anchors
link = link[1].replace('\'', '').replace('"', '').split('#')[0]
# Checks if the URLs should be crawled
if is_link(link, processed, files):
if link[:4] == 'http':
if link.startswith(main_url):
verb('Internal page', link)
internal.add(link)
else:
verb('External page', link)
external.add(link)
elif link[:2] == '//':
if link.split('/')[2].startswith(host):
verb('Internal page', link)
internal.add(schema + '://' + link)
else:
verb('External page', link)
external.add(link)
elif link[:1] == '/':
verb('Internal page', link)
internal.add(remove_file(url) + link)
else:
verb('Internal page', link)
usable_url = remove_file(url)
if usable_url.endswith('/'):
internal.add(usable_url + link)
elif link.startswith('/'):
internal.add(usable_url + link)
else:
internal.add(usable_url + '/' + link)
if not only_urls:
intel_extractor(url, response)
js_extractor(response)
if args.regex and not supress_regex:
regxy(args.regex, response, supress_regex, custom)
if api:
matches = rentropy.findall(response)
for match in matches:
if entropy(match) >= 4:
verb('Key', match)
keys.add(url + ': ' + match) | [
"def",
"extractor",
"(",
"url",
")",
":",
"response",
"=",
"requester",
"(",
"url",
",",
"main_url",
",",
"delay",
",",
"cook",
",",
"headers",
",",
"timeout",
",",
"host",
",",
"proxies",
",",
"user_agents",
",",
"failed",
",",
"processed",
")",
"if",
"clone",
":",
"mirror",
"(",
"url",
",",
"response",
")",
"matches",
"=",
"rhref",
".",
"findall",
"(",
"response",
")",
"for",
"link",
"in",
"matches",
":",
"# Remove everything after a \"#\" to deal with in-page anchors\r",
"link",
"=",
"link",
"[",
"1",
"]",
".",
"replace",
"(",
"'\\''",
",",
"''",
")",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
".",
"split",
"(",
"'#'",
")",
"[",
"0",
"]",
"# Checks if the URLs should be crawled\r",
"if",
"is_link",
"(",
"link",
",",
"processed",
",",
"files",
")",
":",
"if",
"link",
"[",
":",
"4",
"]",
"==",
"'http'",
":",
"if",
"link",
".",
"startswith",
"(",
"main_url",
")",
":",
"verb",
"(",
"'Internal page'",
",",
"link",
")",
"internal",
".",
"add",
"(",
"link",
")",
"else",
":",
"verb",
"(",
"'External page'",
",",
"link",
")",
"external",
".",
"add",
"(",
"link",
")",
"elif",
"link",
"[",
":",
"2",
"]",
"==",
"'//'",
":",
"if",
"link",
".",
"split",
"(",
"'/'",
")",
"[",
"2",
"]",
".",
"startswith",
"(",
"host",
")",
":",
"verb",
"(",
"'Internal page'",
",",
"link",
")",
"internal",
".",
"add",
"(",
"schema",
"+",
"'://'",
"+",
"link",
")",
"else",
":",
"verb",
"(",
"'External page'",
",",
"link",
")",
"external",
".",
"add",
"(",
"link",
")",
"elif",
"link",
"[",
":",
"1",
"]",
"==",
"'/'",
":",
"verb",
"(",
"'Internal page'",
",",
"link",
")",
"internal",
".",
"add",
"(",
"remove_file",
"(",
"url",
")",
"+",
"link",
")",
"else",
":",
"verb",
"(",
"'Internal page'",
",",
"link",
")",
"usable_url",
"=",
"remove_file",
"(",
"url",
")",
"if",
"usable_url",
".",
"endswith",
"(",
"'/'",
")",
":",
"internal",
".",
"add",
"(",
"usable_url",
"+",
"link",
")",
"elif",
"link",
".",
"startswith",
"(",
"'/'",
")",
":",
"internal",
".",
"add",
"(",
"usable_url",
"+",
"link",
")",
"else",
":",
"internal",
".",
"add",
"(",
"usable_url",
"+",
"'/'",
"+",
"link",
")",
"if",
"not",
"only_urls",
":",
"intel_extractor",
"(",
"url",
",",
"response",
")",
"js_extractor",
"(",
"response",
")",
"if",
"args",
".",
"regex",
"and",
"not",
"supress_regex",
":",
"regxy",
"(",
"args",
".",
"regex",
",",
"response",
",",
"supress_regex",
",",
"custom",
")",
"if",
"api",
":",
"matches",
"=",
"rentropy",
".",
"findall",
"(",
"response",
")",
"for",
"match",
"in",
"matches",
":",
"if",
"entropy",
"(",
"match",
")",
">=",
"4",
":",
"verb",
"(",
"'Key'",
",",
"match",
")",
"keys",
".",
"add",
"(",
"url",
"+",
"': '",
"+",
"match",
")"
] | Extract details from the response body. | [
"Extract",
"details",
"from",
"the",
"response",
"body",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L239-L287 |
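The branch ladder in `extractor` re-implements relative URL resolution by hand: absolute, scheme-relative, root-relative, and path-relative links each get their own case, and the inner `elif link.startswith('/')` arm appears unreachable because root-relative links were already handled earlier. For comparison, the standard library covers all four cases:

```python
from urllib.parse import urljoin

base = "http://example.com/blog/post.html"
for link in ["http://example.com/x", "//cdn.example.com/a.js",
             "/about", "next.html"]:
    print(link, "->", urljoin(base, link))
```

`urljoin` also inherits the base scheme for `//` links, matching the `schema + '://' + link` handling above.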
26,368 | s0md3v/Photon | photon.py | jscanner | def jscanner(url):
"""Extract endpoints from JavaScript code."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
# Extract URLs/endpoints
matches = rendpoint.findall(response)
# Iterate over the matches, match is a tuple
for match in matches:
# Combining the items because one of them is always empty
match = match[0] + match[1]
# Making sure it's not some JavaScript code
if not re.search(r'[}{><"\']', match) and not match == '/':
verb('JS endpoint', match)
endpoints.add(match) | python | def jscanner(url):
"""Extract endpoints from JavaScript code."""
response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
# Extract URLs/endpoints
matches = rendpoint.findall(response)
# Iterate over the matches, match is a tuple
for match in matches:
# Combining the items because one of them is always empty
match = match[0] + match[1]
# Making sure it's not some JavaScript code
if not re.search(r'[}{><"\']', match) and not match == '/':
verb('JS endpoint', match)
endpoints.add(match) | [
"def",
"jscanner",
"(",
"url",
")",
":",
"response",
"=",
"requester",
"(",
"url",
",",
"main_url",
",",
"delay",
",",
"cook",
",",
"headers",
",",
"timeout",
",",
"host",
",",
"proxies",
",",
"user_agents",
",",
"failed",
",",
"processed",
")",
"# Extract URLs/endpoints\r",
"matches",
"=",
"rendpoint",
".",
"findall",
"(",
"response",
")",
"# Iterate over the matches, match is a tuple\r",
"for",
"match",
"in",
"matches",
":",
"# Combining the items because one of them is always empty\r",
"match",
"=",
"match",
"[",
"0",
"]",
"+",
"match",
"[",
"1",
"]",
"# Making sure it's not some JavaScript code\r",
"if",
"not",
"re",
".",
"search",
"(",
"r'[}{><\"\\']'",
",",
"match",
")",
"and",
"not",
"match",
"==",
"'/'",
":",
"verb",
"(",
"'JS endpoint'",
",",
"match",
")",
"endpoints",
".",
"add",
"(",
"match",
")"
] | Extract endpoints from JavaScript code. | [
"Extract",
"endpoints",
"from",
"JavaScript",
"code",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L290-L302 |
26,369 | s0md3v/Photon | core/updater.py | updater | def updater():
"""Update the current installation.
git clones the latest version and merges it with the current directory.
"""
print('%s Checking for updates' % run)
# Changes must be separated by ;
changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''
latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com')
# Just a hack to see if a new version is available
if changes not in latest_commit:
changelog = re.search(r"changes = '''(.*?)'''", latest_commit)
# Splitting the changes to form a list
changelog = changelog.group(1).split(';')
print('%s A new version of Photon is available.' % good)
print('%s Changes:' % info)
for change in changelog: # print changes
print('%s>%s %s' % (green, end, change))
current_path = os.getcwd().split('/') # if you know it, you know it
folder = current_path[-1] # current directory name
path = '/'.join(current_path) # current directory path
choice = input('%s Would you like to update? [Y/n] ' % que).lower()
if choice != 'n':
print('%s Updating Photon' % run)
os.system('git clone --quiet https://github.com/s0md3v/Photon %s'
% (folder))
os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null'
% (path, folder, path, path, folder))
print('%s Update successful!' % good)
else:
print('%s Photon is up to date!' % good) | python | def updater():
"""Update the current installation.
git clones the latest version and merges it with the current directory.
"""
print('%s Checking for updates' % run)
# Changes must be separated by ;
changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''
latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com')
# Just a hack to see if a new version is available
if changes not in latest_commit:
changelog = re.search(r"changes = '''(.*?)'''", latest_commit)
# Splitting the changes to form a list
changelog = changelog.group(1).split(';')
print('%s A new version of Photon is available.' % good)
print('%s Changes:' % info)
for change in changelog: # print changes
print('%s>%s %s' % (green, end, change))
current_path = os.getcwd().split('/') # if you know it, you know it
folder = current_path[-1] # current directory name
path = '/'.join(current_path) # current directory path
choice = input('%s Would you like to update? [Y/n] ' % que).lower()
if choice != 'n':
print('%s Updating Photon' % run)
os.system('git clone --quiet https://github.com/s0md3v/Photon %s'
% (folder))
os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null'
% (path, folder, path, path, folder))
print('%s Update successful!' % good)
else:
print('%s Photon is up to date!' % good) | [
"def",
"updater",
"(",
")",
":",
"print",
"(",
"'%s Checking for updates'",
"%",
"run",
")",
"# Changes must be separated by ;",
"changes",
"=",
"'''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''",
"latest_commit",
"=",
"requester",
"(",
"'https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py'",
",",
"host",
"=",
"'raw.githubusercontent.com'",
")",
"# Just a hack to see if a new version is available",
"if",
"changes",
"not",
"in",
"latest_commit",
":",
"changelog",
"=",
"re",
".",
"search",
"(",
"r\"changes = '''(.*?)'''\"",
",",
"latest_commit",
")",
"# Splitting the changes to form a list",
"changelog",
"=",
"changelog",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"';'",
")",
"print",
"(",
"'%s A new version of Photon is available.'",
"%",
"good",
")",
"print",
"(",
"'%s Changes:'",
"%",
"info",
")",
"for",
"change",
"in",
"changelog",
":",
"# print changes",
"print",
"(",
"'%s>%s %s'",
"%",
"(",
"green",
",",
"end",
",",
"change",
")",
")",
"current_path",
"=",
"os",
".",
"getcwd",
"(",
")",
".",
"split",
"(",
"'/'",
")",
"# if you know it, you know it",
"folder",
"=",
"current_path",
"[",
"-",
"1",
"]",
"# current directory name",
"path",
"=",
"'/'",
".",
"join",
"(",
"current_path",
")",
"# current directory path",
"choice",
"=",
"input",
"(",
"'%s Would you like to update? [Y/n] '",
"%",
"que",
")",
".",
"lower",
"(",
")",
"if",
"choice",
"!=",
"'n'",
":",
"print",
"(",
"'%s Updating Photon'",
"%",
"run",
")",
"os",
".",
"system",
"(",
"'git clone --quiet https://github.com/s0md3v/Photon %s'",
"%",
"(",
"folder",
")",
")",
"os",
".",
"system",
"(",
"'cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null'",
"%",
"(",
"path",
",",
"folder",
",",
"path",
",",
"path",
",",
"folder",
")",
")",
"print",
"(",
"'%s Update successful!'",
"%",
"good",
")",
"else",
":",
"print",
"(",
"'%s Photon is up to date!'",
"%",
"good",
")"
] | Update the current installation.
git clones the latest version and merges it with the current directory. | [
"Update",
"the",
"current",
"installation",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/updater.py#L8-L40 |
26,370 | s0md3v/Photon | plugins/find_subdomains.py | find_subdomains | def find_subdomains(domain):
"""Find subdomains according to the TLD."""
result = set()
response = get('https://findsubdomains.com/subdomains-of/' + domain).text
matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', response)
for match in matches:
result.add(match.replace(' ', '').replace('\n', ''))
return list(result) | python | def find_subdomains(domain):
"""Find subdomains according to the TLD."""
result = set()
response = get('https://findsubdomains.com/subdomains-of/' + domain).text
matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', response)
for match in matches:
result.add(match.replace(' ', '').replace('\n', ''))
return list(result) | [
"def",
"find_subdomains",
"(",
"domain",
")",
":",
"result",
"=",
"set",
"(",
")",
"response",
"=",
"get",
"(",
"'https://findsubdomains.com/subdomains-of/'",
"+",
"domain",
")",
".",
"text",
"matches",
"=",
"findall",
"(",
"r'(?s)<div class=\"domains js-domain-name\">(.*?)</div>'",
",",
"response",
")",
"for",
"match",
"in",
"matches",
":",
"result",
".",
"add",
"(",
"match",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
")",
"return",
"list",
"(",
"result",
")"
] | Find subdomains according to the TLD. | [
"Find",
"subdomains",
"according",
"to",
"the",
"TLD",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/find_subdomains.py#L7-L14 |
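This helper depends entirely on the findsubdomains.com page markup, so it silently returns an empty list if the service or its HTML changes; treat its availability as an assumption. The extraction step itself can be exercised against a canned page:

```python
from re import findall

html = ('<div class="domains js-domain-name"> a.example.com </div>'
        '<div class="domains js-domain-name">\n b.example.com \n</div>')
matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', html)
print({m.replace(' ', '').replace('\n', '') for m in matches})
```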
26,371 | s0md3v/Photon | core/flash.py | flash | def flash(function, links, thread_count):
"""Process the URLs and uses a threadpool to execute a function."""
# Convert links (set) to list
links = list(links)
threadpool = concurrent.futures.ThreadPoolExecutor(
max_workers=thread_count)
futures = (threadpool.submit(function, link) for link in links)
for i, _ in enumerate(concurrent.futures.as_completed(futures)):
if i + 1 == len(links) or (i + 1) % thread_count == 0:
print('%s Progress: %i/%i' % (info, i + 1, len(links)),
end='\r')
print('') | python | def flash(function, links, thread_count):
"""Process the URLs and uses a threadpool to execute a function."""
# Convert links (set) to list
links = list(links)
threadpool = concurrent.futures.ThreadPoolExecutor(
max_workers=thread_count)
futures = (threadpool.submit(function, link) for link in links)
for i, _ in enumerate(concurrent.futures.as_completed(futures)):
if i + 1 == len(links) or (i + 1) % thread_count == 0:
print('%s Progress: %i/%i' % (info, i + 1, len(links)),
end='\r')
print('') | [
"def",
"flash",
"(",
"function",
",",
"links",
",",
"thread_count",
")",
":",
"# Convert links (set) to list",
"links",
"=",
"list",
"(",
"links",
")",
"threadpool",
"=",
"concurrent",
".",
"futures",
".",
"ThreadPoolExecutor",
"(",
"max_workers",
"=",
"thread_count",
")",
"futures",
"=",
"(",
"threadpool",
".",
"submit",
"(",
"function",
",",
"link",
")",
"for",
"link",
"in",
"links",
")",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"concurrent",
".",
"futures",
".",
"as_completed",
"(",
"futures",
")",
")",
":",
"if",
"i",
"+",
"1",
"==",
"len",
"(",
"links",
")",
"or",
"(",
"i",
"+",
"1",
")",
"%",
"thread_count",
"==",
"0",
":",
"print",
"(",
"'%s Progress: %i/%i'",
"%",
"(",
"info",
",",
"i",
"+",
"1",
",",
"len",
"(",
"links",
")",
")",
",",
"end",
"=",
"'\\r'",
")",
"print",
"(",
"''",
")"
] | Process the URLs and uses a threadpool to execute a function. | [
"Process",
"the",
"URLs",
"and",
"uses",
"a",
"threadpool",
"to",
"execute",
"a",
"function",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/flash.py#L6-L17 |
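Usage-wise, `flash` fans a callback out over the link set and prints a throttled progress line. A self-contained miniature of the same pattern (the callback here is a stand-in for `extractor` or `jscanner`); note the original never shuts the pool down, which is harmless for a script but worth knowing:

```python
import concurrent.futures

def fetch(link):
    return len(link)  # stand-in for a real crawler callback

links = ["http://a", "http://bb", "http://ccc"]
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
    futures = [pool.submit(fetch, link) for link in links]
    for i, _ in enumerate(concurrent.futures.as_completed(futures)):
        if i + 1 == len(links) or (i + 1) % 2 == 0:
            print("Progress: %i/%i" % (i + 1, len(links)), end="\r")
print()
```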
26,372 | s0md3v/Photon | core/utils.py | regxy | def regxy(pattern, response, supress_regex, custom):
"""Extract a string based on regex pattern supplied by user."""
try:
matches = re.findall(r'%s' % pattern, response)
for match in matches:
verb('Custom regex', match)
custom.add(match)
except:
supress_regex = True | python | def regxy(pattern, response, supress_regex, custom):
"""Extract a string based on regex pattern supplied by user."""
try:
matches = re.findall(r'%s' % pattern, response)
for match in matches:
verb('Custom regex', match)
custom.add(match)
except:
supress_regex = True | [
"def",
"regxy",
"(",
"pattern",
",",
"response",
",",
"supress_regex",
",",
"custom",
")",
":",
"try",
":",
"matches",
"=",
"re",
".",
"findall",
"(",
"r'%s'",
"%",
"pattern",
",",
"response",
")",
"for",
"match",
"in",
"matches",
":",
"verb",
"(",
"'Custom regex'",
",",
"match",
")",
"custom",
".",
"add",
"(",
"match",
")",
"except",
":",
"supress_regex",
"=",
"True"
] | Extract a string based on regex pattern supplied by user. | [
"Extract",
"a",
"string",
"based",
"on",
"regex",
"pattern",
"supplied",
"by",
"user",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L15-L23 |
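Two quirks worth flagging in `regxy`: assigning `supress_regex = True` inside the function only rebinds the local name, so the caller's flag never actually flips, and the bare `except` swallows the `re.error` that signals a bad pattern. A variant that surfaces both, shown as a sketch rather than a drop-in replacement:

```python
import re

def regxy(pattern, response):
    """Return (matches, suppress) so the caller can persist the flag."""
    try:
        return set(re.findall(pattern, response)), False
    except re.error:
        return set(), True

print(regxy(r"\d+", "a1 b22"))  # ({'1', '22'}, False), set order may vary
print(regxy(r"(", "anything"))  # (set(), True): invalid pattern
```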
26,373 | s0md3v/Photon | core/utils.py | is_link | def is_link(url, processed, files):
"""
Determine whether or not a link should be crawled
A url should not be crawled if it
- Is a file
- Has already been crawled
Args:
url: str Url to be processed
processed: list[str] List of urls that have already been crawled
Returns:
bool If `url` should be crawled
"""
if url not in processed:
is_file = url.endswith(BAD_TYPES)
if is_file:
files.add(url)
return False
return True
return False | python | def is_link(url, processed, files):
"""
Determine whether or not a link should be crawled
A url should not be crawled if it
- Is a file
- Has already been crawled
Args:
url: str Url to be processed
processed: list[str] List of urls that have already been crawled
Returns:
bool If `url` should be crawled
"""
if url not in processed:
is_file = url.endswith(BAD_TYPES)
if is_file:
files.add(url)
return False
return True
return False | [
"def",
"is_link",
"(",
"url",
",",
"processed",
",",
"files",
")",
":",
"if",
"url",
"not",
"in",
"processed",
":",
"is_file",
"=",
"url",
".",
"endswith",
"(",
"BAD_TYPES",
")",
"if",
"is_file",
":",
"files",
".",
"add",
"(",
"url",
")",
"return",
"False",
"return",
"True",
"return",
"False"
] | Determine whether or not a link should be crawled
A url should not be crawled if it
- Is a file
- Has already been crawled
Args:
url: str Url to be processed
processed: list[str] List of urls that have already been crawled
Returns:
bool If `url` should be crawled | [
"Determine",
"whether",
"or",
"not",
"a",
"link",
"should",
"be",
"crawled",
"A",
"url",
"should",
"not",
"be",
"crawled",
"if",
"it",
"-",
"Is",
"a",
"file",
"-",
"Has",
"already",
"been",
"crawled"
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L26-L46 |
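A condensed, runnable restatement of `is_link` for quick experimentation (`BAD_TYPES` is abbreviated here; the real module defines a much longer tuple of file extensions):

```python
BAD_TYPES = (".jpg", ".png", ".css")  # abbreviated stand-in

def is_link(url, processed, files):
    if url not in processed:
        if url.endswith(BAD_TYPES):
            files.add(url)
            return False
        return True
    return False

processed, files = set(), set()
print(is_link("http://x/logo.png", processed, files), files)  # False, queued
processed.add("http://x/page")
print(is_link("http://x/page", processed, files))  # False: already crawled
```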
26,374 | s0md3v/Photon | core/utils.py | remove_regex | def remove_regex(urls, regex):
"""
Parse a list for non-matches to a regex.
Args:
urls: iterable of urls
regex: string regex to be parsed for
Returns:
list of strings not matching regex
"""
if not regex:
return urls
# To avoid iterating over the characters of a string
if not isinstance(urls, (list, set, tuple)):
urls = [urls]
try:
non_matching_urls = [url for url in urls if not re.search(regex, url)]
except TypeError:
return []
return non_matching_urls | python | def remove_regex(urls, regex):
"""
Parse a list for non-matches to a regex.
Args:
urls: iterable of urls
regex: string regex to be parsed for
Returns:
list of strings not matching regex
"""
if not regex:
return urls
# To avoid iterating over the characters of a string
if not isinstance(urls, (list, set, tuple)):
urls = [urls]
try:
non_matching_urls = [url for url in urls if not re.search(regex, url)]
except TypeError:
return []
return non_matching_urls | [
"def",
"remove_regex",
"(",
"urls",
",",
"regex",
")",
":",
"if",
"not",
"regex",
":",
"return",
"urls",
"# To avoid iterating over the characters of a string",
"if",
"not",
"isinstance",
"(",
"urls",
",",
"(",
"list",
",",
"set",
",",
"tuple",
")",
")",
":",
"urls",
"=",
"[",
"urls",
"]",
"try",
":",
"non_matching_urls",
"=",
"[",
"url",
"for",
"url",
"in",
"urls",
"if",
"not",
"re",
".",
"search",
"(",
"regex",
",",
"url",
")",
"]",
"except",
"TypeError",
":",
"return",
"[",
"]",
"return",
"non_matching_urls"
] | Parse a list for non-matches to a regex.
Args:
urls: iterable of urls
regex: string regex to be parsed for
Returns:
list of strings not matching regex | [
"Parse",
"a",
"list",
"for",
"non",
"-",
"matches",
"to",
"a",
"regex",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L49-L73 |
26,375 | s0md3v/Photon | core/utils.py | writer | def writer(datasets, dataset_names, output_dir):
"""Write the results."""
for dataset, dataset_name in zip(datasets, dataset_names):
if dataset:
filepath = output_dir + '/' + dataset_name + '.txt'
with open(filepath, 'w+') as out_file:
joined = '\n'.join(dataset)
out_file.write(str(joined.encode('utf-8').decode('utf-8')))
out_file.write('\n') | python | def writer(datasets, dataset_names, output_dir):
"""Write the results."""
for dataset, dataset_name in zip(datasets, dataset_names):
if dataset:
filepath = output_dir + '/' + dataset_name + '.txt'
with open(filepath, 'w+') as out_file:
joined = '\n'.join(dataset)
out_file.write(str(joined.encode('utf-8').decode('utf-8')))
out_file.write('\n') | [
"def",
"writer",
"(",
"datasets",
",",
"dataset_names",
",",
"output_dir",
")",
":",
"for",
"dataset",
",",
"dataset_name",
"in",
"zip",
"(",
"datasets",
",",
"dataset_names",
")",
":",
"if",
"dataset",
":",
"filepath",
"=",
"output_dir",
"+",
"'/'",
"+",
"dataset_name",
"+",
"'.txt'",
"with",
"open",
"(",
"filepath",
",",
"'w+'",
")",
"as",
"out_file",
":",
"joined",
"=",
"'\\n'",
".",
"join",
"(",
"dataset",
")",
"out_file",
".",
"write",
"(",
"str",
"(",
"joined",
".",
"encode",
"(",
"'utf-8'",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"out_file",
".",
"write",
"(",
"'\\n'",
")"
] | Write the results. | [
"Write",
"the",
"results",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L76-L84 |
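In Python 3 the `joined.encode('utf-8').decode('utf-8')` round trip in `writer` is a no-op on `str` (it reads like a Python 2 compatibility remnant), so the write reduces to:

```python
def write_dataset(dataset, filepath):
    """Equivalent Python 3 write without the encode/decode round trip."""
    with open(filepath, "w+", encoding="utf-8") as out_file:
        out_file.write("\n".join(dataset) + "\n")
```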
26,376 | s0md3v/Photon | core/utils.py | timer | def timer(diff, processed):
"""Return the passed time."""
# Changes seconds into minutes and seconds
minutes, seconds = divmod(diff, 60)
try:
# Finds average time taken by requests
time_per_request = diff / float(len(processed))
except ZeroDivisionError:
time_per_request = 0
return minutes, seconds, time_per_request | python | def timer(diff, processed):
"""Return the passed time."""
# Changes seconds into minutes and seconds
minutes, seconds = divmod(diff, 60)
try:
# Finds average time taken by requests
time_per_request = diff / float(len(processed))
except ZeroDivisionError:
time_per_request = 0
return minutes, seconds, time_per_request | [
"def",
"timer",
"(",
"diff",
",",
"processed",
")",
":",
"# Changes seconds into minutes and seconds",
"minutes",
",",
"seconds",
"=",
"divmod",
"(",
"diff",
",",
"60",
")",
"try",
":",
"# Finds average time taken by requests",
"time_per_request",
"=",
"diff",
"/",
"float",
"(",
"len",
"(",
"processed",
")",
")",
"except",
"ZeroDivisionError",
":",
"time_per_request",
"=",
"0",
"return",
"minutes",
",",
"seconds",
",",
"time_per_request"
] | Return the passed time. | [
"Return",
"the",
"passed",
"time",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L87-L96 |
26,377 | s0md3v/Photon | core/utils.py | entropy | def entropy(string):
"""Calculate the entropy of a string."""
entropy = 0
for number in range(256):
result = float(string.encode('utf-8').count(
chr(number))) / len(string.encode('utf-8'))
if result != 0:
entropy = entropy - result * math.log(result, 2)
return entropy | python | def entropy(string):
"""Calculate the entropy of a string."""
entropy = 0
for number in range(256):
result = float(string.encode('utf-8').count(
chr(number))) / len(string.encode('utf-8'))
if result != 0:
entropy = entropy - result * math.log(result, 2)
return entropy | [
"def",
"entropy",
"(",
"string",
")",
":",
"entropy",
"=",
"0",
"for",
"number",
"in",
"range",
"(",
"256",
")",
":",
"result",
"=",
"float",
"(",
"string",
".",
"encode",
"(",
"'utf-8'",
")",
".",
"count",
"(",
"chr",
"(",
"number",
")",
")",
")",
"/",
"len",
"(",
"string",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"if",
"result",
"!=",
"0",
":",
"entropy",
"=",
"entropy",
"-",
"result",
"*",
"math",
".",
"log",
"(",
"result",
",",
"2",
")",
"return",
"entropy"
] | Calculate the entropy of a string. | [
"Calculate",
"the",
"entropy",
"of",
"a",
"string",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L99-L107 |
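As written, `entropy` raises `TypeError` on Python 3, because `bytes.count` does not accept the `str` produced by `chr(number)` (on Python 2 the same expression counted characters in a byte string). A working Shannon-entropy sketch over the UTF-8 bytes:

```python
import math
from collections import Counter

def entropy(string):
    """Shannon entropy, in bits per byte of the UTF-8 encoding."""
    data = string.encode("utf-8")
    result = 0.0
    for n in Counter(data).values():  # iterating bytes yields ints
        p = n / len(data)
        result -= p * math.log(p, 2)
    return result

print(entropy("aaaa"), entropy("abcd"))  # 0.0 and 2.0
```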
26,378 | s0md3v/Photon | core/utils.py | extract_headers | def extract_headers(headers):
"""This function extracts valid headers from interactive input."""
sorted_headers = {}
matches = re.findall(r'(.*):\s(.*)', headers)
for match in matches:
header = match[0]
value = match[1]
try:
if value[-1] == ',':
value = value[:-1]
sorted_headers[header] = value
except IndexError:
pass
return sorted_headers | python | def extract_headers(headers):
"""This function extracts valid headers from interactive input."""
sorted_headers = {}
matches = re.findall(r'(.*):\s(.*)', headers)
for match in matches:
header = match[0]
value = match[1]
try:
if value[-1] == ',':
value = value[:-1]
sorted_headers[header] = value
except IndexError:
pass
return sorted_headers | [
"def",
"extract_headers",
"(",
"headers",
")",
":",
"sorted_headers",
"=",
"{",
"}",
"matches",
"=",
"re",
".",
"findall",
"(",
"r'(.*):\\s(.*)'",
",",
"headers",
")",
"for",
"match",
"in",
"matches",
":",
"header",
"=",
"match",
"[",
"0",
"]",
"value",
"=",
"match",
"[",
"1",
"]",
"try",
":",
"if",
"value",
"[",
"-",
"1",
"]",
"==",
"','",
":",
"value",
"=",
"value",
"[",
":",
"-",
"1",
"]",
"sorted_headers",
"[",
"header",
"]",
"=",
"value",
"except",
"IndexError",
":",
"pass",
"return",
"sorted_headers"
] | This function extracts valid headers from interactive input. | [
"This",
"function",
"extracts",
"valid",
"headers",
"from",
"interactive",
"input",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L122-L135 |
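The trailing-comma trim in `extract_headers` exists because interactive input is often pasted from raw request dumps. A standalone run of the same parsing:

```python
import re

raw = "Host: example.com\nUser-Agent: Photon,\nAccept: text/html"
sorted_headers = {}
for header, value in re.findall(r"(.*):\s(.*)", raw):
    sorted_headers[header] = value[:-1] if value.endswith(",") else value
print(sorted_headers)
# {'Host': 'example.com', 'User-Agent': 'Photon', 'Accept': 'text/html'}
```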
26,379 | s0md3v/Photon | core/utils.py | top_level | def top_level(url, fix_protocol=True):
"""Extract the top level domain from an URL."""
ext = tld.get_tld(url, fix_protocol=fix_protocol)
toplevel = '.'.join(urlparse(url).netloc.split('.')[-2:]).split(
ext)[0] + ext
return toplevel | python | def top_level(url, fix_protocol=True):
"""Extract the top level domain from an URL."""
ext = tld.get_tld(url, fix_protocol=fix_protocol)
toplevel = '.'.join(urlparse(url).netloc.split('.')[-2:]).split(
ext)[0] + ext
return toplevel | [
"def",
"top_level",
"(",
"url",
",",
"fix_protocol",
"=",
"True",
")",
":",
"ext",
"=",
"tld",
".",
"get_tld",
"(",
"url",
",",
"fix_protocol",
"=",
"fix_protocol",
")",
"toplevel",
"=",
"'.'",
".",
"join",
"(",
"urlparse",
"(",
"url",
")",
".",
"netloc",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"2",
":",
"]",
")",
".",
"split",
"(",
"ext",
")",
"[",
"0",
"]",
"+",
"ext",
"return",
"toplevel"
] | Extract the top level domain from an URL. | [
"Extract",
"the",
"top",
"level",
"domain",
"from",
"an",
"URL",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L138-L143 |
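A runnable illustration of the netloc arithmetic in `top_level`, with the TLD passed explicitly so the example does not require the third-party `tld` package. Note that the `[-2:]` slice assumes a single-label TLD and can drop the registrable name for multi-part suffixes such as `co.uk`:

```python
from urllib.parse import urlparse

def top_level_with_ext(url, ext):
    """Same slicing as top_level, with the TLD supplied directly."""
    netloc = urlparse(url).netloc
    return ".".join(netloc.split(".")[-2:]).split(ext)[0] + ext

print(top_level_with_ext("http://blog.example.com/post", ".com"))
```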
26,380 | s0md3v/Photon | core/prompt.py | prompt | def prompt(default=None):
"""Present the user a prompt."""
editor = 'nano'
with tempfile.NamedTemporaryFile(mode='r+') as tmpfile:
if default:
tmpfile.write(default)
tmpfile.flush()
child_pid = os.fork()
is_child = child_pid == 0
if is_child:
os.execvp(editor, [editor, tmpfile.name])
else:
os.waitpid(child_pid, 0)
tmpfile.seek(0)
return tmpfile.read().strip() | python | def prompt(default=None):
"""Present the user a prompt."""
editor = 'nano'
with tempfile.NamedTemporaryFile(mode='r+') as tmpfile:
if default:
tmpfile.write(default)
tmpfile.flush()
child_pid = os.fork()
is_child = child_pid == 0
if is_child:
os.execvp(editor, [editor, tmpfile.name])
else:
os.waitpid(child_pid, 0)
tmpfile.seek(0)
return tmpfile.read().strip() | [
"def",
"prompt",
"(",
"default",
"=",
"None",
")",
":",
"editor",
"=",
"'nano'",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'r+'",
")",
"as",
"tmpfile",
":",
"if",
"default",
":",
"tmpfile",
".",
"write",
"(",
"default",
")",
"tmpfile",
".",
"flush",
"(",
")",
"child_pid",
"=",
"os",
".",
"fork",
"(",
")",
"is_child",
"=",
"child_pid",
"==",
"0",
"if",
"is_child",
":",
"os",
".",
"execvp",
"(",
"editor",
",",
"[",
"editor",
",",
"tmpfile",
".",
"name",
"]",
")",
"else",
":",
"os",
".",
"waitpid",
"(",
"child_pid",
",",
"0",
")",
"tmpfile",
".",
"seek",
"(",
"0",
")",
"return",
"tmpfile",
".",
"read",
"(",
")",
".",
"strip",
"(",
")"
] | Present the user a prompt. | [
"Present",
"the",
"user",
"a",
"prompt",
"."
] | 6a29f2c9782ea9b3dc090db1774a259033600e39 | https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/prompt.py#L6-L22 |
26,381 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAApplication/QATradeRealtime.py | QA_RealTrade.run | def run(self):
"""generator driven data flow
"""
# settlement events only fire when the date changes
_date = None
while QA_util_if_tradetime(self.now):
for data in self.ingest_data: # for each item in ingest_data
# <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'>
date = data.date[0]
if self.market_type is MARKET_TYPE.STOCK_CN: # if this is the stock market
if _date != date: # if a new date arrives
# the previous trading day is over
# send settle events to the broker and account
try:
self.market.trade_engine.join()
# time.sleep(2)
self.market._settle(self.broker_name)
except Exception as e:
raise e
# funds, indices, futures
elif self.market_type in [MARKET_TYPE.FUND_CN, MARKET_TYPE.INDEX_CN, MARKET_TYPE.FUTURE_CN]:
self.market._settle(self.broker_name)
# print(data)
self.broker.run(
QA_Event(event_type=ENGINE_EVENT.UPCOMING_DATA, market_data=data))
# 生成 UPCOMING_DATA 事件放到 队列中去执行
self.market.upcoming_data(self.broker_name, data)
self.market.trade_engine.join()
_date = date | python | def run(self):
"""generator driven data flow
"""
# 如果出现了日期的改变 才会进行结算的事件
_date = None
while QA_util_if_tradetime(self.now):
for data in self.ingest_data: # 对于在ingest_data中的数据
# <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'>
date = data.date[0]
if self.market_type is MARKET_TYPE.STOCK_CN: # 如果是股票市场
if _date != date: # 如果新的date
# 前一天的交易日已经过去
# 往 broker 和 account 发送 settle 事件
try:
self.market.trade_engine.join()
# time.sleep(2)
self.market._settle(self.broker_name)
except Exception as e:
raise e
# 基金 指数 期货
elif self.market_type in [MARKET_TYPE.FUND_CN, MARKET_TYPE.INDEX_CN, MARKET_TYPE.FUTURE_CN]:
self.market._settle(self.broker_name)
# print(data)
self.broker.run(
QA_Event(event_type=ENGINE_EVENT.UPCOMING_DATA, market_data=data))
# 生成 UPCOMING_DATA 事件放到 队列中去执行
self.market.upcoming_data(self.broker_name, data)
self.market.trade_engine.join()
_date = date | [
"def",
"run",
"(",
"self",
")",
":",
"# 如果出现了日期的改变 才会进行结算的事件",
"_date",
"=",
"None",
"while",
"QA_util_if_tradetime",
"(",
"self",
".",
"now",
")",
":",
"for",
"data",
"in",
"self",
".",
"ingest_data",
":",
"# 对于在ingest_data中的数据",
"# <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'>",
"date",
"=",
"data",
".",
"date",
"[",
"0",
"]",
"if",
"self",
".",
"market_type",
"is",
"MARKET_TYPE",
".",
"STOCK_CN",
":",
"# 如果是股票市场",
"if",
"_date",
"!=",
"date",
":",
"# 如果新的date",
"# 前一天的交易日已经过去",
"# 往 broker 和 account 发送 settle 事件",
"try",
":",
"self",
".",
"market",
".",
"trade_engine",
".",
"join",
"(",
")",
"# time.sleep(2)",
"self",
".",
"market",
".",
"_settle",
"(",
"self",
".",
"broker_name",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"# 基金 指数 期货",
"elif",
"self",
".",
"market_type",
"in",
"[",
"MARKET_TYPE",
".",
"FUND_CN",
",",
"MARKET_TYPE",
".",
"INDEX_CN",
",",
"MARKET_TYPE",
".",
"FUTURE_CN",
"]",
":",
"self",
".",
"market",
".",
"_settle",
"(",
"self",
".",
"broker_name",
")",
"# print(data)",
"self",
".",
"broker",
".",
"run",
"(",
"QA_Event",
"(",
"event_type",
"=",
"ENGINE_EVENT",
".",
"UPCOMING_DATA",
",",
"market_data",
"=",
"data",
")",
")",
"# 生成 UPCOMING_DATA 事件放到 队列中去执行",
"self",
".",
"market",
".",
"upcoming_data",
"(",
"self",
".",
"broker_name",
",",
"data",
")",
"self",
".",
"market",
".",
"trade_engine",
".",
"join",
"(",
")",
"_date",
"=",
"date"
] | generator driven data flow | [
"generator",
"driven",
"data",
"flow"
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAApplication/QATradeRealtime.py#L84-L117 |
26,382 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAARP/QAAccount.py | QA_Account.message | def message(self):
'the standard message which can be transferred'
return {
'source':
'account',
'frequence':
self.frequence,
'account_cookie':
self.account_cookie,
'portfolio_cookie':
self.portfolio_cookie,
'user_cookie':
self.user_cookie,
'broker':
self.broker,
'market_type':
self.market_type,
'strategy_name':
self.strategy_name,
'current_time':
str(self._currenttime),
'allow_sellopen':
self.allow_sellopen,
'allow_margin':
self.allow_margin,
'allow_t0':
self.allow_t0,
'margin_level':
self.margin_level,
'init_assets':
self.init_assets,
'init_cash':
self.init_cash,
'init_hold':
self.init_hold.to_dict(),
'commission_coeff':
self.commission_coeff,
'tax_coeff':
self.tax_coeff,
'cash':
self.cash,
'history':
self.history,
'trade_index':
self.time_index_max,
'running_time':
str(datetime.datetime.now())
if self.running_time is None else str(self.running_time),
'quantaxis_version':
self.quantaxis_version,
'running_environment':
self.running_environment,
'start_date':
self.start_date,
'end_date':
self.end_date,
'frozen':
self.frozen,
'finished_id':
self.finishedOrderid
} | python | def message(self):
'the standard message which can be transferred'
return {
'source':
'account',
'frequence':
self.frequence,
'account_cookie':
self.account_cookie,
'portfolio_cookie':
self.portfolio_cookie,
'user_cookie':
self.user_cookie,
'broker':
self.broker,
'market_type':
self.market_type,
'strategy_name':
self.strategy_name,
'current_time':
str(self._currenttime),
'allow_sellopen':
self.allow_sellopen,
'allow_margin':
self.allow_margin,
'allow_t0':
self.allow_t0,
'margin_level':
self.margin_level,
'init_assets':
self.init_assets,
'init_cash':
self.init_cash,
'init_hold':
self.init_hold.to_dict(),
'commission_coeff':
self.commission_coeff,
'tax_coeff':
self.tax_coeff,
'cash':
self.cash,
'history':
self.history,
'trade_index':
self.time_index_max,
'running_time':
str(datetime.datetime.now())
if self.running_time is None else str(self.running_time),
'quantaxis_version':
self.quantaxis_version,
'running_environment':
self.running_environment,
'start_date':
self.start_date,
'end_date':
self.end_date,
'frozen':
self.frozen,
'finished_id':
self.finishedOrderid
} | [
"def",
"message",
"(",
"self",
")",
":",
"return",
"{",
"'source'",
":",
"'account'",
",",
"'frequence'",
":",
"self",
".",
"frequence",
",",
"'account_cookie'",
":",
"self",
".",
"account_cookie",
",",
"'portfolio_cookie'",
":",
"self",
".",
"portfolio_cookie",
",",
"'user_cookie'",
":",
"self",
".",
"user_cookie",
",",
"'broker'",
":",
"self",
".",
"broker",
",",
"'market_type'",
":",
"self",
".",
"market_type",
",",
"'strategy_name'",
":",
"self",
".",
"strategy_name",
",",
"'current_time'",
":",
"str",
"(",
"self",
".",
"_currenttime",
")",
",",
"'allow_sellopen'",
":",
"self",
".",
"allow_sellopen",
",",
"'allow_margin'",
":",
"self",
".",
"allow_margin",
",",
"'allow_t0'",
":",
"self",
".",
"allow_t0",
",",
"'margin_level'",
":",
"self",
".",
"margin_level",
",",
"'init_assets'",
":",
"self",
".",
"init_assets",
",",
"'init_cash'",
":",
"self",
".",
"init_cash",
",",
"'init_hold'",
":",
"self",
".",
"init_hold",
".",
"to_dict",
"(",
")",
",",
"'commission_coeff'",
":",
"self",
".",
"commission_coeff",
",",
"'tax_coeff'",
":",
"self",
".",
"tax_coeff",
",",
"'cash'",
":",
"self",
".",
"cash",
",",
"'history'",
":",
"self",
".",
"history",
",",
"'trade_index'",
":",
"self",
".",
"time_index_max",
",",
"'running_time'",
":",
"str",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
"if",
"self",
".",
"running_time",
"is",
"None",
"else",
"str",
"(",
"self",
".",
"running_time",
")",
",",
"'quantaxis_version'",
":",
"self",
".",
"quantaxis_version",
",",
"'running_environment'",
":",
"self",
".",
"running_environment",
",",
"'start_date'",
":",
"self",
".",
"start_date",
",",
"'end_date'",
":",
"self",
".",
"end_date",
",",
"'frozen'",
":",
"self",
".",
"frozen",
",",
"'finished_id'",
":",
"self",
".",
"finishedOrderid",
"}"
] | the standard message which can be transferred | [
"the",
"standard",
"message",
"which",
"can",
"be",
"transfer"
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L429-L489 |
26,383 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAFetch/QACrawler.py | QA_fetch_get_sh_margin | def QA_fetch_get_sh_margin(date):
"""return shanghai margin data
Arguments:
date {str YYYY-MM-DD} -- date format
Returns:
pandas.DataFrame -- res for margin data
"""
if date in trade_date_sse:
data = pd.read_excel(_sh_url.format(QA_util_date_str2int
(date)), 1).assign(date=date).assign(sse='sh')
data.columns = ['code', 'name', 'leveraged_balance', 'leveraged_buyout', 'leveraged_payoff', 'margin_left', 'margin_sell', 'margin_repay', 'date', 'sse']
return data
else:
pass | python | def QA_fetch_get_sh_margin(date):
"""return shanghai margin data
Arguments:
date {str YYYY-MM-DD} -- date format
Returns:
pandas.DataFrame -- res for margin data
"""
if date in trade_date_sse:
data = pd.read_excel(_sh_url.format(QA_util_date_str2int
(date)), 1).assign(date=date).assign(sse='sh')
data.columns = ['code', 'name', 'leveraged_balance', 'leveraged_buyout', 'leveraged_payoff', 'margin_left', 'margin_sell', 'margin_repay', 'date', 'sse']
return data
else:
pass | [
"def",
"QA_fetch_get_sh_margin",
"(",
"date",
")",
":",
"if",
"date",
"in",
"trade_date_sse",
":",
"data",
"=",
"pd",
".",
"read_excel",
"(",
"_sh_url",
".",
"format",
"(",
"QA_util_date_str2int",
"(",
"date",
")",
")",
",",
"1",
")",
".",
"assign",
"(",
"date",
"=",
"date",
")",
".",
"assign",
"(",
"sse",
"=",
"'sh'",
")",
"data",
".",
"columns",
"=",
"[",
"'code'",
",",
"'name'",
",",
"'leveraged_balance'",
",",
"'leveraged_buyout'",
",",
"'leveraged_payoff'",
",",
"'margin_left'",
",",
"'margin_sell'",
",",
"'margin_repay'",
",",
"'date'",
",",
"'sse'",
"]",
"return",
"data",
"else",
":",
"pass"
] | return shanghai margin data
Arguments:
date {str YYYY-MM-DD} -- date format
Returns:
pandas.DataFrame -- res for margin data | [
"return",
"shanghai",
"margin",
"data"
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QACrawler.py#L34-L49 |
26,384 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAFetch/QACrawler.py | QA_fetch_get_sz_margin | def QA_fetch_get_sz_margin(date):
"""return shenzhen margin data
Arguments:
date {str YYYY-MM-DD} -- date format
Returns:
pandas.DataFrame -- res for margin data
"""
if date in trade_date_sse:
return pd.read_excel(_sz_url.format(date)).assign(date=date).assign(sse='sz') | python | def QA_fetch_get_sz_margin(date):
"""return shenzhen margin data
Arguments:
date {str YYYY-MM-DD} -- date format
Returns:
pandas.DataFrame -- res for margin data
"""
if date in trade_date_sse:
return pd.read_excel(_sz_url.format(date)).assign(date=date).assign(sse='sz') | [
"def",
"QA_fetch_get_sz_margin",
"(",
"date",
")",
":",
"if",
"date",
"in",
"trade_date_sse",
":",
"return",
"pd",
".",
"read_excel",
"(",
"_sz_url",
".",
"format",
"(",
"date",
")",
")",
".",
"assign",
"(",
"date",
"=",
"date",
")",
".",
"assign",
"(",
"sse",
"=",
"'sz'",
")"
] | return shenzhen margin data
Arguments:
date {str YYYY-MM-DD} -- date format
Returns:
pandas.DataFrame -- res for margin data | [
"return",
"shenzhen",
"margin",
"data"
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QACrawler.py#L52-L63 |
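Both margin fetchers above reduce to `pandas.read_excel` on a formatted exchange URL plus constant columns added via `assign`. A generic hedged sketch; the real `_sh_url`/`_sz_url` templates live elsewhere in `QACrawler.py`, so the URL below is a placeholder:

```python
import pandas as pd

# Placeholder template standing in for the real _sh_url / _sz_url constants.
_url = 'https://example.com/margin/{}.xls'

def fetch_margin(date, sse):
    # read_excel downloads and parses the workbook; assign() returns a copy
    # with the constant 'date' and 'sse' columns appended.
    return pd.read_excel(_url.format(date)).assign(date=date, sse=sse)
```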
26,385 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAData/base_datastruct.py | _quotation_base.kline_echarts | def kline_echarts(self, code=None):
def kline_formater(param):
return param.name + ':' + vars(param)
"""plot the market_data"""
if code is None:
path_name = '.' + os.sep + 'QA_' + self.type + \
'_codepackage_' + self.if_fq + '.html'
kline = Kline(
'CodePackage_' + self.if_fq + '_' + self.type,
width=1360,
height=700,
page_title='QUANTAXIS'
)
bar = Bar()
data_splits = self.splits()
for ds in data_splits:
data = []
axis = []
if ds.type[-3:] == 'day':
datetime = np.array(ds.date.map(str))
else:
datetime = np.array(ds.datetime.map(str))
ohlc = np.array(
ds.data.loc[:,
['open',
'close',
'low',
'high']]
)
kline.add(
ds.code[0],
datetime,
ohlc,
mark_point=["max",
"min"],
is_datazoom_show=True,
datazoom_orient='horizontal'
)
return kline
else:
data = []
axis = []
ds = self.select_code(code)
data = []
#axis = []
if self.type[-3:] == 'day':
datetime = np.array(ds.date.map(str))
else:
datetime = np.array(ds.datetime.map(str))
ohlc = np.array(ds.data.loc[:, ['open', 'close', 'low', 'high']])
vol = np.array(ds.volume)
kline = Kline(
'{}__{}__{}'.format(code,
self.if_fq,
self.type),
width=1360,
height=700,
page_title='QUANTAXIS'
)
bar = Bar()
kline.add(self.code, datetime, ohlc,
mark_point=["max", "min"],
# is_label_show=True,
is_datazoom_show=True,
is_xaxis_show=False,
# is_toolbox_show=True,
tooltip_formatter='{b}:{c}', # kline_formater,
# is_more_utils=True,
datazoom_orient='horizontal')
bar.add(
self.code,
datetime,
vol,
is_datazoom_show=True,
datazoom_xaxis_index=[0,
1]
)
grid = Grid(width=1360, height=700, page_title='QUANTAXIS')
grid.add(bar, grid_top="80%")
grid.add(kline, grid_bottom="30%")
return grid | python | def kline_echarts(self, code=None):
def kline_formater(param):
return param.name + ':' + vars(param)
"""plot the market_data"""
if code is None:
path_name = '.' + os.sep + 'QA_' + self.type + \
'_codepackage_' + self.if_fq + '.html'
kline = Kline(
'CodePackage_' + self.if_fq + '_' + self.type,
width=1360,
height=700,
page_title='QUANTAXIS'
)
bar = Bar()
data_splits = self.splits()
for ds in data_splits:
data = []
axis = []
if ds.type[-3:] == 'day':
datetime = np.array(ds.date.map(str))
else:
datetime = np.array(ds.datetime.map(str))
ohlc = np.array(
ds.data.loc[:,
['open',
'close',
'low',
'high']]
)
kline.add(
ds.code[0],
datetime,
ohlc,
mark_point=["max",
"min"],
is_datazoom_show=True,
datazoom_orient='horizontal'
)
return kline
else:
data = []
axis = []
ds = self.select_code(code)
data = []
#axis = []
if self.type[-3:] == 'day':
datetime = np.array(ds.date.map(str))
else:
datetime = np.array(ds.datetime.map(str))
ohlc = np.array(ds.data.loc[:, ['open', 'close', 'low', 'high']])
vol = np.array(ds.volume)
kline = Kline(
'{}__{}__{}'.format(code,
self.if_fq,
self.type),
width=1360,
height=700,
page_title='QUANTAXIS'
)
bar = Bar()
kline.add(self.code, datetime, ohlc,
mark_point=["max", "min"],
# is_label_show=True,
is_datazoom_show=True,
is_xaxis_show=False,
# is_toolbox_show=True,
tooltip_formatter='{b}:{c}', # kline_formater,
# is_more_utils=True,
datazoom_orient='horizontal')
bar.add(
self.code,
datetime,
vol,
is_datazoom_show=True,
datazoom_xaxis_index=[0,
1]
)
grid = Grid(width=1360, height=700, page_title='QUANTAXIS')
grid.add(bar, grid_top="80%")
grid.add(kline, grid_bottom="30%")
return grid | [
"def",
"kline_echarts",
"(",
"self",
",",
"code",
"=",
"None",
")",
":",
"def",
"kline_formater",
"(",
"param",
")",
":",
"return",
"param",
".",
"name",
"+",
"':'",
"+",
"vars",
"(",
"param",
")",
"if",
"code",
"is",
"None",
":",
"path_name",
"=",
"'.'",
"+",
"os",
".",
"sep",
"+",
"'QA_'",
"+",
"self",
".",
"type",
"+",
"'_codepackage_'",
"+",
"self",
".",
"if_fq",
"+",
"'.html'",
"kline",
"=",
"Kline",
"(",
"'CodePackage_'",
"+",
"self",
".",
"if_fq",
"+",
"'_'",
"+",
"self",
".",
"type",
",",
"width",
"=",
"1360",
",",
"height",
"=",
"700",
",",
"page_title",
"=",
"'QUANTAXIS'",
")",
"bar",
"=",
"Bar",
"(",
")",
"data_splits",
"=",
"self",
".",
"splits",
"(",
")",
"for",
"ds",
"in",
"data_splits",
":",
"data",
"=",
"[",
"]",
"axis",
"=",
"[",
"]",
"if",
"ds",
".",
"type",
"[",
"-",
"3",
":",
"]",
"==",
"'day'",
":",
"datetime",
"=",
"np",
".",
"array",
"(",
"ds",
".",
"date",
".",
"map",
"(",
"str",
")",
")",
"else",
":",
"datetime",
"=",
"np",
".",
"array",
"(",
"ds",
".",
"datetime",
".",
"map",
"(",
"str",
")",
")",
"ohlc",
"=",
"np",
".",
"array",
"(",
"ds",
".",
"data",
".",
"loc",
"[",
":",
",",
"[",
"'open'",
",",
"'close'",
",",
"'low'",
",",
"'high'",
"]",
"]",
")",
"kline",
".",
"add",
"(",
"ds",
".",
"code",
"[",
"0",
"]",
",",
"datetime",
",",
"ohlc",
",",
"mark_point",
"=",
"[",
"\"max\"",
",",
"\"min\"",
"]",
",",
"is_datazoom_show",
"=",
"True",
",",
"datazoom_orient",
"=",
"'horizontal'",
")",
"return",
"kline",
"else",
":",
"data",
"=",
"[",
"]",
"axis",
"=",
"[",
"]",
"ds",
"=",
"self",
".",
"select_code",
"(",
"code",
")",
"data",
"=",
"[",
"]",
"#axis = []",
"if",
"self",
".",
"type",
"[",
"-",
"3",
":",
"]",
"==",
"'day'",
":",
"datetime",
"=",
"np",
".",
"array",
"(",
"ds",
".",
"date",
".",
"map",
"(",
"str",
")",
")",
"else",
":",
"datetime",
"=",
"np",
".",
"array",
"(",
"ds",
".",
"datetime",
".",
"map",
"(",
"str",
")",
")",
"ohlc",
"=",
"np",
".",
"array",
"(",
"ds",
".",
"data",
".",
"loc",
"[",
":",
",",
"[",
"'open'",
",",
"'close'",
",",
"'low'",
",",
"'high'",
"]",
"]",
")",
"vol",
"=",
"np",
".",
"array",
"(",
"ds",
".",
"volume",
")",
"kline",
"=",
"Kline",
"(",
"'{}__{}__{}'",
".",
"format",
"(",
"code",
",",
"self",
".",
"if_fq",
",",
"self",
".",
"type",
")",
",",
"width",
"=",
"1360",
",",
"height",
"=",
"700",
",",
"page_title",
"=",
"'QUANTAXIS'",
")",
"bar",
"=",
"Bar",
"(",
")",
"kline",
".",
"add",
"(",
"self",
".",
"code",
",",
"datetime",
",",
"ohlc",
",",
"mark_point",
"=",
"[",
"\"max\"",
",",
"\"min\"",
"]",
",",
"# is_label_show=True,",
"is_datazoom_show",
"=",
"True",
",",
"is_xaxis_show",
"=",
"False",
",",
"# is_toolbox_show=True,",
"tooltip_formatter",
"=",
"'{b}:{c}'",
",",
"# kline_formater,",
"# is_more_utils=True,",
"datazoom_orient",
"=",
"'horizontal'",
")",
"bar",
".",
"add",
"(",
"self",
".",
"code",
",",
"datetime",
",",
"vol",
",",
"is_datazoom_show",
"=",
"True",
",",
"datazoom_xaxis_index",
"=",
"[",
"0",
",",
"1",
"]",
")",
"grid",
"=",
"Grid",
"(",
"width",
"=",
"1360",
",",
"height",
"=",
"700",
",",
"page_title",
"=",
"'QUANTAXIS'",
")",
"grid",
".",
"add",
"(",
"bar",
",",
"grid_top",
"=",
"\"80%\"",
")",
"grid",
".",
"add",
"(",
"kline",
",",
"grid_bottom",
"=",
"\"30%\"",
")",
"return",
"grid"
] | plot the market_data | [
"plot",
"the",
"market_data"
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/base_datastruct.py#L680-L769 |
26,386 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAData/dsmethods.py | from_tushare | def from_tushare(dataframe, dtype='day'):
"""dataframe from tushare
Arguments:
dataframe {[type]} -- [description]
Returns:
[type] -- [description]
"""
if dtype in ['day']:
return QA_DataStruct_Stock_day(
dataframe.assign(date=pd.to_datetime(dataframe.date)
).set_index(['date',
'code'],
drop=False),
dtype='stock_day'
)
elif dtype in ['min']:
return QA_DataStruct_Stock_min(
dataframe.assign(datetime=pd.to_datetime(dataframe.datetime)
).set_index(['datetime',
'code'],
drop=False),
dtype='stock_min'
) | python | def from_tushare(dataframe, dtype='day'):
"""dataframe from tushare
Arguments:
dataframe {[type]} -- [description]
Returns:
[type] -- [description]
"""
if dtype in ['day']:
return QA_DataStruct_Stock_day(
dataframe.assign(date=pd.to_datetime(dataframe.date)
).set_index(['date',
'code'],
drop=False),
dtype='stock_day'
)
elif dtype in ['min']:
return QA_DataStruct_Stock_min(
dataframe.assign(datetime=pd.to_datetime(dataframe.datetime)
).set_index(['datetime',
'code'],
drop=False),
dtype='stock_min'
) | [
"def",
"from_tushare",
"(",
"dataframe",
",",
"dtype",
"=",
"'day'",
")",
":",
"if",
"dtype",
"in",
"[",
"'day'",
"]",
":",
"return",
"QA_DataStruct_Stock_day",
"(",
"dataframe",
".",
"assign",
"(",
"date",
"=",
"pd",
".",
"to_datetime",
"(",
"dataframe",
".",
"date",
")",
")",
".",
"set_index",
"(",
"[",
"'date'",
",",
"'code'",
"]",
",",
"drop",
"=",
"False",
")",
",",
"dtype",
"=",
"'stock_day'",
")",
"elif",
"dtype",
"in",
"[",
"'min'",
"]",
":",
"return",
"QA_DataStruct_Stock_min",
"(",
"dataframe",
".",
"assign",
"(",
"datetime",
"=",
"pd",
".",
"to_datetime",
"(",
"dataframe",
".",
"datetime",
")",
")",
".",
"set_index",
"(",
"[",
"'datetime'",
",",
"'code'",
"]",
",",
"drop",
"=",
"False",
")",
",",
"dtype",
"=",
"'stock_min'",
")"
] | dataframe from tushare
Arguments:
dataframe {[type]} -- [description]
Returns:
[type] -- [description] | [
"dataframe",
"from",
"tushare"
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/dsmethods.py#L141-L166 |
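The heart of `from_tushare` is a two-step pandas idiom: parse the date column, then build a `(date, code)` MultiIndex while keeping both columns (`drop=False`). A small self-contained demonstration of just that idiom:

```python
import pandas as pd

df = pd.DataFrame({
    'date': ['2019-01-02', '2019-01-03'],
    'code': ['000001', '000001'],
    'close': [10.0, 10.2],
})

# Parse dates, then index by (date, code) without dropping the columns,
# mirroring the dtype='day' branch of from_tushare.
indexed = df.assign(date=pd.to_datetime(df.date)).set_index(
    ['date', 'code'], drop=False)
print(indexed.index.names)  # ['date', 'code']
```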
26,387 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QASetting/cache.py | Cache._create | def _create(self, cache_file):
"""Create the tables needed to store the information."""
conn = sqlite3.connect(cache_file)
cur = conn.cursor()
cur.execute("PRAGMA foreign_keys = ON")
cur.execute('''
CREATE TABLE jobs(
hash TEXT NOT NULL UNIQUE PRIMARY KEY, description TEXT NOT NULL,
last_run REAL, next_run REAL, last_run_result INTEGER)''')
cur.execute('''
CREATE TABLE history(
hash TEXT, description TEXT, time REAL, result INTEGER,
FOREIGN KEY(hash) REFERENCES jobs(hash))''')
conn.commit()
conn.close() | python | def _create(self, cache_file):
"""Create the tables needed to store the information."""
conn = sqlite3.connect(cache_file)
cur = conn.cursor()
cur.execute("PRAGMA foreign_keys = ON")
cur.execute('''
CREATE TABLE jobs(
hash TEXT NOT NULL UNIQUE PRIMARY KEY, description TEXT NOT NULL,
last_run REAL, next_run REAL, last_run_result INTEGER)''')
cur.execute('''
CREATE TABLE history(
hash TEXT, description TEXT, time REAL, result INTEGER,
FOREIGN KEY(hash) REFERENCES jobs(hash))''')
conn.commit()
conn.close() | [
"def",
"_create",
"(",
"self",
",",
"cache_file",
")",
":",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"cache_file",
")",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"\"PRAGMA foreign_keys = ON\"",
")",
"cur",
".",
"execute",
"(",
"'''\n CREATE TABLE jobs(\n hash TEXT NOT NULL UNIQUE PRIMARY KEY, description TEXT NOT NULL,\n last_run REAL, next_run REAL, last_run_result INTEGER)'''",
")",
"cur",
".",
"execute",
"(",
"'''\n CREATE TABLE history(\n hash TEXT, description TEXT, time REAL, result INTEGER,\n FOREIGN KEY(hash) REFERENCES jobs(hash))'''",
")",
"conn",
".",
"commit",
"(",
")",
"conn",
".",
"close",
"(",
")"
] | Create the tables needed to store the information. | [
"Create",
"the",
"tables",
"needed",
"to",
"store",
"the",
"information",
"."
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QASetting/cache.py#L68-L84 |
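To make the schema above concrete, here is a hedged sketch of writing to and reading from the two tables `_create` builds. The column names come straight from the CREATE statements; the file name and values are illustrative:

```python
import sqlite3
import time

conn = sqlite3.connect('cache.db')  # assumes _create() already ran on this file
cur = conn.cursor()
cur.execute("PRAGMA foreign_keys = ON")

# Upsert a job keyed by its hash, then log one run into the history table.
now = time.time()
cur.execute(
    "INSERT OR REPLACE INTO jobs(hash, description, last_run, next_run, last_run_result) "
    "VALUES (?, ?, ?, ?, ?)",
    ('abc123', 'nightly save', now, now + 86400, 0))
cur.execute(
    "INSERT INTO history(hash, description, time, result) VALUES (?, ?, ?, ?)",
    ('abc123', 'nightly save', now, 0))
conn.commit()

print(cur.execute("SELECT hash, last_run_result FROM jobs").fetchall())
conn.close()
```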
26,388 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QASU/main.py | QA_SU_save_stock_info | def QA_SU_save_stock_info(engine, client=DATABASE):
"""save stock info
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_stock_info(client=client) | python | def QA_SU_save_stock_info(engine, client=DATABASE):
"""save stock info
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_stock_info(client=client) | [
"def",
"QA_SU_save_stock_info",
"(",
"engine",
",",
"client",
"=",
"DATABASE",
")",
":",
"engine",
"=",
"select_save_engine",
"(",
"engine",
")",
"engine",
".",
"QA_SU_save_stock_info",
"(",
"client",
"=",
"client",
")"
] | save stock info
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE}) | [
"save",
"stock",
"info"
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QASU/main.py#L38-L49 |
26,389 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAFetch/QAQuery.py | QA_fetch_risk | def QA_fetch_risk(message={}, params={"_id": 0, 'assets': 0, 'timeindex': 0, 'totaltimeindex': 0, 'benchmark_assets': 0, 'month_profit': 0}, db=DATABASE):
"""get the risk message
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description]
"""
collection = DATABASE.risk
return [res for res in collection.find(message, params)] | python | def QA_fetch_risk(message={}, params={"_id": 0, 'assets': 0, 'timeindex': 0, 'totaltimeindex': 0, 'benchmark_assets': 0, 'month_profit': 0}, db=DATABASE):
"""get the risk message
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description]
"""
collection = DATABASE.risk
return [res for res in collection.find(message, params)] | [
"def",
"QA_fetch_risk",
"(",
"message",
"=",
"{",
"}",
",",
"params",
"=",
"{",
"\"_id\"",
":",
"0",
",",
"'assets'",
":",
"0",
",",
"'timeindex'",
":",
"0",
",",
"'totaltimeindex'",
":",
"0",
",",
"'benchmark_assets'",
":",
"0",
",",
"'month_profit'",
":",
"0",
"}",
",",
"db",
"=",
"DATABASE",
")",
":",
"collection",
"=",
"DATABASE",
".",
"risk",
"return",
"[",
"res",
"for",
"res",
"in",
"collection",
".",
"find",
"(",
"message",
",",
"params",
")",
"]"
] | get the risk message
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description] | [
"get",
"the",
"risk",
"message"
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QAQuery.py#L552-L565 |
26,390 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAFetch/QAQuery.py | QA_fetch_user | def QA_fetch_user(user_cookie, db=DATABASE):
"""
get the user
Arguments:
user_cookie : str the unique cookie_id for a user
Keyword Arguments:
db: database for query
Returns:
list --- [ACCOUNT]
"""
collection = DATABASE.account
return [res for res in collection.find({'user_cookie': user_cookie}, {"_id": 0})] | python | def QA_fetch_user(user_cookie, db=DATABASE):
"""
get the user
Arguments:
user_cookie : str the unique cookie_id for a user
Keyword Arguments:
db: database for query
Returns:
list --- [ACCOUNT]
"""
collection = DATABASE.account
return [res for res in collection.find({'user_cookie': user_cookie}, {"_id": 0})] | [
"def",
"QA_fetch_user",
"(",
"user_cookie",
",",
"db",
"=",
"DATABASE",
")",
":",
"collection",
"=",
"DATABASE",
".",
"account",
"return",
"[",
"res",
"for",
"res",
"in",
"collection",
".",
"find",
"(",
"{",
"'user_cookie'",
":",
"user_cookie",
"}",
",",
"{",
"\"_id\"",
":",
"0",
"}",
")",
"]"
] | get the user
Arguments:
user_cookie : str the unique cookie_id for a user
Keyword Arguments:
db: database for query
Returns:
list --- [ACCOUNT] | [
"get",
"the",
"user"
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QAQuery.py#L568-L582 |
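`QA_fetch_risk` and `QA_fetch_user` are thin wrappers over pymongo `find` calls; note that both read from the module-level `DATABASE` object rather than their `db` argument. A hedged standalone equivalent of `QA_fetch_user`, with the connection details assumed:

```python
import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017')  # assumed local MongoDB
db = client.quantaxis  # the database name here is an assumption

def fetch_user(user_cookie):
    # Match on user_cookie and drop Mongo's internal _id field,
    # mirroring QA_fetch_user above.
    return list(db.account.find({'user_cookie': user_cookie}, {'_id': 0}))
```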
26,391 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QACmd/__init__.py | CLI.do_shell | def do_shell(self, arg):
"run a shell commad"
print(">", arg)
sub_cmd = subprocess.Popen(arg, shell=True, stdout=subprocess.PIPE)
print(sub_cmd.communicate()[0]) | python | def do_shell(self, arg):
"run a shell commad"
print(">", arg)
sub_cmd = subprocess.Popen(arg, shell=True, stdout=subprocess.PIPE)
print(sub_cmd.communicate()[0]) | [
"def",
"do_shell",
"(",
"self",
",",
"arg",
")",
":",
"print",
"(",
"\">\"",
",",
"arg",
")",
"sub_cmd",
"=",
"subprocess",
".",
"Popen",
"(",
"arg",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"print",
"(",
"sub_cmd",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
")"
] | run a shell command | [
"run",
"a",
"shell",
"commad"
] | bb1fe424e4108b62a1f712b81a05cf829297a5c0 | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QACmd/__init__.py#L84-L88 |
26,392 | gunthercox/ChatterBot | chatterbot/chatterbot.py | ChatBot.get_response | def get_response(self, statement=None, **kwargs):
"""
Return the bot's response based on the input.
:param statement: A statement object or string.
:returns: A response to the input.
:rtype: Statement
:param additional_response_selection_parameters: Parameters to pass to the
chat bot's logic adapters to control response selection.
:type additional_response_selection_parameters: dict
:param persist_values_to_response: Values that should be saved to the response
that the chat bot generates.
:type persist_values_to_response: dict
"""
Statement = self.storage.get_object('statement')
additional_response_selection_parameters = kwargs.pop('additional_response_selection_parameters', {})
persist_values_to_response = kwargs.pop('persist_values_to_response', {})
if isinstance(statement, str):
kwargs['text'] = statement
if isinstance(statement, dict):
kwargs.update(statement)
if statement is None and 'text' not in kwargs:
raise self.ChatBotException(
'Either a statement object or a "text" keyword '
'argument is required. Neither was provided.'
)
if hasattr(statement, 'serialize'):
kwargs.update(**statement.serialize())
tags = kwargs.pop('tags', [])
text = kwargs.pop('text')
input_statement = Statement(text=text, **kwargs)
input_statement.add_tags(*tags)
# Preprocess the input statement
for preprocessor in self.preprocessors:
input_statement = preprocessor(input_statement)
# Make sure the input statement has its search text saved
if not input_statement.search_text:
input_statement.search_text = self.storage.tagger.get_bigram_pair_string(input_statement.text)
if not input_statement.search_in_response_to and input_statement.in_response_to:
input_statement.search_in_response_to = self.storage.tagger.get_bigram_pair_string(input_statement.in_response_to)
response = self.generate_response(input_statement, additional_response_selection_parameters)
# Update any response data that needs to be changed
if persist_values_to_response:
for response_key in persist_values_to_response:
response_value = persist_values_to_response[response_key]
if response_key == 'tags':
input_statement.add_tags(*response_value)
response.add_tags(*response_value)
else:
setattr(input_statement, response_key, response_value)
setattr(response, response_key, response_value)
if not self.read_only:
self.learn_response(input_statement)
# Save the response generated for the input
self.storage.create(**response.serialize())
return response | python | def get_response(self, statement=None, **kwargs):
"""
Return the bot's response based on the input.
:param statement: A statement object or string.
:returns: A response to the input.
:rtype: Statement
:param additional_response_selection_parameters: Parameters to pass to the
chat bot's logic adapters to control response selection.
:type additional_response_selection_parameters: dict
:param persist_values_to_response: Values that should be saved to the response
that the chat bot generates.
:type persist_values_to_response: dict
"""
Statement = self.storage.get_object('statement')
additional_response_selection_parameters = kwargs.pop('additional_response_selection_parameters', {})
persist_values_to_response = kwargs.pop('persist_values_to_response', {})
if isinstance(statement, str):
kwargs['text'] = statement
if isinstance(statement, dict):
kwargs.update(statement)
if statement is None and 'text' not in kwargs:
raise self.ChatBotException(
'Either a statement object or a "text" keyword '
'argument is required. Neither was provided.'
)
if hasattr(statement, 'serialize'):
kwargs.update(**statement.serialize())
tags = kwargs.pop('tags', [])
text = kwargs.pop('text')
input_statement = Statement(text=text, **kwargs)
input_statement.add_tags(*tags)
# Preprocess the input statement
for preprocessor in self.preprocessors:
input_statement = preprocessor(input_statement)
# Make sure the input statement has its search text saved
if not input_statement.search_text:
input_statement.search_text = self.storage.tagger.get_bigram_pair_string(input_statement.text)
if not input_statement.search_in_response_to and input_statement.in_response_to:
input_statement.search_in_response_to = self.storage.tagger.get_bigram_pair_string(input_statement.in_response_to)
response = self.generate_response(input_statement, additional_response_selection_parameters)
# Update any response data that needs to be changed
if persist_values_to_response:
for response_key in persist_values_to_response:
response_value = persist_values_to_response[response_key]
if response_key == 'tags':
input_statement.add_tags(*response_value)
response.add_tags(*response_value)
else:
setattr(input_statement, response_key, response_value)
setattr(response, response_key, response_value)
if not self.read_only:
self.learn_response(input_statement)
# Save the response generated for the input
self.storage.create(**response.serialize())
return response | [
"def",
"get_response",
"(",
"self",
",",
"statement",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"Statement",
"=",
"self",
".",
"storage",
".",
"get_object",
"(",
"'statement'",
")",
"additional_response_selection_parameters",
"=",
"kwargs",
".",
"pop",
"(",
"'additional_response_selection_parameters'",
",",
"{",
"}",
")",
"persist_values_to_response",
"=",
"kwargs",
".",
"pop",
"(",
"'persist_values_to_response'",
",",
"{",
"}",
")",
"if",
"isinstance",
"(",
"statement",
",",
"str",
")",
":",
"kwargs",
"[",
"'text'",
"]",
"=",
"statement",
"if",
"isinstance",
"(",
"statement",
",",
"dict",
")",
":",
"kwargs",
".",
"update",
"(",
"statement",
")",
"if",
"statement",
"is",
"None",
"and",
"'text'",
"not",
"in",
"kwargs",
":",
"raise",
"self",
".",
"ChatBotException",
"(",
"'Either a statement object or a \"text\" keyword '",
"'argument is required. Neither was provided.'",
")",
"if",
"hasattr",
"(",
"statement",
",",
"'serialize'",
")",
":",
"kwargs",
".",
"update",
"(",
"*",
"*",
"statement",
".",
"serialize",
"(",
")",
")",
"tags",
"=",
"kwargs",
".",
"pop",
"(",
"'tags'",
",",
"[",
"]",
")",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'text'",
")",
"input_statement",
"=",
"Statement",
"(",
"text",
"=",
"text",
",",
"*",
"*",
"kwargs",
")",
"input_statement",
".",
"add_tags",
"(",
"*",
"tags",
")",
"# Preprocess the input statement",
"for",
"preprocessor",
"in",
"self",
".",
"preprocessors",
":",
"input_statement",
"=",
"preprocessor",
"(",
"input_statement",
")",
"# Make sure the input statement has its search text saved",
"if",
"not",
"input_statement",
".",
"search_text",
":",
"input_statement",
".",
"search_text",
"=",
"self",
".",
"storage",
".",
"tagger",
".",
"get_bigram_pair_string",
"(",
"input_statement",
".",
"text",
")",
"if",
"not",
"input_statement",
".",
"search_in_response_to",
"and",
"input_statement",
".",
"in_response_to",
":",
"input_statement",
".",
"search_in_response_to",
"=",
"self",
".",
"storage",
".",
"tagger",
".",
"get_bigram_pair_string",
"(",
"input_statement",
".",
"in_response_to",
")",
"response",
"=",
"self",
".",
"generate_response",
"(",
"input_statement",
",",
"additional_response_selection_parameters",
")",
"# Update any response data that needs to be changed",
"if",
"persist_values_to_response",
":",
"for",
"response_key",
"in",
"persist_values_to_response",
":",
"response_value",
"=",
"persist_values_to_response",
"[",
"response_key",
"]",
"if",
"response_key",
"==",
"'tags'",
":",
"input_statement",
".",
"add_tags",
"(",
"*",
"response_value",
")",
"response",
".",
"add_tags",
"(",
"*",
"response_value",
")",
"else",
":",
"setattr",
"(",
"input_statement",
",",
"response_key",
",",
"response_value",
")",
"setattr",
"(",
"response",
",",
"response_key",
",",
"response_value",
")",
"if",
"not",
"self",
".",
"read_only",
":",
"self",
".",
"learn_response",
"(",
"input_statement",
")",
"# Save the response generated for the input",
"self",
".",
"storage",
".",
"create",
"(",
"*",
"*",
"response",
".",
"serialize",
"(",
")",
")",
"return",
"response"
] | Return the bot's response based on the input.
:param statement: A statement object or string.
:returns: A response to the input.
:rtype: Statement
:param additional_response_selection_parameters: Parameters to pass to the
chat bot's logic adapters to control response selection.
:type additional_response_selection_parameters: dict
:param persist_values_to_response: Values that should be saved to the response
that the chat bot generates.
:type persist_values_to_response: dict | [
"Return",
"the",
"bot",
"s",
"response",
"based",
"on",
"the",
"input",
"."
] | 1a03dcb45cba7bdc24d3db5e750582e0cb1518e2 | https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/chatterbot.py#L57-L133 |
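A short usage sketch for `get_response`, based only on the signature and docstring in the record above (storage setup and training are omitted):

```python
from chatterbot import ChatBot

bot = ChatBot('Example')

# Plain string input: the bot preprocesses it, generates a response,
# and (unless read_only is set) learns the input statement.
response = bot.get_response('Hello, how are you?')
print(response.text, response.confidence)

# persist_values_to_response writes the given values onto both the input
# statement and the generated response before they are saved.
response = bot.get_response(
    'Good morning',
    persist_values_to_response={'tags': ['greeting']},
)
```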
26,393 | gunthercox/ChatterBot | chatterbot/chatterbot.py | ChatBot.generate_response | def generate_response(self, input_statement, additional_response_selection_parameters=None):
"""
Return a response based on a given input statement.
:param input_statement: The input statement to be processed.
"""
Statement = self.storage.get_object('statement')
results = []
result = None
max_confidence = -1
for adapter in self.logic_adapters:
if adapter.can_process(input_statement):
output = adapter.process(input_statement, additional_response_selection_parameters)
results.append(output)
self.logger.info(
'{} selected "{}" as a response with a confidence of {}'.format(
adapter.class_name, output.text, output.confidence
)
)
if output.confidence > max_confidence:
result = output
max_confidence = output.confidence
else:
self.logger.info(
'Not processing the statement using {}'.format(adapter.class_name)
)
class ResultOption:
def __init__(self, statement, count=1):
self.statement = statement
self.count = count
# If multiple adapters agree on the same statement,
# then that statement is more likely to be the correct response
if len(results) >= 3:
result_options = {}
for result_option in results:
result_string = result_option.text + ':' + (result_option.in_response_to or '')
if result_string in result_options:
result_options[result_string].count += 1
if result_options[result_string].statement.confidence < result_option.confidence:
result_options[result_string].statement = result_option
else:
result_options[result_string] = ResultOption(
result_option
)
most_common = list(result_options.values())[0]
for result_option in result_options.values():
if result_option.count > most_common.count:
most_common = result_option
if most_common.count > 1:
result = most_common.statement
response = Statement(
text=result.text,
in_response_to=input_statement.text,
conversation=input_statement.conversation,
persona='bot:' + self.name
)
response.confidence = result.confidence
return response | python | def generate_response(self, input_statement, additional_response_selection_parameters=None):
"""
Return a response based on a given input statement.
:param input_statement: The input statement to be processed.
"""
Statement = self.storage.get_object('statement')
results = []
result = None
max_confidence = -1
for adapter in self.logic_adapters:
if adapter.can_process(input_statement):
output = adapter.process(input_statement, additional_response_selection_parameters)
results.append(output)
self.logger.info(
'{} selected "{}" as a response with a confidence of {}'.format(
adapter.class_name, output.text, output.confidence
)
)
if output.confidence > max_confidence:
result = output
max_confidence = output.confidence
else:
self.logger.info(
'Not processing the statement using {}'.format(adapter.class_name)
)
class ResultOption:
def __init__(self, statement, count=1):
self.statement = statement
self.count = count
# If multiple adapters agree on the same statement,
# then that statement is more likely to be the correct response
if len(results) >= 3:
result_options = {}
for result_option in results:
result_string = result_option.text + ':' + (result_option.in_response_to or '')
if result_string in result_options:
result_options[result_string].count += 1
if result_options[result_string].statement.confidence < result_option.confidence:
result_options[result_string].statement = result_option
else:
result_options[result_string] = ResultOption(
result_option
)
most_common = list(result_options.values())[0]
for result_option in result_options.values():
if result_option.count > most_common.count:
most_common = result_option
if most_common.count > 1:
result = most_common.statement
response = Statement(
text=result.text,
in_response_to=input_statement.text,
conversation=input_statement.conversation,
persona='bot:' + self.name
)
response.confidence = result.confidence
return response | [
"def",
"generate_response",
"(",
"self",
",",
"input_statement",
",",
"additional_response_selection_parameters",
"=",
"None",
")",
":",
"Statement",
"=",
"self",
".",
"storage",
".",
"get_object",
"(",
"'statement'",
")",
"results",
"=",
"[",
"]",
"result",
"=",
"None",
"max_confidence",
"=",
"-",
"1",
"for",
"adapter",
"in",
"self",
".",
"logic_adapters",
":",
"if",
"adapter",
".",
"can_process",
"(",
"input_statement",
")",
":",
"output",
"=",
"adapter",
".",
"process",
"(",
"input_statement",
",",
"additional_response_selection_parameters",
")",
"results",
".",
"append",
"(",
"output",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'{} selected \"{}\" as a response with a confidence of {}'",
".",
"format",
"(",
"adapter",
".",
"class_name",
",",
"output",
".",
"text",
",",
"output",
".",
"confidence",
")",
")",
"if",
"output",
".",
"confidence",
">",
"max_confidence",
":",
"result",
"=",
"output",
"max_confidence",
"=",
"output",
".",
"confidence",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Not processing the statement using {}'",
".",
"format",
"(",
"adapter",
".",
"class_name",
")",
")",
"class",
"ResultOption",
":",
"def",
"__init__",
"(",
"self",
",",
"statement",
",",
"count",
"=",
"1",
")",
":",
"self",
".",
"statement",
"=",
"statement",
"self",
".",
"count",
"=",
"count",
"# If multiple adapters agree on the same statement,",
"# then that statement is more likely to be the correct response",
"if",
"len",
"(",
"results",
")",
">=",
"3",
":",
"result_options",
"=",
"{",
"}",
"for",
"result_option",
"in",
"results",
":",
"result_string",
"=",
"result_option",
".",
"text",
"+",
"':'",
"+",
"(",
"result_option",
".",
"in_response_to",
"or",
"''",
")",
"if",
"result_string",
"in",
"result_options",
":",
"result_options",
"[",
"result_string",
"]",
".",
"count",
"+=",
"1",
"if",
"result_options",
"[",
"result_string",
"]",
".",
"statement",
".",
"confidence",
"<",
"result_option",
".",
"confidence",
":",
"result_options",
"[",
"result_string",
"]",
".",
"statement",
"=",
"result_option",
"else",
":",
"result_options",
"[",
"result_string",
"]",
"=",
"ResultOption",
"(",
"result_option",
")",
"most_common",
"=",
"list",
"(",
"result_options",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"for",
"result_option",
"in",
"result_options",
".",
"values",
"(",
")",
":",
"if",
"result_option",
".",
"count",
">",
"most_common",
".",
"count",
":",
"most_common",
"=",
"result_option",
"if",
"most_common",
".",
"count",
">",
"1",
":",
"result",
"=",
"most_common",
".",
"statement",
"response",
"=",
"Statement",
"(",
"text",
"=",
"result",
".",
"text",
",",
"in_response_to",
"=",
"input_statement",
".",
"text",
",",
"conversation",
"=",
"input_statement",
".",
"conversation",
",",
"persona",
"=",
"'bot:'",
"+",
"self",
".",
"name",
")",
"response",
".",
"confidence",
"=",
"result",
".",
"confidence",
"return",
"response"
] | Return a response based on a given input statement.
:param input_statement: The input statement to be processed. | [
"Return",
"a",
"response",
"based",
"on",
"a",
"given",
"input",
"statement",
"."
] | 1a03dcb45cba7bdc24d3db5e750582e0cb1518e2 | https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/chatterbot.py#L135-L206 |
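The `ResultOption` voting block is worth a worked example: given adapter outputs ('Hi', 0.5), ('Hi', 0.9) and ('Hey', 0.8), the 'Hi' bucket reaches count 2 and wins even though 'Hey' outscored the first 'Hi'. A hedged reduction of that logic to plain tuples (the real key also appends `in_response_to`):

```python
# Stand-ins for logic-adapter outputs as (text, confidence) pairs.
results = [('Hi', 0.5), ('Hi', 0.9), ('Hey', 0.8)]

options = {}
for text, confidence in results:
    entry = options.setdefault(text, {'count': 0, 'confidence': 0.0})
    entry['count'] += 1
    # Keep the highest confidence seen for this text.
    entry['confidence'] = max(entry['confidence'], confidence)

text, best = max(options.items(), key=lambda kv: kv[1]['count'])
if best['count'] > 1:  # only override when several adapters agree
    print(text, best['confidence'])  # Hi 0.9
```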
26,394 | gunthercox/ChatterBot | chatterbot/chatterbot.py | ChatBot.learn_response | def learn_response(self, statement, previous_statement=None):
"""
Learn that the statement provided is a valid response.
"""
if not previous_statement:
previous_statement = statement.in_response_to
if not previous_statement:
previous_statement = self.get_latest_response(statement.conversation)
if previous_statement:
previous_statement = previous_statement.text
previous_statement_text = previous_statement
if not isinstance(previous_statement, (str, type(None), )):
statement.in_response_to = previous_statement.text
elif isinstance(previous_statement, str):
statement.in_response_to = previous_statement
self.logger.info('Adding "{}" as a response to "{}"'.format(
statement.text,
previous_statement_text
))
# Save the input statement
return self.storage.create(**statement.serialize()) | python | def learn_response(self, statement, previous_statement=None):
"""
Learn that the statement provided is a valid response.
"""
if not previous_statement:
previous_statement = statement.in_response_to
if not previous_statement:
previous_statement = self.get_latest_response(statement.conversation)
if previous_statement:
previous_statement = previous_statement.text
previous_statement_text = previous_statement
if not isinstance(previous_statement, (str, type(None), )):
statement.in_response_to = previous_statement.text
elif isinstance(previous_statement, str):
statement.in_response_to = previous_statement
self.logger.info('Adding "{}" as a response to "{}"'.format(
statement.text,
previous_statement_text
))
# Save the input statement
return self.storage.create(**statement.serialize()) | [
"def",
"learn_response",
"(",
"self",
",",
"statement",
",",
"previous_statement",
"=",
"None",
")",
":",
"if",
"not",
"previous_statement",
":",
"previous_statement",
"=",
"statement",
".",
"in_response_to",
"if",
"not",
"previous_statement",
":",
"previous_statement",
"=",
"self",
".",
"get_latest_response",
"(",
"statement",
".",
"conversation",
")",
"if",
"previous_statement",
":",
"previous_statement",
"=",
"previous_statement",
".",
"text",
"previous_statement_text",
"=",
"previous_statement",
"if",
"not",
"isinstance",
"(",
"previous_statement",
",",
"(",
"str",
",",
"type",
"(",
"None",
")",
",",
")",
")",
":",
"statement",
".",
"in_response_to",
"=",
"previous_statement",
".",
"text",
"elif",
"isinstance",
"(",
"previous_statement",
",",
"str",
")",
":",
"statement",
".",
"in_response_to",
"=",
"previous_statement",
"self",
".",
"logger",
".",
"info",
"(",
"'Adding \"{}\" as a response to \"{}\"'",
".",
"format",
"(",
"statement",
".",
"text",
",",
"previous_statement_text",
")",
")",
"# Save the input statement",
"return",
"self",
".",
"storage",
".",
"create",
"(",
"*",
"*",
"statement",
".",
"serialize",
"(",
")",
")"
] | Learn that the statement provided is a valid response. | [
"Learn",
"that",
"the",
"statement",
"provided",
"is",
"a",
"valid",
"response",
"."
] | 1a03dcb45cba7bdc24d3db5e750582e0cb1518e2 | https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/chatterbot.py#L208-L233 |
26,395 | gunthercox/ChatterBot | chatterbot/utils.py | import_module | def import_module(dotted_path):
"""
Imports the specified module based on the
dot notated import path for the module.
"""
import importlib
module_parts = dotted_path.split('.')
module_path = '.'.join(module_parts[:-1])
module = importlib.import_module(module_path)
return getattr(module, module_parts[-1]) | python | def import_module(dotted_path):
"""
Imports the specified module based on the
dot notated import path for the module.
"""
import importlib
module_parts = dotted_path.split('.')
module_path = '.'.join(module_parts[:-1])
module = importlib.import_module(module_path)
return getattr(module, module_parts[-1]) | [
"def",
"import_module",
"(",
"dotted_path",
")",
":",
"import",
"importlib",
"module_parts",
"=",
"dotted_path",
".",
"split",
"(",
"'.'",
")",
"module_path",
"=",
"'.'",
".",
"join",
"(",
"module_parts",
"[",
":",
"-",
"1",
"]",
")",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"module_path",
")",
"return",
"getattr",
"(",
"module",
",",
"module_parts",
"[",
"-",
"1",
"]",
")"
] | Imports the specified module based on the
dot notated import path for the module. | [
"Imports",
"the",
"specified",
"module",
"based",
"on",
"the",
"dot",
"notated",
"import",
"path",
"for",
"the",
"module",
"."
] | 1a03dcb45cba7bdc24d3db5e750582e0cb1518e2 | https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/utils.py#L6-L17 |
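`import_module` splits a dotted path, imports everything before the last dot, and looks up the final attribute. A runnable sketch using a stdlib target so nothing extra is needed:

```python
import importlib

def import_module(dotted_path):
    # Everything before the last dot names a module; the last part is an attribute.
    module_parts = dotted_path.split('.')
    module = importlib.import_module('.'.join(module_parts[:-1]))
    return getattr(module, module_parts[-1])

OrderedDict = import_module('collections.OrderedDict')
print(OrderedDict(a=1))  # OrderedDict([('a', 1)])
```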
26,396 | gunthercox/ChatterBot | chatterbot/utils.py | validate_adapter_class | def validate_adapter_class(validate_class, adapter_class):
"""
Raises an exception if validate_class is not a
subclass of adapter_class.
:param validate_class: The class to be validated.
:type validate_class: class
:param adapter_class: The class type to check against.
:type adapter_class: class
:raises: Adapter.InvalidAdapterTypeException
"""
from chatterbot.adapters import Adapter
# If a dictionary was passed in, check if it has an import_path attribute
if isinstance(validate_class, dict):
if 'import_path' not in validate_class:
raise Adapter.InvalidAdapterTypeException(
'The dictionary {} must contain a value for "import_path"'.format(
str(validate_class)
)
)
# Set the class to the import path for the next check
validate_class = validate_class.get('import_path')
if not issubclass(import_module(validate_class), adapter_class):
raise Adapter.InvalidAdapterTypeException(
'{} must be a subclass of {}'.format(
validate_class,
adapter_class.__name__
)
) | python | def validate_adapter_class(validate_class, adapter_class):
"""
Raises an exception if validate_class is not a
subclass of adapter_class.
:param validate_class: The class to be validated.
:type validate_class: class
:param adapter_class: The class type to check against.
:type adapter_class: class
:raises: Adapter.InvalidAdapterTypeException
"""
from chatterbot.adapters import Adapter
# If a dictionary was passed in, check if it has an import_path attribute
if isinstance(validate_class, dict):
if 'import_path' not in validate_class:
raise Adapter.InvalidAdapterTypeException(
'The dictionary {} must contain a value for "import_path"'.format(
str(validate_class)
)
)
# Set the class to the import path for the next check
validate_class = validate_class.get('import_path')
if not issubclass(import_module(validate_class), adapter_class):
raise Adapter.InvalidAdapterTypeException(
'{} must be a subclass of {}'.format(
validate_class,
adapter_class.__name__
)
) | [
"def",
"validate_adapter_class",
"(",
"validate_class",
",",
"adapter_class",
")",
":",
"from",
"chatterbot",
".",
"adapters",
"import",
"Adapter",
"# If a dictionary was passed in, check if it has an import_path attribute",
"if",
"isinstance",
"(",
"validate_class",
",",
"dict",
")",
":",
"if",
"'import_path'",
"not",
"in",
"validate_class",
":",
"raise",
"Adapter",
".",
"InvalidAdapterTypeException",
"(",
"'The dictionary {} must contain a value for \"import_path\"'",
".",
"format",
"(",
"str",
"(",
"validate_class",
")",
")",
")",
"# Set the class to the import path for the next check",
"validate_class",
"=",
"validate_class",
".",
"get",
"(",
"'import_path'",
")",
"if",
"not",
"issubclass",
"(",
"import_module",
"(",
"validate_class",
")",
",",
"adapter_class",
")",
":",
"raise",
"Adapter",
".",
"InvalidAdapterTypeException",
"(",
"'{} must be a subclass of {}'",
".",
"format",
"(",
"validate_class",
",",
"adapter_class",
".",
"__name__",
")",
")"
] | Raises an exception if validate_class is not a
subclass of adapter_class.
:param validate_class: The class to be validated.
:type validate_class: class
:param adapter_class: The class type to check against.
:type adapter_class: class
:raises: Adapter.InvalidAdapterTypeException | [
"Raises",
"an",
"exception",
"if",
"validate_class",
"is",
"not",
"a",
"subclass",
"of",
"adapter_class",
"."
] | 1a03dcb45cba7bdc24d3db5e750582e0cb1518e2 | https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/utils.py#L36-L70 |
26,397 | gunthercox/ChatterBot | chatterbot/utils.py | get_response_time | def get_response_time(chatbot, statement='Hello'):
"""
Returns the amount of time taken for a given
chat bot to return a response.
:param chatbot: A chat bot instance.
:type chatbot: ChatBot
:returns: The response time in seconds.
:rtype: float
"""
import time
start_time = time.time()
chatbot.get_response(statement)
return time.time() - start_time | python | def get_response_time(chatbot, statement='Hello'):
"""
Returns the amount of time taken for a given
chat bot to return a response.
:param chatbot: A chat bot instance.
:type chatbot: ChatBot
:returns: The response time in seconds.
:rtype: float
"""
import time
start_time = time.time()
chatbot.get_response(statement)
return time.time() - start_time | [
"def",
"get_response_time",
"(",
"chatbot",
",",
"statement",
"=",
"'Hello'",
")",
":",
"import",
"time",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"chatbot",
".",
"get_response",
"(",
"statement",
")",
"return",
"time",
".",
"time",
"(",
")",
"-",
"start_time"
] | Returns the amount of time taken for a given
chat bot to return a response.
:param chatbot: A chat bot instance.
:type chatbot: ChatBot
:returns: The response time in seconds.
:rtype: float | [
"Returns",
"the",
"amount",
"of",
"time",
"taken",
"for",
"a",
"given",
"chat",
"bot",
"to",
"return",
"a",
"response",
"."
] | 1a03dcb45cba7bdc24d3db5e750582e0cb1518e2 | https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/utils.py#L73-L90 |
26,398 | gunthercox/ChatterBot | chatterbot/logic/unit_conversion.py | UnitConversion.get_valid_units | def get_valid_units(self, ureg, from_unit, target_unit):
"""
Returns the first match `pint.unit.Unit` object for from_unit and
target_unit strings from a possible variation of metric unit names
supported by pint library.
:param ureg: unit registry in which units are defined and handled
:type ureg: `pint.registry.UnitRegistry`
:param from_unit: source metric unit
:type from_unit: str
:param target_unit: target metric unit
:type target_unit: str
"""
from_unit_variations = [from_unit.lower(), from_unit.upper()]
target_unit_variations = [target_unit.lower(), target_unit.upper()]
from_unit = self.get_unit(ureg, from_unit_variations)
target_unit = self.get_unit(ureg, target_unit_variations)
return from_unit, target_unit | python | def get_valid_units(self, ureg, from_unit, target_unit):
"""
Returns the first match `pint.unit.Unit` object for from_unit and
target_unit strings from a possible variation of metric unit names
supported by pint library.
:param ureg: unit registry in which units are defined and handled
:type ureg: `pint.registry.UnitRegistry`
:param from_unit: source metric unit
:type from_unit: str
:param target_unit: target metric unit
:type target_unit: str
"""
from_unit_variations = [from_unit.lower(), from_unit.upper()]
target_unit_variations = [target_unit.lower(), target_unit.upper()]
from_unit = self.get_unit(ureg, from_unit_variations)
target_unit = self.get_unit(ureg, target_unit_variations)
return from_unit, target_unit | [
"def",
"get_valid_units",
"(",
"self",
",",
"ureg",
",",
"from_unit",
",",
"target_unit",
")",
":",
"from_unit_variations",
"=",
"[",
"from_unit",
".",
"lower",
"(",
")",
",",
"from_unit",
".",
"upper",
"(",
")",
"]",
"target_unit_variations",
"=",
"[",
"target_unit",
".",
"lower",
"(",
")",
",",
"target_unit",
".",
"upper",
"(",
")",
"]",
"from_unit",
"=",
"self",
".",
"get_unit",
"(",
"ureg",
",",
"from_unit_variations",
")",
"target_unit",
"=",
"self",
".",
"get_unit",
"(",
"ureg",
",",
"target_unit_variations",
")",
"return",
"from_unit",
",",
"target_unit"
] | Returns the first match `pint.unit.Unit` object for from_unit and
target_unit strings from a possible variation of metric unit names
supported by pint library.
:param ureg: unit registry in which units are defined and handled
:type ureg: `pint.registry.UnitRegistry`
:param from_unit: source metric unit
:type from_unit: str
:param target_unit: target metric unit
:type target_unit: str | [
"Returns",
"the",
"firt",
"match",
"pint",
".",
"unit",
".",
"Unit",
"object",
"for",
"from_unit",
"and",
"target_unit",
"strings",
"from",
"a",
"possible",
"variation",
"of",
"metric",
"unit",
"names",
"supported",
"by",
"pint",
"library",
"."
] | 1a03dcb45cba7bdc24d3db5e750582e0cb1518e2 | https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/logic/unit_conversion.py#L85-L104 |
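A hedged sketch of the lookup `get_valid_units` performs. The `get_unit` helper it calls is defined elsewhere in the adapter and is not part of this record, so `first_valid_unit` below is a hypothetical stand-in; it assumes the `pint` package and that unknown unit names raise an `AttributeError` subclass:

```python
from pint import UnitRegistry

ureg = UnitRegistry()

def first_valid_unit(ureg, names):
    # Hypothetical stand-in for the adapter's get_unit(): return the first
    # name pint recognises as a unit, or None when nothing matches.
    for name in names:
        try:
            return getattr(ureg, name)
        except AttributeError:
            continue
    return None

from_unit = first_valid_unit(ureg, ['km', 'KM'])
target_unit = first_valid_unit(ureg, ['mi', 'MI'])
print(ureg.Quantity(5.0, from_unit).to(target_unit))  # roughly 3.107 mile
```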
26,399 | gunthercox/ChatterBot | chatterbot/logic/unit_conversion.py | UnitConversion.handle_matches | def handle_matches(self, match):
"""
Returns a response statement from a matched input statement.
:param match: It is a valid matched pattern from the input statement
:type match: `_sre.SRE_Match`
"""
response = Statement(text='')
from_parsed = match.group("from")
target_parsed = match.group("target")
n_statement = match.group("number")
if n_statement == 'a' or n_statement == 'an':
n_statement = '1.0'
n = mathparse.parse(n_statement, self.language.ISO_639.upper())
ureg = UnitRegistry()
from_parsed, target_parsed = self.get_valid_units(ureg, from_parsed, target_parsed)
if from_parsed is None or target_parsed is None:
response.confidence = 0.0
else:
from_value = ureg.Quantity(float(n), from_parsed)
target_value = from_value.to(target_parsed)
response.confidence = 1.0
response.text = str(target_value.magnitude)
return response | python | def handle_matches(self, match):
"""
Returns a response statement from a matched input statement.
:param match: It is a valid matched pattern from the input statement
:type match: `_sre.SRE_Match`
"""
response = Statement(text='')
from_parsed = match.group("from")
target_parsed = match.group("target")
n_statement = match.group("number")
if n_statement == 'a' or n_statement == 'an':
n_statement = '1.0'
n = mathparse.parse(n_statement, self.language.ISO_639.upper())
ureg = UnitRegistry()
from_parsed, target_parsed = self.get_valid_units(ureg, from_parsed, target_parsed)
if from_parsed is None or target_parsed is None:
response.confidence = 0.0
else:
from_value = ureg.Quantity(float(n), from_parsed)
target_value = from_value.to(target_parsed)
response.confidence = 1.0
response.text = str(target_value.magnitude)
return response | [
"def",
"handle_matches",
"(",
"self",
",",
"match",
")",
":",
"response",
"=",
"Statement",
"(",
"text",
"=",
"''",
")",
"from_parsed",
"=",
"match",
".",
"group",
"(",
"\"from\"",
")",
"target_parsed",
"=",
"match",
".",
"group",
"(",
"\"target\"",
")",
"n_statement",
"=",
"match",
".",
"group",
"(",
"\"number\"",
")",
"if",
"n_statement",
"==",
"'a'",
"or",
"n_statement",
"==",
"'an'",
":",
"n_statement",
"=",
"'1.0'",
"n",
"=",
"mathparse",
".",
"parse",
"(",
"n_statement",
",",
"self",
".",
"language",
".",
"ISO_639",
".",
"upper",
"(",
")",
")",
"ureg",
"=",
"UnitRegistry",
"(",
")",
"from_parsed",
",",
"target_parsed",
"=",
"self",
".",
"get_valid_units",
"(",
"ureg",
",",
"from_parsed",
",",
"target_parsed",
")",
"if",
"from_parsed",
"is",
"None",
"or",
"target_parsed",
"is",
"None",
":",
"response",
".",
"confidence",
"=",
"0.0",
"else",
":",
"from_value",
"=",
"ureg",
".",
"Quantity",
"(",
"float",
"(",
"n",
")",
",",
"from_parsed",
")",
"target_value",
"=",
"from_value",
".",
"to",
"(",
"target_parsed",
")",
"response",
".",
"confidence",
"=",
"1.0",
"response",
".",
"text",
"=",
"str",
"(",
"target_value",
".",
"magnitude",
")",
"return",
"response"
] | Returns a response statement from a matched input statement.
:param match: It is a valid matched pattern from the input statement
:type match: `_sre.SRE_Match` | [
"Returns",
"a",
"response",
"statement",
"from",
"a",
"matched",
"input",
"statement",
"."
] | 1a03dcb45cba7bdc24d3db5e750582e0cb1518e2 | https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/logic/unit_conversion.py#L106-L135 |