Column schema (name, type, observed range):

  partition          stringclasses    3 values
  func_name          stringlengths    1 to 134
  docstring          stringlengths    1 to 46.9k
  path               stringlengths    4 to 223
  original_string    stringlengths    75 to 104k
  code               stringlengths    75 to 104k
  docstring_tokens   listlengths      1 to 1.97k
  repo               stringlengths    7 to 55
  language           stringclasses    1 value
  url                stringlengths    87 to 315
  code_tokens        listlengths      19 to 28.4k
  sha                stringlengths    40 to 40

Each record below lists its field values in this column order. In every record shown, original_string and code are identical, so the function body appears once per record.
train
KaggleCompetitionDownloader.download_file
Downloads competition file to output_dir.
tensorflow_datasets/core/download/kaggle.py
def download_file(self, fname, output_dir):
  """Downloads competition file to output_dir."""
  if fname not in self.competition_files:  # pylint: disable=unsupported-membership-test
    raise ValueError("%s is not one of the competition's "
                     "files: %s" % (fname, self.competition_files))
  command = [
      "kaggle",
      "competitions",
      "download",
      "--file",
      fname,
      "--path",
      output_dir,
      "-c",
      self._competition_name,
  ]
  _run_kaggle_command(command, self._competition_name)
  return os.path.join(output_dir, fname)
[ "Downloads", "competition", "file", "to", "output_dir", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/kaggle.py#L118-L135
[ "def", "download_file", "(", "self", ",", "fname", ",", "output_dir", ")", ":", "if", "fname", "not", "in", "self", ".", "competition_files", ":", "# pylint: disable=unsupported-membership-test", "raise", "ValueError", "(", "\"%s is not one of the competition's \"", "\"...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
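The command list assembled above maps one-to-one onto the Kaggle CLI. A standalone sketch of the same invocation, assuming an installed and authenticated `kaggle` CLI, with plain subprocess error handling in place of the library's _run_kaggle_command helper:

import os
import subprocess

def download_competition_file(fname, output_dir, competition):
  """Fetch one competition file via the kaggle CLI; return its local path."""
  command = [
      'kaggle', 'competitions', 'download',
      '--file', fname,
      '--path', output_dir,
      '-c', competition,
  ]
  subprocess.run(command, check=True)  # raises CalledProcessError on failure
  return os.path.join(output_dir, fname)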
train
TFFlowers._generate_examples
Generate flower images and labels given the image directory path.

Args:
  images_dir_path: path to the directory where the images are stored.

Yields:
  The image path and its corresponding label.
tensorflow_datasets/image/flowers.py
def _generate_examples(self, images_dir_path):
  """Generate flower images and labels given the image directory path.

  Args:
    images_dir_path: path to the directory where the images are stored.

  Yields:
    The image path and its corresponding label.
  """
  parent_dir = tf.io.gfile.listdir(images_dir_path)[0]
  walk_dir = os.path.join(images_dir_path, parent_dir)
  dirs = tf.io.gfile.listdir(walk_dir)

  for d in dirs:
    if tf.io.gfile.isdir(os.path.join(walk_dir, d)):
      for full_path, _, fname in tf.io.gfile.walk(os.path.join(walk_dir, d)):
        for image_file in fname:
          if image_file.endswith(".jpg"):
            image_path = os.path.join(full_path, image_file)
            yield {
                "image": image_path,
                "label": d.lower(),
            }
[ "Generate", "flower", "images", "and", "labels", "given", "the", "image", "directory", "path", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/flowers.py#L71-L93
[ "def", "_generate_examples", "(", "self", ",", "images_dir_path", ")", ":", "parent_dir", "=", "tf", ".", "io", ".", "gfile", ".", "listdir", "(", "images_dir_path", ")", "[", "0", "]", "walk_dir", "=", "os", ".", "path", ".", "join", "(", "images_dir_pa...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
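The same labeled-image walk, expressed with the standard library alone for a local copy of the archive; this sketch assumes a layout of <root>/<label>/**/*.jpg rather than the gfile-backed layout above:

import os

def iter_flower_examples(images_dir_path):
  """Yield {'image': path, 'label': dirname} for every .jpg under each label."""
  for label in sorted(os.listdir(images_dir_path)):
    label_dir = os.path.join(images_dir_path, label)
    if not os.path.isdir(label_dir):
      continue
    for dirpath, _, filenames in os.walk(label_dir):
      for fname in filenames:
        if fname.endswith('.jpg'):
          yield {'image': os.path.join(dirpath, fname),
                 'label': label.lower()}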
train
_checksum_paths
Returns dict {'dataset_name': 'path/to/checksums/file'}.
tensorflow_datasets/core/download/checksums.py
def _checksum_paths():
  """Returns dict {'dataset_name': 'path/to/checksums/file'}."""
  dataset2path = {}
  for dir_path in _CHECKSUM_DIRS:
    for fname in _list_dir(dir_path):
      if not fname.endswith(_CHECKSUM_SUFFIX):
        continue
      fpath = os.path.join(dir_path, fname)
      dataset_name = fname[:-len(_CHECKSUM_SUFFIX)]
      dataset2path[dataset_name] = fpath
  return dataset2path
[ "Returns", "dict", "{", "dataset_name", ":", "path", "/", "to", "/", "checksums", "/", "file", "}", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L46-L56
[ "def", "_checksum_paths", "(", ")", ":", "dataset2path", "=", "{", "}", "for", "dir_path", "in", "_CHECKSUM_DIRS", ":", "for", "fname", "in", "_list_dir", "(", "dir_path", ")", ":", "if", "not", "fname", ".", "endswith", "(", "_CHECKSUM_SUFFIX", ")", ":", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
_get_path
Returns path to where checksums are stored for a given dataset.
tensorflow_datasets/core/download/checksums.py
def _get_path(dataset_name):
  """Returns path to where checksums are stored for a given dataset."""
  path = _checksum_paths().get(dataset_name, None)
  if path:
    return path
  msg = ('No checksums file could be found for dataset %s. Please create one '
         'in one of: %s') % (dataset_name, ', '.join(_CHECKSUM_DIRS))
  raise AssertionError(msg)
[ "Returns", "path", "to", "where", "checksums", "are", "stored", "for", "a", "given", "dataset", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L59-L66
[ "def", "_get_path", "(", "dataset_name", ")", ":", "path", "=", "_checksum_paths", "(", ")", ".", "get", "(", "dataset_name", ",", "None", ")", "if", "path", ":", "return", "path", "msg", "=", "(", "'No checksums file could be find for dataset %s. Please create on...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
_get_sizes_checksums
Returns {URL: (size, checksum)}s stored within file.
tensorflow_datasets/core/download/checksums.py
def _get_sizes_checksums(checksums_path):
  """Returns {URL: (size, checksum)}s stored within file."""
  checksums = {}
  for line in _read_file(checksums_path).split('\n'):
    if not line:
      continue
    # URL might have spaces inside, but size and checksum will not.
    url, size, checksum = line.rsplit(' ', 2)
    checksums[url] = (int(size), checksum)
  return checksums
[ "Returns", "{", "URL", ":", "(", "size", "checksum", ")", "}", "s", "stored", "within", "file", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L75-L84
[ "def", "_get_sizes_checksums", "(", "checksums_path", ")", ":", "checksums", "=", "{", "}", "for", "line", "in", "_read_file", "(", "checksums_path", ")", ".", "split", "(", "'\\n'", ")", ":", "if", "not", "line", ":", "continue", "# URL might have spaces insi...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
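The on-disk format this parser expects is one `url size checksum` triple per line, split from the right so that spaces inside the URL survive. A self-contained illustration, with a hypothetical entry:

line = 'https://example.com/some file.zip 1024 deadbeef'  # hypothetical entry
url, size, checksum = line.rsplit(' ', 2)
assert url == 'https://example.com/some file.zip'
assert (int(size), checksum) == (1024, 'deadbeef')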
train
get_all_sizes_checksums
Returns dict associating URL to (size, sha256).
tensorflow_datasets/core/download/checksums.py
def get_all_sizes_checksums():
  """Returns dict associating URL to (size, sha256)."""
  sizes_checksums = {}
  for path in _checksum_paths().values():
    data = _get_sizes_checksums(path)
    for url, size_checksum in data.items():
      if (url in sizes_checksums and
          sizes_checksums[url] != size_checksum):
        raise AssertionError(
            'URL %s is registered with 2+ distinct size/checksum tuples.' % url)
    sizes_checksums.update(data)
  return sizes_checksums
[ "Returns", "dict", "associating", "URL", "to", "(", "size", "sha256", ")", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L88-L99
[ "def", "get_all_sizes_checksums", "(", ")", ":", "sizes_checksums", "=", "{", "}", "for", "path", "in", "_checksum_paths", "(", ")", ".", "values", "(", ")", ":", "data", "=", "_get_sizes_checksums", "(", "path", ")", "for", "url", ",", "size_checksum", "i...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
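The duplicate check above is a conflict-detecting dict union. A minimal generic sketch of the same idea, independent of the checksum machinery:

def merge_unique(dicts):
  """Union several dicts, raising if one key maps to two distinct values."""
  merged = {}
  for d in dicts:
    for key, value in d.items():
      if key in merged and merged[key] != value:
        raise AssertionError(
            '%s is registered with 2+ distinct values.' % key)
    merged.update(d)
  return merged

assert merge_unique([{'a': 1}, {'b': 2}]) == {'a': 1, 'b': 2}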
train
store_checksums
Store given checksums and sizes for specific dataset.

Content of file is never discarded, only updated. This is to ensure that if
process is killed right after first download finishes, checksums registered
during previous runs aren't lost.

It is the responsibility of the caller not to call function multiple times in
parallel for a given dataset.

Only original file content is updated. This means the entire set of new sizes
and checksums must be given at every call.

Args:
  dataset_name: string.
  sizes_checksums: dict, {url: (size_in_bytes, checksum)}.
tensorflow_datasets/core/download/checksums.py
def store_checksums(dataset_name, sizes_checksums):
  """Store given checksums and sizes for specific dataset.

  Content of file is never discarded, only updated. This is to ensure that if
  process is killed right after first download finishes, checksums registered
  during previous runs aren't lost.

  It is the responsibility of the caller not to call function multiple times
  in parallel for a given dataset.

  Only original file content is updated. This means the entire set of new
  sizes and checksums must be given at every call.

  Args:
    dataset_name: string.
    sizes_checksums: dict, {url: (size_in_bytes, checksum)}.
  """
  path = _get_path(dataset_name)
  original_data = _get_sizes_checksums(path)
  new_data = original_data.copy()
  new_data.update(sizes_checksums)
  if original_data == new_data:
    return
  with tf.io.gfile.GFile(path, 'w') as f:
    for url, (size, checksum) in sorted(new_data.items()):
      f.write('%s %s %s\n' % (url, size, checksum))
[ "Store", "given", "checksums", "and", "sizes", "for", "specific", "dataset", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/checksums.py#L102-L127
[ "def", "store_checksums", "(", "dataset_name", ",", "sizes_checksums", ")", ":", "path", "=", "_get_path", "(", "dataset_name", ")", "original_data", "=", "_get_sizes_checksums", "(", "path", ")", "new_data", "=", "original_data", ".", "copy", "(", ")", "new_dat...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
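store_checksums therefore reduces to read, merge, compare, rewrite. A plain-file sketch of that pattern, using the same line format as the parser above but none of the TFDS/gfile helpers:

def update_checksums_file(path, new_entries):
  """Merge url -> (size, checksum) entries into path, keeping old entries."""
  old = {}
  try:
    with open(path) as f:
      lines = [l.rstrip('\n') for l in f]
  except FileNotFoundError:
    lines = []
  for line in lines:
    if not line:
      continue
    url, size, checksum = line.rsplit(' ', 2)
    old[url] = (int(size), checksum)
  merged = dict(old)
  merged.update(new_entries)
  if merged == old:
    return  # Nothing new; leave the file untouched.
  with open(path, 'w') as f:
    for url, (size, checksum) in sorted(merged.items()):
      f.write('%s %s %s\n' % (url, size, checksum))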
train
_guess_extract_method
Guess extraction method, given file name (or path).
tensorflow_datasets/core/download/resource.py
def _guess_extract_method(fname):
  """Guess extraction method, given file name (or path)."""
  for method, extensions in _EXTRACTION_METHOD_TO_EXTS:
    for ext in extensions:
      if fname.endswith(ext):
        return method
  return ExtractMethod.NO_EXTRACT
[ "Guess", "extraction", "method", "given", "file", "name", "(", "or", "path", ")", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L93-L99
[ "def", "_guess_extract_method", "(", "fname", ")", ":", "for", "method", ",", "extensions", "in", "_EXTRACTION_METHOD_TO_EXTS", ":", "for", "ext", "in", "extensions", ":", "if", "fname", ".", "endswith", "(", "ext", ")", ":", "return", "method", "return", "E...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
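_EXTRACTION_METHOD_TO_EXTS and ExtractMethod are defined outside this excerpt. A plausible shape for them, purely illustrative (the real constants in resource.py may differ), showing why the table must list specific suffixes like '.tar.gz' before generic ones like '.gz':

import enum

class ExtractMethod(enum.Enum):  # illustrative stand-in for the real enum
  NO_EXTRACT = 0
  TAR = 1
  TAR_GZ = 2
  GZIP = 3
  ZIP = 4

_EXTRACTION_METHOD_TO_EXTS = [
    (ExtractMethod.TAR_GZ, ['.tar.gz', '.tgz']),
    (ExtractMethod.TAR, ['.tar', '.tar.bz2']),
    (ExtractMethod.ZIP, ['.zip']),
    (ExtractMethod.GZIP, ['.gz']),  # after TAR_GZ, or 'x.tar.gz' would match here
]

def guess_extract_method(fname):
  for method, extensions in _EXTRACTION_METHOD_TO_EXTS:
    for ext in extensions:
      if fname.endswith(ext):
        return method
  return ExtractMethod.NO_EXTRACT

assert guess_extract_method('data.tar.gz') is ExtractMethod.TAR_GZ
assert guess_extract_method('README') is ExtractMethod.NO_EXTRACT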
train
_sanitize_url
Sanitize and shorten url to fit in max_length.

Function is stable: same input MUST ALWAYS give same result, across changes
in code as well. Different URLs might give same result. As much as possible,
the extension should be kept.

Heuristics are applied to only keep useful info from url.

1- Drop generic [sub]domains.
  'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...'
  'storage.googleapis.com/foo/...' -> 'foo/...'
  'drive.google.com/bar/...' -> 'bar/...'
  'github.com/baz/...' -> 'baz/...'

2- Remove leading '0's from url components:
  'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords'

3- Truncate each component of url until total size fits or each component is
   left with 4 chars (or total size is <= limit):
  'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords'
   (here truncate components to 4 chars per component max)
  -> 'Move_64x6_png/trai-4-of-10.tfrecords'

4- Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def'

Args:
  url: string, url to sanitize and shorten.
  max_length: int, max length of result.

Returns:
  (string, string): sanitized and shortened url, file extension.
tensorflow_datasets/core/download/resource.py
def _sanitize_url(url, max_length):
  """Sanitize and shorten url to fit in max_length.

  Function is stable: same input MUST ALWAYS give same result, across changes
  in code as well. Different URLs might give same result. As much as possible,
  the extension should be kept.

  Heuristics are applied to only keep useful info from url.

  1- Drop generic [sub]domains.
    'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...'
    'storage.googleapis.com/foo/...' -> 'foo/...'
    'drive.google.com/bar/...' -> 'bar/...'
    'github.com/baz/...' -> 'baz/...'

  2- Remove leading '0's from url components:
    'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords'

  3- Truncate each component of url until total size fits or each component
     is left with 4 chars (or total size is <= limit):
    'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords'
     (here truncate components to 4 chars per component max)
    -> 'Move_64x6_png/trai-4-of-10.tfrecords'

  4- Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def'

  Args:
    url: string, url to sanitize and shorten.
    max_length: int, max length of result.

  Returns:
    (string, string): sanitized and shortened url, file extension.
  """
  url = urllib.parse.urlparse(url)
  netloc = url.netloc
  for prefix in _NETLOC_COMMON_PREFIXES:
    if netloc.startswith(prefix):
      netloc = netloc[len(prefix):]
  for suffix in _NETLOC_COMMON_SUFFIXES:
    if netloc.endswith(suffix):
      netloc = netloc[:-len(suffix)]
  url = '%s%s%s%s' % (netloc, url.path, url.params, url.query)
  # Get the extension:
  for ext in _KNOWN_EXTENSIONS:
    if url.endswith(ext):
      extension = ext
      url = url[:-len(extension)]
      break
  else:
    url, extension = os.path.splitext(url)
  max_length -= len(extension)
  # Replace non authorized chars (including '/') by '_':
  url = re.sub(r'[^a-zA-Z0-9\.\-_]+', '_', url)
  # Remove parts with no info:
  for common_part in _URL_COMMON_PARTS:
    url = url.replace(common_part, '_')
  url = url.strip('_')
  # Remove leading zeros in groups of numbers:
  url = re.sub('(?<![0-9])0+(?=[0-9])', '', url)
  # Decrease max size of URL components:
  c_size = max(len(c) for c in re.split(r'[\.\-_]', url))
  while c_size > 4 and len(url) > max_length:
    c_size -= 1
    url = re.sub(r'[^\.\-_]{4,}',
                 lambda match: match.group(0)[:c_size], url)
  return url[:max_length], extension
[ "Sanitize", "and", "shorten", "url", "to", "fit", "in", "max_length", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L102-L166
[ "def", "_sanitize_url", "(", "url", ",", "max_length", ")", ":", "url", "=", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", "netloc", "=", "url", ".", "netloc", "for", "prefix", "in", "_NETLOC_COMMON_PREFIXES", ":", "if", "netloc", ".", "start...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
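Two of the heuristics above are compact enough to demonstrate in isolation; the regexes are taken verbatim from the function body:

import re

# Step 2: strip leading zeros inside digit groups.
assert re.sub('(?<![0-9])0+(?=[0-9])', '',
              'foo/train-00004-of-00010.tfrecords') == 'foo/train-4-of-10.tfrecords'

# Step 3: cap each [.-_]-separated component at c_size characters.
c_size = 4
shrunk = re.sub(r'[^\.\-_]{4,}', lambda match: match.group(0)[:c_size],
                'MoveUnitToBorder_64x64_png')
assert shrunk == 'Move_64x6_png'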
train
get_dl_fname
Returns name of file for (url, checksum).

The max length of linux and windows filenames is 255 chars.
Windows however expects short paths (260 chars), so we limit the file name
to an arbitrary 90 chars.

Naming pattern: '${url}${checksum}'.
 - url: url sanitized and shortened to 46 chars.
 - checksum: base64url encoded sha256: 44 chars (removing trailing '=').

Args:
  url: `str`, url of the file.
  checksum: `str` (hex), the sha256 hexdigest of file or url.

Returns:
  string of 90 chars max.
tensorflow_datasets/core/download/resource.py
def get_dl_fname(url, checksum):
  """Returns name of file for (url, checksum).

  The max length of linux and windows filenames is 255 chars.
  Windows however expects short paths (260 chars), so we limit the file name
  to an arbitrary 90 chars.

  Naming pattern: '${url}${checksum}'.
   - url: url sanitized and shortened to 46 chars.
   - checksum: base64url encoded sha256: 44 chars (removing trailing '=').

  Args:
    url: `str`, url of the file.
    checksum: `str` (hex), the sha256 hexdigest of file or url.

  Returns:
    string of 90 chars max.
  """
  checksum = base64.urlsafe_b64encode(_decode_hex(checksum))
  checksum = tf.compat.as_text(checksum)[:-1]
  name, extension = _sanitize_url(url, max_length=46)
  return '%s%s%s' % (name, checksum, extension)
[ "Returns", "name", "of", "file", "for", "(", "url", "checksum", ")", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L169-L190
[ "def", "get_dl_fname", "(", "url", ",", "checksum", ")", ":", "checksum", "=", "base64", ".", "urlsafe_b64encode", "(", "_decode_hex", "(", "checksum", ")", ")", "checksum", "=", "tf", ".", "compat", ".", "as_text", "(", "checksum", ")", "[", ":", "-", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
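The 44-char figure in the docstring comes from urlsafe-base64 encoding the 32-byte sha256 digest; the result always ends in a single '=' pad, which is what the `[:-1]` slice drops. A standalone check:

import base64
import hashlib

digest_hex = hashlib.sha256(b'http://example.com/data.zip').hexdigest()  # 64 hex chars
digest_raw = bytes.fromhex(digest_hex)                   # 32 bytes
encoded = base64.urlsafe_b64encode(digest_raw).decode()  # 44 chars
assert len(encoded) == 44 and encoded.endswith('=')
name_part = encoded[:-1]                                 # 43 chars kept in the filename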
train
get_dl_dirname
Returns name of temp dir for given url.
tensorflow_datasets/core/download/resource.py
def get_dl_dirname(url):
  """Returns name of temp dir for given url."""
  checksum = hashlib.sha256(tf.compat.as_bytes(url)).hexdigest()
  return get_dl_fname(url, checksum)
[ "Returns", "name", "of", "temp", "dir", "for", "given", "url", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L193-L196
[ "def", "get_dl_dirname", "(", "url", ")", ":", "checksum", "=", "hashlib", ".", "sha256", "(", "tf", ".", "compat", ".", "as_bytes", "(", "url", ")", ")", ".", "hexdigest", "(", ")", "return", "get_dl_fname", "(", "url", ",", "checksum", ")" ]
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
_read_info
Returns info dict or None.
tensorflow_datasets/core/download/resource.py
def _read_info(info_path):
  """Returns info dict or None."""
  if not tf.io.gfile.exists(info_path):
    return None
  with tf.io.gfile.GFile(info_path) as info_f:
    return json.load(info_f)
[ "Returns", "info", "dict", "or", "None", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L204-L209
[ "def", "_read_info", "(", "info_path", ")", ":", "if", "not", "tf", ".", "io", ".", "gfile", ".", "exists", "(", "info_path", ")", ":", "return", "None", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "info_path", ")", "as", "info_f", ":...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
write_info_file
Write the INFO file next to local file.

Although the method is synchronized, there is still a risk two processes
running at the same time overlap here. Risk accepted, since potentially lost
data (`dataset_name`) is only for human consumption.

Args:
  resource: resource for which to write the INFO file.
  path: path of downloaded file.
  dataset_name: data used to dl the file.
  original_fname: name of file as downloaded.
tensorflow_datasets/core/download/resource.py
def write_info_file(resource, path, dataset_name, original_fname):
  """Write the INFO file next to local file.

  Although the method is synchronized, there is still a risk two processes
  running at the same time overlap here. Risk accepted, since potentially
  lost data (`dataset_name`) is only for human consumption.

  Args:
    resource: resource for which to write the INFO file.
    path: path of downloaded file.
    dataset_name: data used to dl the file.
    original_fname: name of file as downloaded.
  """
  info_path = _get_info_path(path)
  info = _read_info(info_path) or {}
  urls = set(info.get('urls', []) + [resource.url])
  dataset_names = info.get('dataset_names', [])
  if dataset_name:
    dataset_names.append(dataset_name)
  if 'original_fname' in info and info['original_fname'] != original_fname:
    raise AssertionError(
        '`original_fname` "%s" stored in %s does NOT match "%s".' % (
            info['original_fname'], info_path, original_fname))
  info = dict(urls=list(urls),
              dataset_names=list(set(dataset_names)),
              original_fname=original_fname)
  with py_utils.atomic_write(info_path, 'w') as info_f:
    json.dump(info, info_f, sort_keys=True)
[ "Write", "the", "INFO", "file", "next", "to", "local", "file", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L214-L240
[ "def", "write_info_file", "(", "resource", ",", "path", ",", "dataset_name", ",", "original_fname", ")", ":", "info_path", "=", "_get_info_path", "(", "path", ")", "info", "=", "_read_info", "(", "info_path", ")", "or", "{", "}", "urls", "=", "set", "(", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
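py_utils.atomic_write is not shown in this excerpt; the pattern it stands for is usually write-to-temp-then-rename, sketched below under that assumption (not the library's actual implementation):

import contextlib
import os
import tempfile

@contextlib.contextmanager
def atomic_write(path, mode):
  """Write into a sibling temp file, then atomically rename over path."""
  tmp_fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
  try:
    with os.fdopen(tmp_fd, mode) as f:
      yield f
    os.replace(tmp_path, path)  # atomic on POSIX; replaces any old file
  except Exception:
    os.remove(tmp_path)
    raise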
train
get_extract_method
Returns `ExtractMethod` to use on resource at path. Cannot be None.
tensorflow_datasets/core/download/resource.py
def get_extract_method(path):
  """Returns `ExtractMethod` to use on resource at path. Cannot be None."""
  info_path = _get_info_path(path)
  info = _read_info(info_path)
  fname = info.get('original_fname', path) if info else path
  return _guess_extract_method(fname)
[ "Returns", "ExtractMethod", "to", "use", "on", "resource", "at", "path", ".", "Cannot", "be", "None", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L243-L248
[ "def", "get_extract_method", "(", "path", ")", ":", "info_path", "=", "_get_info_path", "(", "path", ")", "info", "=", "_read_info", "(", "info_path", ")", "fname", "=", "info", ".", "get", "(", "'original_fname'", ",", "path", ")", "if", "info", "else", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
Resource.exists_locally
Returns whether the resource exists locally, at `resource.path`.
tensorflow_datasets/core/download/resource.py
def exists_locally(cls, path):
  """Returns whether the resource exists locally, at `resource.path`."""
  # If INFO file doesn't exist, consider resource does NOT exist, as it would
  # prevent guessing the `extract_method`.
  return (tf.io.gfile.exists(path) and
          tf.io.gfile.exists(_get_info_path(path)))
[ "Returns", "whether", "the", "resource", "exists", "locally", "at", "resource", ".", "path", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/resource.py#L273-L278
[ "def", "exists_locally", "(", "cls", ",", "path", ")", ":", "# If INFO file doesn't exist, consider resource does NOT exist, as it would", "# prevent guessing the `extract_method`.", "return", "(", "tf", ".", "io", ".", "gfile", ".", "exists", "(", "path", ")", "and", "...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
Coco2014._split_generators
Returns SplitGenerators.
tensorflow_datasets/image/coco.py
def _split_generators(self, dl_manager):
  """Returns SplitGenerators."""
  root_url = "http://images.cocodataset.org/"
  urls = {
      # Train/validation set
      "train_images": "zips/train2014.zip",
      "val_images": "zips/val2014.zip",
      "trainval_annotations": "annotations/annotations_trainval2014.zip",
      # Testing set (no annotations) (2014)
      "test_images": "zips/test2014.zip",
      "test_annotations": "annotations/image_info_test2014.zip",
      # Testing set (no annotations) (2015)
      "test2015_images": "zips/test2015.zip",
      "test2015_annotations": "annotations/image_info_test2015.zip",
  }
  extracted_paths = dl_manager.download_and_extract({
      key: root_url + url for key, url in urls.items()
  })
  return [
      tfds.core.SplitGenerator(
          name=tfds.Split.TRAIN,
          num_shards=10,
          gen_kwargs=dict(
              image_dir=extracted_paths["train_images"],
              annotation_dir=extracted_paths["trainval_annotations"],
              split_type="train2014",
          )),
      tfds.core.SplitGenerator(
          name=tfds.Split.VALIDATION,
          num_shards=10,
          gen_kwargs=dict(
              image_dir=extracted_paths["val_images"],
              annotation_dir=extracted_paths["trainval_annotations"],
              split_type="val2014",
          )),
      # Warning: Testing split only contains the images without any annotation
      tfds.core.SplitGenerator(
          name=tfds.Split.TEST,
          num_shards=10,
          gen_kwargs=dict(
              image_dir=extracted_paths["test_images"],
              annotation_dir=extracted_paths["test_annotations"],
              split_type="test2014",
              has_annotation=False,
          )),
      tfds.core.SplitGenerator(
          name="test2015",
          num_shards=10,
          gen_kwargs=dict(
              image_dir=extracted_paths["test2015_images"],
              annotation_dir=extracted_paths["test2015_annotations"],
              split_type="test2015",
              has_annotation=False,
          )),
  ]
[ "Returns", "SplitGenerators", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/coco.py#L94-L149
[ "def", "_split_generators", "(", "self", ",", "dl_manager", ")", ":", "root_url", "=", "\"http://images.cocodataset.org/\"", "urls", "=", "{", "# Train/validation set", "\"train_images\"", ":", "\"zips/train2014.zip\"", ",", "\"val_images\"", ":", "\"zips/val2014.zip\"", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
Coco2014._generate_examples
Generate examples as dicts.

Args:
  image_dir: `str`, directory containing the images
  annotation_dir: `str`, directory containing the annotations
  split_type: `str`, <split_name><year> (ex: train2014)
  has_annotation: `bool`, when False (for the testing set), the annotations
    are not recorded

Yields:
  Generator yielding the next samples
tensorflow_datasets/image/coco.py
def _generate_examples(
    self, image_dir, annotation_dir, split_type, has_annotation=True):
  """Generate examples as dicts.

  Args:
    image_dir: `str`, directory containing the images
    annotation_dir: `str`, directory containing the annotations
    split_type: `str`, <split_name><year> (ex: train2014)
    has_annotation: `bool`, when False (for the testing set), the annotations
      are not recorded

  Yields:
    Generator yielding the next samples
  """
  if has_annotation:
    instance_filename = "instances_{}.json"
  else:
    instance_filename = "image_info_{}.json"

  # Load the label names and images
  instance_path = os.path.join(
      annotation_dir,
      "annotations",
      instance_filename.format(split_type),
  )
  coco_annotation = CocoAnnotation(instance_path)
  # Each category is a dict:
  # {
  #     'id': 51,  # From 1-91, some entry missing
  #     'name': 'bowl',
  #     'supercategory': 'kitchen',
  # }
  categories = coco_annotation.categories
  # Each image is a dict:
  # {
  #     'id': 262145,
  #     'file_name': 'COCO_train2014_000000262145.jpg'
  #     'flickr_url': 'http://farm8.staticflickr.com/7187/xyz.jpg',
  #     'coco_url': 'http://images.cocodataset.org/train2014/xyz.jpg',
  #     'license': 2,
  #     'date_captured': '2013-11-20 02:07:55',
  #     'height': 427,
  #     'width': 640,
  # }
  images = coco_annotation.images

  # TODO(b/121375022): ClassLabel names should also contain 'id' and
  # 'supercategory' (in addition to 'name')
  # Warning: As Coco only uses 80 out of the 91 labels, the c['id'] and
  # dataset names ids won't match.
  self.info.features["objects"]["label"].names = [
      c["name"] for c in categories
  ]
  # TODO(b/121375022): Conversion should be done by ClassLabel
  categories_id2name = {c["id"]: c["name"] for c in categories}

  # Iterate over all images
  annotation_skipped = 0
  for image_info in sorted(images, key=lambda x: x["id"]):
    if has_annotation:
      # Each instance annotation is a dict:
      # {
      #     'iscrowd': 0,
      #     'bbox': [116.95, 305.86, 285.3, 266.03],
      #     'image_id': 480023,
      #     'segmentation': [[312.29, 562.89, 402.25, ...]],
      #     'category_id': 58,
      #     'area': 54652.9556,
      #     'id': 86,
      # }
      instances = coco_annotation.get_annotations(img_id=image_info["id"])
    else:
      instances = []  # No annotations

    if not instances:
      annotation_skipped += 1

    def build_bbox(x, y, width, height):
      # pylint: disable=cell-var-from-loop
      # build_bbox is only used within the loop so it is ok to use image_info
      return tfds.features.BBox(
          ymin=y / image_info["height"],
          xmin=x / image_info["width"],
          ymax=(y + height) / image_info["height"],
          xmax=(x + width) / image_info["width"],
      )
      # pylint: enable=cell-var-from-loop

    yield {
        "image": os.path.join(image_dir, split_type, image_info["file_name"]),
        "image/filename": image_info["file_name"],
        "objects": [{
            "bbox": build_bbox(*instance_info["bbox"]),
            "label": categories_id2name[instance_info["category_id"]],
            "is_crowd": bool(instance_info["iscrowd"]),
        } for instance_info in instances],
    }

  logging.info(
      "%d/%d images do not contain any annotations",
      annotation_skipped,
      len(images),
  )
[ "Generate", "examples", "as", "dicts", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/coco.py#L151-L252
[ "def", "_generate_examples", "(", "self", ",", "image_dir", ",", "annotation_dir", ",", "split_type", ",", "has_annotation", "=", "True", ")", ":", "if", "has_annotation", ":", "instance_filename", "=", "\"instances_{}.json\"", "else", ":", "instance_filename", "=",...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
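build_bbox converts COCO's pixel-space [x, y, width, height] boxes into corner coordinates normalized by image size. Worked through on hypothetical numbers:

x, y, width, height = 10.0, 20.0, 300.0, 200.0  # hypothetical COCO bbox
img_width, img_height = 640, 427                # hypothetical image size

ymin, xmin = y / img_height, x / img_width   # 0.0468..., 0.015625
ymax = (y + height) / img_height             # 220 / 427 = 0.5152...
xmax = (x + width) / img_width               # 310 / 640 = 0.484375
assert 0.0 <= ymin <= ymax <= 1.0 and 0.0 <= xmin <= xmax <= 1.0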
train
Text.str2ints
Conversion string => encoded list[int].
tensorflow_datasets/core/features/text_feature.py
def str2ints(self, str_value):
  """Conversion string => encoded list[int]."""
  if not self._encoder:
    raise ValueError(
        "Text.str2ints is not available because encoder hasn't been defined.")
  return self._encoder.encode(str_value)
[ "Conversion", "string", "=", ">", "encoded", "list", "[", "int", "]", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text_feature.py#L83-L88
[ "def", "str2ints", "(", "self", ",", "str_value", ")", ":", "if", "not", "self", ".", "_encoder", ":", "raise", "ValueError", "(", "\"Text.str2ints is not available because encoder hasn't been defined.\"", ")", "return", "self", ".", "_encoder", ".", "encode", "(", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
Text.ints2str
Conversion list[int] => decoded string.
tensorflow_datasets/core/features/text_feature.py
def ints2str(self, int_values):
  """Conversion list[int] => decoded string."""
  if not self._encoder:
    raise ValueError(
        "Text.ints2str is not available because encoder hasn't been defined.")
  return self._encoder.decode(int_values)
[ "Conversion", "list", "[", "int", "]", "=", ">", "decoded", "string", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text_feature.py#L90-L95
[ "def", "ints2str", "(", "self", ",", "int_values", ")", ":", "if", "not", "self", ".", "_encoder", ":", "raise", "ValueError", "(", "\"Text.ints2str is not available because encoder hasn't been defined.\"", ")", "return", "self", ".", "_encoder", ".", "decode", "(",...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
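Both conversions delegate to an encoder exposing an encode/decode pair. A toy whitespace encoder honoring that contract, for illustration only:

class ToyEncoder:
  """Toy stand-in for a text encoder with the encode/decode contract."""

  def __init__(self, vocab):
    self._word2id = {w: i + 1 for i, w in enumerate(vocab)}  # ids start at 1
    self._id2word = {i: w for w, i in self._word2id.items()}

  def encode(self, s):
    return [self._word2id[w] for w in s.split()]

  def decode(self, ids):
    return ' '.join(self._id2word[i] for i in ids)

enc = ToyEncoder(['hello', 'world'])
assert enc.decode(enc.encode('hello world')) == 'hello world'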
train
Text.maybe_build_from_corpus
Call SubwordTextEncoder.build_from_corpus if encoder_cls is such.
tensorflow_datasets/core/features/text_feature.py
def maybe_build_from_corpus(self, corpus_generator, **kwargs):
  """Call SubwordTextEncoder.build_from_corpus if encoder_cls is such."""
  if self._encoder_cls is not text_lib.SubwordTextEncoder:
    return
  if self.encoder:
    return

  vocab_size = self._encoder_config.vocab_size
  self.encoder = text_lib.SubwordTextEncoder.build_from_corpus(
      corpus_generator=corpus_generator,
      target_vocab_size=vocab_size,
      **kwargs)
[ "Call", "SubwordTextEncoder", ".", "build_from_corpus", "is", "encoder_cls", "is", "such", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text_feature.py#L137-L148
[ "def", "maybe_build_from_corpus", "(", "self", ",", "corpus_generator", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_encoder_cls", "is", "not", "text_lib", ".", "SubwordTextEncoder", ":", "return", "if", "self", ".", "encoder", ":", "return", "voc...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
sharded_filenames
Sharded filenames given prefix and number of shards.
tensorflow_datasets/core/naming.py
def sharded_filenames(filename_prefix, num_shards):
  """Sharded filenames given prefix and number of shards."""
  shard_suffix = "%05d-of-%05d"
  return [
      "%s-%s" % (filename_prefix, shard_suffix % (i, num_shards))
      for i in range(num_shards)
  ]
[ "Sharded", "filenames", "given", "prefix", "and", "number", "of", "shards", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/naming.py#L52-L58
[ "def", "sharded_filenames", "(", "filename_prefix", ",", "num_shards", ")", ":", "shard_suffix", "=", "\"%05d-of-%05d\"", "return", "[", "\"%s-%s\"", "%", "(", "filename_prefix", ",", "shard_suffix", "%", "(", "i", ",", "num_shards", ")", ")", "for", "i", "in"...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
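Concretely, the zero-padded shard suffix expands as follows (the prefix is hypothetical):

filename_prefix, num_shards = 'mnist-train.tfrecord', 3
names = ['%s-%05d-of-%05d' % (filename_prefix, i, num_shards)
         for i in range(num_shards)]
assert names == ['mnist-train.tfrecord-00000-of-00003',
                 'mnist-train.tfrecord-00001-of-00003',
                 'mnist-train.tfrecord-00002-of-00003']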
train
_walk_omniglot_dir
Walk an Omniglot directory and yield examples.
tensorflow_datasets/image/omniglot.py
def _walk_omniglot_dir(directory):
  """Walk an Omniglot directory and yield examples."""
  directory = os.path.join(directory, tf.io.gfile.listdir(directory)[0])
  alphabets = sorted(tf.io.gfile.listdir(directory))
  for alphabet in alphabets:
    alphabet_dir = os.path.join(directory, alphabet)
    characters = sorted(tf.io.gfile.listdir(alphabet_dir))
    for character in characters:
      character_id = int(character[len("character"):]) - 1
      character_dir = os.path.join(alphabet_dir, character)
      images = tf.io.gfile.listdir(character_dir)
      for image in images:
        label, _ = image.split("_")
        label = int(label) - 1
        image_path = os.path.join(character_dir, image)
        yield alphabet, character_id, label, image_path
[ "Walk", "an", "Omniglot", "directory", "and", "yield", "examples", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/omniglot.py#L128-L143
[ "def", "_walk_omniglot_dir", "(", "directory", ")", ":", "directory", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "tf", ".", "io", ".", "gfile", ".", "listdir", "(", "directory", ")", "[", "0", "]", ")", "alphabets", "=", "sorted", "(...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
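The naming conventions the walker decodes, on hypothetical directory and file names following the Omniglot layout:

character = 'character01'             # per-character subdirectory
character_id = int(character[len('character'):]) - 1
assert character_id == 0              # zero-based

image = '0709_17.png'                 # '<label>_<variant>.png'
label, _ = image.split('_')
assert int(label) - 1 == 708          # zero-based class label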
train
_get_names
Get alphabet and label names, union across all dirs.
tensorflow_datasets/image/omniglot.py
def _get_names(dirs):
  """Get alphabet and label names, union across all dirs."""
  alphabets = set()
  label_names = {}
  for d in dirs:
    for example in _walk_omniglot_dir(d):
      alphabet, alphabet_char_id, label, _ = example
      alphabets.add(alphabet)
      label_name = "%s_%d" % (alphabet, alphabet_char_id)
      if label in label_names:
        assert label_names[label] == label_name
      else:
        label_names[label] = label_name
  label_names = [label_names[k] for k in sorted(label_names)]
  return alphabets, label_names
[ "Get", "alphabet", "and", "label", "names", "union", "across", "all", "dirs", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/omniglot.py#L146-L160
[ "def", "_get_names", "(", "dirs", ")", ":", "alphabets", "=", "set", "(", ")", "label_names", "=", "{", "}", "for", "d", "in", "dirs", ":", "for", "example", "in", "_walk_omniglot_dir", "(", "d", ")", ":", "alphabet", ",", "alphabet_char_id", ",", "lab...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
size_str
Returns a human readable size string.

If size_in_bytes is None, then returns "?? GiB".

For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.

Args:
  size_in_bytes: `int` or `None`, the size, in bytes, that we want to
    format as a human-readable size string.
tensorflow_datasets/core/units.py
def size_str(size_in_bytes):
  """Returns a human readable size string.

  If size_in_bytes is None, then returns "?? GiB".

  For example `size_str(1.5 * tfds.units.GiB) == "1.50 GiB"`.

  Args:
    size_in_bytes: `int` or `None`, the size, in bytes, that we want to
      format as a human-readable size string.
  """
  if not size_in_bytes:
    return "?? GiB"

  size_in_bytes = float(size_in_bytes)
  for (name, size_bytes) in _NAME_LIST:
    value = size_in_bytes / size_bytes
    if value >= 1.0:
      return "{:.2f} {}".format(value, name)
  return "{} {}".format(int(size_in_bytes), "bytes")
[ "Returns", "a", "human", "readable", "size", "string", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/units.py#L34-L53
[ "def", "size_str", "(", "size_in_bytes", ")", ":", "if", "not", "size_in_bytes", ":", "return", "\"?? GiB\"", "size_in_bytes", "=", "float", "(", "size_in_bytes", ")", "for", "(", "name", ",", "size_bytes", ")", "in", "_NAME_LIST", ":", "value", "=", "size_i...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
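_NAME_LIST is defined outside this excerpt; assuming it pairs unit names with byte counts, largest unit first, the 1.5 GiB example from the docstring works out as:

_NAME_LIST = [('TiB', 2**40), ('GiB', 2**30), ('MiB', 2**20), ('KiB', 2**10)]  # assumed

size_in_bytes = float(1.5 * 2**30)
for name, size_bytes in _NAME_LIST:
  value = size_in_bytes / size_bytes
  if value >= 1.0:
    assert '{:.2f} {}'.format(value, name) == '1.50 GiB'
    break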
train
_Downloader.tqdm
Add a progress bar for the current download.
tensorflow_datasets/core/download/downloader.py
def tqdm(self):
  """Add a progress bar for the current download."""
  async_tqdm = utils.async_tqdm
  with async_tqdm(total=0, desc='Dl Completed...', unit=' url') as pbar_url:
    with async_tqdm(total=0, desc='Dl Size...', unit=' MiB') as pbar_dl_size:
      self._pbar_url = pbar_url
      self._pbar_dl_size = pbar_dl_size
      yield
[ "Add", "a", "progression", "bar", "for", "the", "current", "download", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L84-L91
[ "def", "tqdm", "(", "self", ")", ":", "async_tqdm", "=", "utils", ".", "async_tqdm", "with", "async_tqdm", "(", "total", "=", "0", ",", "desc", "=", "'Dl Completed...'", ",", "unit", "=", "' url'", ")", "as", "pbar_url", ":", "with", "async_tqdm", "(", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
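The bare `yield` mid-function implies this method is wrapped with @contextlib.contextmanager (the decorator sits outside this excerpt). The pattern in miniature:

import contextlib

@contextlib.contextmanager
def progress_scope():
  """Set up state, hand control to the caller, tear down on exit."""
  print('progress bars opened')
  try:
    yield
  finally:
    print('progress bars closed')

with progress_scope():
  print('downloads run here')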
train
_Downloader.download
Download url to given path.

Returns Promise -> sha256 of downloaded file.

Args:
  url: address of resource to download.
  destination_path: `str`, path to directory where to download the resource.

Returns:
  Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).
tensorflow_datasets/core/download/downloader.py
def download(self, url, destination_path):
  """Download url to given path.

  Returns Promise -> sha256 of downloaded file.

  Args:
    url: address of resource to download.
    destination_path: `str`, path to directory where to download the
      resource.

  Returns:
    Promise obj -> (`str`, int): (downloaded object checksum, size in bytes).
  """
  self._pbar_url.update_total(1)
  future = self._executor.submit(self._sync_download, url, destination_path)
  return promise.Promise.resolve(future)
[ "Download", "url", "to", "given", "path", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L93-L107
[ "def", "download", "(", "self", ",", "url", ",", "destination_path", ")", ":", "self", ".", "_pbar_url", ".", "update_total", "(", "1", ")", "future", "=", "self", ".", "_executor", ".", "submit", "(", "self", ".", "_sync_download", ",", "url", ",", "d...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
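Underneath the promise wrapper is a plain concurrent.futures pipeline; the submit-and-wait flow, minus the `promise` library and with a stand-in worker:

from concurrent.futures import ThreadPoolExecutor

def sync_download(url, destination_path):
  """Stand-in for _sync_download; returns (checksum, size)."""
  return 'deadbeef', 0

executor = ThreadPoolExecutor(max_workers=10)
future = executor.submit(sync_download, 'http://example.com/f.zip', '/tmp')
checksum, size = future.result()  # promise.Promise.resolve wraps this future
executor.shutdown()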
train
_Downloader._sync_kaggle_download
Download with Kaggle API.
tensorflow_datasets/core/download/downloader.py
def _sync_kaggle_download(self, kaggle_url, destination_path):
  """Download with Kaggle API."""
  kaggle_file = kaggle.KaggleFile.from_url(kaggle_url)
  downloader = self.kaggle_downloader(kaggle_file.competition)
  filepath = downloader.download_file(kaggle_file.filename, destination_path)

  dl_size = tf.io.gfile.stat(filepath).length
  checksum = self._checksumer()
  with tf.io.gfile.GFile(filepath, 'rb') as f:
    while True:
      block = f.read(io.DEFAULT_BUFFER_SIZE)
      if not block:
        break
      checksum.update(block)
  return checksum.hexdigest(), dl_size
[ "Download", "with", "Kaggle", "API", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L109-L123
[ "def", "_sync_kaggle_download", "(", "self", ",", "kaggle_url", ",", "destination_path", ")", ":", "kaggle_file", "=", "kaggle", ".", "KaggleFile", ".", "from_url", "(", "kaggle_url", ")", "downloader", "=", "self", ".", "kaggle_downloader", "(", "kaggle_file", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
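The read loop at the end is the standard incremental-hash idiom for files too large to hold in memory; standalone:

import hashlib
import io

def file_sha256(path):
  """Hex sha256 of a file, read in DEFAULT_BUFFER_SIZE blocks."""
  checksum = hashlib.sha256()
  with open(path, 'rb') as f:
    while True:
      block = f.read(io.DEFAULT_BUFFER_SIZE)
      if not block:
        break
      checksum.update(block)
  return checksum.hexdigest()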
train
_Downloader._get_drive_url
Returns url, possibly with confirmation token.
tensorflow_datasets/core/download/downloader.py
def _get_drive_url(self, url, session): """Returns url, possibly with confirmation token.""" response = session.get(url, stream=True) if response.status_code != 200: raise DownloadError( 'Failed to get url %s. HTTP code: %d.' % (url, response.status_code)) for k, v in response.cookies.items(): if k.startswith('download_warning'): return url + '&confirm=' + v # v is the confirm token # No token found, let's try with original URL: return url
[ "Returns", "url", "possibly", "with", "confirmation", "token", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L125-L135
[ "def", "_get_drive_url", "(", "self", ",", "url", ",", "session", ")", ":", "response", "=", "session", ".", "get", "(", "url", ",", "stream", "=", "True", ")", "if", "response", ".", "status_code", "!=", "200", ":", "raise", "DownloadError", "(", "'Fa...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
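Context for the cookie scan: for large files Google Drive answers the first GET with an interstitial page and sets a `download_warning...` cookie whose value is the confirm token; retrying with `&confirm=<token>` skips the warning. A stand-alone sketch with `requests` (network access assumed, error handling simplified):

import requests

def drive_url_with_confirm(url):
    """Return `url`, extended with a confirm token if Drive set one."""
    session = requests.Session()
    response = session.get(url, stream=True)
    response.raise_for_status()
    for name, value in response.cookies.items():
        if name.startswith('download_warning'):
            return url + '&confirm=' + value
    return url  # No interstitial page; the original URL downloads directly.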
train
_Downloader._sync_download
Synchronous version of `download` method.
tensorflow_datasets/core/download/downloader.py
def _sync_download(self, url, destination_path):
  """Synchronous version of `download` method."""
  proxies = {
      'http': os.environ.get('TFDS_HTTP_PROXY', None),
      'https': os.environ.get('TFDS_HTTPS_PROXY', None),
      'ftp': os.environ.get('TFDS_FTP_PROXY', None)
  }
  if kaggle.KaggleFile.is_kaggle_url(url):
    if proxies['http']:
      os.environ['KAGGLE_PROXY'] = proxies['http']
    return self._sync_kaggle_download(url, destination_path)

  try:
    # If url is on a filesystem that gfile understands, use copy. Otherwise,
    # use requests.
    if not url.startswith('http'):
      return self._sync_file_copy(url, destination_path)
  except tf.errors.UnimplementedError:
    pass

  session = requests.Session()
  session.proxies = proxies
  if _DRIVE_URL.match(url):
    url = self._get_drive_url(url, session)
  use_urllib = url.startswith('ftp')
  if use_urllib:
    if proxies['ftp']:
      proxy = urllib.request.ProxyHandler({'ftp': proxies['ftp']})
      opener = urllib.request.build_opener(proxy)
      urllib.request.install_opener(opener)
    # pylint: disable=too-many-function-args
    request = urllib.request.Request(url)
    response = urllib.request.urlopen(request)
  else:
    response = session.get(url, stream=True)
    if response.status_code != 200:
      raise DownloadError('Failed to get url %s. HTTP code: %d.' %
                          (url, response.status_code))
  fname = _get_filename(response)
  path = os.path.join(destination_path, fname)
  size = 0

  size_mb = 0
  unit_mb = units.MiB
  self._pbar_dl_size.update_total(
      int(response.headers.get('Content-length', 0)) // unit_mb)
  with tf.io.gfile.GFile(path, 'wb') as file_:
    checksum = self._checksumer()
    if use_urllib:
      iterator = iter(lambda: response.read(io.DEFAULT_BUFFER_SIZE), b'')
    else:
      iterator = response.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE)
    for block in iterator:
      size += len(block)

      # Update the progress bar
      size_mb += len(block)
      if size_mb > unit_mb:
        self._pbar_dl_size.update(size_mb // unit_mb)
        size_mb %= unit_mb

      checksum.update(block)
      file_.write(block)
  self._pbar_url.update(1)
  return checksum.hexdigest(), size
[ "Synchronous", "version", "of", "download", "method", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/downloader.py#L144-L208
[ "def", "_sync_download", "(", "self", ",", "url", ",", "destination_path", ")", ":", "proxies", "=", "{", "'http'", ":", "os", ".", "environ", ".", "get", "(", "'TFDS_HTTP_PROXY'", ",", "None", ")", ",", "'https'", ":", "os", ".", "environ", ".", "get"...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
_resize_image_if_necessary
Resize an image to have (roughly) the given number of target pixels. Args: image_fobj: File object containing the original image. target_pixels: If given, number of pixels that the image must have. Returns: A file object.
tensorflow_datasets/image/diabetic_retinopathy_detection.py
def _resize_image_if_necessary(image_fobj, target_pixels=None):
  """Resize an image to have (roughly) the given number of target pixels.

  Args:
    image_fobj: File object containing the original image.
    target_pixels: If given, number of pixels that the image must have.

  Returns:
    A file object.
  """
  if target_pixels is None:
    return image_fobj

  cv2 = tfds.core.lazy_imports.cv2
  # Decode image using OpenCV2.
  image = cv2.imdecode(
      np.fromstring(image_fobj.read(), dtype=np.uint8), flags=3)
  # Get image height and width.
  height, width, _ = image.shape
  actual_pixels = height * width
  if actual_pixels > target_pixels:
    factor = np.sqrt(target_pixels / actual_pixels)
    image = cv2.resize(image, dsize=None, fx=factor, fy=factor)
  # Encode the image with quality=72 and store it in a BytesIO object.
  _, buff = cv2.imencode(".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), 72])
  return io.BytesIO(buff.tostring())
[ "Resize", "an", "image", "to", "have", "(", "roughly", ")", "the", "given", "number", "of", "target", "pixels", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/diabetic_retinopathy_detection.py#L181-L206
[ "def", "_resize_image_if_necessary", "(", "image_fobj", ",", "target_pixels", "=", "None", ")", ":", "if", "target_pixels", "is", "None", ":", "return", "image_fobj", "cv2", "=", "tfds", ".", "core", ".", "lazy_imports", ".", "cv2", "# Decode image using OpenCV2."...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
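The `sqrt(target_pixels / actual_pixels)` factor works because scaling both sides of an image by `f` scales its pixel count by `f**2`. A quick numeric check in pure Python (no OpenCV needed):

import math

height, width = 3000, 2000            # 6,000,000 pixels
target_pixels = 1500000
factor = math.sqrt(target_pixels / (height * width))   # exactly 0.5
new_h, new_w = round(height * factor), round(width * factor)
assert (new_h, new_w) == (1500, 1000) and new_h * new_w == target_pixels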
train
DiabeticRetinopathyDetection._generate_examples
Yields Example instances from given CSV. Args: images_dir_path: path to dir in which images are stored. csv_path: optional, path to csv file with two columns: name of image and label. If not provided, just scan image directory, don't set labels. csv_usage: optional, subset of examples from the csv file to use based on the "Usage" column from the csv.
tensorflow_datasets/image/diabetic_retinopathy_detection.py
def _generate_examples(self, images_dir_path, csv_path=None, csv_usage=None):
  """Yields Example instances from given CSV.

  Args:
    images_dir_path: path to dir in which images are stored.
    csv_path: optional, path to csv file with two columns: name of image and
      label. If not provided, just scan image directory, don't set labels.
    csv_usage: optional, subset of examples from the csv file to use based on
      the "Usage" column from the csv.
  """
  if csv_path:
    with tf.io.gfile.GFile(csv_path) as csv_f:
      reader = csv.DictReader(csv_f)
      data = [(row["image"], int(row["level"]))
              for row in reader
              if csv_usage is None or row["Usage"] == csv_usage]
  else:
    data = [(fname[:-5], -1)
            for fname in tf.io.gfile.listdir(images_dir_path)
            if fname.endswith(".jpeg")]
  for name, label in data:
    yield {
        "name": name,
        "image": _resize_image_if_necessary(
            tf.io.gfile.GFile("%s/%s.jpeg" % (images_dir_path, name),
                              mode="rb"),
            target_pixels=self.builder_config.target_pixels),
        "label": label,
    }
[ "Yields", "Example", "instances", "from", "given", "CSV", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/diabetic_retinopathy_detection.py#L150-L178
[ "def", "_generate_examples", "(", "self", ",", "images_dir_path", ",", "csv_path", "=", "None", ",", "csv_usage", "=", "None", ")", ":", "if", "csv_path", ":", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "csv_path", ")", "as", "csv_f", ":...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
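The list comprehension above leans on `csv.DictReader` mapping each row to the header names, so the `Usage` filter is a plain key lookup. A self-contained illustration (the column names follow the Kaggle labels file; the values are made up):

import csv
import io

CSV_TEXT = """image,level,Usage
10_left,0,Public
10_right,2,Private
"""

reader = csv.DictReader(io.StringIO(CSV_TEXT))
data = [(row["image"], int(row["level"]))
        for row in reader
        if row["Usage"] == "Public"]
assert data == [("10_left", 0)]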
train
FileAdapterBuilder._slice_split_info_to_instruction_dicts
Return the list of files and reading mask of the files to read.
tensorflow_datasets/core/dataset_builder.py
def _slice_split_info_to_instruction_dicts(self, list_sliced_split_info):
  """Return the list of files and reading mask of the files to read."""
  instruction_dicts = []
  for sliced_split_info in list_sliced_split_info:
    mask = splits_lib.slice_to_percent_mask(sliced_split_info.slice_value)

    # Compute filenames from the given split
    filepaths = list(sorted(self._build_split_filenames(
        split_info_list=[sliced_split_info.split_info],
    )))

    # Compute the offsets
    if sliced_split_info.split_info.num_examples:
      shard_id2num_examples = splits_lib.get_shard_id2num_examples(
          sliced_split_info.split_info.num_shards,
          sliced_split_info.split_info.num_examples,
      )
      mask_offsets = splits_lib.compute_mask_offsets(shard_id2num_examples)
    else:
      logging.warning(
          "Statistics not present in the dataset. TFDS is not able to load "
          "the total number of examples, so using the subsplit API may not "
          "provide precise subsplits."
      )
      mask_offsets = [0] * len(filepaths)

    for filepath, mask_offset in zip(filepaths, mask_offsets):
      instruction_dicts.append({
          "filepath": filepath,
          "mask": mask,
          "mask_offset": mask_offset,
      })
  return instruction_dicts
[ "Return", "the", "list", "of", "files", "and", "reading", "mask", "of", "the", "files", "to", "read", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_builder.py#L707-L739
[ "def", "_slice_split_info_to_instruction_dicts", "(", "self", ",", "list_sliced_split_info", ")", ":", "instruction_dicts", "=", "[", "]", "for", "sliced_split_info", "in", "list_sliced_split_info", ":", "mask", "=", "splits_lib", ".", "slice_to_percent_mask", "(", "sli...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
FileAdapterBuilder._build_split_filenames
Construct the split filenames associated with the split info. The filenames correspond to the pre-processed datasets files present in the root directory of the dataset. Args: split_info_list: (list[SplitInfo]) List of split from which generate the filenames Returns: filenames: (list[str]) The list of filenames path corresponding to the split info object
tensorflow_datasets/core/dataset_builder.py
def _build_split_filenames(self, split_info_list):
  """Construct the split filenames associated with the split info.

  The filenames correspond to the pre-processed datasets files present in the
  root directory of the dataset.

  Args:
    split_info_list: (list[SplitInfo]) List of split from which generate the
      filenames

  Returns:
    filenames: (list[str]) The list of filenames path corresponding to the
      split info object
  """
  filenames = []
  for split_info in split_info_list:
    filenames.extend(naming.filepaths_for_dataset_split(
        dataset_name=self.name,
        split=split_info.name,
        num_shards=split_info.num_shards,
        data_dir=self._data_dir,
        filetype_suffix=self._file_format_adapter.filetype_suffix,
    ))
  return filenames
[ "Construct", "the", "split", "filenames", "associated", "with", "the", "split", "info", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_builder.py#L741-L765
[ "def", "_build_split_filenames", "(", "self", ",", "split_info_list", ")", ":", "filenames", "=", "[", "]", "for", "split_info", "in", "split_info_list", ":", "filenames", ".", "extend", "(", "naming", ".", "filepaths_for_dataset_split", "(", "dataset_name", "=", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
MovingMnist._generate_examples
Generate MovingMnist sequences. Args: data_path (str): Path to the data file Yields: 20 x 64 x 64 x 1 uint8 numpy arrays
tensorflow_datasets/video/moving_mnist.py
def _generate_examples(self, data_path):
  """Generate MovingMnist sequences.

  Args:
    data_path (str): Path to the data file

  Yields:
    20 x 64 x 64 x 1 uint8 numpy arrays
  """
  with tf.io.gfile.GFile(data_path, "rb") as fp:
    images = np.load(fp)
  images = np.transpose(images, (1, 0, 2, 3))
  images = np.expand_dims(images, axis=-1)
  for sequence in images:
    yield dict(image_sequence=sequence)
[ "Generate", "MovingMnist", "sequences", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/moving_mnist.py#L85-L99
[ "def", "_generate_examples", "(", "self", ",", "data_path", ")", ":", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "data_path", ",", "\"rb\"", ")", "as", "fp", ":", "images", "=", "np", ".", "load", "(", "fp", ")", "images", "=", "np",...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
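The transpose/expand_dims pair converts the on-disk time-major layout `(20, N, 64, 64)` into per-sequence examples with a trailing channel axis. A shape-only check with random data standing in for the real file:

import numpy as np

images = np.random.randint(0, 256, size=(20, 3, 64, 64), dtype=np.uint8)
images = np.transpose(images, (1, 0, 2, 3))   # -> (3, 20, 64, 64)
images = np.expand_dims(images, axis=-1)      # -> (3, 20, 64, 64, 1)
for sequence in images:
    assert sequence.shape == (20, 64, 64, 1)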
train
StarcraftVideo._parse_single_video
Parses single video from the input tfrecords. Args: example_proto: tfExample proto with a single video. Returns: dict with all frames, positions and actions.
tensorflow_datasets/video/starcraft.py
def _parse_single_video(self, example_proto):
  """Parses single video from the input tfrecords.

  Args:
    example_proto: tfExample proto with a single video.

  Returns:
    dict with all frames, positions and actions.
  """
  context_features = {
      "game_duration_loops": tf.io.FixedLenFeature([1], tf.int64),
      "game_duration_seconds": tf.io.FixedLenFeature([1], tf.float32),
      "n_steps": tf.io.FixedLenFeature([1], tf.int64),
      "screen_size": tf.io.FixedLenFeature([2], tf.int64),
  }

  sequence_features = {
      "rgb_screen": tf.io.FixedLenSequenceFeature([], tf.string),
  }

  _, seq_feat = tf.io.parse_single_sequence_example(
      example_proto,
      context_features=context_features,
      sequence_features=sequence_features)

  video_frames = tf.map_fn(
      tf.image.decode_png, seq_feat["rgb_screen"], dtype=tf.uint8)
  return video_frames
[ "Parses", "single", "video", "from", "the", "input", "tfrecords", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/video/starcraft.py#L181-L208
[ "def", "_parse_single_video", "(", "self", ",", "example_proto", ")", ":", "context_features", "=", "{", "\"game_duration_loops\"", ":", "tf", ".", "io", ".", "FixedLenFeature", "(", "[", "1", "]", ",", "tf", ".", "int64", ")", ",", "\"game_duration_seconds\""...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
Dsprites._generate_examples
Generates examples for the dSprites data set. Args: filepath: path to the dSprites hdf5 file. Yields: Dictionaries with images, latent classes, and latent values.
tensorflow_datasets/image/dsprites.py
def _generate_examples(self, filepath):
  """Generates examples for the dSprites data set.

  Args:
    filepath: path to the dSprites hdf5 file.

  Yields:
    Dictionaries with images, latent classes, and latent values.
  """
  # Simultaneously iterating through the different data sets in the hdf5
  # file is >100x slower and the data set is small (26.7MB). Hence, we first
  # load everything into memory before yielding the samples.
  image_array, class_array, values_array = _load_data(filepath)

  for image, classes, values in moves.zip(image_array, class_array,
                                          values_array):
    yield dict(
        image=np.expand_dims(image, -1),
        label_shape=classes[1],
        label_scale=classes[2],
        label_orientation=classes[3],
        label_x_position=classes[4],
        label_y_position=classes[5],
        value_shape=values[1],
        value_scale=values[2],
        value_orientation=values[3],
        value_x_position=values[4],
        value_y_position=values[5])
[ "Generates", "examples", "for", "the", "dSprites", "data", "set", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/dsprites.py#L117-L143
[ "def", "_generate_examples", "(", "self", ",", "filepath", ")", ":", "# Simultaneously iterating through the different data sets in the hdf5", "# file is >100x slower and the data set is small (26.7MB). Hence, we first", "# load everything into memory before yielding the samples.", "image_arra...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
OxfordIIITPet._split_generators
Returns splits.
tensorflow_datasets/image/oxford_iiit_pet.py
def _split_generators(self, dl_manager):
  """Returns splits."""
  # Download images and annotations that come in separate archives.
  # Note, that the extension of archives is .tar.gz even though the actual
  # archives format is uncompressed tar.
  dl_paths = dl_manager.download_and_extract({
      "images": tfds.download.Resource(
          url=os.path.join(_BASE_URL, "images.tar.gz"),
          extract_method=tfds.download.ExtractMethod.TAR),
      "annotations": tfds.download.Resource(
          url=os.path.join(_BASE_URL, "annotations.tar.gz"),
          extract_method=tfds.download.ExtractMethod.TAR)
  })

  images_path_dir = os.path.join(dl_paths["images"], "images")
  annotations_path_dir = os.path.join(dl_paths["annotations"], "annotations")

  # Setup train and test splits
  train_split = tfds.core.SplitGenerator(
      name="train",
      num_shards=_NUM_SHARDS,
      gen_kwargs={
          "images_dir_path": images_path_dir,
          "images_list_file": os.path.join(annotations_path_dir,
                                           "trainval.txt"),
      },
  )
  test_split = tfds.core.SplitGenerator(
      name="test",
      num_shards=_NUM_SHARDS,
      gen_kwargs={
          "images_dir_path": images_path_dir,
          "images_list_file": os.path.join(annotations_path_dir,
                                           "test.txt")
      },
  )

  return [train_split, test_split]
[ "Returns", "splits", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/oxford_iiit_pet.py#L65-L102
[ "def", "_split_generators", "(", "self", ",", "dl_manager", ")", ":", "# Download images and annotations that come in separate archives.", "# Note, that the extension of archives is .tar.gz even though the actual", "# archives format is uncompressed tar.", "dl_paths", "=", "dl_manager", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
_load_objects
Returns objects listed within given CSV files.
tensorflow_datasets/image/open_images.py
def _load_objects(csv_paths, csv_positions, prefix):
  """Returns objects listed within given CSV files."""
  logging.info('Loading CSVs %s from positions %s with prefix %s',
               csv_paths, csv_positions, prefix)
  objects = collections.defaultdict(list)
  for i, labels_path in enumerate(csv_paths):
    with tf.io.gfile.GFile(labels_path) as csv_f:
      if csv_positions[i] > 0:
        csv_f.seek(csv_positions[i])
      else:
        csv_f.readline()  # Drop headers
      reader = csv.reader(csv_f)
      for image_id, source, label, confidence in reader:
        if prefix and image_id[0] != prefix:
          break
        csv_positions[i] = csv_f.tell()
        image_id = int(image_id, 16)
        current_obj = _Object(label, int(float(confidence) * 10), source)
        objects[image_id].append(current_obj)
  return dict(objects)
[ "Returns", "objects", "listed", "within", "given", "CSV", "files", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L322-L341
[ "def", "_load_objects", "(", "csv_paths", ",", "csv_positions", ",", "prefix", ")", ":", "logging", ".", "info", "(", "'Loading CSVs %s from positions %s with prefix %s'", ",", "csv_paths", ",", "csv_positions", ",", "prefix", ")", "objects", "=", "collections", "."...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
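The `seek`/`tell` bookkeeping above lets each shard-prefix pass resume where the previous one stopped instead of re-reading the CSV from the top. Note that plain Python 3 disables `tell()` while a text file is being iterated, so a stand-alone version is easiest with explicit `readline()` calls (a sketch, not the TFDS code):

import csv

def read_rows_from(path, position):
    """Yield (row, next_position) pairs starting at a saved byte offset."""
    with open(path, 'r', newline='') as f:
        if position > 0:
            f.seek(position)
        else:
            f.readline()  # Drop headers
        while True:
            line = f.readline()
            if not line:
                break
            yield next(csv.reader([line])), f.tell()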
train
_load_bboxes
Returns bounded boxes listed within given CSV file.
tensorflow_datasets/image/open_images.py
def _load_bboxes(csv_path, csv_positions, prefix):
  """Returns bounded boxes listed within given CSV file."""
  logging.info('Loading CSVs %s from positions %s with prefix %s',
               csv_path, csv_positions, prefix)
  boxes = collections.defaultdict(list)
  with tf.io.gfile.GFile(csv_path) as csv_f:
    if csv_positions[0] > 0:
      csv_f.seek(csv_positions[0])
    else:
      csv_f.readline()  # Drop headers
    reader = csv.reader(csv_f)
    for (image_id, source, label, confidence, xmin, xmax, ymin, ymax,
         is_occluded, is_truncated, is_group_of, is_depiction, is_inside,
        ) in reader:
      if prefix and image_id[0] != prefix:
        break
      csv_positions[0] = csv_f.tell()
      image_id = int(image_id, 16)
      del confidence  # always 1 in bounding boxes.
      current_row = _Bbox(
          label, source, tfds.features.BBox(
              float(ymin), float(xmin), float(ymax), float(xmax)),
          int(is_occluded), int(is_truncated),
          int(is_group_of), int(is_depiction), int(is_inside))
      boxes[image_id].append(current_row)
  return dict(boxes)
[ "Returns", "bounded", "boxes", "listed", "within", "given", "CSV", "file", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L344-L369
[ "def", "_load_bboxes", "(", "csv_path", ",", "csv_positions", ",", "prefix", ")", ":", "logging", ".", "info", "(", "'Loading CSVs %s from positions %s with prefix %s'", ",", "csv_path", ",", "csv_positions", ",", "prefix", ")", "boxes", "=", "collections", ".", "...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
OpenImagesV4._split_generators
Returns SplitGenerators.
tensorflow_datasets/image/open_images.py
def _split_generators(self, dl_manager):
  """Returns SplitGenerators."""
  paths = dl_manager.download_and_extract(_URLS)
  # Load labels from CSVs:
  def load(names):
    csv_positions = [0] * len(names)
    return functools.partial(_load_objects,
                             [paths[name] for name in names], csv_positions)
  train_objects = load(['train_human_labels', 'train_machine_labels'])
  test_objects = load(['test_human_labels', 'test_machine_labels'])
  validation_objects = load(['validation_human_labels',
                             'validation_machine_labels'])
  def load_boxes(name):
    csv_positions = [0]
    return functools.partial(_load_bboxes, paths[name], csv_positions)
  train_bbox = load_boxes('train-annotations-bbox')
  test_bbox = load_boxes('test-annotations-bbox')
  validation_bbox = load_boxes('validation-annotations-bbox')
  return [
      tfds.core.SplitGenerator(
          name=tfds.Split.TRAIN,
          num_shards=512,
          gen_kwargs=dict(archive_paths=paths['train_images'],
                          objects_getter=train_objects,
                          bboxes_getter=train_bbox,
                          prefixes='0123456789abcdef'),
      ),
      tfds.core.SplitGenerator(
          name=tfds.Split.TEST,
          num_shards=36,
          gen_kwargs=dict(archive_paths=[paths['test_images']],
                          objects_getter=test_objects,
                          bboxes_getter=test_bbox),
      ),
      tfds.core.SplitGenerator(
          name=tfds.Split.VALIDATION,
          num_shards=12,
          gen_kwargs=dict(archive_paths=[paths['validation_images']],
                          objects_getter=validation_objects,
                          bboxes_getter=validation_bbox),
      ),
  ]
[ "Returns", "SplitGenerators", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L221-L262
[ "def", "_split_generators", "(", "self", ",", "dl_manager", ")", ":", "paths", "=", "dl_manager", ".", "download_and_extract", "(", "_URLS", ")", "# Load labels from CSVs:", "def", "load", "(", "names", ")", ":", "csv_positions", "=", "[", "0", "]", "*", "le...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
OpenImagesV4._generate_examples
Yields examples.
tensorflow_datasets/image/open_images.py
def _generate_examples(self, archive_paths, objects_getter, bboxes_getter,
                       prefixes=None):
  """Yields examples."""
  trainable_classes = set(
      self.info.features['objects_trainable']['label'].names)
  for i, archive_path in enumerate(archive_paths):
    prefix = prefixes[i] if prefixes else None
    objects = objects_getter(prefix)
    bboxes = bboxes_getter(prefix)
    logging.info('Opening archive %s ...', archive_path)
    archive = tfds.download.iter_archive(
        archive_path, tfds.download.ExtractMethod.TAR_STREAM)
    for fpath, fobj in archive:
      fname = os.path.basename(fpath)
      image_id = int(os.path.splitext(fname)[0], 16)
      image_objects = [obj._asdict() for obj in objects.get(image_id, [])]
      image_bboxes = [bbox._asdict() for bbox in bboxes.get(image_id, [])]
      image_objects_trainable = [
          obj for obj in image_objects if obj['label'] in trainable_classes
      ]
      yield {
          'image': _resize_image_if_necessary(
              fobj, target_pixels=self.builder_config.target_pixels),
          'image/filename': fname,
          'objects': image_objects,
          'objects_trainable': image_objects_trainable,
          'bobjects': image_bboxes,
      }
[ "Yields", "examples", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/open_images.py#L264-L291
[ "def", "_generate_examples", "(", "self", ",", "archive_paths", ",", "objects_getter", ",", "bboxes_getter", ",", "prefixes", "=", "None", ")", ":", "trainable_classes", "=", "set", "(", "self", ".", "info", ".", "features", "[", "'objects_trainable'", "]", "[...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
IMDBReviews._generate_examples
Generate IMDB examples.
tensorflow_datasets/text/imdb.py
def _generate_examples(self, archive, directory):
  """Generate IMDB examples."""
  reg = re.compile(os.path.join("^%s" % directory, "(?P<label>neg|pos)", ""))
  for path, imdb_f in archive:
    res = reg.match(path)
    if not res:
      continue
    text = imdb_f.read().strip()
    yield {
        "text": text,
        "label": res.groupdict()["label"],
    }
[ "Generate", "IMDB", "examples", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/imdb.py#L146-L157
[ "def", "_generate_examples", "(", "self", ",", "archive", ",", "directory", ")", ":", "reg", "=", "re", ".", "compile", "(", "os", ".", "path", ".", "join", "(", "\"^%s\"", "%", "directory", ",", "\"(?P<label>neg|pos)\"", ",", "\"\"", ")", ")", "for", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
_get_url_hashes
Get hashes of urls in file.
tensorflow_datasets/text/cnn_dailymail.py
def _get_url_hashes(path):
  """Get hashes of urls in file."""
  urls = _read_text_file(path)
  def url_hash(u):
    h = hashlib.sha1()
    try:
      u = u.encode('utf-8')
    except UnicodeDecodeError:
      logging.error('Cannot hash url: %s', u)
    h.update(u)
    return h.hexdigest()
  return {url_hash(u): True for u in urls}
[ "Get", "hashes", "of", "urls", "in", "file", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L97-L108
[ "def", "_get_url_hashes", "(", "path", ")", ":", "urls", "=", "_read_text_file", "(", "path", ")", "def", "url_hash", "(", "u", ")", ":", "h", "=", "hashlib", ".", "sha1", "(", ")", "try", ":", "u", "=", "u", ".", "encode", "(", "'utf-8'", ")", "...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
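Hashing each URL gives a fixed-size dictionary key, so membership checks against the heldout-URL lists stay O(1). The core of the record above in a few lines (assuming well-formed `str` URLs, so the encode cannot fail):

import hashlib

def url_hash(u):
    return hashlib.sha1(u.encode('utf-8')).hexdigest()

url_dict = {url_hash(u): True for u in ['http://example.com/a']}
assert url_hash('http://example.com/a') in url_dict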
train
_find_files
Find files corresponding to urls.
tensorflow_datasets/text/cnn_dailymail.py
def _find_files(dl_paths, publisher, url_dict):
  """Find files corresponding to urls."""
  if publisher == 'cnn':
    top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories')
  elif publisher == 'dm':
    top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories')
  else:
    logging.fatal('Unsupported publisher: %s', publisher)
  files = tf.io.gfile.listdir(top_dir)

  ret_files = []
  for p in files:
    basename = os.path.basename(p)
    if basename[0:basename.find('.story')] in url_dict:
      ret_files.append(os.path.join(top_dir, p))
  return ret_files
[ "Find", "files", "corresponding", "to", "urls", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L111-L126
[ "def", "_find_files", "(", "dl_paths", ",", "publisher", ",", "url_dict", ")", ":", "if", "publisher", "==", "'cnn'", ":", "top_dir", "=", "os", ".", "path", ".", "join", "(", "dl_paths", "[", "'cnn_stories'", "]", ",", "'cnn'", ",", "'stories'", ")", ...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
_subset_filenames
Get filenames for a particular split.
tensorflow_datasets/text/cnn_dailymail.py
def _subset_filenames(dl_paths, split):
  """Get filenames for a particular split."""
  assert isinstance(dl_paths, dict), dl_paths
  # Get filenames for a split.
  if split == tfds.Split.TRAIN:
    urls = _get_url_hashes(dl_paths['train_urls'])
  elif split == tfds.Split.VALIDATION:
    urls = _get_url_hashes(dl_paths['val_urls'])
  elif split == tfds.Split.TEST:
    urls = _get_url_hashes(dl_paths['test_urls'])
  else:
    logging.fatal('Unsupported split: %s', split)
  cnn = _find_files(dl_paths, 'cnn', urls)
  dm = _find_files(dl_paths, 'dm', urls)
  return cnn + dm
[ "Get", "filenames", "for", "a", "particular", "split", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L129-L143
[ "def", "_subset_filenames", "(", "dl_paths", ",", "split", ")", ":", "assert", "isinstance", "(", "dl_paths", ",", "dict", ")", ",", "dl_paths", "# Get filenames for a split.", "if", "split", "==", "tfds", ".", "Split", ".", "TRAIN", ":", "urls", "=", "_get_...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
train
_get_art_abs
Get abstract (highlights) and article from a story file path.
tensorflow_datasets/text/cnn_dailymail.py
def _get_art_abs(story_file):
  """Get abstract (highlights) and article from a story file path."""
  # Based on https://github.com/abisee/cnn-dailymail/blob/master/
  #     make_datafiles.py

  lines = _read_text_file(story_file)

  # Lowercase everything
  lines = [line.lower() for line in lines]

  # Put periods on the ends of lines that are missing them
  # (this is a problem in the dataset because many image captions don't end in
  # periods; consequently they end up in the body of the article as run-on
  # sentences)
  def fix_missing_period(line):
    """Adds a period to a line that is missing a period."""
    if '@highlight' in line:
      return line
    if not line:
      return line
    if line[-1] in END_TOKENS:
      return line
    return line + ' .'

  lines = [fix_missing_period(line) for line in lines]

  # Separate out article and abstract sentences
  article_lines = []
  highlights = []
  next_is_highlight = False
  for line in lines:
    if not line:
      continue  # empty line
    elif line.startswith('@highlight'):
      next_is_highlight = True
    elif next_is_highlight:
      highlights.append(line)
    else:
      article_lines.append(line)

  # Make article into a single string
  article = ' '.join(article_lines)

  # Make abstract into a single string, putting <s> and </s> tags around
  # the sentences.
  abstract = ' '.join(['%s %s %s' % (SENTENCE_START, sent, SENTENCE_END)
                       for sent in highlights])

  return article, abstract
[ "Get", "abstract", "(", "highlights", ")", "and", "article", "from", "a", "story", "file", "path", "." ]
tensorflow/datasets
python
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L163-L207
[ "def", "_get_art_abs", "(", "story_file", ")", ":", "# Based on https://github.com/abisee/cnn-dailymail/blob/master/", "# make_datafiles.py", "lines", "=", "_read_text_file", "(", "story_file", ")", "# Lowercase everything", "lines", "=", "[", "line", ".", "lower", "(",...
46ceb0cf7b4690f38ecbbc689e4d659a903d08dc
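The `@highlight` marker format is easiest to see on a toy story: lines before the first marker are article body, and non-empty lines after it are collected as summary sentences. Running the record's split logic over four sample lines:

lines = ['some article text .', '', '@highlight', 'the summary sentence .']
article_lines, highlights, next_is_highlight = [], [], False
for line in lines:
    if not line:
        continue
    elif line.startswith('@highlight'):
        next_is_highlight = True
    elif next_is_highlight:
        highlights.append(line)
    else:
        article_lines.append(line)
assert article_lines == ['some article text .']
assert highlights == ['the summary sentence .']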
train
exporter
Export the results.
plugins/exporter.py
def exporter(directory, method, datasets):
    """Export the results."""
    if method.lower() == 'json':
        # Convert the datasets dict to a JSON styled string
        json_string = json.dumps(datasets, indent=4)
        # Context manager guarantees the file is closed even on write errors.
        with open('{}/exported.json'.format(directory), 'w+') as savefile:
            savefile.write(json_string)
    if method.lower() == 'csv':
        with open('{}/exported.csv'.format(directory), 'w+') as csvfile:
            csv_writer = csv.writer(
                csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
            for key, values in datasets.items():
                if values is None:
                    csv_writer.writerow([key])
                else:
                    csv_writer.writerow([key] + values)
[ "Export", "the", "results", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/exporter.py#L6-L24
[ "def", "exporter", "(", "directory", ",", "method", ",", "datasets", ")", ":", "if", "method", ".", "lower", "(", ")", "==", "'json'", ":", "# Convert json_dict to a JSON styled string", "json_string", "=", "json", ".", "dumps", "(", "datasets", ",", "indent",...
6a29f2c9782ea9b3dc090db1774a259033600e39
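A usage sketch for the exporter above, assuming the function and its `json`/`csv` imports are in scope and the current directory is writable (the `results` mapping mirrors Photon's category-to-URL-list shape):

results = {
    'internal': ['https://example.com/', 'https://example.com/about'],
    'robots': None,
}
exporter('.', 'json', results)   # writes ./exported.json
exporter('.', 'csv', results)    # writes ./exported.csv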
train
time_machine
Query archive.org.
plugins/wayback.py
def time_machine(host, mode):
    """Query archive.org."""
    now = datetime.datetime.now()
    # Build zero-padded yyyymmdd stamps. The original concatenated
    # year + day + month unpadded, which the CDX API misreads; these are
    # plain string bounds, so an overflowing day (e.g. "0231") still works
    # as a lexicographic limit.
    to = '{:04d}{:02d}{:02d}'.format(now.year, now.month, now.day)
    if now.month > 6:
        fro = '{:04d}{:02d}{:02d}'.format(now.year, now.month - 6, now.day)
    else:
        fro = '{:04d}{:02d}{:02d}'.format(now.year - 1, now.month + 6, now.day)
    url = "http://web.archive.org/cdx/search?url=%s&matchType=%s&collapse=urlkey&fl=original&filter=mimetype:text/html&filter=statuscode:200&output=json&from=%s&to=%s" % (host, mode, fro, to)
    response = get(url).text
    parsed = json.loads(response)[1:]
    urls = []
    for item in parsed:
        urls.append(item[0])
    return urls
[ "Query", "archive", ".", "org", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/wayback.py#L8-L22
[ "def", "time_machine", "(", "host", ",", "mode", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "to", "=", "str", "(", "now", ".", "year", ")", "+", "str", "(", "now", ".", "day", ")", "+", "str", "(", "now", ".", "m...
6a29f2c9782ea9b3dc090db1774a259033600e39
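The CDX endpoint returns a JSON array whose first row is the column header, which is why the record above slices `[1:]`. A minimal stand-alone query (network access and the `requests` package assumed; `limit=5` just keeps the demo small):

import json
import requests

url = ('http://web.archive.org/cdx/search?url=example.com&matchType=host'
       '&collapse=urlkey&fl=original&output=json&limit=5')
rows = json.loads(requests.get(url).text)
urls = [row[0] for row in rows[1:]]  # rows[0] is the header, e.g. ['original']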
train
zap
Extract links from robots.txt and sitemap.xml.
core/zap.py
def zap(input_url, archive, domain, host, internal, robots, proxies):
    """Extract links from robots.txt and sitemap.xml."""
    if archive:
        print('%s Fetching URLs from archive.org' % run)
        # The domain-wide mode (`time_machine(domain, 'domain')`) was dead
        # code behind an `if False:` guard; only host mode is active.
        archived_urls = time_machine(host, 'host')
        print('%s Retrieved %i URLs from archive.org' % (
            good, len(archived_urls) - 1))
        for url in archived_urls:
            verb('Internal page', url)
            internal.add(url)
    # Makes request to robots.txt
    response = requests.get(input_url + '/robots.txt',
                            proxies=random.choice(proxies)).text
    # Making sure robots.txt isn't some fancy 404 page
    if '<body' not in response:
        # If you know it, you know it
        matches = re.findall(r'Allow: (.*)|Disallow: (.*)', response)
        if matches:
            # Iterating over the matches, match is a tuple here
            for match in matches:
                # One item in match will always be empty so will combine both
                # items
                match = ''.join(match)
                # If the URL doesn't use a wildcard
                if '*' not in match:
                    url = input_url + match
                    # Add the URL to internal list for crawling
                    internal.add(url)
                    # Add the URL to robots list
                    robots.add(url)
            print('%s URLs retrieved from robots.txt: %s' % (good, len(robots)))
    # Makes request to sitemap.xml
    response = requests.get(input_url + '/sitemap.xml',
                            proxies=random.choice(proxies)).text
    # Making sure sitemap.xml isn't some fancy 404 page
    if '<body' not in response:
        matches = xml_parser(response)
        if matches:  # if there are any matches
            print('%s URLs retrieved from sitemap.xml: %s' % (
                good, len(matches)))
            for match in matches:
                verb('Internal page', match)
                # Cleaning up the URL and adding it to the internal list for
                # crawling
                internal.add(match)
[ "Extract", "links", "from", "robots", ".", "txt", "and", "sitemap", ".", "xml", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/zap.py#L10-L57
[ "def", "zap", "(", "input_url", ",", "archive", ",", "domain", ",", "host", ",", "internal", ",", "robots", ",", "proxies", ")", ":", "if", "archive", ":", "print", "(", "'%s Fetching URLs from archive.org'", "%", "run", ")", "if", "False", ":", "archived_...
6a29f2c9782ea9b3dc090db1774a259033600e39
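The alternation regex above returns two-element tuples in which exactly one slot is filled, hence the `''.join(match)` trick. A self-contained check on a toy robots.txt (the sample rules are made up):

import re

ROBOTS = "User-agent: *\nDisallow: /admin\nAllow: /public\nDisallow: /tmp/*"
matches = re.findall(r'Allow: (.*)|Disallow: (.*)', ROBOTS)
paths = [''.join(m) for m in matches if '*' not in ''.join(m)]
assert paths == ['/admin', '/public']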
train
requester
Handle the requests and return the response body.
core/requester.py
def requester(
        url,
        main_url=None,
        delay=0,
        cook=None,
        headers=None,
        timeout=10,
        host=None,
        proxies=[None],
        user_agents=[None],
        failed=None,
        processed=None
):
    """Handle the requests and return the response body."""
    cook = cook or set()
    headers = headers or set()
    user_agents = user_agents or ['Photon']
    failed = failed or set()
    processed = processed or set()
    # Mark the URL as crawled
    processed.add(url)
    # Pause/sleep the program for specified time
    time.sleep(delay)

    def make_request(url):
        """Default request"""
        final_headers = headers or {
            'Host': host,
            # Selecting a random user-agent
            'User-Agent': random.choice(user_agents),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip',
            'DNT': '1',
            'Connection': 'close',
        }
        try:
            response = SESSION.get(
                url,
                cookies=cook,
                headers=final_headers,
                verify=False,
                timeout=timeout,
                stream=True,
                proxies=random.choice(proxies)
            )
        except TooManyRedirects:
            return 'dummy'

        if 'text/html' in response.headers['content-type'] or \
           'text/plain' in response.headers['content-type']:
            # status_code is an int; the original compared it to the string
            # '404', so the check always passed.
            if response.status_code != 404:
                return response.text
            else:
                response.close()
                failed.add(url)
                return 'dummy'
        else:
            response.close()
            return 'dummy'

    return make_request(url)
[ "Handle", "the", "requests", "and", "return", "the", "response", "body", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/requester.py#L11-L72
[ "def", "requester", "(", "url", ",", "main_url", "=", "None", ",", "delay", "=", "0", ",", "cook", "=", "None", ",", "headers", "=", "None", ",", "timeout", "=", "10", ",", "host", "=", "None", ",", "proxies", "=", "[", "None", "]", ",", "user_ag...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
intel_extractor
Extract intel from the response body.
photon.py
def intel_extractor(url, response):
    """Extract intel from the response body."""
    # Strip <script> blocks and remaining tags once, before matching; the
    # original recomputed this per pattern and put the inline (?s) flag at
    # the end of the regex, which is an error on modern Python.
    res = re.sub(r'(?s)<(script).*?</\1>', '', response)
    res = re.sub(r'<[^<]+?>', '', res)
    for rintel in rintels:
        matches = rintel[0].findall(res)
        if matches:
            for match in matches:
                verb('Intel', match)
                bad_intel.add((match, rintel[1], url))
[ "Extract", "intel", "from", "the", "response", "body", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L208-L217
[ "def", "intel_extractor", "(", "url", ",", "response", ")", ":", "for", "rintel", "in", "rintels", ":", "res", "=", "re", ".", "sub", "(", "r'<(script).*?</\\1>(?s)'", ",", "''", ",", "response", ")", "res", "=", "re", ".", "sub", "(", "r'<[^<]+?>'", "...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
js_extractor
Extract js files from the response body
photon.py
def js_extractor(response): """Extract js files from the response body""" # Extract .js files matches = rscript.findall(response) for match in matches: match = match[2].replace('\'', '').replace('"', '') verb('JS file', match) bad_scripts.add(match)
def js_extractor(response): """Extract js files from the response body""" # Extract .js files matches = rscript.findall(response) for match in matches: match = match[2].replace('\'', '').replace('"', '') verb('JS file', match) bad_scripts.add(match)
[ "Extract", "js", "files", "from", "the", "response", "body" ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L220-L227
[ "def", "js_extractor", "(", "response", ")", ":", "# Extract .js files\r", "matches", "=", "rscript", ".", "findall", "(", "response", ")", "for", "match", "in", "matches", ":", "match", "=", "match", "[", "2", "]", ".", "replace", "(", "'\\''", ",", "''...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
extractor
Extract details from the response body.
photon.py
def extractor(url):
    """Extract details from the response body."""
    response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
    if clone:
        mirror(url, response)
    matches = rhref.findall(response)
    for link in matches:
        # Remove everything after a "#" to deal with in-page anchors
        link = link[1].replace('\'', '').replace('"', '').split('#')[0]
        # Checks if the URLs should be crawled
        if is_link(link, processed, files):
            if link[:4] == 'http':
                if link.startswith(main_url):
                    verb('Internal page', link)
                    internal.add(link)
                else:
                    verb('External page', link)
                    external.add(link)
            elif link[:2] == '//':
                if link.split('/')[2].startswith(host):
                    verb('Internal page', link)
                    internal.add(schema + '://' + link)
                else:
                    verb('External page', link)
                    external.add(link)
            elif link[:1] == '/':
                verb('Internal page', link)
                internal.add(remove_file(url) + link)
            else:
                verb('Internal page', link)
                usable_url = remove_file(url)
                if usable_url.endswith('/'):
                    internal.add(usable_url + link)
                else:
                    # the old `elif link.startswith('/')` branch here was unreachable:
                    # links starting with '/' are already handled above
                    internal.add(usable_url + '/' + link)

    if not only_urls:
        intel_extractor(url, response)
        js_extractor(response)
    if args.regex and not supress_regex:
        regxy(args.regex, response, supress_regex, custom)
    if api:
        matches = rentropy.findall(response)
        for match in matches:
            if entropy(match) >= 4:
                verb('Key', match)
                keys.add(url + ': ' + match)

def extractor(url):
    """Extract details from the response body."""
    response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed)
    if clone:
        mirror(url, response)
    matches = rhref.findall(response)
    for link in matches:
        # Remove everything after a "#" to deal with in-page anchors
        link = link[1].replace('\'', '').replace('"', '').split('#')[0]
        # Checks if the URLs should be crawled
        if is_link(link, processed, files):
            if link[:4] == 'http':
                if link.startswith(main_url):
                    verb('Internal page', link)
                    internal.add(link)
                else:
                    verb('External page', link)
                    external.add(link)
            elif link[:2] == '//':
                if link.split('/')[2].startswith(host):
                    verb('Internal page', link)
                    internal.add(schema + '://' + link)
                else:
                    verb('External page', link)
                    external.add(link)
            elif link[:1] == '/':
                verb('Internal page', link)
                internal.add(remove_file(url) + link)
            else:
                verb('Internal page', link)
                usable_url = remove_file(url)
                if usable_url.endswith('/'):
                    internal.add(usable_url + link)
                else:
                    # the old `elif link.startswith('/')` branch here was unreachable:
                    # links starting with '/' are already handled above
                    internal.add(usable_url + '/' + link)

    if not only_urls:
        intel_extractor(url, response)
        js_extractor(response)
    if args.regex and not supress_regex:
        regxy(args.regex, response, supress_regex, custom)
    if api:
        matches = rentropy.findall(response)
        for match in matches:
            if entropy(match) >= 4:
                verb('Key', match)
                keys.add(url + ': ' + match)
[ "Extract", "details", "from", "the", "response", "body", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L239-L287
[ "def", "extractor", "(", "url", ")", ":", "response", "=", "requester", "(", "url", ",", "main_url", ",", "delay", ",", "cook", ",", "headers", ",", "timeout", ",", "host", ",", "proxies", ",", "user_agents", ",", "failed", ",", "processed", ")", "if",...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
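The branch ladder above classifies hrefs by prefix. A toy, self-contained restatement (main_url and host are assumed values, and relative links are simply marked internal) makes the routing visible:

main_url, host = 'http://example.com', 'example.com'
for link in ['http://example.com/a', 'http://other.test/b', '//example.com/c', '/d', 'e.html']:
    if link[:4] == 'http':
        kind = 'internal' if link.startswith(main_url) else 'external'
    elif link[:2] == '//':
        # '//example.com/c'.split('/')[2] is the host part
        kind = 'internal' if link.split('/')[2].startswith(host) else 'external'
    else:
        kind = 'internal'  # '/d' and bare names resolve against the current page
    print(link, '->', kind)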
jscanner
Extract endpoints from JavaScript code.
photon.py
def jscanner(url): """Extract endpoints from JavaScript code.""" response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed) # Extract URLs/endpoints matches = rendpoint.findall(response) # Iterate over the matches, match is a tuple for match in matches: # Combining the items because one of them is always empty match = match[0] + match[1] # Making sure it's not some JavaScript code if not re.search(r'[}{><"\']', match) and not match == '/': verb('JS endpoint', match) endpoints.add(match)
def jscanner(url): """Extract endpoints from JavaScript code.""" response = requester(url, main_url, delay, cook, headers, timeout, host, proxies, user_agents, failed, processed) # Extract URLs/endpoints matches = rendpoint.findall(response) # Iterate over the matches, match is a tuple for match in matches: # Combining the items because one of them is always empty match = match[0] + match[1] # Making sure it's not some JavaScript code if not re.search(r'[}{><"\']', match) and not match == '/': verb('JS endpoint', match) endpoints.add(match)
[ "Extract", "endpoints", "from", "JavaScript", "code", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/photon.py#L290-L302
[ "def", "jscanner", "(", "url", ")", ":", "response", "=", "requester", "(", "url", ",", "main_url", ",", "delay", ",", "cook", ",", "headers", ",", "timeout", ",", "host", ",", "proxies", ",", "user_agents", ",", "failed", ",", "processed", ")", "# Ext...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
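The guard in jscanner, `not re.search(r'[}{><"\']', match)`, is what separates plausible endpoints from JavaScript fragments. A quick illustration with assumed candidate strings:

import re

candidates = ['/api/v1/users', '{a: 1}', '<div>', '/']
endpoints = [m for m in candidates if not re.search(r'[}{><"\']', m) and m != '/']
print(endpoints)  # ['/api/v1/users'] -- object literals, markup and a bare '/' are rejected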
updater
Update the current installation. git clones the latest version and merges it with the current directory.
core/updater.py
def updater(): """Update the current installation. git clones the latest version and merges it with the current directory. """ print('%s Checking for updates' % run) # Changes must be separated by ; changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels''' latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com') # Just a hack to see if a new version is available if changes not in latest_commit: changelog = re.search(r"changes = '''(.*?)'''", latest_commit) # Splitting the changes to form a list changelog = changelog.group(1).split(';') print('%s A new version of Photon is available.' % good) print('%s Changes:' % info) for change in changelog: # print changes print('%s>%s %s' % (green, end, change)) current_path = os.getcwd().split('/') # if you know it, you know it folder = current_path[-1] # current directory name path = '/'.join(current_path) # current directory path choice = input('%s Would you like to update? [Y/n] ' % que).lower() if choice != 'n': print('%s Updating Photon' % run) os.system('git clone --quiet https://github.com/s0md3v/Photon %s' % (folder)) os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null' % (path, folder, path, path, folder)) print('%s Update successful!' % good) else: print('%s Photon is up to date!' % good)
def updater(): """Update the current installation. git clones the latest version and merges it with the current directory. """ print('%s Checking for updates' % run) # Changes must be separated by ; changes = '''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels''' latest_commit = requester('https://raw.githubusercontent.com/s0md3v/Photon/master/core/updater.py', host='raw.githubusercontent.com') # Just a hack to see if a new version is available if changes not in latest_commit: changelog = re.search(r"changes = '''(.*?)'''", latest_commit) # Splitting the changes to form a list changelog = changelog.group(1).split(';') print('%s A new version of Photon is available.' % good) print('%s Changes:' % info) for change in changelog: # print changes print('%s>%s %s' % (green, end, change)) current_path = os.getcwd().split('/') # if you know it, you know it folder = current_path[-1] # current directory name path = '/'.join(current_path) # current directory path choice = input('%s Would you like to update? [Y/n] ' % que).lower() if choice != 'n': print('%s Updating Photon' % run) os.system('git clone --quiet https://github.com/s0md3v/Photon %s' % (folder)) os.system('cp -r %s/%s/* %s && rm -r %s/%s/ 2>/dev/null' % (path, folder, path, path, folder)) print('%s Update successful!' % good) else: print('%s Photon is up to date!' % good)
[ "Update", "the", "current", "installation", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/updater.py#L8-L40
[ "def", "updater", "(", ")", ":", "print", "(", "'%s Checking for updates'", "%", "run", ")", "# Changes must be separated by ;", "changes", "=", "'''major bug fixes;removed ninja mode;dropped python < 3.2 support;fixed unicode output;proxy support;more intels'''", "latest_commit", "=...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
find_subdomains
Find subdomains according to the TLD.
plugins/find_subdomains.py
def find_subdomains(domain): """Find subdomains according to the TLD.""" result = set() response = get('https://findsubdomains.com/subdomains-of/' + domain).text matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', response) for match in matches: result.add(match.replace(' ', '').replace('\n', '')) return list(result)
def find_subdomains(domain): """Find subdomains according to the TLD.""" result = set() response = get('https://findsubdomains.com/subdomains-of/' + domain).text matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', response) for match in matches: result.add(match.replace(' ', '').replace('\n', '')) return list(result)
[ "Find", "subdomains", "according", "to", "the", "TLD", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/find_subdomains.py#L7-L14
[ "def", "find_subdomains", "(", "domain", ")", ":", "result", "=", "set", "(", ")", "response", "=", "get", "(", "'https://findsubdomains.com/subdomains-of/'", "+", "domain", ")", ".", "text", "matches", "=", "findall", "(", "r'(?s)<div class=\"domains js-domain-name...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
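The scrape in find_subdomains hinges on one non-greedy, DOTALL regex. A self-contained check against a fabricated HTML snippet:

from re import findall

html = '<div class="domains js-domain-name">\n a.example.com </div>'
matches = findall(r'(?s)<div class="domains js-domain-name">(.*?)</div>', html)
print([m.replace(' ', '').replace('\n', '') for m in matches])  # ['a.example.com']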
flash
Process the URLs, using a threadpool to execute a function.
core/flash.py
def flash(function, links, thread_count):
    """Process the URLs, using a threadpool to execute a function."""
    # Convert links (set) to list
    links = list(links)
    threadpool = concurrent.futures.ThreadPoolExecutor(
        max_workers=thread_count)
    futures = (threadpool.submit(function, link) for link in links)
    for i, _ in enumerate(concurrent.futures.as_completed(futures)):
        if i + 1 == len(links) or (i + 1) % thread_count == 0:
            print('%s Progress: %i/%i' % (info, i + 1, len(links)), end='\r')
    print('')

def flash(function, links, thread_count):
    """Process the URLs, using a threadpool to execute a function."""
    # Convert links (set) to list
    links = list(links)
    threadpool = concurrent.futures.ThreadPoolExecutor(
        max_workers=thread_count)
    futures = (threadpool.submit(function, link) for link in links)
    for i, _ in enumerate(concurrent.futures.as_completed(futures)):
        if i + 1 == len(links) or (i + 1) % thread_count == 0:
            print('%s Progress: %i/%i' % (info, i + 1, len(links)), end='\r')
    print('')
[ "Process", "the", "URLs", "and", "uses", "a", "threadpool", "to", "execute", "a", "function", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/flash.py#L6-L17
[ "def", "flash", "(", "function", ",", "links", ",", "thread_count", ")", ":", "# Convert links (set) to list", "links", "=", "list", "(", "links", ")", "threadpool", "=", "concurrent", ".", "futures", ".", "ThreadPoolExecutor", "(", "max_workers", "=", "thread_c...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
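flash is a thin wrapper over concurrent.futures. The same submit/as_completed pattern in isolation, with a stand-in worker instead of extractor or jscanner:

import concurrent.futures

def work(link):          # stand-in for extractor/jscanner
    return link.upper()

links = ['http://a.test', 'http://b.test', 'http://c.test']
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
    futures = [pool.submit(work, link) for link in links]
    for i, future in enumerate(concurrent.futures.as_completed(futures)):
        print('Progress: %i/%i -> %s' % (i + 1, len(links), future.result()))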
regxy
Extract strings matching a regex pattern supplied by the user.
core/utils.py
def regxy(pattern, response, supress_regex, custom):
    """Extract strings matching a regex pattern supplied by the user."""
    try:
        matches = re.findall(r'%s' % pattern, response)
        for match in matches:
            verb('Custom regex', match)
            custom.add(match)
    except re.error:  # a bare except would also swallow KeyboardInterrupt
        supress_regex = True  # note: rebinding the parameter does not reach the caller

def regxy(pattern, response, supress_regex, custom):
    """Extract strings matching a regex pattern supplied by the user."""
    try:
        matches = re.findall(r'%s' % pattern, response)
        for match in matches:
            verb('Custom regex', match)
            custom.add(match)
    except re.error:  # a bare except would also swallow KeyboardInterrupt
        supress_regex = True  # note: rebinding the parameter does not reach the caller
[ "Extract", "a", "string", "based", "on", "regex", "pattern", "supplied", "by", "user", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L15-L23
[ "def", "regxy", "(", "pattern", ",", "response", ",", "supress_regex", ",", "custom", ")", ":", "try", ":", "matches", "=", "re", ".", "findall", "(", "r'%s'", "%", "pattern", ",", "response", ")", "for", "match", "in", "matches", ":", "verb", "(", "...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
is_link
Determine whether or not a link should be crawled A url should not be crawled if it - Is a file - Has already been crawled Args: url: str Url to be processed processed: list[str] List of urls that have already been crawled Returns: bool If `url` should be crawled
core/utils.py
def is_link(url, processed, files): """ Determine whether or not a link should be crawled A url should not be crawled if it - Is a file - Has already been crawled Args: url: str Url to be processed processed: list[str] List of urls that have already been crawled Returns: bool If `url` should be crawled """ if url not in processed: is_file = url.endswith(BAD_TYPES) if is_file: files.add(url) return False return True return False
def is_link(url, processed, files): """ Determine whether or not a link should be crawled A url should not be crawled if it - Is a file - Has already been crawled Args: url: str Url to be processed processed: list[str] List of urls that have already been crawled Returns: bool If `url` should be crawled """ if url not in processed: is_file = url.endswith(BAD_TYPES) if is_file: files.add(url) return False return True return False
[ "Determine", "whether", "or", "not", "a", "link", "should", "be", "crawled", "A", "url", "should", "not", "be", "crawled", "if", "it", "-", "Is", "a", "file", "-", "Has", "already", "been", "crawled" ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L26-L46
[ "def", "is_link", "(", "url", ",", "processed", ",", "files", ")", ":", "if", "url", "not", "in", "processed", ":", "is_file", "=", "url", ".", "endswith", "(", "BAD_TYPES", ")", "if", "is_file", ":", "files", ".", "add", "(", "url", ")", "return", ...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
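A self-contained restatement of the crawl filter (BAD_TYPES here is an assumed stand-in for Photon's tuple of file extensions, and should_crawl is a hypothetical name):

BAD_TYPES = ('.jpg', '.png', '.css', '.pdf')

def should_crawl(url, processed, files):
    if url in processed:
        return False
    if url.endswith(BAD_TYPES):
        files.add(url)        # record it as a file, but don't crawl it
        return False
    return True

processed, files = {'http://x.test/'}, set()
print(should_crawl('http://x.test/', processed, files))       # False: already crawled
print(should_crawl('http://x.test/a.pdf', processed, files))  # False: a file
print(should_crawl('http://x.test/page', processed, files))   # True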
remove_regex
Parse a list for non-matches to a regex. Args: urls: iterable of urls regex: string regex to be parsed for Returns: list of strings not matching regex
core/utils.py
def remove_regex(urls, regex): """ Parse a list for non-matches to a regex. Args: urls: iterable of urls regex: string regex to be parsed for Returns: list of strings not matching regex """ if not regex: return urls # To avoid iterating over the characters of a string if not isinstance(urls, (list, set, tuple)): urls = [urls] try: non_matching_urls = [url for url in urls if not re.search(regex, url)] except TypeError: return [] return non_matching_urls
def remove_regex(urls, regex): """ Parse a list for non-matches to a regex. Args: urls: iterable of urls regex: string regex to be parsed for Returns: list of strings not matching regex """ if not regex: return urls # To avoid iterating over the characters of a string if not isinstance(urls, (list, set, tuple)): urls = [urls] try: non_matching_urls = [url for url in urls if not re.search(regex, url)] except TypeError: return [] return non_matching_urls
[ "Parse", "a", "list", "for", "non", "-", "matches", "to", "a", "regex", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L49-L73
[ "def", "remove_regex", "(", "urls", ",", "regex", ")", ":", "if", "not", "regex", ":", "return", "urls", "# To avoid iterating over the characters of a string", "if", "not", "isinstance", "(", "urls", ",", "(", "list", ",", "set", ",", "tuple", ")", ")", ":"...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
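Stripped of its type guards, remove_regex is one comprehension. For example, with made-up URLs:

import re

urls = ['http://a.test/admin', 'http://a.test/blog', 'http://b.test/']
print([u for u in urls if not re.search(r'admin', u)])
# ['http://a.test/blog', 'http://b.test/']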
writer
Write the results.
core/utils.py
def writer(datasets, dataset_names, output_dir):
    """Write the results."""
    for dataset, dataset_name in zip(datasets, dataset_names):
        if dataset:
            filepath = output_dir + '/' + dataset_name + '.txt'
            with open(filepath, 'w+') as out_file:
                joined = '\n'.join(dataset)
                out_file.write(joined)  # the encode('utf-8').decode('utf-8') round-trip was a no-op for str
                out_file.write('\n')

def writer(datasets, dataset_names, output_dir):
    """Write the results."""
    for dataset, dataset_name in zip(datasets, dataset_names):
        if dataset:
            filepath = output_dir + '/' + dataset_name + '.txt'
            with open(filepath, 'w+') as out_file:
                joined = '\n'.join(dataset)
                out_file.write(joined)  # the encode('utf-8').decode('utf-8') round-trip was a no-op for str
                out_file.write('\n')
[ "Write", "the", "results", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L76-L84
[ "def", "writer", "(", "datasets", ",", "dataset_names", ",", "output_dir", ")", ":", "for", "dataset", ",", "dataset_name", "in", "zip", "(", "datasets", ",", "dataset_names", ")", ":", "if", "dataset", ":", "filepath", "=", "output_dir", "+", "'/'", "+", ...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
timer
Return the passed time.
core/utils.py
def timer(diff, processed): """Return the passed time.""" # Changes seconds into minutes and seconds minutes, seconds = divmod(diff, 60) try: # Finds average time taken by requests time_per_request = diff / float(len(processed)) except ZeroDivisionError: time_per_request = 0 return minutes, seconds, time_per_request
def timer(diff, processed): """Return the passed time.""" # Changes seconds into minutes and seconds minutes, seconds = divmod(diff, 60) try: # Finds average time taken by requests time_per_request = diff / float(len(processed)) except ZeroDivisionError: time_per_request = 0 return minutes, seconds, time_per_request
[ "Return", "the", "passed", "time", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L87-L96
[ "def", "timer", "(", "diff", ",", "processed", ")", ":", "# Changes seconds into minutes and seconds", "minutes", ",", "seconds", "=", "divmod", "(", "diff", ",", "60", ")", "try", ":", "# Finds average time taken by requests", "time_per_request", "=", "diff", "/", ...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
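divmod does the minute/second split in one step. A worked example with assumed numbers:

diff, processed = 125.0, range(50)               # 125 s elapsed, 50 URLs crawled
minutes, seconds = divmod(diff, 60)
print(minutes, seconds, diff / len(processed))   # 2.0 5.0 2.5  (2.5 s per request)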
entropy
Calculate the entropy of a string.
core/utils.py
def entropy(string):
    """Calculate the entropy of a string."""
    entropy = 0
    data = string.encode('utf-8')  # encode once instead of twice per iteration
    for number in range(256):
        # bytes.count needs an int (or bytes) on Python 3; chr(number) raised TypeError
        result = float(data.count(number)) / len(data)
        if result != 0:
            entropy = entropy - result * math.log(result, 2)
    return entropy

def entropy(string):
    """Calculate the entropy of a string."""
    entropy = 0
    data = string.encode('utf-8')  # encode once instead of twice per iteration
    for number in range(256):
        # bytes.count needs an int (or bytes) on Python 3; chr(number) raised TypeError
        result = float(data.count(number)) / len(data)
        if result != 0:
            entropy = entropy - result * math.log(result, 2)
    return entropy
[ "Calculate", "the", "entropy", "of", "a", "string", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L99-L107
[ "def", "entropy", "(", "string", ")", ":", "entropy", "=", "0", "for", "number", "in", "range", "(", "256", ")", ":", "result", "=", "float", "(", "string", ".", "encode", "(", "'utf-8'", ")", ".", "count", "(", "chr", "(", "number", ")", ")", ")...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
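This is byte-level Shannon entropy, H = -sum(p * log2(p)) over byte frequencies. A self-contained version (shannon_entropy is a hypothetical name) plus the >= 4 threshold the key scanner in extractor uses:

import math

def shannon_entropy(string):
    data = string.encode('utf-8')
    h = 0.0
    for byte in range(256):
        p = data.count(byte) / len(data)   # bytes.count takes an int in Python 3
        if p:
            h -= p * math.log(p, 2)
    return h

print(shannon_entropy('aaaa'))                   # 0.0 -- a single repeated byte
print(shannon_entropy('A1b2C3d4E5f6G7h8') >= 4)  # True -- 16 distinct bytes give log2(16) = 4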
extract_headers
This function extracts valid headers from interactive input.
core/utils.py
def extract_headers(headers): """This function extracts valid headers from interactive input.""" sorted_headers = {} matches = re.findall(r'(.*):\s(.*)', headers) for match in matches: header = match[0] value = match[1] try: if value[-1] == ',': value = value[:-1] sorted_headers[header] = value except IndexError: pass return sorted_headers
def extract_headers(headers): """This function extracts valid headers from interactive input.""" sorted_headers = {} matches = re.findall(r'(.*):\s(.*)', headers) for match in matches: header = match[0] value = match[1] try: if value[-1] == ',': value = value[:-1] sorted_headers[header] = value except IndexError: pass return sorted_headers
[ "This", "function", "extracts", "valid", "headers", "from", "interactive", "input", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L122-L135
[ "def", "extract_headers", "(", "headers", ")", ":", "sorted_headers", "=", "{", "}", "matches", "=", "re", ".", "findall", "(", "r'(.*):\\s(.*)'", ",", "headers", ")", "for", "match", "in", "matches", ":", "header", "=", "match", "[", "0", "]", "value", ...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
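The regex in extract_headers matches one `Name: value` pair per line. A quick demonstration, with the trailing-comma trim noted in a comment:

import re

raw = 'Host: example.com\nCookie: a=1,\nAccept: text/html'
print(re.findall(r'(.*):\s(.*)', raw))
# [('Host', 'example.com'), ('Cookie', 'a=1,'), ('Accept', 'text/html')]
# the function then strips the trailing ',' -> {'Cookie': 'a=1', ...}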
top_level
Extract the top level domain from a URL.
core/utils.py
def top_level(url, fix_protocol=True):
    """Extract the top level domain from a URL."""
    ext = tld.get_tld(url, fix_protocol=fix_protocol)
    toplevel = '.'.join(urlparse(url).netloc.split('.')[-2:]).split(
        ext)[0] + ext
    return toplevel

def top_level(url, fix_protocol=True):
    """Extract the top level domain from a URL."""
    ext = tld.get_tld(url, fix_protocol=fix_protocol)
    toplevel = '.'.join(urlparse(url).netloc.split('.')[-2:]).split(
        ext)[0] + ext
    return toplevel
[ "Extract", "the", "top", "level", "domain", "from", "an", "URL", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L138-L143
[ "def", "top_level", "(", "url", ",", "fix_protocol", "=", "True", ")", ":", "ext", "=", "tld", ".", "get_tld", "(", "url", ",", "fix_protocol", "=", "fix_protocol", ")", "toplevel", "=", "'.'", ".", "join", "(", "urlparse", "(", "url", ")", ".", "net...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
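Assuming the tld package is installed, tracing the expression for a plain .com host:

from urllib.parse import urlparse
import tld  # assumption: the `tld` package is available

url = 'https://blog.example.com/path'
ext = tld.get_tld(url, fix_protocol=True)                     # 'com'
netloc = urlparse(url).netloc                                 # 'blog.example.com'
print('.'.join(netloc.split('.')[-2:]).split(ext)[0] + ext)   # 'example.com'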
proxy_type
Match IP:PORT or DOMAIN:PORT in a loose manner
core/utils.py
def proxy_type(v):
    """ Match IP:PORT or DOMAIN:PORT in a loose manner """
    proxies = []
    if re.match(r"((http|socks5):\/\/.)?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})", v):
        proxies.append({"http": v, "https": v})
        return proxies
    elif re.match(r"((http|socks5):\/\/.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}:(\d{1,5})", v):
        proxies.append({"http": v, "https": v})
        return proxies
    elif is_proxy_list(v, proxies):
        return proxies
    else:
        raise argparse.ArgumentTypeError(
            "Proxy should follow IP:PORT or DOMAIN:PORT format")

def proxy_type(v):
    """ Match IP:PORT or DOMAIN:PORT in a loose manner """
    proxies = []
    if re.match(r"((http|socks5):\/\/.)?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})", v):
        proxies.append({"http": v, "https": v})
        return proxies
    elif re.match(r"((http|socks5):\/\/.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}:(\d{1,5})", v):
        proxies.append({"http": v, "https": v})
        return proxies
    elif is_proxy_list(v, proxies):
        return proxies
    else:
        raise argparse.ArgumentTypeError(
            "Proxy should follow IP:PORT or DOMAIN:PORT format")
[ "Match", "IP", ":", "PORT", "or", "DOMAIN", ":", "PORT", "in", "a", "losse", "manner" ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/utils.py#L162-L177
[ "def", "proxy_type", "(", "v", ")", ":", "proxies", "=", "[", "]", "if", "re", ".", "match", "(", "r\"((http|socks5):\\/\\/.)?(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}):(\\d{1,5})\"", ",", "v", ")", ":", "proxies", ".", "append", "(", "{", "\"http\"", ":", "v",...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
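The IP:PORT branch is driven by the first regex. Checking it in isolation against a couple of assumed values:

import re

pattern = r"((http|socks5):\/\/.)?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})"
print(bool(re.match(pattern, 'socks5://127.0.0.1:9050')))  # True
print(bool(re.match(pattern, 'not-a-proxy')))              # False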
dnsdumpster
Query dnsdumpster.com.
plugins/dnsdumpster.py
def dnsdumpster(domain, output_dir): """Query dnsdumpster.com.""" response = requests.Session().get('https://dnsdumpster.com/').text csrf_token = re.search( r"name='csrfmiddlewaretoken' value='(.*?)'", response).group(1) cookies = {'csrftoken': csrf_token} headers = {'Referer': 'https://dnsdumpster.com/'} data = {'csrfmiddlewaretoken': csrf_token, 'targetip': domain} response = requests.Session().post( 'https://dnsdumpster.com/', cookies=cookies, data=data, headers=headers) image = requests.get('https://dnsdumpster.com/static/map/%s.png' % domain) if image.status_code == 200: with open('%s/%s.png' % (output_dir, domain), 'wb') as f: f.write(image.content)
def dnsdumpster(domain, output_dir): """Query dnsdumpster.com.""" response = requests.Session().get('https://dnsdumpster.com/').text csrf_token = re.search( r"name='csrfmiddlewaretoken' value='(.*?)'", response).group(1) cookies = {'csrftoken': csrf_token} headers = {'Referer': 'https://dnsdumpster.com/'} data = {'csrfmiddlewaretoken': csrf_token, 'targetip': domain} response = requests.Session().post( 'https://dnsdumpster.com/', cookies=cookies, data=data, headers=headers) image = requests.get('https://dnsdumpster.com/static/map/%s.png' % domain) if image.status_code == 200: with open('%s/%s.png' % (output_dir, domain), 'wb') as f: f.write(image.content)
[ "Query", "dnsdumpster", ".", "com", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/plugins/dnsdumpster.py#L7-L22
[ "def", "dnsdumpster", "(", "domain", ",", "output_dir", ")", ":", "response", "=", "requests", ".", "Session", "(", ")", ".", "get", "(", "'https://dnsdumpster.com/'", ")", ".", "text", "csrf_token", "=", "re", ".", "search", "(", "r\"name='csrfmiddlewaretoken...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
prompt
Present the user a prompt.
core/prompt.py
def prompt(default=None): """Present the user a prompt.""" editor = 'nano' with tempfile.NamedTemporaryFile(mode='r+') as tmpfile: if default: tmpfile.write(default) tmpfile.flush() child_pid = os.fork() is_child = child_pid == 0 if is_child: os.execvp(editor, [editor, tmpfile.name]) else: os.waitpid(child_pid, 0) tmpfile.seek(0) return tmpfile.read().strip()
def prompt(default=None): """Present the user a prompt.""" editor = 'nano' with tempfile.NamedTemporaryFile(mode='r+') as tmpfile: if default: tmpfile.write(default) tmpfile.flush() child_pid = os.fork() is_child = child_pid == 0 if is_child: os.execvp(editor, [editor, tmpfile.name]) else: os.waitpid(child_pid, 0) tmpfile.seek(0) return tmpfile.read().strip()
[ "Present", "the", "user", "a", "prompt", "." ]
s0md3v/Photon
python
https://github.com/s0md3v/Photon/blob/6a29f2c9782ea9b3dc090db1774a259033600e39/core/prompt.py#L6-L22
[ "def", "prompt", "(", "default", "=", "None", ")", ":", "editor", "=", "'nano'", "with", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'r+'", ")", "as", "tmpfile", ":", "if", "default", ":", "tmpfile", ".", "write", "(", "default", ")", "tm...
6a29f2c9782ea9b3dc090db1774a259033600e39
train
QA_RealTrade.start_market
start the market thread and register backtest broker thread. QAMarket inherits from QATrader; QATrader has a trade_engine attribute, and trade_engine is a QA_Engine, which inherits from QA_Thread
QUANTAXIS/QAApplication/QATradeRealtime.py
def start_market(self):
    """
    start the market thread and register backtest broker thread

    QAMarket inherits from QATrader; QATrader has a trade_engine attribute,
    and trade_engine is a QA_Engine, which inherits from QA_Thread
    """
    # start the trade_engine thread
    self.market.start()

    # register the backtest broker and start its associated QAThread,
    # stored in the kernels dict as { 'broker_name': QAThread }
    #self.market.register(self.broker_name, self.broker)
    self.market.connect(self.broker_name)

def start_market(self):
    """
    start the market thread and register backtest broker thread

    QAMarket inherits from QATrader; QATrader has a trade_engine attribute,
    and trade_engine is a QA_Engine, which inherits from QA_Thread
    """
    # start the trade_engine thread
    self.market.start()

    # register the backtest broker and start its associated QAThread,
    # stored in the kernels dict as { 'broker_name': QAThread }
    #self.market.register(self.broker_name, self.broker)
    self.market.connect(self.broker_name)
[ "start", "the", "market", "thread", "and", "register", "backtest", "broker", "thread", "QAMarket", "继承QATrader,", "QATrader", "中有", "trade_engine属性", ",", "trade_engine类型是QA_Engine从", "QA_Thread继承" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAApplication/QATradeRealtime.py#L72-L82
[ "def", "start_market", "(", "self", ")", ":", "# 启动 trade_engine 线程", "self", ".", "market", ".", "start", "(", ")", "# 注册 backtest_broker ,并且启动和它关联线程QAThread 存放在 kernels 词典中, { 'broker_name': QAThread }", "#self.market.register(self.broker_name, self.broker)", "self", ".", "mark...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_RealTrade.run
generator driven data flow
QUANTAXIS/QAApplication/QATradeRealtime.py
def run(self):
    """generator driven data flow
    """
    # settlement events fire only when the date changes
    _date = None
    while QA_util_if_tradetime(self.now):
        for data in self.ingest_data:    # for each item in ingest_data
            # <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'>
            date = data.date[0]
            if self.market_type is MARKET_TYPE.STOCK_CN:    # stock market
                if _date != date:    # a new date:
                    # the previous trading day is over,
                    # send settle events to the broker and the account
                    try:
                        self.market.trade_engine.join()
                        # time.sleep(2)
                        self.market._settle(self.broker_name)
                    except Exception as e:
                        raise e
            # funds / indices / futures
            elif self.market_type in [MARKET_TYPE.FUND_CN, MARKET_TYPE.INDEX_CN, MARKET_TYPE.FUTURE_CN]:
                self.market._settle(self.broker_name)

            # print(data)
            self.broker.run(
                QA_Event(event_type=ENGINE_EVENT.UPCOMING_DATA,
                         market_data=data))
            # generate an UPCOMING_DATA event and queue it for execution
            self.market.upcoming_data(self.broker_name, data)

            self.market.trade_engine.join()

            _date = date

def run(self):
    """generator driven data flow
    """
    # settlement events fire only when the date changes
    _date = None
    while QA_util_if_tradetime(self.now):
        for data in self.ingest_data:    # for each item in ingest_data
            # <class 'QUANTAXIS.QAData.QADataStruct.QA_DataStruct_Stock_day'>
            date = data.date[0]
            if self.market_type is MARKET_TYPE.STOCK_CN:    # stock market
                if _date != date:    # a new date:
                    # the previous trading day is over,
                    # send settle events to the broker and the account
                    try:
                        self.market.trade_engine.join()
                        # time.sleep(2)
                        self.market._settle(self.broker_name)
                    except Exception as e:
                        raise e
            # funds / indices / futures
            elif self.market_type in [MARKET_TYPE.FUND_CN, MARKET_TYPE.INDEX_CN, MARKET_TYPE.FUTURE_CN]:
                self.market._settle(self.broker_name)

            # print(data)
            self.broker.run(
                QA_Event(event_type=ENGINE_EVENT.UPCOMING_DATA,
                         market_data=data))
            # generate an UPCOMING_DATA event and queue it for execution
            self.market.upcoming_data(self.broker_name, data)

            self.market.trade_engine.join()

            _date = date
[ "generator", "driven", "data", "flow" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAApplication/QATradeRealtime.py#L84-L117
[ "def", "run", "(", "self", ")", ":", "# 如果出现了日期的改变 才会进行结算的事件", "_date", "=", "None", "while", "QA_util_if_tradetime", "(", "self", ".", "now", ")", ":", "for", "data", "in", "self", ".", "ingest_data", ":", "# 对于在ingest_data中的数据", "# <class 'QUANTAXIS.QAData.QADat...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
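The settlement trigger above is simply "the date rolled over". A toy generator-driven loop with fake dates (ingest is a stand-in for self.ingest_data) shows the shape of the logic:

def ingest():
    yield from ('2019-01-02', '2019-01-02', '2019-01-03')

_date = None
for date in ingest():
    if _date is not None and date != _date:
        print('settle', _date)      # fires once, when 01-02 turns into 01-03
    print('upcoming data', date)
    _date = date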
QA_Account.message
the standard message which can be transfer
QUANTAXIS/QAARP/QAAccount.py
def message(self): 'the standard message which can be transfer' return { 'source': 'account', 'frequence': self.frequence, 'account_cookie': self.account_cookie, 'portfolio_cookie': self.portfolio_cookie, 'user_cookie': self.user_cookie, 'broker': self.broker, 'market_type': self.market_type, 'strategy_name': self.strategy_name, 'current_time': str(self._currenttime), 'allow_sellopen': self.allow_sellopen, 'allow_margin': self.allow_margin, 'allow_t0': self.allow_t0, 'margin_level': self.margin_level, 'init_assets': self.init_assets, 'init_cash': self.init_cash, 'init_hold': self.init_hold.to_dict(), 'commission_coeff': self.commission_coeff, 'tax_coeff': self.tax_coeff, 'cash': self.cash, 'history': self.history, 'trade_index': self.time_index_max, 'running_time': str(datetime.datetime.now()) if self.running_time is None else str(self.running_time), 'quantaxis_version': self.quantaxis_version, 'running_environment': self.running_environment, 'start_date': self.start_date, 'end_date': self.end_date, 'frozen': self.frozen, 'finished_id': self.finishedOrderid }
def message(self): 'the standard message which can be transfer' return { 'source': 'account', 'frequence': self.frequence, 'account_cookie': self.account_cookie, 'portfolio_cookie': self.portfolio_cookie, 'user_cookie': self.user_cookie, 'broker': self.broker, 'market_type': self.market_type, 'strategy_name': self.strategy_name, 'current_time': str(self._currenttime), 'allow_sellopen': self.allow_sellopen, 'allow_margin': self.allow_margin, 'allow_t0': self.allow_t0, 'margin_level': self.margin_level, 'init_assets': self.init_assets, 'init_cash': self.init_cash, 'init_hold': self.init_hold.to_dict(), 'commission_coeff': self.commission_coeff, 'tax_coeff': self.tax_coeff, 'cash': self.cash, 'history': self.history, 'trade_index': self.time_index_max, 'running_time': str(datetime.datetime.now()) if self.running_time is None else str(self.running_time), 'quantaxis_version': self.quantaxis_version, 'running_environment': self.running_environment, 'start_date': self.start_date, 'end_date': self.end_date, 'frozen': self.frozen, 'finished_id': self.finishedOrderid }
[ "the", "standard", "message", "which", "can", "be", "transfer" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L429-L489
[ "def", "message", "(", "self", ")", ":", "return", "{", "'source'", ":", "'account'", ",", "'frequence'", ":", "self", ".", "frequence", ",", "'account_cookie'", ":", "self", ".", "account_cookie", ",", "'portfolio_cookie'", ":", "self", ".", "portfolio_cookie...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.init_hold_with_account
Initial holdings keyed by account_cookie Returns: [type] -- [description]
QUANTAXIS/QAARP/QAAccount.py
def init_hold_with_account(self):
    """initial holdings keyed by account_cookie

    Returns:
        [type] -- [description]
    """

    return self.init_hold.reset_index().assign(
        account_cookie=self.account_cookie
    ).set_index(['code', 'account_cookie'])

def init_hold_with_account(self):
    """initial holdings keyed by account_cookie

    Returns:
        [type] -- [description]
    """

    return self.init_hold.reset_index().assign(
        account_cookie=self.account_cookie
    ).set_index(['code', 'account_cookie'])
[ "带account_cookie的初始化持仓" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L508-L518
[ "def", "init_hold_with_account", "(", "self", ")", ":", "return", "self", ".", "init_hold", ".", "reset_index", "(", ")", ".", "assign", "(", "account_cookie", "=", "self", ".", "account_cookie", ")", ".", "set_index", "(", "[", "'code'", ",", "'account_cook...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.start_date
The account's first trading date (backtest only) Raises: RuntimeWarning -- [description] Returns: [type] -- [description]
QUANTAXIS/QAARP/QAAccount.py
def start_date(self):
    """the account's first trading date (backtest only)

    Raises:
        RuntimeWarning -- [description]

    Returns:
        [type] -- [description]
    """
    if self.start_ is None:  # idiomatic None check instead of == None
        if len(self.time_index_max) > 0:
            return str(min(self.time_index_max))[0:10]
        else:
            print(
                RuntimeWarning(
                    'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'
                )
            )
    else:
        return self.start_

def start_date(self):
    """the account's first trading date (backtest only)

    Raises:
        RuntimeWarning -- [description]

    Returns:
        [type] -- [description]
    """
    if self.start_ is None:  # idiomatic None check instead of == None
        if len(self.time_index_max) > 0:
            return str(min(self.time_index_max))[0:10]
        else:
            print(
                RuntimeWarning(
                    'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'
                )
            )
    else:
        return self.start_
[ "账户的起始交易日期", "(", "只在回测中使用", ")" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L558-L577
[ "def", "start_date", "(", "self", ")", ":", "if", "self", ".", "start_", "==", "None", ":", "if", "len", "(", "self", ".", "time_index_max", ")", ">", "0", ":", "return", "str", "(", "min", "(", "self", ".", "time_index_max", ")", ")", "[", "0", ...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.end_date
The account's last trading date (backtest only) Raises: RuntimeWarning -- [description] Returns: [type] -- [description]
QUANTAXIS/QAARP/QAAccount.py
def end_date(self):
    """the account's last trading date (backtest only)

    Raises:
        RuntimeWarning -- [description]

    Returns:
        [type] -- [description]
    """
    if self.end_ is None:  # the original tested self.start_ here, an apparent copy-paste slip
        if len(self.time_index_max) > 0:
            return str(max(self.time_index_max))[0:10]
        else:
            print(
                RuntimeWarning(
                    'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'
                )
            )
    else:
        return self.end_

def end_date(self):
    """the account's last trading date (backtest only)

    Raises:
        RuntimeWarning -- [description]

    Returns:
        [type] -- [description]
    """
    if self.end_ is None:  # the original tested self.start_ here, an apparent copy-paste slip
        if len(self.time_index_max) > 0:
            return str(max(self.time_index_max))[0:10]
        else:
            print(
                RuntimeWarning(
                    'QAACCOUNT: THIS ACCOUNT DOESNOT HAVE ANY TRADE'
                )
            )
    else:
        return self.end_
[ "账户的交易结束日期", "(", "只在回测中使用", ")" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L580-L599
[ "def", "end_date", "(", "self", ")", ":", "if", "self", ".", "start_", "==", "None", ":", "if", "len", "(", "self", ".", "time_index_max", ")", ">", "0", ":", "return", "str", "(", "max", "(", "self", ".", "time_index_max", ")", ")", "[", "0", ":...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.history_table_min
Table of the trade history over the interval
QUANTAXIS/QAARP/QAAccount.py
def history_table_min(self):
    'table of the trade history over the interval'
    if len(self.history_min) > 0:
        lens = len(self.history_min[0])
    else:
        lens = len(self._history_headers)

    return pd.DataFrame(
        data=self.history_min,
        columns=self._history_headers[:lens]
    ).sort_index()

def history_table_min(self):
    'table of the trade history over the interval'
    if len(self.history_min) > 0:
        lens = len(self.history_min[0])
    else:
        lens = len(self._history_headers)

    return pd.DataFrame(
        data=self.history_min,
        columns=self._history_headers[:lens]
    ).sort_index()
[ "区间交易历史的table" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L639-L649
[ "def", "history_table_min", "(", "self", ")", ":", "if", "len", "(", "self", ".", "history_min", ")", ">", "0", ":", "lens", "=", "len", "(", "self", ".", "history_min", "[", "0", "]", ")", "else", ":", "lens", "=", "len", "(", "self", ".", "_his...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.history_table
Table of the trade history
QUANTAXIS/QAARP/QAAccount.py
def history_table(self):
    'table of the trade history'
    if len(self.history) > 0:
        lens = len(self.history[0])
    else:
        lens = len(self._history_headers)

    return pd.DataFrame(
        data=self.history,
        columns=self._history_headers[:lens]
    ).sort_index()

def history_table(self):
    'table of the trade history'
    if len(self.history) > 0:
        lens = len(self.history[0])
    else:
        lens = len(self._history_headers)

    return pd.DataFrame(
        data=self.history,
        columns=self._history_headers[:lens]
    ).sort_index()
[ "交易历史的table" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L670-L680
[ "def", "history_table", "(", "self", ")", ":", "if", "len", "(", "self", ".", "history", ")", ">", "0", ":", "lens", "=", "len", "(", "self", ".", "history", "[", "0", "]", ")", "else", ":", "lens", "=", "len", "(", "self", ".", "_history_headers...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.cash_table
Table of cash balances
QUANTAXIS/QAARP/QAAccount.py
def cash_table(self):
    'table of cash balances'
    _cash = pd.DataFrame(
        data=[self.cash[1::], self.time_index_max],
        index=['cash', 'datetime']
    ).T
    _cash = _cash.assign(
        date=_cash.datetime.apply(lambda x: pd.to_datetime(str(x)[0:10]))
    ).assign(account_cookie=self.account_cookie)
    # .sort_values('datetime')
    return _cash.set_index(['datetime', 'account_cookie'], drop=False)
    """
    Experimental @2018-06-09

    # Decomposing the account's positions

    1. Real position (hold):
        normal mode / TZero mode:
            hold = historical position (init_hold) + positions created by all
            trades since the account was initialized (hold_available)

            dynamic position (positions since initialization) hold_available:
                computed from self.history

    2. The account's sellable quota (sell_available)
        normal mode:
            sell_available
                before settlement: init_hold + trades (sell -)
                after settlement: init_hold + trades (buy +, sell -)
        TZero mode:
            sell_available
                before settlement: init_hold - quota occupied by trades (abs(buy +, sell -))
                settlement exists to flatten the book (i.e. make hold = {})
                after settlement: init_hold
    """

def cash_table(self):
    'table of cash balances'
    _cash = pd.DataFrame(
        data=[self.cash[1::], self.time_index_max],
        index=['cash', 'datetime']
    ).T
    _cash = _cash.assign(
        date=_cash.datetime.apply(lambda x: pd.to_datetime(str(x)[0:10]))
    ).assign(account_cookie=self.account_cookie)
    # .sort_values('datetime')
    return _cash.set_index(['datetime', 'account_cookie'], drop=False)
    """
    Experimental @2018-06-09

    # Decomposing the account's positions

    1. Real position (hold):
        normal mode / TZero mode:
            hold = historical position (init_hold) + positions created by all
            trades since the account was initialized (hold_available)

            dynamic position (positions since initialization) hold_available:
                computed from self.history

    2. The account's sellable quota (sell_available)
        normal mode:
            sell_available
                before settlement: init_hold + trades (sell -)
                after settlement: init_hold + trades (buy +, sell -)
        TZero mode:
            sell_available
                before settlement: init_hold - quota occupied by trades (abs(buy +, sell -))
                settlement exists to flatten the book (i.e. make hold = {})
                after settlement: init_hold
    """
[ "现金的table" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L690-L727
[ "def", "cash_table", "(", "self", ")", ":", "_cash", "=", "pd", ".", "DataFrame", "(", "data", "=", "[", "self", ".", "cash", "[", "1", ":", ":", "]", ",", "self", ".", "time_index_max", "]", ",", "index", "=", "[", "'cash'", ",", "'datetime'", "...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.hold
Real (net) positions
QUANTAXIS/QAARP/QAAccount.py
def hold(self):
    """real (net) positions
    """
    return pd.concat(
        [self.init_hold, self.hold_available]
    ).groupby('code').sum().replace(0, np.nan).dropna().sort_index()

def hold(self):
    """real (net) positions
    """
    return pd.concat(
        [self.init_hold, self.hold_available]
    ).groupby('code').sum().replace(0, np.nan).dropna().sort_index()
[ "真实持仓" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L730-L737
[ "def", "hold", "(", "self", ")", ":", "return", "pd", ".", "concat", "(", "[", "self", ".", "init_hold", ",", "self", ".", "hold_available", "]", ")", ".", "groupby", "(", "'code'", ")", ".", "sum", "(", ")", ".", "replace", "(", "0", ",", "np", ...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
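A small pandas sketch of the same concat/groupby/sum pipeline, with assumed codes and amounts; note how a position traded down to zero drops out:

import numpy as np
import pandas as pd

init_hold = pd.Series({'000001': 100, '600000': 200}, name='amount').rename_axis('code')
hold_available = pd.Series({'000001': -100, '000002': 300}, name='amount').rename_axis('code')
real_hold = (pd.concat([init_hold, hold_available])
             .groupby('code').sum().replace(0, np.nan).dropna().sort_index())
print(real_hold)   # 000002: 300.0, 600000: 200.0 -- the zeroed-out 000001 disappears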
QA_Account.hold_available
Available positions
QUANTAXIS/QAARP/QAAccount.py
def hold_available(self):
    """available positions
    """
    return self.history_table.groupby('code').amount.sum().replace(
        0, np.nan
    ).dropna().sort_index()

def hold_available(self):
    """available positions
    """
    return self.history_table.groupby('code').amount.sum().replace(
        0, np.nan
    ).dropna().sort_index()
[ "可用持仓" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L741-L747
[ "def", "hold_available", "(", "self", ")", ":", "return", "self", ".", "history_table", ".", "groupby", "(", "'code'", ")", ".", "amount", ".", "sum", "(", ")", ".", "replace", "(", "0", ",", "np", ".", "nan", ")", ".", "dropna", "(", ")", ".", "...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.trade
Pivot table of every trade Returns: pd.DataFrame -- the pivot_table here must aggregate with np.sum
QUANTAXIS/QAARP/QAAccount.py
def trade(self):
    """pivot table of every trade

    Returns:
        pd.DataFrame

        the pivot_table here must aggregate with np.sum
    """
    return self.history_table.pivot_table(
        index=['datetime', 'account_cookie'],
        columns='code',
        values='amount',
        aggfunc=np.sum
    ).fillna(0).sort_index()

def trade(self):
    """pivot table of every trade

    Returns:
        pd.DataFrame

        the pivot_table here must aggregate with np.sum
    """
    return self.history_table.pivot_table(
        index=['datetime', 'account_cookie'],
        columns='code',
        values='amount',
        aggfunc=np.sum
    ).fillna(0).sort_index()
[ "每次交易的pivot表" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L755-L770
[ "def", "trade", "(", "self", ")", ":", "return", "self", ".", "history_table", ".", "pivot_table", "(", "index", "=", "[", "'datetime'", ",", "'account_cookie'", "]", ",", "columns", "=", "'code'", ",", "values", "=", "'amount'", ",", "aggfunc", "=", "np...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
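Why aggfunc must be np.sum: two fills of the same code in the same bar have to add, not average. A minimal frame with made-up rows:

import numpy as np
import pandas as pd

history = pd.DataFrame({
    'datetime': ['2019-01-02 09:31:00', '2019-01-02 09:31:00', '2019-01-03 09:31:00'],
    'account_cookie': ['acc', 'acc', 'acc'],
    'code': ['000001', '000001', '600000'],
    'amount': [100, 100, -50],
})
print(history.pivot_table(index=['datetime', 'account_cookie'],
                          columns='code', values='amount',
                          aggfunc=np.sum).fillna(0))
# 000001 shows 200 on 01-02: the two buys are summed, not averaged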
QA_Account.daily_cash
Daily cash table at settlement
QUANTAXIS/QAARP/QAAccount.py
def daily_cash(self):
    'daily cash table at settlement'
    res = self.cash_table.drop_duplicates(subset='date', keep='last')
    le = pd.DataFrame(
        pd.Series(
            data=None,
            index=pd.to_datetime(self.trade_range_max).set_names('date'),
            name='predrop'
        )
    )
    ri = res.set_index('date')
    res_ = pd.merge(le, ri, how='left', left_index=True, right_index=True)
    res_ = res_.ffill().fillna(self.init_cash).drop(
        ['predrop', 'datetime', 'account_cookie'],
        axis=1
    ).reset_index().set_index(['date'], drop=False).sort_index()
    res_ = res_[res_.index.isin(self.trade_range)]
    return res_

def daily_cash(self):
    'daily cash table at settlement'
    res = self.cash_table.drop_duplicates(subset='date', keep='last')
    le = pd.DataFrame(
        pd.Series(
            data=None,
            index=pd.to_datetime(self.trade_range_max).set_names('date'),
            name='predrop'
        )
    )
    ri = res.set_index('date')
    res_ = pd.merge(le, ri, how='left', left_index=True, right_index=True)
    res_ = res_.ffill().fillna(self.init_cash).drop(
        ['predrop', 'datetime', 'account_cookie'],
        axis=1
    ).reset_index().set_index(['date'], drop=False).sort_index()
    res_ = res_[res_.index.isin(self.trade_range)]
    return res_
[ "每日交易结算时的现金表" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L773-L781
[ "def", "daily_cash", "(", "self", ")", ":", "res", "=", "self", ".", "cash_table", ".", "drop_duplicates", "(", "subset", "=", "'date'", ",", "keep", "=", "'last'", ")", "le", "=", "pd", ".", "DataFrame", "(", "pd", ".", "Series", "(", "data", "=", ...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
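The merge/ffill/fillna chain is easier to see on a toy series. An equivalent sketch using reindex (values are assumed): align to the full trading calendar, carry the last balance forward, and fall back to init_cash before the first trade.

import pandas as pd

trade_range = pd.to_datetime(['2019-01-02', '2019-01-03', '2019-01-04'])
cash = pd.Series([90000.0], index=pd.to_datetime(['2019-01-03']))
print(cash.reindex(trade_range).ffill().fillna(100000.0))
# 01-02 -> 100000.0 (init_cash), 01-03 -> 90000.0, 01-04 -> 90000.0 (carried forward)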
QA_Account.daily_hold
Daily position table at settlement
QUANTAXIS/QAARP/QAAccount.py
def daily_hold(self):
    'daily position table at settlement'
    data = self.trade.cumsum()
    if len(data) < 1:
        return None
    else:
        # print(data.index.levels[0])
        data = data.assign(account_cookie=self.account_cookie).assign(
            date=pd.to_datetime(data.index.levels[0]).date
        )
        data.date = pd.to_datetime(data.date)
        data = data.set_index(['date', 'account_cookie'])
        res = data[~data.index.duplicated(keep='last')].sort_index()
        # this also counts positions while a stock is suspended,
        # but those drop out when market_value is computed
        le = pd.DataFrame(
            pd.Series(
                data=None,
                index=pd.to_datetime(self.trade_range_max).set_names('date'),
                name='predrop'
            )
        )
        ri = res.reset_index().set_index('date')
        res_ = pd.merge(le, ri, how='left', left_index=True, right_index=True)
        res_ = res_.ffill().fillna(0).drop(
            ['predrop', 'account_cookie'],
            axis=1
        ).reset_index().set_index(['date']).sort_index()
        res_ = res_[res_.index.isin(self.trade_range)]
        return res_

def daily_hold(self):
    'daily position table at settlement'
    data = self.trade.cumsum()
    if len(data) < 1:
        return None
    else:
        # print(data.index.levels[0])
        data = data.assign(account_cookie=self.account_cookie).assign(
            date=pd.to_datetime(data.index.levels[0]).date
        )
        data.date = pd.to_datetime(data.date)
        data = data.set_index(['date', 'account_cookie'])
        res = data[~data.index.duplicated(keep='last')].sort_index()
        # this also counts positions while a stock is suspended,
        # but those drop out when market_value is computed
        le = pd.DataFrame(
            pd.Series(
                data=None,
                index=pd.to_datetime(self.trade_range_max).set_names('date'),
                name='predrop'
            )
        )
        ri = res.reset_index().set_index('date')
        res_ = pd.merge(le, ri, how='left', left_index=True, right_index=True)
        res_ = res_.ffill().fillna(0).drop(
            ['predrop', 'account_cookie'],
            axis=1
        ).reset_index().set_index(['date']).sort_index()
        res_ = res_[res_.index.isin(self.trade_range)]
        return res_
[ "每日交易结算时的持仓表" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L784-L804
[ "def", "daily_hold", "(", "self", ")", ":", "data", "=", "self", ".", "trade", ".", "cumsum", "(", ")", "if", "len", "(", "data", ")", "<", "1", ":", "return", "None", "else", ":", "# print(data.index.levels[0])", "data", "=", "data", ".", "assign", ...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.daily_frozen
Daily frozen-margin table at settlement
QUANTAXIS/QAARP/QAAccount.py
def daily_frozen(self):
    'daily frozen-margin table at settlement'  # the original docstring reused the position-table wording
    res_ = self.history_table.assign(
        date=pd.to_datetime(self.history_table.datetime)
    ).set_index('date').resample('D').frozen.last().ffill()  # fillna(method='pad') is deprecated
    res_ = res_[res_.index.isin(self.trade_range)]
    return res_

def daily_frozen(self):
    'daily frozen-margin table at settlement'  # the original docstring reused the position-table wording
    res_ = self.history_table.assign(
        date=pd.to_datetime(self.history_table.datetime)
    ).set_index('date').resample('D').frozen.last().ffill()  # fillna(method='pad') is deprecated
    res_ = res_[res_.index.isin(self.trade_range)]
    return res_
[ "每日交易结算时的持仓表" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L807-L811
[ "def", "daily_frozen", "(", "self", ")", ":", "res_", "=", "self", ".", "history_table", ".", "assign", "(", "date", "=", "pd", ".", "to_datetime", "(", "self", ".", "history_table", ".", "datetime", ")", ")", ".", "set_index", "(", "'date'", ")", ".",...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.hold_table
Positions as of a given moment; if a date is given, the positions before that day's open are returned
QUANTAXIS/QAARP/QAAccount.py
def hold_table(self, datetime=None):
    "positions as of a given moment; a date returns the positions before that day's open"
    if datetime is None:
        hold_available = self.history_table.set_index(
            'datetime'
        ).sort_index().groupby('code').amount.sum().sort_index()
    else:
        hold_available = self.history_table.set_index(
            'datetime'
        ).sort_index().loc[:datetime].groupby('code'
                                              ).amount.sum().sort_index()

    return pd.concat([self.init_hold,
                      hold_available]).groupby('code').sum().sort_index(
                      ).apply(lambda x: x if x > 0 else None).dropna()

def hold_table(self, datetime=None):
    "positions as of a given moment; a date returns the positions before that day's open"
    if datetime is None:
        hold_available = self.history_table.set_index(
            'datetime'
        ).sort_index().groupby('code').amount.sum().sort_index()
    else:
        hold_available = self.history_table.set_index(
            'datetime'
        ).sort_index().loc[:datetime].groupby('code'
                                              ).amount.sum().sort_index()

    return pd.concat([self.init_hold,
                      hold_available]).groupby('code').sum().sort_index(
                      ).apply(lambda x: x if x > 0 else None).dropna()
[ "到某一个时刻的持仓", "如果给的是日期", "则返回当日开盘前的持仓" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L822-L836
[ "def", "hold_table", "(", "self", ",", "datetime", "=", "None", ")", ":", "if", "datetime", "is", "None", ":", "hold_available", "=", "self", ".", "history_table", ".", "set_index", "(", "'datetime'", ")", ".", "sort_index", "(", ")", ".", "groupby", "("...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.current_hold_price
Compute the cost basis of the current positions, for paper-trading and live queries Returns: [type] -- [description]
QUANTAXIS/QAARP/QAAccount.py
def current_hold_price(self):
    """compute the cost basis of the current positions,
    used for paper-trading and live queries

    Returns:
        [type] -- [description]
    """

    def weights(x):
        n = len(x)
        res = 1
        while res != 0:  # equivalent to the original `res > 0 or res < 0`
            res = sum(x[:n]['amount'])
            n = n - 1

        x = x[n + 1:]

        if sum(x['amount']) != 0:
            return np.average(
                x['price'],
                weights=x['amount'],
                returned=True
            )
        else:
            return np.nan

    return self.history_table.set_index(
        'datetime',
        drop=False
    ).sort_index().groupby('code').apply(weights).dropna()

def current_hold_price(self):
    """compute the cost basis of the current positions,
    used for paper-trading and live queries

    Returns:
        [type] -- [description]
    """

    def weights(x):
        n = len(x)
        res = 1
        while res != 0:  # equivalent to the original `res > 0 or res < 0`
            res = sum(x[:n]['amount'])
            n = n - 1

        x = x[n + 1:]

        if sum(x['amount']) != 0:
            return np.average(
                x['price'],
                weights=x['amount'],
                returned=True
            )
        else:
            return np.nan

    return self.history_table.set_index(
        'datetime',
        drop=False
    ).sort_index().groupby('code').apply(weights).dropna()
[ "计算目前持仓的成本", "用于模拟盘和实盘查询" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L838-L865
[ "def", "current_hold_price", "(", "self", ")", ":", "def", "weights", "(", "x", ")", ":", "n", "=", "len", "(", "x", ")", "res", "=", "1", "while", "res", ">", "0", "or", "res", "<", "0", ":", "res", "=", "sum", "(", "x", "[", ":", "n", "]"...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.hold_price
Compute position cost; if a date is given, the positions before that day's open are used Keyword Arguments: datetime {[type]} -- [description] (default: {None}) Returns: [type] -- [description]
QUANTAXIS/QAARP/QAAccount.py
def hold_price(self, datetime=None):
    """compute position cost; if a date is given,
    the positions before that day's open are used

    Keyword Arguments:
        datetime {[type]} -- [description] (default: {None})

    Returns:
        [type] -- [description]
    """

    def weights(x):
        if sum(x['amount']) != 0:
            return np.average(
                x['price'],
                weights=x['amount'],
                returned=True
            )
        else:
            return np.nan

    if datetime is None:
        return self.history_table.set_index(
            'datetime',
            drop=False
        ).sort_index().groupby('code').apply(weights).dropna()
    else:
        return self.history_table.set_index(
            'datetime',
            drop=False
        ).sort_index().loc[:datetime].groupby('code').apply(weights
                                                            ).dropna()

def hold_price(self, datetime=None):
    """compute position cost; if a date is given,
    the positions before that day's open are used

    Keyword Arguments:
        datetime {[type]} -- [description] (default: {None})

    Returns:
        [type] -- [description]
    """

    def weights(x):
        if sum(x['amount']) != 0:
            return np.average(
                x['price'],
                weights=x['amount'],
                returned=True
            )
        else:
            return np.nan

    if datetime is None:
        return self.history_table.set_index(
            'datetime',
            drop=False
        ).sort_index().groupby('code').apply(weights).dropna()
    else:
        return self.history_table.set_index(
            'datetime',
            drop=False
        ).sort_index().loc[:datetime].groupby('code').apply(weights
                                                            ).dropna()
[ "计算持仓成本", "如果给的是日期", "则返回当日开盘前的持仓" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L867-L897
[ "def", "hold_price", "(", "self", ",", "datetime", "=", "None", ")", ":", "def", "weights", "(", "x", ")", ":", "if", "sum", "(", "x", "[", "'amount'", "]", ")", "!=", "0", ":", "return", "np", ".", "average", "(", "x", "[", "'price'", "]", ","...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
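The weights helper is np.average with returned=True, which hands back the pair (average, total weight). A worked pair of fills with assumed numbers:

import numpy as np

price = np.array([10.0, 11.0])       # two fills of one code
amount = np.array([100, 300])
avg, total = np.average(price, weights=amount, returned=True)
print(avg, total)                    # 10.75 400.0 -- cost basis and position size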
QA_Account.hold_time
Holding time Keyword Arguments: datetime {[type]} -- [description] (default: {None})
QUANTAXIS/QAARP/QAAccount.py
def hold_time(self, datetime=None):
    """holding time

    Keyword Arguments:
        datetime {[type]} -- [description] (default: {None})
    """

    def weights(x):
        if sum(x['amount']) != 0:
            return pd.Timestamp(self.datetime
                                ) - pd.to_datetime(x.datetime.max())
        else:
            return np.nan

    if datetime is None:
        return self.history_table.set_index(
            'datetime',
            drop=False
        ).sort_index().groupby('code').apply(weights).dropna()
    else:
        return self.history_table.set_index(
            'datetime',
            drop=False
        ).sort_index().loc[:datetime].groupby('code').apply(weights
                                                            ).dropna()

def hold_time(self, datetime=None):
    """holding time

    Keyword Arguments:
        datetime {[type]} -- [description] (default: {None})
    """

    def weights(x):
        if sum(x['amount']) != 0:
            return pd.Timestamp(self.datetime
                                ) - pd.to_datetime(x.datetime.max())
        else:
            return np.nan

    if datetime is None:
        return self.history_table.set_index(
            'datetime',
            drop=False
        ).sort_index().groupby('code').apply(weights).dropna()
    else:
        return self.history_table.set_index(
            'datetime',
            drop=False
        ).sort_index().loc[:datetime].groupby('code').apply(weights
                                                            ).dropna()
[ "持仓时间" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L900-L924
[ "def", "hold_time", "(", "self", ",", "datetime", "=", "None", ")", ":", "def", "weights", "(", "x", ")", ":", "if", "sum", "(", "x", "[", "'amount'", "]", ")", "!=", "0", ":", "return", "pd", ".", "Timestamp", "(", "self", ".", "datetime", ")", ...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
train
QA_Account.reset_assets
reset_history/cash/
QUANTAXIS/QAARP/QAAccount.py
def reset_assets(self, init_cash=None):
    'reset_history/cash/'
    self.sell_available = copy.deepcopy(self.init_hold)
    self.history = []
    if init_cash is not None:  # keep the previous init_cash when none is given (the original set it to None)
        self.init_cash = init_cash
    self.cash = [self.init_cash]
    self.cash_available = self.cash[-1]

def reset_assets(self, init_cash=None):
    'reset_history/cash/'
    self.sell_available = copy.deepcopy(self.init_hold)
    self.history = []
    if init_cash is not None:  # keep the previous init_cash when none is given (the original set it to None)
        self.init_cash = init_cash
    self.cash = [self.init_cash]
    self.cash_available = self.cash[-1]
[ "reset_history", "/", "cash", "/" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L926-L932
[ "def", "reset_assets", "(", "self", ",", "init_cash", "=", "None", ")", ":", "self", ".", "sell_available", "=", "copy", ".", "deepcopy", "(", "self", ".", "init_hold", ")", "self", ".", "history", "=", "[", "]", "self", ".", "init_cash", "=", "init_ca...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
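One caveat in reset_assets: init_cash defaults to None and is assigned unconditionally, so calling reset_assets() with no argument leaves the account with cash == [None]. A hedged sketch of a defensive wrapper (the helper name is hypothetical, not part of QUANTAXIS):

def reset_assets_safely(account, init_cash=None):
    # Assumption: when no cash figure is given, keeping the previous
    # init_cash is the intended behaviour rather than resetting to None.
    account.reset_assets(init_cash if init_cash is not None else account.init_cash)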
train
QA_Account.receive_simpledeal
Fast matching deal interface. This interface executes deals directly, so make sure the information passed in is actually tradable. It covers 1. execution of stocks/futures 2. appending of history records 3. handling of cash/positions/frozen funds Arguments: code {[type]} -- [description] trade_price {[type]} -- [description] trade_amount {[type]} -- [description] trade_towards {[type]} -- [description] trade_time {[type]} -- [description] Keyword Arguments: message {[type]} -- [description] (default: {None}) 2018/11/7 @yutiansut Fixed a bug: when using this fast matching interface directly, a futures sell deducted margin, and buying back to close should compute the profit in reverse, e.g. short at 3800, buy back to close at 3700 should be a profit of 100 @2018-12-31 margin account OK @2019/1/3 some important meanings frozen = self.market_preset.get_frozen(code) # margin rate unit = self.market_preset.get_unit(code) # contract multiplier raw_trade_money = trade_price*trade_amount*market_towards # total market value value = raw_trade_money * unit # total contract value trade_money = value * frozen # trade margin
QUANTAXIS/QAARP/QAAccount.py
def receive_simpledeal( self, code, trade_price, trade_amount, trade_towards, trade_time, message=None, order_id=None, trade_id=None, realorder_id=None ): """快速撮合成交接口 此接口是一个直接可以成交的接口, 所以务必确保给出的信息是可以成交的 此接口涉及的是 1. 股票/期货的成交 2. 历史记录的增加 3. 现金/持仓/冻结资金的处理 Arguments: code {[type]} -- [description] trade_price {[type]} -- [description] trade_amount {[type]} -- [description] trade_towards {[type]} -- [description] trade_time {[type]} -- [description] Keyword Arguments: message {[type]} -- [description] (default: {None}) 2018/11/7 @yutiansut 修复一个bug: 在直接使用该快速撮合接口的时候, 期货卖出会扣减保证金, 买回来的时候应该反算利润 如 3800卖空 3700买回平仓 应为100利润 @2018-12-31 保证金账户ok @2019/1/3 一些重要的意思 frozen = self.market_preset.get_frozen(code) # 保证金率 unit = self.market_preset.get_unit(code) # 合约乘数 raw_trade_money = trade_price*trade_amount*market_towards # 总市值 value = raw_trade_money * unit # 合约总价值 trade_money = value * frozen # 交易保证金 """ self.datetime = trade_time if realorder_id in self.finishedOrderid: pass else: self.finishedOrderid.append(realorder_id) market_towards = 1 if trade_towards > 0 else -1 # value 合约价值 unit 合约乘数 if self.allow_margin: frozen = self.market_preset.get_frozen(code) # 保证金率 unit = self.market_preset.get_unit(code) # 合约乘数 raw_trade_money = trade_price * trade_amount * market_towards # 总市值 value = raw_trade_money * unit # 合约总价值 trade_money = value * frozen # 交易保证金 else: trade_money = trade_price * trade_amount * market_towards raw_trade_money = trade_money value = trade_money unit = 1 frozen = 1 # 计算费用 # trade_price if self.market_type == MARKET_TYPE.FUTURE_CN: # 期货不收税 # 双边手续费 也没有最小手续费限制 commission_fee_preset = self.market_preset.get_code(code) if trade_towards in [ORDER_DIRECTION.BUY_OPEN, ORDER_DIRECTION.BUY_CLOSE, ORDER_DIRECTION.SELL_CLOSE, ORDER_DIRECTION.SELL_OPEN]: commission_fee = commission_fee_preset['commission_coeff_pervol'] * trade_amount + \ commission_fee_preset['commission_coeff_peramount'] * \ abs(value) elif trade_towards in [ORDER_DIRECTION.BUY_CLOSETODAY, ORDER_DIRECTION.SELL_CLOSETODAY]: commission_fee = commission_fee_preset['commission_coeff_today_pervol'] * trade_amount + \ commission_fee_preset['commission_coeff_today_peramount'] * \ abs(value) tax_fee = 0 # 买入不收印花税 elif self.market_type == MARKET_TYPE.STOCK_CN: commission_fee = self.commission_coeff * \ abs(trade_money) commission_fee = 5 if commission_fee < 5 else commission_fee if int(trade_towards) > 0: tax_fee = 0 # 买入不收印花税 else: tax_fee = self.tax_coeff * abs(trade_money) # 结算交易 if self.cash[-1] > trade_money + commission_fee + tax_fee: self.time_index_max.append(trade_time) # TODO: 目前还不支持期货的锁仓 if self.allow_sellopen: if trade_towards in [ORDER_DIRECTION.BUY_OPEN, ORDER_DIRECTION.SELL_OPEN]: # 开仓单占用现金 计算avg # 初始化 if code in self.frozen.keys(): if trade_towards in self.frozen[code].keys(): pass else: self.frozen[code][str(trade_towards)] = { 'money': 0, 'amount': 0, 'avg_price': 0 } else: self.frozen[code] = { str(ORDER_DIRECTION.BUY_OPEN): { 'money': 0, 'amount': 0, 'avg_price': 0 }, str(ORDER_DIRECTION.SELL_OPEN): { 'money': 0, 'amount': 0, 'avg_price': 0 } } """[summary] # frozen的计算 # money 冻结的资金 # amount 冻结的数量 2018-12-31 """ self.frozen[code][str(trade_towards)]['money'] = ( ( self.frozen[code][str(trade_towards)]['money'] * self.frozen[code][str(trade_towards)]['amount'] ) + abs(trade_money) ) / ( self.frozen[code][str(trade_towards)]['amount'] + trade_amount ) self.frozen[code][str(trade_towards)]['avg_price'] = ( ( self.frozen[code][str(trade_towards)]['avg_price'] * self.frozen[code][str(trade_towards)]['amount'] ) + abs(raw_trade_money) ) / ( self.frozen[code][str(trade_towards)]['amount'] + trade_amount ) self.frozen[code][str(trade_towards)]['amount'] += trade_amount self.cash.append( self.cash[-1] - abs(trade_money) - commission_fee - tax_fee ) elif trade_towards in [ORDER_DIRECTION.BUY_CLOSE, ORDER_DIRECTION.BUY_CLOSETODAY, ORDER_DIRECTION.SELL_CLOSE, ORDER_DIRECTION.SELL_CLOSETODAY]: # 平仓单释放现金 # if trade_towards == ORDER_DIRECTION.BUY_CLOSE: # 卖空开仓 平仓买入 # self.cash if trade_towards in [ORDER_DIRECTION.BUY_CLOSE, ORDER_DIRECTION.BUY_CLOSETODAY]: # 买入平仓 之前是空开 # self.frozen[code][ORDER_DIRECTION.SELL_OPEN]['money'] -= trade_money self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN) ]['amount'] -= trade_amount frozen_part = self.frozen[code][ str(ORDER_DIRECTION.SELL_OPEN)]['money'] * trade_amount # 账户的现金+ 冻结的的释放 + 买卖价差* 杠杆 self.cash.append( self.cash[-1] + frozen_part + (frozen_part - trade_money) / frozen - commission_fee - tax_fee ) if self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN) ]['amount'] == 0: self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN) ]['money'] = 0 self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN) ]['avg_price'] = 0 elif trade_towards in [ORDER_DIRECTION.SELL_CLOSE, ORDER_DIRECTION.SELL_CLOSETODAY]: # 卖出平仓 之前是多开 # self.frozen[code][ORDER_DIRECTION.BUY_OPEN]['money'] -= trade_money self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN) ]['amount'] -= trade_amount frozen_part = self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN) ]['money'] * trade_amount self.cash.append( self.cash[-1] + frozen_part + (abs(trade_money) - frozen_part) / frozen - commission_fee - tax_fee ) if self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN) ]['amount'] == 0: self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN) ]['money'] = 0 self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN) ]['avg_price'] = 0 else: # 不允许卖空开仓的==> 股票 self.cash.append( self.cash[-1] - trade_money - tax_fee - commission_fee ) if self.allow_t0 or trade_towards == ORDER_DIRECTION.SELL: self.sell_available[code] = self.sell_available.get( code, 0 ) + trade_amount * market_towards self.buy_available = self.sell_available self.cash_available = self.cash[-1] frozen_money = abs(trade_money) if trade_towards in [ ORDER_DIRECTION.BUY_OPEN, ORDER_DIRECTION.SELL_OPEN ] else 0 self.history.append( [ str(trade_time), code, trade_price, market_towards * trade_amount, self.cash[-1], order_id, realorder_id, trade_id, self.account_cookie, commission_fee, tax_fee, message, frozen_money, trade_towards ] ) else: print('ALERT MONEY NOT ENOUGH!!!') print(self.cash[-1]) self.cash_available = self.cash[-1]
def receive_simpledeal( self, code, trade_price, trade_amount, trade_towards, trade_time, message=None, order_id=None, trade_id=None, realorder_id=None ): """快速撮合成交接口 此接口是一个直接可以成交的接口, 所以务必确保给出的信息是可以成交的 此接口涉及的是 1. 股票/期货的成交 2. 历史记录的增加 3. 现金/持仓/冻结资金的处理 Arguments: code {[type]} -- [description] trade_price {[type]} -- [description] trade_amount {[type]} -- [description] trade_towards {[type]} -- [description] trade_time {[type]} -- [description] Keyword Arguments: message {[type]} -- [description] (default: {None}) 2018/11/7 @yutiansut 修复一个bug: 在直接使用该快速撮合接口的时候, 期货卖出会扣减保证金, 买回来的时候应该反算利润 如 3800卖空 3700买回平仓 应为100利润 @2018-12-31 保证金账户ok @2019/1/3 一些重要的意思 frozen = self.market_preset.get_frozen(code) # 保证金率 unit = self.market_preset.get_unit(code) # 合约乘数 raw_trade_money = trade_price*trade_amount*market_towards # 总市值 value = raw_trade_money * unit # 合约总价值 trade_money = value * frozen # 交易保证金 """ self.datetime = trade_time if realorder_id in self.finishedOrderid: pass else: self.finishedOrderid.append(realorder_id) market_towards = 1 if trade_towards > 0 else -1 # value 合约价值 unit 合约乘数 if self.allow_margin: frozen = self.market_preset.get_frozen(code) # 保证金率 unit = self.market_preset.get_unit(code) # 合约乘数 raw_trade_money = trade_price * trade_amount * market_towards # 总市值 value = raw_trade_money * unit # 合约总价值 trade_money = value * frozen # 交易保证金 else: trade_money = trade_price * trade_amount * market_towards raw_trade_money = trade_money value = trade_money unit = 1 frozen = 1 # 计算费用 # trade_price if self.market_type == MARKET_TYPE.FUTURE_CN: # 期货不收税 # 双边手续费 也没有最小手续费限制 commission_fee_preset = self.market_preset.get_code(code) if trade_towards in [ORDER_DIRECTION.BUY_OPEN, ORDER_DIRECTION.BUY_CLOSE, ORDER_DIRECTION.SELL_CLOSE, ORDER_DIRECTION.SELL_OPEN]: commission_fee = commission_fee_preset['commission_coeff_pervol'] * trade_amount + \ commission_fee_preset['commission_coeff_peramount'] * \ abs(value) elif trade_towards in [ORDER_DIRECTION.BUY_CLOSETODAY, ORDER_DIRECTION.SELL_CLOSETODAY]: commission_fee = commission_fee_preset['commission_coeff_today_pervol'] * trade_amount + \ commission_fee_preset['commission_coeff_today_peramount'] * \ abs(value) tax_fee = 0 # 买入不收印花税 elif self.market_type == MARKET_TYPE.STOCK_CN: commission_fee = self.commission_coeff * \ abs(trade_money) commission_fee = 5 if commission_fee < 5 else commission_fee if int(trade_towards) > 0: tax_fee = 0 # 买入不收印花税 else: tax_fee = self.tax_coeff * abs(trade_money) # 结算交易 if self.cash[-1] > trade_money + commission_fee + tax_fee: self.time_index_max.append(trade_time) # TODO: 目前还不支持期货的锁仓 if self.allow_sellopen: if trade_towards in [ORDER_DIRECTION.BUY_OPEN, ORDER_DIRECTION.SELL_OPEN]: # 开仓单占用现金 计算avg # 初始化 if code in self.frozen.keys(): if trade_towards in self.frozen[code].keys(): pass else: self.frozen[code][str(trade_towards)] = { 'money': 0, 'amount': 0, 'avg_price': 0 } else: self.frozen[code] = { str(ORDER_DIRECTION.BUY_OPEN): { 'money': 0, 'amount': 0, 'avg_price': 0 }, str(ORDER_DIRECTION.SELL_OPEN): { 'money': 0, 'amount': 0, 'avg_price': 0 } } """[summary] # frozen的计算 # money 冻结的资金 # amount 冻结的数量 2018-12-31 """ self.frozen[code][str(trade_towards)]['money'] = ( ( self.frozen[code][str(trade_towards)]['money'] * self.frozen[code][str(trade_towards)]['amount'] ) + abs(trade_money) ) / ( self.frozen[code][str(trade_towards)]['amount'] + trade_amount ) self.frozen[code][str(trade_towards)]['avg_price'] = ( ( self.frozen[code][str(trade_towards)]['avg_price'] * self.frozen[code][str(trade_towards)]['amount'] ) + abs(raw_trade_money) ) / ( self.frozen[code][str(trade_towards)]['amount'] + trade_amount ) self.frozen[code][str(trade_towards)]['amount'] += trade_amount self.cash.append( self.cash[-1] - abs(trade_money) - commission_fee - tax_fee ) elif trade_towards in [ORDER_DIRECTION.BUY_CLOSE, ORDER_DIRECTION.BUY_CLOSETODAY, ORDER_DIRECTION.SELL_CLOSE, ORDER_DIRECTION.SELL_CLOSETODAY]: # 平仓单释放现金 # if trade_towards == ORDER_DIRECTION.BUY_CLOSE: # 卖空开仓 平仓买入 # self.cash if trade_towards in [ORDER_DIRECTION.BUY_CLOSE, ORDER_DIRECTION.BUY_CLOSETODAY]: # 买入平仓 之前是空开 # self.frozen[code][ORDER_DIRECTION.SELL_OPEN]['money'] -= trade_money self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN) ]['amount'] -= trade_amount frozen_part = self.frozen[code][ str(ORDER_DIRECTION.SELL_OPEN)]['money'] * trade_amount # 账户的现金+ 冻结的的释放 + 买卖价差* 杠杆 self.cash.append( self.cash[-1] + frozen_part + (frozen_part - trade_money) / frozen - commission_fee - tax_fee ) if self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN) ]['amount'] == 0: self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN) ]['money'] = 0 self.frozen[code][str(ORDER_DIRECTION.SELL_OPEN) ]['avg_price'] = 0 elif trade_towards in [ORDER_DIRECTION.SELL_CLOSE, ORDER_DIRECTION.SELL_CLOSETODAY]: # 卖出平仓 之前是多开 # self.frozen[code][ORDER_DIRECTION.BUY_OPEN]['money'] -= trade_money self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN) ]['amount'] -= trade_amount frozen_part = self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN) ]['money'] * trade_amount self.cash.append( self.cash[-1] + frozen_part + (abs(trade_money) - frozen_part) / frozen - commission_fee - tax_fee ) if self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN) ]['amount'] == 0: self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN) ]['money'] = 0 self.frozen[code][str(ORDER_DIRECTION.BUY_OPEN) ]['avg_price'] = 0 else: # 不允许卖空开仓的==> 股票 self.cash.append( self.cash[-1] - trade_money - tax_fee - commission_fee ) if self.allow_t0 or trade_towards == ORDER_DIRECTION.SELL: self.sell_available[code] = self.sell_available.get( code, 0 ) + trade_amount * market_towards self.buy_available = self.sell_available self.cash_available = self.cash[-1] frozen_money = abs(trade_money) if trade_towards in [ ORDER_DIRECTION.BUY_OPEN, ORDER_DIRECTION.SELL_OPEN ] else 0 self.history.append( [ str(trade_time), code, trade_price, market_towards * trade_amount, self.cash[-1], order_id, realorder_id, trade_id, self.account_cookie, commission_fee, tax_fee, message, frozen_money, trade_towards ] ) else: print('ALERT MONEY NOT ENOUGH!!!') print(self.cash[-1]) self.cash_available = self.cash[-1]
[ "快速撮合成交接口" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L934-L1182
[ "def", "receive_simpledeal", "(", "self", ",", "code", ",", "trade_price", ",", "trade_amount", ",", "trade_towards", ",", "trade_time", ",", "message", "=", "None", ",", "order_id", "=", "None", ",", "trade_id", "=", "None", ",", "realorder_id", "=", "None"...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
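The margin arithmetic spelled out in the receive_simpledeal docstring can be checked standalone. A worked example with invented preset values (margin rate 0.10, contract multiplier 10; in the real code both come from self.market_preset):

trade_price, trade_amount, market_towards = 3800.0, 1, -1  # short one lot at 3800

frozen = 0.10   # margin rate (assumed value)
unit = 10       # contract multiplier (assumed value)

raw_trade_money = trade_price * trade_amount * market_towards  # -3800.0 signed market value
value = raw_trade_money * unit                                 # -38000.0 total contract value
trade_money = value * frozen                                   # -3800.0 margin for the trade

print(raw_trade_money, value, trade_money)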
train
QA_Account.receive_deal
Update the deal Arguments: code {str} -- [description] trade_id {str} -- [description] order_id {str} -- [description] realorder_id {str} -- [description] trade_price {float} -- [description] trade_amount {int} -- [description] trade_towards {int} -- [description] trade_time {str} -- [description] Returns: [type] -- [description]
QUANTAXIS/QAARP/QAAccount.py
def receive_deal( self, code: str, trade_id: str, order_id: str, realorder_id: str, trade_price: float, trade_amount: int, trade_towards: int, trade_time: str, message=None ): """更新deal Arguments: code {str} -- [description] trade_id {str} -- [description] order_id {str} -- [description] realorder_id {str} -- [description] trade_price {float} -- [description] trade_amount {int} -- [description] trade_towards {int} -- [description] trade_time {str} -- [description] Returns: [type] -- [description] """ print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!receive deal') trade_time = str(trade_time) code = str(code) trade_price = float(trade_price) trade_towards = int(trade_towards) realorder_id = str(realorder_id) trade_id = str(trade_id) trade_amount = int(trade_amount) order_id = str(order_id) market_towards = 1 if trade_towards > 0 else -1 """2019/01/03 直接使用快速撮合接口了 2333 这两个接口现在也没啥区别了.... 太绝望了 """ self.receive_simpledeal( code, trade_price, trade_amount, trade_towards, trade_time, message=message, order_id=order_id, trade_id=trade_id, realorder_id=realorder_id )
def receive_deal( self, code: str, trade_id: str, order_id: str, realorder_id: str, trade_price: float, trade_amount: int, trade_towards: int, trade_time: str, message=None ): """更新deal Arguments: code {str} -- [description] trade_id {str} -- [description] order_id {str} -- [description] realorder_id {str} -- [description] trade_price {float} -- [description] trade_amount {int} -- [description] trade_towards {int} -- [description] trade_time {str} -- [description] Returns: [type] -- [description] """ print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!receive deal') trade_time = str(trade_time) code = str(code) trade_price = float(trade_price) trade_towards = int(trade_towards) realorder_id = str(realorder_id) trade_id = str(trade_id) trade_amount = int(trade_amount) order_id = str(order_id) market_towards = 1 if trade_towards > 0 else -1 """2019/01/03 直接使用快速撮合接口了 2333 这两个接口现在也没啥区别了.... 太绝望了 """ self.receive_simpledeal( code, trade_price, trade_amount, trade_towards, trade_time, message=message, order_id=order_id, trade_id=trade_id, realorder_id=realorder_id )
[ "更新deal" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1194-L1249
[ "def", "receive_deal", "(", "self", ",", "code", ":", "str", ",", "trade_id", ":", "str", ",", "order_id", ":", "str", ",", "realorder_id", ":", "str", ",", "trade_price", ":", "float", ",", "trade_amount", ":", "int", ",", "trade_towards", ":", "int", ...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
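receive_deal is a thin wrapper: it coerces every field to its declared type and then delegates to receive_simpledeal, as its own docstring comment admits. A hedged usage sketch, assuming acc is an already-constructed QA_Account and the deal is tradable:

acc.receive_deal(
    code='000001',
    trade_id='trade_1',
    order_id='order_1',
    realorder_id='real_1',
    trade_price=11.0,
    trade_amount=100,
    trade_towards=1,          # >0 buy, <0 sell
    trade_time='2018-05-09 09:31:00',
)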
train
QA_Account.send_order
ATTENTION CHANGELOG 1.0.28 changed Account's send_order method to distinguish ordering by amount from ordering by money - AMOUNT_MODEL.BY_PRICE ==> AMOUNT_MODEL.BY_MONEY # order by money - AMOUNT_MODEL.BY_AMOUNT # order by amount When ordering by money, pass the money parameter; when ordering by amount, pass the amount parameter python code: Account=QA.QA_Account() Order_bymoney=Account.send_order(code='000001', price=11, money=0.3*Account.cash_available, time='2018-05-09', towards=QA.ORDER_DIRECTION.BUY, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_MONEY ) Order_byamount=Account.send_order(code='000001', price=11, amount=100, time='2018-05-09', towards=QA.ORDER_DIRECTION.BUY, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_AMOUNT ) :param code: security code :param amount: number of shares/lots to trade :param time: Timestamp object, order time :param towards: int , towards>0 buy towards<0 sell :param price: price of the underlying security to buy/sell :param money: cash amount to trade :param order_model: type QA.ORDER_MODEL :param amount_model: type QA.AMOUNT_MODEL :return: QA_Order | False @2018/12/23 send_order is QA's standard return; to connect to other interfaces, just adapt QA_Order @2018/12/27 when the account is judged to be a futures account (i.e. two-way trading is allowed) @2018/12/30 margin account changes 1. amount frozen by the margin account 2. settlement of the margin account 3. judgment of the margin account
QUANTAXIS/QAARP/QAAccount.py
def send_order( self, code=None, amount=None, time=None, towards=None, price=None, money=None, order_model=None, amount_model=None, *args, **kwargs ): """ ATTENTION CHANGELOG 1.0.28 修改了Account的send_order方法, 区分按数量下单和按金额下单两种方式 - AMOUNT_MODEL.BY_PRICE ==> AMOUNT_MODEL.BY_MONEY # 按金额下单 - AMOUNT_MODEL.BY_AMOUNT # 按数量下单 在按金额下单的时候,应给予 money参数 在按数量下单的时候,应给予 amount参数 python code: Account=QA.QA_Account() Order_bymoney=Account.send_order(code='000001', price=11, money=0.3*Account.cash_available, time='2018-05-09', towards=QA.ORDER_DIRECTION.BUY, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_MONEY ) Order_byamount=Account.send_order(code='000001', price=11, amount=100, time='2018-05-09', towards=QA.ORDER_DIRECTION.BUY, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_AMOUNT ) :param code: 证券代码 :param amount: 买卖 数量多数股 :param time: Timestamp 对象 下单时间 :param towards: int , towards>0 买入 towards<0 卖出 :param price: 买入,卖出 标的证券的价格 :param money: 买卖 价格 :param order_model: 类型 QA.ORDER_MODEL :param amount_model:类型 QA.AMOUNT_MODEL :return: QA_Order | False @2018/12/23 send_order 是QA的标准返回, 如需对接其他接口, 只需要对于QA_Order做适配即可 @2018/12/27 在判断账户为期货账户(及 允许双向交易) @2018/12/30 保证金账户的修改 1. 保证金账户冻结的金额 2. 保证金账户的结算 3. 保证金账户的判断 """ wrong_reason = None assert code is not None and time is not None and towards is not None and order_model is not None and amount_model is not None # 🛠todo 移到Utils类中, 时间转换 # date 字符串 2011-10-11 长度10 date = str(time)[0:10] if len(str(time)) == 19 else str(time) # time 字符串 2011-10-11 09:02:00 长度 19 time = str(time) if len(str(time)) == 19 else '{} 09:31:00'.format( str(time)[0:10] ) # 🛠todo 移到Utils类中, amount_to_money 成交量转金额 # BY_MONEY :: amount --钱 如10000元 因此 by_money里面 需要指定价格,来计算实际的股票数 # by_amount :: amount --股数 如10000股 if self.allow_margin: amount = amount if amount_model is AMOUNT_MODEL.BY_AMOUNT else int( money / ( self.market_preset.get_unit(code) * self.market_preset.get_frozen(code) * price * (1 + self.commission_coeff) ) / 100 ) * 100 else: amount = amount if amount_model is AMOUNT_MODEL.BY_AMOUNT else int( money / (price * (1 + self.commission_coeff)) / 100 ) * 100 # 🛠todo 移到Utils类中, money_to_amount 金额转成交量 if self.allow_margin: money = amount * price * self.market_preset.get_unit(code)*self.market_preset.get_frozen(code) * \ (1+self.commission_coeff) if amount_model is AMOUNT_MODEL.BY_AMOUNT else money else: money = amount * price * \ (1+self.commission_coeff) if amount_model is AMOUNT_MODEL.BY_AMOUNT else money # flag 判断买卖 数量和价格以及买卖方向是否正确 flag = False assert (int(towards) != 0) if int(towards) in [1, 2, 3]: # 是买入的情况(包括买入.买开.买平) if self.cash_available >= money: if self.market_type == MARKET_TYPE.STOCK_CN: # 如果是股票 买入的时候有100股的最小限制 amount = int(amount / 100) * 100 self.cash_available -= money flag = True if self.running_environment == RUNNING_ENVIRONMENT.TZERO: if abs(self.buy_available.get(code, 0)) >= amount: flag = True self.cash_available -= money self.buy_available[code] -= amount else: flag = False wrong_reason = 'T0交易买入超出限额' if self.market_type == MARKET_TYPE.FUTURE_CN: # 如果有负持仓-- 允许卖空的时候 if towards == 3: # 多平 _hold = self.sell_available.get(code, 0) # 假设有负持仓: # amount为下单数量 如 账户原先-3手 现在平1手 #left_amount = amount+_hold if _hold < 0 else amount _money = abs( float(amount * price * (1 + self.commission_coeff)) ) print(_hold) if self.cash_available >= _money: if _hold < 0: self.cash_available -= _money flag = True else: wrong_reason = '空单仓位不足' else: wrong_reason = '平多剩余资金不够' if towards == 2: self.cash_available -= money flag = True else: wrong_reason = 'QAACCOUNT: 可用资金不足 cash_available {} code {} time {} amount {} towards {}'.format( self.cash_available, code, time, amount, towards ) elif int(towards) in [-1, -2, -3]: # 是卖出的情况(包括卖出,卖出开仓allow_sellopen如果允许. 卖出平仓) # print(self.sell_available[code]) _hold = self.sell_available.get(code, 0) # _hold 是你的持仓 # 如果你的hold> amount>0 # 持仓数量>卖出数量 if _hold >= amount: self.sell_available[code] -= amount # towards = ORDER_DIRECTION.SELL flag = True # 如果持仓数量<卖出数量 else: # 如果是允许卖空开仓 实际计算时 先减去持仓(正持仓) 再计算 负持仓 就按原先的占用金额计算 if self.allow_sellopen and towards == -2: if self.cash_available >= money: # 卖空的市值小于现金(有担保的卖空), 不允许裸卖空 # self.cash_available -= money flag = True else: print('sellavailable', _hold) print('amount', amount) print('aqureMoney', money) print('cash', self.cash_available) wrong_reason = "卖空资金不足/不允许裸卖空" else: wrong_reason = "卖出仓位不足" if flag and (amount > 0): _order = QA_Order( user_cookie=self.user_cookie, strategy=self.strategy_name, frequence=self.frequence, account_cookie=self.account_cookie, code=code, market_type=self.market_type, date=date, datetime=time, sending_time=time, callback=self.receive_deal, amount=amount, price=price, order_model=order_model, towards=towards, money=money, broker=self.broker, amount_model=amount_model, commission_coeff=self.commission_coeff, tax_coeff=self.tax_coeff, *args, **kwargs ) # init # 历史委托order状态存储, 保存到 QA_Order 对象中的队列中 self.datetime = time self.orders.insert_order(_order) return _order else: print( 'ERROR : CODE {} TIME {} AMOUNT {} TOWARDS {}'.format( code, time, amount, towards ) ) print(wrong_reason) return False
def send_order( self, code=None, amount=None, time=None, towards=None, price=None, money=None, order_model=None, amount_model=None, *args, **kwargs ): """ ATTENTION CHANGELOG 1.0.28 修改了Account的send_order方法, 区分按数量下单和按金额下单两种方式 - AMOUNT_MODEL.BY_PRICE ==> AMOUNT_MODEL.BY_MONEY # 按金额下单 - AMOUNT_MODEL.BY_AMOUNT # 按数量下单 在按金额下单的时候,应给予 money参数 在按数量下单的时候,应给予 amount参数 python code: Account=QA.QA_Account() Order_bymoney=Account.send_order(code='000001', price=11, money=0.3*Account.cash_available, time='2018-05-09', towards=QA.ORDER_DIRECTION.BUY, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_MONEY ) Order_byamount=Account.send_order(code='000001', price=11, amount=100, time='2018-05-09', towards=QA.ORDER_DIRECTION.BUY, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_AMOUNT ) :param code: 证券代码 :param amount: 买卖 数量多数股 :param time: Timestamp 对象 下单时间 :param towards: int , towards>0 买入 towards<0 卖出 :param price: 买入,卖出 标的证券的价格 :param money: 买卖 价格 :param order_model: 类型 QA.ORDER_MODEL :param amount_model:类型 QA.AMOUNT_MODEL :return: QA_Order | False @2018/12/23 send_order 是QA的标准返回, 如需对接其他接口, 只需要对于QA_Order做适配即可 @2018/12/27 在判断账户为期货账户(及 允许双向交易) @2018/12/30 保证金账户的修改 1. 保证金账户冻结的金额 2. 保证金账户的结算 3. 保证金账户的判断 """ wrong_reason = None assert code is not None and time is not None and towards is not None and order_model is not None and amount_model is not None # 🛠todo 移到Utils类中, 时间转换 # date 字符串 2011-10-11 长度10 date = str(time)[0:10] if len(str(time)) == 19 else str(time) # time 字符串 2011-10-11 09:02:00 长度 19 time = str(time) if len(str(time)) == 19 else '{} 09:31:00'.format( str(time)[0:10] ) # 🛠todo 移到Utils类中, amount_to_money 成交量转金额 # BY_MONEY :: amount --钱 如10000元 因此 by_money里面 需要指定价格,来计算实际的股票数 # by_amount :: amount --股数 如10000股 if self.allow_margin: amount = amount if amount_model is AMOUNT_MODEL.BY_AMOUNT else int( money / ( self.market_preset.get_unit(code) * self.market_preset.get_frozen(code) * price * (1 + self.commission_coeff) ) / 100 ) * 100 else: amount = amount if amount_model is AMOUNT_MODEL.BY_AMOUNT else int( money / (price * (1 + self.commission_coeff)) / 100 ) * 100 # 🛠todo 移到Utils类中, money_to_amount 金额转成交量 if self.allow_margin: money = amount * price * self.market_preset.get_unit(code)*self.market_preset.get_frozen(code) * \ (1+self.commission_coeff) if amount_model is AMOUNT_MODEL.BY_AMOUNT else money else: money = amount * price * \ (1+self.commission_coeff) if amount_model is AMOUNT_MODEL.BY_AMOUNT else money # flag 判断买卖 数量和价格以及买卖方向是否正确 flag = False assert (int(towards) != 0) if int(towards) in [1, 2, 3]: # 是买入的情况(包括买入.买开.买平) if self.cash_available >= money: if self.market_type == MARKET_TYPE.STOCK_CN: # 如果是股票 买入的时候有100股的最小限制 amount = int(amount / 100) * 100 self.cash_available -= money flag = True if self.running_environment == RUNNING_ENVIRONMENT.TZERO: if abs(self.buy_available.get(code, 0)) >= amount: flag = True self.cash_available -= money self.buy_available[code] -= amount else: flag = False wrong_reason = 'T0交易买入超出限额' if self.market_type == MARKET_TYPE.FUTURE_CN: # 如果有负持仓-- 允许卖空的时候 if towards == 3: # 多平 _hold = self.sell_available.get(code, 0) # 假设有负持仓: # amount为下单数量 如 账户原先-3手 现在平1手 #left_amount = amount+_hold if _hold < 0 else amount _money = abs( float(amount * price * (1 + self.commission_coeff)) ) print(_hold) if self.cash_available >= _money: if _hold < 0: self.cash_available -= _money flag = True else: wrong_reason = '空单仓位不足' else: wrong_reason = '平多剩余资金不够' if towards == 2: self.cash_available -= money flag = True else: wrong_reason = 'QAACCOUNT: 可用资金不足 cash_available {} code {} time {} amount {} towards {}'.format( self.cash_available, code, time, amount, towards ) elif int(towards) in [-1, -2, -3]: # 是卖出的情况(包括卖出,卖出开仓allow_sellopen如果允许. 卖出平仓) # print(self.sell_available[code]) _hold = self.sell_available.get(code, 0) # _hold 是你的持仓 # 如果你的hold> amount>0 # 持仓数量>卖出数量 if _hold >= amount: self.sell_available[code] -= amount # towards = ORDER_DIRECTION.SELL flag = True # 如果持仓数量<卖出数量 else: # 如果是允许卖空开仓 实际计算时 先减去持仓(正持仓) 再计算 负持仓 就按原先的占用金额计算 if self.allow_sellopen and towards == -2: if self.cash_available >= money: # 卖空的市值小于现金(有担保的卖空), 不允许裸卖空 # self.cash_available -= money flag = True else: print('sellavailable', _hold) print('amount', amount) print('aqureMoney', money) print('cash', self.cash_available) wrong_reason = "卖空资金不足/不允许裸卖空" else: wrong_reason = "卖出仓位不足" if flag and (amount > 0): _order = QA_Order( user_cookie=self.user_cookie, strategy=self.strategy_name, frequence=self.frequence, account_cookie=self.account_cookie, code=code, market_type=self.market_type, date=date, datetime=time, sending_time=time, callback=self.receive_deal, amount=amount, price=price, order_model=order_model, towards=towards, money=money, broker=self.broker, amount_model=amount_model, commission_coeff=self.commission_coeff, tax_coeff=self.tax_coeff, *args, **kwargs ) # init # 历史委托order状态存储, 保存到 QA_Order 对象中的队列中 self.datetime = time self.orders.insert_order(_order) return _order else: print( 'ERROR : CODE {} TIME {} AMOUNT {} TOWARDS {}'.format( code, time, amount, towards ) ) print(wrong_reason) return False
[ "ATTENTION", "CHANGELOG", "1", ".", "0", ".", "28", "修改了Account的send_order方法", "区分按数量下单和按金额下单两种方式" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1251-L1477
[ "def", "send_order", "(", "self", ",", "code", "=", "None", ",", "amount", "=", "None", ",", "time", "=", "None", ",", "towards", "=", "None", ",", "price", "=", "None", ",", "money", "=", "None", ",", "order_model", "=", "None", ",", "amount_model",...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
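The BY_MONEY branch of send_order converts a cash budget into a round-lot share count before anything else happens. That conversion for a stock account (allow_margin False) in isolation, with made-up inputs:

money, price, commission_coeff = 33000.0, 11.0, 0.00015

# int(... / 100) * 100 rounds down to a whole board lot of 100 shares
amount = int(money / (price * (1 + commission_coeff)) / 100) * 100
print(amount)  # 2900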
train
QA_Account.close_positions_order
Close-position orders Raises: RuntimeError -- if ACCOUNT.RUNNING_ENVIRONMENT is NOT TZERO Returns: list -- list with orders
QUANTAXIS/QAARP/QAAccount.py
def close_positions_order(self): """平仓单 Raises: RuntimeError -- if ACCOUNT.RUNNING_ENVIRONMENT is NOT TZERO Returns: list -- list with order """ order_list = [] time = '{} 15:00:00'.format(self.date) if self.running_environment == RUNNING_ENVIRONMENT.TZERO: for code, amount in self.hold_available.iteritems(): order = False if amount < 0: # 先卖出的单子 买平 order = self.send_order( code=code, price=0, amount=abs(amount), time=time, towards=ORDER_DIRECTION.BUY, order_model=ORDER_MODEL.CLOSE, amount_model=AMOUNT_MODEL.BY_AMOUNT, ) elif amount > 0: # 先买入的单子, 卖平 order = self.send_order( code=code, price=0, amount=abs(amount), time=time, towards=ORDER_DIRECTION.SELL, order_model=ORDER_MODEL.CLOSE, amount_model=AMOUNT_MODEL.BY_AMOUNT ) if order: order_list.append(order) return order_list else: raise RuntimeError( 'QAACCOUNT with {} environments cannot use this methods'.format( self.running_environment ) )
def close_positions_order(self): """平仓单 Raises: RuntimeError -- if ACCOUNT.RUNNING_ENVIRONMENT is NOT TZERO Returns: list -- list with order """ order_list = [] time = '{} 15:00:00'.format(self.date) if self.running_environment == RUNNING_ENVIRONMENT.TZERO: for code, amount in self.hold_available.iteritems(): order = False if amount < 0: # 先卖出的单子 买平 order = self.send_order( code=code, price=0, amount=abs(amount), time=time, towards=ORDER_DIRECTION.BUY, order_model=ORDER_MODEL.CLOSE, amount_model=AMOUNT_MODEL.BY_AMOUNT, ) elif amount > 0: # 先买入的单子, 卖平 order = self.send_order( code=code, price=0, amount=abs(amount), time=time, towards=ORDER_DIRECTION.SELL, order_model=ORDER_MODEL.CLOSE, amount_model=AMOUNT_MODEL.BY_AMOUNT ) if order: order_list.append(order) return order_list else: raise RuntimeError( 'QAACCOUNT with {} environments cannot use this methods'.format( self.running_environment ) )
[ "平仓单" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1493-L1538
[ "def", "close_positions_order", "(", "self", ")", ":", "order_list", "=", "[", "]", "time", "=", "'{} 15:00:00'", ".", "format", "(", "self", ".", "date", ")", "if", "self", ".", "running_environment", "==", "RUNNING_ENVIRONMENT", ".", "TZERO", ":", "for", ...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
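Two small notes on close_positions_order: the direction logic simply inverts the sign of each remaining T0 position (negative holdings are bought back, positive ones are sold), and Series.iteritems() was removed in pandas 2.0, where the equivalent spelling is .items(). The direction rule in isolation:

for amount in (-100, 200):
    towards = 'BUY' if amount < 0 else 'SELL'   # buy back shorts, sell out longs
    print(amount, '->', towards, abs(amount))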
train
QA_Account.settle
Daily settlement for stocks/futures Stock settlement: carry over the sellable stock quota T0 settlement: carry over the T0 quota Futures settlement: carry over the static funds @2019-02-25 yutiansut hold is about to change a lot below: from counting quantity only ==> quantity + cost + buy price (carrying more information) computing hold from history ==> last_settle + today_pos_change
QUANTAXIS/QAARP/QAAccount.py
def settle(self, settle_data = None): """ 股票/期货的日结算 股票的结算: 结转股票可卖额度 T0的结算: 结转T0的额度 期货的结算: 结转静态资金 @2019-02-25 yutiansut hold 在下面要进行大变化: 从 只计算数量 ==> 数量+成本+买入价 (携带更多信息) 基于history去计算hold ==> last_settle+ today_pos_change """ #print('FROM QUANTAXIS QA_ACCOUNT: account settle') if self.running_environment == RUNNING_ENVIRONMENT.TZERO and self.hold_available.sum( ) != 0: raise RuntimeError( 'QAACCOUNT: 该T0账户未当日仓位,请平仓 {}'.format( self.hold_available.to_dict() ) ) if self.market_type == MARKET_TYPE.FUTURE_CN: # 增加逐日盯市制度 self.static_balance['frozen'].append( sum( [ rx['money'] * rx['amount'] for var in self.frozen.values() for rx in var.values() ] ) ) self.static_balance['cash'].append(self.cash[-1]) self.static_balance['hold'].append(self.hold.to_dict()) self.static_balance['date'].append(self.date) """静态权益的结算 只关心开仓价/ 不做盯市制度 动态权益的结算需要关心 """ self.static_balance['static_assets'].append( self.static_balance['cash'][-1] + self.static_balance['frozen'][-1] ) self.sell_available = self.hold self.buy_available = self.hold self.cash_available = self.cash[-1] self.datetime = '{} 09:30:00'.format( QA_util_get_next_day(self.date) ) if self.date is not None else None
def settle(self, settle_data = None): """ 股票/期货的日结算 股票的结算: 结转股票可卖额度 T0的结算: 结转T0的额度 期货的结算: 结转静态资金 @2019-02-25 yutiansut hold 在下面要进行大变化: 从 只计算数量 ==> 数量+成本+买入价 (携带更多信息) 基于history去计算hold ==> last_settle+ today_pos_change """ #print('FROM QUANTAXIS QA_ACCOUNT: account settle') if self.running_environment == RUNNING_ENVIRONMENT.TZERO and self.hold_available.sum( ) != 0: raise RuntimeError( 'QAACCOUNT: 该T0账户未当日仓位,请平仓 {}'.format( self.hold_available.to_dict() ) ) if self.market_type == MARKET_TYPE.FUTURE_CN: # 增加逐日盯市制度 self.static_balance['frozen'].append( sum( [ rx['money'] * rx['amount'] for var in self.frozen.values() for rx in var.values() ] ) ) self.static_balance['cash'].append(self.cash[-1]) self.static_balance['hold'].append(self.hold.to_dict()) self.static_balance['date'].append(self.date) """静态权益的结算 只关心开仓价/ 不做盯市制度 动态权益的结算需要关心 """ self.static_balance['static_assets'].append( self.static_balance['cash'][-1] + self.static_balance['frozen'][-1] ) self.sell_available = self.hold self.buy_available = self.hold self.cash_available = self.cash[-1] self.datetime = '{} 09:30:00'.format( QA_util_get_next_day(self.date) ) if self.date is not None else None
[ "股票", "/", "期货的日结算" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1540-L1600
[ "def", "settle", "(", "self", ",", "settle_data", "=", "None", ")", ":", "#print('FROM QUANTAXIS QA_ACCOUNT: account settle')", "if", "self", ".", "running_environment", "==", "RUNNING_ENVIRONMENT", ".", "TZERO", "and", "self", ".", "hold_available", ".", "sum", "("...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
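The frozen-margin total that settle appends to static_balance is a double comprehension over the frozen dict (per code, then per open direction). A toy check of that expression, with the nested keys shaped like the str() of the towards constants and all figures made up:

frozen = {
    'RB1905': {
        '2':  {'money': 3800.0, 'amount': 2, 'avg_price': 3800.0},  # long-open side
        '-2': {'money': 0,      'amount': 0, 'avg_price': 0},       # short-open side
    }
}
total_frozen = sum(
    rx['money'] * rx['amount'] for var in frozen.values() for rx in var.values()
)
print(total_frozen)  # 7600.0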
train
QA_Account.on_bar
Strategy event :param event: :return:
QUANTAXIS/QAARP/QAAccount.py
def on_bar(self, event): ''' 策略事件 :param event: :return: ''' 'while updating the market data' print( "on_bar account {} ".format(self.account_cookie), event.market_data.data ) print(event.send_order) try: for code in event.market_data.code: if self.sell_available.get(code, 0) > 0: print('可以卖出 {}'.format(self._currenttime)) event.send_order( account_cookie=self.account_cookie, amount=self.sell_available[code], amount_model=AMOUNT_MODEL.BY_AMOUNT, time=self.current_time, code=code, price=0, order_model=ORDER_MODEL.MARKET, towards=ORDER_DIRECTION.SELL, market_type=self.market_type, frequence=self.frequence, broker_name=self.broker ) else: print('{} 无仓位, 买入{}'.format(self._currenttime, code)) event.send_order( account_cookie=self.account_cookie, amount=100, amount_model=AMOUNT_MODEL.BY_AMOUNT, time=self.current_time, code=code, price=0, order_model=ORDER_MODEL.MARKET, towards=ORDER_DIRECTION.BUY, market_type=self.market_type, frequence=self.frequence, broker_name=self.broker ) except Exception as e: print(e)
def on_bar(self, event): ''' 策略事件 :param event: :return: ''' 'while updating the market data' print( "on_bar account {} ".format(self.account_cookie), event.market_data.data ) print(event.send_order) try: for code in event.market_data.code: if self.sell_available.get(code, 0) > 0: print('可以卖出 {}'.format(self._currenttime)) event.send_order( account_cookie=self.account_cookie, amount=self.sell_available[code], amount_model=AMOUNT_MODEL.BY_AMOUNT, time=self.current_time, code=code, price=0, order_model=ORDER_MODEL.MARKET, towards=ORDER_DIRECTION.SELL, market_type=self.market_type, frequence=self.frequence, broker_name=self.broker ) else: print('{} 无仓位, 买入{}'.format(self._currenttime, code)) event.send_order( account_cookie=self.account_cookie, amount=100, amount_model=AMOUNT_MODEL.BY_AMOUNT, time=self.current_time, code=code, price=0, order_model=ORDER_MODEL.MARKET, towards=ORDER_DIRECTION.BUY, market_type=self.market_type, frequence=self.frequence, broker_name=self.broker ) except Exception as e: print(e)
[ "策略事件", ":", "param", "event", ":", ":", "return", ":" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1602-L1649
[ "def", "on_bar", "(", "self", ",", "event", ")", ":", "'while updating the market data'", "print", "(", "\"on_bar account {} \"", ".", "format", "(", "self", ".", "account_cookie", ")", ",", "event", ".", "market_data", ".", "data", ")", "print", "(", "event",...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
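on_bar above is a demo strategy (sell whatever is sellable, otherwise buy 100), and it only touches a handful of attributes on the event it receives. A minimal stand-in for that event shape, purely for illustration (the real object is a QA_Event):

class FakeEvent:
    def __init__(self, market_data, send_order):
        self.market_data = market_data  # on_bar reads .data and iterates .code
        self.send_order = send_order    # callable accepting the keyword args shown above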
train
QA_Account.from_message
resume the account from standard message; this is needed when restoring an account from the database
QUANTAXIS/QAARP/QAAccount.py
def from_message(self, message): """resume the account from standard message 这个是从数据库恢复账户时需要的""" self.account_cookie = message.get('account_cookie', None) self.portfolio_cookie = message.get('portfolio_cookie', None) self.user_cookie = message.get('user_cookie', None) self.broker = message.get('broker', None) self.market_type = message.get('market_type', None) self.strategy_name = message.get('strategy_name', None) self._currenttime = message.get('current_time', None) self.allow_sellopen = message.get('allow_sellopen', False) self.allow_margin = message.get('allow_margin', False) self.allow_t0 = message.get('allow_t0', False) self.margin_level = message.get('margin_level', False) self.frequence = message.get('frequence', FREQUENCE.FIFTEEN_MIN) #默认15min self.init_cash = message.get( 'init_cash', message.get('init_assets', 1000000) ) # 兼容修改 self.init_hold = pd.Series(message.get('init_hold', {}), name='amount') self.init_hold.index.name = 'code' self.commission_coeff = message.get('commission_coeff', 0.00015) self.tax_coeff = message.get('tax_coeff', 0.0015) self.history = message['history'] self.cash = message['cash'] self.time_index_max = message['trade_index'] self.running_time = message.get('running_time', None) self.quantaxis_version = message.get('quantaxis_version', None) self.running_environment = message.get( 'running_environment', RUNNING_ENVIRONMENT.BACKETEST ) self.frozen = message.get('frozen', {}) self.finishedOrderid = message.get('finished_id', []) self.settle() return self
def from_message(self, message): """resume the account from standard message 这个是从数据库恢复账户时需要的""" self.account_cookie = message.get('account_cookie', None) self.portfolio_cookie = message.get('portfolio_cookie', None) self.user_cookie = message.get('user_cookie', None) self.broker = message.get('broker', None) self.market_type = message.get('market_type', None) self.strategy_name = message.get('strategy_name', None) self._currenttime = message.get('current_time', None) self.allow_sellopen = message.get('allow_sellopen', False) self.allow_margin = message.get('allow_margin', False) self.allow_t0 = message.get('allow_t0', False) self.margin_level = message.get('margin_level', False) self.frequence = message.get('frequence', FREQUENCE.FIFTEEN_MIN) #默认15min self.init_cash = message.get( 'init_cash', message.get('init_assets', 1000000) ) # 兼容修改 self.init_hold = pd.Series(message.get('init_hold', {}), name='amount') self.init_hold.index.name = 'code' self.commission_coeff = message.get('commission_coeff', 0.00015) self.tax_coeff = message.get('tax_coeff', 0.0015) self.history = message['history'] self.cash = message['cash'] self.time_index_max = message['trade_index'] self.running_time = message.get('running_time', None) self.quantaxis_version = message.get('quantaxis_version', None) self.running_environment = message.get( 'running_environment', RUNNING_ENVIRONMENT.BACKETEST ) self.frozen = message.get('frozen', {}) self.finishedOrderid = message.get('finished_id', []) self.settle() return self
[ "resume", "the", "account", "from", "standard", "message", "这个是从数据库恢复账户时需要的" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1661-L1697
[ "def", "from_message", "(", "self", ",", "message", ")", ":", "self", ".", "account_cookie", "=", "message", ".", "get", "(", "'account_cookie'", ",", "None", ")", "self", ".", "portfolio_cookie", "=", "message", ".", "get", "(", "'portfolio_cookie'", ",", ...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
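from_message is the read side of a round trip: it expects the dict an account serializes out (the same self.message that the table record below wraps), applies defaults for missing keys, and finishes with a settle(). A hedged round-trip sketch, assuming the default QA_Account constructor and an existing account acc:

saved = acc.message                          # serialize to a plain dict
restored = QA_Account().from_message(saved)  # rebuild; settle() runs inside
assert restored.account_cookie == acc.account_cookie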
train
QA_Account.from_otgdict
[summary] balance = static_balance + float_profit "currency": "", # "CNY" (currency) "pre_balance": float("nan"), # 9912934.78 (previous day's account equity) "static_balance": float("nan"), # (static equity) "balance": float("nan"), # 9963216.55 (account equity) "available": float("nan"), # 9480176.15 (available funds) "float_profit": float("nan"), # 8910.0 (floating profit/loss) "position_profit": float("nan"), # 1120.0 (position profit/loss) "close_profit": float("nan"), # -11120.0 (profit/loss from positions closed within this trading day) "frozen_margin": float("nan"), # 0.0 (frozen margin) "margin": float("nan"), # 11232.23 (margin in use) "frozen_commission": float("nan"), # 0.0 (frozen commission) "commission": float("nan"), # 123.0 (commission paid within this trading day) "frozen_premium": float("nan"), # 0.0 (frozen premium) "premium": float("nan"), # 0.0 (premium paid within this trading day) "deposit": float("nan"), # 1234.0 (deposits within this trading day) "withdraw": float("nan"), # 890.0 (withdrawals within this trading day) "risk_ratio": float("nan"), # 0.048482375 (risk ratio)
QUANTAXIS/QAARP/QAAccount.py
def from_otgdict(self, message): """[summary] balance = static_balance + float_profit "currency": "", # "CNY" (币种) "pre_balance": float("nan"), # 9912934.78 (昨日账户权益) "static_balance": float("nan"), # (静态权益) "balance": float("nan"), # 9963216.55 (账户权益) "available": float("nan"), # 9480176.15 (可用资金) "float_profit": float("nan"), # 8910.0 (浮动盈亏) "position_profit": float("nan"), # 1120.0(持仓盈亏) "close_profit": float("nan"), # -11120.0 (本交易日内平仓盈亏) "frozen_margin": float("nan"), # 0.0(冻结保证金) "margin": float("nan"), # 11232.23 (保证金占用) "frozen_commission": float("nan"), # 0.0 (冻结手续费) "commission": float("nan"), # 123.0 (本交易日内交纳的手续费) "frozen_premium": float("nan"), # 0.0 (冻结权利金) "premium": float("nan"), # 0.0 (本交易日内交纳的权利金) "deposit": float("nan"), # 1234.0 (本交易日内的入金金额) "withdraw": float("nan"), # 890.0 (本交易日内的出金金额) "risk_ratio": float("nan"), # 0.048482375 (风险度) """ self.allow_margin = True self.allow_sellopen = True self.allow_t0 = True self.account_cookie = message['accounts']['user_id'] # 可用资金 self.cash_available = message['accounts']['available'] self.balance = message['accounts']['balance'] # 都是在结算的时候计算的 # 昨日权益/静态权益 ==> 这两个是一样的 self.static_balance = message['accounts']['static_balance'] self.pre_balance = message['accounts']['pre_balance'] # 平仓盈亏 self.close_profit = message['accounts']['close_profit'] # 持仓盈亏 self.position_profit = message['accounts']['position_profit'] # 动态权益 self.float_profit = message['accounts']['float_profit'] # 占用保证金 self.margin = message['accounts']['margin'] self.commission = message['accounts']['commission']
def from_otgdict(self, message): """[summary] balance = static_balance + float_profit "currency": "", # "CNY" (币种) "pre_balance": float("nan"), # 9912934.78 (昨日账户权益) "static_balance": float("nan"), # (静态权益) "balance": float("nan"), # 9963216.55 (账户权益) "available": float("nan"), # 9480176.15 (可用资金) "float_profit": float("nan"), # 8910.0 (浮动盈亏) "position_profit": float("nan"), # 1120.0(持仓盈亏) "close_profit": float("nan"), # -11120.0 (本交易日内平仓盈亏) "frozen_margin": float("nan"), # 0.0(冻结保证金) "margin": float("nan"), # 11232.23 (保证金占用) "frozen_commission": float("nan"), # 0.0 (冻结手续费) "commission": float("nan"), # 123.0 (本交易日内交纳的手续费) "frozen_premium": float("nan"), # 0.0 (冻结权利金) "premium": float("nan"), # 0.0 (本交易日内交纳的权利金) "deposit": float("nan"), # 1234.0 (本交易日内的入金金额) "withdraw": float("nan"), # 890.0 (本交易日内的出金金额) "risk_ratio": float("nan"), # 0.048482375 (风险度) """ self.allow_margin = True self.allow_sellopen = True self.allow_t0 = True self.account_cookie = message['accounts']['user_id'] # 可用资金 self.cash_available = message['accounts']['available'] self.balance = message['accounts']['balance'] # 都是在结算的时候计算的 # 昨日权益/静态权益 ==> 这两个是一样的 self.static_balance = message['accounts']['static_balance'] self.pre_balance = message['accounts']['pre_balance'] # 平仓盈亏 self.close_profit = message['accounts']['close_profit'] # 持仓盈亏 self.position_profit = message['accounts']['position_profit'] # 动态权益 self.float_profit = message['accounts']['float_profit'] # 占用保证金 self.margin = message['accounts']['margin'] self.commission = message['accounts']['commission']
[ "[", "summary", "]", "balance", "=", "static_balance", "+", "float_profit" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1699-L1748
[ "def", "from_otgdict", "(", "self", ",", "message", ")", ":", "self", ".", "allow_margin", "=", "True", "self", ".", "allow_sellopen", "=", "True", "self", ".", "allow_t0", "=", "True", "self", ".", "account_cookie", "=", "message", "[", "'accounts'", "]",...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
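from_otgdict only reads the 'accounts' sub-dict and force-enables margin, sell-open and T0. A minimal message that satisfies every key the method touches, with the sample figures lifted from the docstring above (acc again assumed to be a QA_Account):

otg_message = {
    'accounts': {
        'user_id': 'acc_demo',
        'available': 9480176.15,
        'balance': 9963216.55,
        'static_balance': 9912934.78,
        'pre_balance': 9912934.78,
        'close_profit': -11120.0,
        'position_profit': 1120.0,
        'float_profit': 8910.0,
        'margin': 11232.23,
        'commission': 123.0,
    }
}
acc.from_otgdict(otg_message)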
train
QA_Account.table
Print out the contents of the account
QUANTAXIS/QAARP/QAAccount.py
def table(self): """ 打印出account的内容 """ return pd.DataFrame([ self.message, ]).set_index( 'account_cookie', drop=False ).T
def table(self): """ 打印出account的内容 """ return pd.DataFrame([ self.message, ]).set_index( 'account_cookie', drop=False ).T
[ "打印出account的内容" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1751-L1760
[ "def", "table", "(", "self", ")", ":", "return", "pd", ".", "DataFrame", "(", "[", "self", ".", "message", ",", "]", ")", ".", "set_index", "(", "'account_cookie'", ",", "drop", "=", "False", ")", ".", "T" ]
bb1fe424e4108b62a1f712b81a05cf829297a5c0
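table is a one-liner around the message dict: wrap it in a single-row DataFrame, index by account_cookie, transpose. The same shape on a stub dict with invented values:

import pandas as pd

msg = {'account_cookie': 'acc_demo', 'cash': [1000000], 'market_type': 'stock_cn'}
print(pd.DataFrame([msg]).set_index('account_cookie', drop=False).T)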
train
QA_Account.run
This method is called when QA_ThreadEngine processes the queue; the do method in QA_Task calls run (in another thread) 'QA_WORKER method override' :param event: event type QA_Event :return:
QUANTAXIS/QAARP/QAAccount.py
def run(self, event): ''' 这个方法是被 QA_ThreadEngine 处理队列时候调用的, QA_Task 中 do 方法调用 run (在其它线程中) 'QA_WORKER method 重载' :param event: 事件类型 QA_Event :return: ''' 'QA_WORKER method' if event.event_type is ACCOUNT_EVENT.SETTLE: print('account_settle') self.settle() # elif event.event_type is ACCOUNT_EVENT.UPDATE: # self.receive_deal(event.message) elif event.event_type is ACCOUNT_EVENT.MAKE_ORDER: """generate order if callback callback the order if not return back the order """ data = self.send_order( code=event.code, amount=event.amount, time=event.time, amount_model=event.amount_model, towards=event.towards, price=event.price, order_model=event.order_model ) if event.callback: event.callback(data) else: return data elif event.event_type is ENGINE_EVENT.UPCOMING_DATA: """update the market_data 1. update the inside market_data struct 2. tell the on_bar methods # 这样有点慢 """ self._currenttime = event.market_data.datetime[0] if self._market_data is None: self._market_data = event.market_data else: self._market_data = self._market_data + event.market_data self.on_bar(event) if event.callback: event.callback(event)
def run(self, event): ''' 这个方法是被 QA_ThreadEngine 处理队列时候调用的, QA_Task 中 do 方法调用 run (在其它线程中) 'QA_WORKER method 重载' :param event: 事件类型 QA_Event :return: ''' 'QA_WORKER method' if event.event_type is ACCOUNT_EVENT.SETTLE: print('account_settle') self.settle() # elif event.event_type is ACCOUNT_EVENT.UPDATE: # self.receive_deal(event.message) elif event.event_type is ACCOUNT_EVENT.MAKE_ORDER: """generate order if callback callback the order if not return back the order """ data = self.send_order( code=event.code, amount=event.amount, time=event.time, amount_model=event.amount_model, towards=event.towards, price=event.price, order_model=event.order_model ) if event.callback: event.callback(data) else: return data elif event.event_type is ENGINE_EVENT.UPCOMING_DATA: """update the market_data 1. update the inside market_data struct 2. tell the on_bar methods # 这样有点慢 """ self._currenttime = event.market_data.datetime[0] if self._market_data is None: self._market_data = event.market_data else: self._market_data = self._market_data + event.market_data self.on_bar(event) if event.callback: event.callback(event)
[ "这个方法是被", "QA_ThreadEngine", "处理队列时候调用的,", "QA_Task", "中", "do", "方法调用", "run", "(在其它线程中)", "QA_WORKER", "method", "重载", ":", "param", "event", ":", "事件类型", "QA_Event", ":", "return", ":" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1762-L1812
[ "def", "run", "(", "self", ",", "event", ")", ":", "'QA_WORKER method'", "if", "event", ".", "event_type", "is", "ACCOUNT_EVENT", ".", "SETTLE", ":", "print", "(", "'account_settle'", ")", "self", ".", "settle", "(", ")", "# elif event.event_type is ACCOUNT_EVEN...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
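The MAKE_ORDER branch of run has an easy-to-miss contract: if the event carries a callback, the freshly created order is handed to it and run returns None; only callback-less events get the order back. The dispatch rule in isolation (names illustrative, not QUANTAXIS API):

def dispatch_make_order(event, make_order):
    data = make_order()
    if event.callback:
        event.callback(data)   # callback consumes the order...
    else:
        return data            # ...otherwise the caller receives it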
train
QA_Account.sync_account
Synchronize the account Arguments: sync_message {[type]} -- [description]
QUANTAXIS/QAARP/QAAccount.py
def sync_account(self, sync_message): """同步账户 Arguments: sync_message {[type]} -- [description] """ self.init_hold = sync_message['hold_available'] self.init_cash = sync_message['cash_available'] self.sell_available = copy.deepcopy(self.init_hold) self.history = [] self.cash = [self.init_cash] self.cash_available = self.cash[-1]
def sync_account(self, sync_message): """同步账户 Arguments: sync_message {[type]} -- [description] """ self.init_hold = sync_message['hold_available'] self.init_cash = sync_message['cash_available'] self.sell_available = copy.deepcopy(self.init_hold) self.history = [] self.cash = [self.init_cash] self.cash_available = self.cash[-1]
[ "同步账户" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1835-L1848
[ "def", "sync_account", "(", "self", ",", "sync_message", ")", ":", "self", ".", "init_hold", "=", "sync_message", "[", "'hold_available'", "]", "self", ".", "init_cash", "=", "sync_message", "[", "'cash_available'", "]", "self", ".", "sell_available", "=", "co...
bb1fe424e4108b62a1f712b81a05cf829297a5c0
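sync_account wipes the local history and re-seeds holdings and cash from a broker snapshot; note it reads exactly two keys. A sketch of the expected message shape (field values invented, acc assumed to be a QA_Account):

import pandas as pd

sync_message = {
    'hold_available': pd.Series({'000001': 100}, name='amount'),  # becomes init_hold
    'cash_available': 250000.0,                                   # becomes init_cash
}
acc.sync_account(sync_message)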
train
QA_Account.change_cash
External operation |HIGH RISK|
QUANTAXIS/QAARP/QAAccount.py
def change_cash(self, money): """ 外部操作|高危| """ res = self.cash[-1] + money if res >= 0: # 高危操作 self.cash[-1] = res
def change_cash(self, money): """ 外部操作|高危| """ res = self.cash[-1] + money if res >= 0: # 高危操作 self.cash[-1] = res
[ "外部操作|高危|" ]
QUANTAXIS/QUANTAXIS
python
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L1850-L1857
[ "def", "change_cash", "(", "self", ",", "money", ")", ":", "res", "=", "self", ".", "cash", "[", "-", "1", "]", "+", "money", "if", "res", ">=", "0", ":", "# 高危操作", "self", ".", "cash", "[", "-", "1", "]", "=", "res" ]
bb1fe424e4108b62a1f712b81a05cf829297a5c0
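change_cash applies the delta only when the resulting balance stays non-negative, and it rejects silently, so callers should re-check cash afterwards. The guard in isolation:

cash = [100.0]
delta = -150.0
res = cash[-1] + delta
if res >= 0:      # the high-risk write only happens on a non-negative result
    cash[-1] = res
print(cash)       # [100.0] -- the over-withdrawal was silently ignored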