id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
29,100
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/_parameterized.py
_ParameterDecorator
def _ParameterDecorator(naming_type, testcases): """Implementation of the parameterization decorators. Args: naming_type: The naming type. testcases: Testcase parameters. Returns: A function for modifying the decorated object. """ def _Apply(obj): if isinstance(obj, type): _ModifyClass( obj, list(testcases) if not isinstance(testcases, collections.Sequence) else testcases, naming_type) return obj else: return _ParameterizedTestIter(obj, testcases, naming_type) if _IsSingletonList(testcases): assert _NonStringIterable(testcases[0]), ( 'Single parameter argument must be a non-string iterable') testcases = testcases[0] return _Apply
python
def _ParameterDecorator(naming_type, testcases):
  """Implementation of the parameterization decorators.

  Args:
    naming_type: The naming type.
    testcases: Testcase parameters.

  Returns:
    A function for modifying the decorated object.
  """
  def _Apply(obj):
    # collections.Sequence was removed in Python 3.10; prefer
    # collections.abc.Sequence and fall back to the legacy alias only
    # where collections.abc is unavailable (Python 2).
    sequence_type = getattr(collections, 'abc', collections).Sequence
    if isinstance(obj, type):
      _ModifyClass(
          obj,
          list(testcases) if not isinstance(testcases, sequence_type)
          else testcases,
          naming_type)
      return obj
    else:
      return _ParameterizedTestIter(obj, testcases, naming_type)

  if _IsSingletonList(testcases):
    assert _NonStringIterable(testcases[0]), (
        'Single parameter argument must be a non-string iterable')
    testcases = testcases[0]

  return _Apply
[ "def", "_ParameterDecorator", "(", "naming_type", ",", "testcases", ")", ":", "def", "_Apply", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "type", ")", ":", "_ModifyClass", "(", "obj", ",", "list", "(", "testcases", ")", "if", "not", "isinstance", "(", "testcases", ",", "collections", ".", "Sequence", ")", "else", "testcases", ",", "naming_type", ")", "return", "obj", "else", ":", "return", "_ParameterizedTestIter", "(", "obj", ",", "testcases", ",", "naming_type", ")", "if", "_IsSingletonList", "(", "testcases", ")", ":", "assert", "_NonStringIterable", "(", "testcases", "[", "0", "]", ")", ",", "(", "'Single parameter argument must be a non-string iterable'", ")", "testcases", "=", "testcases", "[", "0", "]", "return", "_Apply" ]
Implementation of the parameterization decorators. Args: naming_type: The naming type. testcases: Testcase parameters. Returns: A function for modifying the decorated object.
[ "Implementation", "of", "the", "parameterization", "decorators", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/_parameterized.py#L280-L306
29,101
apple/turicreate
deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py
check_header_comment
def check_header_comment(filename): """Checks if the header-comment of the given file needs fixing.""" # Check input file. name = os.path.basename( filename ) # Read content of input file. sourcefile = open( filename, "rU" ) content = sourcefile.read() sourcefile.close() # Search content for '$Id$'. match = re.search(r'\$Id\$', content) if match == None: # Make sure that the correct value for '$Id$' was already set. match = re.search(r'\$Id: ' + name + r'\s+[^$]+\$', content) if match != None: # The given file needs no fixing. return False # The given file needs fixing. return True
python
def check_header_comment(filename):
    """Check whether the header-comment of the given file needs fixing.

    Args:
        filename: Path of the file to inspect.

    Returns:
        True if the file still contains an unexpanded '$Id$' keyword or a
        '$Id: ...$' expansion that does not match the file's name; False
        if the header already carries the expected value.
    """
    name = os.path.basename(filename)
    # Context manager closes the handle even on read errors.  (The
    # original used mode "rU", which was removed in Python 3.11; "r"
    # already performs universal-newline translation in Python 3.)
    with open(filename, "r") as sourcefile:
        content = sourcefile.read()
    # An unexpanded '$Id$' means the header was never fixed.
    if re.search(r'\$Id\$', content) is None:
        # No bare '$Id$': make sure the correct expansion for this file
        # is present.  re.escape() keeps dots in the filename literal.
        if re.search(r'\$Id: ' + re.escape(name) + r'\s+[^$]+\$',
                     content) is not None:
            # The given file needs no fixing.
            return False
    # The given file needs fixing.
    return True
[ "def", "check_header_comment", "(", "filename", ")", ":", "# Check input file.", "name", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "# Read content of input file.", "sourcefile", "=", "open", "(", "filename", ",", "\"rU\"", ")", "content", "=", "sourcefile", ".", "read", "(", ")", "sourcefile", ".", "close", "(", ")", "# Search content for '$Id$'.", "match", "=", "re", ".", "search", "(", "r'\\$Id\\$'", ",", "content", ")", "if", "match", "==", "None", ":", "# Make sure that the correct value for '$Id$' was already set.", "match", "=", "re", ".", "search", "(", "r'\\$Id: '", "+", "name", "+", "r'\\s+[^$]+\\$'", ",", "content", ")", "if", "match", "!=", "None", ":", "# The given file needs no fixing.", "return", "False", "# The given file needs fixing.", "return", "True" ]
Checks if the header-comment of the given file needs fixing.
[ "Checks", "if", "the", "header", "-", "comment", "of", "the", "given", "file", "needs", "fixing", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L19-L36
29,102
apple/turicreate
deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py
check_input_files_for_variadic_seq
def check_input_files_for_variadic_seq(headerDir, sourceDir): """Checks if files, used as input when pre-processing MPL-containers in their variadic form, need fixing.""" # Check input files in include/source-directories. files = glob.glob( os.path.join( headerDir, "*.hpp" ) ) files += glob.glob( os.path.join( headerDir, "aux_", "*.hpp" ) ) files += glob.glob( os.path.join( sourceDir, "src", "*" ) ) for currentFile in sorted( files ): if check_header_comment( currentFile ): return True return False
python
def check_input_files_for_variadic_seq(headerDir, sourceDir):
    """Checks if files, used as input when pre-processing MPL-containers
    in their variadic form, need fixing."""
    # Gather candidate files from the include- and source-directories.
    candidates = (
        glob.glob(os.path.join(headerDir, "*.hpp"))
        + glob.glob(os.path.join(headerDir, "aux_", "*.hpp"))
        + glob.glob(os.path.join(sourceDir, "src", "*"))
    )
    # True as soon as any file's header-comment needs fixing
    # (any() short-circuits exactly like the original early return).
    return any(check_header_comment(f) for f in sorted(candidates))
[ "def", "check_input_files_for_variadic_seq", "(", "headerDir", ",", "sourceDir", ")", ":", "# Check input files in include/source-directories.", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "headerDir", ",", "\"*.hpp\"", ")", ")", "files", "+=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "headerDir", ",", "\"aux_\"", ",", "\"*.hpp\"", ")", ")", "files", "+=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "sourceDir", ",", "\"src\"", ",", "\"*\"", ")", ")", "for", "currentFile", "in", "sorted", "(", "files", ")", ":", "if", "check_header_comment", "(", "currentFile", ")", ":", "return", "True", "return", "False" ]
Checks if files, used as input when pre-processing MPL-containers in their variadic form, need fixing.
[ "Checks", "if", "files", "used", "as", "input", "when", "pre", "-", "processing", "MPL", "-", "containers", "in", "their", "variadic", "form", "need", "fixing", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L39-L48
29,103
apple/turicreate
deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py
check_input_files_for_numbered_seq
def check_input_files_for_numbered_seq(sourceDir, suffix, containers): """Check if files, used as input when pre-processing MPL-containers in their numbered form, need fixing.""" # Check input files for each MPL-container type. for container in containers: files = glob.glob( os.path.join( sourceDir, container, container + '*' + suffix ) ) for currentFile in sorted( files ): if check_header_comment( currentFile ): return True return False
python
def check_input_files_for_numbered_seq(sourceDir, suffix, containers):
    """Check if files, used as input when pre-processing MPL-containers
    in their numbered form, need fixing."""
    # Examine the numbered input files of each MPL-container type in turn.
    for container in containers:
        pattern = os.path.join(sourceDir, container,
                               container + '*' + suffix)
        if any(check_header_comment(f) for f in sorted(glob.glob(pattern))):
            return True
    return False
[ "def", "check_input_files_for_numbered_seq", "(", "sourceDir", ",", "suffix", ",", "containers", ")", ":", "# Check input files for each MPL-container type.", "for", "container", "in", "containers", ":", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "sourceDir", ",", "container", ",", "container", "+", "'*'", "+", "suffix", ")", ")", "for", "currentFile", "in", "sorted", "(", "files", ")", ":", "if", "check_header_comment", "(", "currentFile", ")", ":", "return", "True", "return", "False" ]
Check if files, used as input when pre-processing MPL-containers in their numbered form, need fixing.
[ "Check", "if", "files", "used", "as", "input", "when", "pre", "-", "processing", "MPL", "-", "containers", "in", "their", "numbered", "form", "need", "fixing", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L51-L59
29,104
apple/turicreate
deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py
check_input_files
def check_input_files(headerDir, sourceDir, containers=['vector', 'list', 'set', 'map'], seqType='both', verbose=False): """Checks if source- and header-files, used as input when pre-processing MPL-containers, need fixing.""" # Check the input files for containers in their variadic form. result1 = False if seqType == "both" or seqType == "variadic": if verbose: print "Check if input files for pre-processing Boost.MPL variadic containers need fixing." result1 = check_input_files_for_variadic_seq(headerDir, sourceDir) if verbose: if result1: print " At least one input file needs fixing!" else: print " No input file needs fixing!" # Check the input files for containers in their numbered form. result2 = False result3 = False if seqType == "both" or seqType == "numbered": if verbose: print "Check input files for pre-processing Boost.MPL numbered containers." result2 = check_input_files_for_numbered_seq(headerDir, ".hpp", containers) result3 = check_input_files_for_numbered_seq(sourceDir, ".cpp", containers) if verbose: if result2 or result3: print " At least one input file needs fixing!" else: print " No input file needs fixing!" # Return result. return result1 or result2 or result3
python
def check_input_files(headerDir, sourceDir,
                      containers=['vector', 'list', 'set', 'map'],
                      seqType='both', verbose=False):
    """Checks if source- and header-files, used as input when pre-processing
    MPL-containers, need fixing.

    Args:
        headerDir: Directory containing the container header files.
        sourceDir: Directory containing the container source files.
        containers: MPL-container types to check.
        seqType: 'variadic', 'numbered' or 'both'.
        verbose: If True, print progress information.

    Returns:
        True if at least one input file needs fixing, False otherwise.
    """
    # Check the input files for containers in their variadic form.
    result1 = False
    if seqType == "both" or seqType == "variadic":
        if verbose:
            # Single-argument print() calls (instead of Python-2 print
            # statements) keep this file importable under Python 3 too,
            # consistent with fix_header_comment() below.
            print("Check if input files for pre-processing Boost.MPL variadic containers need fixing.")
        result1 = check_input_files_for_variadic_seq(headerDir, sourceDir)
        if verbose:
            if result1:
                print("  At least one input file needs fixing!")
            else:
                print("  No input file needs fixing!")
    # Check the input files for containers in their numbered form.
    result2 = False
    result3 = False
    if seqType == "both" or seqType == "numbered":
        if verbose:
            print("Check input files for pre-processing Boost.MPL numbered containers.")
        result2 = check_input_files_for_numbered_seq(headerDir, ".hpp", containers)
        result3 = check_input_files_for_numbered_seq(sourceDir, ".cpp", containers)
        if verbose:
            if result2 or result3:
                print("  At least one input file needs fixing!")
            else:
                print("  No input file needs fixing!")
    # Return result.
    return result1 or result2 or result3
[ "def", "check_input_files", "(", "headerDir", ",", "sourceDir", ",", "containers", "=", "[", "'vector'", ",", "'list'", ",", "'set'", ",", "'map'", "]", ",", "seqType", "=", "'both'", ",", "verbose", "=", "False", ")", ":", "# Check the input files for containers in their variadic form.", "result1", "=", "False", "if", "seqType", "==", "\"both\"", "or", "seqType", "==", "\"variadic\"", ":", "if", "verbose", ":", "print", "\"Check if input files for pre-processing Boost.MPL variadic containers need fixing.\"", "result1", "=", "check_input_files_for_variadic_seq", "(", "headerDir", ",", "sourceDir", ")", "if", "verbose", ":", "if", "result1", ":", "print", "\" At least one input file needs fixing!\"", "else", ":", "print", "\" No input file needs fixing!\"", "# Check the input files for containers in their numbered form.", "result2", "=", "False", "result3", "=", "False", "if", "seqType", "==", "\"both\"", "or", "seqType", "==", "\"numbered\"", ":", "if", "verbose", ":", "print", "\"Check input files for pre-processing Boost.MPL numbered containers.\"", "result2", "=", "check_input_files_for_numbered_seq", "(", "headerDir", ",", "\".hpp\"", ",", "containers", ")", "result3", "=", "check_input_files_for_numbered_seq", "(", "sourceDir", ",", "\".cpp\"", ",", "containers", ")", "if", "verbose", ":", "if", "result2", "or", "result3", ":", "print", "\" At least one input file needs fixing!\"", "else", ":", "print", "\" No input file needs fixing!\"", "# Return result.", "return", "result1", "or", "result2", "or", "result3" ]
Checks if source- and header-files, used as input when pre-processing MPL-containers, need fixing.
[ "Checks", "if", "source", "-", "and", "header", "-", "files", "used", "as", "input", "when", "pre", "-", "processing", "MPL", "-", "containers", "need", "fixing", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L62-L90
29,105
apple/turicreate
deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py
fix_header_comment
def fix_header_comment(filename, timestamp): """Fixes the header-comment of the given file.""" # Fix input file. name = os.path.basename( filename ) for line in fileinput.input( filename, inplace=1, mode="rU" ): # If header-comment already contains anything for '$Id$', remove it. line = re.sub(r'\$Id:[^$]+\$', r'$Id$', line.rstrip()) # Replace '$Id$' by a string containing the file's name (and a timestamp)! line = re.sub(re.escape(r'$Id$'), r'$Id: ' + name + r' ' + timestamp.isoformat() + r' $', line.rstrip()) print(line)
python
def fix_header_comment(filename, timestamp):
    """Fixes the header-comment of the given file.

    Rewrites the file in place so that any '$Id$' keyword (or a stale
    '$Id: ...$' expansion) becomes '$Id: <basename> <timestamp> $'.

    Args:
        filename: Path of the file to fix.
        timestamp: datetime whose isoformat() is written into the header.
    """
    # Fix input file.
    name = os.path.basename(filename)
    # With inplace=True, fileinput redirects stdout into the file, so the
    # print() below writes each (possibly rewritten) line back.
    # Mode "rU" was removed in Python 3.11; "r" already gives
    # universal-newline behavior in Python 3.
    for line in fileinput.input(filename, inplace=True, mode="r"):
        # If header-comment already contains anything for '$Id$', remove it.
        line = re.sub(r'\$Id:[^$]+\$', r'$Id$', line.rstrip())
        # Replace '$Id$' by a string containing the file's name (and a timestamp)!
        line = re.sub(re.escape(r'$Id$'),
                      r'$Id: ' + name + r' ' + timestamp.isoformat() + r' $',
                      line.rstrip())
        print(line)
[ "def", "fix_header_comment", "(", "filename", ",", "timestamp", ")", ":", "# Fix input file.", "name", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "for", "line", "in", "fileinput", ".", "input", "(", "filename", ",", "inplace", "=", "1", ",", "mode", "=", "\"rU\"", ")", ":", "# If header-comment already contains anything for '$Id$', remove it.", "line", "=", "re", ".", "sub", "(", "r'\\$Id:[^$]+\\$'", ",", "r'$Id$'", ",", "line", ".", "rstrip", "(", ")", ")", "# Replace '$Id$' by a string containing the file's name (and a timestamp)!", "line", "=", "re", ".", "sub", "(", "re", ".", "escape", "(", "r'$Id$'", ")", ",", "r'$Id: '", "+", "name", "+", "r' '", "+", "timestamp", ".", "isoformat", "(", ")", "+", "r' $'", ",", "line", ".", "rstrip", "(", ")", ")", "print", "(", "line", ")" ]
Fixes the header-comment of the given file.
[ "Fixes", "the", "header", "-", "comment", "of", "the", "given", "file", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L92-L101
29,106
apple/turicreate
deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py
fix_input_files_for_variadic_seq
def fix_input_files_for_variadic_seq(headerDir, sourceDir, timestamp): """Fixes files used as input when pre-processing MPL-containers in their variadic form.""" # Fix files in include/source-directories. files = glob.glob( os.path.join( headerDir, "*.hpp" ) ) files += glob.glob( os.path.join( headerDir, "aux_", "*.hpp" ) ) files += glob.glob( os.path.join( sourceDir, "src", "*" ) ) for currentFile in sorted( files ): fix_header_comment( currentFile, timestamp )
python
def fix_input_files_for_variadic_seq(headerDir, sourceDir, timestamp):
    """Fixes files used as input when pre-processing MPL-containers in their
    variadic form."""
    # Collect every input file from the include- and source-directories.
    candidates = glob.glob(os.path.join(headerDir, "*.hpp"))
    candidates += glob.glob(os.path.join(headerDir, "aux_", "*.hpp"))
    candidates += glob.glob(os.path.join(sourceDir, "src", "*"))
    # Fix each file's header-comment, in deterministic (sorted) order.
    for path in sorted(candidates):
        fix_header_comment(path, timestamp)
[ "def", "fix_input_files_for_variadic_seq", "(", "headerDir", ",", "sourceDir", ",", "timestamp", ")", ":", "# Fix files in include/source-directories.", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "headerDir", ",", "\"*.hpp\"", ")", ")", "files", "+=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "headerDir", ",", "\"aux_\"", ",", "\"*.hpp\"", ")", ")", "files", "+=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "sourceDir", ",", "\"src\"", ",", "\"*\"", ")", ")", "for", "currentFile", "in", "sorted", "(", "files", ")", ":", "fix_header_comment", "(", "currentFile", ",", "timestamp", ")" ]
Fixes files used as input when pre-processing MPL-containers in their variadic form.
[ "Fixes", "files", "used", "as", "input", "when", "pre", "-", "processing", "MPL", "-", "containers", "in", "their", "variadic", "form", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L104-L111
29,107
apple/turicreate
deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py
fix_input_files_for_numbered_seq
def fix_input_files_for_numbered_seq(sourceDir, suffix, timestamp, containers): """Fixes files used as input when pre-processing MPL-containers in their numbered form.""" # Fix input files for each MPL-container type. for container in containers: files = glob.glob( os.path.join( sourceDir, container, container + '*' + suffix ) ) for currentFile in sorted( files ): fix_header_comment( currentFile, timestamp )
python
def fix_input_files_for_numbered_seq(sourceDir, suffix, timestamp, containers):
    """Fixes files used as input when pre-processing MPL-containers in their
    numbered form."""
    # Fix the numbered input files of each MPL-container type in turn.
    for container in containers:
        pattern = os.path.join(sourceDir, container,
                               container + '*' + suffix)
        for path in sorted(glob.glob(pattern)):
            fix_header_comment(path, timestamp)
[ "def", "fix_input_files_for_numbered_seq", "(", "sourceDir", ",", "suffix", ",", "timestamp", ",", "containers", ")", ":", "# Fix input files for each MPL-container type.", "for", "container", "in", "containers", ":", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "sourceDir", ",", "container", ",", "container", "+", "'*'", "+", "suffix", ")", ")", "for", "currentFile", "in", "sorted", "(", "files", ")", ":", "fix_header_comment", "(", "currentFile", ",", "timestamp", ")" ]
Fixes files used as input when pre-processing MPL-containers in their numbered form.
[ "Fixes", "files", "used", "as", "input", "when", "pre", "-", "processing", "MPL", "-", "containers", "in", "their", "numbered", "form", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L114-L120
29,108
apple/turicreate
deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py
fix_input_files
def fix_input_files(headerDir, sourceDir, containers=['vector', 'list', 'set', 'map'], seqType='both', verbose=False): """Fixes source- and header-files used as input when pre-processing MPL-containers.""" # The new modification time. timestamp = datetime.datetime.now(); # Fix the input files for containers in their variadic form. if seqType == "both" or seqType == "variadic": if verbose: print "Fix input files for pre-processing Boost.MPL variadic containers." fix_input_files_for_variadic_seq(headerDir, sourceDir, timestamp) # Fix the input files for containers in their numbered form. if seqType == "both" or seqType == "numbered": if verbose: print "Fix input files for pre-processing Boost.MPL numbered containers." fix_input_files_for_numbered_seq(headerDir, ".hpp", timestamp, containers) fix_input_files_for_numbered_seq(sourceDir, ".cpp", timestamp, containers)
python
def fix_input_files(headerDir, sourceDir,
                    containers=['vector', 'list', 'set', 'map'],
                    seqType='both', verbose=False):
    """Fixes source- and header-files used as input when pre-processing
    MPL-containers.

    Args:
        headerDir: Directory containing the container header files.
        sourceDir: Directory containing the container source files.
        containers: MPL-container types whose input files should be fixed.
        seqType: 'variadic', 'numbered' or 'both'.
        verbose: If True, print progress information.
    """
    # The new modification time.  (Stray trailing semicolon removed.)
    timestamp = datetime.datetime.now()
    # Fix the input files for containers in their variadic form.
    if seqType == "both" or seqType == "variadic":
        if verbose:
            # Single-argument print() calls (instead of Python-2 print
            # statements) keep this file importable under Python 3 too,
            # consistent with fix_header_comment().
            print("Fix input files for pre-processing Boost.MPL variadic containers.")
        fix_input_files_for_variadic_seq(headerDir, sourceDir, timestamp)
    # Fix the input files for containers in their numbered form.
    if seqType == "both" or seqType == "numbered":
        if verbose:
            print("Fix input files for pre-processing Boost.MPL numbered containers.")
        fix_input_files_for_numbered_seq(headerDir, ".hpp", timestamp, containers)
        fix_input_files_for_numbered_seq(sourceDir, ".cpp", timestamp, containers)
[ "def", "fix_input_files", "(", "headerDir", ",", "sourceDir", ",", "containers", "=", "[", "'vector'", ",", "'list'", ",", "'set'", ",", "'map'", "]", ",", "seqType", "=", "'both'", ",", "verbose", "=", "False", ")", ":", "# The new modification time.", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "# Fix the input files for containers in their variadic form.", "if", "seqType", "==", "\"both\"", "or", "seqType", "==", "\"variadic\"", ":", "if", "verbose", ":", "print", "\"Fix input files for pre-processing Boost.MPL variadic containers.\"", "fix_input_files_for_variadic_seq", "(", "headerDir", ",", "sourceDir", ",", "timestamp", ")", "# Fix the input files for containers in their numbered form.", "if", "seqType", "==", "\"both\"", "or", "seqType", "==", "\"numbered\"", ":", "if", "verbose", ":", "print", "\"Fix input files for pre-processing Boost.MPL numbered containers.\"", "fix_input_files_for_numbered_seq", "(", "headerDir", ",", "\".hpp\"", ",", "timestamp", ",", "containers", ")", "fix_input_files_for_numbered_seq", "(", "sourceDir", ",", "\".cpp\"", ",", "timestamp", ",", "containers", ")" ]
Fixes source- and header-files used as input when pre-processing MPL-containers.
[ "Fixes", "source", "-", "and", "header", "-", "files", "used", "as", "input", "when", "pre", "-", "processing", "MPL", "-", "containers", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L123-L138
29,109
apple/turicreate
deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py
to_existing_absolute_path
def to_existing_absolute_path(string): """Converts a path into its absolute path and verifies that it exists or throws an exception.""" value = os.path.abspath(string) if not os.path.exists( value ) or not os.path.isdir( value ): msg = '"%r" is not a valid path to a directory.' % string raise argparse.ArgumentTypeError(msg) return value
python
def to_existing_absolute_path(string):
    """Converts a path into its absolute path and verifies that it exists or
    throws an exception.

    Intended for use as an argparse ``type=`` callable.

    Args:
        string: Path (possibly relative) that should name a directory.

    Returns:
        The absolute form of the path, if it is an existing directory.

    Raises:
        argparse.ArgumentTypeError: If the path is not an existing directory.
    """
    value = os.path.abspath(string)
    # os.path.isdir() is already False for non-existent paths, so the
    # original's separate os.path.exists() check was redundant.
    if not os.path.isdir(value):
        msg = '"%r" is not a valid path to a directory.' % string
        raise argparse.ArgumentTypeError(msg)
    return value
[ "def", "to_existing_absolute_path", "(", "string", ")", ":", "value", "=", "os", ".", "path", ".", "abspath", "(", "string", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "value", ")", "or", "not", "os", ".", "path", ".", "isdir", "(", "value", ")", ":", "msg", "=", "'\"%r\" is not a valid path to a directory.'", "%", "string", "raise", "argparse", ".", "ArgumentTypeError", "(", "msg", ")", "return", "value" ]
Converts a path into its absolute path and verifies that it exists or throws an exception.
[ "Converts", "a", "path", "into", "its", "absolute", "path", "and", "verifies", "that", "it", "exists", "or", "throws", "an", "exception", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L141-L147
29,110
apple/turicreate
src/unity/python/turicreate/toolkits/image_similarity/image_similarity.py
ImageSimilarityModel.query
def query(self, dataset, label=None, k=5, radius=None, verbose=True, batch_size=64): """ For each image, retrieve the nearest neighbors from the model's stored data. In general, the query dataset does not need to be the same as the reference data stored in the model. Parameters ---------- dataset : SFrame | SArray | turicreate.Image Query data. If dataset is an SFrame, it must contain columns with the same names and types as the features used to train the model. Additional columns are ignored. label : str, optional Name of the query SFrame column with row labels. If 'label' is not specified, row numbers are used to identify query dataset rows in the output SFrame. k : int, optional Number of nearest neighbors to return from the reference set for each query observation. The default is 5 neighbors, but setting it to ``None`` will return all neighbors within ``radius`` of the query point. radius : float, optional Only neighbors whose distance to a query point is smaller than this value are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. verbose: bool, optional If True, print progress updates and model details. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. Returns ------- out : SFrame An SFrame with the k-nearest neighbors of each query observation. The result contains four columns: the first is the label of the query observation, the second is the label of the nearby reference observation, the third is the distance between the query and reference observations, and the fourth is the rank of the reference observation among the query's k-nearest neighbors. See Also -------- similarity_graph Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each query point returns all of the reference set. 
If the reference dataset has :math:`n` rows and the query dataset has :math:`m` rows, the output is an SFrame with :math:`nm` rows. Examples -------- >>> model.query(queries, 'label', k=2) +-------------+-----------------+----------------+------+ | query_label | reference_label | distance | rank | +-------------+-----------------+----------------+------+ | 0 | 2 | 0.305941170816 | 1 | | 0 | 1 | 0.771556867638 | 2 | | 1 | 1 | 0.390128184063 | 1 | | 1 | 0 | 0.464004310325 | 2 | | 2 | 0 | 0.170293863659 | 1 | | 2 | 1 | 0.464004310325 | 2 | +-------------+-----------------+----------------+------+ """ if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)): raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image') if(batch_size < 1): raise ValueError("'batch_size' must be greater than or equal to 1") if isinstance(dataset, _tc.SArray): dataset = _tc.SFrame({self.feature: dataset}) elif isinstance(dataset, _tc.Image): dataset = _tc.SFrame({self.feature: [dataset]}) extracted_features = self._extract_features(dataset, verbose=verbose, batch_size=batch_size) if label is not None: extracted_features[label] = dataset[label] return self.similarity_model.query(extracted_features, label, k, radius, verbose)
python
def query(self, dataset, label=None, k=5, radius=None, verbose=True, batch_size=64): """ For each image, retrieve the nearest neighbors from the model's stored data. In general, the query dataset does not need to be the same as the reference data stored in the model. Parameters ---------- dataset : SFrame | SArray | turicreate.Image Query data. If dataset is an SFrame, it must contain columns with the same names and types as the features used to train the model. Additional columns are ignored. label : str, optional Name of the query SFrame column with row labels. If 'label' is not specified, row numbers are used to identify query dataset rows in the output SFrame. k : int, optional Number of nearest neighbors to return from the reference set for each query observation. The default is 5 neighbors, but setting it to ``None`` will return all neighbors within ``radius`` of the query point. radius : float, optional Only neighbors whose distance to a query point is smaller than this value are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. verbose: bool, optional If True, print progress updates and model details. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. Returns ------- out : SFrame An SFrame with the k-nearest neighbors of each query observation. The result contains four columns: the first is the label of the query observation, the second is the label of the nearby reference observation, the third is the distance between the query and reference observations, and the fourth is the rank of the reference observation among the query's k-nearest neighbors. See Also -------- similarity_graph Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each query point returns all of the reference set. 
If the reference dataset has :math:`n` rows and the query dataset has :math:`m` rows, the output is an SFrame with :math:`nm` rows. Examples -------- >>> model.query(queries, 'label', k=2) +-------------+-----------------+----------------+------+ | query_label | reference_label | distance | rank | +-------------+-----------------+----------------+------+ | 0 | 2 | 0.305941170816 | 1 | | 0 | 1 | 0.771556867638 | 2 | | 1 | 1 | 0.390128184063 | 1 | | 1 | 0 | 0.464004310325 | 2 | | 2 | 0 | 0.170293863659 | 1 | | 2 | 1 | 0.464004310325 | 2 | +-------------+-----------------+----------------+------+ """ if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)): raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image') if(batch_size < 1): raise ValueError("'batch_size' must be greater than or equal to 1") if isinstance(dataset, _tc.SArray): dataset = _tc.SFrame({self.feature: dataset}) elif isinstance(dataset, _tc.Image): dataset = _tc.SFrame({self.feature: [dataset]}) extracted_features = self._extract_features(dataset, verbose=verbose, batch_size=batch_size) if label is not None: extracted_features[label] = dataset[label] return self.similarity_model.query(extracted_features, label, k, radius, verbose)
[ "def", "query", "(", "self", ",", "dataset", ",", "label", "=", "None", ",", "k", "=", "5", ",", "radius", "=", "None", ",", "verbose", "=", "True", ",", "batch_size", "=", "64", ")", ":", "if", "not", "isinstance", "(", "dataset", ",", "(", "_tc", ".", "SFrame", ",", "_tc", ".", "SArray", ",", "_tc", ".", "Image", ")", ")", ":", "raise", "TypeError", "(", "'dataset must be either an SFrame, SArray or turicreate.Image'", ")", "if", "(", "batch_size", "<", "1", ")", ":", "raise", "ValueError", "(", "\"'batch_size' must be greater than or equal to 1\"", ")", "if", "isinstance", "(", "dataset", ",", "_tc", ".", "SArray", ")", ":", "dataset", "=", "_tc", ".", "SFrame", "(", "{", "self", ".", "feature", ":", "dataset", "}", ")", "elif", "isinstance", "(", "dataset", ",", "_tc", ".", "Image", ")", ":", "dataset", "=", "_tc", ".", "SFrame", "(", "{", "self", ".", "feature", ":", "[", "dataset", "]", "}", ")", "extracted_features", "=", "self", ".", "_extract_features", "(", "dataset", ",", "verbose", "=", "verbose", ",", "batch_size", "=", "batch_size", ")", "if", "label", "is", "not", "None", ":", "extracted_features", "[", "label", "]", "=", "dataset", "[", "label", "]", "return", "self", ".", "similarity_model", ".", "query", "(", "extracted_features", ",", "label", ",", "k", ",", "radius", ",", "verbose", ")" ]
For each image, retrieve the nearest neighbors from the model's stored data. In general, the query dataset does not need to be the same as the reference data stored in the model. Parameters ---------- dataset : SFrame | SArray | turicreate.Image Query data. If dataset is an SFrame, it must contain columns with the same names and types as the features used to train the model. Additional columns are ignored. label : str, optional Name of the query SFrame column with row labels. If 'label' is not specified, row numbers are used to identify query dataset rows in the output SFrame. k : int, optional Number of nearest neighbors to return from the reference set for each query observation. The default is 5 neighbors, but setting it to ``None`` will return all neighbors within ``radius`` of the query point. radius : float, optional Only neighbors whose distance to a query point is smaller than this value are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. verbose: bool, optional If True, print progress updates and model details. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. Returns ------- out : SFrame An SFrame with the k-nearest neighbors of each query observation. The result contains four columns: the first is the label of the query observation, the second is the label of the nearby reference observation, the third is the distance between the query and reference observations, and the fourth is the rank of the reference observation among the query's k-nearest neighbors. See Also -------- similarity_graph Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each query point returns all of the reference set. If the reference dataset has :math:`n` rows and the query dataset has :math:`m` rows, the output is an SFrame with :math:`nm` rows. 
Examples -------- >>> model.query(queries, 'label', k=2) +-------------+-----------------+----------------+------+ | query_label | reference_label | distance | rank | +-------------+-----------------+----------------+------+ | 0 | 2 | 0.305941170816 | 1 | | 0 | 1 | 0.771556867638 | 2 | | 1 | 1 | 0.390128184063 | 1 | | 1 | 0 | 0.464004310325 | 2 | | 2 | 0 | 0.170293863659 | 1 | | 2 | 1 | 0.464004310325 | 2 | +-------------+-----------------+----------------+------+
[ "For", "each", "image", "retrieve", "the", "nearest", "neighbors", "from", "the", "model", "s", "stored", "data", ".", "In", "general", "the", "query", "dataset", "does", "not", "need", "to", "be", "the", "same", "as", "the", "reference", "data", "stored", "in", "the", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/image_similarity/image_similarity.py#L295-L381
29,111
apple/turicreate
src/unity/python/turicreate/toolkits/image_similarity/image_similarity.py
ImageSimilarityModel.similarity_graph
def similarity_graph(self, k=5, radius=None, include_self_edges=False, output_type='SGraph', verbose=True): """ Construct the similarity graph on the reference dataset, which is already stored in the model to find the top `k` similar images for each image in your input dataset. This is conceptually very similar to running `query` with the reference set, but this method is optimized for the purpose, syntactically simpler, and automatically removes self-edges. WARNING: This method can take time. Parameters ---------- k : int, optional Maximum number of neighbors to return for each point in the dataset. Setting this to ``None`` deactivates the constraint, so that all neighbors are returned within ``radius`` of a given point. radius : float, optional For a given point, only neighbors within this distance are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. include_self_edges : bool, optional For most distance functions, each point in the model's reference dataset is its own nearest neighbor. If this parameter is set to False, this result is ignored, and the nearest neighbors are returned *excluding* the point itself. output_type : {'SGraph', 'SFrame'}, optional By default, the results are returned in the form of an SGraph, where each point in the reference dataset is a vertex and an edge A -> B indicates that vertex B is a nearest neighbor of vertex A. If 'output_type' is set to 'SFrame', the output is in the same form as the results of the 'query' method: an SFrame with columns indicating the query label (in this case the query data is the same as the reference data), reference label, distance between the two points, and the rank of the neighbor. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : SFrame or SGraph The type of the output object depends on the 'output_type' parameter. See the parameter description for more detail. 
Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each data point is matched to the entire dataset. If the reference dataset has :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an SGraph with :math:`n^2` edges). Examples -------- >>> graph = model.similarity_graph(k=1) # an SGraph >>> >>> # Most similar image for each image in the input dataset >>> graph.edges +----------+----------+----------------+------+ | __src_id | __dst_id | distance | rank | +----------+----------+----------------+------+ | 0 | 1 | 0.376430604494 | 1 | | 2 | 1 | 0.55542776308 | 1 | | 1 | 0 | 0.376430604494 | 1 | +----------+----------+----------------+------+ """ return self.similarity_model.similarity_graph(k, radius, include_self_edges, output_type, verbose)
python
def similarity_graph(self, k=5, radius=None, include_self_edges=False, output_type='SGraph', verbose=True): """ Construct the similarity graph on the reference dataset, which is already stored in the model to find the top `k` similar images for each image in your input dataset. This is conceptually very similar to running `query` with the reference set, but this method is optimized for the purpose, syntactically simpler, and automatically removes self-edges. WARNING: This method can take time. Parameters ---------- k : int, optional Maximum number of neighbors to return for each point in the dataset. Setting this to ``None`` deactivates the constraint, so that all neighbors are returned within ``radius`` of a given point. radius : float, optional For a given point, only neighbors within this distance are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. include_self_edges : bool, optional For most distance functions, each point in the model's reference dataset is its own nearest neighbor. If this parameter is set to False, this result is ignored, and the nearest neighbors are returned *excluding* the point itself. output_type : {'SGraph', 'SFrame'}, optional By default, the results are returned in the form of an SGraph, where each point in the reference dataset is a vertex and an edge A -> B indicates that vertex B is a nearest neighbor of vertex A. If 'output_type' is set to 'SFrame', the output is in the same form as the results of the 'query' method: an SFrame with columns indicating the query label (in this case the query data is the same as the reference data), reference label, distance between the two points, and the rank of the neighbor. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : SFrame or SGraph The type of the output object depends on the 'output_type' parameter. See the parameter description for more detail. 
Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each data point is matched to the entire dataset. If the reference dataset has :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an SGraph with :math:`n^2` edges). Examples -------- >>> graph = model.similarity_graph(k=1) # an SGraph >>> >>> # Most similar image for each image in the input dataset >>> graph.edges +----------+----------+----------------+------+ | __src_id | __dst_id | distance | rank | +----------+----------+----------------+------+ | 0 | 1 | 0.376430604494 | 1 | | 2 | 1 | 0.55542776308 | 1 | | 1 | 0 | 0.376430604494 | 1 | +----------+----------+----------------+------+ """ return self.similarity_model.similarity_graph(k, radius, include_self_edges, output_type, verbose)
[ "def", "similarity_graph", "(", "self", ",", "k", "=", "5", ",", "radius", "=", "None", ",", "include_self_edges", "=", "False", ",", "output_type", "=", "'SGraph'", ",", "verbose", "=", "True", ")", ":", "return", "self", ".", "similarity_model", ".", "similarity_graph", "(", "k", ",", "radius", ",", "include_self_edges", ",", "output_type", ",", "verbose", ")" ]
Construct the similarity graph on the reference dataset, which is already stored in the model to find the top `k` similar images for each image in your input dataset. This is conceptually very similar to running `query` with the reference set, but this method is optimized for the purpose, syntactically simpler, and automatically removes self-edges. WARNING: This method can take time. Parameters ---------- k : int, optional Maximum number of neighbors to return for each point in the dataset. Setting this to ``None`` deactivates the constraint, so that all neighbors are returned within ``radius`` of a given point. radius : float, optional For a given point, only neighbors within this distance are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. include_self_edges : bool, optional For most distance functions, each point in the model's reference dataset is its own nearest neighbor. If this parameter is set to False, this result is ignored, and the nearest neighbors are returned *excluding* the point itself. output_type : {'SGraph', 'SFrame'}, optional By default, the results are returned in the form of an SGraph, where each point in the reference dataset is a vertex and an edge A -> B indicates that vertex B is a nearest neighbor of vertex A. If 'output_type' is set to 'SFrame', the output is in the same form as the results of the 'query' method: an SFrame with columns indicating the query label (in this case the query data is the same as the reference data), reference label, distance between the two points, and the rank of the neighbor. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : SFrame or SGraph The type of the output object depends on the 'output_type' parameter. See the parameter description for more detail. Notes ----- - If both ``k`` and ``radius`` are set to ``None``, each data point is matched to the entire dataset. 
If the reference dataset has :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an SGraph with :math:`n^2` edges). Examples -------- >>> graph = model.similarity_graph(k=1) # an SGraph >>> >>> # Most similar image for each image in the input dataset >>> graph.edges +----------+----------+----------------+------+ | __src_id | __dst_id | distance | rank | +----------+----------+----------------+------+ | 0 | 1 | 0.376430604494 | 1 | | 2 | 1 | 0.55542776308 | 1 | | 1 | 0 | 0.376430604494 | 1 | +----------+----------+----------------+------+
[ "Construct", "the", "similarity", "graph", "on", "the", "reference", "dataset", "which", "is", "already", "stored", "in", "the", "model", "to", "find", "the", "top", "k", "similar", "images", "for", "each", "image", "in", "your", "input", "dataset", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/image_similarity/image_similarity.py#L383-L456
29,112
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
_SimpleSizer
def _SimpleSizer(compute_value_size): """A sizer which uses the function compute_value_size to compute the size of each value. Typically compute_value_size is _VarintSize.""" def SpecificSizer(field_number, is_repeated, is_packed): tag_size = _TagSize(field_number) if is_packed: local_VarintSize = _VarintSize def PackedFieldSize(value): result = 0 for element in value: result += compute_value_size(element) return result + local_VarintSize(result) + tag_size return PackedFieldSize elif is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: result += compute_value_size(element) return result return RepeatedFieldSize else: def FieldSize(value): return tag_size + compute_value_size(value) return FieldSize return SpecificSizer
python
def _SimpleSizer(compute_value_size): """A sizer which uses the function compute_value_size to compute the size of each value. Typically compute_value_size is _VarintSize.""" def SpecificSizer(field_number, is_repeated, is_packed): tag_size = _TagSize(field_number) if is_packed: local_VarintSize = _VarintSize def PackedFieldSize(value): result = 0 for element in value: result += compute_value_size(element) return result + local_VarintSize(result) + tag_size return PackedFieldSize elif is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: result += compute_value_size(element) return result return RepeatedFieldSize else: def FieldSize(value): return tag_size + compute_value_size(value) return FieldSize return SpecificSizer
[ "def", "_SimpleSizer", "(", "compute_value_size", ")", ":", "def", "SpecificSizer", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "tag_size", "=", "_TagSize", "(", "field_number", ")", "if", "is_packed", ":", "local_VarintSize", "=", "_VarintSize", "def", "PackedFieldSize", "(", "value", ")", ":", "result", "=", "0", "for", "element", "in", "value", ":", "result", "+=", "compute_value_size", "(", "element", ")", "return", "result", "+", "local_VarintSize", "(", "result", ")", "+", "tag_size", "return", "PackedFieldSize", "elif", "is_repeated", ":", "def", "RepeatedFieldSize", "(", "value", ")", ":", "result", "=", "tag_size", "*", "len", "(", "value", ")", "for", "element", "in", "value", ":", "result", "+=", "compute_value_size", "(", "element", ")", "return", "result", "return", "RepeatedFieldSize", "else", ":", "def", "FieldSize", "(", "value", ")", ":", "return", "tag_size", "+", "compute_value_size", "(", "value", ")", "return", "FieldSize", "return", "SpecificSizer" ]
A sizer which uses the function compute_value_size to compute the size of each value. Typically compute_value_size is _VarintSize.
[ "A", "sizer", "which", "uses", "the", "function", "compute_value_size", "to", "compute", "the", "size", "of", "each", "value", ".", "Typically", "compute_value_size", "is", "_VarintSize", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L126-L152
29,113
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
_FixedSizer
def _FixedSizer(value_size): """Like _SimpleSizer except for a fixed-size field. The input is the size of one value.""" def SpecificSizer(field_number, is_repeated, is_packed): tag_size = _TagSize(field_number) if is_packed: local_VarintSize = _VarintSize def PackedFieldSize(value): result = len(value) * value_size return result + local_VarintSize(result) + tag_size return PackedFieldSize elif is_repeated: element_size = value_size + tag_size def RepeatedFieldSize(value): return len(value) * element_size return RepeatedFieldSize else: field_size = value_size + tag_size def FieldSize(value): return field_size return FieldSize return SpecificSizer
python
def _FixedSizer(value_size): """Like _SimpleSizer except for a fixed-size field. The input is the size of one value.""" def SpecificSizer(field_number, is_repeated, is_packed): tag_size = _TagSize(field_number) if is_packed: local_VarintSize = _VarintSize def PackedFieldSize(value): result = len(value) * value_size return result + local_VarintSize(result) + tag_size return PackedFieldSize elif is_repeated: element_size = value_size + tag_size def RepeatedFieldSize(value): return len(value) * element_size return RepeatedFieldSize else: field_size = value_size + tag_size def FieldSize(value): return field_size return FieldSize return SpecificSizer
[ "def", "_FixedSizer", "(", "value_size", ")", ":", "def", "SpecificSizer", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "tag_size", "=", "_TagSize", "(", "field_number", ")", "if", "is_packed", ":", "local_VarintSize", "=", "_VarintSize", "def", "PackedFieldSize", "(", "value", ")", ":", "result", "=", "len", "(", "value", ")", "*", "value_size", "return", "result", "+", "local_VarintSize", "(", "result", ")", "+", "tag_size", "return", "PackedFieldSize", "elif", "is_repeated", ":", "element_size", "=", "value_size", "+", "tag_size", "def", "RepeatedFieldSize", "(", "value", ")", ":", "return", "len", "(", "value", ")", "*", "element_size", "return", "RepeatedFieldSize", "else", ":", "field_size", "=", "value_size", "+", "tag_size", "def", "FieldSize", "(", "value", ")", ":", "return", "field_size", "return", "FieldSize", "return", "SpecificSizer" ]
Like _SimpleSizer except for a fixed-size field. The input is the size of one value.
[ "Like", "_SimpleSizer", "except", "for", "a", "fixed", "-", "size", "field", ".", "The", "input", "is", "the", "size", "of", "one", "value", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L184-L207
29,114
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
BytesSizer
def BytesSizer(field_number, is_repeated, is_packed): """Returns a sizer for a bytes field.""" tag_size = _TagSize(field_number) local_VarintSize = _VarintSize local_len = len assert not is_packed if is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: l = local_len(element) result += local_VarintSize(l) + l return result return RepeatedFieldSize else: def FieldSize(value): l = local_len(value) return tag_size + local_VarintSize(l) + l return FieldSize
python
def BytesSizer(field_number, is_repeated, is_packed): """Returns a sizer for a bytes field.""" tag_size = _TagSize(field_number) local_VarintSize = _VarintSize local_len = len assert not is_packed if is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: l = local_len(element) result += local_VarintSize(l) + l return result return RepeatedFieldSize else: def FieldSize(value): l = local_len(value) return tag_size + local_VarintSize(l) + l return FieldSize
[ "def", "BytesSizer", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "tag_size", "=", "_TagSize", "(", "field_number", ")", "local_VarintSize", "=", "_VarintSize", "local_len", "=", "len", "assert", "not", "is_packed", "if", "is_repeated", ":", "def", "RepeatedFieldSize", "(", "value", ")", ":", "result", "=", "tag_size", "*", "len", "(", "value", ")", "for", "element", "in", "value", ":", "l", "=", "local_len", "(", "element", ")", "result", "+=", "local_VarintSize", "(", "l", ")", "+", "l", "return", "result", "return", "RepeatedFieldSize", "else", ":", "def", "FieldSize", "(", "value", ")", ":", "l", "=", "local_len", "(", "value", ")", "return", "tag_size", "+", "local_VarintSize", "(", "l", ")", "+", "l", "return", "FieldSize" ]
Returns a sizer for a bytes field.
[ "Returns", "a", "sizer", "for", "a", "bytes", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L252-L271
29,115
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
GroupSizer
def GroupSizer(field_number, is_repeated, is_packed): """Returns a sizer for a group field.""" tag_size = _TagSize(field_number) * 2 assert not is_packed if is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: result += element.ByteSize() return result return RepeatedFieldSize else: def FieldSize(value): return tag_size + value.ByteSize() return FieldSize
python
def GroupSizer(field_number, is_repeated, is_packed): """Returns a sizer for a group field.""" tag_size = _TagSize(field_number) * 2 assert not is_packed if is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: result += element.ByteSize() return result return RepeatedFieldSize else: def FieldSize(value): return tag_size + value.ByteSize() return FieldSize
[ "def", "GroupSizer", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "tag_size", "=", "_TagSize", "(", "field_number", ")", "*", "2", "assert", "not", "is_packed", "if", "is_repeated", ":", "def", "RepeatedFieldSize", "(", "value", ")", ":", "result", "=", "tag_size", "*", "len", "(", "value", ")", "for", "element", "in", "value", ":", "result", "+=", "element", ".", "ByteSize", "(", ")", "return", "result", "return", "RepeatedFieldSize", "else", ":", "def", "FieldSize", "(", "value", ")", ":", "return", "tag_size", "+", "value", ".", "ByteSize", "(", ")", "return", "FieldSize" ]
Returns a sizer for a group field.
[ "Returns", "a", "sizer", "for", "a", "group", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L274-L289
29,116
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
MessageSizer
def MessageSizer(field_number, is_repeated, is_packed): """Returns a sizer for a message field.""" tag_size = _TagSize(field_number) local_VarintSize = _VarintSize assert not is_packed if is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: l = element.ByteSize() result += local_VarintSize(l) + l return result return RepeatedFieldSize else: def FieldSize(value): l = value.ByteSize() return tag_size + local_VarintSize(l) + l return FieldSize
python
def MessageSizer(field_number, is_repeated, is_packed): """Returns a sizer for a message field.""" tag_size = _TagSize(field_number) local_VarintSize = _VarintSize assert not is_packed if is_repeated: def RepeatedFieldSize(value): result = tag_size * len(value) for element in value: l = element.ByteSize() result += local_VarintSize(l) + l return result return RepeatedFieldSize else: def FieldSize(value): l = value.ByteSize() return tag_size + local_VarintSize(l) + l return FieldSize
[ "def", "MessageSizer", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "tag_size", "=", "_TagSize", "(", "field_number", ")", "local_VarintSize", "=", "_VarintSize", "assert", "not", "is_packed", "if", "is_repeated", ":", "def", "RepeatedFieldSize", "(", "value", ")", ":", "result", "=", "tag_size", "*", "len", "(", "value", ")", "for", "element", "in", "value", ":", "l", "=", "element", ".", "ByteSize", "(", ")", "result", "+=", "local_VarintSize", "(", "l", ")", "+", "l", "return", "result", "return", "RepeatedFieldSize", "else", ":", "def", "FieldSize", "(", "value", ")", ":", "l", "=", "value", ".", "ByteSize", "(", ")", "return", "tag_size", "+", "local_VarintSize", "(", "l", ")", "+", "l", "return", "FieldSize" ]
Returns a sizer for a message field.
[ "Returns", "a", "sizer", "for", "a", "message", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L292-L310
29,117
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
MessageSetItemSizer
def MessageSetItemSizer(field_number): """Returns a sizer for extensions of MessageSet. The message set message looks like this: message MessageSet { repeated group Item = 1 { required int32 type_id = 2; required string message = 3; } } """ static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) + _TagSize(3)) local_VarintSize = _VarintSize def FieldSize(value): l = value.ByteSize() return static_size + local_VarintSize(l) + l return FieldSize
python
def MessageSetItemSizer(field_number): """Returns a sizer for extensions of MessageSet. The message set message looks like this: message MessageSet { repeated group Item = 1 { required int32 type_id = 2; required string message = 3; } } """ static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) + _TagSize(3)) local_VarintSize = _VarintSize def FieldSize(value): l = value.ByteSize() return static_size + local_VarintSize(l) + l return FieldSize
[ "def", "MessageSetItemSizer", "(", "field_number", ")", ":", "static_size", "=", "(", "_TagSize", "(", "1", ")", "*", "2", "+", "_TagSize", "(", "2", ")", "+", "_VarintSize", "(", "field_number", ")", "+", "_TagSize", "(", "3", ")", ")", "local_VarintSize", "=", "_VarintSize", "def", "FieldSize", "(", "value", ")", ":", "l", "=", "value", ".", "ByteSize", "(", ")", "return", "static_size", "+", "local_VarintSize", "(", "l", ")", "+", "l", "return", "FieldSize" ]
Returns a sizer for extensions of MessageSet. The message set message looks like this: message MessageSet { repeated group Item = 1 { required int32 type_id = 2; required string message = 3; } }
[ "Returns", "a", "sizer", "for", "extensions", "of", "MessageSet", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L317-L336
29,118
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
MapSizer
def MapSizer(field_descriptor, is_message_map): """Returns a sizer for a map field.""" # Can't look at field_descriptor.message_type._concrete_class because it may # not have been initialized yet. message_type = field_descriptor.message_type message_sizer = MessageSizer(field_descriptor.number, False, False) def FieldSize(map_value): total = 0 for key in map_value: value = map_value[key] # It's wasteful to create the messages and throw them away one second # later since we'll do the same for the actual encode. But there's not an # obvious way to avoid this within the current design without tons of code # duplication. For message map, value.ByteSize() should be called to # update the status. entry_msg = message_type._concrete_class(key=key, value=value) total += message_sizer(entry_msg) if is_message_map: value.ByteSize() return total return FieldSize
python
def MapSizer(field_descriptor, is_message_map): """Returns a sizer for a map field.""" # Can't look at field_descriptor.message_type._concrete_class because it may # not have been initialized yet. message_type = field_descriptor.message_type message_sizer = MessageSizer(field_descriptor.number, False, False) def FieldSize(map_value): total = 0 for key in map_value: value = map_value[key] # It's wasteful to create the messages and throw them away one second # later since we'll do the same for the actual encode. But there's not an # obvious way to avoid this within the current design without tons of code # duplication. For message map, value.ByteSize() should be called to # update the status. entry_msg = message_type._concrete_class(key=key, value=value) total += message_sizer(entry_msg) if is_message_map: value.ByteSize() return total return FieldSize
[ "def", "MapSizer", "(", "field_descriptor", ",", "is_message_map", ")", ":", "# Can't look at field_descriptor.message_type._concrete_class because it may", "# not have been initialized yet.", "message_type", "=", "field_descriptor", ".", "message_type", "message_sizer", "=", "MessageSizer", "(", "field_descriptor", ".", "number", ",", "False", ",", "False", ")", "def", "FieldSize", "(", "map_value", ")", ":", "total", "=", "0", "for", "key", "in", "map_value", ":", "value", "=", "map_value", "[", "key", "]", "# It's wasteful to create the messages and throw them away one second", "# later since we'll do the same for the actual encode. But there's not an", "# obvious way to avoid this within the current design without tons of code", "# duplication. For message map, value.ByteSize() should be called to", "# update the status.", "entry_msg", "=", "message_type", ".", "_concrete_class", "(", "key", "=", "key", ",", "value", "=", "value", ")", "total", "+=", "message_sizer", "(", "entry_msg", ")", "if", "is_message_map", ":", "value", ".", "ByteSize", "(", ")", "return", "total", "return", "FieldSize" ]
Returns a sizer for a map field.
[ "Returns", "a", "sizer", "for", "a", "map", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L343-L366
29,119
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
_VarintBytes
def _VarintBytes(value): """Encode the given integer as a varint and return the bytes. This is only called at startup time so it doesn't need to be fast.""" pieces = [] _EncodeVarint(pieces.append, value) return b"".join(pieces)
python
def _VarintBytes(value): """Encode the given integer as a varint and return the bytes. This is only called at startup time so it doesn't need to be fast.""" pieces = [] _EncodeVarint(pieces.append, value) return b"".join(pieces)
[ "def", "_VarintBytes", "(", "value", ")", ":", "pieces", "=", "[", "]", "_EncodeVarint", "(", "pieces", ".", "append", ",", "value", ")", "return", "b\"\"", ".", "join", "(", "pieces", ")" ]
Encode the given integer as a varint and return the bytes. This is only called at startup time so it doesn't need to be fast.
[ "Encode", "the", "given", "integer", "as", "a", "varint", "and", "return", "the", "bytes", ".", "This", "is", "only", "called", "at", "startup", "time", "so", "it", "doesn", "t", "need", "to", "be", "fast", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L409-L415
29,120
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
_SimpleEncoder
def _SimpleEncoder(wire_type, encode_value, compute_value_size): """Return a constructor for an encoder for fields of a particular type. Args: wire_type: The field's wire type, for encoding tags. encode_value: A function which encodes an individual value, e.g. _EncodeVarint(). compute_value_size: A function which computes the size of an individual value, e.g. _VarintSize(). """ def SpecificEncoder(field_number, is_repeated, is_packed): if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) size = 0 for element in value: size += compute_value_size(element) local_EncodeVarint(write, size) for element in value: encode_value(write, element) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) encode_value(write, element) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) return encode_value(write, value) return EncodeField return SpecificEncoder
python
def _SimpleEncoder(wire_type, encode_value, compute_value_size): """Return a constructor for an encoder for fields of a particular type. Args: wire_type: The field's wire type, for encoding tags. encode_value: A function which encodes an individual value, e.g. _EncodeVarint(). compute_value_size: A function which computes the size of an individual value, e.g. _VarintSize(). """ def SpecificEncoder(field_number, is_repeated, is_packed): if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) size = 0 for element in value: size += compute_value_size(element) local_EncodeVarint(write, size) for element in value: encode_value(write, element) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) encode_value(write, element) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) return encode_value(write, value) return EncodeField return SpecificEncoder
[ "def", "_SimpleEncoder", "(", "wire_type", ",", "encode_value", ",", "compute_value_size", ")", ":", "def", "SpecificEncoder", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "if", "is_packed", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_LENGTH_DELIMITED", ")", "local_EncodeVarint", "=", "_EncodeVarint", "def", "EncodePackedField", "(", "write", ",", "value", ")", ":", "write", "(", "tag_bytes", ")", "size", "=", "0", "for", "element", "in", "value", ":", "size", "+=", "compute_value_size", "(", "element", ")", "local_EncodeVarint", "(", "write", ",", "size", ")", "for", "element", "in", "value", ":", "encode_value", "(", "write", ",", "element", ")", "return", "EncodePackedField", "elif", "is_repeated", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_type", ")", "def", "EncodeRepeatedField", "(", "write", ",", "value", ")", ":", "for", "element", "in", "value", ":", "write", "(", "tag_bytes", ")", "encode_value", "(", "write", ",", "element", ")", "return", "EncodeRepeatedField", "else", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_type", ")", "def", "EncodeField", "(", "write", ",", "value", ")", ":", "write", "(", "tag_bytes", ")", "return", "encode_value", "(", "write", ",", "value", ")", "return", "EncodeField", "return", "SpecificEncoder" ]
Return a constructor for an encoder for fields of a particular type. Args: wire_type: The field's wire type, for encoding tags. encode_value: A function which encodes an individual value, e.g. _EncodeVarint(). compute_value_size: A function which computes the size of an individual value, e.g. _VarintSize().
[ "Return", "a", "constructor", "for", "an", "encoder", "for", "fields", "of", "a", "particular", "type", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L428-L466
29,121
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
_StructPackEncoder
def _StructPackEncoder(wire_type, format): """Return a constructor for an encoder for a fixed-width field. Args: wire_type: The field's wire type, for encoding tags. format: The format string to pass to struct.pack(). """ value_size = struct.calcsize(format) def SpecificEncoder(field_number, is_repeated, is_packed): local_struct_pack = struct.pack if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, len(value) * value_size) for element in value: write(local_struct_pack(format, element)) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) write(local_struct_pack(format, element)) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) return write(local_struct_pack(format, value)) return EncodeField return SpecificEncoder
python
def _StructPackEncoder(wire_type, format): """Return a constructor for an encoder for a fixed-width field. Args: wire_type: The field's wire type, for encoding tags. format: The format string to pass to struct.pack(). """ value_size = struct.calcsize(format) def SpecificEncoder(field_number, is_repeated, is_packed): local_struct_pack = struct.pack if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, len(value) * value_size) for element in value: write(local_struct_pack(format, element)) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) write(local_struct_pack(format, element)) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) return write(local_struct_pack(format, value)) return EncodeField return SpecificEncoder
[ "def", "_StructPackEncoder", "(", "wire_type", ",", "format", ")", ":", "value_size", "=", "struct", ".", "calcsize", "(", "format", ")", "def", "SpecificEncoder", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "local_struct_pack", "=", "struct", ".", "pack", "if", "is_packed", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_LENGTH_DELIMITED", ")", "local_EncodeVarint", "=", "_EncodeVarint", "def", "EncodePackedField", "(", "write", ",", "value", ")", ":", "write", "(", "tag_bytes", ")", "local_EncodeVarint", "(", "write", ",", "len", "(", "value", ")", "*", "value_size", ")", "for", "element", "in", "value", ":", "write", "(", "local_struct_pack", "(", "format", ",", "element", ")", ")", "return", "EncodePackedField", "elif", "is_repeated", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_type", ")", "def", "EncodeRepeatedField", "(", "write", ",", "value", ")", ":", "for", "element", "in", "value", ":", "write", "(", "tag_bytes", ")", "write", "(", "local_struct_pack", "(", "format", ",", "element", ")", ")", "return", "EncodeRepeatedField", "else", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_type", ")", "def", "EncodeField", "(", "write", ",", "value", ")", ":", "write", "(", "tag_bytes", ")", "return", "write", "(", "local_struct_pack", "(", "format", ",", "value", ")", ")", "return", "EncodeField", "return", "SpecificEncoder" ]
Return a constructor for an encoder for a fixed-width field. Args: wire_type: The field's wire type, for encoding tags. format: The format string to pass to struct.pack().
[ "Return", "a", "constructor", "for", "an", "encoder", "for", "a", "fixed", "-", "width", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L503-L538
29,122
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
_FloatingPointEncoder
def _FloatingPointEncoder(wire_type, format): """Return a constructor for an encoder for float fields. This is like StructPackEncoder, but catches errors that may be due to passing non-finite floating-point values to struct.pack, and makes a second attempt to encode those values. Args: wire_type: The field's wire type, for encoding tags. format: The format string to pass to struct.pack(). """ value_size = struct.calcsize(format) if value_size == 4: def EncodeNonFiniteOrRaise(write, value): # Remember that the serialized form uses little-endian byte order. if value == _POS_INF: write(b'\x00\x00\x80\x7F') elif value == _NEG_INF: write(b'\x00\x00\x80\xFF') elif value != value: # NaN write(b'\x00\x00\xC0\x7F') else: raise elif value_size == 8: def EncodeNonFiniteOrRaise(write, value): if value == _POS_INF: write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F') elif value == _NEG_INF: write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF') elif value != value: # NaN write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F') else: raise else: raise ValueError('Can\'t encode floating-point values that are ' '%d bytes long (only 4 or 8)' % value_size) def SpecificEncoder(field_number, is_repeated, is_packed): local_struct_pack = struct.pack if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, len(value) * value_size) for element in value: # This try/except block is going to be faster than any code that # we could write to check whether element is finite. 
try: write(local_struct_pack(format, element)) except SystemError: EncodeNonFiniteOrRaise(write, element) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) try: write(local_struct_pack(format, element)) except SystemError: EncodeNonFiniteOrRaise(write, element) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) try: write(local_struct_pack(format, value)) except SystemError: EncodeNonFiniteOrRaise(write, value) return EncodeField return SpecificEncoder
python
def _FloatingPointEncoder(wire_type, format): """Return a constructor for an encoder for float fields. This is like StructPackEncoder, but catches errors that may be due to passing non-finite floating-point values to struct.pack, and makes a second attempt to encode those values. Args: wire_type: The field's wire type, for encoding tags. format: The format string to pass to struct.pack(). """ value_size = struct.calcsize(format) if value_size == 4: def EncodeNonFiniteOrRaise(write, value): # Remember that the serialized form uses little-endian byte order. if value == _POS_INF: write(b'\x00\x00\x80\x7F') elif value == _NEG_INF: write(b'\x00\x00\x80\xFF') elif value != value: # NaN write(b'\x00\x00\xC0\x7F') else: raise elif value_size == 8: def EncodeNonFiniteOrRaise(write, value): if value == _POS_INF: write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F') elif value == _NEG_INF: write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF') elif value != value: # NaN write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F') else: raise else: raise ValueError('Can\'t encode floating-point values that are ' '%d bytes long (only 4 or 8)' % value_size) def SpecificEncoder(field_number, is_repeated, is_packed): local_struct_pack = struct.pack if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, len(value) * value_size) for element in value: # This try/except block is going to be faster than any code that # we could write to check whether element is finite. 
try: write(local_struct_pack(format, element)) except SystemError: EncodeNonFiniteOrRaise(write, element) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_type) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) try: write(local_struct_pack(format, element)) except SystemError: EncodeNonFiniteOrRaise(write, element) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_type) def EncodeField(write, value): write(tag_bytes) try: write(local_struct_pack(format, value)) except SystemError: EncodeNonFiniteOrRaise(write, value) return EncodeField return SpecificEncoder
[ "def", "_FloatingPointEncoder", "(", "wire_type", ",", "format", ")", ":", "value_size", "=", "struct", ".", "calcsize", "(", "format", ")", "if", "value_size", "==", "4", ":", "def", "EncodeNonFiniteOrRaise", "(", "write", ",", "value", ")", ":", "# Remember that the serialized form uses little-endian byte order.", "if", "value", "==", "_POS_INF", ":", "write", "(", "b'\\x00\\x00\\x80\\x7F'", ")", "elif", "value", "==", "_NEG_INF", ":", "write", "(", "b'\\x00\\x00\\x80\\xFF'", ")", "elif", "value", "!=", "value", ":", "# NaN", "write", "(", "b'\\x00\\x00\\xC0\\x7F'", ")", "else", ":", "raise", "elif", "value_size", "==", "8", ":", "def", "EncodeNonFiniteOrRaise", "(", "write", ",", "value", ")", ":", "if", "value", "==", "_POS_INF", ":", "write", "(", "b'\\x00\\x00\\x00\\x00\\x00\\x00\\xF0\\x7F'", ")", "elif", "value", "==", "_NEG_INF", ":", "write", "(", "b'\\x00\\x00\\x00\\x00\\x00\\x00\\xF0\\xFF'", ")", "elif", "value", "!=", "value", ":", "# NaN", "write", "(", "b'\\x00\\x00\\x00\\x00\\x00\\x00\\xF8\\x7F'", ")", "else", ":", "raise", "else", ":", "raise", "ValueError", "(", "'Can\\'t encode floating-point values that are '", "'%d bytes long (only 4 or 8)'", "%", "value_size", ")", "def", "SpecificEncoder", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "local_struct_pack", "=", "struct", ".", "pack", "if", "is_packed", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_LENGTH_DELIMITED", ")", "local_EncodeVarint", "=", "_EncodeVarint", "def", "EncodePackedField", "(", "write", ",", "value", ")", ":", "write", "(", "tag_bytes", ")", "local_EncodeVarint", "(", "write", ",", "len", "(", "value", ")", "*", "value_size", ")", "for", "element", "in", "value", ":", "# This try/except block is going to be faster than any code that", "# we could write to check whether element is finite.", "try", ":", "write", "(", "local_struct_pack", "(", "format", ",", "element", ")", ")", "except", "SystemError", ":", 
"EncodeNonFiniteOrRaise", "(", "write", ",", "element", ")", "return", "EncodePackedField", "elif", "is_repeated", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_type", ")", "def", "EncodeRepeatedField", "(", "write", ",", "value", ")", ":", "for", "element", "in", "value", ":", "write", "(", "tag_bytes", ")", "try", ":", "write", "(", "local_struct_pack", "(", "format", ",", "element", ")", ")", "except", "SystemError", ":", "EncodeNonFiniteOrRaise", "(", "write", ",", "element", ")", "return", "EncodeRepeatedField", "else", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_type", ")", "def", "EncodeField", "(", "write", ",", "value", ")", ":", "write", "(", "tag_bytes", ")", "try", ":", "write", "(", "local_struct_pack", "(", "format", ",", "value", ")", ")", "except", "SystemError", ":", "EncodeNonFiniteOrRaise", "(", "write", ",", "value", ")", "return", "EncodeField", "return", "SpecificEncoder" ]
Return a constructor for an encoder for float fields. This is like StructPackEncoder, but catches errors that may be due to passing non-finite floating-point values to struct.pack, and makes a second attempt to encode those values. Args: wire_type: The field's wire type, for encoding tags. format: The format string to pass to struct.pack().
[ "Return", "a", "constructor", "for", "an", "encoder", "for", "float", "fields", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L541-L615
29,123
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
BoolEncoder
def BoolEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a boolean field.""" false_byte = b'\x00' true_byte = b'\x01' if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, len(value)) for element in value: if element: write(true_byte) else: write(false_byte) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) if element: write(true_byte) else: write(false_byte) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT) def EncodeField(write, value): write(tag_bytes) if value: return write(true_byte) return write(false_byte) return EncodeField
python
def BoolEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a boolean field.""" false_byte = b'\x00' true_byte = b'\x01' if is_packed: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint def EncodePackedField(write, value): write(tag_bytes) local_EncodeVarint(write, len(value)) for element in value: if element: write(true_byte) else: write(false_byte) return EncodePackedField elif is_repeated: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT) def EncodeRepeatedField(write, value): for element in value: write(tag_bytes) if element: write(true_byte) else: write(false_byte) return EncodeRepeatedField else: tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT) def EncodeField(write, value): write(tag_bytes) if value: return write(true_byte) return write(false_byte) return EncodeField
[ "def", "BoolEncoder", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "false_byte", "=", "b'\\x00'", "true_byte", "=", "b'\\x01'", "if", "is_packed", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_LENGTH_DELIMITED", ")", "local_EncodeVarint", "=", "_EncodeVarint", "def", "EncodePackedField", "(", "write", ",", "value", ")", ":", "write", "(", "tag_bytes", ")", "local_EncodeVarint", "(", "write", ",", "len", "(", "value", ")", ")", "for", "element", "in", "value", ":", "if", "element", ":", "write", "(", "true_byte", ")", "else", ":", "write", "(", "false_byte", ")", "return", "EncodePackedField", "elif", "is_repeated", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_VARINT", ")", "def", "EncodeRepeatedField", "(", "write", ",", "value", ")", ":", "for", "element", "in", "value", ":", "write", "(", "tag_bytes", ")", "if", "element", ":", "write", "(", "true_byte", ")", "else", ":", "write", "(", "false_byte", ")", "return", "EncodeRepeatedField", "else", ":", "tag_bytes", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_VARINT", ")", "def", "EncodeField", "(", "write", ",", "value", ")", ":", "write", "(", "tag_bytes", ")", "if", "value", ":", "return", "write", "(", "true_byte", ")", "return", "write", "(", "false_byte", ")", "return", "EncodeField" ]
Returns an encoder for a boolean field.
[ "Returns", "an", "encoder", "for", "a", "boolean", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L645-L679
29,124
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
StringEncoder
def StringEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a string field.""" tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint local_len = len assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: encoded = element.encode('utf-8') write(tag) local_EncodeVarint(write, local_len(encoded)) write(encoded) return EncodeRepeatedField else: def EncodeField(write, value): encoded = value.encode('utf-8') write(tag) local_EncodeVarint(write, local_len(encoded)) return write(encoded) return EncodeField
python
def StringEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a string field.""" tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint local_len = len assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: encoded = element.encode('utf-8') write(tag) local_EncodeVarint(write, local_len(encoded)) write(encoded) return EncodeRepeatedField else: def EncodeField(write, value): encoded = value.encode('utf-8') write(tag) local_EncodeVarint(write, local_len(encoded)) return write(encoded) return EncodeField
[ "def", "StringEncoder", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "tag", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_LENGTH_DELIMITED", ")", "local_EncodeVarint", "=", "_EncodeVarint", "local_len", "=", "len", "assert", "not", "is_packed", "if", "is_repeated", ":", "def", "EncodeRepeatedField", "(", "write", ",", "value", ")", ":", "for", "element", "in", "value", ":", "encoded", "=", "element", ".", "encode", "(", "'utf-8'", ")", "write", "(", "tag", ")", "local_EncodeVarint", "(", "write", ",", "local_len", "(", "encoded", ")", ")", "write", "(", "encoded", ")", "return", "EncodeRepeatedField", "else", ":", "def", "EncodeField", "(", "write", ",", "value", ")", ":", "encoded", "=", "value", ".", "encode", "(", "'utf-8'", ")", "write", "(", "tag", ")", "local_EncodeVarint", "(", "write", ",", "local_len", "(", "encoded", ")", ")", "return", "write", "(", "encoded", ")", "return", "EncodeField" ]
Returns an encoder for a string field.
[ "Returns", "an", "encoder", "for", "a", "string", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L682-L703
29,125
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
GroupEncoder
def GroupEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a group field.""" start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP) end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP) assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: write(start_tag) element._InternalSerialize(write) write(end_tag) return EncodeRepeatedField else: def EncodeField(write, value): write(start_tag) value._InternalSerialize(write) return write(end_tag) return EncodeField
python
def GroupEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a group field.""" start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP) end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP) assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: write(start_tag) element._InternalSerialize(write) write(end_tag) return EncodeRepeatedField else: def EncodeField(write, value): write(start_tag) value._InternalSerialize(write) return write(end_tag) return EncodeField
[ "def", "GroupEncoder", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "start_tag", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_START_GROUP", ")", "end_tag", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_END_GROUP", ")", "assert", "not", "is_packed", "if", "is_repeated", ":", "def", "EncodeRepeatedField", "(", "write", ",", "value", ")", ":", "for", "element", "in", "value", ":", "write", "(", "start_tag", ")", "element", ".", "_InternalSerialize", "(", "write", ")", "write", "(", "end_tag", ")", "return", "EncodeRepeatedField", "else", ":", "def", "EncodeField", "(", "write", ",", "value", ")", ":", "write", "(", "start_tag", ")", "value", ".", "_InternalSerialize", "(", "write", ")", "return", "write", "(", "end_tag", ")", "return", "EncodeField" ]
Returns an encoder for a group field.
[ "Returns", "an", "encoder", "for", "a", "group", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L728-L746
29,126
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
MessageEncoder
def MessageEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a message field.""" tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: write(tag) local_EncodeVarint(write, element.ByteSize()) element._InternalSerialize(write) return EncodeRepeatedField else: def EncodeField(write, value): write(tag) local_EncodeVarint(write, value.ByteSize()) return value._InternalSerialize(write) return EncodeField
python
def MessageEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a message field.""" tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: write(tag) local_EncodeVarint(write, element.ByteSize()) element._InternalSerialize(write) return EncodeRepeatedField else: def EncodeField(write, value): write(tag) local_EncodeVarint(write, value.ByteSize()) return value._InternalSerialize(write) return EncodeField
[ "def", "MessageEncoder", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "tag", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_LENGTH_DELIMITED", ")", "local_EncodeVarint", "=", "_EncodeVarint", "assert", "not", "is_packed", "if", "is_repeated", ":", "def", "EncodeRepeatedField", "(", "write", ",", "value", ")", ":", "for", "element", "in", "value", ":", "write", "(", "tag", ")", "local_EncodeVarint", "(", "write", ",", "element", ".", "ByteSize", "(", ")", ")", "element", ".", "_InternalSerialize", "(", "write", ")", "return", "EncodeRepeatedField", "else", ":", "def", "EncodeField", "(", "write", ",", "value", ")", ":", "write", "(", "tag", ")", "local_EncodeVarint", "(", "write", ",", "value", ".", "ByteSize", "(", ")", ")", "return", "value", ".", "_InternalSerialize", "(", "write", ")", "return", "EncodeField" ]
Returns an encoder for a message field.
[ "Returns", "an", "encoder", "for", "a", "message", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L749-L767
29,127
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/caffe/_caffe_converter.py
convert
def convert(model, image_input_names=[], is_bgr=False, red_bias=0.0, blue_bias=0.0, green_bias=0.0, gray_bias=0.0, image_scale=1.0, class_labels=None, predicted_feature_name=None, model_precision=_MLMODEL_FULL_PRECISION): """ Convert a Caffe model to Core ML format. Parameters ---------- model: str | (str, str) | (str, str, str) | (str, str, dict) A trained Caffe neural network model which can be represented as: - Path on disk to a trained Caffe model (.caffemodel) - A tuple of two paths, where the first path is the path to the .caffemodel file while the second is the path to the deploy.prototxt. - A tuple of three paths, where the first path is the path to the trained .caffemodel file, the second is the path to the deploy.prototxt while the third is a path to the mean image binary, data in which is subtracted from the input image as a preprocessing step. - A tuple of two paths to .caffemodel and .prototxt and a dict with image input names as keys and paths to mean image binaryprotos as values. The keys should be same as the input names provided via the argument 'image_input_name'. image_input_names: [str] | str The name(s) of the input blob(s) in the Caffe model that can be treated as images by Core ML. All other inputs are treated as MultiArrays (N-D Arrays) by Core ML. is_bgr: bool | dict() Flag indicating the channel order the model internally uses to represent color images. Set to True if the internal channel order is BGR, otherwise it will be assumed RGB. This flag is applicable only if image_input_names is specified. To specify a different value for each image input, provide a dictionary with input names as keys. Note that this flag is about the models internal channel order. An input image can be passed to the model in any color pixel layout containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag determines how those pixel values get mapped to the internal multiarray representation. 
red_bias: float | dict() Bias value to be added to the red channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. blue_bias: float | dict() Bias value to be added to the the blue channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. green_bias: float | dict() Bias value to be added to the green channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. gray_bias: float | dict() Bias value to be added to the input image (in grayscale). Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. image_scale: float | dict() Value by which the input images will be scaled before bias is added and Core ML model makes a prediction. Defaults to 1.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. class_labels: str Filepath where classes are parsed as a list of newline separated strings. Class labels map the index of the output of a neural network to labels in a classifier. Provide this argument to get a model of type classifier. predicted_feature_name: str Name of the output feature for the class labels exposed in the Core ML model (applies to classifiers only). Defaults to 'classLabel' model_precision: str Precision at which model will be saved. Currently full precision (float) and half precision (float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision). Returns ------- model: MLModel Model in Core ML format. Examples -------- .. 
sourcecode:: python # Convert it with default input and output names >>> import coremltools >>> coreml_model = coremltools.converters.caffe.convert('my_caffe_model.caffemodel') # Saving the Core ML model to a file. >>> coreml_model.save('my_model.mlmodel') Sometimes, critical information in the Caffe converter is missing from the .caffemodel file. This information is present in the deploy.prototxt file. You can provide us with both files in the conversion process. .. sourcecode:: python >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt')) Some models (like Resnet-50) also require a mean image file which is subtracted from the input image before passing through the network. This file can also be provided during conversion: .. sourcecode:: python >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', ... 'my_deploy.prototxt', 'mean_image.binaryproto'), image_input_names = 'image_input') # Multiple mean images for preprocessing >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', ... 'my_deploy.prototxt', {'image1': 'mean_image1.binaryproto', 'image2': 'mean_image2.binaryproto'}), ... image_input_names = ['image1', 'image2']) # Multiple image inputs and bias/scale values >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt'), ... red_bias = {'image1': -100, 'image2': -110}, ... green_bias = {'image1': -90, 'image2': -125}, ... blue_bias = {'image1': -105, 'image2': -120}, ... image_input_names = ['image1', 'image2']) Input and output names used in the interface of the converted Core ML model are inferred from the .prototxt file, which contains a description of the network architecture. Input names are read from the input layer definition in the .prototxt. By default, they are of type MultiArray. Argument "image_input_names" can be used to assign image type to specific inputs. 
All the blobs that are "dangling", i.e. which do not feed as input to any other layer are taken as outputs. The .prototxt file can be modified to specify custom input and output names. The converted Core ML model is of type classifier when the argument "class_labels" is specified. Advanced usage with custom classifiers, and images: .. sourcecode:: python # Mark some inputs as Images >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'), ... image_input_names = 'my_image_input') # Export as a classifier with classes from a file >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'), ... image_input_names = 'my_image_input', class_labels = 'labels.txt') Sometimes the converter might return a message about not able to infer input data dimensions. This happens when the input size information is absent from the deploy.prototxt file. This can be easily provided by editing the .prototxt in a text editor. Simply add a snippet in the beginning, similar to the following, for each of the inputs to the model: .. code-block:: bash input: "my_image_input" input_dim: 1 input_dim: 3 input_dim: 227 input_dim: 227 Here we have specified an input with dimensions (1,3,227,227), using Caffe's convention, in the order (batch, channel, height, width). Input name string ("my_image_input") must also match the name of the input (or "bottom", as inputs are known in Caffe) of the first layer in the .prototxt. 
""" from ...models import MLModel from ...models.utils import convert_neural_network_weights_to_fp16 as convert_neural_network_weights_to_fp16 if model_precision not in _VALID_MLMODEL_PRECISION_TYPES: raise RuntimeError('Model precision {} is not valid'.format(model_precision)) import tempfile model_path = tempfile.mktemp() _export(model_path, model, image_input_names, is_bgr, red_bias, blue_bias, green_bias, gray_bias, image_scale, class_labels, predicted_feature_name) model = MLModel(model_path) if model_precision == _MLMODEL_HALF_PRECISION and model is not None: model = convert_neural_network_weights_to_fp16(model) return model
python
def convert(model, image_input_names=[], is_bgr=False, red_bias=0.0, blue_bias=0.0, green_bias=0.0, gray_bias=0.0, image_scale=1.0, class_labels=None, predicted_feature_name=None, model_precision=_MLMODEL_FULL_PRECISION): """ Convert a Caffe model to Core ML format. Parameters ---------- model: str | (str, str) | (str, str, str) | (str, str, dict) A trained Caffe neural network model which can be represented as: - Path on disk to a trained Caffe model (.caffemodel) - A tuple of two paths, where the first path is the path to the .caffemodel file while the second is the path to the deploy.prototxt. - A tuple of three paths, where the first path is the path to the trained .caffemodel file, the second is the path to the deploy.prototxt while the third is a path to the mean image binary, data in which is subtracted from the input image as a preprocessing step. - A tuple of two paths to .caffemodel and .prototxt and a dict with image input names as keys and paths to mean image binaryprotos as values. The keys should be same as the input names provided via the argument 'image_input_name'. image_input_names: [str] | str The name(s) of the input blob(s) in the Caffe model that can be treated as images by Core ML. All other inputs are treated as MultiArrays (N-D Arrays) by Core ML. is_bgr: bool | dict() Flag indicating the channel order the model internally uses to represent color images. Set to True if the internal channel order is BGR, otherwise it will be assumed RGB. This flag is applicable only if image_input_names is specified. To specify a different value for each image input, provide a dictionary with input names as keys. Note that this flag is about the models internal channel order. An input image can be passed to the model in any color pixel layout containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag determines how those pixel values get mapped to the internal multiarray representation. 
red_bias: float | dict() Bias value to be added to the red channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. blue_bias: float | dict() Bias value to be added to the the blue channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. green_bias: float | dict() Bias value to be added to the green channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. gray_bias: float | dict() Bias value to be added to the input image (in grayscale). Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. image_scale: float | dict() Value by which the input images will be scaled before bias is added and Core ML model makes a prediction. Defaults to 1.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. class_labels: str Filepath where classes are parsed as a list of newline separated strings. Class labels map the index of the output of a neural network to labels in a classifier. Provide this argument to get a model of type classifier. predicted_feature_name: str Name of the output feature for the class labels exposed in the Core ML model (applies to classifiers only). Defaults to 'classLabel' model_precision: str Precision at which model will be saved. Currently full precision (float) and half precision (float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision). Returns ------- model: MLModel Model in Core ML format. Examples -------- .. 
sourcecode:: python # Convert it with default input and output names >>> import coremltools >>> coreml_model = coremltools.converters.caffe.convert('my_caffe_model.caffemodel') # Saving the Core ML model to a file. >>> coreml_model.save('my_model.mlmodel') Sometimes, critical information in the Caffe converter is missing from the .caffemodel file. This information is present in the deploy.prototxt file. You can provide us with both files in the conversion process. .. sourcecode:: python >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt')) Some models (like Resnet-50) also require a mean image file which is subtracted from the input image before passing through the network. This file can also be provided during conversion: .. sourcecode:: python >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', ... 'my_deploy.prototxt', 'mean_image.binaryproto'), image_input_names = 'image_input') # Multiple mean images for preprocessing >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', ... 'my_deploy.prototxt', {'image1': 'mean_image1.binaryproto', 'image2': 'mean_image2.binaryproto'}), ... image_input_names = ['image1', 'image2']) # Multiple image inputs and bias/scale values >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt'), ... red_bias = {'image1': -100, 'image2': -110}, ... green_bias = {'image1': -90, 'image2': -125}, ... blue_bias = {'image1': -105, 'image2': -120}, ... image_input_names = ['image1', 'image2']) Input and output names used in the interface of the converted Core ML model are inferred from the .prototxt file, which contains a description of the network architecture. Input names are read from the input layer definition in the .prototxt. By default, they are of type MultiArray. Argument "image_input_names" can be used to assign image type to specific inputs. 
All the blobs that are "dangling", i.e. which do not feed as input to any other layer are taken as outputs. The .prototxt file can be modified to specify custom input and output names. The converted Core ML model is of type classifier when the argument "class_labels" is specified. Advanced usage with custom classifiers, and images: .. sourcecode:: python # Mark some inputs as Images >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'), ... image_input_names = 'my_image_input') # Export as a classifier with classes from a file >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'), ... image_input_names = 'my_image_input', class_labels = 'labels.txt') Sometimes the converter might return a message about not able to infer input data dimensions. This happens when the input size information is absent from the deploy.prototxt file. This can be easily provided by editing the .prototxt in a text editor. Simply add a snippet in the beginning, similar to the following, for each of the inputs to the model: .. code-block:: bash input: "my_image_input" input_dim: 1 input_dim: 3 input_dim: 227 input_dim: 227 Here we have specified an input with dimensions (1,3,227,227), using Caffe's convention, in the order (batch, channel, height, width). Input name string ("my_image_input") must also match the name of the input (or "bottom", as inputs are known in Caffe) of the first layer in the .prototxt. 
""" from ...models import MLModel from ...models.utils import convert_neural_network_weights_to_fp16 as convert_neural_network_weights_to_fp16 if model_precision not in _VALID_MLMODEL_PRECISION_TYPES: raise RuntimeError('Model precision {} is not valid'.format(model_precision)) import tempfile model_path = tempfile.mktemp() _export(model_path, model, image_input_names, is_bgr, red_bias, blue_bias, green_bias, gray_bias, image_scale, class_labels, predicted_feature_name) model = MLModel(model_path) if model_precision == _MLMODEL_HALF_PRECISION and model is not None: model = convert_neural_network_weights_to_fp16(model) return model
[ "def", "convert", "(", "model", ",", "image_input_names", "=", "[", "]", ",", "is_bgr", "=", "False", ",", "red_bias", "=", "0.0", ",", "blue_bias", "=", "0.0", ",", "green_bias", "=", "0.0", ",", "gray_bias", "=", "0.0", ",", "image_scale", "=", "1.0", ",", "class_labels", "=", "None", ",", "predicted_feature_name", "=", "None", ",", "model_precision", "=", "_MLMODEL_FULL_PRECISION", ")", ":", "from", ".", ".", ".", "models", "import", "MLModel", "from", ".", ".", ".", "models", ".", "utils", "import", "convert_neural_network_weights_to_fp16", "as", "convert_neural_network_weights_to_fp16", "if", "model_precision", "not", "in", "_VALID_MLMODEL_PRECISION_TYPES", ":", "raise", "RuntimeError", "(", "'Model precision {} is not valid'", ".", "format", "(", "model_precision", ")", ")", "import", "tempfile", "model_path", "=", "tempfile", ".", "mktemp", "(", ")", "_export", "(", "model_path", ",", "model", ",", "image_input_names", ",", "is_bgr", ",", "red_bias", ",", "blue_bias", ",", "green_bias", ",", "gray_bias", ",", "image_scale", ",", "class_labels", ",", "predicted_feature_name", ")", "model", "=", "MLModel", "(", "model_path", ")", "if", "model_precision", "==", "_MLMODEL_HALF_PRECISION", "and", "model", "is", "not", "None", ":", "model", "=", "convert_neural_network_weights_to_fp16", "(", "model", ")", "return", "model" ]
Convert a Caffe model to Core ML format. Parameters ---------- model: str | (str, str) | (str, str, str) | (str, str, dict) A trained Caffe neural network model which can be represented as: - Path on disk to a trained Caffe model (.caffemodel) - A tuple of two paths, where the first path is the path to the .caffemodel file while the second is the path to the deploy.prototxt. - A tuple of three paths, where the first path is the path to the trained .caffemodel file, the second is the path to the deploy.prototxt while the third is a path to the mean image binary, data in which is subtracted from the input image as a preprocessing step. - A tuple of two paths to .caffemodel and .prototxt and a dict with image input names as keys and paths to mean image binaryprotos as values. The keys should be same as the input names provided via the argument 'image_input_name'. image_input_names: [str] | str The name(s) of the input blob(s) in the Caffe model that can be treated as images by Core ML. All other inputs are treated as MultiArrays (N-D Arrays) by Core ML. is_bgr: bool | dict() Flag indicating the channel order the model internally uses to represent color images. Set to True if the internal channel order is BGR, otherwise it will be assumed RGB. This flag is applicable only if image_input_names is specified. To specify a different value for each image input, provide a dictionary with input names as keys. Note that this flag is about the models internal channel order. An input image can be passed to the model in any color pixel layout containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag determines how those pixel values get mapped to the internal multiarray representation. red_bias: float | dict() Bias value to be added to the red channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. 
blue_bias: float | dict() Bias value to be added to the the blue channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. green_bias: float | dict() Bias value to be added to the green channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. gray_bias: float | dict() Bias value to be added to the input image (in grayscale). Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. image_scale: float | dict() Value by which the input images will be scaled before bias is added and Core ML model makes a prediction. Defaults to 1.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. class_labels: str Filepath where classes are parsed as a list of newline separated strings. Class labels map the index of the output of a neural network to labels in a classifier. Provide this argument to get a model of type classifier. predicted_feature_name: str Name of the output feature for the class labels exposed in the Core ML model (applies to classifiers only). Defaults to 'classLabel' model_precision: str Precision at which model will be saved. Currently full precision (float) and half precision (float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision). Returns ------- model: MLModel Model in Core ML format. Examples -------- .. sourcecode:: python # Convert it with default input and output names >>> import coremltools >>> coreml_model = coremltools.converters.caffe.convert('my_caffe_model.caffemodel') # Saving the Core ML model to a file. 
>>> coreml_model.save('my_model.mlmodel') Sometimes, critical information in the Caffe converter is missing from the .caffemodel file. This information is present in the deploy.prototxt file. You can provide us with both files in the conversion process. .. sourcecode:: python >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt')) Some models (like Resnet-50) also require a mean image file which is subtracted from the input image before passing through the network. This file can also be provided during conversion: .. sourcecode:: python >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', ... 'my_deploy.prototxt', 'mean_image.binaryproto'), image_input_names = 'image_input') # Multiple mean images for preprocessing >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', ... 'my_deploy.prototxt', {'image1': 'mean_image1.binaryproto', 'image2': 'mean_image2.binaryproto'}), ... image_input_names = ['image1', 'image2']) # Multiple image inputs and bias/scale values >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt'), ... red_bias = {'image1': -100, 'image2': -110}, ... green_bias = {'image1': -90, 'image2': -125}, ... blue_bias = {'image1': -105, 'image2': -120}, ... image_input_names = ['image1', 'image2']) Input and output names used in the interface of the converted Core ML model are inferred from the .prototxt file, which contains a description of the network architecture. Input names are read from the input layer definition in the .prototxt. By default, they are of type MultiArray. Argument "image_input_names" can be used to assign image type to specific inputs. All the blobs that are "dangling", i.e. which do not feed as input to any other layer are taken as outputs. The .prototxt file can be modified to specify custom input and output names. 
The converted Core ML model is of type classifier when the argument "class_labels" is specified. Advanced usage with custom classifiers, and images: .. sourcecode:: python # Mark some inputs as Images >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'), ... image_input_names = 'my_image_input') # Export as a classifier with classes from a file >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'), ... image_input_names = 'my_image_input', class_labels = 'labels.txt') Sometimes the converter might return a message about not able to infer input data dimensions. This happens when the input size information is absent from the deploy.prototxt file. This can be easily provided by editing the .prototxt in a text editor. Simply add a snippet in the beginning, similar to the following, for each of the inputs to the model: .. code-block:: bash input: "my_image_input" input_dim: 1 input_dim: 3 input_dim: 227 input_dim: 227 Here we have specified an input with dimensions (1,3,227,227), using Caffe's convention, in the order (batch, channel, height, width). Input name string ("my_image_input") must also match the name of the input (or "bottom", as inputs are known in Caffe) of the first layer in the .prototxt.
[ "Convert", "a", "Caffe", "model", "to", "Core", "ML", "format", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/caffe/_caffe_converter.py#L10-L197
29,128
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_svm_common.py
_set_kernel
def _set_kernel(model, spec): """ Takes the sklearn SVM model and returns the spec with the protobuf kernel for that model. """ def gamma_value(model): if(model.gamma == 'auto'): # auto gamma value is 1/num_features return 1/float(len(model.support_vectors_[0])) else: return model.gamma result = None if(model.kernel == 'linear'): spec.kernel.linearKernel.MergeFromString(b'') # hack to set kernel to an empty type elif(model.kernel == 'rbf'): spec.kernel.rbfKernel.gamma = gamma_value(model) elif(model.kernel == 'poly'): spec.kernel.polyKernel.gamma = gamma_value(model) spec.kernel.polyKernel.c = model.coef0 spec.kernel.polyKernel.degree = model.degree elif(model.kernel == 'sigmoid'): spec.kernel.sigmoidKernel.gamma = gamma_value(model) spec.kernel.sigmoidKernel.c = model.coef0 else: raise ValueError('Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid.') return result
python
def _set_kernel(model, spec): """ Takes the sklearn SVM model and returns the spec with the protobuf kernel for that model. """ def gamma_value(model): if(model.gamma == 'auto'): # auto gamma value is 1/num_features return 1/float(len(model.support_vectors_[0])) else: return model.gamma result = None if(model.kernel == 'linear'): spec.kernel.linearKernel.MergeFromString(b'') # hack to set kernel to an empty type elif(model.kernel == 'rbf'): spec.kernel.rbfKernel.gamma = gamma_value(model) elif(model.kernel == 'poly'): spec.kernel.polyKernel.gamma = gamma_value(model) spec.kernel.polyKernel.c = model.coef0 spec.kernel.polyKernel.degree = model.degree elif(model.kernel == 'sigmoid'): spec.kernel.sigmoidKernel.gamma = gamma_value(model) spec.kernel.sigmoidKernel.c = model.coef0 else: raise ValueError('Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid.') return result
[ "def", "_set_kernel", "(", "model", ",", "spec", ")", ":", "def", "gamma_value", "(", "model", ")", ":", "if", "(", "model", ".", "gamma", "==", "'auto'", ")", ":", "# auto gamma value is 1/num_features", "return", "1", "/", "float", "(", "len", "(", "model", ".", "support_vectors_", "[", "0", "]", ")", ")", "else", ":", "return", "model", ".", "gamma", "result", "=", "None", "if", "(", "model", ".", "kernel", "==", "'linear'", ")", ":", "spec", ".", "kernel", ".", "linearKernel", ".", "MergeFromString", "(", "b''", ")", "# hack to set kernel to an empty type", "elif", "(", "model", ".", "kernel", "==", "'rbf'", ")", ":", "spec", ".", "kernel", ".", "rbfKernel", ".", "gamma", "=", "gamma_value", "(", "model", ")", "elif", "(", "model", ".", "kernel", "==", "'poly'", ")", ":", "spec", ".", "kernel", ".", "polyKernel", ".", "gamma", "=", "gamma_value", "(", "model", ")", "spec", ".", "kernel", ".", "polyKernel", ".", "c", "=", "model", ".", "coef0", "spec", ".", "kernel", ".", "polyKernel", ".", "degree", "=", "model", ".", "degree", "elif", "(", "model", ".", "kernel", "==", "'sigmoid'", ")", ":", "spec", ".", "kernel", ".", "sigmoidKernel", ".", "gamma", "=", "gamma_value", "(", "model", ")", "spec", ".", "kernel", ".", "sigmoidKernel", ".", "c", "=", "model", ".", "coef0", "else", ":", "raise", "ValueError", "(", "'Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid.'", ")", "return", "result" ]
Takes the sklearn SVM model and returns the spec with the protobuf kernel for that model.
[ "Takes", "the", "sklearn", "SVM", "model", "and", "returns", "the", "spec", "with", "the", "protobuf", "kernel", "for", "that", "model", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_svm_common.py#L11-L37
29,129
apple/turicreate
src/unity/python/turicreate/data_structures/sframe_builder.py
SFrameBuilder.append
def append(self, data, segment=0): """ Append a single row to an SFrame. Throws a RuntimeError if one or more column's type is incompatible with a type appended. Parameters ---------- data : iterable An iterable representation of a single row. segment : int The segment to write this row. Each segment is numbered sequentially, starting with 0. Any value in segment 1 will be after any value in segment 0, and the order of rows in each segment is preserved as they are added. """ # Assume this case refers to an SFrame with a single column if not hasattr(data, '__iter__'): data = [data] self._builder.append(data, segment)
python
def append(self, data, segment=0): """ Append a single row to an SFrame. Throws a RuntimeError if one or more column's type is incompatible with a type appended. Parameters ---------- data : iterable An iterable representation of a single row. segment : int The segment to write this row. Each segment is numbered sequentially, starting with 0. Any value in segment 1 will be after any value in segment 0, and the order of rows in each segment is preserved as they are added. """ # Assume this case refers to an SFrame with a single column if not hasattr(data, '__iter__'): data = [data] self._builder.append(data, segment)
[ "def", "append", "(", "self", ",", "data", ",", "segment", "=", "0", ")", ":", "# Assume this case refers to an SFrame with a single column", "if", "not", "hasattr", "(", "data", ",", "'__iter__'", ")", ":", "data", "=", "[", "data", "]", "self", ".", "_builder", ".", "append", "(", "data", ",", "segment", ")" ]
Append a single row to an SFrame. Throws a RuntimeError if one or more column's type is incompatible with a type appended. Parameters ---------- data : iterable An iterable representation of a single row. segment : int The segment to write this row. Each segment is numbered sequentially, starting with 0. Any value in segment 1 will be after any value in segment 0, and the order of rows in each segment is preserved as they are added.
[ "Append", "a", "single", "row", "to", "an", "SFrame", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe_builder.py#L108-L129
29,130
apple/turicreate
src/unity/python/turicreate/data_structures/sframe_builder.py
SFrameBuilder.append_multiple
def append_multiple(self, data, segment=0): """ Append multiple rows to an SFrame. Throws a RuntimeError if one or more column's type is incompatible with a type appended. Parameters ---------- data : iterable[iterable] A collection of multiple iterables, each representing a single row. segment : int The segment to write the given rows. Each segment is numbered sequentially, starting with 0. Any value in segment 1 will be after any value in segment 0, and the order of rows in each segment is preserved as they are added. """ if not hasattr(data, '__iter__'): raise TypeError("append_multiple must be passed an iterable object") tmp_list = [] # Avoid copy in cases that we are passed materialized data that is # smaller than our block size if hasattr(data, '__len__'): if len(data) <= self._block_size: self._builder.append_multiple(data, segment) return for i in data: tmp_list.append(i) if len(tmp_list) >= self._block_size: self._builder.append_multiple(tmp_list, segment) tmp_list = [] if len(tmp_list) > 0: self._builder.append_multiple(tmp_list, segment)
python
def append_multiple(self, data, segment=0): """ Append multiple rows to an SFrame. Throws a RuntimeError if one or more column's type is incompatible with a type appended. Parameters ---------- data : iterable[iterable] A collection of multiple iterables, each representing a single row. segment : int The segment to write the given rows. Each segment is numbered sequentially, starting with 0. Any value in segment 1 will be after any value in segment 0, and the order of rows in each segment is preserved as they are added. """ if not hasattr(data, '__iter__'): raise TypeError("append_multiple must be passed an iterable object") tmp_list = [] # Avoid copy in cases that we are passed materialized data that is # smaller than our block size if hasattr(data, '__len__'): if len(data) <= self._block_size: self._builder.append_multiple(data, segment) return for i in data: tmp_list.append(i) if len(tmp_list) >= self._block_size: self._builder.append_multiple(tmp_list, segment) tmp_list = [] if len(tmp_list) > 0: self._builder.append_multiple(tmp_list, segment)
[ "def", "append_multiple", "(", "self", ",", "data", ",", "segment", "=", "0", ")", ":", "if", "not", "hasattr", "(", "data", ",", "'__iter__'", ")", ":", "raise", "TypeError", "(", "\"append_multiple must be passed an iterable object\"", ")", "tmp_list", "=", "[", "]", "# Avoid copy in cases that we are passed materialized data that is", "# smaller than our block size", "if", "hasattr", "(", "data", ",", "'__len__'", ")", ":", "if", "len", "(", "data", ")", "<=", "self", ".", "_block_size", ":", "self", ".", "_builder", ".", "append_multiple", "(", "data", ",", "segment", ")", "return", "for", "i", "in", "data", ":", "tmp_list", ".", "append", "(", "i", ")", "if", "len", "(", "tmp_list", ")", ">=", "self", ".", "_block_size", ":", "self", ".", "_builder", ".", "append_multiple", "(", "tmp_list", ",", "segment", ")", "tmp_list", "=", "[", "]", "if", "len", "(", "tmp_list", ")", ">", "0", ":", "self", ".", "_builder", ".", "append_multiple", "(", "tmp_list", ",", "segment", ")" ]
Append multiple rows to an SFrame. Throws a RuntimeError if one or more column's type is incompatible with a type appended. Parameters ---------- data : iterable[iterable] A collection of multiple iterables, each representing a single row. segment : int The segment to write the given rows. Each segment is numbered sequentially, starting with 0. Any value in segment 1 will be after any value in segment 0, and the order of rows in each segment is preserved as they are added.
[ "Append", "multiple", "rows", "to", "an", "SFrame", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sframe_builder.py#L131-L166
29,131
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/tools/stage.py
InstallTargetClass.targets_to_stage
def targets_to_stage(self, source_targets, ps): """Given the list of source targets explicitly passed to 'stage', returns the list of targets which must be staged.""" result = [] # Traverse the dependencies, if needed. if ps.get('install-dependencies') == ['on']: source_targets = self.collect_targets(source_targets) # Filter the target types, if needed. included_types = ps.get('install-type') for r in source_targets: ty = r.type() if ty: # Do not stage searched libs. if ty != "SEARCHED_LIB": if included_types: if self.include_type(ty, included_types): result.append(r) else: result.append(r) elif not included_types: # Don't install typeless target if there is an explicit list of # allowed types. result.append(r) return result
python
def targets_to_stage(self, source_targets, ps): """Given the list of source targets explicitly passed to 'stage', returns the list of targets which must be staged.""" result = [] # Traverse the dependencies, if needed. if ps.get('install-dependencies') == ['on']: source_targets = self.collect_targets(source_targets) # Filter the target types, if needed. included_types = ps.get('install-type') for r in source_targets: ty = r.type() if ty: # Do not stage searched libs. if ty != "SEARCHED_LIB": if included_types: if self.include_type(ty, included_types): result.append(r) else: result.append(r) elif not included_types: # Don't install typeless target if there is an explicit list of # allowed types. result.append(r) return result
[ "def", "targets_to_stage", "(", "self", ",", "source_targets", ",", "ps", ")", ":", "result", "=", "[", "]", "# Traverse the dependencies, if needed.", "if", "ps", ".", "get", "(", "'install-dependencies'", ")", "==", "[", "'on'", "]", ":", "source_targets", "=", "self", ".", "collect_targets", "(", "source_targets", ")", "# Filter the target types, if needed.", "included_types", "=", "ps", ".", "get", "(", "'install-type'", ")", "for", "r", "in", "source_targets", ":", "ty", "=", "r", ".", "type", "(", ")", "if", "ty", ":", "# Do not stage searched libs.", "if", "ty", "!=", "\"SEARCHED_LIB\"", ":", "if", "included_types", ":", "if", "self", ".", "include_type", "(", "ty", ",", "included_types", ")", ":", "result", ".", "append", "(", "r", ")", "else", ":", "result", ".", "append", "(", "r", ")", "elif", "not", "included_types", ":", "# Don't install typeless target if there is an explicit list of", "# allowed types.", "result", ".", "append", "(", "r", ")", "return", "result" ]
Given the list of source targets explicitly passed to 'stage', returns the list of targets which must be staged.
[ "Given", "the", "list", "of", "source", "targets", "explicitly", "passed", "to", "stage", "returns", "the", "list", "of", "targets", "which", "must", "be", "staged", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/stage.py#L139-L166
29,132
apple/turicreate
src/unity/python/turicreate/config/__init__.py
init_logger
def init_logger(): """ Initialize the logging configuration for the turicreate package. This does not affect the root logging config. """ import logging as _logging import logging.config # Package level logger _logging.config.dictConfig({ 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': '%(asctime)s [%(levelname)s] %(name)s, %(lineno)s: %(message)s' }, 'brief': { 'format': '[%(levelname)s] %(name)s: %(message)s' } }, 'handlers': { 'default': { 'class': 'logging.StreamHandler', 'formatter': 'brief' }, 'file': { 'class': 'logging.FileHandler', 'formatter': 'standard', 'filename': _client_log_file, 'encoding': 'UTF-8', 'delay': 'False', } }, 'loggers': { _root_package_name: { 'handlers': ['default', 'file'], 'propagate': 'True' } } }) # Set module specific log levels _logging.getLogger('requests').setLevel(_logging.CRITICAL) if _i_am_a_lambda_worker(): _logging.getLogger(_root_package_name).setLevel(_logging.WARNING) else: _logging.getLogger(_root_package_name).setLevel(_logging.INFO)
python
def init_logger(): """ Initialize the logging configuration for the turicreate package. This does not affect the root logging config. """ import logging as _logging import logging.config # Package level logger _logging.config.dictConfig({ 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': '%(asctime)s [%(levelname)s] %(name)s, %(lineno)s: %(message)s' }, 'brief': { 'format': '[%(levelname)s] %(name)s: %(message)s' } }, 'handlers': { 'default': { 'class': 'logging.StreamHandler', 'formatter': 'brief' }, 'file': { 'class': 'logging.FileHandler', 'formatter': 'standard', 'filename': _client_log_file, 'encoding': 'UTF-8', 'delay': 'False', } }, 'loggers': { _root_package_name: { 'handlers': ['default', 'file'], 'propagate': 'True' } } }) # Set module specific log levels _logging.getLogger('requests').setLevel(_logging.CRITICAL) if _i_am_a_lambda_worker(): _logging.getLogger(_root_package_name).setLevel(_logging.WARNING) else: _logging.getLogger(_root_package_name).setLevel(_logging.INFO)
[ "def", "init_logger", "(", ")", ":", "import", "logging", "as", "_logging", "import", "logging", ".", "config", "# Package level logger", "_logging", ".", "config", ".", "dictConfig", "(", "{", "'version'", ":", "1", ",", "'disable_existing_loggers'", ":", "False", ",", "'formatters'", ":", "{", "'standard'", ":", "{", "'format'", ":", "'%(asctime)s [%(levelname)s] %(name)s, %(lineno)s: %(message)s'", "}", ",", "'brief'", ":", "{", "'format'", ":", "'[%(levelname)s] %(name)s: %(message)s'", "}", "}", ",", "'handlers'", ":", "{", "'default'", ":", "{", "'class'", ":", "'logging.StreamHandler'", ",", "'formatter'", ":", "'brief'", "}", ",", "'file'", ":", "{", "'class'", ":", "'logging.FileHandler'", ",", "'formatter'", ":", "'standard'", ",", "'filename'", ":", "_client_log_file", ",", "'encoding'", ":", "'UTF-8'", ",", "'delay'", ":", "'False'", ",", "}", "}", ",", "'loggers'", ":", "{", "_root_package_name", ":", "{", "'handlers'", ":", "[", "'default'", ",", "'file'", "]", ",", "'propagate'", ":", "'True'", "}", "}", "}", ")", "# Set module specific log levels", "_logging", ".", "getLogger", "(", "'requests'", ")", ".", "setLevel", "(", "_logging", ".", "CRITICAL", ")", "if", "_i_am_a_lambda_worker", "(", ")", ":", "_logging", ".", "getLogger", "(", "_root_package_name", ")", ".", "setLevel", "(", "_logging", ".", "WARNING", ")", "else", ":", "_logging", ".", "getLogger", "(", "_root_package_name", ")", ".", "setLevel", "(", "_logging", ".", "INFO", ")" ]
Initialize the logging configuration for the turicreate package. This does not affect the root logging config.
[ "Initialize", "the", "logging", "configuration", "for", "the", "turicreate", "package", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/config/__init__.py#L34-L81
29,133
apple/turicreate
src/unity/python/turicreate/config/__init__.py
get_environment_config
def get_environment_config(): """ Returns all the Turi Create configuration variables that can only be set via environment variables. - *TURI_FILEIO_WRITER_BUFFER_SIZE*: The file write buffer size. - *TURI_FILEIO_READER_BUFFER_SIZE*: The file read buffer size. - *OMP_NUM_THREADS*: The maximum number of threads to use for parallel processing. Returns ------- Returns a dictionary of {key:value,..} """ from .._connect import main as _glconnect unity = _glconnect.get_unity() return unity.list_globals(False)
python
def get_environment_config(): """ Returns all the Turi Create configuration variables that can only be set via environment variables. - *TURI_FILEIO_WRITER_BUFFER_SIZE*: The file write buffer size. - *TURI_FILEIO_READER_BUFFER_SIZE*: The file read buffer size. - *OMP_NUM_THREADS*: The maximum number of threads to use for parallel processing. Returns ------- Returns a dictionary of {key:value,..} """ from .._connect import main as _glconnect unity = _glconnect.get_unity() return unity.list_globals(False)
[ "def", "get_environment_config", "(", ")", ":", "from", ".", ".", "_connect", "import", "main", "as", "_glconnect", "unity", "=", "_glconnect", ".", "get_unity", "(", ")", "return", "unity", ".", "list_globals", "(", "False", ")" ]
Returns all the Turi Create configuration variables that can only be set via environment variables. - *TURI_FILEIO_WRITER_BUFFER_SIZE*: The file write buffer size. - *TURI_FILEIO_READER_BUFFER_SIZE*: The file read buffer size. - *OMP_NUM_THREADS*: The maximum number of threads to use for parallel processing. Returns ------- Returns a dictionary of {key:value,..}
[ "Returns", "all", "the", "Turi", "Create", "configuration", "variables", "that", "can", "only", "be", "set", "via", "environment", "variables", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/config/__init__.py#L145-L160
29,134
apple/turicreate
src/unity/python/turicreate/config/__init__.py
set_log_level
def set_log_level(level): """ Sets the log level. Lower log levels log more. if level is 8, nothing is logged. If level is 0, everything is logged. """ from .._connect import main as _glconnect unity = _glconnect.get_unity() return unity.set_log_level(level)
python
def set_log_level(level): """ Sets the log level. Lower log levels log more. if level is 8, nothing is logged. If level is 0, everything is logged. """ from .._connect import main as _glconnect unity = _glconnect.get_unity() return unity.set_log_level(level)
[ "def", "set_log_level", "(", "level", ")", ":", "from", ".", ".", "_connect", "import", "main", "as", "_glconnect", "unity", "=", "_glconnect", ".", "get_unity", "(", ")", "return", "unity", ".", "set_log_level", "(", "level", ")" ]
Sets the log level. Lower log levels log more. if level is 8, nothing is logged. If level is 0, everything is logged.
[ "Sets", "the", "log", "level", ".", "Lower", "log", "levels", "log", "more", ".", "if", "level", "is", "8", "nothing", "is", "logged", ".", "If", "level", "is", "0", "everything", "is", "logged", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/config/__init__.py#L162-L170
29,135
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
load_sgraph
def load_sgraph(filename, format='binary', delimiter='auto'): """ Load SGraph from text file or previously saved SGraph binary. Parameters ---------- filename : string Location of the file. Can be a local path or a remote URL. format : {'binary', 'snap', 'csv', 'tsv'}, optional Format to of the file to load. - 'binary': native graph format obtained from `SGraph.save`. - 'snap': tab or space separated edge list format with comments, used in the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_. - 'csv': comma-separated edge list without header or comments. - 'tsv': tab-separated edge list without header or comments. delimiter : str, optional Specifying the Delimiter used in 'snap', 'csv' or 'tsv' format. Those format has default delimiter, but sometimes it is useful to overwrite the default delimiter. Returns ------- out : SGraph Loaded SGraph. See Also -------- SGraph, SGraph.save Examples -------- >>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)]) Save and load in binary format. >>> g.save('mygraph') >>> g2 = turicreate.load_sgraph('mygraph') """ if not format in ['binary', 'snap', 'csv', 'tsv']: raise ValueError('Invalid format: %s' % format) with cython_context(): g = None if format is 'binary': proxy = glconnect.get_unity().load_graph(_make_internal_url(filename)) g = SGraph(_proxy=proxy) elif format is 'snap': if delimiter == 'auto': delimiter = '\t' sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter, header=False, column_type_hints=int) g = SGraph().add_edges(sf, 'X1', 'X2') elif format is 'csv': if delimiter == 'auto': delimiter = ',' sf = SFrame.read_csv(filename, header=False, delimiter=delimiter) g = SGraph().add_edges(sf, 'X1', 'X2') elif format is 'tsv': if delimiter == 'auto': delimiter = '\t' sf = SFrame.read_csv(filename, header=False, delimiter=delimiter) g = SGraph().add_edges(sf, 'X1', 'X2') g.summary() # materialize return g
python
def load_sgraph(filename, format='binary', delimiter='auto'): """ Load SGraph from text file or previously saved SGraph binary. Parameters ---------- filename : string Location of the file. Can be a local path or a remote URL. format : {'binary', 'snap', 'csv', 'tsv'}, optional Format to of the file to load. - 'binary': native graph format obtained from `SGraph.save`. - 'snap': tab or space separated edge list format with comments, used in the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_. - 'csv': comma-separated edge list without header or comments. - 'tsv': tab-separated edge list without header or comments. delimiter : str, optional Specifying the Delimiter used in 'snap', 'csv' or 'tsv' format. Those format has default delimiter, but sometimes it is useful to overwrite the default delimiter. Returns ------- out : SGraph Loaded SGraph. See Also -------- SGraph, SGraph.save Examples -------- >>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)]) Save and load in binary format. >>> g.save('mygraph') >>> g2 = turicreate.load_sgraph('mygraph') """ if not format in ['binary', 'snap', 'csv', 'tsv']: raise ValueError('Invalid format: %s' % format) with cython_context(): g = None if format is 'binary': proxy = glconnect.get_unity().load_graph(_make_internal_url(filename)) g = SGraph(_proxy=proxy) elif format is 'snap': if delimiter == 'auto': delimiter = '\t' sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter, header=False, column_type_hints=int) g = SGraph().add_edges(sf, 'X1', 'X2') elif format is 'csv': if delimiter == 'auto': delimiter = ',' sf = SFrame.read_csv(filename, header=False, delimiter=delimiter) g = SGraph().add_edges(sf, 'X1', 'X2') elif format is 'tsv': if delimiter == 'auto': delimiter = '\t' sf = SFrame.read_csv(filename, header=False, delimiter=delimiter) g = SGraph().add_edges(sf, 'X1', 'X2') g.summary() # materialize return g
[ "def", "load_sgraph", "(", "filename", ",", "format", "=", "'binary'", ",", "delimiter", "=", "'auto'", ")", ":", "if", "not", "format", "in", "[", "'binary'", ",", "'snap'", ",", "'csv'", ",", "'tsv'", "]", ":", "raise", "ValueError", "(", "'Invalid format: %s'", "%", "format", ")", "with", "cython_context", "(", ")", ":", "g", "=", "None", "if", "format", "is", "'binary'", ":", "proxy", "=", "glconnect", ".", "get_unity", "(", ")", ".", "load_graph", "(", "_make_internal_url", "(", "filename", ")", ")", "g", "=", "SGraph", "(", "_proxy", "=", "proxy", ")", "elif", "format", "is", "'snap'", ":", "if", "delimiter", "==", "'auto'", ":", "delimiter", "=", "'\\t'", "sf", "=", "SFrame", ".", "read_csv", "(", "filename", ",", "comment_char", "=", "'#'", ",", "delimiter", "=", "delimiter", ",", "header", "=", "False", ",", "column_type_hints", "=", "int", ")", "g", "=", "SGraph", "(", ")", ".", "add_edges", "(", "sf", ",", "'X1'", ",", "'X2'", ")", "elif", "format", "is", "'csv'", ":", "if", "delimiter", "==", "'auto'", ":", "delimiter", "=", "','", "sf", "=", "SFrame", ".", "read_csv", "(", "filename", ",", "header", "=", "False", ",", "delimiter", "=", "delimiter", ")", "g", "=", "SGraph", "(", ")", ".", "add_edges", "(", "sf", ",", "'X1'", ",", "'X2'", ")", "elif", "format", "is", "'tsv'", ":", "if", "delimiter", "==", "'auto'", ":", "delimiter", "=", "'\\t'", "sf", "=", "SFrame", ".", "read_csv", "(", "filename", ",", "header", "=", "False", ",", "delimiter", "=", "delimiter", ")", "g", "=", "SGraph", "(", ")", ".", "add_edges", "(", "sf", ",", "'X1'", ",", "'X2'", ")", "g", ".", "summary", "(", ")", "# materialize", "return", "g" ]
Load SGraph from text file or previously saved SGraph binary. Parameters ---------- filename : string Location of the file. Can be a local path or a remote URL. format : {'binary', 'snap', 'csv', 'tsv'}, optional Format to of the file to load. - 'binary': native graph format obtained from `SGraph.save`. - 'snap': tab or space separated edge list format with comments, used in the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_. - 'csv': comma-separated edge list without header or comments. - 'tsv': tab-separated edge list without header or comments. delimiter : str, optional Specifying the Delimiter used in 'snap', 'csv' or 'tsv' format. Those format has default delimiter, but sometimes it is useful to overwrite the default delimiter. Returns ------- out : SGraph Loaded SGraph. See Also -------- SGraph, SGraph.save Examples -------- >>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)]) Save and load in binary format. >>> g.save('mygraph') >>> g2 = turicreate.load_sgraph('mygraph')
[ "Load", "SGraph", "from", "text", "file", "or", "previously", "saved", "SGraph", "binary", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L1153-L1221
29,136
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
_vertex_list_to_dataframe
def _vertex_list_to_dataframe(ls, id_column_name): """ Convert a list of vertices into dataframe. """ assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.' cols = reduce(set.union, (set(v.attr.keys()) for v in ls)) df = pd.DataFrame({id_column_name: [v.vid for v in ls]}) for c in cols: df[c] = [v.attr.get(c) for v in ls] return df
python
def _vertex_list_to_dataframe(ls, id_column_name): """ Convert a list of vertices into dataframe. """ assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.' cols = reduce(set.union, (set(v.attr.keys()) for v in ls)) df = pd.DataFrame({id_column_name: [v.vid for v in ls]}) for c in cols: df[c] = [v.attr.get(c) for v in ls] return df
[ "def", "_vertex_list_to_dataframe", "(", "ls", ",", "id_column_name", ")", ":", "assert", "HAS_PANDAS", ",", "'Cannot use dataframe because Pandas is not available or version is too low.'", "cols", "=", "reduce", "(", "set", ".", "union", ",", "(", "set", "(", "v", ".", "attr", ".", "keys", "(", ")", ")", "for", "v", "in", "ls", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "{", "id_column_name", ":", "[", "v", ".", "vid", "for", "v", "in", "ls", "]", "}", ")", "for", "c", "in", "cols", ":", "df", "[", "c", "]", "=", "[", "v", ".", "attr", ".", "get", "(", "c", ")", "for", "v", "in", "ls", "]", "return", "df" ]
Convert a list of vertices into dataframe.
[ "Convert", "a", "list", "of", "vertices", "into", "dataframe", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L1229-L1238
29,137
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
_vertex_list_to_sframe
def _vertex_list_to_sframe(ls, id_column_name): """ Convert a list of vertices into an SFrame. """ sf = SFrame() if type(ls) == list: cols = reduce(set.union, (set(v.attr.keys()) for v in ls)) sf[id_column_name] = [v.vid for v in ls] for c in cols: sf[c] = [v.attr.get(c) for v in ls] elif type(ls) == Vertex: sf[id_column_name] = [ls.vid] for col, val in ls.attr.iteritems(): sf[col] = [val] else: raise TypeError('Vertices type {} is Not supported.'.format(type(ls))) return sf
python
def _vertex_list_to_sframe(ls, id_column_name): """ Convert a list of vertices into an SFrame. """ sf = SFrame() if type(ls) == list: cols = reduce(set.union, (set(v.attr.keys()) for v in ls)) sf[id_column_name] = [v.vid for v in ls] for c in cols: sf[c] = [v.attr.get(c) for v in ls] elif type(ls) == Vertex: sf[id_column_name] = [ls.vid] for col, val in ls.attr.iteritems(): sf[col] = [val] else: raise TypeError('Vertices type {} is Not supported.'.format(type(ls))) return sf
[ "def", "_vertex_list_to_sframe", "(", "ls", ",", "id_column_name", ")", ":", "sf", "=", "SFrame", "(", ")", "if", "type", "(", "ls", ")", "==", "list", ":", "cols", "=", "reduce", "(", "set", ".", "union", ",", "(", "set", "(", "v", ".", "attr", ".", "keys", "(", ")", ")", "for", "v", "in", "ls", ")", ")", "sf", "[", "id_column_name", "]", "=", "[", "v", ".", "vid", "for", "v", "in", "ls", "]", "for", "c", "in", "cols", ":", "sf", "[", "c", "]", "=", "[", "v", ".", "attr", ".", "get", "(", "c", ")", "for", "v", "in", "ls", "]", "elif", "type", "(", "ls", ")", "==", "Vertex", ":", "sf", "[", "id_column_name", "]", "=", "[", "ls", ".", "vid", "]", "for", "col", ",", "val", "in", "ls", ".", "attr", ".", "iteritems", "(", ")", ":", "sf", "[", "col", "]", "=", "[", "val", "]", "else", ":", "raise", "TypeError", "(", "'Vertices type {} is Not supported.'", ".", "format", "(", "type", "(", "ls", ")", ")", ")", "return", "sf" ]
Convert a list of vertices into an SFrame.
[ "Convert", "a", "list", "of", "vertices", "into", "an", "SFrame", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L1240-L1260
29,138
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
_edge_list_to_dataframe
def _edge_list_to_dataframe(ls, src_column_name, dst_column_name): """ Convert a list of edges into dataframe. """ assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.' cols = reduce(set.union, (set(e.attr.keys()) for e in ls)) df = pd.DataFrame({ src_column_name: [e.src_vid for e in ls], dst_column_name: [e.dst_vid for e in ls]}) for c in cols: df[c] = [e.attr.get(c) for e in ls] return df
python
def _edge_list_to_dataframe(ls, src_column_name, dst_column_name): """ Convert a list of edges into dataframe. """ assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.' cols = reduce(set.union, (set(e.attr.keys()) for e in ls)) df = pd.DataFrame({ src_column_name: [e.src_vid for e in ls], dst_column_name: [e.dst_vid for e in ls]}) for c in cols: df[c] = [e.attr.get(c) for e in ls] return df
[ "def", "_edge_list_to_dataframe", "(", "ls", ",", "src_column_name", ",", "dst_column_name", ")", ":", "assert", "HAS_PANDAS", ",", "'Cannot use dataframe because Pandas is not available or version is too low.'", "cols", "=", "reduce", "(", "set", ".", "union", ",", "(", "set", "(", "e", ".", "attr", ".", "keys", "(", ")", ")", "for", "e", "in", "ls", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "{", "src_column_name", ":", "[", "e", ".", "src_vid", "for", "e", "in", "ls", "]", ",", "dst_column_name", ":", "[", "e", ".", "dst_vid", "for", "e", "in", "ls", "]", "}", ")", "for", "c", "in", "cols", ":", "df", "[", "c", "]", "=", "[", "e", ".", "attr", ".", "get", "(", "c", ")", "for", "e", "in", "ls", "]", "return", "df" ]
Convert a list of edges into dataframe.
[ "Convert", "a", "list", "of", "edges", "into", "dataframe", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L1262-L1273
29,139
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
_edge_list_to_sframe
def _edge_list_to_sframe(ls, src_column_name, dst_column_name): """ Convert a list of edges into an SFrame. """ sf = SFrame() if type(ls) == list: cols = reduce(set.union, (set(v.attr.keys()) for v in ls)) sf[src_column_name] = [e.src_vid for e in ls] sf[dst_column_name] = [e.dst_vid for e in ls] for c in cols: sf[c] = [e.attr.get(c) for e in ls] elif type(ls) == Edge: sf[src_column_name] = [ls.src_vid] sf[dst_column_name] = [ls.dst_vid] else: raise TypeError('Edges type {} is Not supported.'.format(type(ls))) return sf
python
def _edge_list_to_sframe(ls, src_column_name, dst_column_name): """ Convert a list of edges into an SFrame. """ sf = SFrame() if type(ls) == list: cols = reduce(set.union, (set(v.attr.keys()) for v in ls)) sf[src_column_name] = [e.src_vid for e in ls] sf[dst_column_name] = [e.dst_vid for e in ls] for c in cols: sf[c] = [e.attr.get(c) for e in ls] elif type(ls) == Edge: sf[src_column_name] = [ls.src_vid] sf[dst_column_name] = [ls.dst_vid] else: raise TypeError('Edges type {} is Not supported.'.format(type(ls))) return sf
[ "def", "_edge_list_to_sframe", "(", "ls", ",", "src_column_name", ",", "dst_column_name", ")", ":", "sf", "=", "SFrame", "(", ")", "if", "type", "(", "ls", ")", "==", "list", ":", "cols", "=", "reduce", "(", "set", ".", "union", ",", "(", "set", "(", "v", ".", "attr", ".", "keys", "(", ")", ")", "for", "v", "in", "ls", ")", ")", "sf", "[", "src_column_name", "]", "=", "[", "e", ".", "src_vid", "for", "e", "in", "ls", "]", "sf", "[", "dst_column_name", "]", "=", "[", "e", ".", "dst_vid", "for", "e", "in", "ls", "]", "for", "c", "in", "cols", ":", "sf", "[", "c", "]", "=", "[", "e", ".", "attr", ".", "get", "(", "c", ")", "for", "e", "in", "ls", "]", "elif", "type", "(", "ls", ")", "==", "Edge", ":", "sf", "[", "src_column_name", "]", "=", "[", "ls", ".", "src_vid", "]", "sf", "[", "dst_column_name", "]", "=", "[", "ls", ".", "dst_vid", "]", "else", ":", "raise", "TypeError", "(", "'Edges type {} is Not supported.'", ".", "format", "(", "type", "(", "ls", ")", ")", ")", "return", "sf" ]
Convert a list of edges into an SFrame.
[ "Convert", "a", "list", "of", "edges", "into", "an", "SFrame", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L1275-L1295
29,140
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
_dataframe_to_vertex_list
def _dataframe_to_vertex_list(df): """ Convert dataframe into list of vertices, assuming that vertex ids are stored in _VID_COLUMN. """ cols = df.columns if len(cols): assert _VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _VID_COLUMN df = df[cols].T ret = [Vertex(None, _series=df[col]) for col in df] return ret else: return []
python
def _dataframe_to_vertex_list(df): """ Convert dataframe into list of vertices, assuming that vertex ids are stored in _VID_COLUMN. """ cols = df.columns if len(cols): assert _VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _VID_COLUMN df = df[cols].T ret = [Vertex(None, _series=df[col]) for col in df] return ret else: return []
[ "def", "_dataframe_to_vertex_list", "(", "df", ")", ":", "cols", "=", "df", ".", "columns", "if", "len", "(", "cols", ")", ":", "assert", "_VID_COLUMN", "in", "cols", ",", "\"Vertex DataFrame must contain column %s\"", "%", "_VID_COLUMN", "df", "=", "df", "[", "cols", "]", ".", "T", "ret", "=", "[", "Vertex", "(", "None", ",", "_series", "=", "df", "[", "col", "]", ")", "for", "col", "in", "df", "]", "return", "ret", "else", ":", "return", "[", "]" ]
Convert dataframe into list of vertices, assuming that vertex ids are stored in _VID_COLUMN.
[ "Convert", "dataframe", "into", "list", "of", "vertices", "assuming", "that", "vertex", "ids", "are", "stored", "in", "_VID_COLUMN", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L1297-L1308
29,141
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
_dataframe_to_edge_list
def _dataframe_to_edge_list(df): """ Convert dataframe into list of edges, assuming that source and target ids are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively. """ cols = df.columns if len(cols): assert _SRC_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _SRC_VID_COLUMN assert _DST_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _DST_VID_COLUMN df = df[cols].T ret = [Edge(None, None, _series=df[col]) for col in df] return ret else: return []
python
def _dataframe_to_edge_list(df): """ Convert dataframe into list of edges, assuming that source and target ids are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively. """ cols = df.columns if len(cols): assert _SRC_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _SRC_VID_COLUMN assert _DST_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _DST_VID_COLUMN df = df[cols].T ret = [Edge(None, None, _series=df[col]) for col in df] return ret else: return []
[ "def", "_dataframe_to_edge_list", "(", "df", ")", ":", "cols", "=", "df", ".", "columns", "if", "len", "(", "cols", ")", ":", "assert", "_SRC_VID_COLUMN", "in", "cols", ",", "\"Vertex DataFrame must contain column %s\"", "%", "_SRC_VID_COLUMN", "assert", "_DST_VID_COLUMN", "in", "cols", ",", "\"Vertex DataFrame must contain column %s\"", "%", "_DST_VID_COLUMN", "df", "=", "df", "[", "cols", "]", ".", "T", "ret", "=", "[", "Edge", "(", "None", ",", "None", ",", "_series", "=", "df", "[", "col", "]", ")", "for", "col", "in", "df", "]", "return", "ret", "else", ":", "return", "[", "]" ]
Convert dataframe into list of edges, assuming that source and target ids are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively.
[ "Convert", "dataframe", "into", "list", "of", "edges", "assuming", "that", "source", "and", "target", "ids", "are", "stored", "in", "_SRC_VID_COLUMN", "and", "_DST_VID_COLUMN", "respectively", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L1311-L1323
29,142
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
SGraph.select_fields
def select_fields(self, fields): """ Return a new SGraph with only the selected fields. Other fields are discarded, while fields that do not exist in the SGraph are ignored. Parameters ---------- fields : string | list [string] A single field name or a list of field names to select. Returns ------- out : SGraph A new graph whose vertex and edge data are projected to the selected fields. See Also -------- get_fields, get_vertex_fields, get_edge_fields Examples -------- >>> from turicreate import SGraph, Vertex >>> verts = [Vertex(0, attr={'breed': 'labrador', 'age': 5}), Vertex(1, attr={'breed': 'labrador', 'age': 3}), Vertex(2, attr={'breed': 'vizsla', 'age': 8})] >>> g = SGraph() >>> g = g.add_vertices(verts) >>> g2 = g.select_fields(fields=['breed']) """ if (type(fields) is str): fields = [fields] if not isinstance(fields, list) or not all(type(x) is str for x in fields): raise TypeError('\"fields\" must be a str or list[str]') vfields = self.__proxy__.get_vertex_fields() efields = self.__proxy__.get_edge_fields() selected_vfields = [] selected_efields = [] for f in fields: found = False if f in vfields: selected_vfields.append(f) found = True if f in efields: selected_efields.append(f) found = True if not found: raise ValueError('Field \'%s\' not in graph' % f) with cython_context(): proxy = self.__proxy__ proxy = proxy.select_vertex_fields(selected_vfields) proxy = proxy.select_edge_fields(selected_efields) return SGraph(_proxy=proxy)
python
def select_fields(self, fields): """ Return a new SGraph with only the selected fields. Other fields are discarded, while fields that do not exist in the SGraph are ignored. Parameters ---------- fields : string | list [string] A single field name or a list of field names to select. Returns ------- out : SGraph A new graph whose vertex and edge data are projected to the selected fields. See Also -------- get_fields, get_vertex_fields, get_edge_fields Examples -------- >>> from turicreate import SGraph, Vertex >>> verts = [Vertex(0, attr={'breed': 'labrador', 'age': 5}), Vertex(1, attr={'breed': 'labrador', 'age': 3}), Vertex(2, attr={'breed': 'vizsla', 'age': 8})] >>> g = SGraph() >>> g = g.add_vertices(verts) >>> g2 = g.select_fields(fields=['breed']) """ if (type(fields) is str): fields = [fields] if not isinstance(fields, list) or not all(type(x) is str for x in fields): raise TypeError('\"fields\" must be a str or list[str]') vfields = self.__proxy__.get_vertex_fields() efields = self.__proxy__.get_edge_fields() selected_vfields = [] selected_efields = [] for f in fields: found = False if f in vfields: selected_vfields.append(f) found = True if f in efields: selected_efields.append(f) found = True if not found: raise ValueError('Field \'%s\' not in graph' % f) with cython_context(): proxy = self.__proxy__ proxy = proxy.select_vertex_fields(selected_vfields) proxy = proxy.select_edge_fields(selected_efields) return SGraph(_proxy=proxy)
[ "def", "select_fields", "(", "self", ",", "fields", ")", ":", "if", "(", "type", "(", "fields", ")", "is", "str", ")", ":", "fields", "=", "[", "fields", "]", "if", "not", "isinstance", "(", "fields", ",", "list", ")", "or", "not", "all", "(", "type", "(", "x", ")", "is", "str", "for", "x", "in", "fields", ")", ":", "raise", "TypeError", "(", "'\\\"fields\\\" must be a str or list[str]'", ")", "vfields", "=", "self", ".", "__proxy__", ".", "get_vertex_fields", "(", ")", "efields", "=", "self", ".", "__proxy__", ".", "get_edge_fields", "(", ")", "selected_vfields", "=", "[", "]", "selected_efields", "=", "[", "]", "for", "f", "in", "fields", ":", "found", "=", "False", "if", "f", "in", "vfields", ":", "selected_vfields", ".", "append", "(", "f", ")", "found", "=", "True", "if", "f", "in", "efields", ":", "selected_efields", ".", "append", "(", "f", ")", "found", "=", "True", "if", "not", "found", ":", "raise", "ValueError", "(", "'Field \\'%s\\' not in graph'", "%", "f", ")", "with", "cython_context", "(", ")", ":", "proxy", "=", "self", ".", "__proxy__", "proxy", "=", "proxy", ".", "select_vertex_fields", "(", "selected_vfields", ")", "proxy", "=", "proxy", ".", "select_edge_fields", "(", "selected_efields", ")", "return", "SGraph", "(", "_proxy", "=", "proxy", ")" ]
Return a new SGraph with only the selected fields. Other fields are discarded, while fields that do not exist in the SGraph are ignored. Parameters ---------- fields : string | list [string] A single field name or a list of field names to select. Returns ------- out : SGraph A new graph whose vertex and edge data are projected to the selected fields. See Also -------- get_fields, get_vertex_fields, get_edge_fields Examples -------- >>> from turicreate import SGraph, Vertex >>> verts = [Vertex(0, attr={'breed': 'labrador', 'age': 5}), Vertex(1, attr={'breed': 'labrador', 'age': 3}), Vertex(2, attr={'breed': 'vizsla', 'age': 8})] >>> g = SGraph() >>> g = g.add_vertices(verts) >>> g2 = g.select_fields(fields=['breed'])
[ "Return", "a", "new", "SGraph", "with", "only", "the", "selected", "fields", ".", "Other", "fields", "are", "discarded", "while", "fields", "that", "do", "not", "exist", "in", "the", "SGraph", "are", "ignored", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L811-L866
29,143
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
SGraph.get_neighborhood
def get_neighborhood(self, ids, radius=1, full_subgraph=True): """ Retrieve the graph neighborhood around a set of vertices, ignoring edge directions. Note that setting radius greater than two often results in a time-consuming query for a very large subgraph. Parameters ---------- ids : list [int | float | str] List of target vertex IDs. radius : int, optional Radius of the neighborhood. Every vertex in the returned subgraph is reachable from at least one of the target vertices on a path of length no longer than ``radius``. Setting radius larger than 2 may result in a very large subgraph. full_subgraph : bool, optional If True, return all edges between vertices in the returned neighborhood. The result is also known as the subgraph induced by the target nodes' neighbors, or the egocentric network for the target nodes. If False, return only edges on paths of length <= ``radius`` from the target node, also known as the reachability graph. Returns ------- out : Graph The subgraph with the neighborhoods around the target vertices. See Also -------- get_edges, get_vertices References ---------- - Marsden, P. (2002) `Egocentric and sociocentric measures of network centrality <http://www.sciencedirect.com/science/article/pii/S03788733 02000163>`_. 
- `Wikipedia - Reachability <http://en.wikipedia.org/wiki/Reachability>`_ Examples -------- >>> sf_edge = turicreate.SFrame({'source': range(9), 'dest': range(1, 10)}) >>> g = turicreate.SGraph() >>> g = g.add_edges(sf_edge, src_field='source', dst_field='dest') >>> subgraph = g.get_neighborhood(ids=[1, 7], radius=2, full_subgraph=True) """ verts = ids ## find the vertices within radius (and the path edges) for i in range(radius): edges_out = self.get_edges(src_ids=verts) edges_in = self.get_edges(dst_ids=verts) verts = list(edges_in['__src_id']) + list(edges_in['__dst_id']) + \ list(edges_out['__src_id']) + list(edges_out['__dst_id']) verts = list(set(verts)) ## make a new graph to return and add the vertices g = SGraph() g = g.add_vertices(self.get_vertices(verts), vid_field='__id') ## add the requested edge set if full_subgraph is True: induced_edge_out = self.get_edges(src_ids=verts) induced_edge_in = self.get_edges(dst_ids=verts) df_induced = induced_edge_out.append(induced_edge_in) df_induced = df_induced.groupby(df_induced.column_names(), {}) verts_sa = SArray(list(verts)) edges = df_induced.filter_by(verts_sa, "__src_id") edges = edges.filter_by(verts_sa, "__dst_id") else: path_edges = edges_out.append(edges_in) edges = path_edges.groupby(path_edges.column_names(), {}) g = g.add_edges(edges, src_field='__src_id', dst_field='__dst_id') return g
python
def get_neighborhood(self, ids, radius=1, full_subgraph=True): """ Retrieve the graph neighborhood around a set of vertices, ignoring edge directions. Note that setting radius greater than two often results in a time-consuming query for a very large subgraph. Parameters ---------- ids : list [int | float | str] List of target vertex IDs. radius : int, optional Radius of the neighborhood. Every vertex in the returned subgraph is reachable from at least one of the target vertices on a path of length no longer than ``radius``. Setting radius larger than 2 may result in a very large subgraph. full_subgraph : bool, optional If True, return all edges between vertices in the returned neighborhood. The result is also known as the subgraph induced by the target nodes' neighbors, or the egocentric network for the target nodes. If False, return only edges on paths of length <= ``radius`` from the target node, also known as the reachability graph. Returns ------- out : Graph The subgraph with the neighborhoods around the target vertices. See Also -------- get_edges, get_vertices References ---------- - Marsden, P. (2002) `Egocentric and sociocentric measures of network centrality <http://www.sciencedirect.com/science/article/pii/S03788733 02000163>`_. 
- `Wikipedia - Reachability <http://en.wikipedia.org/wiki/Reachability>`_ Examples -------- >>> sf_edge = turicreate.SFrame({'source': range(9), 'dest': range(1, 10)}) >>> g = turicreate.SGraph() >>> g = g.add_edges(sf_edge, src_field='source', dst_field='dest') >>> subgraph = g.get_neighborhood(ids=[1, 7], radius=2, full_subgraph=True) """ verts = ids ## find the vertices within radius (and the path edges) for i in range(radius): edges_out = self.get_edges(src_ids=verts) edges_in = self.get_edges(dst_ids=verts) verts = list(edges_in['__src_id']) + list(edges_in['__dst_id']) + \ list(edges_out['__src_id']) + list(edges_out['__dst_id']) verts = list(set(verts)) ## make a new graph to return and add the vertices g = SGraph() g = g.add_vertices(self.get_vertices(verts), vid_field='__id') ## add the requested edge set if full_subgraph is True: induced_edge_out = self.get_edges(src_ids=verts) induced_edge_in = self.get_edges(dst_ids=verts) df_induced = induced_edge_out.append(induced_edge_in) df_induced = df_induced.groupby(df_induced.column_names(), {}) verts_sa = SArray(list(verts)) edges = df_induced.filter_by(verts_sa, "__src_id") edges = edges.filter_by(verts_sa, "__dst_id") else: path_edges = edges_out.append(edges_in) edges = path_edges.groupby(path_edges.column_names(), {}) g = g.add_edges(edges, src_field='__src_id', dst_field='__dst_id') return g
[ "def", "get_neighborhood", "(", "self", ",", "ids", ",", "radius", "=", "1", ",", "full_subgraph", "=", "True", ")", ":", "verts", "=", "ids", "## find the vertices within radius (and the path edges)", "for", "i", "in", "range", "(", "radius", ")", ":", "edges_out", "=", "self", ".", "get_edges", "(", "src_ids", "=", "verts", ")", "edges_in", "=", "self", ".", "get_edges", "(", "dst_ids", "=", "verts", ")", "verts", "=", "list", "(", "edges_in", "[", "'__src_id'", "]", ")", "+", "list", "(", "edges_in", "[", "'__dst_id'", "]", ")", "+", "list", "(", "edges_out", "[", "'__src_id'", "]", ")", "+", "list", "(", "edges_out", "[", "'__dst_id'", "]", ")", "verts", "=", "list", "(", "set", "(", "verts", ")", ")", "## make a new graph to return and add the vertices", "g", "=", "SGraph", "(", ")", "g", "=", "g", ".", "add_vertices", "(", "self", ".", "get_vertices", "(", "verts", ")", ",", "vid_field", "=", "'__id'", ")", "## add the requested edge set", "if", "full_subgraph", "is", "True", ":", "induced_edge_out", "=", "self", ".", "get_edges", "(", "src_ids", "=", "verts", ")", "induced_edge_in", "=", "self", ".", "get_edges", "(", "dst_ids", "=", "verts", ")", "df_induced", "=", "induced_edge_out", ".", "append", "(", "induced_edge_in", ")", "df_induced", "=", "df_induced", ".", "groupby", "(", "df_induced", ".", "column_names", "(", ")", ",", "{", "}", ")", "verts_sa", "=", "SArray", "(", "list", "(", "verts", ")", ")", "edges", "=", "df_induced", ".", "filter_by", "(", "verts_sa", ",", "\"__src_id\"", ")", "edges", "=", "edges", ".", "filter_by", "(", "verts_sa", ",", "\"__dst_id\"", ")", "else", ":", "path_edges", "=", "edges_out", ".", "append", "(", "edges_in", ")", "edges", "=", "path_edges", ".", "groupby", "(", "path_edges", ".", "column_names", "(", ")", ",", "{", "}", ")", "g", "=", "g", ".", "add_edges", "(", "edges", ",", "src_field", "=", "'__src_id'", ",", "dst_field", "=", "'__dst_id'", ")", "return", "g" ]
Retrieve the graph neighborhood around a set of vertices, ignoring edge directions. Note that setting radius greater than two often results in a time-consuming query for a very large subgraph. Parameters ---------- ids : list [int | float | str] List of target vertex IDs. radius : int, optional Radius of the neighborhood. Every vertex in the returned subgraph is reachable from at least one of the target vertices on a path of length no longer than ``radius``. Setting radius larger than 2 may result in a very large subgraph. full_subgraph : bool, optional If True, return all edges between vertices in the returned neighborhood. The result is also known as the subgraph induced by the target nodes' neighbors, or the egocentric network for the target nodes. If False, return only edges on paths of length <= ``radius`` from the target node, also known as the reachability graph. Returns ------- out : Graph The subgraph with the neighborhoods around the target vertices. See Also -------- get_edges, get_vertices References ---------- - Marsden, P. (2002) `Egocentric and sociocentric measures of network centrality <http://www.sciencedirect.com/science/article/pii/S03788733 02000163>`_. - `Wikipedia - Reachability <http://en.wikipedia.org/wiki/Reachability>`_ Examples -------- >>> sf_edge = turicreate.SFrame({'source': range(9), 'dest': range(1, 10)}) >>> g = turicreate.SGraph() >>> g = g.add_edges(sf_edge, src_field='source', dst_field='dest') >>> subgraph = g.get_neighborhood(ids=[1, 7], radius=2, full_subgraph=True)
[ "Retrieve", "the", "graph", "neighborhood", "around", "a", "set", "of", "vertices", "ignoring", "edge", "directions", ".", "Note", "that", "setting", "radius", "greater", "than", "two", "often", "results", "in", "a", "time", "-", "consuming", "query", "for", "a", "very", "large", "subgraph", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L1063-L1145
29,144
apple/turicreate
src/unity/python/turicreate/toolkits/graph_analytics/_model_base.py
GraphAnalyticsModel._get
def _get(self, field): """ Return the value for the queried field. Get the value of a given field. The list of all queryable fields is documented in the beginning of the model class. >>> out = m._get('graph') Parameters ---------- field : string Name of the field to be retrieved. Returns ------- out : value The current value of the requested field. """ if field in self._list_fields(): return self.__proxy__.get(field) else: raise KeyError('Key \"%s\" not in model. Available fields are %s.' % (field, ', '.join(self._list_fields())))
python
def _get(self, field): """ Return the value for the queried field. Get the value of a given field. The list of all queryable fields is documented in the beginning of the model class. >>> out = m._get('graph') Parameters ---------- field : string Name of the field to be retrieved. Returns ------- out : value The current value of the requested field. """ if field in self._list_fields(): return self.__proxy__.get(field) else: raise KeyError('Key \"%s\" not in model. Available fields are %s.' % (field, ', '.join(self._list_fields())))
[ "def", "_get", "(", "self", ",", "field", ")", ":", "if", "field", "in", "self", ".", "_list_fields", "(", ")", ":", "return", "self", ".", "__proxy__", ".", "get", "(", "field", ")", "else", ":", "raise", "KeyError", "(", "'Key \\\"%s\\\" not in model. Available fields are %s.'", "%", "(", "field", ",", "', '", ".", "join", "(", "self", ".", "_list_fields", "(", ")", ")", ")", ")" ]
Return the value for the queried field. Get the value of a given field. The list of all queryable fields is documented in the beginning of the model class. >>> out = m._get('graph') Parameters ---------- field : string Name of the field to be retrieved. Returns ------- out : value The current value of the requested field.
[ "Return", "the", "value", "for", "the", "queried", "field", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/graph_analytics/_model_base.py#L31-L53
29,145
apple/turicreate
src/unity/python/turicreate/toolkits/graph_analytics/_model_base.py
GraphAnalyticsModel._describe_fields
def _describe_fields(cls): """ Return a dictionary for the class fields description. Fields should NOT be wrapped by _precomputed_field, if necessary """ dispatch_table = { 'ShortestPathModel': 'sssp', 'GraphColoringModel': 'graph_coloring', 'PagerankModel': 'pagerank', 'ConnectedComponentsModel': 'connected_components', 'TriangleCountingModel': 'triangle_counting', 'KcoreModel': 'kcore', 'DegreeCountingModel': 'degree_count', 'LabelPropagationModel': 'label_propagation' } try: toolkit_name = dispatch_table[cls.__name__] toolkit = _tc.extensions._toolkits.graph.__dict__[toolkit_name] return toolkit.get_model_fields({}) except: raise RuntimeError('Model %s does not have fields description' % cls.__name__)
python
def _describe_fields(cls): """ Return a dictionary for the class fields description. Fields should NOT be wrapped by _precomputed_field, if necessary """ dispatch_table = { 'ShortestPathModel': 'sssp', 'GraphColoringModel': 'graph_coloring', 'PagerankModel': 'pagerank', 'ConnectedComponentsModel': 'connected_components', 'TriangleCountingModel': 'triangle_counting', 'KcoreModel': 'kcore', 'DegreeCountingModel': 'degree_count', 'LabelPropagationModel': 'label_propagation' } try: toolkit_name = dispatch_table[cls.__name__] toolkit = _tc.extensions._toolkits.graph.__dict__[toolkit_name] return toolkit.get_model_fields({}) except: raise RuntimeError('Model %s does not have fields description' % cls.__name__)
[ "def", "_describe_fields", "(", "cls", ")", ":", "dispatch_table", "=", "{", "'ShortestPathModel'", ":", "'sssp'", ",", "'GraphColoringModel'", ":", "'graph_coloring'", ",", "'PagerankModel'", ":", "'pagerank'", ",", "'ConnectedComponentsModel'", ":", "'connected_components'", ",", "'TriangleCountingModel'", ":", "'triangle_counting'", ",", "'KcoreModel'", ":", "'kcore'", ",", "'DegreeCountingModel'", ":", "'degree_count'", ",", "'LabelPropagationModel'", ":", "'label_propagation'", "}", "try", ":", "toolkit_name", "=", "dispatch_table", "[", "cls", ".", "__name__", "]", "toolkit", "=", "_tc", ".", "extensions", ".", "_toolkits", ".", "graph", ".", "__dict__", "[", "toolkit_name", "]", "return", "toolkit", ".", "get_model_fields", "(", "{", "}", ")", "except", ":", "raise", "RuntimeError", "(", "'Model %s does not have fields description'", "%", "cls", ".", "__name__", ")" ]
Return a dictionary for the class fields description. Fields should NOT be wrapped by _precomputed_field, if necessary
[ "Return", "a", "dictionary", "for", "the", "class", "fields", "description", ".", "Fields", "should", "NOT", "be", "wrapped", "by", "_precomputed_field", "if", "necessary" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/graph_analytics/_model_base.py#L56-L76
29,146
apple/turicreate
src/unity/python/turicreate/util/_type_checks.py
_raise_error_if_not_of_type
def _raise_error_if_not_of_type(arg, expected_type, arg_name=None): """ Check if the input is of expected type. Parameters ---------- arg : Input argument. expected_type : A type OR a list of types that the argument is expected to be. arg_name : The name of the variable in the function being used. No name is assumed if set to None. Examples -------- _raise_error_if_not_of_type(sf, str, 'sf') _raise_error_if_not_of_type(sf, [str, int], 'sf') """ display_name = "%s " % arg_name if arg_name is not None else "Argument " lst_expected_type = [expected_type] if \ type(expected_type) == type else expected_type err_msg = "%smust be of type %s " % (display_name, ' or '.join([x.__name__ for x in lst_expected_type])) err_msg += "(not %s)." % type(arg).__name__ if not any(map(lambda x: isinstance(arg, x), lst_expected_type)): raise TypeError(err_msg)
python
def _raise_error_if_not_of_type(arg, expected_type, arg_name=None): """ Check if the input is of expected type. Parameters ---------- arg : Input argument. expected_type : A type OR a list of types that the argument is expected to be. arg_name : The name of the variable in the function being used. No name is assumed if set to None. Examples -------- _raise_error_if_not_of_type(sf, str, 'sf') _raise_error_if_not_of_type(sf, [str, int], 'sf') """ display_name = "%s " % arg_name if arg_name is not None else "Argument " lst_expected_type = [expected_type] if \ type(expected_type) == type else expected_type err_msg = "%smust be of type %s " % (display_name, ' or '.join([x.__name__ for x in lst_expected_type])) err_msg += "(not %s)." % type(arg).__name__ if not any(map(lambda x: isinstance(arg, x), lst_expected_type)): raise TypeError(err_msg)
[ "def", "_raise_error_if_not_of_type", "(", "arg", ",", "expected_type", ",", "arg_name", "=", "None", ")", ":", "display_name", "=", "\"%s \"", "%", "arg_name", "if", "arg_name", "is", "not", "None", "else", "\"Argument \"", "lst_expected_type", "=", "[", "expected_type", "]", "if", "type", "(", "expected_type", ")", "==", "type", "else", "expected_type", "err_msg", "=", "\"%smust be of type %s \"", "%", "(", "display_name", ",", "' or '", ".", "join", "(", "[", "x", ".", "__name__", "for", "x", "in", "lst_expected_type", "]", ")", ")", "err_msg", "+=", "\"(not %s).\"", "%", "type", "(", "arg", ")", ".", "__name__", "if", "not", "any", "(", "map", "(", "lambda", "x", ":", "isinstance", "(", "arg", ",", "x", ")", ",", "lst_expected_type", ")", ")", ":", "raise", "TypeError", "(", "err_msg", ")" ]
Check if the input is of expected type. Parameters ---------- arg : Input argument. expected_type : A type OR a list of types that the argument is expected to be. arg_name : The name of the variable in the function being used. No name is assumed if set to None. Examples -------- _raise_error_if_not_of_type(sf, str, 'sf') _raise_error_if_not_of_type(sf, [str, int], 'sf')
[ "Check", "if", "the", "input", "is", "of", "expected", "type", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/_type_checks.py#L11-L39
29,147
apple/turicreate
src/unity/python/turicreate/toolkits/sound_classifier/vggish_input.py
waveform_to_examples
def waveform_to_examples(data, sample_rate): """Converts audio waveform into an array of examples for VGGish. Args: data: np.array of either one dimension (mono) or two dimensions (multi-channel, with the outer dimension representing channels). Each sample is generally expected to lie in the range [-1.0, +1.0], although this is not required. sample_rate: Sample rate of data. Returns: 3-D np.array of shape [num_examples, num_frames, num_bands] which represents a sequence of examples, each of which contains a patch of log mel spectrogram, covering num_frames frames of audio and num_bands mel frequency bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS. """ import resampy # Convert to mono. if len(data.shape) > 1: data = np.mean(data, axis=1) # Resample to the rate assumed by VGGish. if sample_rate != vggish_params.SAMPLE_RATE: data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE) # Compute log mel spectrogram features. log_mel = mel_features.log_mel_spectrogram( data, audio_sample_rate=vggish_params.SAMPLE_RATE, log_offset=vggish_params.LOG_OFFSET, window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS, hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS, num_mel_bins=vggish_params.NUM_MEL_BINS, lower_edge_hertz=vggish_params.MEL_MIN_HZ, upper_edge_hertz=vggish_params.MEL_MAX_HZ) # Frame features into examples. features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS example_window_length = int(round( vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate)) example_hop_length = int(round( vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate)) log_mel_examples = mel_features.frame( log_mel, window_length=example_window_length, hop_length=example_hop_length) return log_mel_examples
python
def waveform_to_examples(data, sample_rate): """Converts audio waveform into an array of examples for VGGish. Args: data: np.array of either one dimension (mono) or two dimensions (multi-channel, with the outer dimension representing channels). Each sample is generally expected to lie in the range [-1.0, +1.0], although this is not required. sample_rate: Sample rate of data. Returns: 3-D np.array of shape [num_examples, num_frames, num_bands] which represents a sequence of examples, each of which contains a patch of log mel spectrogram, covering num_frames frames of audio and num_bands mel frequency bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS. """ import resampy # Convert to mono. if len(data.shape) > 1: data = np.mean(data, axis=1) # Resample to the rate assumed by VGGish. if sample_rate != vggish_params.SAMPLE_RATE: data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE) # Compute log mel spectrogram features. log_mel = mel_features.log_mel_spectrogram( data, audio_sample_rate=vggish_params.SAMPLE_RATE, log_offset=vggish_params.LOG_OFFSET, window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS, hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS, num_mel_bins=vggish_params.NUM_MEL_BINS, lower_edge_hertz=vggish_params.MEL_MIN_HZ, upper_edge_hertz=vggish_params.MEL_MAX_HZ) # Frame features into examples. features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS example_window_length = int(round( vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate)) example_hop_length = int(round( vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate)) log_mel_examples = mel_features.frame( log_mel, window_length=example_window_length, hop_length=example_hop_length) return log_mel_examples
[ "def", "waveform_to_examples", "(", "data", ",", "sample_rate", ")", ":", "import", "resampy", "# Convert to mono.", "if", "len", "(", "data", ".", "shape", ")", ">", "1", ":", "data", "=", "np", ".", "mean", "(", "data", ",", "axis", "=", "1", ")", "# Resample to the rate assumed by VGGish.", "if", "sample_rate", "!=", "vggish_params", ".", "SAMPLE_RATE", ":", "data", "=", "resampy", ".", "resample", "(", "data", ",", "sample_rate", ",", "vggish_params", ".", "SAMPLE_RATE", ")", "# Compute log mel spectrogram features.", "log_mel", "=", "mel_features", ".", "log_mel_spectrogram", "(", "data", ",", "audio_sample_rate", "=", "vggish_params", ".", "SAMPLE_RATE", ",", "log_offset", "=", "vggish_params", ".", "LOG_OFFSET", ",", "window_length_secs", "=", "vggish_params", ".", "STFT_WINDOW_LENGTH_SECONDS", ",", "hop_length_secs", "=", "vggish_params", ".", "STFT_HOP_LENGTH_SECONDS", ",", "num_mel_bins", "=", "vggish_params", ".", "NUM_MEL_BINS", ",", "lower_edge_hertz", "=", "vggish_params", ".", "MEL_MIN_HZ", ",", "upper_edge_hertz", "=", "vggish_params", ".", "MEL_MAX_HZ", ")", "# Frame features into examples.", "features_sample_rate", "=", "1.0", "/", "vggish_params", ".", "STFT_HOP_LENGTH_SECONDS", "example_window_length", "=", "int", "(", "round", "(", "vggish_params", ".", "EXAMPLE_WINDOW_SECONDS", "*", "features_sample_rate", ")", ")", "example_hop_length", "=", "int", "(", "round", "(", "vggish_params", ".", "EXAMPLE_HOP_SECONDS", "*", "features_sample_rate", ")", ")", "log_mel_examples", "=", "mel_features", ".", "frame", "(", "log_mel", ",", "window_length", "=", "example_window_length", ",", "hop_length", "=", "example_hop_length", ")", "return", "log_mel_examples" ]
Converts audio waveform into an array of examples for VGGish. Args: data: np.array of either one dimension (mono) or two dimensions (multi-channel, with the outer dimension representing channels). Each sample is generally expected to lie in the range [-1.0, +1.0], although this is not required. sample_rate: Sample rate of data. Returns: 3-D np.array of shape [num_examples, num_frames, num_bands] which represents a sequence of examples, each of which contains a patch of log mel spectrogram, covering num_frames frames of audio and num_bands mel frequency bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
[ "Converts", "audio", "waveform", "into", "an", "array", "of", "examples", "for", "VGGish", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/sound_classifier/vggish_input.py#L24-L71
29,148
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/build_request.py
expand_no_defaults
def expand_no_defaults (property_sets): """ Expand the given build request by combining all property_sets which don't specify conflicting non-free features. """ assert is_iterable_typed(property_sets, property_set.PropertySet) # First make all features and subfeatures explicit expanded_property_sets = [ps.expand_subfeatures() for ps in property_sets] # Now combine all of the expanded property_sets product = __x_product (expanded_property_sets) return [property_set.create(p) for p in product]
python
def expand_no_defaults (property_sets): """ Expand the given build request by combining all property_sets which don't specify conflicting non-free features. """ assert is_iterable_typed(property_sets, property_set.PropertySet) # First make all features and subfeatures explicit expanded_property_sets = [ps.expand_subfeatures() for ps in property_sets] # Now combine all of the expanded property_sets product = __x_product (expanded_property_sets) return [property_set.create(p) for p in product]
[ "def", "expand_no_defaults", "(", "property_sets", ")", ":", "assert", "is_iterable_typed", "(", "property_sets", ",", "property_set", ".", "PropertySet", ")", "# First make all features and subfeatures explicit", "expanded_property_sets", "=", "[", "ps", ".", "expand_subfeatures", "(", ")", "for", "ps", "in", "property_sets", "]", "# Now combine all of the expanded property_sets", "product", "=", "__x_product", "(", "expanded_property_sets", ")", "return", "[", "property_set", ".", "create", "(", "p", ")", "for", "p", "in", "product", "]" ]
Expand the given build request by combining all property_sets which don't specify conflicting non-free features.
[ "Expand", "the", "given", "build", "request", "by", "combining", "all", "property_sets", "which", "don", "t", "specify", "conflicting", "non", "-", "free", "features", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/build_request.py#L17-L28
29,149
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/build_request.py
__x_product
def __x_product (property_sets): """ Return the cross-product of all elements of property_sets, less any that would contain conflicting values for single-valued features. """ assert is_iterable_typed(property_sets, property_set.PropertySet) x_product_seen = set() return __x_product_aux (property_sets, x_product_seen)[0]
python
def __x_product (property_sets): """ Return the cross-product of all elements of property_sets, less any that would contain conflicting values for single-valued features. """ assert is_iterable_typed(property_sets, property_set.PropertySet) x_product_seen = set() return __x_product_aux (property_sets, x_product_seen)[0]
[ "def", "__x_product", "(", "property_sets", ")", ":", "assert", "is_iterable_typed", "(", "property_sets", ",", "property_set", ".", "PropertySet", ")", "x_product_seen", "=", "set", "(", ")", "return", "__x_product_aux", "(", "property_sets", ",", "x_product_seen", ")", "[", "0", "]" ]
Return the cross-product of all elements of property_sets, less any that would contain conflicting values for single-valued features.
[ "Return", "the", "cross", "-", "product", "of", "all", "elements", "of", "property_sets", "less", "any", "that", "would", "contain", "conflicting", "values", "for", "single", "-", "valued", "features", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/build_request.py#L31-L37
29,150
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/build_request.py
__x_product_aux
def __x_product_aux (property_sets, seen_features): """Returns non-conflicting combinations of property sets. property_sets is a list of PropertySet instances. seen_features is a set of Property instances. Returns a tuple of: - list of lists of Property instances, such that within each list, no two Property instance have the same feature, and no Property is for feature in seen_features. - set of features we saw in property_sets """ assert is_iterable_typed(property_sets, property_set.PropertySet) assert isinstance(seen_features, set) if not property_sets: return ([], set()) properties = property_sets[0].all() these_features = set() for p in property_sets[0].non_free(): these_features.add(p.feature) # Note: the algorithm as implemented here, as in original Jam code, appears to # detect conflicts based on features, not properties. For example, if command # line build request say: # # <a>1/<b>1 c<1>/<b>1 # # It will decide that those two property sets conflict, because they both specify # a value for 'b' and will not try building "<a>1 <c1> <b1>", but rather two # different property sets. This is a topic for future fixing, maybe. if these_features & seen_features: (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features) return (inner_result, inner_seen | these_features) else: result = [] (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features | these_features) if inner_result: for inner in inner_result: result.append(properties + inner) else: result.append(properties) if inner_seen & these_features: # Some of elements in property_sets[1:] conflict with elements of property_sets[0], # Try again, this time omitting elements of property_sets[0] (inner_result2, inner_seen2) = __x_product_aux(property_sets[1:], seen_features) result.extend(inner_result2) return (result, inner_seen | these_features)
python
def __x_product_aux (property_sets, seen_features): """Returns non-conflicting combinations of property sets. property_sets is a list of PropertySet instances. seen_features is a set of Property instances. Returns a tuple of: - list of lists of Property instances, such that within each list, no two Property instance have the same feature, and no Property is for feature in seen_features. - set of features we saw in property_sets """ assert is_iterable_typed(property_sets, property_set.PropertySet) assert isinstance(seen_features, set) if not property_sets: return ([], set()) properties = property_sets[0].all() these_features = set() for p in property_sets[0].non_free(): these_features.add(p.feature) # Note: the algorithm as implemented here, as in original Jam code, appears to # detect conflicts based on features, not properties. For example, if command # line build request say: # # <a>1/<b>1 c<1>/<b>1 # # It will decide that those two property sets conflict, because they both specify # a value for 'b' and will not try building "<a>1 <c1> <b1>", but rather two # different property sets. This is a topic for future fixing, maybe. if these_features & seen_features: (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features) return (inner_result, inner_seen | these_features) else: result = [] (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features | these_features) if inner_result: for inner in inner_result: result.append(properties + inner) else: result.append(properties) if inner_seen & these_features: # Some of elements in property_sets[1:] conflict with elements of property_sets[0], # Try again, this time omitting elements of property_sets[0] (inner_result2, inner_seen2) = __x_product_aux(property_sets[1:], seen_features) result.extend(inner_result2) return (result, inner_seen | these_features)
[ "def", "__x_product_aux", "(", "property_sets", ",", "seen_features", ")", ":", "assert", "is_iterable_typed", "(", "property_sets", ",", "property_set", ".", "PropertySet", ")", "assert", "isinstance", "(", "seen_features", ",", "set", ")", "if", "not", "property_sets", ":", "return", "(", "[", "]", ",", "set", "(", ")", ")", "properties", "=", "property_sets", "[", "0", "]", ".", "all", "(", ")", "these_features", "=", "set", "(", ")", "for", "p", "in", "property_sets", "[", "0", "]", ".", "non_free", "(", ")", ":", "these_features", ".", "add", "(", "p", ".", "feature", ")", "# Note: the algorithm as implemented here, as in original Jam code, appears to", "# detect conflicts based on features, not properties. For example, if command", "# line build request say:", "#", "# <a>1/<b>1 c<1>/<b>1", "#", "# It will decide that those two property sets conflict, because they both specify", "# a value for 'b' and will not try building \"<a>1 <c1> <b1>\", but rather two", "# different property sets. 
This is a topic for future fixing, maybe.", "if", "these_features", "&", "seen_features", ":", "(", "inner_result", ",", "inner_seen", ")", "=", "__x_product_aux", "(", "property_sets", "[", "1", ":", "]", ",", "seen_features", ")", "return", "(", "inner_result", ",", "inner_seen", "|", "these_features", ")", "else", ":", "result", "=", "[", "]", "(", "inner_result", ",", "inner_seen", ")", "=", "__x_product_aux", "(", "property_sets", "[", "1", ":", "]", ",", "seen_features", "|", "these_features", ")", "if", "inner_result", ":", "for", "inner", "in", "inner_result", ":", "result", ".", "append", "(", "properties", "+", "inner", ")", "else", ":", "result", ".", "append", "(", "properties", ")", "if", "inner_seen", "&", "these_features", ":", "# Some of elements in property_sets[1:] conflict with elements of property_sets[0],", "# Try again, this time omitting elements of property_sets[0]", "(", "inner_result2", ",", "inner_seen2", ")", "=", "__x_product_aux", "(", "property_sets", "[", "1", ":", "]", ",", "seen_features", ")", "result", ".", "extend", "(", "inner_result2", ")", "return", "(", "result", ",", "inner_seen", "|", "these_features", ")" ]
Returns non-conflicting combinations of property sets. property_sets is a list of PropertySet instances. seen_features is a set of Property instances. Returns a tuple of: - list of lists of Property instances, such that within each list, no two Property instance have the same feature, and no Property is for feature in seen_features. - set of features we saw in property_sets
[ "Returns", "non", "-", "conflicting", "combinations", "of", "property", "sets", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/build_request.py#L39-L91
29,151
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/build_request.py
looks_like_implicit_value
def looks_like_implicit_value(v): """Returns true if 'v' is either implicit value, or the part before the first '-' symbol is implicit value.""" assert isinstance(v, basestring) if feature.is_implicit_value(v): return 1 else: split = v.split("-") if feature.is_implicit_value(split[0]): return 1 return 0
python
def looks_like_implicit_value(v): """Returns true if 'v' is either implicit value, or the part before the first '-' symbol is implicit value.""" assert isinstance(v, basestring) if feature.is_implicit_value(v): return 1 else: split = v.split("-") if feature.is_implicit_value(split[0]): return 1 return 0
[ "def", "looks_like_implicit_value", "(", "v", ")", ":", "assert", "isinstance", "(", "v", ",", "basestring", ")", "if", "feature", ".", "is_implicit_value", "(", "v", ")", ":", "return", "1", "else", ":", "split", "=", "v", ".", "split", "(", "\"-\"", ")", "if", "feature", ".", "is_implicit_value", "(", "split", "[", "0", "]", ")", ":", "return", "1", "return", "0" ]
Returns true if 'v' is either implicit value, or the part before the first '-' symbol is implicit value.
[ "Returns", "true", "if", "v", "is", "either", "implicit", "value", "or", "the", "part", "before", "the", "first", "-", "symbol", "is", "implicit", "value", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/build_request.py#L95-L106
29,152
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
regex_to_error_msg
def regex_to_error_msg(regex): """Format a human-readable error message from a regex""" return re.sub('([^\\\\])[()]', '\\1', regex) \ .replace('[ \t]*$', '') \ .replace('^', '') \ .replace('$', '') \ .replace('[ \t]*', ' ') \ .replace('[ \t]+', ' ') \ .replace('[0-9]+', 'X') \ \ .replace('\\[', '[') \ .replace('\\]', ']') \ .replace('\\(', '(') \ .replace('\\)', ')') \ .replace('\\.', '.')
python
def regex_to_error_msg(regex): """Format a human-readable error message from a regex""" return re.sub('([^\\\\])[()]', '\\1', regex) \ .replace('[ \t]*$', '') \ .replace('^', '') \ .replace('$', '') \ .replace('[ \t]*', ' ') \ .replace('[ \t]+', ' ') \ .replace('[0-9]+', 'X') \ \ .replace('\\[', '[') \ .replace('\\]', ']') \ .replace('\\(', '(') \ .replace('\\)', ')') \ .replace('\\.', '.')
[ "def", "regex_to_error_msg", "(", "regex", ")", ":", "return", "re", ".", "sub", "(", "'([^\\\\\\\\])[()]'", ",", "'\\\\1'", ",", "regex", ")", ".", "replace", "(", "'[ \\t]*$'", ",", "''", ")", ".", "replace", "(", "'^'", ",", "''", ")", ".", "replace", "(", "'$'", ",", "''", ")", ".", "replace", "(", "'[ \\t]*'", ",", "' '", ")", ".", "replace", "(", "'[ \\t]+'", ",", "' '", ")", ".", "replace", "(", "'[0-9]+'", ",", "'X'", ")", ".", "replace", "(", "'\\\\['", ",", "'['", ")", ".", "replace", "(", "'\\\\]'", ",", "']'", ")", ".", "replace", "(", "'\\\\('", ",", "'('", ")", ".", "replace", "(", "'\\\\)'", ",", "')'", ")", ".", "replace", "(", "'\\\\.'", ",", "'.'", ")" ]
Format a human-readable error message from a regex
[ "Format", "a", "human", "-", "readable", "error", "message", "from", "a", "regex" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L20-L34
29,153
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
random_chars
def random_chars(number): """Generate random characters""" char_map = { k: v for k, v in chars.CHARS.iteritems() if not format_character(k).startswith('\\x') } char_num = sum(char_map.values()) return ( format_character(nth_char(char_map, random.randint(0, char_num - 1))) for _ in xrange(0, number) )
python
def random_chars(number): """Generate random characters""" char_map = { k: v for k, v in chars.CHARS.iteritems() if not format_character(k).startswith('\\x') } char_num = sum(char_map.values()) return ( format_character(nth_char(char_map, random.randint(0, char_num - 1))) for _ in xrange(0, number) )
[ "def", "random_chars", "(", "number", ")", ":", "char_map", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "chars", ".", "CHARS", ".", "iteritems", "(", ")", "if", "not", "format_character", "(", "k", ")", ".", "startswith", "(", "'\\\\x'", ")", "}", "char_num", "=", "sum", "(", "char_map", ".", "values", "(", ")", ")", "return", "(", "format_character", "(", "nth_char", "(", "char_map", ",", "random", ".", "randint", "(", "0", ",", "char_num", "-", "1", ")", ")", ")", "for", "_", "in", "xrange", "(", "0", ",", "number", ")", ")" ]
Generate random characters
[ "Generate", "random", "characters" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L50-L61
29,154
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
templates_in
def templates_in(path): """Enumerate the templates found in path""" ext = '.cpp' return ( Template(f[0:-len(ext)], load_file(os.path.join(path, f))) for f in os.listdir(path) if f.endswith(ext) )
python
def templates_in(path): """Enumerate the templates found in path""" ext = '.cpp' return ( Template(f[0:-len(ext)], load_file(os.path.join(path, f))) for f in os.listdir(path) if f.endswith(ext) )
[ "def", "templates_in", "(", "path", ")", ":", "ext", "=", "'.cpp'", "return", "(", "Template", "(", "f", "[", "0", ":", "-", "len", "(", "ext", ")", "]", ",", "load_file", "(", "os", ".", "path", ".", "join", "(", "path", ",", "f", ")", ")", ")", "for", "f", "in", "os", ".", "listdir", "(", "path", ")", "if", "f", ".", "endswith", "(", "ext", ")", ")" ]
Enumerate the templates found in path
[ "Enumerate", "the", "templates", "found", "in", "path" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L186-L192
29,155
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
nth_char
def nth_char(char_map, index): """Returns the nth character of a character->occurrence map""" for char in char_map: if index < char_map[char]: return char index = index - char_map[char] return None
python
def nth_char(char_map, index): """Returns the nth character of a character->occurrence map""" for char in char_map: if index < char_map[char]: return char index = index - char_map[char] return None
[ "def", "nth_char", "(", "char_map", ",", "index", ")", ":", "for", "char", "in", "char_map", ":", "if", "index", "<", "char_map", "[", "char", "]", ":", "return", "char", "index", "=", "index", "-", "char_map", "[", "char", "]", "return", "None" ]
Returns the nth character of a character->occurrence map
[ "Returns", "the", "nth", "character", "of", "a", "character", "-", ">", "occurrence", "map" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L195-L201
29,156
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
format_character
def format_character(char): """Returns the C-formatting of the character""" if \ char in string.ascii_letters \ or char in string.digits \ or char in [ '_', '.', ':', ';', ' ', '!', '?', '+', '-', '/', '=', '<', '>', '$', '(', ')', '@', '~', '`', '|', '#', '[', ']', '{', '}', '&', '*', '^', '%']: return char elif char in ['"', '\'', '\\']: return '\\{0}'.format(char) elif char == '\n': return '\\n' elif char == '\r': return '\\r' elif char == '\t': return '\\t' else: return '\\x{:02x}'.format(ord(char))
python
def format_character(char): """Returns the C-formatting of the character""" if \ char in string.ascii_letters \ or char in string.digits \ or char in [ '_', '.', ':', ';', ' ', '!', '?', '+', '-', '/', '=', '<', '>', '$', '(', ')', '@', '~', '`', '|', '#', '[', ']', '{', '}', '&', '*', '^', '%']: return char elif char in ['"', '\'', '\\']: return '\\{0}'.format(char) elif char == '\n': return '\\n' elif char == '\r': return '\\r' elif char == '\t': return '\\t' else: return '\\x{:02x}'.format(ord(char))
[ "def", "format_character", "(", "char", ")", ":", "if", "char", "in", "string", ".", "ascii_letters", "or", "char", "in", "string", ".", "digits", "or", "char", "in", "[", "'_'", ",", "'.'", ",", "':'", ",", "';'", ",", "' '", ",", "'!'", ",", "'?'", ",", "'+'", ",", "'-'", ",", "'/'", ",", "'='", ",", "'<'", ",", "'>'", ",", "'$'", ",", "'('", ",", "')'", ",", "'@'", ",", "'~'", ",", "'`'", ",", "'|'", ",", "'#'", ",", "'['", ",", "']'", ",", "'{'", ",", "'}'", ",", "'&'", ",", "'*'", ",", "'^'", ",", "'%'", "]", ":", "return", "char", "elif", "char", "in", "[", "'\"'", ",", "'\\''", ",", "'\\\\'", "]", ":", "return", "'\\\\{0}'", ".", "format", "(", "char", ")", "elif", "char", "==", "'\\n'", ":", "return", "'\\\\n'", "elif", "char", "==", "'\\r'", ":", "return", "'\\\\r'", "elif", "char", "==", "'\\t'", ":", "return", "'\\\\t'", "else", ":", "return", "'\\\\x{:02x}'", ".", "format", "(", "ord", "(", "char", ")", ")" ]
Returns the C-formatting of the character
[ "Returns", "the", "C", "-", "formatting", "of", "the", "character" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L204-L223
29,157
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
write_file
def write_file(filename, content): """Create the file with the given content""" print 'Generating {0}'.format(filename) with open(filename, 'wb') as out_f: out_f.write(content)
python
def write_file(filename, content): """Create the file with the given content""" print 'Generating {0}'.format(filename) with open(filename, 'wb') as out_f: out_f.write(content)
[ "def", "write_file", "(", "filename", ",", "content", ")", ":", "print", "'Generating {0}'", ".", "format", "(", "filename", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "out_f", ":", "out_f", ".", "write", "(", "content", ")" ]
Create the file with the given content
[ "Create", "the", "file", "with", "the", "given", "content" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L226-L230
29,158
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
out_filename
def out_filename(template, n_val, mode): """Determine the output filename""" return '{0}_{1}_{2}.cpp'.format(template.name, n_val, mode.identifier)
python
def out_filename(template, n_val, mode): """Determine the output filename""" return '{0}_{1}_{2}.cpp'.format(template.name, n_val, mode.identifier)
[ "def", "out_filename", "(", "template", ",", "n_val", ",", "mode", ")", ":", "return", "'{0}_{1}_{2}.cpp'", ".", "format", "(", "template", ".", "name", ",", "n_val", ",", "mode", ".", "identifier", ")" ]
Determine the output filename
[ "Determine", "the", "output", "filename" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L233-L235
29,159
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
Mode.convert_from
def convert_from(self, base): """Convert a BOOST_METAPARSE_STRING mode document into one with this mode""" if self.identifier == 'bmp': return base elif self.identifier == 'man': result = [] prefix = 'BOOST_METAPARSE_STRING("' while True: bmp_at = base.find(prefix) if bmp_at == -1: return ''.join(result) + base else: result.append( base[0:bmp_at] + '::boost::metaparse::string<' ) new_base = '' was_backslash = False comma = '' for i in xrange(bmp_at + len(prefix), len(base)): if was_backslash: result.append( '{0}\'\\{1}\''.format(comma, base[i]) ) was_backslash = False comma = ',' elif base[i] == '"': new_base = base[i+2:] break elif base[i] == '\\': was_backslash = True else: result.append('{0}\'{1}\''.format(comma, base[i])) comma = ',' base = new_base result.append('>')
python
def convert_from(self, base): """Convert a BOOST_METAPARSE_STRING mode document into one with this mode""" if self.identifier == 'bmp': return base elif self.identifier == 'man': result = [] prefix = 'BOOST_METAPARSE_STRING("' while True: bmp_at = base.find(prefix) if bmp_at == -1: return ''.join(result) + base else: result.append( base[0:bmp_at] + '::boost::metaparse::string<' ) new_base = '' was_backslash = False comma = '' for i in xrange(bmp_at + len(prefix), len(base)): if was_backslash: result.append( '{0}\'\\{1}\''.format(comma, base[i]) ) was_backslash = False comma = ',' elif base[i] == '"': new_base = base[i+2:] break elif base[i] == '\\': was_backslash = True else: result.append('{0}\'{1}\''.format(comma, base[i])) comma = ',' base = new_base result.append('>')
[ "def", "convert_from", "(", "self", ",", "base", ")", ":", "if", "self", ".", "identifier", "==", "'bmp'", ":", "return", "base", "elif", "self", ".", "identifier", "==", "'man'", ":", "result", "=", "[", "]", "prefix", "=", "'BOOST_METAPARSE_STRING(\"'", "while", "True", ":", "bmp_at", "=", "base", ".", "find", "(", "prefix", ")", "if", "bmp_at", "==", "-", "1", ":", "return", "''", ".", "join", "(", "result", ")", "+", "base", "else", ":", "result", ".", "append", "(", "base", "[", "0", ":", "bmp_at", "]", "+", "'::boost::metaparse::string<'", ")", "new_base", "=", "''", "was_backslash", "=", "False", "comma", "=", "''", "for", "i", "in", "xrange", "(", "bmp_at", "+", "len", "(", "prefix", ")", ",", "len", "(", "base", ")", ")", ":", "if", "was_backslash", ":", "result", ".", "append", "(", "'{0}\\'\\\\{1}\\''", ".", "format", "(", "comma", ",", "base", "[", "i", "]", ")", ")", "was_backslash", "=", "False", "comma", "=", "','", "elif", "base", "[", "i", "]", "==", "'\"'", ":", "new_base", "=", "base", "[", "i", "+", "2", ":", "]", "break", "elif", "base", "[", "i", "]", "==", "'\\\\'", ":", "was_backslash", "=", "True", "else", ":", "result", ".", "append", "(", "'{0}\\'{1}\\''", ".", "format", "(", "comma", ",", "base", "[", "i", "]", ")", ")", "comma", "=", "','", "base", "=", "new_base", "result", ".", "append", "(", "'>'", ")" ]
Convert a BOOST_METAPARSE_STRING mode document into one with this mode
[ "Convert", "a", "BOOST_METAPARSE_STRING", "mode", "document", "into", "one", "with", "this", "mode" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L89-L124
29,160
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
Template.instantiate
def instantiate(self, value_of_n): """Instantiates the template""" template = Cheetah.Template.Template( self.content, searchList={'n': value_of_n} ) template.random_string = random_string return str(template)
python
def instantiate(self, value_of_n): """Instantiates the template""" template = Cheetah.Template.Template( self.content, searchList={'n': value_of_n} ) template.random_string = random_string return str(template)
[ "def", "instantiate", "(", "self", ",", "value_of_n", ")", ":", "template", "=", "Cheetah", ".", "Template", ".", "Template", "(", "self", ".", "content", ",", "searchList", "=", "{", "'n'", ":", "value_of_n", "}", ")", "template", ".", "random_string", "=", "random_string", "return", "str", "(", "template", ")" ]
Instantiates the template
[ "Instantiates", "the", "template" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L134-L141
29,161
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
Template.range
def range(self): """Returns the range for N""" match = self._match(in_comment( 'n[ \t]+in[ \t]*\\[([0-9]+)\\.\\.([0-9]+)\\),[ \t]+' 'step[ \t]+([0-9]+)' )) return range( int(match.group(1)), int(match.group(2)), int(match.group(3)) )
python
def range(self): """Returns the range for N""" match = self._match(in_comment( 'n[ \t]+in[ \t]*\\[([0-9]+)\\.\\.([0-9]+)\\),[ \t]+' 'step[ \t]+([0-9]+)' )) return range( int(match.group(1)), int(match.group(2)), int(match.group(3)) )
[ "def", "range", "(", "self", ")", ":", "match", "=", "self", ".", "_match", "(", "in_comment", "(", "'n[ \\t]+in[ \\t]*\\\\[([0-9]+)\\\\.\\\\.([0-9]+)\\\\),[ \\t]+'", "'step[ \\t]+([0-9]+)'", ")", ")", "return", "range", "(", "int", "(", "match", ".", "group", "(", "1", ")", ")", ",", "int", "(", "match", ".", "group", "(", "2", ")", ")", ",", "int", "(", "match", ".", "group", "(", "3", ")", ")", ")" ]
Returns the range for N
[ "Returns", "the", "range", "for", "N" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L143-L153
29,162
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
Template._match
def _match(self, regex): """Find the first line matching regex and return the match object""" cregex = re.compile(regex) for line in self.content.splitlines(): match = cregex.match(line) if match: return match raise Exception('No "{0}" line in {1}.cpp'.format( regex_to_error_msg(regex), self.name ))
python
def _match(self, regex): """Find the first line matching regex and return the match object""" cregex = re.compile(regex) for line in self.content.splitlines(): match = cregex.match(line) if match: return match raise Exception('No "{0}" line in {1}.cpp'.format( regex_to_error_msg(regex), self.name ))
[ "def", "_match", "(", "self", ",", "regex", ")", ":", "cregex", "=", "re", ".", "compile", "(", "regex", ")", "for", "line", "in", "self", ".", "content", ".", "splitlines", "(", ")", ":", "match", "=", "cregex", ".", "match", "(", "line", ")", "if", "match", ":", "return", "match", "raise", "Exception", "(", "'No \"{0}\" line in {1}.cpp'", ".", "format", "(", "regex_to_error_msg", "(", "regex", ")", ",", "self", ".", "name", ")", ")" ]
Find the first line matching regex and return the match object
[ "Find", "the", "first", "line", "matching", "regex", "and", "return", "the", "match", "object" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L163-L173
29,163
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/setup.py
generate_proto
def generate_proto(source, require = True): """Invokes the Protocol Compiler to generate a _pb2.py from the given .proto file. Does nothing if the output already exists and is newer than the input.""" if not require and not os.path.exists(source): return output = source.replace(".proto", "_pb2.py").replace("../src/", "") if (not os.path.exists(output) or (os.path.exists(source) and os.path.getmtime(source) > os.path.getmtime(output))): print("Generating %s..." % output) if not os.path.exists(source): sys.stderr.write("Can't find required file: %s\n" % source) sys.exit(-1) if protoc is None: sys.stderr.write( "protoc is not installed nor found in ../src. Please compile it " "or install the binary package.\n") sys.exit(-1) protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ] if subprocess.call(protoc_command) != 0: sys.exit(-1)
python
def generate_proto(source, require = True): """Invokes the Protocol Compiler to generate a _pb2.py from the given .proto file. Does nothing if the output already exists and is newer than the input.""" if not require and not os.path.exists(source): return output = source.replace(".proto", "_pb2.py").replace("../src/", "") if (not os.path.exists(output) or (os.path.exists(source) and os.path.getmtime(source) > os.path.getmtime(output))): print("Generating %s..." % output) if not os.path.exists(source): sys.stderr.write("Can't find required file: %s\n" % source) sys.exit(-1) if protoc is None: sys.stderr.write( "protoc is not installed nor found in ../src. Please compile it " "or install the binary package.\n") sys.exit(-1) protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ] if subprocess.call(protoc_command) != 0: sys.exit(-1)
[ "def", "generate_proto", "(", "source", ",", "require", "=", "True", ")", ":", "if", "not", "require", "and", "not", "os", ".", "path", ".", "exists", "(", "source", ")", ":", "return", "output", "=", "source", ".", "replace", "(", "\".proto\"", ",", "\"_pb2.py\"", ")", ".", "replace", "(", "\"../src/\"", ",", "\"\"", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "output", ")", "or", "(", "os", ".", "path", ".", "exists", "(", "source", ")", "and", "os", ".", "path", ".", "getmtime", "(", "source", ")", ">", "os", ".", "path", ".", "getmtime", "(", "output", ")", ")", ")", ":", "print", "(", "\"Generating %s...\"", "%", "output", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "source", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"Can't find required file: %s\\n\"", "%", "source", ")", "sys", ".", "exit", "(", "-", "1", ")", "if", "protoc", "is", "None", ":", "sys", ".", "stderr", ".", "write", "(", "\"protoc is not installed nor found in ../src. Please compile it \"", "\"or install the binary package.\\n\"", ")", "sys", ".", "exit", "(", "-", "1", ")", "protoc_command", "=", "[", "protoc", ",", "\"-I../src\"", ",", "\"-I.\"", ",", "\"--python_out=.\"", ",", "source", "]", "if", "subprocess", ".", "call", "(", "protoc_command", ")", "!=", "0", ":", "sys", ".", "exit", "(", "-", "1", ")" ]
Invokes the Protocol Compiler to generate a _pb2.py from the given .proto file. Does nothing if the output already exists and is newer than the input.
[ "Invokes", "the", "Protocol", "Compiler", "to", "generate", "a", "_pb2", ".", "py", "from", "the", "given", ".", "proto", "file", ".", "Does", "nothing", "if", "the", "output", "already", "exists", "and", "is", "newer", "than", "the", "input", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/setup.py#L50-L77
29,164
apple/turicreate
src/unity/python/turicreate/toolkits/_private_utils.py
_validate_row_label
def _validate_row_label(label, column_type_map): """ Validate a row label column. Parameters ---------- label : str Name of the row label column. column_type_map : dict[str, type] Dictionary mapping the name of each column in an SFrame to the type of the values in the column. """ if not isinstance(label, str): raise TypeError("The row label column name must be a string.") if not label in column_type_map.keys(): raise ToolkitError("Row label column not found in the dataset.") if not column_type_map[label] in (str, int): raise TypeError("Row labels must be integers or strings.")
python
def _validate_row_label(label, column_type_map): """ Validate a row label column. Parameters ---------- label : str Name of the row label column. column_type_map : dict[str, type] Dictionary mapping the name of each column in an SFrame to the type of the values in the column. """ if not isinstance(label, str): raise TypeError("The row label column name must be a string.") if not label in column_type_map.keys(): raise ToolkitError("Row label column not found in the dataset.") if not column_type_map[label] in (str, int): raise TypeError("Row labels must be integers or strings.")
[ "def", "_validate_row_label", "(", "label", ",", "column_type_map", ")", ":", "if", "not", "isinstance", "(", "label", ",", "str", ")", ":", "raise", "TypeError", "(", "\"The row label column name must be a string.\"", ")", "if", "not", "label", "in", "column_type_map", ".", "keys", "(", ")", ":", "raise", "ToolkitError", "(", "\"Row label column not found in the dataset.\"", ")", "if", "not", "column_type_map", "[", "label", "]", "in", "(", "str", ",", "int", ")", ":", "raise", "TypeError", "(", "\"Row labels must be integers or strings.\"", ")" ]
Validate a row label column. Parameters ---------- label : str Name of the row label column. column_type_map : dict[str, type] Dictionary mapping the name of each column in an SFrame to the type of the values in the column.
[ "Validate", "a", "row", "label", "column", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_private_utils.py#L13-L33
29,165
apple/turicreate
src/unity/python/turicreate/toolkits/_private_utils.py
_robust_column_name
def _robust_column_name(base_name, column_names): """ Generate a new column name that is guaranteed not to conflict with an existing set of column names. Parameters ---------- base_name : str The base of the new column name. Usually this does not conflict with the existing column names, in which case this function simply returns `base_name`. column_names : list[str] List of existing column names. Returns ------- robust_name : str The new column name. If `base_name` isn't in `column_names`, then `robust_name` is the same as `base_name`. If there are conflicts, a numeric suffix is added to `base_name` until it no longer conflicts with the column names. """ robust_name = base_name i = 1 while robust_name in column_names: robust_name = base_name + '.{}'.format(i) i += 1 return robust_name
python
def _robust_column_name(base_name, column_names): """ Generate a new column name that is guaranteed not to conflict with an existing set of column names. Parameters ---------- base_name : str The base of the new column name. Usually this does not conflict with the existing column names, in which case this function simply returns `base_name`. column_names : list[str] List of existing column names. Returns ------- robust_name : str The new column name. If `base_name` isn't in `column_names`, then `robust_name` is the same as `base_name`. If there are conflicts, a numeric suffix is added to `base_name` until it no longer conflicts with the column names. """ robust_name = base_name i = 1 while robust_name in column_names: robust_name = base_name + '.{}'.format(i) i += 1 return robust_name
[ "def", "_robust_column_name", "(", "base_name", ",", "column_names", ")", ":", "robust_name", "=", "base_name", "i", "=", "1", "while", "robust_name", "in", "column_names", ":", "robust_name", "=", "base_name", "+", "'.{}'", ".", "format", "(", "i", ")", "i", "+=", "1", "return", "robust_name" ]
Generate a new column name that is guaranteed not to conflict with an existing set of column names. Parameters ---------- base_name : str The base of the new column name. Usually this does not conflict with the existing column names, in which case this function simply returns `base_name`. column_names : list[str] List of existing column names. Returns ------- robust_name : str The new column name. If `base_name` isn't in `column_names`, then `robust_name` is the same as `base_name`. If there are conflicts, a numeric suffix is added to `base_name` until it no longer conflicts with the column names.
[ "Generate", "a", "new", "column", "name", "that", "is", "guaranteed", "not", "to", "conflict", "with", "an", "existing", "set", "of", "column", "names", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_private_utils.py#L36-L66
29,166
apple/turicreate
src/unity/python/turicreate/toolkits/_private_utils.py
_select_valid_features
def _select_valid_features(dataset, features, valid_feature_types, target_column=None): """ Utility function for selecting columns of only valid feature types. Parameters ---------- dataset: SFrame The input SFrame containing columns of potential features. features: list[str] List of feature column names. If None, the candidate feature set is taken to be all the columns in the dataset. valid_feature_types: list[type] List of Python types that represent valid features. If type is array.array, then an extra check is done to ensure that the individual elements of the array are of numeric type. If type is dict, then an extra check is done to ensure that dictionary values are numeric. target_column: str Name of the target column. If not None, the target column is excluded from the list of valid feature columns. Returns ------- out: list[str] List of valid feature column names. Warnings are given for each candidate feature column that is excluded. Examples -------- # Select all the columns of type `str` in sf, excluding the target column named # 'rating' >>> valid_columns = _select_valid_features(sf, None, [str], target_column='rating') # Select the subset of columns 'X1', 'X2', 'X3' that has dictionary type or defines # numeric array type >>> valid_columns = _select_valid_features(sf, ['X1', 'X2', 'X3'], [dict, array.array]) """ if features is not None: if not hasattr(features, '__iter__'): raise TypeError("Input 'features' must be an iterable type.") if not all([isinstance(x, str) for x in features]): raise TypeError("Input 'features' must contain only strings.") ## Extract the features and labels if features is None: features = dataset.column_names() col_type_map = { col_name: col_type for (col_name, col_type) in zip(dataset.column_names(), dataset.column_types())} valid_features = [] for col_name in features: if col_name not in dataset.column_names(): _logging.warning("Column '{}' is not in the input dataset.".format(col_name)) elif col_name == target_column: 
_logging.warning("Excluding target column " + target_column + " as a feature.") elif col_type_map[col_name] not in valid_feature_types: _logging.warning("Column '{}' is excluded as a ".format(col_name) + "feature due to invalid column type.") else: valid_features.append(col_name) if len(valid_features) == 0: raise ValueError("The dataset does not contain any valid feature columns. " + "Accepted feature types are " + str(valid_feature_types) + ".") return valid_features
python
def _select_valid_features(dataset, features, valid_feature_types, target_column=None): """ Utility function for selecting columns of only valid feature types. Parameters ---------- dataset: SFrame The input SFrame containing columns of potential features. features: list[str] List of feature column names. If None, the candidate feature set is taken to be all the columns in the dataset. valid_feature_types: list[type] List of Python types that represent valid features. If type is array.array, then an extra check is done to ensure that the individual elements of the array are of numeric type. If type is dict, then an extra check is done to ensure that dictionary values are numeric. target_column: str Name of the target column. If not None, the target column is excluded from the list of valid feature columns. Returns ------- out: list[str] List of valid feature column names. Warnings are given for each candidate feature column that is excluded. Examples -------- # Select all the columns of type `str` in sf, excluding the target column named # 'rating' >>> valid_columns = _select_valid_features(sf, None, [str], target_column='rating') # Select the subset of columns 'X1', 'X2', 'X3' that has dictionary type or defines # numeric array type >>> valid_columns = _select_valid_features(sf, ['X1', 'X2', 'X3'], [dict, array.array]) """ if features is not None: if not hasattr(features, '__iter__'): raise TypeError("Input 'features' must be an iterable type.") if not all([isinstance(x, str) for x in features]): raise TypeError("Input 'features' must contain only strings.") ## Extract the features and labels if features is None: features = dataset.column_names() col_type_map = { col_name: col_type for (col_name, col_type) in zip(dataset.column_names(), dataset.column_types())} valid_features = [] for col_name in features: if col_name not in dataset.column_names(): _logging.warning("Column '{}' is not in the input dataset.".format(col_name)) elif col_name == target_column: 
_logging.warning("Excluding target column " + target_column + " as a feature.") elif col_type_map[col_name] not in valid_feature_types: _logging.warning("Column '{}' is excluded as a ".format(col_name) + "feature due to invalid column type.") else: valid_features.append(col_name) if len(valid_features) == 0: raise ValueError("The dataset does not contain any valid feature columns. " + "Accepted feature types are " + str(valid_feature_types) + ".") return valid_features
[ "def", "_select_valid_features", "(", "dataset", ",", "features", ",", "valid_feature_types", ",", "target_column", "=", "None", ")", ":", "if", "features", "is", "not", "None", ":", "if", "not", "hasattr", "(", "features", ",", "'__iter__'", ")", ":", "raise", "TypeError", "(", "\"Input 'features' must be an iterable type.\"", ")", "if", "not", "all", "(", "[", "isinstance", "(", "x", ",", "str", ")", "for", "x", "in", "features", "]", ")", ":", "raise", "TypeError", "(", "\"Input 'features' must contain only strings.\"", ")", "## Extract the features and labels", "if", "features", "is", "None", ":", "features", "=", "dataset", ".", "column_names", "(", ")", "col_type_map", "=", "{", "col_name", ":", "col_type", "for", "(", "col_name", ",", "col_type", ")", "in", "zip", "(", "dataset", ".", "column_names", "(", ")", ",", "dataset", ".", "column_types", "(", ")", ")", "}", "valid_features", "=", "[", "]", "for", "col_name", "in", "features", ":", "if", "col_name", "not", "in", "dataset", ".", "column_names", "(", ")", ":", "_logging", ".", "warning", "(", "\"Column '{}' is not in the input dataset.\"", ".", "format", "(", "col_name", ")", ")", "elif", "col_name", "==", "target_column", ":", "_logging", ".", "warning", "(", "\"Excluding target column \"", "+", "target_column", "+", "\" as a feature.\"", ")", "elif", "col_type_map", "[", "col_name", "]", "not", "in", "valid_feature_types", ":", "_logging", ".", "warning", "(", "\"Column '{}' is excluded as a \"", ".", "format", "(", "col_name", ")", "+", "\"feature due to invalid column type.\"", ")", "else", ":", "valid_features", ".", "append", "(", "col_name", ")", "if", "len", "(", "valid_features", ")", "==", "0", ":", "raise", "ValueError", "(", "\"The dataset does not contain any valid feature columns. \"", "+", "\"Accepted feature types are \"", "+", "str", "(", "valid_feature_types", ")", "+", "\".\"", ")", "return", "valid_features" ]
Utility function for selecting columns of only valid feature types. Parameters ---------- dataset: SFrame The input SFrame containing columns of potential features. features: list[str] List of feature column names. If None, the candidate feature set is taken to be all the columns in the dataset. valid_feature_types: list[type] List of Python types that represent valid features. If type is array.array, then an extra check is done to ensure that the individual elements of the array are of numeric type. If type is dict, then an extra check is done to ensure that dictionary values are numeric. target_column: str Name of the target column. If not None, the target column is excluded from the list of valid feature columns. Returns ------- out: list[str] List of valid feature column names. Warnings are given for each candidate feature column that is excluded. Examples -------- # Select all the columns of type `str` in sf, excluding the target column named # 'rating' >>> valid_columns = _select_valid_features(sf, None, [str], target_column='rating') # Select the subset of columns 'X1', 'X2', 'X3' that has dictionary type or defines # numeric array type >>> valid_columns = _select_valid_features(sf, ['X1', 'X2', 'X3'], [dict, array.array])
[ "Utility", "function", "for", "selecting", "columns", "of", "only", "valid", "feature", "types", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_private_utils.py#L68-L143
29,167
apple/turicreate
src/unity/python/turicreate/toolkits/_private_utils.py
_check_elements_equal
def _check_elements_equal(lst): """ Returns true if all of the elements in the list are equal. """ assert isinstance(lst, list), "Input value must be a list." return not lst or lst.count(lst[0]) == len(lst)
python
def _check_elements_equal(lst): """ Returns true if all of the elements in the list are equal. """ assert isinstance(lst, list), "Input value must be a list." return not lst or lst.count(lst[0]) == len(lst)
[ "def", "_check_elements_equal", "(", "lst", ")", ":", "assert", "isinstance", "(", "lst", ",", "list", ")", ",", "\"Input value must be a list.\"", "return", "not", "lst", "or", "lst", ".", "count", "(", "lst", "[", "0", "]", ")", "==", "len", "(", "lst", ")" ]
Returns true if all of the elements in the list are equal.
[ "Returns", "true", "if", "all", "of", "the", "elements", "in", "the", "list", "are", "equal", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_private_utils.py#L145-L150
29,168
apple/turicreate
src/unity/python/turicreate/toolkits/_private_utils.py
_summarize_accessible_fields
def _summarize_accessible_fields(field_descriptions, width=40, section_title='Accessible fields'): """ Create a summary string for the accessible fields in a model. Unlike `_toolkit_repr_print`, this function does not look up the values of the fields, it just formats the names and descriptions. Parameters ---------- field_descriptions : dict{str: str} Name of each field and its description, in a dictionary. Keys and values should be strings. width : int, optional Width of the names. This is usually determined and passed by the calling `__repr__` method. section_title : str, optional Name of the accessible fields section in the summary string. Returns ------- out : str """ key_str = "{:<{}}: {}" items = [] items.append(section_title) items.append("-" * len(section_title)) for field_name, field_desc in field_descriptions.items(): items.append(key_str.format(field_name, width, field_desc)) return "\n".join(items)
python
def _summarize_accessible_fields(field_descriptions, width=40, section_title='Accessible fields'): """ Create a summary string for the accessible fields in a model. Unlike `_toolkit_repr_print`, this function does not look up the values of the fields, it just formats the names and descriptions. Parameters ---------- field_descriptions : dict{str: str} Name of each field and its description, in a dictionary. Keys and values should be strings. width : int, optional Width of the names. This is usually determined and passed by the calling `__repr__` method. section_title : str, optional Name of the accessible fields section in the summary string. Returns ------- out : str """ key_str = "{:<{}}: {}" items = [] items.append(section_title) items.append("-" * len(section_title)) for field_name, field_desc in field_descriptions.items(): items.append(key_str.format(field_name, width, field_desc)) return "\n".join(items)
[ "def", "_summarize_accessible_fields", "(", "field_descriptions", ",", "width", "=", "40", ",", "section_title", "=", "'Accessible fields'", ")", ":", "key_str", "=", "\"{:<{}}: {}\"", "items", "=", "[", "]", "items", ".", "append", "(", "section_title", ")", "items", ".", "append", "(", "\"-\"", "*", "len", "(", "section_title", ")", ")", "for", "field_name", ",", "field_desc", "in", "field_descriptions", ".", "items", "(", ")", ":", "items", ".", "append", "(", "key_str", ".", "format", "(", "field_name", ",", "width", ",", "field_desc", ")", ")", "return", "\"\\n\"", ".", "join", "(", "items", ")" ]
Create a summary string for the accessible fields in a model. Unlike `_toolkit_repr_print`, this function does not look up the values of the fields, it just formats the names and descriptions. Parameters ---------- field_descriptions : dict{str: str} Name of each field and its description, in a dictionary. Keys and values should be strings. width : int, optional Width of the names. This is usually determined and passed by the calling `__repr__` method. section_title : str, optional Name of the accessible fields section in the summary string. Returns ------- out : str
[ "Create", "a", "summary", "string", "for", "the", "accessible", "fields", "in", "a", "model", ".", "Unlike", "_toolkit_repr_print", "this", "function", "does", "not", "look", "up", "the", "values", "of", "the", "fields", "it", "just", "formats", "the", "names", "and", "descriptions", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_private_utils.py#L219-L252
29,169
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/datatypes.py
_is_valid_datatype
def _is_valid_datatype(datatype_instance): """ Returns true if datatype_instance is a valid datatype object and false otherwise. """ # Remap so we can still use the python types for the simple cases global _simple_type_remap if datatype_instance in _simple_type_remap: return True # Now set the protobuf from this interface. if isinstance(datatype_instance, (Int64, Double, String, Array)): return True elif isinstance(datatype_instance, Dictionary): kt = datatype_instance.key_type if isinstance(kt, (Int64, String)): return True return False
python
def _is_valid_datatype(datatype_instance): """ Returns true if datatype_instance is a valid datatype object and false otherwise. """ # Remap so we can still use the python types for the simple cases global _simple_type_remap if datatype_instance in _simple_type_remap: return True # Now set the protobuf from this interface. if isinstance(datatype_instance, (Int64, Double, String, Array)): return True elif isinstance(datatype_instance, Dictionary): kt = datatype_instance.key_type if isinstance(kt, (Int64, String)): return True return False
[ "def", "_is_valid_datatype", "(", "datatype_instance", ")", ":", "# Remap so we can still use the python types for the simple cases", "global", "_simple_type_remap", "if", "datatype_instance", "in", "_simple_type_remap", ":", "return", "True", "# Now set the protobuf from this interface.", "if", "isinstance", "(", "datatype_instance", ",", "(", "Int64", ",", "Double", ",", "String", ",", "Array", ")", ")", ":", "return", "True", "elif", "isinstance", "(", "datatype_instance", ",", "Dictionary", ")", ":", "kt", "=", "datatype_instance", ".", "key_type", "if", "isinstance", "(", "kt", ",", "(", "Int64", ",", "String", ")", ")", ":", "return", "True", "return", "False" ]
Returns true if datatype_instance is a valid datatype object and false otherwise.
[ "Returns", "true", "if", "datatype_instance", "is", "a", "valid", "datatype", "object", "and", "false", "otherwise", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/datatypes.py#L130-L150
29,170
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/datatypes.py
_normalize_datatype
def _normalize_datatype(datatype_instance): """ Translates a user specified datatype to an instance of the ones defined above. Valid data types are passed through, and the following type specifications are translated to the proper instances: str, "String" -> String() int, "Int64" -> Int64() float, "Double" -> Double() If a data type is not recognized, then an error is raised. """ global _simple_type_remap if datatype_instance in _simple_type_remap: return _simple_type_remap[datatype_instance] # Now set the protobuf from this interface. if isinstance(datatype_instance, (Int64, Double, String, Array)): return datatype_instance elif isinstance(datatype_instance, Dictionary): kt = datatype_instance.key_type if isinstance(kt, (Int64, String)): return datatype_instance raise ValueError("Datatype instance not recognized.")
python
def _normalize_datatype(datatype_instance): """ Translates a user specified datatype to an instance of the ones defined above. Valid data types are passed through, and the following type specifications are translated to the proper instances: str, "String" -> String() int, "Int64" -> Int64() float, "Double" -> Double() If a data type is not recognized, then an error is raised. """ global _simple_type_remap if datatype_instance in _simple_type_remap: return _simple_type_remap[datatype_instance] # Now set the protobuf from this interface. if isinstance(datatype_instance, (Int64, Double, String, Array)): return datatype_instance elif isinstance(datatype_instance, Dictionary): kt = datatype_instance.key_type if isinstance(kt, (Int64, String)): return datatype_instance raise ValueError("Datatype instance not recognized.")
[ "def", "_normalize_datatype", "(", "datatype_instance", ")", ":", "global", "_simple_type_remap", "if", "datatype_instance", "in", "_simple_type_remap", ":", "return", "_simple_type_remap", "[", "datatype_instance", "]", "# Now set the protobuf from this interface.", "if", "isinstance", "(", "datatype_instance", ",", "(", "Int64", ",", "Double", ",", "String", ",", "Array", ")", ")", ":", "return", "datatype_instance", "elif", "isinstance", "(", "datatype_instance", ",", "Dictionary", ")", ":", "kt", "=", "datatype_instance", ".", "key_type", "if", "isinstance", "(", "kt", ",", "(", "Int64", ",", "String", ")", ")", ":", "return", "datatype_instance", "raise", "ValueError", "(", "\"Datatype instance not recognized.\"", ")" ]
Translates a user specified datatype to an instance of the ones defined above. Valid data types are passed through, and the following type specifications are translated to the proper instances: str, "String" -> String() int, "Int64" -> Int64() float, "Double" -> Double() If a data type is not recognized, then an error is raised.
[ "Translates", "a", "user", "specified", "datatype", "to", "an", "instance", "of", "the", "ones", "defined", "above", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/datatypes.py#L152-L179
29,171
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/util/order.py
Order.order
def order (self, objects): """ Given a list of objects, reorder them so that the constains specified by 'add_pair' are satisfied. The algorithm was adopted from an awk script by Nikita Youshchenko (yoush at cs dot msu dot su) """ # The algorithm used is the same is standard transitive closure, # except that we're not keeping in-degree for all vertices, but # rather removing edges. result = [] if not objects: return result constraints = self.__eliminate_unused_constraits (objects) # Find some library that nobody depends upon and add it to # the 'result' array. obj = None while objects: new_objects = [] while objects: obj = objects [0] if self.__has_no_dependents (obj, constraints): # Emulate break ; new_objects.extend (objects [1:]) objects = [] else: new_objects.append (obj) obj = None objects = objects [1:] if not obj: raise BaseException ("Circular order dependencies") # No problem with placing first. result.append (obj) # Remove all containts where 'obj' comes first, # since they are already satisfied. constraints = self.__remove_satisfied (constraints, obj) # Add the remaining objects for further processing # on the next iteration objects = new_objects return result
python
def order (self, objects): """ Given a list of objects, reorder them so that the constains specified by 'add_pair' are satisfied. The algorithm was adopted from an awk script by Nikita Youshchenko (yoush at cs dot msu dot su) """ # The algorithm used is the same is standard transitive closure, # except that we're not keeping in-degree for all vertices, but # rather removing edges. result = [] if not objects: return result constraints = self.__eliminate_unused_constraits (objects) # Find some library that nobody depends upon and add it to # the 'result' array. obj = None while objects: new_objects = [] while objects: obj = objects [0] if self.__has_no_dependents (obj, constraints): # Emulate break ; new_objects.extend (objects [1:]) objects = [] else: new_objects.append (obj) obj = None objects = objects [1:] if not obj: raise BaseException ("Circular order dependencies") # No problem with placing first. result.append (obj) # Remove all containts where 'obj' comes first, # since they are already satisfied. constraints = self.__remove_satisfied (constraints, obj) # Add the remaining objects for further processing # on the next iteration objects = new_objects return result
[ "def", "order", "(", "self", ",", "objects", ")", ":", "# The algorithm used is the same is standard transitive closure,", "# except that we're not keeping in-degree for all vertices, but", "# rather removing edges.", "result", "=", "[", "]", "if", "not", "objects", ":", "return", "result", "constraints", "=", "self", ".", "__eliminate_unused_constraits", "(", "objects", ")", "# Find some library that nobody depends upon and add it to", "# the 'result' array.", "obj", "=", "None", "while", "objects", ":", "new_objects", "=", "[", "]", "while", "objects", ":", "obj", "=", "objects", "[", "0", "]", "if", "self", ".", "__has_no_dependents", "(", "obj", ",", "constraints", ")", ":", "# Emulate break ;", "new_objects", ".", "extend", "(", "objects", "[", "1", ":", "]", ")", "objects", "=", "[", "]", "else", ":", "new_objects", ".", "append", "(", "obj", ")", "obj", "=", "None", "objects", "=", "objects", "[", "1", ":", "]", "if", "not", "obj", ":", "raise", "BaseException", "(", "\"Circular order dependencies\"", ")", "# No problem with placing first.", "result", ".", "append", "(", "obj", ")", "# Remove all containts where 'obj' comes first,", "# since they are already satisfied.", "constraints", "=", "self", ".", "__remove_satisfied", "(", "constraints", ",", "obj", ")", "# Add the remaining objects for further processing", "# on the next iteration", "objects", "=", "new_objects", "return", "result" ]
Given a list of objects, reorder them so that the constains specified by 'add_pair' are satisfied. The algorithm was adopted from an awk script by Nikita Youshchenko (yoush at cs dot msu dot su)
[ "Given", "a", "list", "of", "objects", "reorder", "them", "so", "that", "the", "constains", "specified", "by", "add_pair", "are", "satisfied", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/order.py#L37-L86
29,172
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/util/order.py
Order.__eliminate_unused_constraits
def __eliminate_unused_constraits (self, objects): """ Eliminate constraints which mention objects not in 'objects'. In graph-theory terms, this is finding subgraph induced by ordered vertices. """ result = [] for c in self.constraints_: if c [0] in objects and c [1] in objects: result.append (c) return result
python
def __eliminate_unused_constraits (self, objects): """ Eliminate constraints which mention objects not in 'objects'. In graph-theory terms, this is finding subgraph induced by ordered vertices. """ result = [] for c in self.constraints_: if c [0] in objects and c [1] in objects: result.append (c) return result
[ "def", "__eliminate_unused_constraits", "(", "self", ",", "objects", ")", ":", "result", "=", "[", "]", "for", "c", "in", "self", ".", "constraints_", ":", "if", "c", "[", "0", "]", "in", "objects", "and", "c", "[", "1", "]", "in", "objects", ":", "result", ".", "append", "(", "c", ")", "return", "result" ]
Eliminate constraints which mention objects not in 'objects'. In graph-theory terms, this is finding subgraph induced by ordered vertices.
[ "Eliminate", "constraints", "which", "mention", "objects", "not", "in", "objects", ".", "In", "graph", "-", "theory", "terms", "this", "is", "finding", "subgraph", "induced", "by", "ordered", "vertices", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/order.py#L88-L98
29,173
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/util/order.py
Order.__has_no_dependents
def __has_no_dependents (self, obj, constraints): """ Returns true if there's no constraint in 'constraints' where 'obj' comes second. """ failed = False while constraints and not failed: c = constraints [0] if c [1] == obj: failed = True constraints = constraints [1:] return not failed
python
def __has_no_dependents (self, obj, constraints): """ Returns true if there's no constraint in 'constraints' where 'obj' comes second. """ failed = False while constraints and not failed: c = constraints [0] if c [1] == obj: failed = True constraints = constraints [1:] return not failed
[ "def", "__has_no_dependents", "(", "self", ",", "obj", ",", "constraints", ")", ":", "failed", "=", "False", "while", "constraints", "and", "not", "failed", ":", "c", "=", "constraints", "[", "0", "]", "if", "c", "[", "1", "]", "==", "obj", ":", "failed", "=", "True", "constraints", "=", "constraints", "[", "1", ":", "]", "return", "not", "failed" ]
Returns true if there's no constraint in 'constraints' where 'obj' comes second.
[ "Returns", "true", "if", "there", "s", "no", "constraint", "in", "constraints", "where", "obj", "comes", "second", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/order.py#L100-L113
29,174
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
path_order
def path_order (x, y): """ Helper for as_path, below. Orders properties with the implicit ones first, and within the two sections in alphabetical order of feature name. """ if x == y: return 0 xg = get_grist (x) yg = get_grist (y) if yg and not xg: return -1 elif xg and not yg: return 1 else: if not xg: x = feature.expand_subfeatures([x]) y = feature.expand_subfeatures([y]) if x < y: return -1 elif x > y: return 1 else: return 0
python
def path_order (x, y): """ Helper for as_path, below. Orders properties with the implicit ones first, and within the two sections in alphabetical order of feature name. """ if x == y: return 0 xg = get_grist (x) yg = get_grist (y) if yg and not xg: return -1 elif xg and not yg: return 1 else: if not xg: x = feature.expand_subfeatures([x]) y = feature.expand_subfeatures([y]) if x < y: return -1 elif x > y: return 1 else: return 0
[ "def", "path_order", "(", "x", ",", "y", ")", ":", "if", "x", "==", "y", ":", "return", "0", "xg", "=", "get_grist", "(", "x", ")", "yg", "=", "get_grist", "(", "y", ")", "if", "yg", "and", "not", "xg", ":", "return", "-", "1", "elif", "xg", "and", "not", "yg", ":", "return", "1", "else", ":", "if", "not", "xg", ":", "x", "=", "feature", ".", "expand_subfeatures", "(", "[", "x", "]", ")", "y", "=", "feature", ".", "expand_subfeatures", "(", "[", "y", "]", ")", "if", "x", "<", "y", ":", "return", "-", "1", "elif", "x", ">", "y", ":", "return", "1", "else", ":", "return", "0" ]
Helper for as_path, below. Orders properties with the implicit ones first, and within the two sections in alphabetical order of feature name.
[ "Helper", "for", "as_path", "below", ".", "Orders", "properties", "with", "the", "implicit", "ones", "first", "and", "within", "the", "two", "sections", "in", "alphabetical", "order", "of", "feature", "name", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L244-L271
29,175
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
refine
def refine (properties, requirements): """ Refines 'properties' by overriding any non-free properties for which a different value is specified in 'requirements'. Conditional requirements are just added without modification. Returns the resulting list of properties. """ assert is_iterable_typed(properties, Property) assert is_iterable_typed(requirements, Property) # The result has no duplicates, so we store it in a set result = set() # Records all requirements. required = {} # All the elements of requirements should be present in the result # Record them so that we can handle 'properties'. for r in requirements: # Don't consider conditional requirements. if not r.condition: required[r.feature] = r for p in properties: # Skip conditional properties if p.condition: result.add(p) # No processing for free properties elif p.feature.free: result.add(p) else: if p.feature in required: result.add(required[p.feature]) else: result.add(p) return sequence.unique(list(result) + requirements)
python
def refine (properties, requirements): """ Refines 'properties' by overriding any non-free properties for which a different value is specified in 'requirements'. Conditional requirements are just added without modification. Returns the resulting list of properties. """ assert is_iterable_typed(properties, Property) assert is_iterable_typed(requirements, Property) # The result has no duplicates, so we store it in a set result = set() # Records all requirements. required = {} # All the elements of requirements should be present in the result # Record them so that we can handle 'properties'. for r in requirements: # Don't consider conditional requirements. if not r.condition: required[r.feature] = r for p in properties: # Skip conditional properties if p.condition: result.add(p) # No processing for free properties elif p.feature.free: result.add(p) else: if p.feature in required: result.add(required[p.feature]) else: result.add(p) return sequence.unique(list(result) + requirements)
[ "def", "refine", "(", "properties", ",", "requirements", ")", ":", "assert", "is_iterable_typed", "(", "properties", ",", "Property", ")", "assert", "is_iterable_typed", "(", "requirements", ",", "Property", ")", "# The result has no duplicates, so we store it in a set", "result", "=", "set", "(", ")", "# Records all requirements.", "required", "=", "{", "}", "# All the elements of requirements should be present in the result", "# Record them so that we can handle 'properties'.", "for", "r", "in", "requirements", ":", "# Don't consider conditional requirements.", "if", "not", "r", ".", "condition", ":", "required", "[", "r", ".", "feature", "]", "=", "r", "for", "p", "in", "properties", ":", "# Skip conditional properties", "if", "p", ".", "condition", ":", "result", ".", "add", "(", "p", ")", "# No processing for free properties", "elif", "p", ".", "feature", ".", "free", ":", "result", ".", "add", "(", "p", ")", "else", ":", "if", "p", ".", "feature", "in", "required", ":", "result", ".", "add", "(", "required", "[", "p", ".", "feature", "]", ")", "else", ":", "result", ".", "add", "(", "p", ")", "return", "sequence", ".", "unique", "(", "list", "(", "result", ")", "+", "requirements", ")" ]
Refines 'properties' by overriding any non-free properties for which a different value is specified in 'requirements'. Conditional requirements are just added without modification. Returns the resulting list of properties.
[ "Refines", "properties", "by", "overriding", "any", "non", "-", "free", "properties", "for", "which", "a", "different", "value", "is", "specified", "in", "requirements", ".", "Conditional", "requirements", "are", "just", "added", "without", "modification", ".", "Returns", "the", "resulting", "list", "of", "properties", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L277-L311
29,176
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
translate_paths
def translate_paths (properties, path): """ Interpret all path properties in 'properties' as relative to 'path' The property values are assumed to be in system-specific form, and will be translated into normalized form. """ assert is_iterable_typed(properties, Property) result = [] for p in properties: if p.feature.path: values = __re_two_ampersands.split(p.value) new_value = "&&".join(os.path.normpath(os.path.join(path, v)) for v in values) if new_value != p.value: result.append(Property(p.feature, new_value, p.condition)) else: result.append(p) else: result.append (p) return result
python
def translate_paths (properties, path): """ Interpret all path properties in 'properties' as relative to 'path' The property values are assumed to be in system-specific form, and will be translated into normalized form. """ assert is_iterable_typed(properties, Property) result = [] for p in properties: if p.feature.path: values = __re_two_ampersands.split(p.value) new_value = "&&".join(os.path.normpath(os.path.join(path, v)) for v in values) if new_value != p.value: result.append(Property(p.feature, new_value, p.condition)) else: result.append(p) else: result.append (p) return result
[ "def", "translate_paths", "(", "properties", ",", "path", ")", ":", "assert", "is_iterable_typed", "(", "properties", ",", "Property", ")", "result", "=", "[", "]", "for", "p", "in", "properties", ":", "if", "p", ".", "feature", ".", "path", ":", "values", "=", "__re_two_ampersands", ".", "split", "(", "p", ".", "value", ")", "new_value", "=", "\"&&\"", ".", "join", "(", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "path", ",", "v", ")", ")", "for", "v", "in", "values", ")", "if", "new_value", "!=", "p", ".", "value", ":", "result", ".", "append", "(", "Property", "(", "p", ".", "feature", ",", "new_value", ",", "p", ".", "condition", ")", ")", "else", ":", "result", ".", "append", "(", "p", ")", "else", ":", "result", ".", "append", "(", "p", ")", "return", "result" ]
Interpret all path properties in 'properties' as relative to 'path' The property values are assumed to be in system-specific form, and will be translated into normalized form.
[ "Interpret", "all", "path", "properties", "in", "properties", "as", "relative", "to", "path", "The", "property", "values", "are", "assumed", "to", "be", "in", "system", "-", "specific", "form", "and", "will", "be", "translated", "into", "normalized", "form", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L313-L336
29,177
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
translate_indirect
def translate_indirect(properties, context_module): """Assumes that all feature values that start with '@' are names of rules, used in 'context-module'. Such rules can be either local to the module or global. Qualified local rules with the name of the module.""" assert is_iterable_typed(properties, Property) assert isinstance(context_module, basestring) result = [] for p in properties: if p.value[0] == '@': q = qualify_jam_action(p.value[1:], context_module) get_manager().engine().register_bjam_action(q) result.append(Property(p.feature, '@' + q, p.condition)) else: result.append(p) return result
python
def translate_indirect(properties, context_module): """Assumes that all feature values that start with '@' are names of rules, used in 'context-module'. Such rules can be either local to the module or global. Qualified local rules with the name of the module.""" assert is_iterable_typed(properties, Property) assert isinstance(context_module, basestring) result = [] for p in properties: if p.value[0] == '@': q = qualify_jam_action(p.value[1:], context_module) get_manager().engine().register_bjam_action(q) result.append(Property(p.feature, '@' + q, p.condition)) else: result.append(p) return result
[ "def", "translate_indirect", "(", "properties", ",", "context_module", ")", ":", "assert", "is_iterable_typed", "(", "properties", ",", "Property", ")", "assert", "isinstance", "(", "context_module", ",", "basestring", ")", "result", "=", "[", "]", "for", "p", "in", "properties", ":", "if", "p", ".", "value", "[", "0", "]", "==", "'@'", ":", "q", "=", "qualify_jam_action", "(", "p", ".", "value", "[", "1", ":", "]", ",", "context_module", ")", "get_manager", "(", ")", ".", "engine", "(", ")", ".", "register_bjam_action", "(", "q", ")", "result", ".", "append", "(", "Property", "(", "p", ".", "feature", ",", "'@'", "+", "q", ",", "p", ".", "condition", ")", ")", "else", ":", "result", ".", "append", "(", "p", ")", "return", "result" ]
Assumes that all feature values that start with '@' are names of rules, used in 'context-module'. Such rules can be either local to the module or global. Qualified local rules with the name of the module.
[ "Assumes", "that", "all", "feature", "values", "that", "start", "with" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L338-L354
29,178
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
validate
def validate (properties): """ Exit with error if any of the properties is not valid. properties may be a single property or a sequence of properties. """ if isinstance(properties, Property): properties = [properties] assert is_iterable_typed(properties, Property) for p in properties: __validate1(p)
python
def validate (properties): """ Exit with error if any of the properties is not valid. properties may be a single property or a sequence of properties. """ if isinstance(properties, Property): properties = [properties] assert is_iterable_typed(properties, Property) for p in properties: __validate1(p)
[ "def", "validate", "(", "properties", ")", ":", "if", "isinstance", "(", "properties", ",", "Property", ")", ":", "properties", "=", "[", "properties", "]", "assert", "is_iterable_typed", "(", "properties", ",", "Property", ")", "for", "p", "in", "properties", ":", "__validate1", "(", "p", ")" ]
Exit with error if any of the properties is not valid. properties may be a single property or a sequence of properties.
[ "Exit", "with", "error", "if", "any", "of", "the", "properties", "is", "not", "valid", ".", "properties", "may", "be", "a", "single", "property", "or", "a", "sequence", "of", "properties", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L356-L364
29,179
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
select
def select (features, properties): """ Selects properties which correspond to any of the given features. """ assert is_iterable_typed(properties, basestring) result = [] # add any missing angle brackets features = add_grist (features) return [p for p in properties if get_grist(p) in features]
python
def select (features, properties): """ Selects properties which correspond to any of the given features. """ assert is_iterable_typed(properties, basestring) result = [] # add any missing angle brackets features = add_grist (features) return [p for p in properties if get_grist(p) in features]
[ "def", "select", "(", "features", ",", "properties", ")", ":", "assert", "is_iterable_typed", "(", "properties", ",", "basestring", ")", "result", "=", "[", "]", "# add any missing angle brackets", "features", "=", "add_grist", "(", "features", ")", "return", "[", "p", "for", "p", "in", "properties", "if", "get_grist", "(", "p", ")", "in", "features", "]" ]
Selects properties which correspond to any of the given features.
[ "Selects", "properties", "which", "correspond", "to", "any", "of", "the", "given", "features", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L410-L419
29,180
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
evaluate_conditionals_in_context
def evaluate_conditionals_in_context (properties, context): """ Removes all conditional properties which conditions are not met For those with met conditions, removes the condition. Properies in conditions are looked up in 'context' """ if __debug__: from .property_set import PropertySet assert is_iterable_typed(properties, Property) assert isinstance(context, PropertySet) base = [] conditional = [] for p in properties: if p.condition: conditional.append (p) else: base.append (p) result = base[:] for p in conditional: # Evaluate condition # FIXME: probably inefficient if all(x in context for x in p.condition): result.append(Property(p.feature, p.value)) return result
python
def evaluate_conditionals_in_context (properties, context): """ Removes all conditional properties which conditions are not met For those with met conditions, removes the condition. Properies in conditions are looked up in 'context' """ if __debug__: from .property_set import PropertySet assert is_iterable_typed(properties, Property) assert isinstance(context, PropertySet) base = [] conditional = [] for p in properties: if p.condition: conditional.append (p) else: base.append (p) result = base[:] for p in conditional: # Evaluate condition # FIXME: probably inefficient if all(x in context for x in p.condition): result.append(Property(p.feature, p.value)) return result
[ "def", "evaluate_conditionals_in_context", "(", "properties", ",", "context", ")", ":", "if", "__debug__", ":", "from", ".", "property_set", "import", "PropertySet", "assert", "is_iterable_typed", "(", "properties", ",", "Property", ")", "assert", "isinstance", "(", "context", ",", "PropertySet", ")", "base", "=", "[", "]", "conditional", "=", "[", "]", "for", "p", "in", "properties", ":", "if", "p", ".", "condition", ":", "conditional", ".", "append", "(", "p", ")", "else", ":", "base", ".", "append", "(", "p", ")", "result", "=", "base", "[", ":", "]", "for", "p", "in", "conditional", ":", "# Evaluate condition", "# FIXME: probably inefficient", "if", "all", "(", "x", "in", "context", "for", "x", "in", "p", ".", "condition", ")", ":", "result", ".", "append", "(", "Property", "(", "p", ".", "feature", ",", "p", ".", "value", ")", ")", "return", "result" ]
Removes all conditional properties which conditions are not met For those with met conditions, removes the condition. Properies in conditions are looked up in 'context'
[ "Removes", "all", "conditional", "properties", "which", "conditions", "are", "not", "met", "For", "those", "with", "met", "conditions", "removes", "the", "condition", ".", "Properies", "in", "conditions", "are", "looked", "up", "in", "context" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L428-L454
29,181
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
change
def change (properties, feature, value = None): """ Returns a modified version of properties with all values of the given feature replaced by the given value. If 'value' is None the feature will be removed. """ assert is_iterable_typed(properties, basestring) assert isinstance(feature, basestring) assert isinstance(value, (basestring, type(None))) result = [] feature = add_grist (feature) for p in properties: if get_grist (p) == feature: if value: result.append (replace_grist (value, feature)) else: result.append (p) return result
python
def change (properties, feature, value = None): """ Returns a modified version of properties with all values of the given feature replaced by the given value. If 'value' is None the feature will be removed. """ assert is_iterable_typed(properties, basestring) assert isinstance(feature, basestring) assert isinstance(value, (basestring, type(None))) result = [] feature = add_grist (feature) for p in properties: if get_grist (p) == feature: if value: result.append (replace_grist (value, feature)) else: result.append (p) return result
[ "def", "change", "(", "properties", ",", "feature", ",", "value", "=", "None", ")", ":", "assert", "is_iterable_typed", "(", "properties", ",", "basestring", ")", "assert", "isinstance", "(", "feature", ",", "basestring", ")", "assert", "isinstance", "(", "value", ",", "(", "basestring", ",", "type", "(", "None", ")", ")", ")", "result", "=", "[", "]", "feature", "=", "add_grist", "(", "feature", ")", "for", "p", "in", "properties", ":", "if", "get_grist", "(", "p", ")", "==", "feature", ":", "if", "value", ":", "result", ".", "append", "(", "replace_grist", "(", "value", ",", "feature", ")", ")", "else", ":", "result", ".", "append", "(", "p", ")", "return", "result" ]
Returns a modified version of properties with all values of the given feature replaced by the given value. If 'value' is None the feature will be removed.
[ "Returns", "a", "modified", "version", "of", "properties", "with", "all", "values", "of", "the", "given", "feature", "replaced", "by", "the", "given", "value", ".", "If", "value", "is", "None", "the", "feature", "will", "be", "removed", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L457-L477
29,182
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
__validate1
def __validate1 (property): """ Exit with error if property is not valid. """ assert isinstance(property, Property) msg = None if not property.feature.free: feature.validate_value_string (property.feature, property.value)
python
def __validate1 (property): """ Exit with error if property is not valid. """ assert isinstance(property, Property) msg = None if not property.feature.free: feature.validate_value_string (property.feature, property.value)
[ "def", "__validate1", "(", "property", ")", ":", "assert", "isinstance", "(", "property", ",", "Property", ")", "msg", "=", "None", "if", "not", "property", ".", "feature", ".", "free", ":", "feature", ".", "validate_value_string", "(", "property", ".", "feature", ",", "property", ".", "value", ")" ]
Exit with error if property is not valid.
[ "Exit", "with", "error", "if", "property", "is", "not", "valid", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L483-L490
29,183
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
remove
def remove(attributes, properties): """Returns a property sets which include all the elements in 'properties' that do not have attributes listed in 'attributes'.""" if isinstance(attributes, basestring): attributes = [attributes] assert is_iterable_typed(attributes, basestring) assert is_iterable_typed(properties, basestring) result = [] for e in properties: attributes_new = feature.attributes(get_grist(e)) has_common_features = 0 for a in attributes_new: if a in attributes: has_common_features = 1 break if not has_common_features: result += e return result
python
def remove(attributes, properties): """Returns a property sets which include all the elements in 'properties' that do not have attributes listed in 'attributes'.""" if isinstance(attributes, basestring): attributes = [attributes] assert is_iterable_typed(attributes, basestring) assert is_iterable_typed(properties, basestring) result = [] for e in properties: attributes_new = feature.attributes(get_grist(e)) has_common_features = 0 for a in attributes_new: if a in attributes: has_common_features = 1 break if not has_common_features: result += e return result
[ "def", "remove", "(", "attributes", ",", "properties", ")", ":", "if", "isinstance", "(", "attributes", ",", "basestring", ")", ":", "attributes", "=", "[", "attributes", "]", "assert", "is_iterable_typed", "(", "attributes", ",", "basestring", ")", "assert", "is_iterable_typed", "(", "properties", ",", "basestring", ")", "result", "=", "[", "]", "for", "e", "in", "properties", ":", "attributes_new", "=", "feature", ".", "attributes", "(", "get_grist", "(", "e", ")", ")", "has_common_features", "=", "0", "for", "a", "in", "attributes_new", ":", "if", "a", "in", "attributes", ":", "has_common_features", "=", "1", "break", "if", "not", "has_common_features", ":", "result", "+=", "e", "return", "result" ]
Returns a property sets which include all the elements in 'properties' that do not have attributes listed in 'attributes'.
[ "Returns", "a", "property", "sets", "which", "include", "all", "the", "elements", "in", "properties", "that", "do", "not", "have", "attributes", "listed", "in", "attributes", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L520-L539
29,184
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
take
def take(attributes, properties): """Returns a property set which include all properties in 'properties' that have any of 'attributes'.""" assert is_iterable_typed(attributes, basestring) assert is_iterable_typed(properties, basestring) result = [] for e in properties: if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))): result.append(e) return result
python
def take(attributes, properties): """Returns a property set which include all properties in 'properties' that have any of 'attributes'.""" assert is_iterable_typed(attributes, basestring) assert is_iterable_typed(properties, basestring) result = [] for e in properties: if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))): result.append(e) return result
[ "def", "take", "(", "attributes", ",", "properties", ")", ":", "assert", "is_iterable_typed", "(", "attributes", ",", "basestring", ")", "assert", "is_iterable_typed", "(", "properties", ",", "basestring", ")", "result", "=", "[", "]", "for", "e", "in", "properties", ":", "if", "b2", ".", "util", ".", "set", ".", "intersection", "(", "attributes", ",", "feature", ".", "attributes", "(", "get_grist", "(", "e", ")", ")", ")", ":", "result", ".", "append", "(", "e", ")", "return", "result" ]
Returns a property set which include all properties in 'properties' that have any of 'attributes'.
[ "Returns", "a", "property", "set", "which", "include", "all", "properties", "in", "properties", "that", "have", "any", "of", "attributes", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L542-L551
29,185
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
PropertyMap.insert
def insert (self, properties, value): """ Associate value with properties. """ assert is_iterable_typed(properties, basestring) assert isinstance(value, basestring) self.__properties.append(properties) self.__values.append(value)
python
def insert (self, properties, value): """ Associate value with properties. """ assert is_iterable_typed(properties, basestring) assert isinstance(value, basestring) self.__properties.append(properties) self.__values.append(value)
[ "def", "insert", "(", "self", ",", "properties", ",", "value", ")", ":", "assert", "is_iterable_typed", "(", "properties", ",", "basestring", ")", "assert", "isinstance", "(", "value", ",", "basestring", ")", "self", ".", "__properties", ".", "append", "(", "properties", ")", "self", ".", "__values", ".", "append", "(", "value", ")" ]
Associate value with properties.
[ "Associate", "value", "with", "properties", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L590-L596
29,186
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
benchmark_command
def benchmark_command(cmd, progress): """Benchmark one command execution""" full_cmd = '/usr/bin/time --format="%U %M" {0}'.format(cmd) print '{0:6.2f}% Running {1}'.format(100.0 * progress, full_cmd) (_, err) = subprocess.Popen( ['/bin/sh', '-c', full_cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE ).communicate('') values = err.strip().split(' ') if len(values) == 2: try: return (float(values[0]), float(values[1])) except: # pylint:disable=I0011,W0702 pass # Handled by the code after the "if" print err raise Exception('Error during benchmarking')
python
def benchmark_command(cmd, progress): """Benchmark one command execution""" full_cmd = '/usr/bin/time --format="%U %M" {0}'.format(cmd) print '{0:6.2f}% Running {1}'.format(100.0 * progress, full_cmd) (_, err) = subprocess.Popen( ['/bin/sh', '-c', full_cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE ).communicate('') values = err.strip().split(' ') if len(values) == 2: try: return (float(values[0]), float(values[1])) except: # pylint:disable=I0011,W0702 pass # Handled by the code after the "if" print err raise Exception('Error during benchmarking')
[ "def", "benchmark_command", "(", "cmd", ",", "progress", ")", ":", "full_cmd", "=", "'/usr/bin/time --format=\"%U %M\" {0}'", ".", "format", "(", "cmd", ")", "print", "'{0:6.2f}% Running {1}'", ".", "format", "(", "100.0", "*", "progress", ",", "full_cmd", ")", "(", "_", ",", "err", ")", "=", "subprocess", ".", "Popen", "(", "[", "'/bin/sh'", ",", "'-c'", ",", "full_cmd", "]", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", "''", ")", "values", "=", "err", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", "if", "len", "(", "values", ")", "==", "2", ":", "try", ":", "return", "(", "float", "(", "values", "[", "0", "]", ")", ",", "float", "(", "values", "[", "1", "]", ")", ")", "except", ":", "# pylint:disable=I0011,W0702", "pass", "# Handled by the code after the \"if\"", "print", "err", "raise", "Exception", "(", "'Error during benchmarking'", ")" ]
Benchmark one command execution
[ "Benchmark", "one", "command", "execution" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L26-L45
29,187
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
benchmark_file
def benchmark_file( filename, compiler, include_dirs, (progress_from, progress_to), iter_count, extra_flags = ''): """Benchmark one file""" time_sum = 0 mem_sum = 0 for nth_run in xrange(0, iter_count): (time_spent, mem_used) = benchmark_command( '{0} -std=c++11 {1} -c {2} {3}'.format( compiler, ' '.join('-I{0}'.format(i) for i in include_dirs), filename, extra_flags ), ( progress_to * nth_run + progress_from * (iter_count - nth_run) ) / iter_count ) os.remove(os.path.splitext(os.path.basename(filename))[0] + '.o') time_sum = time_sum + time_spent mem_sum = mem_sum + mem_used return { "time": time_sum / iter_count, "memory": mem_sum / (iter_count * 1024) }
python
def benchmark_file( filename, compiler, include_dirs, (progress_from, progress_to), iter_count, extra_flags = ''): """Benchmark one file""" time_sum = 0 mem_sum = 0 for nth_run in xrange(0, iter_count): (time_spent, mem_used) = benchmark_command( '{0} -std=c++11 {1} -c {2} {3}'.format( compiler, ' '.join('-I{0}'.format(i) for i in include_dirs), filename, extra_flags ), ( progress_to * nth_run + progress_from * (iter_count - nth_run) ) / iter_count ) os.remove(os.path.splitext(os.path.basename(filename))[0] + '.o') time_sum = time_sum + time_spent mem_sum = mem_sum + mem_used return { "time": time_sum / iter_count, "memory": mem_sum / (iter_count * 1024) }
[ "def", "benchmark_file", "(", "filename", ",", "compiler", ",", "include_dirs", ",", "(", "progress_from", ",", "progress_to", ")", ",", "iter_count", ",", "extra_flags", "=", "''", ")", ":", "time_sum", "=", "0", "mem_sum", "=", "0", "for", "nth_run", "in", "xrange", "(", "0", ",", "iter_count", ")", ":", "(", "time_spent", ",", "mem_used", ")", "=", "benchmark_command", "(", "'{0} -std=c++11 {1} -c {2} {3}'", ".", "format", "(", "compiler", ",", "' '", ".", "join", "(", "'-I{0}'", ".", "format", "(", "i", ")", "for", "i", "in", "include_dirs", ")", ",", "filename", ",", "extra_flags", ")", ",", "(", "progress_to", "*", "nth_run", "+", "progress_from", "*", "(", "iter_count", "-", "nth_run", ")", ")", "/", "iter_count", ")", "os", ".", "remove", "(", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", "[", "0", "]", "+", "'.o'", ")", "time_sum", "=", "time_sum", "+", "time_spent", "mem_sum", "=", "mem_sum", "+", "mem_used", "return", "{", "\"time\"", ":", "time_sum", "/", "iter_count", ",", "\"memory\"", ":", "mem_sum", "/", "(", "iter_count", "*", "1024", ")", "}" ]
Benchmark one file
[ "Benchmark", "one", "file" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L48-L73
29,188
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
compiler_info
def compiler_info(compiler): """Determine the name + version of the compiler""" (out, err) = subprocess.Popen( ['/bin/sh', '-c', '{0} -v'.format(compiler)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE ).communicate('') gcc_clang = re.compile('(gcc|clang) version ([0-9]+(\\.[0-9]+)*)') for line in (out + err).split('\n'): mtch = gcc_clang.search(line) if mtch: return mtch.group(1) + ' ' + mtch.group(2) return compiler
python
def compiler_info(compiler): """Determine the name + version of the compiler""" (out, err) = subprocess.Popen( ['/bin/sh', '-c', '{0} -v'.format(compiler)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE ).communicate('') gcc_clang = re.compile('(gcc|clang) version ([0-9]+(\\.[0-9]+)*)') for line in (out + err).split('\n'): mtch = gcc_clang.search(line) if mtch: return mtch.group(1) + ' ' + mtch.group(2) return compiler
[ "def", "compiler_info", "(", "compiler", ")", ":", "(", "out", ",", "err", ")", "=", "subprocess", ".", "Popen", "(", "[", "'/bin/sh'", ",", "'-c'", ",", "'{0} -v'", ".", "format", "(", "compiler", ")", "]", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", "''", ")", "gcc_clang", "=", "re", ".", "compile", "(", "'(gcc|clang) version ([0-9]+(\\\\.[0-9]+)*)'", ")", "for", "line", "in", "(", "out", "+", "err", ")", ".", "split", "(", "'\\n'", ")", ":", "mtch", "=", "gcc_clang", ".", "search", "(", "line", ")", "if", "mtch", ":", "return", "mtch", ".", "group", "(", "1", ")", "+", "' '", "+", "mtch", ".", "group", "(", "2", ")", "return", "compiler" ]
Determine the name + version of the compiler
[ "Determine", "the", "name", "+", "version", "of", "the", "compiler" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L76-L92
29,189
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
files_in_dir
def files_in_dir(path, extension): """Enumartes the files in path with the given extension""" ends = '.{0}'.format(extension) return (f for f in os.listdir(path) if f.endswith(ends))
python
def files_in_dir(path, extension): """Enumartes the files in path with the given extension""" ends = '.{0}'.format(extension) return (f for f in os.listdir(path) if f.endswith(ends))
[ "def", "files_in_dir", "(", "path", ",", "extension", ")", ":", "ends", "=", "'.{0}'", ".", "format", "(", "extension", ")", "return", "(", "f", "for", "f", "in", "os", ".", "listdir", "(", "path", ")", "if", "f", ".", "endswith", "(", "ends", ")", ")" ]
Enumartes the files in path with the given extension
[ "Enumartes", "the", "files", "in", "path", "with", "the", "given", "extension" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L105-L108
29,190
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
format_time
def format_time(seconds): """Format a duration""" minute = 60 hour = minute * 60 day = hour * 24 week = day * 7 result = [] for name, dur in [ ('week', week), ('day', day), ('hour', hour), ('minute', minute), ('second', 1) ]: if seconds > dur: value = seconds // dur result.append( '{0} {1}{2}'.format(int(value), name, 's' if value > 1 else '') ) seconds = seconds % dur return ' '.join(result)
python
def format_time(seconds): """Format a duration""" minute = 60 hour = minute * 60 day = hour * 24 week = day * 7 result = [] for name, dur in [ ('week', week), ('day', day), ('hour', hour), ('minute', minute), ('second', 1) ]: if seconds > dur: value = seconds // dur result.append( '{0} {1}{2}'.format(int(value), name, 's' if value > 1 else '') ) seconds = seconds % dur return ' '.join(result)
[ "def", "format_time", "(", "seconds", ")", ":", "minute", "=", "60", "hour", "=", "minute", "*", "60", "day", "=", "hour", "*", "24", "week", "=", "day", "*", "7", "result", "=", "[", "]", "for", "name", ",", "dur", "in", "[", "(", "'week'", ",", "week", ")", ",", "(", "'day'", ",", "day", ")", ",", "(", "'hour'", ",", "hour", ")", ",", "(", "'minute'", ",", "minute", ")", ",", "(", "'second'", ",", "1", ")", "]", ":", "if", "seconds", ">", "dur", ":", "value", "=", "seconds", "//", "dur", "result", ".", "append", "(", "'{0} {1}{2}'", ".", "format", "(", "int", "(", "value", ")", ",", "name", ",", "'s'", "if", "value", ">", "1", "else", "''", ")", ")", "seconds", "=", "seconds", "%", "dur", "return", "' '", ".", "join", "(", "result", ")" ]
Format a duration
[ "Format", "a", "duration" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L111-L129
29,191
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
benchmark
def benchmark(src_dir, compiler, include_dirs, iter_count): """Do the benchmarking""" files = list(files_in_dir(src_dir, 'cpp')) random.shuffle(files) has_string_templates = True string_template_file_cnt = sum(1 for file in files if 'bmp' in file) file_count = len(files) + string_template_file_cnt started_at = time.time() result = {} for filename in files: progress = len(result) result[filename] = benchmark_file( os.path.join(src_dir, filename), compiler, include_dirs, (float(progress) / file_count, float(progress + 1) / file_count), iter_count ) if 'bmp' in filename and has_string_templates: try: temp_result = benchmark_file( os.path.join(src_dir, filename), compiler, include_dirs, (float(progress + 1) / file_count, float(progress + 2) / file_count), iter_count, '-Xclang -fstring-literal-templates' ) result[filename.replace('bmp', 'slt')] = temp_result except: has_string_templates = False file_count -= string_template_file_cnt print 'Stopping the benchmarking of string literal templates' elapsed = time.time() - started_at total = float(file_count * elapsed) / len(result) print 'Elapsed time: {0}, Remaining time: {1}'.format( format_time(elapsed), format_time(total - elapsed) ) return result
python
def benchmark(src_dir, compiler, include_dirs, iter_count): """Do the benchmarking""" files = list(files_in_dir(src_dir, 'cpp')) random.shuffle(files) has_string_templates = True string_template_file_cnt = sum(1 for file in files if 'bmp' in file) file_count = len(files) + string_template_file_cnt started_at = time.time() result = {} for filename in files: progress = len(result) result[filename] = benchmark_file( os.path.join(src_dir, filename), compiler, include_dirs, (float(progress) / file_count, float(progress + 1) / file_count), iter_count ) if 'bmp' in filename and has_string_templates: try: temp_result = benchmark_file( os.path.join(src_dir, filename), compiler, include_dirs, (float(progress + 1) / file_count, float(progress + 2) / file_count), iter_count, '-Xclang -fstring-literal-templates' ) result[filename.replace('bmp', 'slt')] = temp_result except: has_string_templates = False file_count -= string_template_file_cnt print 'Stopping the benchmarking of string literal templates' elapsed = time.time() - started_at total = float(file_count * elapsed) / len(result) print 'Elapsed time: {0}, Remaining time: {1}'.format( format_time(elapsed), format_time(total - elapsed) ) return result
[ "def", "benchmark", "(", "src_dir", ",", "compiler", ",", "include_dirs", ",", "iter_count", ")", ":", "files", "=", "list", "(", "files_in_dir", "(", "src_dir", ",", "'cpp'", ")", ")", "random", ".", "shuffle", "(", "files", ")", "has_string_templates", "=", "True", "string_template_file_cnt", "=", "sum", "(", "1", "for", "file", "in", "files", "if", "'bmp'", "in", "file", ")", "file_count", "=", "len", "(", "files", ")", "+", "string_template_file_cnt", "started_at", "=", "time", ".", "time", "(", ")", "result", "=", "{", "}", "for", "filename", "in", "files", ":", "progress", "=", "len", "(", "result", ")", "result", "[", "filename", "]", "=", "benchmark_file", "(", "os", ".", "path", ".", "join", "(", "src_dir", ",", "filename", ")", ",", "compiler", ",", "include_dirs", ",", "(", "float", "(", "progress", ")", "/", "file_count", ",", "float", "(", "progress", "+", "1", ")", "/", "file_count", ")", ",", "iter_count", ")", "if", "'bmp'", "in", "filename", "and", "has_string_templates", ":", "try", ":", "temp_result", "=", "benchmark_file", "(", "os", ".", "path", ".", "join", "(", "src_dir", ",", "filename", ")", ",", "compiler", ",", "include_dirs", ",", "(", "float", "(", "progress", "+", "1", ")", "/", "file_count", ",", "float", "(", "progress", "+", "2", ")", "/", "file_count", ")", ",", "iter_count", ",", "'-Xclang -fstring-literal-templates'", ")", "result", "[", "filename", ".", "replace", "(", "'bmp'", ",", "'slt'", ")", "]", "=", "temp_result", "except", ":", "has_string_templates", "=", "False", "file_count", "-=", "string_template_file_cnt", "print", "'Stopping the benchmarking of string literal templates'", "elapsed", "=", "time", ".", "time", "(", ")", "-", "started_at", "total", "=", "float", "(", "file_count", "*", "elapsed", ")", "/", "len", "(", "result", ")", "print", "'Elapsed time: {0}, Remaining time: {1}'", ".", "format", "(", "format_time", "(", "elapsed", ")", ",", "format_time", "(", "total", "-", "elapsed", ")", ")", "return", 
"result" ]
Do the benchmarking
[ "Do", "the", "benchmarking" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L132-L174
29,192
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
plot
def plot(values, mode_names, title, (xlabel, ylabel), out_file): """Plot a diagram""" matplotlib.pyplot.clf() for mode, mode_name in mode_names.iteritems(): vals = values[mode] matplotlib.pyplot.plot( [x for x, _ in vals], [y for _, y in vals], label=mode_name ) matplotlib.pyplot.title(title) matplotlib.pyplot.xlabel(xlabel) matplotlib.pyplot.ylabel(ylabel) if len(mode_names) > 1: matplotlib.pyplot.legend() matplotlib.pyplot.savefig(out_file)
python
def plot(values, mode_names, title, (xlabel, ylabel), out_file): """Plot a diagram""" matplotlib.pyplot.clf() for mode, mode_name in mode_names.iteritems(): vals = values[mode] matplotlib.pyplot.plot( [x for x, _ in vals], [y for _, y in vals], label=mode_name ) matplotlib.pyplot.title(title) matplotlib.pyplot.xlabel(xlabel) matplotlib.pyplot.ylabel(ylabel) if len(mode_names) > 1: matplotlib.pyplot.legend() matplotlib.pyplot.savefig(out_file)
[ "def", "plot", "(", "values", ",", "mode_names", ",", "title", ",", "(", "xlabel", ",", "ylabel", ")", ",", "out_file", ")", ":", "matplotlib", ".", "pyplot", ".", "clf", "(", ")", "for", "mode", ",", "mode_name", "in", "mode_names", ".", "iteritems", "(", ")", ":", "vals", "=", "values", "[", "mode", "]", "matplotlib", ".", "pyplot", ".", "plot", "(", "[", "x", "for", "x", ",", "_", "in", "vals", "]", ",", "[", "y", "for", "_", ",", "y", "in", "vals", "]", ",", "label", "=", "mode_name", ")", "matplotlib", ".", "pyplot", ".", "title", "(", "title", ")", "matplotlib", ".", "pyplot", ".", "xlabel", "(", "xlabel", ")", "matplotlib", ".", "pyplot", ".", "ylabel", "(", "ylabel", ")", "if", "len", "(", "mode_names", ")", ">", "1", ":", "matplotlib", ".", "pyplot", ".", "legend", "(", ")", "matplotlib", ".", "pyplot", ".", "savefig", "(", "out_file", ")" ]
Plot a diagram
[ "Plot", "a", "diagram" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L177-L192
29,193
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
configs_in
def configs_in(src_dir): """Enumerate all configs in src_dir""" for filename in files_in_dir(src_dir, 'json'): with open(os.path.join(src_dir, filename), 'rb') as in_f: yield json.load(in_f)
python
def configs_in(src_dir): """Enumerate all configs in src_dir""" for filename in files_in_dir(src_dir, 'json'): with open(os.path.join(src_dir, filename), 'rb') as in_f: yield json.load(in_f)
[ "def", "configs_in", "(", "src_dir", ")", ":", "for", "filename", "in", "files_in_dir", "(", "src_dir", ",", "'json'", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "src_dir", ",", "filename", ")", ",", "'rb'", ")", "as", "in_f", ":", "yield", "json", ".", "load", "(", "in_f", ")" ]
Enumerate all configs in src_dir
[ "Enumerate", "all", "configs", "in", "src_dir" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L203-L207
29,194
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
join_images
def join_images(img_files, out_file): """Join the list of images into the out file""" images = [PIL.Image.open(f) for f in img_files] joined = PIL.Image.new( 'RGB', (sum(i.size[0] for i in images), max(i.size[1] for i in images)) ) left = 0 for img in images: joined.paste(im=img, box=(left, 0)) left = left + img.size[0] joined.save(out_file)
python
def join_images(img_files, out_file): """Join the list of images into the out file""" images = [PIL.Image.open(f) for f in img_files] joined = PIL.Image.new( 'RGB', (sum(i.size[0] for i in images), max(i.size[1] for i in images)) ) left = 0 for img in images: joined.paste(im=img, box=(left, 0)) left = left + img.size[0] joined.save(out_file)
[ "def", "join_images", "(", "img_files", ",", "out_file", ")", ":", "images", "=", "[", "PIL", ".", "Image", ".", "open", "(", "f", ")", "for", "f", "in", "img_files", "]", "joined", "=", "PIL", ".", "Image", ".", "new", "(", "'RGB'", ",", "(", "sum", "(", "i", ".", "size", "[", "0", "]", "for", "i", "in", "images", ")", ",", "max", "(", "i", ".", "size", "[", "1", "]", "for", "i", "in", "images", ")", ")", ")", "left", "=", "0", "for", "img", "in", "images", ":", "joined", ".", "paste", "(", "im", "=", "img", ",", "box", "=", "(", "left", ",", "0", ")", ")", "left", "=", "left", "+", "img", ".", "size", "[", "0", "]", "joined", ".", "save", "(", "out_file", ")" ]
Join the list of images into the out file
[ "Join", "the", "list", "of", "images", "into", "the", "out", "file" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L215-L226
29,195
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
plot_temp_diagrams
def plot_temp_diagrams(config, results, temp_dir): """Plot temporary diagrams""" display_name = { 'time': 'Compilation time (s)', 'memory': 'Compiler memory usage (MB)', } files = config['files'] img_files = [] if any('slt' in result for result in results) and 'bmp' in files.values()[0]: config['modes']['slt'] = 'Using BOOST_METAPARSE_STRING with string literal templates' for f in files.values(): f['slt'] = f['bmp'].replace('bmp', 'slt') for measured in ['time', 'memory']: mpts = sorted(int(k) for k in files.keys()) img_files.append(os.path.join(temp_dir, '_{0}.png'.format(measured))) plot( { m: [(x, results[files[str(x)][m]][measured]) for x in mpts] for m in config['modes'].keys() }, config['modes'], display_name[measured], (config['x_axis_label'], display_name[measured]), img_files[-1] ) return img_files
python
def plot_temp_diagrams(config, results, temp_dir): """Plot temporary diagrams""" display_name = { 'time': 'Compilation time (s)', 'memory': 'Compiler memory usage (MB)', } files = config['files'] img_files = [] if any('slt' in result for result in results) and 'bmp' in files.values()[0]: config['modes']['slt'] = 'Using BOOST_METAPARSE_STRING with string literal templates' for f in files.values(): f['slt'] = f['bmp'].replace('bmp', 'slt') for measured in ['time', 'memory']: mpts = sorted(int(k) for k in files.keys()) img_files.append(os.path.join(temp_dir, '_{0}.png'.format(measured))) plot( { m: [(x, results[files[str(x)][m]][measured]) for x in mpts] for m in config['modes'].keys() }, config['modes'], display_name[measured], (config['x_axis_label'], display_name[measured]), img_files[-1] ) return img_files
[ "def", "plot_temp_diagrams", "(", "config", ",", "results", ",", "temp_dir", ")", ":", "display_name", "=", "{", "'time'", ":", "'Compilation time (s)'", ",", "'memory'", ":", "'Compiler memory usage (MB)'", ",", "}", "files", "=", "config", "[", "'files'", "]", "img_files", "=", "[", "]", "if", "any", "(", "'slt'", "in", "result", "for", "result", "in", "results", ")", "and", "'bmp'", "in", "files", ".", "values", "(", ")", "[", "0", "]", ":", "config", "[", "'modes'", "]", "[", "'slt'", "]", "=", "'Using BOOST_METAPARSE_STRING with string literal templates'", "for", "f", "in", "files", ".", "values", "(", ")", ":", "f", "[", "'slt'", "]", "=", "f", "[", "'bmp'", "]", ".", "replace", "(", "'bmp'", ",", "'slt'", ")", "for", "measured", "in", "[", "'time'", ",", "'memory'", "]", ":", "mpts", "=", "sorted", "(", "int", "(", "k", ")", "for", "k", "in", "files", ".", "keys", "(", ")", ")", "img_files", ".", "append", "(", "os", ".", "path", ".", "join", "(", "temp_dir", ",", "'_{0}.png'", ".", "format", "(", "measured", ")", ")", ")", "plot", "(", "{", "m", ":", "[", "(", "x", ",", "results", "[", "files", "[", "str", "(", "x", ")", "]", "[", "m", "]", "]", "[", "measured", "]", ")", "for", "x", "in", "mpts", "]", "for", "m", "in", "config", "[", "'modes'", "]", ".", "keys", "(", ")", "}", ",", "config", "[", "'modes'", "]", ",", "display_name", "[", "measured", "]", ",", "(", "config", "[", "'x_axis_label'", "]", ",", "display_name", "[", "measured", "]", ")", ",", "img_files", "[", "-", "1", "]", ")", "return", "img_files" ]
Plot temporary diagrams
[ "Plot", "temporary", "diagrams" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L229-L257
29,196
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
plot_diagram
def plot_diagram(config, results, images_dir, out_filename): """Plot one diagram""" img_files = plot_temp_diagrams(config, results, images_dir) join_images(img_files, out_filename) for img_file in img_files: os.remove(img_file)
python
def plot_diagram(config, results, images_dir, out_filename): """Plot one diagram""" img_files = plot_temp_diagrams(config, results, images_dir) join_images(img_files, out_filename) for img_file in img_files: os.remove(img_file)
[ "def", "plot_diagram", "(", "config", ",", "results", ",", "images_dir", ",", "out_filename", ")", ":", "img_files", "=", "plot_temp_diagrams", "(", "config", ",", "results", ",", "images_dir", ")", "join_images", "(", "img_files", ",", "out_filename", ")", "for", "img_file", "in", "img_files", ":", "os", ".", "remove", "(", "img_file", ")" ]
Plot one diagram
[ "Plot", "one", "diagram" ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L260-L265
29,197
apple/turicreate
src/unity/python/turicreate/toolkits/_model.py
load_model
def load_model(location): """ Load any Turi Create model that was previously saved. This function assumes the model (can be any model) was previously saved in Turi Create model format with model.save(filename). Parameters ---------- location : string Location of the model to load. Can be a local path or a remote URL. Because models are saved as directories, there is no file extension. Examples ---------- >>> model.save('my_model_file') >>> loaded_model = tc.load_model('my_model_file') """ # Check if the location is a dir_archive, if not, use glunpickler to load # as pure python model # If the location is a http location, skip the check, and directly proceed # to load model as dir_archive. This is because # 1) exists() does not work with http protocol, and # 2) GLUnpickler does not support http protocol = file_util.get_protocol(location) dir_archive_exists = False if protocol == '': model_path = file_util.expand_full_path(location) dir_archive_exists = file_util.exists(os.path.join(model_path, 'dir_archive.ini')) else: model_path = location if protocol in ['http', 'https']: dir_archive_exists = True else: import posixpath dir_archive_exists = file_util.exists(posixpath.join(model_path, 'dir_archive.ini')) if not dir_archive_exists: raise IOError("Directory %s does not exist" % location) _internal_url = _make_internal_url(location) saved_state = glconnect.get_unity().load_model(_internal_url) saved_state = _wrap_function_return(saved_state) # The archive version could be both bytes/unicode key = u'archive_version' archive_version = saved_state[key] if key in saved_state else saved_state[key.encode()] if archive_version < 0: raise ToolkitError("File does not appear to be a Turi Create model.") elif archive_version > 1: raise ToolkitError("Unable to load model.\n\n" "This model looks to have been saved with a future version of Turi Create.\n" "Please upgrade Turi Create before attempting to load this model file.") elif archive_version == 1: name = 
saved_state['model_name']; if name in MODEL_NAME_MAP: cls = MODEL_NAME_MAP[name] if 'model' in saved_state: # this is a native model return cls(saved_state['model']) else: # this is a CustomModel model_data = saved_state['side_data'] model_version = model_data['model_version'] del model_data['model_version'] return cls._load_version(model_data, model_version) elif hasattr(_extensions, name): return saved_state["model"] else: raise ToolkitError("Unable to load model of name '%s'; model name not registered." % name) else: # very legacy model format. Attempt pickle loading import sys sys.stderr.write("This model was saved in a legacy model format. Compatibility cannot be guaranteed in future versions.\n") if _six.PY3: raise ToolkitError("Unable to load legacy model in Python 3.\n\n" "To migrate a model, try loading it using Turi Create 4.0 or\n" "later in Python 2 and then re-save it. The re-saved model should\n" "work in Python 3.") if 'graphlab' not in sys.modules: sys.modules['graphlab'] = sys.modules['turicreate'] # backward compatibility. Otherwise old pickles will not load sys.modules["turicreate_util"] = sys.modules['turicreate.util'] sys.modules["graphlab_util"] = sys.modules['turicreate.util'] # More backwards compatibility with the turicreate namespace code. for k, v in list(sys.modules.items()): if 'turicreate' in k: sys.modules[k.replace('turicreate', 'graphlab')] = v #legacy loader import pickle model_wrapper = pickle.loads(saved_state[b'model_wrapper']) return model_wrapper(saved_state[b'model_base'])
python
def load_model(location): """ Load any Turi Create model that was previously saved. This function assumes the model (can be any model) was previously saved in Turi Create model format with model.save(filename). Parameters ---------- location : string Location of the model to load. Can be a local path or a remote URL. Because models are saved as directories, there is no file extension. Examples ---------- >>> model.save('my_model_file') >>> loaded_model = tc.load_model('my_model_file') """ # Check if the location is a dir_archive, if not, use glunpickler to load # as pure python model # If the location is a http location, skip the check, and directly proceed # to load model as dir_archive. This is because # 1) exists() does not work with http protocol, and # 2) GLUnpickler does not support http protocol = file_util.get_protocol(location) dir_archive_exists = False if protocol == '': model_path = file_util.expand_full_path(location) dir_archive_exists = file_util.exists(os.path.join(model_path, 'dir_archive.ini')) else: model_path = location if protocol in ['http', 'https']: dir_archive_exists = True else: import posixpath dir_archive_exists = file_util.exists(posixpath.join(model_path, 'dir_archive.ini')) if not dir_archive_exists: raise IOError("Directory %s does not exist" % location) _internal_url = _make_internal_url(location) saved_state = glconnect.get_unity().load_model(_internal_url) saved_state = _wrap_function_return(saved_state) # The archive version could be both bytes/unicode key = u'archive_version' archive_version = saved_state[key] if key in saved_state else saved_state[key.encode()] if archive_version < 0: raise ToolkitError("File does not appear to be a Turi Create model.") elif archive_version > 1: raise ToolkitError("Unable to load model.\n\n" "This model looks to have been saved with a future version of Turi Create.\n" "Please upgrade Turi Create before attempting to load this model file.") elif archive_version == 1: name = 
saved_state['model_name']; if name in MODEL_NAME_MAP: cls = MODEL_NAME_MAP[name] if 'model' in saved_state: # this is a native model return cls(saved_state['model']) else: # this is a CustomModel model_data = saved_state['side_data'] model_version = model_data['model_version'] del model_data['model_version'] return cls._load_version(model_data, model_version) elif hasattr(_extensions, name): return saved_state["model"] else: raise ToolkitError("Unable to load model of name '%s'; model name not registered." % name) else: # very legacy model format. Attempt pickle loading import sys sys.stderr.write("This model was saved in a legacy model format. Compatibility cannot be guaranteed in future versions.\n") if _six.PY3: raise ToolkitError("Unable to load legacy model in Python 3.\n\n" "To migrate a model, try loading it using Turi Create 4.0 or\n" "later in Python 2 and then re-save it. The re-saved model should\n" "work in Python 3.") if 'graphlab' not in sys.modules: sys.modules['graphlab'] = sys.modules['turicreate'] # backward compatibility. Otherwise old pickles will not load sys.modules["turicreate_util"] = sys.modules['turicreate.util'] sys.modules["graphlab_util"] = sys.modules['turicreate.util'] # More backwards compatibility with the turicreate namespace code. for k, v in list(sys.modules.items()): if 'turicreate' in k: sys.modules[k.replace('turicreate', 'graphlab')] = v #legacy loader import pickle model_wrapper = pickle.loads(saved_state[b'model_wrapper']) return model_wrapper(saved_state[b'model_base'])
[ "def", "load_model", "(", "location", ")", ":", "# Check if the location is a dir_archive, if not, use glunpickler to load", "# as pure python model", "# If the location is a http location, skip the check, and directly proceed", "# to load model as dir_archive. This is because", "# 1) exists() does not work with http protocol, and", "# 2) GLUnpickler does not support http", "protocol", "=", "file_util", ".", "get_protocol", "(", "location", ")", "dir_archive_exists", "=", "False", "if", "protocol", "==", "''", ":", "model_path", "=", "file_util", ".", "expand_full_path", "(", "location", ")", "dir_archive_exists", "=", "file_util", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "model_path", ",", "'dir_archive.ini'", ")", ")", "else", ":", "model_path", "=", "location", "if", "protocol", "in", "[", "'http'", ",", "'https'", "]", ":", "dir_archive_exists", "=", "True", "else", ":", "import", "posixpath", "dir_archive_exists", "=", "file_util", ".", "exists", "(", "posixpath", ".", "join", "(", "model_path", ",", "'dir_archive.ini'", ")", ")", "if", "not", "dir_archive_exists", ":", "raise", "IOError", "(", "\"Directory %s does not exist\"", "%", "location", ")", "_internal_url", "=", "_make_internal_url", "(", "location", ")", "saved_state", "=", "glconnect", ".", "get_unity", "(", ")", ".", "load_model", "(", "_internal_url", ")", "saved_state", "=", "_wrap_function_return", "(", "saved_state", ")", "# The archive version could be both bytes/unicode", "key", "=", "u'archive_version'", "archive_version", "=", "saved_state", "[", "key", "]", "if", "key", "in", "saved_state", "else", "saved_state", "[", "key", ".", "encode", "(", ")", "]", "if", "archive_version", "<", "0", ":", "raise", "ToolkitError", "(", "\"File does not appear to be a Turi Create model.\"", ")", "elif", "archive_version", ">", "1", ":", "raise", "ToolkitError", "(", "\"Unable to load model.\\n\\n\"", "\"This model looks to have been saved with a future version of Turi Create.\\n\"", 
"\"Please upgrade Turi Create before attempting to load this model file.\"", ")", "elif", "archive_version", "==", "1", ":", "name", "=", "saved_state", "[", "'model_name'", "]", "if", "name", "in", "MODEL_NAME_MAP", ":", "cls", "=", "MODEL_NAME_MAP", "[", "name", "]", "if", "'model'", "in", "saved_state", ":", "# this is a native model", "return", "cls", "(", "saved_state", "[", "'model'", "]", ")", "else", ":", "# this is a CustomModel", "model_data", "=", "saved_state", "[", "'side_data'", "]", "model_version", "=", "model_data", "[", "'model_version'", "]", "del", "model_data", "[", "'model_version'", "]", "return", "cls", ".", "_load_version", "(", "model_data", ",", "model_version", ")", "elif", "hasattr", "(", "_extensions", ",", "name", ")", ":", "return", "saved_state", "[", "\"model\"", "]", "else", ":", "raise", "ToolkitError", "(", "\"Unable to load model of name '%s'; model name not registered.\"", "%", "name", ")", "else", ":", "# very legacy model format. Attempt pickle loading", "import", "sys", "sys", ".", "stderr", ".", "write", "(", "\"This model was saved in a legacy model format. Compatibility cannot be guaranteed in future versions.\\n\"", ")", "if", "_six", ".", "PY3", ":", "raise", "ToolkitError", "(", "\"Unable to load legacy model in Python 3.\\n\\n\"", "\"To migrate a model, try loading it using Turi Create 4.0 or\\n\"", "\"later in Python 2 and then re-save it. The re-saved model should\\n\"", "\"work in Python 3.\"", ")", "if", "'graphlab'", "not", "in", "sys", ".", "modules", ":", "sys", ".", "modules", "[", "'graphlab'", "]", "=", "sys", ".", "modules", "[", "'turicreate'", "]", "# backward compatibility. 
Otherwise old pickles will not load", "sys", ".", "modules", "[", "\"turicreate_util\"", "]", "=", "sys", ".", "modules", "[", "'turicreate.util'", "]", "sys", ".", "modules", "[", "\"graphlab_util\"", "]", "=", "sys", ".", "modules", "[", "'turicreate.util'", "]", "# More backwards compatibility with the turicreate namespace code.", "for", "k", ",", "v", "in", "list", "(", "sys", ".", "modules", ".", "items", "(", ")", ")", ":", "if", "'turicreate'", "in", "k", ":", "sys", ".", "modules", "[", "k", ".", "replace", "(", "'turicreate'", ",", "'graphlab'", ")", "]", "=", "v", "#legacy loader", "import", "pickle", "model_wrapper", "=", "pickle", ".", "loads", "(", "saved_state", "[", "b'model_wrapper'", "]", ")", "return", "model_wrapper", "(", "saved_state", "[", "b'model_base'", "]", ")" ]
Load any Turi Create model that was previously saved. This function assumes the model (can be any model) was previously saved in Turi Create model format with model.save(filename). Parameters ---------- location : string Location of the model to load. Can be a local path or a remote URL. Because models are saved as directories, there is no file extension. Examples ---------- >>> model.save('my_model_file') >>> loaded_model = tc.load_model('my_model_file')
[ "Load", "any", "Turi", "Create", "model", "that", "was", "previously", "saved", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_model.py#L30-L124
29,198
apple/turicreate
src/unity/python/turicreate/toolkits/_model.py
_get_default_options_wrapper
def _get_default_options_wrapper(unity_server_model_name, module_name='', python_class_name='', sdk_model = False): """ Internal function to return a get_default_options function. Parameters ---------- unity_server_model_name: str Name of the class/toolkit as registered with the unity server module_name: str, optional Name of the module. python_class_name: str, optional Name of the Python class. sdk_model : bool, optional (default False) True if the SDK interface was used for the model. False otherwise. Examples ---------- get_default_options = _get_default_options_wrapper('classifier_svm', 'svm', 'SVMClassifier') """ def get_default_options_for_model(output_type = 'sframe'): """ Get the default options for the toolkit :class:`~turicreate.{module_name}.{python_class_name}`. Parameters ---------- output_type : str, optional The output can be of the following types. - `sframe`: A table description each option used in the model. - `json`: A list of option dictionaries suitable for JSON serialization. | Each dictionary/row in the dictionary/SFrame object describes the following parameters of the given model. +------------------+-------------------------------------------------------+ | Name | Description | +==================+=======================================================+ | name | Name of the option used in the model. | +------------------+---------+---------------------------------------------+ | description | A detailed description of the option used. | +------------------+-------------------------------------------------------+ | type | Option type (REAL, BOOL, INTEGER or CATEGORICAL) | +------------------+-------------------------------------------------------+ | default_value | The default value for the option. 
| +------------------+-------------------------------------------------------+ | possible_values | List of acceptable values (CATEGORICAL only) | +------------------+-------------------------------------------------------+ | lower_bound | Smallest acceptable value for this option (REAL only) | +------------------+-------------------------------------------------------+ | upper_bound | Largest acceptable value for this option (REAL only) | +------------------+-------------------------------------------------------+ Returns ------- out : dict/SFrame See Also -------- turicreate.{module_name}.{python_class_name}.get_current_options Examples -------- .. sourcecode:: python >>> import turicreate # SFrame formatted output. >>> out_sframe = turicreate.{module_name}.get_default_options() # dict formatted output suitable for JSON serialization. >>> out_json = turicreate.{module_name}.get_default_options('json') """ if sdk_model: response = _tc.extensions._toolkits_sdk_get_default_options( unity_server_model_name) else: response = _tc.extensions._toolkits_get_default_options( unity_server_model_name) if output_type == 'json': return response else: json_list = [{'name': k, '': v} for k,v in response.items()] return _SFrame(json_list).unpack('X1', column_name_prefix='')\ .unpack('X1', column_name_prefix='') # Change the doc string before returning. get_default_options_for_model.__doc__ = get_default_options_for_model.\ __doc__.format(python_class_name = python_class_name, module_name = module_name) return get_default_options_for_model
python
def _get_default_options_wrapper(unity_server_model_name, module_name='', python_class_name='', sdk_model = False): """ Internal function to return a get_default_options function. Parameters ---------- unity_server_model_name: str Name of the class/toolkit as registered with the unity server module_name: str, optional Name of the module. python_class_name: str, optional Name of the Python class. sdk_model : bool, optional (default False) True if the SDK interface was used for the model. False otherwise. Examples ---------- get_default_options = _get_default_options_wrapper('classifier_svm', 'svm', 'SVMClassifier') """ def get_default_options_for_model(output_type = 'sframe'): """ Get the default options for the toolkit :class:`~turicreate.{module_name}.{python_class_name}`. Parameters ---------- output_type : str, optional The output can be of the following types. - `sframe`: A table description each option used in the model. - `json`: A list of option dictionaries suitable for JSON serialization. | Each dictionary/row in the dictionary/SFrame object describes the following parameters of the given model. +------------------+-------------------------------------------------------+ | Name | Description | +==================+=======================================================+ | name | Name of the option used in the model. | +------------------+---------+---------------------------------------------+ | description | A detailed description of the option used. | +------------------+-------------------------------------------------------+ | type | Option type (REAL, BOOL, INTEGER or CATEGORICAL) | +------------------+-------------------------------------------------------+ | default_value | The default value for the option. 
| +------------------+-------------------------------------------------------+ | possible_values | List of acceptable values (CATEGORICAL only) | +------------------+-------------------------------------------------------+ | lower_bound | Smallest acceptable value for this option (REAL only) | +------------------+-------------------------------------------------------+ | upper_bound | Largest acceptable value for this option (REAL only) | +------------------+-------------------------------------------------------+ Returns ------- out : dict/SFrame See Also -------- turicreate.{module_name}.{python_class_name}.get_current_options Examples -------- .. sourcecode:: python >>> import turicreate # SFrame formatted output. >>> out_sframe = turicreate.{module_name}.get_default_options() # dict formatted output suitable for JSON serialization. >>> out_json = turicreate.{module_name}.get_default_options('json') """ if sdk_model: response = _tc.extensions._toolkits_sdk_get_default_options( unity_server_model_name) else: response = _tc.extensions._toolkits_get_default_options( unity_server_model_name) if output_type == 'json': return response else: json_list = [{'name': k, '': v} for k,v in response.items()] return _SFrame(json_list).unpack('X1', column_name_prefix='')\ .unpack('X1', column_name_prefix='') # Change the doc string before returning. get_default_options_for_model.__doc__ = get_default_options_for_model.\ __doc__.format(python_class_name = python_class_name, module_name = module_name) return get_default_options_for_model
[ "def", "_get_default_options_wrapper", "(", "unity_server_model_name", ",", "module_name", "=", "''", ",", "python_class_name", "=", "''", ",", "sdk_model", "=", "False", ")", ":", "def", "get_default_options_for_model", "(", "output_type", "=", "'sframe'", ")", ":", "\"\"\"\n Get the default options for the toolkit\n :class:`~turicreate.{module_name}.{python_class_name}`.\n\n Parameters\n ----------\n output_type : str, optional\n\n The output can be of the following types.\n\n - `sframe`: A table description each option used in the model.\n - `json`: A list of option dictionaries suitable for JSON serialization.\n\n | Each dictionary/row in the dictionary/SFrame object describes the\n following parameters of the given model.\n\n +------------------+-------------------------------------------------------+\n | Name | Description |\n +==================+=======================================================+\n | name | Name of the option used in the model. |\n +------------------+---------+---------------------------------------------+\n | description | A detailed description of the option used. |\n +------------------+-------------------------------------------------------+\n | type | Option type (REAL, BOOL, INTEGER or CATEGORICAL) |\n +------------------+-------------------------------------------------------+\n | default_value | The default value for the option. 
|\n +------------------+-------------------------------------------------------+\n | possible_values | List of acceptable values (CATEGORICAL only) |\n +------------------+-------------------------------------------------------+\n | lower_bound | Smallest acceptable value for this option (REAL only) |\n +------------------+-------------------------------------------------------+\n | upper_bound | Largest acceptable value for this option (REAL only) |\n +------------------+-------------------------------------------------------+\n\n Returns\n -------\n out : dict/SFrame\n\n See Also\n --------\n turicreate.{module_name}.{python_class_name}.get_current_options\n\n Examples\n --------\n .. sourcecode:: python\n\n >>> import turicreate\n\n # SFrame formatted output.\n >>> out_sframe = turicreate.{module_name}.get_default_options()\n\n # dict formatted output suitable for JSON serialization.\n >>> out_json = turicreate.{module_name}.get_default_options('json')\n \"\"\"", "if", "sdk_model", ":", "response", "=", "_tc", ".", "extensions", ".", "_toolkits_sdk_get_default_options", "(", "unity_server_model_name", ")", "else", ":", "response", "=", "_tc", ".", "extensions", ".", "_toolkits_get_default_options", "(", "unity_server_model_name", ")", "if", "output_type", "==", "'json'", ":", "return", "response", "else", ":", "json_list", "=", "[", "{", "'name'", ":", "k", ",", "''", ":", "v", "}", "for", "k", ",", "v", "in", "response", ".", "items", "(", ")", "]", "return", "_SFrame", "(", "json_list", ")", ".", "unpack", "(", "'X1'", ",", "column_name_prefix", "=", "''", ")", ".", "unpack", "(", "'X1'", ",", "column_name_prefix", "=", "''", ")", "# Change the doc string before returning.", "get_default_options_for_model", ".", "__doc__", "=", "get_default_options_for_model", ".", "__doc__", ".", "format", "(", "python_class_name", "=", "python_class_name", ",", "module_name", "=", "module_name", ")", "return", "get_default_options_for_model" ]
Internal function to return a get_default_options function. Parameters ---------- unity_server_model_name: str Name of the class/toolkit as registered with the unity server module_name: str, optional Name of the module. python_class_name: str, optional Name of the Python class. sdk_model : bool, optional (default False) True if the SDK interface was used for the model. False otherwise. Examples ---------- get_default_options = _get_default_options_wrapper('classifier_svm', 'svm', 'SVMClassifier')
[ "Internal", "function", "to", "return", "a", "get_default_options", "function", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_model.py#L127-L226
29,199
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/tools/common.py
get_invocation_command_nodefault
def get_invocation_command_nodefault( toolset, tool, user_provided_command=[], additional_paths=[], path_last=False): """ A helper rule to get the command to invoke some tool. If 'user-provided-command' is not given, tries to find binary named 'tool' in PATH and in the passed 'additional-path'. Otherwise, verifies that the first element of 'user-provided-command' is an existing program. This rule returns the command to be used when invoking the tool. If we can't find the tool, a warning is issued. If 'path-last' is specified, PATH is checked after 'additional-paths' when searching for 'tool'. """ assert isinstance(toolset, basestring) assert isinstance(tool, basestring) assert is_iterable_typed(user_provided_command, basestring) assert is_iterable_typed(additional_paths, basestring) or additional_paths is None assert isinstance(path_last, (int, bool)) if not user_provided_command: command = find_tool(tool, additional_paths, path_last) if not command and __debug_configuration: print "warning: toolset", toolset, "initialization: can't find tool, tool" #FIXME #print "warning: initialized from" [ errors.nearest-user-location ] ; else: command = check_tool(user_provided_command) if not command and __debug_configuration: print "warning: toolset", toolset, "initialization:" print "warning: can't find user-provided command", user_provided_command #FIXME #ECHO "warning: initialized from" [ errors.nearest-user-location ] command = [] if command: command = ' '.join(command) return command
python
def get_invocation_command_nodefault( toolset, tool, user_provided_command=[], additional_paths=[], path_last=False): """ A helper rule to get the command to invoke some tool. If 'user-provided-command' is not given, tries to find binary named 'tool' in PATH and in the passed 'additional-path'. Otherwise, verifies that the first element of 'user-provided-command' is an existing program. This rule returns the command to be used when invoking the tool. If we can't find the tool, a warning is issued. If 'path-last' is specified, PATH is checked after 'additional-paths' when searching for 'tool'. """ assert isinstance(toolset, basestring) assert isinstance(tool, basestring) assert is_iterable_typed(user_provided_command, basestring) assert is_iterable_typed(additional_paths, basestring) or additional_paths is None assert isinstance(path_last, (int, bool)) if not user_provided_command: command = find_tool(tool, additional_paths, path_last) if not command and __debug_configuration: print "warning: toolset", toolset, "initialization: can't find tool, tool" #FIXME #print "warning: initialized from" [ errors.nearest-user-location ] ; else: command = check_tool(user_provided_command) if not command and __debug_configuration: print "warning: toolset", toolset, "initialization:" print "warning: can't find user-provided command", user_provided_command #FIXME #ECHO "warning: initialized from" [ errors.nearest-user-location ] command = [] if command: command = ' '.join(command) return command
[ "def", "get_invocation_command_nodefault", "(", "toolset", ",", "tool", ",", "user_provided_command", "=", "[", "]", ",", "additional_paths", "=", "[", "]", ",", "path_last", "=", "False", ")", ":", "assert", "isinstance", "(", "toolset", ",", "basestring", ")", "assert", "isinstance", "(", "tool", ",", "basestring", ")", "assert", "is_iterable_typed", "(", "user_provided_command", ",", "basestring", ")", "assert", "is_iterable_typed", "(", "additional_paths", ",", "basestring", ")", "or", "additional_paths", "is", "None", "assert", "isinstance", "(", "path_last", ",", "(", "int", ",", "bool", ")", ")", "if", "not", "user_provided_command", ":", "command", "=", "find_tool", "(", "tool", ",", "additional_paths", ",", "path_last", ")", "if", "not", "command", "and", "__debug_configuration", ":", "print", "\"warning: toolset\"", ",", "toolset", ",", "\"initialization: can't find tool, tool\"", "#FIXME", "#print \"warning: initialized from\" [ errors.nearest-user-location ] ;", "else", ":", "command", "=", "check_tool", "(", "user_provided_command", ")", "if", "not", "command", "and", "__debug_configuration", ":", "print", "\"warning: toolset\"", ",", "toolset", ",", "\"initialization:\"", "print", "\"warning: can't find user-provided command\"", ",", "user_provided_command", "#FIXME", "#ECHO \"warning: initialized from\" [ errors.nearest-user-location ]", "command", "=", "[", "]", "if", "command", ":", "command", "=", "' '", ".", "join", "(", "command", ")", "return", "command" ]
A helper rule to get the command to invoke some tool. If 'user-provided-command' is not given, tries to find binary named 'tool' in PATH and in the passed 'additional-path'. Otherwise, verifies that the first element of 'user-provided-command' is an existing program. This rule returns the command to be used when invoking the tool. If we can't find the tool, a warning is issued. If 'path-last' is specified, PATH is checked after 'additional-paths' when searching for 'tool'.
[ "A", "helper", "rule", "to", "get", "the", "command", "to", "invoke", "some", "tool", ".", "If", "user", "-", "provided", "-", "command", "is", "not", "given", "tries", "to", "find", "binary", "named", "tool", "in", "PATH", "and", "in", "the", "passed", "additional", "-", "path", ".", "Otherwise", "verifies", "that", "the", "first", "element", "of", "user", "-", "provided", "-", "command", "is", "an", "existing", "program", "." ]
74514c3f99e25b46f22c6e02977fe3da69221c2e
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/common.py#L285-L320