Dataset columns:

  column             type            values / length range
  -----------------  --------------  ---------------------
  partition          stringclasses   3 values
  func_name          stringlengths   1 to 134
  docstring          stringlengths   1 to 46.9k
  path               stringlengths   4 to 223
  original_string    stringlengths   75 to 104k
  code               stringlengths   75 to 104k
  docstring_tokens   listlengths     1 to 1.97k
  repo               stringlengths   7 to 55
  language           stringclasses   1 value
  url                stringlengths   87 to 315
  code_tokens        listlengths     19 to 28.4k
  sha                stringlengths   40 to 40
train
StreamingQuery.lastProgress
Returns the most recent :class:`StreamingQueryProgress` update of this streaming query, or None if there were no progress updates. :return: a map
python/pyspark/sql/streaming.py
def lastProgress(self): """ Returns the most recent :class:`StreamingQueryProgress` update of this streaming query or None if there were no progress updates :return: a map """ lastProgress = self._jsq.lastProgress() if lastProgress: return json.loads(lastProgress.json()) else: return None
def lastProgress(self): """ Returns the most recent :class:`StreamingQueryProgress` update of this streaming query or None if there were no progress updates :return: a map """ lastProgress = self._jsq.lastProgress() if lastProgress: return json.loads(lastProgress.json()) else: return None
[ "Returns", "the", "most", "recent", ":", "class", ":", "StreamingQueryProgress", "update", "of", "this", "streaming", "query", "or", "None", "if", "there", "were", "no", "progress", "updates", ":", "return", ":", "a", "map" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L124-L134
[ "def", "lastProgress", "(", "self", ")", ":", "lastProgress", "=", "self", ".", "_jsq", ".", "lastProgress", "(", ")", "if", "lastProgress", ":", "return", "json", ".", "loads", "(", "lastProgress", ".", "json", "(", ")", ")", "else", ":", "return", "None" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
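A minimal usage sketch for StreamingQuery.lastProgress, assuming an existing SparkSession bound to spark (as in the doctests above) and using the built-in rate source purely as a stand-in input; the query name and variable names are placeholders:

>>> query = (spark.readStream.format("rate").load()
...          .writeStream.format("memory").queryName("rate_sketch").start())
>>> # lastProgress is exposed as a property in PySpark releases: a dict parsed from
>>> # the latest StreamingQueryProgress JSON, or None before the first trigger completes.
>>> progress = query.lastProgress
>>> progress is None or isinstance(progress, dict)
True
>>> query.stop()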
train
StreamingQuery.exception
:return: the StreamingQueryException if the query was terminated by an exception, or None.
python/pyspark/sql/streaming.py
def exception(self): """ :return: the StreamingQueryException if the query was terminated by an exception, or None. """ if self._jsq.exception().isDefined(): je = self._jsq.exception().get() msg = je.toString().split(': ', 1)[1] # Drop the Java StreamingQueryException type info stackTrace = '\n\t at '.join(map(lambda x: x.toString(), je.getStackTrace())) return StreamingQueryException(msg, stackTrace, je.getCause()) else: return None
def exception(self): """ :return: the StreamingQueryException if the query was terminated by an exception, or None. """ if self._jsq.exception().isDefined(): je = self._jsq.exception().get() msg = je.toString().split(': ', 1)[1] # Drop the Java StreamingQueryException type info stackTrace = '\n\t at '.join(map(lambda x: x.toString(), je.getStackTrace())) return StreamingQueryException(msg, stackTrace, je.getCause()) else: return None
[ ":", "return", ":", "the", "StreamingQueryException", "if", "the", "query", "was", "terminated", "by", "an", "exception", "or", "None", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L181-L191
[ "def", "exception", "(", "self", ")", ":", "if", "self", ".", "_jsq", ".", "exception", "(", ")", ".", "isDefined", "(", ")", ":", "je", "=", "self", ".", "_jsq", ".", "exception", "(", ")", ".", "get", "(", ")", "msg", "=", "je", ".", "toString", "(", ")", ".", "split", "(", "': '", ",", "1", ")", "[", "1", "]", "# Drop the Java StreamingQueryException type info", "stackTrace", "=", "'\\n\\t at '", ".", "join", "(", "map", "(", "lambda", "x", ":", "x", ".", "toString", "(", ")", ",", "je", ".", "getStackTrace", "(", ")", ")", ")", "return", "StreamingQueryException", "(", "msg", ",", "stackTrace", ",", "je", ".", "getCause", "(", ")", ")", "else", ":", "return", "None" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
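A hedged sketch of how StreamingQuery.exception is typically inspected after a failure, assuming an existing SparkSession bound to spark; the always-failing UDF is purely illustrative and the timeout value is arbitrary:

>>> from pyspark.sql.functions import udf
>>> boom = udf(lambda v: 1 // 0)  # illustrative UDF that fails on every row
>>> q = (spark.readStream.format("rate").load().select(boom("value"))
...      .writeStream.format("memory").queryName("failing_sketch").start())
>>> try:
...     q.awaitTermination(10)    # the failure surfaces here as StreamingQueryException
... except Exception:
...     pass
>>> err = q.exception()           # StreamingQueryException once the query has failed, else None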
train
StreamingQueryManager.awaitAnyTermination
Wait until any of the queries on the associated SQLContext has terminated since the creation of the context, or since :func:`resetTerminated()` was called. If any query was terminated with an exception, then the exception will be thrown. If `timeout` is set, it returns whether the query has terminated or not within the `timeout` seconds. If a query has terminated, then subsequent calls to :func:`awaitAnyTermination()` will either return immediately (if the query was terminated by :func:`query.stop()`), or throw the exception immediately (if the query was terminated with an exception). Use :func:`resetTerminated()` to clear past terminations and wait for new terminations. In the case where multiple queries have terminated since :func:`resetTerminated()` was called, if any query has terminated with an exception, then :func:`awaitAnyTermination()` will throw any one of the exceptions. To handle exceptions across multiple queries correctly, users need to stop all of them after any one terminates with an exception, and then check `query.exception()` for each query. throws :class:`StreamingQueryException`, if `this` query has terminated with an exception
python/pyspark/sql/streaming.py
def awaitAnyTermination(self, timeout=None): """Wait until any of the queries on the associated SQLContext has terminated since the creation of the context, or since :func:`resetTerminated()` was called. If any query was terminated with an exception, then the exception will be thrown. If `timeout` is set, it returns whether the query has terminated or not within the `timeout` seconds. If a query has terminated, then subsequent calls to :func:`awaitAnyTermination()` will either return immediately (if the query was terminated by :func:`query.stop()`), or throw the exception immediately (if the query was terminated with exception). Use :func:`resetTerminated()` to clear past terminations and wait for new terminations. In the case where multiple queries have terminated since :func:`resetTermination()` was called, if any query has terminated with exception, then :func:`awaitAnyTermination()` will throw any of the exception. For correctly documenting exceptions across multiple queries, users need to stop all of them after any of them terminates with exception, and then check the `query.exception()` for each query. throws :class:`StreamingQueryException`, if `this` query has terminated with an exception """ if timeout is not None: if not isinstance(timeout, (int, float)) or timeout < 0: raise ValueError("timeout must be a positive integer or float. Got %s" % timeout) return self._jsqm.awaitAnyTermination(int(timeout * 1000)) else: return self._jsqm.awaitAnyTermination()
def awaitAnyTermination(self, timeout=None): """Wait until any of the queries on the associated SQLContext has terminated since the creation of the context, or since :func:`resetTerminated()` was called. If any query was terminated with an exception, then the exception will be thrown. If `timeout` is set, it returns whether the query has terminated or not within the `timeout` seconds. If a query has terminated, then subsequent calls to :func:`awaitAnyTermination()` will either return immediately (if the query was terminated by :func:`query.stop()`), or throw the exception immediately (if the query was terminated with exception). Use :func:`resetTerminated()` to clear past terminations and wait for new terminations. In the case where multiple queries have terminated since :func:`resetTermination()` was called, if any query has terminated with exception, then :func:`awaitAnyTermination()` will throw any of the exception. For correctly documenting exceptions across multiple queries, users need to stop all of them after any of them terminates with exception, and then check the `query.exception()` for each query. throws :class:`StreamingQueryException`, if `this` query has terminated with an exception """ if timeout is not None: if not isinstance(timeout, (int, float)) or timeout < 0: raise ValueError("timeout must be a positive integer or float. Got %s" % timeout) return self._jsqm.awaitAnyTermination(int(timeout * 1000)) else: return self._jsqm.awaitAnyTermination()
[ "Wait", "until", "any", "of", "the", "queries", "on", "the", "associated", "SQLContext", "has", "terminated", "since", "the", "creation", "of", "the", "context", "or", "since", ":", "func", ":", "resetTerminated", "()", "was", "called", ".", "If", "any", "query", "was", "terminated", "with", "an", "exception", "then", "the", "exception", "will", "be", "thrown", ".", "If", "timeout", "is", "set", "it", "returns", "whether", "the", "query", "has", "terminated", "or", "not", "within", "the", "timeout", "seconds", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L240-L265
[ "def", "awaitAnyTermination", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "not", "None", ":", "if", "not", "isinstance", "(", "timeout", ",", "(", "int", ",", "float", ")", ")", "or", "timeout", "<", "0", ":", "raise", "ValueError", "(", "\"timeout must be a positive integer or float. Got %s\"", "%", "timeout", ")", "return", "self", ".", "_jsqm", ".", "awaitAnyTermination", "(", "int", "(", "timeout", "*", "1000", ")", ")", "else", ":", "return", "self", ".", "_jsqm", ".", "awaitAnyTermination", "(", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
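A short sketch of awaitAnyTermination on the session's StreamingQueryManager (reachable as spark.streams), assuming an existing SparkSession bound to spark; the query names and the 2-second timeout are placeholders:

>>> q1 = (spark.readStream.format("rate").load()
...       .writeStream.format("memory").queryName("sketch_a").start())
>>> q2 = (spark.readStream.format("rate").load()
...       .writeStream.format("memory").queryName("sketch_b").start())
>>> # With a timeout (in seconds) the call returns a bool instead of blocking indefinitely.
>>> terminated = spark.streams.awaitAnyTermination(timeout=2)
>>> spark.streams.resetTerminated()  # clear past terminations before waiting again
>>> q1.stop(); q2.stop()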
train
DataStreamReader.load
Loads a data stream from a data source and returns it as a :class:`DataFrame`. .. note:: Evolving. :param path: optional string for file-system backed data sources. :param format: optional string for format of the data source. Defaults to 'parquet'. :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param options: all other string options >>> json_sdf = spark.readStream.format("json") \\ ... .schema(sdf_schema) \\ ... .load(tempfile.mkdtemp()) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True
python/pyspark/sql/streaming.py
def load(self, path=None, format=None, schema=None, **options): """Loads a data stream from a data source and returns it as a :class`DataFrame`. .. note:: Evolving. :param path: optional string for file-system backed data sources. :param format: optional string for format of the data source. Default to 'parquet'. :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param options: all other string options >>> json_sdf = spark.readStream.format("json") \\ ... .schema(sdf_schema) \\ ... .load(tempfile.mkdtemp()) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True """ if format is not None: self.format(format) if schema is not None: self.schema(schema) self.options(**options) if path is not None: if type(path) != str or len(path.strip()) == 0: raise ValueError("If the path is provided for stream, it needs to be a " + "non-empty string. List of paths are not supported.") return self._df(self._jreader.load(path)) else: return self._df(self._jreader.load())
def load(self, path=None, format=None, schema=None, **options): """Loads a data stream from a data source and returns it as a :class`DataFrame`. .. note:: Evolving. :param path: optional string for file-system backed data sources. :param format: optional string for format of the data source. Default to 'parquet'. :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param options: all other string options >>> json_sdf = spark.readStream.format("json") \\ ... .schema(sdf_schema) \\ ... .load(tempfile.mkdtemp()) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True """ if format is not None: self.format(format) if schema is not None: self.schema(schema) self.options(**options) if path is not None: if type(path) != str or len(path.strip()) == 0: raise ValueError("If the path is provided for stream, it needs to be a " + "non-empty string. List of paths are not supported.") return self._df(self._jreader.load(path)) else: return self._df(self._jreader.load())
[ "Loads", "a", "data", "stream", "from", "a", "data", "source", "and", "returns", "it", "as", "a", ":", "class", "DataFrame", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L370-L400
[ "def", "load", "(", "self", ",", "path", "=", "None", ",", "format", "=", "None", ",", "schema", "=", "None", ",", "*", "*", "options", ")", ":", "if", "format", "is", "not", "None", ":", "self", ".", "format", "(", "format", ")", "if", "schema", "is", "not", "None", ":", "self", ".", "schema", "(", "schema", ")", "self", ".", "options", "(", "*", "*", "options", ")", "if", "path", "is", "not", "None", ":", "if", "type", "(", "path", ")", "!=", "str", "or", "len", "(", "path", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "\"If the path is provided for stream, it needs to be a \"", "+", "\"non-empty string. List of paths are not supported.\"", ")", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "load", "(", "path", ")", ")", "else", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "load", "(", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
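A minimal sketch of DataStreamReader.load with an explicit format, schema, and option, assuming an existing SparkSession bound to spark; the empty temporary directory stands in for a real input path, and maxFilesPerTrigger is just one example of a file-source option:

>>> import tempfile
>>> from pyspark.sql.types import StructType, StructField, StringType, IntegerType
>>> schema = StructType([StructField("name", StringType()), StructField("age", IntegerType())])
>>> sdf = (spark.readStream
...        .format("json")
...        .schema(schema)
...        .option("maxFilesPerTrigger", 1)  # process at most one new file per micro-batch
...        .load(tempfile.mkdtemp()))
>>> sdf.isStreaming
True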
train
DataStreamReader.json
Loads a JSON file stream and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. .. note:: Evolving. :param path: string represents path to the JSON dataset, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param lineSep: defines the line separator that should be used for parsing. 
If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True
python/pyspark/sql/streaming.py
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None, multiLine=None, allowUnquotedControlChars=None, lineSep=None, locale=None, dropFieldIfAllNull=None, encoding=None): """ Loads a JSON file stream and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. .. note:: Evolving. :param path: string represents path to the JSON dataset, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. 
If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True """ self._set_opts( schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, timestampFormat=timestampFormat, multiLine=multiLine, allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, locale=locale, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding) if isinstance(path, basestring): return self._df(self._jreader.json(path)) else: raise TypeError("path can be only a single string")
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None, multiLine=None, allowUnquotedControlChars=None, lineSep=None, locale=None, dropFieldIfAllNull=None, encoding=None): """ Loads a JSON file stream and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. .. note:: Evolving. :param path: string represents path to the JSON dataset, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. 
If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True """ self._set_opts( schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, timestampFormat=timestampFormat, multiLine=multiLine, allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, locale=locale, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding) if isinstance(path, basestring): return self._df(self._jreader.json(path)) else: raise TypeError("path can be only a single string")
[ "Loads", "a", "JSON", "file", "stream", "and", "returns", "the", "results", "as", "a", ":", "class", ":", "DataFrame", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L403-L503
[ "def", "json", "(", "self", ",", "path", ",", "schema", "=", "None", ",", "primitivesAsString", "=", "None", ",", "prefersDecimal", "=", "None", ",", "allowComments", "=", "None", ",", "allowUnquotedFieldNames", "=", "None", ",", "allowSingleQuotes", "=", "None", ",", "allowNumericLeadingZero", "=", "None", ",", "allowBackslashEscapingAnyCharacter", "=", "None", ",", "mode", "=", "None", ",", "columnNameOfCorruptRecord", "=", "None", ",", "dateFormat", "=", "None", ",", "timestampFormat", "=", "None", ",", "multiLine", "=", "None", ",", "allowUnquotedControlChars", "=", "None", ",", "lineSep", "=", "None", ",", "locale", "=", "None", ",", "dropFieldIfAllNull", "=", "None", ",", "encoding", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "schema", "=", "schema", ",", "primitivesAsString", "=", "primitivesAsString", ",", "prefersDecimal", "=", "prefersDecimal", ",", "allowComments", "=", "allowComments", ",", "allowUnquotedFieldNames", "=", "allowUnquotedFieldNames", ",", "allowSingleQuotes", "=", "allowSingleQuotes", ",", "allowNumericLeadingZero", "=", "allowNumericLeadingZero", ",", "allowBackslashEscapingAnyCharacter", "=", "allowBackslashEscapingAnyCharacter", ",", "mode", "=", "mode", ",", "columnNameOfCorruptRecord", "=", "columnNameOfCorruptRecord", ",", "dateFormat", "=", "dateFormat", ",", "timestampFormat", "=", "timestampFormat", ",", "multiLine", "=", "multiLine", ",", "allowUnquotedControlChars", "=", "allowUnquotedControlChars", ",", "lineSep", "=", "lineSep", ",", "locale", "=", "locale", ",", "dropFieldIfAllNull", "=", "dropFieldIfAllNull", ",", "encoding", "=", "encoding", ")", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "json", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
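A brief sketch of DataStreamReader.json with a DDL-formatted schema string and a couple of the documented options, assuming an existing SparkSession bound to spark; the temporary directory and field names are placeholders:

>>> import tempfile
>>> json_sdf = spark.readStream.json(
...     tempfile.mkdtemp(),                    # placeholder input directory
...     schema="name STRING, ts TIMESTAMP",    # DDL-formatted schema string
...     multiLine=False,
...     timestampFormat="yyyy-MM-dd'T'HH:mm:ss")
>>> json_sdf.isStreaming
True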
train
DataStreamReader.orc
Loads an ORC file stream, returning the result as a :class:`DataFrame`. .. note:: Evolving. >>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp()) >>> orc_sdf.isStreaming True >>> orc_sdf.schema == sdf_schema True
python/pyspark/sql/streaming.py
def orc(self, path): """Loads a ORC file stream, returning the result as a :class:`DataFrame`. .. note:: Evolving. >>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp()) >>> orc_sdf.isStreaming True >>> orc_sdf.schema == sdf_schema True """ if isinstance(path, basestring): return self._df(self._jreader.orc(path)) else: raise TypeError("path can be only a single string")
def orc(self, path): """Loads a ORC file stream, returning the result as a :class:`DataFrame`. .. note:: Evolving. >>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp()) >>> orc_sdf.isStreaming True >>> orc_sdf.schema == sdf_schema True """ if isinstance(path, basestring): return self._df(self._jreader.orc(path)) else: raise TypeError("path can be only a single string")
[ "Loads", "a", "ORC", "file", "stream", "returning", "the", "result", "as", "a", ":", "class", ":", "DataFrame", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L506-L520
[ "def", "orc", "(", "self", ",", "path", ")", ":", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "orc", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
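A minimal sketch of DataStreamReader.orc, assuming an existing SparkSession bound to spark; the DDL schema string and temporary directory are placeholders:

>>> import tempfile
>>> orc_sdf = spark.readStream.schema("name STRING, age INT").orc(tempfile.mkdtemp())
>>> orc_sdf.isStreaming
True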
train
DataStreamReader.parquet
Loads a Parquet file stream, returning the result as a :class:`DataFrame`. You can set the following Parquet-specific option(s) for reading Parquet files: * ``mergeSchema``: sets whether we should merge schemas collected from all \ Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \ The default value is specified in ``spark.sql.parquet.mergeSchema``. .. note:: Evolving. >>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp()) >>> parquet_sdf.isStreaming True >>> parquet_sdf.schema == sdf_schema True
python/pyspark/sql/streaming.py
def parquet(self, path): """Loads a Parquet file stream, returning the result as a :class:`DataFrame`. You can set the following Parquet-specific option(s) for reading Parquet files: * ``mergeSchema``: sets whether we should merge schemas collected from all \ Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \ The default value is specified in ``spark.sql.parquet.mergeSchema``. .. note:: Evolving. >>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp()) >>> parquet_sdf.isStreaming True >>> parquet_sdf.schema == sdf_schema True """ if isinstance(path, basestring): return self._df(self._jreader.parquet(path)) else: raise TypeError("path can be only a single string")
def parquet(self, path): """Loads a Parquet file stream, returning the result as a :class:`DataFrame`. You can set the following Parquet-specific option(s) for reading Parquet files: * ``mergeSchema``: sets whether we should merge schemas collected from all \ Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \ The default value is specified in ``spark.sql.parquet.mergeSchema``. .. note:: Evolving. >>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp()) >>> parquet_sdf.isStreaming True >>> parquet_sdf.schema == sdf_schema True """ if isinstance(path, basestring): return self._df(self._jreader.parquet(path)) else: raise TypeError("path can be only a single string")
[ "Loads", "a", "Parquet", "file", "stream", "returning", "the", "result", "as", "a", ":", "class", ":", "DataFrame", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L523-L542
[ "def", "parquet", "(", "self", ",", "path", ")", ":", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "parquet", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
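A minimal sketch of DataStreamReader.parquet that also sets the Parquet-specific mergeSchema option mentioned in the docstring, assuming an existing SparkSession bound to spark; the schema string and directory are placeholders:

>>> import tempfile
>>> parquet_sdf = (spark.readStream
...                .schema("name STRING, age INT")
...                .option("mergeSchema", "true")  # overrides spark.sql.parquet.mergeSchema
...                .parquet(tempfile.mkdtemp()))
>>> parquet_sdf.isStreaming
True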
train
DataStreamReader.text
Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. .. note:: Evolving. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> text_sdf = spark.readStream.text(tempfile.mkdtemp()) >>> text_sdf.isStreaming True >>> "value" in str(text_sdf.schema) True
python/pyspark/sql/streaming.py
def text(self, path, wholetext=False, lineSep=None): """ Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. .. note:: Evolving. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> text_sdf = spark.readStream.text(tempfile.mkdtemp()) >>> text_sdf.isStreaming True >>> "value" in str(text_sdf.schema) True """ self._set_opts(wholetext=wholetext, lineSep=lineSep) if isinstance(path, basestring): return self._df(self._jreader.text(path)) else: raise TypeError("path can be only a single string")
def text(self, path, wholetext=False, lineSep=None): """ Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. .. note:: Evolving. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> text_sdf = spark.readStream.text(tempfile.mkdtemp()) >>> text_sdf.isStreaming True >>> "value" in str(text_sdf.schema) True """ self._set_opts(wholetext=wholetext, lineSep=lineSep) if isinstance(path, basestring): return self._df(self._jreader.text(path)) else: raise TypeError("path can be only a single string")
[ "Loads", "a", "text", "file", "stream", "and", "returns", "a", ":", "class", ":", "DataFrame", "whose", "schema", "starts", "with", "a", "string", "column", "named", "value", "and", "followed", "by", "partitioned", "columns", "if", "there", "are", "any", ".", "The", "text", "files", "must", "be", "encoded", "as", "UTF", "-", "8", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L546-L572
[ "def", "text", "(", "self", ",", "path", ",", "wholetext", "=", "False", ",", "lineSep", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "wholetext", "=", "wholetext", ",", "lineSep", "=", "lineSep", ")", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "text", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
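A minimal sketch of DataStreamReader.text, assuming an existing SparkSession bound to spark; the temporary directory is a placeholder, and the explicit wholetext and lineSep arguments are shown only for illustration:

>>> import tempfile
>>> text_sdf = spark.readStream.text(tempfile.mkdtemp(), wholetext=False, lineSep="\n")
>>> "value" in str(text_sdf.schema)  # schema starts with a string column named "value"
True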
train
DataStreamReader.csv
r"""Loads a CSV file stream and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. .. note:: Evolving. :param path: string, or list of strings, for input path(s). :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. :param positiveInf: sets the string representation of a positive infinity value. If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. 
Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse one record, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. >>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema) >>> csv_sdf.isStreaming True >>> csv_sdf.schema == sdf_schema True
python/pyspark/sql/streaming.py
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None, columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None, enforceSchema=None, emptyValue=None, locale=None, lineSep=None): r"""Loads a CSV file stream and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. .. note:: Evolving. :param path: string, or list of strings, for input path(s). :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. :param positiveInf: sets the string representation of a positive infinity value. 
If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse one record, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. 
>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema) >>> csv_sdf.isStreaming True >>> csv_sdf.schema == sdf_schema True """ self._set_opts( schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment, header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, enforceSchema=enforceSchema, emptyValue=emptyValue, locale=locale, lineSep=lineSep) if isinstance(path, basestring): return self._df(self._jreader.csv(path)) else: raise TypeError("path can be only a single string")
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None, columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None, enforceSchema=None, emptyValue=None, locale=None, lineSep=None): r"""Loads a CSV file stream and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. .. note:: Evolving. :param path: string, or list of strings, for input path(s). :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. :param positiveInf: sets the string representation of a positive infinity value. 
If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse one record, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. 
>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema) >>> csv_sdf.isStreaming True >>> csv_sdf.schema == sdf_schema True """ self._set_opts( schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment, header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, enforceSchema=enforceSchema, emptyValue=emptyValue, locale=locale, lineSep=lineSep) if isinstance(path, basestring): return self._df(self._jreader.csv(path)) else: raise TypeError("path can be only a single string")
[ "r", "Loads", "a", "CSV", "file", "stream", "and", "returns", "the", "result", "as", "a", ":", "class", ":", "DataFrame", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L575-L705
[ "def", "csv", "(", "self", ",", "path", ",", "schema", "=", "None", ",", "sep", "=", "None", ",", "encoding", "=", "None", ",", "quote", "=", "None", ",", "escape", "=", "None", ",", "comment", "=", "None", ",", "header", "=", "None", ",", "inferSchema", "=", "None", ",", "ignoreLeadingWhiteSpace", "=", "None", ",", "ignoreTrailingWhiteSpace", "=", "None", ",", "nullValue", "=", "None", ",", "nanValue", "=", "None", ",", "positiveInf", "=", "None", ",", "negativeInf", "=", "None", ",", "dateFormat", "=", "None", ",", "timestampFormat", "=", "None", ",", "maxColumns", "=", "None", ",", "maxCharsPerColumn", "=", "None", ",", "maxMalformedLogPerPartition", "=", "None", ",", "mode", "=", "None", ",", "columnNameOfCorruptRecord", "=", "None", ",", "multiLine", "=", "None", ",", "charToEscapeQuoteEscaping", "=", "None", ",", "enforceSchema", "=", "None", ",", "emptyValue", "=", "None", ",", "locale", "=", "None", ",", "lineSep", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "schema", "=", "schema", ",", "sep", "=", "sep", ",", "encoding", "=", "encoding", ",", "quote", "=", "quote", ",", "escape", "=", "escape", ",", "comment", "=", "comment", ",", "header", "=", "header", ",", "inferSchema", "=", "inferSchema", ",", "ignoreLeadingWhiteSpace", "=", "ignoreLeadingWhiteSpace", ",", "ignoreTrailingWhiteSpace", "=", "ignoreTrailingWhiteSpace", ",", "nullValue", "=", "nullValue", ",", "nanValue", "=", "nanValue", ",", "positiveInf", "=", "positiveInf", ",", "negativeInf", "=", "negativeInf", ",", "dateFormat", "=", "dateFormat", ",", "timestampFormat", "=", "timestampFormat", ",", "maxColumns", "=", "maxColumns", ",", "maxCharsPerColumn", "=", "maxCharsPerColumn", ",", "maxMalformedLogPerPartition", "=", "maxMalformedLogPerPartition", ",", "mode", "=", "mode", ",", "columnNameOfCorruptRecord", "=", "columnNameOfCorruptRecord", ",", "multiLine", "=", "multiLine", ",", "charToEscapeQuoteEscaping", "=", "charToEscapeQuoteEscaping", ",", "enforceSchema", "=", "enforceSchema", ",", "emptyValue", "=", "emptyValue", ",", "locale", "=", "locale", ",", "lineSep", "=", "lineSep", ")", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "csv", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
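A minimal usage sketch for the streaming CSV reader entry above, assuming an active SparkSession named spark and a hypothetical input directory /tmp/csv_in; an explicit schema is passed so Spark does not need the extra pass that inferSchema would require.
from pyspark.sql.types import StructType, StructField, StringType, IntegerType

# Hypothetical two-column schema for the incoming CSV files.
schema = StructType([
    StructField("name", StringType(), True),
    StructField("age", IntegerType(), True),
])

# Read new CSV files arriving in the directory as a stream; header/sep are optional options.
csv_sdf = spark.readStream.csv("/tmp/csv_in", schema=schema, header=True, sep=",")
assert csv_sdf.isStreaming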
DataStreamWriter.outputMode
Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. Options include: * `append`: Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`: All the rows in the streaming DataFrame/Dataset will be written to the sink every time there are some updates * `update`: Only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. .. note:: Evolving. >>> writer = sdf.writeStream.outputMode('append')
python/pyspark/sql/streaming.py
def outputMode(self, outputMode): """Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. Options include: * `append`:Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink every time these is some updates * `update`:only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. .. note:: Evolving. >>> writer = sdf.writeStream.outputMode('append') """ if not outputMode or type(outputMode) != str or len(outputMode.strip()) == 0: raise ValueError('The output mode must be a non-empty string. Got: %s' % outputMode) self._jwrite = self._jwrite.outputMode(outputMode) return self
def outputMode(self, outputMode): """Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. Options include: * `append`:Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink every time these is some updates * `update`:only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. .. note:: Evolving. >>> writer = sdf.writeStream.outputMode('append') """ if not outputMode or type(outputMode) != str or len(outputMode.strip()) == 0: raise ValueError('The output mode must be a non-empty string. Got: %s' % outputMode) self._jwrite = self._jwrite.outputMode(outputMode) return self
[ "Specifies", "how", "data", "of", "a", "streaming", "DataFrame", "/", "Dataset", "is", "written", "to", "a", "streaming", "sink", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L729-L749
[ "def", "outputMode", "(", "self", ",", "outputMode", ")", ":", "if", "not", "outputMode", "or", "type", "(", "outputMode", ")", "!=", "str", "or", "len", "(", "outputMode", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "'The output mode must be a non-empty string. Got: %s'", "%", "outputMode", ")", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "outputMode", "(", "outputMode", ")", "return", "self" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
DataStreamWriter.queryName
Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query')
python/pyspark/sql/streaming.py
def queryName(self, queryName): """Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query') """ if not queryName or type(queryName) != str or len(queryName.strip()) == 0: raise ValueError('The queryName must be a non-empty string. Got: %s' % queryName) self._jwrite = self._jwrite.queryName(queryName) return self
def queryName(self, queryName): """Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query') """ if not queryName or type(queryName) != str or len(queryName.strip()) == 0: raise ValueError('The queryName must be a non-empty string. Got: %s' % queryName) self._jwrite = self._jwrite.queryName(queryName) return self
[ "Specifies", "the", "name", "of", "the", ":", "class", ":", "StreamingQuery", "that", "can", "be", "started", "with", ":", "func", ":", "start", ".", "This", "name", "must", "be", "unique", "among", "all", "the", "currently", "active", "queries", "in", "the", "associated", "SparkSession", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L811-L825
[ "def", "queryName", "(", "self", ",", "queryName", ")", ":", "if", "not", "queryName", "or", "type", "(", "queryName", ")", "!=", "str", "or", "len", "(", "queryName", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "'The queryName must be a non-empty string. Got: %s'", "%", "queryName", ")", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "queryName", "(", "queryName", ")", "return", "self" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
DataStreamWriter.trigger
Set the trigger for the stream query. If this is not set, it will run the query as fast as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``. .. note:: Evolving. :param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'. Set a trigger that runs a query periodically based on the processing time. Only one trigger can be set. :param once: if set to True, set a trigger that processes only one batch of data in a streaming query then terminates the query. Only one trigger can be set. >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(processingTime='5 seconds') >>> # trigger the query for just one batch of data >>> writer = sdf.writeStream.trigger(once=True) >>> # trigger the query for continuous execution with a 5-second checkpoint interval >>> writer = sdf.writeStream.trigger(continuous='5 seconds')
python/pyspark/sql/streaming.py
def trigger(self, processingTime=None, once=None, continuous=None): """Set the trigger for the stream query. If this is not set it will run the query as fast as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``. .. note:: Evolving. :param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'. Set a trigger that runs a query periodically based on the processing time. Only one trigger can be set. :param once: if set to True, set a trigger that processes only one batch of data in a streaming query then terminates the query. Only one trigger can be set. >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(processingTime='5 seconds') >>> # trigger the query for just once batch of data >>> writer = sdf.writeStream.trigger(once=True) >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(continuous='5 seconds') """ params = [processingTime, once, continuous] if params.count(None) == 3: raise ValueError('No trigger provided') elif params.count(None) < 2: raise ValueError('Multiple triggers not allowed.') jTrigger = None if processingTime is not None: if type(processingTime) != str or len(processingTime.strip()) == 0: raise ValueError('Value for processingTime must be a non empty string. Got: %s' % processingTime) interval = processingTime.strip() jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime( interval) elif once is not None: if once is not True: raise ValueError('Value for once must be True. Got: %s' % once) jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once() else: if type(continuous) != str or len(continuous.strip()) == 0: raise ValueError('Value for continuous must be a non empty string. Got: %s' % continuous) interval = continuous.strip() jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Continuous( interval) self._jwrite = self._jwrite.trigger(jTrigger) return self
def trigger(self, processingTime=None, once=None, continuous=None): """Set the trigger for the stream query. If this is not set it will run the query as fast as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``. .. note:: Evolving. :param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'. Set a trigger that runs a query periodically based on the processing time. Only one trigger can be set. :param once: if set to True, set a trigger that processes only one batch of data in a streaming query then terminates the query. Only one trigger can be set. >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(processingTime='5 seconds') >>> # trigger the query for just once batch of data >>> writer = sdf.writeStream.trigger(once=True) >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(continuous='5 seconds') """ params = [processingTime, once, continuous] if params.count(None) == 3: raise ValueError('No trigger provided') elif params.count(None) < 2: raise ValueError('Multiple triggers not allowed.') jTrigger = None if processingTime is not None: if type(processingTime) != str or len(processingTime.strip()) == 0: raise ValueError('Value for processingTime must be a non empty string. Got: %s' % processingTime) interval = processingTime.strip() jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime( interval) elif once is not None: if once is not True: raise ValueError('Value for once must be True. Got: %s' % once) jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once() else: if type(continuous) != str or len(continuous.strip()) == 0: raise ValueError('Value for continuous must be a non empty string. Got: %s' % continuous) interval = continuous.strip() jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Continuous( interval) self._jwrite = self._jwrite.trigger(jTrigger) return self
[ "Set", "the", "trigger", "for", "the", "stream", "query", ".", "If", "this", "is", "not", "set", "it", "will", "run", "the", "query", "as", "fast", "as", "possible", "which", "is", "equivalent", "to", "setting", "the", "trigger", "to", "processingTime", "=", "0", "seconds", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L829-L878
[ "def", "trigger", "(", "self", ",", "processingTime", "=", "None", ",", "once", "=", "None", ",", "continuous", "=", "None", ")", ":", "params", "=", "[", "processingTime", ",", "once", ",", "continuous", "]", "if", "params", ".", "count", "(", "None", ")", "==", "3", ":", "raise", "ValueError", "(", "'No trigger provided'", ")", "elif", "params", ".", "count", "(", "None", ")", "<", "2", ":", "raise", "ValueError", "(", "'Multiple triggers not allowed.'", ")", "jTrigger", "=", "None", "if", "processingTime", "is", "not", "None", ":", "if", "type", "(", "processingTime", ")", "!=", "str", "or", "len", "(", "processingTime", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "'Value for processingTime must be a non empty string. Got: %s'", "%", "processingTime", ")", "interval", "=", "processingTime", ".", "strip", "(", ")", "jTrigger", "=", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "streaming", ".", "Trigger", ".", "ProcessingTime", "(", "interval", ")", "elif", "once", "is", "not", "None", ":", "if", "once", "is", "not", "True", ":", "raise", "ValueError", "(", "'Value for once must be True. Got: %s'", "%", "once", ")", "jTrigger", "=", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "streaming", ".", "Trigger", ".", "Once", "(", ")", "else", ":", "if", "type", "(", "continuous", ")", "!=", "str", "or", "len", "(", "continuous", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "'Value for continuous must be a non empty string. Got: %s'", "%", "continuous", ")", "interval", "=", "continuous", ".", "strip", "(", ")", "jTrigger", "=", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "streaming", ".", "Trigger", ".", "Continuous", "(", "interval", ")", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "trigger", "(", "jTrigger", ")", "return", "self" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
DataStreamWriter.foreach
Sets the output of the streaming query to be processed using the provided writer ``f``. This is often used to write the output of a streaming query to arbitrary storage systems. The processing logic can be specified in two ways. #. A **function** that takes a row as input. This is a simple way to express your processing logic. Note that this does not allow you to deduplicate generated data when failures cause reprocessing of some input data. That would require you to specify the processing logic in the next way. #. An **object** with a ``process`` method and optional ``open`` and ``close`` methods. The object can have the following methods. * ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing (for example, open a connection, start a transaction, etc). Additionally, you can use the `partition_id` and `epoch_id` to deduplicate regenerated data (discussed later). * ``process(row)``: *Non-optional* method that processes each :class:`Row`. * ``close(error)``: *Optional* method that finalizes and cleans up (for example, close connection, commit transaction, etc.) after all rows have been processed. The object will be used by Spark in the following way. * A single copy of this object is responsible of all the data generated by a single task in a query. In other words, one instance is responsible for processing one partition of the data generated in a distributed manner. * This object must be serializable because each task will get a fresh serialized-deserialized copy of the provided object. Hence, it is strongly recommended that any initialization for writing data (e.g. opening a connection or starting a transaction) is done after the `open(...)` method has been called, which signifies that the task is ready to generate data. * The lifecycle of the methods are as follows. For each partition with ``partition_id``: ... For each batch/epoch of streaming data with ``epoch_id``: ....... Method ``open(partitionId, epochId)`` is called. ....... If ``open(...)`` returns true, for each row in the partition and batch/epoch, method ``process(row)`` is called. ....... Method ``close(errorOrNull)`` is called with error (if any) seen while processing rows. Important points to note: * The `partitionId` and `epochId` can be used to deduplicate generated data when failures cause reprocessing of some input data. This depends on the execution mode of the query. If the streaming query is being executed in the micro-batch mode, then every partition represented by a unique tuple (partition_id, epoch_id) is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used to deduplicate and/or transactionally commit data and achieve exactly-once guarantees. However, if the streaming query is being executed in the continuous mode, then this guarantee does not hold and therefore should not be used for deduplication. * The ``close()`` method (if exists) will be called if `open()` method exists and returns successfully (irrespective of the return value), except if the Python crashes in the middle. .. note:: Evolving. >>> # Print every row using a function >>> def print_row(row): ... print(row) ... >>> writer = sdf.writeStream.foreach(print_row) >>> # Print every row using a object with process() method >>> class RowPrinter: ... def open(self, partition_id, epoch_id): ... print("Opened %d, %d" % (partition_id, epoch_id)) ... return True ... def process(self, row): ... print(row) ... def close(self, error): ... print("Closed with error: %s" % str(error)) ... 
>>> writer = sdf.writeStream.foreach(RowPrinter())
python/pyspark/sql/streaming.py
def foreach(self, f): """ Sets the output of the streaming query to be processed using the provided writer ``f``. This is often used to write the output of a streaming query to arbitrary storage systems. The processing logic can be specified in two ways. #. A **function** that takes a row as input. This is a simple way to express your processing logic. Note that this does not allow you to deduplicate generated data when failures cause reprocessing of some input data. That would require you to specify the processing logic in the next way. #. An **object** with a ``process`` method and optional ``open`` and ``close`` methods. The object can have the following methods. * ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing (for example, open a connection, start a transaction, etc). Additionally, you can use the `partition_id` and `epoch_id` to deduplicate regenerated data (discussed later). * ``process(row)``: *Non-optional* method that processes each :class:`Row`. * ``close(error)``: *Optional* method that finalizes and cleans up (for example, close connection, commit transaction, etc.) after all rows have been processed. The object will be used by Spark in the following way. * A single copy of this object is responsible of all the data generated by a single task in a query. In other words, one instance is responsible for processing one partition of the data generated in a distributed manner. * This object must be serializable because each task will get a fresh serialized-deserialized copy of the provided object. Hence, it is strongly recommended that any initialization for writing data (e.g. opening a connection or starting a transaction) is done after the `open(...)` method has been called, which signifies that the task is ready to generate data. * The lifecycle of the methods are as follows. For each partition with ``partition_id``: ... For each batch/epoch of streaming data with ``epoch_id``: ....... Method ``open(partitionId, epochId)`` is called. ....... If ``open(...)`` returns true, for each row in the partition and batch/epoch, method ``process(row)`` is called. ....... Method ``close(errorOrNull)`` is called with error (if any) seen while processing rows. Important points to note: * The `partitionId` and `epochId` can be used to deduplicate generated data when failures cause reprocessing of some input data. This depends on the execution mode of the query. If the streaming query is being executed in the micro-batch mode, then every partition represented by a unique tuple (partition_id, epoch_id) is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used to deduplicate and/or transactionally commit data and achieve exactly-once guarantees. However, if the streaming query is being executed in the continuous mode, then this guarantee does not hold and therefore should not be used for deduplication. * The ``close()`` method (if exists) will be called if `open()` method exists and returns successfully (irrespective of the return value), except if the Python crashes in the middle. .. note:: Evolving. >>> # Print every row using a function >>> def print_row(row): ... print(row) ... >>> writer = sdf.writeStream.foreach(print_row) >>> # Print every row using a object with process() method >>> class RowPrinter: ... def open(self, partition_id, epoch_id): ... print("Opened %d, %d" % (partition_id, epoch_id)) ... return True ... def process(self, row): ... print(row) ... def close(self, error): ... 
print("Closed with error: %s" % str(error)) ... >>> writer = sdf.writeStream.foreach(RowPrinter()) """ from pyspark.rdd import _wrap_function from pyspark.serializers import PickleSerializer, AutoBatchedSerializer from pyspark.taskcontext import TaskContext if callable(f): # The provided object is a callable function that is supposed to be called on each row. # Construct a function that takes an iterator and calls the provided function on each # row. def func_without_process(_, iterator): for x in iterator: f(x) return iter([]) func = func_without_process else: # The provided object is not a callable function. Then it is expected to have a # 'process(row)' method, and optional 'open(partition_id, epoch_id)' and # 'close(error)' methods. if not hasattr(f, 'process'): raise Exception("Provided object does not have a 'process' method") if not callable(getattr(f, 'process')): raise Exception("Attribute 'process' in provided object is not callable") def doesMethodExist(method_name): exists = hasattr(f, method_name) if exists and not callable(getattr(f, method_name)): raise Exception( "Attribute '%s' in provided object is not callable" % method_name) return exists open_exists = doesMethodExist('open') close_exists = doesMethodExist('close') def func_with_open_process_close(partition_id, iterator): epoch_id = TaskContext.get().getLocalProperty('streaming.sql.batchId') if epoch_id: epoch_id = int(epoch_id) else: raise Exception("Could not get batch id from TaskContext") # Check if the data should be processed should_process = True if open_exists: should_process = f.open(partition_id, epoch_id) error = None try: if should_process: for x in iterator: f.process(x) except Exception as ex: error = ex finally: if close_exists: f.close(error) if error: raise error return iter([]) func = func_with_open_process_close serializer = AutoBatchedSerializer(PickleSerializer()) wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer) jForeachWriter = \ self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter( wrapped_func, self._df._jdf.schema()) self._jwrite.foreach(jForeachWriter) return self
def foreach(self, f): """ Sets the output of the streaming query to be processed using the provided writer ``f``. This is often used to write the output of a streaming query to arbitrary storage systems. The processing logic can be specified in two ways. #. A **function** that takes a row as input. This is a simple way to express your processing logic. Note that this does not allow you to deduplicate generated data when failures cause reprocessing of some input data. That would require you to specify the processing logic in the next way. #. An **object** with a ``process`` method and optional ``open`` and ``close`` methods. The object can have the following methods. * ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing (for example, open a connection, start a transaction, etc). Additionally, you can use the `partition_id` and `epoch_id` to deduplicate regenerated data (discussed later). * ``process(row)``: *Non-optional* method that processes each :class:`Row`. * ``close(error)``: *Optional* method that finalizes and cleans up (for example, close connection, commit transaction, etc.) after all rows have been processed. The object will be used by Spark in the following way. * A single copy of this object is responsible of all the data generated by a single task in a query. In other words, one instance is responsible for processing one partition of the data generated in a distributed manner. * This object must be serializable because each task will get a fresh serialized-deserialized copy of the provided object. Hence, it is strongly recommended that any initialization for writing data (e.g. opening a connection or starting a transaction) is done after the `open(...)` method has been called, which signifies that the task is ready to generate data. * The lifecycle of the methods are as follows. For each partition with ``partition_id``: ... For each batch/epoch of streaming data with ``epoch_id``: ....... Method ``open(partitionId, epochId)`` is called. ....... If ``open(...)`` returns true, for each row in the partition and batch/epoch, method ``process(row)`` is called. ....... Method ``close(errorOrNull)`` is called with error (if any) seen while processing rows. Important points to note: * The `partitionId` and `epochId` can be used to deduplicate generated data when failures cause reprocessing of some input data. This depends on the execution mode of the query. If the streaming query is being executed in the micro-batch mode, then every partition represented by a unique tuple (partition_id, epoch_id) is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used to deduplicate and/or transactionally commit data and achieve exactly-once guarantees. However, if the streaming query is being executed in the continuous mode, then this guarantee does not hold and therefore should not be used for deduplication. * The ``close()`` method (if exists) will be called if `open()` method exists and returns successfully (irrespective of the return value), except if the Python crashes in the middle. .. note:: Evolving. >>> # Print every row using a function >>> def print_row(row): ... print(row) ... >>> writer = sdf.writeStream.foreach(print_row) >>> # Print every row using a object with process() method >>> class RowPrinter: ... def open(self, partition_id, epoch_id): ... print("Opened %d, %d" % (partition_id, epoch_id)) ... return True ... def process(self, row): ... print(row) ... def close(self, error): ... 
print("Closed with error: %s" % str(error)) ... >>> writer = sdf.writeStream.foreach(RowPrinter()) """ from pyspark.rdd import _wrap_function from pyspark.serializers import PickleSerializer, AutoBatchedSerializer from pyspark.taskcontext import TaskContext if callable(f): # The provided object is a callable function that is supposed to be called on each row. # Construct a function that takes an iterator and calls the provided function on each # row. def func_without_process(_, iterator): for x in iterator: f(x) return iter([]) func = func_without_process else: # The provided object is not a callable function. Then it is expected to have a # 'process(row)' method, and optional 'open(partition_id, epoch_id)' and # 'close(error)' methods. if not hasattr(f, 'process'): raise Exception("Provided object does not have a 'process' method") if not callable(getattr(f, 'process')): raise Exception("Attribute 'process' in provided object is not callable") def doesMethodExist(method_name): exists = hasattr(f, method_name) if exists and not callable(getattr(f, method_name)): raise Exception( "Attribute '%s' in provided object is not callable" % method_name) return exists open_exists = doesMethodExist('open') close_exists = doesMethodExist('close') def func_with_open_process_close(partition_id, iterator): epoch_id = TaskContext.get().getLocalProperty('streaming.sql.batchId') if epoch_id: epoch_id = int(epoch_id) else: raise Exception("Could not get batch id from TaskContext") # Check if the data should be processed should_process = True if open_exists: should_process = f.open(partition_id, epoch_id) error = None try: if should_process: for x in iterator: f.process(x) except Exception as ex: error = ex finally: if close_exists: f.close(error) if error: raise error return iter([]) func = func_with_open_process_close serializer = AutoBatchedSerializer(PickleSerializer()) wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer) jForeachWriter = \ self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter( wrapped_func, self._df._jdf.schema()) self._jwrite.foreach(jForeachWriter) return self
[ "Sets", "the", "output", "of", "the", "streaming", "query", "to", "be", "processed", "using", "the", "provided", "writer", "f", ".", "This", "is", "often", "used", "to", "write", "the", "output", "of", "a", "streaming", "query", "to", "arbitrary", "storage", "systems", ".", "The", "processing", "logic", "can", "be", "specified", "in", "two", "ways", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L881-L1040
[ "def", "foreach", "(", "self", ",", "f", ")", ":", "from", "pyspark", ".", "rdd", "import", "_wrap_function", "from", "pyspark", ".", "serializers", "import", "PickleSerializer", ",", "AutoBatchedSerializer", "from", "pyspark", ".", "taskcontext", "import", "TaskContext", "if", "callable", "(", "f", ")", ":", "# The provided object is a callable function that is supposed to be called on each row.", "# Construct a function that takes an iterator and calls the provided function on each", "# row.", "def", "func_without_process", "(", "_", ",", "iterator", ")", ":", "for", "x", "in", "iterator", ":", "f", "(", "x", ")", "return", "iter", "(", "[", "]", ")", "func", "=", "func_without_process", "else", ":", "# The provided object is not a callable function. Then it is expected to have a", "# 'process(row)' method, and optional 'open(partition_id, epoch_id)' and", "# 'close(error)' methods.", "if", "not", "hasattr", "(", "f", ",", "'process'", ")", ":", "raise", "Exception", "(", "\"Provided object does not have a 'process' method\"", ")", "if", "not", "callable", "(", "getattr", "(", "f", ",", "'process'", ")", ")", ":", "raise", "Exception", "(", "\"Attribute 'process' in provided object is not callable\"", ")", "def", "doesMethodExist", "(", "method_name", ")", ":", "exists", "=", "hasattr", "(", "f", ",", "method_name", ")", "if", "exists", "and", "not", "callable", "(", "getattr", "(", "f", ",", "method_name", ")", ")", ":", "raise", "Exception", "(", "\"Attribute '%s' in provided object is not callable\"", "%", "method_name", ")", "return", "exists", "open_exists", "=", "doesMethodExist", "(", "'open'", ")", "close_exists", "=", "doesMethodExist", "(", "'close'", ")", "def", "func_with_open_process_close", "(", "partition_id", ",", "iterator", ")", ":", "epoch_id", "=", "TaskContext", ".", "get", "(", ")", ".", "getLocalProperty", "(", "'streaming.sql.batchId'", ")", "if", "epoch_id", ":", "epoch_id", "=", "int", "(", "epoch_id", ")", "else", ":", "raise", "Exception", "(", "\"Could not get batch id from TaskContext\"", ")", "# Check if the data should be processed", "should_process", "=", "True", "if", "open_exists", ":", "should_process", "=", "f", ".", "open", "(", "partition_id", ",", "epoch_id", ")", "error", "=", "None", "try", ":", "if", "should_process", ":", "for", "x", "in", "iterator", ":", "f", ".", "process", "(", "x", ")", "except", "Exception", "as", "ex", ":", "error", "=", "ex", "finally", ":", "if", "close_exists", ":", "f", ".", "close", "(", "error", ")", "if", "error", ":", "raise", "error", "return", "iter", "(", "[", "]", ")", "func", "=", "func_with_open_process_close", "serializer", "=", "AutoBatchedSerializer", "(", "PickleSerializer", "(", ")", ")", "wrapped_func", "=", "_wrap_function", "(", "self", ".", "_spark", ".", "_sc", ",", "func", ",", "serializer", ",", "serializer", ")", "jForeachWriter", "=", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "execution", ".", "python", ".", "PythonForeachWriter", "(", "wrapped_func", ",", "self", ".", "_df", ".", "_jdf", ".", "schema", "(", ")", ")", "self", ".", "_jwrite", ".", "foreach", "(", "jForeachWriter", ")", "return", "self" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
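A hedged sketch of the object form of foreach() described above, following the open/process/close lifecycle; the print calls stand in for a hypothetical external sink, and sdf is the streaming DataFrame used in the surrounding doctests.
class IdempotentRowWriter(object):
    def open(self, partition_id, epoch_id):
        # In micro-batch mode (partition_id, epoch_id) identifies this slice of
        # data across retries, so a real sink could use it to skip duplicates.
        self.key = (partition_id, epoch_id)
        return True  # returning False would skip process() for this slice

    def process(self, row):
        print(self.key, row)  # placeholder for a write to external storage

    def close(self, error):
        if error is not None:
            print("partition %s failed: %s" % (self.key, error))

writer = sdf.writeStream.foreach(IdempotentRowWriter())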
DataStreamWriter.foreachBatch
Sets the output of the streaming query to be processed using the provided function. This is supported only in the micro-batch execution mode (that is, when the trigger is not continuous). In every micro-batch, the provided function will be called with (i) the output rows as a DataFrame and (ii) the batch identifier. The batchId can be used to deduplicate and transactionally write the output (that is, the provided Dataset) to external systems. The output DataFrame is guaranteed to be exactly the same for the same batchId (assuming all operations are deterministic in the query). .. note:: Evolving. >>> def func(batch_df, batch_id): ... batch_df.collect() ... >>> writer = sdf.writeStream.foreachBatch(func)
python/pyspark/sql/streaming.py
def foreachBatch(self, func): """ Sets the output of the streaming query to be processed using the provided function. This is supported only the in the micro-batch execution modes (that is, when the trigger is not continuous). In every micro-batch, the provided function will be called in every micro-batch with (i) the output rows as a DataFrame and (ii) the batch identifier. The batchId can be used deduplicate and transactionally write the output (that is, the provided Dataset) to external systems. The output DataFrame is guaranteed to exactly same for the same batchId (assuming all operations are deterministic in the query). .. note:: Evolving. >>> def func(batch_df, batch_id): ... batch_df.collect() ... >>> writer = sdf.writeStream.foreach(func) """ from pyspark.java_gateway import ensure_callback_server_started gw = self._spark._sc._gateway java_import(gw.jvm, "org.apache.spark.sql.execution.streaming.sources.*") wrapped_func = ForeachBatchFunction(self._spark, func) gw.jvm.PythonForeachBatchHelper.callForeachBatch(self._jwrite, wrapped_func) ensure_callback_server_started(gw) return self
def foreachBatch(self, func): """ Sets the output of the streaming query to be processed using the provided function. This is supported only the in the micro-batch execution modes (that is, when the trigger is not continuous). In every micro-batch, the provided function will be called in every micro-batch with (i) the output rows as a DataFrame and (ii) the batch identifier. The batchId can be used deduplicate and transactionally write the output (that is, the provided Dataset) to external systems. The output DataFrame is guaranteed to exactly same for the same batchId (assuming all operations are deterministic in the query). .. note:: Evolving. >>> def func(batch_df, batch_id): ... batch_df.collect() ... >>> writer = sdf.writeStream.foreach(func) """ from pyspark.java_gateway import ensure_callback_server_started gw = self._spark._sc._gateway java_import(gw.jvm, "org.apache.spark.sql.execution.streaming.sources.*") wrapped_func = ForeachBatchFunction(self._spark, func) gw.jvm.PythonForeachBatchHelper.callForeachBatch(self._jwrite, wrapped_func) ensure_callback_server_started(gw) return self
[ "Sets", "the", "output", "of", "the", "streaming", "query", "to", "be", "processed", "using", "the", "provided", "function", ".", "This", "is", "supported", "only", "the", "in", "the", "micro", "-", "batch", "execution", "modes", "(", "that", "is", "when", "the", "trigger", "is", "not", "continuous", ")", ".", "In", "every", "micro", "-", "batch", "the", "provided", "function", "will", "be", "called", "in", "every", "micro", "-", "batch", "with", "(", "i", ")", "the", "output", "rows", "as", "a", "DataFrame", "and", "(", "ii", ")", "the", "batch", "identifier", ".", "The", "batchId", "can", "be", "used", "deduplicate", "and", "transactionally", "write", "the", "output", "(", "that", "is", "the", "provided", "Dataset", ")", "to", "external", "systems", ".", "The", "output", "DataFrame", "is", "guaranteed", "to", "exactly", "same", "for", "the", "same", "batchId", "(", "assuming", "all", "operations", "are", "deterministic", "in", "the", "query", ")", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L1043-L1069
[ "def", "foreachBatch", "(", "self", ",", "func", ")", ":", "from", "pyspark", ".", "java_gateway", "import", "ensure_callback_server_started", "gw", "=", "self", ".", "_spark", ".", "_sc", ".", "_gateway", "java_import", "(", "gw", ".", "jvm", ",", "\"org.apache.spark.sql.execution.streaming.sources.*\"", ")", "wrapped_func", "=", "ForeachBatchFunction", "(", "self", ".", "_spark", ",", "func", ")", "gw", ".", "jvm", ".", "PythonForeachBatchHelper", ".", "callForeachBatch", "(", "self", ".", "_jwrite", ",", "wrapped_func", ")", "ensure_callback_server_started", "(", "gw", ")", "return", "self" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
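A hedged sketch of foreachBatch() as described above, reusing the ordinary batch DataFrameWriter inside the callback; the output and checkpoint paths are hypothetical.
def write_batch(batch_df, batch_id):
    # batch_id is stable across retries of the same micro-batch, so a sink can
    # use it to deduplicate or commit transactionally.
    batch_df.write.mode("append").parquet("/tmp/out/batch_id=%d" % batch_id)

query = (sdf.writeStream
         .foreachBatch(write_batch)
         .option("checkpointLocation", "/tmp/checkpoints/fb")
         .start())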
DataStreamWriter.start
Streams the contents of the :class:`DataFrame` to a data source. The data source is specified by the ``format`` and a set of ``options``. If ``format`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. .. note:: Evolving. :param path: the path in a Hadoop supported file system :param format: the format used to save :param outputMode: specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. * `append`: Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`: All the rows in the streaming DataFrame/Dataset will be written to the sink every time there are some updates * `update`: Only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. :param partitionBy: names of partitioning columns :param queryName: unique name for the query :param options: All other string options. You may want to provide a `checkpointLocation` for most streams; however, it is not required for a `memory` stream. >>> sq = sdf.writeStream.format('memory').queryName('this_query').start() >>> sq.isActive True >>> sq.name u'this_query' >>> sq.stop() >>> sq.isActive False >>> sq = sdf.writeStream.trigger(processingTime='5 seconds').start( ... queryName='that_query', outputMode="append", format='memory') >>> sq.name u'that_query' >>> sq.isActive True >>> sq.stop()
python/pyspark/sql/streaming.py
def start(self, path=None, format=None, outputMode=None, partitionBy=None, queryName=None, **options): """Streams the contents of the :class:`DataFrame` to a data source. The data source is specified by the ``format`` and a set of ``options``. If ``format`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. .. note:: Evolving. :param path: the path in a Hadoop supported file system :param format: the format used to save :param outputMode: specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. * `append`:Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink every time these is some updates * `update`:only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. :param partitionBy: names of partitioning columns :param queryName: unique name for the query :param options: All other string options. You may want to provide a `checkpointLocation` for most streams, however it is not required for a `memory` stream. >>> sq = sdf.writeStream.format('memory').queryName('this_query').start() >>> sq.isActive True >>> sq.name u'this_query' >>> sq.stop() >>> sq.isActive False >>> sq = sdf.writeStream.trigger(processingTime='5 seconds').start( ... queryName='that_query', outputMode="append", format='memory') >>> sq.name u'that_query' >>> sq.isActive True >>> sq.stop() """ self.options(**options) if outputMode is not None: self.outputMode(outputMode) if partitionBy is not None: self.partitionBy(partitionBy) if format is not None: self.format(format) if queryName is not None: self.queryName(queryName) if path is None: return self._sq(self._jwrite.start()) else: return self._sq(self._jwrite.start(path))
def start(self, path=None, format=None, outputMode=None, partitionBy=None, queryName=None, **options): """Streams the contents of the :class:`DataFrame` to a data source. The data source is specified by the ``format`` and a set of ``options``. If ``format`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. .. note:: Evolving. :param path: the path in a Hadoop supported file system :param format: the format used to save :param outputMode: specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. * `append`:Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink every time these is some updates * `update`:only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. :param partitionBy: names of partitioning columns :param queryName: unique name for the query :param options: All other string options. You may want to provide a `checkpointLocation` for most streams, however it is not required for a `memory` stream. >>> sq = sdf.writeStream.format('memory').queryName('this_query').start() >>> sq.isActive True >>> sq.name u'this_query' >>> sq.stop() >>> sq.isActive False >>> sq = sdf.writeStream.trigger(processingTime='5 seconds').start( ... queryName='that_query', outputMode="append", format='memory') >>> sq.name u'that_query' >>> sq.isActive True >>> sq.stop() """ self.options(**options) if outputMode is not None: self.outputMode(outputMode) if partitionBy is not None: self.partitionBy(partitionBy) if format is not None: self.format(format) if queryName is not None: self.queryName(queryName) if path is None: return self._sq(self._jwrite.start()) else: return self._sq(self._jwrite.start(path))
[ "Streams", "the", "contents", "of", "the", ":", "class", ":", "DataFrame", "to", "a", "data", "source", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L1073-L1128
[ "def", "start", "(", "self", ",", "path", "=", "None", ",", "format", "=", "None", ",", "outputMode", "=", "None", ",", "partitionBy", "=", "None", ",", "queryName", "=", "None", ",", "*", "*", "options", ")", ":", "self", ".", "options", "(", "*", "*", "options", ")", "if", "outputMode", "is", "not", "None", ":", "self", ".", "outputMode", "(", "outputMode", ")", "if", "partitionBy", "is", "not", "None", ":", "self", ".", "partitionBy", "(", "partitionBy", ")", "if", "format", "is", "not", "None", ":", "self", ".", "format", "(", "format", ")", "if", "queryName", "is", "not", "None", ":", "self", ".", "queryName", "(", "queryName", ")", "if", "path", "is", "None", ":", "return", "self", ".", "_sq", "(", "self", ".", "_jwrite", ".", "start", "(", ")", ")", "else", ":", "return", "self", ".", "_sq", "(", "self", ".", "_jwrite", ".", "start", "(", "path", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
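A hedged end-to-end sketch of start() with a file sink; unlike the `memory` sink used in the doctests above, a file sink generally needs a checkpointLocation, and the paths here are hypothetical.
query = (sdf.writeStream
         .format("parquet")
         .outputMode("append")
         .option("path", "/tmp/stream_out")
         .option("checkpointLocation", "/tmp/stream_chk")
         .trigger(processingTime="10 seconds")
         .start())
query.awaitTermination()  # blocks until the query stops or fails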
_make_cell_set_template_code
Get the Python compiler to emit LOAD_FAST(arg); STORE_DEREF Notes ----- In Python 3, we could use an easier function: .. code-block:: python def f(): cell = None def _stub(value): nonlocal cell cell = value return _stub _cell_set_template_code = f().__code__ This function is _only_ a LOAD_FAST(arg); STORE_DEREF, but that is invalid syntax on Python 2. If we use this function we also don't need to do the weird freevars/cellvars swap below
python/pyspark/cloudpickle.py
def _make_cell_set_template_code(): """Get the Python compiler to emit LOAD_FAST(arg); STORE_DEREF Notes ----- In Python 3, we could use an easier function: .. code-block:: python def f(): cell = None def _stub(value): nonlocal cell cell = value return _stub _cell_set_template_code = f().__code__ This function is _only_ a LOAD_FAST(arg); STORE_DEREF, but that is invalid syntax on Python 2. If we use this function we also don't need to do the weird freevars/cellvars swap below """ def inner(value): lambda: cell # make ``cell`` a closure so that we get a STORE_DEREF cell = value co = inner.__code__ # NOTE: we are marking the cell variable as a free variable intentionally # so that we simulate an inner function instead of the outer function. This # is what gives us the ``nonlocal`` behavior in a Python 2 compatible way. if not PY3: # pragma: no branch return types.CodeType( co.co_argcount, co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code, co.co_consts, co.co_names, co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_cellvars, # this is the trickery (), ) else: return types.CodeType( co.co_argcount, co.co_kwonlyargcount, co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code, co.co_consts, co.co_names, co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_cellvars, # this is the trickery (), )
def _make_cell_set_template_code(): """Get the Python compiler to emit LOAD_FAST(arg); STORE_DEREF Notes ----- In Python 3, we could use an easier function: .. code-block:: python def f(): cell = None def _stub(value): nonlocal cell cell = value return _stub _cell_set_template_code = f().__code__ This function is _only_ a LOAD_FAST(arg); STORE_DEREF, but that is invalid syntax on Python 2. If we use this function we also don't need to do the weird freevars/cellvars swap below """ def inner(value): lambda: cell # make ``cell`` a closure so that we get a STORE_DEREF cell = value co = inner.__code__ # NOTE: we are marking the cell variable as a free variable intentionally # so that we simulate an inner function instead of the outer function. This # is what gives us the ``nonlocal`` behavior in a Python 2 compatible way. if not PY3: # pragma: no branch return types.CodeType( co.co_argcount, co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code, co.co_consts, co.co_names, co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_cellvars, # this is the trickery (), ) else: return types.CodeType( co.co_argcount, co.co_kwonlyargcount, co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code, co.co_consts, co.co_names, co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_cellvars, # this is the trickery (), )
[ "Get", "the", "Python", "compiler", "to", "emit", "LOAD_FAST", "(", "arg", ")", ";", "STORE_DEREF" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L82-L149
[ "def", "_make_cell_set_template_code", "(", ")", ":", "def", "inner", "(", "value", ")", ":", "lambda", ":", "cell", "# make ``cell`` a closure so that we get a STORE_DEREF", "cell", "=", "value", "co", "=", "inner", ".", "__code__", "# NOTE: we are marking the cell variable as a free variable intentionally", "# so that we simulate an inner function instead of the outer function. This", "# is what gives us the ``nonlocal`` behavior in a Python 2 compatible way.", "if", "not", "PY3", ":", "# pragma: no branch", "return", "types", ".", "CodeType", "(", "co", ".", "co_argcount", ",", "co", ".", "co_nlocals", ",", "co", ".", "co_stacksize", ",", "co", ".", "co_flags", ",", "co", ".", "co_code", ",", "co", ".", "co_consts", ",", "co", ".", "co_names", ",", "co", ".", "co_varnames", ",", "co", ".", "co_filename", ",", "co", ".", "co_name", ",", "co", ".", "co_firstlineno", ",", "co", ".", "co_lnotab", ",", "co", ".", "co_cellvars", ",", "# this is the trickery", "(", ")", ",", ")", "else", ":", "return", "types", ".", "CodeType", "(", "co", ".", "co_argcount", ",", "co", ".", "co_kwonlyargcount", ",", "co", ".", "co_nlocals", ",", "co", ".", "co_stacksize", ",", "co", ".", "co_flags", ",", "co", ".", "co_code", ",", "co", ".", "co_consts", ",", "co", ".", "co_names", ",", "co", ".", "co_varnames", ",", "co", ".", "co_filename", ",", "co", ".", "co_name", ",", "co", ".", "co_firstlineno", ",", "co", ".", "co_lnotab", ",", "co", ".", "co_cellvars", ",", "# this is the trickery", "(", ")", ",", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
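For contrast with the bytecode trick above, this is the Python 3 ``nonlocal`` variant already quoted in the docstring, made runnable as a small sketch.
def make_cell_setter():
    cell = None
    def _stub(value):
        nonlocal cell  # rebinds the enclosing closure cell: LOAD_FAST(arg); STORE_DEREF
        cell = value
    return _stub

setter = make_cell_setter()
setter(42)  # the closure cell captured by setter now holds 42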
is_tornado_coroutine
Return whether *func* is a Tornado coroutine function. Running coroutines are not supported.
python/pyspark/cloudpickle.py
def is_tornado_coroutine(func): """ Return whether *func* is a Tornado coroutine function. Running coroutines are not supported. """ if 'tornado.gen' not in sys.modules: return False gen = sys.modules['tornado.gen'] if not hasattr(gen, "is_coroutine_function"): # Tornado version is too old return False return gen.is_coroutine_function(func)
def is_tornado_coroutine(func): """ Return whether *func* is a Tornado coroutine function. Running coroutines are not supported. """ if 'tornado.gen' not in sys.modules: return False gen = sys.modules['tornado.gen'] if not hasattr(gen, "is_coroutine_function"): # Tornado version is too old return False return gen.is_coroutine_function(func)
[ "Return", "whether", "*", "func", "*", "is", "a", "Tornado", "coroutine", "function", ".", "Running", "coroutines", "are", "not", "supported", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L905-L916
[ "def", "is_tornado_coroutine", "(", "func", ")", ":", "if", "'tornado.gen'", "not", "in", "sys", ".", "modules", ":", "return", "False", "gen", "=", "sys", ".", "modules", "[", "'tornado.gen'", "]", "if", "not", "hasattr", "(", "gen", ",", "\"is_coroutine_function\"", ")", ":", "# Tornado version is too old", "return", "False", "return", "gen", ".", "is_coroutine_function", "(", "func", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
dump
Serialize obj as bytes streamed into file protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python.
python/pyspark/cloudpickle.py
def dump(obj, file, protocol=None): """Serialize obj as bytes streamed into file protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python. """ CloudPickler(file, protocol=protocol).dump(obj)
def dump(obj, file, protocol=None): """Serialize obj as bytes streamed into file protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python. """ CloudPickler(file, protocol=protocol).dump(obj)
[ "Serialize", "obj", "as", "bytes", "streamed", "into", "file" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L926-L936
[ "def", "dump", "(", "obj", ",", "file", ",", "protocol", "=", "None", ")", ":", "CloudPickler", "(", "file", ",", "protocol", "=", "protocol", ")", ".", "dump", "(", "obj", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
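A hedged sketch of dump() streaming a pickled lambda to a file and reading it back with the standard pickle module; the file path is hypothetical and the import assumes pyspark is on the Python path.
import pickle
from pyspark import cloudpickle

square = lambda x: x * x  # plain pickle cannot serialize lambdas; cloudpickle can
with open("/tmp/square.pkl", "wb") as f:
    cloudpickle.dump(square, f)
with open("/tmp/square.pkl", "rb") as f:
    restored = pickle.load(f)  # cloudpickle output is loadable by plain pickle
assert restored(3) == 9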
dumps
Serialize obj as a string of bytes allocated in memory protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python.
python/pyspark/cloudpickle.py
def dumps(obj, protocol=None): """Serialize obj as a string of bytes allocated in memory protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python. """ file = StringIO() try: cp = CloudPickler(file, protocol=protocol) cp.dump(obj) return file.getvalue() finally: file.close()
def dumps(obj, protocol=None): """Serialize obj as a string of bytes allocated in memory protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python. """ file = StringIO() try: cp = CloudPickler(file, protocol=protocol) cp.dump(obj) return file.getvalue() finally: file.close()
[ "Serialize", "obj", "as", "a", "string", "of", "bytes", "allocated", "in", "memory" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L939-L955
[ "def", "dumps", "(", "obj", ",", "protocol", "=", "None", ")", ":", "file", "=", "StringIO", "(", ")", "try", ":", "cp", "=", "CloudPickler", "(", "file", ",", "protocol", "=", "protocol", ")", "cp", ".", "dump", "(", "obj", ")", "return", "file", ".", "getvalue", "(", ")", "finally", ":", "file", ".", "close", "(", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
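A minimal usage sketch for dump/dumps, assuming the vendored module is importable as pyspark.cloudpickle (the standalone cloudpickle package exposes the same pair of functions); the lambda and variable names are illustrative only:

import io
import pickle  # the standard library can deserialize cloudpickle output

from pyspark import cloudpickle  # or: import cloudpickle

square = lambda x: x * x  # plain pickle cannot serialize a lambda by reference

# In-memory round trip: dumps() -> bytes -> pickle.loads()
payload = cloudpickle.dumps(square)
assert pickle.loads(payload)(4) == 16

# Streaming variant: dump() writes the same bytes into any file-like object
buf = io.BytesIO()
cloudpickle.dump(square, buf, protocol=pickle.DEFAULT_PROTOCOL)  # compatibility-oriented protocol
assert pickle.loads(buf.getvalue())(5) == 25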
train
_fill_function
Fills in the rest of function data into the skeleton function object The skeleton itself is create by _make_skel_func().
python/pyspark/cloudpickle.py
def _fill_function(*args): """Fills in the rest of function data into the skeleton function object The skeleton itself is create by _make_skel_func(). """ if len(args) == 2: func = args[0] state = args[1] elif len(args) == 5: # Backwards compat for cloudpickle v0.4.0, after which the `module` # argument was introduced func = args[0] keys = ['globals', 'defaults', 'dict', 'closure_values'] state = dict(zip(keys, args[1:])) elif len(args) == 6: # Backwards compat for cloudpickle v0.4.1, after which the function # state was passed as a dict to the _fill_function it-self. func = args[0] keys = ['globals', 'defaults', 'dict', 'module', 'closure_values'] state = dict(zip(keys, args[1:])) else: raise ValueError('Unexpected _fill_value arguments: %r' % (args,)) # - At pickling time, any dynamic global variable used by func is # serialized by value (in state['globals']). # - At unpickling time, func's __globals__ attribute is initialized by # first retrieving an empty isolated namespace that will be shared # with other functions pickled from the same original module # by the same CloudPickler instance and then updated with the # content of state['globals'] to populate the shared isolated # namespace with all the global variables that are specifically # referenced for this function. func.__globals__.update(state['globals']) func.__defaults__ = state['defaults'] func.__dict__ = state['dict'] if 'annotations' in state: func.__annotations__ = state['annotations'] if 'doc' in state: func.__doc__ = state['doc'] if 'name' in state: func.__name__ = state['name'] if 'module' in state: func.__module__ = state['module'] if 'qualname' in state: func.__qualname__ = state['qualname'] cells = func.__closure__ if cells is not None: for cell, value in zip(cells, state['closure_values']): if value is not _empty_cell_value: cell_set(cell, value) return func
def _fill_function(*args): """Fills in the rest of function data into the skeleton function object The skeleton itself is create by _make_skel_func(). """ if len(args) == 2: func = args[0] state = args[1] elif len(args) == 5: # Backwards compat for cloudpickle v0.4.0, after which the `module` # argument was introduced func = args[0] keys = ['globals', 'defaults', 'dict', 'closure_values'] state = dict(zip(keys, args[1:])) elif len(args) == 6: # Backwards compat for cloudpickle v0.4.1, after which the function # state was passed as a dict to the _fill_function it-self. func = args[0] keys = ['globals', 'defaults', 'dict', 'module', 'closure_values'] state = dict(zip(keys, args[1:])) else: raise ValueError('Unexpected _fill_value arguments: %r' % (args,)) # - At pickling time, any dynamic global variable used by func is # serialized by value (in state['globals']). # - At unpickling time, func's __globals__ attribute is initialized by # first retrieving an empty isolated namespace that will be shared # with other functions pickled from the same original module # by the same CloudPickler instance and then updated with the # content of state['globals'] to populate the shared isolated # namespace with all the global variables that are specifically # referenced for this function. func.__globals__.update(state['globals']) func.__defaults__ = state['defaults'] func.__dict__ = state['dict'] if 'annotations' in state: func.__annotations__ = state['annotations'] if 'doc' in state: func.__doc__ = state['doc'] if 'name' in state: func.__name__ = state['name'] if 'module' in state: func.__module__ = state['module'] if 'qualname' in state: func.__qualname__ = state['qualname'] cells = func.__closure__ if cells is not None: for cell, value in zip(cells, state['closure_values']): if value is not _empty_cell_value: cell_set(cell, value) return func
[ "Fills", "in", "the", "rest", "of", "function", "data", "into", "the", "skeleton", "function", "object" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L1060-L1113
[ "def", "_fill_function", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "==", "2", ":", "func", "=", "args", "[", "0", "]", "state", "=", "args", "[", "1", "]", "elif", "len", "(", "args", ")", "==", "5", ":", "# Backwards compat for cloudpickle v0.4.0, after which the `module`", "# argument was introduced", "func", "=", "args", "[", "0", "]", "keys", "=", "[", "'globals'", ",", "'defaults'", ",", "'dict'", ",", "'closure_values'", "]", "state", "=", "dict", "(", "zip", "(", "keys", ",", "args", "[", "1", ":", "]", ")", ")", "elif", "len", "(", "args", ")", "==", "6", ":", "# Backwards compat for cloudpickle v0.4.1, after which the function", "# state was passed as a dict to the _fill_function it-self.", "func", "=", "args", "[", "0", "]", "keys", "=", "[", "'globals'", ",", "'defaults'", ",", "'dict'", ",", "'module'", ",", "'closure_values'", "]", "state", "=", "dict", "(", "zip", "(", "keys", ",", "args", "[", "1", ":", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "'Unexpected _fill_value arguments: %r'", "%", "(", "args", ",", ")", ")", "# - At pickling time, any dynamic global variable used by func is", "# serialized by value (in state['globals']).", "# - At unpickling time, func's __globals__ attribute is initialized by", "# first retrieving an empty isolated namespace that will be shared", "# with other functions pickled from the same original module", "# by the same CloudPickler instance and then updated with the", "# content of state['globals'] to populate the shared isolated", "# namespace with all the global variables that are specifically", "# referenced for this function.", "func", ".", "__globals__", ".", "update", "(", "state", "[", "'globals'", "]", ")", "func", ".", "__defaults__", "=", "state", "[", "'defaults'", "]", "func", ".", "__dict__", "=", "state", "[", "'dict'", "]", "if", "'annotations'", "in", "state", ":", "func", ".", "__annotations__", "=", "state", "[", "'annotations'", "]", "if", "'doc'", "in", "state", ":", "func", ".", "__doc__", "=", "state", "[", "'doc'", "]", "if", "'name'", "in", "state", ":", "func", ".", "__name__", "=", "state", "[", "'name'", "]", "if", "'module'", "in", "state", ":", "func", ".", "__module__", "=", "state", "[", "'module'", "]", "if", "'qualname'", "in", "state", ":", "func", ".", "__qualname__", "=", "state", "[", "'qualname'", "]", "cells", "=", "func", ".", "__closure__", "if", "cells", "is", "not", "None", ":", "for", "cell", ",", "value", "in", "zip", "(", "cells", ",", "state", "[", "'closure_values'", "]", ")", ":", "if", "value", "is", "not", "_empty_cell_value", ":", "cell_set", "(", "cell", ",", "value", ")", "return", "func" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
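To see what _fill_function puts back, a hedged round-trip sketch (assumes a working cloudpickle import; make_adder, add and SCALE are illustrative names): the closure cell, the default argument and the captured global are all re-attached to the skeleton function on load.

import pickle
from pyspark import cloudpickle  # or the standalone cloudpickle package

SCALE = 10  # module-level global captured by the inner function

def make_adder(offset):
    def add(x, bonus=1):  # closure over offset, default arg, global SCALE
        return (x + offset + bonus) * SCALE
    return add

restored = pickle.loads(cloudpickle.dumps(make_adder(5)))
assert restored(4) == (4 + 5 + 1) * 10  # closure, default and global all survived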
train
_rehydrate_skeleton_class
Put attributes from `class_dict` back on `skeleton_class`. See CloudPickler.save_dynamic_class for more info.
python/pyspark/cloudpickle.py
def _rehydrate_skeleton_class(skeleton_class, class_dict): """Put attributes from `class_dict` back on `skeleton_class`. See CloudPickler.save_dynamic_class for more info. """ registry = None for attrname, attr in class_dict.items(): if attrname == "_abc_impl": registry = attr else: setattr(skeleton_class, attrname, attr) if registry is not None: for subclass in registry: skeleton_class.register(subclass) return skeleton_class
def _rehydrate_skeleton_class(skeleton_class, class_dict): """Put attributes from `class_dict` back on `skeleton_class`. See CloudPickler.save_dynamic_class for more info. """ registry = None for attrname, attr in class_dict.items(): if attrname == "_abc_impl": registry = attr else: setattr(skeleton_class, attrname, attr) if registry is not None: for subclass in registry: skeleton_class.register(subclass) return skeleton_class
[ "Put", "attributes", "from", "class_dict", "back", "on", "skeleton_class", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L1146-L1161
[ "def", "_rehydrate_skeleton_class", "(", "skeleton_class", ",", "class_dict", ")", ":", "registry", "=", "None", "for", "attrname", ",", "attr", "in", "class_dict", ".", "items", "(", ")", ":", "if", "attrname", "==", "\"_abc_impl\"", ":", "registry", "=", "attr", "else", ":", "setattr", "(", "skeleton_class", ",", "attrname", ",", "attr", ")", "if", "registry", "is", "not", "None", ":", "for", "subclass", "in", "registry", ":", "skeleton_class", ".", "register", "(", "subclass", ")", "return", "skeleton_class" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
_is_dynamic
Return True if the module is special module that cannot be imported by its name.
python/pyspark/cloudpickle.py
def _is_dynamic(module): """ Return True if the module is special module that cannot be imported by its name. """ # Quick check: module that have __file__ attribute are not dynamic modules. if hasattr(module, '__file__'): return False if hasattr(module, '__spec__'): return module.__spec__ is None else: # Backward compat for Python 2 import imp try: path = None for part in module.__name__.split('.'): if path is not None: path = [path] f, path, description = imp.find_module(part, path) if f is not None: f.close() except ImportError: return True return False
def _is_dynamic(module): """ Return True if the module is special module that cannot be imported by its name. """ # Quick check: module that have __file__ attribute are not dynamic modules. if hasattr(module, '__file__'): return False if hasattr(module, '__spec__'): return module.__spec__ is None else: # Backward compat for Python 2 import imp try: path = None for part in module.__name__.split('.'): if path is not None: path = [path] f, path, description = imp.find_module(part, path) if f is not None: f.close() except ImportError: return True return False
[ "Return", "True", "if", "the", "module", "is", "special", "module", "that", "cannot", "be", "imported", "by", "its", "name", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L1164-L1188
[ "def", "_is_dynamic", "(", "module", ")", ":", "# Quick check: module that have __file__ attribute are not dynamic modules.", "if", "hasattr", "(", "module", ",", "'__file__'", ")", ":", "return", "False", "if", "hasattr", "(", "module", ",", "'__spec__'", ")", ":", "return", "module", ".", "__spec__", "is", "None", "else", ":", "# Backward compat for Python 2", "import", "imp", "try", ":", "path", "=", "None", "for", "part", "in", "module", ".", "__name__", ".", "split", "(", "'.'", ")", ":", "if", "path", "is", "not", "None", ":", "path", "=", "[", "path", "]", "f", ",", "path", ",", "description", "=", "imp", ".", "find_module", "(", "part", ",", "path", ")", "if", "f", "is", "not", "None", ":", "f", ".", "close", "(", ")", "except", "ImportError", ":", "return", "True", "return", "False" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
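The predicate boils down to: no __file__, and on Python 3 a None __spec__. A stdlib-only illustration of what such a "dynamic" module looks like (this only mimics the check rather than calling the private helper; the module name is made up):

import sys
import types

# A module built at runtime: re-importing it by name would fail, so a
# pickler has to serialize its contents by value.
dyn = types.ModuleType("made_up_module")
dyn.answer = 42

print(hasattr(dyn, "__file__"))  # False -> fails the quick "not dynamic" shortcut
print(dyn.__spec__ is None)      # True on CPython 3.4+ -> treated as dynamic

# A regular importable module carries both attributes.
print(hasattr(sys.modules["types"], "__file__"), types.__spec__ is not None)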
train
CloudPickler.save_codeobject
Save a code object
python/pyspark/cloudpickle.py
def save_codeobject(self, obj): """ Save a code object """ if PY3: # pragma: no branch args = ( obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars ) else: args = ( obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars ) self.save_reduce(types.CodeType, args, obj=obj)
def save_codeobject(self, obj): """ Save a code object """ if PY3: # pragma: no branch args = ( obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars ) else: args = ( obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars ) self.save_reduce(types.CodeType, args, obj=obj)
[ "Save", "a", "code", "object" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L298-L315
[ "def", "save_codeobject", "(", "self", ",", "obj", ")", ":", "if", "PY3", ":", "# pragma: no branch", "args", "=", "(", "obj", ".", "co_argcount", ",", "obj", ".", "co_kwonlyargcount", ",", "obj", ".", "co_nlocals", ",", "obj", ".", "co_stacksize", ",", "obj", ".", "co_flags", ",", "obj", ".", "co_code", ",", "obj", ".", "co_consts", ",", "obj", ".", "co_names", ",", "obj", ".", "co_varnames", ",", "obj", ".", "co_filename", ",", "obj", ".", "co_name", ",", "obj", ".", "co_firstlineno", ",", "obj", ".", "co_lnotab", ",", "obj", ".", "co_freevars", ",", "obj", ".", "co_cellvars", ")", "else", ":", "args", "=", "(", "obj", ".", "co_argcount", ",", "obj", ".", "co_nlocals", ",", "obj", ".", "co_stacksize", ",", "obj", ".", "co_flags", ",", "obj", ".", "co_code", ",", "obj", ".", "co_consts", ",", "obj", ".", "co_names", ",", "obj", ".", "co_varnames", ",", "obj", ".", "co_filename", ",", "obj", ".", "co_name", ",", "obj", ".", "co_firstlineno", ",", "obj", ".", "co_lnotab", ",", "obj", ".", "co_freevars", ",", "obj", ".", "co_cellvars", ")", "self", ".", "save_reduce", "(", "types", ".", "CodeType", ",", "args", ",", "obj", "=", "obj", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
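Pickling raw code objects is what lets function bodies travel by value. A hedged sketch (the co_* field list changes across interpreter versions, so this assumes a cloudpickle build that matches the running Python; greet and greet_again are illustrative names):

import pickle
import types
from pyspark import cloudpickle  # or the standalone cloudpickle package

def greet(name):
    return "hello, " + name

# Serialize just the code object, then rebuild a callable around it.
restored_code = pickle.loads(cloudpickle.dumps(greet.__code__))
rebuilt = types.FunctionType(restored_code, globals(), "greet_again")
assert rebuilt("spark") == "hello, spark"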
train
CloudPickler.save_function
Registered with the dispatch to handle all function types. Determines what kind of function obj is (e.g. lambda, defined at interactive prompt, etc) and handles the pickling appropriately.
python/pyspark/cloudpickle.py
def save_function(self, obj, name=None): """ Registered with the dispatch to handle all function types. Determines what kind of function obj is (e.g. lambda, defined at interactive prompt, etc) and handles the pickling appropriately. """ try: should_special_case = obj in _BUILTIN_TYPE_CONSTRUCTORS except TypeError: # Methods of builtin types aren't hashable in python 2. should_special_case = False if should_special_case: # We keep a special-cased cache of built-in type constructors at # global scope, because these functions are structured very # differently in different python versions and implementations (for # example, they're instances of types.BuiltinFunctionType in # CPython, but they're ordinary types.FunctionType instances in # PyPy). # # If the function we've received is in that cache, we just # serialize it as a lookup into the cache. return self.save_reduce(_BUILTIN_TYPE_CONSTRUCTORS[obj], (), obj=obj) write = self.write if name is None: name = obj.__name__ try: # whichmodule() could fail, see # https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling modname = pickle.whichmodule(obj, name) except Exception: modname = None # print('which gives %s %s %s' % (modname, obj, name)) try: themodule = sys.modules[modname] except KeyError: # eval'd items such as namedtuple give invalid items for their function __module__ modname = '__main__' if modname == '__main__': themodule = None try: lookedup_by_name = getattr(themodule, name, None) except Exception: lookedup_by_name = None if themodule: if lookedup_by_name is obj: return self.save_global(obj, name) # a builtin_function_or_method which comes in as an attribute of some # object (e.g., itertools.chain.from_iterable) will end # up with modname "__main__" and so end up here. But these functions # have no __code__ attribute in CPython, so the handling for # user-defined functions below will fail. # So we pickle them here using save_reduce; have to do it differently # for different python versions. if not hasattr(obj, '__code__'): if PY3: # pragma: no branch rv = obj.__reduce_ex__(self.proto) else: if hasattr(obj, '__self__'): rv = (getattr, (obj.__self__, name)) else: raise pickle.PicklingError("Can't pickle %r" % obj) return self.save_reduce(obj=obj, *rv) # if func is lambda, def'ed at prompt, is in main, or is nested, then # we'll pickle the actual function object rather than simply saving a # reference (as is done in default pickler), via save_function_tuple. if (islambda(obj) or getattr(obj.__code__, 'co_filename', None) == '<stdin>' or themodule is None): self.save_function_tuple(obj) return else: # func is nested if lookedup_by_name is None or lookedup_by_name is not obj: self.save_function_tuple(obj) return if obj.__dict__: # essentially save_reduce, but workaround needed to avoid recursion self.save(_restore_attr) write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj) self.save(obj.__dict__) write(pickle.TUPLE + pickle.REDUCE) else: write(pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj)
def save_function(self, obj, name=None): """ Registered with the dispatch to handle all function types. Determines what kind of function obj is (e.g. lambda, defined at interactive prompt, etc) and handles the pickling appropriately. """ try: should_special_case = obj in _BUILTIN_TYPE_CONSTRUCTORS except TypeError: # Methods of builtin types aren't hashable in python 2. should_special_case = False if should_special_case: # We keep a special-cased cache of built-in type constructors at # global scope, because these functions are structured very # differently in different python versions and implementations (for # example, they're instances of types.BuiltinFunctionType in # CPython, but they're ordinary types.FunctionType instances in # PyPy). # # If the function we've received is in that cache, we just # serialize it as a lookup into the cache. return self.save_reduce(_BUILTIN_TYPE_CONSTRUCTORS[obj], (), obj=obj) write = self.write if name is None: name = obj.__name__ try: # whichmodule() could fail, see # https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling modname = pickle.whichmodule(obj, name) except Exception: modname = None # print('which gives %s %s %s' % (modname, obj, name)) try: themodule = sys.modules[modname] except KeyError: # eval'd items such as namedtuple give invalid items for their function __module__ modname = '__main__' if modname == '__main__': themodule = None try: lookedup_by_name = getattr(themodule, name, None) except Exception: lookedup_by_name = None if themodule: if lookedup_by_name is obj: return self.save_global(obj, name) # a builtin_function_or_method which comes in as an attribute of some # object (e.g., itertools.chain.from_iterable) will end # up with modname "__main__" and so end up here. But these functions # have no __code__ attribute in CPython, so the handling for # user-defined functions below will fail. # So we pickle them here using save_reduce; have to do it differently # for different python versions. if not hasattr(obj, '__code__'): if PY3: # pragma: no branch rv = obj.__reduce_ex__(self.proto) else: if hasattr(obj, '__self__'): rv = (getattr, (obj.__self__, name)) else: raise pickle.PicklingError("Can't pickle %r" % obj) return self.save_reduce(obj=obj, *rv) # if func is lambda, def'ed at prompt, is in main, or is nested, then # we'll pickle the actual function object rather than simply saving a # reference (as is done in default pickler), via save_function_tuple. if (islambda(obj) or getattr(obj.__code__, 'co_filename', None) == '<stdin>' or themodule is None): self.save_function_tuple(obj) return else: # func is nested if lookedup_by_name is None or lookedup_by_name is not obj: self.save_function_tuple(obj) return if obj.__dict__: # essentially save_reduce, but workaround needed to avoid recursion self.save(_restore_attr) write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj) self.save(obj.__dict__) write(pickle.TUPLE + pickle.REDUCE) else: write(pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj)
[ "Registered", "with", "the", "dispatch", "to", "handle", "all", "function", "types", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L319-L412
[ "def", "save_function", "(", "self", ",", "obj", ",", "name", "=", "None", ")", ":", "try", ":", "should_special_case", "=", "obj", "in", "_BUILTIN_TYPE_CONSTRUCTORS", "except", "TypeError", ":", "# Methods of builtin types aren't hashable in python 2.", "should_special_case", "=", "False", "if", "should_special_case", ":", "# We keep a special-cased cache of built-in type constructors at", "# global scope, because these functions are structured very", "# differently in different python versions and implementations (for", "# example, they're instances of types.BuiltinFunctionType in", "# CPython, but they're ordinary types.FunctionType instances in", "# PyPy).", "#", "# If the function we've received is in that cache, we just", "# serialize it as a lookup into the cache.", "return", "self", ".", "save_reduce", "(", "_BUILTIN_TYPE_CONSTRUCTORS", "[", "obj", "]", ",", "(", ")", ",", "obj", "=", "obj", ")", "write", "=", "self", ".", "write", "if", "name", "is", "None", ":", "name", "=", "obj", ".", "__name__", "try", ":", "# whichmodule() could fail, see", "# https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling", "modname", "=", "pickle", ".", "whichmodule", "(", "obj", ",", "name", ")", "except", "Exception", ":", "modname", "=", "None", "# print('which gives %s %s %s' % (modname, obj, name))", "try", ":", "themodule", "=", "sys", ".", "modules", "[", "modname", "]", "except", "KeyError", ":", "# eval'd items such as namedtuple give invalid items for their function __module__", "modname", "=", "'__main__'", "if", "modname", "==", "'__main__'", ":", "themodule", "=", "None", "try", ":", "lookedup_by_name", "=", "getattr", "(", "themodule", ",", "name", ",", "None", ")", "except", "Exception", ":", "lookedup_by_name", "=", "None", "if", "themodule", ":", "if", "lookedup_by_name", "is", "obj", ":", "return", "self", ".", "save_global", "(", "obj", ",", "name", ")", "# a builtin_function_or_method which comes in as an attribute of some", "# object (e.g., itertools.chain.from_iterable) will end", "# up with modname \"__main__\" and so end up here. 
But these functions", "# have no __code__ attribute in CPython, so the handling for", "# user-defined functions below will fail.", "# So we pickle them here using save_reduce; have to do it differently", "# for different python versions.", "if", "not", "hasattr", "(", "obj", ",", "'__code__'", ")", ":", "if", "PY3", ":", "# pragma: no branch", "rv", "=", "obj", ".", "__reduce_ex__", "(", "self", ".", "proto", ")", "else", ":", "if", "hasattr", "(", "obj", ",", "'__self__'", ")", ":", "rv", "=", "(", "getattr", ",", "(", "obj", ".", "__self__", ",", "name", ")", ")", "else", ":", "raise", "pickle", ".", "PicklingError", "(", "\"Can't pickle %r\"", "%", "obj", ")", "return", "self", ".", "save_reduce", "(", "obj", "=", "obj", ",", "*", "rv", ")", "# if func is lambda, def'ed at prompt, is in main, or is nested, then", "# we'll pickle the actual function object rather than simply saving a", "# reference (as is done in default pickler), via save_function_tuple.", "if", "(", "islambda", "(", "obj", ")", "or", "getattr", "(", "obj", ".", "__code__", ",", "'co_filename'", ",", "None", ")", "==", "'<stdin>'", "or", "themodule", "is", "None", ")", ":", "self", ".", "save_function_tuple", "(", "obj", ")", "return", "else", ":", "# func is nested", "if", "lookedup_by_name", "is", "None", "or", "lookedup_by_name", "is", "not", "obj", ":", "self", ".", "save_function_tuple", "(", "obj", ")", "return", "if", "obj", ".", "__dict__", ":", "# essentially save_reduce, but workaround needed to avoid recursion", "self", ".", "save", "(", "_restore_attr", ")", "write", "(", "pickle", ".", "MARK", "+", "pickle", ".", "GLOBAL", "+", "modname", "+", "'\\n'", "+", "name", "+", "'\\n'", ")", "self", ".", "memoize", "(", "obj", ")", "self", ".", "save", "(", "obj", ".", "__dict__", ")", "write", "(", "pickle", ".", "TUPLE", "+", "pickle", ".", "REDUCE", ")", "else", ":", "write", "(", "pickle", ".", "GLOBAL", "+", "modname", "+", "'\\n'", "+", "name", "+", "'\\n'", ")", "self", ".", "memoize", "(", "obj", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
CloudPickler.save_dynamic_class
Save a class that can't be stored as module global. This method is used to serialize classes that are defined inside functions, or that otherwise can't be serialized as attribute lookups from global modules.
python/pyspark/cloudpickle.py
def save_dynamic_class(self, obj): """ Save a class that can't be stored as module global. This method is used to serialize classes that are defined inside functions, or that otherwise can't be serialized as attribute lookups from global modules. """ clsdict = dict(obj.__dict__) # copy dict proxy to a dict clsdict.pop('__weakref__', None) # For ABCMeta in python3.7+, remove _abc_impl as it is not picklable. # This is a fix which breaks the cache but this only makes the first # calls to issubclass slower. if "_abc_impl" in clsdict: import abc (registry, _, _, _) = abc._get_dump(obj) clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry] # On PyPy, __doc__ is a readonly attribute, so we need to include it in # the initial skeleton class. This is safe because we know that the # doc can't participate in a cycle with the original class. type_kwargs = {'__doc__': clsdict.pop('__doc__', None)} if hasattr(obj, "__slots__"): type_kwargs['__slots__'] = obj.__slots__ # pickle string length optimization: member descriptors of obj are # created automatically from obj's __slots__ attribute, no need to # save them in obj's state if isinstance(obj.__slots__, string_types): clsdict.pop(obj.__slots__) else: for k in obj.__slots__: clsdict.pop(k, None) # If type overrides __dict__ as a property, include it in the type kwargs. # In Python 2, we can't set this attribute after construction. __dict__ = clsdict.pop('__dict__', None) if isinstance(__dict__, property): type_kwargs['__dict__'] = __dict__ save = self.save write = self.write # We write pickle instructions explicitly here to handle the # possibility that the type object participates in a cycle with its own # __dict__. We first write an empty "skeleton" version of the class and # memoize it before writing the class' __dict__ itself. We then write # instructions to "rehydrate" the skeleton class by restoring the # attributes from the __dict__. # # A type can appear in a cycle with its __dict__ if an instance of the # type appears in the type's __dict__ (which happens for the stdlib # Enum class), or if the type defines methods that close over the name # of the type, (which is common for Python 2-style super() calls). # Push the rehydration function. save(_rehydrate_skeleton_class) # Mark the start of the args tuple for the rehydration function. write(pickle.MARK) # Create and memoize an skeleton class with obj's name and bases. tp = type(obj) self.save_reduce(tp, (obj.__name__, obj.__bases__, type_kwargs), obj=obj) # Now save the rest of obj's __dict__. Any references to obj # encountered while saving will point to the skeleton class. save(clsdict) # Write a tuple of (skeleton_class, clsdict). write(pickle.TUPLE) # Call _rehydrate_skeleton_class(skeleton_class, clsdict) write(pickle.REDUCE)
def save_dynamic_class(self, obj): """ Save a class that can't be stored as module global. This method is used to serialize classes that are defined inside functions, or that otherwise can't be serialized as attribute lookups from global modules. """ clsdict = dict(obj.__dict__) # copy dict proxy to a dict clsdict.pop('__weakref__', None) # For ABCMeta in python3.7+, remove _abc_impl as it is not picklable. # This is a fix which breaks the cache but this only makes the first # calls to issubclass slower. if "_abc_impl" in clsdict: import abc (registry, _, _, _) = abc._get_dump(obj) clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry] # On PyPy, __doc__ is a readonly attribute, so we need to include it in # the initial skeleton class. This is safe because we know that the # doc can't participate in a cycle with the original class. type_kwargs = {'__doc__': clsdict.pop('__doc__', None)} if hasattr(obj, "__slots__"): type_kwargs['__slots__'] = obj.__slots__ # pickle string length optimization: member descriptors of obj are # created automatically from obj's __slots__ attribute, no need to # save them in obj's state if isinstance(obj.__slots__, string_types): clsdict.pop(obj.__slots__) else: for k in obj.__slots__: clsdict.pop(k, None) # If type overrides __dict__ as a property, include it in the type kwargs. # In Python 2, we can't set this attribute after construction. __dict__ = clsdict.pop('__dict__', None) if isinstance(__dict__, property): type_kwargs['__dict__'] = __dict__ save = self.save write = self.write # We write pickle instructions explicitly here to handle the # possibility that the type object participates in a cycle with its own # __dict__. We first write an empty "skeleton" version of the class and # memoize it before writing the class' __dict__ itself. We then write # instructions to "rehydrate" the skeleton class by restoring the # attributes from the __dict__. # # A type can appear in a cycle with its __dict__ if an instance of the # type appears in the type's __dict__ (which happens for the stdlib # Enum class), or if the type defines methods that close over the name # of the type, (which is common for Python 2-style super() calls). # Push the rehydration function. save(_rehydrate_skeleton_class) # Mark the start of the args tuple for the rehydration function. write(pickle.MARK) # Create and memoize an skeleton class with obj's name and bases. tp = type(obj) self.save_reduce(tp, (obj.__name__, obj.__bases__, type_kwargs), obj=obj) # Now save the rest of obj's __dict__. Any references to obj # encountered while saving will point to the skeleton class. save(clsdict) # Write a tuple of (skeleton_class, clsdict). write(pickle.TUPLE) # Call _rehydrate_skeleton_class(skeleton_class, clsdict) write(pickle.REDUCE)
[ "Save", "a", "class", "that", "can", "t", "be", "stored", "as", "module", "global", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L463-L538
[ "def", "save_dynamic_class", "(", "self", ",", "obj", ")", ":", "clsdict", "=", "dict", "(", "obj", ".", "__dict__", ")", "# copy dict proxy to a dict", "clsdict", ".", "pop", "(", "'__weakref__'", ",", "None", ")", "# For ABCMeta in python3.7+, remove _abc_impl as it is not picklable.", "# This is a fix which breaks the cache but this only makes the first", "# calls to issubclass slower.", "if", "\"_abc_impl\"", "in", "clsdict", ":", "import", "abc", "(", "registry", ",", "_", ",", "_", ",", "_", ")", "=", "abc", ".", "_get_dump", "(", "obj", ")", "clsdict", "[", "\"_abc_impl\"", "]", "=", "[", "subclass_weakref", "(", ")", "for", "subclass_weakref", "in", "registry", "]", "# On PyPy, __doc__ is a readonly attribute, so we need to include it in", "# the initial skeleton class. This is safe because we know that the", "# doc can't participate in a cycle with the original class.", "type_kwargs", "=", "{", "'__doc__'", ":", "clsdict", ".", "pop", "(", "'__doc__'", ",", "None", ")", "}", "if", "hasattr", "(", "obj", ",", "\"__slots__\"", ")", ":", "type_kwargs", "[", "'__slots__'", "]", "=", "obj", ".", "__slots__", "# pickle string length optimization: member descriptors of obj are", "# created automatically from obj's __slots__ attribute, no need to", "# save them in obj's state", "if", "isinstance", "(", "obj", ".", "__slots__", ",", "string_types", ")", ":", "clsdict", ".", "pop", "(", "obj", ".", "__slots__", ")", "else", ":", "for", "k", "in", "obj", ".", "__slots__", ":", "clsdict", ".", "pop", "(", "k", ",", "None", ")", "# If type overrides __dict__ as a property, include it in the type kwargs.", "# In Python 2, we can't set this attribute after construction.", "__dict__", "=", "clsdict", ".", "pop", "(", "'__dict__'", ",", "None", ")", "if", "isinstance", "(", "__dict__", ",", "property", ")", ":", "type_kwargs", "[", "'__dict__'", "]", "=", "__dict__", "save", "=", "self", ".", "save", "write", "=", "self", ".", "write", "# We write pickle instructions explicitly here to handle the", "# possibility that the type object participates in a cycle with its own", "# __dict__. We first write an empty \"skeleton\" version of the class and", "# memoize it before writing the class' __dict__ itself. We then write", "# instructions to \"rehydrate\" the skeleton class by restoring the", "# attributes from the __dict__.", "#", "# A type can appear in a cycle with its __dict__ if an instance of the", "# type appears in the type's __dict__ (which happens for the stdlib", "# Enum class), or if the type defines methods that close over the name", "# of the type, (which is common for Python 2-style super() calls).", "# Push the rehydration function.", "save", "(", "_rehydrate_skeleton_class", ")", "# Mark the start of the args tuple for the rehydration function.", "write", "(", "pickle", ".", "MARK", ")", "# Create and memoize an skeleton class with obj's name and bases.", "tp", "=", "type", "(", "obj", ")", "self", ".", "save_reduce", "(", "tp", ",", "(", "obj", ".", "__name__", ",", "obj", ".", "__bases__", ",", "type_kwargs", ")", ",", "obj", "=", "obj", ")", "# Now save the rest of obj's __dict__. Any references to obj", "# encountered while saving will point to the skeleton class.", "save", "(", "clsdict", ")", "# Write a tuple of (skeleton_class, clsdict).", "write", "(", "pickle", ".", "TUPLE", ")", "# Call _rehydrate_skeleton_class(skeleton_class, clsdict)", "write", "(", "pickle", ".", "REDUCE", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
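Classes defined inside a function (or interactively) cannot be pickled as a module attribute lookup; this hedged sketch shows plain pickle refusing and the by-value round trip that the dynamic-class path enables (build_class and Point are illustrative names, and the cloudpickle import is an assumption):

import pickle
from pyspark import cloudpickle  # or the standalone cloudpickle package

def build_class():
    class Point:  # not reachable as module.attribute, so not picklable by reference
        def __init__(self, x, y):
            self.x, self.y = x, y
        def norm2(self):
            return self.x ** 2 + self.y ** 2
    return Point

Point = build_class()

try:
    pickle.dumps(Point)
except Exception as exc:  # plain pickle: attribute lookup fails
    print("plain pickle refused:", type(exc).__name__)

RestoredPoint = pickle.loads(cloudpickle.dumps(Point))
assert RestoredPoint(3, 4).norm2() == 25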
train
CloudPickler.save_function_tuple
Pickles an actual func object. A func comprises: code, globals, defaults, closure, and dict. We extract and save these, injecting reducing functions at certain points to recreate the func object. Keep in mind that some of these pieces can contain a ref to the func itself. Thus, a naive save on these pieces could trigger an infinite loop of save's. To get around that, we first create a skeleton func object using just the code (this is safe, since this won't contain a ref to the func), and memoize it as soon as it's created. The other stuff can then be filled in later.
python/pyspark/cloudpickle.py
def save_function_tuple(self, func): """ Pickles an actual func object. A func comprises: code, globals, defaults, closure, and dict. We extract and save these, injecting reducing functions at certain points to recreate the func object. Keep in mind that some of these pieces can contain a ref to the func itself. Thus, a naive save on these pieces could trigger an infinite loop of save's. To get around that, we first create a skeleton func object using just the code (this is safe, since this won't contain a ref to the func), and memoize it as soon as it's created. The other stuff can then be filled in later. """ if is_tornado_coroutine(func): self.save_reduce(_rebuild_tornado_coroutine, (func.__wrapped__,), obj=func) return save = self.save write = self.write code, f_globals, defaults, closure_values, dct, base_globals = self.extract_func_data(func) save(_fill_function) # skeleton function updater write(pickle.MARK) # beginning of tuple that _fill_function expects self._save_subimports( code, itertools.chain(f_globals.values(), closure_values or ()), ) # create a skeleton function object and memoize it save(_make_skel_func) save(( code, len(closure_values) if closure_values is not None else -1, base_globals, )) write(pickle.REDUCE) self.memoize(func) # save the rest of the func data needed by _fill_function state = { 'globals': f_globals, 'defaults': defaults, 'dict': dct, 'closure_values': closure_values, 'module': func.__module__, 'name': func.__name__, 'doc': func.__doc__, } if hasattr(func, '__annotations__') and sys.version_info >= (3, 7): state['annotations'] = func.__annotations__ if hasattr(func, '__qualname__'): state['qualname'] = func.__qualname__ save(state) write(pickle.TUPLE) write(pickle.REDUCE)
def save_function_tuple(self, func): """ Pickles an actual func object. A func comprises: code, globals, defaults, closure, and dict. We extract and save these, injecting reducing functions at certain points to recreate the func object. Keep in mind that some of these pieces can contain a ref to the func itself. Thus, a naive save on these pieces could trigger an infinite loop of save's. To get around that, we first create a skeleton func object using just the code (this is safe, since this won't contain a ref to the func), and memoize it as soon as it's created. The other stuff can then be filled in later. """ if is_tornado_coroutine(func): self.save_reduce(_rebuild_tornado_coroutine, (func.__wrapped__,), obj=func) return save = self.save write = self.write code, f_globals, defaults, closure_values, dct, base_globals = self.extract_func_data(func) save(_fill_function) # skeleton function updater write(pickle.MARK) # beginning of tuple that _fill_function expects self._save_subimports( code, itertools.chain(f_globals.values(), closure_values or ()), ) # create a skeleton function object and memoize it save(_make_skel_func) save(( code, len(closure_values) if closure_values is not None else -1, base_globals, )) write(pickle.REDUCE) self.memoize(func) # save the rest of the func data needed by _fill_function state = { 'globals': f_globals, 'defaults': defaults, 'dict': dct, 'closure_values': closure_values, 'module': func.__module__, 'name': func.__name__, 'doc': func.__doc__, } if hasattr(func, '__annotations__') and sys.version_info >= (3, 7): state['annotations'] = func.__annotations__ if hasattr(func, '__qualname__'): state['qualname'] = func.__qualname__ save(state) write(pickle.TUPLE) write(pickle.REDUCE)
[ "Pickles", "an", "actual", "func", "object", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L540-L596
[ "def", "save_function_tuple", "(", "self", ",", "func", ")", ":", "if", "is_tornado_coroutine", "(", "func", ")", ":", "self", ".", "save_reduce", "(", "_rebuild_tornado_coroutine", ",", "(", "func", ".", "__wrapped__", ",", ")", ",", "obj", "=", "func", ")", "return", "save", "=", "self", ".", "save", "write", "=", "self", ".", "write", "code", ",", "f_globals", ",", "defaults", ",", "closure_values", ",", "dct", ",", "base_globals", "=", "self", ".", "extract_func_data", "(", "func", ")", "save", "(", "_fill_function", ")", "# skeleton function updater", "write", "(", "pickle", ".", "MARK", ")", "# beginning of tuple that _fill_function expects", "self", ".", "_save_subimports", "(", "code", ",", "itertools", ".", "chain", "(", "f_globals", ".", "values", "(", ")", ",", "closure_values", "or", "(", ")", ")", ",", ")", "# create a skeleton function object and memoize it", "save", "(", "_make_skel_func", ")", "save", "(", "(", "code", ",", "len", "(", "closure_values", ")", "if", "closure_values", "is", "not", "None", "else", "-", "1", ",", "base_globals", ",", ")", ")", "write", "(", "pickle", ".", "REDUCE", ")", "self", ".", "memoize", "(", "func", ")", "# save the rest of the func data needed by _fill_function", "state", "=", "{", "'globals'", ":", "f_globals", ",", "'defaults'", ":", "defaults", ",", "'dict'", ":", "dct", ",", "'closure_values'", ":", "closure_values", ",", "'module'", ":", "func", ".", "__module__", ",", "'name'", ":", "func", ".", "__name__", ",", "'doc'", ":", "func", ".", "__doc__", ",", "}", "if", "hasattr", "(", "func", ",", "'__annotations__'", ")", "and", "sys", ".", "version_info", ">=", "(", "3", ",", "7", ")", ":", "state", "[", "'annotations'", "]", "=", "func", ".", "__annotations__", "if", "hasattr", "(", "func", ",", "'__qualname__'", ")", ":", "state", "[", "'qualname'", "]", "=", "func", ".", "__qualname__", "save", "(", "state", ")", "write", "(", "pickle", ".", "TUPLE", ")", "write", "(", "pickle", ".", "REDUCE", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
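The skeleton-then-fill order exists so a function whose closure refers back to itself does not recurse the pickler forever. A hedged demonstration (assumes a working cloudpickle import; names are illustrative):

import pickle
from pyspark import cloudpickle  # or the standalone cloudpickle package

def make_countdown():
    def countdown(n):
        # the closure cell of countdown contains countdown itself
        return [n] if n == 0 else [n] + countdown(n - 1)
    return countdown

restored = pickle.loads(cloudpickle.dumps(make_countdown()))
assert restored(3) == [3, 2, 1, 0]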
train
CloudPickler.save_global
Save a "global". The name of this method is somewhat misleading: all types get dispatched here.
python/pyspark/cloudpickle.py
def save_global(self, obj, name=None, pack=struct.pack): """ Save a "global". The name of this method is somewhat misleading: all types get dispatched here. """ if obj is type(None): return self.save_reduce(type, (None,), obj=obj) elif obj is type(Ellipsis): return self.save_reduce(type, (Ellipsis,), obj=obj) elif obj is type(NotImplemented): return self.save_reduce(type, (NotImplemented,), obj=obj) if obj.__module__ == "__main__": return self.save_dynamic_class(obj) try: return Pickler.save_global(self, obj, name=name) except Exception: if obj.__module__ == "__builtin__" or obj.__module__ == "builtins": if obj in _BUILTIN_TYPE_NAMES: return self.save_reduce( _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj) typ = type(obj) if typ is not obj and isinstance(obj, (type, types.ClassType)): return self.save_dynamic_class(obj) raise
def save_global(self, obj, name=None, pack=struct.pack): """ Save a "global". The name of this method is somewhat misleading: all types get dispatched here. """ if obj is type(None): return self.save_reduce(type, (None,), obj=obj) elif obj is type(Ellipsis): return self.save_reduce(type, (Ellipsis,), obj=obj) elif obj is type(NotImplemented): return self.save_reduce(type, (NotImplemented,), obj=obj) if obj.__module__ == "__main__": return self.save_dynamic_class(obj) try: return Pickler.save_global(self, obj, name=name) except Exception: if obj.__module__ == "__builtin__" or obj.__module__ == "builtins": if obj in _BUILTIN_TYPE_NAMES: return self.save_reduce( _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj) typ = type(obj) if typ is not obj and isinstance(obj, (type, types.ClassType)): return self.save_dynamic_class(obj) raise
[ "Save", "a", "global", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L678-L707
[ "def", "save_global", "(", "self", ",", "obj", ",", "name", "=", "None", ",", "pack", "=", "struct", ".", "pack", ")", ":", "if", "obj", "is", "type", "(", "None", ")", ":", "return", "self", ".", "save_reduce", "(", "type", ",", "(", "None", ",", ")", ",", "obj", "=", "obj", ")", "elif", "obj", "is", "type", "(", "Ellipsis", ")", ":", "return", "self", ".", "save_reduce", "(", "type", ",", "(", "Ellipsis", ",", ")", ",", "obj", "=", "obj", ")", "elif", "obj", "is", "type", "(", "NotImplemented", ")", ":", "return", "self", ".", "save_reduce", "(", "type", ",", "(", "NotImplemented", ",", ")", ",", "obj", "=", "obj", ")", "if", "obj", ".", "__module__", "==", "\"__main__\"", ":", "return", "self", ".", "save_dynamic_class", "(", "obj", ")", "try", ":", "return", "Pickler", ".", "save_global", "(", "self", ",", "obj", ",", "name", "=", "name", ")", "except", "Exception", ":", "if", "obj", ".", "__module__", "==", "\"__builtin__\"", "or", "obj", ".", "__module__", "==", "\"builtins\"", ":", "if", "obj", "in", "_BUILTIN_TYPE_NAMES", ":", "return", "self", ".", "save_reduce", "(", "_builtin_type", ",", "(", "_BUILTIN_TYPE_NAMES", "[", "obj", "]", ",", ")", ",", "obj", "=", "obj", ")", "typ", "=", "type", "(", "obj", ")", "if", "typ", "is", "not", "obj", "and", "isinstance", "(", "obj", ",", "(", "type", ",", "types", ".", "ClassType", ")", ")", ":", "return", "self", ".", "save_dynamic_class", "(", "obj", ")", "raise" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
CloudPickler.save_inst
Inner logic to save instance. Based off pickle.save_inst
python/pyspark/cloudpickle.py
def save_inst(self, obj): """Inner logic to save instance. Based off pickle.save_inst""" cls = obj.__class__ # Try the dispatch table (pickle module doesn't do it) f = self.dispatch.get(cls) if f: f(self, obj) # Call unbound method with explicit self return memo = self.memo write = self.write save = self.save if hasattr(obj, '__getinitargs__'): args = obj.__getinitargs__() len(args) # XXX Assert it's a sequence pickle._keep_alive(args, memo) else: args = () write(pickle.MARK) if self.bin: save(cls) for arg in args: save(arg) write(pickle.OBJ) else: for arg in args: save(arg) write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n') self.memoize(obj) try: getstate = obj.__getstate__ except AttributeError: stuff = obj.__dict__ else: stuff = getstate() pickle._keep_alive(stuff, memo) save(stuff) write(pickle.BUILD)
def save_inst(self, obj): """Inner logic to save instance. Based off pickle.save_inst""" cls = obj.__class__ # Try the dispatch table (pickle module doesn't do it) f = self.dispatch.get(cls) if f: f(self, obj) # Call unbound method with explicit self return memo = self.memo write = self.write save = self.save if hasattr(obj, '__getinitargs__'): args = obj.__getinitargs__() len(args) # XXX Assert it's a sequence pickle._keep_alive(args, memo) else: args = () write(pickle.MARK) if self.bin: save(cls) for arg in args: save(arg) write(pickle.OBJ) else: for arg in args: save(arg) write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n') self.memoize(obj) try: getstate = obj.__getstate__ except AttributeError: stuff = obj.__dict__ else: stuff = getstate() pickle._keep_alive(stuff, memo) save(stuff) write(pickle.BUILD)
[ "Inner", "logic", "to", "save", "instance", ".", "Based", "off", "pickle", ".", "save_inst" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L725-L768
[ "def", "save_inst", "(", "self", ",", "obj", ")", ":", "cls", "=", "obj", ".", "__class__", "# Try the dispatch table (pickle module doesn't do it)", "f", "=", "self", ".", "dispatch", ".", "get", "(", "cls", ")", "if", "f", ":", "f", "(", "self", ",", "obj", ")", "# Call unbound method with explicit self", "return", "memo", "=", "self", ".", "memo", "write", "=", "self", ".", "write", "save", "=", "self", ".", "save", "if", "hasattr", "(", "obj", ",", "'__getinitargs__'", ")", ":", "args", "=", "obj", ".", "__getinitargs__", "(", ")", "len", "(", "args", ")", "# XXX Assert it's a sequence", "pickle", ".", "_keep_alive", "(", "args", ",", "memo", ")", "else", ":", "args", "=", "(", ")", "write", "(", "pickle", ".", "MARK", ")", "if", "self", ".", "bin", ":", "save", "(", "cls", ")", "for", "arg", "in", "args", ":", "save", "(", "arg", ")", "write", "(", "pickle", ".", "OBJ", ")", "else", ":", "for", "arg", "in", "args", ":", "save", "(", "arg", ")", "write", "(", "pickle", ".", "INST", "+", "cls", ".", "__module__", "+", "'\\n'", "+", "cls", ".", "__name__", "+", "'\\n'", ")", "self", ".", "memoize", "(", "obj", ")", "try", ":", "getstate", "=", "obj", ".", "__getstate__", "except", "AttributeError", ":", "stuff", "=", "obj", ".", "__dict__", "else", ":", "stuff", "=", "getstate", "(", ")", "pickle", ".", "_keep_alive", "(", "stuff", ",", "memo", ")", "save", "(", "stuff", ")", "write", "(", "pickle", ".", "BUILD", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
CloudPickler.save_itemgetter
itemgetter serializer (needed for namedtuple support)
python/pyspark/cloudpickle.py
def save_itemgetter(self, obj): """itemgetter serializer (needed for namedtuple support)""" class Dummy: def __getitem__(self, item): return item items = obj(Dummy()) if not isinstance(items, tuple): items = (items,) return self.save_reduce(operator.itemgetter, items)
def save_itemgetter(self, obj): """itemgetter serializer (needed for namedtuple support)""" class Dummy: def __getitem__(self, item): return item items = obj(Dummy()) if not isinstance(items, tuple): items = (items,) return self.save_reduce(operator.itemgetter, items)
[ "itemgetter", "serializer", "(", "needed", "for", "namedtuple", "support", ")" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L786-L794
[ "def", "save_itemgetter", "(", "self", ",", "obj", ")", ":", "class", "Dummy", ":", "def", "__getitem__", "(", "self", ",", "item", ")", ":", "return", "item", "items", "=", "obj", "(", "Dummy", "(", ")", ")", "if", "not", "isinstance", "(", "items", ",", "tuple", ")", ":", "items", "=", "(", "items", ",", ")", "return", "self", ".", "save_reduce", "(", "operator", ".", "itemgetter", ",", "items", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
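itemgetter objects do not expose the keys they were built with, so the reducer recovers them by probing with a dummy whose __getitem__ echoes its argument. A stdlib-only illustration of that trick (Echo is an illustrative stand-in for the pickler's internal Dummy class):

import operator

class Echo:
    def __getitem__(self, item):
        return item  # hand the requested key straight back

getter = operator.itemgetter(2, 0)
recovered_keys = getter(Echo())   # (2, 0): exactly the original constructor args
print(recovered_keys)

# Those keys are all that is needed to rebuild an equivalent itemgetter.
rebuilt = operator.itemgetter(*recovered_keys)
assert rebuilt(["a", "b", "c"]) == ("c", "a")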
train
CloudPickler.save_attrgetter
attrgetter serializer
python/pyspark/cloudpickle.py
def save_attrgetter(self, obj): """attrgetter serializer""" class Dummy(object): def __init__(self, attrs, index=None): self.attrs = attrs self.index = index def __getattribute__(self, item): attrs = object.__getattribute__(self, "attrs") index = object.__getattribute__(self, "index") if index is None: index = len(attrs) attrs.append(item) else: attrs[index] = ".".join([attrs[index], item]) return type(self)(attrs, index) attrs = [] obj(Dummy(attrs)) return self.save_reduce(operator.attrgetter, tuple(attrs))
def save_attrgetter(self, obj): """attrgetter serializer""" class Dummy(object): def __init__(self, attrs, index=None): self.attrs = attrs self.index = index def __getattribute__(self, item): attrs = object.__getattribute__(self, "attrs") index = object.__getattribute__(self, "index") if index is None: index = len(attrs) attrs.append(item) else: attrs[index] = ".".join([attrs[index], item]) return type(self)(attrs, index) attrs = [] obj(Dummy(attrs)) return self.save_reduce(operator.attrgetter, tuple(attrs))
[ "attrgetter", "serializer" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L799-L816
[ "def", "save_attrgetter", "(", "self", ",", "obj", ")", ":", "class", "Dummy", "(", "object", ")", ":", "def", "__init__", "(", "self", ",", "attrs", ",", "index", "=", "None", ")", ":", "self", ".", "attrs", "=", "attrs", "self", ".", "index", "=", "index", "def", "__getattribute__", "(", "self", ",", "item", ")", ":", "attrs", "=", "object", ".", "__getattribute__", "(", "self", ",", "\"attrs\"", ")", "index", "=", "object", ".", "__getattribute__", "(", "self", ",", "\"index\"", ")", "if", "index", "is", "None", ":", "index", "=", "len", "(", "attrs", ")", "attrs", ".", "append", "(", "item", ")", "else", ":", "attrs", "[", "index", "]", "=", "\".\"", ".", "join", "(", "[", "attrs", "[", "index", "]", ",", "item", "]", ")", "return", "type", "(", "self", ")", "(", "attrs", ",", "index", ")", "attrs", "=", "[", "]", "obj", "(", "Dummy", "(", "attrs", ")", ")", "return", "self", ".", "save_reduce", "(", "operator", ".", "attrgetter", ",", "tuple", "(", "attrs", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
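attrgetter is handled with the same probing idea, except dotted attribute chains have to be recorded. A hedged end-to-end sketch (newer CPythons can pickle attrgetter natively, so the dedicated reducer mainly matters on older interpreters; Person and Address are illustrative classes, and the cloudpickle import is an assumption):

import operator
import pickle
from pyspark import cloudpickle  # or the standalone cloudpickle package

get_city = operator.attrgetter("address.city")
restored = pickle.loads(cloudpickle.dumps(get_city))

class Address: city = "Paris"
class Person: address = Address()

assert restored(Person()) == "Paris"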
train
Param._copy_new_parent
Copy the current param to a new parent, must be a dummy param.
python/pyspark/ml/param/__init__.py
def _copy_new_parent(self, parent): """Copy the current param to a new parent, must be a dummy param.""" if self.parent == "undefined": param = copy.copy(self) param.parent = parent.uid return param else: raise ValueError("Cannot copy from non-dummy parent %s." % parent)
def _copy_new_parent(self, parent): """Copy the current param to a new parent, must be a dummy param.""" if self.parent == "undefined": param = copy.copy(self) param.parent = parent.uid return param else: raise ValueError("Cannot copy from non-dummy parent %s." % parent)
[ "Copy", "the", "current", "param", "to", "a", "new", "parent", "must", "be", "a", "dummy", "param", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L52-L59
[ "def", "_copy_new_parent", "(", "self", ",", "parent", ")", ":", "if", "self", ".", "parent", "==", "\"undefined\"", ":", "param", "=", "copy", ".", "copy", "(", "self", ")", "param", ".", "parent", "=", "parent", ".", "uid", "return", "param", "else", ":", "raise", "ValueError", "(", "\"Cannot copy from non-dummy parent %s.\"", "%", "parent", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
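In pyspark ML, each Param is declared once at class level with a placeholder parent ("undefined") and _copy_new_parent re-binds a copy to the owning instance's uid during construction. A hedged, JVM-free sketch using the public Param/Params classes (Thresholded and threshold are illustrative names; it assumes the instance-level copy is made inside Params.__init__, as in current pyspark releases):

from pyspark.ml.param import Param, Params, TypeConverters

class Thresholded(Params):
    # class-level declaration with the dummy placeholder parent
    threshold = Param(Params._dummy(), "threshold", "cutoff value",
                      typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(Thresholded, self).__init__()

t = Thresholded()
print(Thresholded.threshold.parent)     # 'undefined' - still the placeholder
print(t.threshold.parent == t.uid)      # True - copy re-parented to this instance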
train
TypeConverters.toList
Convert a value to a list, if possible.
python/pyspark/ml/param/__init__.py
def toList(value): """ Convert a value to a list, if possible. """ if type(value) == list: return value elif type(value) in [np.ndarray, tuple, xrange, array.array]: return list(value) elif isinstance(value, Vector): return list(value.toArray()) else: raise TypeError("Could not convert %s to list" % value)
def toList(value): """ Convert a value to a list, if possible. """ if type(value) == list: return value elif type(value) in [np.ndarray, tuple, xrange, array.array]: return list(value) elif isinstance(value, Vector): return list(value.toArray()) else: raise TypeError("Could not convert %s to list" % value)
[ "Convert", "a", "value", "to", "a", "list", "if", "possible", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L113-L124
[ "def", "toList", "(", "value", ")", ":", "if", "type", "(", "value", ")", "==", "list", ":", "return", "value", "elif", "type", "(", "value", ")", "in", "[", "np", ".", "ndarray", ",", "tuple", ",", "xrange", ",", "array", ".", "array", "]", ":", "return", "list", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "Vector", ")", ":", "return", "list", "(", "value", ".", "toArray", "(", ")", ")", "else", ":", "raise", "TypeError", "(", "\"Could not convert %s to list\"", "%", "value", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
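A hedged usage sketch for the list conversion (assumes pyspark and numpy are installed; the literal values are arbitrary):

import numpy as np
from pyspark.ml.param import TypeConverters
from pyspark.ml.linalg import DenseVector

print(TypeConverters.toList((1, 2, 3)))                # [1, 2, 3]
print(TypeConverters.toList(np.array([1.0, 2.0])))     # [1.0, 2.0]
print(TypeConverters.toList(DenseVector([0.5, 1.5])))  # [0.5, 1.5]

try:
    TypeConverters.toList({"not": "a sequence"})       # unsupported type
except TypeError as exc:
    print("rejected:", exc)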
train
TypeConverters.toListFloat
Convert a value to list of floats, if possible.
python/pyspark/ml/param/__init__.py
def toListFloat(value): """ Convert a value to list of floats, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return [float(v) for v in value] raise TypeError("Could not convert %s to list of floats" % value)
def toListFloat(value): """ Convert a value to list of floats, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return [float(v) for v in value] raise TypeError("Could not convert %s to list of floats" % value)
[ "Convert", "a", "value", "to", "list", "of", "floats", "if", "possible", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L127-L135
[ "def", "toListFloat", "(", "value", ")", ":", "if", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters", ".", "_is_numeric", "(", "v", ")", ",", "value", ")", ")", ":", "return", "[", "float", "(", "v", ")", "for", "v", "in", "value", "]", "raise", "TypeError", "(", "\"Could not convert %s to list of floats\"", "%", "value", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
TypeConverters.toListInt
Convert a value to list of ints, if possible.
python/pyspark/ml/param/__init__.py
def toListInt(value): """ Convert a value to list of ints, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_integer(v), value)): return [int(v) for v in value] raise TypeError("Could not convert %s to list of ints" % value)
def toListInt(value): """ Convert a value to list of ints, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_integer(v), value)): return [int(v) for v in value] raise TypeError("Could not convert %s to list of ints" % value)
[ "Convert", "a", "value", "to", "list", "of", "ints", "if", "possible", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L138-L146
[ "def", "toListInt", "(", "value", ")", ":", "if", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters", ".", "_is_integer", "(", "v", ")", ",", "value", ")", ")", ":", "return", "[", "int", "(", "v", ")", "for", "v", "in", "value", "]", "raise", "TypeError", "(", "\"Could not convert %s to list of ints\"", "%", "value", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
TypeConverters.toListString
Convert a value to list of strings, if possible.
python/pyspark/ml/param/__init__.py
def toListString(value): """ Convert a value to list of strings, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)): return [TypeConverters.toString(v) for v in value] raise TypeError("Could not convert %s to list of strings" % value)
def toListString(value): """ Convert a value to list of strings, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)): return [TypeConverters.toString(v) for v in value] raise TypeError("Could not convert %s to list of strings" % value)
[ "Convert", "a", "value", "to", "list", "of", "strings", "if", "possible", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L149-L157
[ "def", "toListString", "(", "value", ")", ":", "if", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters", ".", "_can_convert_to_string", "(", "v", ")", ",", "value", ")", ")", ":", "return", "[", "TypeConverters", ".", "toString", "(", "v", ")", "for", "v", "in", "value", "]", "raise", "TypeError", "(", "\"Could not convert %s to list of strings\"", "%", "value", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
TypeConverters.toVector
Convert a value to a MLlib Vector, if possible.
python/pyspark/ml/param/__init__.py
def toVector(value): """ Convert a value to a MLlib Vector, if possible. """ if isinstance(value, Vector): return value elif TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return DenseVector(value) raise TypeError("Could not convert %s to vector" % value)
def toVector(value): """ Convert a value to a MLlib Vector, if possible. """ if isinstance(value, Vector): return value elif TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return DenseVector(value) raise TypeError("Could not convert %s to vector" % value)
[ "Convert", "a", "value", "to", "a", "MLlib", "Vector", "if", "possible", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L160-L170
[ "def", "toVector", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Vector", ")", ":", "return", "value", "elif", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters", ".", "_is_numeric", "(", "v", ")", ",", "value", ")", ")", ":", "return", "DenseVector", "(", "value", ")", "raise", "TypeError", "(", "\"Could not convert %s to vector\"", "%", "value", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
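A short sketch of toVector under the same assumption (local pyspark install, no running JVM required); the vectors used here are arbitrary.

```python
from pyspark.ml.linalg import DenseVector
from pyspark.ml.param import TypeConverters

# An existing ML Vector is passed through unchanged.
v = DenseVector([1.0, 0.0, 3.0])
print(TypeConverters.toVector(v) is v)           # True

# Any numeric list-like value is wrapped in a DenseVector holding floats.
print(repr(TypeConverters.toVector([1, 2, 3])))  # DenseVector([1.0, 2.0, 3.0])

# Non-numeric contents raise a TypeError.
try:
    TypeConverters.toVector(["x", "y"])
except TypeError as e:
    print(e)
```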
train
TypeConverters.toString
Convert a value to a string, if possible.
python/pyspark/ml/param/__init__.py
def toString(value): """ Convert a value to a string, if possible. """ if isinstance(value, basestring): return value elif type(value) in [np.string_, np.str_]: return str(value) elif type(value) == np.unicode_: return unicode(value) else: raise TypeError("Could not convert %s to string type" % type(value))
def toString(value): """ Convert a value to a string, if possible. """ if isinstance(value, basestring): return value elif type(value) in [np.string_, np.str_]: return str(value) elif type(value) == np.unicode_: return unicode(value) else: raise TypeError("Could not convert %s to string type" % type(value))
[ "Convert", "a", "value", "to", "a", "string", "if", "possible", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L202-L213
[ "def", "toString", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "basestring", ")", ":", "return", "value", "elif", "type", "(", "value", ")", "in", "[", "np", ".", "string_", ",", "np", ".", "str_", "]", ":", "return", "str", "(", "value", ")", "elif", "type", "(", "value", ")", "==", "np", ".", "unicode_", ":", "return", "unicode", "(", "value", ")", "else", ":", "raise", "TypeError", "(", "\"Could not convert %s to string type\"", "%", "type", "(", "value", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Params._copy_params
Copy all params defined on the class to current object.
python/pyspark/ml/param/__init__.py
def _copy_params(self): """ Copy all params defined on the class to current object. """ cls = type(self) src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)] src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs)) for name, param in src_params: setattr(self, name, param._copy_new_parent(self))
def _copy_params(self): """ Copy all params defined on the class to current object. """ cls = type(self) src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)] src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs)) for name, param in src_params: setattr(self, name, param._copy_new_parent(self))
[ "Copy", "all", "params", "defined", "on", "the", "class", "to", "current", "object", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L250-L258
[ "def", "_copy_params", "(", "self", ")", ":", "cls", "=", "type", "(", "self", ")", "src_name_attrs", "=", "[", "(", "x", ",", "getattr", "(", "cls", ",", "x", ")", ")", "for", "x", "in", "dir", "(", "cls", ")", "]", "src_params", "=", "list", "(", "filter", "(", "lambda", "nameAttr", ":", "isinstance", "(", "nameAttr", "[", "1", "]", ",", "Param", ")", ",", "src_name_attrs", ")", ")", "for", "name", ",", "param", "in", "src_params", ":", "setattr", "(", "self", ",", "name", ",", "param", ".", "_copy_new_parent", "(", "self", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Params.params
Returns all params ordered by name. The default implementation uses :py:func:`dir` to get all attributes of type :py:class:`Param`.
python/pyspark/ml/param/__init__.py
def params(self): """ Returns all params ordered by name. The default implementation uses :py:func:`dir` to get all attributes of type :py:class:`Param`. """ if self._params is None: self._params = list(filter(lambda attr: isinstance(attr, Param), [getattr(self, x) for x in dir(self) if x != "params" and not isinstance(getattr(type(self), x, None), property)])) return self._params
def params(self): """ Returns all params ordered by name. The default implementation uses :py:func:`dir` to get all attributes of type :py:class:`Param`. """ if self._params is None: self._params = list(filter(lambda attr: isinstance(attr, Param), [getattr(self, x) for x in dir(self) if x != "params" and not isinstance(getattr(type(self), x, None), property)])) return self._params
[ "Returns", "all", "params", "ordered", "by", "name", ".", "The", "default", "implementation", "uses", ":", "py", ":", "func", ":", "dir", "to", "get", "all", "attributes", "of", "type", ":", "py", ":", "class", ":", "Param", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L261-L271
[ "def", "params", "(", "self", ")", ":", "if", "self", ".", "_params", "is", "None", ":", "self", ".", "_params", "=", "list", "(", "filter", "(", "lambda", "attr", ":", "isinstance", "(", "attr", ",", "Param", ")", ",", "[", "getattr", "(", "self", ",", "x", ")", "for", "x", "in", "dir", "(", "self", ")", "if", "x", "!=", "\"params\"", "and", "not", "isinstance", "(", "getattr", "(", "type", "(", "self", ")", ",", "x", ",", "None", ")", ",", "property", ")", "]", ")", ")", "return", "self", ".", "_params" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
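A self-contained illustration of the params property. The TinyStage class and its param names are hypothetical, written only to mirror the class-attribute pattern used by pyspark.ml.param.shared; they are not part of the record above.

```python
from pyspark.ml.param import Param, Params, TypeConverters

class TinyStage(Params):
    """Hypothetical Params subclass used only for illustration."""
    beta = Param(Params._dummy(), "beta", "a second knob", typeConverter=TypeConverters.toFloat)
    alpha = Param(Params._dummy(), "alpha", "a first knob", typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(TinyStage, self).__init__()

stage = TinyStage()
# dir() sorts attribute names, so params come back ordered alphabetically by name.
print([p.name for p in stage.params])          # ['alpha', 'beta']
# Each returned Param is the per-instance copy whose parent is this stage's uid.
print(stage.params[0].parent == stage.uid)     # True
```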
train
Params.explainParam
Explains a single param and returns its name, doc, and optional default value and user-supplied value in a string.
python/pyspark/ml/param/__init__.py
def explainParam(self, param): """ Explains a single param and returns its name, doc, and optional default value and user-supplied value in a string. """ param = self._resolveParam(param) values = [] if self.isDefined(param): if param in self._defaultParamMap: values.append("default: %s" % self._defaultParamMap[param]) if param in self._paramMap: values.append("current: %s" % self._paramMap[param]) else: values.append("undefined") valueStr = "(" + ", ".join(values) + ")" return "%s: %s %s" % (param.name, param.doc, valueStr)
def explainParam(self, param): """ Explains a single param and returns its name, doc, and optional default value and user-supplied value in a string. """ param = self._resolveParam(param) values = [] if self.isDefined(param): if param in self._defaultParamMap: values.append("default: %s" % self._defaultParamMap[param]) if param in self._paramMap: values.append("current: %s" % self._paramMap[param]) else: values.append("undefined") valueStr = "(" + ", ".join(values) + ")" return "%s: %s %s" % (param.name, param.doc, valueStr)
[ "Explains", "a", "single", "param", "and", "returns", "its", "name", "doc", "and", "optional", "default", "value", "and", "user", "-", "supplied", "value", "in", "a", "string", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L273-L288
[ "def", "explainParam", "(", "self", ",", "param", ")", ":", "param", "=", "self", ".", "_resolveParam", "(", "param", ")", "values", "=", "[", "]", "if", "self", ".", "isDefined", "(", "param", ")", ":", "if", "param", "in", "self", ".", "_defaultParamMap", ":", "values", ".", "append", "(", "\"default: %s\"", "%", "self", ".", "_defaultParamMap", "[", "param", "]", ")", "if", "param", "in", "self", ".", "_paramMap", ":", "values", ".", "append", "(", "\"current: %s\"", "%", "self", ".", "_paramMap", "[", "param", "]", ")", "else", ":", "values", ".", "append", "(", "\"undefined\"", ")", "valueStr", "=", "\"(\"", "+", "\", \"", ".", "join", "(", "values", ")", "+", "\")\"", "return", "\"%s: %s %s\"", "%", "(", "param", ".", "name", ",", "param", ".", "doc", ",", "valueStr", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
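A hedged sketch of the strings explainParam produces; the Thresholded class and its threshold param are invented for this example.

```python
from pyspark.ml.param import Param, Params, TypeConverters

class Thresholded(Params):
    """Hypothetical Params holder used only for illustration."""
    threshold = Param(Params._dummy(), "threshold", "decision threshold",
                      typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(Thresholded, self).__init__()
        self._setDefault(threshold=0.5)

t = Thresholded()
# A param name or the Param instance itself may be passed.
print(t.explainParam("threshold"))   # threshold: decision threshold (default: 0.5)
t.set(t.threshold, 0.8)
print(t.explainParam(t.threshold))   # threshold: decision threshold (default: 0.5, current: 0.8)
```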
train
Params.getParam
Gets a param by its name.
python/pyspark/ml/param/__init__.py
def getParam(self, paramName): """ Gets a param by its name. """ param = getattr(self, paramName) if isinstance(param, Param): return param else: raise ValueError("Cannot find param with name %s." % paramName)
def getParam(self, paramName): """ Gets a param by its name. """ param = getattr(self, paramName) if isinstance(param, Param): return param else: raise ValueError("Cannot find param with name %s." % paramName)
[ "Gets", "a", "param", "by", "its", "name", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L297-L305
[ "def", "getParam", "(", "self", ",", "paramName", ")", ":", "param", "=", "getattr", "(", "self", ",", "paramName", ")", "if", "isinstance", "(", "param", ",", "Param", ")", ":", "return", "param", "else", ":", "raise", "ValueError", "(", "\"Cannot find param with name %s.\"", "%", "paramName", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
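A small sketch of looking a Param up by name with getParam, using a hypothetical Binner class defined only for the example.

```python
from pyspark.ml.param import Param, Params, TypeConverters

class Binner(Params):
    """Hypothetical Params holder used only for illustration."""
    numBins = Param(Params._dummy(), "numBins", "number of bins",
                    typeConverter=TypeConverters.toInt)

    def __init__(self):
        super(Binner, self).__init__()

b = Binner()
p = b.getParam("numBins")   # look the Param up by its string name
print(p is b.numBins)       # True -- the instance-level Param object is returned
```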
train
Params.isSet
Checks whether a param is explicitly set by user.
python/pyspark/ml/param/__init__.py
def isSet(self, param): """ Checks whether a param is explicitly set by user. """ param = self._resolveParam(param) return param in self._paramMap
def isSet(self, param): """ Checks whether a param is explicitly set by user. """ param = self._resolveParam(param) return param in self._paramMap
[ "Checks", "whether", "a", "param", "is", "explicitly", "set", "by", "user", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L307-L312
[ "def", "isSet", "(", "self", ",", "param", ")", ":", "param", "=", "self", ".", "_resolveParam", "(", "param", ")", "return", "param", "in", "self", ".", "_paramMap" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
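A sketch showing that isSet reports only user-supplied values, not defaults; the Smoother class and its alpha param are illustrative.

```python
from pyspark.ml.param import Param, Params, TypeConverters

class Smoother(Params):
    """Hypothetical Params holder used only for illustration."""
    alpha = Param(Params._dummy(), "alpha", "smoothing factor",
                  typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(Smoother, self).__init__()
        self._setDefault(alpha=1.0)

s = Smoother()
print(s.isSet("alpha"))   # False -- a default value alone does not count as "set"
s.set(s.alpha, 0.3)
print(s.isSet(s.alpha))   # True  -- the value is now in the user-supplied param map
```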
train
Params.hasDefault
Checks whether a param has a default value.
python/pyspark/ml/param/__init__.py
def hasDefault(self, param): """ Checks whether a param has a default value. """ param = self._resolveParam(param) return param in self._defaultParamMap
def hasDefault(self, param): """ Checks whether a param has a default value. """ param = self._resolveParam(param) return param in self._defaultParamMap
[ "Checks", "whether", "a", "param", "has", "a", "default", "value", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L314-L319
[ "def", "hasDefault", "(", "self", ",", "param", ")", ":", "param", "=", "self", ".", "_resolveParam", "(", "param", ")", "return", "param", "in", "self", ".", "_defaultParamMap" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Params.hasParam
Tests whether this instance contains a param with a given (string) name.
python/pyspark/ml/param/__init__.py
def hasParam(self, paramName): """ Tests whether this instance contains a param with a given (string) name. """ if isinstance(paramName, basestring): p = getattr(self, paramName, None) return isinstance(p, Param) else: raise TypeError("hasParam(): paramName must be a string")
def hasParam(self, paramName): """ Tests whether this instance contains a param with a given (string) name. """ if isinstance(paramName, basestring): p = getattr(self, paramName, None) return isinstance(p, Param) else: raise TypeError("hasParam(): paramName must be a string")
[ "Tests", "whether", "this", "instance", "contains", "a", "param", "with", "a", "given", "(", "string", ")", "name", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L328-L337
[ "def", "hasParam", "(", "self", ",", "paramName", ")", ":", "if", "isinstance", "(", "paramName", ",", "basestring", ")", ":", "p", "=", "getattr", "(", "self", ",", "paramName", ",", "None", ")", "return", "isinstance", "(", "p", ",", "Param", ")", "else", ":", "raise", "TypeError", "(", "\"hasParam(): paramName must be a string\"", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Params.getOrDefault
Gets the value of a param in the user-supplied param map or its default value. Raises an error if neither is set.
python/pyspark/ml/param/__init__.py
def getOrDefault(self, param): """ Gets the value of a param in the user-supplied param map or its default value. Raises an error if neither is set. """ param = self._resolveParam(param) if param in self._paramMap: return self._paramMap[param] else: return self._defaultParamMap[param]
def getOrDefault(self, param): """ Gets the value of a param in the user-supplied param map or its default value. Raises an error if neither is set. """ param = self._resolveParam(param) if param in self._paramMap: return self._paramMap[param] else: return self._defaultParamMap[param]
[ "Gets", "the", "value", "of", "a", "param", "in", "the", "user", "-", "supplied", "param", "map", "or", "its", "default", "value", ".", "Raises", "an", "error", "if", "neither", "is", "set", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L339-L348
[ "def", "getOrDefault", "(", "self", ",", "param", ")", ":", "param", "=", "self", ".", "_resolveParam", "(", "param", ")", "if", "param", "in", "self", ".", "_paramMap", ":", "return", "self", ".", "_paramMap", "[", "param", "]", "else", ":", "return", "self", ".", "_defaultParamMap", "[", "param", "]" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
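A sketch of the hasDefault/getOrDefault interplay, again with an invented holder class rather than a real pipeline stage.

```python
from pyspark.ml.param import Param, Params, TypeConverters

class Capped(Params):
    """Hypothetical Params holder used only for illustration."""
    maxIter = Param(Params._dummy(), "maxIter", "maximum number of iterations",
                    typeConverter=TypeConverters.toInt)

    def __init__(self):
        super(Capped, self).__init__()
        self._setDefault(maxIter=10)

c = Capped()
print(c.hasDefault(c.maxIter))     # True
print(c.getOrDefault("maxIter"))   # 10 -- falls back to the default value
c.set(c.maxIter, 25)
print(c.getOrDefault(c.maxIter))   # 25 -- a user-supplied value wins over the default
```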
train
Params.extractParamMap
Extracts the embedded default param values and user-supplied values, and then merges them with extra values from input into a flat param map, where the latter value is used if there exist conflicts, i.e., with ordering: default param values < user-supplied values < extra. :param extra: extra param values :return: merged param map
python/pyspark/ml/param/__init__.py
def extractParamMap(self, extra=None): """ Extracts the embedded default param values and user-supplied values, and then merges them with extra values from input into a flat param map, where the latter value is used if there exist conflicts, i.e., with ordering: default param values < user-supplied values < extra. :param extra: extra param values :return: merged param map """ if extra is None: extra = dict() paramMap = self._defaultParamMap.copy() paramMap.update(self._paramMap) paramMap.update(extra) return paramMap
def extractParamMap(self, extra=None): """ Extracts the embedded default param values and user-supplied values, and then merges them with extra values from input into a flat param map, where the latter value is used if there exist conflicts, i.e., with ordering: default param values < user-supplied values < extra. :param extra: extra param values :return: merged param map """ if extra is None: extra = dict() paramMap = self._defaultParamMap.copy() paramMap.update(self._paramMap) paramMap.update(extra) return paramMap
[ "Extracts", "the", "embedded", "default", "param", "values", "and", "user", "-", "supplied", "values", "and", "then", "merges", "them", "with", "extra", "values", "from", "input", "into", "a", "flat", "param", "map", "where", "the", "latter", "value", "is", "used", "if", "there", "exist", "conflicts", "i", ".", "e", ".", "with", "ordering", ":", "default", "param", "values", "<", "user", "-", "supplied", "values", "<", "extra", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L350-L366
[ "def", "extractParamMap", "(", "self", ",", "extra", "=", "None", ")", ":", "if", "extra", "is", "None", ":", "extra", "=", "dict", "(", ")", "paramMap", "=", "self", ".", "_defaultParamMap", ".", "copy", "(", ")", "paramMap", ".", "update", "(", "self", ".", "_paramMap", ")", "paramMap", ".", "update", "(", "extra", ")", "return", "paramMap" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
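A sketch of the "default < user-supplied < extra" ordering that extractParamMap documents; the Mixer class is hypothetical.

```python
from pyspark.ml.param import Param, Params, TypeConverters

class Mixer(Params):
    """Hypothetical Params holder used only for illustration."""
    ratio = Param(Params._dummy(), "ratio", "mixing ratio",
                  typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(Mixer, self).__init__()
        self._setDefault(ratio=0.5)

m = Mixer()
m.set(m.ratio, 0.7)
print(m.extractParamMap()[m.ratio])               # 0.7 -- user value beats the 0.5 default
merged = m.extractParamMap({m.ratio: 0.9})
print(merged[m.ratio])                            # 0.9 -- an extra value beats both
```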
train
Params.copy
Creates a copy of this instance with the same uid and some extra params. The default implementation creates a shallow copy using :py:func:`copy.copy`, and then copies the embedded and extra parameters over and returns the copy. Subclasses should override this method if the default approach is not sufficient. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance
python/pyspark/ml/param/__init__.py
def copy(self, extra=None): """ Creates a copy of this instance with the same uid and some extra params. The default implementation creates a shallow copy using :py:func:`copy.copy`, and then copies the embedded and extra parameters over and returns the copy. Subclasses should override this method if the default approach is not sufficient. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() that = copy.copy(self) that._paramMap = {} that._defaultParamMap = {} return self._copyValues(that, extra)
def copy(self, extra=None): """ Creates a copy of this instance with the same uid and some extra params. The default implementation creates a shallow copy using :py:func:`copy.copy`, and then copies the embedded and extra parameters over and returns the copy. Subclasses should override this method if the default approach is not sufficient. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() that = copy.copy(self) that._paramMap = {} that._defaultParamMap = {} return self._copyValues(that, extra)
[ "Creates", "a", "copy", "of", "this", "instance", "with", "the", "same", "uid", "and", "some", "extra", "params", ".", "The", "default", "implementation", "creates", "a", "shallow", "copy", "using", ":", "py", ":", "func", ":", "copy", ".", "copy", "and", "then", "copies", "the", "embedded", "and", "extra", "parameters", "over", "and", "returns", "the", "copy", ".", "Subclasses", "should", "override", "this", "method", "if", "the", "default", "approach", "is", "not", "sufficient", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L368-L385
[ "def", "copy", "(", "self", ",", "extra", "=", "None", ")", ":", "if", "extra", "is", "None", ":", "extra", "=", "dict", "(", ")", "that", "=", "copy", ".", "copy", "(", "self", ")", "that", ".", "_paramMap", "=", "{", "}", "that", ".", "_defaultParamMap", "=", "{", "}", "return", "self", ".", "_copyValues", "(", "that", ",", "extra", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
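A sketch of copy with extra values; the Scaler class is invented for the example, and the behavior shown (same uid, extras applied to the copy only) follows the docstring above.

```python
from pyspark.ml.param import Param, Params, TypeConverters

class Scaler(Params):
    """Hypothetical Params holder used only for illustration."""
    factor = Param(Params._dummy(), "factor", "scaling factor",
                   typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(Scaler, self).__init__()
        self._setDefault(factor=1.0)

a = Scaler()
a.set(a.factor, 2.0)
b = a.copy({a.factor: 3.0})        # shallow copy; extra values apply to the copy only
print(b.uid == a.uid)              # True -- the copy keeps the same uid
print(b.getOrDefault(b.factor))    # 3.0
print(a.getOrDefault(a.factor))    # 2.0 -- the original instance is untouched
```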
train
Params.set
Sets a parameter in the embedded param map.
python/pyspark/ml/param/__init__.py
def set(self, param, value): """ Sets a parameter in the embedded param map. """ self._shouldOwn(param) try: value = param.typeConverter(value) except ValueError as e: raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e)) self._paramMap[param] = value
def set(self, param, value): """ Sets a parameter in the embedded param map. """ self._shouldOwn(param) try: value = param.typeConverter(value) except ValueError as e: raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e)) self._paramMap[param] = value
[ "Sets", "a", "parameter", "in", "the", "embedded", "param", "map", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L387-L396
[ "def", "set", "(", "self", ",", "param", ",", "value", ")", ":", "self", ".", "_shouldOwn", "(", "param", ")", "try", ":", "value", "=", "param", ".", "typeConverter", "(", "value", ")", "except", "ValueError", "as", "e", ":", "raise", "ValueError", "(", "'Invalid param value given for param \"%s\". %s'", "%", "(", "param", ".", "name", ",", "e", ")", ")", "self", ".", "_paramMap", "[", "param", "]", "=", "value" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
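A sketch of set and its type conversion; the Limiter class is hypothetical. Note that the converter itself raises TypeError for non-numeric input, which set does not swallow (it only re-wraps ValueError).

```python
from pyspark.ml.param import Param, Params, TypeConverters

class Limiter(Params):
    """Hypothetical Params holder used only for illustration."""
    cutoff = Param(Params._dummy(), "cutoff", "cutoff value",
                   typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(Limiter, self).__init__()

lim = Limiter()
lim.set(lim.cutoff, 3)                  # the typeConverter coerces the int to 3.0
print(lim.getOrDefault(lim.cutoff))     # 3.0

try:
    lim.set(lim.cutoff, "not a number")     # non-numeric input is rejected by the converter
except TypeError as e:
    print(e)                                # Could not convert not a number to float
```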
train
Params._shouldOwn
Validates that the input param belongs to this Params instance.
python/pyspark/ml/param/__init__.py
def _shouldOwn(self, param): """ Validates that the input param belongs to this Params instance. """ if not (self.uid == param.parent and self.hasParam(param.name)): raise ValueError("Param %r does not belong to %r." % (param, self))
def _shouldOwn(self, param): """ Validates that the input param belongs to this Params instance. """ if not (self.uid == param.parent and self.hasParam(param.name)): raise ValueError("Param %r does not belong to %r." % (param, self))
[ "Validates", "that", "the", "input", "param", "belongs", "to", "this", "Params", "instance", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L398-L403
[ "def", "_shouldOwn", "(", "self", ",", "param", ")", ":", "if", "not", "(", "self", ".", "uid", "==", "param", ".", "parent", "and", "self", ".", "hasParam", "(", "param", ".", "name", ")", ")", ":", "raise", "ValueError", "(", "\"Param %r does not belong to %r.\"", "%", "(", "param", ",", "self", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Params._resolveParam
Resolves a param and validates the ownership. :param param: param name or the param instance, which must belong to this Params instance :return: resolved param instance
python/pyspark/ml/param/__init__.py
def _resolveParam(self, param): """ Resolves a param and validates the ownership. :param param: param name or the param instance, which must belong to this Params instance :return: resolved param instance """ if isinstance(param, Param): self._shouldOwn(param) return param elif isinstance(param, basestring): return self.getParam(param) else: raise ValueError("Cannot resolve %r as a param." % param)
def _resolveParam(self, param): """ Resolves a param and validates the ownership. :param param: param name or the param instance, which must belong to this Params instance :return: resolved param instance """ if isinstance(param, Param): self._shouldOwn(param) return param elif isinstance(param, basestring): return self.getParam(param) else: raise ValueError("Cannot resolve %r as a param." % param)
[ "Resolves", "a", "param", "and", "validates", "the", "ownership", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L405-L419
[ "def", "_resolveParam", "(", "self", ",", "param", ")", ":", "if", "isinstance", "(", "param", ",", "Param", ")", ":", "self", ".", "_shouldOwn", "(", "param", ")", "return", "param", "elif", "isinstance", "(", "param", ",", "basestring", ")", ":", "return", "self", ".", "getParam", "(", "param", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot resolve %r as a param.\"", "%", "param", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Params._set
Sets user-supplied params.
python/pyspark/ml/param/__init__.py
def _set(self, **kwargs): """ Sets user-supplied params. """ for param, value in kwargs.items(): p = getattr(self, param) if value is not None: try: value = p.typeConverter(value) except TypeError as e: raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e)) self._paramMap[p] = value return self
def _set(self, **kwargs): """ Sets user-supplied params. """ for param, value in kwargs.items(): p = getattr(self, param) if value is not None: try: value = p.typeConverter(value) except TypeError as e: raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e)) self._paramMap[p] = value return self
[ "Sets", "user", "-", "supplied", "params", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L431-L443
[ "def", "_set", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "param", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "p", "=", "getattr", "(", "self", ",", "param", ")", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "p", ".", "typeConverter", "(", "value", ")", "except", "TypeError", "as", "e", ":", "raise", "TypeError", "(", "'Invalid param value given for param \"%s\". %s'", "%", "(", "p", ".", "name", ",", "e", ")", ")", "self", ".", "_paramMap", "[", "p", "]", "=", "value", "return", "self" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Params._setDefault
Sets default params.
python/pyspark/ml/param/__init__.py
def _setDefault(self, **kwargs): """ Sets default params. """ for param, value in kwargs.items(): p = getattr(self, param) if value is not None and not isinstance(value, JavaObject): try: value = p.typeConverter(value) except TypeError as e: raise TypeError('Invalid default param value given for param "%s". %s' % (p.name, e)) self._defaultParamMap[p] = value return self
def _setDefault(self, **kwargs): """ Sets default params. """ for param, value in kwargs.items(): p = getattr(self, param) if value is not None and not isinstance(value, JavaObject): try: value = p.typeConverter(value) except TypeError as e: raise TypeError('Invalid default param value given for param "%s". %s' % (p.name, e)) self._defaultParamMap[p] = value return self
[ "Sets", "default", "params", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L452-L465
[ "def", "_setDefault", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "param", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "p", "=", "getattr", "(", "self", ",", "param", ")", "if", "value", "is", "not", "None", "and", "not", "isinstance", "(", "value", ",", "JavaObject", ")", ":", "try", ":", "value", "=", "p", ".", "typeConverter", "(", "value", ")", "except", "TypeError", "as", "e", ":", "raise", "TypeError", "(", "'Invalid default param value given for param \"%s\". %s'", "%", "(", "p", ".", "name", ",", "e", ")", ")", "self", ".", "_defaultParamMap", "[", "p", "]", "=", "value", "return", "self" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Params._copyValues
Copies param values from this instance to another instance for params shared by them. :param to: the target instance :param extra: extra params to be copied :return: the target instance with param values copied
python/pyspark/ml/param/__init__.py
def _copyValues(self, to, extra=None): """ Copies param values from this instance to another instance for params shared by them. :param to: the target instance :param extra: extra params to be copied :return: the target instance with param values copied """ paramMap = self._paramMap.copy() if extra is not None: paramMap.update(extra) for param in self.params: # copy default params if param in self._defaultParamMap and to.hasParam(param.name): to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param] # copy explicitly set params if param in paramMap and to.hasParam(param.name): to._set(**{param.name: paramMap[param]}) return to
def _copyValues(self, to, extra=None): """ Copies param values from this instance to another instance for params shared by them. :param to: the target instance :param extra: extra params to be copied :return: the target instance with param values copied """ paramMap = self._paramMap.copy() if extra is not None: paramMap.update(extra) for param in self.params: # copy default params if param in self._defaultParamMap and to.hasParam(param.name): to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param] # copy explicitly set params if param in paramMap and to.hasParam(param.name): to._set(**{param.name: paramMap[param]}) return to
[ "Copies", "param", "values", "from", "this", "instance", "to", "another", "instance", "for", "params", "shared", "by", "them", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L467-L486
[ "def", "_copyValues", "(", "self", ",", "to", ",", "extra", "=", "None", ")", ":", "paramMap", "=", "self", ".", "_paramMap", ".", "copy", "(", ")", "if", "extra", "is", "not", "None", ":", "paramMap", ".", "update", "(", "extra", ")", "for", "param", "in", "self", ".", "params", ":", "# copy default params", "if", "param", "in", "self", ".", "_defaultParamMap", "and", "to", ".", "hasParam", "(", "param", ".", "name", ")", ":", "to", ".", "_defaultParamMap", "[", "to", ".", "getParam", "(", "param", ".", "name", ")", "]", "=", "self", ".", "_defaultParamMap", "[", "param", "]", "# copy explicitly set params", "if", "param", "in", "paramMap", "and", "to", ".", "hasParam", "(", "param", ".", "name", ")", ":", "to", ".", "_set", "(", "*", "*", "{", "param", ".", "name", ":", "paramMap", "[", "param", "]", "}", ")", "return", "to" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Params._resetUid
Changes the uid of this instance. This updates both the stored uid and the parent uid of params and param maps. This is used by persistence (loading). :param newUid: new uid to use, which is converted to unicode :return: same instance, but with the uid and Param.parent values updated, including within param maps
python/pyspark/ml/param/__init__.py
def _resetUid(self, newUid): """ Changes the uid of this instance. This updates both the stored uid and the parent uid of params and param maps. This is used by persistence (loading). :param newUid: new uid to use, which is converted to unicode :return: same instance, but with the uid and Param.parent values updated, including within param maps """ newUid = unicode(newUid) self.uid = newUid newDefaultParamMap = dict() newParamMap = dict() for param in self.params: newParam = copy.copy(param) newParam.parent = newUid if param in self._defaultParamMap: newDefaultParamMap[newParam] = self._defaultParamMap[param] if param in self._paramMap: newParamMap[newParam] = self._paramMap[param] param.parent = newUid self._defaultParamMap = newDefaultParamMap self._paramMap = newParamMap return self
def _resetUid(self, newUid): """ Changes the uid of this instance. This updates both the stored uid and the parent uid of params and param maps. This is used by persistence (loading). :param newUid: new uid to use, which is converted to unicode :return: same instance, but with the uid and Param.parent values updated, including within param maps """ newUid = unicode(newUid) self.uid = newUid newDefaultParamMap = dict() newParamMap = dict() for param in self.params: newParam = copy.copy(param) newParam.parent = newUid if param in self._defaultParamMap: newDefaultParamMap[newParam] = self._defaultParamMap[param] if param in self._paramMap: newParamMap[newParam] = self._paramMap[param] param.parent = newUid self._defaultParamMap = newDefaultParamMap self._paramMap = newParamMap return self
[ "Changes", "the", "uid", "of", "this", "instance", ".", "This", "updates", "both", "the", "stored", "uid", "and", "the", "parent", "uid", "of", "params", "and", "param", "maps", ".", "This", "is", "used", "by", "persistence", "(", "loading", ")", ".", ":", "param", "newUid", ":", "new", "uid", "to", "use", "which", "is", "converted", "to", "unicode", ":", "return", ":", "same", "instance", "but", "with", "the", "uid", "and", "Param", ".", "parent", "values", "updated", "including", "within", "param", "maps" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L488-L511
[ "def", "_resetUid", "(", "self", ",", "newUid", ")", ":", "newUid", "=", "unicode", "(", "newUid", ")", "self", ".", "uid", "=", "newUid", "newDefaultParamMap", "=", "dict", "(", ")", "newParamMap", "=", "dict", "(", ")", "for", "param", "in", "self", ".", "params", ":", "newParam", "=", "copy", ".", "copy", "(", "param", ")", "newParam", ".", "parent", "=", "newUid", "if", "param", "in", "self", ".", "_defaultParamMap", ":", "newDefaultParamMap", "[", "newParam", "]", "=", "self", ".", "_defaultParamMap", "[", "param", "]", "if", "param", "in", "self", ".", "_paramMap", ":", "newParamMap", "[", "newParam", "]", "=", "self", ".", "_paramMap", "[", "param", "]", "param", ".", "parent", "=", "newUid", "self", ".", "_defaultParamMap", "=", "newDefaultParamMap", "self", ".", "_paramMap", "=", "newParamMap", "return", "self" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
_to_java_object_rdd
Return a JavaRDD of Object by unpickling. It will convert each Python object into a Java object via Pyrolite, whether or not the RDD is serialized in batch.
python/pyspark/ml/common.py
def _to_java_object_rdd(rdd): """ Return an JavaRDD of Object by unpickling It will convert each Python object into Java object by Pyrolite, whenever the RDD is serialized in batch or not. """ rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer())) return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
def _to_java_object_rdd(rdd): """ Return an JavaRDD of Object by unpickling It will convert each Python object into Java object by Pyrolite, whenever the RDD is serialized in batch or not. """ rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer())) return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
[ "Return", "an", "JavaRDD", "of", "Object", "by", "unpickling" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/common.py#L60-L67
[ "def", "_to_java_object_rdd", "(", "rdd", ")", ":", "rdd", "=", "rdd", ".", "_reserialize", "(", "AutoBatchedSerializer", "(", "PickleSerializer", "(", ")", ")", ")", "return", "rdd", ".", "ctx", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "ml", ".", "python", ".", "MLSerDe", ".", "pythonToJava", "(", "rdd", ".", "_jrdd", ",", "True", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Broadcast.value
Return the broadcasted value
python/pyspark/broadcast.py
def value(self): """ Return the broadcasted value """ if not hasattr(self, "_value") and self._path is not None: # we only need to decrypt it here when encryption is enabled and # if its on the driver, since executor decryption is handled already if self._sc is not None and self._sc._encryption_enabled: port, auth_secret = self._python_broadcast.setupDecryptionServer() (decrypted_sock_file, _) = local_connect_and_auth(port, auth_secret) self._python_broadcast.waitTillBroadcastDataSent() return self.load(decrypted_sock_file) else: self._value = self.load_from_path(self._path) return self._value
def value(self): """ Return the broadcasted value """ if not hasattr(self, "_value") and self._path is not None: # we only need to decrypt it here when encryption is enabled and # if its on the driver, since executor decryption is handled already if self._sc is not None and self._sc._encryption_enabled: port, auth_secret = self._python_broadcast.setupDecryptionServer() (decrypted_sock_file, _) = local_connect_and_auth(port, auth_secret) self._python_broadcast.waitTillBroadcastDataSent() return self.load(decrypted_sock_file) else: self._value = self.load_from_path(self._path) return self._value
[ "Return", "the", "broadcasted", "value" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L135-L148
[ "def", "value", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"_value\"", ")", "and", "self", ".", "_path", "is", "not", "None", ":", "# we only need to decrypt it here when encryption is enabled and", "# if its on the driver, since executor decryption is handled already", "if", "self", ".", "_sc", "is", "not", "None", "and", "self", ".", "_sc", ".", "_encryption_enabled", ":", "port", ",", "auth_secret", "=", "self", ".", "_python_broadcast", ".", "setupDecryptionServer", "(", ")", "(", "decrypted_sock_file", ",", "_", ")", "=", "local_connect_and_auth", "(", "port", ",", "auth_secret", ")", "self", ".", "_python_broadcast", ".", "waitTillBroadcastDataSent", "(", ")", "return", "self", ".", "load", "(", "decrypted_sock_file", ")", "else", ":", "self", ".", "_value", "=", "self", ".", "load_from_path", "(", "self", ".", "_path", ")", "return", "self", ".", "_value" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
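A minimal driver-side sketch of reading a broadcast through .value inside tasks; it assumes a local Spark installation, and the master setting, app name and data are arbitrary.

```python
from pyspark.sql import SparkSession

# Assumes a local Spark installation; "local[2]" and the app name are just for the demo.
spark = SparkSession.builder.master("local[2]").appName("broadcast-demo").getOrCreate()
sc = spark.sparkContext

lookup = sc.broadcast({"a": 1, "b": 2})              # ship a small dict to the executors once
rdd = sc.parallelize(["a", "b", "a", "b", "b"])
total = rdd.map(lambda key: lookup.value[key]).sum() # tasks read the cached copy via .value
print(total)                                         # 8

spark.stop()
```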
train
Broadcast.unpersist
Delete cached copies of this broadcast on the executors. If the broadcast is used after this is called, it will need to be re-sent to each executor. :param blocking: Whether to block until unpersisting has completed
python/pyspark/broadcast.py
def unpersist(self, blocking=False): """ Delete cached copies of this broadcast on the executors. If the broadcast is used after this is called, it will need to be re-sent to each executor. :param blocking: Whether to block until unpersisting has completed """ if self._jbroadcast is None: raise Exception("Broadcast can only be unpersisted in driver") self._jbroadcast.unpersist(blocking)
def unpersist(self, blocking=False): """ Delete cached copies of this broadcast on the executors. If the broadcast is used after this is called, it will need to be re-sent to each executor. :param blocking: Whether to block until unpersisting has completed """ if self._jbroadcast is None: raise Exception("Broadcast can only be unpersisted in driver") self._jbroadcast.unpersist(blocking)
[ "Delete", "cached", "copies", "of", "this", "broadcast", "on", "the", "executors", ".", "If", "the", "broadcast", "is", "used", "after", "this", "is", "called", "it", "will", "need", "to", "be", "re", "-", "sent", "to", "each", "executor", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L150-L160
[ "def", "unpersist", "(", "self", ",", "blocking", "=", "False", ")", ":", "if", "self", ".", "_jbroadcast", "is", "None", ":", "raise", "Exception", "(", "\"Broadcast can only be unpersisted in driver\"", ")", "self", ".", "_jbroadcast", ".", "unpersist", "(", "blocking", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
Broadcast.destroy
Destroy all data and metadata related to this broadcast variable. Use this with caution; once a broadcast variable has been destroyed, it cannot be used again. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted.
python/pyspark/broadcast.py
def destroy(self, blocking=False): """ Destroy all data and metadata related to this broadcast variable. Use this with caution; once a broadcast variable has been destroyed, it cannot be used again. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted. """ if self._jbroadcast is None: raise Exception("Broadcast can only be destroyed in driver") self._jbroadcast.destroy(blocking) os.unlink(self._path)
def destroy(self, blocking=False): """ Destroy all data and metadata related to this broadcast variable. Use this with caution; once a broadcast variable has been destroyed, it cannot be used again. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted. """ if self._jbroadcast is None: raise Exception("Broadcast can only be destroyed in driver") self._jbroadcast.destroy(blocking) os.unlink(self._path)
[ "Destroy", "all", "data", "and", "metadata", "related", "to", "this", "broadcast", "variable", ".", "Use", "this", "with", "caution", ";", "once", "a", "broadcast", "variable", "has", "been", "destroyed", "it", "cannot", "be", "used", "again", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L162-L175
[ "def", "destroy", "(", "self", ",", "blocking", "=", "False", ")", ":", "if", "self", ".", "_jbroadcast", "is", "None", ":", "raise", "Exception", "(", "\"Broadcast can only be destroyed in driver\"", ")", "self", ".", "_jbroadcast", ".", "destroy", "(", "blocking", ")", "os", ".", "unlink", "(", "self", ".", "_path", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
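A short cleanup sketch pairing unpersist and destroy, under the same local-Spark assumption; the broadcast contents are arbitrary.

```python
from pyspark.sql import SparkSession

# Assumes a local Spark installation.
spark = SparkSession.builder.master("local[2]").appName("broadcast-cleanup").getOrCreate()
sc = spark.sparkContext

table = sc.broadcast(list(range(1000)))
print(sc.parallelize([0, 999]).map(lambda i: table.value[i]).collect())   # [0, 999]

table.unpersist()   # drop executor copies; the data is re-sent if the broadcast is used again
table.destroy()     # release driver and executor state; the variable must not be used after this

spark.stop()
```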
train
UserDefinedFunction._wrapped
Wrap this udf with a function and attach docstring from func
python/pyspark/sql/udf.py
def _wrapped(self): """ Wrap this udf with a function and attach docstring from func """ # It is possible for a callable instance without __name__ attribute or/and # __module__ attribute to be wrapped here. For example, functools.partial. In this case, # we should avoid wrapping the attributes from the wrapped function to the wrapper # function. So, we take out these attribute names from the default names to set and # then manually assign it after being wrapped. assignments = tuple( a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__') @functools.wraps(self.func, assigned=assignments) def wrapper(*args): return self(*args) wrapper.__name__ = self._name wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__') else self.func.__class__.__module__) wrapper.func = self.func wrapper.returnType = self.returnType wrapper.evalType = self.evalType wrapper.deterministic = self.deterministic wrapper.asNondeterministic = functools.wraps( self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped()) return wrapper
def _wrapped(self): """ Wrap this udf with a function and attach docstring from func """ # It is possible for a callable instance without __name__ attribute or/and # __module__ attribute to be wrapped here. For example, functools.partial. In this case, # we should avoid wrapping the attributes from the wrapped function to the wrapper # function. So, we take out these attribute names from the default names to set and # then manually assign it after being wrapped. assignments = tuple( a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__') @functools.wraps(self.func, assigned=assignments) def wrapper(*args): return self(*args) wrapper.__name__ = self._name wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__') else self.func.__class__.__module__) wrapper.func = self.func wrapper.returnType = self.returnType wrapper.evalType = self.evalType wrapper.deterministic = self.deterministic wrapper.asNondeterministic = functools.wraps( self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped()) return wrapper
[ "Wrap", "this", "udf", "with", "a", "function", "and", "attach", "docstring", "from", "func" ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L177-L204
[ "def", "_wrapped", "(", "self", ")", ":", "# It is possible for a callable instance without __name__ attribute or/and", "# __module__ attribute to be wrapped here. For example, functools.partial. In this case,", "# we should avoid wrapping the attributes from the wrapped function to the wrapper", "# function. So, we take out these attribute names from the default names to set and", "# then manually assign it after being wrapped.", "assignments", "=", "tuple", "(", "a", "for", "a", "in", "functools", ".", "WRAPPER_ASSIGNMENTS", "if", "a", "!=", "'__name__'", "and", "a", "!=", "'__module__'", ")", "@", "functools", ".", "wraps", "(", "self", ".", "func", ",", "assigned", "=", "assignments", ")", "def", "wrapper", "(", "*", "args", ")", ":", "return", "self", "(", "*", "args", ")", "wrapper", ".", "__name__", "=", "self", ".", "_name", "wrapper", ".", "__module__", "=", "(", "self", ".", "func", ".", "__module__", "if", "hasattr", "(", "self", ".", "func", ",", "'__module__'", ")", "else", "self", ".", "func", ".", "__class__", ".", "__module__", ")", "wrapper", ".", "func", "=", "self", ".", "func", "wrapper", ".", "returnType", "=", "self", ".", "returnType", "wrapper", ".", "evalType", "=", "self", ".", "evalType", "wrapper", ".", "deterministic", "=", "self", ".", "deterministic", "wrapper", ".", "asNondeterministic", "=", "functools", ".", "wraps", "(", "self", ".", "asNondeterministic", ")", "(", "lambda", ":", "self", ".", "asNondeterministic", "(", ")", ".", "_wrapped", "(", ")", ")", "return", "wrapper" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
UDFRegistration.register
Register a Python function (including lambda function) or a user-defined function as a SQL function. :param name: name of the user-defined function in SQL statements. :param f: a Python function, or a user-defined function. The user-defined function can be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and :meth:`pyspark.sql.functions.pandas_udf`. :param returnType: the return type of the registered user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. :return: a user-defined function. To register a nondeterministic Python function, users need to first build a nondeterministic user-defined function for the Python function and then register it as a SQL function. `returnType` can be optionally specified when `f` is a Python function but not when `f` is a user-defined function. Please see below. 1. When `f` is a Python function: `returnType` defaults to string type and can be optionally specified. The produced object must match the specified type. In this case, this API works as if `register(name, f, returnType=StringType())`. >>> strlen = spark.udf.register("stringLengthString", lambda x: len(x)) >>> spark.sql("SELECT stringLengthString('test')").collect() [Row(stringLengthString(test)=u'4')] >>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect() [Row(stringLengthString(text)=u'3')] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] 2. When `f` is a user-defined function: Spark uses the return type of the given user-defined function as the return type of the registered user-defined function. `returnType` should not be specified. In this case, this API works as if `register(name, f)`. >>> from pyspark.sql.types import IntegerType >>> from pyspark.sql.functions import udf >>> slen = udf(lambda s: len(s), IntegerType()) >>> _ = spark.udf.register("slen", slen) >>> spark.sql("SELECT slen('test')").collect() [Row(slen(test)=4)] >>> import random >>> from pyspark.sql.functions import udf >>> from pyspark.sql.types import IntegerType >>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic() >>> new_random_udf = spark.udf.register("random_udf", random_udf) >>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP [Row(random_udf()=82)] >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP ... def add_one(x): ... return x + 1 ... >>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP >>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP [Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)] >>> @pandas_udf("integer", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def sum_udf(v): ... return v.sum() ... >>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP >>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2" >>> spark.sql(q).collect() # doctest: +SKIP [Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)] .. note:: Registration for a user-defined function (case 2.) was added from Spark 2.3.0.
python/pyspark/sql/udf.py
def register(self, name, f, returnType=None): """Register a Python function (including lambda function) or a user-defined function as a SQL function. :param name: name of the user-defined function in SQL statements. :param f: a Python function, or a user-defined function. The user-defined function can be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and :meth:`pyspark.sql.functions.pandas_udf`. :param returnType: the return type of the registered user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. :return: a user-defined function. To register a nondeterministic Python function, users need to first build a nondeterministic user-defined function for the Python function and then register it as a SQL function. `returnType` can be optionally specified when `f` is a Python function but not when `f` is a user-defined function. Please see below. 1. When `f` is a Python function: `returnType` defaults to string type and can be optionally specified. The produced object must match the specified type. In this case, this API works as if `register(name, f, returnType=StringType())`. >>> strlen = spark.udf.register("stringLengthString", lambda x: len(x)) >>> spark.sql("SELECT stringLengthString('test')").collect() [Row(stringLengthString(test)=u'4')] >>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect() [Row(stringLengthString(text)=u'3')] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] 2. When `f` is a user-defined function: Spark uses the return type of the given user-defined function as the return type of the registered user-defined function. `returnType` should not be specified. In this case, this API works as if `register(name, f)`. >>> from pyspark.sql.types import IntegerType >>> from pyspark.sql.functions import udf >>> slen = udf(lambda s: len(s), IntegerType()) >>> _ = spark.udf.register("slen", slen) >>> spark.sql("SELECT slen('test')").collect() [Row(slen(test)=4)] >>> import random >>> from pyspark.sql.functions import udf >>> from pyspark.sql.types import IntegerType >>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic() >>> new_random_udf = spark.udf.register("random_udf", random_udf) >>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP [Row(random_udf()=82)] >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP ... def add_one(x): ... return x + 1 ... >>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP >>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP [Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)] >>> @pandas_udf("integer", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def sum_udf(v): ... return v.sum() ... >>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP >>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2" >>> spark.sql(q).collect() # doctest: +SKIP [Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)] .. note:: Registration for a user-defined function (case 2.) was added from Spark 2.3.0. 
""" # This is to check whether the input function is from a user-defined function or # Python function. if hasattr(f, 'asNondeterministic'): if returnType is not None: raise TypeError( "Invalid returnType: data type can not be specified when f is" "a user-defined function, but got %s." % returnType) if f.evalType not in [PythonEvalType.SQL_BATCHED_UDF, PythonEvalType.SQL_SCALAR_PANDAS_UDF, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]: raise ValueError( "Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF or " "SQL_GROUPED_AGG_PANDAS_UDF") register_udf = UserDefinedFunction(f.func, returnType=f.returnType, name=name, evalType=f.evalType, deterministic=f.deterministic) return_udf = f else: if returnType is None: returnType = StringType() register_udf = UserDefinedFunction(f, returnType=returnType, name=name, evalType=PythonEvalType.SQL_BATCHED_UDF) return_udf = register_udf._wrapped() self.sparkSession._jsparkSession.udf().registerPython(name, register_udf._judf) return return_udf
def register(self, name, f, returnType=None): """Register a Python function (including lambda function) or a user-defined function as a SQL function. :param name: name of the user-defined function in SQL statements. :param f: a Python function, or a user-defined function. The user-defined function can be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and :meth:`pyspark.sql.functions.pandas_udf`. :param returnType: the return type of the registered user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. :return: a user-defined function. To register a nondeterministic Python function, users need to first build a nondeterministic user-defined function for the Python function and then register it as a SQL function. `returnType` can be optionally specified when `f` is a Python function but not when `f` is a user-defined function. Please see below. 1. When `f` is a Python function: `returnType` defaults to string type and can be optionally specified. The produced object must match the specified type. In this case, this API works as if `register(name, f, returnType=StringType())`. >>> strlen = spark.udf.register("stringLengthString", lambda x: len(x)) >>> spark.sql("SELECT stringLengthString('test')").collect() [Row(stringLengthString(test)=u'4')] >>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect() [Row(stringLengthString(text)=u'3')] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] 2. When `f` is a user-defined function: Spark uses the return type of the given user-defined function as the return type of the registered user-defined function. `returnType` should not be specified. In this case, this API works as if `register(name, f)`. >>> from pyspark.sql.types import IntegerType >>> from pyspark.sql.functions import udf >>> slen = udf(lambda s: len(s), IntegerType()) >>> _ = spark.udf.register("slen", slen) >>> spark.sql("SELECT slen('test')").collect() [Row(slen(test)=4)] >>> import random >>> from pyspark.sql.functions import udf >>> from pyspark.sql.types import IntegerType >>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic() >>> new_random_udf = spark.udf.register("random_udf", random_udf) >>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP [Row(random_udf()=82)] >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP ... def add_one(x): ... return x + 1 ... >>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP >>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP [Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)] >>> @pandas_udf("integer", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def sum_udf(v): ... return v.sum() ... >>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP >>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2" >>> spark.sql(q).collect() # doctest: +SKIP [Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)] .. note:: Registration for a user-defined function (case 2.) was added from Spark 2.3.0. 
""" # This is to check whether the input function is from a user-defined function or # Python function. if hasattr(f, 'asNondeterministic'): if returnType is not None: raise TypeError( "Invalid returnType: data type can not be specified when f is" "a user-defined function, but got %s." % returnType) if f.evalType not in [PythonEvalType.SQL_BATCHED_UDF, PythonEvalType.SQL_SCALAR_PANDAS_UDF, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]: raise ValueError( "Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF or " "SQL_GROUPED_AGG_PANDAS_UDF") register_udf = UserDefinedFunction(f.func, returnType=f.returnType, name=name, evalType=f.evalType, deterministic=f.deterministic) return_udf = f else: if returnType is None: returnType = StringType() register_udf = UserDefinedFunction(f, returnType=returnType, name=name, evalType=PythonEvalType.SQL_BATCHED_UDF) return_udf = register_udf._wrapped() self.sparkSession._jsparkSession.udf().registerPython(name, register_udf._judf) return return_udf
[ "Register", "a", "Python", "function", "(", "including", "lambda", "function", ")", "or", "a", "user", "-", "defined", "function", "as", "a", "SQL", "function", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L232-L341
[ "def", "register", "(", "self", ",", "name", ",", "f", ",", "returnType", "=", "None", ")", ":", "# This is to check whether the input function is from a user-defined function or", "# Python function.", "if", "hasattr", "(", "f", ",", "'asNondeterministic'", ")", ":", "if", "returnType", "is", "not", "None", ":", "raise", "TypeError", "(", "\"Invalid returnType: data type can not be specified when f is\"", "\"a user-defined function, but got %s.\"", "%", "returnType", ")", "if", "f", ".", "evalType", "not", "in", "[", "PythonEvalType", ".", "SQL_BATCHED_UDF", ",", "PythonEvalType", ".", "SQL_SCALAR_PANDAS_UDF", ",", "PythonEvalType", ".", "SQL_GROUPED_AGG_PANDAS_UDF", "]", ":", "raise", "ValueError", "(", "\"Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF or \"", "\"SQL_GROUPED_AGG_PANDAS_UDF\"", ")", "register_udf", "=", "UserDefinedFunction", "(", "f", ".", "func", ",", "returnType", "=", "f", ".", "returnType", ",", "name", "=", "name", ",", "evalType", "=", "f", ".", "evalType", ",", "deterministic", "=", "f", ".", "deterministic", ")", "return_udf", "=", "f", "else", ":", "if", "returnType", "is", "None", ":", "returnType", "=", "StringType", "(", ")", "register_udf", "=", "UserDefinedFunction", "(", "f", ",", "returnType", "=", "returnType", ",", "name", "=", "name", ",", "evalType", "=", "PythonEvalType", ".", "SQL_BATCHED_UDF", ")", "return_udf", "=", "register_udf", ".", "_wrapped", "(", ")", "self", ".", "sparkSession", ".", "_jsparkSession", ".", "udf", "(", ")", ".", "registerPython", "(", "name", ",", "register_udf", ".", "_judf", ")", "return", "return_udf" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
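One behaviour worth calling out from the body above: when f is already a user-defined function (anything carrying an asNondeterministic attribute), its own return type is reused and passing returnType is rejected. A minimal sketch of that guard, assuming an active SparkSession bound to spark; this is an illustration, not part of the original docstring:
>>> from pyspark.sql.functions import udf
>>> from pyspark.sql.types import IntegerType
>>> plus_one = udf(lambda x: x + 1, IntegerType())
>>> _ = spark.udf.register("plus_one", plus_one)              # fine: return type comes from the UDF itself
>>> spark.udf.register("plus_one", plus_one, IntegerType())   # raises TypeError  # doctest: +SKIP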
train
UDFRegistration.registerJavaFunction
Register a Java user-defined function as a SQL function. In addition to a name and the function itself, the return type can be optionally specified. When the return type is not specified, it is inferred from the Java class via reflection. :param name: name of the user-defined function :param javaClassName: fully qualified name of java class :param returnType: the return type of the registered Java function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. >>> from pyspark.sql.types import IntegerType >>> spark.udf.registerJavaFunction( ... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType()) >>> spark.sql("SELECT javaStringLength('test')").collect() [Row(UDF:javaStringLength(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength") >>> spark.sql("SELECT javaStringLength2('test')").collect() [Row(UDF:javaStringLength2(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer") >>> spark.sql("SELECT javaStringLength3('test')").collect() [Row(UDF:javaStringLength3(test)=4)]
python/pyspark/sql/udf.py
def registerJavaFunction(self, name, javaClassName, returnType=None): """Register a Java user-defined function as a SQL function. In addition to a name and the function itself, the return type can be optionally specified. When the return type is not specified we would infer it via reflection. :param name: name of the user-defined function :param javaClassName: fully qualified name of java class :param returnType: the return type of the registered Java function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. >>> from pyspark.sql.types import IntegerType >>> spark.udf.registerJavaFunction( ... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType()) >>> spark.sql("SELECT javaStringLength('test')").collect() [Row(UDF:javaStringLength(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength") >>> spark.sql("SELECT javaStringLength2('test')").collect() [Row(UDF:javaStringLength2(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer") >>> spark.sql("SELECT javaStringLength3('test')").collect() [Row(UDF:javaStringLength3(test)=4)] """ jdt = None if returnType is not None: if not isinstance(returnType, DataType): returnType = _parse_datatype_string(returnType) jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json()) self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
def registerJavaFunction(self, name, javaClassName, returnType=None): """Register a Java user-defined function as a SQL function. In addition to a name and the function itself, the return type can be optionally specified. When the return type is not specified we would infer it via reflection. :param name: name of the user-defined function :param javaClassName: fully qualified name of java class :param returnType: the return type of the registered Java function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. >>> from pyspark.sql.types import IntegerType >>> spark.udf.registerJavaFunction( ... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType()) >>> spark.sql("SELECT javaStringLength('test')").collect() [Row(UDF:javaStringLength(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength") >>> spark.sql("SELECT javaStringLength2('test')").collect() [Row(UDF:javaStringLength2(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer") >>> spark.sql("SELECT javaStringLength3('test')").collect() [Row(UDF:javaStringLength3(test)=4)] """ jdt = None if returnType is not None: if not isinstance(returnType, DataType): returnType = _parse_datatype_string(returnType) jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json()) self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
[ "Register", "a", "Java", "user", "-", "defined", "function", "as", "a", "SQL", "function", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L345-L378
[ "def", "registerJavaFunction", "(", "self", ",", "name", ",", "javaClassName", ",", "returnType", "=", "None", ")", ":", "jdt", "=", "None", "if", "returnType", "is", "not", "None", ":", "if", "not", "isinstance", "(", "returnType", ",", "DataType", ")", ":", "returnType", "=", "_parse_datatype_string", "(", "returnType", ")", "jdt", "=", "self", ".", "sparkSession", ".", "_jsparkSession", ".", "parseDataType", "(", "returnType", ".", "json", "(", ")", ")", "self", ".", "sparkSession", ".", "_jsparkSession", ".", "udf", "(", ")", ".", "registerJava", "(", "name", ",", "javaClassName", ",", "jdt", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
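The Java class named here has to be visible on Spark's classpath before registration; a common way to arrange that is to pass the jar when building the session. A hedged sketch with a placeholder jar path and class name (neither ships with Spark):
>>> from pyspark.sql import SparkSession
>>> spark = (SparkSession.builder
...          .config("spark.jars", "/path/to/my-udfs.jar")        # hypothetical jar containing the UDF class
...          .getOrCreate())
>>> spark.udf.registerJavaFunction("myUpper", "com.example.MyUpperUDF", "string")   # doctest: +SKIP
>>> spark.sql("SELECT myUpper('hello')").collect()                                  # doctest: +SKIP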
train
UDFRegistration.registerJavaUDAF
Register a Java user-defined aggregate function as a SQL function. :param name: name of the user-defined aggregate function :param javaClassName: fully qualified name of java class >>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg") >>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"]) >>> df.createOrReplaceTempView("df") >>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect() [Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)]
python/pyspark/sql/udf.py
def registerJavaUDAF(self, name, javaClassName): """Register a Java user-defined aggregate function as a SQL function. :param name: name of the user-defined aggregate function :param javaClassName: fully qualified name of java class >>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg") >>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"]) >>> df.createOrReplaceTempView("df") >>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect() [Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)] """ self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
def registerJavaUDAF(self, name, javaClassName): """Register a Java user-defined aggregate function as a SQL function. :param name: name of the user-defined aggregate function :param javaClassName: fully qualified name of java class >>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg") >>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"]) >>> df.createOrReplaceTempView("df") >>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect() [Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)] """ self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
[ "Register", "a", "Java", "user", "-", "defined", "aggregate", "function", "as", "a", "SQL", "function", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L382-L395
[ "def", "registerJavaUDAF", "(", "self", ",", "name", ",", "javaClassName", ")", ":", "self", ".", "sparkSession", ".", "_jsparkSession", ".", "udf", "(", ")", ".", "registerJavaUDAF", "(", "name", ",", "javaClassName", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
StreamingContext.getOrCreate
Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be recreated from the checkpoint data. If the data does not exist, then the provided setupFunc will be used to create a new context. @param checkpointPath: Checkpoint directory used in an earlier streaming program @param setupFunc: Function to create a new context and setup DStreams
python/pyspark/streaming/context.py
def getOrCreate(cls, checkpointPath, setupFunc): """ Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be recreated from the checkpoint data. If the data does not exist, then the provided setupFunc will be used to create a new context. @param checkpointPath: Checkpoint directory used in an earlier streaming program @param setupFunc: Function to create a new context and setup DStreams """ cls._ensure_initialized() gw = SparkContext._gateway # Check whether valid checkpoint information exists in the given path ssc_option = gw.jvm.StreamingContextPythonHelper().tryRecoverFromCheckpoint(checkpointPath) if ssc_option.isEmpty(): ssc = setupFunc() ssc.checkpoint(checkpointPath) return ssc jssc = gw.jvm.JavaStreamingContext(ssc_option.get()) # If there is already an active instance of Python SparkContext use it, or create a new one if not SparkContext._active_spark_context: jsc = jssc.sparkContext() conf = SparkConf(_jconf=jsc.getConf()) SparkContext(conf=conf, gateway=gw, jsc=jsc) sc = SparkContext._active_spark_context # update ctx in serializer cls._transformerSerializer.ctx = sc return StreamingContext(sc, None, jssc)
def getOrCreate(cls, checkpointPath, setupFunc): """ Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be recreated from the checkpoint data. If the data does not exist, then the provided setupFunc will be used to create a new context. @param checkpointPath: Checkpoint directory used in an earlier streaming program @param setupFunc: Function to create a new context and setup DStreams """ cls._ensure_initialized() gw = SparkContext._gateway # Check whether valid checkpoint information exists in the given path ssc_option = gw.jvm.StreamingContextPythonHelper().tryRecoverFromCheckpoint(checkpointPath) if ssc_option.isEmpty(): ssc = setupFunc() ssc.checkpoint(checkpointPath) return ssc jssc = gw.jvm.JavaStreamingContext(ssc_option.get()) # If there is already an active instance of Python SparkContext use it, or create a new one if not SparkContext._active_spark_context: jsc = jssc.sparkContext() conf = SparkConf(_jconf=jsc.getConf()) SparkContext(conf=conf, gateway=gw, jsc=jsc) sc = SparkContext._active_spark_context # update ctx in serializer cls._transformerSerializer.ctx = sc return StreamingContext(sc, None, jssc)
[ "Either", "recreate", "a", "StreamingContext", "from", "checkpoint", "data", "or", "create", "a", "new", "StreamingContext", ".", "If", "checkpoint", "data", "exists", "in", "the", "provided", "checkpointPath", "then", "StreamingContext", "will", "be", "recreated", "from", "the", "checkpoint", "data", ".", "If", "the", "data", "does", "not", "exist", "then", "the", "provided", "setupFunc", "will", "be", "used", "to", "create", "a", "new", "context", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L88-L120
[ "def", "getOrCreate", "(", "cls", ",", "checkpointPath", ",", "setupFunc", ")", ":", "cls", ".", "_ensure_initialized", "(", ")", "gw", "=", "SparkContext", ".", "_gateway", "# Check whether valid checkpoint information exists in the given path", "ssc_option", "=", "gw", ".", "jvm", ".", "StreamingContextPythonHelper", "(", ")", ".", "tryRecoverFromCheckpoint", "(", "checkpointPath", ")", "if", "ssc_option", ".", "isEmpty", "(", ")", ":", "ssc", "=", "setupFunc", "(", ")", "ssc", ".", "checkpoint", "(", "checkpointPath", ")", "return", "ssc", "jssc", "=", "gw", ".", "jvm", ".", "JavaStreamingContext", "(", "ssc_option", ".", "get", "(", ")", ")", "# If there is already an active instance of Python SparkContext use it, or create a new one", "if", "not", "SparkContext", ".", "_active_spark_context", ":", "jsc", "=", "jssc", ".", "sparkContext", "(", ")", "conf", "=", "SparkConf", "(", "_jconf", "=", "jsc", ".", "getConf", "(", ")", ")", "SparkContext", "(", "conf", "=", "conf", ",", "gateway", "=", "gw", ",", "jsc", "=", "jsc", ")", "sc", "=", "SparkContext", ".", "_active_spark_context", "# update ctx in serializer", "cls", ".", "_transformerSerializer", ".", "ctx", "=", "sc", "return", "StreamingContext", "(", "sc", ",", "None", ",", "jssc", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
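The intended usage is to wrap every piece of DStream construction inside setupFunc, so that the identical graph can be rebuilt when the checkpoint is replayed after a failure. A minimal sketch, assuming a local socket source and a checkpoint directory of /tmp/checkpoint:
>>> from pyspark import SparkContext
>>> from pyspark.streaming import StreamingContext
>>> def create_context():
...     sc = SparkContext(appName="CheckpointedApp")
...     ssc = StreamingContext(sc, 5)                               # 5-second batches
...     ssc.socketTextStream("localhost", 9999).count().pprint()
...     return ssc
...
>>> ssc = StreamingContext.getOrCreate("/tmp/checkpoint", create_context)   # doctest: +SKIP
>>> ssc.start(); ssc.awaitTermination()                                     # doctest: +SKIP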
train
StreamingContext.getActive
Return either the currently active StreamingContext (i.e., if there is a context started but not stopped) or None.
python/pyspark/streaming/context.py
def getActive(cls): """ Return either the currently active StreamingContext (i.e., if there is a context started but not stopped) or None. """ activePythonContext = cls._activeContext if activePythonContext is not None: # Verify that the current running Java StreamingContext is active and is the same one # backing the supposedly active Python context activePythonContextJavaId = activePythonContext._jssc.ssc().hashCode() activeJvmContextOption = activePythonContext._jvm.StreamingContext.getActive() if activeJvmContextOption.isEmpty(): cls._activeContext = None elif activeJvmContextOption.get().hashCode() != activePythonContextJavaId: cls._activeContext = None raise Exception("JVM's active JavaStreamingContext is not the JavaStreamingContext " "backing the action Python StreamingContext. This is unexpected.") return cls._activeContext
def getActive(cls): """ Return either the currently active StreamingContext (i.e., if there is a context started but not stopped) or None. """ activePythonContext = cls._activeContext if activePythonContext is not None: # Verify that the current running Java StreamingContext is active and is the same one # backing the supposedly active Python context activePythonContextJavaId = activePythonContext._jssc.ssc().hashCode() activeJvmContextOption = activePythonContext._jvm.StreamingContext.getActive() if activeJvmContextOption.isEmpty(): cls._activeContext = None elif activeJvmContextOption.get().hashCode() != activePythonContextJavaId: cls._activeContext = None raise Exception("JVM's active JavaStreamingContext is not the JavaStreamingContext " "backing the action Python StreamingContext. This is unexpected.") return cls._activeContext
[ "Return", "either", "the", "currently", "active", "StreamingContext", "(", "i", ".", "e", ".", "if", "there", "is", "a", "context", "started", "but", "not", "stopped", ")", "or", "None", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L123-L141
[ "def", "getActive", "(", "cls", ")", ":", "activePythonContext", "=", "cls", ".", "_activeContext", "if", "activePythonContext", "is", "not", "None", ":", "# Verify that the current running Java StreamingContext is active and is the same one", "# backing the supposedly active Python context", "activePythonContextJavaId", "=", "activePythonContext", ".", "_jssc", ".", "ssc", "(", ")", ".", "hashCode", "(", ")", "activeJvmContextOption", "=", "activePythonContext", ".", "_jvm", ".", "StreamingContext", ".", "getActive", "(", ")", "if", "activeJvmContextOption", ".", "isEmpty", "(", ")", ":", "cls", ".", "_activeContext", "=", "None", "elif", "activeJvmContextOption", ".", "get", "(", ")", ".", "hashCode", "(", ")", "!=", "activePythonContextJavaId", ":", "cls", ".", "_activeContext", "=", "None", "raise", "Exception", "(", "\"JVM's active JavaStreamingContext is not the JavaStreamingContext \"", "\"backing the action Python StreamingContext. This is unexpected.\"", ")", "return", "cls", ".", "_activeContext" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
train
StreamingContext.getActiveOrCreate
Either return the active StreamingContext (i.e. currently started but not stopped), or recreate a StreamingContext from checkpoint data, or create a new StreamingContext using the provided setupFunc function. If the checkpointPath is None or does not contain valid checkpoint data, then setupFunc will be called to create a new context and setup DStreams. @param checkpointPath: Checkpoint directory used in an earlier streaming program. Can be None if the intention is to always create a new context when there is no active context. @param setupFunc: Function to create a new StreamingContext and setup DStreams
python/pyspark/streaming/context.py
def getActiveOrCreate(cls, checkpointPath, setupFunc): """ Either return the active StreamingContext (i.e. currently started but not stopped), or recreate a StreamingContext from checkpoint data or create a new StreamingContext using the provided setupFunc function. If the checkpointPath is None or does not contain valid checkpoint data, then setupFunc will be called to create a new context and setup DStreams. @param checkpointPath: Checkpoint directory used in an earlier streaming program. Can be None if the intention is to always create a new context when there is no active context. @param setupFunc: Function to create a new JavaStreamingContext and setup DStreams """ if setupFunc is None: raise Exception("setupFunc cannot be None") activeContext = cls.getActive() if activeContext is not None: return activeContext elif checkpointPath is not None: return cls.getOrCreate(checkpointPath, setupFunc) else: return setupFunc()
def getActiveOrCreate(cls, checkpointPath, setupFunc): """ Either return the active StreamingContext (i.e. currently started but not stopped), or recreate a StreamingContext from checkpoint data or create a new StreamingContext using the provided setupFunc function. If the checkpointPath is None or does not contain valid checkpoint data, then setupFunc will be called to create a new context and setup DStreams. @param checkpointPath: Checkpoint directory used in an earlier streaming program. Can be None if the intention is to always create a new context when there is no active context. @param setupFunc: Function to create a new JavaStreamingContext and setup DStreams """ if setupFunc is None: raise Exception("setupFunc cannot be None") activeContext = cls.getActive() if activeContext is not None: return activeContext elif checkpointPath is not None: return cls.getOrCreate(checkpointPath, setupFunc) else: return setupFunc()
[ "Either", "return", "the", "active", "StreamingContext", "(", "i", ".", "e", ".", "currently", "started", "but", "not", "stopped", ")", "or", "recreate", "a", "StreamingContext", "from", "checkpoint", "data", "or", "create", "a", "new", "StreamingContext", "using", "the", "provided", "setupFunc", "function", ".", "If", "the", "checkpointPath", "is", "None", "or", "does", "not", "contain", "valid", "checkpoint", "data", "then", "setupFunc", "will", "be", "called", "to", "create", "a", "new", "context", "and", "setup", "DStreams", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L144-L166
[ "def", "getActiveOrCreate", "(", "cls", ",", "checkpointPath", ",", "setupFunc", ")", ":", "if", "setupFunc", "is", "None", ":", "raise", "Exception", "(", "\"setupFunc cannot be None\"", ")", "activeContext", "=", "cls", ".", "getActive", "(", ")", "if", "activeContext", "is", "not", "None", ":", "return", "activeContext", "elif", "checkpointPath", "is", "not", "None", ":", "return", "cls", ".", "getOrCreate", "(", "checkpointPath", ",", "setupFunc", ")", "else", ":", "return", "setupFunc", "(", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
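This is the most forgiving entry point: an already-started context is returned as-is, otherwise checkpoint recovery is attempted, and only then is setupFunc called. A short sketch reusing the hypothetical create_context from the getOrCreate example above:
>>> ssc = StreamingContext.getActiveOrCreate("/tmp/checkpoint", create_context)   # doctest: +SKIP
>>> ssc = StreamingContext.getActiveOrCreate(None, create_context)                # no checkpoint: active context or a brand new one  # doctest: +SKIP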
train
StreamingContext.awaitTermination
Wait for the execution to stop. @param timeout: time to wait in seconds
python/pyspark/streaming/context.py
def awaitTermination(self, timeout=None): """ Wait for the execution to stop. @param timeout: time to wait in seconds """ if timeout is None: self._jssc.awaitTermination() else: self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
def awaitTermination(self, timeout=None): """ Wait for the execution to stop. @param timeout: time to wait in seconds """ if timeout is None: self._jssc.awaitTermination() else: self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
[ "Wait", "for", "the", "execution", "to", "stop", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L182-L191
[ "def", "awaitTermination", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "None", ":", "self", ".", "_jssc", ".", "awaitTermination", "(", ")", "else", ":", "self", ".", "_jssc", ".", "awaitTerminationOrTimeout", "(", "int", "(", "timeout", "*", "1000", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
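Note that the timeout is converted to milliseconds and handed to awaitTerminationOrTimeout, so the call simply returns after roughly that long if the streams are still running. A sketch, assuming ssc is a started StreamingContext:
>>> ssc.start()                          # doctest: +SKIP
>>> ssc.awaitTermination(timeout=60)     # blocks for at most ~60 seconds, then returns  # doctest: +SKIP
>>> ssc.awaitTermination()               # blocks until stop() is called elsewhere       # doctest: +SKIP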
train
StreamingContext.stop
Stop the execution of the streams, with option of ensuring all received data has been processed. @param stopSparkContext: Stop the associated SparkContext or not @param stopGracefully: Stop gracefully by waiting for the processing of all received data to be completed
python/pyspark/streaming/context.py
def stop(self, stopSparkContext=True, stopGraceFully=False): """ Stop the execution of the streams, with option of ensuring all received data has been processed. @param stopSparkContext: Stop the associated SparkContext or not @param stopGracefully: Stop gracefully by waiting for the processing of all received data to be completed """ self._jssc.stop(stopSparkContext, stopGraceFully) StreamingContext._activeContext = None if stopSparkContext: self._sc.stop()
def stop(self, stopSparkContext=True, stopGraceFully=False): """ Stop the execution of the streams, with option of ensuring all received data has been processed. @param stopSparkContext: Stop the associated SparkContext or not @param stopGracefully: Stop gracefully by waiting for the processing of all received data to be completed """ self._jssc.stop(stopSparkContext, stopGraceFully) StreamingContext._activeContext = None if stopSparkContext: self._sc.stop()
[ "Stop", "the", "execution", "of", "the", "streams", "with", "option", "of", "ensuring", "all", "received", "data", "has", "been", "processed", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L203-L215
[ "def", "stop", "(", "self", ",", "stopSparkContext", "=", "True", ",", "stopGraceFully", "=", "False", ")", ":", "self", ".", "_jssc", ".", "stop", "(", "stopSparkContext", ",", "stopGraceFully", ")", "StreamingContext", ".", "_activeContext", "=", "None", "if", "stopSparkContext", ":", "self", ".", "_sc", ".", "stop", "(", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
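Keeping the SparkContext alive is useful when batch work should continue after the streams are drained. A sketch of a graceful shutdown, assuming ssc wraps a running job; note the parameter is spelled stopGraceFully in this signature:
>>> ssc.stop(stopSparkContext=False, stopGraceFully=True)    # wait for in-flight batches to finish  # doctest: +SKIP
>>> ssc.sparkContext.parallelize([1, 2, 3]).count()          # the SparkContext is still usable      # doctest: +SKIP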
train
StreamingContext.socketTextStream
Create an input from TCP source hostname:port. Data is received using a TCP socket and the received bytes are interpreted as UTF8-encoded, ``\\n``-delimited lines. @param hostname: Hostname to connect to for receiving data @param port: Port to connect to for receiving data @param storageLevel: Storage level to use for storing the received objects
python/pyspark/streaming/context.py
def socketTextStream(self, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_2): """ Create an input from TCP source hostname:port. Data is received using a TCP socket and receive byte is interpreted as UTF8 encoded ``\\n`` delimited lines. @param hostname: Hostname to connect to for receiving data @param port: Port to connect to for receiving data @param storageLevel: Storage level to use for storing the received objects """ jlevel = self._sc._getJavaStorageLevel(storageLevel) return DStream(self._jssc.socketTextStream(hostname, port, jlevel), self, UTF8Deserializer())
def socketTextStream(self, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_2): """ Create an input from TCP source hostname:port. Data is received using a TCP socket and receive byte is interpreted as UTF8 encoded ``\\n`` delimited lines. @param hostname: Hostname to connect to for receiving data @param port: Port to connect to for receiving data @param storageLevel: Storage level to use for storing the received objects """ jlevel = self._sc._getJavaStorageLevel(storageLevel) return DStream(self._jssc.socketTextStream(hostname, port, jlevel), self, UTF8Deserializer())
[ "Create", "an", "input", "from", "TCP", "source", "hostname", ":", "port", ".", "Data", "is", "received", "using", "a", "TCP", "socket", "and", "receive", "byte", "is", "interpreted", "as", "UTF8", "encoded", "\\\\", "n", "delimited", "lines", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L241-L253
[ "def", "socketTextStream", "(", "self", ",", "hostname", ",", "port", ",", "storageLevel", "=", "StorageLevel", ".", "MEMORY_AND_DISK_2", ")", ":", "jlevel", "=", "self", ".", "_sc", ".", "_getJavaStorageLevel", "(", "storageLevel", ")", "return", "DStream", "(", "self", ".", "_jssc", ".", "socketTextStream", "(", "hostname", ",", "port", ",", "jlevel", ")", ",", "self", ",", "UTF8Deserializer", "(", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
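The returned DStream of text lines composes with the usual transformations; the canonical word count, assuming something like nc -lk 9999 is feeding text on the same machine, looks as follows:
>>> lines = ssc.socketTextStream("localhost", 9999)                 # doctest: +SKIP
>>> counts = (lines.flatMap(lambda line: line.split(" "))
...                .map(lambda word: (word, 1))
...                .reduceByKey(lambda a, b: a + b))                # doctest: +SKIP
>>> counts.pprint()                                                 # doctest: +SKIP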
train
StreamingContext.textFileStream
Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as text files. Files must be written to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. The text files must be encoded as UTF-8.
python/pyspark/streaming/context.py
def textFileStream(self, directory): """ Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as text files. Files must be wrriten to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. The text files must be encoded as UTF-8. """ return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
def textFileStream(self, directory): """ Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as text files. Files must be wrriten to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. The text files must be encoded as UTF-8. """ return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
[ "Create", "an", "input", "stream", "that", "monitors", "a", "Hadoop", "-", "compatible", "file", "system", "for", "new", "files", "and", "reads", "them", "as", "text", "files", ".", "Files", "must", "be", "wrriten", "to", "the", "monitored", "directory", "by", "moving", "them", "from", "another", "location", "within", "the", "same", "file", "system", ".", "File", "names", "starting", "with", ".", "are", "ignored", ".", "The", "text", "files", "must", "be", "encoded", "as", "UTF", "-", "8", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L255-L263
[ "def", "textFileStream", "(", "self", ",", "directory", ")", ":", "return", "DStream", "(", "self", ".", "_jssc", ".", "textFileStream", "(", "directory", ")", ",", "self", ",", "UTF8Deserializer", "(", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
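Only files that appear atomically after the stream starts (moved or renamed into the directory) are picked up; appending to an existing file is not detected. A short sketch with a hypothetical landing directory:
>>> logs = ssc.textFileStream("hdfs:///data/landing")               # doctest: +SKIP
>>> logs.filter(lambda line: "ERROR" in line).pprint()              # doctest: +SKIP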
train
StreamingContext.binaryRecordsStream
Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as flat binary files with records of fixed length. Files must be written to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. @param directory: Directory to load data from @param recordLength: Length of each record in bytes
python/pyspark/streaming/context.py
def binaryRecordsStream(self, directory, recordLength): """ Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as flat binary files with records of fixed length. Files must be written to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. @param directory: Directory to load data from @param recordLength: Length of each record in bytes """ return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self, NoOpSerializer())
def binaryRecordsStream(self, directory, recordLength): """ Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as flat binary files with records of fixed length. Files must be written to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. @param directory: Directory to load data from @param recordLength: Length of each record in bytes """ return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self, NoOpSerializer())
[ "Create", "an", "input", "stream", "that", "monitors", "a", "Hadoop", "-", "compatible", "file", "system", "for", "new", "files", "and", "reads", "them", "as", "flat", "binary", "files", "with", "records", "of", "fixed", "length", ".", "Files", "must", "be", "written", "to", "the", "monitored", "directory", "by", "moving", "them", "from", "another", "location", "within", "the", "same", "file", "system", ".", "File", "names", "starting", "with", ".", "are", "ignored", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L265-L277
[ "def", "binaryRecordsStream", "(", "self", ",", "directory", ",", "recordLength", ")", ":", "return", "DStream", "(", "self", ".", "_jssc", ".", "binaryRecordsStream", "(", "directory", ",", "recordLength", ")", ",", "self", ",", "NoOpSerializer", "(", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
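Each element of the resulting DStream is a byte string of exactly recordLength bytes, so fixed-width records decode naturally with struct. A sketch assuming 12-byte records holding three little-endian int32 fields:
>>> import struct
>>> records = ssc.binaryRecordsStream("/data/binary", recordLength=12)    # doctest: +SKIP
>>> records.map(lambda rec: struct.unpack("<iii", rec)).pprint()          # doctest: +SKIP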
train
StreamingContext.queueStream
Create an input stream from a queue of RDDs or lists. In each batch, it will process either one or all of the RDDs returned by the queue. .. note:: Changes to the queue after the stream is created will not be recognized. @param rdds: Queue of RDDs @param oneAtATime: pick one RDD each time or pick all of them once. @param default: The default RDD if there are no more RDDs in the queue
python/pyspark/streaming/context.py
def queueStream(self, rdds, oneAtATime=True, default=None): """ Create an input stream from a queue of RDDs or list. In each batch, it will process either one or all of the RDDs returned by the queue. .. note:: Changes to the queue after the stream is created will not be recognized. @param rdds: Queue of RDDs @param oneAtATime: pick one rdd each time or pick all of them once. @param default: The default rdd if no more in rdds """ if default and not isinstance(default, RDD): default = self._sc.parallelize(default) if not rdds and default: rdds = [rdds] if rdds and not isinstance(rdds[0], RDD): rdds = [self._sc.parallelize(input) for input in rdds] self._check_serializers(rdds) queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds]) if default: default = default._reserialize(rdds[0]._jrdd_deserializer) jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd) else: jdstream = self._jssc.queueStream(queue, oneAtATime) return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
def queueStream(self, rdds, oneAtATime=True, default=None): """ Create an input stream from a queue of RDDs or list. In each batch, it will process either one or all of the RDDs returned by the queue. .. note:: Changes to the queue after the stream is created will not be recognized. @param rdds: Queue of RDDs @param oneAtATime: pick one rdd each time or pick all of them once. @param default: The default rdd if no more in rdds """ if default and not isinstance(default, RDD): default = self._sc.parallelize(default) if not rdds and default: rdds = [rdds] if rdds and not isinstance(rdds[0], RDD): rdds = [self._sc.parallelize(input) for input in rdds] self._check_serializers(rdds) queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds]) if default: default = default._reserialize(rdds[0]._jrdd_deserializer) jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd) else: jdstream = self._jssc.queueStream(queue, oneAtATime) return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
[ "Create", "an", "input", "stream", "from", "a", "queue", "of", "RDDs", "or", "list", ".", "In", "each", "batch", "it", "will", "process", "either", "one", "or", "all", "of", "the", "RDDs", "returned", "by", "the", "queue", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L286-L313
[ "def", "queueStream", "(", "self", ",", "rdds", ",", "oneAtATime", "=", "True", ",", "default", "=", "None", ")", ":", "if", "default", "and", "not", "isinstance", "(", "default", ",", "RDD", ")", ":", "default", "=", "self", ".", "_sc", ".", "parallelize", "(", "default", ")", "if", "not", "rdds", "and", "default", ":", "rdds", "=", "[", "rdds", "]", "if", "rdds", "and", "not", "isinstance", "(", "rdds", "[", "0", "]", ",", "RDD", ")", ":", "rdds", "=", "[", "self", ".", "_sc", ".", "parallelize", "(", "input", ")", "for", "input", "in", "rdds", "]", "self", ".", "_check_serializers", "(", "rdds", ")", "queue", "=", "self", ".", "_jvm", ".", "PythonDStream", ".", "toRDDQueue", "(", "[", "r", ".", "_jrdd", "for", "r", "in", "rdds", "]", ")", "if", "default", ":", "default", "=", "default", ".", "_reserialize", "(", "rdds", "[", "0", "]", ".", "_jrdd_deserializer", ")", "jdstream", "=", "self", ".", "_jssc", ".", "queueStream", "(", "queue", ",", "oneAtATime", ",", "default", ".", "_jrdd", ")", "else", ":", "jdstream", "=", "self", ".", "_jssc", ".", "queueStream", "(", "queue", ",", "oneAtATime", ")", "return", "DStream", "(", "jdstream", ",", "self", ",", "rdds", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
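queueStream is mostly a testing aid: each pre-built RDD, or plain list (which is parallelized for you), is served as one batch. A minimal sketch:
>>> stream = ssc.queueStream([[1, 2, 3], [4, 5, 6]], oneAtATime=True)    # one list -> one batch  # doctest: +SKIP
>>> stream.reduce(lambda a, b: a + b).pprint()                           # doctest: +SKIP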
train
StreamingContext.transform
Create a new DStream in which each RDD is generated by applying a function on RDDs of the DStreams. The order of the JavaRDDs in the transform function parameter will be the same as the order of corresponding DStreams in the list.
python/pyspark/streaming/context.py
def transform(self, dstreams, transformFunc): """ Create a new DStream in which each RDD is generated by applying a function on RDDs of the DStreams. The order of the JavaRDDs in the transform function parameter will be the same as the order of corresponding DStreams in the list. """ jdstreams = [d._jdstream for d in dstreams] # change the final serializer to sc.serializer func = TransformFunction(self._sc, lambda t, *rdds: transformFunc(rdds), *[d._jrdd_deserializer for d in dstreams]) jfunc = self._jvm.TransformFunction(func) jdstream = self._jssc.transform(jdstreams, jfunc) return DStream(jdstream, self, self._sc.serializer)
def transform(self, dstreams, transformFunc): """ Create a new DStream in which each RDD is generated by applying a function on RDDs of the DStreams. The order of the JavaRDDs in the transform function parameter will be the same as the order of corresponding DStreams in the list. """ jdstreams = [d._jdstream for d in dstreams] # change the final serializer to sc.serializer func = TransformFunction(self._sc, lambda t, *rdds: transformFunc(rdds), *[d._jrdd_deserializer for d in dstreams]) jfunc = self._jvm.TransformFunction(func) jdstream = self._jssc.transform(jdstreams, jfunc) return DStream(jdstream, self, self._sc.serializer)
[ "Create", "a", "new", "DStream", "in", "which", "each", "RDD", "is", "generated", "by", "applying", "a", "function", "on", "RDDs", "of", "the", "DStreams", ".", "The", "order", "of", "the", "JavaRDDs", "in", "the", "transform", "function", "parameter", "will", "be", "the", "same", "as", "the", "order", "of", "corresponding", "DStreams", "in", "the", "list", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L315-L329
[ "def", "transform", "(", "self", ",", "dstreams", ",", "transformFunc", ")", ":", "jdstreams", "=", "[", "d", ".", "_jdstream", "for", "d", "in", "dstreams", "]", "# change the final serializer to sc.serializer", "func", "=", "TransformFunction", "(", "self", ".", "_sc", ",", "lambda", "t", ",", "*", "rdds", ":", "transformFunc", "(", "rdds", ")", ",", "*", "[", "d", ".", "_jrdd_deserializer", "for", "d", "in", "dstreams", "]", ")", "jfunc", "=", "self", ".", "_jvm", ".", "TransformFunction", "(", "func", ")", "jdstream", "=", "self", ".", "_jssc", ".", "transform", "(", "jdstreams", ",", "jfunc", ")", "return", "DStream", "(", "jdstream", ",", "self", ",", "self", ".", "_sc", ".", "serializer", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
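Unlike DStream.transform, this context-level variant hands the function every per-batch RDD at once, which makes cross-stream joins straightforward. A sketch over two hypothetical key-value DStreams stream_a and stream_b:
>>> def join_batches(rdds):
...     left, right = rdds            # plain RDDs for the current batch, in the same order as the DStream list
...     return left.join(right)
...
>>> joined = ssc.transform([stream_a, stream_b], join_batches)      # doctest: +SKIP
>>> joined.pprint()                                                 # doctest: +SKIP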
train
StreamingContext.union
Create a unified DStream from multiple DStreams of the same type and same slide duration.
python/pyspark/streaming/context.py
def union(self, *dstreams): """ Create a unified DStream from multiple DStreams of the same type and same slide duration. """ if not dstreams: raise ValueError("should have at least one DStream to union") if len(dstreams) == 1: return dstreams[0] if len(set(s._jrdd_deserializer for s in dstreams)) > 1: raise ValueError("All DStreams should have same serializer") if len(set(s._slideDuration for s in dstreams)) > 1: raise ValueError("All DStreams should have same slide duration") cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream jdstreams = SparkContext._gateway.new_array(cls, len(dstreams)) for i in range(0, len(dstreams)): jdstreams[i] = dstreams[i]._jdstream return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer)
def union(self, *dstreams): """ Create a unified DStream from multiple DStreams of the same type and same slide duration. """ if not dstreams: raise ValueError("should have at least one DStream to union") if len(dstreams) == 1: return dstreams[0] if len(set(s._jrdd_deserializer for s in dstreams)) > 1: raise ValueError("All DStreams should have same serializer") if len(set(s._slideDuration for s in dstreams)) > 1: raise ValueError("All DStreams should have same slide duration") cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream jdstreams = SparkContext._gateway.new_array(cls, len(dstreams)) for i in range(0, len(dstreams)): jdstreams[i] = dstreams[i]._jdstream return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer)
[ "Create", "a", "unified", "DStream", "from", "multiple", "DStreams", "of", "the", "same", "type", "and", "same", "slide", "duration", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L331-L348
[ "def", "union", "(", "self", ",", "*", "dstreams", ")", ":", "if", "not", "dstreams", ":", "raise", "ValueError", "(", "\"should have at least one DStream to union\"", ")", "if", "len", "(", "dstreams", ")", "==", "1", ":", "return", "dstreams", "[", "0", "]", "if", "len", "(", "set", "(", "s", ".", "_jrdd_deserializer", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same serializer\"", ")", "if", "len", "(", "set", "(", "s", ".", "_slideDuration", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same slide duration\"", ")", "cls", "=", "SparkContext", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "streaming", ".", "api", ".", "java", ".", "JavaDStream", "jdstreams", "=", "SparkContext", ".", "_gateway", ".", "new_array", "(", "cls", ",", "len", "(", "dstreams", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "dstreams", ")", ")", ":", "jdstreams", "[", "i", "]", "=", "dstreams", "[", "i", "]", ".", "_jdstream", "return", "DStream", "(", "self", ".", "_jssc", ".", "union", "(", "jdstreams", ")", ",", "self", ",", "dstreams", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
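A typical use is merging several receivers that ingest the same kind of data before applying a single processing pipeline; as enforced above, the streams must share a serializer and slide duration. Sketch with two hypothetical socket sources:
>>> s1 = ssc.socketTextStream("host1", 9999)      # doctest: +SKIP
>>> s2 = ssc.socketTextStream("host2", 9999)      # doctest: +SKIP
>>> merged = ssc.union(s1, s2)                    # doctest: +SKIP
>>> merged.count().pprint()                       # doctest: +SKIP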
train
StreamingContext.addStreamingListener
Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for receiving system events related to streaming.
python/pyspark/streaming/context.py
def addStreamingListener(self, streamingListener): """ Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for receiving system events related to streaming. """ self._jssc.addStreamingListener(self._jvm.JavaStreamingListenerWrapper( self._jvm.PythonStreamingListenerWrapper(streamingListener)))
def addStreamingListener(self, streamingListener): """ Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for receiving system events related to streaming. """ self._jssc.addStreamingListener(self._jvm.JavaStreamingListenerWrapper( self._jvm.PythonStreamingListenerWrapper(streamingListener)))
[ "Add", "a", "[[", "org", ".", "apache", ".", "spark", ".", "streaming", ".", "scheduler", ".", "StreamingListener", "]]", "object", "for", "receiving", "system", "events", "related", "to", "streaming", "." ]
apache/spark
python
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L350-L356
[ "def", "addStreamingListener", "(", "self", ",", "streamingListener", ")", ":", "self", ".", "_jssc", ".", "addStreamingListener", "(", "self", ".", "_jvm", ".", "JavaStreamingListenerWrapper", "(", "self", ".", "_jvm", ".", "PythonStreamingListenerWrapper", "(", "streamingListener", ")", ")", ")" ]
618d6bff71073c8c93501ab7392c3cc579730f0b
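Listeners are written in Python by subclassing pyspark.streaming.listener.StreamingListener and overriding the callbacks of interest; the event objects passed in are thin wrappers over the JVM-side scheduler events, so field access goes through method calls. A minimal sketch, assuming an active ssc:
>>> from pyspark.streaming.listener import StreamingListener
>>> class BatchLogger(StreamingListener):
...     def onBatchCompleted(self, batchCompleted):
...         info = batchCompleted.batchInfo()
...         print("batch finished, records:", info.numRecords())
...
>>> ssc.addStreamingListener(BatchLogger())       # doctest: +SKIP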
train
load_tf_weights_in_gpt2
Load tf checkpoints in a pytorch model
pytorch_pretrained_bert/modeling_gpt2.py
def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(gpt2_checkpoint_path) print("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) for name, array in zip(names, arrays): name = name[6:] # skip "model/" name = name.split('/') pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+\d+', m_name): l = re.split(r'(\d+)', m_name) else: l = [m_name] if l[0] == 'w' or l[0] == 'g': pointer = getattr(pointer, 'weight') elif l[0] == 'b': pointer = getattr(pointer, 'bias') elif l[0] == 'wpe' or l[0] == 'wte': pointer = getattr(pointer, l[0]) pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model
def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(gpt2_checkpoint_path) print("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) for name, array in zip(names, arrays): name = name[6:] # skip "model/" name = name.split('/') pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+\d+', m_name): l = re.split(r'(\d+)', m_name) else: l = [m_name] if l[0] == 'w' or l[0] == 'g': pointer = getattr(pointer, 'weight') elif l[0] == 'b': pointer = getattr(pointer, 'bias') elif l[0] == 'wpe' or l[0] == 'wte': pointer = getattr(pointer, l[0]) pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model
[ "Load", "tf", "checkpoints", "in", "a", "pytorch", "model" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_gpt2.py#L45-L96
[ "def", "load_tf_weights_in_gpt2", "(", "model", ",", "gpt2_checkpoint_path", ")", ":", "try", ":", "import", "re", "import", "numpy", "as", "np", "import", "tensorflow", "as", "tf", "except", "ImportError", ":", "print", "(", "\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see \"", "\"https://www.tensorflow.org/install/ for installation instructions.\"", ")", "raise", "tf_path", "=", "os", ".", "path", ".", "abspath", "(", "gpt2_checkpoint_path", ")", "print", "(", "\"Converting TensorFlow checkpoint from {}\"", ".", "format", "(", "tf_path", ")", ")", "# Load weights from TF model", "init_vars", "=", "tf", ".", "train", ".", "list_variables", "(", "tf_path", ")", "names", "=", "[", "]", "arrays", "=", "[", "]", "for", "name", ",", "shape", "in", "init_vars", ":", "print", "(", "\"Loading TF weight {} with shape {}\"", ".", "format", "(", "name", ",", "shape", ")", ")", "array", "=", "tf", ".", "train", ".", "load_variable", "(", "tf_path", ",", "name", ")", "names", ".", "append", "(", "name", ")", "arrays", ".", "append", "(", "array", ".", "squeeze", "(", ")", ")", "for", "name", ",", "array", "in", "zip", "(", "names", ",", "arrays", ")", ":", "name", "=", "name", "[", "6", ":", "]", "# skip \"model/\"", "name", "=", "name", ".", "split", "(", "'/'", ")", "pointer", "=", "model", "for", "m_name", "in", "name", ":", "if", "re", ".", "fullmatch", "(", "r'[A-Za-z]+\\d+'", ",", "m_name", ")", ":", "l", "=", "re", ".", "split", "(", "r'(\\d+)'", ",", "m_name", ")", "else", ":", "l", "=", "[", "m_name", "]", "if", "l", "[", "0", "]", "==", "'w'", "or", "l", "[", "0", "]", "==", "'g'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'weight'", ")", "elif", "l", "[", "0", "]", "==", "'b'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'bias'", ")", "elif", "l", "[", "0", "]", "==", "'wpe'", "or", "l", "[", "0", "]", "==", "'wte'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "l", "[", "0", "]", ")", "pointer", "=", "getattr", "(", "pointer", ",", "'weight'", ")", "else", ":", "pointer", "=", "getattr", "(", "pointer", ",", "l", "[", "0", "]", ")", "if", "len", "(", "l", ")", ">=", "2", ":", "num", "=", "int", "(", "l", "[", "1", "]", ")", "pointer", "=", "pointer", "[", "num", "]", "try", ":", "assert", "pointer", ".", "shape", "==", "array", ".", "shape", "except", "AssertionError", "as", "e", ":", "e", ".", "args", "+=", "(", "pointer", ".", "shape", ",", "array", ".", "shape", ")", "raise", "print", "(", "\"Initialize PyTorch weight {}\"", ".", "format", "(", "name", ")", ")", "pointer", ".", "data", "=", "torch", ".", "from_numpy", "(", "array", ")", "return", "model" ]
b832d5bb8a6dfc5965015b828e577677eace601e
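The helper is normally driven by first building an empty model from a config and then pointing it at a downloaded TensorFlow checkpoint, which is how the repository's conversion script uses it. A sketch with a placeholder checkpoint path; the stock GPT2Config defaults correspond to the small (117M) GPT-2:
>>> import torch
>>> from pytorch_pretrained_bert.modeling_gpt2 import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
>>> config = GPT2Config()                                                  # defaults for the 117M model
>>> model = GPT2Model(config)
>>> model = load_tf_weights_in_gpt2(model, "/ckpt/gpt2/model.ckpt")        # placeholder path  # doctest: +SKIP
>>> torch.save(model.state_dict(), "pytorch_model.bin")                    # doctest: +SKIP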
train
GPT2Config.from_json_file
Constructs a `GPT2Config` from a json file of parameters.
pytorch_pretrained_bert/modeling_gpt2.py
def from_json_file(cls, json_file): """Constructs a `GPT2Config` from a json file of parameters.""" with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return cls.from_dict(json.loads(text))
def from_json_file(cls, json_file): """Constructs a `GPT2Config` from a json file of parameters.""" with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return cls.from_dict(json.loads(text))
[ "Constructs", "a", "GPT2Config", "from", "a", "json", "file", "of", "parameters", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_gpt2.py#L162-L166
[ "def", "from_json_file", "(", "cls", ",", "json_file", ")", ":", "with", "open", "(", "json_file", ",", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "reader", ":", "text", "=", "reader", ".", "read", "(", ")", "return", "cls", ".", "from_dict", "(", "json", ".", "loads", "(", "text", ")", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
GPT2Config.to_json_file
Save this instance to a json file.
pytorch_pretrained_bert/modeling_gpt2.py
def to_json_file(self, json_file_path): """ Save this instance to a json file.""" with open(json_file_path, "w", encoding='utf-8') as writer: writer.write(self.to_json_string())
def to_json_file(self, json_file_path): """ Save this instance to a json file.""" with open(json_file_path, "w", encoding='utf-8') as writer: writer.write(self.to_json_string())
[ "Save", "this", "instance", "to", "a", "json", "file", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_gpt2.py#L180-L183
[ "def", "to_json_file", "(", "self", ",", "json_file_path", ")", ":", "with", "open", "(", "json_file_path", ",", "\"w\"", ",", "encoding", "=", "'utf-8'", ")", "as", "writer", ":", "writer", ".", "write", "(", "self", ".", "to_json_string", "(", ")", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
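from_json_file and to_json_file simply round-trip the configuration through plain JSON, which is how a fine-tuned model keeps its hyperparameters next to the weight file. A small sketch using a hypothetical six-layer variant:
>>> from pytorch_pretrained_bert.modeling_gpt2 import GPT2Config
>>> config = GPT2Config(n_layer=6)                       # hypothetical smaller variant
>>> config.to_json_file("gpt2_config.json")
>>> GPT2Config.from_json_file("gpt2_config.json").n_layer
6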
train
GPT2PreTrainedModel.init_weights
Initialize the weights.
pytorch_pretrained_bert/modeling_gpt2.py
def init_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_()
def init_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_()
[ "Initialize", "the", "weights", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_gpt2.py#L351-L362
[ "def", "init_weights", "(", "self", ",", "module", ")", ":", "if", "isinstance", "(", "module", ",", "(", "nn", ".", "Linear", ",", "nn", ".", "Embedding", ")", ")", ":", "# Slightly different from the TF version which uses truncated_normal for initialization", "# cf https://github.com/pytorch/pytorch/pull/5617", "module", ".", "weight", ".", "data", ".", "normal_", "(", "mean", "=", "0.0", ",", "std", "=", "self", ".", "config", ".", "initializer_range", ")", "elif", "isinstance", "(", "module", ",", "LayerNorm", ")", ":", "module", ".", "bias", ".", "data", ".", "zero_", "(", ")", "module", ".", "weight", ".", "data", ".", "fill_", "(", "1.0", ")", "if", "isinstance", "(", "module", ",", "nn", ".", "Linear", ")", "and", "module", ".", "bias", "is", "not", "None", ":", "module", ".", "bias", ".", "data", ".", "zero_", "(", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
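init_weights is not called per layer by hand; the model constructors hand it to nn.Module.apply, which visits every submodule once. A sketch of the equivalent explicit call on a freshly built model:
>>> from pytorch_pretrained_bert.modeling_gpt2 import GPT2Config, GPT2Model
>>> model = GPT2Model(GPT2Config())
>>> _ = model.apply(model.init_weights)     # same effect as the constructor's self.apply(self.init_weights)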
train
GPT2PreTrainedModel.from_pretrained
Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `gpt2` - a path or url to a pretrained model archive containing: . `gpt2_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a GPT2Model instance - a path or url to a pretrained model archive containing: . `gpt2_config.json` a configuration file for the model . a TensorFlow checkpoint with trained weights from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models *inputs, **kwargs: additional input for the specific GPT class
pytorch_pretrained_bert/modeling_gpt2.py
def from_pretrained( cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs ): """ Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `gpt2` - a path or url to a pretrained model archive containing: . `gpt2_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a GPT2Model instance - a path or url to a pretrained model archive containing: . `gpt2_config.json` a configuration file for the model . a TensorFlow checkpoint with trained weights from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models *inputs, **kwargs: additional input for the specific GPT class """ if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) resolved_config_file = cached_path(config_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find files {} and {} " "at this path or url.".format( pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, archive_file, config_file ) ) return None if resolved_archive_file == archive_file and resolved_config_file == config_file: logger.info("loading weights file {}".format(archive_file)) logger.info("loading configuration file {}".format(config_file)) else: logger.info("loading weights file {} from cache at {}".format( archive_file, resolved_archive_file)) logger.info("loading configuration file {} from cache at {}".format( config_file, resolved_config_file)) # Load config config = GPT2Config.from_json_file(resolved_config_file) logger.info("Model config {}".format(config)) # Instantiate model. 
model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') if from_tf: # Directly load from a TensorFlow checkpoint (stored as NumPy array) return load_tf_weights_in_gpt2(model, resolved_archive_file) old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if key.endswith(".g"): new_key = key[:-2] + ".weight" elif key.endswith(".b"): new_key = key[:-2] + ".bias" elif key.endswith(".w"): new_key = key[:-2] + ".weight" if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs ) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") start_model = model if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()): start_model = model.transformer load(start_model, prefix="") if len(missing_keys) > 0: logger.info( "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys) ) if len(unexpected_keys) > 0: logger.info( "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys) ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs)) ) # Make sure we are still sharing the output and input embeddings after loading weights model.set_tied() return model
def from_pretrained( cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs ): """ Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `gpt2` - a path or url to a pretrained model archive containing: . `gpt2_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a GPT2Model instance - a path or url to a pretrained model archive containing: . `gpt2_config.json` a configuration file for the model . a TensorFlow checkpoint with trained weights from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models *inputs, **kwargs: additional input for the specific GPT class """ if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) resolved_config_file = cached_path(config_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find files {} and {} " "at this path or url.".format( pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, archive_file, config_file ) ) return None if resolved_archive_file == archive_file and resolved_config_file == config_file: logger.info("loading weights file {}".format(archive_file)) logger.info("loading configuration file {}".format(config_file)) else: logger.info("loading weights file {} from cache at {}".format( archive_file, resolved_archive_file)) logger.info("loading configuration file {} from cache at {}".format( config_file, resolved_config_file)) # Load config config = GPT2Config.from_json_file(resolved_config_file) logger.info("Model config {}".format(config)) # Instantiate model. 
model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') if from_tf: # Directly load from a TensorFlow checkpoint (stored as NumPy array) return load_tf_weights_in_gpt2(model, resolved_archive_file) old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if key.endswith(".g"): new_key = key[:-2] + ".weight" elif key.endswith(".b"): new_key = key[:-2] + ".bias" elif key.endswith(".w"): new_key = key[:-2] + ".weight" if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs ) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") start_model = model if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()): start_model = model.transformer load(start_model, prefix="") if len(missing_keys) > 0: logger.info( "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys) ) if len(unexpected_keys) > 0: logger.info( "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys) ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs)) ) # Make sure we are still sharing the output and input embeddings after loading weights model.set_tied() return model
[ "Instantiate", "a", "GPT2PreTrainedModel", "from", "a", "pre", "-", "trained", "model", "file", "or", "a", "pytorch", "state", "dict", ".", "Download", "and", "cache", "the", "pre", "-", "trained", "model", "file", "if", "needed", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_gpt2.py#L365-L480
[ "def", "from_pretrained", "(", "cls", ",", "pretrained_model_name_or_path", ",", "state_dict", "=", "None", ",", "cache_dir", "=", "None", ",", "from_tf", "=", "False", ",", "*", "inputs", ",", "*", "*", "kwargs", ")", ":", "if", "pretrained_model_name_or_path", "in", "PRETRAINED_MODEL_ARCHIVE_MAP", ":", "archive_file", "=", "PRETRAINED_MODEL_ARCHIVE_MAP", "[", "pretrained_model_name_or_path", "]", "config_file", "=", "PRETRAINED_CONFIG_ARCHIVE_MAP", "[", "pretrained_model_name_or_path", "]", "else", ":", "archive_file", "=", "os", ".", "path", ".", "join", "(", "pretrained_model_name_or_path", ",", "WEIGHTS_NAME", ")", "config_file", "=", "os", ".", "path", ".", "join", "(", "pretrained_model_name_or_path", ",", "CONFIG_NAME", ")", "# redirect to the cache, if necessary", "try", ":", "resolved_archive_file", "=", "cached_path", "(", "archive_file", ",", "cache_dir", "=", "cache_dir", ")", "resolved_config_file", "=", "cached_path", "(", "config_file", ",", "cache_dir", "=", "cache_dir", ")", "except", "EnvironmentError", ":", "logger", ".", "error", "(", "\"Model name '{}' was not found in model name list ({}). \"", "\"We assumed '{}' was a path or url but couldn't find files {} and {} \"", "\"at this path or url.\"", ".", "format", "(", "pretrained_model_name_or_path", ",", "\", \"", ".", "join", "(", "PRETRAINED_MODEL_ARCHIVE_MAP", ".", "keys", "(", ")", ")", ",", "pretrained_model_name_or_path", ",", "archive_file", ",", "config_file", ")", ")", "return", "None", "if", "resolved_archive_file", "==", "archive_file", "and", "resolved_config_file", "==", "config_file", ":", "logger", ".", "info", "(", "\"loading weights file {}\"", ".", "format", "(", "archive_file", ")", ")", "logger", ".", "info", "(", "\"loading configuration file {}\"", ".", "format", "(", "config_file", ")", ")", "else", ":", "logger", ".", "info", "(", "\"loading weights file {} from cache at {}\"", ".", "format", "(", "archive_file", ",", "resolved_archive_file", ")", ")", "logger", ".", "info", "(", "\"loading configuration file {} from cache at {}\"", ".", "format", "(", "config_file", ",", "resolved_config_file", ")", ")", "# Load config", "config", "=", "GPT2Config", ".", "from_json_file", "(", "resolved_config_file", ")", "logger", ".", "info", "(", "\"Model config {}\"", ".", "format", "(", "config", ")", ")", "# Instantiate model.", "model", "=", "cls", "(", "config", ",", "*", "inputs", ",", "*", "*", "kwargs", ")", "if", "state_dict", "is", "None", "and", "not", "from_tf", ":", "state_dict", "=", "torch", ".", "load", "(", "resolved_archive_file", ",", "map_location", "=", "'cpu'", ")", "if", "from_tf", ":", "# Directly load from a TensorFlow checkpoint (stored as NumPy array)", "return", "load_tf_weights_in_gpt2", "(", "model", ",", "resolved_archive_file", ")", "old_keys", "=", "[", "]", "new_keys", "=", "[", "]", "for", "key", "in", "state_dict", ".", "keys", "(", ")", ":", "new_key", "=", "None", "if", "key", ".", "endswith", "(", "\".g\"", ")", ":", "new_key", "=", "key", "[", ":", "-", "2", "]", "+", "\".weight\"", "elif", "key", ".", "endswith", "(", "\".b\"", ")", ":", "new_key", "=", "key", "[", ":", "-", "2", "]", "+", "\".bias\"", "elif", "key", ".", "endswith", "(", "\".w\"", ")", ":", "new_key", "=", "key", "[", ":", "-", "2", "]", "+", "\".weight\"", "if", "new_key", ":", "old_keys", ".", "append", "(", "key", ")", "new_keys", ".", "append", "(", "new_key", ")", "for", "old_key", ",", "new_key", "in", "zip", "(", "old_keys", ",", "new_keys", ")", ":", "state_dict", "[", "new_key", "]", "=", 
"state_dict", ".", "pop", "(", "old_key", ")", "missing_keys", "=", "[", "]", "unexpected_keys", "=", "[", "]", "error_msgs", "=", "[", "]", "# copy state_dict so _load_from_state_dict can modify it", "metadata", "=", "getattr", "(", "state_dict", ",", "\"_metadata\"", ",", "None", ")", "state_dict", "=", "state_dict", ".", "copy", "(", ")", "if", "metadata", "is", "not", "None", ":", "state_dict", ".", "_metadata", "=", "metadata", "def", "load", "(", "module", ",", "prefix", "=", "\"\"", ")", ":", "local_metadata", "=", "{", "}", "if", "metadata", "is", "None", "else", "metadata", ".", "get", "(", "prefix", "[", ":", "-", "1", "]", ",", "{", "}", ")", "module", ".", "_load_from_state_dict", "(", "state_dict", ",", "prefix", ",", "local_metadata", ",", "True", ",", "missing_keys", ",", "unexpected_keys", ",", "error_msgs", ")", "for", "name", ",", "child", "in", "module", ".", "_modules", ".", "items", "(", ")", ":", "if", "child", "is", "not", "None", ":", "load", "(", "child", ",", "prefix", "+", "name", "+", "\".\"", ")", "start_model", "=", "model", "if", "hasattr", "(", "model", ",", "\"transformer\"", ")", "and", "all", "(", "not", "s", ".", "startswith", "(", "'transformer.'", ")", "for", "s", "in", "state_dict", ".", "keys", "(", ")", ")", ":", "start_model", "=", "model", ".", "transformer", "load", "(", "start_model", ",", "prefix", "=", "\"\"", ")", "if", "len", "(", "missing_keys", ")", ">", "0", ":", "logger", ".", "info", "(", "\"Weights of {} not initialized from pretrained model: {}\"", ".", "format", "(", "model", ".", "__class__", ".", "__name__", ",", "missing_keys", ")", ")", "if", "len", "(", "unexpected_keys", ")", ">", "0", ":", "logger", ".", "info", "(", "\"Weights from pretrained model not used in {}: {}\"", ".", "format", "(", "model", ".", "__class__", ".", "__name__", ",", "unexpected_keys", ")", ")", "if", "len", "(", "error_msgs", ")", ">", "0", ":", "raise", "RuntimeError", "(", "\"Error(s) in loading state_dict for {}:\\n\\t{}\"", ".", "format", "(", "model", ".", "__class__", ".", "__name__", ",", "\"\\n\\t\"", ".", "join", "(", "error_msgs", ")", ")", ")", "# Make sure we are still sharing the output and input embeddings after loading weights", "model", ".", "set_tied", "(", ")", "return", "model" ]
b832d5bb8a6dfc5965015b828e577677eace601e
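A hedged usage sketch for the loading path documented above. It assumes the package's top-level exports include GPT2LMHeadModel (which inherits this from_pretrained) and that the forward pass returns the language-modeling logits plus the cached presents; the input ids below are made-up illustrations, not real tokenized text.

import torch
from pytorch_pretrained_bert import GPT2LMHeadModel

# Load by shortcut name (downloads and caches the archive) ...
model = GPT2LMHeadModel.from_pretrained('gpt2')
# ... or from a local directory holding gpt2_config.json + pytorch_model.bin:
# model = GPT2LMHeadModel.from_pretrained('/path/to/gpt2_dir')

model.eval()
input_ids = torch.tensor([[464, 3290, 318]])  # arbitrary GPT-2 BPE ids, illustrative only
with torch.no_grad():
    lm_logits, presents = model(input_ids)
print(lm_logits.shape)  # (batch, sequence, vocab)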
train
convert_examples_to_features
Loads a data file into a list of `InputFeature`s.
examples/extract_features.py
def convert_examples_to_features(examples, seq_length, tokenizer): """Loads a data file into a list of `InputFeature`s.""" features = [] for (ex_index, example) in enumerate(examples): tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > seq_length - 2: tokens_a = tokens_a[0:(seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambigiously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] input_type_ids = [] tokens.append("[CLS]") input_type_ids.append(0) for token in tokens_a: tokens.append(token) input_type_ids.append(0) tokens.append("[SEP]") input_type_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) input_type_ids.append(1) tokens.append("[SEP]") input_type_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < seq_length: input_ids.append(0) input_mask.append(0) input_type_ids.append(0) assert len(input_ids) == seq_length assert len(input_mask) == seq_length assert len(input_type_ids) == seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("unique_id: %s" % (example.unique_id)) logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids])) features.append( InputFeatures( unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids)) return features
def convert_examples_to_features(examples, seq_length, tokenizer): """Loads a data file into a list of `InputFeature`s.""" features = [] for (ex_index, example) in enumerate(examples): tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > seq_length - 2: tokens_a = tokens_a[0:(seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambigiously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] input_type_ids = [] tokens.append("[CLS]") input_type_ids.append(0) for token in tokens_a: tokens.append(token) input_type_ids.append(0) tokens.append("[SEP]") input_type_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) input_type_ids.append(1) tokens.append("[SEP]") input_type_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < seq_length: input_ids.append(0) input_mask.append(0) input_type_ids.append(0) assert len(input_ids) == seq_length assert len(input_mask) == seq_length assert len(input_type_ids) == seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("unique_id: %s" % (example.unique_id)) logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids])) features.append( InputFeatures( unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids)) return features
[ "Loads", "a", "data", "file", "into", "a", "list", "of", "InputFeature", "s", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/extract_features.py#L59-L147
[ "def", "convert_examples_to_features", "(", "examples", ",", "seq_length", ",", "tokenizer", ")", ":", "features", "=", "[", "]", "for", "(", "ex_index", ",", "example", ")", "in", "enumerate", "(", "examples", ")", ":", "tokens_a", "=", "tokenizer", ".", "tokenize", "(", "example", ".", "text_a", ")", "tokens_b", "=", "None", "if", "example", ".", "text_b", ":", "tokens_b", "=", "tokenizer", ".", "tokenize", "(", "example", ".", "text_b", ")", "if", "tokens_b", ":", "# Modifies `tokens_a` and `tokens_b` in place so that the total", "# length is less than the specified length.", "# Account for [CLS], [SEP], [SEP] with \"- 3\"", "_truncate_seq_pair", "(", "tokens_a", ",", "tokens_b", ",", "seq_length", "-", "3", ")", "else", ":", "# Account for [CLS] and [SEP] with \"- 2\"", "if", "len", "(", "tokens_a", ")", ">", "seq_length", "-", "2", ":", "tokens_a", "=", "tokens_a", "[", "0", ":", "(", "seq_length", "-", "2", ")", "]", "# The convention in BERT is:", "# (a) For sequence pairs:", "# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]", "# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1", "# (b) For single sequences:", "# tokens: [CLS] the dog is hairy . [SEP]", "# type_ids: 0 0 0 0 0 0 0", "#", "# Where \"type_ids\" are used to indicate whether this is the first", "# sequence or the second sequence. The embedding vectors for `type=0` and", "# `type=1` were learned during pre-training and are added to the wordpiece", "# embedding vector (and position vector). This is not *strictly* necessary", "# since the [SEP] token unambigiously separates the sequences, but it makes", "# it easier for the model to learn the concept of sequences.", "#", "# For classification tasks, the first vector (corresponding to [CLS]) is", "# used as as the \"sentence vector\". Note that this only makes sense because", "# the entire model is fine-tuned.", "tokens", "=", "[", "]", "input_type_ids", "=", "[", "]", "tokens", ".", "append", "(", "\"[CLS]\"", ")", "input_type_ids", ".", "append", "(", "0", ")", "for", "token", "in", "tokens_a", ":", "tokens", ".", "append", "(", "token", ")", "input_type_ids", ".", "append", "(", "0", ")", "tokens", ".", "append", "(", "\"[SEP]\"", ")", "input_type_ids", ".", "append", "(", "0", ")", "if", "tokens_b", ":", "for", "token", "in", "tokens_b", ":", "tokens", ".", "append", "(", "token", ")", "input_type_ids", ".", "append", "(", "1", ")", "tokens", ".", "append", "(", "\"[SEP]\"", ")", "input_type_ids", ".", "append", "(", "1", ")", "input_ids", "=", "tokenizer", ".", "convert_tokens_to_ids", "(", "tokens", ")", "# The mask has 1 for real tokens and 0 for padding tokens. 
Only real", "# tokens are attended to.", "input_mask", "=", "[", "1", "]", "*", "len", "(", "input_ids", ")", "# Zero-pad up to the sequence length.", "while", "len", "(", "input_ids", ")", "<", "seq_length", ":", "input_ids", ".", "append", "(", "0", ")", "input_mask", ".", "append", "(", "0", ")", "input_type_ids", ".", "append", "(", "0", ")", "assert", "len", "(", "input_ids", ")", "==", "seq_length", "assert", "len", "(", "input_mask", ")", "==", "seq_length", "assert", "len", "(", "input_type_ids", ")", "==", "seq_length", "if", "ex_index", "<", "5", ":", "logger", ".", "info", "(", "\"*** Example ***\"", ")", "logger", ".", "info", "(", "\"unique_id: %s\"", "%", "(", "example", ".", "unique_id", ")", ")", "logger", ".", "info", "(", "\"tokens: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "tokens", "]", ")", ")", "logger", ".", "info", "(", "\"input_ids: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "input_ids", "]", ")", ")", "logger", ".", "info", "(", "\"input_mask: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "input_mask", "]", ")", ")", "logger", ".", "info", "(", "\"input_type_ids: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "input_type_ids", "]", ")", ")", "features", ".", "append", "(", "InputFeatures", "(", "unique_id", "=", "example", ".", "unique_id", ",", "tokens", "=", "tokens", ",", "input_ids", "=", "input_ids", ",", "input_mask", "=", "input_mask", ",", "input_type_ids", "=", "input_type_ids", ")", ")", "return", "features" ]
b832d5bb8a6dfc5965015b828e577677eace601e
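A minimal sketch of the [CLS]/[SEP] layout and zero-padding that the function above produces, assuming the repository's BertTokenizer and an illustrative sentence pair; this is not the repository's own test code.

from pytorch_pretrained_bert import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokens_a = tokenizer.tokenize("who was jim henson ?")
tokens_b = tokenizer.tokenize("jim henson was a puppeteer")

# Same layout the function builds: [CLS] A [SEP] B [SEP], type ids 0 for the
# first segment and 1 for the second.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
input_type_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)

# Zero-pad to the fixed sequence length, exactly as in the while-loop above.
seq_length = 128
padding = [0] * (seq_length - len(input_ids))
input_ids += padding
input_mask += padding
input_type_ids += padding
assert len(input_ids) == len(input_mask) == len(input_type_ids) == seq_length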
train
read_examples
Read a list of `InputExample`s from an input file.
examples/extract_features.py
def read_examples(input_file):
    """Read a list of `InputExample`s from an input file."""
    examples = []
    unique_id = 0
    with open(input_file, "r", encoding='utf-8') as reader:
        while True:
            line = reader.readline()
            if not line:
                break
            line = line.strip()
            text_a = None
            text_b = None
            m = re.match(r"^(.*) \|\|\| (.*)$", line)
            if m is None:
                text_a = line
            else:
                text_a = m.group(1)
                text_b = m.group(2)
            examples.append(
                InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
            unique_id += 1
    return examples

def read_examples(input_file):
    """Read a list of `InputExample`s from an input file."""
    examples = []
    unique_id = 0
    with open(input_file, "r", encoding='utf-8') as reader:
        while True:
            line = reader.readline()
            if not line:
                break
            line = line.strip()
            text_a = None
            text_b = None
            m = re.match(r"^(.*) \|\|\| (.*)$", line)
            if m is None:
                text_a = line
            else:
                text_a = m.group(1)
                text_b = m.group(2)
            examples.append(
                InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
            unique_id += 1
    return examples
[ "Read", "a", "list", "of", "InputExample", "s", "from", "an", "input", "file", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/extract_features.py#L167-L188
[ "def", "read_examples", "(", "input_file", ")", ":", "examples", "=", "[", "]", "unique_id", "=", "0", "with", "open", "(", "input_file", ",", "\"r\"", ",", "encoding", "=", "'utf-8'", ")", "as", "reader", ":", "while", "True", ":", "line", "=", "reader", ".", "readline", "(", ")", "if", "not", "line", ":", "break", "line", "=", "line", ".", "strip", "(", ")", "text_a", "=", "None", "text_b", "=", "None", "m", "=", "re", ".", "match", "(", "r\"^(.*) \\|\\|\\| (.*)$\"", ",", "line", ")", "if", "m", "is", "None", ":", "text_a", "=", "line", "else", ":", "text_a", "=", "m", ".", "group", "(", "1", ")", "text_b", "=", "m", ".", "group", "(", "2", ")", "examples", ".", "append", "(", "InputExample", "(", "unique_id", "=", "unique_id", ",", "text_a", "=", "text_a", ",", "text_b", "=", "text_b", ")", ")", "unique_id", "+=", "1", "return", "examples" ]
b832d5bb8a6dfc5965015b828e577677eace601e
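For reference, a small sketch of the line format the regex above accepts (text_a optionally followed by " ||| " and text_b); the file name is hypothetical and read_examples is assumed to be in scope from examples/extract_features.py.

with open("sentences.txt", "w", encoding="utf-8") as f:
    f.write("Who was Jim Henson ? ||| Jim Henson was a puppeteer\n")
    f.write("A single sentence with no second segment\n")

examples = read_examples("sentences.txt")
# examples[0].text_a == "Who was Jim Henson ?"
# examples[0].text_b == "Jim Henson was a puppeteer"
# examples[1].text_a == "A single sentence with no second segment"; text_b is None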
train
read_squad_examples
Read a SQuAD json file into a list of SquadExample.
examples/run_squad.py
def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: if version_2_with_negative: is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples
def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: if version_2_with_negative: is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples
[ "Read", "a", "SQuAD", "json", "file", "into", "a", "list", "of", "SquadExample", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L122-L197
[ "def", "read_squad_examples", "(", "input_file", ",", "is_training", ",", "version_2_with_negative", ")", ":", "with", "open", "(", "input_file", ",", "\"r\"", ",", "encoding", "=", "'utf-8'", ")", "as", "reader", ":", "input_data", "=", "json", ".", "load", "(", "reader", ")", "[", "\"data\"", "]", "def", "is_whitespace", "(", "c", ")", ":", "if", "c", "==", "\" \"", "or", "c", "==", "\"\\t\"", "or", "c", "==", "\"\\r\"", "or", "c", "==", "\"\\n\"", "or", "ord", "(", "c", ")", "==", "0x202F", ":", "return", "True", "return", "False", "examples", "=", "[", "]", "for", "entry", "in", "input_data", ":", "for", "paragraph", "in", "entry", "[", "\"paragraphs\"", "]", ":", "paragraph_text", "=", "paragraph", "[", "\"context\"", "]", "doc_tokens", "=", "[", "]", "char_to_word_offset", "=", "[", "]", "prev_is_whitespace", "=", "True", "for", "c", "in", "paragraph_text", ":", "if", "is_whitespace", "(", "c", ")", ":", "prev_is_whitespace", "=", "True", "else", ":", "if", "prev_is_whitespace", ":", "doc_tokens", ".", "append", "(", "c", ")", "else", ":", "doc_tokens", "[", "-", "1", "]", "+=", "c", "prev_is_whitespace", "=", "False", "char_to_word_offset", ".", "append", "(", "len", "(", "doc_tokens", ")", "-", "1", ")", "for", "qa", "in", "paragraph", "[", "\"qas\"", "]", ":", "qas_id", "=", "qa", "[", "\"id\"", "]", "question_text", "=", "qa", "[", "\"question\"", "]", "start_position", "=", "None", "end_position", "=", "None", "orig_answer_text", "=", "None", "is_impossible", "=", "False", "if", "is_training", ":", "if", "version_2_with_negative", ":", "is_impossible", "=", "qa", "[", "\"is_impossible\"", "]", "if", "(", "len", "(", "qa", "[", "\"answers\"", "]", ")", "!=", "1", ")", "and", "(", "not", "is_impossible", ")", ":", "raise", "ValueError", "(", "\"For training, each question should have exactly 1 answer.\"", ")", "if", "not", "is_impossible", ":", "answer", "=", "qa", "[", "\"answers\"", "]", "[", "0", "]", "orig_answer_text", "=", "answer", "[", "\"text\"", "]", "answer_offset", "=", "answer", "[", "\"answer_start\"", "]", "answer_length", "=", "len", "(", "orig_answer_text", ")", "start_position", "=", "char_to_word_offset", "[", "answer_offset", "]", "end_position", "=", "char_to_word_offset", "[", "answer_offset", "+", "answer_length", "-", "1", "]", "# Only add answers where the text can be exactly recovered from the", "# document. If this CAN'T happen it's likely due to weird Unicode", "# stuff so we will just skip the example.", "#", "# Note that this means for training mode, every example is NOT", "# guaranteed to be preserved.", "actual_text", "=", "\" \"", ".", "join", "(", "doc_tokens", "[", "start_position", ":", "(", "end_position", "+", "1", ")", "]", ")", "cleaned_answer_text", "=", "\" \"", ".", "join", "(", "whitespace_tokenize", "(", "orig_answer_text", ")", ")", "if", "actual_text", ".", "find", "(", "cleaned_answer_text", ")", "==", "-", "1", ":", "logger", ".", "warning", "(", "\"Could not find answer: '%s' vs. 
'%s'\"", ",", "actual_text", ",", "cleaned_answer_text", ")", "continue", "else", ":", "start_position", "=", "-", "1", "end_position", "=", "-", "1", "orig_answer_text", "=", "\"\"", "example", "=", "SquadExample", "(", "qas_id", "=", "qas_id", ",", "question_text", "=", "question_text", ",", "doc_tokens", "=", "doc_tokens", ",", "orig_answer_text", "=", "orig_answer_text", ",", "start_position", "=", "start_position", ",", "end_position", "=", "end_position", ",", "is_impossible", "=", "is_impossible", ")", "examples", ".", "append", "(", "example", ")", "return", "examples" ]
b832d5bb8a6dfc5965015b828e577677eace601e
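A minimal SQuAD-style payload that exercises the reader above; only the keys the function actually touches are included, the values are invented, and the file name is illustrative.

import json

minimal_squad = {
    "data": [{
        "paragraphs": [{
            "context": "The leader was John Smith (1895-1943).",
            "qas": [{
                "id": "q1",
                "question": "What year was John Smith born?",
                "is_impossible": False,  # only consulted when version_2_with_negative
                "answers": [{"text": "1895", "answer_start": 27}],
            }],
        }],
    }],
}

with open("tiny_squad.json", "w", encoding="utf-8") as f:
    json.dump(minimal_squad, f)

examples = read_squad_examples("tiny_squad.json", is_training=True,
                               version_2_with_negative=False)
# doc_tokens are whitespace words ending in '(1895-1943).', and answer_start=27
# maps both start_position and end_position onto that final token (index 5).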
train
convert_examples_to_features
Loads a data file into a list of `InputBatch`s.
examples/run_squad.py
def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training): """Loads a data file into a list of `InputBatch`s.""" unique_id = 1000000000 features = [] for (example_index, example) in enumerate(examples): query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length start_position = None end_position = None if is_training and not example.is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. 
doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and example.is_impossible: start_position = 0 end_position = 0 if example_index < 20: logger.info("*** Example ***") logger.info("unique_id: %s" % (unique_id)) logger.info("example_index: %s" % (example_index)) logger.info("doc_span_index: %s" % (doc_span_index)) logger.info("tokens: %s" % " ".join(tokens)) logger.info("token_to_orig_map: %s" % " ".join([ "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])) logger.info("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in token_is_max_context.items() ])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info( "input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) if is_training and example.is_impossible: logger.info("impossible example") if is_training and not example.is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) logger.info("start_position: %d" % (start_position)) logger.info("end_position: %d" % (end_position)) logger.info( "answer: %s" % (answer_text)) features.append( InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible)) unique_id += 1 return features
def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training): """Loads a data file into a list of `InputBatch`s.""" unique_id = 1000000000 features = [] for (example_index, example) in enumerate(examples): query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length start_position = None end_position = None if is_training and not example.is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. 
doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and example.is_impossible: start_position = 0 end_position = 0 if example_index < 20: logger.info("*** Example ***") logger.info("unique_id: %s" % (unique_id)) logger.info("example_index: %s" % (example_index)) logger.info("doc_span_index: %s" % (doc_span_index)) logger.info("tokens: %s" % " ".join(tokens)) logger.info("token_to_orig_map: %s" % " ".join([ "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])) logger.info("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in token_is_max_context.items() ])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info( "input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) if is_training and example.is_impossible: logger.info("impossible example") if is_training and not example.is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) logger.info("start_position: %d" % (start_position)) logger.info("end_position: %d" % (end_position)) logger.info( "answer: %s" % (answer_text)) features.append( InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible)) unique_id += 1 return features
[ "Loads", "a", "data", "file", "into", "a", "list", "of", "InputBatch", "s", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L200-L360
[ "def", "convert_examples_to_features", "(", "examples", ",", "tokenizer", ",", "max_seq_length", ",", "doc_stride", ",", "max_query_length", ",", "is_training", ")", ":", "unique_id", "=", "1000000000", "features", "=", "[", "]", "for", "(", "example_index", ",", "example", ")", "in", "enumerate", "(", "examples", ")", ":", "query_tokens", "=", "tokenizer", ".", "tokenize", "(", "example", ".", "question_text", ")", "if", "len", "(", "query_tokens", ")", ">", "max_query_length", ":", "query_tokens", "=", "query_tokens", "[", "0", ":", "max_query_length", "]", "tok_to_orig_index", "=", "[", "]", "orig_to_tok_index", "=", "[", "]", "all_doc_tokens", "=", "[", "]", "for", "(", "i", ",", "token", ")", "in", "enumerate", "(", "example", ".", "doc_tokens", ")", ":", "orig_to_tok_index", ".", "append", "(", "len", "(", "all_doc_tokens", ")", ")", "sub_tokens", "=", "tokenizer", ".", "tokenize", "(", "token", ")", "for", "sub_token", "in", "sub_tokens", ":", "tok_to_orig_index", ".", "append", "(", "i", ")", "all_doc_tokens", ".", "append", "(", "sub_token", ")", "tok_start_position", "=", "None", "tok_end_position", "=", "None", "if", "is_training", "and", "example", ".", "is_impossible", ":", "tok_start_position", "=", "-", "1", "tok_end_position", "=", "-", "1", "if", "is_training", "and", "not", "example", ".", "is_impossible", ":", "tok_start_position", "=", "orig_to_tok_index", "[", "example", ".", "start_position", "]", "if", "example", ".", "end_position", "<", "len", "(", "example", ".", "doc_tokens", ")", "-", "1", ":", "tok_end_position", "=", "orig_to_tok_index", "[", "example", ".", "end_position", "+", "1", "]", "-", "1", "else", ":", "tok_end_position", "=", "len", "(", "all_doc_tokens", ")", "-", "1", "(", "tok_start_position", ",", "tok_end_position", ")", "=", "_improve_answer_span", "(", "all_doc_tokens", ",", "tok_start_position", ",", "tok_end_position", ",", "tokenizer", ",", "example", ".", "orig_answer_text", ")", "# The -3 accounts for [CLS], [SEP] and [SEP]", "max_tokens_for_doc", "=", "max_seq_length", "-", "len", "(", "query_tokens", ")", "-", "3", "# We can have documents that are longer than the maximum sequence length.", "# To deal with this we do a sliding window approach, where we take chunks", "# of the up to our max length with a stride of `doc_stride`.", "_DocSpan", "=", "collections", ".", "namedtuple", "(", "# pylint: disable=invalid-name", "\"DocSpan\"", ",", "[", "\"start\"", ",", "\"length\"", "]", ")", "doc_spans", "=", "[", "]", "start_offset", "=", "0", "while", "start_offset", "<", "len", "(", "all_doc_tokens", ")", ":", "length", "=", "len", "(", "all_doc_tokens", ")", "-", "start_offset", "if", "length", ">", "max_tokens_for_doc", ":", "length", "=", "max_tokens_for_doc", "doc_spans", ".", "append", "(", "_DocSpan", "(", "start", "=", "start_offset", ",", "length", "=", "length", ")", ")", "if", "start_offset", "+", "length", "==", "len", "(", "all_doc_tokens", ")", ":", "break", "start_offset", "+=", "min", "(", "length", ",", "doc_stride", ")", "for", "(", "doc_span_index", ",", "doc_span", ")", "in", "enumerate", "(", "doc_spans", ")", ":", "tokens", "=", "[", "]", "token_to_orig_map", "=", "{", "}", "token_is_max_context", "=", "{", "}", "segment_ids", "=", "[", "]", "tokens", ".", "append", "(", "\"[CLS]\"", ")", "segment_ids", ".", "append", "(", "0", ")", "for", "token", "in", "query_tokens", ":", "tokens", ".", "append", "(", "token", ")", "segment_ids", ".", "append", "(", "0", ")", "tokens", ".", "append", "(", "\"[SEP]\"", ")", 
"segment_ids", ".", "append", "(", "0", ")", "for", "i", "in", "range", "(", "doc_span", ".", "length", ")", ":", "split_token_index", "=", "doc_span", ".", "start", "+", "i", "token_to_orig_map", "[", "len", "(", "tokens", ")", "]", "=", "tok_to_orig_index", "[", "split_token_index", "]", "is_max_context", "=", "_check_is_max_context", "(", "doc_spans", ",", "doc_span_index", ",", "split_token_index", ")", "token_is_max_context", "[", "len", "(", "tokens", ")", "]", "=", "is_max_context", "tokens", ".", "append", "(", "all_doc_tokens", "[", "split_token_index", "]", ")", "segment_ids", ".", "append", "(", "1", ")", "tokens", ".", "append", "(", "\"[SEP]\"", ")", "segment_ids", ".", "append", "(", "1", ")", "input_ids", "=", "tokenizer", ".", "convert_tokens_to_ids", "(", "tokens", ")", "# The mask has 1 for real tokens and 0 for padding tokens. Only real", "# tokens are attended to.", "input_mask", "=", "[", "1", "]", "*", "len", "(", "input_ids", ")", "# Zero-pad up to the sequence length.", "while", "len", "(", "input_ids", ")", "<", "max_seq_length", ":", "input_ids", ".", "append", "(", "0", ")", "input_mask", ".", "append", "(", "0", ")", "segment_ids", ".", "append", "(", "0", ")", "assert", "len", "(", "input_ids", ")", "==", "max_seq_length", "assert", "len", "(", "input_mask", ")", "==", "max_seq_length", "assert", "len", "(", "segment_ids", ")", "==", "max_seq_length", "start_position", "=", "None", "end_position", "=", "None", "if", "is_training", "and", "not", "example", ".", "is_impossible", ":", "# For training, if our document chunk does not contain an annotation", "# we throw it out, since there is nothing to predict.", "doc_start", "=", "doc_span", ".", "start", "doc_end", "=", "doc_span", ".", "start", "+", "doc_span", ".", "length", "-", "1", "out_of_span", "=", "False", "if", "not", "(", "tok_start_position", ">=", "doc_start", "and", "tok_end_position", "<=", "doc_end", ")", ":", "out_of_span", "=", "True", "if", "out_of_span", ":", "start_position", "=", "0", "end_position", "=", "0", "else", ":", "doc_offset", "=", "len", "(", "query_tokens", ")", "+", "2", "start_position", "=", "tok_start_position", "-", "doc_start", "+", "doc_offset", "end_position", "=", "tok_end_position", "-", "doc_start", "+", "doc_offset", "if", "is_training", "and", "example", ".", "is_impossible", ":", "start_position", "=", "0", "end_position", "=", "0", "if", "example_index", "<", "20", ":", "logger", ".", "info", "(", "\"*** Example ***\"", ")", "logger", ".", "info", "(", "\"unique_id: %s\"", "%", "(", "unique_id", ")", ")", "logger", ".", "info", "(", "\"example_index: %s\"", "%", "(", "example_index", ")", ")", "logger", ".", "info", "(", "\"doc_span_index: %s\"", "%", "(", "doc_span_index", ")", ")", "logger", ".", "info", "(", "\"tokens: %s\"", "%", "\" \"", ".", "join", "(", "tokens", ")", ")", "logger", ".", "info", "(", "\"token_to_orig_map: %s\"", "%", "\" \"", ".", "join", "(", "[", "\"%d:%d\"", "%", "(", "x", ",", "y", ")", "for", "(", "x", ",", "y", ")", "in", "token_to_orig_map", ".", "items", "(", ")", "]", ")", ")", "logger", ".", "info", "(", "\"token_is_max_context: %s\"", "%", "\" \"", ".", "join", "(", "[", "\"%d:%s\"", "%", "(", "x", ",", "y", ")", "for", "(", "x", ",", "y", ")", "in", "token_is_max_context", ".", "items", "(", ")", "]", ")", ")", "logger", ".", "info", "(", "\"input_ids: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "input_ids", "]", ")", ")", "logger", ".", "info", "(", "\"input_mask: %s\"", "%", "\" \"", ".", 
"join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "input_mask", "]", ")", ")", "logger", ".", "info", "(", "\"segment_ids: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "segment_ids", "]", ")", ")", "if", "is_training", "and", "example", ".", "is_impossible", ":", "logger", ".", "info", "(", "\"impossible example\"", ")", "if", "is_training", "and", "not", "example", ".", "is_impossible", ":", "answer_text", "=", "\" \"", ".", "join", "(", "tokens", "[", "start_position", ":", "(", "end_position", "+", "1", ")", "]", ")", "logger", ".", "info", "(", "\"start_position: %d\"", "%", "(", "start_position", ")", ")", "logger", ".", "info", "(", "\"end_position: %d\"", "%", "(", "end_position", ")", ")", "logger", ".", "info", "(", "\"answer: %s\"", "%", "(", "answer_text", ")", ")", "features", ".", "append", "(", "InputFeatures", "(", "unique_id", "=", "unique_id", ",", "example_index", "=", "example_index", ",", "doc_span_index", "=", "doc_span_index", ",", "tokens", "=", "tokens", ",", "token_to_orig_map", "=", "token_to_orig_map", ",", "token_is_max_context", "=", "token_is_max_context", ",", "input_ids", "=", "input_ids", ",", "input_mask", "=", "input_mask", ",", "segment_ids", "=", "segment_ids", ",", "start_position", "=", "start_position", ",", "end_position", "=", "end_position", ",", "is_impossible", "=", "example", ".", "is_impossible", ")", ")", "unique_id", "+=", "1", "return", "features" ]
b832d5bb8a6dfc5965015b828e577677eace601e
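A standalone sketch of the sliding-window doc-span enumeration used above, with made-up sizes; it mirrors only the while-loop, not the rest of the function.

import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_doc_tokens, max_tokens_for_doc, doc_stride):
    doc_spans = []
    start_offset = 0
    while start_offset < num_doc_tokens:
        # Take at most max_tokens_for_doc tokens, then slide forward by doc_stride.
        length = min(num_doc_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(DocSpan(start=start_offset, length=length))
        if start_offset + length == num_doc_tokens:
            break
        start_offset += min(length, doc_stride)
    return doc_spans

# e.g. a 1000-token document with a 384-token budget and stride 128 yields
# overlapping spans starting at 0, 128, 256, 384, 512 and 640.
print(make_doc_spans(1000, 384, 128))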
train
_improve_answer_span
Returns tokenized answer spans that better match the annotated answer.
examples/run_squad.py
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
                         orig_answer_text):
    """Returns tokenized answer spans that better match the annotated answer."""

    # The SQuAD annotations are character based. We first project them to
    # whitespace-tokenized words. But then after WordPiece tokenization, we can
    # often find a "better match". For example:
    #
    #   Question: What year was John Smith born?
    #   Context: The leader was John Smith (1895-1943).
    #   Answer: 1895
    #
    # The original whitespace-tokenized answer will be "(1895-1943).". However
    # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
    # the exact answer, 1895.
    #
    # However, this is not always possible. Consider the following:
    #
    #   Question: What country is the top exporter of electornics?
    #   Context: The Japanese electronics industry is the lagest in the world.
    #   Answer: Japan
    #
    # In this case, the annotator chose "Japan" as a character sub-span of
    # the word "Japanese". Since our WordPiece tokenizer does not split
    # "Japanese", we just use "Japanese" as the annotation. This is fairly rare
    # in SQuAD, but does happen.
    tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))

    for new_start in range(input_start, input_end + 1):
        for new_end in range(input_end, new_start - 1, -1):
            text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
            if text_span == tok_answer_text:
                return (new_start, new_end)

    return (input_start, input_end)
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
                         orig_answer_text):
    """Returns tokenized answer spans that better match the annotated answer."""

    # The SQuAD annotations are character based. We first project them to
    # whitespace-tokenized words. But then after WordPiece tokenization, we can
    # often find a "better match". For example:
    #
    #   Question: What year was John Smith born?
    #   Context: The leader was John Smith (1895-1943).
    #   Answer: 1895
    #
    # The original whitespace-tokenized answer will be "(1895-1943).". However
    # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
    # the exact answer, 1895.
    #
    # However, this is not always possible. Consider the following:
    #
    #   Question: What country is the top exporter of electornics?
    #   Context: The Japanese electronics industry is the lagest in the world.
    #   Answer: Japan
    #
    # In this case, the annotator chose "Japan" as a character sub-span of
    # the word "Japanese". Since our WordPiece tokenizer does not split
    # "Japanese", we just use "Japanese" as the annotation. This is fairly rare
    # in SQuAD, but does happen.
    tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))

    for new_start in range(input_start, input_end + 1):
        for new_end in range(input_end, new_start - 1, -1):
            text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
            if text_span == tok_answer_text:
                return (new_start, new_end)

    return (input_start, input_end)
[ "Returns", "tokenized", "answer", "spans", "that", "better", "match", "the", "annotated", "answer", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L363-L397
[ "def", "_improve_answer_span", "(", "doc_tokens", ",", "input_start", ",", "input_end", ",", "tokenizer", ",", "orig_answer_text", ")", ":", "# The SQuAD annotations are character based. We first project them to", "# whitespace-tokenized words. But then after WordPiece tokenization, we can", "# often find a \"better match\". For example:", "#", "# Question: What year was John Smith born?", "# Context: The leader was John Smith (1895-1943).", "# Answer: 1895", "#", "# The original whitespace-tokenized answer will be \"(1895-1943).\". However", "# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match", "# the exact answer, 1895.", "#", "# However, this is not always possible. Consider the following:", "#", "# Question: What country is the top exporter of electornics?", "# Context: The Japanese electronics industry is the lagest in the world.", "# Answer: Japan", "#", "# In this case, the annotator chose \"Japan\" as a character sub-span of", "# the word \"Japanese\". Since our WordPiece tokenizer does not split", "# \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare", "# in SQuAD, but does happen.", "tok_answer_text", "=", "\" \"", ".", "join", "(", "tokenizer", ".", "tokenize", "(", "orig_answer_text", ")", ")", "for", "new_start", "in", "range", "(", "input_start", ",", "input_end", "+", "1", ")", ":", "for", "new_end", "in", "range", "(", "input_end", ",", "new_start", "-", "1", ",", "-", "1", ")", ":", "text_span", "=", "\" \"", ".", "join", "(", "doc_tokens", "[", "new_start", ":", "(", "new_end", "+", "1", ")", "]", ")", "if", "text_span", "==", "tok_answer_text", ":", "return", "(", "new_start", ",", "new_end", ")", "return", "(", "input_start", ",", "input_end", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
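A minimal usage sketch for _improve_answer_span. The _ToyTokenizer below is a stand-in defined only for this illustration (the real code passes the repo's WordPiece tokenizer); it reproduces the "(1895-1943)." example from the comments above.

class _ToyTokenizer(object):
    """Toy tokenizer: splits on whitespace and on every non-alphanumeric character."""
    def tokenize(self, text):
        tokens = []
        for word in text.split():
            buf = ""
            for ch in word:
                if ch.isalnum():
                    buf += ch
                else:
                    if buf:
                        tokens.append(buf)
                        buf = ""
                    tokens.append(ch)
            if buf:
                tokens.append(buf)
        return tokens

doc_tokens = _ToyTokenizer().tokenize("The leader was John Smith (1895-1943).")
# doc_tokens == ['The', 'leader', 'was', 'John', 'Smith', '(', '1895', '-', '1943', ')', '.']
# The whitespace-level answer span covered tokens 5..10 ("( 1895 - 1943 ) ."); the
# improved span narrows it to the single sub-token "1895".
print(_improve_answer_span(doc_tokens, 5, 10, _ToyTokenizer(), "1895"))  # (6, 6)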
train
_check_is_max_context
Check if this is the 'max context' doc span for the token.
examples/run_squad.py
def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index
def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index
[ "Check", "if", "this", "is", "the", "max", "context", "doc", "span", "for", "the", "token", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L400-L434
[ "def", "_check_is_max_context", "(", "doc_spans", ",", "cur_span_index", ",", "position", ")", ":", "# Because of the sliding window approach taken to scoring documents, a single", "# token can appear in multiple documents. E.g.", "# Doc: the man went to the store and bought a gallon of milk", "# Span A: the man went to the", "# Span B: to the store and bought", "# Span C: and bought a gallon of", "# ...", "#", "# Now the word 'bought' will have two scores from spans B and C. We only", "# want to consider the score with \"maximum context\", which we define as", "# the *minimum* of its left and right context (the *sum* of left and", "# right context will always be the same, of course).", "#", "# In the example the maximum context for 'bought' would be span C since", "# it has 1 left context and 3 right context, while span B has 4 left context", "# and 0 right context.", "best_score", "=", "None", "best_span_index", "=", "None", "for", "(", "span_index", ",", "doc_span", ")", "in", "enumerate", "(", "doc_spans", ")", ":", "end", "=", "doc_span", ".", "start", "+", "doc_span", ".", "length", "-", "1", "if", "position", "<", "doc_span", ".", "start", ":", "continue", "if", "position", ">", "end", ":", "continue", "num_left_context", "=", "position", "-", "doc_span", ".", "start", "num_right_context", "=", "end", "-", "position", "score", "=", "min", "(", "num_left_context", ",", "num_right_context", ")", "+", "0.01", "*", "doc_span", ".", "length", "if", "best_score", "is", "None", "or", "score", ">", "best_score", ":", "best_score", "=", "score", "best_span_index", "=", "span_index", "return", "cur_span_index", "==", "best_span_index" ]
b832d5bb8a6dfc5965015b828e577677eace601e
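A small usage sketch for _check_is_max_context, reproducing the sliding-window example from the comments; the _DocSpan namedtuple mirrors the one built by the feature-conversion code elsewhere in this file (its exact definition is assumed here).

import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = [
    _DocSpan(start=0, length=5),  # Span A: "the man went to the"      (tokens 0-4)
    _DocSpan(start=3, length=5),  # Span B: "to the store and bought"  (tokens 3-7)
    _DocSpan(start=6, length=5),  # Span C: "and bought a gallon of"   (tokens 6-10)
]
# Token 7 ("bought") appears in spans B and C; C gives it the larger minimum context
# (1 left / 3 right vs. 4 left / 0 right), so only C counts as the max-context span.
print(_check_is_max_context(doc_spans, 1, 7))  # False
print(_check_is_max_context(doc_spans, 2, 7))  # True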
train
write_predictions
Write final predictions to the json file and log-odds of null if needed.
examples/run_squad.py
def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold): """Write final predictions to the json file and log-odds of null if needed.""" logger.info("Writing predictions to: %s" % (output_prediction_file)) logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min null score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. 
if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest)==1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold): """Write final predictions to the json file and log-odds of null if needed.""" logger.info("Writing predictions to: %s" % (output_prediction_file)) logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min null score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. 
if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest)==1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
[ "Write", "final", "predictions", "to", "the", "json", "file", "and", "log", "-", "odds", "of", "null", "if", "needed", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L441-L630
[ "def", "write_predictions", "(", "all_examples", ",", "all_features", ",", "all_results", ",", "n_best_size", ",", "max_answer_length", ",", "do_lower_case", ",", "output_prediction_file", ",", "output_nbest_file", ",", "output_null_log_odds_file", ",", "verbose_logging", ",", "version_2_with_negative", ",", "null_score_diff_threshold", ")", ":", "logger", ".", "info", "(", "\"Writing predictions to: %s\"", "%", "(", "output_prediction_file", ")", ")", "logger", ".", "info", "(", "\"Writing nbest to: %s\"", "%", "(", "output_nbest_file", ")", ")", "example_index_to_features", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "feature", "in", "all_features", ":", "example_index_to_features", "[", "feature", ".", "example_index", "]", ".", "append", "(", "feature", ")", "unique_id_to_result", "=", "{", "}", "for", "result", "in", "all_results", ":", "unique_id_to_result", "[", "result", ".", "unique_id", "]", "=", "result", "_PrelimPrediction", "=", "collections", ".", "namedtuple", "(", "# pylint: disable=invalid-name", "\"PrelimPrediction\"", ",", "[", "\"feature_index\"", ",", "\"start_index\"", ",", "\"end_index\"", ",", "\"start_logit\"", ",", "\"end_logit\"", "]", ")", "all_predictions", "=", "collections", ".", "OrderedDict", "(", ")", "all_nbest_json", "=", "collections", ".", "OrderedDict", "(", ")", "scores_diff_json", "=", "collections", ".", "OrderedDict", "(", ")", "for", "(", "example_index", ",", "example", ")", "in", "enumerate", "(", "all_examples", ")", ":", "features", "=", "example_index_to_features", "[", "example_index", "]", "prelim_predictions", "=", "[", "]", "# keep track of the minimum score of null start+end of position 0", "score_null", "=", "1000000", "# large and positive", "min_null_feature_index", "=", "0", "# the paragraph slice with min null score", "null_start_logit", "=", "0", "# the start logit at the slice with min null score", "null_end_logit", "=", "0", "# the end logit at the slice with min null score", "for", "(", "feature_index", ",", "feature", ")", "in", "enumerate", "(", "features", ")", ":", "result", "=", "unique_id_to_result", "[", "feature", ".", "unique_id", "]", "start_indexes", "=", "_get_best_indexes", "(", "result", ".", "start_logits", ",", "n_best_size", ")", "end_indexes", "=", "_get_best_indexes", "(", "result", ".", "end_logits", ",", "n_best_size", ")", "# if we could have irrelevant answers, get the min score of irrelevant", "if", "version_2_with_negative", ":", "feature_null_score", "=", "result", ".", "start_logits", "[", "0", "]", "+", "result", ".", "end_logits", "[", "0", "]", "if", "feature_null_score", "<", "score_null", ":", "score_null", "=", "feature_null_score", "min_null_feature_index", "=", "feature_index", "null_start_logit", "=", "result", ".", "start_logits", "[", "0", "]", "null_end_logit", "=", "result", ".", "end_logits", "[", "0", "]", "for", "start_index", "in", "start_indexes", ":", "for", "end_index", "in", "end_indexes", ":", "# We could hypothetically create invalid predictions, e.g., predict", "# that the start of the span is in the question. 
We throw out all", "# invalid predictions.", "if", "start_index", ">=", "len", "(", "feature", ".", "tokens", ")", ":", "continue", "if", "end_index", ">=", "len", "(", "feature", ".", "tokens", ")", ":", "continue", "if", "start_index", "not", "in", "feature", ".", "token_to_orig_map", ":", "continue", "if", "end_index", "not", "in", "feature", ".", "token_to_orig_map", ":", "continue", "if", "not", "feature", ".", "token_is_max_context", ".", "get", "(", "start_index", ",", "False", ")", ":", "continue", "if", "end_index", "<", "start_index", ":", "continue", "length", "=", "end_index", "-", "start_index", "+", "1", "if", "length", ">", "max_answer_length", ":", "continue", "prelim_predictions", ".", "append", "(", "_PrelimPrediction", "(", "feature_index", "=", "feature_index", ",", "start_index", "=", "start_index", ",", "end_index", "=", "end_index", ",", "start_logit", "=", "result", ".", "start_logits", "[", "start_index", "]", ",", "end_logit", "=", "result", ".", "end_logits", "[", "end_index", "]", ")", ")", "if", "version_2_with_negative", ":", "prelim_predictions", ".", "append", "(", "_PrelimPrediction", "(", "feature_index", "=", "min_null_feature_index", ",", "start_index", "=", "0", ",", "end_index", "=", "0", ",", "start_logit", "=", "null_start_logit", ",", "end_logit", "=", "null_end_logit", ")", ")", "prelim_predictions", "=", "sorted", "(", "prelim_predictions", ",", "key", "=", "lambda", "x", ":", "(", "x", ".", "start_logit", "+", "x", ".", "end_logit", ")", ",", "reverse", "=", "True", ")", "_NbestPrediction", "=", "collections", ".", "namedtuple", "(", "# pylint: disable=invalid-name", "\"NbestPrediction\"", ",", "[", "\"text\"", ",", "\"start_logit\"", ",", "\"end_logit\"", "]", ")", "seen_predictions", "=", "{", "}", "nbest", "=", "[", "]", "for", "pred", "in", "prelim_predictions", ":", "if", "len", "(", "nbest", ")", ">=", "n_best_size", ":", "break", "feature", "=", "features", "[", "pred", ".", "feature_index", "]", "if", "pred", ".", "start_index", ">", "0", ":", "# this is a non-null prediction", "tok_tokens", "=", "feature", ".", "tokens", "[", "pred", ".", "start_index", ":", "(", "pred", ".", "end_index", "+", "1", ")", "]", "orig_doc_start", "=", "feature", ".", "token_to_orig_map", "[", "pred", ".", "start_index", "]", "orig_doc_end", "=", "feature", ".", "token_to_orig_map", "[", "pred", ".", "end_index", "]", "orig_tokens", "=", "example", ".", "doc_tokens", "[", "orig_doc_start", ":", "(", "orig_doc_end", "+", "1", ")", "]", "tok_text", "=", "\" \"", ".", "join", "(", "tok_tokens", ")", "# De-tokenize WordPieces that have been split off.", "tok_text", "=", "tok_text", ".", "replace", "(", "\" ##\"", ",", "\"\"", ")", "tok_text", "=", "tok_text", ".", "replace", "(", "\"##\"", ",", "\"\"", ")", "# Clean whitespace", "tok_text", "=", "tok_text", ".", "strip", "(", ")", "tok_text", "=", "\" \"", ".", "join", "(", "tok_text", ".", "split", "(", ")", ")", "orig_text", "=", "\" \"", ".", "join", "(", "orig_tokens", ")", "final_text", "=", "get_final_text", "(", "tok_text", ",", "orig_text", ",", "do_lower_case", ",", "verbose_logging", ")", "if", "final_text", "in", "seen_predictions", ":", "continue", "seen_predictions", "[", "final_text", "]", "=", "True", "else", ":", "final_text", "=", "\"\"", "seen_predictions", "[", "final_text", "]", "=", "True", "nbest", ".", "append", "(", "_NbestPrediction", "(", "text", "=", "final_text", ",", "start_logit", "=", "pred", ".", "start_logit", ",", "end_logit", "=", "pred", ".", "end_logit", ")", ")", "# if we 
didn't include the empty option in the n-best, include it", "if", "version_2_with_negative", ":", "if", "\"\"", "not", "in", "seen_predictions", ":", "nbest", ".", "append", "(", "_NbestPrediction", "(", "text", "=", "\"\"", ",", "start_logit", "=", "null_start_logit", ",", "end_logit", "=", "null_end_logit", ")", ")", "# In very rare edge cases we could only have single null prediction.", "# So we just create a nonce prediction in this case to avoid failure.", "if", "len", "(", "nbest", ")", "==", "1", ":", "nbest", ".", "insert", "(", "0", ",", "_NbestPrediction", "(", "text", "=", "\"empty\"", ",", "start_logit", "=", "0.0", ",", "end_logit", "=", "0.0", ")", ")", "# In very rare edge cases we could have no valid predictions. So we", "# just create a nonce prediction in this case to avoid failure.", "if", "not", "nbest", ":", "nbest", ".", "append", "(", "_NbestPrediction", "(", "text", "=", "\"empty\"", ",", "start_logit", "=", "0.0", ",", "end_logit", "=", "0.0", ")", ")", "assert", "len", "(", "nbest", ")", ">=", "1", "total_scores", "=", "[", "]", "best_non_null_entry", "=", "None", "for", "entry", "in", "nbest", ":", "total_scores", ".", "append", "(", "entry", ".", "start_logit", "+", "entry", ".", "end_logit", ")", "if", "not", "best_non_null_entry", ":", "if", "entry", ".", "text", ":", "best_non_null_entry", "=", "entry", "probs", "=", "_compute_softmax", "(", "total_scores", ")", "nbest_json", "=", "[", "]", "for", "(", "i", ",", "entry", ")", "in", "enumerate", "(", "nbest", ")", ":", "output", "=", "collections", ".", "OrderedDict", "(", ")", "output", "[", "\"text\"", "]", "=", "entry", ".", "text", "output", "[", "\"probability\"", "]", "=", "probs", "[", "i", "]", "output", "[", "\"start_logit\"", "]", "=", "entry", ".", "start_logit", "output", "[", "\"end_logit\"", "]", "=", "entry", ".", "end_logit", "nbest_json", ".", "append", "(", "output", ")", "assert", "len", "(", "nbest_json", ")", ">=", "1", "if", "not", "version_2_with_negative", ":", "all_predictions", "[", "example", ".", "qas_id", "]", "=", "nbest_json", "[", "0", "]", "[", "\"text\"", "]", "else", ":", "# predict \"\" iff the null score - the score of best non-null > threshold", "score_diff", "=", "score_null", "-", "best_non_null_entry", ".", "start_logit", "-", "(", "best_non_null_entry", ".", "end_logit", ")", "scores_diff_json", "[", "example", ".", "qas_id", "]", "=", "score_diff", "if", "score_diff", ">", "null_score_diff_threshold", ":", "all_predictions", "[", "example", ".", "qas_id", "]", "=", "\"\"", "else", ":", "all_predictions", "[", "example", ".", "qas_id", "]", "=", "best_non_null_entry", ".", "text", "all_nbest_json", "[", "example", ".", "qas_id", "]", "=", "nbest_json", "with", "open", "(", "output_prediction_file", ",", "\"w\"", ")", "as", "writer", ":", "writer", ".", "write", "(", "json", ".", "dumps", "(", "all_predictions", ",", "indent", "=", "4", ")", "+", "\"\\n\"", ")", "with", "open", "(", "output_nbest_file", ",", "\"w\"", ")", "as", "writer", ":", "writer", ".", "write", "(", "json", ".", "dumps", "(", "all_nbest_json", ",", "indent", "=", "4", ")", "+", "\"\\n\"", ")", "if", "version_2_with_negative", ":", "with", "open", "(", "output_null_log_odds_file", ",", "\"w\"", ")", "as", "writer", ":", "writer", ".", "write", "(", "json", ".", "dumps", "(", "scores_diff_json", ",", "indent", "=", "4", ")", "+", "\"\\n\"", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
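write_predictions needs full example/feature/result objects, so a runnable end-to-end call is out of scope here; the snippet below only illustrates the SQuAD 2.0 "no answer" decision applied near the end of the function, with made-up logit values.

score_null = 2.5                               # start_logits[0] + end_logits[0] at the best null slice
best_start_logit, best_end_logit = 1.0, 0.7    # logits of the best non-null span
null_score_diff_threshold = 0.0

score_diff = score_null - best_start_logit - best_end_logit
prediction = "" if score_diff > null_score_diff_threshold else "<best non-null span text>"
print(score_diff, repr(prediction))  # 0.8 '' -> the question is predicted unanswerable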
train
get_final_text
Project the tokenized prediction back to the original text.
examples/run_squad.py
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text
[ "Project", "the", "tokenized", "prediction", "back", "to", "the", "original", "text", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L633-L726
[ "def", "get_final_text", "(", "pred_text", ",", "orig_text", ",", "do_lower_case", ",", "verbose_logging", "=", "False", ")", ":", "# When we created the data, we kept track of the alignment between original", "# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So", "# now `orig_text` contains the span of our original text corresponding to the", "# span that we predicted.", "#", "# However, `orig_text` may contain extra characters that we don't want in", "# our prediction.", "#", "# For example, let's say:", "# pred_text = steve smith", "# orig_text = Steve Smith's", "#", "# We don't want to return `orig_text` because it contains the extra \"'s\".", "#", "# We don't want to return `pred_text` because it's already been normalized", "# (the SQuAD eval script also does punctuation stripping/lower casing but", "# our tokenizer does additional normalization like stripping accent", "# characters).", "#", "# What we really want to return is \"Steve Smith\".", "#", "# Therefore, we have to apply a semi-complicated alignment heuristic between", "# `pred_text` and `orig_text` to get a character-to-character alignment. This", "# can fail in certain cases in which case we just return `orig_text`.", "def", "_strip_spaces", "(", "text", ")", ":", "ns_chars", "=", "[", "]", "ns_to_s_map", "=", "collections", ".", "OrderedDict", "(", ")", "for", "(", "i", ",", "c", ")", "in", "enumerate", "(", "text", ")", ":", "if", "c", "==", "\" \"", ":", "continue", "ns_to_s_map", "[", "len", "(", "ns_chars", ")", "]", "=", "i", "ns_chars", ".", "append", "(", "c", ")", "ns_text", "=", "\"\"", ".", "join", "(", "ns_chars", ")", "return", "(", "ns_text", ",", "ns_to_s_map", ")", "# We first tokenize `orig_text`, strip whitespace from the result", "# and `pred_text`, and check if they are the same length. If they are", "# NOT the same length, the heuristic has failed. 
If they are the same", "# length, we assume the characters are one-to-one aligned.", "tokenizer", "=", "BasicTokenizer", "(", "do_lower_case", "=", "do_lower_case", ")", "tok_text", "=", "\" \"", ".", "join", "(", "tokenizer", ".", "tokenize", "(", "orig_text", ")", ")", "start_position", "=", "tok_text", ".", "find", "(", "pred_text", ")", "if", "start_position", "==", "-", "1", ":", "if", "verbose_logging", ":", "logger", ".", "info", "(", "\"Unable to find text: '%s' in '%s'\"", "%", "(", "pred_text", ",", "orig_text", ")", ")", "return", "orig_text", "end_position", "=", "start_position", "+", "len", "(", "pred_text", ")", "-", "1", "(", "orig_ns_text", ",", "orig_ns_to_s_map", ")", "=", "_strip_spaces", "(", "orig_text", ")", "(", "tok_ns_text", ",", "tok_ns_to_s_map", ")", "=", "_strip_spaces", "(", "tok_text", ")", "if", "len", "(", "orig_ns_text", ")", "!=", "len", "(", "tok_ns_text", ")", ":", "if", "verbose_logging", ":", "logger", ".", "info", "(", "\"Length not equal after stripping spaces: '%s' vs '%s'\"", ",", "orig_ns_text", ",", "tok_ns_text", ")", "return", "orig_text", "# We then project the characters in `pred_text` back to `orig_text` using", "# the character-to-character alignment.", "tok_s_to_ns_map", "=", "{", "}", "for", "(", "i", ",", "tok_index", ")", "in", "tok_ns_to_s_map", ".", "items", "(", ")", ":", "tok_s_to_ns_map", "[", "tok_index", "]", "=", "i", "orig_start_position", "=", "None", "if", "start_position", "in", "tok_s_to_ns_map", ":", "ns_start_position", "=", "tok_s_to_ns_map", "[", "start_position", "]", "if", "ns_start_position", "in", "orig_ns_to_s_map", ":", "orig_start_position", "=", "orig_ns_to_s_map", "[", "ns_start_position", "]", "if", "orig_start_position", "is", "None", ":", "if", "verbose_logging", ":", "logger", ".", "info", "(", "\"Couldn't map start position\"", ")", "return", "orig_text", "orig_end_position", "=", "None", "if", "end_position", "in", "tok_s_to_ns_map", ":", "ns_end_position", "=", "tok_s_to_ns_map", "[", "end_position", "]", "if", "ns_end_position", "in", "orig_ns_to_s_map", ":", "orig_end_position", "=", "orig_ns_to_s_map", "[", "ns_end_position", "]", "if", "orig_end_position", "is", "None", ":", "if", "verbose_logging", ":", "logger", ".", "info", "(", "\"Couldn't map end position\"", ")", "return", "orig_text", "output_text", "=", "orig_text", "[", "orig_start_position", ":", "(", "orig_end_position", "+", "1", ")", "]", "return", "output_text" ]
b832d5bb8a6dfc5965015b828e577677eace601e
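A usage sketch of get_final_text on the "Steve Smith" example from its own comments; it assumes BasicTokenizer is importable as in this file's imports (from pytorch_pretrained_bert.tokenization).

pred_text = "steve smith"    # normalized model prediction
orig_text = "Steve Smith's"  # original document span
print(get_final_text(pred_text, orig_text, do_lower_case=True))  # "Steve Smith"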
train
_get_best_indexes
Get the n-best logits from a list.
examples/run_squad.py
def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes
def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes
[ "Get", "the", "n", "-", "best", "logits", "from", "a", "list", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L729-L738
[ "def", "_get_best_indexes", "(", "logits", ",", "n_best_size", ")", ":", "index_and_score", "=", "sorted", "(", "enumerate", "(", "logits", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "True", ")", "best_indexes", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "index_and_score", ")", ")", ":", "if", "i", ">=", "n_best_size", ":", "break", "best_indexes", ".", "append", "(", "index_and_score", "[", "i", "]", "[", "0", "]", ")", "return", "best_indexes" ]
b832d5bb8a6dfc5965015b828e577677eace601e
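A quick usage sketch of _get_best_indexes with made-up logits.

logits = [0.1, 3.2, -1.0, 2.7]
print(_get_best_indexes(logits, n_best_size=2))  # [1, 3] -- indexes of the two largest logits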
train
_compute_softmax
Compute softmax probability over raw logits.
examples/run_squad.py
def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs
def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs
[ "Compute", "softmax", "probability", "over", "raw", "logits", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_squad.py#L741-L761
[ "def", "_compute_softmax", "(", "scores", ")", ":", "if", "not", "scores", ":", "return", "[", "]", "max_score", "=", "None", "for", "score", "in", "scores", ":", "if", "max_score", "is", "None", "or", "score", ">", "max_score", ":", "max_score", "=", "score", "exp_scores", "=", "[", "]", "total_sum", "=", "0.0", "for", "score", "in", "scores", ":", "x", "=", "math", ".", "exp", "(", "score", "-", "max_score", ")", "exp_scores", ".", "append", "(", "x", ")", "total_sum", "+=", "x", "probs", "=", "[", "]", "for", "score", "in", "exp_scores", ":", "probs", ".", "append", "(", "score", "/", "total_sum", ")", "return", "probs" ]
b832d5bb8a6dfc5965015b828e577677eace601e
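A quick worked example for _compute_softmax; subtracting the max score keeps math.exp from overflowing while leaving the resulting probabilities unchanged.

probs = _compute_softmax([1.0, 2.0, 3.0])
print([round(p, 3) for p in probs])  # [0.09, 0.245, 0.665]
print(abs(sum(probs) - 1.0) < 1e-9)  # True -- the outputs form a probability distribution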
train
convert_examples_to_features
Loads a data file into a list of `InputBatch`s.
examples/run_swag.py
def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training): """Loads a data file into a list of `InputBatch`s.""" # Swag is a multiple choice task. To perform this task using Bert, # we will use the formatting proposed in "Improving Language # Understanding by Generative Pre-Training" and suggested by # @jacobdevlin-google in this issue # https://github.com/google-research/bert/issues/38. # # Each choice will correspond to a sample on which we run the # inference. For a given Swag example, we will create the 4 # following inputs: # - [CLS] context [SEP] choice_1 [SEP] # - [CLS] context [SEP] choice_2 [SEP] # - [CLS] context [SEP] choice_3 [SEP] # - [CLS] context [SEP] choice_4 [SEP] # The model will output a single value for each input. To get the # final decision of the model, we will run a softmax over these 4 # outputs. features = [] for example_index, example in enumerate(examples): context_tokens = tokenizer.tokenize(example.context_sentence) start_ending_tokens = tokenizer.tokenize(example.start_ending) choices_features = [] for ending_index, ending in enumerate(example.endings): # We create a copy of the context tokens in order to be # able to shrink it according to ending_tokens context_tokens_choice = context_tokens[:] ending_tokens = start_ending_tokens + tokenizer.tokenize(ending) # Modifies `context_tokens_choice` and `ending_tokens` in # place so that the total length is less than the # specified length. Account for [CLS], [SEP], [SEP] with # "- 3" _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3) tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"] segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length choices_features.append((tokens, input_ids, input_mask, segment_ids)) label = example.label if example_index < 5: logger.info("*** Example ***") logger.info("swag_id: {}".format(example.swag_id)) for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("tokens: {}".format(' '.join(tokens))) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("input_mask: {}".format(' '.join(map(str, input_mask)))) logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids)))) if is_training: logger.info("label: {}".format(label)) features.append( InputFeatures( example_id = example.swag_id, choices_features = choices_features, label = label ) ) return features
def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training): """Loads a data file into a list of `InputBatch`s.""" # Swag is a multiple choice task. To perform this task using Bert, # we will use the formatting proposed in "Improving Language # Understanding by Generative Pre-Training" and suggested by # @jacobdevlin-google in this issue # https://github.com/google-research/bert/issues/38. # # Each choice will correspond to a sample on which we run the # inference. For a given Swag example, we will create the 4 # following inputs: # - [CLS] context [SEP] choice_1 [SEP] # - [CLS] context [SEP] choice_2 [SEP] # - [CLS] context [SEP] choice_3 [SEP] # - [CLS] context [SEP] choice_4 [SEP] # The model will output a single value for each input. To get the # final decision of the model, we will run a softmax over these 4 # outputs. features = [] for example_index, example in enumerate(examples): context_tokens = tokenizer.tokenize(example.context_sentence) start_ending_tokens = tokenizer.tokenize(example.start_ending) choices_features = [] for ending_index, ending in enumerate(example.endings): # We create a copy of the context tokens in order to be # able to shrink it according to ending_tokens context_tokens_choice = context_tokens[:] ending_tokens = start_ending_tokens + tokenizer.tokenize(ending) # Modifies `context_tokens_choice` and `ending_tokens` in # place so that the total length is less than the # specified length. Account for [CLS], [SEP], [SEP] with # "- 3" _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3) tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"] segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length choices_features.append((tokens, input_ids, input_mask, segment_ids)) label = example.label if example_index < 5: logger.info("*** Example ***") logger.info("swag_id: {}".format(example.swag_id)) for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("tokens: {}".format(' '.join(tokens))) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("input_mask: {}".format(' '.join(map(str, input_mask)))) logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids)))) if is_training: logger.info("label: {}".format(label)) features.append( InputFeatures( example_id = example.swag_id, choices_features = choices_features, label = label ) ) return features
[ "Loads", "a", "data", "file", "into", "a", "list", "of", "InputBatch", "s", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_swag.py#L138-L214
[ "def", "convert_examples_to_features", "(", "examples", ",", "tokenizer", ",", "max_seq_length", ",", "is_training", ")", ":", "# Swag is a multiple choice task. To perform this task using Bert,", "# we will use the formatting proposed in \"Improving Language", "# Understanding by Generative Pre-Training\" and suggested by", "# @jacobdevlin-google in this issue", "# https://github.com/google-research/bert/issues/38.", "#", "# Each choice will correspond to a sample on which we run the", "# inference. For a given Swag example, we will create the 4", "# following inputs:", "# - [CLS] context [SEP] choice_1 [SEP]", "# - [CLS] context [SEP] choice_2 [SEP]", "# - [CLS] context [SEP] choice_3 [SEP]", "# - [CLS] context [SEP] choice_4 [SEP]", "# The model will output a single value for each input. To get the", "# final decision of the model, we will run a softmax over these 4", "# outputs.", "features", "=", "[", "]", "for", "example_index", ",", "example", "in", "enumerate", "(", "examples", ")", ":", "context_tokens", "=", "tokenizer", ".", "tokenize", "(", "example", ".", "context_sentence", ")", "start_ending_tokens", "=", "tokenizer", ".", "tokenize", "(", "example", ".", "start_ending", ")", "choices_features", "=", "[", "]", "for", "ending_index", ",", "ending", "in", "enumerate", "(", "example", ".", "endings", ")", ":", "# We create a copy of the context tokens in order to be", "# able to shrink it according to ending_tokens", "context_tokens_choice", "=", "context_tokens", "[", ":", "]", "ending_tokens", "=", "start_ending_tokens", "+", "tokenizer", ".", "tokenize", "(", "ending", ")", "# Modifies `context_tokens_choice` and `ending_tokens` in", "# place so that the total length is less than the", "# specified length. Account for [CLS], [SEP], [SEP] with", "# \"- 3\"", "_truncate_seq_pair", "(", "context_tokens_choice", ",", "ending_tokens", ",", "max_seq_length", "-", "3", ")", "tokens", "=", "[", "\"[CLS]\"", "]", "+", "context_tokens_choice", "+", "[", "\"[SEP]\"", "]", "+", "ending_tokens", "+", "[", "\"[SEP]\"", "]", "segment_ids", "=", "[", "0", "]", "*", "(", "len", "(", "context_tokens_choice", ")", "+", "2", ")", "+", "[", "1", "]", "*", "(", "len", "(", "ending_tokens", ")", "+", "1", ")", "input_ids", "=", "tokenizer", ".", "convert_tokens_to_ids", "(", "tokens", ")", "input_mask", "=", "[", "1", "]", "*", "len", "(", "input_ids", ")", "# Zero-pad up to the sequence length.", "padding", "=", "[", "0", "]", "*", "(", "max_seq_length", "-", "len", "(", "input_ids", ")", ")", "input_ids", "+=", "padding", "input_mask", "+=", "padding", "segment_ids", "+=", "padding", "assert", "len", "(", "input_ids", ")", "==", "max_seq_length", "assert", "len", "(", "input_mask", ")", "==", "max_seq_length", "assert", "len", "(", "segment_ids", ")", "==", "max_seq_length", "choices_features", ".", "append", "(", "(", "tokens", ",", "input_ids", ",", "input_mask", ",", "segment_ids", ")", ")", "label", "=", "example", ".", "label", "if", "example_index", "<", "5", ":", "logger", ".", "info", "(", "\"*** Example ***\"", ")", "logger", ".", "info", "(", "\"swag_id: {}\"", ".", "format", "(", "example", ".", "swag_id", ")", ")", "for", "choice_idx", ",", "(", "tokens", ",", "input_ids", ",", "input_mask", ",", "segment_ids", ")", "in", "enumerate", "(", "choices_features", ")", ":", "logger", ".", "info", "(", "\"choice: {}\"", ".", "format", "(", "choice_idx", ")", ")", "logger", ".", "info", "(", "\"tokens: {}\"", ".", "format", "(", "' '", ".", "join", "(", "tokens", ")", ")", ")", "logger", 
".", "info", "(", "\"input_ids: {}\"", ".", "format", "(", "' '", ".", "join", "(", "map", "(", "str", ",", "input_ids", ")", ")", ")", ")", "logger", ".", "info", "(", "\"input_mask: {}\"", ".", "format", "(", "' '", ".", "join", "(", "map", "(", "str", ",", "input_mask", ")", ")", ")", ")", "logger", ".", "info", "(", "\"segment_ids: {}\"", ".", "format", "(", "' '", ".", "join", "(", "map", "(", "str", ",", "segment_ids", ")", ")", ")", ")", "if", "is_training", ":", "logger", ".", "info", "(", "\"label: {}\"", ".", "format", "(", "label", ")", ")", "features", ".", "append", "(", "InputFeatures", "(", "example_id", "=", "example", ".", "swag_id", ",", "choices_features", "=", "choices_features", ",", "label", "=", "label", ")", ")", "return", "features" ]
b832d5bb8a6dfc5965015b828e577677eace601e
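A runnable end-to-end call needs SwagExample objects and a real tokenizer, so the snippet below only illustrates the per-choice input layout ("[CLS] context [SEP] ending [SEP]" with 0/1 segment ids) built inside the loop above; the token strings are placeholders for this sketch.

context_tokens_choice = ["the", "man", "went"]
ending_tokens = ["to", "the", "store"]
tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"]
segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1)
print(tokens)       # ['[CLS]', 'the', 'man', 'went', '[SEP]', 'to', 'the', 'store', '[SEP]']
print(segment_ids)  # [0, 0, 0, 0, 0, 1, 1, 1, 1]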
train
convert_examples_to_features
Loads a data file into a list of `InputBatch`s.
examples/run_classifier.py
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file into a list of `InputBatch`s.""" label_map = {label : i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = ["[CLS]"] + tokens_a + ["[SEP]"] segment_ids = [0] * len(tokens) if tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if output_mode == "classification": label_id = label_map[example.label] elif output_mode == "regression": label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("label: %s (id = %d)" % (example.label, label_id)) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) return features
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file into a list of `InputBatch`s.""" label_map = {label : i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = ["[CLS]"] + tokens_a + ["[SEP]"] segment_ids = [0] * len(tokens) if tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if output_mode == "classification": label_id = label_map[example.label] elif output_mode == "regression": label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("label: %s (id = %d)" % (example.label, label_id)) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) return features
[ "Loads", "a", "data", "file", "into", "a", "list", "of", "InputBatch", "s", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_classifier.py#L405-L494
[ "def", "convert_examples_to_features", "(", "examples", ",", "label_list", ",", "max_seq_length", ",", "tokenizer", ",", "output_mode", ")", ":", "label_map", "=", "{", "label", ":", "i", "for", "i", ",", "label", "in", "enumerate", "(", "label_list", ")", "}", "features", "=", "[", "]", "for", "(", "ex_index", ",", "example", ")", "in", "enumerate", "(", "examples", ")", ":", "if", "ex_index", "%", "10000", "==", "0", ":", "logger", ".", "info", "(", "\"Writing example %d of %d\"", "%", "(", "ex_index", ",", "len", "(", "examples", ")", ")", ")", "tokens_a", "=", "tokenizer", ".", "tokenize", "(", "example", ".", "text_a", ")", "tokens_b", "=", "None", "if", "example", ".", "text_b", ":", "tokens_b", "=", "tokenizer", ".", "tokenize", "(", "example", ".", "text_b", ")", "# Modifies `tokens_a` and `tokens_b` in place so that the total", "# length is less than the specified length.", "# Account for [CLS], [SEP], [SEP] with \"- 3\"", "_truncate_seq_pair", "(", "tokens_a", ",", "tokens_b", ",", "max_seq_length", "-", "3", ")", "else", ":", "# Account for [CLS] and [SEP] with \"- 2\"", "if", "len", "(", "tokens_a", ")", ">", "max_seq_length", "-", "2", ":", "tokens_a", "=", "tokens_a", "[", ":", "(", "max_seq_length", "-", "2", ")", "]", "# The convention in BERT is:", "# (a) For sequence pairs:", "# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]", "# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1", "# (b) For single sequences:", "# tokens: [CLS] the dog is hairy . [SEP]", "# type_ids: 0 0 0 0 0 0 0", "#", "# Where \"type_ids\" are used to indicate whether this is the first", "# sequence or the second sequence. The embedding vectors for `type=0` and", "# `type=1` were learned during pre-training and are added to the wordpiece", "# embedding vector (and position vector). This is not *strictly* necessary", "# since the [SEP] token unambiguously separates the sequences, but it makes", "# it easier for the model to learn the concept of sequences.", "#", "# For classification tasks, the first vector (corresponding to [CLS]) is", "# used as as the \"sentence vector\". Note that this only makes sense because", "# the entire model is fine-tuned.", "tokens", "=", "[", "\"[CLS]\"", "]", "+", "tokens_a", "+", "[", "\"[SEP]\"", "]", "segment_ids", "=", "[", "0", "]", "*", "len", "(", "tokens", ")", "if", "tokens_b", ":", "tokens", "+=", "tokens_b", "+", "[", "\"[SEP]\"", "]", "segment_ids", "+=", "[", "1", "]", "*", "(", "len", "(", "tokens_b", ")", "+", "1", ")", "input_ids", "=", "tokenizer", ".", "convert_tokens_to_ids", "(", "tokens", ")", "# The mask has 1 for real tokens and 0 for padding tokens. Only real", "# tokens are attended to.", "input_mask", "=", "[", "1", "]", "*", "len", "(", "input_ids", ")", "# Zero-pad up to the sequence length.", "padding", "=", "[", "0", "]", "*", "(", "max_seq_length", "-", "len", "(", "input_ids", ")", ")", "input_ids", "+=", "padding", "input_mask", "+=", "padding", "segment_ids", "+=", "padding", "assert", "len", "(", "input_ids", ")", "==", "max_seq_length", "assert", "len", "(", "input_mask", ")", "==", "max_seq_length", "assert", "len", "(", "segment_ids", ")", "==", "max_seq_length", "if", "output_mode", "==", "\"classification\"", ":", "label_id", "=", "label_map", "[", "example", ".", "label", "]", "elif", "output_mode", "==", "\"regression\"", ":", "label_id", "=", "float", "(", "example", ".", "label", ")", "else", ":", "raise", "KeyError", "(", "output_mode", ")", "if", "ex_index", "<", "5", ":", "logger", ".", "info", "(", "\"*** Example ***\"", ")", "logger", ".", "info", "(", "\"guid: %s\"", "%", "(", "example", ".", "guid", ")", ")", "logger", ".", "info", "(", "\"tokens: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "tokens", "]", ")", ")", "logger", ".", "info", "(", "\"input_ids: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "input_ids", "]", ")", ")", "logger", ".", "info", "(", "\"input_mask: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "input_mask", "]", ")", ")", "logger", ".", "info", "(", "\"segment_ids: %s\"", "%", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "segment_ids", "]", ")", ")", "logger", ".", "info", "(", "\"label: %s (id = %d)\"", "%", "(", "example", ".", "label", ",", "label_id", ")", ")", "features", ".", "append", "(", "InputFeatures", "(", "input_ids", "=", "input_ids", ",", "input_mask", "=", "input_mask", ",", "segment_ids", "=", "segment_ids", ",", "label_id", "=", "label_id", ")", ")", "return", "features" ]
b832d5bb8a6dfc5965015b828e577677eace601e
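A minimal, self-contained sketch of the [CLS]/[SEP]/segment_ids/input_mask layout that the comments in the record above describe; the toy wordpieces and the max_seq_length value are illustrative assumptions, not values taken from the dataset.

# Illustrative only: toy wordpieces standing in for real tokenizer output.
tokens_a = ["is", "this", "jack", "##son", "##ville", "?"]
tokens_b = ["no", "it", "is", "not", "."]
max_seq_length = 16

tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
input_mask = [1] * len(tokens)  # 1 for real tokens, 0 for padding

# Zero-pad all lists to max_seq_length, mirroring the function above.
padding = [0] * (max_seq_length - len(tokens))
segment_ids += padding
input_mask += padding
assert len(segment_ids) == len(input_mask) == max_seq_length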
train
DataProcessor._read_tsv
Reads a tab separated value file.
examples/run_classifier.py
def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines
def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines
[ "Reads", "a", "tab", "separated", "value", "file", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_classifier.py#L93-L102
[ "def", "_read_tsv", "(", "cls", ",", "input_file", ",", "quotechar", "=", "None", ")", ":", "with", "open", "(", "input_file", ",", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "f", ":", "reader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "\"\\t\"", ",", "quotechar", "=", "quotechar", ")", "lines", "=", "[", "]", "for", "line", "in", "reader", ":", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", ":", "line", "=", "list", "(", "unicode", "(", "cell", ",", "'utf-8'", ")", "for", "cell", "in", "line", ")", "lines", ".", "append", "(", "line", ")", "return", "lines" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
MrpcProcessor.get_train_examples
See base class.
examples/run_classifier.py
def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
[ "See", "base", "class", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_classifier.py#L108-L112
[ "def", "get_train_examples", "(", "self", ",", "data_dir", ")", ":", "logger", ".", "info", "(", "\"LOOKING AT {}\"", ".", "format", "(", "os", ".", "path", ".", "join", "(", "data_dir", ",", "\"train.tsv\"", ")", ")", ")", "return", "self", ".", "_create_examples", "(", "self", ".", "_read_tsv", "(", "os", ".", "path", ".", "join", "(", "data_dir", ",", "\"train.tsv\"", ")", ")", ",", "\"train\"", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
MrpcProcessor._create_examples
Creates examples for the training and dev sets.
examples/run_classifier.py
def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[3] text_b = line[4] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples
def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[3] text_b = line[4] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples
[ "Creates", "examples", "for", "the", "training", "and", "dev", "sets", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_classifier.py#L123-L135
[ "def", "_create_examples", "(", "self", ",", "lines", ",", "set_type", ")", ":", "examples", "=", "[", "]", "for", "(", "i", ",", "line", ")", "in", "enumerate", "(", "lines", ")", ":", "if", "i", "==", "0", ":", "continue", "guid", "=", "\"%s-%s\"", "%", "(", "set_type", ",", "i", ")", "text_a", "=", "line", "[", "3", "]", "text_b", "=", "line", "[", "4", "]", "label", "=", "line", "[", "0", "]", "examples", ".", "append", "(", "InputExample", "(", "guid", "=", "guid", ",", "text_a", "=", "text_a", ",", "text_b", "=", "text_b", ",", "label", "=", "label", ")", ")", "return", "examples" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
MnliProcessor.get_train_examples
See base class.
examples/run_classifier.py
def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
[ "See", "base", "class", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_classifier.py#L141-L144
[ "def", "get_train_examples", "(", "self", ",", "data_dir", ")", ":", "return", "self", ".", "_create_examples", "(", "self", ".", "_read_tsv", "(", "os", ".", "path", ".", "join", "(", "data_dir", ",", "\"train.tsv\"", ")", ")", ",", "\"train\"", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
MnliProcessor.get_dev_examples
See base class.
examples/run_classifier.py
def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")
def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")
[ "See", "base", "class", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_classifier.py#L146-L150
[ "def", "get_dev_examples", "(", "self", ",", "data_dir", ")", ":", "return", "self", ".", "_create_examples", "(", "self", ".", "_read_tsv", "(", "os", ".", "path", ".", "join", "(", "data_dir", ",", "\"dev_matched.tsv\"", ")", ")", ",", "\"dev_matched\"", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
top_k_logits
Masks everything but the k top entries as -infinity (1e10). Used to mask logits such that e^-infinity -> 0 won't contribute to the sum of the denominator.
examples/run_gpt2.py
def top_k_logits(logits, k): """ Masks everything but the k top entries as -infinity (1e10). Used to mask logits such that e^-infinity -> 0 won't contribute to the sum of the denominator. """ if k == 0: return logits else: values = torch.topk(logits, k)[0] batch_mins = values[:, -1].view(-1, 1).expand_as(logits) return torch.where(logits < batch_mins, torch.ones_like(logits) * -1e10, logits)
def top_k_logits(logits, k): """ Masks everything but the k top entries as -infinity (1e10). Used to mask logits such that e^-infinity -> 0 won't contribute to the sum of the denominator. """ if k == 0: return logits else: values = torch.topk(logits, k)[0] batch_mins = values[:, -1].view(-1, 1).expand_as(logits) return torch.where(logits < batch_mins, torch.ones_like(logits) * -1e10, logits)
[ "Masks", "everything", "but", "the", "k", "top", "entries", "as", "-", "infinity", "(", "1e10", ")", ".", "Used", "to", "mask", "logits", "such", "that", "e^", "-", "infinity", "-", ">", "0", "won", "t", "contribute", "to", "the", "sum", "of", "the", "denominator", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_gpt2.py#L18-L29
[ "def", "top_k_logits", "(", "logits", ",", "k", ")", ":", "if", "k", "==", "0", ":", "return", "logits", "else", ":", "values", "=", "torch", ".", "topk", "(", "logits", ",", "k", ")", "[", "0", "]", "batch_mins", "=", "values", "[", ":", ",", "-", "1", "]", ".", "view", "(", "-", "1", ",", "1", ")", ".", "expand_as", "(", "logits", ")", "return", "torch", ".", "where", "(", "logits", "<", "batch_mins", ",", "torch", ".", "ones_like", "(", "logits", ")", "*", "-", "1e10", ",", "logits", ")" ]
b832d5bb8a6dfc5965015b828e577677eace601e
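A small usage sketch of the top-k masking shown in the record above; the logits values and k are illustrative, and the function is restated here only so the snippet runs on its own.

import torch

def top_k_logits(logits, k):
    # Same logic as the function in the record above.
    if k == 0:
        return logits
    values = torch.topk(logits, k)[0]
    batch_mins = values[:, -1].view(-1, 1).expand_as(logits)
    return torch.where(logits < batch_mins, torch.ones_like(logits) * -1e10, logits)

logits = torch.tensor([[2.0, 0.5, 1.0, -0.3]])
masked = top_k_logits(logits, k=2)
# Entries outside the top 2 become -1e10, so softmax drives them to ~0:
# roughly tensor([[2.0, -1e10, 1.0, -1e10]]), up to print formatting.
print(masked)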
train
load_tf_weights_in_bert
Load tf checkpoints in a pytorch model
pytorch_pretrained_bert/modeling.py
def load_tf_weights_in_bert(model, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) print("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m", "global_step"] for n in name): print("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif l[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, l[0]) except AttributeError: print("Skipping {}".format("/".join(name))) continue if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model
def load_tf_weights_in_bert(model, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) print("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m", "global_step"] for n in name): print("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif l[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, l[0]) except AttributeError: print("Skipping {}".format("/".join(name))) continue if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model
[ "Load", "tf", "checkpoints", "in", "a", "pytorch", "model" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling.py#L51-L115
[ "def", "load_tf_weights_in_bert", "(", "model", ",", "tf_checkpoint_path", ")", ":", "try", ":", "import", "re", "import", "numpy", "as", "np", "import", "tensorflow", "as", "tf", "except", "ImportError", ":", "print", "(", "\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see \"", "\"https://www.tensorflow.org/install/ for installation instructions.\"", ")", "raise", "tf_path", "=", "os", ".", "path", ".", "abspath", "(", "tf_checkpoint_path", ")", "print", "(", "\"Converting TensorFlow checkpoint from {}\"", ".", "format", "(", "tf_path", ")", ")", "# Load weights from TF model", "init_vars", "=", "tf", ".", "train", ".", "list_variables", "(", "tf_path", ")", "names", "=", "[", "]", "arrays", "=", "[", "]", "for", "name", ",", "shape", "in", "init_vars", ":", "print", "(", "\"Loading TF weight {} with shape {}\"", ".", "format", "(", "name", ",", "shape", ")", ")", "array", "=", "tf", ".", "train", ".", "load_variable", "(", "tf_path", ",", "name", ")", "names", ".", "append", "(", "name", ")", "arrays", ".", "append", "(", "array", ")", "for", "name", ",", "array", "in", "zip", "(", "names", ",", "arrays", ")", ":", "name", "=", "name", ".", "split", "(", "'/'", ")", "# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v", "# which are not required for using pretrained model", "if", "any", "(", "n", "in", "[", "\"adam_v\"", ",", "\"adam_m\"", ",", "\"global_step\"", "]", "for", "n", "in", "name", ")", ":", "print", "(", "\"Skipping {}\"", ".", "format", "(", "\"/\"", ".", "join", "(", "name", ")", ")", ")", "continue", "pointer", "=", "model", "for", "m_name", "in", "name", ":", "if", "re", ".", "fullmatch", "(", "r'[A-Za-z]+_\\d+'", ",", "m_name", ")", ":", "l", "=", "re", ".", "split", "(", "r'_(\\d+)'", ",", "m_name", ")", "else", ":", "l", "=", "[", "m_name", "]", "if", "l", "[", "0", "]", "==", "'kernel'", "or", "l", "[", "0", "]", "==", "'gamma'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'weight'", ")", "elif", "l", "[", "0", "]", "==", "'output_bias'", "or", "l", "[", "0", "]", "==", "'beta'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'bias'", ")", "elif", "l", "[", "0", "]", "==", "'output_weights'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'weight'", ")", "elif", "l", "[", "0", "]", "==", "'squad'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'classifier'", ")", "else", ":", "try", ":", "pointer", "=", "getattr", "(", "pointer", ",", "l", "[", "0", "]", ")", "except", "AttributeError", ":", "print", "(", "\"Skipping {}\"", ".", "format", "(", "\"/\"", ".", "join", "(", "name", ")", ")", ")", "continue", "if", "len", "(", "l", ")", ">=", "2", ":", "num", "=", "int", "(", "l", "[", "1", "]", ")", "pointer", "=", "pointer", "[", "num", "]", "if", "m_name", "[", "-", "11", ":", "]", "==", "'_embeddings'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'weight'", ")", "elif", "m_name", "==", "'kernel'", ":", "array", "=", "np", ".", "transpose", "(", "array", ")", "try", ":", "assert", "pointer", ".", "shape", "==", "array", ".", "shape", "except", "AssertionError", "as", "e", ":", "e", ".", "args", "+=", "(", "pointer", ".", "shape", ",", "array", ".", "shape", ")", "raise", "print", "(", "\"Initialize PyTorch weight {}\"", ".", "format", "(", "name", ")", ")", "pointer", ".", "data", "=", "torch", ".", "from_numpy", "(", "array", ")", "return", "model" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
BertPreTrainedModel.from_pretrained
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-large-cased` . `bert-base-multilingual-uncased` . `bert-base-multilingual-cased` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `model.chkpt` a TensorFlow checkpoint from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification)
pytorch_pretrained_bert/modeling.py
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): """ Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-large-cased` . `bert-base-multilingual-uncased` . `bert-base-multilingual-cased` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `model.chkpt` a TensorFlow checkpoint from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ state_dict = kwargs.get('state_dict', None) kwargs.pop('state_dict', None) cache_dir = kwargs.get('cache_dir', None) kwargs.pop('cache_dir', None) from_tf = kwargs.get('from_tf', False) kwargs.pop('from_tf', None) if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = pretrained_model_name_or_path # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file)) return None if resolved_archive_file == archive_file: logger.info("loading archive file {}".format(archive_file)) else: logger.info("loading archive file {} from cache at {}".format( archive_file, resolved_archive_file)) tempdir = None if os.path.isdir(resolved_archive_file) or from_tf: serialization_dir = resolved_archive_file else: # Extract archive to temp dir tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format( resolved_archive_file, tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) serialization_dir = tempdir # Load config config_file = os.path.join(serialization_dir, CONFIG_NAME) if not os.path.exists(config_file): # Backward compatibility with old naming format config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME) config = BertConfig.from_json_file(config_file) logger.info("Model config {}".format(config)) # Instantiate model. model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) state_dict = torch.load(weights_path, map_location='cpu') if tempdir: # Clean up temp dir shutil.rmtree(tempdir) if from_tf: # Directly load from a TensorFlow checkpoint weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) return load_tf_weights_in_bert(model, weights_path) # Load from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') start_prefix = '' if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()): start_prefix = 'bert.' load(model, prefix=start_prefix) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) return model
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): """ Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-large-cased` . `bert-base-multilingual-uncased` . `bert-base-multilingual-cased` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `model.chkpt` a TensorFlow checkpoint from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ state_dict = kwargs.get('state_dict', None) kwargs.pop('state_dict', None) cache_dir = kwargs.get('cache_dir', None) kwargs.pop('cache_dir', None) from_tf = kwargs.get('from_tf', False) kwargs.pop('from_tf', None) if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = pretrained_model_name_or_path # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file)) return None if resolved_archive_file == archive_file: logger.info("loading archive file {}".format(archive_file)) else: logger.info("loading archive file {} from cache at {}".format( archive_file, resolved_archive_file)) tempdir = None if os.path.isdir(resolved_archive_file) or from_tf: serialization_dir = resolved_archive_file else: # Extract archive to temp dir tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format( resolved_archive_file, tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) serialization_dir = tempdir # Load config config_file = os.path.join(serialization_dir, CONFIG_NAME) if not os.path.exists(config_file): # Backward compatibility with old naming format config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME) config = BertConfig.from_json_file(config_file) logger.info("Model config {}".format(config)) # Instantiate model. model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) state_dict = torch.load(weights_path, map_location='cpu') if tempdir: # Clean up temp dir shutil.rmtree(tempdir) if from_tf: # Directly load from a TensorFlow checkpoint weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) return load_tf_weights_in_bert(model, weights_path) # Load from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') start_prefix = '' if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()): start_prefix = 'bert.' load(model, prefix=start_prefix) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) return model
[ "Instantiate", "a", "BertPreTrainedModel", "from", "a", "pre", "-", "trained", "model", "file", "or", "a", "pytorch", "state", "dict", ".", "Download", "and", "cache", "the", "pre", "-", "trained", "model", "file", "if", "needed", "." ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling.py#L526-L655
[ "def", "from_pretrained", "(", "cls", ",", "pretrained_model_name_or_path", ",", "*", "inputs", ",", "*", "*", "kwargs", ")", ":", "state_dict", "=", "kwargs", ".", "get", "(", "'state_dict'", ",", "None", ")", "kwargs", ".", "pop", "(", "'state_dict'", ",", "None", ")", "cache_dir", "=", "kwargs", ".", "get", "(", "'cache_dir'", ",", "None", ")", "kwargs", ".", "pop", "(", "'cache_dir'", ",", "None", ")", "from_tf", "=", "kwargs", ".", "get", "(", "'from_tf'", ",", "False", ")", "kwargs", ".", "pop", "(", "'from_tf'", ",", "None", ")", "if", "pretrained_model_name_or_path", "in", "PRETRAINED_MODEL_ARCHIVE_MAP", ":", "archive_file", "=", "PRETRAINED_MODEL_ARCHIVE_MAP", "[", "pretrained_model_name_or_path", "]", "else", ":", "archive_file", "=", "pretrained_model_name_or_path", "# redirect to the cache, if necessary", "try", ":", "resolved_archive_file", "=", "cached_path", "(", "archive_file", ",", "cache_dir", "=", "cache_dir", ")", "except", "EnvironmentError", ":", "logger", ".", "error", "(", "\"Model name '{}' was not found in model name list ({}). \"", "\"We assumed '{}' was a path or url but couldn't find any file \"", "\"associated to this path or url.\"", ".", "format", "(", "pretrained_model_name_or_path", ",", "', '", ".", "join", "(", "PRETRAINED_MODEL_ARCHIVE_MAP", ".", "keys", "(", ")", ")", ",", "archive_file", ")", ")", "return", "None", "if", "resolved_archive_file", "==", "archive_file", ":", "logger", ".", "info", "(", "\"loading archive file {}\"", ".", "format", "(", "archive_file", ")", ")", "else", ":", "logger", ".", "info", "(", "\"loading archive file {} from cache at {}\"", ".", "format", "(", "archive_file", ",", "resolved_archive_file", ")", ")", "tempdir", "=", "None", "if", "os", ".", "path", ".", "isdir", "(", "resolved_archive_file", ")", "or", "from_tf", ":", "serialization_dir", "=", "resolved_archive_file", "else", ":", "# Extract archive to temp dir", "tempdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "logger", ".", "info", "(", "\"extracting archive file {} to temp dir {}\"", ".", "format", "(", "resolved_archive_file", ",", "tempdir", ")", ")", "with", "tarfile", ".", "open", "(", "resolved_archive_file", ",", "'r:gz'", ")", "as", "archive", ":", "archive", ".", "extractall", "(", "tempdir", ")", "serialization_dir", "=", "tempdir", "# Load config", "config_file", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "CONFIG_NAME", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "config_file", ")", ":", "# Backward compatibility with old naming format", "config_file", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "BERT_CONFIG_NAME", ")", "config", "=", "BertConfig", ".", "from_json_file", "(", "config_file", ")", "logger", ".", "info", "(", "\"Model config {}\"", ".", "format", "(", "config", ")", ")", "# Instantiate model.", "model", "=", "cls", "(", "config", ",", "*", "inputs", ",", "*", "*", "kwargs", ")", "if", "state_dict", "is", "None", "and", "not", "from_tf", ":", "weights_path", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "WEIGHTS_NAME", ")", "state_dict", "=", "torch", ".", "load", "(", "weights_path", ",", "map_location", "=", "'cpu'", ")", "if", "tempdir", ":", "# Clean up temp dir", "shutil", ".", "rmtree", "(", "tempdir", ")", "if", "from_tf", ":", "# Directly load from a TensorFlow checkpoint", "weights_path", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "TF_WEIGHTS_NAME", ")", "return", "load_tf_weights_in_bert", "(", "model", ",", "weights_path", ")", "# Load from a PyTorch state_dict", "old_keys", "=", "[", "]", "new_keys", "=", "[", "]", "for", "key", "in", "state_dict", ".", "keys", "(", ")", ":", "new_key", "=", "None", "if", "'gamma'", "in", "key", ":", "new_key", "=", "key", ".", "replace", "(", "'gamma'", ",", "'weight'", ")", "if", "'beta'", "in", "key", ":", "new_key", "=", "key", ".", "replace", "(", "'beta'", ",", "'bias'", ")", "if", "new_key", ":", "old_keys", ".", "append", "(", "key", ")", "new_keys", ".", "append", "(", "new_key", ")", "for", "old_key", ",", "new_key", "in", "zip", "(", "old_keys", ",", "new_keys", ")", ":", "state_dict", "[", "new_key", "]", "=", "state_dict", ".", "pop", "(", "old_key", ")", "missing_keys", "=", "[", "]", "unexpected_keys", "=", "[", "]", "error_msgs", "=", "[", "]", "# copy state_dict so _load_from_state_dict can modify it", "metadata", "=", "getattr", "(", "state_dict", ",", "'_metadata'", ",", "None", ")", "state_dict", "=", "state_dict", ".", "copy", "(", ")", "if", "metadata", "is", "not", "None", ":", "state_dict", ".", "_metadata", "=", "metadata", "def", "load", "(", "module", ",", "prefix", "=", "''", ")", ":", "local_metadata", "=", "{", "}", "if", "metadata", "is", "None", "else", "metadata", ".", "get", "(", "prefix", "[", ":", "-", "1", "]", ",", "{", "}", ")", "module", ".", "_load_from_state_dict", "(", "state_dict", ",", "prefix", ",", "local_metadata", ",", "True", ",", "missing_keys", ",", "unexpected_keys", ",", "error_msgs", ")", "for", "name", ",", "child", "in", "module", ".", "_modules", ".", "items", "(", ")", ":", "if", "child", "is", "not", "None", ":", "load", "(", "child", ",", "prefix", "+", "name", "+", "'.'", ")", "start_prefix", "=", "''", "if", "not", "hasattr", "(", "model", ",", "'bert'", ")", "and", "any", "(", "s", ".", "startswith", "(", "'bert.'", ")", "for", "s", "in", "state_dict", ".", "keys", "(", ")", ")", ":", "start_prefix", "=", "'bert.'", "load", "(", "model", ",", "prefix", "=", "start_prefix", ")", "if", "len", "(", "missing_keys", ")", ">", "0", ":", "logger", ".", "info", "(", "\"Weights of {} not initialized from pretrained model: {}\"", ".", "format", "(", "model", ".", "__class__", ".", "__name__", ",", "missing_keys", ")", ")", "if", "len", "(", "unexpected_keys", ")", ">", "0", ":", "logger", ".", "info", "(", "\"Weights from pretrained model not used in {}: {}\"", ".", "format", "(", "model", ".", "__class__", ".", "__name__", ",", "unexpected_keys", ")", ")", "if", "len", "(", "error_msgs", ")", ">", "0", ":", "raise", "RuntimeError", "(", "'Error(s) in loading state_dict for {}:\\n\\t{}'", ".", "format", "(", "model", ".", "__class__", ".", "__name__", ",", "\"\\n\\t\"", ".", "join", "(", "error_msgs", ")", ")", ")", "return", "model" ]
b832d5bb8a6dfc5965015b828e577677eace601e
train
load_tf_weights_in_openai_gpt
Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
pytorch_pretrained_bert/modeling_openai.py
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path): """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here) """ import re import numpy as np print("Loading weights...") names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8')) shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8')) offsets = np.cumsum([np.prod(shape) for shape in shapes]) init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)] init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1] init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)] # This was used when we had a single embedding matrix for positions and tokens # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0) # del init_params[1] init_params = [arr.squeeze() for arr in init_params] try: assert model.tokens_embed.weight.shape == init_params[1].shape assert model.positions_embed.weight.shape == init_params[0].shape except AssertionError as e: e.args += (model.tokens_embed.weight.shape, init_params[1].shape) e.args += (model.positions_embed.weight.shape, init_params[0].shape) raise model.tokens_embed.weight.data = torch.from_numpy(init_params[1]) model.positions_embed.weight.data = torch.from_numpy(init_params[0]) names.pop(0) # Pop position and token embedding arrays init_params.pop(0) init_params.pop(0) for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]): name = name[6:] # skip "model/" assert name[-2:] == ":0" name = name[:-2] name = name.split('/') pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+\d+', m_name): l = re.split(r'(\d+)', m_name) else: l = [m_name] if l[0] == 'g': pointer = getattr(pointer, 'weight') elif l[0] == 'b': pointer = getattr(pointer, 'bias') elif l[0] == 'w': pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path): """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here) """ import re import numpy as np print("Loading weights...") names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8')) shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8')) offsets = np.cumsum([np.prod(shape) for shape in shapes]) init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)] init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1] init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)] # This was used when we had a single embedding matrix for positions and tokens # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0) # del init_params[1] init_params = [arr.squeeze() for arr in init_params] try: assert model.tokens_embed.weight.shape == init_params[1].shape assert model.positions_embed.weight.shape == init_params[0].shape except AssertionError as e: e.args += (model.tokens_embed.weight.shape, init_params[1].shape) e.args += (model.positions_embed.weight.shape, init_params[0].shape) raise model.tokens_embed.weight.data = torch.from_numpy(init_params[1]) model.positions_embed.weight.data = torch.from_numpy(init_params[0]) names.pop(0) # Pop position and token embedding arrays init_params.pop(0) init_params.pop(0) for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]): name = name[6:] # skip "model/" assert name[-2:] == ":0" name = name[:-2] name = name.split('/') pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+\d+', m_name): l = re.split(r'(\d+)', m_name) else: l = [m_name] if l[0] == 'g': pointer = getattr(pointer, 'weight') elif l[0] == 'b': pointer = getattr(pointer, 'bias') elif l[0] == 'w': pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model
[ "Load", "tf", "pre", "-", "trained", "weights", "in", "a", "pytorch", "model", "(", "from", "NumPy", "arrays", "here", ")" ]
huggingface/pytorch-pretrained-BERT
python
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_openai.py#L46-L113
[ "def", "load_tf_weights_in_openai_gpt", "(", "model", ",", "openai_checkpoint_folder_path", ")", ":", "import", "re", "import", "numpy", "as", "np", "print", "(", "\"Loading weights...\"", ")", "names", "=", "json", ".", "load", "(", "open", "(", "openai_checkpoint_folder_path", "+", "'/parameters_names.json'", ",", "\"r\"", ",", "encoding", "=", "'utf-8'", ")", ")", "shapes", "=", "json", ".", "load", "(", "open", "(", "openai_checkpoint_folder_path", "+", "'/params_shapes.json'", ",", "\"r\"", ",", "encoding", "=", "'utf-8'", ")", ")", "offsets", "=", "np", ".", "cumsum", "(", "[", "np", ".", "prod", "(", "shape", ")", "for", "shape", "in", "shapes", "]", ")", "init_params", "=", "[", "np", ".", "load", "(", "openai_checkpoint_folder_path", "+", "'/params_{}.npy'", ".", "format", "(", "n", ")", ")", "for", "n", "in", "range", "(", "10", ")", "]", "init_params", "=", "np", ".", "split", "(", "np", ".", "concatenate", "(", "init_params", ",", "0", ")", ",", "offsets", ")", "[", ":", "-", "1", "]", "init_params", "=", "[", "param", ".", "reshape", "(", "shape", ")", "for", "param", ",", "shape", "in", "zip", "(", "init_params", ",", "shapes", ")", "]", "# This was used when we had a single embedding matrix for positions and tokens", "# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)", "# del init_params[1]", "init_params", "=", "[", "arr", ".", "squeeze", "(", ")", "for", "arr", "in", "init_params", "]", "try", ":", "assert", "model", ".", "tokens_embed", ".", "weight", ".", "shape", "==", "init_params", "[", "1", "]", ".", "shape", "assert", "model", ".", "positions_embed", ".", "weight", ".", "shape", "==", "init_params", "[", "0", "]", ".", "shape", "except", "AssertionError", "as", "e", ":", "e", ".", "args", "+=", "(", "model", ".", "tokens_embed", ".", "weight", ".", "shape", ",", "init_params", "[", "1", "]", ".", "shape", ")", "e", ".", "args", "+=", "(", "model", ".", "positions_embed", ".", "weight", ".", "shape", ",", "init_params", "[", "0", "]", ".", "shape", ")", "raise", "model", ".", "tokens_embed", ".", "weight", ".", "data", "=", "torch", ".", "from_numpy", "(", "init_params", "[", "1", "]", ")", "model", ".", "positions_embed", ".", "weight", ".", "data", "=", "torch", ".", "from_numpy", "(", "init_params", "[", "0", "]", ")", "names", ".", "pop", "(", "0", ")", "# Pop position and token embedding arrays", "init_params", ".", "pop", "(", "0", ")", "init_params", ".", "pop", "(", "0", ")", "for", "name", ",", "array", "in", "zip", "(", "names", ",", "init_params", ")", ":", "# names[1:n_transfer], init_params[1:n_transfer]):", "name", "=", "name", "[", "6", ":", "]", "# skip \"model/\"", "assert", "name", "[", "-", "2", ":", "]", "==", "\":0\"", "name", "=", "name", "[", ":", "-", "2", "]", "name", "=", "name", ".", "split", "(", "'/'", ")", "pointer", "=", "model", "for", "m_name", "in", "name", ":", "if", "re", ".", "fullmatch", "(", "r'[A-Za-z]+\\d+'", ",", "m_name", ")", ":", "l", "=", "re", ".", "split", "(", "r'(\\d+)'", ",", "m_name", ")", "else", ":", "l", "=", "[", "m_name", "]", "if", "l", "[", "0", "]", "==", "'g'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'weight'", ")", "elif", "l", "[", "0", "]", "==", "'b'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'bias'", ")", "elif", "l", "[", "0", "]", "==", "'w'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'weight'", ")", "else", ":", "pointer", "=", "getattr", "(", "pointer", ",", "l", "[", "0", "]", ")", "if", "len", "(", "l", ")", ">=", "2", ":", "num", "=", "int", "(", "l", "[", "1", "]", ")", "pointer", "=", "pointer", "[", "num", "]", "try", ":", "assert", "pointer", ".", "shape", "==", "array", ".", "shape", "except", "AssertionError", "as", "e", ":", "e", ".", "args", "+=", "(", "pointer", ".", "shape", ",", "array", ".", "shape", ")", "raise", "try", ":", "assert", "pointer", ".", "shape", "==", "array", ".", "shape", "except", "AssertionError", "as", "e", ":", "e", ".", "args", "+=", "(", "pointer", ".", "shape", ",", "array", ".", "shape", ")", "raise", "print", "(", "\"Initialize PyTorch weight {}\"", ".", "format", "(", "name", ")", ")", "pointer", ".", "data", "=", "torch", ".", "from_numpy", "(", "array", ")", "return", "model" ]
b832d5bb8a6dfc5965015b828e577677eace601e