| id (int32, 0–252k) | repo (string, 7–55) | path (string, 4–127) | func_name (string, 1–88) | original_string (string, 75–19.8k) | language (1 class) | code (string, 75–19.8k) | code_tokens (list) | docstring (string, 3–17.3k) | docstring_tokens (list) | sha (string, 40) | url (string, 87–242) |
|---|---|---|---|---|---|---|---|---|---|---|---|
18,900
|
basho/riak-python-client
|
riak/mapreduce.py
|
RiakMapReduce.reduce_slice
|
def reduce_slice(self, start, end, options=None):
"""
Adds the Javascript built-in ``Riak.reduceSlice`` to the
query as a reduce phase.
:param start: the beginning of the slice
:type start: integer
:param end: the end of the slice
:type end: integer
:param options: phase options, containing 'language', 'keep'
flag, and/or 'arg'.
:type options: dict
"""
if options is None:
options = dict()
options['arg'] = [start, end]
return self.reduce("Riak.reduceSlice", options=options)
|
python
|
def reduce_slice(self, start, end, options=None):
"""
Adds the Javascript built-in ``Riak.reduceSlice`` to the
query as a reduce phase.
:param start: the beginning of the slice
:type start: integer
:param end: the end of the slice
:type end: integer
:param options: phase options, containing 'language', 'keep'
flag, and/or 'arg'.
:type options: dict
"""
if options is None:
options = dict()
options['arg'] = [start, end]
return self.reduce("Riak.reduceSlice", options=options)
|
[
"def",
"reduce_slice",
"(",
"self",
",",
"start",
",",
"end",
",",
"options",
"=",
"None",
")",
":",
"if",
"options",
"is",
"None",
":",
"options",
"=",
"dict",
"(",
")",
"options",
"[",
"'arg'",
"]",
"=",
"[",
"start",
",",
"end",
"]",
"return",
"self",
".",
"reduce",
"(",
"\"Riak.reduceSlice\"",
",",
"options",
"=",
"options",
")"
] |
Adds the Javascript built-in ``Riak.reduceSlice`` to the
query as a reduce phase.
:param start: the beginning of the slice
:type start: integer
:param end: the end of the slice
:type end: integer
:param options: phase options, containing 'language', 'keep'
flag, and/or 'arg'.
:type options: dict
|
[
"Adds",
"the",
"Javascript",
"built",
"-",
"in",
"Riak",
".",
"reduceSlice",
"to",
"the",
"query",
"as",
"a",
"reduce",
"phase",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/mapreduce.py#L500-L517
|
18,901
|
basho/riak-python-client
|
riak/mapreduce.py
|
RiakMapReducePhase.to_array
|
def to_array(self):
"""
Convert the RiakMapReducePhase to a format that can be output
into JSON. Used internally.
:rtype: dict
"""
stepdef = {'keep': self._keep,
'language': self._language,
'arg': self._arg}
if self._language == 'javascript':
if isinstance(self._function, list):
stepdef['bucket'] = self._function[0]
stepdef['key'] = self._function[1]
elif isinstance(self._function, string_types):
if ("{" in self._function):
stepdef['source'] = self._function
else:
stepdef['name'] = self._function
elif (self._language == 'erlang' and isinstance(self._function, list)):
stepdef['module'] = self._function[0]
stepdef['function'] = self._function[1]
elif (self._language == 'erlang' and
isinstance(self._function, string_types)):
stepdef['source'] = self._function
return {self._type: stepdef}
|
python
|
def to_array(self):
"""
Convert the RiakMapReducePhase to a format that can be output
into JSON. Used internally.
:rtype: dict
"""
stepdef = {'keep': self._keep,
'language': self._language,
'arg': self._arg}
if self._language == 'javascript':
if isinstance(self._function, list):
stepdef['bucket'] = self._function[0]
stepdef['key'] = self._function[1]
elif isinstance(self._function, string_types):
if ("{" in self._function):
stepdef['source'] = self._function
else:
stepdef['name'] = self._function
elif (self._language == 'erlang' and isinstance(self._function, list)):
stepdef['module'] = self._function[0]
stepdef['function'] = self._function[1]
elif (self._language == 'erlang' and
isinstance(self._function, string_types)):
stepdef['source'] = self._function
return {self._type: stepdef}
|
[
"def",
"to_array",
"(",
"self",
")",
":",
"stepdef",
"=",
"{",
"'keep'",
":",
"self",
".",
"_keep",
",",
"'language'",
":",
"self",
".",
"_language",
",",
"'arg'",
":",
"self",
".",
"_arg",
"}",
"if",
"self",
".",
"_language",
"==",
"'javascript'",
":",
"if",
"isinstance",
"(",
"self",
".",
"_function",
",",
"list",
")",
":",
"stepdef",
"[",
"'bucket'",
"]",
"=",
"self",
".",
"_function",
"[",
"0",
"]",
"stepdef",
"[",
"'key'",
"]",
"=",
"self",
".",
"_function",
"[",
"1",
"]",
"elif",
"isinstance",
"(",
"self",
".",
"_function",
",",
"string_types",
")",
":",
"if",
"(",
"\"{\"",
"in",
"self",
".",
"_function",
")",
":",
"stepdef",
"[",
"'source'",
"]",
"=",
"self",
".",
"_function",
"else",
":",
"stepdef",
"[",
"'name'",
"]",
"=",
"self",
".",
"_function",
"elif",
"(",
"self",
".",
"_language",
"==",
"'erlang'",
"and",
"isinstance",
"(",
"self",
".",
"_function",
",",
"list",
")",
")",
":",
"stepdef",
"[",
"'module'",
"]",
"=",
"self",
".",
"_function",
"[",
"0",
"]",
"stepdef",
"[",
"'function'",
"]",
"=",
"self",
".",
"_function",
"[",
"1",
"]",
"elif",
"(",
"self",
".",
"_language",
"==",
"'erlang'",
"and",
"isinstance",
"(",
"self",
".",
"_function",
",",
"string_types",
")",
")",
":",
"stepdef",
"[",
"'source'",
"]",
"=",
"self",
".",
"_function",
"return",
"{",
"self",
".",
"_type",
":",
"stepdef",
"}"
] |
Convert the RiakMapReducePhase to a format that can be output
into JSON. Used internally.
:rtype: dict
|
[
"Convert",
"the",
"RiakMapReducePhase",
"to",
"a",
"format",
"that",
"can",
"be",
"output",
"into",
"JSON",
".",
"Used",
"internally",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/mapreduce.py#L569-L598
|
18,902
|
basho/riak-python-client
|
riak/mapreduce.py
|
RiakLinkPhase.to_array
|
def to_array(self):
"""
Convert the RiakLinkPhase to a format that can be output into
JSON. Used internally.
"""
stepdef = {'bucket': self._bucket,
'tag': self._tag,
'keep': self._keep}
return {'link': stepdef}
|
python
|
def to_array(self):
"""
Convert the RiakLinkPhase to a format that can be output into
JSON. Used internally.
"""
stepdef = {'bucket': self._bucket,
'tag': self._tag,
'keep': self._keep}
return {'link': stepdef}
|
[
"def",
"to_array",
"(",
"self",
")",
":",
"stepdef",
"=",
"{",
"'bucket'",
":",
"self",
".",
"_bucket",
",",
"'tag'",
":",
"self",
".",
"_tag",
",",
"'keep'",
":",
"self",
".",
"_keep",
"}",
"return",
"{",
"'link'",
":",
"stepdef",
"}"
] |
Convert the RiakLinkPhase to a format that can be output into
JSON. Used internally.
|
[
"Convert",
"the",
"RiakLinkPhase",
"to",
"a",
"format",
"that",
"can",
"be",
"output",
"into",
"JSON",
".",
"Used",
"internally",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/mapreduce.py#L626-L634
|
18,903
|
basho/riak-python-client
|
riak/resolver.py
|
last_written_resolver
|
def last_written_resolver(riak_object):
"""
A conflict-resolution function that resolves by selecting the most
recently-modified sibling by timestamp.
:param riak_object: an object-in-conflict that will be resolved
:type riak_object: :class:`RiakObject <riak.riak_object.RiakObject>`
"""
riak_object.siblings = [max(riak_object.siblings,
key=lambda x: x.last_modified), ]
|
python
|
def last_written_resolver(riak_object):
"""
A conflict-resolution function that resolves by selecting the most
recently-modified sibling by timestamp.
:param riak_object: an object-in-conflict that will be resolved
:type riak_object: :class:`RiakObject <riak.riak_object.RiakObject>`
"""
riak_object.siblings = [max(riak_object.siblings,
key=lambda x: x.last_modified), ]
|
[
"def",
"last_written_resolver",
"(",
"riak_object",
")",
":",
"riak_object",
".",
"siblings",
"=",
"[",
"max",
"(",
"riak_object",
".",
"siblings",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"last_modified",
")",
",",
"]"
] |
A conflict-resolution function that resolves by selecting the most
recently-modified sibling by timestamp.
:param riak_object: an object-in-conflict that will be resolved
:type riak_object: :class:`RiakObject <riak.riak_object.RiakObject>`
|
[
"A",
"conflict",
"-",
"resolution",
"function",
"that",
"resolves",
"by",
"selecting",
"the",
"most",
"recently",
"-",
"modified",
"sibling",
"by",
"timestamp",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/resolver.py#L31-L40
|
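The row above shows `last_written_resolver` collapsing `riak_object.siblings` to the single most recently modified sibling. As a hedged usage sketch (not part of this dataset row; it assumes the client's buckets accept a `resolver` attribute and uses placeholder bucket and key names):

```python
# Hypothetical wiring of the resolver shown above; 'mybucket' and
# 'conflicted-key' are placeholders, and bucket.resolver is an assumed
# per-bucket hook rather than something taken from this row.
from riak import RiakClient
from riak.resolver import last_written_resolver

client = RiakClient()
bucket = client.bucket('mybucket')
bucket.resolver = last_written_resolver  # pick the newest sibling on conflict

obj = bucket.get('conflicted-key')       # siblings, if any, resolve to one value
print(obj.data)
```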
18,904
|
basho/riak-python-client
|
riak/transports/security.py
|
verify_cb
|
def verify_cb(conn, cert, errnum, depth, ok):
"""
The default OpenSSL certificate verification callback.
"""
if not ok:
raise SecurityError("Could not verify CA certificate {0}"
.format(cert.get_subject()))
return ok
|
python
|
def verify_cb(conn, cert, errnum, depth, ok):
"""
The default OpenSSL certificate verification callback.
"""
if not ok:
raise SecurityError("Could not verify CA certificate {0}"
.format(cert.get_subject()))
return ok
|
[
"def",
"verify_cb",
"(",
"conn",
",",
"cert",
",",
"errnum",
",",
"depth",
",",
"ok",
")",
":",
"if",
"not",
"ok",
":",
"raise",
"SecurityError",
"(",
"\"Could not verify CA certificate {0}\"",
".",
"format",
"(",
"cert",
".",
"get_subject",
"(",
")",
")",
")",
"return",
"ok"
] |
The default OpenSSL certificate verification callback.
|
[
"The",
"default",
"OpenSSL",
"certificate",
"verification",
"callback",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/security.py#L27-L34
|
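`verify_cb` has the five-argument shape that pyOpenSSL expects for a certificate-verification callback. A minimal illustration of that wiring, assuming pyOpenSSL is available (the context setup below is illustrative and not taken from this repository):

```python
# Sketch: registering a verify_cb-style callback with pyOpenSSL.
# The context construction here is an assumption, not the riak client's own code.
from OpenSSL import SSL

def verify_cb(conn, cert, errnum, depth, ok):
    # OpenSSL passes a falsy 'ok' when the certificate failed validation.
    if not ok:
        raise RuntimeError("Could not verify CA certificate {0}"
                           .format(cert.get_subject()))
    return ok

ctx = SSL.Context(SSL.TLSv1_2_METHOD)
ctx.set_verify(SSL.VERIFY_PEER, verify_cb)  # called once per certificate in the chain
```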
18,905
|
basho/riak-python-client
|
riak/client/index_page.py
|
IndexPage.next_page
|
def next_page(self, timeout=None, stream=None):
"""
Fetches the next page using the same parameters as the
original query.
Note that if streaming was used before, it will be used again
unless overridden.
:param stream: whether to enable streaming. `True` enables,
`False` disables, `None` uses previous value.
:type stream: boolean
:param timeout: a timeout value in milliseconds, or 'infinity'
:type timeout: int
"""
if not self.continuation:
raise ValueError("Cannot get next index page, no continuation")
if stream is not None:
self.stream = stream
args = {'bucket': self.bucket,
'index': self.index,
'startkey': self.startkey,
'endkey': self.endkey,
'return_terms': self.return_terms,
'max_results': self.max_results,
'continuation': self.continuation,
'timeout': timeout,
'term_regex': self.term_regex}
if self.stream:
return self.client.stream_index(**args)
else:
return self.client.get_index(**args)
|
python
|
def next_page(self, timeout=None, stream=None):
"""
Fetches the next page using the same parameters as the
original query.
Note that if streaming was used before, it will be used again
unless overridden.
:param stream: whether to enable streaming. `True` enables,
`False` disables, `None` uses previous value.
:type stream: boolean
:param timeout: a timeout value in milliseconds, or 'infinity'
:type timeout: int
"""
if not self.continuation:
raise ValueError("Cannot get next index page, no continuation")
if stream is not None:
self.stream = stream
args = {'bucket': self.bucket,
'index': self.index,
'startkey': self.startkey,
'endkey': self.endkey,
'return_terms': self.return_terms,
'max_results': self.max_results,
'continuation': self.continuation,
'timeout': timeout,
'term_regex': self.term_regex}
if self.stream:
return self.client.stream_index(**args)
else:
return self.client.get_index(**args)
|
[
"def",
"next_page",
"(",
"self",
",",
"timeout",
"=",
"None",
",",
"stream",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"continuation",
":",
"raise",
"ValueError",
"(",
"\"Cannot get next index page, no continuation\"",
")",
"if",
"stream",
"is",
"not",
"None",
":",
"self",
".",
"stream",
"=",
"stream",
"args",
"=",
"{",
"'bucket'",
":",
"self",
".",
"bucket",
",",
"'index'",
":",
"self",
".",
"index",
",",
"'startkey'",
":",
"self",
".",
"startkey",
",",
"'endkey'",
":",
"self",
".",
"endkey",
",",
"'return_terms'",
":",
"self",
".",
"return_terms",
",",
"'max_results'",
":",
"self",
".",
"max_results",
",",
"'continuation'",
":",
"self",
".",
"continuation",
",",
"'timeout'",
":",
"timeout",
",",
"'term_regex'",
":",
"self",
".",
"term_regex",
"}",
"if",
"self",
".",
"stream",
":",
"return",
"self",
".",
"client",
".",
"stream_index",
"(",
"*",
"*",
"args",
")",
"else",
":",
"return",
"self",
".",
"client",
".",
"get_index",
"(",
"*",
"*",
"args",
")"
] |
Fetches the next page using the same parameters as the
original query.
Note that if streaming was used before, it will be used again
unless overridden.
:param stream: whether to enable streaming. `True` enables,
`False` disables, `None` uses previous value.
:type stream: boolean
:param timeout: a timeout value in milliseconds, or 'infinity'
:type timeout: int
|
[
"Fetches",
"the",
"next",
"page",
"using",
"the",
"same",
"parameters",
"as",
"the",
"original",
"query",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/index_page.py#L117-L150
|
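`next_page` re-issues the original secondary-index query with the stored continuation, so the usual pattern is to loop until no continuation remains. A hedged sketch (placeholder bucket and index names; assumes `client.get_index` returns an `IndexPage` like the one above):

```python
# Hypothetical pagination loop over a secondary index.
# 'users' and 'name_bin' are placeholder names; do_something() is a stand-in.
page = client.get_index(client.bucket('users'), 'name_bin',
                        'A', 'Z', max_results=100)
while True:
    for key in page:              # iterate the current page of results
        do_something(key)
    if not page.continuation:     # no continuation means this was the last page
        break
    page = page.next_page()       # same query parameters, next slice of results
```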
18,906
|
basho/riak-python-client
|
riak/client/operations.py
|
_validate_timeout
|
def _validate_timeout(timeout, infinity_ok=False):
"""
Raises an exception if the given timeout is an invalid value.
"""
if timeout is None:
return
if timeout == 'infinity':
if infinity_ok:
return
else:
raise ValueError(
'timeout must be a positive integer '
'("infinity" is not valid)')
if isinstance(timeout, six.integer_types) and timeout > 0:
return
raise ValueError('timeout must be a positive integer')
|
python
|
def _validate_timeout(timeout, infinity_ok=False):
"""
Raises an exception if the given timeout is an invalid value.
"""
if timeout is None:
return
if timeout == 'infinity':
if infinity_ok:
return
else:
raise ValueError(
'timeout must be a positive integer '
'("infinity" is not valid)')
if isinstance(timeout, six.integer_types) and timeout > 0:
return
raise ValueError('timeout must be a positive integer')
|
[
"def",
"_validate_timeout",
"(",
"timeout",
",",
"infinity_ok",
"=",
"False",
")",
":",
"if",
"timeout",
"is",
"None",
":",
"return",
"if",
"timeout",
"==",
"'infinity'",
":",
"if",
"infinity_ok",
":",
"return",
"else",
":",
"raise",
"ValueError",
"(",
"'timeout must be a positive integer '",
"'(\"infinity\" is not valid)'",
")",
"if",
"isinstance",
"(",
"timeout",
",",
"six",
".",
"integer_types",
")",
"and",
"timeout",
">",
"0",
":",
"return",
"raise",
"ValueError",
"(",
"'timeout must be a positive integer'",
")"
] |
Raises an exception if the given timeout is an invalid value.
|
[
"Raises",
"an",
"exception",
"if",
"the",
"given",
"timeout",
"is",
"an",
"invalid",
"value",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L1270-L1288
|
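The helper above accepts `None`, positive integers, and (only when `infinity_ok=True`) the string `'infinity'`; everything else raises `ValueError`. A short sketch of those rules, calling the private helper directly purely for illustration:

```python
# Illustration only: _validate_timeout is a private module-level helper in
# riak/client/operations.py, so real code would not normally call it directly.
from riak.client.operations import _validate_timeout

_validate_timeout(None)                           # accepted: no timeout requested
_validate_timeout(5000)                           # accepted: positive integer (milliseconds)
_validate_timeout('infinity', infinity_ok=True)   # accepted only when explicitly allowed

for bad in (0, -1, 'infinity', 3.5):
    try:
        _validate_timeout(bad)                    # rejected: raises ValueError
    except ValueError as exc:
        print(bad, '->', exc)
```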
18,907
|
basho/riak-python-client
|
riak/client/operations.py
|
RiakClientOperations.stream_buckets
|
def stream_buckets(self, bucket_type=None, timeout=None):
"""
Streams the list of buckets. This is a generator method that
should be iterated over.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might not be
returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.stream_buckets()) as buckets:
for bucket_list in buckets:
do_something(bucket_list)
# Explicit close()
stream = client.stream_buckets()
for bucket_list in stream:
do_something(bucket_list)
stream.close()
:param bucket_type: the optional containing bucket type
:type bucket_type: :class:`~riak.bucket.BucketType`
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator that yields lists of :class:`RiakBucket
<riak.bucket.RiakBucket>` instances
"""
if not riak.disable_list_exceptions:
raise ListError()
_validate_timeout(timeout)
if bucket_type:
bucketfn = self._bucket_type_bucket_builder
else:
bucketfn = self._default_type_bucket_builder
def make_op(transport):
return transport.stream_buckets(
bucket_type=bucket_type, timeout=timeout)
for bucket_list in self._stream_with_retry(make_op):
bucket_list = [bucketfn(bytes_to_str(name), bucket_type)
for name in bucket_list]
if len(bucket_list) > 0:
yield bucket_list
|
python
|
def stream_buckets(self, bucket_type=None, timeout=None):
"""
Streams the list of buckets. This is a generator method that
should be iterated over.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might not be
returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.stream_buckets()) as buckets:
for bucket_list in buckets:
do_something(bucket_list)
# Explicit close()
stream = client.stream_buckets()
for bucket_list in stream:
do_something(bucket_list)
stream.close()
:param bucket_type: the optional containing bucket type
:type bucket_type: :class:`~riak.bucket.BucketType`
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator that yields lists of :class:`RiakBucket
<riak.bucket.RiakBucket>` instances
"""
if not riak.disable_list_exceptions:
raise ListError()
_validate_timeout(timeout)
if bucket_type:
bucketfn = self._bucket_type_bucket_builder
else:
bucketfn = self._default_type_bucket_builder
def make_op(transport):
return transport.stream_buckets(
bucket_type=bucket_type, timeout=timeout)
for bucket_list in self._stream_with_retry(make_op):
bucket_list = [bucketfn(bytes_to_str(name), bucket_type)
for name in bucket_list]
if len(bucket_list) > 0:
yield bucket_list
|
[
"def",
"stream_buckets",
"(",
"self",
",",
"bucket_type",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"riak",
".",
"disable_list_exceptions",
":",
"raise",
"ListError",
"(",
")",
"_validate_timeout",
"(",
"timeout",
")",
"if",
"bucket_type",
":",
"bucketfn",
"=",
"self",
".",
"_bucket_type_bucket_builder",
"else",
":",
"bucketfn",
"=",
"self",
".",
"_default_type_bucket_builder",
"def",
"make_op",
"(",
"transport",
")",
":",
"return",
"transport",
".",
"stream_buckets",
"(",
"bucket_type",
"=",
"bucket_type",
",",
"timeout",
"=",
"timeout",
")",
"for",
"bucket_list",
"in",
"self",
".",
"_stream_with_retry",
"(",
"make_op",
")",
":",
"bucket_list",
"=",
"[",
"bucketfn",
"(",
"bytes_to_str",
"(",
"name",
")",
",",
"bucket_type",
")",
"for",
"name",
"in",
"bucket_list",
"]",
"if",
"len",
"(",
"bucket_list",
")",
">",
"0",
":",
"yield",
"bucket_list"
] |
Streams the list of buckets. This is a generator method that
should be iterated over.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might not be
returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.stream_buckets()) as buckets:
for bucket_list in buckets:
do_something(bucket_list)
# Explicit close()
stream = client.stream_buckets()
for bucket_list in stream:
do_something(bucket_list)
stream.close()
:param bucket_type: the optional containing bucket type
:type bucket_type: :class:`~riak.bucket.BucketType`
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator that yields lists of :class:`RiakBucket
<riak.bucket.RiakBucket>` instances
|
[
"Streams",
"the",
"list",
"of",
"buckets",
".",
"This",
"is",
"a",
"generator",
"method",
"that",
"should",
"be",
"iterated",
"over",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L72-L125
|
18,908
|
basho/riak-python-client
|
riak/client/operations.py
|
RiakClientOperations.stream_index
|
def stream_index(self, bucket, index, startkey, endkey=None,
return_terms=None, max_results=None, continuation=None,
timeout=None, term_regex=None):
"""
Queries a secondary index, streaming matching keys through an
iterator.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might not be
returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.stream_index(mybucket, 'name_bin',
'Smith')) as index:
for key in index:
do_something(key)
# Explicit close()
stream = client.stream_index(mybucket, 'name_bin', 'Smith')
for key in stream:
do_something(key)
stream.close()
:param bucket: the bucket whose index will be queried
:type bucket: RiakBucket
:param index: the index to query
:type index: string
:param startkey: the sole key to query, or beginning of the query range
:type startkey: string, integer
:param endkey: the end of the query range (optional if equality)
:type endkey: string, integer
:param return_terms: whether to include the secondary index value
:type return_terms: boolean
:param max_results: the maximum number of results to return (page size)
:type max_results: integer
:param continuation: the opaque continuation returned from a
previous paginated request
:type continuation: string
:param timeout: a timeout value in milliseconds, or 'infinity'
:type timeout: int
:param term_regex: a regular expression used to filter index terms
:type term_regex: string
:rtype: :class:`~riak.client.index_page.IndexPage`
"""
# TODO FUTURE: implement "retry on connection closed"
# as in stream_mapred
_validate_timeout(timeout, infinity_ok=True)
page = IndexPage(self, bucket, index, startkey, endkey,
return_terms, max_results, term_regex)
page.stream = True
resource = self._acquire()
transport = resource.object
page.results = transport.stream_index(
bucket, index, startkey, endkey, return_terms=return_terms,
max_results=max_results, continuation=continuation,
timeout=timeout, term_regex=term_regex)
page.results.attach(resource)
return page
|
python
|
def stream_index(self, bucket, index, startkey, endkey=None,
return_terms=None, max_results=None, continuation=None,
timeout=None, term_regex=None):
"""
Queries a secondary index, streaming matching keys through an
iterator.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might not be
returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.stream_index(mybucket, 'name_bin',
'Smith')) as index:
for key in index:
do_something(key)
# Explicit close()
stream = client.stream_index(mybucket, 'name_bin', 'Smith')
for key in stream:
do_something(key)
stream.close()
:param bucket: the bucket whose index will be queried
:type bucket: RiakBucket
:param index: the index to query
:type index: string
:param startkey: the sole key to query, or beginning of the query range
:type startkey: string, integer
:param endkey: the end of the query range (optional if equality)
:type endkey: string, integer
:param return_terms: whether to include the secondary index value
:type return_terms: boolean
:param max_results: the maximum number of results to return (page size)
:type max_results: integer
:param continuation: the opaque continuation returned from a
previous paginated request
:type continuation: string
:param timeout: a timeout value in milliseconds, or 'infinity'
:type timeout: int
:param term_regex: a regular expression used to filter index terms
:type term_regex: string
:rtype: :class:`~riak.client.index_page.IndexPage`
"""
# TODO FUTURE: implement "retry on connection closed"
# as in stream_mapred
_validate_timeout(timeout, infinity_ok=True)
page = IndexPage(self, bucket, index, startkey, endkey,
return_terms, max_results, term_regex)
page.stream = True
resource = self._acquire()
transport = resource.object
page.results = transport.stream_index(
bucket, index, startkey, endkey, return_terms=return_terms,
max_results=max_results, continuation=continuation,
timeout=timeout, term_regex=term_regex)
page.results.attach(resource)
return page
|
[
"def",
"stream_index",
"(",
"self",
",",
"bucket",
",",
"index",
",",
"startkey",
",",
"endkey",
"=",
"None",
",",
"return_terms",
"=",
"None",
",",
"max_results",
"=",
"None",
",",
"continuation",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"term_regex",
"=",
"None",
")",
":",
"# TODO FUTURE: implement \"retry on connection closed\"",
"# as in stream_mapred",
"_validate_timeout",
"(",
"timeout",
",",
"infinity_ok",
"=",
"True",
")",
"page",
"=",
"IndexPage",
"(",
"self",
",",
"bucket",
",",
"index",
",",
"startkey",
",",
"endkey",
",",
"return_terms",
",",
"max_results",
",",
"term_regex",
")",
"page",
".",
"stream",
"=",
"True",
"resource",
"=",
"self",
".",
"_acquire",
"(",
")",
"transport",
"=",
"resource",
".",
"object",
"page",
".",
"results",
"=",
"transport",
".",
"stream_index",
"(",
"bucket",
",",
"index",
",",
"startkey",
",",
"endkey",
",",
"return_terms",
"=",
"return_terms",
",",
"max_results",
"=",
"max_results",
",",
"continuation",
"=",
"continuation",
",",
"timeout",
"=",
"timeout",
",",
"term_regex",
"=",
"term_regex",
")",
"page",
".",
"results",
".",
"attach",
"(",
"resource",
")",
"return",
"page"
] |
Queries a secondary index, streaming matching keys through an
iterator.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might not be
returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.stream_index(mybucket, 'name_bin',
'Smith')) as index:
for key in index:
do_something(key)
# Explicit close()
stream = client.stream_index(mybucket, 'name_bin', 'Smith')
for key in stream:
do_something(key)
stream.close()
:param bucket: the bucket whose index will be queried
:type bucket: RiakBucket
:param index: the index to query
:type index: string
:param startkey: the sole key to query, or beginning of the query range
:type startkey: string, integer
:param endkey: the end of the query range (optional if equality)
:type endkey: string, integer
:param return_terms: whether to include the secondary index value
:type return_terms: boolean
:param max_results: the maximum number of results to return (page size)
:type max_results: integer
:param continuation: the opaque continuation returned from a
previous paginated request
:type continuation: string
:param timeout: a timeout value in milliseconds, or 'infinity'
:type timeout: int
:param term_regex: a regular expression used to filter index terms
:type term_regex: string
:rtype: :class:`~riak.client.index_page.IndexPage`
|
[
"Queries",
"a",
"secondary",
"index",
"streaming",
"matching",
"keys",
"through",
"an",
"iterator",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L238-L301
|
18,909
|
basho/riak-python-client
|
riak/client/operations.py
|
RiakClientOperations.stream_keys
|
def stream_keys(self, bucket, timeout=None):
"""
Lists all keys in a bucket via a stream. This is a generator
method which should be iterated over.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might
not be returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.stream_keys(mybucket)) as keys:
for key_list in keys:
do_something(key_list)
# Explicit close()
stream = client.stream_keys(mybucket)
for key_list in stream:
do_something(key_list)
stream.close()
:param bucket: the bucket whose keys will be streamed
:type bucket: RiakBucket
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator
"""
if not riak.disable_list_exceptions:
raise ListError()
_validate_timeout(timeout)
def make_op(transport):
return transport.stream_keys(bucket, timeout=timeout)
for keylist in self._stream_with_retry(make_op):
if len(keylist) > 0:
if six.PY2:
yield keylist
else:
yield [bytes_to_str(item) for item in keylist]
|
python
|
def stream_keys(self, bucket, timeout=None):
"""
Lists all keys in a bucket via a stream. This is a generator
method which should be iterated over.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might
not be returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.stream_keys(mybucket)) as keys:
for key_list in keys:
do_something(key_list)
# Explicit close()
stream = client.stream_keys(mybucket)
for key_list in stream:
do_something(key_list)
stream.close()
:param bucket: the bucket whose keys will be streamed
:type bucket: RiakBucket
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator
"""
if not riak.disable_list_exceptions:
raise ListError()
_validate_timeout(timeout)
def make_op(transport):
return transport.stream_keys(bucket, timeout=timeout)
for keylist in self._stream_with_retry(make_op):
if len(keylist) > 0:
if six.PY2:
yield keylist
else:
yield [bytes_to_str(item) for item in keylist]
|
[
"def",
"stream_keys",
"(",
"self",
",",
"bucket",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"riak",
".",
"disable_list_exceptions",
":",
"raise",
"ListError",
"(",
")",
"_validate_timeout",
"(",
"timeout",
")",
"def",
"make_op",
"(",
"transport",
")",
":",
"return",
"transport",
".",
"stream_keys",
"(",
"bucket",
",",
"timeout",
"=",
"timeout",
")",
"for",
"keylist",
"in",
"self",
".",
"_stream_with_retry",
"(",
"make_op",
")",
":",
"if",
"len",
"(",
"keylist",
")",
">",
"0",
":",
"if",
"six",
".",
"PY2",
":",
"yield",
"keylist",
"else",
":",
"yield",
"[",
"bytes_to_str",
"(",
"item",
")",
"for",
"item",
"in",
"keylist",
"]"
] |
Lists all keys in a bucket via a stream. This is a generator
method which should be iterated over.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might
not be returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.stream_keys(mybucket)) as keys:
for key_list in keys:
do_something(key_list)
# Explicit close()
stream = client.stream_keys(mybucket)
for key_list in stream:
do_something(key_list)
stream.close()
:param bucket: the bucket whose keys will be streamed
:type bucket: RiakBucket
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator
|
[
"Lists",
"all",
"keys",
"in",
"a",
"bucket",
"via",
"a",
"stream",
".",
"This",
"is",
"a",
"generator",
"method",
"which",
"should",
"be",
"iterated",
"over",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L484-L530
|
18,910
|
basho/riak-python-client
|
riak/client/operations.py
|
RiakClientOperations.ts_stream_keys
|
def ts_stream_keys(self, table, timeout=None):
"""
Lists all keys in a time series table via a stream. This is a
generator method which should be iterated over.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might
not be returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.ts_stream_keys(mytable)) as keys:
for key_list in keys:
do_something(key_list)
# Explicit close()
stream = client.ts_stream_keys(mytable)
for key_list in stream:
do_something(key_list)
stream.close()
:param table: the table from which to stream keys
:type table: string or :class:`Table <riak.table.Table>`
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator
"""
if not riak.disable_list_exceptions:
raise ListError()
t = table
if isinstance(t, six.string_types):
t = Table(self, table)
_validate_timeout(timeout)
resource = self._acquire()
transport = resource.object
stream = transport.ts_stream_keys(t, timeout)
stream.attach(resource)
try:
for keylist in stream:
if len(keylist) > 0:
yield keylist
finally:
stream.close()
|
python
|
def ts_stream_keys(self, table, timeout=None):
"""
Lists all keys in a time series table via a stream. This is a
generator method which should be iterated over.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might
not be returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.ts_stream_keys(mytable)) as keys:
for key_list in keys:
do_something(key_list)
# Explicit close()
stream = client.ts_stream_keys(mytable)
for key_list in stream:
do_something(key_list)
stream.close()
:param table: the table from which to stream keys
:type table: string or :class:`Table <riak.table.Table>`
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator
"""
if not riak.disable_list_exceptions:
raise ListError()
t = table
if isinstance(t, six.string_types):
t = Table(self, table)
_validate_timeout(timeout)
resource = self._acquire()
transport = resource.object
stream = transport.ts_stream_keys(t, timeout)
stream.attach(resource)
try:
for keylist in stream:
if len(keylist) > 0:
yield keylist
finally:
stream.close()
|
[
"def",
"ts_stream_keys",
"(",
"self",
",",
"table",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"riak",
".",
"disable_list_exceptions",
":",
"raise",
"ListError",
"(",
")",
"t",
"=",
"table",
"if",
"isinstance",
"(",
"t",
",",
"six",
".",
"string_types",
")",
":",
"t",
"=",
"Table",
"(",
"self",
",",
"table",
")",
"_validate_timeout",
"(",
"timeout",
")",
"resource",
"=",
"self",
".",
"_acquire",
"(",
")",
"transport",
"=",
"resource",
".",
"object",
"stream",
"=",
"transport",
".",
"ts_stream_keys",
"(",
"t",
",",
"timeout",
")",
"stream",
".",
"attach",
"(",
"resource",
")",
"try",
":",
"for",
"keylist",
"in",
"stream",
":",
"if",
"len",
"(",
"keylist",
")",
">",
"0",
":",
"yield",
"keylist",
"finally",
":",
"stream",
".",
"close",
"(",
")"
] |
Lists all keys in a time series table via a stream. This is a
generator method which should be iterated over.
The caller should explicitly close the returned iterator,
either using :func:`contextlib.closing` or calling ``close()``
explicitly. Consuming the entire iterator will also close the
stream. If it does not, the associated connection might
not be returned to the pool. Example::
from contextlib import closing
# Using contextlib.closing
with closing(client.ts_stream_keys(mytable)) as keys:
for key_list in keys:
do_something(key_list)
# Explicit close()
stream = client.ts_stream_keys(mytable)
for key_list in stream:
do_something(key_list)
stream.close()
:param table: the table from which to stream keys
:type table: string or :class:`Table <riak.table.Table>`
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator
|
[
"Lists",
"all",
"keys",
"in",
"a",
"time",
"series",
"table",
"via",
"a",
"stream",
".",
"This",
"is",
"a",
"generator",
"method",
"which",
"should",
"be",
"iterated",
"over",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L665-L713
|
18,911
|
basho/riak-python-client
|
riak/client/operations.py
|
RiakClientOperations.multiget
|
def multiget(self, pairs, **params):
"""Fetches many keys in parallel via threads.
:param pairs: list of bucket_type/bucket/key tuple triples
:type pairs: list
:param params: additional request flags, e.g. r, pr
:type params: dict
:rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
:class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
bucket_type, bucket, key, and the exception raised on fetch
"""
if self._multiget_pool:
params['pool'] = self._multiget_pool
return riak.client.multi.multiget(self, pairs, **params)
|
python
|
def multiget(self, pairs, **params):
"""Fetches many keys in parallel via threads.
:param pairs: list of bucket_type/bucket/key tuple triples
:type pairs: list
:param params: additional request flags, e.g. r, pr
:type params: dict
:rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
:class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
bucket_type, bucket, key, and the exception raised on fetch
"""
if self._multiget_pool:
params['pool'] = self._multiget_pool
return riak.client.multi.multiget(self, pairs, **params)
|
[
"def",
"multiget",
"(",
"self",
",",
"pairs",
",",
"*",
"*",
"params",
")",
":",
"if",
"self",
".",
"_multiget_pool",
":",
"params",
"[",
"'pool'",
"]",
"=",
"self",
".",
"_multiget_pool",
"return",
"riak",
".",
"client",
".",
"multi",
".",
"multiget",
"(",
"self",
",",
"pairs",
",",
"*",
"*",
"params",
")"
] |
Fetches many keys in parallel via threads.
:param pairs: list of bucket_type/bucket/key tuple triples
:type pairs: list
:param params: additional request flags, e.g. r, pr
:type params: dict
:rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
:class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
bucket_type, bucket, key, and the exception raised on fetch
|
[
"Fetches",
"many",
"keys",
"in",
"parallel",
"via",
"threads",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L1003-L1016
|
18,912
|
basho/riak-python-client
|
riak/client/operations.py
|
RiakClientOperations.multiput
|
def multiput(self, objs, **params):
"""
Stores objects in parallel via threads.
:param objs: the objects to store
:type objs: list of `RiakObject <riak.riak_object.RiakObject>`
:param params: additional request flags, e.g. w, dw, pw
:type params: dict
:rtype: list of boolean or
:class:`RiakObjects <riak.riak_object.RiakObject>`,
"""
if self._multiput_pool:
params['pool'] = self._multiput_pool
return riak.client.multi.multiput(self, objs, **params)
|
python
|
def multiput(self, objs, **params):
"""
Stores objects in parallel via threads.
:param objs: the objects to store
:type objs: list of `RiakObject <riak.riak_object.RiakObject>`
:param params: additional request flags, e.g. w, dw, pw
:type params: dict
:rtype: list of boolean or
:class:`RiakObjects <riak.riak_object.RiakObject>`,
"""
if self._multiput_pool:
params['pool'] = self._multiput_pool
return riak.client.multi.multiput(self, objs, **params)
|
[
"def",
"multiput",
"(",
"self",
",",
"objs",
",",
"*",
"*",
"params",
")",
":",
"if",
"self",
".",
"_multiput_pool",
":",
"params",
"[",
"'pool'",
"]",
"=",
"self",
".",
"_multiput_pool",
"return",
"riak",
".",
"client",
".",
"multi",
".",
"multiput",
"(",
"self",
",",
"objs",
",",
"*",
"*",
"params",
")"
] |
Stores objects in parallel via threads.
:param objs: the objects to store
:type objs: list of `RiakObject <riak.riak_object.RiakObject>`
:param params: additional request flags, e.g. w, dw, pw
:type params: dict
:rtype: list of boolean or
:class:`RiakObjects <riak.riak_object.RiakObject>`,
|
[
"Stores",
"objects",
"in",
"parallel",
"via",
"threads",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L1018-L1031
|
18,913
|
basho/riak-python-client
|
riak/client/operations.py
|
RiakClientOperations.fetch_datatype
|
def fetch_datatype(self, bucket, key, r=None, pr=None,
basic_quorum=None, notfound_ok=None,
timeout=None, include_context=None):
"""
Fetches the value of a Riak Datatype.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the datatype, which must belong to a
:class:`~riak.bucket.BucketType`
:type bucket: :class:`~riak.bucket.RiakBucket`
:param key: the key of the datatype
:type key: string
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool, None
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool, None
:param timeout: a timeout value in milliseconds
:type timeout: int, None
:param include_context: whether to return the opaque context
as well as the value, which is useful for removal operations
on sets and maps
:type include_context: bool, None
:rtype: :class:`~riak.datatypes.Datatype`
"""
dtype, value, context = self._fetch_datatype(
bucket, key, r=r, pr=pr, basic_quorum=basic_quorum,
notfound_ok=notfound_ok, timeout=timeout,
include_context=include_context)
return TYPES[dtype](bucket=bucket, key=key, value=value,
context=context)
|
python
|
def fetch_datatype(self, bucket, key, r=None, pr=None,
basic_quorum=None, notfound_ok=None,
timeout=None, include_context=None):
"""
Fetches the value of a Riak Datatype.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the datatype, which must belong to a
:class:`~riak.bucket.BucketType`
:type bucket: :class:`~riak.bucket.RiakBucket`
:param key: the key of the datatype
:type key: string
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool, None
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool, None
:param timeout: a timeout value in milliseconds
:type timeout: int, None
:param include_context: whether to return the opaque context
as well as the value, which is useful for removal operations
on sets and maps
:type include_context: bool, None
:rtype: :class:`~riak.datatypes.Datatype`
"""
dtype, value, context = self._fetch_datatype(
bucket, key, r=r, pr=pr, basic_quorum=basic_quorum,
notfound_ok=notfound_ok, timeout=timeout,
include_context=include_context)
return TYPES[dtype](bucket=bucket, key=key, value=value,
context=context)
|
[
"def",
"fetch_datatype",
"(",
"self",
",",
"bucket",
",",
"key",
",",
"r",
"=",
"None",
",",
"pr",
"=",
"None",
",",
"basic_quorum",
"=",
"None",
",",
"notfound_ok",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"include_context",
"=",
"None",
")",
":",
"dtype",
",",
"value",
",",
"context",
"=",
"self",
".",
"_fetch_datatype",
"(",
"bucket",
",",
"key",
",",
"r",
"=",
"r",
",",
"pr",
"=",
"pr",
",",
"basic_quorum",
"=",
"basic_quorum",
",",
"notfound_ok",
"=",
"notfound_ok",
",",
"timeout",
"=",
"timeout",
",",
"include_context",
"=",
"include_context",
")",
"return",
"TYPES",
"[",
"dtype",
"]",
"(",
"bucket",
"=",
"bucket",
",",
"key",
"=",
"key",
",",
"value",
"=",
"value",
",",
"context",
"=",
"context",
")"
] |
Fetches the value of a Riak Datatype.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the datatype, which must belong to a
:class:`~riak.bucket.BucketType`
:type bucket: :class:`~riak.bucket.RiakBucket`
:param key: the key of the datatype
:type key: string
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool, None
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool, None
:param timeout: a timeout value in milliseconds
:type timeout: int, None
:param include_context: whether to return the opaque context
as well as the value, which is useful for removal operations
on sets and maps
:type include_context: bool, None
:rtype: :class:`~riak.datatypes.Datatype`
|
[
"Fetches",
"the",
"value",
"of",
"a",
"Riak",
"Datatype",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L1106-L1143
|
18,914
|
basho/riak-python-client
|
riak/client/operations.py
|
RiakClientOperations.update_datatype
|
def update_datatype(self, datatype, w=None, dw=None, pw=None,
return_body=None, timeout=None,
include_context=None):
"""
Sends an update to a Riak Datatype to the server. This operation is not
idempotent and so will not be retried automatically.
:param datatype: the datatype with pending updates
:type datatype: :class:`~riak.datatypes.Datatype`
:param w: the write quorum
:type w: integer, string, None
:param dw: the durable write quorum
:type dw: integer, string, None
:param pw: the primary write quorum
:type pw: integer, string, None
:param timeout: a timeout value in milliseconds
:type timeout: int
:param include_context: whether to return the opaque context
as well as the value, which is useful for removal operations
on sets and maps
:type include_context: bool
:rtype: tuple of datatype, opaque value and opaque context
"""
_validate_timeout(timeout)
with self._transport() as transport:
return transport.update_datatype(datatype, w=w, dw=dw, pw=pw,
return_body=return_body,
timeout=timeout,
include_context=include_context)
|
python
|
def update_datatype(self, datatype, w=None, dw=None, pw=None,
return_body=None, timeout=None,
include_context=None):
"""
Sends an update to a Riak Datatype to the server. This operation is not
idempotent and so will not be retried automatically.
:param datatype: the datatype with pending updates
:type datatype: :class:`~riak.datatypes.Datatype`
:param w: the write quorum
:type w: integer, string, None
:param dw: the durable write quorum
:type dw: integer, string, None
:param pw: the primary write quorum
:type pw: integer, string, None
:param timeout: a timeout value in milliseconds
:type timeout: int
:param include_context: whether to return the opaque context
as well as the value, which is useful for removal operations
on sets and maps
:type include_context: bool
:rtype: tuple of datatype, opaque value and opaque context
"""
_validate_timeout(timeout)
with self._transport() as transport:
return transport.update_datatype(datatype, w=w, dw=dw, pw=pw,
return_body=return_body,
timeout=timeout,
include_context=include_context)
|
[
"def",
"update_datatype",
"(",
"self",
",",
"datatype",
",",
"w",
"=",
"None",
",",
"dw",
"=",
"None",
",",
"pw",
"=",
"None",
",",
"return_body",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"include_context",
"=",
"None",
")",
":",
"_validate_timeout",
"(",
"timeout",
")",
"with",
"self",
".",
"_transport",
"(",
")",
"as",
"transport",
":",
"return",
"transport",
".",
"update_datatype",
"(",
"datatype",
",",
"w",
"=",
"w",
",",
"dw",
"=",
"dw",
",",
"pw",
"=",
"pw",
",",
"return_body",
"=",
"return_body",
",",
"timeout",
"=",
"timeout",
",",
"include_context",
"=",
"include_context",
")"
] |
Sends an update to a Riak Datatype to the server. This operation is not
idempotent and so will not be retried automatically.
:param datatype: the datatype with pending updates
:type datatype: :class:`~riak.datatypes.Datatype`
:param w: the write quorum
:type w: integer, string, None
:param dw: the durable write quorum
:type dw: integer, string, None
:param pw: the primary write quorum
:type pw: integer, string, None
:param timeout: a timeout value in milliseconds
:type timeout: int
:param include_context: whether to return the opaque context
as well as the value, which is useful for removal operations
on sets and maps
:type include_context: bool
:rtype: tuple of datatype, opaque value and opaque context
|
[
"Sends",
"an",
"update",
"to",
"a",
"Riak",
"Datatype",
"to",
"the",
"server",
".",
"This",
"operation",
"is",
"not",
"idempotent",
"and",
"so",
"will",
"not",
"be",
"retried",
"automatically",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/operations.py#L1145-L1175
|
18,915
|
basho/riak-python-client
|
riak/transports/tcp/connection.py
|
TcpConnection._non_connect_send_recv
|
def _non_connect_send_recv(self, msg_code, data=None):
"""
Similar to self._send_recv, but doesn't try to initiate a connection,
thus preventing an infinite loop.
"""
self._non_connect_send_msg(msg_code, data)
return self._recv_msg()
|
python
|
def _non_connect_send_recv(self, msg_code, data=None):
"""
Similar to self._send_recv, but doesn't try to initiate a connection,
thus preventing an infinite loop.
"""
self._non_connect_send_msg(msg_code, data)
return self._recv_msg()
|
[
"def",
"_non_connect_send_recv",
"(",
"self",
",",
"msg_code",
",",
"data",
"=",
"None",
")",
":",
"self",
".",
"_non_connect_send_msg",
"(",
"msg_code",
",",
"data",
")",
"return",
"self",
".",
"_recv_msg",
"(",
")"
] |
Similar to self._send_recv, but doesn't try to initiate a connection,
thus preventing an infinite loop.
|
[
"Similar",
"to",
"self",
".",
"_send_recv",
"but",
"doesn",
"t",
"try",
"to",
"initiate",
"a",
"connection",
"thus",
"preventing",
"an",
"infinite",
"loop",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/tcp/connection.py#L53-L59
|
18,916
|
basho/riak-python-client
|
riak/transports/tcp/connection.py
|
TcpConnection._non_connect_send_msg
|
def _non_connect_send_msg(self, msg_code, data):
"""
Similar to self._send, but doesn't try to initiate a connection,
thus preventing an infinite loop.
"""
try:
self._socket.sendall(self._encode_msg(msg_code, data))
except (IOError, socket.error) as e:
if e.errno == errno.EPIPE:
raise ConnectionClosed(e)
else:
raise
|
python
|
def _non_connect_send_msg(self, msg_code, data):
"""
Similar to self._send, but doesn't try to initiate a connection,
thus preventing an infinite loop.
"""
try:
self._socket.sendall(self._encode_msg(msg_code, data))
except (IOError, socket.error) as e:
if e.errno == errno.EPIPE:
raise ConnectionClosed(e)
else:
raise
|
[
"def",
"_non_connect_send_msg",
"(",
"self",
",",
"msg_code",
",",
"data",
")",
":",
"try",
":",
"self",
".",
"_socket",
".",
"sendall",
"(",
"self",
".",
"_encode_msg",
"(",
"msg_code",
",",
"data",
")",
")",
"except",
"(",
"IOError",
",",
"socket",
".",
"error",
")",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EPIPE",
":",
"raise",
"ConnectionClosed",
"(",
"e",
")",
"else",
":",
"raise"
] |
Similar to self._send, but doesn't try to initiate a connection,
thus preventing an infinite loop.
|
[
"Similar",
"to",
"self",
".",
"_send",
"but",
"doesn",
"t",
"try",
"to",
"initiate",
"a",
"connection",
"thus",
"preventing",
"an",
"infinite",
"loop",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/tcp/connection.py#L65-L76
|
18,917
|
basho/riak-python-client
|
riak/transports/tcp/connection.py
|
TcpConnection._init_security
|
def _init_security(self):
"""
Initialize a secure connection to the server.
"""
if not self._starttls():
raise SecurityError("Could not start TLS connection")
# _ssl_handshake() will throw an exception upon failure
self._ssl_handshake()
if not self._auth():
raise SecurityError("Could not authorize connection")
|
python
|
def _init_security(self):
"""
Initialize a secure connection to the server.
"""
if not self._starttls():
raise SecurityError("Could not start TLS connection")
# _ssl_handshake() will throw an exception upon failure
self._ssl_handshake()
if not self._auth():
raise SecurityError("Could not authorize connection")
|
[
"def",
"_init_security",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_starttls",
"(",
")",
":",
"raise",
"SecurityError",
"(",
"\"Could not start TLS connection\"",
")",
"# _ssh_handshake() will throw an exception upon failure",
"self",
".",
"_ssl_handshake",
"(",
")",
"if",
"not",
"self",
".",
"_auth",
"(",
")",
":",
"raise",
"SecurityError",
"(",
"\"Could not authorize connection\"",
")"
] |
Initialize a secure connection to the server.
|
[
"Initialize",
"a",
"secure",
"connection",
"to",
"the",
"server",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/tcp/connection.py#L82-L91
|
18,918
|
basho/riak-python-client
|
riak/transports/tcp/connection.py
|
TcpConnection._starttls
|
def _starttls(self):
"""
Exchange a STARTTLS message with Riak to initiate secure communications
return True if Riak responds with a STARTTLS response, False otherwise
"""
resp_code, _ = self._non_connect_send_recv(
riak.pb.messages.MSG_CODE_START_TLS)
if resp_code == riak.pb.messages.MSG_CODE_START_TLS:
return True
else:
return False
|
python
|
def _starttls(self):
"""
Exchange a STARTTLS message with Riak to initiate secure communications
return True if Riak responds with a STARTTLS response, False otherwise
"""
resp_code, _ = self._non_connect_send_recv(
riak.pb.messages.MSG_CODE_START_TLS)
if resp_code == riak.pb.messages.MSG_CODE_START_TLS:
return True
else:
return False
|
[
"def",
"_starttls",
"(",
"self",
")",
":",
"resp_code",
",",
"_",
"=",
"self",
".",
"_non_connect_send_recv",
"(",
"riak",
".",
"pb",
".",
"messages",
".",
"MSG_CODE_START_TLS",
")",
"if",
"resp_code",
"==",
"riak",
".",
"pb",
".",
"messages",
".",
"MSG_CODE_START_TLS",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Exchange a STARTTLS message with Riak to initiate secure communications
return True if Riak responds with a STARTTLS response, False otherwise
|
[
"Exchange",
"a",
"STARTTLS",
"message",
"with",
"Riak",
"to",
"initiate",
"secure",
"communications",
"return",
"True",
"is",
"Riak",
"responds",
"with",
"a",
"STARTTLS",
"response",
"False",
"otherwise"
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/tcp/connection.py#L93-L103
|
18,919
|
basho/riak-python-client
|
riak/transports/tcp/connection.py
|
TcpConnection.close
|
def close(self):
"""
Closes the underlying socket of the PB connection.
"""
if self._socket:
if USE_STDLIB_SSL:
# NB: Python 2.7.8 and earlier does not have a compatible
# shutdown() method due to the SSL lib
try:
self._socket.shutdown(socket.SHUT_RDWR)
except EnvironmentError:
# NB: sometimes these exceptions are raised if the initial
# connection didn't succeed correctly, or if shutdown() is
# called after the connection dies
logging.debug('Exception occurred while shutting '
'down socket.', exc_info=True)
self._socket.close()
del self._socket
|
python
|
def close(self):
"""
Closes the underlying socket of the PB connection.
"""
if self._socket:
if USE_STDLIB_SSL:
# NB: Python 2.7.8 and earlier does not have a compatible
# shutdown() method due to the SSL lib
try:
self._socket.shutdown(socket.SHUT_RDWR)
except EnvironmentError:
# NB: sometimes these exceptions are raised if the initial
# connection didn't succeed correctly, or if shutdown() is
# called after the connection dies
logging.debug('Exception occurred while shutting '
'down socket.', exc_info=True)
self._socket.close()
del self._socket
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_socket",
":",
"if",
"USE_STDLIB_SSL",
":",
"# NB: Python 2.7.8 and earlier does not have a compatible",
"# shutdown() method due to the SSL lib",
"try",
":",
"self",
".",
"_socket",
".",
"shutdown",
"(",
"socket",
".",
"SHUT_RDWR",
")",
"except",
"EnvironmentError",
":",
"# NB: sometimes these exceptions are raised if the initial",
"# connection didn't succeed correctly, or if shutdown() is",
"# called after the connection dies",
"logging",
".",
"debug",
"(",
"'Exception occurred while shutting '",
"'down socket.'",
",",
"exc_info",
"=",
"True",
")",
"self",
".",
"_socket",
".",
"close",
"(",
")",
"del",
"self",
".",
"_socket"
] |
Closes the underlying socket of the PB connection.
|
[
"Closes",
"the",
"underlying",
"socket",
"of",
"the",
"PB",
"connection",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/tcp/connection.py#L266-L283
|
18,920
|
basho/riak-python-client
|
riak/riak_object.py
|
content_property
|
def content_property(name, doc=None):
"""
Delegates a property to the first sibling in a RiakObject, raising
an error when the object is in conflict.
"""
def _setter(self, value):
if len(self.siblings) == 0:
# In this case, assume that what the user wants is to
# create a new sibling inside an empty object.
self.siblings = [RiakContent(self)]
if len(self.siblings) != 1:
raise ConflictError()
setattr(self.siblings[0], name, value)
def _getter(self):
if len(self.siblings) == 0:
return
if len(self.siblings) != 1:
raise ConflictError()
return getattr(self.siblings[0], name)
return property(_getter, _setter, doc=doc)
|
python
|
def content_property(name, doc=None):
"""
Delegates a property to the first sibling in a RiakObject, raising
an error when the object is in conflict.
"""
def _setter(self, value):
if len(self.siblings) == 0:
# In this case, assume that what the user wants is to
# create a new sibling inside an empty object.
self.siblings = [RiakContent(self)]
if len(self.siblings) != 1:
raise ConflictError()
setattr(self.siblings[0], name, value)
def _getter(self):
if len(self.siblings) == 0:
return
if len(self.siblings) != 1:
raise ConflictError()
return getattr(self.siblings[0], name)
return property(_getter, _setter, doc=doc)
|
[
"def",
"content_property",
"(",
"name",
",",
"doc",
"=",
"None",
")",
":",
"def",
"_setter",
"(",
"self",
",",
"value",
")",
":",
"if",
"len",
"(",
"self",
".",
"siblings",
")",
"==",
"0",
":",
"# In this case, assume that what the user wants is to",
"# create a new sibling inside an empty object.",
"self",
".",
"siblings",
"=",
"[",
"RiakContent",
"(",
"self",
")",
"]",
"if",
"len",
"(",
"self",
".",
"siblings",
")",
"!=",
"1",
":",
"raise",
"ConflictError",
"(",
")",
"setattr",
"(",
"self",
".",
"siblings",
"[",
"0",
"]",
",",
"name",
",",
"value",
")",
"def",
"_getter",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"siblings",
")",
"==",
"0",
":",
"return",
"if",
"len",
"(",
"self",
".",
"siblings",
")",
"!=",
"1",
":",
"raise",
"ConflictError",
"(",
")",
"return",
"getattr",
"(",
"self",
".",
"siblings",
"[",
"0",
"]",
",",
"name",
")",
"return",
"property",
"(",
"_getter",
",",
"_setter",
",",
"doc",
"=",
"doc",
")"
] |
Delegates a property to the first sibling in a RiakObject, raising
an error when the object is in conflict.
|
[
"Delegates",
"a",
"property",
"to",
"the",
"first",
"sibling",
"in",
"a",
"RiakObject",
"raising",
"an",
"error",
"when",
"the",
"object",
"is",
"in",
"conflict",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/riak_object.py#L22-L43
|
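To make the delegation behaviour concrete, here is a self-contained sketch that reuses the same `content_property` logic against invented stand-ins (`Content` for `RiakContent`, `Thing` for `RiakObject`); it is illustrative only.

class ConflictError(Exception):
    pass


class Content(object):
    # Minimal stand-in for RiakContent: just an attribute holder.
    def __init__(self, parent):
        self.parent = parent
        self.data = None


def content_property(name, doc=None):
    # Same delegation logic as the factory above, bound to the stand-ins.
    def _setter(self, value):
        if len(self.siblings) == 0:
            self.siblings = [Content(self)]
        if len(self.siblings) != 1:
            raise ConflictError()
        setattr(self.siblings[0], name, value)

    def _getter(self):
        if len(self.siblings) == 0:
            return
        if len(self.siblings) != 1:
            raise ConflictError()
        return getattr(self.siblings[0], name)

    return property(_getter, _setter, doc=doc)


class Thing(object):
    data = content_property('data', doc="delegated to the single sibling")

    def __init__(self):
        self.siblings = []


t = Thing()
t.data = 42                      # first write creates the single sibling
assert t.data == 42
t.siblings.append(Content(t))    # simulate a conflict (two siblings)
try:
    t.data
except ConflictError:
    pass                         # reads refuse until siblings are resolved
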
18,921
|
basho/riak-python-client
|
riak/riak_object.py
|
content_method
|
def content_method(name):
"""
Delegates a method to the first sibling in a RiakObject, raising
an error when the object is in conflict.
"""
def _delegate(self, *args, **kwargs):
if len(self.siblings) != 1:
raise ConflictError()
return getattr(self.siblings[0], name).__call__(*args, **kwargs)
_delegate.__doc__ = getattr(RiakContent, name).__doc__
return _delegate
|
python
|
def content_method(name):
"""
Delegates a method to the first sibling in a RiakObject, raising
an error when the object is in conflict.
"""
def _delegate(self, *args, **kwargs):
if len(self.siblings) != 1:
raise ConflictError()
return getattr(self.siblings[0], name).__call__(*args, **kwargs)
_delegate.__doc__ = getattr(RiakContent, name).__doc__
return _delegate
|
[
"def",
"content_method",
"(",
"name",
")",
":",
"def",
"_delegate",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"self",
".",
"siblings",
")",
"!=",
"1",
":",
"raise",
"ConflictError",
"(",
")",
"return",
"getattr",
"(",
"self",
".",
"siblings",
"[",
"0",
"]",
",",
"name",
")",
".",
"__call__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"_delegate",
".",
"__doc__",
"=",
"getattr",
"(",
"RiakContent",
",",
"name",
")",
".",
"__doc__",
"return",
"_delegate"
] |
Delegates a method to the first sibling in a RiakObject, raising
an error when the object is in conflict.
|
[
"Delegates",
"a",
"method",
"to",
"the",
"first",
"sibling",
"in",
"a",
"RiakObject",
"raising",
"an",
"error",
"when",
"the",
"object",
"is",
"in",
"conflict",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/riak_object.py#L46-L58
|
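The method-delegating counterpart, in the same illustrative style; the `__doc__` copying from `RiakContent` is omitted here because the stand-in classes are invented.

class ConflictError(Exception):
    pass


class Sibling(object):
    def encode(self):
        return b"payload"        # pretend serialization on the one sibling


def content_method(name):
    # Same delegation logic as above, minus the __doc__ copying.
    def _delegate(self, *args, **kwargs):
        if len(self.siblings) != 1:
            raise ConflictError()
        return getattr(self.siblings[0], name)(*args, **kwargs)
    return _delegate


class Holder(object):
    encode = content_method('encode')

    def __init__(self, siblings):
        self.siblings = siblings


assert Holder([Sibling()]).encode() == b"payload"
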
18,922
|
basho/riak-python-client
|
riak/riak_object.py
|
RiakObject.store
|
def store(self, w=None, dw=None, pw=None, return_body=True,
if_none_match=False, timeout=None):
"""
Store the object in Riak. When this operation completes, the
object could contain new metadata and possibly new data if Riak
contains a newer version of the object according to the object's
vector clock.
:param w: W-value, wait for this many partitions to respond
before returning to client.
:type w: integer
:param dw: DW-value, wait for this many partitions to
confirm the write before returning to client.
:type dw: integer
:param pw: PW-value, require this many primary partitions to
be available before performing the put
:type pw: integer
:param return_body: if the newly stored object should be
retrieved
:type return_body: bool
:param if_none_match: Should the object be stored only if
there is no key previously defined
:type if_none_match: bool
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: :class:`RiakObject` """
if len(self.siblings) != 1:
raise ConflictError("Attempting to store an invalid object, "
"resolve the siblings first")
self.client.put(self, w=w, dw=dw, pw=pw,
return_body=return_body,
if_none_match=if_none_match,
timeout=timeout)
return self
|
python
|
def store(self, w=None, dw=None, pw=None, return_body=True,
if_none_match=False, timeout=None):
"""
Store the object in Riak. When this operation completes, the
object could contain new metadata and possibly new data if Riak
contains a newer version of the object according to the object's
vector clock.
:param w: W-value, wait for this many partitions to respond
before returning to client.
:type w: integer
:param dw: DW-value, wait for this many partitions to
confirm the write before returning to client.
:type dw: integer
:param pw: PW-value, require this many primary partitions to
be available before performing the put
:type pw: integer
:param return_body: if the newly stored object should be
retrieved
:type return_body: bool
:param if_none_match: Should the object be stored only if
there is no key previously defined
:type if_none_match: bool
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: :class:`RiakObject` """
if len(self.siblings) != 1:
raise ConflictError("Attempting to store an invalid object, "
"resolve the siblings first")
self.client.put(self, w=w, dw=dw, pw=pw,
return_body=return_body,
if_none_match=if_none_match,
timeout=timeout)
return self
|
[
"def",
"store",
"(",
"self",
",",
"w",
"=",
"None",
",",
"dw",
"=",
"None",
",",
"pw",
"=",
"None",
",",
"return_body",
"=",
"True",
",",
"if_none_match",
"=",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"len",
"(",
"self",
".",
"siblings",
")",
"!=",
"1",
":",
"raise",
"ConflictError",
"(",
"\"Attempting to store an invalid object, \"",
"\"resolve the siblings first\"",
")",
"self",
".",
"client",
".",
"put",
"(",
"self",
",",
"w",
"=",
"w",
",",
"dw",
"=",
"dw",
",",
"pw",
"=",
"pw",
",",
"return_body",
"=",
"return_body",
",",
"if_none_match",
"=",
"if_none_match",
",",
"timeout",
"=",
"timeout",
")",
"return",
"self"
] |
Store the object in Riak. When this operation completes, the
object could contain new metadata and possibly new data if Riak
contains a newer version of the object according to the object's
vector clock.
:param w: W-value, wait for this many partitions to respond
before returning to client.
:type w: integer
:param dw: DW-value, wait for this many partitions to
confirm the write before returning to client.
:type dw: integer
:param pw: PW-value, require this many primary partitions to
be available before performing the put
:type pw: integer
:param return_body: if the newly stored object should be
retrieved
:type return_body: bool
:param if_none_match: Should the object be stored only if
there is no key previously defined
:type if_none_match: bool
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: :class:`RiakObject`
|
[
"Store",
"the",
"object",
"in",
"Riak",
".",
"When",
"this",
"operation",
"completes",
"the",
"object",
"could",
"contain",
"new",
"metadata",
"and",
"possibly",
"new",
"data",
"if",
"Riak",
"contains",
"a",
"newer",
"version",
"of",
"the",
"object",
"according",
"to",
"the",
"object",
"s",
"vector",
"clock",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/riak_object.py#L247-L283
|
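A hedged usage sketch for `store()`. The client options, bucket and key names, and the payload are all assumptions made for illustration, and running it requires a reachable Riak node.

import riak

client = riak.RiakClient(pb_port=8087)            # assumes a local node
bucket = client.bucket('example_bucket')          # hypothetical bucket name

obj = bucket.new('example_key', data={'answer': 42})
obj.store(w=2, return_body=False)                 # wait for two write acks
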
18,923
|
basho/riak-python-client
|
riak/riak_object.py
|
RiakObject.reload
|
def reload(self, r=None, pr=None, timeout=None, basic_quorum=None,
notfound_ok=None, head_only=False):
"""
Reload the object from Riak. When this operation completes, the
object could contain new metadata and a new value, if the object
was updated in Riak since it was last retrieved.
.. note:: Even if the key is not found in Riak, this will
return a :class:`RiakObject`. Check the :attr:`exists`
property to see if the key was found.
:param r: R-Value, wait for this many partitions to respond
before returning to client.
:type r: integer
:param pr: PR-value, require this many primary partitions to
be available before performing the read that
precedes the put
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: :class:`RiakObject`
"""
self.client.get(self, r=r, pr=pr, timeout=timeout, head_only=head_only)
return self
|
python
|
def reload(self, r=None, pr=None, timeout=None, basic_quorum=None,
notfound_ok=None, head_only=False):
"""
Reload the object from Riak. When this operation completes, the
object could contain new metadata and a new value, if the object
was updated in Riak since it was last retrieved.
.. note:: Even if the key is not found in Riak, this will
return a :class:`RiakObject`. Check the :attr:`exists`
property to see if the key was found.
:param r: R-Value, wait for this many partitions to respond
before returning to client.
:type r: integer
:param pr: PR-value, require this many primary partitions to
be available before performing the read that
precedes the put
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: :class:`RiakObject`
"""
self.client.get(self, r=r, pr=pr, timeout=timeout, head_only=head_only)
return self
|
[
"def",
"reload",
"(",
"self",
",",
"r",
"=",
"None",
",",
"pr",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"basic_quorum",
"=",
"None",
",",
"notfound_ok",
"=",
"None",
",",
"head_only",
"=",
"False",
")",
":",
"self",
".",
"client",
".",
"get",
"(",
"self",
",",
"r",
"=",
"r",
",",
"pr",
"=",
"pr",
",",
"timeout",
"=",
"timeout",
",",
"head_only",
"=",
"head_only",
")",
"return",
"self"
] |
Reload the object from Riak. When this operation completes, the
object could contain new metadata and a new value, if the object
was updated in Riak since it was last retrieved.
.. note:: Even if the key is not found in Riak, this will
return a :class:`RiakObject`. Check the :attr:`exists`
property to see if the key was found.
:param r: R-Value, wait for this many partitions to respond
before returning to client.
:type r: integer
:param pr: PR-value, require this many primary partitions to
be available before performing the read that
precedes the put
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: :class:`RiakObject`
|
[
"Reload",
"the",
"object",
"from",
"Riak",
".",
"When",
"this",
"operation",
"completes",
"the",
"object",
"could",
"contain",
"new",
"metadata",
"and",
"a",
"new",
"value",
"if",
"the",
"object",
"was",
"updated",
"in",
"Riak",
"since",
"it",
"was",
"last",
"retrieved",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/riak_object.py#L285-L317
|
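Continuing the illustrative session above, `reload()` refreshes the local copy and lets the caller check `exists`, as the docstring suggests; the key name is still a placeholder.

obj = bucket.get('example_key', timeout=1000)     # initial fetch
# ... later, pick up any concurrent writes made by other clients:
obj.reload(r=2)
if not obj.exists:
    print('the key was not found on the second read')
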
18,924
|
basho/riak-python-client
|
riak/riak_object.py
|
RiakObject.delete
|
def delete(self, r=None, w=None, dw=None, pr=None, pw=None,
timeout=None):
"""
Delete this object from Riak.
:param r: R-value, wait for this many partitions to read object
before performing the put
:type r: integer
:param w: W-value, wait for this many partitions to respond
before returning to client.
:type w: integer
:param dw: DW-value, wait for this many partitions to
confirm the write before returning to client.
:type dw: integer
:param pr: PR-value, require this many primary partitions to
be available before performing the read that
precedes the put
:type pr: integer
:param pw: PW-value, require this many primary partitions to
be available before performing the put
:type pw: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: :class:`RiakObject`
"""
self.client.delete(self, r=r, w=w, dw=dw, pr=pr, pw=pw,
timeout=timeout)
self.clear()
return self
|
python
|
def delete(self, r=None, w=None, dw=None, pr=None, pw=None,
timeout=None):
"""
Delete this object from Riak.
:param r: R-value, wait for this many partitions to read object
before performing the put
:type r: integer
:param w: W-value, wait for this many partitions to respond
before returning to client.
:type w: integer
:param dw: DW-value, wait for this many partitions to
confirm the write before returning to client.
:type dw: integer
:param pr: PR-value, require this many primary partitions to
be available before performing the read that
precedes the put
:type pr: integer
:param pw: PW-value, require this many primary partitions to
be available before performing the put
:type pw: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: :class:`RiakObject`
"""
self.client.delete(self, r=r, w=w, dw=dw, pr=pr, pw=pw,
timeout=timeout)
self.clear()
return self
|
[
"def",
"delete",
"(",
"self",
",",
"r",
"=",
"None",
",",
"w",
"=",
"None",
",",
"dw",
"=",
"None",
",",
"pr",
"=",
"None",
",",
"pw",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"client",
".",
"delete",
"(",
"self",
",",
"r",
"=",
"r",
",",
"w",
"=",
"w",
",",
"dw",
"=",
"dw",
",",
"pr",
"=",
"pr",
",",
"pw",
"=",
"pw",
",",
"timeout",
"=",
"timeout",
")",
"self",
".",
"clear",
"(",
")",
"return",
"self"
] |
Delete this object from Riak.
:param r: R-value, wait for this many partitions to read object
before performing the put
:type r: integer
:param w: W-value, wait for this many partitions to respond
before returning to client.
:type w: integer
:param dw: DW-value, wait for this many partitions to
confirm the write before returning to client.
:type dw: integer
:param pr: PR-value, require this many primary partitions to
be available before performing the read that
precedes the put
:type pr: integer
:param pw: PW-value, require this many primary partitions to
be available before performing the put
:type pw: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: :class:`RiakObject`
|
[
"Delete",
"this",
"object",
"from",
"Riak",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/riak_object.py#L319-L348
|
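And the end of the same illustrative session; the quorum values passed to `delete()` are arbitrary.

obj.delete(w=2, dw=1)    # remove the key; clear() then resets the local object
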
18,925
|
basho/riak-python-client
|
riak/bucket.py
|
RiakBucket.get_encoder
|
def get_encoder(self, content_type):
"""
Get the encoding function for the provided content type for
this bucket.
:param content_type: the requested media type
:type content_type: str
:rtype: function
"""
if content_type in self._encoders:
return self._encoders[content_type]
else:
return self._client.get_encoder(content_type)
|
python
|
def get_encoder(self, content_type):
"""
Get the encoding function for the provided content type for
this bucket.
:param content_type: the requested media type
:type content_type: str
:rtype: function
"""
if content_type in self._encoders:
return self._encoders[content_type]
else:
return self._client.get_encoder(content_type)
|
[
"def",
"get_encoder",
"(",
"self",
",",
"content_type",
")",
":",
"if",
"content_type",
"in",
"self",
".",
"_encoders",
":",
"return",
"self",
".",
"_encoders",
"[",
"content_type",
"]",
"else",
":",
"return",
"self",
".",
"_client",
".",
"get_encoder",
"(",
"content_type",
")"
] |
Get the encoding function for the provided content type for
this bucket.
:param content_type: the requested media type
:type content_type: str
:rtype: function
|
[
"Get",
"the",
"encoding",
"function",
"for",
"the",
"provided",
"content",
"type",
"for",
"this",
"bucket",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/bucket.py#L88-L100
|
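A hedged sketch of how the bucket-level codec lookup in `get_encoder`/`get_decoder` is typically exercised. It assumes the companion `set_encoder`/`set_decoder` registration methods, continues the illustrative `client` from the earlier sketch, and the pickle content type is invented.

import pickle

bucket = client.bucket('pickled_bucket')               # hypothetical bucket
bucket.set_encoder('application/x-pickle', pickle.dumps)
bucket.set_decoder('application/x-pickle', pickle.loads)

encode = bucket.get_encoder('application/x-pickle')    # bucket-level override
decode = bucket.get_decoder('application/x-pickle')
assert decode(encode({'a': 1})) == {'a': 1}
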
18,926
|
basho/riak-python-client
|
riak/bucket.py
|
RiakBucket.get_decoder
|
def get_decoder(self, content_type):
"""
Get the decoding function for the provided content type for
this bucket.
:param content_type: the requested media type
:type content_type: str
:rtype: function
"""
if content_type in self._decoders:
return self._decoders[content_type]
else:
return self._client.get_decoder(content_type)
|
python
|
def get_decoder(self, content_type):
"""
Get the decoding function for the provided content type for
this bucket.
:param content_type: the requested media type
:type content_type: str
:rtype: function
"""
if content_type in self._decoders:
return self._decoders[content_type]
else:
return self._client.get_decoder(content_type)
|
[
"def",
"get_decoder",
"(",
"self",
",",
"content_type",
")",
":",
"if",
"content_type",
"in",
"self",
".",
"_decoders",
":",
"return",
"self",
".",
"_decoders",
"[",
"content_type",
"]",
"else",
":",
"return",
"self",
".",
"_client",
".",
"get_decoder",
"(",
"content_type",
")"
] |
Get the decoding function for the provided content type for
this bucket.
:param content_type: the requested media type
:type content_type: str
:rtype: function
|
[
"Get",
"the",
"decoding",
"function",
"for",
"the",
"provided",
"content",
"type",
"for",
"this",
"bucket",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/bucket.py#L116-L128
|
18,927
|
basho/riak-python-client
|
riak/bucket.py
|
RiakBucket.multiget
|
def multiget(self, keys, r=None, pr=None, timeout=None,
basic_quorum=None, notfound_ok=None,
head_only=False):
"""
Retrieves a list of keys belonging to this bucket in parallel.
:param keys: the keys to fetch
:type keys: list
:param r: R-Value for the requests (defaults to bucket's R)
:type r: integer
:param pr: PR-Value for the requests (defaults to bucket's PR)
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
:class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
bucket_type, bucket, key, and the exception raised on fetch
"""
bkeys = [(self.bucket_type.name, self.name, key) for key in keys]
return self._client.multiget(bkeys, r=r, pr=pr, timeout=timeout,
basic_quorum=basic_quorum,
notfound_ok=notfound_ok,
head_only=head_only)
|
python
|
def multiget(self, keys, r=None, pr=None, timeout=None,
basic_quorum=None, notfound_ok=None,
head_only=False):
"""
Retrieves a list of keys belonging to this bucket in parallel.
:param keys: the keys to fetch
:type keys: list
:param r: R-Value for the requests (defaults to bucket's R)
:type r: integer
:param pr: PR-Value for the requests (defaults to bucket's PR)
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
:class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
bucket_type, bucket, key, and the exception raised on fetch
"""
bkeys = [(self.bucket_type.name, self.name, key) for key in keys]
return self._client.multiget(bkeys, r=r, pr=pr, timeout=timeout,
basic_quorum=basic_quorum,
notfound_ok=notfound_ok,
head_only=head_only)
|
[
"def",
"multiget",
"(",
"self",
",",
"keys",
",",
"r",
"=",
"None",
",",
"pr",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"basic_quorum",
"=",
"None",
",",
"notfound_ok",
"=",
"None",
",",
"head_only",
"=",
"False",
")",
":",
"bkeys",
"=",
"[",
"(",
"self",
".",
"bucket_type",
".",
"name",
",",
"self",
".",
"name",
",",
"key",
")",
"for",
"key",
"in",
"keys",
"]",
"return",
"self",
".",
"_client",
".",
"multiget",
"(",
"bkeys",
",",
"r",
"=",
"r",
",",
"pr",
"=",
"pr",
",",
"timeout",
"=",
"timeout",
",",
"basic_quorum",
"=",
"basic_quorum",
",",
"notfound_ok",
"=",
"notfound_ok",
",",
"head_only",
"=",
"head_only",
")"
] |
Retrieves a list of keys belonging to this bucket in parallel.
:param keys: the keys to fetch
:type keys: list
:param r: R-Value for the requests (defaults to bucket's R)
:type r: integer
:param pr: PR-Value for the requests (defaults to bucket's PR)
:type pr: integer
:param timeout: a timeout value in milliseconds
:type timeout: int
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:param head_only: whether to fetch without value, so only metadata
(only available on PB transport)
:type head_only: bool
:rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
:class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
bucket_type, bucket, key, and the exception raised on fetch
|
[
"Retrieves",
"a",
"list",
"of",
"keys",
"belonging",
"to",
"this",
"bucket",
"in",
"parallel",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/bucket.py#L238-L268
|
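A usage sketch for `multiget()` in the same illustrative session; the keys are placeholders, and the tuple branch mirrors the failure shape described in the docstring.

keys = ['key-a', 'key-b', 'missing-key']               # hypothetical keys
results = bucket.multiget(keys, r=2, timeout=2000)
for res in results:
    if isinstance(res, tuple):
        # (bucket_type, bucket, key, exception) for a failed fetch
        print('fetch failed for', res[2], '->', res[3])
    else:
        print(res.key, res.exists)
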
18,928
|
basho/riak-python-client
|
riak/bucket.py
|
BucketType.stream_buckets
|
def stream_buckets(self, timeout=None):
"""
Streams the list of buckets under this bucket-type. This is a
generator method that should be iterated over.
The caller must close the stream when finished. See
:meth:`RiakClient.stream_buckets()
<riak.client.RiakClient.stream_buckets>` for more details.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator that yields lists of :class:`RiakBucket
<riak.bucket.RiakBucket>` instances
"""
return self._client.stream_buckets(bucket_type=self, timeout=timeout)
|
python
|
def stream_buckets(self, timeout=None):
"""
Streams the list of buckets under this bucket-type. This is a
generator method that should be iterated over.
The caller must close the stream when finished. See
:meth:`RiakClient.stream_buckets()
<riak.client.RiakClient.stream_buckets>` for more details.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator that yields lists of :class:`RiakBucket
<riak.bucket.RiakBucket>` instances
"""
return self._client.stream_buckets(bucket_type=self, timeout=timeout)
|
[
"def",
"stream_buckets",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"return",
"self",
".",
"_client",
".",
"stream_buckets",
"(",
"bucket_type",
"=",
"self",
",",
"timeout",
"=",
"timeout",
")"
] |
Streams the list of buckets under this bucket-type. This is a
generator method that should be iterated over.
The caller must close the stream when finished. See
:meth:`RiakClient.stream_buckets()
<riak.client.RiakClient.stream_buckets>` for more details.
.. warning:: Do not use this in production, as it requires
traversing through all keys stored in a cluster.
:param timeout: a timeout value in milliseconds
:type timeout: int
:rtype: iterator that yields lists of :class:`RiakBucket
<riak.bucket.RiakBucket>` instances
|
[
"Streams",
"the",
"list",
"of",
"buckets",
"under",
"this",
"bucket",
"-",
"type",
".",
"This",
"is",
"a",
"generator",
"method",
"that",
"should",
"be",
"iterated",
"over",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/bucket.py#L712-L729
|
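A sketch of the streaming pattern the docstring asks for, with an explicit close in a finally block; the bucket-type name is a placeholder.

typ = client.bucket_type('default')          # hypothetical bucket type
stream = typ.stream_buckets(timeout=5000)
try:
    for bucket_page in stream:               # each item is a list of buckets
        for b in bucket_page:
            print(b.name)
finally:
    stream.close()                           # the caller must close the stream
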
18,929
|
basho/riak-python-client
|
riak/node.py
|
Decaying.incr
|
def incr(self, d):
"""
Increases the value by the argument.
:param d: the value to increase by
:type d: float
"""
with self.lock:
self.p = self.value() + d
|
python
|
def incr(self, d):
"""
Increases the value by the argument.
:param d: the value to increase by
:type d: float
"""
with self.lock:
self.p = self.value() + d
|
[
"def",
"incr",
"(",
"self",
",",
"d",
")",
":",
"with",
"self",
".",
"lock",
":",
"self",
".",
"p",
"=",
"self",
".",
"value",
"(",
")",
"+",
"d"
] |
Increases the value by the argument.
:param d: the value to increase by
:type d: float
|
[
"Increases",
"the",
"value",
"by",
"the",
"argument",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/node.py#L46-L54
|
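`value()` is not part of this record, so the sketch below only assumes the usual behaviour of such a counter: the number decays toward zero between updates and `incr()` adds on top of the decayed value. The decay formula and window are invented for illustration.

import math
import threading
import time


class DecayingSketch(object):
    # Illustrative only: the stored value decays by e**(-dt / window)
    # between reads, so old errors gradually stop counting.
    def __init__(self, p=0.0, window=10.0):
        self.p = p
        self.window = window
        self.t0 = time.time()
        self.lock = threading.RLock()

    def value(self):
        with self.lock:
            now = time.time()
            dt = now - self.t0
            self.t0 = now
            self.p = self.p * math.exp(-dt / self.window)
            return self.p

    def incr(self, d):
        # Same shape as the client method: read the decayed value, add d.
        with self.lock:
            self.p = self.value() + d


rate = DecayingSketch()
rate.incr(1)
time.sleep(0.1)
assert rate.value() < 1.0     # already slightly decayed
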
18,930
|
basho/riak-python-client
|
riak/transports/transport.py
|
Transport.make_random_client_id
|
def make_random_client_id(self):
"""
Returns a random client identifier
"""
if PY2:
return ('py_%s' %
base64.b64encode(str(random.randint(1, 0x40000000))))
else:
return ('py_%s' %
base64.b64encode(bytes(str(random.randint(1, 0x40000000)),
'ascii')))
|
python
|
def make_random_client_id(self):
"""
Returns a random client identifier
"""
if PY2:
return ('py_%s' %
base64.b64encode(str(random.randint(1, 0x40000000))))
else:
return ('py_%s' %
base64.b64encode(bytes(str(random.randint(1, 0x40000000)),
'ascii')))
|
[
"def",
"make_random_client_id",
"(",
"self",
")",
":",
"if",
"PY2",
":",
"return",
"(",
"'py_%s'",
"%",
"base64",
".",
"b64encode",
"(",
"str",
"(",
"random",
".",
"randint",
"(",
"1",
",",
"0x40000000",
")",
")",
")",
")",
"else",
":",
"return",
"(",
"'py_%s'",
"%",
"base64",
".",
"b64encode",
"(",
"bytes",
"(",
"str",
"(",
"random",
".",
"randint",
"(",
"1",
",",
"0x40000000",
")",
")",
",",
"'ascii'",
")",
")",
")"
] |
Returns a random client identifier
|
[
"Returns",
"a",
"random",
"client",
"identifier"
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L42-L52
|
18,931
|
basho/riak-python-client
|
riak/transports/transport.py
|
Transport.get
|
def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
notfound_ok=None, head_only=False):
"""
Fetches an object.
"""
raise NotImplementedError
|
python
|
def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
notfound_ok=None, head_only=False):
"""
Fetches an object.
"""
raise NotImplementedError
|
[
"def",
"get",
"(",
"self",
",",
"robj",
",",
"r",
"=",
"None",
",",
"pr",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"basic_quorum",
"=",
"None",
",",
"notfound_ok",
"=",
"None",
",",
"head_only",
"=",
"False",
")",
":",
"raise",
"NotImplementedError"
] |
Fetches an object.
|
[
"Fetches",
"an",
"object",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L70-L75
|
18,932
|
basho/riak-python-client
|
riak/transports/transport.py
|
Transport.put
|
def put(self, robj, w=None, dw=None, pw=None, return_body=None,
if_none_match=None, timeout=None):
"""
Stores an object.
"""
raise NotImplementedError
|
python
|
def put(self, robj, w=None, dw=None, pw=None, return_body=None,
if_none_match=None, timeout=None):
"""
Stores an object.
"""
raise NotImplementedError
|
[
"def",
"put",
"(",
"self",
",",
"robj",
",",
"w",
"=",
"None",
",",
"dw",
"=",
"None",
",",
"pw",
"=",
"None",
",",
"return_body",
"=",
"None",
",",
"if_none_match",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"raise",
"NotImplementedError"
] |
Stores an object.
|
[
"Stores",
"an",
"object",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L77-L82
|
18,933
|
basho/riak-python-client
|
riak/transports/transport.py
|
Transport.delete
|
def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None,
pw=None, timeout=None):
"""
Deletes an object.
"""
raise NotImplementedError
|
python
|
def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None,
pw=None, timeout=None):
"""
Deletes an object.
"""
raise NotImplementedError
|
[
"def",
"delete",
"(",
"self",
",",
"robj",
",",
"rw",
"=",
"None",
",",
"r",
"=",
"None",
",",
"w",
"=",
"None",
",",
"dw",
"=",
"None",
",",
"pr",
"=",
"None",
",",
"pw",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"raise",
"NotImplementedError"
] |
Deletes an object.
|
[
"Deletes",
"an",
"object",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L84-L89
|
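`get`, `put`, and `delete` here are abstract hooks for concrete transports to override. The toy subclass below shows only the override pattern with an invented in-memory store; it models a tiny fraction of the real protocol, skips base-class initialization, and is not a usable transport.

from riak import RiakError
from riak.transports.transport import Transport


class InMemoryTransport(Transport):
    # Toy subclass: keeps encoded payloads in a dict keyed by (bucket, key).
    def __init__(self):
        self._data = {}

    def put(self, robj, w=None, dw=None, pw=None, return_body=None,
            if_none_match=None, timeout=None):
        if if_none_match and (robj.bucket.name, robj.key) in self._data:
            raise RiakError('match_found')
        self._data[(robj.bucket.name, robj.key)] = robj.encoded_data

    def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
            notfound_ok=None, head_only=False):
        robj.encoded_data = self._data.get((robj.bucket.name, robj.key))

    def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None,
               pw=None, timeout=None):
        self._data.pop((robj.bucket.name, robj.key), None)
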
18,934
|
basho/riak-python-client
|
riak/transports/transport.py
|
Transport.update_counter
|
def update_counter(self, bucket, key, value, w=None, dw=None, pw=None,
returnvalue=False):
"""
Updates a counter by the given value.
"""
raise NotImplementedError
|
python
|
def update_counter(self, bucket, key, value, w=None, dw=None, pw=None,
returnvalue=False):
"""
Updates a counter by the given value.
"""
raise NotImplementedError
|
[
"def",
"update_counter",
"(",
"self",
",",
"bucket",
",",
"key",
",",
"value",
",",
"w",
"=",
"None",
",",
"dw",
"=",
"None",
",",
"pw",
"=",
"None",
",",
"returnvalue",
"=",
"False",
")",
":",
"raise",
"NotImplementedError"
] |
Updates a counter by the given value.
|
[
"Updates",
"a",
"counter",
"by",
"the",
"given",
"value",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L285-L290
|
18,935
|
basho/riak-python-client
|
riak/transports/transport.py
|
Transport.fetch_datatype
|
def fetch_datatype(self, bucket, key, r=None, pr=None, basic_quorum=None,
notfound_ok=None, timeout=None, include_context=None):
"""
Fetches a Riak Datatype.
"""
raise NotImplementedError
|
python
|
def fetch_datatype(self, bucket, key, r=None, pr=None, basic_quorum=None,
notfound_ok=None, timeout=None, include_context=None):
"""
Fetches a Riak Datatype.
"""
raise NotImplementedError
|
[
"def",
"fetch_datatype",
"(",
"self",
",",
"bucket",
",",
"key",
",",
"r",
"=",
"None",
",",
"pr",
"=",
"None",
",",
"basic_quorum",
"=",
"None",
",",
"notfound_ok",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"include_context",
"=",
"None",
")",
":",
"raise",
"NotImplementedError"
] |
Fetches a Riak Datatype.
|
[
"Fetches",
"a",
"Riak",
"Datatype",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L292-L297
|
18,936
|
basho/riak-python-client
|
riak/transports/transport.py
|
Transport.update_datatype
|
def update_datatype(self, datatype, w=None, dw=None, pw=None,
return_body=None, timeout=None, include_context=None):
"""
Updates a Riak Datatype by sending local operations to the server.
"""
raise NotImplementedError
|
python
|
def update_datatype(self, datatype, w=None, dw=None, pw=None,
return_body=None, timeout=None, include_context=None):
"""
Updates a Riak Datatype by sending local operations to the server.
"""
raise NotImplementedError
|
[
"def",
"update_datatype",
"(",
"self",
",",
"datatype",
",",
"w",
"=",
"None",
",",
"dw",
"=",
"None",
",",
"pw",
"=",
"None",
",",
"return_body",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"include_context",
"=",
"None",
")",
":",
"raise",
"NotImplementedError"
] |
Updates a Riak Datatype by sending local operations to the server.
|
[
"Updates",
"a",
"Riak",
"Datatype",
"by",
"sending",
"local",
"operations",
"to",
"the",
"server",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L299-L304
|
18,937
|
basho/riak-python-client
|
riak/transports/transport.py
|
Transport._search_mapred_emu
|
def _search_mapred_emu(self, index, query):
"""
Emulates a search request via MapReduce. Used in the case
where the transport supports MapReduce but has no native
search capability.
"""
phases = []
if not self.phaseless_mapred():
phases.append({'language': 'erlang',
'module': 'riak_kv_mapreduce',
'function': 'reduce_identity',
'keep': True})
mr_result = self.mapred({'module': 'riak_search',
'function': 'mapred_search',
'arg': [index, query]},
phases)
result = {'num_found': len(mr_result),
'max_score': 0.0,
'docs': []}
for bucket, key, data in mr_result:
if u'score' in data and data[u'score'][0] > result['max_score']:
result['max_score'] = data[u'score'][0]
result['docs'].append({u'id': key})
return result
|
python
|
def _search_mapred_emu(self, index, query):
"""
Emulates a search request via MapReduce. Used in the case
where the transport supports MapReduce but has no native
search capability.
"""
phases = []
if not self.phaseless_mapred():
phases.append({'language': 'erlang',
'module': 'riak_kv_mapreduce',
'function': 'reduce_identity',
'keep': True})
mr_result = self.mapred({'module': 'riak_search',
'function': 'mapred_search',
'arg': [index, query]},
phases)
result = {'num_found': len(mr_result),
'max_score': 0.0,
'docs': []}
for bucket, key, data in mr_result:
if u'score' in data and data[u'score'][0] > result['max_score']:
result['max_score'] = data[u'score'][0]
result['docs'].append({u'id': key})
return result
|
[
"def",
"_search_mapred_emu",
"(",
"self",
",",
"index",
",",
"query",
")",
":",
"phases",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"phaseless_mapred",
"(",
")",
":",
"phases",
".",
"append",
"(",
"{",
"'language'",
":",
"'erlang'",
",",
"'module'",
":",
"'riak_kv_mapreduce'",
",",
"'function'",
":",
"'reduce_identity'",
",",
"'keep'",
":",
"True",
"}",
")",
"mr_result",
"=",
"self",
".",
"mapred",
"(",
"{",
"'module'",
":",
"'riak_search'",
",",
"'function'",
":",
"'mapred_search'",
",",
"'arg'",
":",
"[",
"index",
",",
"query",
"]",
"}",
",",
"phases",
")",
"result",
"=",
"{",
"'num_found'",
":",
"len",
"(",
"mr_result",
")",
",",
"'max_score'",
":",
"0.0",
",",
"'docs'",
":",
"[",
"]",
"}",
"for",
"bucket",
",",
"key",
",",
"data",
"in",
"mr_result",
":",
"if",
"u'score'",
"in",
"data",
"and",
"data",
"[",
"u'score'",
"]",
"[",
"0",
"]",
">",
"result",
"[",
"'max_score'",
"]",
":",
"result",
"[",
"'max_score'",
"]",
"=",
"data",
"[",
"u'score'",
"]",
"[",
"0",
"]",
"result",
"[",
"'docs'",
"]",
".",
"append",
"(",
"{",
"u'id'",
":",
"key",
"}",
")",
"return",
"result"
] |
Emulates a search request via MapReduce. Used in the case
where the transport supports MapReduce but has no native
search capability.
|
[
"Emulates",
"a",
"search",
"request",
"via",
"MapReduce",
".",
"Used",
"in",
"the",
"case",
"where",
"the",
"transport",
"supports",
"MapReduce",
"but",
"has",
"no",
"native",
"search",
"capability",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L313-L336
|
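A worked illustration of the reshaping loop at the end of `_search_mapred_emu`, using made-up MapReduce rows; only the data handling is reproduced, not the actual `mapred` call.

mr_result = [
    ('searchbucket', 'doc1', {u'score': [0.82], u'title': 'first'}),
    ('searchbucket', 'doc2', {u'score': [1.40], u'title': 'second'}),
    ('searchbucket', 'doc3', {}),                  # row without a score
]

result = {'num_found': len(mr_result), 'max_score': 0.0, 'docs': []}
for bucket, key, data in mr_result:
    if u'score' in data and data[u'score'][0] > result['max_score']:
        result['max_score'] = data[u'score'][0]
    result['docs'].append({u'id': key})

assert result == {'num_found': 3,
                  'max_score': 1.40,
                  'docs': [{u'id': 'doc1'}, {u'id': 'doc2'}, {u'id': 'doc3'}]}
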
18,938
|
basho/riak-python-client
|
riak/transports/transport.py
|
Transport._get_index_mapred_emu
|
def _get_index_mapred_emu(self, bucket, index, startkey, endkey=None):
"""
Emulates a secondary index request via MapReduce. Used in the
case where the transport supports MapReduce but has no native
secondary index query capability.
"""
phases = []
if not self.phaseless_mapred():
phases.append({'language': 'erlang',
'module': 'riak_kv_mapreduce',
'function': 'reduce_identity',
'keep': True})
if endkey:
result = self.mapred({'bucket': bucket,
'index': index,
'start': startkey,
'end': endkey},
phases)
else:
result = self.mapred({'bucket': bucket,
'index': index,
'key': startkey},
phases)
return [key for resultbucket, key in result]
|
python
|
def _get_index_mapred_emu(self, bucket, index, startkey, endkey=None):
"""
Emulates a secondary index request via MapReduce. Used in the
case where the transport supports MapReduce but has no native
secondary index query capability.
"""
phases = []
if not self.phaseless_mapred():
phases.append({'language': 'erlang',
'module': 'riak_kv_mapreduce',
'function': 'reduce_identity',
'keep': True})
if endkey:
result = self.mapred({'bucket': bucket,
'index': index,
'start': startkey,
'end': endkey},
phases)
else:
result = self.mapred({'bucket': bucket,
'index': index,
'key': startkey},
phases)
return [key for resultbucket, key in result]
|
[
"def",
"_get_index_mapred_emu",
"(",
"self",
",",
"bucket",
",",
"index",
",",
"startkey",
",",
"endkey",
"=",
"None",
")",
":",
"phases",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"phaseless_mapred",
"(",
")",
":",
"phases",
".",
"append",
"(",
"{",
"'language'",
":",
"'erlang'",
",",
"'module'",
":",
"'riak_kv_mapreduce'",
",",
"'function'",
":",
"'reduce_identity'",
",",
"'keep'",
":",
"True",
"}",
")",
"if",
"endkey",
":",
"result",
"=",
"self",
".",
"mapred",
"(",
"{",
"'bucket'",
":",
"bucket",
",",
"'index'",
":",
"index",
",",
"'start'",
":",
"startkey",
",",
"'end'",
":",
"endkey",
"}",
",",
"phases",
")",
"else",
":",
"result",
"=",
"self",
".",
"mapred",
"(",
"{",
"'bucket'",
":",
"bucket",
",",
"'index'",
":",
"index",
",",
"'key'",
":",
"startkey",
"}",
",",
"phases",
")",
"return",
"[",
"key",
"for",
"resultbucket",
",",
"key",
"in",
"result",
"]"
] |
Emulates a secondary index request via MapReduce. Used in the
case where the transport supports MapReduce but has no native
secondary index query capability.
|
[
"Emulates",
"a",
"secondary",
"index",
"request",
"via",
"MapReduce",
".",
"Used",
"in",
"the",
"case",
"where",
"the",
"transport",
"supports",
"MapReduce",
"but",
"has",
"no",
"native",
"secondary",
"index",
"query",
"capability",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L339-L362
|
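The two input shapes that `_get_index_mapred_emu` hands to `mapred`, written out with placeholder bucket, index, and key values, plus the final key extraction.

range_inputs = {'bucket': 'users',                # endkey given -> range query
                'index': 'age_int',
                'start': 18,
                'end': 65}

exact_inputs = {'bucket': 'users',                # no endkey -> exact match
                'index': 'email_bin',
                'key': 'someone@example.com'}

mapred_rows = [('users', 'key1'), ('users', 'key2')]   # shape mapred returns
assert [key for _bucket, key in mapred_rows] == ['key1', 'key2']
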
18,939
|
basho/riak-python-client
|
riak/codecs/http.py
|
HttpCodec._parse_body
|
def _parse_body(self, robj, response, expected_statuses):
"""
Parse the body of an object response and populate the object.
"""
# If no response given, then return.
if response is None:
return None
status, headers, data = response
# Check if the server is down(status==0)
if not status:
m = 'Could not contact Riak Server: http://{0}:{1}!'.format(
self._node.host, self._node.http_port)
raise RiakError(m)
# Make sure expected code came back
self.check_http_code(status, expected_statuses)
if 'x-riak-vclock' in headers:
robj.vclock = VClock(headers['x-riak-vclock'], 'base64')
# If 404(Not Found), then clear the object.
if status == 404:
robj.siblings = []
return None
# If 201 Created, we need to extract the location and set the
# key on the object.
elif status == 201:
robj.key = headers['location'].strip().split('/')[-1]
# If 300(Siblings), apply the siblings to the object
elif status == 300:
ctype, params = parse_header(headers['content-type'])
if ctype == 'multipart/mixed':
if six.PY3:
data = bytes_to_str(data)
boundary = re.compile('\r?\n--%s(?:--)?\r?\n' %
re.escape(params['boundary']))
parts = [message_from_string(p)
for p in re.split(boundary, data)[1:-1]]
robj.siblings = [self._parse_sibling(RiakContent(robj),
part.items(),
part.get_payload())
for part in parts]
# Invoke sibling-resolution logic
if robj.resolver is not None:
robj.resolver(robj)
return robj
else:
raise Exception('unexpected sibling response format: {0}'.
format(ctype))
robj.siblings = [self._parse_sibling(RiakContent(robj),
headers.items(),
data)]
return robj
|
python
|
def _parse_body(self, robj, response, expected_statuses):
"""
Parse the body of an object response and populate the object.
"""
# If no response given, then return.
if response is None:
return None
status, headers, data = response
# Check if the server is down(status==0)
if not status:
m = 'Could not contact Riak Server: http://{0}:{1}!'.format(
self._node.host, self._node.http_port)
raise RiakError(m)
# Make sure expected code came back
self.check_http_code(status, expected_statuses)
if 'x-riak-vclock' in headers:
robj.vclock = VClock(headers['x-riak-vclock'], 'base64')
# If 404(Not Found), then clear the object.
if status == 404:
robj.siblings = []
return None
# If 201 Created, we need to extract the location and set the
# key on the object.
elif status == 201:
robj.key = headers['location'].strip().split('/')[-1]
# If 300(Siblings), apply the siblings to the object
elif status == 300:
ctype, params = parse_header(headers['content-type'])
if ctype == 'multipart/mixed':
if six.PY3:
data = bytes_to_str(data)
boundary = re.compile('\r?\n--%s(?:--)?\r?\n' %
re.escape(params['boundary']))
parts = [message_from_string(p)
for p in re.split(boundary, data)[1:-1]]
robj.siblings = [self._parse_sibling(RiakContent(robj),
part.items(),
part.get_payload())
for part in parts]
# Invoke sibling-resolution logic
if robj.resolver is not None:
robj.resolver(robj)
return robj
else:
raise Exception('unexpected sibling response format: {0}'.
format(ctype))
robj.siblings = [self._parse_sibling(RiakContent(robj),
headers.items(),
data)]
return robj
|
[
"def",
"_parse_body",
"(",
"self",
",",
"robj",
",",
"response",
",",
"expected_statuses",
")",
":",
"# If no response given, then return.",
"if",
"response",
"is",
"None",
":",
"return",
"None",
"status",
",",
"headers",
",",
"data",
"=",
"response",
"# Check if the server is down(status==0)",
"if",
"not",
"status",
":",
"m",
"=",
"'Could not contact Riak Server: http://{0}:{1}!'",
".",
"format",
"(",
"self",
".",
"_node",
".",
"host",
",",
"self",
".",
"_node",
".",
"http_port",
")",
"raise",
"RiakError",
"(",
"m",
")",
"# Make sure expected code came back",
"self",
".",
"check_http_code",
"(",
"status",
",",
"expected_statuses",
")",
"if",
"'x-riak-vclock'",
"in",
"headers",
":",
"robj",
".",
"vclock",
"=",
"VClock",
"(",
"headers",
"[",
"'x-riak-vclock'",
"]",
",",
"'base64'",
")",
"# If 404(Not Found), then clear the object.",
"if",
"status",
"==",
"404",
":",
"robj",
".",
"siblings",
"=",
"[",
"]",
"return",
"None",
"# If 201 Created, we need to extract the location and set the",
"# key on the object.",
"elif",
"status",
"==",
"201",
":",
"robj",
".",
"key",
"=",
"headers",
"[",
"'location'",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"# If 300(Siblings), apply the siblings to the object",
"elif",
"status",
"==",
"300",
":",
"ctype",
",",
"params",
"=",
"parse_header",
"(",
"headers",
"[",
"'content-type'",
"]",
")",
"if",
"ctype",
"==",
"'multipart/mixed'",
":",
"if",
"six",
".",
"PY3",
":",
"data",
"=",
"bytes_to_str",
"(",
"data",
")",
"boundary",
"=",
"re",
".",
"compile",
"(",
"'\\r?\\n--%s(?:--)?\\r?\\n'",
"%",
"re",
".",
"escape",
"(",
"params",
"[",
"'boundary'",
"]",
")",
")",
"parts",
"=",
"[",
"message_from_string",
"(",
"p",
")",
"for",
"p",
"in",
"re",
".",
"split",
"(",
"boundary",
",",
"data",
")",
"[",
"1",
":",
"-",
"1",
"]",
"]",
"robj",
".",
"siblings",
"=",
"[",
"self",
".",
"_parse_sibling",
"(",
"RiakContent",
"(",
"robj",
")",
",",
"part",
".",
"items",
"(",
")",
",",
"part",
".",
"get_payload",
"(",
")",
")",
"for",
"part",
"in",
"parts",
"]",
"# Invoke sibling-resolution logic",
"if",
"robj",
".",
"resolver",
"is",
"not",
"None",
":",
"robj",
".",
"resolver",
"(",
"robj",
")",
"return",
"robj",
"else",
":",
"raise",
"Exception",
"(",
"'unexpected sibling response format: {0}'",
".",
"format",
"(",
"ctype",
")",
")",
"robj",
".",
"siblings",
"=",
"[",
"self",
".",
"_parse_sibling",
"(",
"RiakContent",
"(",
"robj",
")",
",",
"headers",
".",
"items",
"(",
")",
",",
"data",
")",
"]",
"return",
"robj"
] |
Parse the body of an object response and populate the object.
|
[
"Parse",
"the",
"body",
"of",
"an",
"object",
"response",
"and",
"populate",
"the",
"object",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/codecs/http.py#L46-L104
|
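The sibling-splitting step of `_parse_body` in isolation; the boundary string and response body below are fabricated to mimic a two-sibling 300 response.

import re
from email import message_from_string

boundary_param = 'XyZ'       # in practice taken from the Content-Type params
data = ('preamble\r\n'
        '--XyZ\r\n'
        'Content-Type: text/plain\r\n\r\nfirst sibling'
        '\r\n--XyZ\r\n'
        'Content-Type: text/plain\r\n\r\nsecond sibling'
        '\r\n--XyZ--\r\n')

boundary = re.compile('\r?\n--%s(?:--)?\r?\n' % re.escape(boundary_param))
parts = [message_from_string(p) for p in re.split(boundary, data)[1:-1]]

assert len(parts) == 2                             # preamble/epilogue dropped
assert parts[0].get_payload() == 'first sibling'
assert parts[1]['Content-Type'] == 'text/plain'
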
18,940
|
basho/riak-python-client
|
riak/codecs/http.py
|
HttpCodec._parse_sibling
|
def _parse_sibling(self, sibling, headers, data):
"""
Parses a single sibling out of a response.
"""
sibling.exists = True
# Parse the headers...
for header, value in headers:
header = header.lower()
if header == 'content-type':
sibling.content_type, sibling.charset = \
self._parse_content_type(value)
elif header == 'etag':
sibling.etag = value
elif header == 'link':
sibling.links = self._parse_links(value)
elif header == 'last-modified':
sibling.last_modified = mktime_tz(parsedate_tz(value))
elif header.startswith('x-riak-meta-'):
metakey = header.replace('x-riak-meta-', '')
sibling.usermeta[metakey] = value
elif header.startswith('x-riak-index-'):
field = header.replace('x-riak-index-', '')
reader = csv.reader([value], skipinitialspace=True)
for line in reader:
for token in line:
token = decode_index_value(field, token)
sibling.add_index(field, token)
elif header == 'x-riak-deleted':
sibling.exists = False
sibling.encoded_data = data
return sibling
|
python
|
def _parse_sibling(self, sibling, headers, data):
"""
Parses a single sibling out of a response.
"""
sibling.exists = True
# Parse the headers...
for header, value in headers:
header = header.lower()
if header == 'content-type':
sibling.content_type, sibling.charset = \
self._parse_content_type(value)
elif header == 'etag':
sibling.etag = value
elif header == 'link':
sibling.links = self._parse_links(value)
elif header == 'last-modified':
sibling.last_modified = mktime_tz(parsedate_tz(value))
elif header.startswith('x-riak-meta-'):
metakey = header.replace('x-riak-meta-', '')
sibling.usermeta[metakey] = value
elif header.startswith('x-riak-index-'):
field = header.replace('x-riak-index-', '')
reader = csv.reader([value], skipinitialspace=True)
for line in reader:
for token in line:
token = decode_index_value(field, token)
sibling.add_index(field, token)
elif header == 'x-riak-deleted':
sibling.exists = False
sibling.encoded_data = data
return sibling
|
[
"def",
"_parse_sibling",
"(",
"self",
",",
"sibling",
",",
"headers",
",",
"data",
")",
":",
"sibling",
".",
"exists",
"=",
"True",
"# Parse the headers...",
"for",
"header",
",",
"value",
"in",
"headers",
":",
"header",
"=",
"header",
".",
"lower",
"(",
")",
"if",
"header",
"==",
"'content-type'",
":",
"sibling",
".",
"content_type",
",",
"sibling",
".",
"charset",
"=",
"self",
".",
"_parse_content_type",
"(",
"value",
")",
"elif",
"header",
"==",
"'etag'",
":",
"sibling",
".",
"etag",
"=",
"value",
"elif",
"header",
"==",
"'link'",
":",
"sibling",
".",
"links",
"=",
"self",
".",
"_parse_links",
"(",
"value",
")",
"elif",
"header",
"==",
"'last-modified'",
":",
"sibling",
".",
"last_modified",
"=",
"mktime_tz",
"(",
"parsedate_tz",
"(",
"value",
")",
")",
"elif",
"header",
".",
"startswith",
"(",
"'x-riak-meta-'",
")",
":",
"metakey",
"=",
"header",
".",
"replace",
"(",
"'x-riak-meta-'",
",",
"''",
")",
"sibling",
".",
"usermeta",
"[",
"metakey",
"]",
"=",
"value",
"elif",
"header",
".",
"startswith",
"(",
"'x-riak-index-'",
")",
":",
"field",
"=",
"header",
".",
"replace",
"(",
"'x-riak-index-'",
",",
"''",
")",
"reader",
"=",
"csv",
".",
"reader",
"(",
"[",
"value",
"]",
",",
"skipinitialspace",
"=",
"True",
")",
"for",
"line",
"in",
"reader",
":",
"for",
"token",
"in",
"line",
":",
"token",
"=",
"decode_index_value",
"(",
"field",
",",
"token",
")",
"sibling",
".",
"add_index",
"(",
"field",
",",
"token",
")",
"elif",
"header",
"==",
"'x-riak-deleted'",
":",
"sibling",
".",
"exists",
"=",
"False",
"sibling",
".",
"encoded_data",
"=",
"data",
"return",
"sibling"
] |
Parses a single sibling out of a response.
|
[
"Parses",
"a",
"single",
"sibling",
"out",
"of",
"a",
"response",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/codecs/http.py#L106-L140
|
18,941
|
basho/riak-python-client
|
riak/codecs/http.py
|
HttpCodec._to_link_header
|
def _to_link_header(self, link):
"""
Convert the link tuple to a link header string. Used internally.
"""
try:
bucket, key, tag = link
except ValueError:
raise RiakError("Invalid link tuple %s" % link)
tag = tag if tag is not None else bucket
url = self.object_path(bucket, key)
header = '<%s>; riaktag="%s"' % (url, tag)
return header
|
python
|
def _to_link_header(self, link):
"""
Convert the link tuple to a link header string. Used internally.
"""
try:
bucket, key, tag = link
except ValueError:
raise RiakError("Invalid link tuple %s" % link)
tag = tag if tag is not None else bucket
url = self.object_path(bucket, key)
header = '<%s>; riaktag="%s"' % (url, tag)
return header
|
[
"def",
"_to_link_header",
"(",
"self",
",",
"link",
")",
":",
"try",
":",
"bucket",
",",
"key",
",",
"tag",
"=",
"link",
"except",
"ValueError",
":",
"raise",
"RiakError",
"(",
"\"Invalid link tuple %s\"",
"%",
"link",
")",
"tag",
"=",
"tag",
"if",
"tag",
"is",
"not",
"None",
"else",
"bucket",
"url",
"=",
"self",
".",
"object_path",
"(",
"bucket",
",",
"key",
")",
"header",
"=",
"'<%s>; riaktag=\"%s\"'",
"%",
"(",
"url",
",",
"tag",
")",
"return",
"header"
] |
Convert the link tuple to a link header string. Used internally.
|
[
"Convert",
"the",
"link",
"tuple",
"to",
"a",
"link",
"header",
"string",
".",
"Used",
"internally",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/codecs/http.py#L142-L153
|
18,942
|
basho/riak-python-client
|
riak/codecs/http.py
|
HttpCodec._normalize_json_search_response
|
def _normalize_json_search_response(self, json):
"""
Normalizes a JSON search response so that PB and HTTP have the
same return value
"""
result = {}
if 'facet_counts' in json:
result['facet_counts'] = json[u'facet_counts']
if 'grouped' in json:
result['grouped'] = json[u'grouped']
if 'stats' in json:
result['stats'] = json[u'stats']
if u'response' in json:
result['num_found'] = json[u'response'][u'numFound']
result['max_score'] = float(json[u'response'][u'maxScore'])
docs = []
for doc in json[u'response'][u'docs']:
resdoc = {}
if u'_yz_rk' in doc:
# Is this a Riak 2.0 result?
resdoc = doc
else:
# Riak Search 1.0 Legacy assumptions about format
resdoc[u'id'] = doc[u'id']
if u'fields' in doc:
for k, v in six.iteritems(doc[u'fields']):
resdoc[k] = v
docs.append(resdoc)
result['docs'] = docs
return result
|
python
|
def _normalize_json_search_response(self, json):
"""
Normalizes a JSON search response so that PB and HTTP have the
same return value
"""
result = {}
if 'facet_counts' in json:
result['facet_counts'] = json[u'facet_counts']
if 'grouped' in json:
result['grouped'] = json[u'grouped']
if 'stats' in json:
result['stats'] = json[u'stats']
if u'response' in json:
result['num_found'] = json[u'response'][u'numFound']
result['max_score'] = float(json[u'response'][u'maxScore'])
docs = []
for doc in json[u'response'][u'docs']:
resdoc = {}
if u'_yz_rk' in doc:
# Is this a Riak 2.0 result?
resdoc = doc
else:
# Riak Search 1.0 Legacy assumptions about format
resdoc[u'id'] = doc[u'id']
if u'fields' in doc:
for k, v in six.iteritems(doc[u'fields']):
resdoc[k] = v
docs.append(resdoc)
result['docs'] = docs
return result
|
[
"def",
"_normalize_json_search_response",
"(",
"self",
",",
"json",
")",
":",
"result",
"=",
"{",
"}",
"if",
"'facet_counts'",
"in",
"json",
":",
"result",
"[",
"'facet_counts'",
"]",
"=",
"json",
"[",
"u'facet_counts'",
"]",
"if",
"'grouped'",
"in",
"json",
":",
"result",
"[",
"'grouped'",
"]",
"=",
"json",
"[",
"u'grouped'",
"]",
"if",
"'stats'",
"in",
"json",
":",
"result",
"[",
"'stats'",
"]",
"=",
"json",
"[",
"u'stats'",
"]",
"if",
"u'response'",
"in",
"json",
":",
"result",
"[",
"'num_found'",
"]",
"=",
"json",
"[",
"u'response'",
"]",
"[",
"u'numFound'",
"]",
"result",
"[",
"'max_score'",
"]",
"=",
"float",
"(",
"json",
"[",
"u'response'",
"]",
"[",
"u'maxScore'",
"]",
")",
"docs",
"=",
"[",
"]",
"for",
"doc",
"in",
"json",
"[",
"u'response'",
"]",
"[",
"u'docs'",
"]",
":",
"resdoc",
"=",
"{",
"}",
"if",
"u'_yz_rk'",
"in",
"doc",
":",
"# Is this a Riak 2.0 result?",
"resdoc",
"=",
"doc",
"else",
":",
"# Riak Search 1.0 Legacy assumptions about format",
"resdoc",
"[",
"u'id'",
"]",
"=",
"doc",
"[",
"u'id'",
"]",
"if",
"u'fields'",
"in",
"doc",
":",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"doc",
"[",
"u'fields'",
"]",
")",
":",
"resdoc",
"[",
"k",
"]",
"=",
"v",
"docs",
".",
"append",
"(",
"resdoc",
")",
"result",
"[",
"'docs'",
"]",
"=",
"docs",
"return",
"result"
] |
Normalizes a JSON search response so that PB and HTTP have the
same return value
|
[
"Normalizes",
"a",
"JSON",
"search",
"response",
"so",
"that",
"PB",
"and",
"HTTP",
"have",
"the",
"same",
"return",
"value"
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/codecs/http.py#L223-L252
|
18,943
|
basho/riak-python-client
|
riak/codecs/http.py
|
HttpCodec._normalize_xml_search_response
|
def _normalize_xml_search_response(self, xml):
"""
Normalizes an XML search response so that PB and HTTP have the
same return value
"""
target = XMLSearchResult()
parser = ElementTree.XMLParser(target=target)
parser.feed(xml)
return parser.close()
|
python
|
def _normalize_xml_search_response(self, xml):
"""
Normalizes an XML search response so that PB and HTTP have the
same return value
"""
target = XMLSearchResult()
parser = ElementTree.XMLParser(target=target)
parser.feed(xml)
return parser.close()
|
[
"def",
"_normalize_xml_search_response",
"(",
"self",
",",
"xml",
")",
":",
"target",
"=",
"XMLSearchResult",
"(",
")",
"parser",
"=",
"ElementTree",
".",
"XMLParser",
"(",
"target",
"=",
"target",
")",
"parser",
".",
"feed",
"(",
"xml",
")",
"return",
"parser",
".",
"close",
"(",
")"
] |
Normalizes an XML search response so that PB and HTTP have the
same return value
|
[
"Normalizes",
"an",
"XML",
"search",
"response",
"so",
"that",
"PB",
"and",
"HTTP",
"have",
"the",
"same",
"return",
"value"
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/codecs/http.py#L254-L262
|
18,944
|
basho/riak-python-client
|
riak/transports/http/__init__.py
|
NoNagleHTTPConnection.connect
|
def connect(self):
"""
Set TCP_NODELAY on socket
"""
HTTPConnection.connect(self)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
python
|
def connect(self):
"""
Set TCP_NODELAY on socket
"""
HTTPConnection.connect(self)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
[
"def",
"connect",
"(",
"self",
")",
":",
"HTTPConnection",
".",
"connect",
"(",
"self",
")",
"self",
".",
"sock",
".",
"setsockopt",
"(",
"socket",
".",
"IPPROTO_TCP",
",",
"socket",
".",
"TCP_NODELAY",
",",
"1",
")"
] |
Set TCP_NODELAY on socket
|
[
"Set",
"TCP_NODELAY",
"on",
"socket"
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/http/__init__.py#L52-L57
|
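The connect override in the row above is only two lines; purely as a self-contained, hedged sketch of the same idea (disable Nagle's algorithm on an http.client connection so small requests are flushed immediately), something like the following works on Python 3. The class name and target host are illustrative, not part of the Riak client.

import socket
from http.client import HTTPConnection

class NoDelayHTTPConnection(HTTPConnection):
    """Illustrative HTTPConnection subclass that sets TCP_NODELAY after connecting."""

    def connect(self):
        # Open the TCP connection as usual ...
        super().connect()
        # ... then disable Nagle's algorithm so small writes go out immediately.
        self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

if __name__ == "__main__":
    # Needs network access; any reachable HTTP host will do.
    conn = NoDelayHTTPConnection("example.com", 80, timeout=5)
    conn.connect()
    print("TCP_NODELAY set on", conn.sock.getpeername())
    conn.close()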
18,945
|
basho/riak-python-client
|
riak/client/transport.py
|
RiakClientTransport._with_retries
|
def _with_retries(self, pool, fn):
"""
Performs the passed function with retries against the given pool.
:param pool: the connection pool to use
:type pool: Pool
:param fn: the function to pass a transport
:type fn: function
"""
skip_nodes = []
def _skip_bad_nodes(transport):
return transport._node not in skip_nodes
retry_count = self.retries - 1
first_try = True
current_try = 0
while True:
try:
with pool.transaction(
_filter=_skip_bad_nodes,
yield_resource=True) as resource:
transport = resource.object
try:
return fn(transport)
except (IOError, HTTPException, ConnectionClosed) as e:
resource.errored = True
if _is_retryable(e):
transport._node.error_rate.incr(1)
skip_nodes.append(transport._node)
if first_try:
continue
else:
raise BadResource(e)
else:
raise
except BadResource as e:
if current_try < retry_count:
resource.errored = True
current_try += 1
continue
else:
# Re-raise the inner exception
raise e.args[0]
finally:
first_try = False
|
python
|
def _with_retries(self, pool, fn):
"""
Performs the passed function with retries against the given pool.
:param pool: the connection pool to use
:type pool: Pool
:param fn: the function to pass a transport
:type fn: function
"""
skip_nodes = []
def _skip_bad_nodes(transport):
return transport._node not in skip_nodes
retry_count = self.retries - 1
first_try = True
current_try = 0
while True:
try:
with pool.transaction(
_filter=_skip_bad_nodes,
yield_resource=True) as resource:
transport = resource.object
try:
return fn(transport)
except (IOError, HTTPException, ConnectionClosed) as e:
resource.errored = True
if _is_retryable(e):
transport._node.error_rate.incr(1)
skip_nodes.append(transport._node)
if first_try:
continue
else:
raise BadResource(e)
else:
raise
except BadResource as e:
if current_try < retry_count:
resource.errored = True
current_try += 1
continue
else:
# Re-raise the inner exception
raise e.args[0]
finally:
first_try = False
|
[
"def",
"_with_retries",
"(",
"self",
",",
"pool",
",",
"fn",
")",
":",
"skip_nodes",
"=",
"[",
"]",
"def",
"_skip_bad_nodes",
"(",
"transport",
")",
":",
"return",
"transport",
".",
"_node",
"not",
"in",
"skip_nodes",
"retry_count",
"=",
"self",
".",
"retries",
"-",
"1",
"first_try",
"=",
"True",
"current_try",
"=",
"0",
"while",
"True",
":",
"try",
":",
"with",
"pool",
".",
"transaction",
"(",
"_filter",
"=",
"_skip_bad_nodes",
",",
"yield_resource",
"=",
"True",
")",
"as",
"resource",
":",
"transport",
"=",
"resource",
".",
"object",
"try",
":",
"return",
"fn",
"(",
"transport",
")",
"except",
"(",
"IOError",
",",
"HTTPException",
",",
"ConnectionClosed",
")",
"as",
"e",
":",
"resource",
".",
"errored",
"=",
"True",
"if",
"_is_retryable",
"(",
"e",
")",
":",
"transport",
".",
"_node",
".",
"error_rate",
".",
"incr",
"(",
"1",
")",
"skip_nodes",
".",
"append",
"(",
"transport",
".",
"_node",
")",
"if",
"first_try",
":",
"continue",
"else",
":",
"raise",
"BadResource",
"(",
"e",
")",
"else",
":",
"raise",
"except",
"BadResource",
"as",
"e",
":",
"if",
"current_try",
"<",
"retry_count",
":",
"resource",
".",
"errored",
"=",
"True",
"current_try",
"+=",
"1",
"continue",
"else",
":",
"# Re-raise the inner exception",
"raise",
"e",
".",
"args",
"[",
"0",
"]",
"finally",
":",
"first_try",
"=",
"False"
] |
Performs the passed function with retries against the given pool.
:param pool: the connection pool to use
:type pool: Pool
:param fn: the function to pass a transport
:type fn: function
|
[
"Performs",
"the",
"passed",
"function",
"with",
"retries",
"against",
"the",
"given",
"pool",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/transport.py#L143-L188
|
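The _with_retries row above depends on the client's Pool and BadResource machinery; purely as a hedged illustration of the underlying pattern (try a node, remember nodes that failed, retry against the rest), a library-free sketch could look like this. All names below are invented for the example.

import random

class AllNodesFailed(Exception):
    """Raised when every candidate node has errored within the retry budget."""

def with_retries(nodes, fn, retries=3):
    # Track nodes that raised so later attempts skip them, mirroring the
    # skip_nodes list in the row above.
    skip = set()
    last_error = None
    for _ in range(retries):
        candidates = [n for n in nodes if n not in skip]
        if not candidates:
            break
        node = random.choice(candidates)
        try:
            return fn(node)
        except IOError as e:
            skip.add(node)
            last_error = e
    raise AllNodesFailed(last_error)

if __name__ == "__main__":
    def ping(node):
        if node != "10.0.0.3":
            raise IOError("connection refused: %s" % node)
        return "pong from %s" % node
    print(with_retries(["10.0.0.1", "10.0.0.2", "10.0.0.3"], ping))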
18,946
|
basho/riak-python-client
|
riak/client/transport.py
|
RiakClientTransport._choose_pool
|
def _choose_pool(self, protocol=None):
"""
Selects a connection pool according to the default protocol
and the passed one.
:param protocol: the protocol to use
:type protocol: string
:rtype: Pool
"""
if not protocol:
protocol = self.protocol
if protocol == 'http':
pool = self._http_pool
elif protocol == 'tcp' or protocol == 'pbc':
pool = self._tcp_pool
else:
raise ValueError("invalid protocol %s" % protocol)
if pool is None or self._closed:
# NB: GH-500, this can happen if client is closed
raise RuntimeError("Client is closed.")
return pool
|
python
|
def _choose_pool(self, protocol=None):
"""
Selects a connection pool according to the default protocol
and the passed one.
:param protocol: the protocol to use
:type protocol: string
:rtype: Pool
"""
if not protocol:
protocol = self.protocol
if protocol == 'http':
pool = self._http_pool
elif protocol == 'tcp' or protocol == 'pbc':
pool = self._tcp_pool
else:
raise ValueError("invalid protocol %s" % protocol)
if pool is None or self._closed:
# NB: GH-500, this can happen if client is closed
raise RuntimeError("Client is closed.")
return pool
|
[
"def",
"_choose_pool",
"(",
"self",
",",
"protocol",
"=",
"None",
")",
":",
"if",
"not",
"protocol",
":",
"protocol",
"=",
"self",
".",
"protocol",
"if",
"protocol",
"==",
"'http'",
":",
"pool",
"=",
"self",
".",
"_http_pool",
"elif",
"protocol",
"==",
"'tcp'",
"or",
"protocol",
"==",
"'pbc'",
":",
"pool",
"=",
"self",
".",
"_tcp_pool",
"else",
":",
"raise",
"ValueError",
"(",
"\"invalid protocol %s\"",
"%",
"protocol",
")",
"if",
"pool",
"is",
"None",
"or",
"self",
".",
"_closed",
":",
"# NB: GH-500, this can happen if client is closed",
"raise",
"RuntimeError",
"(",
"\"Client is closed.\"",
")",
"return",
"pool"
] |
Selects a connection pool according to the default protocol
and the passed one.
:param protocol: the protocol to use
:type protocol: string
:rtype: Pool
|
[
"Selects",
"a",
"connection",
"pool",
"according",
"to",
"the",
"default",
"protocol",
"and",
"the",
"passed",
"one",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/transport.py#L190-L210
|
18,947
|
basho/riak-python-client
|
riak/client/__init__.py
|
default_encoder
|
def default_encoder(obj):
"""
Default encoder for JSON datatypes, which returns UTF-8 encoded
json instead of the default bloated backslash u XXXX escaped ASCII strings.
"""
if isinstance(obj, bytes):
return json.dumps(bytes_to_str(obj),
ensure_ascii=False).encode("utf-8")
else:
return json.dumps(obj, ensure_ascii=False).encode("utf-8")
|
python
|
def default_encoder(obj):
"""
Default encoder for JSON datatypes, which returns UTF-8 encoded
json instead of the default bloated backslash u XXXX escaped ASCII strings.
"""
if isinstance(obj, bytes):
return json.dumps(bytes_to_str(obj),
ensure_ascii=False).encode("utf-8")
else:
return json.dumps(obj, ensure_ascii=False).encode("utf-8")
|
[
"def",
"default_encoder",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"bytes",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"bytes_to_str",
"(",
"obj",
")",
",",
"ensure_ascii",
"=",
"False",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
"else",
":",
"return",
"json",
".",
"dumps",
"(",
"obj",
",",
"ensure_ascii",
"=",
"False",
")",
".",
"encode",
"(",
"\"utf-8\"",
")"
] |
Default encoder for JSON datatypes, which returns UTF-8 encoded
json instead of the default bloated backslash u XXXX escaped ASCII strings.
|
[
"Default",
"encoder",
"for",
"JSON",
"datatypes",
"which",
"returns",
"UTF",
"-",
"8",
"encoded",
"json",
"instead",
"of",
"the",
"default",
"bloated",
"backslash",
"u",
"XXXX",
"escaped",
"ASCII",
"strings",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/__init__.py#L37-L46
|
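The point of ensure_ascii=False in the row above is easiest to see side by side; this is generic behaviour of the json module, not anything Riak-specific.

import json

payload = {"greeting": "héllo wörld"}

# Default: non-ASCII characters are escaped as \uXXXX sequences.
print(json.dumps(payload))                       # {"greeting": "h\u00e9llo w\u00f6rld"}

# With ensure_ascii=False the characters survive, and encoding to bytes
# yields the compact UTF-8 form the encoder above returns.
print(json.dumps(payload, ensure_ascii=False).encode("utf-8"))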
18,948
|
basho/riak-python-client
|
riak/client/__init__.py
|
RiakClient.close
|
def close(self):
"""
Iterate through all of the connections and close each one.
"""
if not self._closed:
self._closed = True
self._stop_multi_pools()
if self._http_pool is not None:
self._http_pool.clear()
self._http_pool = None
if self._tcp_pool is not None:
self._tcp_pool.clear()
self._tcp_pool = None
|
python
|
def close(self):
"""
Iterate through all of the connections and close each one.
"""
if not self._closed:
self._closed = True
self._stop_multi_pools()
if self._http_pool is not None:
self._http_pool.clear()
self._http_pool = None
if self._tcp_pool is not None:
self._tcp_pool.clear()
self._tcp_pool = None
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_closed",
":",
"self",
".",
"_closed",
"=",
"True",
"self",
".",
"_stop_multi_pools",
"(",
")",
"if",
"self",
".",
"_http_pool",
"is",
"not",
"None",
":",
"self",
".",
"_http_pool",
".",
"clear",
"(",
")",
"self",
".",
"_http_pool",
"=",
"None",
"if",
"self",
".",
"_tcp_pool",
"is",
"not",
"None",
":",
"self",
".",
"_tcp_pool",
".",
"clear",
"(",
")",
"self",
".",
"_tcp_pool",
"=",
"None"
] |
Iterate through all of the connections and close each one.
|
[
"Iterate",
"through",
"all",
"of",
"the",
"connections",
"and",
"close",
"each",
"one",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/__init__.py#L319-L331
|
18,949
|
basho/riak-python-client
|
riak/client/__init__.py
|
RiakClient._create_credentials
|
def _create_credentials(self, n):
"""
Create security credentials, if necessary.
"""
if not n:
return n
elif isinstance(n, SecurityCreds):
return n
elif isinstance(n, dict):
return SecurityCreds(**n)
else:
raise TypeError("%s is not a valid security configuration"
% repr(n))
|
python
|
def _create_credentials(self, n):
"""
Create security credentials, if necessary.
"""
if not n:
return n
elif isinstance(n, SecurityCreds):
return n
elif isinstance(n, dict):
return SecurityCreds(**n)
else:
raise TypeError("%s is not a valid security configuration"
% repr(n))
|
[
"def",
"_create_credentials",
"(",
"self",
",",
"n",
")",
":",
"if",
"not",
"n",
":",
"return",
"n",
"elif",
"isinstance",
"(",
"n",
",",
"SecurityCreds",
")",
":",
"return",
"n",
"elif",
"isinstance",
"(",
"n",
",",
"dict",
")",
":",
"return",
"SecurityCreds",
"(",
"*",
"*",
"n",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"%s is not a valid security configuration\"",
"%",
"repr",
"(",
"n",
")",
")"
] |
Create security credentials, if necessary.
|
[
"Create",
"security",
"credentials",
"if",
"necessary",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/__init__.py#L355-L367
|
18,950
|
basho/riak-python-client
|
riak/transports/http/connection.py
|
HttpConnection._connect
|
def _connect(self):
"""
Use the appropriate connection class; optionally with security.
"""
timeout = None
if self._options is not None and 'timeout' in self._options:
timeout = self._options['timeout']
if self._client._credentials:
self._connection = self._connection_class(
host=self._node.host,
port=self._node.http_port,
credentials=self._client._credentials,
timeout=timeout)
else:
self._connection = self._connection_class(
host=self._node.host,
port=self._node.http_port,
timeout=timeout)
# Forces the population of stats and resources before any
# other requests are made.
self.server_version
|
python
|
def _connect(self):
"""
Use the appropriate connection class; optionally with security.
"""
timeout = None
if self._options is not None and 'timeout' in self._options:
timeout = self._options['timeout']
if self._client._credentials:
self._connection = self._connection_class(
host=self._node.host,
port=self._node.http_port,
credentials=self._client._credentials,
timeout=timeout)
else:
self._connection = self._connection_class(
host=self._node.host,
port=self._node.http_port,
timeout=timeout)
# Forces the population of stats and resources before any
# other requests are made.
self.server_version
|
[
"def",
"_connect",
"(",
"self",
")",
":",
"timeout",
"=",
"None",
"if",
"self",
".",
"_options",
"is",
"not",
"None",
"and",
"'timeout'",
"in",
"self",
".",
"_options",
":",
"timeout",
"=",
"self",
".",
"_options",
"[",
"'timeout'",
"]",
"if",
"self",
".",
"_client",
".",
"_credentials",
":",
"self",
".",
"_connection",
"=",
"self",
".",
"_connection_class",
"(",
"host",
"=",
"self",
".",
"_node",
".",
"host",
",",
"port",
"=",
"self",
".",
"_node",
".",
"http_port",
",",
"credentials",
"=",
"self",
".",
"_client",
".",
"_credentials",
",",
"timeout",
"=",
"timeout",
")",
"else",
":",
"self",
".",
"_connection",
"=",
"self",
".",
"_connection_class",
"(",
"host",
"=",
"self",
".",
"_node",
".",
"host",
",",
"port",
"=",
"self",
".",
"_node",
".",
"http_port",
",",
"timeout",
"=",
"timeout",
")",
"# Forces the population of stats and resources before any",
"# other requests are made.",
"self",
".",
"server_version"
] |
Use the appropriate connection class; optionally with security.
|
[
"Use",
"the",
"appropriate",
"connection",
"class",
";",
"optionally",
"with",
"security",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/http/connection.py#L65-L86
|
18,951
|
basho/riak-python-client
|
riak/transports/http/connection.py
|
HttpConnection._security_auth_headers
|
def _security_auth_headers(self, username, password, headers):
"""
Add in the requisite HTTP Authentication Headers
:param username: Riak Security Username
:type str
:param password: Riak Security Password
:type str
:param headers: Dictionary of headers
:type dict
"""
userColonPassword = username + ":" + password
b64UserColonPassword = base64. \
b64encode(str_to_bytes(userColonPassword)).decode("ascii")
headers['Authorization'] = 'Basic %s' % b64UserColonPassword
|
python
|
def _security_auth_headers(self, username, password, headers):
"""
Add in the requisite HTTP Authentication Headers
:param username: Riak Security Username
:type str
:param password: Riak Security Password
:type str
:param headers: Dictionary of headers
:type dict
"""
userColonPassword = username + ":" + password
b64UserColonPassword = base64. \
b64encode(str_to_bytes(userColonPassword)).decode("ascii")
headers['Authorization'] = 'Basic %s' % b64UserColonPassword
|
[
"def",
"_security_auth_headers",
"(",
"self",
",",
"username",
",",
"password",
",",
"headers",
")",
":",
"userColonPassword",
"=",
"username",
"+",
"\":\"",
"+",
"password",
"b64UserColonPassword",
"=",
"base64",
".",
"b64encode",
"(",
"str_to_bytes",
"(",
"userColonPassword",
")",
")",
".",
"decode",
"(",
"\"ascii\"",
")",
"headers",
"[",
"'Authorization'",
"]",
"=",
"'Basic %s'",
"%",
"b64UserColonPassword"
] |
Add in the requisite HTTP Authentication Headers
:param username: Riak Security Username
:type str
:param password: Riak Security Password
:type str
:param headers: Dictionary of headers
:type dict
|
[
"Add",
"in",
"the",
"requisite",
"HTTP",
"Authentication",
"Headers"
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/http/connection.py#L101-L115
|
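The Authorization header built above is plain HTTP Basic auth; a standalone stdlib sketch with placeholder credentials:

import base64

def basic_auth_header(username, password):
    """Return the value for an HTTP Basic Authorization header."""
    token = base64.b64encode(("%s:%s" % (username, password)).encode("utf-8"))
    return "Basic " + token.decode("ascii")

if __name__ == "__main__":
    headers = {"Authorization": basic_auth_header("riakuser", "sekret")}
    print(headers)  # e.g. {'Authorization': 'Basic cmlha3VzZXI6c2VrcmV0'}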
18,952
|
basho/riak-python-client
|
riak/table.py
|
Table.query
|
def query(self, query, interpolations=None):
"""
Queries a timeseries table.
:param query: The timeseries query.
:type query: string
:rtype: :class:`TsObject <riak.ts_object.TsObject>`
"""
return self._client.ts_query(self, query, interpolations)
|
python
|
def query(self, query, interpolations=None):
"""
Queries a timeseries table.
:param query: The timeseries query.
:type query: string
:rtype: :class:`TsObject <riak.ts_object.TsObject>`
"""
return self._client.ts_query(self, query, interpolations)
|
[
"def",
"query",
"(",
"self",
",",
"query",
",",
"interpolations",
"=",
"None",
")",
":",
"return",
"self",
".",
"_client",
".",
"ts_query",
"(",
"self",
",",
"query",
",",
"interpolations",
")"
] |
Queries a timeseries table.
:param query: The timeseries query.
:type query: string
:rtype: :class:`TsObject <riak.ts_object.TsObject>`
|
[
"Queries",
"a",
"timeseries",
"table",
"."
] |
91de13a16607cdf553d1a194e762734e3bec4231
|
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/table.py#L94-L102
|
18,953
|
adamrehn/ue4cli
|
ue4cli/ConfigurationManager.py
|
ConfigurationManager.getConfigDirectory
|
def getConfigDirectory():
"""
Determines the platform-specific config directory location for ue4cli
"""
if platform.system() == 'Windows':
return os.path.join(os.environ['APPDATA'], 'ue4cli')
else:
return os.path.join(os.environ['HOME'], '.config', 'ue4cli')
|
python
|
def getConfigDirectory():
"""
Determines the platform-specific config directory location for ue4cli
"""
if platform.system() == 'Windows':
return os.path.join(os.environ['APPDATA'], 'ue4cli')
else:
return os.path.join(os.environ['HOME'], '.config', 'ue4cli')
|
[
"def",
"getConfigDirectory",
"(",
")",
":",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"'APPDATA'",
"]",
",",
"'ue4cli'",
")",
"else",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"'HOME'",
"]",
",",
"'.config'",
",",
"'ue4cli'",
")"
] |
Determines the platform-specific config directory location for ue4cli
|
[
"Determines",
"the",
"platform",
"-",
"specific",
"config",
"directory",
"location",
"for",
"ue4cli"
] |
f1c34502c96059e36757b7433da7e98760a75a6f
|
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/ConfigurationManager.py#L10-L17
|
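A hedged, self-contained variant of the same platform switch; the XDG_CONFIG_HOME fallback is an addition for the sketch, not part of ue4cli.

import os
import platform

def config_directory(app_name="ue4cli"):
    """Return a per-user config directory for app_name (illustrative layout)."""
    if platform.system() == "Windows":
        base = os.environ.get("APPDATA", os.path.expanduser("~"))
        return os.path.join(base, app_name)
    # Linux/macOS: honour XDG_CONFIG_HOME when set, else fall back to ~/.config.
    base = os.environ.get("XDG_CONFIG_HOME",
                          os.path.join(os.path.expanduser("~"), ".config"))
    return os.path.join(base, app_name)

if __name__ == "__main__":
    print(config_directory())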
18,954
|
adamrehn/ue4cli
|
ue4cli/ConfigurationManager.py
|
ConfigurationManager.setConfigKey
|
def setConfigKey(key, value):
"""
Sets the config data value for the specified dictionary key
"""
configFile = ConfigurationManager._configFile()
return JsonDataManager(configFile).setKey(key, value)
|
python
|
def setConfigKey(key, value):
"""
Sets the config data value for the specified dictionary key
"""
configFile = ConfigurationManager._configFile()
return JsonDataManager(configFile).setKey(key, value)
|
[
"def",
"setConfigKey",
"(",
"key",
",",
"value",
")",
":",
"configFile",
"=",
"ConfigurationManager",
".",
"_configFile",
"(",
")",
"return",
"JsonDataManager",
"(",
"configFile",
")",
".",
"setKey",
"(",
"key",
",",
"value",
")"
] |
Sets the config data value for the specified dictionary key
|
[
"Sets",
"the",
"config",
"data",
"value",
"for",
"the",
"specified",
"dictionary",
"key"
] |
f1c34502c96059e36757b7433da7e98760a75a6f
|
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/ConfigurationManager.py#L28-L33
|
18,955
|
adamrehn/ue4cli
|
ue4cli/CachedDataManager.py
|
CachedDataManager.clearCache
|
def clearCache():
"""
Clears any cached data we have stored about specific engine versions
"""
if os.path.exists(CachedDataManager._cacheDir()) == True:
shutil.rmtree(CachedDataManager._cacheDir())
|
python
|
def clearCache():
"""
Clears any cached data we have stored about specific engine versions
"""
if os.path.exists(CachedDataManager._cacheDir()) == True:
shutil.rmtree(CachedDataManager._cacheDir())
|
[
"def",
"clearCache",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"CachedDataManager",
".",
"_cacheDir",
"(",
")",
")",
"==",
"True",
":",
"shutil",
".",
"rmtree",
"(",
"CachedDataManager",
".",
"_cacheDir",
"(",
")",
")"
] |
Clears any cached data we have stored about specific engine versions
|
[
"Clears",
"any",
"cached",
"data",
"we",
"have",
"stored",
"about",
"specific",
"engine",
"versions"
] |
f1c34502c96059e36757b7433da7e98760a75a6f
|
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/CachedDataManager.py#L11-L16
|
18,956
|
adamrehn/ue4cli
|
ue4cli/CachedDataManager.py
|
CachedDataManager.getCachedDataKey
|
def getCachedDataKey(engineVersionHash, key):
"""
Retrieves the cached data value for the specified engine version hash and dictionary key
"""
cacheFile = CachedDataManager._cacheFileForHash(engineVersionHash)
return JsonDataManager(cacheFile).getKey(key)
|
python
|
def getCachedDataKey(engineVersionHash, key):
"""
Retrieves the cached data value for the specified engine version hash and dictionary key
"""
cacheFile = CachedDataManager._cacheFileForHash(engineVersionHash)
return JsonDataManager(cacheFile).getKey(key)
|
[
"def",
"getCachedDataKey",
"(",
"engineVersionHash",
",",
"key",
")",
":",
"cacheFile",
"=",
"CachedDataManager",
".",
"_cacheFileForHash",
"(",
"engineVersionHash",
")",
"return",
"JsonDataManager",
"(",
"cacheFile",
")",
".",
"getKey",
"(",
"key",
")"
] |
Retrieves the cached data value for the specified engine version hash and dictionary key
|
[
"Retrieves",
"the",
"cached",
"data",
"value",
"for",
"the",
"specified",
"engine",
"version",
"hash",
"and",
"dictionary",
"key"
] |
f1c34502c96059e36757b7433da7e98760a75a6f
|
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/CachedDataManager.py#L19-L24
|
18,957
|
adamrehn/ue4cli
|
ue4cli/CachedDataManager.py
|
CachedDataManager.setCachedDataKey
|
def setCachedDataKey(engineVersionHash, key, value):
"""
Sets the cached data value for the specified engine version hash and dictionary key
"""
cacheFile = CachedDataManager._cacheFileForHash(engineVersionHash)
return JsonDataManager(cacheFile).setKey(key, value)
|
python
|
def setCachedDataKey(engineVersionHash, key, value):
"""
Sets the cached data value for the specified engine version hash and dictionary key
"""
cacheFile = CachedDataManager._cacheFileForHash(engineVersionHash)
return JsonDataManager(cacheFile).setKey(key, value)
|
[
"def",
"setCachedDataKey",
"(",
"engineVersionHash",
",",
"key",
",",
"value",
")",
":",
"cacheFile",
"=",
"CachedDataManager",
".",
"_cacheFileForHash",
"(",
"engineVersionHash",
")",
"return",
"JsonDataManager",
"(",
"cacheFile",
")",
".",
"setKey",
"(",
"key",
",",
"value",
")"
] |
Sets the cached data value for the specified engine version hash and dictionary key
|
[
"Sets",
"the",
"cached",
"data",
"value",
"for",
"the",
"specified",
"engine",
"version",
"hash",
"and",
"dictionary",
"key"
] |
f1c34502c96059e36757b7433da7e98760a75a6f
|
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/CachedDataManager.py#L27-L32
|
18,958
|
adamrehn/ue4cli
|
ue4cli/Utility.py
|
Utility.writeFile
|
def writeFile(filename, data):
"""
Writes data to a file
"""
with open(filename, 'wb') as f:
f.write(data.encode('utf-8'))
|
python
|
def writeFile(filename, data):
"""
Writes data to a file
"""
with open(filename, 'wb') as f:
f.write(data.encode('utf-8'))
|
[
"def",
"writeFile",
"(",
"filename",
",",
"data",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"data",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] |
Writes data to a file
|
[
"Writes",
"data",
"to",
"a",
"file"
] |
f1c34502c96059e36757b7433da7e98760a75a6f
|
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/Utility.py#L34-L39
|
18,959
|
adamrehn/ue4cli
|
ue4cli/Utility.py
|
Utility.patchFile
|
def patchFile(filename, replacements):
"""
Applies the supplied list of replacements to a file
"""
patched = Utility.readFile(filename)
# Perform each of the replacements in the supplied dictionary
for key in replacements:
patched = patched.replace(key, replacements[key])
Utility.writeFile(filename, patched)
|
python
|
def patchFile(filename, replacements):
"""
Applies the supplied list of replacements to a file
"""
patched = Utility.readFile(filename)
# Perform each of the replacements in the supplied dictionary
for key in replacements:
patched = patched.replace(key, replacements[key])
Utility.writeFile(filename, patched)
|
[
"def",
"patchFile",
"(",
"filename",
",",
"replacements",
")",
":",
"patched",
"=",
"Utility",
".",
"readFile",
"(",
"filename",
")",
"# Perform each of the replacements in the supplied dictionary",
"for",
"key",
"in",
"replacements",
":",
"patched",
"=",
"patched",
".",
"replace",
"(",
"key",
",",
"replacements",
"[",
"key",
"]",
")",
"Utility",
".",
"writeFile",
"(",
"filename",
",",
"patched",
")"
] |
Applies the supplied list of replacements to a file
|
[
"Applies",
"the",
"supplied",
"list",
"of",
"replacements",
"to",
"a",
"file"
] |
f1c34502c96059e36757b7433da7e98760a75a6f
|
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/Utility.py#L42-L52
|
18,960
|
adamrehn/ue4cli
|
ue4cli/Utility.py
|
Utility.escapePathForShell
|
def escapePathForShell(path):
"""
Escapes a filesystem path for use as a command-line argument
"""
if platform.system() == 'Windows':
return '"{}"'.format(path.replace('"', '""'))
else:
return shellescape.quote(path)
|
python
|
def escapePathForShell(path):
"""
Escapes a filesystem path for use as a command-line argument
"""
if platform.system() == 'Windows':
return '"{}"'.format(path.replace('"', '""'))
else:
return shellescape.quote(path)
|
[
"def",
"escapePathForShell",
"(",
"path",
")",
":",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
":",
"return",
"'\"{}\"'",
".",
"format",
"(",
"path",
".",
"replace",
"(",
"'\"'",
",",
"'\"\"'",
")",
")",
"else",
":",
"return",
"shellescape",
".",
"quote",
"(",
"path",
")"
] |
Escapes a filesystem path for use as a command-line argument
|
[
"Escapes",
"a",
"filesystem",
"path",
"for",
"use",
"as",
"a",
"command",
"-",
"line",
"argument"
] |
f1c34502c96059e36757b7433da7e98760a75a6f
|
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/Utility.py#L62-L69
|
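The POSIX branch above relies on the third-party shellescape package; shlex.quote from the standard library behaves similarly for this purpose, so a stdlib-only sketch would be:

import platform
import shlex

def escape_path_for_shell(path):
    """Escape a filesystem path for use as a single command-line argument."""
    if platform.system() == "Windows":
        # cmd.exe convention: wrap in double quotes and double embedded quotes.
        return '"{}"'.format(path.replace('"', '""'))
    # POSIX shells: shlex.quote is the stdlib stand-in for shellescape.quote.
    return shlex.quote(path)

if __name__ == "__main__":
    print(escape_path_for_shell('/tmp/My Project/file "v1".txt'))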
18,961
|
adamrehn/ue4cli
|
ue4cli/Utility.py
|
Utility.join
|
def join(delim, items, quotes=False):
"""
Joins the supplied list of strings after removing any empty strings from the list
"""
transform = lambda s: s
if quotes == True:
transform = lambda s: s if ' ' not in s else '"{}"'.format(s)
stripped = list([transform(i) for i in items if len(i) > 0])
if len(stripped) > 0:
return delim.join(stripped)
return ''
|
python
|
def join(delim, items, quotes=False):
"""
Joins the supplied list of strings after removing any empty strings from the list
"""
transform = lambda s: s
if quotes == True:
transform = lambda s: s if ' ' not in s else '"{}"'.format(s)
stripped = list([transform(i) for i in items if len(i) > 0])
if len(stripped) > 0:
return delim.join(stripped)
return ''
|
[
"def",
"join",
"(",
"delim",
",",
"items",
",",
"quotes",
"=",
"False",
")",
":",
"transform",
"=",
"lambda",
"s",
":",
"s",
"if",
"quotes",
"==",
"True",
":",
"transform",
"=",
"lambda",
"s",
":",
"s",
"if",
"' '",
"not",
"in",
"s",
"else",
"'\"{}\"'",
".",
"format",
"(",
"s",
")",
"stripped",
"=",
"list",
"(",
"[",
"transform",
"(",
"i",
")",
"for",
"i",
"in",
"items",
"if",
"len",
"(",
"i",
")",
">",
"0",
"]",
")",
"if",
"len",
"(",
"stripped",
")",
">",
"0",
":",
"return",
"delim",
".",
"join",
"(",
"stripped",
")",
"return",
"''"
] |
Joins the supplied list of strings after removing any empty strings from the list
|
[
"Joins",
"the",
"supplied",
"list",
"of",
"strings",
"after",
"removing",
"any",
"empty",
"strings",
"from",
"the",
"list"
] |
f1c34502c96059e36757b7433da7e98760a75a6f
|
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/Utility.py#L72-L83
|
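Two quick usage examples for the join helper above; the function body is copied locally so the snippet runs on its own.

def join(delim, items, quotes=False):
    """Join non-empty strings, optionally quoting any item that contains a space."""
    transform = lambda s: s
    if quotes:
        transform = lambda s: s if ' ' not in s else '"{}"'.format(s)
    stripped = [transform(i) for i in items if len(i) > 0]
    return delim.join(stripped) if stripped else ''

# Empty strings are dropped before joining.
print(join(' ', ['UE4Editor', '', '-game', '']))           # UE4Editor -game
# With quotes=True, arguments containing spaces get wrapped in double quotes.
print(join(' ', ['-project', 'My Game.uproject'], True))   # -project "My Game.uproject"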
18,962
|
ageitgey/face_recognition
|
examples/face_recognition_knn.py
|
train
|
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):
"""
Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path to save model on disk
:param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
    :param knn_algo: (optional) underlying data structure to support knn. Default is ball_tree
:param verbose: verbosity of training
:return: returns knn classifier that was trained on the given data.
"""
X = []
y = []
# Loop through each person in the training set
for class_dir in os.listdir(train_dir):
if not os.path.isdir(os.path.join(train_dir, class_dir)):
continue
# Loop through each training image for the current person
for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):
image = face_recognition.load_image_file(img_path)
face_bounding_boxes = face_recognition.face_locations(image)
if len(face_bounding_boxes) != 1:
# If there are no people (or too many people) in a training image, skip the image.
if verbose:
print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face"))
else:
# Add face encoding for current image to the training set
X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0])
y.append(class_dir)
# Determine how many neighbors to use for weighting in the KNN classifier
if n_neighbors is None:
n_neighbors = int(round(math.sqrt(len(X))))
if verbose:
print("Chose n_neighbors automatically:", n_neighbors)
# Create and train the KNN classifier
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
knn_clf.fit(X, y)
# Save the trained KNN classifier
if model_save_path is not None:
with open(model_save_path, 'wb') as f:
pickle.dump(knn_clf, f)
return knn_clf
|
python
|
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):
"""
Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path to save model on disk
:param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
    :param knn_algo: (optional) underlying data structure to support knn. Default is ball_tree
:param verbose: verbosity of training
:return: returns knn classifier that was trained on the given data.
"""
X = []
y = []
# Loop through each person in the training set
for class_dir in os.listdir(train_dir):
if not os.path.isdir(os.path.join(train_dir, class_dir)):
continue
# Loop through each training image for the current person
for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):
image = face_recognition.load_image_file(img_path)
face_bounding_boxes = face_recognition.face_locations(image)
if len(face_bounding_boxes) != 1:
# If there are no people (or too many people) in a training image, skip the image.
if verbose:
print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face"))
else:
# Add face encoding for current image to the training set
X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0])
y.append(class_dir)
# Determine how many neighbors to use for weighting in the KNN classifier
if n_neighbors is None:
n_neighbors = int(round(math.sqrt(len(X))))
if verbose:
print("Chose n_neighbors automatically:", n_neighbors)
# Create and train the KNN classifier
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
knn_clf.fit(X, y)
# Save the trained KNN classifier
if model_save_path is not None:
with open(model_save_path, 'wb') as f:
pickle.dump(knn_clf, f)
return knn_clf
|
[
"def",
"train",
"(",
"train_dir",
",",
"model_save_path",
"=",
"None",
",",
"n_neighbors",
"=",
"None",
",",
"knn_algo",
"=",
"'ball_tree'",
",",
"verbose",
"=",
"False",
")",
":",
"X",
"=",
"[",
"]",
"y",
"=",
"[",
"]",
"# Loop through each person in the training set",
"for",
"class_dir",
"in",
"os",
".",
"listdir",
"(",
"train_dir",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"train_dir",
",",
"class_dir",
")",
")",
":",
"continue",
"# Loop through each training image for the current person",
"for",
"img_path",
"in",
"image_files_in_folder",
"(",
"os",
".",
"path",
".",
"join",
"(",
"train_dir",
",",
"class_dir",
")",
")",
":",
"image",
"=",
"face_recognition",
".",
"load_image_file",
"(",
"img_path",
")",
"face_bounding_boxes",
"=",
"face_recognition",
".",
"face_locations",
"(",
"image",
")",
"if",
"len",
"(",
"face_bounding_boxes",
")",
"!=",
"1",
":",
"# If there are no people (or too many people) in a training image, skip the image.",
"if",
"verbose",
":",
"print",
"(",
"\"Image {} not suitable for training: {}\"",
".",
"format",
"(",
"img_path",
",",
"\"Didn't find a face\"",
"if",
"len",
"(",
"face_bounding_boxes",
")",
"<",
"1",
"else",
"\"Found more than one face\"",
")",
")",
"else",
":",
"# Add face encoding for current image to the training set",
"X",
".",
"append",
"(",
"face_recognition",
".",
"face_encodings",
"(",
"image",
",",
"known_face_locations",
"=",
"face_bounding_boxes",
")",
"[",
"0",
"]",
")",
"y",
".",
"append",
"(",
"class_dir",
")",
"# Determine how many neighbors to use for weighting in the KNN classifier",
"if",
"n_neighbors",
"is",
"None",
":",
"n_neighbors",
"=",
"int",
"(",
"round",
"(",
"math",
".",
"sqrt",
"(",
"len",
"(",
"X",
")",
")",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"\"Chose n_neighbors automatically:\"",
",",
"n_neighbors",
")",
"# Create and train the KNN classifier",
"knn_clf",
"=",
"neighbors",
".",
"KNeighborsClassifier",
"(",
"n_neighbors",
"=",
"n_neighbors",
",",
"algorithm",
"=",
"knn_algo",
",",
"weights",
"=",
"'distance'",
")",
"knn_clf",
".",
"fit",
"(",
"X",
",",
"y",
")",
"# Save the trained KNN classifier",
"if",
"model_save_path",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"model_save_path",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"knn_clf",
",",
"f",
")",
"return",
"knn_clf"
] |
Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path to save model on disk
:param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
:param knn_algo: (optional) underlying data structure to support knn. Default is ball_tree
:param verbose: verbosity of training
:return: returns knn classifier that was trained on the given data.
|
[
"Trains",
"a",
"k",
"-",
"nearest",
"neighbors",
"classifier",
"for",
"face",
"recognition",
"."
] |
c96b010c02f15e8eeb0f71308c641179ac1f19bb
|
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/face_recognition_knn.py#L46-L108
|
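Running the train function above needs the face_recognition and scikit-learn packages plus a directory of labelled photos on disk; the module name and paths below are assumptions for the sketch (the function lives in examples/face_recognition_knn.py upstream).

# Assumes face_recognition_knn.py is importable and that knn_examples/train/
# contains one sub-directory of photos per known person (placeholder paths).
from face_recognition_knn import train

if __name__ == "__main__":
    knn_clf = train(
        "knn_examples/train",
        model_save_path="trained_knn_model.clf",
        n_neighbors=2,
        verbose=True,
    )
    print("Training complete; known people:", sorted(set(knn_clf.classes_)))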
18,963
|
ageitgey/face_recognition
|
examples/face_recognition_knn.py
|
predict
|
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
"""
Recognizes faces in given image using a trained KNN classifier
:param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be specified.
:param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
of mis-classifying an unknown person as a known one.
:return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
For faces of unrecognized persons, the name 'unknown' will be returned.
"""
if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
raise Exception("Invalid image path: {}".format(X_img_path))
if knn_clf is None and model_path is None:
raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")
# Load a trained KNN model (if one was passed in)
if knn_clf is None:
with open(model_path, 'rb') as f:
knn_clf = pickle.load(f)
# Load image file and find face locations
X_img = face_recognition.load_image_file(X_img_path)
X_face_locations = face_recognition.face_locations(X_img)
# If no faces are found in the image, return an empty result.
if len(X_face_locations) == 0:
return []
    # Find encodings for faces in the test image
faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)
# Use the KNN model to find the best matches for the test face
closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
# Predict classes and remove classifications that aren't within the threshold
return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
|
python
|
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
"""
Recognizes faces in given image using a trained KNN classifier
:param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be specified.
:param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
of mis-classifying an unknown person as a known one.
:return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
For faces of unrecognized persons, the name 'unknown' will be returned.
"""
if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
raise Exception("Invalid image path: {}".format(X_img_path))
if knn_clf is None and model_path is None:
raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")
# Load a trained KNN model (if one was passed in)
if knn_clf is None:
with open(model_path, 'rb') as f:
knn_clf = pickle.load(f)
# Load image file and find face locations
X_img = face_recognition.load_image_file(X_img_path)
X_face_locations = face_recognition.face_locations(X_img)
# If no faces are found in the image, return an empty result.
if len(X_face_locations) == 0:
return []
    # Find encodings for faces in the test image
faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)
# Use the KNN model to find the best matches for the test face
closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
# Predict classes and remove classifications that aren't within the threshold
return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
|
[
"def",
"predict",
"(",
"X_img_path",
",",
"knn_clf",
"=",
"None",
",",
"model_path",
"=",
"None",
",",
"distance_threshold",
"=",
"0.6",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"X_img_path",
")",
"or",
"os",
".",
"path",
".",
"splitext",
"(",
"X_img_path",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"not",
"in",
"ALLOWED_EXTENSIONS",
":",
"raise",
"Exception",
"(",
"\"Invalid image path: {}\"",
".",
"format",
"(",
"X_img_path",
")",
")",
"if",
"knn_clf",
"is",
"None",
"and",
"model_path",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Must supply knn classifier either thourgh knn_clf or model_path\"",
")",
"# Load a trained KNN model (if one was passed in)",
"if",
"knn_clf",
"is",
"None",
":",
"with",
"open",
"(",
"model_path",
",",
"'rb'",
")",
"as",
"f",
":",
"knn_clf",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"# Load image file and find face locations",
"X_img",
"=",
"face_recognition",
".",
"load_image_file",
"(",
"X_img_path",
")",
"X_face_locations",
"=",
"face_recognition",
".",
"face_locations",
"(",
"X_img",
")",
"# If no faces are found in the image, return an empty result.",
"if",
"len",
"(",
"X_face_locations",
")",
"==",
"0",
":",
"return",
"[",
"]",
"# Find encodings for faces in the test iamge",
"faces_encodings",
"=",
"face_recognition",
".",
"face_encodings",
"(",
"X_img",
",",
"known_face_locations",
"=",
"X_face_locations",
")",
"# Use the KNN model to find the best matches for the test face",
"closest_distances",
"=",
"knn_clf",
".",
"kneighbors",
"(",
"faces_encodings",
",",
"n_neighbors",
"=",
"1",
")",
"are_matches",
"=",
"[",
"closest_distances",
"[",
"0",
"]",
"[",
"i",
"]",
"[",
"0",
"]",
"<=",
"distance_threshold",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"X_face_locations",
")",
")",
"]",
"# Predict classes and remove classifications that aren't within the threshold",
"return",
"[",
"(",
"pred",
",",
"loc",
")",
"if",
"rec",
"else",
"(",
"\"unknown\"",
",",
"loc",
")",
"for",
"pred",
",",
"loc",
",",
"rec",
"in",
"zip",
"(",
"knn_clf",
".",
"predict",
"(",
"faces_encodings",
")",
",",
"X_face_locations",
",",
"are_matches",
")",
"]"
] |
Recognizes faces in given image using a trained KNN classifier
:param X_img_path: path to image to be recognized
:param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
:param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be specified.
:param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
of mis-classifying an unknown person as a known one.
:return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
For faces of unrecognized persons, the name 'unknown' will be returned.
|
[
"Recognizes",
"faces",
"in",
"given",
"image",
"using",
"a",
"trained",
"KNN",
"classifier"
] |
c96b010c02f15e8eeb0f71308c641179ac1f19bb
|
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/face_recognition_knn.py#L111-L150
|
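A matching, hedged usage sketch for the predict function above; it assumes the classifier saved by the train sketch and a test image exist locally (file names are placeholders).

from face_recognition_knn import predict

if __name__ == "__main__":
    predictions = predict(
        "knn_examples/test/unknown.jpg",
        model_path="trained_knn_model.clf",
        distance_threshold=0.6,
    )
    for name, (top, right, bottom, left) in predictions:
        print("Found {} at left={}, top={}".format(name, left, top))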
18,964
|
ageitgey/face_recognition
|
examples/face_recognition_knn.py
|
show_prediction_labels_on_image
|
def show_prediction_labels_on_image(img_path, predictions):
"""
Shows the face recognition results visually.
:param img_path: path to image to be recognized
:param predictions: results of the predict function
:return:
"""
pil_image = Image.open(img_path).convert("RGB")
draw = ImageDraw.Draw(pil_image)
for name, (top, right, bottom, left) in predictions:
# Draw a box around the face using the Pillow module
draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
# There's a bug in Pillow where it blows up with non-UTF-8 text
# when using the default bitmap font
name = name.encode("UTF-8")
# Draw a label with a name below the face
text_width, text_height = draw.textsize(name)
draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))
# Remove the drawing library from memory as per the Pillow docs
del draw
# Display the resulting image
pil_image.show()
|
python
|
def show_prediction_labels_on_image(img_path, predictions):
"""
Shows the face recognition results visually.
:param img_path: path to image to be recognized
:param predictions: results of the predict function
:return:
"""
pil_image = Image.open(img_path).convert("RGB")
draw = ImageDraw.Draw(pil_image)
for name, (top, right, bottom, left) in predictions:
# Draw a box around the face using the Pillow module
draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
# There's a bug in Pillow where it blows up with non-UTF-8 text
# when using the default bitmap font
name = name.encode("UTF-8")
# Draw a label with a name below the face
text_width, text_height = draw.textsize(name)
draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))
# Remove the drawing library from memory as per the Pillow docs
del draw
# Display the resulting image
pil_image.show()
|
[
"def",
"show_prediction_labels_on_image",
"(",
"img_path",
",",
"predictions",
")",
":",
"pil_image",
"=",
"Image",
".",
"open",
"(",
"img_path",
")",
".",
"convert",
"(",
"\"RGB\"",
")",
"draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"pil_image",
")",
"for",
"name",
",",
"(",
"top",
",",
"right",
",",
"bottom",
",",
"left",
")",
"in",
"predictions",
":",
"# Draw a box around the face using the Pillow module",
"draw",
".",
"rectangle",
"(",
"(",
"(",
"left",
",",
"top",
")",
",",
"(",
"right",
",",
"bottom",
")",
")",
",",
"outline",
"=",
"(",
"0",
",",
"0",
",",
"255",
")",
")",
"# There's a bug in Pillow where it blows up with non-UTF-8 text",
"# when using the default bitmap font",
"name",
"=",
"name",
".",
"encode",
"(",
"\"UTF-8\"",
")",
"# Draw a label with a name below the face",
"text_width",
",",
"text_height",
"=",
"draw",
".",
"textsize",
"(",
"name",
")",
"draw",
".",
"rectangle",
"(",
"(",
"(",
"left",
",",
"bottom",
"-",
"text_height",
"-",
"10",
")",
",",
"(",
"right",
",",
"bottom",
")",
")",
",",
"fill",
"=",
"(",
"0",
",",
"0",
",",
"255",
")",
",",
"outline",
"=",
"(",
"0",
",",
"0",
",",
"255",
")",
")",
"draw",
".",
"text",
"(",
"(",
"left",
"+",
"6",
",",
"bottom",
"-",
"text_height",
"-",
"5",
")",
",",
"name",
",",
"fill",
"=",
"(",
"255",
",",
"255",
",",
"255",
",",
"255",
")",
")",
"# Remove the drawing library from memory as per the Pillow docs",
"del",
"draw",
"# Display the resulting image",
"pil_image",
".",
"show",
"(",
")"
] |
Shows the face recognition results visually.
:param img_path: path to image to be recognized
:param predictions: results of the predict function
:return:
|
[
"Shows",
"the",
"face",
"recognition",
"results",
"visually",
"."
] |
c96b010c02f15e8eeb0f71308c641179ac1f19bb
|
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/face_recognition_knn.py#L153-L181
|
18,965
|
ageitgey/face_recognition
|
face_recognition/api.py
|
face_distance
|
def face_distance(face_encodings, face_to_compare):
"""
Given a list of face encodings, compare them to a known face encoding and get a euclidean distance
for each comparison face. The distance tells you how similar the faces are.
    :param face_encodings: List of face encodings to compare
:param face_to_compare: A face encoding to compare against
:return: A numpy ndarray with the distance for each face in the same order as the 'faces' array
"""
if len(face_encodings) == 0:
return np.empty((0))
return np.linalg.norm(face_encodings - face_to_compare, axis=1)
|
python
|
def face_distance(face_encodings, face_to_compare):
"""
Given a list of face encodings, compare them to a known face encoding and get a euclidean distance
for each comparison face. The distance tells you how similar the faces are.
    :param face_encodings: List of face encodings to compare
:param face_to_compare: A face encoding to compare against
:return: A numpy ndarray with the distance for each face in the same order as the 'faces' array
"""
if len(face_encodings) == 0:
return np.empty((0))
return np.linalg.norm(face_encodings - face_to_compare, axis=1)
|
[
"def",
"face_distance",
"(",
"face_encodings",
",",
"face_to_compare",
")",
":",
"if",
"len",
"(",
"face_encodings",
")",
"==",
"0",
":",
"return",
"np",
".",
"empty",
"(",
"(",
"0",
")",
")",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"face_encodings",
"-",
"face_to_compare",
",",
"axis",
"=",
"1",
")"
] |
Given a list of face encodings, compare them to a known face encoding and get a euclidean distance
for each comparison face. The distance tells you how similar the faces are.
:param face_encodings: List of face encodings to compare
:param face_to_compare: A face encoding to compare against
:return: A numpy ndarray with the distance for each face in the same order as the 'faces' array
|
[
"Given",
"a",
"list",
"of",
"face",
"encodings",
"compare",
"them",
"to",
"a",
"known",
"face",
"encoding",
"and",
"get",
"a",
"euclidean",
"distance",
"for",
"each",
"comparison",
"face",
".",
"The",
"distance",
"tells",
"you",
"how",
"similar",
"the",
"faces",
"are",
"."
] |
c96b010c02f15e8eeb0f71308c641179ac1f19bb
|
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L63-L75
|
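Because face_distance is just a NumPy norm over the encoding axis, its behaviour can be reproduced with synthetic 128-dimensional vectors and no image files; the function body is copied locally so the snippet is self-contained.

import numpy as np

def face_distance(face_encodings, face_to_compare):
    """Euclidean distance from each known encoding to the candidate encoding."""
    if len(face_encodings) == 0:
        return np.empty((0,))
    return np.linalg.norm(face_encodings - face_to_compare, axis=1)

# Synthetic stand-ins for real 128-dimensional face encodings.
rng = np.random.default_rng(0)
known = rng.normal(size=(3, 128))
candidate = known[1] + rng.normal(scale=0.01, size=128)  # near the second face

distances = face_distance(known, candidate)
print(distances.argmin())  # 1 -- the second known encoding is the closest match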
18,966
|
ageitgey/face_recognition
|
face_recognition/api.py
|
batch_face_locations
|
def batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128):
"""
    Returns a 2d array of bounding boxes of human faces in an image using the cnn face detector
If you are using a GPU, this can give you much faster results since the GPU
can process batches of images at once. If you aren't using a GPU, you don't need this function.
    :param images: A list of images (each as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
:param batch_size: How many images to include in each GPU processing batch.
:return: A list of tuples of found face locations in css (top, right, bottom, left) order
"""
def convert_cnn_detections_to_css(detections):
return [_trim_css_to_bounds(_rect_to_css(face.rect), images[0].shape) for face in detections]
raw_detections_batched = _raw_face_locations_batched(images, number_of_times_to_upsample, batch_size)
return list(map(convert_cnn_detections_to_css, raw_detections_batched))
|
python
|
def batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128):
"""
    Returns a 2d array of bounding boxes of human faces in an image using the cnn face detector
If you are using a GPU, this can give you much faster results since the GPU
can process batches of images at once. If you aren't using a GPU, you don't need this function.
    :param images: A list of images (each as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
:param batch_size: How many images to include in each GPU processing batch.
:return: A list of tuples of found face locations in css (top, right, bottom, left) order
"""
def convert_cnn_detections_to_css(detections):
return [_trim_css_to_bounds(_rect_to_css(face.rect), images[0].shape) for face in detections]
raw_detections_batched = _raw_face_locations_batched(images, number_of_times_to_upsample, batch_size)
return list(map(convert_cnn_detections_to_css, raw_detections_batched))
|
[
"def",
"batch_face_locations",
"(",
"images",
",",
"number_of_times_to_upsample",
"=",
"1",
",",
"batch_size",
"=",
"128",
")",
":",
"def",
"convert_cnn_detections_to_css",
"(",
"detections",
")",
":",
"return",
"[",
"_trim_css_to_bounds",
"(",
"_rect_to_css",
"(",
"face",
".",
"rect",
")",
",",
"images",
"[",
"0",
"]",
".",
"shape",
")",
"for",
"face",
"in",
"detections",
"]",
"raw_detections_batched",
"=",
"_raw_face_locations_batched",
"(",
"images",
",",
"number_of_times_to_upsample",
",",
"batch_size",
")",
"return",
"list",
"(",
"map",
"(",
"convert_cnn_detections_to_css",
",",
"raw_detections_batched",
")",
")"
] |
Returns a 2d array of bounding boxes of human faces in an image using the cnn face detector
If you are using a GPU, this can give you much faster results since the GPU
can process batches of images at once. If you aren't using a GPU, you don't need this function.
:param images: A list of images (each as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
:param batch_size: How many images to include in each GPU processing batch.
:return: A list of tuples of found face locations in css (top, right, bottom, left) order
|
[
"Returns",
"an",
"2d",
"array",
"of",
"bounding",
"boxes",
"of",
"human",
"faces",
"in",
"a",
"image",
"using",
"the",
"cnn",
"face",
"detector",
"If",
"you",
"are",
"using",
"a",
"GPU",
"this",
"can",
"give",
"you",
"much",
"faster",
"results",
"since",
"the",
"GPU",
"can",
"process",
"batches",
"of",
"images",
"at",
"once",
".",
"If",
"you",
"aren",
"t",
"using",
"a",
"GPU",
"you",
"don",
"t",
"need",
"this",
"function",
"."
] |
c96b010c02f15e8eeb0f71308c641179ac1f19bb
|
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L135-L151
|
18,967
|
ageitgey/face_recognition
|
face_recognition/api.py
|
face_encodings
|
def face_encodings(face_image, known_face_locations=None, num_jitters=1):
"""
Given an image, return the 128-dimension face encoding for each face in the image.
:param face_image: The image that contains one or more faces
:param known_face_locations: Optional - the bounding boxes of each face if you already know them.
:param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
:return: A list of 128-dimensional face encodings (one for each face in the image)
"""
raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model="small")
return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
|
python
|
def face_encodings(face_image, known_face_locations=None, num_jitters=1):
"""
Given an image, return the 128-dimension face encoding for each face in the image.
:param face_image: The image that contains one or more faces
:param known_face_locations: Optional - the bounding boxes of each face if you already know them.
:param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
:return: A list of 128-dimensional face encodings (one for each face in the image)
"""
raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model="small")
return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
|
[
"def",
"face_encodings",
"(",
"face_image",
",",
"known_face_locations",
"=",
"None",
",",
"num_jitters",
"=",
"1",
")",
":",
"raw_landmarks",
"=",
"_raw_face_landmarks",
"(",
"face_image",
",",
"known_face_locations",
",",
"model",
"=",
"\"small\"",
")",
"return",
"[",
"np",
".",
"array",
"(",
"face_encoder",
".",
"compute_face_descriptor",
"(",
"face_image",
",",
"raw_landmark_set",
",",
"num_jitters",
")",
")",
"for",
"raw_landmark_set",
"in",
"raw_landmarks",
"]"
] |
Given an image, return the 128-dimension face encoding for each face in the image.
:param face_image: The image that contains one or more faces
:param known_face_locations: Optional - the bounding boxes of each face if you already know them.
:param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
:return: A list of 128-dimensional face encodings (one for each face in the image)
|
[
"Given",
"an",
"image",
"return",
"the",
"128",
"-",
"dimension",
"face",
"encoding",
"for",
"each",
"face",
"in",
"the",
"image",
"."
] |
c96b010c02f15e8eeb0f71308c641179ac1f19bb
|
https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L203-L213
|
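A minimal usage sketch for face_encodings: compare two faces by the Euclidean distance between their 128-d encodings. It assumes face_recognition and numpy are installed, that each image contains exactly one detectable face, and the image paths are placeholders.

import numpy as np
import face_recognition

# Load two images and compute one 128-d encoding per image.
image_a = face_recognition.load_image_file("person_a.jpg")  # placeholder path
image_b = face_recognition.load_image_file("person_b.jpg")  # placeholder path
encoding_a = face_recognition.face_encodings(image_a)[0]
encoding_b = face_recognition.face_encodings(image_b)[0]

# Smaller distance means more similar faces; 0.6 is the conventional threshold.
distance = np.linalg.norm(encoding_a - encoding_b)
print("same person?", distance < 0.6)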
18,968
|
apache/spark
|
python/pyspark/sql/types.py
|
_int_size_to_type
|
def _int_size_to_type(size):
"""
Return the Catalyst datatype from the size of integers.
"""
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType
|
python
|
def _int_size_to_type(size):
"""
Return the Catalyst datatype from the size of integers.
"""
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType
|
[
"def",
"_int_size_to_type",
"(",
"size",
")",
":",
"if",
"size",
"<=",
"8",
":",
"return",
"ByteType",
"if",
"size",
"<=",
"16",
":",
"return",
"ShortType",
"if",
"size",
"<=",
"32",
":",
"return",
"IntegerType",
"if",
"size",
"<=",
"64",
":",
"return",
"LongType"
] |
Return the Catalyst datatype from the size of integers.
|
[
"Return",
"the",
"Catalyst",
"datatype",
"from",
"the",
"size",
"of",
"integers",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L944-L955
|
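A standalone sketch of the size-to-type mapping above, driven by numpy integer dtypes; it re-implements the same branching locally (returning type names as strings) rather than importing the private pyspark helper.

import numpy as np

def int_size_to_type_name(size):
    # Same thresholds as _int_size_to_type, but returning names for readability.
    if size <= 8:
        return "ByteType"
    if size <= 16:
        return "ShortType"
    if size <= 32:
        return "IntegerType"
    if size <= 64:
        return "LongType"

for dtype in (np.int8, np.int16, np.int32, np.int64):
    bits = np.dtype(dtype).itemsize * 8
    print(np.dtype(dtype).name, "->", int_size_to_type_name(bits))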
18,969
|
apache/spark
|
python/pyspark/sql/types.py
|
_infer_type
|
def _infer_type(obj):
"""Infer the DataType from obj
"""
if obj is None:
return NullType()
if hasattr(obj, '__UDT__'):
return obj.__UDT__
dataType = _type_mappings.get(type(obj))
if dataType is DecimalType:
# the precision and scale of `obj` may be different from row to row.
return DecimalType(38, 18)
elif dataType is not None:
return dataType()
if isinstance(obj, dict):
for key, value in obj.items():
if key is not None and value is not None:
return MapType(_infer_type(key), _infer_type(value), True)
return MapType(NullType(), NullType(), True)
elif isinstance(obj, list):
for v in obj:
if v is not None:
return ArrayType(_infer_type(obj[0]), True)
return ArrayType(NullType(), True)
elif isinstance(obj, array):
if obj.typecode in _array_type_mappings:
return ArrayType(_array_type_mappings[obj.typecode](), False)
else:
raise TypeError("not supported type: array(%s)" % obj.typecode)
else:
try:
return _infer_schema(obj)
except TypeError:
raise TypeError("not supported type: %s" % type(obj))
|
python
|
def _infer_type(obj):
"""Infer the DataType from obj
"""
if obj is None:
return NullType()
if hasattr(obj, '__UDT__'):
return obj.__UDT__
dataType = _type_mappings.get(type(obj))
if dataType is DecimalType:
# the precision and scale of `obj` may be different from row to row.
return DecimalType(38, 18)
elif dataType is not None:
return dataType()
if isinstance(obj, dict):
for key, value in obj.items():
if key is not None and value is not None:
return MapType(_infer_type(key), _infer_type(value), True)
return MapType(NullType(), NullType(), True)
elif isinstance(obj, list):
for v in obj:
if v is not None:
return ArrayType(_infer_type(obj[0]), True)
return ArrayType(NullType(), True)
elif isinstance(obj, array):
if obj.typecode in _array_type_mappings:
return ArrayType(_array_type_mappings[obj.typecode](), False)
else:
raise TypeError("not supported type: array(%s)" % obj.typecode)
else:
try:
return _infer_schema(obj)
except TypeError:
raise TypeError("not supported type: %s" % type(obj))
|
[
"def",
"_infer_type",
"(",
"obj",
")",
":",
"if",
"obj",
"is",
"None",
":",
"return",
"NullType",
"(",
")",
"if",
"hasattr",
"(",
"obj",
",",
"'__UDT__'",
")",
":",
"return",
"obj",
".",
"__UDT__",
"dataType",
"=",
"_type_mappings",
".",
"get",
"(",
"type",
"(",
"obj",
")",
")",
"if",
"dataType",
"is",
"DecimalType",
":",
"# the precision and scale of `obj` may be different from row to row.",
"return",
"DecimalType",
"(",
"38",
",",
"18",
")",
"elif",
"dataType",
"is",
"not",
"None",
":",
"return",
"dataType",
"(",
")",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"obj",
".",
"items",
"(",
")",
":",
"if",
"key",
"is",
"not",
"None",
"and",
"value",
"is",
"not",
"None",
":",
"return",
"MapType",
"(",
"_infer_type",
"(",
"key",
")",
",",
"_infer_type",
"(",
"value",
")",
",",
"True",
")",
"return",
"MapType",
"(",
"NullType",
"(",
")",
",",
"NullType",
"(",
")",
",",
"True",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"for",
"v",
"in",
"obj",
":",
"if",
"v",
"is",
"not",
"None",
":",
"return",
"ArrayType",
"(",
"_infer_type",
"(",
"obj",
"[",
"0",
"]",
")",
",",
"True",
")",
"return",
"ArrayType",
"(",
"NullType",
"(",
")",
",",
"True",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"array",
")",
":",
"if",
"obj",
".",
"typecode",
"in",
"_array_type_mappings",
":",
"return",
"ArrayType",
"(",
"_array_type_mappings",
"[",
"obj",
".",
"typecode",
"]",
"(",
")",
",",
"False",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"not supported type: array(%s)\"",
"%",
"obj",
".",
"typecode",
")",
"else",
":",
"try",
":",
"return",
"_infer_schema",
"(",
"obj",
")",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"\"not supported type: %s\"",
"%",
"type",
"(",
"obj",
")",
")"
] |
Infer the DataType from obj
|
[
"Infer",
"the",
"DataType",
"from",
"obj"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1003-L1038
|
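A usage sketch showing the inference above in action: createDataFrame with no explicit schema relies on this type inference. It assumes a local pyspark installation; the printed schema is indicative.

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master("local[1]").appName("infer-type-demo").getOrCreate()

# int -> LongType, str -> StringType, list -> ArrayType (element type taken from the first element)
df = spark.createDataFrame([
    Row(name="Alice", age=11, scores=[1.0, 2.5]),
    Row(name="Bob", age=12, scores=[3.0]),
])
df.printSchema()
# root
#  |-- age: long (nullable = true)
#  |-- name: string (nullable = true)
#  |-- scores: array (nullable = true)
#  |    |-- element: double (containsNull = true)

spark.stop()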
18,970
|
apache/spark
|
python/pyspark/sql/types.py
|
_has_nulltype
|
def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
return _has_nulltype((dt.elementType))
elif isinstance(dt, MapType):
return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType)
else:
return isinstance(dt, NullType)
|
python
|
def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
return _has_nulltype((dt.elementType))
elif isinstance(dt, MapType):
return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType)
else:
return isinstance(dt, NullType)
|
[
"def",
"_has_nulltype",
"(",
"dt",
")",
":",
"if",
"isinstance",
"(",
"dt",
",",
"StructType",
")",
":",
"return",
"any",
"(",
"_has_nulltype",
"(",
"f",
".",
"dataType",
")",
"for",
"f",
"in",
"dt",
".",
"fields",
")",
"elif",
"isinstance",
"(",
"dt",
",",
"ArrayType",
")",
":",
"return",
"_has_nulltype",
"(",
"(",
"dt",
".",
"elementType",
")",
")",
"elif",
"isinstance",
"(",
"dt",
",",
"MapType",
")",
":",
"return",
"_has_nulltype",
"(",
"dt",
".",
"keyType",
")",
"or",
"_has_nulltype",
"(",
"dt",
".",
"valueType",
")",
"else",
":",
"return",
"isinstance",
"(",
"dt",
",",
"NullType",
")"
] |
Return whether there is NullType in `dt` or not
|
[
"Return",
"whether",
"there",
"is",
"NullType",
"in",
"dt",
"or",
"not"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1068-L1077
|
18,971
|
apache/spark
|
python/pyspark/sql/types.py
|
_create_converter
|
def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif isinstance(dataType, MapType):
kconv = _create_converter(dataType.keyType)
vconv = _create_converter(dataType.valueType)
return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
elif isinstance(dataType, NullType):
return lambda x: None
elif not isinstance(dataType, StructType):
return lambda x: x
# dataType must be StructType
names = [f.name for f in dataType.fields]
converters = [_create_converter(f.dataType) for f in dataType.fields]
convert_fields = any(_need_converter(f.dataType) for f in dataType.fields)
def convert_struct(obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
if convert_fields:
return tuple(conv(v) for v, conv in zip(obj, converters))
else:
return tuple(obj)
if isinstance(obj, dict):
d = obj
elif hasattr(obj, "__dict__"): # object
d = obj.__dict__
else:
raise TypeError("Unexpected obj type: %s" % type(obj))
if convert_fields:
return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
else:
return tuple([d.get(name) for name in names])
return convert_struct
|
python
|
def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif isinstance(dataType, MapType):
kconv = _create_converter(dataType.keyType)
vconv = _create_converter(dataType.valueType)
return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
elif isinstance(dataType, NullType):
return lambda x: None
elif not isinstance(dataType, StructType):
return lambda x: x
# dataType must be StructType
names = [f.name for f in dataType.fields]
converters = [_create_converter(f.dataType) for f in dataType.fields]
convert_fields = any(_need_converter(f.dataType) for f in dataType.fields)
def convert_struct(obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
if convert_fields:
return tuple(conv(v) for v, conv in zip(obj, converters))
else:
return tuple(obj)
if isinstance(obj, dict):
d = obj
elif hasattr(obj, "__dict__"): # object
d = obj.__dict__
else:
raise TypeError("Unexpected obj type: %s" % type(obj))
if convert_fields:
return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
else:
return tuple([d.get(name) for name in names])
return convert_struct
|
[
"def",
"_create_converter",
"(",
"dataType",
")",
":",
"if",
"not",
"_need_converter",
"(",
"dataType",
")",
":",
"return",
"lambda",
"x",
":",
"x",
"if",
"isinstance",
"(",
"dataType",
",",
"ArrayType",
")",
":",
"conv",
"=",
"_create_converter",
"(",
"dataType",
".",
"elementType",
")",
"return",
"lambda",
"row",
":",
"[",
"conv",
"(",
"v",
")",
"for",
"v",
"in",
"row",
"]",
"elif",
"isinstance",
"(",
"dataType",
",",
"MapType",
")",
":",
"kconv",
"=",
"_create_converter",
"(",
"dataType",
".",
"keyType",
")",
"vconv",
"=",
"_create_converter",
"(",
"dataType",
".",
"valueType",
")",
"return",
"lambda",
"row",
":",
"dict",
"(",
"(",
"kconv",
"(",
"k",
")",
",",
"vconv",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"row",
".",
"items",
"(",
")",
")",
"elif",
"isinstance",
"(",
"dataType",
",",
"NullType",
")",
":",
"return",
"lambda",
"x",
":",
"None",
"elif",
"not",
"isinstance",
"(",
"dataType",
",",
"StructType",
")",
":",
"return",
"lambda",
"x",
":",
"x",
"# dataType must be StructType",
"names",
"=",
"[",
"f",
".",
"name",
"for",
"f",
"in",
"dataType",
".",
"fields",
"]",
"converters",
"=",
"[",
"_create_converter",
"(",
"f",
".",
"dataType",
")",
"for",
"f",
"in",
"dataType",
".",
"fields",
"]",
"convert_fields",
"=",
"any",
"(",
"_need_converter",
"(",
"f",
".",
"dataType",
")",
"for",
"f",
"in",
"dataType",
".",
"fields",
")",
"def",
"convert_struct",
"(",
"obj",
")",
":",
"if",
"obj",
"is",
"None",
":",
"return",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"if",
"convert_fields",
":",
"return",
"tuple",
"(",
"conv",
"(",
"v",
")",
"for",
"v",
",",
"conv",
"in",
"zip",
"(",
"obj",
",",
"converters",
")",
")",
"else",
":",
"return",
"tuple",
"(",
"obj",
")",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"d",
"=",
"obj",
"elif",
"hasattr",
"(",
"obj",
",",
"\"__dict__\"",
")",
":",
"# object",
"d",
"=",
"obj",
".",
"__dict__",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unexpected obj type: %s\"",
"%",
"type",
"(",
"obj",
")",
")",
"if",
"convert_fields",
":",
"return",
"tuple",
"(",
"[",
"conv",
"(",
"d",
".",
"get",
"(",
"name",
")",
")",
"for",
"name",
",",
"conv",
"in",
"zip",
"(",
"names",
",",
"converters",
")",
"]",
")",
"else",
":",
"return",
"tuple",
"(",
"[",
"d",
".",
"get",
"(",
"name",
")",
"for",
"name",
"in",
"names",
"]",
")",
"return",
"convert_struct"
] |
Create a converter to drop the names of fields in obj
|
[
"Create",
"a",
"converter",
"to",
"drop",
"the",
"names",
"of",
"fields",
"in",
"obj"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1133-L1180
|
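A plain-Python sketch of what "dropping the names of fields" means for the StructType branch above: dict records become tuples ordered by the schema's field names, with missing keys becoming None (mirroring d.get(name) in convert_struct). The field names are hypothetical.

field_names = ["name", "age", "score"]  # stand-in for StructType field names

def drop_names(record, names=field_names):
    # Missing keys map to None, like d.get(name) in convert_struct above.
    return tuple(record.get(name) for name in names)

rows = [
    {"name": "Alice", "age": 11, "score": 9.5},
    {"age": 12, "name": "Bob"},  # "score" missing -> None
]
print([drop_names(r) for r in rows])
# [('Alice', 11, 9.5), ('Bob', 12, None)]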
18,972
|
apache/spark
|
python/pyspark/sql/types.py
|
to_arrow_type
|
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
"""
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) in [StructType, TimestampType]:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
elif type(dt) == StructType:
if any(type(field.dataType) == StructType for field in dt):
raise TypeError("Nested StructType not supported in conversion to Arrow")
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in dt]
arrow_type = pa.struct(fields)
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type
|
python
|
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
"""
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) in [StructType, TimestampType]:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
elif type(dt) == StructType:
if any(type(field.dataType) == StructType for field in dt):
raise TypeError("Nested StructType not supported in conversion to Arrow")
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in dt]
arrow_type = pa.struct(fields)
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type
|
[
"def",
"to_arrow_type",
"(",
"dt",
")",
":",
"import",
"pyarrow",
"as",
"pa",
"if",
"type",
"(",
"dt",
")",
"==",
"BooleanType",
":",
"arrow_type",
"=",
"pa",
".",
"bool_",
"(",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"ByteType",
":",
"arrow_type",
"=",
"pa",
".",
"int8",
"(",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"ShortType",
":",
"arrow_type",
"=",
"pa",
".",
"int16",
"(",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"IntegerType",
":",
"arrow_type",
"=",
"pa",
".",
"int32",
"(",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"LongType",
":",
"arrow_type",
"=",
"pa",
".",
"int64",
"(",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"FloatType",
":",
"arrow_type",
"=",
"pa",
".",
"float32",
"(",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"DoubleType",
":",
"arrow_type",
"=",
"pa",
".",
"float64",
"(",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"DecimalType",
":",
"arrow_type",
"=",
"pa",
".",
"decimal128",
"(",
"dt",
".",
"precision",
",",
"dt",
".",
"scale",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"StringType",
":",
"arrow_type",
"=",
"pa",
".",
"string",
"(",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"BinaryType",
":",
"arrow_type",
"=",
"pa",
".",
"binary",
"(",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"DateType",
":",
"arrow_type",
"=",
"pa",
".",
"date32",
"(",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"TimestampType",
":",
"# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read",
"arrow_type",
"=",
"pa",
".",
"timestamp",
"(",
"'us'",
",",
"tz",
"=",
"'UTC'",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"ArrayType",
":",
"if",
"type",
"(",
"dt",
".",
"elementType",
")",
"in",
"[",
"StructType",
",",
"TimestampType",
"]",
":",
"raise",
"TypeError",
"(",
"\"Unsupported type in conversion to Arrow: \"",
"+",
"str",
"(",
"dt",
")",
")",
"arrow_type",
"=",
"pa",
".",
"list_",
"(",
"to_arrow_type",
"(",
"dt",
".",
"elementType",
")",
")",
"elif",
"type",
"(",
"dt",
")",
"==",
"StructType",
":",
"if",
"any",
"(",
"type",
"(",
"field",
".",
"dataType",
")",
"==",
"StructType",
"for",
"field",
"in",
"dt",
")",
":",
"raise",
"TypeError",
"(",
"\"Nested StructType not supported in conversion to Arrow\"",
")",
"fields",
"=",
"[",
"pa",
".",
"field",
"(",
"field",
".",
"name",
",",
"to_arrow_type",
"(",
"field",
".",
"dataType",
")",
",",
"nullable",
"=",
"field",
".",
"nullable",
")",
"for",
"field",
"in",
"dt",
"]",
"arrow_type",
"=",
"pa",
".",
"struct",
"(",
"fields",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported type in conversion to Arrow: \"",
"+",
"str",
"(",
"dt",
")",
")",
"return",
"arrow_type"
] |
Convert Spark data type to pyarrow type
|
[
"Convert",
"Spark",
"data",
"type",
"to",
"pyarrow",
"type"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1581-L1622
|
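A usage sketch for the mapping above, assuming pyarrow is installed and a Spark version (like the one excerpted here) where to_arrow_type is importable from pyspark.sql.types.

from pyspark.sql.types import (ArrayType, DoubleType, IntegerType, StringType,
                               StructField, StructType, to_arrow_type)

spark_schema = StructType([
    StructField("id", IntegerType(), nullable=False),
    StructField("name", StringType()),
    StructField("scores", ArrayType(DoubleType())),
])

# IntegerType -> int32, StringType -> string, ArrayType(DoubleType) -> list<double>
for field in spark_schema:
    print(field.name, "->", to_arrow_type(field.dataType))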
18,973
|
apache/spark
|
python/pyspark/sql/types.py
|
to_arrow_schema
|
def to_arrow_schema(schema):
""" Convert a schema from Spark to Arrow
"""
import pyarrow as pa
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in schema]
return pa.schema(fields)
|
python
|
def to_arrow_schema(schema):
""" Convert a schema from Spark to Arrow
"""
import pyarrow as pa
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in schema]
return pa.schema(fields)
|
[
"def",
"to_arrow_schema",
"(",
"schema",
")",
":",
"import",
"pyarrow",
"as",
"pa",
"fields",
"=",
"[",
"pa",
".",
"field",
"(",
"field",
".",
"name",
",",
"to_arrow_type",
"(",
"field",
".",
"dataType",
")",
",",
"nullable",
"=",
"field",
".",
"nullable",
")",
"for",
"field",
"in",
"schema",
"]",
"return",
"pa",
".",
"schema",
"(",
"fields",
")"
] |
Convert a schema from Spark to Arrow
|
[
"Convert",
"a",
"schema",
"from",
"Spark",
"to",
"Arrow"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1625-L1631
|
18,974
|
apache/spark
|
python/pyspark/sql/types.py
|
from_arrow_type
|
def from_arrow_type(at):
""" Convert pyarrow type to Spark data type.
"""
import pyarrow.types as types
if types.is_boolean(at):
spark_type = BooleanType()
elif types.is_int8(at):
spark_type = ByteType()
elif types.is_int16(at):
spark_type = ShortType()
elif types.is_int32(at):
spark_type = IntegerType()
elif types.is_int64(at):
spark_type = LongType()
elif types.is_float32(at):
spark_type = FloatType()
elif types.is_float64(at):
spark_type = DoubleType()
elif types.is_decimal(at):
spark_type = DecimalType(precision=at.precision, scale=at.scale)
elif types.is_string(at):
spark_type = StringType()
elif types.is_binary(at):
spark_type = BinaryType()
elif types.is_date32(at):
spark_type = DateType()
elif types.is_timestamp(at):
spark_type = TimestampType()
elif types.is_list(at):
if types.is_timestamp(at.value_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = ArrayType(from_arrow_type(at.value_type))
elif types.is_struct(at):
if any(types.is_struct(field.type) for field in at):
raise TypeError("Nested StructType not supported in conversion from Arrow: " + str(at))
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in at])
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
return spark_type
|
python
|
def from_arrow_type(at):
""" Convert pyarrow type to Spark data type.
"""
import pyarrow.types as types
if types.is_boolean(at):
spark_type = BooleanType()
elif types.is_int8(at):
spark_type = ByteType()
elif types.is_int16(at):
spark_type = ShortType()
elif types.is_int32(at):
spark_type = IntegerType()
elif types.is_int64(at):
spark_type = LongType()
elif types.is_float32(at):
spark_type = FloatType()
elif types.is_float64(at):
spark_type = DoubleType()
elif types.is_decimal(at):
spark_type = DecimalType(precision=at.precision, scale=at.scale)
elif types.is_string(at):
spark_type = StringType()
elif types.is_binary(at):
spark_type = BinaryType()
elif types.is_date32(at):
spark_type = DateType()
elif types.is_timestamp(at):
spark_type = TimestampType()
elif types.is_list(at):
if types.is_timestamp(at.value_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = ArrayType(from_arrow_type(at.value_type))
elif types.is_struct(at):
if any(types.is_struct(field.type) for field in at):
raise TypeError("Nested StructType not supported in conversion from Arrow: " + str(at))
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in at])
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
return spark_type
|
[
"def",
"from_arrow_type",
"(",
"at",
")",
":",
"import",
"pyarrow",
".",
"types",
"as",
"types",
"if",
"types",
".",
"is_boolean",
"(",
"at",
")",
":",
"spark_type",
"=",
"BooleanType",
"(",
")",
"elif",
"types",
".",
"is_int8",
"(",
"at",
")",
":",
"spark_type",
"=",
"ByteType",
"(",
")",
"elif",
"types",
".",
"is_int16",
"(",
"at",
")",
":",
"spark_type",
"=",
"ShortType",
"(",
")",
"elif",
"types",
".",
"is_int32",
"(",
"at",
")",
":",
"spark_type",
"=",
"IntegerType",
"(",
")",
"elif",
"types",
".",
"is_int64",
"(",
"at",
")",
":",
"spark_type",
"=",
"LongType",
"(",
")",
"elif",
"types",
".",
"is_float32",
"(",
"at",
")",
":",
"spark_type",
"=",
"FloatType",
"(",
")",
"elif",
"types",
".",
"is_float64",
"(",
"at",
")",
":",
"spark_type",
"=",
"DoubleType",
"(",
")",
"elif",
"types",
".",
"is_decimal",
"(",
"at",
")",
":",
"spark_type",
"=",
"DecimalType",
"(",
"precision",
"=",
"at",
".",
"precision",
",",
"scale",
"=",
"at",
".",
"scale",
")",
"elif",
"types",
".",
"is_string",
"(",
"at",
")",
":",
"spark_type",
"=",
"StringType",
"(",
")",
"elif",
"types",
".",
"is_binary",
"(",
"at",
")",
":",
"spark_type",
"=",
"BinaryType",
"(",
")",
"elif",
"types",
".",
"is_date32",
"(",
"at",
")",
":",
"spark_type",
"=",
"DateType",
"(",
")",
"elif",
"types",
".",
"is_timestamp",
"(",
"at",
")",
":",
"spark_type",
"=",
"TimestampType",
"(",
")",
"elif",
"types",
".",
"is_list",
"(",
"at",
")",
":",
"if",
"types",
".",
"is_timestamp",
"(",
"at",
".",
"value_type",
")",
":",
"raise",
"TypeError",
"(",
"\"Unsupported type in conversion from Arrow: \"",
"+",
"str",
"(",
"at",
")",
")",
"spark_type",
"=",
"ArrayType",
"(",
"from_arrow_type",
"(",
"at",
".",
"value_type",
")",
")",
"elif",
"types",
".",
"is_struct",
"(",
"at",
")",
":",
"if",
"any",
"(",
"types",
".",
"is_struct",
"(",
"field",
".",
"type",
")",
"for",
"field",
"in",
"at",
")",
":",
"raise",
"TypeError",
"(",
"\"Nested StructType not supported in conversion from Arrow: \"",
"+",
"str",
"(",
"at",
")",
")",
"return",
"StructType",
"(",
"[",
"StructField",
"(",
"field",
".",
"name",
",",
"from_arrow_type",
"(",
"field",
".",
"type",
")",
",",
"nullable",
"=",
"field",
".",
"nullable",
")",
"for",
"field",
"in",
"at",
"]",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Unsupported type in conversion from Arrow: \"",
"+",
"str",
"(",
"at",
")",
")",
"return",
"spark_type"
] |
Convert pyarrow type to Spark data type.
|
[
"Convert",
"pyarrow",
"type",
"to",
"Spark",
"data",
"type",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1634-L1674
|
18,975
|
apache/spark
|
python/pyspark/sql/types.py
|
from_arrow_schema
|
def from_arrow_schema(arrow_schema):
""" Convert schema from Arrow to Spark.
"""
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in arrow_schema])
|
python
|
def from_arrow_schema(arrow_schema):
""" Convert schema from Arrow to Spark.
"""
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in arrow_schema])
|
[
"def",
"from_arrow_schema",
"(",
"arrow_schema",
")",
":",
"return",
"StructType",
"(",
"[",
"StructField",
"(",
"field",
".",
"name",
",",
"from_arrow_type",
"(",
"field",
".",
"type",
")",
",",
"nullable",
"=",
"field",
".",
"nullable",
")",
"for",
"field",
"in",
"arrow_schema",
"]",
")"
] |
Convert schema from Arrow to Spark.
|
[
"Convert",
"schema",
"from",
"Arrow",
"to",
"Spark",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1677-L1682
|
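A round-trip sketch for the two helpers above (assumes pyarrow and a Spark version where from_arrow_schema is importable from pyspark.sql.types): build an Arrow schema, then map it back to Spark types.

import pyarrow as pa
from pyspark.sql.types import from_arrow_schema

arrow_schema = pa.schema([
    pa.field("id", pa.int64(), nullable=False),
    pa.field("name", pa.string()),
    pa.field("price", pa.decimal128(10, 2)),
])

# int64 -> LongType, string -> StringType, decimal128(10, 2) -> DecimalType(10, 2)
spark_schema = from_arrow_schema(arrow_schema)
for field in spark_schema:
    print(field.name, field.dataType, field.nullable)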
18,976
|
apache/spark
|
python/pyspark/sql/types.py
|
_check_series_localize_timestamps
|
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s
|
python
|
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s
|
[
"def",
"_check_series_localize_timestamps",
"(",
"s",
",",
"timezone",
")",
":",
"from",
"pyspark",
".",
"sql",
".",
"utils",
"import",
"require_minimum_pandas_version",
"require_minimum_pandas_version",
"(",
")",
"from",
"pandas",
".",
"api",
".",
"types",
"import",
"is_datetime64tz_dtype",
"tz",
"=",
"timezone",
"or",
"_get_local_timezone",
"(",
")",
"# TODO: handle nested timestamps, such as ArrayType(TimestampType())?",
"if",
"is_datetime64tz_dtype",
"(",
"s",
".",
"dtype",
")",
":",
"return",
"s",
".",
"dt",
".",
"tz_convert",
"(",
"tz",
")",
".",
"dt",
".",
"tz_localize",
"(",
"None",
")",
"else",
":",
"return",
"s"
] |
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive
|
[
"Convert",
"timezone",
"aware",
"timestamps",
"to",
"timezone",
"-",
"naive",
"in",
"the",
"specified",
"timezone",
"or",
"local",
"timezone",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1700-L1720
|
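A pandas-only sketch of the two-step conversion the helper above applies to tz-aware columns: move to the target timezone, then strip the timezone ("America/New_York" is just an example zone).

import pandas as pd

# A tz-aware series in UTC.
s = pd.Series(pd.to_datetime(["2015-06-01 12:00", "2015-12-01 12:00"])).dt.tz_localize("UTC")

# Same two steps as the helper: tz_convert to the target zone, then drop the tz info.
naive = s.dt.tz_convert("America/New_York").dt.tz_localize(None)
print(naive)
# 0   2015-06-01 08:00:00
# 1   2015-12-01 07:00:00
# dtype: datetime64[ns]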
18,977
|
apache/spark
|
python/pyspark/sql/types.py
|
_check_dataframe_localize_timestamps
|
def _check_dataframe_localize_timestamps(pdf, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
:param pdf: pandas.DataFrame
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
for column, series in pdf.iteritems():
pdf[column] = _check_series_localize_timestamps(series, timezone)
return pdf
|
python
|
def _check_dataframe_localize_timestamps(pdf, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
:param pdf: pandas.DataFrame
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
for column, series in pdf.iteritems():
pdf[column] = _check_series_localize_timestamps(series, timezone)
return pdf
|
[
"def",
"_check_dataframe_localize_timestamps",
"(",
"pdf",
",",
"timezone",
")",
":",
"from",
"pyspark",
".",
"sql",
".",
"utils",
"import",
"require_minimum_pandas_version",
"require_minimum_pandas_version",
"(",
")",
"for",
"column",
",",
"series",
"in",
"pdf",
".",
"iteritems",
"(",
")",
":",
"pdf",
"[",
"column",
"]",
"=",
"_check_series_localize_timestamps",
"(",
"series",
",",
"timezone",
")",
"return",
"pdf"
] |
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
:param pdf: pandas.DataFrame
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
|
[
"Convert",
"timezone",
"aware",
"timestamps",
"to",
"timezone",
"-",
"naive",
"in",
"the",
"specified",
"timezone",
"or",
"local",
"timezone"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1723-L1736
|
18,978
|
apache/spark
|
python/pyspark/sql/types.py
|
_check_series_convert_timestamps_internal
|
def _check_series_convert_timestamps_internal(s, timezone):
"""
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
:param s: a pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64_dtype(s.dtype):
# When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
        # timestamp is during the hour when the clock is adjusted backward due to
# daylight saving time (dst).
# E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to
# 2015-11-01 1:00 from dst-time to standard time, and therefore, when tz_localize
# a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either
        # dst time (2015-11-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
#
        # Here we explicitly choose to use standard time. This matches the default behavior of
# pytz.
#
        # Here is some code to help understand this behavior:
# >>> import datetime
# >>> import pandas as pd
# >>> import pytz
# >>>
# >>> t = datetime.datetime(2015, 11, 1, 1, 30)
# >>> ts = pd.Series([t])
# >>> tz = pytz.timezone('America/New_York')
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=True)
# 0 2015-11-01 01:30:00-04:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=False)
# 0 2015-11-01 01:30:00-05:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> str(tz.localize(t))
# '2015-11-01 01:30:00-05:00'
tz = timezone or _get_local_timezone()
return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert('UTC')
else:
return s
|
python
|
def _check_series_convert_timestamps_internal(s, timezone):
"""
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
:param s: a pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64_dtype(s.dtype):
# When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
        # timestamp is during the hour when the clock is adjusted backward due to
# daylight saving time (dst).
# E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to
# 2015-11-01 1:00 from dst-time to standard time, and therefore, when tz_localize
# a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either
        # dst time (2015-11-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
#
        # Here we explicitly choose to use standard time. This matches the default behavior of
# pytz.
#
        # Here is some code to help understand this behavior:
# >>> import datetime
# >>> import pandas as pd
# >>> import pytz
# >>>
# >>> t = datetime.datetime(2015, 11, 1, 1, 30)
# >>> ts = pd.Series([t])
# >>> tz = pytz.timezone('America/New_York')
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=True)
# 0 2015-11-01 01:30:00-04:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=False)
# 0 2015-11-01 01:30:00-05:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> str(tz.localize(t))
# '2015-11-01 01:30:00-05:00'
tz = timezone or _get_local_timezone()
return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert('UTC')
else:
return s
|
[
"def",
"_check_series_convert_timestamps_internal",
"(",
"s",
",",
"timezone",
")",
":",
"from",
"pyspark",
".",
"sql",
".",
"utils",
"import",
"require_minimum_pandas_version",
"require_minimum_pandas_version",
"(",
")",
"from",
"pandas",
".",
"api",
".",
"types",
"import",
"is_datetime64_dtype",
",",
"is_datetime64tz_dtype",
"# TODO: handle nested timestamps, such as ArrayType(TimestampType())?",
"if",
"is_datetime64_dtype",
"(",
"s",
".",
"dtype",
")",
":",
"# When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive",
"# timestamp is during the hour when the clock is adjusted backward during due to",
"# daylight saving time (dst).",
"# E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to",
"# 2015-11-01 1:00 from dst-time to standard time, and therefore, when tz_localize",
"# a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either",
"# dst time (2015-01-01 1:30-0400) or standard time (2015-11-01 1:30-0500).",
"#",
"# Here we explicit choose to use standard time. This matches the default behavior of",
"# pytz.",
"#",
"# Here are some code to help understand this behavior:",
"# >>> import datetime",
"# >>> import pandas as pd",
"# >>> import pytz",
"# >>>",
"# >>> t = datetime.datetime(2015, 11, 1, 1, 30)",
"# >>> ts = pd.Series([t])",
"# >>> tz = pytz.timezone('America/New_York')",
"# >>>",
"# >>> ts.dt.tz_localize(tz, ambiguous=True)",
"# 0 2015-11-01 01:30:00-04:00",
"# dtype: datetime64[ns, America/New_York]",
"# >>>",
"# >>> ts.dt.tz_localize(tz, ambiguous=False)",
"# 0 2015-11-01 01:30:00-05:00",
"# dtype: datetime64[ns, America/New_York]",
"# >>>",
"# >>> str(tz.localize(t))",
"# '2015-11-01 01:30:00-05:00'",
"tz",
"=",
"timezone",
"or",
"_get_local_timezone",
"(",
")",
"return",
"s",
".",
"dt",
".",
"tz_localize",
"(",
"tz",
",",
"ambiguous",
"=",
"False",
")",
".",
"dt",
".",
"tz_convert",
"(",
"'UTC'",
")",
"elif",
"is_datetime64tz_dtype",
"(",
"s",
".",
"dtype",
")",
":",
"return",
"s",
".",
"dt",
".",
"tz_convert",
"(",
"'UTC'",
")",
"else",
":",
"return",
"s"
] |
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
:param s: a pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone
|
[
"Convert",
"a",
"tz",
"-",
"naive",
"timestamp",
"in",
"the",
"specified",
"timezone",
"or",
"local",
"timezone",
"to",
"UTC",
"normalized",
"for",
"Spark",
"internal",
"storage"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1739-L1789
|
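The comment block in this record already walks through the ambiguous-hour example; a directly runnable version (assuming pandas and pytz are installed) looks like this.

import datetime

import pandas as pd
import pytz

t = datetime.datetime(2015, 11, 1, 1, 30)      # falls inside the repeated DST hour
ts = pd.Series([t])
tz = pytz.timezone('America/New_York')

print(ts.dt.tz_localize(tz, ambiguous=True))   # 2015-11-01 01:30:00-04:00 (DST reading)
print(ts.dt.tz_localize(tz, ambiguous=False))  # 2015-11-01 01:30:00-05:00 (standard time)
print(str(tz.localize(t)))                     # '2015-11-01 01:30:00-05:00'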
18,979
|
apache/spark
|
python/pyspark/sql/types.py
|
_check_series_convert_timestamps_localize
|
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
from_tz = from_timezone or _get_local_timezone()
to_tz = to_timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(to_tz).dt.tz_localize(None)
elif is_datetime64_dtype(s.dtype) and from_tz != to_tz:
# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
return s.apply(
lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None)
if ts is not pd.NaT else pd.NaT)
else:
return s
|
python
|
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
from_tz = from_timezone or _get_local_timezone()
to_tz = to_timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(to_tz).dt.tz_localize(None)
elif is_datetime64_dtype(s.dtype) and from_tz != to_tz:
# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
return s.apply(
lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None)
if ts is not pd.NaT else pd.NaT)
else:
return s
|
[
"def",
"_check_series_convert_timestamps_localize",
"(",
"s",
",",
"from_timezone",
",",
"to_timezone",
")",
":",
"from",
"pyspark",
".",
"sql",
".",
"utils",
"import",
"require_minimum_pandas_version",
"require_minimum_pandas_version",
"(",
")",
"import",
"pandas",
"as",
"pd",
"from",
"pandas",
".",
"api",
".",
"types",
"import",
"is_datetime64tz_dtype",
",",
"is_datetime64_dtype",
"from_tz",
"=",
"from_timezone",
"or",
"_get_local_timezone",
"(",
")",
"to_tz",
"=",
"to_timezone",
"or",
"_get_local_timezone",
"(",
")",
"# TODO: handle nested timestamps, such as ArrayType(TimestampType())?",
"if",
"is_datetime64tz_dtype",
"(",
"s",
".",
"dtype",
")",
":",
"return",
"s",
".",
"dt",
".",
"tz_convert",
"(",
"to_tz",
")",
".",
"dt",
".",
"tz_localize",
"(",
"None",
")",
"elif",
"is_datetime64_dtype",
"(",
"s",
".",
"dtype",
")",
"and",
"from_tz",
"!=",
"to_tz",
":",
"# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.",
"return",
"s",
".",
"apply",
"(",
"lambda",
"ts",
":",
"ts",
".",
"tz_localize",
"(",
"from_tz",
",",
"ambiguous",
"=",
"False",
")",
".",
"tz_convert",
"(",
"to_tz",
")",
".",
"tz_localize",
"(",
"None",
")",
"if",
"ts",
"is",
"not",
"pd",
".",
"NaT",
"else",
"pd",
".",
"NaT",
")",
"else",
":",
"return",
"s"
] |
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
|
[
"Convert",
"timestamp",
"to",
"timezone",
"-",
"naive",
"in",
"the",
"specified",
"timezone",
"or",
"local",
"timezone"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1792-L1817
|
18,980
|
apache/spark
|
python/pyspark/sql/types.py
|
Row.asDict
|
def asDict(self, recursive=False):
"""
        Return as a dict
:param recursive: turns the nested Row as dict (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
"""
if not hasattr(self, "__fields__"):
raise TypeError("Cannot convert a Row class into dict")
if recursive:
def conv(obj):
if isinstance(obj, Row):
return obj.asDict(True)
elif isinstance(obj, list):
return [conv(o) for o in obj]
elif isinstance(obj, dict):
return dict((k, conv(v)) for k, v in obj.items())
else:
return obj
return dict(zip(self.__fields__, (conv(o) for o in self)))
else:
return dict(zip(self.__fields__, self))
|
python
|
def asDict(self, recursive=False):
"""
        Return as a dict
:param recursive: turns the nested Row as dict (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
"""
if not hasattr(self, "__fields__"):
raise TypeError("Cannot convert a Row class into dict")
if recursive:
def conv(obj):
if isinstance(obj, Row):
return obj.asDict(True)
elif isinstance(obj, list):
return [conv(o) for o in obj]
elif isinstance(obj, dict):
return dict((k, conv(v)) for k, v in obj.items())
else:
return obj
return dict(zip(self.__fields__, (conv(o) for o in self)))
else:
return dict(zip(self.__fields__, self))
|
[
"def",
"asDict",
"(",
"self",
",",
"recursive",
"=",
"False",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"__fields__\"",
")",
":",
"raise",
"TypeError",
"(",
"\"Cannot convert a Row class into dict\"",
")",
"if",
"recursive",
":",
"def",
"conv",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"Row",
")",
":",
"return",
"obj",
".",
"asDict",
"(",
"True",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"return",
"[",
"conv",
"(",
"o",
")",
"for",
"o",
"in",
"obj",
"]",
"elif",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"return",
"dict",
"(",
"(",
"k",
",",
"conv",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"obj",
".",
"items",
"(",
")",
")",
"else",
":",
"return",
"obj",
"return",
"dict",
"(",
"zip",
"(",
"self",
".",
"__fields__",
",",
"(",
"conv",
"(",
"o",
")",
"for",
"o",
"in",
"self",
")",
")",
")",
"else",
":",
"return",
"dict",
"(",
"zip",
"(",
"self",
".",
"__fields__",
",",
"self",
")",
")"
] |
Return as a dict
:param recursive: turns the nested Row as dict (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
|
[
"Return",
"as",
"an",
"dict"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1463-L1492
|
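A small usage sketch for asDict with recursive=True, which also descends into lists of Rows. pyspark must be importable, but no Spark session is needed since Row is a plain Python class; key order in the printed dict may vary by pyspark version.

from pyspark.sql import Row

person = Row(name="Alice", age=11, pets=[Row(kind="cat"), Row(kind="dog")])
print(person.asDict(recursive=True))
# {'age': 11, 'name': 'Alice', 'pets': [{'kind': 'cat'}, {'kind': 'dog'}]}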
18,981
|
apache/spark
|
python/pyspark/shuffle.py
|
_get_local_dirs
|
def _get_local_dirs(sub):
""" Get all the directories """
path = os.environ.get("SPARK_LOCAL_DIRS", "/tmp")
dirs = path.split(",")
if len(dirs) > 1:
# different order in different processes and instances
rnd = random.Random(os.getpid() + id(dirs))
random.shuffle(dirs, rnd.random)
return [os.path.join(d, "python", str(os.getpid()), sub) for d in dirs]
|
python
|
def _get_local_dirs(sub):
""" Get all the directories """
path = os.environ.get("SPARK_LOCAL_DIRS", "/tmp")
dirs = path.split(",")
if len(dirs) > 1:
# different order in different processes and instances
rnd = random.Random(os.getpid() + id(dirs))
random.shuffle(dirs, rnd.random)
return [os.path.join(d, "python", str(os.getpid()), sub) for d in dirs]
|
[
"def",
"_get_local_dirs",
"(",
"sub",
")",
":",
"path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"SPARK_LOCAL_DIRS\"",
",",
"\"/tmp\"",
")",
"dirs",
"=",
"path",
".",
"split",
"(",
"\",\"",
")",
"if",
"len",
"(",
"dirs",
")",
">",
"1",
":",
"# different order in different processes and instances",
"rnd",
"=",
"random",
".",
"Random",
"(",
"os",
".",
"getpid",
"(",
")",
"+",
"id",
"(",
"dirs",
")",
")",
"random",
".",
"shuffle",
"(",
"dirs",
",",
"rnd",
".",
"random",
")",
"return",
"[",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"\"python\"",
",",
"str",
"(",
"os",
".",
"getpid",
"(",
")",
")",
",",
"sub",
")",
"for",
"d",
"in",
"dirs",
"]"
] |
Get all the directories
|
[
"Get",
"all",
"the",
"directories"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L71-L79
|
18,982
|
apache/spark
|
python/pyspark/shuffle.py
|
ExternalMerger.mergeValues
|
def mergeValues(self, iterator):
""" Combine the items by creator and combiner """
# speedup attribute lookup
creator, comb = self.agg.createCombiner, self.agg.mergeValue
c, data, pdata, hfun, batch = 0, self.data, self.pdata, self._partition, self.batch
limit = self.memory_limit
for k, v in iterator:
d = pdata[hfun(k)] if pdata else data
d[k] = comb(d[k], v) if k in d else creator(v)
c += 1
if c >= batch:
if get_used_memory() >= limit:
self._spill()
limit = self._next_limit()
batch /= 2
c = 0
else:
batch *= 1.5
if get_used_memory() >= limit:
self._spill()
|
python
|
def mergeValues(self, iterator):
""" Combine the items by creator and combiner """
# speedup attribute lookup
creator, comb = self.agg.createCombiner, self.agg.mergeValue
c, data, pdata, hfun, batch = 0, self.data, self.pdata, self._partition, self.batch
limit = self.memory_limit
for k, v in iterator:
d = pdata[hfun(k)] if pdata else data
d[k] = comb(d[k], v) if k in d else creator(v)
c += 1
if c >= batch:
if get_used_memory() >= limit:
self._spill()
limit = self._next_limit()
batch /= 2
c = 0
else:
batch *= 1.5
if get_used_memory() >= limit:
self._spill()
|
[
"def",
"mergeValues",
"(",
"self",
",",
"iterator",
")",
":",
"# speedup attribute lookup",
"creator",
",",
"comb",
"=",
"self",
".",
"agg",
".",
"createCombiner",
",",
"self",
".",
"agg",
".",
"mergeValue",
"c",
",",
"data",
",",
"pdata",
",",
"hfun",
",",
"batch",
"=",
"0",
",",
"self",
".",
"data",
",",
"self",
".",
"pdata",
",",
"self",
".",
"_partition",
",",
"self",
".",
"batch",
"limit",
"=",
"self",
".",
"memory_limit",
"for",
"k",
",",
"v",
"in",
"iterator",
":",
"d",
"=",
"pdata",
"[",
"hfun",
"(",
"k",
")",
"]",
"if",
"pdata",
"else",
"data",
"d",
"[",
"k",
"]",
"=",
"comb",
"(",
"d",
"[",
"k",
"]",
",",
"v",
")",
"if",
"k",
"in",
"d",
"else",
"creator",
"(",
"v",
")",
"c",
"+=",
"1",
"if",
"c",
">=",
"batch",
":",
"if",
"get_used_memory",
"(",
")",
">=",
"limit",
":",
"self",
".",
"_spill",
"(",
")",
"limit",
"=",
"self",
".",
"_next_limit",
"(",
")",
"batch",
"/=",
"2",
"c",
"=",
"0",
"else",
":",
"batch",
"*=",
"1.5",
"if",
"get_used_memory",
"(",
")",
">=",
"limit",
":",
"self",
".",
"_spill",
"(",
")"
] |
Combine the items by creator and combiner
|
[
"Combine",
"the",
"items",
"by",
"creator",
"and",
"combiner"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L231-L253
|
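A plain-Python sketch (no partitioning or spilling) of the createCombiner/mergeValue pattern that mergeValues drives; grouping values into lists stands in for an arbitrary aggregator.

def create_combiner(v):
    return [v]             # start a combiner for the first value seen for a key

def merge_value(combiner, v):
    combiner.append(v)     # fold another value into an existing combiner
    return combiner

def merge_values(pairs):
    data = {}
    for k, v in pairs:
        # Same core step as mergeValues above: d[k] = comb(d[k], v) if k in d else creator(v)
        data[k] = merge_value(data[k], v) if k in data else create_combiner(v)
    return data

print(merge_values([("a", 1), ("b", 2), ("a", 3)]))
# {'a': [1, 3], 'b': [2]}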
18,983
|
apache/spark
|
python/pyspark/shuffle.py
|
ExternalMerger.items
|
def items(self):
""" Return all merged items as iterator """
if not self.pdata and not self.spills:
return iter(self.data.items())
return self._external_items()
|
python
|
def items(self):
""" Return all merged items as iterator """
if not self.pdata and not self.spills:
return iter(self.data.items())
return self._external_items()
|
[
"def",
"items",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"pdata",
"and",
"not",
"self",
".",
"spills",
":",
"return",
"iter",
"(",
"self",
".",
"data",
".",
"items",
"(",
")",
")",
"return",
"self",
".",
"_external_items",
"(",
")"
] |
Return all merged items as iterator
|
[
"Return",
"all",
"merged",
"items",
"as",
"iterator"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L339-L343
|
18,984
|
apache/spark
|
python/pyspark/shuffle.py
|
ExternalMerger._external_items
|
def _external_items(self):
""" Return all partitioned items as iterator """
assert not self.data
if any(self.pdata):
self._spill()
        # disable partitioning and spilling when merging combiners from disk
self.pdata = []
try:
for i in range(self.partitions):
for v in self._merged_items(i):
yield v
self.data.clear()
# remove the merged partition
for j in range(self.spills):
path = self._get_spill_dir(j)
os.remove(os.path.join(path, str(i)))
finally:
self._cleanup()
|
python
|
def _external_items(self):
""" Return all partitioned items as iterator """
assert not self.data
if any(self.pdata):
self._spill()
        # disable partitioning and spilling when merging combiners from disk
self.pdata = []
try:
for i in range(self.partitions):
for v in self._merged_items(i):
yield v
self.data.clear()
# remove the merged partition
for j in range(self.spills):
path = self._get_spill_dir(j)
os.remove(os.path.join(path, str(i)))
finally:
self._cleanup()
|
[
"def",
"_external_items",
"(",
"self",
")",
":",
"assert",
"not",
"self",
".",
"data",
"if",
"any",
"(",
"self",
".",
"pdata",
")",
":",
"self",
".",
"_spill",
"(",
")",
"# disable partitioning and spilling when merge combiners from disk",
"self",
".",
"pdata",
"=",
"[",
"]",
"try",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"partitions",
")",
":",
"for",
"v",
"in",
"self",
".",
"_merged_items",
"(",
"i",
")",
":",
"yield",
"v",
"self",
".",
"data",
".",
"clear",
"(",
")",
"# remove the merged partition",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"spills",
")",
":",
"path",
"=",
"self",
".",
"_get_spill_dir",
"(",
"j",
")",
"os",
".",
"remove",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"str",
"(",
"i",
")",
")",
")",
"finally",
":",
"self",
".",
"_cleanup",
"(",
")"
] |
Return all partitioned items as iterator
|
[
"Return",
"all",
"partitioned",
"items",
"as",
"iterator"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L345-L364
|
18,985
|
apache/spark
|
python/pyspark/shuffle.py
|
ExternalMerger._recursive_merged_items
|
def _recursive_merged_items(self, index):
"""
        merge the partitioned items and return them as an iterator
        If one partition cannot fit in memory, it will be
        partitioned and merged recursively.
"""
subdirs = [os.path.join(d, "parts", str(index)) for d in self.localdirs]
m = ExternalMerger(self.agg, self.memory_limit, self.serializer, subdirs,
self.scale * self.partitions, self.partitions, self.batch)
m.pdata = [{} for _ in range(self.partitions)]
limit = self._next_limit()
for j in range(self.spills):
path = self._get_spill_dir(j)
p = os.path.join(path, str(index))
with open(p, 'rb') as f:
m.mergeCombiners(self.serializer.load_stream(f), 0)
if get_used_memory() > limit:
m._spill()
limit = self._next_limit()
return m._external_items()
|
python
|
def _recursive_merged_items(self, index):
"""
        merge the partitioned items and return them as an iterator
        If one partition cannot fit in memory, it will be
        partitioned and merged recursively.
"""
subdirs = [os.path.join(d, "parts", str(index)) for d in self.localdirs]
m = ExternalMerger(self.agg, self.memory_limit, self.serializer, subdirs,
self.scale * self.partitions, self.partitions, self.batch)
m.pdata = [{} for _ in range(self.partitions)]
limit = self._next_limit()
for j in range(self.spills):
path = self._get_spill_dir(j)
p = os.path.join(path, str(index))
with open(p, 'rb') as f:
m.mergeCombiners(self.serializer.load_stream(f), 0)
if get_used_memory() > limit:
m._spill()
limit = self._next_limit()
return m._external_items()
|
[
"def",
"_recursive_merged_items",
"(",
"self",
",",
"index",
")",
":",
"subdirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"\"parts\"",
",",
"str",
"(",
"index",
")",
")",
"for",
"d",
"in",
"self",
".",
"localdirs",
"]",
"m",
"=",
"ExternalMerger",
"(",
"self",
".",
"agg",
",",
"self",
".",
"memory_limit",
",",
"self",
".",
"serializer",
",",
"subdirs",
",",
"self",
".",
"scale",
"*",
"self",
".",
"partitions",
",",
"self",
".",
"partitions",
",",
"self",
".",
"batch",
")",
"m",
".",
"pdata",
"=",
"[",
"{",
"}",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"partitions",
")",
"]",
"limit",
"=",
"self",
".",
"_next_limit",
"(",
")",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"spills",
")",
":",
"path",
"=",
"self",
".",
"_get_spill_dir",
"(",
"j",
")",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"str",
"(",
"index",
")",
")",
"with",
"open",
"(",
"p",
",",
"'rb'",
")",
"as",
"f",
":",
"m",
".",
"mergeCombiners",
"(",
"self",
".",
"serializer",
".",
"load_stream",
"(",
"f",
")",
",",
"0",
")",
"if",
"get_used_memory",
"(",
")",
">",
"limit",
":",
"m",
".",
"_spill",
"(",
")",
"limit",
"=",
"self",
".",
"_next_limit",
"(",
")",
"return",
"m",
".",
"_external_items",
"(",
")"
] |
merge the partitioned items and return them as an iterator
        If one partition cannot fit in memory, it will be
        partitioned and merged recursively.
|
[
"merge",
"the",
"partitioned",
"items",
"and",
"return",
"the",
"as",
"iterator"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L386-L409
|
18,986
|
apache/spark
|
python/pyspark/shuffle.py
|
ExternalSorter.sorted
|
def sorted(self, iterator, key=None, reverse=False):
"""
Sort the elements in iterator, do external sort when the memory
goes above the limit.
"""
global MemoryBytesSpilled, DiskBytesSpilled
batch, limit = 100, self._next_limit()
chunks, current_chunk = [], []
iterator = iter(iterator)
while True:
# pick elements in batch
chunk = list(itertools.islice(iterator, batch))
current_chunk.extend(chunk)
if len(chunk) < batch:
break
used_memory = get_used_memory()
if used_memory > limit:
# sort them inplace will save memory
current_chunk.sort(key=key, reverse=reverse)
path = self._get_path(len(chunks))
with open(path, 'wb') as f:
self.serializer.dump_stream(current_chunk, f)
def load(f):
for v in self.serializer.load_stream(f):
yield v
# close the file explicit once we consume all the items
# to avoid ResourceWarning in Python3
f.close()
chunks.append(load(open(path, 'rb')))
current_chunk = []
MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
DiskBytesSpilled += os.path.getsize(path)
os.unlink(path) # data will be deleted after close
elif not chunks:
batch = min(int(batch * 1.5), 10000)
current_chunk.sort(key=key, reverse=reverse)
if not chunks:
return current_chunk
if current_chunk:
chunks.append(iter(current_chunk))
return heapq.merge(chunks, key=key, reverse=reverse)
|
python
|
def sorted(self, iterator, key=None, reverse=False):
"""
Sort the elements in iterator, do external sort when the memory
goes above the limit.
"""
global MemoryBytesSpilled, DiskBytesSpilled
batch, limit = 100, self._next_limit()
chunks, current_chunk = [], []
iterator = iter(iterator)
while True:
# pick elements in batch
chunk = list(itertools.islice(iterator, batch))
current_chunk.extend(chunk)
if len(chunk) < batch:
break
used_memory = get_used_memory()
if used_memory > limit:
# sort them inplace will save memory
current_chunk.sort(key=key, reverse=reverse)
path = self._get_path(len(chunks))
with open(path, 'wb') as f:
self.serializer.dump_stream(current_chunk, f)
def load(f):
for v in self.serializer.load_stream(f):
yield v
# close the file explicit once we consume all the items
# to avoid ResourceWarning in Python3
f.close()
chunks.append(load(open(path, 'rb')))
current_chunk = []
MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
DiskBytesSpilled += os.path.getsize(path)
os.unlink(path) # data will be deleted after close
elif not chunks:
batch = min(int(batch * 1.5), 10000)
current_chunk.sort(key=key, reverse=reverse)
if not chunks:
return current_chunk
if current_chunk:
chunks.append(iter(current_chunk))
return heapq.merge(chunks, key=key, reverse=reverse)
|
[
"def",
"sorted",
"(",
"self",
",",
"iterator",
",",
"key",
"=",
"None",
",",
"reverse",
"=",
"False",
")",
":",
"global",
"MemoryBytesSpilled",
",",
"DiskBytesSpilled",
"batch",
",",
"limit",
"=",
"100",
",",
"self",
".",
"_next_limit",
"(",
")",
"chunks",
",",
"current_chunk",
"=",
"[",
"]",
",",
"[",
"]",
"iterator",
"=",
"iter",
"(",
"iterator",
")",
"while",
"True",
":",
"# pick elements in batch",
"chunk",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"iterator",
",",
"batch",
")",
")",
"current_chunk",
".",
"extend",
"(",
"chunk",
")",
"if",
"len",
"(",
"chunk",
")",
"<",
"batch",
":",
"break",
"used_memory",
"=",
"get_used_memory",
"(",
")",
"if",
"used_memory",
">",
"limit",
":",
"# sort them inplace will save memory",
"current_chunk",
".",
"sort",
"(",
"key",
"=",
"key",
",",
"reverse",
"=",
"reverse",
")",
"path",
"=",
"self",
".",
"_get_path",
"(",
"len",
"(",
"chunks",
")",
")",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"f",
":",
"self",
".",
"serializer",
".",
"dump_stream",
"(",
"current_chunk",
",",
"f",
")",
"def",
"load",
"(",
"f",
")",
":",
"for",
"v",
"in",
"self",
".",
"serializer",
".",
"load_stream",
"(",
"f",
")",
":",
"yield",
"v",
"# close the file explicit once we consume all the items",
"# to avoid ResourceWarning in Python3",
"f",
".",
"close",
"(",
")",
"chunks",
".",
"append",
"(",
"load",
"(",
"open",
"(",
"path",
",",
"'rb'",
")",
")",
")",
"current_chunk",
"=",
"[",
"]",
"MemoryBytesSpilled",
"+=",
"max",
"(",
"used_memory",
"-",
"get_used_memory",
"(",
")",
",",
"0",
")",
"<<",
"20",
"DiskBytesSpilled",
"+=",
"os",
".",
"path",
".",
"getsize",
"(",
"path",
")",
"os",
".",
"unlink",
"(",
"path",
")",
"# data will be deleted after close",
"elif",
"not",
"chunks",
":",
"batch",
"=",
"min",
"(",
"int",
"(",
"batch",
"*",
"1.5",
")",
",",
"10000",
")",
"current_chunk",
".",
"sort",
"(",
"key",
"=",
"key",
",",
"reverse",
"=",
"reverse",
")",
"if",
"not",
"chunks",
":",
"return",
"current_chunk",
"if",
"current_chunk",
":",
"chunks",
".",
"append",
"(",
"iter",
"(",
"current_chunk",
")",
")",
"return",
"heapq",
".",
"merge",
"(",
"chunks",
",",
"key",
"=",
"key",
",",
"reverse",
"=",
"reverse",
")"
] |
Sort the elements in iterator, do external sort when the memory
goes above the limit.
|
[
"Sort",
"the",
"elements",
"in",
"iterator",
"do",
"external",
"sort",
"when",
"the",
"memory",
"goes",
"above",
"the",
"limit",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L455-L501
|
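The sorted method above sorts batches in memory, spills each sorted batch to disk once memory crosses the limit, and heap-merges the sorted streams at the end. A standalone sketch of that external-sort pattern follows; it is an illustration under simplifying assumptions (a fixed chunk size instead of memory checks, pickle instead of PySpark's serializer), not the pyspark.shuffle implementation.

import heapq
import itertools
import pickle
import tempfile


def spill_sorted_chunk(chunk, key=None):
    # Sort one chunk in memory and write it to a temporary spill file.
    f = tempfile.TemporaryFile()
    for item in sorted(chunk, key=key):
        pickle.dump(item, f)
    f.seek(0)
    return f


def stream_chunk(f):
    # Lazily read a spilled chunk back in sorted order.
    while True:
        try:
            yield pickle.load(f)
        except EOFError:
            f.close()
            return


def external_sorted(iterator, key=None, chunk_size=4):
    iterator = iter(iterator)
    spills = []
    while True:
        chunk = list(itertools.islice(iterator, chunk_size))
        if not chunk:
            break
        spills.append(spill_sorted_chunk(chunk, key=key))
    # heapq.merge lazily interleaves the already-sorted spill streams.
    return heapq.merge(*(stream_chunk(f) for f in spills), key=key)


if __name__ == "__main__":
    print(list(external_sorted([9, 1, 7, 3, 8, 2, 6, 4, 5])))  # [1, 2, 3, 4, 5, 6, 7, 8, 9]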
18,987
|
apache/spark
|
python/pyspark/shuffle.py
|
ExternalList._spill
|
def _spill(self):
""" dump the values into disk """
global MemoryBytesSpilled, DiskBytesSpilled
if self._file is None:
self._open_file()
used_memory = get_used_memory()
pos = self._file.tell()
self._ser.dump_stream(self.values, self._file)
self.values = []
gc.collect()
DiskBytesSpilled += self._file.tell() - pos
MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
|
python
|
def _spill(self):
""" dump the values into disk """
global MemoryBytesSpilled, DiskBytesSpilled
if self._file is None:
self._open_file()
used_memory = get_used_memory()
pos = self._file.tell()
self._ser.dump_stream(self.values, self._file)
self.values = []
gc.collect()
DiskBytesSpilled += self._file.tell() - pos
MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
|
[
"def",
"_spill",
"(",
"self",
")",
":",
"global",
"MemoryBytesSpilled",
",",
"DiskBytesSpilled",
"if",
"self",
".",
"_file",
"is",
"None",
":",
"self",
".",
"_open_file",
"(",
")",
"used_memory",
"=",
"get_used_memory",
"(",
")",
"pos",
"=",
"self",
".",
"_file",
".",
"tell",
"(",
")",
"self",
".",
"_ser",
".",
"dump_stream",
"(",
"self",
".",
"values",
",",
"self",
".",
"_file",
")",
"self",
".",
"values",
"=",
"[",
"]",
"gc",
".",
"collect",
"(",
")",
"DiskBytesSpilled",
"+=",
"self",
".",
"_file",
".",
"tell",
"(",
")",
"-",
"pos",
"MemoryBytesSpilled",
"+=",
"max",
"(",
"used_memory",
"-",
"get_used_memory",
"(",
")",
",",
"0",
")",
"<<",
"20"
] |
dump the values into disk
|
[
"dump",
"the",
"values",
"into",
"disk"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L590-L602
|
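_spill above flushes the buffered values to an open file and charges the growth of that file to a disk counter. A rough sketch of the same bookkeeping, with pickle and a simple length threshold standing in for PySpark's serializer and memory checks (the class below is hypothetical, not part of pyspark):

import pickle
import tempfile


class SpillingList:
    def __init__(self, threshold=1000):
        self.values = []
        self.threshold = threshold
        self.disk_bytes_spilled = 0
        self._file = None

    def append(self, value):
        self.values.append(value)
        if len(self.values) >= self.threshold:
            self._spill()

    def _spill(self):
        # Dump the in-memory buffer, record how many bytes hit disk,
        # then clear the buffer, mirroring the record above.
        if self._file is None:
            self._file = tempfile.TemporaryFile()
        pos = self._file.tell()
        for v in self.values:
            pickle.dump(v, self._file)
        self.disk_bytes_spilled += self._file.tell() - pos
        self.values = []


if __name__ == "__main__":
    lst = SpillingList(threshold=10)
    for i in range(25):
        lst.append(i)
    print(lst.disk_bytes_spilled > 0, len(lst.values))  # True 5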
18,988
|
apache/spark
|
python/pyspark/shuffle.py
|
ExternalGroupBy._merge_sorted_items
|
def _merge_sorted_items(self, index):
""" load a partition from disk, then sort and group by key """
def load_partition(j):
path = self._get_spill_dir(j)
p = os.path.join(path, str(index))
with open(p, 'rb', 65536) as f:
for v in self.serializer.load_stream(f):
yield v
disk_items = [load_partition(j) for j in range(self.spills)]
if self._sorted:
# all the partitions are already sorted
sorted_items = heapq.merge(disk_items, key=operator.itemgetter(0))
else:
# Flatten the combined values, so it will not consume huge
# memory during merging sort.
ser = self.flattened_serializer()
sorter = ExternalSorter(self.memory_limit, ser)
sorted_items = sorter.sorted(itertools.chain(*disk_items),
key=operator.itemgetter(0))
return ((k, vs) for k, vs in GroupByKey(sorted_items))
|
python
|
def _merge_sorted_items(self, index):
""" load a partition from disk, then sort and group by key """
def load_partition(j):
path = self._get_spill_dir(j)
p = os.path.join(path, str(index))
with open(p, 'rb', 65536) as f:
for v in self.serializer.load_stream(f):
yield v
disk_items = [load_partition(j) for j in range(self.spills)]
if self._sorted:
# all the partitions are already sorted
sorted_items = heapq.merge(disk_items, key=operator.itemgetter(0))
else:
# Flatten the combined values, so it will not consume huge
# memory during merging sort.
ser = self.flattened_serializer()
sorter = ExternalSorter(self.memory_limit, ser)
sorted_items = sorter.sorted(itertools.chain(*disk_items),
key=operator.itemgetter(0))
return ((k, vs) for k, vs in GroupByKey(sorted_items))
|
[
"def",
"_merge_sorted_items",
"(",
"self",
",",
"index",
")",
":",
"def",
"load_partition",
"(",
"j",
")",
":",
"path",
"=",
"self",
".",
"_get_spill_dir",
"(",
"j",
")",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"str",
"(",
"index",
")",
")",
"with",
"open",
"(",
"p",
",",
"'rb'",
",",
"65536",
")",
"as",
"f",
":",
"for",
"v",
"in",
"self",
".",
"serializer",
".",
"load_stream",
"(",
"f",
")",
":",
"yield",
"v",
"disk_items",
"=",
"[",
"load_partition",
"(",
"j",
")",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"spills",
")",
"]",
"if",
"self",
".",
"_sorted",
":",
"# all the partitions are already sorted",
"sorted_items",
"=",
"heapq",
".",
"merge",
"(",
"disk_items",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"0",
")",
")",
"else",
":",
"# Flatten the combined values, so it will not consume huge",
"# memory during merging sort.",
"ser",
"=",
"self",
".",
"flattened_serializer",
"(",
")",
"sorter",
"=",
"ExternalSorter",
"(",
"self",
".",
"memory_limit",
",",
"ser",
")",
"sorted_items",
"=",
"sorter",
".",
"sorted",
"(",
"itertools",
".",
"chain",
"(",
"*",
"disk_items",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"0",
")",
")",
"return",
"(",
"(",
"k",
",",
"vs",
")",
"for",
"k",
",",
"vs",
"in",
"GroupByKey",
"(",
"sorted_items",
")",
")"
] |
load a partition from disk, then sort and group by key
|
[
"load",
"a",
"partition",
"from",
"disk",
"then",
"sort",
"and",
"group",
"by",
"key"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L786-L808
|
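_merge_sorted_items above interleaves spill files that are already sorted by key and then groups the merged stream by key. The core of that step can be sketched with heapq.merge plus itertools.groupby, where plain in-memory lists stand in for the spilled runs and groupby plays the role of GroupByKey:

import heapq
import itertools
import operator


def merge_and_group(sorted_runs):
    # Interleave runs that are each sorted by key, then group by key.
    merged = heapq.merge(*sorted_runs, key=operator.itemgetter(0))
    for key, pairs in itertools.groupby(merged, key=operator.itemgetter(0)):
        yield key, [v for _, v in pairs]


if __name__ == "__main__":
    run_a = [("a", 1), ("b", 2), ("c", 3)]
    run_b = [("a", 4), ("c", 5)]
    print(list(merge_and_group([run_a, run_b])))
    # [('a', [1, 4]), ('b', [2]), ('c', [3, 5])]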
18,989
|
apache/spark
|
python/pyspark/rdd.py
|
portable_hash
|
def portable_hash(x):
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
|
python
|
def portable_hash(x):
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
|
[
"def",
"portable_hash",
"(",
"x",
")",
":",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"2",
",",
"3",
")",
"and",
"'PYTHONHASHSEED'",
"not",
"in",
"os",
".",
"environ",
":",
"raise",
"Exception",
"(",
"\"Randomness of hash of string should be disabled via PYTHONHASHSEED\"",
")",
"if",
"x",
"is",
"None",
":",
"return",
"0",
"if",
"isinstance",
"(",
"x",
",",
"tuple",
")",
":",
"h",
"=",
"0x345678",
"for",
"i",
"in",
"x",
":",
"h",
"^=",
"portable_hash",
"(",
"i",
")",
"h",
"*=",
"1000003",
"h",
"&=",
"sys",
".",
"maxsize",
"h",
"^=",
"len",
"(",
"x",
")",
"if",
"h",
"==",
"-",
"1",
":",
"h",
"=",
"-",
"2",
"return",
"int",
"(",
"h",
")",
"return",
"hash",
"(",
"x",
")"
] |
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
|
[
"This",
"function",
"returns",
"consistent",
"hash",
"code",
"for",
"builtin",
"types",
"especially",
"for",
"None",
"and",
"tuple",
"with",
"None",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L78-L106
|
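The tuple branch of portable_hash combines element hashes with the multiply-and-xor scheme of CPython 2.7. Below is a small worked example of that combination, restricted to ints and None so the result does not depend on PYTHONHASHSEED; this re-implementation is for illustration only and skips the environment check in the record above.

import sys


def tuple_hash(x):
    if x is None:
        return 0
    if isinstance(x, tuple):
        h = 0x345678
        for i in x:
            h ^= tuple_hash(i)
            h *= 1000003
            h &= sys.maxsize
        h ^= len(x)
        if h == -1:
            h = -2
        return int(h)
    return hash(x)


if __name__ == "__main__":
    print(tuple_hash(None))                    # 0
    print(tuple_hash((None, 1)) & 0xffffffff)  # 219750521, matching the doctest above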
18,990
|
apache/spark
|
python/pyspark/rdd.py
|
ignore_unicode_prefix
|
def ignore_unicode_prefix(f):
"""
Ignore the 'u' prefix of string in doc tests, to make it works
in both python 2 and 3
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
|
python
|
def ignore_unicode_prefix(f):
"""
Ignore the 'u' prefix of string in doc tests, to make it works
in both python 2 and 3
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
|
[
"def",
"ignore_unicode_prefix",
"(",
"f",
")",
":",
"if",
"sys",
".",
"version",
">=",
"'3'",
":",
"# the representation of unicode string in Python 3 does not have prefix 'u',",
"# so remove the prefix 'u' for doc tests",
"literal_re",
"=",
"re",
".",
"compile",
"(",
"r\"(\\W|^)[uU](['])\"",
",",
"re",
".",
"UNICODE",
")",
"f",
".",
"__doc__",
"=",
"literal_re",
".",
"sub",
"(",
"r'\\1\\2'",
",",
"f",
".",
"__doc__",
")",
"return",
"f"
] |
Ignore the 'u' prefix of string in doc tests, to make it works
in both python 2 and 3
|
[
"Ignore",
"the",
"u",
"prefix",
"of",
"string",
"in",
"doc",
"tests",
"to",
"make",
"it",
"works",
"in",
"both",
"python",
"2",
"and",
"3"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L150-L160
|
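The decorator above rewrites a function's docstring so that Python 2-style u'...' doctest output matches Python 3's repr. A quick demonstration of the same regex on a made-up docstring:

import re

literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)

doc = ">>> name()\nu'Alice'"
print(literal_re.sub(r"\1\2", doc))
# >>> name()
# 'Alice'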
18,991
|
apache/spark
|
python/pyspark/rdd.py
|
RDD.unpersist
|
def unpersist(self, blocking=False):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
|
python
|
def unpersist(self, blocking=False):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
|
[
"def",
"unpersist",
"(",
"self",
",",
"blocking",
"=",
"False",
")",
":",
"self",
".",
"is_cached",
"=",
"False",
"self",
".",
"_jrdd",
".",
"unpersist",
"(",
"blocking",
")",
"return",
"self"
] |
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
|
[
"Mark",
"the",
"RDD",
"as",
"non",
"-",
"persistent",
"and",
"remove",
"all",
"blocks",
"for",
"it",
"from",
"memory",
"and",
"disk",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L247-L258
|
18,992
|
apache/spark
|
python/pyspark/rdd.py
|
RDD.getCheckpointFile
|
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
|
python
|
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
|
[
"def",
"getCheckpointFile",
"(",
"self",
")",
":",
"checkpointFile",
"=",
"self",
".",
"_jrdd",
".",
"rdd",
"(",
")",
".",
"getCheckpointFile",
"(",
")",
"if",
"checkpointFile",
".",
"isDefined",
"(",
")",
":",
"return",
"checkpointFile",
".",
"get",
"(",
")"
] |
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
|
[
"Gets",
"the",
"name",
"of",
"the",
"file",
"to",
"which",
"this",
"RDD",
"was",
"checkpointed"
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L307-L315
|
18,993
|
apache/spark
|
python/pyspark/rdd.py
|
RDD.map
|
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
|
python
|
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
|
[
"def",
"map",
"(",
"self",
",",
"f",
",",
"preservesPartitioning",
"=",
"False",
")",
":",
"def",
"func",
"(",
"_",
",",
"iterator",
")",
":",
"return",
"map",
"(",
"fail_on_stopiteration",
"(",
"f",
")",
",",
"iterator",
")",
"return",
"self",
".",
"mapPartitionsWithIndex",
"(",
"func",
",",
"preservesPartitioning",
")"
] |
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
|
[
"Return",
"a",
"new",
"RDD",
"by",
"applying",
"a",
"function",
"to",
"each",
"element",
"of",
"this",
"RDD",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L317-L327
|
18,994
|
apache/spark
|
python/pyspark/rdd.py
|
RDD.flatMap
|
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
|
python
|
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
|
[
"def",
"flatMap",
"(",
"self",
",",
"f",
",",
"preservesPartitioning",
"=",
"False",
")",
":",
"def",
"func",
"(",
"s",
",",
"iterator",
")",
":",
"return",
"chain",
".",
"from_iterable",
"(",
"map",
"(",
"fail_on_stopiteration",
"(",
"f",
")",
",",
"iterator",
")",
")",
"return",
"self",
".",
"mapPartitionsWithIndex",
"(",
"func",
",",
"preservesPartitioning",
")"
] |
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
|
[
"Return",
"a",
"new",
"RDD",
"by",
"first",
"applying",
"a",
"function",
"to",
"all",
"elements",
"of",
"this",
"RDD",
"and",
"then",
"flattening",
"the",
"results",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L329-L342
|
18,995
|
apache/spark
|
python/pyspark/rdd.py
|
RDD.mapPartitions
|
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
|
python
|
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
|
[
"def",
"mapPartitions",
"(",
"self",
",",
"f",
",",
"preservesPartitioning",
"=",
"False",
")",
":",
"def",
"func",
"(",
"s",
",",
"iterator",
")",
":",
"return",
"f",
"(",
"iterator",
")",
"return",
"self",
".",
"mapPartitionsWithIndex",
"(",
"func",
",",
"preservesPartitioning",
")"
] |
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
|
[
"Return",
"a",
"new",
"RDD",
"by",
"applying",
"a",
"function",
"to",
"each",
"partition",
"of",
"this",
"RDD",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L344-L355
|
18,996
|
apache/spark
|
python/pyspark/rdd.py
|
RDD.distinct
|
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
|
python
|
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
|
[
"def",
"distinct",
"(",
"self",
",",
"numPartitions",
"=",
"None",
")",
":",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
",",
"None",
")",
")",
".",
"reduceByKey",
"(",
"lambda",
"x",
",",
"_",
":",
"x",
",",
"numPartitions",
")",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")"
] |
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
|
[
"Return",
"a",
"new",
"RDD",
"containing",
"the",
"distinct",
"elements",
"in",
"this",
"RDD",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L407-L416
|
18,997
|
apache/spark
|
python/pyspark/rdd.py
|
RDD.sample
|
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
|
python
|
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
|
[
"def",
"sample",
"(",
"self",
",",
"withReplacement",
",",
"fraction",
",",
"seed",
"=",
"None",
")",
":",
"assert",
"fraction",
">=",
"0.0",
",",
"\"Negative fraction value: %s\"",
"%",
"fraction",
"return",
"self",
".",
"mapPartitionsWithIndex",
"(",
"RDDSampler",
"(",
"withReplacement",
",",
"fraction",
",",
"seed",
")",
".",
"func",
",",
"True",
")"
] |
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
|
[
"Return",
"a",
"sampled",
"subset",
"of",
"this",
"RDD",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L418-L436
|
18,998
|
apache/spark
|
python/pyspark/rdd.py
|
RDD.randomSplit
|
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
|
python
|
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
|
[
"def",
"randomSplit",
"(",
"self",
",",
"weights",
",",
"seed",
"=",
"None",
")",
":",
"s",
"=",
"float",
"(",
"sum",
"(",
"weights",
")",
")",
"cweights",
"=",
"[",
"0.0",
"]",
"for",
"w",
"in",
"weights",
":",
"cweights",
".",
"append",
"(",
"cweights",
"[",
"-",
"1",
"]",
"+",
"w",
"/",
"s",
")",
"if",
"seed",
"is",
"None",
":",
"seed",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"2",
"**",
"32",
"-",
"1",
")",
"return",
"[",
"self",
".",
"mapPartitionsWithIndex",
"(",
"RDDRangeSampler",
"(",
"lb",
",",
"ub",
",",
"seed",
")",
".",
"func",
",",
"True",
")",
"for",
"lb",
",",
"ub",
"in",
"zip",
"(",
"cweights",
",",
"cweights",
"[",
"1",
":",
"]",
")",
"]"
] |
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
|
[
"Randomly",
"splits",
"this",
"RDD",
"with",
"the",
"provided",
"weights",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L438-L462
|
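randomSplit above normalizes the weights and converts them into cumulative bounds, so that each output split keeps the rows whose per-row random draw falls in its [lower, upper) range. A worked example of just the bound computation (the helper name is invented for the demo):

def cumulative_bounds(weights):
    # Scale the weights to sum to 1 and accumulate them into ranges.
    s = float(sum(weights))
    cweights = [0.0]
    for w in weights:
        cweights.append(cweights[-1] + w / s)
    return list(zip(cweights, cweights[1:]))


if __name__ == "__main__":
    print(cumulative_bounds([2, 3]))  # [(0.0, 0.4), (0.4, 1.0)]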
18,999
|
apache/spark
|
python/pyspark/rdd.py
|
RDD.takeSample
|
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
|
python
|
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
|
[
"def",
"takeSample",
"(",
"self",
",",
"withReplacement",
",",
"num",
",",
"seed",
"=",
"None",
")",
":",
"numStDev",
"=",
"10.0",
"if",
"num",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Sample size cannot be negative.\"",
")",
"elif",
"num",
"==",
"0",
":",
"return",
"[",
"]",
"initialCount",
"=",
"self",
".",
"count",
"(",
")",
"if",
"initialCount",
"==",
"0",
":",
"return",
"[",
"]",
"rand",
"=",
"random",
".",
"Random",
"(",
"seed",
")",
"if",
"(",
"not",
"withReplacement",
")",
"and",
"num",
">=",
"initialCount",
":",
"# shuffle current RDD and return",
"samples",
"=",
"self",
".",
"collect",
"(",
")",
"rand",
".",
"shuffle",
"(",
"samples",
")",
"return",
"samples",
"maxSampleSize",
"=",
"sys",
".",
"maxsize",
"-",
"int",
"(",
"numStDev",
"*",
"sqrt",
"(",
"sys",
".",
"maxsize",
")",
")",
"if",
"num",
">",
"maxSampleSize",
":",
"raise",
"ValueError",
"(",
"\"Sample size cannot be greater than %d.\"",
"%",
"maxSampleSize",
")",
"fraction",
"=",
"RDD",
".",
"_computeFractionForSampleSize",
"(",
"num",
",",
"initialCount",
",",
"withReplacement",
")",
"samples",
"=",
"self",
".",
"sample",
"(",
"withReplacement",
",",
"fraction",
",",
"seed",
")",
".",
"collect",
"(",
")",
"# If the first sample didn't turn out large enough, keep trying to take samples;",
"# this shouldn't happen often because we use a big multiplier for their initial size.",
"# See: scala/spark/RDD.scala",
"while",
"len",
"(",
"samples",
")",
"<",
"num",
":",
"# TODO: add log warning for when more than one iteration was run",
"seed",
"=",
"rand",
".",
"randint",
"(",
"0",
",",
"sys",
".",
"maxsize",
")",
"samples",
"=",
"self",
".",
"sample",
"(",
"withReplacement",
",",
"fraction",
",",
"seed",
")",
".",
"collect",
"(",
")",
"rand",
".",
"shuffle",
"(",
"samples",
")",
"return",
"samples",
"[",
"0",
":",
"num",
"]"
] |
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
|
[
"Return",
"a",
"fixed",
"-",
"size",
"sampled",
"subset",
"of",
"this",
"RDD",
"."
] |
618d6bff71073c8c93501ab7392c3cc579730f0b
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L465-L518
|
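takeSample above estimates a sampling fraction, re-samples with a fresh seed while the draw is still too small, then shuffles and truncates to exactly num items. Below is a simplified sketch of that retry-shuffle-truncate loop, with a fixed fraction and random.random standing in for Spark's per-partition samplers and for _computeFractionForSampleSize (all names here are illustrative):

import random


def take_sample_without_replacement(data, num, fraction, seed=None):
    rand = random.Random(seed)
    samples = [x for x in data if rand.random() < fraction]
    while len(samples) < num:
        # Too few items came back: re-draw with a new seed, as the
        # while-loop in the record above does.
        rand = random.Random(rand.randint(0, 2 ** 32 - 1))
        samples = [x for x in data if rand.random() < fraction]
    rand.shuffle(samples)
    return samples[0:num]


if __name__ == "__main__":
    print(len(take_sample_without_replacement(range(100), 10, 0.2, seed=42)))  # 10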