id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
30,200
tensorflow/hub
tensorflow_hub/saved_model_lib.py
SavedModelHandler.add_graph_copy
def add_graph_copy(self, graph, tags=None): """Adds a copy of Graph with the specified set of tags.""" with graph.as_default(): # Remove default attrs so that Modules created by a tensorflow version # with ops that have new attrs that are left to their default values can # still be loaded by older versions unware of those attributes. meta_graph = tf_v1.train.export_meta_graph(strip_default_attrs=True) _export_tags(meta_graph, tags) _export_signatures(meta_graph) _export_module_attachments(meta_graph) self._proto.meta_graphs.extend([meta_graph])
python
def add_graph_copy(self, graph, tags=None): """Adds a copy of Graph with the specified set of tags.""" with graph.as_default(): # Remove default attrs so that Modules created by a tensorflow version # with ops that have new attrs that are left to their default values can # still be loaded by older versions unware of those attributes. meta_graph = tf_v1.train.export_meta_graph(strip_default_attrs=True) _export_tags(meta_graph, tags) _export_signatures(meta_graph) _export_module_attachments(meta_graph) self._proto.meta_graphs.extend([meta_graph])
[ "def", "add_graph_copy", "(", "self", ",", "graph", ",", "tags", "=", "None", ")", ":", "with", "graph", ".", "as_default", "(", ")", ":", "# Remove default attrs so that Modules created by a tensorflow version", "# with ops that have new attrs that are left to their default ...
Adds a copy of Graph with the specified set of tags.
[ "Adds", "a", "copy", "of", "Graph", "with", "the", "specified", "set", "of", "tags", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/saved_model_lib.py#L357-L367
30,201
tensorflow/hub
tensorflow_hub/saved_model_lib.py
SavedModelHandler.get_meta_graph_copy
def get_meta_graph_copy(self, tags=None): """Returns a copy of a MetaGraph with the identical set of tags.""" meta_graph = self.get_meta_graph(tags) copy = tf_v1.MetaGraphDef() copy.CopyFrom(meta_graph) return copy
python
def get_meta_graph_copy(self, tags=None): """Returns a copy of a MetaGraph with the identical set of tags.""" meta_graph = self.get_meta_graph(tags) copy = tf_v1.MetaGraphDef() copy.CopyFrom(meta_graph) return copy
[ "def", "get_meta_graph_copy", "(", "self", ",", "tags", "=", "None", ")", ":", "meta_graph", "=", "self", ".", "get_meta_graph", "(", "tags", ")", "copy", "=", "tf_v1", ".", "MetaGraphDef", "(", ")", "copy", ".", "CopyFrom", "(", "meta_graph", ")", "retu...
Returns a copy of a MetaGraph with the identical set of tags.
[ "Returns", "a", "copy", "of", "a", "MetaGraph", "with", "the", "identical", "set", "of", "tags", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/saved_model_lib.py#L372-L377
30,202
tensorflow/hub
tensorflow_hub/saved_model_lib.py
SavedModelHandler.get_tags
def get_tags(self): """Returns a list of set of tags.""" return sorted([frozenset(meta_graph.meta_info_def.tags) for meta_graph in self.meta_graphs])
python
def get_tags(self): """Returns a list of set of tags.""" return sorted([frozenset(meta_graph.meta_info_def.tags) for meta_graph in self.meta_graphs])
[ "def", "get_tags", "(", "self", ")", ":", "return", "sorted", "(", "[", "frozenset", "(", "meta_graph", ".", "meta_info_def", ".", "tags", ")", "for", "meta_graph", "in", "self", ".", "meta_graphs", "]", ")" ]
Returns a list of set of tags.
[ "Returns", "a", "list", "of", "set", "of", "tags", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/saved_model_lib.py#L383-L386
30,203
tensorflow/hub
tensorflow_hub/saved_model_lib.py
SavedModelHandler.export
def export(self, path, variables_saver=None): """Exports to SavedModel directory. Args: path: path where to export the SavedModel to. variables_saver: lambda that receives a directory path where to export checkpoints of variables. """ # Operate on a copy of self._proto since it needs to be modified. proto = saved_model_pb2.SavedModel() proto.CopyFrom(self._proto) assets_map = _make_assets_key_collection(proto, path) self._save_all_assets(path, assets_map) self._save_variables(path, variables_saver) self._save_proto(path, proto)
python
def export(self, path, variables_saver=None): """Exports to SavedModel directory. Args: path: path where to export the SavedModel to. variables_saver: lambda that receives a directory path where to export checkpoints of variables. """ # Operate on a copy of self._proto since it needs to be modified. proto = saved_model_pb2.SavedModel() proto.CopyFrom(self._proto) assets_map = _make_assets_key_collection(proto, path) self._save_all_assets(path, assets_map) self._save_variables(path, variables_saver) self._save_proto(path, proto)
[ "def", "export", "(", "self", ",", "path", ",", "variables_saver", "=", "None", ")", ":", "# Operate on a copy of self._proto since it needs to be modified.", "proto", "=", "saved_model_pb2", ".", "SavedModel", "(", ")", "proto", ".", "CopyFrom", "(", "self", ".", ...
Exports to SavedModel directory. Args: path: path where to export the SavedModel to. variables_saver: lambda that receives a directory path where to export checkpoints of variables.
[ "Exports", "to", "SavedModel", "directory", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/saved_model_lib.py#L391-L406
30,204
tensorflow/hub
tensorflow_hub/saved_model_lib.py
SavedModelHandler.get_meta_graph
def get_meta_graph(self, tags=None): """Returns the matching MetaGraphDef or raises KeyError.""" matches = [meta_graph for meta_graph in self.meta_graphs if set(meta_graph.meta_info_def.tags) == set(tags or [])] if not matches: raise KeyError("SavedModelHandler has no graph with tags: %r" % tags) if len(matches) != 1: raise KeyError( "SavedModelHandler has multiple graphs with tags %r" % tags) return matches[0]
python
def get_meta_graph(self, tags=None): """Returns the matching MetaGraphDef or raises KeyError.""" matches = [meta_graph for meta_graph in self.meta_graphs if set(meta_graph.meta_info_def.tags) == set(tags or [])] if not matches: raise KeyError("SavedModelHandler has no graph with tags: %r" % tags) if len(matches) != 1: raise KeyError( "SavedModelHandler has multiple graphs with tags %r" % tags) return matches[0]
[ "def", "get_meta_graph", "(", "self", ",", "tags", "=", "None", ")", ":", "matches", "=", "[", "meta_graph", "for", "meta_graph", "in", "self", ".", "meta_graphs", "if", "set", "(", "meta_graph", ".", "meta_info_def", ".", "tags", ")", "==", "set", "(", ...
Returns the matching MetaGraphDef or raises KeyError.
[ "Returns", "the", "matching", "MetaGraphDef", "or", "raises", "KeyError", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/saved_model_lib.py#L408-L418
30,205
tensorflow/hub
tensorflow_hub/module.py
_convert_dict_inputs
def _convert_dict_inputs(inputs, tensor_info_map): """Converts from inputs into dict of input tensors. This handles: - putting inputs into a dict, per _prepare_dict_inputs(), - converting all input values into tensors compatible with the expected input tensor (dtype, shape). - check sparse/non-sparse tensor types. Args: inputs: inputs fed to Module.__call__(). tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo` describing the signature inputs. Returns: A dict of tensors to feed to the signature instantiation. Raises: TypeError: If it fails to convert the input values into a dict of tensors to feed to the signature instantiation. """ dict_inputs = _prepare_dict_inputs(inputs, tensor_info_map) return tensor_info.convert_dict_to_compatible_tensor(dict_inputs, tensor_info_map)
python
def _convert_dict_inputs(inputs, tensor_info_map): """Converts from inputs into dict of input tensors. This handles: - putting inputs into a dict, per _prepare_dict_inputs(), - converting all input values into tensors compatible with the expected input tensor (dtype, shape). - check sparse/non-sparse tensor types. Args: inputs: inputs fed to Module.__call__(). tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo` describing the signature inputs. Returns: A dict of tensors to feed to the signature instantiation. Raises: TypeError: If it fails to convert the input values into a dict of tensors to feed to the signature instantiation. """ dict_inputs = _prepare_dict_inputs(inputs, tensor_info_map) return tensor_info.convert_dict_to_compatible_tensor(dict_inputs, tensor_info_map)
[ "def", "_convert_dict_inputs", "(", "inputs", ",", "tensor_info_map", ")", ":", "dict_inputs", "=", "_prepare_dict_inputs", "(", "inputs", ",", "tensor_info_map", ")", "return", "tensor_info", ".", "convert_dict_to_compatible_tensor", "(", "dict_inputs", ",", "tensor_in...
Converts from inputs into dict of input tensors. This handles: - putting inputs into a dict, per _prepare_dict_inputs(), - converting all input values into tensors compatible with the expected input tensor (dtype, shape). - check sparse/non-sparse tensor types. Args: inputs: inputs fed to Module.__call__(). tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo` describing the signature inputs. Returns: A dict of tensors to feed to the signature instantiation. Raises: TypeError: If it fails to convert the input values into a dict of tensors to feed to the signature instantiation.
[ "Converts", "from", "inputs", "into", "dict", "of", "input", "tensors", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/module.py#L424-L447
30,206
tensorflow/hub
tensorflow_hub/module.py
eval_function_for_module
def eval_function_for_module(spec, tags=None): """Context manager that yields a function to directly evaluate a Module. This creates a separate graph, in which all of the signatures of the module are instantiated. Then, it creates a session and initializes the module variables. Finally, it returns a function which can be used to evaluate the module signatures. The function returned by eval_function_for_module has the same syntax as Module.__call__ , except that inputs and outputs are not tensors but actual values as used with Session.run(). ```python with hub.eval_function_for_module("/tmp/text-embedding") as f: # The module can be directly evaluated using f without constructing a graph. embeddings = f(["Hello world!",], signature="mysignature") ``` Args: spec: A ModuleSpec defining the Module to instantiate or a path where to load a ModuleSpec from via `load_module_spec`. tags: A set of strings specifying the graph variant to use. Yields: A function whose keyword arguments are fed into the tfhub module and which returns a dictionary with the value of the output tensors. Raises: RuntimeError: explaning the reason why it failed to instantiate the Module. ValueError: if the requested graph variant does not exists. """ # We create a separate graph and add all the signatures of the module to it. original_graph = tf_v1.get_default_graph() with tf.Graph().as_default(): module = Module(spec, tags=tags) input_tensors_per_signature = {} output_tensors_per_signature = {} for signature in module.get_signature_names(): # We scope with the signature name as different signatures will likely # contain tensors with the same name (e.g. the input and output tensors). with tf_v1.variable_scope(signature): input_tensors = {} for name, tensorinfo in module.get_input_info_dict(signature).items(): # We need to be care with the shape as it may be fully-known, # partially-known or even unknown. 
shape = tensorinfo.get_shape() effective_shape = None if shape.dims is None else shape.as_list() if tensorinfo.is_sparse: input_tensors[name] = tf_v1.sparse_placeholder( tensorinfo.dtype, shape=effective_shape, name=name) else: input_tensors[name] = tf_v1.placeholder( tensorinfo.dtype, shape=effective_shape, name=name) input_tensors_per_signature[signature] = input_tensors output_tensors_per_signature[signature] = module( input_tensors_per_signature[signature], signature=signature, as_dict=True) # Evaluating the tfhub module requires an active tensorflow session. with tf_v1.train.SingularMonitoredSession() as sess: def func( inputs=None, _sentinel=None, # pylint: disable=invalid-name signature=None, as_dict=None): """Function that directly evaluates a signature in the module.""" signature = signature or "default" input_tensors = input_tensors_per_signature[signature] dict_inputs = _prepare_dict_inputs(inputs, input_tensors) # The input arguments are directly fed into the session. feed_dict = { input_tensors[key]: value for key, value in dict_inputs.items() } output = output_tensors_per_signature[signature] output = _prepare_outputs(output, as_dict) return sess.run(output, feed_dict=feed_dict) with original_graph.as_default(): # Yield the function since that will keep the session alive until the # user exits the context. yield func
python
def eval_function_for_module(spec, tags=None): """Context manager that yields a function to directly evaluate a Module. This creates a separate graph, in which all of the signatures of the module are instantiated. Then, it creates a session and initializes the module variables. Finally, it returns a function which can be used to evaluate the module signatures. The function returned by eval_function_for_module has the same syntax as Module.__call__ , except that inputs and outputs are not tensors but actual values as used with Session.run(). ```python with hub.eval_function_for_module("/tmp/text-embedding") as f: # The module can be directly evaluated using f without constructing a graph. embeddings = f(["Hello world!",], signature="mysignature") ``` Args: spec: A ModuleSpec defining the Module to instantiate or a path where to load a ModuleSpec from via `load_module_spec`. tags: A set of strings specifying the graph variant to use. Yields: A function whose keyword arguments are fed into the tfhub module and which returns a dictionary with the value of the output tensors. Raises: RuntimeError: explaning the reason why it failed to instantiate the Module. ValueError: if the requested graph variant does not exists. """ # We create a separate graph and add all the signatures of the module to it. original_graph = tf_v1.get_default_graph() with tf.Graph().as_default(): module = Module(spec, tags=tags) input_tensors_per_signature = {} output_tensors_per_signature = {} for signature in module.get_signature_names(): # We scope with the signature name as different signatures will likely # contain tensors with the same name (e.g. the input and output tensors). with tf_v1.variable_scope(signature): input_tensors = {} for name, tensorinfo in module.get_input_info_dict(signature).items(): # We need to be care with the shape as it may be fully-known, # partially-known or even unknown. 
shape = tensorinfo.get_shape() effective_shape = None if shape.dims is None else shape.as_list() if tensorinfo.is_sparse: input_tensors[name] = tf_v1.sparse_placeholder( tensorinfo.dtype, shape=effective_shape, name=name) else: input_tensors[name] = tf_v1.placeholder( tensorinfo.dtype, shape=effective_shape, name=name) input_tensors_per_signature[signature] = input_tensors output_tensors_per_signature[signature] = module( input_tensors_per_signature[signature], signature=signature, as_dict=True) # Evaluating the tfhub module requires an active tensorflow session. with tf_v1.train.SingularMonitoredSession() as sess: def func( inputs=None, _sentinel=None, # pylint: disable=invalid-name signature=None, as_dict=None): """Function that directly evaluates a signature in the module.""" signature = signature or "default" input_tensors = input_tensors_per_signature[signature] dict_inputs = _prepare_dict_inputs(inputs, input_tensors) # The input arguments are directly fed into the session. feed_dict = { input_tensors[key]: value for key, value in dict_inputs.items() } output = output_tensors_per_signature[signature] output = _prepare_outputs(output, as_dict) return sess.run(output, feed_dict=feed_dict) with original_graph.as_default(): # Yield the function since that will keep the session alive until the # user exits the context. yield func
[ "def", "eval_function_for_module", "(", "spec", ",", "tags", "=", "None", ")", ":", "# We create a separate graph and add all the signatures of the module to it.", "original_graph", "=", "tf_v1", ".", "get_default_graph", "(", ")", "with", "tf", ".", "Graph", "(", ")", ...
Context manager that yields a function to directly evaluate a Module. This creates a separate graph, in which all of the signatures of the module are instantiated. Then, it creates a session and initializes the module variables. Finally, it returns a function which can be used to evaluate the module signatures. The function returned by eval_function_for_module has the same syntax as Module.__call__ , except that inputs and outputs are not tensors but actual values as used with Session.run(). ```python with hub.eval_function_for_module("/tmp/text-embedding") as f: # The module can be directly evaluated using f without constructing a graph. embeddings = f(["Hello world!",], signature="mysignature") ``` Args: spec: A ModuleSpec defining the Module to instantiate or a path where to load a ModuleSpec from via `load_module_spec`. tags: A set of strings specifying the graph variant to use. Yields: A function whose keyword arguments are fed into the tfhub module and which returns a dictionary with the value of the output tensors. Raises: RuntimeError: explaning the reason why it failed to instantiate the Module. ValueError: if the requested graph variant does not exists.
[ "Context", "manager", "that", "yields", "a", "function", "to", "directly", "evaluate", "a", "Module", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/module.py#L474-L559
30,207
tensorflow/hub
tensorflow_hub/module.py
Module.get_input_info_dict
def get_input_info_dict(self, signature=None): """Describes the inputs required by a signature. Args: signature: A string with the signature to get inputs information for. If None, the default signature is used if defined. Returns: The result of ModuleSpec.get_input_info_dict() for the given signature, and the graph variant selected by `tags` when this Module was initialized. Raises: KeyError: if there is no such signature. """ return self._spec.get_input_info_dict(signature=signature, tags=self._tags)
python
def get_input_info_dict(self, signature=None): """Describes the inputs required by a signature. Args: signature: A string with the signature to get inputs information for. If None, the default signature is used if defined. Returns: The result of ModuleSpec.get_input_info_dict() for the given signature, and the graph variant selected by `tags` when this Module was initialized. Raises: KeyError: if there is no such signature. """ return self._spec.get_input_info_dict(signature=signature, tags=self._tags)
[ "def", "get_input_info_dict", "(", "self", ",", "signature", "=", "None", ")", ":", "return", "self", ".", "_spec", ".", "get_input_info_dict", "(", "signature", "=", "signature", ",", "tags", "=", "self", ".", "_tags", ")" ]
Describes the inputs required by a signature. Args: signature: A string with the signature to get inputs information for. If None, the default signature is used if defined. Returns: The result of ModuleSpec.get_input_info_dict() for the given signature, and the graph variant selected by `tags` when this Module was initialized. Raises: KeyError: if there is no such signature.
[ "Describes", "the", "inputs", "required", "by", "a", "signature", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/module.py#L257-L271
30,208
tensorflow/hub
tensorflow_hub/module.py
Module.get_output_info_dict
def get_output_info_dict(self, signature=None): """Describes the outputs provided by a signature. Args: signature: A string with the signature to get ouputs information for. If None, the default signature is used if defined. Returns: The result of ModuleSpec.get_output_info_dict() for the given signature, and the graph variant selected by `tags` when this Module was initialized. Raises: KeyError: if there is no such signature. """ return self._spec.get_output_info_dict(signature=signature, tags=self._tags)
python
def get_output_info_dict(self, signature=None): """Describes the outputs provided by a signature. Args: signature: A string with the signature to get ouputs information for. If None, the default signature is used if defined. Returns: The result of ModuleSpec.get_output_info_dict() for the given signature, and the graph variant selected by `tags` when this Module was initialized. Raises: KeyError: if there is no such signature. """ return self._spec.get_output_info_dict(signature=signature, tags=self._tags)
[ "def", "get_output_info_dict", "(", "self", ",", "signature", "=", "None", ")", ":", "return", "self", ".", "_spec", ".", "get_output_info_dict", "(", "signature", "=", "signature", ",", "tags", "=", "self", ".", "_tags", ")" ]
Describes the outputs provided by a signature. Args: signature: A string with the signature to get ouputs information for. If None, the default signature is used if defined. Returns: The result of ModuleSpec.get_output_info_dict() for the given signature, and the graph variant selected by `tags` when this Module was initialized. Raises: KeyError: if there is no such signature.
[ "Describes", "the", "outputs", "provided", "by", "a", "signature", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/module.py#L273-L287
30,209
tensorflow/hub
tensorflow_hub/module.py
Module.export
def export(self, path, session): """Exports the module with the variables from the session in `path`. Note that it is the module definition in the ModuleSpec used to create this module that gets exported. The session is only used to provide the value of variables. Args: path: path where to export the module to. session: session where to export the variables from. Raises: RuntimeError: if there is an issue during the export. """ if self._graph is not tf_v1.get_default_graph(): raise RuntimeError("default graph differs from the graph where the " "module was instantiated.") if self._graph is not session.graph: raise RuntimeError("session graph differs from the graph where the " "module was instantiated.") self._impl.export(path, session)
python
def export(self, path, session): """Exports the module with the variables from the session in `path`. Note that it is the module definition in the ModuleSpec used to create this module that gets exported. The session is only used to provide the value of variables. Args: path: path where to export the module to. session: session where to export the variables from. Raises: RuntimeError: if there is an issue during the export. """ if self._graph is not tf_v1.get_default_graph(): raise RuntimeError("default graph differs from the graph where the " "module was instantiated.") if self._graph is not session.graph: raise RuntimeError("session graph differs from the graph where the " "module was instantiated.") self._impl.export(path, session)
[ "def", "export", "(", "self", ",", "path", ",", "session", ")", ":", "if", "self", ".", "_graph", "is", "not", "tf_v1", ".", "get_default_graph", "(", ")", ":", "raise", "RuntimeError", "(", "\"default graph differs from the graph where the \"", "\"module was inst...
Exports the module with the variables from the session in `path`. Note that it is the module definition in the ModuleSpec used to create this module that gets exported. The session is only used to provide the value of variables. Args: path: path where to export the module to. session: session where to export the variables from. Raises: RuntimeError: if there is an issue during the export.
[ "Exports", "the", "module", "with", "the", "variables", "from", "the", "session", "in", "path", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/module.py#L294-L314
30,210
tensorflow/hub
tensorflow_hub/module.py
Module.variables
def variables(self): """Returns the list of all tf.Variables created by module instantiation.""" result = [] for _, value in sorted(self.variable_map.items()): if isinstance(value, list): result.extend(value) else: result.append(value) return result
python
def variables(self): """Returns the list of all tf.Variables created by module instantiation.""" result = [] for _, value in sorted(self.variable_map.items()): if isinstance(value, list): result.extend(value) else: result.append(value) return result
[ "def", "variables", "(", "self", ")", ":", "result", "=", "[", "]", "for", "_", ",", "value", "in", "sorted", "(", "self", ".", "variable_map", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "result", "...
Returns the list of all tf.Variables created by module instantiation.
[ "Returns", "the", "list", "of", "all", "tf", ".", "Variables", "created", "by", "module", "instantiation", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/module.py#L341-L349
30,211
tensorflow/hub
tensorflow_hub/feature_column.py
text_embedding_column
def text_embedding_column(key, module_spec, trainable=False): """Uses a Module to construct a dense representation from a text feature. This feature column can be used on an input feature whose values are strings of arbitrary size. The result of this feature column is the result of passing its `input` through the module `m` instantiated from `module_spec`, as per `result = m(input)`. The `result` must have dtype float32 and shape `[batch_size, num_features]` with a known value of num_features. Example: ```python comment = text_embedding_column("comment", "/tmp/text-module") feature_columns = [comment, ...] ... features = { "comment": np.array(["wow, much amazing", "so easy", ...]), ... } labels = np.array([[1], [0], ...]) # If running TF 2.x, use `tf.compat.v1.estimator.inputs.numpy_input_fn` input_fn = tf.estimator.inputs.numpy_input_fn(features, labels, shuffle=True) estimator = tf.estimator.DNNClassifier(hidden_units, feature_columns) estimator.train(input_fn, max_steps=100) ``` Args: key: A string or `_FeatureColumn` identifying the text feature. module_spec: A ModuleSpec defining the Module to instantiate or a path where to load a ModuleSpec via `load_module_spec` trainable: Whether or not the Module is trainable. False by default, meaning the pre-trained weights are frozen. This is different from the ordinary tf.feature_column.embedding_column(), but that one is intended for training from scratch. Returns: `_DenseColumn` that converts from text input. Raises: ValueError: if module_spec is not suitable for use in this feature column. """ module_spec = module.as_module_spec(module_spec) _check_module_is_text_embedding(module_spec) return _TextEmbeddingColumn(key=key, module_spec=module_spec, trainable=trainable)
python
def text_embedding_column(key, module_spec, trainable=False): """Uses a Module to construct a dense representation from a text feature. This feature column can be used on an input feature whose values are strings of arbitrary size. The result of this feature column is the result of passing its `input` through the module `m` instantiated from `module_spec`, as per `result = m(input)`. The `result` must have dtype float32 and shape `[batch_size, num_features]` with a known value of num_features. Example: ```python comment = text_embedding_column("comment", "/tmp/text-module") feature_columns = [comment, ...] ... features = { "comment": np.array(["wow, much amazing", "so easy", ...]), ... } labels = np.array([[1], [0], ...]) # If running TF 2.x, use `tf.compat.v1.estimator.inputs.numpy_input_fn` input_fn = tf.estimator.inputs.numpy_input_fn(features, labels, shuffle=True) estimator = tf.estimator.DNNClassifier(hidden_units, feature_columns) estimator.train(input_fn, max_steps=100) ``` Args: key: A string or `_FeatureColumn` identifying the text feature. module_spec: A ModuleSpec defining the Module to instantiate or a path where to load a ModuleSpec via `load_module_spec` trainable: Whether or not the Module is trainable. False by default, meaning the pre-trained weights are frozen. This is different from the ordinary tf.feature_column.embedding_column(), but that one is intended for training from scratch. Returns: `_DenseColumn` that converts from text input. Raises: ValueError: if module_spec is not suitable for use in this feature column. """ module_spec = module.as_module_spec(module_spec) _check_module_is_text_embedding(module_spec) return _TextEmbeddingColumn(key=key, module_spec=module_spec, trainable=trainable)
[ "def", "text_embedding_column", "(", "key", ",", "module_spec", ",", "trainable", "=", "False", ")", ":", "module_spec", "=", "module", ".", "as_module_spec", "(", "module_spec", ")", "_check_module_is_text_embedding", "(", "module_spec", ")", "return", "_TextEmbedd...
Uses a Module to construct a dense representation from a text feature. This feature column can be used on an input feature whose values are strings of arbitrary size. The result of this feature column is the result of passing its `input` through the module `m` instantiated from `module_spec`, as per `result = m(input)`. The `result` must have dtype float32 and shape `[batch_size, num_features]` with a known value of num_features. Example: ```python comment = text_embedding_column("comment", "/tmp/text-module") feature_columns = [comment, ...] ... features = { "comment": np.array(["wow, much amazing", "so easy", ...]), ... } labels = np.array([[1], [0], ...]) # If running TF 2.x, use `tf.compat.v1.estimator.inputs.numpy_input_fn` input_fn = tf.estimator.inputs.numpy_input_fn(features, labels, shuffle=True) estimator = tf.estimator.DNNClassifier(hidden_units, feature_columns) estimator.train(input_fn, max_steps=100) ``` Args: key: A string or `_FeatureColumn` identifying the text feature. module_spec: A ModuleSpec defining the Module to instantiate or a path where to load a ModuleSpec via `load_module_spec` trainable: Whether or not the Module is trainable. False by default, meaning the pre-trained weights are frozen. This is different from the ordinary tf.feature_column.embedding_column(), but that one is intended for training from scratch. Returns: `_DenseColumn` that converts from text input. Raises: ValueError: if module_spec is not suitable for use in this feature column.
[ "Uses", "a", "Module", "to", "construct", "a", "dense", "representation", "from", "a", "text", "feature", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L33-L80
30,212
tensorflow/hub
tensorflow_hub/feature_column.py
_check_module_is_text_embedding
def _check_module_is_text_embedding(module_spec): """Raises ValueError if `module_spec` is not a text-embedding module. Args: module_spec: A `ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with Tensor(string, shape=(?,)) -> Tensor(float32, shape=(?,K)). """ issues = [] # Find issues with signature inputs. input_info_dict = module_spec.get_input_info_dict() if len(input_info_dict) != 1: issues.append("Module default signature must require only one input") else: input_info, = input_info_dict.values() input_shape = input_info.get_shape() if not (input_info.dtype == tf.string and input_shape.ndims == 1 and input_shape.as_list() == [None]): issues.append( "Module default signature must have only one input " "tf.Tensor(shape=(?,), dtype=string)" ) # Find issues with signature outputs. output_info_dict = module_spec.get_output_info_dict() if "default" not in output_info_dict: issues.append("Module default signature must have a 'default' output.") else: output_info = output_info_dict["default"] output_shape = output_info.get_shape() if not (output_info.dtype == tf.float32 and output_shape.ndims == 2 and not output_shape.as_list()[0] and output_shape.as_list()[1]): issues.append( "Module default signature must have a 'default' output of " "tf.Tensor(shape=(?,K), dtype=float32)." ) if issues: raise ValueError("Module is not a text-embedding: %r" % issues)
python
def _check_module_is_text_embedding(module_spec): """Raises ValueError if `module_spec` is not a text-embedding module. Args: module_spec: A `ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with Tensor(string, shape=(?,)) -> Tensor(float32, shape=(?,K)). """ issues = [] # Find issues with signature inputs. input_info_dict = module_spec.get_input_info_dict() if len(input_info_dict) != 1: issues.append("Module default signature must require only one input") else: input_info, = input_info_dict.values() input_shape = input_info.get_shape() if not (input_info.dtype == tf.string and input_shape.ndims == 1 and input_shape.as_list() == [None]): issues.append( "Module default signature must have only one input " "tf.Tensor(shape=(?,), dtype=string)" ) # Find issues with signature outputs. output_info_dict = module_spec.get_output_info_dict() if "default" not in output_info_dict: issues.append("Module default signature must have a 'default' output.") else: output_info = output_info_dict["default"] output_shape = output_info.get_shape() if not (output_info.dtype == tf.float32 and output_shape.ndims == 2 and not output_shape.as_list()[0] and output_shape.as_list()[1]): issues.append( "Module default signature must have a 'default' output of " "tf.Tensor(shape=(?,K), dtype=float32)." ) if issues: raise ValueError("Module is not a text-embedding: %r" % issues)
[ "def", "_check_module_is_text_embedding", "(", "module_spec", ")", ":", "issues", "=", "[", "]", "# Find issues with signature inputs.", "input_info_dict", "=", "module_spec", ".", "get_input_info_dict", "(", ")", "if", "len", "(", "input_info_dict", ")", "!=", "1", ...
Raises ValueError if `module_spec` is not a text-embedding module. Args: module_spec: A `ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with Tensor(string, shape=(?,)) -> Tensor(float32, shape=(?,K)).
[ "Raises", "ValueError", "if", "module_spec", "is", "not", "a", "text", "-", "embedding", "module", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L83-L124
30,213
tensorflow/hub
tensorflow_hub/feature_column.py
image_embedding_column
def image_embedding_column(key, module_spec): """Uses a Module to get a dense 1-D representation from the pixels of images. This feature column can be used on images, represented as float32 tensors of RGB pixel data in the range [0,1]. This can be read from a numeric_column() if the tf.Example input data happens to have decoded images, all with the same shape [height, width, 3]. More commonly, the input_fn will have code to explicitly decode images, resize them (possibly after performing data augmentation such as random crops etc.), and provide a batch of shape [batch_size, height, width, 3]. The result of this feature column is the result of passing its `input` through the module `m` instantiated from `module_spec`, as per `result = m({"images": input})`. The `result` must have dtype float32 and shape `[batch_size, num_features]` with a known value of num_features. Example: ```python image_column = hub.image_embedding_column("embeddings", "/tmp/image-module") feature_columns = [image_column, ...] estimator = tf.estimator.LinearClassifier(feature_columns, ...) height, width = hub.get_expected_image_size(image_column.module_spec) input_fn = ... # Provides "embeddings" with shape [None, height, width, 3]. estimator.train(input_fn, ...) ``` Args: key: A string or `_FeatureColumn` identifying the input image data. module_spec: A string handle or a `ModuleSpec` identifying the module. Returns: `_DenseColumn` that converts from pixel data. Raises: ValueError: if module_spec is not suitable for use in this feature column. """ module_spec = module.as_module_spec(module_spec) _check_module_is_image_embedding(module_spec) return _ImageEmbeddingColumn(key=key, module_spec=module_spec)
python
def image_embedding_column(key, module_spec): """Uses a Module to get a dense 1-D representation from the pixels of images. This feature column can be used on images, represented as float32 tensors of RGB pixel data in the range [0,1]. This can be read from a numeric_column() if the tf.Example input data happens to have decoded images, all with the same shape [height, width, 3]. More commonly, the input_fn will have code to explicitly decode images, resize them (possibly after performing data augmentation such as random crops etc.), and provide a batch of shape [batch_size, height, width, 3]. The result of this feature column is the result of passing its `input` through the module `m` instantiated from `module_spec`, as per `result = m({"images": input})`. The `result` must have dtype float32 and shape `[batch_size, num_features]` with a known value of num_features. Example: ```python image_column = hub.image_embedding_column("embeddings", "/tmp/image-module") feature_columns = [image_column, ...] estimator = tf.estimator.LinearClassifier(feature_columns, ...) height, width = hub.get_expected_image_size(image_column.module_spec) input_fn = ... # Provides "embeddings" with shape [None, height, width, 3]. estimator.train(input_fn, ...) ``` Args: key: A string or `_FeatureColumn` identifying the input image data. module_spec: A string handle or a `ModuleSpec` identifying the module. Returns: `_DenseColumn` that converts from pixel data. Raises: ValueError: if module_spec is not suitable for use in this feature column. """ module_spec = module.as_module_spec(module_spec) _check_module_is_image_embedding(module_spec) return _ImageEmbeddingColumn(key=key, module_spec=module_spec)
[ "def", "image_embedding_column", "(", "key", ",", "module_spec", ")", ":", "module_spec", "=", "module", ".", "as_module_spec", "(", "module_spec", ")", "_check_module_is_image_embedding", "(", "module_spec", ")", "return", "_ImageEmbeddingColumn", "(", "key", "=", ...
Uses a Module to get a dense 1-D representation from the pixels of images. This feature column can be used on images, represented as float32 tensors of RGB pixel data in the range [0,1]. This can be read from a numeric_column() if the tf.Example input data happens to have decoded images, all with the same shape [height, width, 3]. More commonly, the input_fn will have code to explicitly decode images, resize them (possibly after performing data augmentation such as random crops etc.), and provide a batch of shape [batch_size, height, width, 3]. The result of this feature column is the result of passing its `input` through the module `m` instantiated from `module_spec`, as per `result = m({"images": input})`. The `result` must have dtype float32 and shape `[batch_size, num_features]` with a known value of num_features. Example: ```python image_column = hub.image_embedding_column("embeddings", "/tmp/image-module") feature_columns = [image_column, ...] estimator = tf.estimator.LinearClassifier(feature_columns, ...) height, width = hub.get_expected_image_size(image_column.module_spec) input_fn = ... # Provides "embeddings" with shape [None, height, width, 3]. estimator.train(input_fn, ...) ``` Args: key: A string or `_FeatureColumn` identifying the input image data. module_spec: A string handle or a `ModuleSpec` identifying the module. Returns: `_DenseColumn` that converts from pixel data. Raises: ValueError: if module_spec is not suitable for use in this feature column.
[ "Uses", "a", "Module", "to", "get", "a", "dense", "1", "-", "D", "representation", "from", "the", "pixels", "of", "images", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L162-L201
30,214
tensorflow/hub
tensorflow_hub/feature_column.py
_check_module_is_image_embedding
def _check_module_is_image_embedding(module_spec): """Raises ValueError if `module_spec` is not usable as image embedding. Args: module_spec: A `_ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with mappingan "images" input to a Tensor(float32, shape=(_,K)). """ issues = [] # Find issues with "default" signature inputs. The common signatures for # image models prescribe a specific name; we trust it if we find it # and if we can do the necessary inference of input shapes from it. input_info_dict = module_spec.get_input_info_dict() if (list(input_info_dict.keys()) != ["images"] or input_info_dict["images"].dtype != tf.float32): issues.append("Module 'default' signature must require a single input, " "which must have type float32 and name 'images'.") else: try: image_util.get_expected_image_size(module_spec) except ValueError as e: issues.append("Module does not support hub.get_expected_image_size(); " "original error was:\n" + str(e)) # Raised again below. # Find issues with "default" signature outputs. We test that the dtype and # shape is appropriate for use in input_layer(). output_info_dict = module_spec.get_output_info_dict() if "default" not in output_info_dict: issues.append("Module 'default' signature must have a 'default' output.") else: output_type = output_info_dict["default"].dtype output_shape = output_info_dict["default"].get_shape() if not (output_type == tf.float32 and output_shape.ndims == 2 and output_shape.dims[1].value): issues.append("Module 'default' signature must have a 'default' output " "of tf.Tensor(shape=(_,K), dtype=float32).") if issues: raise ValueError("Module is not usable as image embedding: %r" % issues)
python
def _check_module_is_image_embedding(module_spec): """Raises ValueError if `module_spec` is not usable as image embedding. Args: module_spec: A `_ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with mappingan "images" input to a Tensor(float32, shape=(_,K)). """ issues = [] # Find issues with "default" signature inputs. The common signatures for # image models prescribe a specific name; we trust it if we find it # and if we can do the necessary inference of input shapes from it. input_info_dict = module_spec.get_input_info_dict() if (list(input_info_dict.keys()) != ["images"] or input_info_dict["images"].dtype != tf.float32): issues.append("Module 'default' signature must require a single input, " "which must have type float32 and name 'images'.") else: try: image_util.get_expected_image_size(module_spec) except ValueError as e: issues.append("Module does not support hub.get_expected_image_size(); " "original error was:\n" + str(e)) # Raised again below. # Find issues with "default" signature outputs. We test that the dtype and # shape is appropriate for use in input_layer(). output_info_dict = module_spec.get_output_info_dict() if "default" not in output_info_dict: issues.append("Module 'default' signature must have a 'default' output.") else: output_type = output_info_dict["default"].dtype output_shape = output_info_dict["default"].get_shape() if not (output_type == tf.float32 and output_shape.ndims == 2 and output_shape.dims[1].value): issues.append("Module 'default' signature must have a 'default' output " "of tf.Tensor(shape=(_,K), dtype=float32).") if issues: raise ValueError("Module is not usable as image embedding: %r" % issues)
[ "def", "_check_module_is_image_embedding", "(", "module_spec", ")", ":", "issues", "=", "[", "]", "# Find issues with \"default\" signature inputs. The common signatures for", "# image models prescribe a specific name; we trust it if we find it", "# and if we can do the necessary inference o...
Raises ValueError if `module_spec` is not usable as image embedding. Args: module_spec: A `_ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with mappingan "images" input to a Tensor(float32, shape=(_,K)).
[ "Raises", "ValueError", "if", "module_spec", "is", "not", "usable", "as", "image", "embedding", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L204-L245
30,215
tensorflow/hub
tensorflow_hub/feature_column.py
_TextEmbeddingColumn.name
def name(self): """Returns string. Used for variable_scope and naming.""" if not hasattr(self, "_name"): self._name = "{}_hub_module_embedding".format(self.key) return self._name
python
def name(self): """Returns string. Used for variable_scope and naming.""" if not hasattr(self, "_name"): self._name = "{}_hub_module_embedding".format(self.key) return self._name
[ "def", "name", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"_name\"", ")", ":", "self", ".", "_name", "=", "\"{}_hub_module_embedding\"", ".", "format", "(", "self", ".", "key", ")", "return", "self", ".", "_name" ]
Returns string. Used for variable_scope and naming.
[ "Returns", "string", ".", "Used", "for", "variable_scope", "and", "naming", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L134-L138
30,216
tensorflow/hub
tensorflow_hub/feature_column.py
_TextEmbeddingColumn._get_dense_tensor
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): """Returns a `Tensor`.""" del weight_collections text_batch = tf.reshape(inputs.get(self), shape=[-1]) m = module.Module(self.module_spec, trainable=self.trainable and trainable) return m(text_batch)
python
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): """Returns a `Tensor`.""" del weight_collections text_batch = tf.reshape(inputs.get(self), shape=[-1]) m = module.Module(self.module_spec, trainable=self.trainable and trainable) return m(text_batch)
[ "def", "_get_dense_tensor", "(", "self", ",", "inputs", ",", "weight_collections", "=", "None", ",", "trainable", "=", "None", ")", ":", "del", "weight_collections", "text_batch", "=", "tf", ".", "reshape", "(", "inputs", ".", "get", "(", "self", ")", ",",...
Returns a `Tensor`.
[ "Returns", "a", "Tensor", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L154-L159
30,217
tensorflow/hub
tensorflow_hub/feature_column.py
_ImageEmbeddingColumn._parse_example_spec
def _parse_example_spec(self): """Returns a `tf.Example` parsing spec as dict.""" height, width = image_util.get_expected_image_size(self.module_spec) input_shape = [height, width, 3] return {self.key: tf_v1.FixedLenFeature(input_shape, tf.float32)}
python
def _parse_example_spec(self): """Returns a `tf.Example` parsing spec as dict.""" height, width = image_util.get_expected_image_size(self.module_spec) input_shape = [height, width, 3] return {self.key: tf_v1.FixedLenFeature(input_shape, tf.float32)}
[ "def", "_parse_example_spec", "(", "self", ")", ":", "height", ",", "width", "=", "image_util", ".", "get_expected_image_size", "(", "self", ".", "module_spec", ")", "input_shape", "=", "[", "height", ",", "width", ",", "3", "]", "return", "{", "self", "."...
Returns a `tf.Example` parsing spec as dict.
[ "Returns", "a", "tf", ".", "Example", "parsing", "spec", "as", "dict", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L265-L269
30,218
tensorflow/hub
tensorflow_hub/resolver.py
tfhub_cache_dir
def tfhub_cache_dir(default_cache_dir=None, use_temp=False): """Returns cache directory. Returns cache directory from either TFHUB_CACHE_DIR environment variable or --tfhub_cache_dir or default, if set. Args: default_cache_dir: Default cache location to use if neither TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir are not specified. use_temp: bool, Optional to enable using system's temp directory as a module cache directory if neither default_cache_dir nor --tfhub_cache_dir nor TFHUB_CACHE_DIR environment variable are specified . """ # Note: We are using FLAGS["tfhub_cache_dir"] (and not FLAGS.tfhub_cache_dir) # to access the flag value in order to avoid parsing argv list. The flags # should have been parsed by now in main() by tf.app.run(). If that was not # the case (say in Colab env) we skip flag parsing because argv may contain # unknown flags. cache_dir = ( os.getenv(_TFHUB_CACHE_DIR, "") or FLAGS["tfhub_cache_dir"].value or default_cache_dir) if not cache_dir and use_temp: # Place all TF-Hub modules under <system's temp>/tfhub_modules. cache_dir = os.path.join(tempfile.gettempdir(), "tfhub_modules") if cache_dir: logging.log_first_n(logging.INFO, "Using %s to cache modules.", 1, cache_dir) return cache_dir
python
def tfhub_cache_dir(default_cache_dir=None, use_temp=False): """Returns cache directory. Returns cache directory from either TFHUB_CACHE_DIR environment variable or --tfhub_cache_dir or default, if set. Args: default_cache_dir: Default cache location to use if neither TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir are not specified. use_temp: bool, Optional to enable using system's temp directory as a module cache directory if neither default_cache_dir nor --tfhub_cache_dir nor TFHUB_CACHE_DIR environment variable are specified . """ # Note: We are using FLAGS["tfhub_cache_dir"] (and not FLAGS.tfhub_cache_dir) # to access the flag value in order to avoid parsing argv list. The flags # should have been parsed by now in main() by tf.app.run(). If that was not # the case (say in Colab env) we skip flag parsing because argv may contain # unknown flags. cache_dir = ( os.getenv(_TFHUB_CACHE_DIR, "") or FLAGS["tfhub_cache_dir"].value or default_cache_dir) if not cache_dir and use_temp: # Place all TF-Hub modules under <system's temp>/tfhub_modules. cache_dir = os.path.join(tempfile.gettempdir(), "tfhub_modules") if cache_dir: logging.log_first_n(logging.INFO, "Using %s to cache modules.", 1, cache_dir) return cache_dir
[ "def", "tfhub_cache_dir", "(", "default_cache_dir", "=", "None", ",", "use_temp", "=", "False", ")", ":", "# Note: We are using FLAGS[\"tfhub_cache_dir\"] (and not FLAGS.tfhub_cache_dir)", "# to access the flag value in order to avoid parsing argv list. The flags", "# should have been pa...
Returns cache directory. Returns cache directory from either TFHUB_CACHE_DIR environment variable or --tfhub_cache_dir or default, if set. Args: default_cache_dir: Default cache location to use if neither TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir are not specified. use_temp: bool, Optional to enable using system's temp directory as a module cache directory if neither default_cache_dir nor --tfhub_cache_dir nor TFHUB_CACHE_DIR environment variable are specified .
[ "Returns", "cache", "directory", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L50-L80
30,219
tensorflow/hub
tensorflow_hub/resolver.py
create_local_module_dir
def create_local_module_dir(cache_dir, module_name): """Creates and returns the name of directory where to cache a module.""" tf_v1.gfile.MakeDirs(cache_dir) return os.path.join(cache_dir, module_name)
python
def create_local_module_dir(cache_dir, module_name): """Creates and returns the name of directory where to cache a module.""" tf_v1.gfile.MakeDirs(cache_dir) return os.path.join(cache_dir, module_name)
[ "def", "create_local_module_dir", "(", "cache_dir", ",", "module_name", ")", ":", "tf_v1", ".", "gfile", ".", "MakeDirs", "(", "cache_dir", ")", "return", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "module_name", ")" ]
Creates and returns the name of directory where to cache a module.
[ "Creates", "and", "returns", "the", "name", "of", "directory", "where", "to", "cache", "a", "module", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L83-L86
30,220
tensorflow/hub
tensorflow_hub/resolver.py
_write_module_descriptor_file
def _write_module_descriptor_file(handle, module_dir): """Writes a descriptor file about the directory containing a module. Args: handle: Module name/handle. module_dir: Directory where a module was downloaded. """ readme = _module_descriptor_file(module_dir) readme_content = ( "Module: %s\nDownload Time: %s\nDownloader Hostname: %s (PID:%d)" % (handle, str(datetime.datetime.today()), socket.gethostname(), os.getpid())) # The descriptor file has no semantic meaning so we allow 'overwrite' since # there is a chance that another process might have written the file (and # crashed), we just overwrite it. tf_utils.atomic_write_string_to_file(readme, readme_content, overwrite=True)
python
def _write_module_descriptor_file(handle, module_dir): """Writes a descriptor file about the directory containing a module. Args: handle: Module name/handle. module_dir: Directory where a module was downloaded. """ readme = _module_descriptor_file(module_dir) readme_content = ( "Module: %s\nDownload Time: %s\nDownloader Hostname: %s (PID:%d)" % (handle, str(datetime.datetime.today()), socket.gethostname(), os.getpid())) # The descriptor file has no semantic meaning so we allow 'overwrite' since # there is a chance that another process might have written the file (and # crashed), we just overwrite it. tf_utils.atomic_write_string_to_file(readme, readme_content, overwrite=True)
[ "def", "_write_module_descriptor_file", "(", "handle", ",", "module_dir", ")", ":", "readme", "=", "_module_descriptor_file", "(", "module_dir", ")", "readme_content", "=", "(", "\"Module: %s\\nDownload Time: %s\\nDownloader Hostname: %s (PID:%d)\"", "%", "(", "handle", ","...
Writes a descriptor file about the directory containing a module. Args: handle: Module name/handle. module_dir: Directory where a module was downloaded.
[ "Writes", "a", "descriptor", "file", "about", "the", "directory", "containing", "a", "module", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L219-L234
30,221
tensorflow/hub
tensorflow_hub/resolver.py
_locked_tmp_dir_size
def _locked_tmp_dir_size(lock_filename): """Returns the size of the temp dir pointed to by the given lock file.""" task_uid = _task_uid_from_lock_file(lock_filename) try: return _dir_size( _temp_download_dir(_module_dir(lock_filename), task_uid)) except tf.errors.NotFoundError: return 0
python
def _locked_tmp_dir_size(lock_filename): """Returns the size of the temp dir pointed to by the given lock file.""" task_uid = _task_uid_from_lock_file(lock_filename) try: return _dir_size( _temp_download_dir(_module_dir(lock_filename), task_uid)) except tf.errors.NotFoundError: return 0
[ "def", "_locked_tmp_dir_size", "(", "lock_filename", ")", ":", "task_uid", "=", "_task_uid_from_lock_file", "(", "lock_filename", ")", "try", ":", "return", "_dir_size", "(", "_temp_download_dir", "(", "_module_dir", "(", "lock_filename", ")", ",", "task_uid", ")", ...
Returns the size of the temp dir pointed to by the given lock file.
[ "Returns", "the", "size", "of", "the", "temp", "dir", "pointed", "to", "by", "the", "given", "lock", "file", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L283-L290
30,222
tensorflow/hub
tensorflow_hub/resolver.py
_wait_for_lock_to_disappear
def _wait_for_lock_to_disappear(handle, lock_file, lock_file_timeout_sec): """Waits for the lock file to disappear. The lock file was created by another process that is performing a download into its own temporary directory. The name of this temp directory is sha1(<module>).<uuid>.tmp where <uuid> comes from the lock file. Args: handle: The location from where a module is being download. lock_file: Lock file created by another process downloading this module. lock_file_timeout_sec: The amount of time to wait (in seconds) before we can declare that the other downloaded has been abandoned. The download is declared abandoned if there is no file size change in the temporary directory within the last 'lock_file_timeout_sec'. """ locked_tmp_dir_size = 0 locked_tmp_dir_size_check_time = time.time() lock_file_content = None while tf_v1.gfile.Exists(lock_file): try: logging.log_every_n( logging.INFO, "Module '%s' already being downloaded by '%s'. Waiting.", 10, handle, tf_utils.read_file_to_string(lock_file)) if (time.time() - locked_tmp_dir_size_check_time > lock_file_timeout_sec): # Check whether the holder of the current lock downloaded anything # in its temporary directory in the last 'lock_file_timeout_sec'. cur_locked_tmp_dir_size = _locked_tmp_dir_size(lock_file) cur_lock_file_content = tf_utils.read_file_to_string(lock_file) if (cur_locked_tmp_dir_size == locked_tmp_dir_size and cur_lock_file_content == lock_file_content): # There is was no data downloaded in the past # 'lock_file_timeout_sec'. Steal the lock and proceed with the # local download. logging.warning("Deleting lock file %s due to inactivity.", lock_file) tf_v1.gfile.Remove(lock_file) break locked_tmp_dir_size = cur_locked_tmp_dir_size locked_tmp_dir_size_check_time = time.time() lock_file_content = cur_lock_file_content except tf.errors.NotFoundError: # Lock file or temp directory were deleted during check. Continue # to check whether download succeeded or we need to start our own # download. 
pass finally: time.sleep(5)
python
def _wait_for_lock_to_disappear(handle, lock_file, lock_file_timeout_sec): """Waits for the lock file to disappear. The lock file was created by another process that is performing a download into its own temporary directory. The name of this temp directory is sha1(<module>).<uuid>.tmp where <uuid> comes from the lock file. Args: handle: The location from where a module is being download. lock_file: Lock file created by another process downloading this module. lock_file_timeout_sec: The amount of time to wait (in seconds) before we can declare that the other downloaded has been abandoned. The download is declared abandoned if there is no file size change in the temporary directory within the last 'lock_file_timeout_sec'. """ locked_tmp_dir_size = 0 locked_tmp_dir_size_check_time = time.time() lock_file_content = None while tf_v1.gfile.Exists(lock_file): try: logging.log_every_n( logging.INFO, "Module '%s' already being downloaded by '%s'. Waiting.", 10, handle, tf_utils.read_file_to_string(lock_file)) if (time.time() - locked_tmp_dir_size_check_time > lock_file_timeout_sec): # Check whether the holder of the current lock downloaded anything # in its temporary directory in the last 'lock_file_timeout_sec'. cur_locked_tmp_dir_size = _locked_tmp_dir_size(lock_file) cur_lock_file_content = tf_utils.read_file_to_string(lock_file) if (cur_locked_tmp_dir_size == locked_tmp_dir_size and cur_lock_file_content == lock_file_content): # There is was no data downloaded in the past # 'lock_file_timeout_sec'. Steal the lock and proceed with the # local download. logging.warning("Deleting lock file %s due to inactivity.", lock_file) tf_v1.gfile.Remove(lock_file) break locked_tmp_dir_size = cur_locked_tmp_dir_size locked_tmp_dir_size_check_time = time.time() lock_file_content = cur_lock_file_content except tf.errors.NotFoundError: # Lock file or temp directory were deleted during check. Continue # to check whether download succeeded or we need to start our own # download. 
pass finally: time.sleep(5)
[ "def", "_wait_for_lock_to_disappear", "(", "handle", ",", "lock_file", ",", "lock_file_timeout_sec", ")", ":", "locked_tmp_dir_size", "=", "0", "locked_tmp_dir_size_check_time", "=", "time", ".", "time", "(", ")", "lock_file_content", "=", "None", "while", "tf_v1", ...
Waits for the lock file to disappear. The lock file was created by another process that is performing a download into its own temporary directory. The name of this temp directory is sha1(<module>).<uuid>.tmp where <uuid> comes from the lock file. Args: handle: The location from where a module is being download. lock_file: Lock file created by another process downloading this module. lock_file_timeout_sec: The amount of time to wait (in seconds) before we can declare that the other downloaded has been abandoned. The download is declared abandoned if there is no file size change in the temporary directory within the last 'lock_file_timeout_sec'.
[ "Waits", "for", "the", "lock", "file", "to", "disappear", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L293-L342
30,223
tensorflow/hub
tensorflow_hub/resolver.py
atomic_download
def atomic_download(handle, download_fn, module_dir, lock_file_timeout_sec=10 * 60): """Returns the path to a Module directory for a given TF-Hub Module handle. Args: handle: (string) Location of a TF-Hub Module. download_fn: Callback function that actually performs download. The callback receives two arguments, handle and the location of a temporary directory to download the content into. module_dir: Directory where to download the module files to. lock_file_timeout_sec: The amount of time we give the current holder of the lock to make progress in downloading a module. If no progress is made, the lock is revoked. Returns: A string containing the path to a TF-Hub Module directory. Raises: ValueError: if the Module is not found. """ lock_file = _lock_filename(module_dir) task_uid = uuid.uuid4().hex lock_contents = _lock_file_contents(task_uid) tmp_dir = _temp_download_dir(module_dir, task_uid) # Attempt to protect against cases of processes being cancelled with # KeyboardInterrupt by using a try/finally clause to remove the lock # and tmp_dir. try: while True: try: tf_utils.atomic_write_string_to_file(lock_file, lock_contents, overwrite=False) # Must test condition again, since another process could have created # the module and deleted the old lock file since last test. if tf_v1.gfile.Exists(module_dir): # Lock file will be deleted in the finally-clause. return module_dir break # Proceed to downloading the module. except tf.errors.OpError: pass # Wait for lock file to disappear. _wait_for_lock_to_disappear(handle, lock_file, lock_file_timeout_sec) # At this point we either deleted a lock or a lock got removed by the # owner or another process. Perform one more iteration of the while-loop, # we would either terminate due tf_v1.gfile.Exists(module_dir) or because # we would obtain a lock ourselves, or wait again for the lock to # disappear. # Lock file acquired. 
logging.info("Downloading TF-Hub Module '%s'.", handle) tf_v1.gfile.MakeDirs(tmp_dir) download_fn(handle, tmp_dir) # Write module descriptor to capture information about which module was # downloaded by whom and when. The file stored at the same level as a # directory in order to keep the content of the 'model_dir' exactly as it # was define by the module publisher. # # Note: The descriptor is written purely to help the end-user to identify # which directory belongs to which module. The descriptor is not part of the # module caching protocol and no code in the TF-Hub library reads its # content. _write_module_descriptor_file(handle, module_dir) try: tf_v1.gfile.Rename(tmp_dir, module_dir) logging.info("Downloaded TF-Hub Module '%s'.", handle) except tf.errors.AlreadyExistsError: logging.warning("Module already exists in %s", module_dir) finally: try: # Temp directory is owned by the current process, remove it. tf_v1.gfile.DeleteRecursively(tmp_dir) except tf.errors.NotFoundError: pass try: contents = tf_utils.read_file_to_string(lock_file) except tf.errors.NotFoundError: contents = "" if contents == lock_contents: # Lock file exists and is owned by this process. try: tf_v1.gfile.Remove(lock_file) except tf.errors.NotFoundError: pass return module_dir
python
def atomic_download(handle,
                    download_fn,
                    module_dir,
                    lock_file_timeout_sec=10 * 60):
  """Returns the path to a Module directory for a given TF-Hub Module handle.

  Args:
    handle: (string) Location of a TF-Hub Module.
    download_fn: Callback that actually performs the download. It receives
      two arguments: the handle and the temporary directory to download the
      content into.
    module_dir: Directory where to download the module files to.
    lock_file_timeout_sec: Seconds the current lock holder is given to make
      progress in downloading a module. If no progress is made, the lock is
      revoked.

  Returns:
    A string containing the path to a TF-Hub Module directory.

  Raises:
    ValueError: if the Module is not found.
  """
  lock_path = _lock_filename(module_dir)
  uid = uuid.uuid4().hex
  our_token = _lock_file_contents(uid)
  staging_dir = _temp_download_dir(module_dir, uid)
  # A try/finally clause guards against processes cancelled with
  # KeyboardInterrupt: the lock and the staging directory are always removed.
  try:
    while True:
      try:
        tf_utils.atomic_write_string_to_file(lock_path, our_token,
                                             overwrite=False)
        # Re-test: another process may have finished the module download and
        # deleted its old lock file since the previous check.
        if tf_v1.gfile.Exists(module_dir):
          # Our lock file gets deleted by the finally-clause below.
          return module_dir
        break  # Lock acquired; proceed with the download.
      except tf.errors.OpError:
        pass
      # Someone else holds the lock; block until it disappears (stale locks
      # are revoked after lock_file_timeout_sec of inactivity), then retry:
      # either module_dir exists by then or we grab the lock ourselves.
      _wait_for_lock_to_disappear(handle, lock_path, lock_file_timeout_sec)

    logging.info("Downloading TF-Hub Module '%s'.", handle)
    tf_v1.gfile.MakeDirs(staging_dir)
    download_fn(handle, staging_dir)
    # Record which module was downloaded, by whom and when. The descriptor
    # sits next to (not inside) the module directory so the published content
    # stays exactly as the module publisher defined it. It is informational
    # only: nothing in the TF-Hub library ever reads it.
    _write_module_descriptor_file(handle, module_dir)
    try:
      tf_v1.gfile.Rename(staging_dir, module_dir)
      logging.info("Downloaded TF-Hub Module '%s'.", handle)
    except tf.errors.AlreadyExistsError:
      logging.warning("Module already exists in %s", module_dir)

  finally:
    try:
      # The staging directory is owned by this process; always clean it up.
      tf_v1.gfile.DeleteRecursively(staging_dir)
    except tf.errors.NotFoundError:
      pass
    try:
      current_token = tf_utils.read_file_to_string(lock_path)
    except tf.errors.NotFoundError:
      current_token = ""
    if current_token == our_token:
      # The lock file still exists and is ours; release it.
      try:
        tf_v1.gfile.Remove(lock_path)
      except tf.errors.NotFoundError:
        pass

  return module_dir
[ "def", "atomic_download", "(", "handle", ",", "download_fn", ",", "module_dir", ",", "lock_file_timeout_sec", "=", "10", "*", "60", ")", ":", "lock_file", "=", "_lock_filename", "(", "module_dir", ")", "task_uid", "=", "uuid", ".", "uuid4", "(", ")", ".", ...
Returns the path to a Module directory for a given TF-Hub Module handle. Args: handle: (string) Location of a TF-Hub Module. download_fn: Callback function that actually performs download. The callback receives two arguments, handle and the location of a temporary directory to download the content into. module_dir: Directory where to download the module files to. lock_file_timeout_sec: The amount of time we give the current holder of the lock to make progress in downloading a module. If no progress is made, the lock is revoked. Returns: A string containing the path to a TF-Hub Module directory. Raises: ValueError: if the Module is not found.
[ "Returns", "the", "path", "to", "a", "Module", "directory", "for", "a", "given", "TF", "-", "Hub", "Module", "handle", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L345-L434
30,224
tensorflow/hub
tensorflow_hub/resolver.py
DownloadManager._print_download_progress_msg
def _print_download_progress_msg(self, msg, flush=False): """Prints a message about download progress either to the console or TF log. Args: msg: Message to print. flush: Indicates whether to flush the output (only used in interactive mode). """ if self._interactive_mode(): # Print progress message to console overwriting previous progress # message. self._max_prog_str = max(self._max_prog_str, len(msg)) sys.stdout.write("\r%-{}s".format(self._max_prog_str) % msg) sys.stdout.flush() if flush: print("\n") else: # Interactive progress tracking is disabled. Print progress to the # standard TF log. logging.info(msg)
python
def _print_download_progress_msg(self, msg, flush=False): """Prints a message about download progress either to the console or TF log. Args: msg: Message to print. flush: Indicates whether to flush the output (only used in interactive mode). """ if self._interactive_mode(): # Print progress message to console overwriting previous progress # message. self._max_prog_str = max(self._max_prog_str, len(msg)) sys.stdout.write("\r%-{}s".format(self._max_prog_str) % msg) sys.stdout.flush() if flush: print("\n") else: # Interactive progress tracking is disabled. Print progress to the # standard TF log. logging.info(msg)
[ "def", "_print_download_progress_msg", "(", "self", ",", "msg", ",", "flush", "=", "False", ")", ":", "if", "self", ".", "_interactive_mode", "(", ")", ":", "# Print progress message to console overwriting previous progress", "# message.", "self", ".", "_max_prog_str", ...
Prints a message about download progress either to the console or TF log. Args: msg: Message to print. flush: Indicates whether to flush the output (only used in interactive mode).
[ "Prints", "a", "message", "about", "download", "progress", "either", "to", "the", "console", "or", "TF", "log", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L103-L122
30,225
tensorflow/hub
tensorflow_hub/resolver.py
DownloadManager._log_progress
def _log_progress(self, bytes_downloaded): """Logs progress information about ongoing module download. Args: bytes_downloaded: Number of bytes downloaded. """ self._total_bytes_downloaded += bytes_downloaded now = time.time() if (self._interactive_mode() or now - self._last_progress_msg_print_time > 15): # Print progress message every 15 secs or if interactive progress # tracking is enabled. self._print_download_progress_msg( "Downloading %s: %s" % (self._url, tf_utils.bytes_to_readable_str( self._total_bytes_downloaded, True))) self._last_progress_msg_print_time = now
python
def _log_progress(self, bytes_downloaded): """Logs progress information about ongoing module download. Args: bytes_downloaded: Number of bytes downloaded. """ self._total_bytes_downloaded += bytes_downloaded now = time.time() if (self._interactive_mode() or now - self._last_progress_msg_print_time > 15): # Print progress message every 15 secs or if interactive progress # tracking is enabled. self._print_download_progress_msg( "Downloading %s: %s" % (self._url, tf_utils.bytes_to_readable_str( self._total_bytes_downloaded, True))) self._last_progress_msg_print_time = now
[ "def", "_log_progress", "(", "self", ",", "bytes_downloaded", ")", ":", "self", ".", "_total_bytes_downloaded", "+=", "bytes_downloaded", "now", "=", "time", ".", "time", "(", ")", "if", "(", "self", ".", "_interactive_mode", "(", ")", "or", "now", "-", "s...
Logs progress information about ongoing module download. Args: bytes_downloaded: Number of bytes downloaded.
[ "Logs", "progress", "information", "about", "ongoing", "module", "download", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L124-L140
30,226
tensorflow/hub
tensorflow_hub/resolver.py
DownloadManager._extract_file
def _extract_file(self, tgz, tarinfo, dst_path, buffer_size=10<<20): """Extracts 'tarinfo' from 'tgz' and writes to 'dst_path'.""" src = tgz.extractfile(tarinfo) dst = tf_v1.gfile.GFile(dst_path, "wb") while 1: buf = src.read(buffer_size) if not buf: break dst.write(buf) self._log_progress(len(buf)) dst.close() src.close()
python
def _extract_file(self, tgz, tarinfo, dst_path, buffer_size=10<<20): """Extracts 'tarinfo' from 'tgz' and writes to 'dst_path'.""" src = tgz.extractfile(tarinfo) dst = tf_v1.gfile.GFile(dst_path, "wb") while 1: buf = src.read(buffer_size) if not buf: break dst.write(buf) self._log_progress(len(buf)) dst.close() src.close()
[ "def", "_extract_file", "(", "self", ",", "tgz", ",", "tarinfo", ",", "dst_path", ",", "buffer_size", "=", "10", "<<", "20", ")", ":", "src", "=", "tgz", ".", "extractfile", "(", "tarinfo", ")", "dst", "=", "tf_v1", ".", "gfile", ".", "GFile", "(", ...
Extracts 'tarinfo' from 'tgz' and writes to 'dst_path'.
[ "Extracts", "tarinfo", "from", "tgz", "and", "writes", "to", "dst_path", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L146-L157
30,227
tensorflow/hub
tensorflow_hub/resolver.py
DownloadManager.download_and_uncompress
def download_and_uncompress(self, fileobj, dst_path): """Streams the content for the 'fileobj' and stores the result in dst_path. Args: fileobj: File handle pointing to .tar/.tar.gz content. dst_path: Absolute path where to store uncompressed data from 'fileobj'. Raises: ValueError: Unknown object encountered inside the TAR file. """ try: with tarfile.open(mode="r|*", fileobj=fileobj) as tgz: for tarinfo in tgz: abs_target_path = _merge_relative_path(dst_path, tarinfo.name) if tarinfo.isfile(): self._extract_file(tgz, tarinfo, abs_target_path) elif tarinfo.isdir(): tf_v1.gfile.MakeDirs(abs_target_path) else: # We do not support symlinks and other uncommon objects. raise ValueError( "Unexpected object type in tar archive: %s" % tarinfo.type) total_size_str = tf_utils.bytes_to_readable_str( self._total_bytes_downloaded, True) self._print_download_progress_msg( "Downloaded %s, Total size: %s" % (self._url, total_size_str), flush=True) except tarfile.ReadError: raise IOError("%s does not appear to be a valid module." % self._url)
python
def download_and_uncompress(self, fileobj, dst_path): """Streams the content for the 'fileobj' and stores the result in dst_path. Args: fileobj: File handle pointing to .tar/.tar.gz content. dst_path: Absolute path where to store uncompressed data from 'fileobj'. Raises: ValueError: Unknown object encountered inside the TAR file. """ try: with tarfile.open(mode="r|*", fileobj=fileobj) as tgz: for tarinfo in tgz: abs_target_path = _merge_relative_path(dst_path, tarinfo.name) if tarinfo.isfile(): self._extract_file(tgz, tarinfo, abs_target_path) elif tarinfo.isdir(): tf_v1.gfile.MakeDirs(abs_target_path) else: # We do not support symlinks and other uncommon objects. raise ValueError( "Unexpected object type in tar archive: %s" % tarinfo.type) total_size_str = tf_utils.bytes_to_readable_str( self._total_bytes_downloaded, True) self._print_download_progress_msg( "Downloaded %s, Total size: %s" % (self._url, total_size_str), flush=True) except tarfile.ReadError: raise IOError("%s does not appear to be a valid module." % self._url)
[ "def", "download_and_uncompress", "(", "self", ",", "fileobj", ",", "dst_path", ")", ":", "try", ":", "with", "tarfile", ".", "open", "(", "mode", "=", "\"r|*\"", ",", "fileobj", "=", "fileobj", ")", "as", "tgz", ":", "for", "tarinfo", "in", "tgz", ":"...
Streams the content for the 'fileobj' and stores the result in dst_path. Args: fileobj: File handle pointing to .tar/.tar.gz content. dst_path: Absolute path where to store uncompressed data from 'fileobj'. Raises: ValueError: Unknown object encountered inside the TAR file.
[ "Streams", "the", "content", "for", "the", "fileobj", "and", "stores", "the", "result", "in", "dst_path", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L159-L189
30,228
tensorflow/hub
tensorflow_hub/meta_graph_lib.py
prepend_name_scope
def prepend_name_scope(name, import_scope): """Prepends name scope to a name.""" # Based on tensorflow/python/framework/ops.py implementation. if import_scope: try: str_to_replace = r"([\^]|loc:@|^)(.*)" return re.sub(str_to_replace, r"\1" + import_scope + r"/\2", tf.compat.as_str_any(name)) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name
python
def prepend_name_scope(name, import_scope): """Prepends name scope to a name.""" # Based on tensorflow/python/framework/ops.py implementation. if import_scope: try: str_to_replace = r"([\^]|loc:@|^)(.*)" return re.sub(str_to_replace, r"\1" + import_scope + r"/\2", tf.compat.as_str_any(name)) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name
[ "def", "prepend_name_scope", "(", "name", ",", "import_scope", ")", ":", "# Based on tensorflow/python/framework/ops.py implementation.", "if", "import_scope", ":", "try", ":", "str_to_replace", "=", "r\"([\\^]|loc:@|^)(.*)\"", "return", "re", ".", "sub", "(", "str_to_rep...
Prepends name scope to a name.
[ "Prepends", "name", "scope", "to", "a", "name", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/meta_graph_lib.py#L32-L45
30,229
tensorflow/hub
tensorflow_hub/meta_graph_lib.py
prefix_shared_name_attributes
def prefix_shared_name_attributes(meta_graph, absolute_import_scope): """In-place prefixes shared_name attributes of nodes.""" shared_name_attr = "shared_name" for node in meta_graph.graph_def.node: shared_name_value = node.attr.get(shared_name_attr, None) if shared_name_value and shared_name_value.HasField("s"): if shared_name_value.s: node.attr[shared_name_attr].s = tf.compat.as_bytes( prepend_name_scope( shared_name_value.s, import_scope=absolute_import_scope))
python
def prefix_shared_name_attributes(meta_graph, absolute_import_scope): """In-place prefixes shared_name attributes of nodes.""" shared_name_attr = "shared_name" for node in meta_graph.graph_def.node: shared_name_value = node.attr.get(shared_name_attr, None) if shared_name_value and shared_name_value.HasField("s"): if shared_name_value.s: node.attr[shared_name_attr].s = tf.compat.as_bytes( prepend_name_scope( shared_name_value.s, import_scope=absolute_import_scope))
[ "def", "prefix_shared_name_attributes", "(", "meta_graph", ",", "absolute_import_scope", ")", ":", "shared_name_attr", "=", "\"shared_name\"", "for", "node", "in", "meta_graph", ".", "graph_def", ".", "node", ":", "shared_name_value", "=", "node", ".", "attr", ".", ...
In-place prefixes shared_name attributes of nodes.
[ "In", "-", "place", "prefixes", "shared_name", "attributes", "of", "nodes", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/meta_graph_lib.py#L48-L57
30,230
tensorflow/hub
tensorflow_hub/meta_graph_lib.py
mark_backward
def mark_backward(output_tensor, used_node_names): """Function to propagate backwards in the graph and mark nodes as used. Traverses recursively through the graph from the end tensor, through the op that generates the tensor, and then to the input tensors that feed the op. Nodes encountered are stored in used_node_names. Args: output_tensor: A Tensor which we start the propagation. used_node_names: A list of strings, stores the name of nodes we've marked as visited. """ op = output_tensor.op if op.name in used_node_names: return used_node_names.add(op.name) for input_tensor in op.inputs: mark_backward(input_tensor, used_node_names) for control_input_op in op.control_inputs: used_node_names.add(control_input_op.name) for input_tensor in control_input_op.inputs: mark_backward(input_tensor, used_node_names)
python
def mark_backward(output_tensor, used_node_names): """Function to propagate backwards in the graph and mark nodes as used. Traverses recursively through the graph from the end tensor, through the op that generates the tensor, and then to the input tensors that feed the op. Nodes encountered are stored in used_node_names. Args: output_tensor: A Tensor which we start the propagation. used_node_names: A list of strings, stores the name of nodes we've marked as visited. """ op = output_tensor.op if op.name in used_node_names: return used_node_names.add(op.name) for input_tensor in op.inputs: mark_backward(input_tensor, used_node_names) for control_input_op in op.control_inputs: used_node_names.add(control_input_op.name) for input_tensor in control_input_op.inputs: mark_backward(input_tensor, used_node_names)
[ "def", "mark_backward", "(", "output_tensor", ",", "used_node_names", ")", ":", "op", "=", "output_tensor", ".", "op", "if", "op", ".", "name", "in", "used_node_names", ":", "return", "used_node_names", ".", "add", "(", "op", ".", "name", ")", "for", "inpu...
Function to propagate backwards in the graph and mark nodes as used. Traverses recursively through the graph from the end tensor, through the op that generates the tensor, and then to the input tensors that feed the op. Nodes encountered are stored in used_node_names. Args: output_tensor: A Tensor which we start the propagation. used_node_names: A list of strings, stores the name of nodes we've marked as visited.
[ "Function", "to", "propagate", "backwards", "in", "the", "graph", "and", "mark", "nodes", "as", "used", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/meta_graph_lib.py#L60-L81
30,231
tensorflow/hub
tensorflow_hub/meta_graph_lib.py
prune_unused_nodes
def prune_unused_nodes(meta_graph, signature_def): """Function to prune unused ops given a signature def. This function does a graph traversal through from all outputs as defined in the signature_def to collect all used nodes. Then, any nodes which are unused can be discarded. This is useful for graph which are executing eagerly or on TPUs. Args: meta_graph: The input/output MetaGraphDef for which we wish to prune. signature_def: A SignatureDef which specifies the outputs from which we wish to start graph traversal. """ # Instantiate a temporary empty graph so that we have access to Graph API # and import the meta_graph. graph = tf_v1.Graph() with graph.as_default(): tf_v1.train.import_meta_graph(meta_graph, input_map={}, import_scope="") # Traverse from all outputs and mark all nodes. used_node_names = set() for _, tensor_def in signature_def.outputs.items(): output_tensor = graph.get_tensor_by_name(tensor_def.name) mark_backward(output_tensor, used_node_names) # Filter out all nodes in the meta_graph that are not used. node_filter_in_list = [] for node in meta_graph.graph_def.node: # Make a special exception for VarHandleOp. Removing VarhandleOps # will make the graph not importable as they often leave nodes hanging. # These will be disconnected through the feedmap when importing the # metagraph. if node.name in used_node_names or node.op == "VarHandleOp": node_filter_in_list.append(node) del meta_graph.graph_def.node[:] meta_graph.graph_def.node.extend(node_filter_in_list) del graph
python
def prune_unused_nodes(meta_graph, signature_def): """Function to prune unused ops given a signature def. This function does a graph traversal through from all outputs as defined in the signature_def to collect all used nodes. Then, any nodes which are unused can be discarded. This is useful for graph which are executing eagerly or on TPUs. Args: meta_graph: The input/output MetaGraphDef for which we wish to prune. signature_def: A SignatureDef which specifies the outputs from which we wish to start graph traversal. """ # Instantiate a temporary empty graph so that we have access to Graph API # and import the meta_graph. graph = tf_v1.Graph() with graph.as_default(): tf_v1.train.import_meta_graph(meta_graph, input_map={}, import_scope="") # Traverse from all outputs and mark all nodes. used_node_names = set() for _, tensor_def in signature_def.outputs.items(): output_tensor = graph.get_tensor_by_name(tensor_def.name) mark_backward(output_tensor, used_node_names) # Filter out all nodes in the meta_graph that are not used. node_filter_in_list = [] for node in meta_graph.graph_def.node: # Make a special exception for VarHandleOp. Removing VarhandleOps # will make the graph not importable as they often leave nodes hanging. # These will be disconnected through the feedmap when importing the # metagraph. if node.name in used_node_names or node.op == "VarHandleOp": node_filter_in_list.append(node) del meta_graph.graph_def.node[:] meta_graph.graph_def.node.extend(node_filter_in_list) del graph
[ "def", "prune_unused_nodes", "(", "meta_graph", ",", "signature_def", ")", ":", "# Instantiate a temporary empty graph so that we have access to Graph API", "# and import the meta_graph.", "graph", "=", "tf_v1", ".", "Graph", "(", ")", "with", "graph", ".", "as_default", "(...
Function to prune unused ops given a signature def. This function does a graph traversal through from all outputs as defined in the signature_def to collect all used nodes. Then, any nodes which are unused can be discarded. This is useful for graph which are executing eagerly or on TPUs. Args: meta_graph: The input/output MetaGraphDef for which we wish to prune. signature_def: A SignatureDef which specifies the outputs from which we wish to start graph traversal.
[ "Function", "to", "prune", "unused", "ops", "given", "a", "signature", "def", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/meta_graph_lib.py#L84-L118
30,232
tensorflow/hub
tensorflow_hub/meta_graph_lib.py
prune_feed_map
def prune_feed_map(meta_graph, feed_map): """Function to prune the feedmap of nodes which no longer exist.""" node_names = [x.name + ":0" for x in meta_graph.graph_def.node] keys_to_delete = [] for k, _ in feed_map.items(): if k not in node_names: keys_to_delete.append(k) for k in keys_to_delete: del feed_map[k]
python
def prune_feed_map(meta_graph, feed_map): """Function to prune the feedmap of nodes which no longer exist.""" node_names = [x.name + ":0" for x in meta_graph.graph_def.node] keys_to_delete = [] for k, _ in feed_map.items(): if k not in node_names: keys_to_delete.append(k) for k in keys_to_delete: del feed_map[k]
[ "def", "prune_feed_map", "(", "meta_graph", ",", "feed_map", ")", ":", "node_names", "=", "[", "x", ".", "name", "+", "\":0\"", "for", "x", "in", "meta_graph", ".", "graph_def", ".", "node", "]", "keys_to_delete", "=", "[", "]", "for", "k", ",", "_", ...
Function to prune the feedmap of nodes which no longer exist.
[ "Function", "to", "prune", "the", "feedmap", "of", "nodes", "which", "no", "longer", "exist", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/meta_graph_lib.py#L121-L129
30,233
tensorflow/hub
tensorflow_hub/tf_utils.py
atomic_write_string_to_file
def atomic_write_string_to_file(filename, contents, overwrite): """Writes to `filename` atomically. This means that when `filename` appears in the filesystem, it will contain all of `contents`. With write_string_to_file, it is possible for the file to appear in the filesystem with `contents` only partially written. Accomplished by writing to a temp file and then renaming it. Args: filename: string, pathname for a file contents: string, contents that need to be written to the file overwrite: boolean, if false it's an error for `filename` to be occupied by an existing file. """ temp_pathname = (tf.compat.as_bytes(filename) + tf.compat.as_bytes(".tmp") + tf.compat.as_bytes(uuid.uuid4().hex)) with tf_v1.gfile.GFile(temp_pathname, mode="w") as f: f.write(contents) try: tf_v1.gfile.Rename(temp_pathname, filename, overwrite) except tf.errors.OpError: tf_v1.gfile.Remove(temp_pathname) raise
python
def atomic_write_string_to_file(filename, contents, overwrite): """Writes to `filename` atomically. This means that when `filename` appears in the filesystem, it will contain all of `contents`. With write_string_to_file, it is possible for the file to appear in the filesystem with `contents` only partially written. Accomplished by writing to a temp file and then renaming it. Args: filename: string, pathname for a file contents: string, contents that need to be written to the file overwrite: boolean, if false it's an error for `filename` to be occupied by an existing file. """ temp_pathname = (tf.compat.as_bytes(filename) + tf.compat.as_bytes(".tmp") + tf.compat.as_bytes(uuid.uuid4().hex)) with tf_v1.gfile.GFile(temp_pathname, mode="w") as f: f.write(contents) try: tf_v1.gfile.Rename(temp_pathname, filename, overwrite) except tf.errors.OpError: tf_v1.gfile.Remove(temp_pathname) raise
[ "def", "atomic_write_string_to_file", "(", "filename", ",", "contents", ",", "overwrite", ")", ":", "temp_pathname", "=", "(", "tf", ".", "compat", ".", "as_bytes", "(", "filename", ")", "+", "tf", ".", "compat", ".", "as_bytes", "(", "\".tmp\"", ")", "+",...
Writes to `filename` atomically. This means that when `filename` appears in the filesystem, it will contain all of `contents`. With write_string_to_file, it is possible for the file to appear in the filesystem with `contents` only partially written. Accomplished by writing to a temp file and then renaming it. Args: filename: string, pathname for a file contents: string, contents that need to be written to the file overwrite: boolean, if false it's an error for `filename` to be occupied by an existing file.
[ "Writes", "to", "filename", "atomically", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/tf_utils.py#L40-L64
30,234
tensorflow/hub
tensorflow_hub/tf_utils.py
get_timestamped_export_dir
def get_timestamped_export_dir(export_dir_base): """Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name. """ attempts = 0 while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS: export_timestamp = int(time.time()) export_dir = os.path.join( tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(str(export_timestamp))) if not tf_v1.gfile.Exists(export_dir): # Collisions are still possible (though extremely unlikely): this # directory is not actually created yet, but it will be almost # instantly on return from this function. return export_dir time.sleep(1) attempts += 1 logging.warn( "Export directory %s already exists; retrying (attempt %d/%d)", export_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS) raise RuntimeError("Failed to obtain a unique export directory name after " "%d attempts.".MAX_DIRECTORY_CREATION_ATTEMPTS)
python
def get_timestamped_export_dir(export_dir_base): """Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name. """ attempts = 0 while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS: export_timestamp = int(time.time()) export_dir = os.path.join( tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(str(export_timestamp))) if not tf_v1.gfile.Exists(export_dir): # Collisions are still possible (though extremely unlikely): this # directory is not actually created yet, but it will be almost # instantly on return from this function. return export_dir time.sleep(1) attempts += 1 logging.warn( "Export directory %s already exists; retrying (attempt %d/%d)", export_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS) raise RuntimeError("Failed to obtain a unique export directory name after " "%d attempts.".MAX_DIRECTORY_CREATION_ATTEMPTS)
[ "def", "get_timestamped_export_dir", "(", "export_dir_base", ")", ":", "attempts", "=", "0", "while", "attempts", "<", "MAX_DIRECTORY_CREATION_ATTEMPTS", ":", "export_timestamp", "=", "int", "(", "time", ".", "time", "(", ")", ")", "export_dir", "=", "os", ".", ...
Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name.
[ "Builds", "a", "path", "to", "a", "new", "subdirectory", "within", "the", "base", "directory", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/tf_utils.py#L74-L110
30,235
tensorflow/hub
tensorflow_hub/tf_utils.py
get_temp_export_dir
def get_temp_export_dir(timestamped_export_dir): """Builds a directory name based on the argument but starting with 'temp-'. This relies on the fact that TensorFlow Serving ignores subdirectories of the base directory that can't be parsed as integers. Args: timestamped_export_dir: the name of the eventual export directory, e.g. /foo/bar/<timestamp> Returns: A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>. """ (dirname, basename) = os.path.split(timestamped_export_dir) temp_export_dir = os.path.join( tf.compat.as_bytes(dirname), tf.compat.as_bytes("temp-{}".format(basename))) return temp_export_dir
python
def get_temp_export_dir(timestamped_export_dir): """Builds a directory name based on the argument but starting with 'temp-'. This relies on the fact that TensorFlow Serving ignores subdirectories of the base directory that can't be parsed as integers. Args: timestamped_export_dir: the name of the eventual export directory, e.g. /foo/bar/<timestamp> Returns: A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>. """ (dirname, basename) = os.path.split(timestamped_export_dir) temp_export_dir = os.path.join( tf.compat.as_bytes(dirname), tf.compat.as_bytes("temp-{}".format(basename))) return temp_export_dir
[ "def", "get_temp_export_dir", "(", "timestamped_export_dir", ")", ":", "(", "dirname", ",", "basename", ")", "=", "os", ".", "path", ".", "split", "(", "timestamped_export_dir", ")", "temp_export_dir", "=", "os", ".", "path", ".", "join", "(", "tf", ".", "...
Builds a directory name based on the argument but starting with 'temp-'. This relies on the fact that TensorFlow Serving ignores subdirectories of the base directory that can't be parsed as integers. Args: timestamped_export_dir: the name of the eventual export directory, e.g. /foo/bar/<timestamp> Returns: A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>.
[ "Builds", "a", "directory", "name", "based", "on", "the", "argument", "but", "starting", "with", "temp", "-", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/tf_utils.py#L113-L130
30,236
tensorflow/hub
tensorflow_hub/tf_utils.py
garbage_collect_exports
def garbage_collect_exports(export_dir_base, exports_to_keep): """Deletes older exports, retaining only a given number of the most recent. Export subdirectories are assumed to be named with monotonically increasing integers; the most recent are taken to be those with the largest values. Args: export_dir_base: the base directory under which each export is in a versioned subdirectory. exports_to_keep: Number of exports to keep. Older exports will be garbage collected. Set to None to disable. """ if exports_to_keep is None: return version_paths = [] # List of tuples (version, path) for filename in tf_v1.gfile.ListDirectory(export_dir_base): path = os.path.join( tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(filename)) if len(filename) == 10 and filename.isdigit(): version_paths.append((int(filename), path)) oldest_version_path = sorted(version_paths)[:-exports_to_keep] for _, path in oldest_version_path: try: tf_v1.gfile.DeleteRecursively(path) except tf.errors.NotFoundError as e: logging.warn("Can not delete %s recursively: %s", path, e)
python
def garbage_collect_exports(export_dir_base, exports_to_keep): """Deletes older exports, retaining only a given number of the most recent. Export subdirectories are assumed to be named with monotonically increasing integers; the most recent are taken to be those with the largest values. Args: export_dir_base: the base directory under which each export is in a versioned subdirectory. exports_to_keep: Number of exports to keep. Older exports will be garbage collected. Set to None to disable. """ if exports_to_keep is None: return version_paths = [] # List of tuples (version, path) for filename in tf_v1.gfile.ListDirectory(export_dir_base): path = os.path.join( tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(filename)) if len(filename) == 10 and filename.isdigit(): version_paths.append((int(filename), path)) oldest_version_path = sorted(version_paths)[:-exports_to_keep] for _, path in oldest_version_path: try: tf_v1.gfile.DeleteRecursively(path) except tf.errors.NotFoundError as e: logging.warn("Can not delete %s recursively: %s", path, e)
[ "def", "garbage_collect_exports", "(", "export_dir_base", ",", "exports_to_keep", ")", ":", "if", "exports_to_keep", "is", "None", ":", "return", "version_paths", "=", "[", "]", "# List of tuples (version, path)", "for", "filename", "in", "tf_v1", ".", "gfile", ".",...
Deletes older exports, retaining only a given number of the most recent. Export subdirectories are assumed to be named with monotonically increasing integers; the most recent are taken to be those with the largest values. Args: export_dir_base: the base directory under which each export is in a versioned subdirectory. exports_to_keep: Number of exports to keep. Older exports will be garbage collected. Set to None to disable.
[ "Deletes", "older", "exports", "retaining", "only", "a", "given", "number", "of", "the", "most", "recent", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/tf_utils.py#L135-L162
30,237
tensorflow/hub
tensorflow_hub/tf_utils.py
bytes_to_readable_str
def bytes_to_readable_str(num_bytes, include_b=False): """Generate a human-readable string representing number of bytes. The units B, kB, MB and GB are used. Args: num_bytes: (`int` or None) Number of bytes. include_b: (`bool`) Include the letter B at the end of the unit. Returns: (`str`) A string representing the number of bytes in a human-readable way, including a unit at the end. """ if num_bytes is None: return str(num_bytes) if num_bytes < 1024: result = "%d" % num_bytes elif num_bytes < 1048576: result = "%.2fk" % (num_bytes / float(1 << 10)) elif num_bytes < 1073741824: result = "%.2fM" % (num_bytes / float(1 << 20)) else: result = "%.2fG" % (num_bytes / float(1 << 30)) if include_b: result += "B" return result
python
def bytes_to_readable_str(num_bytes, include_b=False): """Generate a human-readable string representing number of bytes. The units B, kB, MB and GB are used. Args: num_bytes: (`int` or None) Number of bytes. include_b: (`bool`) Include the letter B at the end of the unit. Returns: (`str`) A string representing the number of bytes in a human-readable way, including a unit at the end. """ if num_bytes is None: return str(num_bytes) if num_bytes < 1024: result = "%d" % num_bytes elif num_bytes < 1048576: result = "%.2fk" % (num_bytes / float(1 << 10)) elif num_bytes < 1073741824: result = "%.2fM" % (num_bytes / float(1 << 20)) else: result = "%.2fG" % (num_bytes / float(1 << 30)) if include_b: result += "B" return result
[ "def", "bytes_to_readable_str", "(", "num_bytes", ",", "include_b", "=", "False", ")", ":", "if", "num_bytes", "is", "None", ":", "return", "str", "(", "num_bytes", ")", "if", "num_bytes", "<", "1024", ":", "result", "=", "\"%d\"", "%", "num_bytes", "elif"...
Generate a human-readable string representing number of bytes. The units B, kB, MB and GB are used. Args: num_bytes: (`int` or None) Number of bytes. include_b: (`bool`) Include the letter B at the end of the unit. Returns: (`str`) A string representing the number of bytes in a human-readable way, including a unit at the end.
[ "Generate", "a", "human", "-", "readable", "string", "representing", "number", "of", "bytes", "." ]
09f45963f6787322967b6fec61459f3ac56fbb27
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/tf_utils.py#L165-L192
30,238
pytest-dev/pytest
scripts/release.py
announce
def announce(version): """Generates a new release announcement entry in the docs.""" # Get our list of authors stdout = check_output(["git", "describe", "--abbrev=0", "--tags"]) stdout = stdout.decode("utf-8") last_version = stdout.strip() stdout = check_output( ["git", "log", "{}..HEAD".format(last_version), "--format=%aN"] ) stdout = stdout.decode("utf-8") contributors = set(stdout.splitlines()) template_name = ( "release.minor.rst" if version.endswith(".0") else "release.patch.rst" ) template_text = ( Path(__file__).parent.joinpath(template_name).read_text(encoding="UTF-8") ) contributors_text = ( "\n".join("* {}".format(name) for name in sorted(contributors)) + "\n" ) text = template_text.format(version=version, contributors=contributors_text) target = Path(__file__).parent.joinpath( "../doc/en/announce/release-{}.rst".format(version) ) target.write_text(text, encoding="UTF-8") print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Generated {target.name}") # Update index with the new release entry index_path = Path(__file__).parent.joinpath("../doc/en/announce/index.rst") lines = index_path.read_text(encoding="UTF-8").splitlines() indent = " " for index, line in enumerate(lines): if line.startswith("{}release-".format(indent)): new_line = indent + target.stem if line != new_line: lines.insert(index, new_line) index_path.write_text("\n".join(lines) + "\n", encoding="UTF-8") print( f"{Fore.CYAN}[generate.announce] {Fore.RESET}Updated {index_path.name}" ) else: print( f"{Fore.CYAN}[generate.announce] {Fore.RESET}Skip {index_path.name} (already contains release)" ) break check_call(["git", "add", str(target)])
python
def announce(version): """Generates a new release announcement entry in the docs.""" # Get our list of authors stdout = check_output(["git", "describe", "--abbrev=0", "--tags"]) stdout = stdout.decode("utf-8") last_version = stdout.strip() stdout = check_output( ["git", "log", "{}..HEAD".format(last_version), "--format=%aN"] ) stdout = stdout.decode("utf-8") contributors = set(stdout.splitlines()) template_name = ( "release.minor.rst" if version.endswith(".0") else "release.patch.rst" ) template_text = ( Path(__file__).parent.joinpath(template_name).read_text(encoding="UTF-8") ) contributors_text = ( "\n".join("* {}".format(name) for name in sorted(contributors)) + "\n" ) text = template_text.format(version=version, contributors=contributors_text) target = Path(__file__).parent.joinpath( "../doc/en/announce/release-{}.rst".format(version) ) target.write_text(text, encoding="UTF-8") print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Generated {target.name}") # Update index with the new release entry index_path = Path(__file__).parent.joinpath("../doc/en/announce/index.rst") lines = index_path.read_text(encoding="UTF-8").splitlines() indent = " " for index, line in enumerate(lines): if line.startswith("{}release-".format(indent)): new_line = indent + target.stem if line != new_line: lines.insert(index, new_line) index_path.write_text("\n".join(lines) + "\n", encoding="UTF-8") print( f"{Fore.CYAN}[generate.announce] {Fore.RESET}Updated {index_path.name}" ) else: print( f"{Fore.CYAN}[generate.announce] {Fore.RESET}Skip {index_path.name} (already contains release)" ) break check_call(["git", "add", str(target)])
[ "def", "announce", "(", "version", ")", ":", "# Get our list of authors", "stdout", "=", "check_output", "(", "[", "\"git\"", ",", "\"describe\"", ",", "\"--abbrev=0\"", ",", "\"--tags\"", "]", ")", "stdout", "=", "stdout", ".", "decode", "(", "\"utf-8\"", ")"...
Generates a new release announcement entry in the docs.
[ "Generates", "a", "new", "release", "announcement", "entry", "in", "the", "docs", "." ]
204004c8b8b743110a5f12f2bfa31154e0f59815
https://github.com/pytest-dev/pytest/blob/204004c8b8b743110a5f12f2bfa31154e0f59815/scripts/release.py#L14-L65
30,239
kubernetes-client/python
kubernetes/client/models/v1alpha1_webhook_client_config.py
V1alpha1WebhookClientConfig.ca_bundle
def ca_bundle(self, ca_bundle): """ Sets the ca_bundle of this V1alpha1WebhookClientConfig. `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. :param ca_bundle: The ca_bundle of this V1alpha1WebhookClientConfig. :type: str """ if ca_bundle is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle): raise ValueError("Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") self._ca_bundle = ca_bundle
python
def ca_bundle(self, ca_bundle): """ Sets the ca_bundle of this V1alpha1WebhookClientConfig. `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. :param ca_bundle: The ca_bundle of this V1alpha1WebhookClientConfig. :type: str """ if ca_bundle is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle): raise ValueError("Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") self._ca_bundle = ca_bundle
[ "def", "ca_bundle", "(", "self", ",", "ca_bundle", ")", ":", "if", "ca_bundle", "is", "not", "None", "and", "not", "re", ".", "search", "(", "'^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$'", ",", "ca_bundle", ")", ":", "raise", "ValueError"...
Sets the ca_bundle of this V1alpha1WebhookClientConfig. `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. :param ca_bundle: The ca_bundle of this V1alpha1WebhookClientConfig. :type: str
[ "Sets", "the", "ca_bundle", "of", "this", "V1alpha1WebhookClientConfig", ".", "caBundle", "is", "a", "PEM", "encoded", "CA", "bundle", "which", "will", "be", "used", "to", "validate", "the", "webhook", "s", "server", "certificate", ".", "If", "unspecified", "s...
5e512ff564c244c50cab780d821542ed56aa965a
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/models/v1alpha1_webhook_client_config.py#L74-L85
30,240
kubernetes-client/python
kubernetes/client/models/runtime_raw_extension.py
RuntimeRawExtension.raw
def raw(self, raw): """ Sets the raw of this RuntimeRawExtension. Raw is the underlying serialization of this object. :param raw: The raw of this RuntimeRawExtension. :type: str """ if raw is None: raise ValueError("Invalid value for `raw`, must not be `None`") if raw is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', raw): raise ValueError("Invalid value for `raw`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") self._raw = raw
python
def raw(self, raw): """ Sets the raw of this RuntimeRawExtension. Raw is the underlying serialization of this object. :param raw: The raw of this RuntimeRawExtension. :type: str """ if raw is None: raise ValueError("Invalid value for `raw`, must not be `None`") if raw is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', raw): raise ValueError("Invalid value for `raw`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") self._raw = raw
[ "def", "raw", "(", "self", ",", "raw", ")", ":", "if", "raw", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `raw`, must not be `None`\"", ")", "if", "raw", "is", "not", "None", "and", "not", "re", ".", "search", "(", "'^(?:[A-Za-z0-9+\\...
Sets the raw of this RuntimeRawExtension. Raw is the underlying serialization of this object. :param raw: The raw of this RuntimeRawExtension. :type: str
[ "Sets", "the", "raw", "of", "this", "RuntimeRawExtension", ".", "Raw", "is", "the", "underlying", "serialization", "of", "this", "object", "." ]
5e512ff564c244c50cab780d821542ed56aa965a
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/models/runtime_raw_extension.py#L63-L76
30,241
kubernetes-client/python
kubernetes/client/api_client.py
ApiClient.pool
def pool(self): """Create thread pool on first request avoids instantiating unused threadpool for blocking clients. """ if self._pool is None: self._pool = ThreadPool(self.pool_threads) return self._pool
python
def pool(self): """Create thread pool on first request avoids instantiating unused threadpool for blocking clients. """ if self._pool is None: self._pool = ThreadPool(self.pool_threads) return self._pool
[ "def", "pool", "(", "self", ")", ":", "if", "self", ".", "_pool", "is", "None", ":", "self", ".", "_pool", "=", "ThreadPool", "(", "self", ".", "pool_threads", ")", "return", "self", ".", "_pool" ]
Create thread pool on first request avoids instantiating unused threadpool for blocking clients.
[ "Create", "thread", "pool", "on", "first", "request", "avoids", "instantiating", "unused", "threadpool", "for", "blocking", "clients", "." ]
5e512ff564c244c50cab780d821542ed56aa965a
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/api_client.py#L85-L91
30,242
kubernetes-client/python
kubernetes/client/configuration.py
Configuration.debug
def debug(self, value): """ Sets the debug status. :param value: The debug status, True or False. :type: bool """ self.__debug = value if self.__debug: # if debug status is True, turn on debug logging for _, logger in iteritems(self.logger): logger.setLevel(logging.DEBUG) # turn on httplib debug httplib.HTTPConnection.debuglevel = 1 else: # if debug status is False, turn off debug logging, # setting log level to default `logging.WARNING` for _, logger in iteritems(self.logger): logger.setLevel(logging.WARNING) # turn off httplib debug httplib.HTTPConnection.debuglevel = 0
python
def debug(self, value): """ Sets the debug status. :param value: The debug status, True or False. :type: bool """ self.__debug = value if self.__debug: # if debug status is True, turn on debug logging for _, logger in iteritems(self.logger): logger.setLevel(logging.DEBUG) # turn on httplib debug httplib.HTTPConnection.debuglevel = 1 else: # if debug status is False, turn off debug logging, # setting log level to default `logging.WARNING` for _, logger in iteritems(self.logger): logger.setLevel(logging.WARNING) # turn off httplib debug httplib.HTTPConnection.debuglevel = 0
[ "def", "debug", "(", "self", ",", "value", ")", ":", "self", ".", "__debug", "=", "value", "if", "self", ".", "__debug", ":", "# if debug status is True, turn on debug logging", "for", "_", ",", "logger", "in", "iteritems", "(", "self", ".", "logger", ")", ...
Sets the debug status. :param value: The debug status, True or False. :type: bool
[ "Sets", "the", "debug", "status", "." ]
5e512ff564c244c50cab780d821542ed56aa965a
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/configuration.py#L153-L173
30,243
kubernetes-client/python
kubernetes/client/configuration.py
Configuration.logger_format
def logger_format(self, value): """ Sets the logger_format. The logger_formatter will be updated when sets logger_format. :param value: The format string. :type: str """ self.__logger_format = value self.logger_formatter = logging.Formatter(self.__logger_format)
python
def logger_format(self, value): """ Sets the logger_format. The logger_formatter will be updated when sets logger_format. :param value: The format string. :type: str """ self.__logger_format = value self.logger_formatter = logging.Formatter(self.__logger_format)
[ "def", "logger_format", "(", "self", ",", "value", ")", ":", "self", ".", "__logger_format", "=", "value", "self", ".", "logger_formatter", "=", "logging", ".", "Formatter", "(", "self", ".", "__logger_format", ")" ]
Sets the logger_format. The logger_formatter will be updated when sets logger_format. :param value: The format string. :type: str
[ "Sets", "the", "logger_format", "." ]
5e512ff564c244c50cab780d821542ed56aa965a
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/configuration.py#L183-L193
30,244
kubernetes-client/python
kubernetes/client/models/v1beta1_certificate_signing_request_status.py
V1beta1CertificateSigningRequestStatus.certificate
def certificate(self, certificate): """ Sets the certificate of this V1beta1CertificateSigningRequestStatus. If request was approved, the controller will place the issued certificate here. :param certificate: The certificate of this V1beta1CertificateSigningRequestStatus. :type: str """ if certificate is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', certificate): raise ValueError("Invalid value for `certificate`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") self._certificate = certificate
python
def certificate(self, certificate): """ Sets the certificate of this V1beta1CertificateSigningRequestStatus. If request was approved, the controller will place the issued certificate here. :param certificate: The certificate of this V1beta1CertificateSigningRequestStatus. :type: str """ if certificate is not None and not re.search('^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', certificate): raise ValueError("Invalid value for `certificate`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") self._certificate = certificate
[ "def", "certificate", "(", "self", ",", "certificate", ")", ":", "if", "certificate", "is", "not", "None", "and", "not", "re", ".", "search", "(", "'^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$'", ",", "certificate", ")", ":", "raise", "Val...
Sets the certificate of this V1beta1CertificateSigningRequestStatus. If request was approved, the controller will place the issued certificate here. :param certificate: The certificate of this V1beta1CertificateSigningRequestStatus. :type: str
[ "Sets", "the", "certificate", "of", "this", "V1beta1CertificateSigningRequestStatus", ".", "If", "request", "was", "approved", "the", "controller", "will", "place", "the", "issued", "certificate", "here", "." ]
5e512ff564c244c50cab780d821542ed56aa965a
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/models/v1beta1_certificate_signing_request_status.py#L69-L80
30,245
bokeh/bokeh
bokeh/core/property/wrappers.py
notify_owner
def notify_owner(func): ''' A decorator for mutating methods of property container classes that notifies owners of the property container about mutating changes. Args: func (callable) : the container method to wrap in a notification Returns: wrapped method Examples: A ``__setitem__`` could be wrapped like this: .. code-block:: python # x[i] = y @notify_owner def __setitem__(self, i, y): return super(PropertyValueDict, self).__setitem__(i, y) The returned wrapped method will have a docstring indicating what original method it is wrapping. ''' def wrapper(self, *args, **kwargs): old = self._saved_copy() result = func(self, *args, **kwargs) self._notify_owners(old) return result wrapper.__doc__ = "Container method ``%s`` instrumented to notify property owners" % func.__name__ return wrapper
python
def notify_owner(func): ''' A decorator for mutating methods of property container classes that notifies owners of the property container about mutating changes. Args: func (callable) : the container method to wrap in a notification Returns: wrapped method Examples: A ``__setitem__`` could be wrapped like this: .. code-block:: python # x[i] = y @notify_owner def __setitem__(self, i, y): return super(PropertyValueDict, self).__setitem__(i, y) The returned wrapped method will have a docstring indicating what original method it is wrapping. ''' def wrapper(self, *args, **kwargs): old = self._saved_copy() result = func(self, *args, **kwargs) self._notify_owners(old) return result wrapper.__doc__ = "Container method ``%s`` instrumented to notify property owners" % func.__name__ return wrapper
[ "def", "notify_owner", "(", "func", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "old", "=", "self", ".", "_saved_copy", "(", ")", "result", "=", "func", "(", "self", ",", "*", "args", ",", "*", ...
A decorator for mutating methods of property container classes that notifies owners of the property container about mutating changes. Args: func (callable) : the container method to wrap in a notification Returns: wrapped method Examples: A ``__setitem__`` could be wrapped like this: .. code-block:: python # x[i] = y @notify_owner def __setitem__(self, i, y): return super(PropertyValueDict, self).__setitem__(i, y) The returned wrapped method will have a docstring indicating what original method it is wrapping.
[ "A", "decorator", "for", "mutating", "methods", "of", "property", "container", "classes", "that", "notifies", "owners", "of", "the", "property", "container", "about", "mutating", "changes", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/wrappers.py#L97-L128
30,246
bokeh/bokeh
bokeh/core/property/wrappers.py
PropertyValueColumnData._stream
def _stream(self, doc, source, new_data, rollover=None, setter=None): ''' Internal implementation to handle special-casing stream events on ``ColumnDataSource`` columns. Normally any changes to the ``.data`` dict attribute on a ``ColumnDataSource`` triggers a notification, causing all of the data to be synchronized between server and clients. The ``.stream`` method on column data sources exists to provide a more efficient way to perform streaming (i.e. append-only) updates to a data source, without having to perform a full synchronization, which would needlessly re-send all the data. To accomplish this, this function bypasses the wrapped methods on ``PropertyValueDict`` and uses the unwrapped versions on the dict superclass directly. It then explicitly makes a notification, adding a special ``ColumnsStreamedEvent`` hint to the message containing only the small streamed data that BokehJS needs in order to efficiently synchronize. .. warning:: This function assumes the integrity of ``new_data`` has already been verified. ''' old = self._saved_copy() # TODO (bev) Currently this reports old differently for array vs list # For arrays is reports the actual old value. For lists, the old value # is actually the already updated value. This is because the method # self._saved_copy() makes a shallow copy. for k, v in new_data.items(): if isinstance(self[k], np.ndarray) or isinstance(new_data[k], np.ndarray): data = np.append(self[k], new_data[k]) if rollover and len(data) > rollover: data = data[-rollover:] super(PropertyValueDict, self).__setitem__(k, data) else: L = self[k] L.extend(new_data[k]) if rollover is not None: del L[:-rollover] from ...document.events import ColumnsStreamedEvent self._notify_owners(old, hint=ColumnsStreamedEvent(doc, source, new_data, rollover, setter))
python
def _stream(self, doc, source, new_data, rollover=None, setter=None): ''' Internal implementation to handle special-casing stream events on ``ColumnDataSource`` columns. Normally any changes to the ``.data`` dict attribute on a ``ColumnDataSource`` triggers a notification, causing all of the data to be synchronized between server and clients. The ``.stream`` method on column data sources exists to provide a more efficient way to perform streaming (i.e. append-only) updates to a data source, without having to perform a full synchronization, which would needlessly re-send all the data. To accomplish this, this function bypasses the wrapped methods on ``PropertyValueDict`` and uses the unwrapped versions on the dict superclass directly. It then explicitly makes a notification, adding a special ``ColumnsStreamedEvent`` hint to the message containing only the small streamed data that BokehJS needs in order to efficiently synchronize. .. warning:: This function assumes the integrity of ``new_data`` has already been verified. ''' old = self._saved_copy() # TODO (bev) Currently this reports old differently for array vs list # For arrays is reports the actual old value. For lists, the old value # is actually the already updated value. This is because the method # self._saved_copy() makes a shallow copy. for k, v in new_data.items(): if isinstance(self[k], np.ndarray) or isinstance(new_data[k], np.ndarray): data = np.append(self[k], new_data[k]) if rollover and len(data) > rollover: data = data[-rollover:] super(PropertyValueDict, self).__setitem__(k, data) else: L = self[k] L.extend(new_data[k]) if rollover is not None: del L[:-rollover] from ...document.events import ColumnsStreamedEvent self._notify_owners(old, hint=ColumnsStreamedEvent(doc, source, new_data, rollover, setter))
[ "def", "_stream", "(", "self", ",", "doc", ",", "source", ",", "new_data", ",", "rollover", "=", "None", ",", "setter", "=", "None", ")", ":", "old", "=", "self", ".", "_saved_copy", "(", ")", "# TODO (bev) Currently this reports old differently for array vs lis...
Internal implementation to handle special-casing stream events on ``ColumnDataSource`` columns. Normally any changes to the ``.data`` dict attribute on a ``ColumnDataSource`` triggers a notification, causing all of the data to be synchronized between server and clients. The ``.stream`` method on column data sources exists to provide a more efficient way to perform streaming (i.e. append-only) updates to a data source, without having to perform a full synchronization, which would needlessly re-send all the data. To accomplish this, this function bypasses the wrapped methods on ``PropertyValueDict`` and uses the unwrapped versions on the dict superclass directly. It then explicitly makes a notification, adding a special ``ColumnsStreamedEvent`` hint to the message containing only the small streamed data that BokehJS needs in order to efficiently synchronize. .. warning:: This function assumes the integrity of ``new_data`` has already been verified.
[ "Internal", "implementation", "to", "handle", "special", "-", "casing", "stream", "events", "on", "ColumnDataSource", "columns", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/wrappers.py#L398-L444
30,247
bokeh/bokeh
bokeh/core/property/wrappers.py
PropertyValueColumnData._patch
def _patch(self, doc, source, patches, setter=None): ''' Internal implementation to handle special-casing patch events on ``ColumnDataSource`` columns. Normally any changes to the ``.data`` dict attribute on a ``ColumnDataSource`` triggers a notification, causing all of the data to be synchronized between server and clients. The ``.patch`` method on column data sources exists to provide a more efficient way to perform patching (i.e. random access) updates to a data source, without having to perform a full synchronization, which would needlessly re-send all the data. To accomplish this, this function bypasses the wrapped methods on ``PropertyValueDict`` and uses the unwrapped versions on the dict superclass directly. It then explicitly makes a notification, adding a special ``ColumnsPatchedEvent`` hint to the message containing only the small patched data that BokehJS needs in order to efficiently synchronize. .. warning:: This function assumes the integrity of ``patches`` has already been verified. ''' old = self._saved_copy() for name, patch in patches.items(): for ind, value in patch: if isinstance(ind, (int, slice)): self[name][ind] = value else: shape = self[name][ind[0]][tuple(ind[1:])].shape self[name][ind[0]][tuple(ind[1:])] = np.array(value, copy=False).reshape(shape) from ...document.events import ColumnsPatchedEvent self._notify_owners(old, hint=ColumnsPatchedEvent(doc, source, patches, setter))
python
def _patch(self, doc, source, patches, setter=None): ''' Internal implementation to handle special-casing patch events on ``ColumnDataSource`` columns. Normally any changes to the ``.data`` dict attribute on a ``ColumnDataSource`` triggers a notification, causing all of the data to be synchronized between server and clients. The ``.patch`` method on column data sources exists to provide a more efficient way to perform patching (i.e. random access) updates to a data source, without having to perform a full synchronization, which would needlessly re-send all the data. To accomplish this, this function bypasses the wrapped methods on ``PropertyValueDict`` and uses the unwrapped versions on the dict superclass directly. It then explicitly makes a notification, adding a special ``ColumnsPatchedEvent`` hint to the message containing only the small patched data that BokehJS needs in order to efficiently synchronize. .. warning:: This function assumes the integrity of ``patches`` has already been verified. ''' old = self._saved_copy() for name, patch in patches.items(): for ind, value in patch: if isinstance(ind, (int, slice)): self[name][ind] = value else: shape = self[name][ind[0]][tuple(ind[1:])].shape self[name][ind[0]][tuple(ind[1:])] = np.array(value, copy=False).reshape(shape) from ...document.events import ColumnsPatchedEvent self._notify_owners(old, hint=ColumnsPatchedEvent(doc, source, patches, setter))
[ "def", "_patch", "(", "self", ",", "doc", ",", "source", ",", "patches", ",", "setter", "=", "None", ")", ":", "old", "=", "self", ".", "_saved_copy", "(", ")", "for", "name", ",", "patch", "in", "patches", ".", "items", "(", ")", ":", "for", "in...
Internal implementation to handle special-casing patch events on ``ColumnDataSource`` columns. Normally any changes to the ``.data`` dict attribute on a ``ColumnDataSource`` triggers a notification, causing all of the data to be synchronized between server and clients. The ``.patch`` method on column data sources exists to provide a more efficient way to perform patching (i.e. random access) updates to a data source, without having to perform a full synchronization, which would needlessly re-send all the data. To accomplish this, this function bypasses the wrapped methods on ``PropertyValueDict`` and uses the unwrapped versions on the dict superclass directly. It then explicitly makes a notification, adding a special ``ColumnsPatchedEvent`` hint to the message containing only the small patched data that BokehJS needs in order to efficiently synchronize. .. warning:: This function assumes the integrity of ``patches`` has already been verified.
[ "Internal", "implementation", "to", "handle", "special", "-", "casing", "patch", "events", "on", "ColumnDataSource", "columns", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/wrappers.py#L447-L485
30,248
bokeh/bokeh
bokeh/__init__.py
license
def license(): ''' Print the Bokeh license to the console. Returns: None ''' from os.path import join with open(join(__path__[0], 'LICENSE.txt')) as lic: print(lic.read())
python
def license(): ''' Print the Bokeh license to the console. Returns: None ''' from os.path import join with open(join(__path__[0], 'LICENSE.txt')) as lic: print(lic.read())
[ "def", "license", "(", ")", ":", "from", "os", ".", "path", "import", "join", "with", "open", "(", "join", "(", "__path__", "[", "0", "]", ",", "'LICENSE.txt'", ")", ")", "as", "lic", ":", "print", "(", "lic", ".", "read", "(", ")", ")" ]
Print the Bokeh license to the console. Returns: None
[ "Print", "the", "Bokeh", "license", "to", "the", "console", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/__init__.py#L51-L60
30,249
bokeh/bokeh
bokeh/core/property/bases.py
Property._copy_default
def _copy_default(cls, default): ''' Return a copy of the default, or a new value if the default is specified by a function. ''' if not isinstance(default, types.FunctionType): return copy(default) else: return default()
python
def _copy_default(cls, default): ''' Return a copy of the default, or a new value if the default is specified by a function. ''' if not isinstance(default, types.FunctionType): return copy(default) else: return default()
[ "def", "_copy_default", "(", "cls", ",", "default", ")", ":", "if", "not", "isinstance", "(", "default", ",", "types", ".", "FunctionType", ")", ":", "return", "copy", "(", "default", ")", "else", ":", "return", "default", "(", ")" ]
Return a copy of the default, or a new value if the default is specified by a function.
[ "Return", "a", "copy", "of", "the", "default", "or", "a", "new", "value", "if", "the", "default", "is", "specified", "by", "a", "function", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/bases.py#L153-L161
30,250
bokeh/bokeh
bokeh/core/property/bases.py
Property.matches
def matches(self, new, old): ''' Whether two parameters match values. If either ``new`` or ``old`` is a NumPy array or Pandas Series or Index, then the result of ``np.array_equal`` will determine if the values match. Otherwise, the result of standard Python equality will be returned. Returns: True, if new and old match, False otherwise ''' if isinstance(new, np.ndarray) or isinstance(old, np.ndarray): return np.array_equal(new, old) if pd: if isinstance(new, pd.Series) or isinstance(old, pd.Series): return np.array_equal(new, old) if isinstance(new, pd.Index) or isinstance(old, pd.Index): return np.array_equal(new, old) try: # this handles the special but common case where there is a dict with array # or series as values (e.g. the .data property of a ColumnDataSource) if isinstance(new, dict) and isinstance(old, dict): if set(new.keys()) != set(old.keys()): return False return all(self.matches(new[k], old[k]) for k in new) return new == old # if the comparison fails for some reason, just punt and return no-match except ValueError: return False
python
def matches(self, new, old): ''' Whether two parameters match values. If either ``new`` or ``old`` is a NumPy array or Pandas Series or Index, then the result of ``np.array_equal`` will determine if the values match. Otherwise, the result of standard Python equality will be returned. Returns: True, if new and old match, False otherwise ''' if isinstance(new, np.ndarray) or isinstance(old, np.ndarray): return np.array_equal(new, old) if pd: if isinstance(new, pd.Series) or isinstance(old, pd.Series): return np.array_equal(new, old) if isinstance(new, pd.Index) or isinstance(old, pd.Index): return np.array_equal(new, old) try: # this handles the special but common case where there is a dict with array # or series as values (e.g. the .data property of a ColumnDataSource) if isinstance(new, dict) and isinstance(old, dict): if set(new.keys()) != set(old.keys()): return False return all(self.matches(new[k], old[k]) for k in new) return new == old # if the comparison fails for some reason, just punt and return no-match except ValueError: return False
[ "def", "matches", "(", "self", ",", "new", ",", "old", ")", ":", "if", "isinstance", "(", "new", ",", "np", ".", "ndarray", ")", "or", "isinstance", "(", "old", ",", "np", ".", "ndarray", ")", ":", "return", "np", ".", "array_equal", "(", "new", ...
Whether two parameters match values. If either ``new`` or ``old`` is a NumPy array or Pandas Series or Index, then the result of ``np.array_equal`` will determine if the values match. Otherwise, the result of standard Python equality will be returned. Returns: True, if new and old match, False otherwise
[ "Whether", "two", "parameters", "match", "values", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/bases.py#L206-L241
30,251
bokeh/bokeh
bokeh/core/property/bases.py
Property.is_valid
def is_valid(self, value): ''' Whether the value passes validation Args: value (obj) : the value to validate against this property type Returns: True if valid, False otherwise ''' try: if validation_on(): self.validate(value, False) except ValueError: return False else: return True
python
def is_valid(self, value): ''' Whether the value passes validation Args: value (obj) : the value to validate against this property type Returns: True if valid, False otherwise ''' try: if validation_on(): self.validate(value, False) except ValueError: return False else: return True
[ "def", "is_valid", "(", "self", ",", "value", ")", ":", "try", ":", "if", "validation_on", "(", ")", ":", "self", ".", "validate", "(", "value", ",", "False", ")", "except", "ValueError", ":", "return", "False", "else", ":", "return", "True" ]
Whether the value passes validation Args: value (obj) : the value to validate against this property type Returns: True if valid, False otherwise
[ "Whether", "the", "value", "passes", "validation" ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/bases.py#L292-L308
30,252
bokeh/bokeh
bokeh/core/property/bases.py
Property.accepts
def accepts(self, tp, converter): ''' Declare that other types may be converted to this property type. Args: tp (Property) : A type that may be converted automatically to this property type. converter (callable) : A function accepting ``value`` to perform conversion of the value to this property type. Returns: self ''' tp = ParameterizedProperty._validate_type_param(tp) self.alternatives.append((tp, converter)) return self
python
def accepts(self, tp, converter): ''' Declare that other types may be converted to this property type. Args: tp (Property) : A type that may be converted automatically to this property type. converter (callable) : A function accepting ``value`` to perform conversion of the value to this property type. Returns: self ''' tp = ParameterizedProperty._validate_type_param(tp) self.alternatives.append((tp, converter)) return self
[ "def", "accepts", "(", "self", ",", "tp", ",", "converter", ")", ":", "tp", "=", "ParameterizedProperty", ".", "_validate_type_param", "(", "tp", ")", "self", ".", "alternatives", ".", "append", "(", "(", "tp", ",", "converter", ")", ")", "return", "self...
Declare that other types may be converted to this property type. Args: tp (Property) : A type that may be converted automatically to this property type. converter (callable) : A function accepting ``value`` to perform conversion of the value to this property type. Returns: self
[ "Declare", "that", "other", "types", "may", "be", "converted", "to", "this", "property", "type", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/bases.py#L354-L373
30,253
bokeh/bokeh
bokeh/core/property/bases.py
Property.asserts
def asserts(self, fn, msg_or_fn): ''' Assert that prepared values satisfy given conditions. Assertions are intended in enforce conditions beyond simple value type validation. For instance, this method can be use to assert that the columns of a ``ColumnDataSource`` all collectively have the same length at all times. Args: fn (callable) : A function accepting ``(obj, value)`` that returns True if the value passes the assertion, or False otherwise. msg_or_fn (str or callable) : A message to print in case the assertion fails, or a function accepting ``(obj, name, value)`` to call in in case the assertion fails. Returns: self ''' self.assertions.append((fn, msg_or_fn)) return self
python
def asserts(self, fn, msg_or_fn): ''' Assert that prepared values satisfy given conditions. Assertions are intended in enforce conditions beyond simple value type validation. For instance, this method can be use to assert that the columns of a ``ColumnDataSource`` all collectively have the same length at all times. Args: fn (callable) : A function accepting ``(obj, value)`` that returns True if the value passes the assertion, or False otherwise. msg_or_fn (str or callable) : A message to print in case the assertion fails, or a function accepting ``(obj, name, value)`` to call in in case the assertion fails. Returns: self ''' self.assertions.append((fn, msg_or_fn)) return self
[ "def", "asserts", "(", "self", ",", "fn", ",", "msg_or_fn", ")", ":", "self", ".", "assertions", ".", "append", "(", "(", "fn", ",", "msg_or_fn", ")", ")", "return", "self" ]
Assert that prepared values satisfy given conditions. Assertions are intended in enforce conditions beyond simple value type validation. For instance, this method can be use to assert that the columns of a ``ColumnDataSource`` all collectively have the same length at all times. Args: fn (callable) : A function accepting ``(obj, value)`` that returns True if the value passes the assertion, or False otherwise. msg_or_fn (str or callable) : A message to print in case the assertion fails, or a function accepting ``(obj, name, value)`` to call in in case the assertion fails. Returns: self
[ "Assert", "that", "prepared", "values", "satisfy", "given", "conditions", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/bases.py#L375-L398
30,254
bokeh/bokeh
bokeh/application/handlers/code.py
CodeHandler.url_path
def url_path(self): ''' The last path component for the basename of the configured filename. ''' if self.failed: return None else: # TODO should fix invalid URL characters return '/' + os.path.splitext(os.path.basename(self._runner.path))[0]
python
def url_path(self): ''' The last path component for the basename of the configured filename. ''' if self.failed: return None else: # TODO should fix invalid URL characters return '/' + os.path.splitext(os.path.basename(self._runner.path))[0]
[ "def", "url_path", "(", "self", ")", ":", "if", "self", ".", "failed", ":", "return", "None", "else", ":", "# TODO should fix invalid URL characters", "return", "'/'", "+", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", ...
The last path component for the basename of the configured filename.
[ "The", "last", "path", "component", "for", "the", "basename", "of", "the", "configured", "filename", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/application/handlers/code.py#L176-L184
30,255
bokeh/bokeh
bokeh/core/property/dataspec.py
UnitsSpec.make_descriptors
def make_descriptors(self, base_name): ''' Return a list of ``PropertyDescriptor`` instances to install on a class, in order to delegate attribute access to this property. Unlike simpler property types, ``UnitsSpec`` returns multiple descriptors to install. In particular, descriptors for the base property as well as the associated units property are returned. Args: name (str) : the name of the property these descriptors are for Returns: list[PropertyDescriptor] The descriptors returned are collected by the ``MetaHasProps`` metaclass and added to ``HasProps`` subclasses during class creation. ''' units_name = base_name + "_units" units_props = self._units_type.make_descriptors(units_name) return units_props + [ UnitsSpecPropertyDescriptor(base_name, self, units_props[0]) ]
python
def make_descriptors(self, base_name): ''' Return a list of ``PropertyDescriptor`` instances to install on a class, in order to delegate attribute access to this property. Unlike simpler property types, ``UnitsSpec`` returns multiple descriptors to install. In particular, descriptors for the base property as well as the associated units property are returned. Args: name (str) : the name of the property these descriptors are for Returns: list[PropertyDescriptor] The descriptors returned are collected by the ``MetaHasProps`` metaclass and added to ``HasProps`` subclasses during class creation. ''' units_name = base_name + "_units" units_props = self._units_type.make_descriptors(units_name) return units_props + [ UnitsSpecPropertyDescriptor(base_name, self, units_props[0]) ]
[ "def", "make_descriptors", "(", "self", ",", "base_name", ")", ":", "units_name", "=", "base_name", "+", "\"_units\"", "units_props", "=", "self", ".", "_units_type", ".", "make_descriptors", "(", "units_name", ")", "return", "units_props", "+", "[", "UnitsSpecP...
Return a list of ``PropertyDescriptor`` instances to install on a class, in order to delegate attribute access to this property. Unlike simpler property types, ``UnitsSpec`` returns multiple descriptors to install. In particular, descriptors for the base property as well as the associated units property are returned. Args: name (str) : the name of the property these descriptors are for Returns: list[PropertyDescriptor] The descriptors returned are collected by the ``MetaHasProps`` metaclass and added to ``HasProps`` subclasses during class creation.
[ "Return", "a", "list", "of", "PropertyDescriptor", "instances", "to", "install", "on", "a", "class", "in", "order", "to", "delegate", "attribute", "access", "to", "this", "property", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/dataspec.py#L363-L382
30,256
bokeh/bokeh
bokeh/core/property/dataspec.py
ColorSpec.isconst
def isconst(cls, val): ''' Whether the value is a string color literal. Checks for a well-formed hexadecimal color value or a named color. Args: val (str) : the value to check Returns: True, if the value is a string color literal ''' return isinstance(val, string_types) and \ ((len(val) == 7 and val[0] == "#") or val in enums.NamedColor)
python
def isconst(cls, val): ''' Whether the value is a string color literal. Checks for a well-formed hexadecimal color value or a named color. Args: val (str) : the value to check Returns: True, if the value is a string color literal ''' return isinstance(val, string_types) and \ ((len(val) == 7 and val[0] == "#") or val in enums.NamedColor)
[ "def", "isconst", "(", "cls", ",", "val", ")", ":", "return", "isinstance", "(", "val", ",", "string_types", ")", "and", "(", "(", "len", "(", "val", ")", "==", "7", "and", "val", "[", "0", "]", "==", "\"#\"", ")", "or", "val", "in", "enums", "...
Whether the value is a string color literal. Checks for a well-formed hexadecimal color value or a named color. Args: val (str) : the value to check Returns: True, if the value is a string color literal
[ "Whether", "the", "value", "is", "a", "string", "color", "literal", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/dataspec.py#L553-L566
30,257
bokeh/bokeh
scripts/issues.py
save_object
def save_object(filename, obj): """Compresses and pickles given object to the given filename.""" logging.info('saving {}...'.format(filename)) try: with gzip.GzipFile(filename, 'wb') as f: f.write(pickle.dumps(obj, 1)) except Exception as e: logging.error('save failure: {}'.format(e)) raise
python
def save_object(filename, obj): """Compresses and pickles given object to the given filename.""" logging.info('saving {}...'.format(filename)) try: with gzip.GzipFile(filename, 'wb') as f: f.write(pickle.dumps(obj, 1)) except Exception as e: logging.error('save failure: {}'.format(e)) raise
[ "def", "save_object", "(", "filename", ",", "obj", ")", ":", "logging", ".", "info", "(", "'saving {}...'", ".", "format", "(", "filename", ")", ")", "try", ":", "with", "gzip", ".", "GzipFile", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "f",...
Compresses and pickles given object to the given filename.
[ "Compresses", "and", "pickles", "given", "object", "to", "the", "given", "filename", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L47-L55
30,258
bokeh/bokeh
scripts/issues.py
load_object
def load_object(filename): """Unpickles and decompresses the given filename and returns the created object.""" logging.info('loading {}...'.format(filename)) try: with gzip.GzipFile(filename, 'rb') as f: buf = '' while True: data = f.read() if data == '': break buf += data return pickle.loads(buf) except Exception as e: logging.error('load failure: {}'.format(e)) raise
python
def load_object(filename): """Unpickles and decompresses the given filename and returns the created object.""" logging.info('loading {}...'.format(filename)) try: with gzip.GzipFile(filename, 'rb') as f: buf = '' while True: data = f.read() if data == '': break buf += data return pickle.loads(buf) except Exception as e: logging.error('load failure: {}'.format(e)) raise
[ "def", "load_object", "(", "filename", ")", ":", "logging", ".", "info", "(", "'loading {}...'", ".", "format", "(", "filename", ")", ")", "try", ":", "with", "gzip", ".", "GzipFile", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "buf", "=", "''...
Unpickles and decompresses the given filename and returns the created object.
[ "Unpickles", "and", "decompresses", "the", "given", "filename", "and", "returns", "the", "created", "object", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L58-L72
30,259
bokeh/bokeh
scripts/issues.py
issue_section
def issue_section(issue): """Returns the section heading for the issue, or None if this issue should be ignored.""" labels = issue.get('labels', []) for label in labels: if not label['name'].startswith('type: '): continue if label['name'] in LOG_SECTION: return LOG_SECTION[label['name']] elif label['name'] in IGNORE_ISSUE_TYPE: return None else: logging.warning('unknown issue type: "{}" for: {}'.format(label['name'], issue_line(issue))) return None
python
def issue_section(issue): """Returns the section heading for the issue, or None if this issue should be ignored.""" labels = issue.get('labels', []) for label in labels: if not label['name'].startswith('type: '): continue if label['name'] in LOG_SECTION: return LOG_SECTION[label['name']] elif label['name'] in IGNORE_ISSUE_TYPE: return None else: logging.warning('unknown issue type: "{}" for: {}'.format(label['name'], issue_line(issue))) return None
[ "def", "issue_section", "(", "issue", ")", ":", "labels", "=", "issue", ".", "get", "(", "'labels'", ",", "[", "]", ")", "for", "label", "in", "labels", ":", "if", "not", "label", "[", "'name'", "]", ".", "startswith", "(", "'type: '", ")", ":", "c...
Returns the section heading for the issue, or None if this issue should be ignored.
[ "Returns", "the", "section", "heading", "for", "the", "issue", "or", "None", "if", "this", "issue", "should", "be", "ignored", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L92-L106
30,260
bokeh/bokeh
scripts/issues.py
issue_tags
def issue_tags(issue): """Returns list of tags for this issue.""" labels = issue.get('labels', []) return [label['name'].replace('tag: ', '') for label in labels if label['name'].startswith('tag: ')]
python
def issue_tags(issue): """Returns list of tags for this issue.""" labels = issue.get('labels', []) return [label['name'].replace('tag: ', '') for label in labels if label['name'].startswith('tag: ')]
[ "def", "issue_tags", "(", "issue", ")", ":", "labels", "=", "issue", ".", "get", "(", "'labels'", ",", "[", "]", ")", "return", "[", "label", "[", "'name'", "]", ".", "replace", "(", "'tag: '", ",", "''", ")", "for", "label", "in", "labels", "if", ...
Returns list of tags for this issue.
[ "Returns", "list", "of", "tags", "for", "this", "issue", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L109-L112
30,261
bokeh/bokeh
scripts/issues.py
closed_issue
def closed_issue(issue, after=None): """Returns True iff this issue was closed after given date. If after not given, only checks if issue is closed.""" if issue['state'] == 'closed': if after is None or parse_timestamp(issue['closed_at']) > after: return True return False
python
def closed_issue(issue, after=None): """Returns True iff this issue was closed after given date. If after not given, only checks if issue is closed.""" if issue['state'] == 'closed': if after is None or parse_timestamp(issue['closed_at']) > after: return True return False
[ "def", "closed_issue", "(", "issue", ",", "after", "=", "None", ")", ":", "if", "issue", "[", "'state'", "]", "==", "'closed'", ":", "if", "after", "is", "None", "or", "parse_timestamp", "(", "issue", "[", "'closed_at'", "]", ")", ">", "after", ":", ...
Returns True iff this issue was closed after given date. If after not given, only checks if issue is closed.
[ "Returns", "True", "iff", "this", "issue", "was", "closed", "after", "given", "date", ".", "If", "after", "not", "given", "only", "checks", "if", "issue", "is", "closed", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L115-L120
30,262
bokeh/bokeh
scripts/issues.py
relevent_issue
def relevent_issue(issue, after): """Returns True iff this issue is something we should show in the changelog.""" return (closed_issue(issue, after) and issue_completed(issue) and issue_section(issue))
python
def relevent_issue(issue, after): """Returns True iff this issue is something we should show in the changelog.""" return (closed_issue(issue, after) and issue_completed(issue) and issue_section(issue))
[ "def", "relevent_issue", "(", "issue", ",", "after", ")", ":", "return", "(", "closed_issue", "(", "issue", ",", "after", ")", "and", "issue_completed", "(", "issue", ")", "and", "issue_section", "(", "issue", ")", ")" ]
Returns True iff this issue is something we should show in the changelog.
[ "Returns", "True", "iff", "this", "issue", "is", "something", "we", "should", "show", "in", "the", "changelog", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L123-L127
30,263
bokeh/bokeh
scripts/issues.py
all_issues
def all_issues(issues): """Yields unique set of issues given a list of issues.""" logging.info('finding issues...') seen = set() for issue in issues: if issue['title'] not in seen: seen.add(issue['title']) yield issue
python
def all_issues(issues): """Yields unique set of issues given a list of issues.""" logging.info('finding issues...') seen = set() for issue in issues: if issue['title'] not in seen: seen.add(issue['title']) yield issue
[ "def", "all_issues", "(", "issues", ")", ":", "logging", ".", "info", "(", "'finding issues...'", ")", "seen", "=", "set", "(", ")", "for", "issue", "in", "issues", ":", "if", "issue", "[", "'title'", "]", "not", "in", "seen", ":", "seen", ".", "add"...
Yields unique set of issues given a list of issues.
[ "Yields", "unique", "set", "of", "issues", "given", "a", "list", "of", "issues", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L150-L157
30,264
bokeh/bokeh
scripts/issues.py
get_issues_url
def get_issues_url(page, after): """Returns github API URL for querying tags.""" template = '{base_url}/{owner}/{repo}/issues?state=closed&per_page=100&page={page}&since={after}' return template.format(page=page, after=after.isoformat(), **API_PARAMS)
python
def get_issues_url(page, after): """Returns github API URL for querying tags.""" template = '{base_url}/{owner}/{repo}/issues?state=closed&per_page=100&page={page}&since={after}' return template.format(page=page, after=after.isoformat(), **API_PARAMS)
[ "def", "get_issues_url", "(", "page", ",", "after", ")", ":", "template", "=", "'{base_url}/{owner}/{repo}/issues?state=closed&per_page=100&page={page}&since={after}'", "return", "template", ".", "format", "(", "page", "=", "page", ",", "after", "=", "after", ".", "is...
Returns github API URL for querying tags.
[ "Returns", "github", "API", "URL", "for", "querying", "tags", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L168-L171
30,265
bokeh/bokeh
scripts/issues.py
parse_timestamp
def parse_timestamp(timestamp): """Parse ISO8601 timestamps given by github API.""" dt = dateutil.parser.parse(timestamp) return dt.astimezone(dateutil.tz.tzutc())
python
def parse_timestamp(timestamp): """Parse ISO8601 timestamps given by github API.""" dt = dateutil.parser.parse(timestamp) return dt.astimezone(dateutil.tz.tzutc())
[ "def", "parse_timestamp", "(", "timestamp", ")", ":", "dt", "=", "dateutil", ".", "parser", ".", "parse", "(", "timestamp", ")", "return", "dt", ".", "astimezone", "(", "dateutil", ".", "tz", ".", "tzutc", "(", ")", ")" ]
Parse ISO8601 timestamps given by github API.
[ "Parse", "ISO8601", "timestamps", "given", "by", "github", "API", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L179-L182
30,266
bokeh/bokeh
scripts/issues.py
read_url
def read_url(url): """Reads given URL as JSON and returns data as loaded python object.""" logging.debug('reading {url} ...'.format(url=url)) token = os.environ.get("BOKEH_GITHUB_API_TOKEN") headers = {} if token: headers['Authorization'] = 'token %s' % token request = Request(url, headers=headers) response = urlopen(request).read() return json.loads(response.decode("UTF-8"))
python
def read_url(url): """Reads given URL as JSON and returns data as loaded python object.""" logging.debug('reading {url} ...'.format(url=url)) token = os.environ.get("BOKEH_GITHUB_API_TOKEN") headers = {} if token: headers['Authorization'] = 'token %s' % token request = Request(url, headers=headers) response = urlopen(request).read() return json.loads(response.decode("UTF-8"))
[ "def", "read_url", "(", "url", ")", ":", "logging", ".", "debug", "(", "'reading {url} ...'", ".", "format", "(", "url", "=", "url", ")", ")", "token", "=", "os", ".", "environ", ".", "get", "(", "\"BOKEH_GITHUB_API_TOKEN\"", ")", "headers", "=", "{", ...
Reads given URL as JSON and returns data as loaded python object.
[ "Reads", "given", "URL", "as", "JSON", "and", "returns", "data", "as", "loaded", "python", "object", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L185-L194
30,267
bokeh/bokeh
scripts/issues.py
query_all_issues
def query_all_issues(after): """Hits the github API for all closed issues after the given date, returns the data.""" page = count(1) data = [] while True: page_data = query_issues(next(page), after) if not page_data: break data.extend(page_data) return data
python
def query_all_issues(after): """Hits the github API for all closed issues after the given date, returns the data.""" page = count(1) data = [] while True: page_data = query_issues(next(page), after) if not page_data: break data.extend(page_data) return data
[ "def", "query_all_issues", "(", "after", ")", ":", "page", "=", "count", "(", "1", ")", "data", "=", "[", "]", "while", "True", ":", "page_data", "=", "query_issues", "(", "next", "(", "page", ")", ",", "after", ")", "if", "not", "page_data", ":", ...
Hits the github API for all closed issues after the given date, returns the data.
[ "Hits", "the", "github", "API", "for", "all", "closed", "issues", "after", "the", "given", "date", "returns", "the", "data", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L207-L216
30,268
bokeh/bokeh
scripts/issues.py
dateof
def dateof(tag_name, tags): """Given a list of tags, returns the datetime of the tag with the given name; Otherwise None.""" for tag in tags: if tag['name'] == tag_name: commit = read_url(tag['commit']['url']) return parse_timestamp(commit['commit']['committer']['date']) return None
python
def dateof(tag_name, tags): """Given a list of tags, returns the datetime of the tag with the given name; Otherwise None.""" for tag in tags: if tag['name'] == tag_name: commit = read_url(tag['commit']['url']) return parse_timestamp(commit['commit']['committer']['date']) return None
[ "def", "dateof", "(", "tag_name", ",", "tags", ")", ":", "for", "tag", "in", "tags", ":", "if", "tag", "[", "'name'", "]", "==", "tag_name", ":", "commit", "=", "read_url", "(", "tag", "[", "'commit'", "]", "[", "'url'", "]", ")", "return", "parse_...
Given a list of tags, returns the datetime of the tag with the given name; Otherwise None.
[ "Given", "a", "list", "of", "tags", "returns", "the", "datetime", "of", "the", "tag", "with", "the", "given", "name", ";", "Otherwise", "None", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L219-L225
30,269
bokeh/bokeh
scripts/issues.py
get_data
def get_data(query_func, load_data=False, save_data=False): """Gets data from query_func, optionally saving that data to a file; or loads data from a file.""" if hasattr(query_func, '__name__'): func_name = query_func.__name__ elif hasattr(query_func, 'func'): func_name = query_func.func.__name__ pickle_file = '{}.pickle'.format(func_name) if load_data: data = load_object(pickle_file) else: data = query_func() if save_data: save_object(pickle_file, data) return data
python
def get_data(query_func, load_data=False, save_data=False): """Gets data from query_func, optionally saving that data to a file; or loads data from a file.""" if hasattr(query_func, '__name__'): func_name = query_func.__name__ elif hasattr(query_func, 'func'): func_name = query_func.func.__name__ pickle_file = '{}.pickle'.format(func_name) if load_data: data = load_object(pickle_file) else: data = query_func() if save_data: save_object(pickle_file, data) return data
[ "def", "get_data", "(", "query_func", ",", "load_data", "=", "False", ",", "save_data", "=", "False", ")", ":", "if", "hasattr", "(", "query_func", ",", "'__name__'", ")", ":", "func_name", "=", "query_func", ".", "__name__", "elif", "hasattr", "(", "query...
Gets data from query_func, optionally saving that data to a file; or loads data from a file.
[ "Gets", "data", "from", "query_func", "optionally", "saving", "that", "data", "to", "a", "file", ";", "or", "loads", "data", "from", "a", "file", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L228-L243
30,270
bokeh/bokeh
scripts/issues.py
check_issues
def check_issues(issues, after=None): """Checks issues for BEP 1 compliance.""" issues = closed_issues(issues, after) if after else all_issues(issues) issues = sorted(issues, key=ISSUES_SORT_KEY) have_warnings = False for section, issue_group in groupby(issues, key=ISSUES_BY_SECTION): for issue in issue_group: have_warnings |= check_issue(issue, after) return have_warnings
python
def check_issues(issues, after=None): """Checks issues for BEP 1 compliance.""" issues = closed_issues(issues, after) if after else all_issues(issues) issues = sorted(issues, key=ISSUES_SORT_KEY) have_warnings = False for section, issue_group in groupby(issues, key=ISSUES_BY_SECTION): for issue in issue_group: have_warnings |= check_issue(issue, after) return have_warnings
[ "def", "check_issues", "(", "issues", ",", "after", "=", "None", ")", ":", "issues", "=", "closed_issues", "(", "issues", ",", "after", ")", "if", "after", "else", "all_issues", "(", "issues", ")", "issues", "=", "sorted", "(", "issues", ",", "key", "=...
Checks issues for BEP 1 compliance.
[ "Checks", "issues", "for", "BEP", "1", "compliance", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L270-L281
30,271
bokeh/bokeh
scripts/issues.py
issue_line
def issue_line(issue): """Returns log line for given issue.""" template = '#{number} {tags}{title}' tags = issue_tags(issue) params = { 'title': issue['title'].capitalize().rstrip('.'), 'number': issue['number'], 'tags': ' '.join('[{}]'.format(tag) for tag in tags) + (' ' if tags else '') } return template.format(**params)
python
def issue_line(issue): """Returns log line for given issue.""" template = '#{number} {tags}{title}' tags = issue_tags(issue) params = { 'title': issue['title'].capitalize().rstrip('.'), 'number': issue['number'], 'tags': ' '.join('[{}]'.format(tag) for tag in tags) + (' ' if tags else '') } return template.format(**params)
[ "def", "issue_line", "(", "issue", ")", ":", "template", "=", "'#{number} {tags}{title}'", "tags", "=", "issue_tags", "(", "issue", ")", "params", "=", "{", "'title'", ":", "issue", "[", "'title'", "]", ".", "capitalize", "(", ")", ".", "rstrip", "(", "'...
Returns log line for given issue.
[ "Returns", "log", "line", "for", "given", "issue", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L286-L295
30,272
bokeh/bokeh
scripts/issues.py
generate_changelog
def generate_changelog(issues, after, heading, rtag=False): """Prints out changelog.""" relevent = relevant_issues(issues, after) relevent = sorted(relevent, key=ISSUES_BY_SECTION) def write(func, endofline="", append=""): func(heading + '\n' + '-' * 20 + endofline) for section, issue_group in groupby(relevent, key=ISSUES_BY_SECTION): func(' * {}:'.format(section) + endofline) for issue in reversed(list(issue_group)): func(' - {}'.format(issue_line(issue)) + endofline) func(endofline + append) if rtag is not False: with open("../CHANGELOG", "r+") as f: content = f.read() f.seek(0) write(f.write, '\n', content) else: write(print)
python
def generate_changelog(issues, after, heading, rtag=False): """Prints out changelog.""" relevent = relevant_issues(issues, after) relevent = sorted(relevent, key=ISSUES_BY_SECTION) def write(func, endofline="", append=""): func(heading + '\n' + '-' * 20 + endofline) for section, issue_group in groupby(relevent, key=ISSUES_BY_SECTION): func(' * {}:'.format(section) + endofline) for issue in reversed(list(issue_group)): func(' - {}'.format(issue_line(issue)) + endofline) func(endofline + append) if rtag is not False: with open("../CHANGELOG", "r+") as f: content = f.read() f.seek(0) write(f.write, '\n', content) else: write(print)
[ "def", "generate_changelog", "(", "issues", ",", "after", ",", "heading", ",", "rtag", "=", "False", ")", ":", "relevent", "=", "relevant_issues", "(", "issues", ",", "after", ")", "relevent", "=", "sorted", "(", "relevent", ",", "key", "=", "ISSUES_BY_SEC...
Prints out changelog.
[ "Prints", "out", "changelog", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L298-L317
30,273
bokeh/bokeh
bokeh/colors/rgb.py
RGB.to_css
def to_css(self): ''' Generate the CSS representation of this RGB color. Returns: str, ``"rgb(...)"`` or ``"rgba(...)"`` ''' if self.a == 1.0: return "rgb(%d, %d, %d)" % (self.r, self.g, self.b) else: return "rgba(%d, %d, %d, %s)" % (self.r, self.g, self.b, self.a)
python
def to_css(self): ''' Generate the CSS representation of this RGB color. Returns: str, ``"rgb(...)"`` or ``"rgba(...)"`` ''' if self.a == 1.0: return "rgb(%d, %d, %d)" % (self.r, self.g, self.b) else: return "rgba(%d, %d, %d, %s)" % (self.r, self.g, self.b, self.a)
[ "def", "to_css", "(", "self", ")", ":", "if", "self", ".", "a", "==", "1.0", ":", "return", "\"rgb(%d, %d, %d)\"", "%", "(", "self", ".", "r", ",", "self", ".", "g", ",", "self", ".", "b", ")", "else", ":", "return", "\"rgba(%d, %d, %d, %s)\"", "%", ...
Generate the CSS representation of this RGB color. Returns: str, ``"rgb(...)"`` or ``"rgba(...)"``
[ "Generate", "the", "CSS", "representation", "of", "this", "RGB", "color", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/colors/rgb.py#L110-L120
30,274
bokeh/bokeh
bokeh/colors/rgb.py
RGB.to_hsl
def to_hsl(self): ''' Return a corresponding HSL color for this RGB color. Returns: :class:`~bokeh.colors.rgb.RGB` ''' from .hsl import HSL # prevent circular import h, l, s = colorsys.rgb_to_hls(float(self.r)/255, float(self.g)/255, float(self.b)/255) return HSL(round(h*360), s, l, self.a)
python
def to_hsl(self): ''' Return a corresponding HSL color for this RGB color. Returns: :class:`~bokeh.colors.rgb.RGB` ''' from .hsl import HSL # prevent circular import h, l, s = colorsys.rgb_to_hls(float(self.r)/255, float(self.g)/255, float(self.b)/255) return HSL(round(h*360), s, l, self.a)
[ "def", "to_hsl", "(", "self", ")", ":", "from", ".", "hsl", "import", "HSL", "# prevent circular import", "h", ",", "l", ",", "s", "=", "colorsys", ".", "rgb_to_hls", "(", "float", "(", "self", ".", "r", ")", "/", "255", ",", "float", "(", "self", ...
Return a corresponding HSL color for this RGB color. Returns: :class:`~bokeh.colors.rgb.RGB`
[ "Return", "a", "corresponding", "HSL", "color", "for", "this", "RGB", "color", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/colors/rgb.py#L134-L143
30,275
bokeh/bokeh
bokeh/util/tornado.py
yield_for_all_futures
def yield_for_all_futures(result): """ Converts result into a Future by collapsing any futures inside result. If result is a Future we yield until it's done, then if the value inside the Future is another Future we yield until it's done as well, and so on. """ while True: # This is needed for Tornado >= 4.5 where convert_yielded will no # longer raise BadYieldError on None if result is None: break try: future = gen.convert_yielded(result) except gen.BadYieldError: # result is not a yieldable thing, we are done break else: result = yield future raise gen.Return(result)
python
def yield_for_all_futures(result): """ Converts result into a Future by collapsing any futures inside result. If result is a Future we yield until it's done, then if the value inside the Future is another Future we yield until it's done as well, and so on. """ while True: # This is needed for Tornado >= 4.5 where convert_yielded will no # longer raise BadYieldError on None if result is None: break try: future = gen.convert_yielded(result) except gen.BadYieldError: # result is not a yieldable thing, we are done break else: result = yield future raise gen.Return(result)
[ "def", "yield_for_all_futures", "(", "result", ")", ":", "while", "True", ":", "# This is needed for Tornado >= 4.5 where convert_yielded will no", "# longer raise BadYieldError on None", "if", "result", "is", "None", ":", "break", "try", ":", "future", "=", "gen", ".", ...
Converts result into a Future by collapsing any futures inside result. If result is a Future we yield until it's done, then if the value inside the Future is another Future we yield until it's done as well, and so on.
[ "Converts", "result", "into", "a", "Future", "by", "collapsing", "any", "futures", "inside", "result", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/tornado.py#L49-L70
30,276
bokeh/bokeh
bokeh/util/tornado.py
_CallbackGroup.remove_all_callbacks
def remove_all_callbacks(self): """ Removes all registered callbacks.""" for cb_id in list(self._next_tick_callback_removers.keys()): self.remove_next_tick_callback(cb_id) for cb_id in list(self._timeout_callback_removers.keys()): self.remove_timeout_callback(cb_id) for cb_id in list(self._periodic_callback_removers.keys()): self.remove_periodic_callback(cb_id)
python
def remove_all_callbacks(self): """ Removes all registered callbacks.""" for cb_id in list(self._next_tick_callback_removers.keys()): self.remove_next_tick_callback(cb_id) for cb_id in list(self._timeout_callback_removers.keys()): self.remove_timeout_callback(cb_id) for cb_id in list(self._periodic_callback_removers.keys()): self.remove_periodic_callback(cb_id)
[ "def", "remove_all_callbacks", "(", "self", ")", ":", "for", "cb_id", "in", "list", "(", "self", ".", "_next_tick_callback_removers", ".", "keys", "(", ")", ")", ":", "self", ".", "remove_next_tick_callback", "(", "cb_id", ")", "for", "cb_id", "in", "list", ...
Removes all registered callbacks.
[ "Removes", "all", "registered", "callbacks", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/tornado.py#L164-L171
30,277
bokeh/bokeh
bokeh/util/tornado.py
_CallbackGroup.add_next_tick_callback
def add_next_tick_callback(self, callback, callback_id=None): """ Adds a callback to be run on the next tick. Returns an ID that can be used with remove_next_tick_callback.""" def wrapper(*args, **kwargs): # this 'removed' flag is a hack because Tornado has no way # to remove a "next tick" callback added with # IOLoop.add_callback. So instead we make our wrapper skip # invoking the callback. if not wrapper.removed: self.remove_next_tick_callback(callback_id) return callback(*args, **kwargs) else: return None wrapper.removed = False def remover(): wrapper.removed = True callback_id = self._assign_remover(callback, callback_id, self._next_tick_callback_removers, remover) self._loop.add_callback(wrapper) return callback_id
python
def add_next_tick_callback(self, callback, callback_id=None): """ Adds a callback to be run on the next tick. Returns an ID that can be used with remove_next_tick_callback.""" def wrapper(*args, **kwargs): # this 'removed' flag is a hack because Tornado has no way # to remove a "next tick" callback added with # IOLoop.add_callback. So instead we make our wrapper skip # invoking the callback. if not wrapper.removed: self.remove_next_tick_callback(callback_id) return callback(*args, **kwargs) else: return None wrapper.removed = False def remover(): wrapper.removed = True callback_id = self._assign_remover(callback, callback_id, self._next_tick_callback_removers, remover) self._loop.add_callback(wrapper) return callback_id
[ "def", "add_next_tick_callback", "(", "self", ",", "callback", ",", "callback_id", "=", "None", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# this 'removed' flag is a hack because Tornado has no way", "# to remove a \"next tick\" ...
Adds a callback to be run on the next tick. Returns an ID that can be used with remove_next_tick_callback.
[ "Adds", "a", "callback", "to", "be", "run", "on", "the", "next", "tick", ".", "Returns", "an", "ID", "that", "can", "be", "used", "with", "remove_next_tick_callback", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/tornado.py#L207-L228
30,278
bokeh/bokeh
bokeh/util/tornado.py
_CallbackGroup.add_timeout_callback
def add_timeout_callback(self, callback, timeout_milliseconds, callback_id=None): """ Adds a callback to be run once after timeout_milliseconds. Returns an ID that can be used with remove_timeout_callback.""" def wrapper(*args, **kwargs): self.remove_timeout_callback(callback_id) return callback(*args, **kwargs) handle = None def remover(): if handle is not None: self._loop.remove_timeout(handle) callback_id = self._assign_remover(callback, callback_id, self._timeout_callback_removers, remover) handle = self._loop.call_later(timeout_milliseconds / 1000.0, wrapper) return callback_id
python
def add_timeout_callback(self, callback, timeout_milliseconds, callback_id=None): """ Adds a callback to be run once after timeout_milliseconds. Returns an ID that can be used with remove_timeout_callback.""" def wrapper(*args, **kwargs): self.remove_timeout_callback(callback_id) return callback(*args, **kwargs) handle = None def remover(): if handle is not None: self._loop.remove_timeout(handle) callback_id = self._assign_remover(callback, callback_id, self._timeout_callback_removers, remover) handle = self._loop.call_later(timeout_milliseconds / 1000.0, wrapper) return callback_id
[ "def", "add_timeout_callback", "(", "self", ",", "callback", ",", "timeout_milliseconds", ",", "callback_id", "=", "None", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "remove_timeout_callback", "(", "callback...
Adds a callback to be run once after timeout_milliseconds. Returns an ID that can be used with remove_timeout_callback.
[ "Adds", "a", "callback", "to", "be", "run", "once", "after", "timeout_milliseconds", ".", "Returns", "an", "ID", "that", "can", "be", "used", "with", "remove_timeout_callback", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/tornado.py#L234-L249
30,279
bokeh/bokeh
bokeh/util/tornado.py
_CallbackGroup.add_periodic_callback
def add_periodic_callback(self, callback, period_milliseconds, callback_id=None): """ Adds a callback to be run every period_milliseconds until it is removed. Returns an ID that can be used with remove_periodic_callback.""" cb = _AsyncPeriodic(callback, period_milliseconds, io_loop=self._loop) callback_id = self._assign_remover(callback, callback_id, self._periodic_callback_removers, cb.stop) cb.start() return callback_id
python
def add_periodic_callback(self, callback, period_milliseconds, callback_id=None): """ Adds a callback to be run every period_milliseconds until it is removed. Returns an ID that can be used with remove_periodic_callback.""" cb = _AsyncPeriodic(callback, period_milliseconds, io_loop=self._loop) callback_id = self._assign_remover(callback, callback_id, self._periodic_callback_removers, cb.stop) cb.start() return callback_id
[ "def", "add_periodic_callback", "(", "self", ",", "callback", ",", "period_milliseconds", ",", "callback_id", "=", "None", ")", ":", "cb", "=", "_AsyncPeriodic", "(", "callback", ",", "period_milliseconds", ",", "io_loop", "=", "self", ".", "_loop", ")", "call...
Adds a callback to be run every period_milliseconds until it is removed. Returns an ID that can be used with remove_periodic_callback.
[ "Adds", "a", "callback", "to", "be", "run", "every", "period_milliseconds", "until", "it", "is", "removed", ".", "Returns", "an", "ID", "that", "can", "be", "used", "with", "remove_periodic_callback", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/tornado.py#L255-L262
30,280
bokeh/bokeh
bokeh/sphinxext/bokeh_github.py
bokeh_tree
def bokeh_tree(name, rawtext, text, lineno, inliner, options=None, content=None): ''' Link to a URL in the Bokeh GitHub tree, pointing to appropriate tags for releases, or to master otherwise. The link text is simply the URL path supplied, so typical usage might look like: .. code-block:: none All of the examples are located in the :bokeh-tree:`examples` subdirectory of your Bokeh checkout. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. ''' app = inliner.document.settings.env.app tag = app.env.config['version'] if '-' in tag: tag = 'master' url = "%s/tree/%s/%s" % (_BOKEH_GH, tag, text) options = options or {} set_classes(options) node = nodes.reference(rawtext, text, refuri=url, **options) return [node], []
python
def bokeh_tree(name, rawtext, text, lineno, inliner, options=None, content=None): ''' Link to a URL in the Bokeh GitHub tree, pointing to appropriate tags for releases, or to master otherwise. The link text is simply the URL path supplied, so typical usage might look like: .. code-block:: none All of the examples are located in the :bokeh-tree:`examples` subdirectory of your Bokeh checkout. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. ''' app = inliner.document.settings.env.app tag = app.env.config['version'] if '-' in tag: tag = 'master' url = "%s/tree/%s/%s" % (_BOKEH_GH, tag, text) options = options or {} set_classes(options) node = nodes.reference(rawtext, text, refuri=url, **options) return [node], []
[ "def", "bokeh_tree", "(", "name", ",", "rawtext", ",", "text", ",", "lineno", ",", "inliner", ",", "options", "=", "None", ",", "content", "=", "None", ")", ":", "app", "=", "inliner", ".", "document", ".", "settings", ".", "env", ".", "app", "tag", ...
Link to a URL in the Bokeh GitHub tree, pointing to appropriate tags for releases, or to master otherwise. The link text is simply the URL path supplied, so typical usage might look like: .. code-block:: none All of the examples are located in the :bokeh-tree:`examples` subdirectory of your Bokeh checkout. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty.
[ "Link", "to", "a", "URL", "in", "the", "Bokeh", "GitHub", "tree", "pointing", "to", "appropriate", "tags", "for", "releases", "or", "to", "master", "otherwise", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/sphinxext/bokeh_github.py#L135-L162
30,281
bokeh/bokeh
bokeh/sphinxext/bokeh_github.py
_make_gh_link_node
def _make_gh_link_node(app, rawtext, role, kind, api_type, id, options=None): ''' Return a link to a Bokeh Github resource. Args: app (Sphinx app) : current app rawtext (str) : text being replaced with link node. role (str) : role name kind (str) : resource type (issue, pull, etc.) api_type (str) : type for api link id : (str) : id of the resource to link to options (dict) : options dictionary passed to role function ''' url = "%s/%s/%s" % (_BOKEH_GH, api_type, id) options = options or {} set_classes(options) node = nodes.reference( rawtext, kind + utils.unescape(id), refuri=url, **options) return node
python
def _make_gh_link_node(app, rawtext, role, kind, api_type, id, options=None): ''' Return a link to a Bokeh Github resource. Args: app (Sphinx app) : current app rawtext (str) : text being replaced with link node. role (str) : role name kind (str) : resource type (issue, pull, etc.) api_type (str) : type for api link id : (str) : id of the resource to link to options (dict) : options dictionary passed to role function ''' url = "%s/%s/%s" % (_BOKEH_GH, api_type, id) options = options or {} set_classes(options) node = nodes.reference( rawtext, kind + utils.unescape(id), refuri=url, **options) return node
[ "def", "_make_gh_link_node", "(", "app", ",", "rawtext", ",", "role", ",", "kind", ",", "api_type", ",", "id", ",", "options", "=", "None", ")", ":", "url", "=", "\"%s/%s/%s\"", "%", "(", "_BOKEH_GH", ",", "api_type", ",", "id", ")", "options", "=", ...
Return a link to a Bokeh Github resource. Args: app (Sphinx app) : current app rawtext (str) : text being replaced with link node. role (str) : role name kind (str) : resource type (issue, pull, etc.) api_type (str) : type for api link id : (str) : id of the resource to link to options (dict) : options dictionary passed to role function
[ "Return", "a", "link", "to", "a", "Bokeh", "Github", "resource", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/sphinxext/bokeh_github.py#L177-L195
30,282
bokeh/bokeh
_setup_support.py
show_bokehjs
def show_bokehjs(bokehjs_action, develop=False): ''' Print a useful report after setuptools output describing where and how BokehJS is installed. Args: bokehjs_action (str) : one of 'built', 'installed', or 'packaged' how (or if) BokehJS was installed into the python source tree develop (bool, optional) : whether the command was for "develop" mode (default: False) Returns: None ''' print() if develop: print("Installed Bokeh for DEVELOPMENT:") else: print("Installed Bokeh:") if bokehjs_action in ['built', 'installed']: print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if bokehjs_action=='built' else bright(yellow("PREVIOUSLY")))) else: print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED"))) print()
python
def show_bokehjs(bokehjs_action, develop=False): ''' Print a useful report after setuptools output describing where and how BokehJS is installed. Args: bokehjs_action (str) : one of 'built', 'installed', or 'packaged' how (or if) BokehJS was installed into the python source tree develop (bool, optional) : whether the command was for "develop" mode (default: False) Returns: None ''' print() if develop: print("Installed Bokeh for DEVELOPMENT:") else: print("Installed Bokeh:") if bokehjs_action in ['built', 'installed']: print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if bokehjs_action=='built' else bright(yellow("PREVIOUSLY")))) else: print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED"))) print()
[ "def", "show_bokehjs", "(", "bokehjs_action", ",", "develop", "=", "False", ")", ":", "print", "(", ")", "if", "develop", ":", "print", "(", "\"Installed Bokeh for DEVELOPMENT:\"", ")", "else", ":", "print", "(", "\"Installed Bokeh:\"", ")", "if", "bokehjs_actio...
Print a useful report after setuptools output describing where and how BokehJS is installed. Args: bokehjs_action (str) : one of 'built', 'installed', or 'packaged' how (or if) BokehJS was installed into the python source tree develop (bool, optional) : whether the command was for "develop" mode (default: False) Returns: None
[ "Print", "a", "useful", "report", "after", "setuptools", "output", "describing", "where", "and", "how", "BokehJS", "is", "installed", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/_setup_support.py#L50-L74
30,283
bokeh/bokeh
_setup_support.py
show_help
def show_help(bokehjs_action): ''' Print information about extra Bokeh-specific command line options. Args: bokehjs_action (str) : one of 'built', 'installed', or 'packaged' how (or if) BokehJS was installed into the python source tree Returns: None ''' print() if bokehjs_action in ['built', 'installed']: print("Bokeh-specific options available with 'install' or 'develop':") print() print(" --build-js build and install a fresh BokehJS") print(" --install-js install only last previously built BokehJS") else: print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'") print() print("No extra Bokeh-specific options are available.") print()
python
def show_help(bokehjs_action): ''' Print information about extra Bokeh-specific command line options. Args: bokehjs_action (str) : one of 'built', 'installed', or 'packaged' how (or if) BokehJS was installed into the python source tree Returns: None ''' print() if bokehjs_action in ['built', 'installed']: print("Bokeh-specific options available with 'install' or 'develop':") print() print(" --build-js build and install a fresh BokehJS") print(" --install-js install only last previously built BokehJS") else: print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'") print() print("No extra Bokeh-specific options are available.") print()
[ "def", "show_help", "(", "bokehjs_action", ")", ":", "print", "(", ")", "if", "bokehjs_action", "in", "[", "'built'", ",", "'installed'", "]", ":", "print", "(", "\"Bokeh-specific options available with 'install' or 'develop':\"", ")", "print", "(", ")", "print", ...
Print information about extra Bokeh-specific command line options. Args: bokehjs_action (str) : one of 'built', 'installed', or 'packaged' how (or if) BokehJS was installed into the python source tree Returns: None
[ "Print", "information", "about", "extra", "Bokeh", "-", "specific", "command", "line", "options", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/_setup_support.py#L76-L97
30,284
bokeh/bokeh
_setup_support.py
fixup_building_sdist
def fixup_building_sdist(): ''' Check for 'sdist' and ensure we always build BokehJS when packaging Source distributions do not ship with BokehJS source code, but must ship with a pre-built BokehJS library. This function modifies ``sys.argv`` as necessary so that ``--build-js`` IS present, and ``--install-js` is NOT. Returns: None ''' if "sdist" in sys.argv: if "--install-js" in sys.argv: print("Removing '--install-js' incompatible with 'sdist'") sys.argv.remove('--install-js') if "--build-js" not in sys.argv: print("Adding '--build-js' required for 'sdist'") sys.argv.append('--build-js')
python
def fixup_building_sdist(): ''' Check for 'sdist' and ensure we always build BokehJS when packaging Source distributions do not ship with BokehJS source code, but must ship with a pre-built BokehJS library. This function modifies ``sys.argv`` as necessary so that ``--build-js`` IS present, and ``--install-js` is NOT. Returns: None ''' if "sdist" in sys.argv: if "--install-js" in sys.argv: print("Removing '--install-js' incompatible with 'sdist'") sys.argv.remove('--install-js') if "--build-js" not in sys.argv: print("Adding '--build-js' required for 'sdist'") sys.argv.append('--build-js')
[ "def", "fixup_building_sdist", "(", ")", ":", "if", "\"sdist\"", "in", "sys", ".", "argv", ":", "if", "\"--install-js\"", "in", "sys", ".", "argv", ":", "print", "(", "\"Removing '--install-js' incompatible with 'sdist'\"", ")", "sys", ".", "argv", ".", "remove"...
Check for 'sdist' and ensure we always build BokehJS when packaging Source distributions do not ship with BokehJS source code, but must ship with a pre-built BokehJS library. This function modifies ``sys.argv`` as necessary so that ``--build-js`` IS present, and ``--install-js` is NOT. Returns: None
[ "Check", "for", "sdist", "and", "ensure", "we", "always", "build", "BokehJS", "when", "packaging" ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/_setup_support.py#L157-L174
30,285
bokeh/bokeh
_setup_support.py
fixup_for_packaged
def fixup_for_packaged(): ''' If we are installing FROM an sdist, then a pre-built BokehJS is already installed in the python source tree. The command line options ``--build-js`` or ``--install-js`` are removed from ``sys.argv``, with a warning. Also adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is already packaged. Returns: None ''' if exists(join(ROOT, 'PKG-INFO')): if "--build-js" in sys.argv or "--install-js" in sys.argv: print(SDIST_BUILD_WARNING) if "--build-js" in sys.argv: sys.argv.remove('--build-js') if "--install-js" in sys.argv: sys.argv.remove('--install-js') if "--existing-js" not in sys.argv: sys.argv.append('--existing-js')
python
def fixup_for_packaged(): ''' If we are installing FROM an sdist, then a pre-built BokehJS is already installed in the python source tree. The command line options ``--build-js`` or ``--install-js`` are removed from ``sys.argv``, with a warning. Also adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is already packaged. Returns: None ''' if exists(join(ROOT, 'PKG-INFO')): if "--build-js" in sys.argv or "--install-js" in sys.argv: print(SDIST_BUILD_WARNING) if "--build-js" in sys.argv: sys.argv.remove('--build-js') if "--install-js" in sys.argv: sys.argv.remove('--install-js') if "--existing-js" not in sys.argv: sys.argv.append('--existing-js')
[ "def", "fixup_for_packaged", "(", ")", ":", "if", "exists", "(", "join", "(", "ROOT", ",", "'PKG-INFO'", ")", ")", ":", "if", "\"--build-js\"", "in", "sys", ".", "argv", "or", "\"--install-js\"", "in", "sys", ".", "argv", ":", "print", "(", "SDIST_BUILD_...
If we are installing FROM an sdist, then a pre-built BokehJS is already installed in the python source tree. The command line options ``--build-js`` or ``--install-js`` are removed from ``sys.argv``, with a warning. Also adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is already packaged. Returns: None
[ "If", "we", "are", "installing", "FROM", "an", "sdist", "then", "a", "pre", "-", "built", "BokehJS", "is", "already", "installed", "in", "the", "python", "source", "tree", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/_setup_support.py#L176-L198
30,286
bokeh/bokeh
_setup_support.py
get_cmdclass
def get_cmdclass(): ''' A ``cmdclass`` that works around a setuptools deficiency. There is no need to build wheels when installing a package, however some versions of setuptools seem to mandate this. This is a hacky workaround that modifies the ``cmdclass`` returned by versioneer so that not having wheel installed is not a fatal error. ''' cmdclass = versioneer.get_cmdclass() try: from wheel.bdist_wheel import bdist_wheel except ImportError: # pip is not claiming for bdist_wheel when wheel is not installed bdist_wheel = None if bdist_wheel is not None: cmdclass["bdist_wheel"] = bdist_wheel return cmdclass
python
def get_cmdclass(): ''' A ``cmdclass`` that works around a setuptools deficiency. There is no need to build wheels when installing a package, however some versions of setuptools seem to mandate this. This is a hacky workaround that modifies the ``cmdclass`` returned by versioneer so that not having wheel installed is not a fatal error. ''' cmdclass = versioneer.get_cmdclass() try: from wheel.bdist_wheel import bdist_wheel except ImportError: # pip is not claiming for bdist_wheel when wheel is not installed bdist_wheel = None if bdist_wheel is not None: cmdclass["bdist_wheel"] = bdist_wheel return cmdclass
[ "def", "get_cmdclass", "(", ")", ":", "cmdclass", "=", "versioneer", ".", "get_cmdclass", "(", ")", "try", ":", "from", "wheel", ".", "bdist_wheel", "import", "bdist_wheel", "except", "ImportError", ":", "# pip is not claiming for bdist_wheel when wheel is not installed...
A ``cmdclass`` that works around a setuptools deficiency. There is no need to build wheels when installing a package, however some versions of setuptools seem to mandate this. This is a hacky workaround that modifies the ``cmdclass`` returned by versioneer so that not having wheel installed is not a fatal error.
[ "A", "cmdclass", "that", "works", "around", "a", "setuptools", "deficiency", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/_setup_support.py#L203-L223
30,287
bokeh/bokeh
_setup_support.py
jsbuild_prompt
def jsbuild_prompt(): ''' Prompt users whether to build a new BokehJS or install an existing one. Returns: bool : True, if a new build is requested, False otherwise ''' print(BOKEHJS_BUILD_PROMPT) mapping = {"1": True, "2": False} value = input("Choice? ") while value not in mapping: print("Input '%s' not understood. Valid choices: 1, 2\n" % value) value = input("Choice? ") return mapping[value]
python
def jsbuild_prompt(): ''' Prompt users whether to build a new BokehJS or install an existing one. Returns: bool : True, if a new build is requested, False otherwise ''' print(BOKEHJS_BUILD_PROMPT) mapping = {"1": True, "2": False} value = input("Choice? ") while value not in mapping: print("Input '%s' not understood. Valid choices: 1, 2\n" % value) value = input("Choice? ") return mapping[value]
[ "def", "jsbuild_prompt", "(", ")", ":", "print", "(", "BOKEHJS_BUILD_PROMPT", ")", "mapping", "=", "{", "\"1\"", ":", "True", ",", "\"2\"", ":", "False", "}", "value", "=", "input", "(", "\"Choice? \"", ")", "while", "value", "not", "in", "mapping", ":",...
Prompt users whether to build a new BokehJS or install an existing one. Returns: bool : True, if a new build is requested, False otherwise
[ "Prompt", "users", "whether", "to", "build", "a", "new", "BokehJS", "or", "install", "an", "existing", "one", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/_setup_support.py#L245-L258
30,288
bokeh/bokeh
_setup_support.py
install_js
def install_js(): ''' Copy built BokehJS files into the Python source tree. Returns: None ''' target_jsdir = join(SERVER, 'static', 'js') target_cssdir = join(SERVER, 'static', 'css') target_tslibdir = join(SERVER, 'static', 'lib') STATIC_ASSETS = [ join(JS, 'bokeh.js'), join(JS, 'bokeh.min.js'), join(CSS, 'bokeh.css'), join(CSS, 'bokeh.min.css'), ] if not all(exists(a) for a in STATIC_ASSETS): print(BOKEHJS_INSTALL_FAIL) sys.exit(1) if exists(target_jsdir): shutil.rmtree(target_jsdir) shutil.copytree(JS, target_jsdir) if exists(target_cssdir): shutil.rmtree(target_cssdir) shutil.copytree(CSS, target_cssdir) if exists(target_tslibdir): shutil.rmtree(target_tslibdir) if exists(TSLIB): # keep in sync with bokehjs/src/compiler/compile.ts lib = { "lib.es5.d.ts", "lib.dom.d.ts", "lib.es2015.core.d.ts", "lib.es2015.promise.d.ts", "lib.es2015.symbol.d.ts", "lib.es2015.iterable.d.ts", } shutil.copytree(TSLIB, target_tslibdir, ignore=lambda _, files: [ f for f in files if f not in lib ])
python
def install_js(): ''' Copy built BokehJS files into the Python source tree. Returns: None ''' target_jsdir = join(SERVER, 'static', 'js') target_cssdir = join(SERVER, 'static', 'css') target_tslibdir = join(SERVER, 'static', 'lib') STATIC_ASSETS = [ join(JS, 'bokeh.js'), join(JS, 'bokeh.min.js'), join(CSS, 'bokeh.css'), join(CSS, 'bokeh.min.css'), ] if not all(exists(a) for a in STATIC_ASSETS): print(BOKEHJS_INSTALL_FAIL) sys.exit(1) if exists(target_jsdir): shutil.rmtree(target_jsdir) shutil.copytree(JS, target_jsdir) if exists(target_cssdir): shutil.rmtree(target_cssdir) shutil.copytree(CSS, target_cssdir) if exists(target_tslibdir): shutil.rmtree(target_tslibdir) if exists(TSLIB): # keep in sync with bokehjs/src/compiler/compile.ts lib = { "lib.es5.d.ts", "lib.dom.d.ts", "lib.es2015.core.d.ts", "lib.es2015.promise.d.ts", "lib.es2015.symbol.d.ts", "lib.es2015.iterable.d.ts", } shutil.copytree(TSLIB, target_tslibdir, ignore=lambda _, files: [ f for f in files if f not in lib ])
[ "def", "install_js", "(", ")", ":", "target_jsdir", "=", "join", "(", "SERVER", ",", "'static'", ",", "'js'", ")", "target_cssdir", "=", "join", "(", "SERVER", ",", "'static'", ",", "'css'", ")", "target_tslibdir", "=", "join", "(", "SERVER", ",", "'stat...
Copy built BokehJS files into the Python source tree. Returns: None
[ "Copy", "built", "BokehJS", "files", "into", "the", "Python", "source", "tree", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/_setup_support.py#L340-L381
30,289
bokeh/bokeh
bokeh/util/hex.py
hexbin
def hexbin(x, y, size, orientation="pointytop", aspect_scale=1): ''' Perform an equal-weight binning of data points into hexagonal tiles. For more sophisticated use cases, e.g. weighted binning or scaling individual tiles proportional to some other quantity, consider using HoloViews. Args: x (array[float]) : A NumPy array of x-coordinates for binning y (array[float]) : A NumPy array of y-coordinates for binning size (float) : The size of the hexagonal tiling. The size is defined as the distance from the center of a hexagon to the top corner for "pointytop" orientation, or from the center to a side corner for "flattop" orientation. orientation (str, optional) : Whether the hex tile orientation should be "pointytop" or "flattop". (default: "pointytop") aspect_scale (float, optional) : Match a plot's aspect ratio scaling. When working with a plot with ``aspect_scale != 1``, this parameter can be set to match the plot, in order to draw regular hexagons (instead of "stretched" ones). This is roughly equivalent to binning in "screen space", and it may be better to use axis-aligned rectangular bins when plot aspect scales are not one. Returns: DataFrame The resulting DataFrame will have columns *q* and *r* that specify hexagon tile locations in axial coordinates, and a column *counts* that provides the count for each tile. .. warning:: Hex binning only functions on linear scales, i.e. not on log plots. ''' pd = import_required('pandas','hexbin requires pandas to be installed') q, r = cartesian_to_axial(x, y, size, orientation, aspect_scale=aspect_scale) df = pd.DataFrame(dict(r=r, q=q)) return df.groupby(['q', 'r']).size().reset_index(name='counts')
python
def hexbin(x, y, size, orientation="pointytop", aspect_scale=1): ''' Perform an equal-weight binning of data points into hexagonal tiles. For more sophisticated use cases, e.g. weighted binning or scaling individual tiles proportional to some other quantity, consider using HoloViews. Args: x (array[float]) : A NumPy array of x-coordinates for binning y (array[float]) : A NumPy array of y-coordinates for binning size (float) : The size of the hexagonal tiling. The size is defined as the distance from the center of a hexagon to the top corner for "pointytop" orientation, or from the center to a side corner for "flattop" orientation. orientation (str, optional) : Whether the hex tile orientation should be "pointytop" or "flattop". (default: "pointytop") aspect_scale (float, optional) : Match a plot's aspect ratio scaling. When working with a plot with ``aspect_scale != 1``, this parameter can be set to match the plot, in order to draw regular hexagons (instead of "stretched" ones). This is roughly equivalent to binning in "screen space", and it may be better to use axis-aligned rectangular bins when plot aspect scales are not one. Returns: DataFrame The resulting DataFrame will have columns *q* and *r* that specify hexagon tile locations in axial coordinates, and a column *counts* that provides the count for each tile. .. warning:: Hex binning only functions on linear scales, i.e. not on log plots. ''' pd = import_required('pandas','hexbin requires pandas to be installed') q, r = cartesian_to_axial(x, y, size, orientation, aspect_scale=aspect_scale) df = pd.DataFrame(dict(r=r, q=q)) return df.groupby(['q', 'r']).size().reset_index(name='counts')
[ "def", "hexbin", "(", "x", ",", "y", ",", "size", ",", "orientation", "=", "\"pointytop\"", ",", "aspect_scale", "=", "1", ")", ":", "pd", "=", "import_required", "(", "'pandas'", ",", "'hexbin requires pandas to be installed'", ")", "q", ",", "r", "=", "c...
Perform an equal-weight binning of data points into hexagonal tiles. For more sophisticated use cases, e.g. weighted binning or scaling individual tiles proportional to some other quantity, consider using HoloViews. Args: x (array[float]) : A NumPy array of x-coordinates for binning y (array[float]) : A NumPy array of y-coordinates for binning size (float) : The size of the hexagonal tiling. The size is defined as the distance from the center of a hexagon to the top corner for "pointytop" orientation, or from the center to a side corner for "flattop" orientation. orientation (str, optional) : Whether the hex tile orientation should be "pointytop" or "flattop". (default: "pointytop") aspect_scale (float, optional) : Match a plot's aspect ratio scaling. When working with a plot with ``aspect_scale != 1``, this parameter can be set to match the plot, in order to draw regular hexagons (instead of "stretched" ones). This is roughly equivalent to binning in "screen space", and it may be better to use axis-aligned rectangular bins when plot aspect scales are not one. Returns: DataFrame The resulting DataFrame will have columns *q* and *r* that specify hexagon tile locations in axial coordinates, and a column *counts* that provides the count for each tile. .. warning:: Hex binning only functions on linear scales, i.e. not on log plots.
[ "Perform", "an", "equal", "-", "weight", "binning", "of", "data", "points", "into", "hexagonal", "tiles", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/hex.py#L152-L204
30,290
bokeh/bokeh
bokeh/core/property/container.py
ColumnData.from_json
def from_json(self, json, models=None): ''' Decodes column source data encoded as lists or base64 strings. ''' if json is None: return None elif not isinstance(json, dict): raise DeserializationError("%s expected a dict or None, got %s" % (self, json)) new_data = {} for key, value in json.items(): key = self.keys_type.from_json(key, models) if isinstance(value, dict) and '__ndarray__' in value: new_data[key] = decode_base64_dict(value) elif isinstance(value, list) and any(isinstance(el, dict) and '__ndarray__' in el for el in value): new_list = [] for el in value: if isinstance(el, dict) and '__ndarray__' in el: el = decode_base64_dict(el) elif isinstance(el, list): el = self.values_type.from_json(el) new_list.append(el) new_data[key] = new_list else: new_data[key] = self.values_type.from_json(value, models) return new_data
python
def from_json(self, json, models=None): ''' Decodes column source data encoded as lists or base64 strings. ''' if json is None: return None elif not isinstance(json, dict): raise DeserializationError("%s expected a dict or None, got %s" % (self, json)) new_data = {} for key, value in json.items(): key = self.keys_type.from_json(key, models) if isinstance(value, dict) and '__ndarray__' in value: new_data[key] = decode_base64_dict(value) elif isinstance(value, list) and any(isinstance(el, dict) and '__ndarray__' in el for el in value): new_list = [] for el in value: if isinstance(el, dict) and '__ndarray__' in el: el = decode_base64_dict(el) elif isinstance(el, list): el = self.values_type.from_json(el) new_list.append(el) new_data[key] = new_list else: new_data[key] = self.values_type.from_json(value, models) return new_data
[ "def", "from_json", "(", "self", ",", "json", ",", "models", "=", "None", ")", ":", "if", "json", "is", "None", ":", "return", "None", "elif", "not", "isinstance", "(", "json", ",", "dict", ")", ":", "raise", "DeserializationError", "(", "\"%s expected a...
Decodes column source data encoded as lists or base64 strings.
[ "Decodes", "column", "source", "data", "encoded", "as", "lists", "or", "base64", "strings", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/container.py#L234-L257
30,291
bokeh/bokeh
bokeh/util/serialization.py
convert_timedelta_type
def convert_timedelta_type(obj): ''' Convert any recognized timedelta value to floating point absolute milliseconds. Arg: obj (object) : the object to convert Returns: float : milliseconds ''' if isinstance(obj, dt.timedelta): return obj.total_seconds() * 1000. elif isinstance(obj, np.timedelta64): return (obj / NP_MS_DELTA)
python
def convert_timedelta_type(obj): ''' Convert any recognized timedelta value to floating point absolute milliseconds. Arg: obj (object) : the object to convert Returns: float : milliseconds ''' if isinstance(obj, dt.timedelta): return obj.total_seconds() * 1000. elif isinstance(obj, np.timedelta64): return (obj / NP_MS_DELTA)
[ "def", "convert_timedelta_type", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "dt", ".", "timedelta", ")", ":", "return", "obj", ".", "total_seconds", "(", ")", "*", "1000.", "elif", "isinstance", "(", "obj", ",", "np", ".", "timedelta64", ...
Convert any recognized timedelta value to floating point absolute milliseconds. Arg: obj (object) : the object to convert Returns: float : milliseconds
[ "Convert", "any", "recognized", "timedelta", "value", "to", "floating", "point", "absolute", "milliseconds", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L136-L150
30,292
bokeh/bokeh
bokeh/util/serialization.py
convert_datetime_type
def convert_datetime_type(obj): ''' Convert any recognized date, time, or datetime value to floating point milliseconds since epoch. Arg: obj (object) : the object to convert Returns: float : milliseconds ''' # Pandas NaT if pd and obj is pd.NaT: return np.nan # Pandas Period if pd and isinstance(obj, pd.Period): return obj.to_timestamp().value / 10**6.0 # Pandas Timestamp if pd and isinstance(obj, _pd_timestamp): return obj.value / 10**6.0 # Pandas Timedelta elif pd and isinstance(obj, pd.Timedelta): return obj.value / 10**6.0 # Datetime (datetime is a subclass of date) elif isinstance(obj, dt.datetime): diff = obj.replace(tzinfo=None) - DT_EPOCH return diff.total_seconds() * 1000. # Date elif isinstance(obj, dt.date): return (dt.datetime(*obj.timetuple()[:6]) - DT_EPOCH).total_seconds() * 1000 # NumPy datetime64 elif isinstance(obj, np.datetime64): epoch_delta = obj - NP_EPOCH return (epoch_delta / NP_MS_DELTA) # Time elif isinstance(obj, dt.time): return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
python
def convert_datetime_type(obj): ''' Convert any recognized date, time, or datetime value to floating point milliseconds since epoch. Arg: obj (object) : the object to convert Returns: float : milliseconds ''' # Pandas NaT if pd and obj is pd.NaT: return np.nan # Pandas Period if pd and isinstance(obj, pd.Period): return obj.to_timestamp().value / 10**6.0 # Pandas Timestamp if pd and isinstance(obj, _pd_timestamp): return obj.value / 10**6.0 # Pandas Timedelta elif pd and isinstance(obj, pd.Timedelta): return obj.value / 10**6.0 # Datetime (datetime is a subclass of date) elif isinstance(obj, dt.datetime): diff = obj.replace(tzinfo=None) - DT_EPOCH return diff.total_seconds() * 1000. # Date elif isinstance(obj, dt.date): return (dt.datetime(*obj.timetuple()[:6]) - DT_EPOCH).total_seconds() * 1000 # NumPy datetime64 elif isinstance(obj, np.datetime64): epoch_delta = obj - NP_EPOCH return (epoch_delta / NP_MS_DELTA) # Time elif isinstance(obj, dt.time): return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
[ "def", "convert_datetime_type", "(", "obj", ")", ":", "# Pandas NaT", "if", "pd", "and", "obj", "is", "pd", ".", "NaT", ":", "return", "np", ".", "nan", "# Pandas Period", "if", "pd", "and", "isinstance", "(", "obj", ",", "pd", ".", "Period", ")", ":",...
Convert any recognized date, time, or datetime value to floating point milliseconds since epoch. Arg: obj (object) : the object to convert Returns: float : milliseconds
[ "Convert", "any", "recognized", "date", "time", "or", "datetime", "value", "to", "floating", "point", "milliseconds", "since", "epoch", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L152-L193
30,293
bokeh/bokeh
bokeh/util/serialization.py
convert_datetime_array
def convert_datetime_array(array): ''' Convert NumPy datetime arrays to arrays to milliseconds since epoch. Args: array : (obj) A NumPy array of datetime to convert If the value passed in is not a NumPy array, it will be returned as-is. Returns: array ''' if not isinstance(array, np.ndarray): return array try: dt2001 = np.datetime64('2001') legacy_datetime64 = (dt2001.astype('int64') == dt2001.astype('datetime64[ms]').astype('int64')) except AttributeError as e: if e.args == ("'module' object has no attribute 'datetime64'",): # for compatibility with PyPy that doesn't have datetime64 if 'PyPy' in sys.version: legacy_datetime64 = False pass else: raise e else: raise e # not quite correct, truncates to ms.. if array.dtype.kind == 'M': if legacy_datetime64: if array.dtype == np.dtype('datetime64[ns]'): array = array.astype('int64') / 10**6.0 else: array = array.astype('datetime64[us]').astype('int64') / 1000. elif array.dtype.kind == 'm': array = array.astype('timedelta64[us]').astype('int64') / 1000. return array
python
def convert_datetime_array(array): ''' Convert NumPy datetime arrays to arrays to milliseconds since epoch. Args: array : (obj) A NumPy array of datetime to convert If the value passed in is not a NumPy array, it will be returned as-is. Returns: array ''' if not isinstance(array, np.ndarray): return array try: dt2001 = np.datetime64('2001') legacy_datetime64 = (dt2001.astype('int64') == dt2001.astype('datetime64[ms]').astype('int64')) except AttributeError as e: if e.args == ("'module' object has no attribute 'datetime64'",): # for compatibility with PyPy that doesn't have datetime64 if 'PyPy' in sys.version: legacy_datetime64 = False pass else: raise e else: raise e # not quite correct, truncates to ms.. if array.dtype.kind == 'M': if legacy_datetime64: if array.dtype == np.dtype('datetime64[ns]'): array = array.astype('int64') / 10**6.0 else: array = array.astype('datetime64[us]').astype('int64') / 1000. elif array.dtype.kind == 'm': array = array.astype('timedelta64[us]').astype('int64') / 1000. return array
[ "def", "convert_datetime_array", "(", "array", ")", ":", "if", "not", "isinstance", "(", "array", ",", "np", ".", "ndarray", ")", ":", "return", "array", "try", ":", "dt2001", "=", "np", ".", "datetime64", "(", "'2001'", ")", "legacy_datetime64", "=", "(...
Convert NumPy datetime arrays to arrays to milliseconds since epoch. Args: array : (obj) A NumPy array of datetime to convert If the value passed in is not a NumPy array, it will be returned as-is. Returns: array
[ "Convert", "NumPy", "datetime", "arrays", "to", "arrays", "to", "milliseconds", "since", "epoch", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L195-L238
30,294
bokeh/bokeh
bokeh/util/serialization.py
make_id
def make_id(): ''' Return a new unique ID for a Bokeh object. Normally this function will return simple monotonically increasing integer IDs (as strings) for identifying Bokeh objects within a Document. However, if it is desirable to have globally unique for every object, this behavior can be overridden by setting the environment variable ``BOKEH_SIMPLE_IDS=no``. Returns: str ''' global _simple_id if settings.simple_ids(True): with _simple_id_lock: _simple_id += 1 return str(_simple_id) else: return make_globally_unique_id()
python
def make_id(): ''' Return a new unique ID for a Bokeh object. Normally this function will return simple monotonically increasing integer IDs (as strings) for identifying Bokeh objects within a Document. However, if it is desirable to have globally unique for every object, this behavior can be overridden by setting the environment variable ``BOKEH_SIMPLE_IDS=no``. Returns: str ''' global _simple_id if settings.simple_ids(True): with _simple_id_lock: _simple_id += 1 return str(_simple_id) else: return make_globally_unique_id()
[ "def", "make_id", "(", ")", ":", "global", "_simple_id", "if", "settings", ".", "simple_ids", "(", "True", ")", ":", "with", "_simple_id_lock", ":", "_simple_id", "+=", "1", "return", "str", "(", "_simple_id", ")", "else", ":", "return", "make_globally_uniqu...
Return a new unique ID for a Bokeh object. Normally this function will return simple monotonically increasing integer IDs (as strings) for identifying Bokeh objects within a Document. However, if it is desirable to have globally unique for every object, this behavior can be overridden by setting the environment variable ``BOKEH_SIMPLE_IDS=no``. Returns: str
[ "Return", "a", "new", "unique", "ID", "for", "a", "Bokeh", "object", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L240-L259
30,295
bokeh/bokeh
bokeh/util/serialization.py
transform_array
def transform_array(array, force_list=False, buffers=None): ''' Transform a NumPy arrays into serialized format Converts un-serializable dtypes and returns JSON serializable format Args: array (np.ndarray) : a NumPy array to be transformed force_list (bool, optional) : whether to only output to standard lists This function can encode some dtypes using a binary encoding, but setting this argument to True will override that and cause only standard Python lists to be emitted. (default: False) buffers (set, optional) : If binary buffers are desired, the buffers parameter may be provided, and any columns that may be sent as binary buffers will be added to the set. If None, then only base64 encoding will be used (default: None) If force_list is True, then this value will be ignored, and no buffers will be generated. **This is an "out" parameter**. The values it contains will be modified in-place. Returns: JSON ''' array = convert_datetime_array(array) return serialize_array(array, force_list=force_list, buffers=buffers)
python
def transform_array(array, force_list=False, buffers=None): ''' Transform a NumPy arrays into serialized format Converts un-serializable dtypes and returns JSON serializable format Args: array (np.ndarray) : a NumPy array to be transformed force_list (bool, optional) : whether to only output to standard lists This function can encode some dtypes using a binary encoding, but setting this argument to True will override that and cause only standard Python lists to be emitted. (default: False) buffers (set, optional) : If binary buffers are desired, the buffers parameter may be provided, and any columns that may be sent as binary buffers will be added to the set. If None, then only base64 encoding will be used (default: None) If force_list is True, then this value will be ignored, and no buffers will be generated. **This is an "out" parameter**. The values it contains will be modified in-place. Returns: JSON ''' array = convert_datetime_array(array) return serialize_array(array, force_list=force_list, buffers=buffers)
[ "def", "transform_array", "(", "array", ",", "force_list", "=", "False", ",", "buffers", "=", "None", ")", ":", "array", "=", "convert_datetime_array", "(", "array", ")", "return", "serialize_array", "(", "array", ",", "force_list", "=", "force_list", ",", "...
Transform a NumPy arrays into serialized format Converts un-serializable dtypes and returns JSON serializable format Args: array (np.ndarray) : a NumPy array to be transformed force_list (bool, optional) : whether to only output to standard lists This function can encode some dtypes using a binary encoding, but setting this argument to True will override that and cause only standard Python lists to be emitted. (default: False) buffers (set, optional) : If binary buffers are desired, the buffers parameter may be provided, and any columns that may be sent as binary buffers will be added to the set. If None, then only base64 encoding will be used (default: None) If force_list is True, then this value will be ignored, and no buffers will be generated. **This is an "out" parameter**. The values it contains will be modified in-place. Returns: JSON
[ "Transform", "a", "NumPy", "arrays", "into", "serialized", "format" ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L295-L328
30,296
bokeh/bokeh
bokeh/util/serialization.py
transform_array_to_list
def transform_array_to_list(array): ''' Transforms a NumPy array into a list of values Args: array (np.nadarray) : the NumPy array series to transform Returns: list or dict ''' if (array.dtype.kind in ('u', 'i', 'f') and (~np.isfinite(array)).any()): transformed = array.astype('object') transformed[np.isnan(array)] = 'NaN' transformed[np.isposinf(array)] = 'Infinity' transformed[np.isneginf(array)] = '-Infinity' return transformed.tolist() elif (array.dtype.kind == 'O' and pd and pd.isnull(array).any()): transformed = array.astype('object') transformed[pd.isnull(array)] = 'NaN' return transformed.tolist() return array.tolist()
python
def transform_array_to_list(array): ''' Transforms a NumPy array into a list of values Args: array (np.nadarray) : the NumPy array series to transform Returns: list or dict ''' if (array.dtype.kind in ('u', 'i', 'f') and (~np.isfinite(array)).any()): transformed = array.astype('object') transformed[np.isnan(array)] = 'NaN' transformed[np.isposinf(array)] = 'Infinity' transformed[np.isneginf(array)] = '-Infinity' return transformed.tolist() elif (array.dtype.kind == 'O' and pd and pd.isnull(array).any()): transformed = array.astype('object') transformed[pd.isnull(array)] = 'NaN' return transformed.tolist() return array.tolist()
[ "def", "transform_array_to_list", "(", "array", ")", ":", "if", "(", "array", ".", "dtype", ".", "kind", "in", "(", "'u'", ",", "'i'", ",", "'f'", ")", "and", "(", "~", "np", ".", "isfinite", "(", "array", ")", ")", ".", "any", "(", ")", ")", "...
Transforms a NumPy array into a list of values Args: array (np.nadarray) : the NumPy array series to transform Returns: list or dict
[ "Transforms", "a", "NumPy", "array", "into", "a", "list", "of", "values" ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L330-L350
30,297
bokeh/bokeh
bokeh/util/serialization.py
transform_series
def transform_series(series, force_list=False, buffers=None): ''' Transforms a Pandas series into serialized form Args: series (pd.Series) : the Pandas series to transform force_list (bool, optional) : whether to only output to standard lists This function can encode some dtypes using a binary encoding, but setting this argument to True will override that and cause only standard Python lists to be emitted. (default: False) buffers (set, optional) : If binary buffers are desired, the buffers parameter may be provided, and any columns that may be sent as binary buffers will be added to the set. If None, then only base64 encoding will be used (default: None) If force_list is True, then this value will be ignored, and no buffers will be generated. **This is an "out" parameter**. The values it contains will be modified in-place. Returns: list or dict ''' # not checking for pd here, this function should only be called if it # is already known that series is a Pandas Series type if isinstance(series, pd.PeriodIndex): vals = series.to_timestamp().values else: vals = series.values return transform_array(vals, force_list=force_list, buffers=buffers)
python
def transform_series(series, force_list=False, buffers=None): ''' Transforms a Pandas series into serialized form Args: series (pd.Series) : the Pandas series to transform force_list (bool, optional) : whether to only output to standard lists This function can encode some dtypes using a binary encoding, but setting this argument to True will override that and cause only standard Python lists to be emitted. (default: False) buffers (set, optional) : If binary buffers are desired, the buffers parameter may be provided, and any columns that may be sent as binary buffers will be added to the set. If None, then only base64 encoding will be used (default: None) If force_list is True, then this value will be ignored, and no buffers will be generated. **This is an "out" parameter**. The values it contains will be modified in-place. Returns: list or dict ''' # not checking for pd here, this function should only be called if it # is already known that series is a Pandas Series type if isinstance(series, pd.PeriodIndex): vals = series.to_timestamp().values else: vals = series.values return transform_array(vals, force_list=force_list, buffers=buffers)
[ "def", "transform_series", "(", "series", ",", "force_list", "=", "False", ",", "buffers", "=", "None", ")", ":", "# not checking for pd here, this function should only be called if it", "# is already known that series is a Pandas Series type", "if", "isinstance", "(", "series"...
Transforms a Pandas series into serialized form Args: series (pd.Series) : the Pandas series to transform force_list (bool, optional) : whether to only output to standard lists This function can encode some dtypes using a binary encoding, but setting this argument to True will override that and cause only standard Python lists to be emitted. (default: False) buffers (set, optional) : If binary buffers are desired, the buffers parameter may be provided, and any columns that may be sent as binary buffers will be added to the set. If None, then only base64 encoding will be used (default: None) If force_list is True, then this value will be ignored, and no buffers will be generated. **This is an "out" parameter**. The values it contains will be modified in-place. Returns: list or dict
[ "Transforms", "a", "Pandas", "series", "into", "serialized", "form" ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L352-L384
30,298
bokeh/bokeh
bokeh/util/serialization.py
serialize_array
def serialize_array(array, force_list=False, buffers=None): ''' Transforms a NumPy array into serialized form. Args: array (np.ndarray) : the NumPy array to transform force_list (bool, optional) : whether to only output to standard lists This function can encode some dtypes using a binary encoding, but setting this argument to True will override that and cause only standard Python lists to be emitted. (default: False) buffers (set, optional) : If binary buffers are desired, the buffers parameter may be provided, and any columns that may be sent as binary buffers will be added to the set. If None, then only base64 encoding will be used (default: None) If force_list is True, then this value will be ignored, and no buffers will be generated. **This is an "out" parameter**. The values it contains will be modified in-place. Returns: list or dict ''' if isinstance(array, np.ma.MaskedArray): array = array.filled(np.nan) # Set masked values to nan if (array_encoding_disabled(array) or force_list): return transform_array_to_list(array) if not array.flags['C_CONTIGUOUS']: array = np.ascontiguousarray(array) if buffers is None: return encode_base64_dict(array) else: return encode_binary_dict(array, buffers)
python
def serialize_array(array, force_list=False, buffers=None): ''' Transforms a NumPy array into serialized form. Args: array (np.ndarray) : the NumPy array to transform force_list (bool, optional) : whether to only output to standard lists This function can encode some dtypes using a binary encoding, but setting this argument to True will override that and cause only standard Python lists to be emitted. (default: False) buffers (set, optional) : If binary buffers are desired, the buffers parameter may be provided, and any columns that may be sent as binary buffers will be added to the set. If None, then only base64 encoding will be used (default: None) If force_list is True, then this value will be ignored, and no buffers will be generated. **This is an "out" parameter**. The values it contains will be modified in-place. Returns: list or dict ''' if isinstance(array, np.ma.MaskedArray): array = array.filled(np.nan) # Set masked values to nan if (array_encoding_disabled(array) or force_list): return transform_array_to_list(array) if not array.flags['C_CONTIGUOUS']: array = np.ascontiguousarray(array) if buffers is None: return encode_base64_dict(array) else: return encode_binary_dict(array, buffers)
[ "def", "serialize_array", "(", "array", ",", "force_list", "=", "False", ",", "buffers", "=", "None", ")", ":", "if", "isinstance", "(", "array", ",", "np", ".", "ma", ".", "MaskedArray", ")", ":", "array", "=", "array", ".", "filled", "(", "np", "."...
Transforms a NumPy array into serialized form. Args: array (np.ndarray) : the NumPy array to transform force_list (bool, optional) : whether to only output to standard lists This function can encode some dtypes using a binary encoding, but setting this argument to True will override that and cause only standard Python lists to be emitted. (default: False) buffers (set, optional) : If binary buffers are desired, the buffers parameter may be provided, and any columns that may be sent as binary buffers will be added to the set. If None, then only base64 encoding will be used (default: None) If force_list is True, then this value will be ignored, and no buffers will be generated. **This is an "out" parameter**. The values it contains will be modified in-place. Returns: list or dict
[ "Transforms", "a", "NumPy", "array", "into", "serialized", "form", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L386-L421
30,299
bokeh/bokeh
bokeh/util/serialization.py
traverse_data
def traverse_data(obj, use_numpy=True, buffers=None): ''' Recursively traverse an object until a flat list is found. If NumPy is available, the flat list is converted to a numpy array and passed to transform_array() to handle ``nan``, ``inf``, and ``-inf``. Otherwise, iterate through all items, converting non-JSON items Args: obj (list) : a list of values or lists use_numpy (bool, optional) toggle NumPy as a dependency for testing This argument is only useful for testing (default: True) ''' if use_numpy and all(isinstance(el, np.ndarray) for el in obj): return [transform_array(el, buffers=buffers) for el in obj] obj_copy = [] for item in obj: # Check the base/common case first for performance reasons # Also use type(x) is float because it's faster than isinstance if type(item) is float: if math.isnan(item): item = 'NaN' elif math.isinf(item): if item > 0: item = 'Infinity' else: item = '-Infinity' obj_copy.append(item) elif isinstance(item, (list, tuple)): # check less common type second obj_copy.append(traverse_data(item)) else: obj_copy.append(item) return obj_copy
python
def traverse_data(obj, use_numpy=True, buffers=None): ''' Recursively traverse an object until a flat list is found. If NumPy is available, the flat list is converted to a numpy array and passed to transform_array() to handle ``nan``, ``inf``, and ``-inf``. Otherwise, iterate through all items, converting non-JSON items Args: obj (list) : a list of values or lists use_numpy (bool, optional) toggle NumPy as a dependency for testing This argument is only useful for testing (default: True) ''' if use_numpy and all(isinstance(el, np.ndarray) for el in obj): return [transform_array(el, buffers=buffers) for el in obj] obj_copy = [] for item in obj: # Check the base/common case first for performance reasons # Also use type(x) is float because it's faster than isinstance if type(item) is float: if math.isnan(item): item = 'NaN' elif math.isinf(item): if item > 0: item = 'Infinity' else: item = '-Infinity' obj_copy.append(item) elif isinstance(item, (list, tuple)): # check less common type second obj_copy.append(traverse_data(item)) else: obj_copy.append(item) return obj_copy
[ "def", "traverse_data", "(", "obj", ",", "use_numpy", "=", "True", ",", "buffers", "=", "None", ")", ":", "if", "use_numpy", "and", "all", "(", "isinstance", "(", "el", ",", "np", ".", "ndarray", ")", "for", "el", "in", "obj", ")", ":", "return", "...
Recursively traverse an object until a flat list is found. If NumPy is available, the flat list is converted to a numpy array and passed to transform_array() to handle ``nan``, ``inf``, and ``-inf``. Otherwise, iterate through all items, converting non-JSON items Args: obj (list) : a list of values or lists use_numpy (bool, optional) toggle NumPy as a dependency for testing This argument is only useful for testing (default: True)
[ "Recursively", "traverse", "an", "object", "until", "a", "flat", "list", "is", "found", "." ]
dc8cf49e4e4302fd38537ad089ece81fbcca4737
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L423-L456