column             type            length / values
partition          stringclasses   3 values
func_name          stringlengths   1-134
docstring          stringlengths   1-46.9k
path               stringlengths   4-223
original_string    stringlengths   75-104k
code               stringlengths   75-104k
docstring_tokens   listlengths     1-1.97k
repo               stringlengths   7-55
language           stringclasses   1 value
url                stringlengths   87-315
code_tokens        listlengths     19-28.4k
sha                stringlengths   40-40
train
CSVLogger._init
CSV outputted with Headers as first set of results.
python/ray/tune/logger.py
def _init(self):
    """CSV outputted with Headers as first set of results."""
    # Note that we assume params.json was already created by JsonLogger
    progress_file = os.path.join(self.logdir, "progress.csv")
    self._continuing = os.path.exists(progress_file)
    self._file = open(progress_file, "a")
    self._csv_out = None
[ "CSV", "outputted", "with", "Headers", "as", "first", "set", "of", "results", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/logger.py#L156-L162
[ "def", "_init", "(", "self", ")", ":", "# Note that we assume params.json was already created by JsonLogger", "progress_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logdir", ",", "\"progress.csv\"", ")", "self", ".", "_continuing", "=", "os", "."...
4eade036a0505e244c976f36aaa2d64386b5129b
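A minimal standalone sketch of the append-or-continue pattern `_init` relies on; the log directory and `fieldnames` below are hypothetical stand-ins, not part of the Ray API.

import csv
import os

def open_progress_csv(logdir):
    # Reopen an existing progress.csv in append mode so a resumed run
    # continues the same file; remember whether headers already exist.
    progress_file = os.path.join(logdir, "progress.csv")
    continuing = os.path.exists(progress_file)
    f = open(progress_file, "a")
    writer = csv.DictWriter(f, fieldnames=["step", "loss"])
    if not continuing:
        writer.writeheader()  # only write headers for a fresh file
    return f, writer

f, writer = open_progress_csv(".")
writer.writerow({"step": 1, "loss": 0.5})
f.close()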
train
UnifiedLogger.sync_results_to_new_location
Sends the current log directory to the remote node. Syncing will not occur if the cluster is not started with the Ray autoscaler.
python/ray/tune/logger.py
def sync_results_to_new_location(self, worker_ip):
    """Sends the current log directory to the remote node.

    Syncing will not occur if the cluster is not started
    with the Ray autoscaler.
    """
    if worker_ip != self._log_syncer.worker_ip:
        self._log_syncer.set_worker_ip(worker_ip)
        self._log_syncer.sync_to_worker_if_possible()
[ "Sends", "the", "current", "log", "directory", "to", "the", "remote", "node", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/logger.py#L241-L249
[ "def", "sync_results_to_new_location", "(", "self", ",", "worker_ip", ")", ":", "if", "worker_ip", "!=", "self", ".", "_log_syncer", ".", "worker_ip", ":", "self", ".", "_log_syncer", ".", "set_worker_ip", "(", "worker_ip", ")", "self", ".", "_log_syncer", "."...
4eade036a0505e244c976f36aaa2d64386b5129b
train
deep_insert
Inserts value into config by path, generating intermediate dictionaries. Example: >>> deep_insert(path.split("."), value, {})
python/ray/tune/automl/search_policy.py
def deep_insert(path_list, value, config):
    """Inserts value into config by path, generating intermediate dictionaries.

    Example:
        >>> deep_insert(path.split("."), value, {})
    """
    if len(path_list) > 1:
        inside_config = config.setdefault(path_list[0], {})
        deep_insert(path_list[1:], value, inside_config)
    else:
        config[path_list[0]] = value
[ "Inserts", "value", "into", "config", "by", "path", "generating", "intermediate", "dictionaries", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automl/search_policy.py#L18-L28
[ "def", "deep_insert", "(", "path_list", ",", "value", ",", "config", ")", ":", "if", "len", "(", "path_list", ")", ">", "1", ":", "inside_config", "=", "config", ".", "setdefault", "(", "path_list", "[", "0", "]", ",", "{", "}", ")", "deep_insert", "...
4eade036a0505e244c976f36aaa2d64386b5129b
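For illustration, a self-contained run of `deep_insert` with a hypothetical dotted key, showing the intermediate dictionaries it generates:

def deep_insert(path_list, value, config):
    # Recreate the function above for a standalone demo.
    if len(path_list) > 1:
        inside_config = config.setdefault(path_list[0], {})
        deep_insert(path_list[1:], value, inside_config)
    else:
        config[path_list[0]] = value

config = {}
deep_insert("model.optimizer.lr".split("."), 0.01, config)
print(config)  # {'model': {'optimizer': {'lr': 0.01}}}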
train
FunctionDescriptor.from_bytes_list
Create a FunctionDescriptor instance from list of bytes. This function is used to create the function descriptor from backend data. Args: cls: Current class which is required argument for classmethod. function_descriptor_list: list of bytes to represent the function descriptor. Returns: The FunctionDescriptor instance created from the bytes list.
python/ray/function_manager.py
def from_bytes_list(cls, function_descriptor_list):
    """Create a FunctionDescriptor instance from list of bytes.

    This function is used to create the function descriptor from
    backend data.

    Args:
        cls: Current class which is required argument for classmethod.
        function_descriptor_list: list of bytes to represent the
            function descriptor.

    Returns:
        The FunctionDescriptor instance created from the bytes list.
    """
    assert isinstance(function_descriptor_list, list)
    if len(function_descriptor_list) == 0:
        # This is a function descriptor of driver task.
        return FunctionDescriptor.for_driver_task()
    elif (len(function_descriptor_list) == 3
          or len(function_descriptor_list) == 4):
        module_name = ensure_str(function_descriptor_list[0])
        class_name = ensure_str(function_descriptor_list[1])
        function_name = ensure_str(function_descriptor_list[2])
        if len(function_descriptor_list) == 4:
            return cls(module_name, function_name, class_name,
                       function_descriptor_list[3])
        else:
            return cls(module_name, function_name, class_name)
    else:
        raise Exception(
            "Invalid input for FunctionDescriptor.from_bytes_list")
[ "Create", "a", "FunctionDescriptor", "instance", "from", "list", "of", "bytes", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L73-L103
[ "def", "from_bytes_list", "(", "cls", ",", "function_descriptor_list", ")", ":", "assert", "isinstance", "(", "function_descriptor_list", ",", "list", ")", "if", "len", "(", "function_descriptor_list", ")", "==", "0", ":", "# This is a function descriptor of driver task...
4eade036a0505e244c976f36aaa2d64386b5129b
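A rough standalone mirror of the length-based dispatch in `from_bytes_list`, with plain tuples standing in for FunctionDescriptor instances; the 20-byte hash below is a made-up placeholder.

def parse_descriptor(parts):
    # [] -> driver task; 3 or 4 entries -> module/class/function (+ hash).
    if len(parts) == 0:
        return ("driver_task",)
    if len(parts) in (3, 4):
        module, cls, func = (p.decode() for p in parts[:3])
        source_hash = parts[3] if len(parts) == 4 else b""
        return (module, cls, func, source_hash)
    raise ValueError("invalid descriptor list")

print(parse_descriptor([b"my_mod", b"", b"my_func", b"\x00" * 20]))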
train
FunctionDescriptor.from_function
Create a FunctionDescriptor from a function instance. This function is used to create the function descriptor from a python function. If a function is a class function, it should not be used by this function. Args: cls: Current class which is required argument for classmethod. function: the python function used to create the function descriptor. Returns: The FunctionDescriptor instance created according to the function.
python/ray/function_manager.py
def from_function(cls, function):
    """Create a FunctionDescriptor from a function instance.

    This function is used to create the function descriptor from
    a python function. If a function is a class function, it should
    not be used by this function.

    Args:
        cls: Current class which is required argument for classmethod.
        function: the python function used to create the function
            descriptor.

    Returns:
        The FunctionDescriptor instance created according to the function.
    """
    module_name = function.__module__
    function_name = function.__name__
    class_name = ""

    function_source_hasher = hashlib.sha1()
    try:
        # If we are running a script or are in IPython, include the source
        # code in the hash.
        source = inspect.getsource(function)
        if sys.version_info[0] >= 3:
            source = source.encode()
        function_source_hasher.update(source)
        function_source_hash = function_source_hasher.digest()
    except (IOError, OSError, TypeError):
        # Source code may not be available:
        # e.g. Cython or Python interpreter.
        function_source_hash = b""

    return cls(module_name, function_name, class_name,
               function_source_hash)
[ "Create", "a", "FunctionDescriptor", "from", "a", "function", "instance", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L106-L140
[ "def", "from_function", "(", "cls", ",", "function", ")", ":", "module_name", "=", "function", ".", "__module__", "function_name", "=", "function", ".", "__name__", "class_name", "=", "\"\"", "function_source_hasher", "=", "hashlib", ".", "sha1", "(", ")", "tr...
4eade036a0505e244c976f36aaa2d64386b5129b
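The source-hashing step in isolation, as a sketch; it assumes Python 3 (where `inspect.getsource` returns str and must be encoded before hashing) and a function defined in a file, so the source is retrievable.

import hashlib
import inspect

def source_hash(function):
    # Hash the function's source like from_function does; fall back to
    # an empty hash when the source is unavailable (e.g. Cython, REPL).
    hasher = hashlib.sha1()
    try:
        hasher.update(inspect.getsource(function).encode())
        return hasher.digest()
    except (IOError, OSError, TypeError):
        return b""

def f(x):
    return x + 1

print(source_hash(f).hex())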
train
FunctionDescriptor.from_class
Create a FunctionDescriptor from a class. Args: cls: Current class which is required argument for classmethod. target_class: the python class used to create the function descriptor. Returns: The FunctionDescriptor instance created according to the class.
python/ray/function_manager.py
def from_class(cls, target_class):
    """Create a FunctionDescriptor from a class.

    Args:
        cls: Current class which is required argument for classmethod.
        target_class: the python class used to create the function
            descriptor.

    Returns:
        The FunctionDescriptor instance created according to the class.
    """
    module_name = target_class.__module__
    class_name = target_class.__name__
    return cls(module_name, "__init__", class_name)
[ "Create", "a", "FunctionDescriptor", "from", "a", "class", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L143-L156
[ "def", "from_class", "(", "cls", ",", "target_class", ")", ":", "module_name", "=", "target_class", ".", "__module__", "class_name", "=", "target_class", ".", "__name__", "return", "cls", "(", "module_name", ",", "\"__init__\"", ",", "class_name", ")" ]
4eade036a0505e244c976f36aaa2d64386b5129b
train
FunctionDescriptor.is_for_driver_task
See whether this function descriptor is for a driver or not. Returns: True if this function descriptor is for driver tasks.
python/ray/function_manager.py
def is_for_driver_task(self):
    """See whether this function descriptor is for a driver or not.

    Returns:
        True if this function descriptor is for driver tasks.
    """
    return all(
        len(x) == 0
        for x in [self.module_name, self.class_name, self.function_name])
[ "See", "whether", "this", "function", "descriptor", "is", "for", "a", "driver", "or", "not", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L164-L172
[ "def", "is_for_driver_task", "(", "self", ")", ":", "return", "all", "(", "len", "(", "x", ")", "==", "0", "for", "x", "in", "[", "self", ".", "module_name", ",", "self", ".", "class_name", ",", "self", ".", "function_name", "]", ")" ]
4eade036a0505e244c976f36aaa2d64386b5129b
train
FunctionDescriptor._get_function_id
Calculate the function id of current function descriptor. This function id is calculated from all the fields of function descriptor. Returns: ray.ObjectID to represent the function descriptor.
python/ray/function_manager.py
def _get_function_id(self):
    """Calculate the function id of current function descriptor.

    This function id is calculated from all the fields of function
    descriptor.

    Returns:
        ray.ObjectID to represent the function descriptor.
    """
    if self.is_for_driver_task:
        return ray.FunctionID.nil()
    function_id_hash = hashlib.sha1()
    # Include the function module and name in the hash.
    function_id_hash.update(self.module_name.encode("ascii"))
    function_id_hash.update(self.function_name.encode("ascii"))
    function_id_hash.update(self.class_name.encode("ascii"))
    function_id_hash.update(self._function_source_hash)
    # Compute the function ID.
    function_id = function_id_hash.digest()
    return ray.FunctionID(function_id)
[ "Calculate", "the", "function", "id", "of", "current", "function", "descriptor", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L221-L240
[ "def", "_get_function_id", "(", "self", ")", ":", "if", "self", ".", "is_for_driver_task", ":", "return", "ray", ".", "FunctionID", ".", "nil", "(", ")", "function_id_hash", "=", "hashlib", ".", "sha1", "(", ")", "# Include the function module and name in the hash...
4eade036a0505e244c976f36aaa2d64386b5129b
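A sketch of the field-by-field SHA-1 that `_get_function_id` performs, using hypothetical descriptor fields; the real method wraps the digest in `ray.FunctionID`.

import hashlib

def function_id(module_name, function_name, class_name, source_hash=b""):
    # Feed each descriptor field into one SHA-1, mirroring _get_function_id;
    # the names here are illustrative, not the real descriptor attributes.
    h = hashlib.sha1()
    h.update(module_name.encode("ascii"))
    h.update(function_name.encode("ascii"))
    h.update(class_name.encode("ascii"))
    h.update(source_hash)
    return h.digest()

print(function_id("my_mod", "my_func", "").hex())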
train
FunctionDescriptor.get_function_descriptor_list
Return a list of bytes representing the function descriptor. This function is used to pass this function descriptor to backend. Returns: A list of bytes.
python/ray/function_manager.py
def get_function_descriptor_list(self):
    """Return a list of bytes representing the function descriptor.

    This function is used to pass this function descriptor to backend.

    Returns:
        A list of bytes.
    """
    descriptor_list = []
    if self.is_for_driver_task:
        # Driver task returns an empty list.
        return descriptor_list
    else:
        descriptor_list.append(self.module_name.encode("ascii"))
        descriptor_list.append(self.class_name.encode("ascii"))
        descriptor_list.append(self.function_name.encode("ascii"))
        if len(self._function_source_hash) != 0:
            descriptor_list.append(self._function_source_hash)
        return descriptor_list
[ "Return", "a", "list", "of", "bytes", "representing", "the", "function", "descriptor", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L242-L260
[ "def", "get_function_descriptor_list", "(", "self", ")", ":", "descriptor_list", "=", "[", "]", "if", "self", ".", "is_for_driver_task", ":", "# Driver task returns an empty list.", "return", "descriptor_list", "else", ":", "descriptor_list", ".", "append", "(", "self...
4eade036a0505e244c976f36aaa2d64386b5129b
train
FunctionActorManager.export_cached
Export cached remote functions Note: this should be called only once when worker is connected.
python/ray/function_manager.py
def export_cached(self):
    """Export cached remote functions

    Note: this should be called only once when worker is connected.
    """
    for remote_function in self._functions_to_export:
        self._do_export(remote_function)
    self._functions_to_export = None
    for info in self._actors_to_export:
        (key, actor_class_info) = info
        self._publish_actor_class_to_key(key, actor_class_info)
[ "Export", "cached", "remote", "functions" ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L318-L328
[ "def", "export_cached", "(", "self", ")", ":", "for", "remote_function", "in", "self", ".", "_functions_to_export", ":", "self", ".", "_do_export", "(", "remote_function", ")", "self", ".", "_functions_to_export", "=", "None", "for", "info", "in", "self", ".",...
4eade036a0505e244c976f36aaa2d64386b5129b
train
FunctionActorManager.export
Export a remote function. Args: remote_function: the RemoteFunction object.
python/ray/function_manager.py
def export(self, remote_function):
    """Export a remote function.

    Args:
        remote_function: the RemoteFunction object.
    """
    if self._worker.mode is None:
        # If the worker isn't connected, cache the function
        # and export it later.
        self._functions_to_export.append(remote_function)
        return
    if self._worker.mode != ray.worker.SCRIPT_MODE:
        # Don't need to export if the worker is not a driver.
        return
    self._do_export(remote_function)
[ "Export", "a", "remote", "function", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L334-L348
[ "def", "export", "(", "self", ",", "remote_function", ")", ":", "if", "self", ".", "_worker", ".", "mode", "is", "None", ":", "# If the worker isn't connected, cache the function", "# and export it later.", "self", ".", "_functions_to_export", ".", "append", "(", "r...
4eade036a0505e244c976f36aaa2d64386b5129b
train
FunctionActorManager._do_export
Pickle a remote function and export it to redis. Args: remote_function: the RemoteFunction object.
python/ray/function_manager.py
def _do_export(self, remote_function):
    """Pickle a remote function and export it to redis.

    Args:
        remote_function: the RemoteFunction object.
    """
    if self._worker.load_code_from_local:
        return
    # Work around limitations of Python pickling.
    function = remote_function._function
    function_name_global_valid = function.__name__ in function.__globals__
    function_name_global_value = function.__globals__.get(
        function.__name__)
    # Allow the function to reference itself as a global variable
    if not is_cython(function):
        function.__globals__[function.__name__] = remote_function
    try:
        pickled_function = pickle.dumps(function)
    finally:
        # Undo our changes
        if function_name_global_valid:
            function.__globals__[function.__name__] = (
                function_name_global_value)
        else:
            del function.__globals__[function.__name__]

    check_oversized_pickle(pickled_function,
                           remote_function._function_name,
                           "remote function", self._worker)

    key = (b"RemoteFunction:" + self._worker.task_driver_id.binary() +
           b":" +
           remote_function._function_descriptor.function_id.binary())
    self._worker.redis_client.hmset(
        key, {
            "driver_id": self._worker.task_driver_id.binary(),
            "function_id": remote_function._function_descriptor.
            function_id.binary(),
            "name": remote_function._function_name,
            "module": function.__module__,
            "function": pickled_function,
            "max_calls": remote_function._max_calls
        })
    self._worker.redis_client.rpush("Exports", key)
[ "Pickle", "a", "remote", "function", "and", "export", "it", "to", "redis", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L350-L391
[ "def", "_do_export", "(", "self", ",", "remote_function", ")", ":", "if", "self", ".", "_worker", ".", "load_code_from_local", ":", "return", "# Work around limitations of Python pickling.", "function", "=", "remote_function", ".", "_function", "function_name_global_valid...
4eade036a0505e244c976f36aaa2d64386b5129b
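The self-reference workaround in `_do_export`, shown standalone. Note that Ray serializes with a cloudpickle variant that captures globals by value; the stdlib `pickle` used here serializes module-level functions by reference, so this only demonstrates the bind-and-restore bookkeeping, not the capture itself.

import pickle

def pickle_with_self_reference(function, replacement):
    # Temporarily bind `replacement` under the function's own name in its
    # globals so a by-value serializer could resolve the self-reference,
    # then restore the previous binding (or remove it) afterwards.
    name = function.__name__
    had_global = name in function.__globals__
    old_value = function.__globals__.get(name)
    function.__globals__[name] = replacement
    try:
        return pickle.dumps(function)
    finally:
        if had_global:
            function.__globals__[name] = old_value
        else:
            del function.__globals__[name]

def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

payload = pickle_with_self_reference(fib, fib)
print(len(payload))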
train
FunctionActorManager.fetch_and_register_remote_function
Import a remote function.
python/ray/function_manager.py
def fetch_and_register_remote_function(self, key):
    """Import a remote function."""
    (driver_id_str, function_id_str, function_name, serialized_function,
     num_return_vals, module, resources,
     max_calls) = self._worker.redis_client.hmget(key, [
         "driver_id", "function_id", "name", "function",
         "num_return_vals", "module", "resources", "max_calls"
     ])
    function_id = ray.FunctionID(function_id_str)
    driver_id = ray.DriverID(driver_id_str)
    function_name = decode(function_name)
    max_calls = int(max_calls)
    module = decode(module)

    # This is a placeholder in case the function can't be unpickled. This
    # will be overwritten if the function is successfully registered.
    def f():
        raise Exception("This function was not imported properly.")

    # This function is called by ImportThread. This operation needs to be
    # atomic. Otherwise, there is race condition. Another thread may use
    # the temporary function above before the real function is ready.
    with self.lock:
        self._function_execution_info[driver_id][function_id] = (
            FunctionExecutionInfo(
                function=f,
                function_name=function_name,
                max_calls=max_calls))
        self._num_task_executions[driver_id][function_id] = 0

        try:
            function = pickle.loads(serialized_function)
        except Exception:
            # If an exception was thrown when the remote function was
            # imported, we record the traceback and notify the scheduler
            # of the failure.
            traceback_str = format_error_message(traceback.format_exc())
            # Log the error message.
            push_error_to_driver(
                self._worker,
                ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR,
                "Failed to unpickle the remote function '{}' with "
                "function ID {}. Traceback:\n{}".format(
                    function_name, function_id.hex(), traceback_str),
                driver_id=driver_id)
        else:
            # The below line is necessary. Because in the driver process,
            # if the function is defined in the file where the python
            # script was started from, its module is `__main__`.
            # However in the worker process, the `__main__` module is a
            # different module, which is `default_worker.py`
            function.__module__ = module
            self._function_execution_info[driver_id][function_id] = (
                FunctionExecutionInfo(
                    function=function,
                    function_name=function_name,
                    max_calls=max_calls))
            # Add the function to the function table.
            self._worker.redis_client.rpush(
                b"FunctionTable:" + function_id.binary(),
                self._worker.worker_id)
[ "Import", "a", "remote", "function", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L393-L453
[ "def", "fetch_and_register_remote_function", "(", "self", ",", "key", ")", ":", "(", "driver_id_str", ",", "function_id_str", ",", "function_name", ",", "serialized_function", ",", "num_return_vals", ",", "module", ",", "resources", ",", "max_calls", ")", "=", "se...
4eade036a0505e244c976f36aaa2d64386b5129b
train
FunctionActorManager.get_execution_info
Get the FunctionExecutionInfo of a remote function. Args: driver_id: ID of the driver that the function belongs to. function_descriptor: The FunctionDescriptor of the function to get. Returns: A FunctionExecutionInfo object.
python/ray/function_manager.py
def get_execution_info(self, driver_id, function_descriptor):
    """Get the FunctionExecutionInfo of a remote function.

    Args:
        driver_id: ID of the driver that the function belongs to.
        function_descriptor: The FunctionDescriptor of the function to
            get.

    Returns:
        A FunctionExecutionInfo object.
    """
    if self._worker.load_code_from_local:
        # Load function from local code.
        # Currently, we don't support isolating code by drivers,
        # thus always set driver ID to NIL here.
        driver_id = ray.DriverID.nil()
        if not function_descriptor.is_actor_method():
            self._load_function_from_local(driver_id, function_descriptor)
    else:
        # Load function from GCS.
        # Wait until the function to be executed has actually been
        # registered on this worker. We will push warnings to the user if
        # we spend too long in this loop.
        # The driver function may not be found in sys.path. Try to load
        # the function from GCS.
        with profiling.profile("wait_for_function"):
            self._wait_for_function(function_descriptor, driver_id)
    try:
        function_id = function_descriptor.function_id
        info = self._function_execution_info[driver_id][function_id]
    except KeyError as e:
        message = ("Error occurs in get_execution_info: "
                   "driver_id: %s, function_descriptor: %s. Message: %s" %
                   (driver_id, function_descriptor, e))
        raise KeyError(message)
    return info
[ "Get", "the", "FunctionExecutionInfo", "of", "a", "remote", "function", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L455-L489
[ "def", "get_execution_info", "(", "self", ",", "driver_id", ",", "function_descriptor", ")", ":", "if", "self", ".", "_worker", ".", "load_code_from_local", ":", "# Load function from local code.", "# Currently, we don't support isolating code by drivers,", "# thus always set d...
4eade036a0505e244c976f36aaa2d64386b5129b
train
FunctionActorManager._wait_for_function
Wait until the function to be executed is present on this worker. This method will simply loop until the import thread has imported the relevant function. If we spend too long in this loop, that may indicate a problem somewhere and we will push an error message to the user. If this worker is an actor, then this will wait until the actor has been defined. Args: function_descriptor : The FunctionDescriptor of the function that we want to execute. driver_id (str): The ID of the driver to push the error message to if this times out.
python/ray/function_manager.py
def _wait_for_function(self, function_descriptor, driver_id, timeout=10):
    """Wait until the function to be executed is present on this worker.

    This method will simply loop until the import thread has imported the
    relevant function. If we spend too long in this loop, that may
    indicate a problem somewhere and we will push an error message to the
    user.

    If this worker is an actor, then this will wait until the actor has
    been defined.

    Args:
        function_descriptor : The FunctionDescriptor of the function that
            we want to execute.
        driver_id (str): The ID of the driver to push the error message to
            if this times out.
    """
    start_time = time.time()
    # Only send the warning once.
    warning_sent = False
    while True:
        with self.lock:
            if (self._worker.actor_id.is_nil()
                    and (function_descriptor.function_id in
                         self._function_execution_info[driver_id])):
                break
            elif not self._worker.actor_id.is_nil() and (
                    self._worker.actor_id in self._worker.actors):
                break
        if time.time() - start_time > timeout:
            warning_message = ("This worker was asked to execute a "
                               "function that it does not have "
                               "registered. You may have to restart "
                               "Ray.")
            if not warning_sent:
                ray.utils.push_error_to_driver(
                    self._worker,
                    ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR,
                    warning_message,
                    driver_id=driver_id)
            warning_sent = True
        time.sleep(0.001)
[ "Wait", "until", "the", "function", "to", "be", "executed", "is", "present", "on", "this", "worker", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L518-L558
[ "def", "_wait_for_function", "(", "self", ",", "function_descriptor", ",", "driver_id", ",", "timeout", "=", "10", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "# Only send the warning once.", "warning_sent", "=", "False", "while", "True", ":", ...
4eade036a0505e244c976f36aaa2d64386b5129b
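The same poll-with-one-shot-warning loop in generic form; `predicate` and `on_timeout` are hypothetical hooks standing in for the registration check and `push_error_to_driver`.

import threading
import time

def wait_for(predicate, timeout=10.0, on_timeout=None):
    # Spin until predicate() holds; after `timeout` seconds fire
    # on_timeout exactly once, then keep waiting, as _wait_for_function does.
    start = time.time()
    warning_sent = False
    while not predicate():
        if (not warning_sent and on_timeout is not None
                and time.time() - start > timeout):
            on_timeout()
            warning_sent = True
        time.sleep(0.001)

flag = {"done": False}
threading.Timer(0.05, lambda: flag.update(done=True)).start()
wait_for(lambda: flag["done"], timeout=0.01,
         on_timeout=lambda: print("still waiting"))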
train
FunctionActorManager._publish_actor_class_to_key
Push an actor class definition to Redis. This is factored out as a separate function because it is also called on cached actor class definitions when a worker connects for the first time. Args: key: The key to store the actor class info at. actor_class_info: Information about the actor class.
python/ray/function_manager.py
def _publish_actor_class_to_key(self, key, actor_class_info):
    """Push an actor class definition to Redis.

    This is factored out as a separate function because it is also called
    on cached actor class definitions when a worker connects for the
    first time.

    Args:
        key: The key to store the actor class info at.
        actor_class_info: Information about the actor class.
    """
    # We set the driver ID here because it may not have been available
    # when the actor class was defined.
    self._worker.redis_client.hmset(key, actor_class_info)
    self._worker.redis_client.rpush("Exports", key)
[ "Push", "an", "actor", "class", "definition", "to", "Redis", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L560-L574
[ "def", "_publish_actor_class_to_key", "(", "self", ",", "key", ",", "actor_class_info", ")", ":", "# We set the driver ID here because it may not have been available when", "# the actor class was defined.", "self", ".", "_worker", ".", "redis_client", ".", "hmset", "(", "key"...
4eade036a0505e244c976f36aaa2d64386b5129b
train
FunctionActorManager.load_actor_class
Load the actor class. Args: driver_id: Driver ID of the actor. function_descriptor: Function descriptor of the actor constructor. Returns: The actor class.
python/ray/function_manager.py
def load_actor_class(self, driver_id, function_descriptor):
    """Load the actor class.

    Args:
        driver_id: Driver ID of the actor.
        function_descriptor: Function descriptor of the actor constructor.

    Returns:
        The actor class.
    """
    function_id = function_descriptor.function_id
    # Check if the actor class already exists in the cache.
    actor_class = self._loaded_actor_classes.get(function_id, None)
    if actor_class is None:
        # Load actor class.
        if self._worker.load_code_from_local:
            driver_id = ray.DriverID.nil()
            # Load actor class from local code.
            actor_class = self._load_actor_from_local(
                driver_id, function_descriptor)
        else:
            # Load actor class from GCS.
            actor_class = self._load_actor_class_from_gcs(
                driver_id, function_descriptor)
        # Save the loaded actor class in cache.
        self._loaded_actor_classes[function_id] = actor_class

        # Generate execution info for the methods of this actor class.
        module_name = function_descriptor.module_name
        actor_class_name = function_descriptor.class_name
        actor_methods = inspect.getmembers(
            actor_class, predicate=is_function_or_method)
        for actor_method_name, actor_method in actor_methods:
            method_descriptor = FunctionDescriptor(
                module_name, actor_method_name, actor_class_name)
            method_id = method_descriptor.function_id
            executor = self._make_actor_method_executor(
                actor_method_name,
                actor_method,
                actor_imported=True,
            )
            self._function_execution_info[driver_id][method_id] = (
                FunctionExecutionInfo(
                    function=executor,
                    function_name=actor_method_name,
                    max_calls=0,
                ))
            self._num_task_executions[driver_id][method_id] = 0
        self._num_task_executions[driver_id][function_id] = 0
    return actor_class
[ "Load", "the", "actor", "class", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L619-L668
[ "def", "load_actor_class", "(", "self", ",", "driver_id", ",", "function_descriptor", ")", ":", "function_id", "=", "function_descriptor", ".", "function_id", "# Check if the actor class already exists in the cache.", "actor_class", "=", "self", ".", "_loaded_actor_classes", ...
4eade036a0505e244c976f36aaa2d64386b5129b
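The cache-on-first-load shape of `load_actor_class`, reduced to a sketch; `load_fn` is a hypothetical loader standing in for the local/GCS code paths.

def make_loader(load_fn):
    # Look up by key; on a miss, load once and memoize, then serve the
    # cached object thereafter, as load_actor_class does for actor classes.
    cache = {}
    def load(key):
        obj = cache.get(key)
        if obj is None:
            obj = load_fn(key)
            cache[key] = obj
        return obj
    return load

load = make_loader(lambda key: "class for %s" % key)
print(load("f1"))  # loads and caches
print(load("f1"))  # served from cache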
train
FunctionActorManager._load_actor_from_local
Load actor class from local code.
python/ray/function_manager.py
def _load_actor_from_local(self, driver_id, function_descriptor):
    """Load actor class from local code."""
    module_name, class_name = (function_descriptor.module_name,
                               function_descriptor.class_name)
    try:
        module = importlib.import_module(module_name)
        actor_class = getattr(module, class_name)
        if isinstance(actor_class, ray.actor.ActorClass):
            return actor_class._modified_class
        else:
            return actor_class
    except Exception:
        logger.exception("Failed to load actor_class %s.", class_name)
        raise Exception(
            "Actor {} failed to be imported from local code.".format(
                class_name))
[ "Load", "actor", "class", "from", "local", "code", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L670-L686
[ "def", "_load_actor_from_local", "(", "self", ",", "driver_id", ",", "function_descriptor", ")", ":", "module_name", ",", "class_name", "=", "(", "function_descriptor", ".", "module_name", ",", "function_descriptor", ".", "class_name", ")", "try", ":", "module", "...
4eade036a0505e244c976f36aaa2d64386b5129b
train
FunctionActorManager._load_actor_class_from_gcs
Load actor class from GCS.
python/ray/function_manager.py
def _load_actor_class_from_gcs(self, driver_id, function_descriptor):
    """Load actor class from GCS."""
    key = (b"ActorClass:" + driver_id.binary() + b":" +
           function_descriptor.function_id.binary())
    # Wait for the actor class key to have been imported by the
    # import thread. TODO(rkn): It shouldn't be possible to end
    # up in an infinite loop here, but we should push an error to
    # the driver if too much time is spent here.
    while key not in self.imported_actor_classes:
        time.sleep(0.001)

    # Fetch raw data from GCS.
    (driver_id_str, class_name, module, pickled_class,
     actor_method_names) = self._worker.redis_client.hmget(
         key, [
             "driver_id", "class_name", "module", "class",
             "actor_method_names"
         ])

    class_name = ensure_str(class_name)
    module_name = ensure_str(module)
    driver_id = ray.DriverID(driver_id_str)
    actor_method_names = json.loads(ensure_str(actor_method_names))

    actor_class = None
    try:
        with self.lock:
            actor_class = pickle.loads(pickled_class)
    except Exception:
        logger.exception("Failed to load actor class %s.", class_name)
        # The actor class failed to be unpickled, create a fake actor
        # class instead (just to produce error messages and to prevent
        # the driver from hanging).
        actor_class = self._create_fake_actor_class(
            class_name, actor_method_names)
        # If an exception was thrown when the actor was imported, we
        # record the traceback and notify the scheduler of the failure.
        traceback_str = ray.utils.format_error_message(
            traceback.format_exc())
        # Log the error message.
        push_error_to_driver(
            self._worker,
            ray_constants.REGISTER_ACTOR_PUSH_ERROR,
            "Failed to unpickle actor class '{}' for actor ID {}. "
            "Traceback:\n{}".format(class_name,
                                    self._worker.actor_id.hex(),
                                    traceback_str),
            driver_id)
        # TODO(rkn): In the future, it might make sense to have the worker
        # exit here. However, currently that would lead to hanging if
        # someone calls ray.get on a method invoked on the actor.

    # The below line is necessary. Because in the driver process,
    # if the function is defined in the file where the python script
    # was started from, its module is `__main__`.
    # However in the worker process, the `__main__` module is a
    # different module, which is `default_worker.py`
    actor_class.__module__ = module_name
    return actor_class
[ "Load", "actor", "class", "from", "GCS", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L702-L759
[ "def", "_load_actor_class_from_gcs", "(", "self", ",", "driver_id", ",", "function_descriptor", ")", ":", "key", "=", "(", "b\"ActorClass:\"", "+", "driver_id", ".", "binary", "(", ")", "+", "b\":\"", "+", "function_descriptor", ".", "function_id", ".", "binary"...
4eade036a0505e244c976f36aaa2d64386b5129b
train
FunctionActorManager._make_actor_method_executor
Make an executor that wraps a user-defined actor method. The wrapped method updates the worker's internal state and performs any necessary checkpointing operations. Args: method_name (str): The name of the actor method. method (instancemethod): The actor method to wrap. This should be a method defined on the actor class and should therefore take an instance of the actor as the first argument. actor_imported (bool): Whether the actor has been imported. Checkpointing operations will not be run if this is set to False. Returns: A function that executes the given actor method on the worker's stored instance of the actor. The function also updates the worker's internal state to record the executed method.
python/ray/function_manager.py
def _make_actor_method_executor(self, method_name, method, actor_imported):
    """Make an executor that wraps a user-defined actor method.

    The wrapped method updates the worker's internal state and performs
    any necessary checkpointing operations.

    Args:
        method_name (str): The name of the actor method.
        method (instancemethod): The actor method to wrap. This should be
            a method defined on the actor class and should therefore take
            an instance of the actor as the first argument.
        actor_imported (bool): Whether the actor has been imported.
            Checkpointing operations will not be run if this is set to
            False.

    Returns:
        A function that executes the given actor method on the worker's
            stored instance of the actor. The function also updates the
            worker's internal state to record the executed method.
    """

    def actor_method_executor(dummy_return_id, actor, *args):
        # Update the actor's task counter to reflect the task we're about
        # to execute.
        self._worker.actor_task_counter += 1

        # Execute the assigned method and save a checkpoint if necessary.
        try:
            if is_class_method(method):
                method_returns = method(*args)
            else:
                method_returns = method(actor, *args)
        except Exception as e:
            # Save the checkpoint before allowing the method exception
            # to be thrown, but don't save the checkpoint for actor
            # creation task.
            if (isinstance(actor, ray.actor.Checkpointable)
                    and self._worker.actor_task_counter != 1):
                self._save_and_log_checkpoint(actor)
            raise e
        else:
            # Handle any checkpointing operations before storing the
            # method's return values.
            # NOTE(swang): If method_returns is a pointer to the actor's
            # state, then the checkpointing operations can modify the
            # return values if they mutate the actor's state. Is this
            # okay?
            if isinstance(actor, ray.actor.Checkpointable):
                # If this is the first task to execute on the actor, try
                # to resume from a checkpoint.
                if self._worker.actor_task_counter == 1:
                    if actor_imported:
                        self._restore_and_log_checkpoint(actor)
                else:
                    # Save the checkpoint before returning the method's
                    # return values.
                    self._save_and_log_checkpoint(actor)
            return method_returns

    return actor_method_executor
[ "Make", "an", "executor", "that", "wraps", "a", "user", "-", "defined", "actor", "method", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L761-L819
[ "def", "_make_actor_method_executor", "(", "self", ",", "method_name", ",", "method", ",", "actor_imported", ")", ":", "def", "actor_method_executor", "(", "dummy_return_id", ",", "actor", ",", "*", "args", ")", ":", "# Update the actor's task counter to reflect the tas...
4eade036a0505e244c976f36aaa2d64386b5129b
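A stripped-down version of the executor-factory closure, with a plain dict standing in for the worker state and the checkpoint branches omitted; all names below are illustrative.

def make_method_executor(method, worker_state):
    # Close over shared worker state; each call bumps the task counter
    # before dispatching to the wrapped method, as actor_method_executor
    # does.
    def executor(actor, *args):
        worker_state["actor_task_counter"] += 1
        return method(actor, *args)
    return executor

class Counter:
    def __init__(self):
        self.n = 0
    def incr(self, k):
        self.n += k
        return self.n

state = {"actor_task_counter": 0}
run = make_method_executor(Counter.incr, state)
c = Counter()
print(run(c, 2), state["actor_task_counter"])  # -> 2 1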
train
FunctionActorManager._save_and_log_checkpoint
Save an actor checkpoint if necessary and log any errors. Args: actor: The actor to checkpoint. Returns: The result of the actor's user-defined `save_checkpoint` method.
python/ray/function_manager.py
def _save_and_log_checkpoint(self, actor):
    """Save an actor checkpoint if necessary and log any errors.

    Args:
        actor: The actor to checkpoint.

    Returns:
        The result of the actor's user-defined `save_checkpoint` method.
    """
    actor_id = self._worker.actor_id
    checkpoint_info = self._worker.actor_checkpoint_info[actor_id]
    checkpoint_info.num_tasks_since_last_checkpoint += 1
    now = int(1000 * time.time())
    checkpoint_context = ray.actor.CheckpointContext(
        actor_id, checkpoint_info.num_tasks_since_last_checkpoint,
        now - checkpoint_info.last_checkpoint_timestamp)
    # If we should take a checkpoint, notify raylet to prepare a
    # checkpoint and then call `save_checkpoint`.
    if actor.should_checkpoint(checkpoint_context):
        try:
            now = int(1000 * time.time())
            checkpoint_id = (self._worker.raylet_client.
                             prepare_actor_checkpoint(actor_id))
            checkpoint_info.checkpoint_ids.append(checkpoint_id)
            actor.save_checkpoint(actor_id, checkpoint_id)
            if (len(checkpoint_info.checkpoint_ids) >
                    ray._config.num_actor_checkpoints_to_keep()):
                actor.checkpoint_expired(
                    actor_id,
                    checkpoint_info.checkpoint_ids.pop(0),
                )
            checkpoint_info.num_tasks_since_last_checkpoint = 0
            checkpoint_info.last_checkpoint_timestamp = now
        except Exception:
            # Checkpoint save or reload failed. Notify the driver.
            traceback_str = ray.utils.format_error_message(
                traceback.format_exc())
            ray.utils.push_error_to_driver(
                self._worker,
                ray_constants.CHECKPOINT_PUSH_ERROR,
                traceback_str,
                driver_id=self._worker.task_driver_id)
[ "Save", "an", "actor", "checkpoint", "if", "necessary", "and", "log", "any", "errors", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L821-L862
[ "def", "_save_and_log_checkpoint", "(", "self", ",", "actor", ")", ":", "actor_id", "=", "self", ".", "_worker", ".", "actor_id", "checkpoint_info", "=", "self", ".", "_worker", ".", "actor_checkpoint_info", "[", "actor_id", "]", "checkpoint_info", ".", "num_tas...
4eade036a0505e244c976f36aaa2d64386b5129b
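The checkpoint decision above is driven entirely by the two counters carried in CheckpointContext: tasks since the last checkpoint and elapsed milliseconds. A minimal, dependency-free sketch of the kind of policy a user-defined should_checkpoint might implement; the class name and thresholds are illustrative assumptions, not part of the Ray API:

class PeriodicCheckpointPolicy:
    """Checkpoint every N tasks or every T milliseconds, whichever comes first."""

    def __init__(self, every_n_tasks=100, every_ms=60000):
        self.every_n_tasks = every_n_tasks
        self.every_ms = every_ms

    def should_checkpoint(self, num_tasks_since_last, ms_since_last):
        # Mirrors the two counters carried by CheckpointContext above.
        return (num_tasks_since_last >= self.every_n_tasks
                or ms_since_last >= self.every_ms)

policy = PeriodicCheckpointPolicy(every_n_tasks=3, every_ms=5000)
assert not policy.should_checkpoint(1, 100)
assert policy.should_checkpoint(3, 100)   # task-count threshold reached
assert policy.should_checkpoint(1, 9000)  # time threshold reached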
train
FunctionActorManager._restore_and_log_checkpoint
Restore an actor from a checkpoint if available and log any errors. This should only be called on workers that have just executed an actor creation task. Args: actor: The actor to restore from a checkpoint.
python/ray/function_manager.py
def _restore_and_log_checkpoint(self, actor): """Restore an actor from a checkpoint if available and log any errors. This should only be called on workers that have just executed an actor creation task. Args: actor: The actor to restore from a checkpoint. """ actor_id = self._worker.actor_id try: checkpoints = ray.actor.get_checkpoints_for_actor(actor_id) if len(checkpoints) > 0: # If we found previously saved checkpoints for this actor, # call the `load_checkpoint` callback. checkpoint_id = actor.load_checkpoint(actor_id, checkpoints) if checkpoint_id is not None: # Check that the returned checkpoint id is in the # `available_checkpoints` list. msg = ( "`load_checkpoint` must return a checkpoint id that " + "exists in the `available_checkpoints` list, or None.") assert any(checkpoint_id == checkpoint.checkpoint_id for checkpoint in checkpoints), msg # Notify raylet that this actor has been resumed from # a checkpoint. (self._worker.raylet_client. notify_actor_resumed_from_checkpoint( actor_id, checkpoint_id)) except Exception: # Checkpoint save or reload failed. Notify the driver. traceback_str = ray.utils.format_error_message( traceback.format_exc()) ray.utils.push_error_to_driver( self._worker, ray_constants.CHECKPOINT_PUSH_ERROR, traceback_str, driver_id=self._worker.task_driver_id)
def _restore_and_log_checkpoint(self, actor): """Restore an actor from a checkpoint if available and log any errors. This should only be called on workers that have just executed an actor creation task. Args: actor: The actor to restore from a checkpoint. """ actor_id = self._worker.actor_id try: checkpoints = ray.actor.get_checkpoints_for_actor(actor_id) if len(checkpoints) > 0: # If we found previously saved checkpoints for this actor, # call the `load_checkpoint` callback. checkpoint_id = actor.load_checkpoint(actor_id, checkpoints) if checkpoint_id is not None: # Check that the returned checkpoint id is in the # `available_checkpoints` list. msg = ( "`load_checkpoint` must return a checkpoint id that " + "exists in the `available_checkpoints` list, or None.") assert any(checkpoint_id == checkpoint.checkpoint_id for checkpoint in checkpoints), msg # Notify raylet that this actor has been resumed from # a checkpoint. (self._worker.raylet_client. notify_actor_resumed_from_checkpoint( actor_id, checkpoint_id)) except Exception: # Checkpoint save or reload failed. Notify the driver. traceback_str = ray.utils.format_error_message( traceback.format_exc()) ray.utils.push_error_to_driver( self._worker, ray_constants.CHECKPOINT_PUSH_ERROR, traceback_str, driver_id=self._worker.task_driver_id)
[ "Restore", "an", "actor", "from", "a", "checkpoint", "if", "available", "and", "log", "any", "errors", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L864-L901
[ "def", "_restore_and_log_checkpoint", "(", "self", ",", "actor", ")", ":", "actor_id", "=", "self", ".", "_worker", ".", "actor_id", "try", ":", "checkpoints", "=", "ray", ".", "actor", ".", "get_checkpoints_for_actor", "(", "actor_id", ")", "if", "len", "("...
4eade036a0505e244c976f36aaa2d64386b5129b
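The assertion above enforces the load_checkpoint contract: the returned id must come from the available checkpoints, or be None. A hedged, standalone sketch of a selection routine that satisfies that contract; the Checkpoint tuple and its field names are stand-ins for illustration, not Ray's actual checkpoint type:

from collections import namedtuple

Checkpoint = namedtuple("Checkpoint", ["checkpoint_id", "timestamp"])

def pick_latest_checkpoint(available_checkpoints):
    """Return the id of the newest available checkpoint, or None if empty.

    Returning an id outside `available_checkpoints` would trip the
    assertion in `_restore_and_log_checkpoint` above.
    """
    if not available_checkpoints:
        return None
    newest = max(available_checkpoints, key=lambda c: c.timestamp)
    return newest.checkpoint_id

cps = [Checkpoint("a", 100), Checkpoint("b", 250)]
assert pick_latest_checkpoint(cps) == "b"
assert pick_latest_checkpoint([]) is None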
train
_env_runner
This implements the common experience collection logic. Args: base_env (BaseEnv): env implementing BaseEnv. extra_batch_callback (fn): function to send extra batch data to. policies (dict): Map of policy ids to PolicyGraph instances. policy_mapping_fn (func): Function that maps agent ids to policy ids. This is called when an agent first enters the environment. The agent is then "bound" to the returned policy for the episode. unroll_length (int): Number of episode steps before `SampleBatch` is yielded. Set to infinity to yield complete episodes. horizon (int): Horizon of the episode. preprocessors (dict): Map of policy id to preprocessor for the observations prior to filtering. obs_filters (dict): Map of policy id to filter used to process observations for the policy. clip_rewards (bool): Whether to clip rewards before postprocessing. pack (bool): Whether to pack multiple episodes into each batch. This guarantees batches will be exactly `unroll_length` in size. clip_actions (bool): Whether to clip actions to the space range. callbacks (dict): User callbacks to run on episode events. tf_sess (Session|None): Optional tensorflow session to use for batching TF policy evaluations. perf_stats (PerfStats): Record perf stats into this object. soft_horizon (bool): Calculate rewards but don't reset the environment when the horizon is hit. Yields: rollout (SampleBatch): Object containing state, action, reward, terminal condition, and other fields as dictated by `policy`.
python/ray/rllib/evaluation/sampler.py
def _env_runner(base_env, extra_batch_callback, policies, policy_mapping_fn, unroll_length, horizon, preprocessors, obs_filters, clip_rewards, clip_actions, pack, callbacks, tf_sess, perf_stats, soft_horizon): """This implements the common experience collection logic. Args: base_env (BaseEnv): env implementing BaseEnv. extra_batch_callback (fn): function to send extra batch data to. policies (dict): Map of policy ids to PolicyGraph instances. policy_mapping_fn (func): Function that maps agent ids to policy ids. This is called when an agent first enters the environment. The agent is then "bound" to the returned policy for the episode. unroll_length (int): Number of episode steps before `SampleBatch` is yielded. Set to infinity to yield complete episodes. horizon (int): Horizon of the episode. preprocessors (dict): Map of policy id to preprocessor for the observations prior to filtering. obs_filters (dict): Map of policy id to filter used to process observations for the policy. clip_rewards (bool): Whether to clip rewards before postprocessing. pack (bool): Whether to pack multiple episodes into each batch. This guarantees batches will be exactly `unroll_length` in size. clip_actions (bool): Whether to clip actions to the space range. callbacks (dict): User callbacks to run on episode events. tf_sess (Session|None): Optional tensorflow session to use for batching TF policy evaluations. perf_stats (PerfStats): Record perf stats into this object. soft_horizon (bool): Calculate rewards but don't reset the environment when the horizon is hit. Yields: rollout (SampleBatch): Object containing state, action, reward, terminal condition, and other fields as dictated by `policy`. """ try: if not horizon: horizon = (base_env.get_unwrapped()[0].spec.max_episode_steps) except Exception: logger.debug("no episode horizon specified, assuming inf") if not horizon: horizon = float("inf") # Pool of batch builders, which can be shared across episodes to pack # trajectory data. batch_builder_pool = [] def get_batch_builder(): if batch_builder_pool: return batch_builder_pool.pop() else: return MultiAgentSampleBatchBuilder( policies, clip_rewards, callbacks.get("on_postprocess_traj")) def new_episode(): episode = MultiAgentEpisode(policies, policy_mapping_fn, get_batch_builder, extra_batch_callback) if callbacks.get("on_episode_start"): callbacks["on_episode_start"]({ "env": base_env, "policy": policies, "episode": episode, }) return episode active_episodes = defaultdict(new_episode) while True: perf_stats.iters += 1 t0 = time.time() # Get observations from all ready agents unfiltered_obs, rewards, dones, infos, off_policy_actions = \ base_env.poll() perf_stats.env_wait_time += time.time() - t0 if log_once("env_returns"): logger.info("Raw obs from env: {}".format( summarize(unfiltered_obs))) logger.info("Info return from env: {}".format(summarize(infos))) # Process observations and prepare for policy evaluation t1 = time.time() active_envs, to_eval, outputs = _process_observations( base_env, policies, batch_builder_pool, active_episodes, unfiltered_obs, rewards, dones, infos, off_policy_actions, horizon, preprocessors, obs_filters, unroll_length, pack, callbacks, soft_horizon) perf_stats.processing_time += time.time() - t1 for o in outputs: yield o # Do batched policy eval t2 = time.time() eval_results = _do_policy_eval(tf_sess, to_eval, policies, active_episodes) perf_stats.inference_time += time.time() - t2 # Process results and update episode state t3 = time.time() actions_to_send = _process_policy_eval_results( to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions) perf_stats.processing_time += time.time() - t3 # Return computed actions to ready envs. We also send to envs that have # taken off-policy actions; those envs are free to ignore the action. t4 = time.time() base_env.send_actions(actions_to_send) perf_stats.env_wait_time += time.time() - t4
def _env_runner(base_env, extra_batch_callback, policies, policy_mapping_fn, unroll_length, horizon, preprocessors, obs_filters, clip_rewards, clip_actions, pack, callbacks, tf_sess, perf_stats, soft_horizon): """This implements the common experience collection logic. Args: base_env (BaseEnv): env implementing BaseEnv. extra_batch_callback (fn): function to send extra batch data to. policies (dict): Map of policy ids to PolicyGraph instances. policy_mapping_fn (func): Function that maps agent ids to policy ids. This is called when an agent first enters the environment. The agent is then "bound" to the returned policy for the episode. unroll_length (int): Number of episode steps before `SampleBatch` is yielded. Set to infinity to yield complete episodes. horizon (int): Horizon of the episode. preprocessors (dict): Map of policy id to preprocessor for the observations prior to filtering. obs_filters (dict): Map of policy id to filter used to process observations for the policy. clip_rewards (bool): Whether to clip rewards before postprocessing. pack (bool): Whether to pack multiple episodes into each batch. This guarantees batches will be exactly `unroll_length` in size. clip_actions (bool): Whether to clip actions to the space range. callbacks (dict): User callbacks to run on episode events. tf_sess (Session|None): Optional tensorflow session to use for batching TF policy evaluations. perf_stats (PerfStats): Record perf stats into this object. soft_horizon (bool): Calculate rewards but don't reset the environment when the horizon is hit. Yields: rollout (SampleBatch): Object containing state, action, reward, terminal condition, and other fields as dictated by `policy`. """ try: if not horizon: horizon = (base_env.get_unwrapped()[0].spec.max_episode_steps) except Exception: logger.debug("no episode horizon specified, assuming inf") if not horizon: horizon = float("inf") # Pool of batch builders, which can be shared across episodes to pack # trajectory data. batch_builder_pool = [] def get_batch_builder(): if batch_builder_pool: return batch_builder_pool.pop() else: return MultiAgentSampleBatchBuilder( policies, clip_rewards, callbacks.get("on_postprocess_traj")) def new_episode(): episode = MultiAgentEpisode(policies, policy_mapping_fn, get_batch_builder, extra_batch_callback) if callbacks.get("on_episode_start"): callbacks["on_episode_start"]({ "env": base_env, "policy": policies, "episode": episode, }) return episode active_episodes = defaultdict(new_episode) while True: perf_stats.iters += 1 t0 = time.time() # Get observations from all ready agents unfiltered_obs, rewards, dones, infos, off_policy_actions = \ base_env.poll() perf_stats.env_wait_time += time.time() - t0 if log_once("env_returns"): logger.info("Raw obs from env: {}".format( summarize(unfiltered_obs))) logger.info("Info return from env: {}".format(summarize(infos))) # Process observations and prepare for policy evaluation t1 = time.time() active_envs, to_eval, outputs = _process_observations( base_env, policies, batch_builder_pool, active_episodes, unfiltered_obs, rewards, dones, infos, off_policy_actions, horizon, preprocessors, obs_filters, unroll_length, pack, callbacks, soft_horizon) perf_stats.processing_time += time.time() - t1 for o in outputs: yield o # Do batched policy eval t2 = time.time() eval_results = _do_policy_eval(tf_sess, to_eval, policies, active_episodes) perf_stats.inference_time += time.time() - t2 # Process results and update episode state t3 = time.time() actions_to_send = _process_policy_eval_results( to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions) perf_stats.processing_time += time.time() - t3 # Return computed actions to ready envs. We also send to envs that have # taken off-policy actions; those envs are free to ignore the action. t4 = time.time() base_env.send_actions(actions_to_send) perf_stats.env_wait_time += time.time() - t4
[ "This", "implements", "the", "common", "experience", "collection", "logic", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/evaluation/sampler.py#L230-L339
[ "def", "_env_runner", "(", "base_env", ",", "extra_batch_callback", ",", "policies", ",", "policy_mapping_fn", ",", "unroll_length", ",", "horizon", ",", "preprocessors", ",", "obs_filters", ",", "clip_rewards", ",", "clip_actions", ",", "pack", ",", "callbacks", ...
4eade036a0505e244c976f36aaa2d64386b5129b
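The batch_builder_pool in _env_runner is a simple free list: when an episode finishes, its builder is returned to the pool so a later episode can reuse it instead of allocating a new one. A generic, self-contained sketch of that pooling pattern; the Builder class here is a stand-in, not RLlib's MultiAgentSampleBatchBuilder:

class Builder:
    def __init__(self):
        self.rows = []

    def reset(self):
        self.rows.clear()

pool = []

def get_builder():
    # Reuse a pooled builder when one is available, otherwise allocate.
    if pool:
        b = pool.pop()
        b.reset()
        return b
    return Builder()

def release_builder(b):
    pool.append(b)

b1 = get_builder()
release_builder(b1)
b2 = get_builder()
assert b1 is b2  # the builder was recycled, not reallocated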
train
_process_observations
Record new data from the environment and prepare for policy evaluation. Returns: active_envs: set of non-terminated env ids to_eval: map of policy_id to list of agent PolicyEvalData outputs: list of metrics and samples to return from the sampler
python/ray/rllib/evaluation/sampler.py
def _process_observations(base_env, policies, batch_builder_pool, active_episodes, unfiltered_obs, rewards, dones, infos, off_policy_actions, horizon, preprocessors, obs_filters, unroll_length, pack, callbacks, soft_horizon): """Record new data from the environment and prepare for policy evaluation. Returns: active_envs: set of non-terminated env ids to_eval: map of policy_id to list of agent PolicyEvalData outputs: list of metrics and samples to return from the sampler """ active_envs = set() to_eval = defaultdict(list) outputs = [] # For each environment for env_id, agent_obs in unfiltered_obs.items(): new_episode = env_id not in active_episodes episode = active_episodes[env_id] if not new_episode: episode.length += 1 episode.batch_builder.count += 1 episode._add_agent_rewards(rewards[env_id]) if (episode.batch_builder.total() > max(1000, unroll_length * 10) and log_once("large_batch_warning")): logger.warning( "More than {} observations for {} env steps ".format( episode.batch_builder.total(), episode.batch_builder.count) + "are buffered in " "the sampler. If this is more than you expected, check that " "that you set a horizon on your environment correctly. Note " "that in multi-agent environments, `sample_batch_size` sets " "the batch size based on environment steps, not the steps of " "individual agents, which can result in unexpectedly large " "batches.") # Check episode termination conditions if dones[env_id]["__all__"] or episode.length >= horizon: hit_horizon = (episode.length >= horizon and not dones[env_id]["__all__"]) all_done = True atari_metrics = _fetch_atari_metrics(base_env) if atari_metrics is not None: for m in atari_metrics: outputs.append( m._replace(custom_metrics=episode.custom_metrics)) else: outputs.append( RolloutMetrics(episode.length, episode.total_reward, dict(episode.agent_rewards), episode.custom_metrics, {})) else: hit_horizon = False all_done = False active_envs.add(env_id) # For each agent in the environment for agent_id, raw_obs in agent_obs.items(): policy_id = episode.policy_for(agent_id) prep_obs = _get_or_raise(preprocessors, policy_id).transform(raw_obs) if log_once("prep_obs"): logger.info("Preprocessed obs: {}".format(summarize(prep_obs))) filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs) if log_once("filtered_obs"): logger.info("Filtered obs: {}".format(summarize(filtered_obs))) agent_done = bool(all_done or dones[env_id].get(agent_id)) if not agent_done: to_eval[policy_id].append( PolicyEvalData(env_id, agent_id, filtered_obs, infos[env_id].get(agent_id, {}), episode.rnn_state_for(agent_id), episode.last_action_for(agent_id), rewards[env_id][agent_id] or 0.0)) last_observation = episode.last_observation_for(agent_id) episode._set_last_observation(agent_id, filtered_obs) episode._set_last_raw_obs(agent_id, raw_obs) episode._set_last_info(agent_id, infos[env_id].get(agent_id, {})) # Record transition info if applicable if (last_observation is not None and infos[env_id].get( agent_id, {}).get("training_enabled", True)): episode.batch_builder.add_values( agent_id, policy_id, t=episode.length - 1, eps_id=episode.episode_id, agent_index=episode._agent_index(agent_id), obs=last_observation, actions=episode.last_action_for(agent_id), rewards=rewards[env_id][agent_id], prev_actions=episode.prev_action_for(agent_id), prev_rewards=episode.prev_reward_for(agent_id), dones=(False if (hit_horizon and soft_horizon) else agent_done), infos=infos[env_id].get(agent_id, {}), new_obs=filtered_obs, **episode.last_pi_info_for(agent_id)) # Invoke the step callback after the step is logged to the episode if callbacks.get("on_episode_step"): callbacks["on_episode_step"]({"env": base_env, "episode": episode}) # Cut the batch if we're not packing multiple episodes into one, # or if we've exceeded the requested batch size. if episode.batch_builder.has_pending_data(): if dones[env_id]["__all__"]: episode.batch_builder.check_missing_dones() if (all_done and not pack) or \ episode.batch_builder.count >= unroll_length: outputs.append(episode.batch_builder.build_and_reset(episode)) elif all_done: # Make sure postprocessor stays within one episode episode.batch_builder.postprocess_batch_so_far(episode) if all_done: # Handle episode termination batch_builder_pool.append(episode.batch_builder) if callbacks.get("on_episode_end"): callbacks["on_episode_end"]({ "env": base_env, "policy": policies, "episode": episode }) if hit_horizon and soft_horizon: episode.soft_reset() resetted_obs = agent_obs else: del active_episodes[env_id] resetted_obs = base_env.try_reset(env_id) if resetted_obs is None: # Reset not supported, drop this env from the ready list if horizon != float("inf"): raise ValueError( "Setting episode horizon requires reset() support " "from the environment.") elif resetted_obs != ASYNC_RESET_RETURN: # Creates a new episode if this is not async return # If reset is async, we will get its result in some future poll episode = active_episodes[env_id] for agent_id, raw_obs in resetted_obs.items(): policy_id = episode.policy_for(agent_id) policy = _get_or_raise(policies, policy_id) prep_obs = _get_or_raise(preprocessors, policy_id).transform(raw_obs) filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs) episode._set_last_observation(agent_id, filtered_obs) to_eval[policy_id].append( PolicyEvalData( env_id, agent_id, filtered_obs, episode.last_info_for(agent_id) or {}, episode.rnn_state_for(agent_id), np.zeros_like( _flatten_action(policy.action_space.sample())), 0.0)) return active_envs, to_eval, outputs
def _process_observations(base_env, policies, batch_builder_pool, active_episodes, unfiltered_obs, rewards, dones, infos, off_policy_actions, horizon, preprocessors, obs_filters, unroll_length, pack, callbacks, soft_horizon): """Record new data from the environment and prepare for policy evaluation. Returns: active_envs: set of non-terminated env ids to_eval: map of policy_id to list of agent PolicyEvalData outputs: list of metrics and samples to return from the sampler """ active_envs = set() to_eval = defaultdict(list) outputs = [] # For each environment for env_id, agent_obs in unfiltered_obs.items(): new_episode = env_id not in active_episodes episode = active_episodes[env_id] if not new_episode: episode.length += 1 episode.batch_builder.count += 1 episode._add_agent_rewards(rewards[env_id]) if (episode.batch_builder.total() > max(1000, unroll_length * 10) and log_once("large_batch_warning")): logger.warning( "More than {} observations for {} env steps ".format( episode.batch_builder.total(), episode.batch_builder.count) + "are buffered in " "the sampler. If this is more than you expected, check that " "that you set a horizon on your environment correctly. Note " "that in multi-agent environments, `sample_batch_size` sets " "the batch size based on environment steps, not the steps of " "individual agents, which can result in unexpectedly large " "batches.") # Check episode termination conditions if dones[env_id]["__all__"] or episode.length >= horizon: hit_horizon = (episode.length >= horizon and not dones[env_id]["__all__"]) all_done = True atari_metrics = _fetch_atari_metrics(base_env) if atari_metrics is not None: for m in atari_metrics: outputs.append( m._replace(custom_metrics=episode.custom_metrics)) else: outputs.append( RolloutMetrics(episode.length, episode.total_reward, dict(episode.agent_rewards), episode.custom_metrics, {})) else: hit_horizon = False all_done = False active_envs.add(env_id) # For each agent in the environment for agent_id, raw_obs in agent_obs.items(): policy_id = episode.policy_for(agent_id) prep_obs = _get_or_raise(preprocessors, policy_id).transform(raw_obs) if log_once("prep_obs"): logger.info("Preprocessed obs: {}".format(summarize(prep_obs))) filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs) if log_once("filtered_obs"): logger.info("Filtered obs: {}".format(summarize(filtered_obs))) agent_done = bool(all_done or dones[env_id].get(agent_id)) if not agent_done: to_eval[policy_id].append( PolicyEvalData(env_id, agent_id, filtered_obs, infos[env_id].get(agent_id, {}), episode.rnn_state_for(agent_id), episode.last_action_for(agent_id), rewards[env_id][agent_id] or 0.0)) last_observation = episode.last_observation_for(agent_id) episode._set_last_observation(agent_id, filtered_obs) episode._set_last_raw_obs(agent_id, raw_obs) episode._set_last_info(agent_id, infos[env_id].get(agent_id, {})) # Record transition info if applicable if (last_observation is not None and infos[env_id].get( agent_id, {}).get("training_enabled", True)): episode.batch_builder.add_values( agent_id, policy_id, t=episode.length - 1, eps_id=episode.episode_id, agent_index=episode._agent_index(agent_id), obs=last_observation, actions=episode.last_action_for(agent_id), rewards=rewards[env_id][agent_id], prev_actions=episode.prev_action_for(agent_id), prev_rewards=episode.prev_reward_for(agent_id), dones=(False if (hit_horizon and soft_horizon) else agent_done), infos=infos[env_id].get(agent_id, {}), new_obs=filtered_obs, **episode.last_pi_info_for(agent_id)) # Invoke the step callback after the step is logged to the episode if callbacks.get("on_episode_step"): callbacks["on_episode_step"]({"env": base_env, "episode": episode}) # Cut the batch if we're not packing multiple episodes into one, # or if we've exceeded the requested batch size. if episode.batch_builder.has_pending_data(): if dones[env_id]["__all__"]: episode.batch_builder.check_missing_dones() if (all_done and not pack) or \ episode.batch_builder.count >= unroll_length: outputs.append(episode.batch_builder.build_and_reset(episode)) elif all_done: # Make sure postprocessor stays within one episode episode.batch_builder.postprocess_batch_so_far(episode) if all_done: # Handle episode termination batch_builder_pool.append(episode.batch_builder) if callbacks.get("on_episode_end"): callbacks["on_episode_end"]({ "env": base_env, "policy": policies, "episode": episode }) if hit_horizon and soft_horizon: episode.soft_reset() resetted_obs = agent_obs else: del active_episodes[env_id] resetted_obs = base_env.try_reset(env_id) if resetted_obs is None: # Reset not supported, drop this env from the ready list if horizon != float("inf"): raise ValueError( "Setting episode horizon requires reset() support " "from the environment.") elif resetted_obs != ASYNC_RESET_RETURN: # Creates a new episode if this is not async return # If reset is async, we will get its result in some future poll episode = active_episodes[env_id] for agent_id, raw_obs in resetted_obs.items(): policy_id = episode.policy_for(agent_id) policy = _get_or_raise(policies, policy_id) prep_obs = _get_or_raise(preprocessors, policy_id).transform(raw_obs) filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs) episode._set_last_observation(agent_id, filtered_obs) to_eval[policy_id].append( PolicyEvalData( env_id, agent_id, filtered_obs, episode.last_info_for(agent_id) or {}, episode.rnn_state_for(agent_id), np.zeros_like( _flatten_action(policy.action_space.sample())), 0.0)) return active_envs, to_eval, outputs
[ "Record", "new", "data", "from", "the", "environment", "and", "prepare", "for", "policy", "evaluation", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/evaluation/sampler.py#L342-L505
[ "def", "_process_observations", "(", "base_env", ",", "policies", ",", "batch_builder_pool", ",", "active_episodes", ",", "unfiltered_obs", ",", "rewards", ",", "dones", ",", "infos", ",", "off_policy_actions", ",", "horizon", ",", "preprocessors", ",", "obs_filters...
4eade036a0505e244c976f36aaa2d64386b5129b
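The termination bookkeeping in _process_observations distinguishes the env reporting done from the horizon ending the episode, and with soft_horizon a horizon hit is not written out as a done flag. A small, dependency-free sketch of that decision logic (function names are illustrative, extracted from the control flow above):

def episode_termination(done_all, length, horizon):
    """Return (all_done, hit_horizon): the episode ends when the env is done
    or the horizon is reached; hit_horizon is True only when the horizon,
    not the environment itself, ended it."""
    if done_all or length >= horizon:
        return True, length >= horizon and not done_all
    return False, False

def recorded_done(agent_done, hit_horizon, soft_horizon):
    # With soft_horizon, reaching the horizon is not recorded as a done.
    return False if (hit_horizon and soft_horizon) else agent_done

assert episode_termination(True, 10, 100) == (True, False)
assert episode_termination(False, 100, 100) == (True, True)
assert recorded_done(True, True, True) is False
assert recorded_done(True, False, True) is True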
train
_do_policy_eval
Call compute actions on observation batches to get next actions. Returns: eval_results: dict of policy to compute_action() outputs.
python/ray/rllib/evaluation/sampler.py
def _do_policy_eval(tf_sess, to_eval, policies, active_episodes): """Call compute actions on observation batches to get next actions. Returns: eval_results: dict of policy to compute_action() outputs. """ eval_results = {} if tf_sess: builder = TFRunBuilder(tf_sess, "policy_eval") pending_fetches = {} else: builder = None if log_once("compute_actions_input"): logger.info("Inputs to compute_actions():\n\n{}\n".format( summarize(to_eval))) for policy_id, eval_data in to_eval.items(): rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data]) policy = _get_or_raise(policies, policy_id) if builder and (policy.compute_actions.__code__ is TFPolicyGraph.compute_actions.__code__): # TODO(ekl): how can we make info batch available to TF code? pending_fetches[policy_id] = policy._build_compute_actions( builder, [t.obs for t in eval_data], rnn_in_cols, prev_action_batch=[t.prev_action for t in eval_data], prev_reward_batch=[t.prev_reward for t in eval_data]) else: eval_results[policy_id] = policy.compute_actions( [t.obs for t in eval_data], rnn_in_cols, prev_action_batch=[t.prev_action for t in eval_data], prev_reward_batch=[t.prev_reward for t in eval_data], info_batch=[t.info for t in eval_data], episodes=[active_episodes[t.env_id] for t in eval_data]) if builder: for k, v in pending_fetches.items(): eval_results[k] = builder.get(v) if log_once("compute_actions_result"): logger.info("Outputs of compute_actions():\n\n{}\n".format( summarize(eval_results))) return eval_results
def _do_policy_eval(tf_sess, to_eval, policies, active_episodes): """Call compute actions on observation batches to get next actions. Returns: eval_results: dict of policy to compute_action() outputs. """ eval_results = {} if tf_sess: builder = TFRunBuilder(tf_sess, "policy_eval") pending_fetches = {} else: builder = None if log_once("compute_actions_input"): logger.info("Inputs to compute_actions():\n\n{}\n".format( summarize(to_eval))) for policy_id, eval_data in to_eval.items(): rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data]) policy = _get_or_raise(policies, policy_id) if builder and (policy.compute_actions.__code__ is TFPolicyGraph.compute_actions.__code__): # TODO(ekl): how can we make info batch available to TF code? pending_fetches[policy_id] = policy._build_compute_actions( builder, [t.obs for t in eval_data], rnn_in_cols, prev_action_batch=[t.prev_action for t in eval_data], prev_reward_batch=[t.prev_reward for t in eval_data]) else: eval_results[policy_id] = policy.compute_actions( [t.obs for t in eval_data], rnn_in_cols, prev_action_batch=[t.prev_action for t in eval_data], prev_reward_batch=[t.prev_reward for t in eval_data], info_batch=[t.info for t in eval_data], episodes=[active_episodes[t.env_id] for t in eval_data]) if builder: for k, v in pending_fetches.items(): eval_results[k] = builder.get(v) if log_once("compute_actions_result"): logger.info("Outputs of compute_actions():\n\n{}\n".format( summarize(eval_results))) return eval_results
[ "Call", "compute", "actions", "on", "observation", "batches", "to", "get", "next", "actions", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/evaluation/sampler.py#L508-L554
[ "def", "_do_policy_eval", "(", "tf_sess", ",", "to_eval", ",", "policies", ",", "active_episodes", ")", ":", "eval_results", "=", "{", "}", "if", "tf_sess", ":", "builder", "=", "TFRunBuilder", "(", "tf_sess", ",", "\"policy_eval\"", ")", "pending_fetches", "=...
4eade036a0505e244c976f36aaa2d64386b5129b
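The key idea in _do_policy_eval is that pending per-agent evaluations are grouped by policy so each policy receives one batched compute_actions call. A minimal sketch of that group-then-batch pattern with stand-in "policies" (plain functions over a list of observations; the ids and values are illustrative):

from collections import defaultdict

# Pending (policy_id, obs) pairs, as they might arrive from many agents.
pending = [("ppo", 1), ("dqn", 2), ("ppo", 3)]

to_eval = defaultdict(list)
for policy_id, obs in pending:
    to_eval[policy_id].append(obs)

# Stand-in "policies": each maps a *batch* of observations to actions.
policies = {"ppo": lambda batch: [o * 10 for o in batch],
            "dqn": lambda batch: [o + 100 for o in batch]}

eval_results = {pid: policies[pid](batch) for pid, batch in to_eval.items()}
assert eval_results == {"ppo": [10, 30], "dqn": [102]}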
train
_process_policy_eval_results
Process the output of policy neural network evaluation. Records policy evaluation results into the given episode objects and returns replies to send back to agents in the env. Returns: actions_to_send: nested dict of env id -> agent id -> agent replies.
python/ray/rllib/evaluation/sampler.py
def _process_policy_eval_results(to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions): """Process the output of policy neural network evaluation. Records policy evaluation results into the given episode objects and returns replies to send back to agents in the env. Returns: actions_to_send: nested dict of env id -> agent id -> agent replies. """ actions_to_send = defaultdict(dict) for env_id in active_envs: actions_to_send[env_id] = {} # at minimum send empty dict for policy_id, eval_data in to_eval.items(): rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data]) actions, rnn_out_cols, pi_info_cols = eval_results[policy_id] if len(rnn_in_cols) != len(rnn_out_cols): raise ValueError("Length of RNN in did not match RNN out, got: " "{} vs {}".format(rnn_in_cols, rnn_out_cols)) # Add RNN state info for f_i, column in enumerate(rnn_in_cols): pi_info_cols["state_in_{}".format(f_i)] = column for f_i, column in enumerate(rnn_out_cols): pi_info_cols["state_out_{}".format(f_i)] = column # Save output rows actions = _unbatch_tuple_actions(actions) policy = _get_or_raise(policies, policy_id) for i, action in enumerate(actions): env_id = eval_data[i].env_id agent_id = eval_data[i].agent_id if clip_actions: actions_to_send[env_id][agent_id] = clip_action( action, policy.action_space) else: actions_to_send[env_id][agent_id] = action episode = active_episodes[env_id] episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols]) episode._set_last_pi_info( agent_id, {k: v[i] for k, v in pi_info_cols.items()}) if env_id in off_policy_actions and \ agent_id in off_policy_actions[env_id]: episode._set_last_action(agent_id, off_policy_actions[env_id][agent_id]) else: episode._set_last_action(agent_id, action) return actions_to_send
def _process_policy_eval_results(to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions): """Process the output of policy neural network evaluation. Records policy evaluation results into the given episode objects and returns replies to send back to agents in the env. Returns: actions_to_send: nested dict of env id -> agent id -> agent replies. """ actions_to_send = defaultdict(dict) for env_id in active_envs: actions_to_send[env_id] = {} # at minimum send empty dict for policy_id, eval_data in to_eval.items(): rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data]) actions, rnn_out_cols, pi_info_cols = eval_results[policy_id] if len(rnn_in_cols) != len(rnn_out_cols): raise ValueError("Length of RNN in did not match RNN out, got: " "{} vs {}".format(rnn_in_cols, rnn_out_cols)) # Add RNN state info for f_i, column in enumerate(rnn_in_cols): pi_info_cols["state_in_{}".format(f_i)] = column for f_i, column in enumerate(rnn_out_cols): pi_info_cols["state_out_{}".format(f_i)] = column # Save output rows actions = _unbatch_tuple_actions(actions) policy = _get_or_raise(policies, policy_id) for i, action in enumerate(actions): env_id = eval_data[i].env_id agent_id = eval_data[i].agent_id if clip_actions: actions_to_send[env_id][agent_id] = clip_action( action, policy.action_space) else: actions_to_send[env_id][agent_id] = action episode = active_episodes[env_id] episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols]) episode._set_last_pi_info( agent_id, {k: v[i] for k, v in pi_info_cols.items()}) if env_id in off_policy_actions and \ agent_id in off_policy_actions[env_id]: episode._set_last_action(agent_id, off_policy_actions[env_id][agent_id]) else: episode._set_last_action(agent_id, action) return actions_to_send
[ "Process", "the", "output", "of", "policy", "neural", "network", "evaluation", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/evaluation/sampler.py#L557-L607
[ "def", "_process_policy_eval_results", "(", "to_eval", ",", "eval_results", ",", "active_episodes", ",", "active_envs", ",", "off_policy_actions", ",", "policies", ",", "clip_actions", ")", ":", "actions_to_send", "=", "defaultdict", "(", "dict", ")", "for", "env_id...
4eade036a0505e244c976f36aaa2d64386b5129b
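When clip_actions is set, _process_policy_eval_results clamps each action to the policy's action space before sending it back to the env. A hedged numpy sketch of the usual Box-space clamping (this is an illustration of the behavior, not RLlib's clip_action itself):

import numpy as np

def clip_action_to_box(action, low, high):
    # Element-wise clamp into [low, high], the standard Box-space behavior.
    return np.clip(action, low, high)

a = np.array([-2.0, 0.5, 3.0])
clipped = clip_action_to_box(a, low=-1.0, high=1.0)
assert np.allclose(clipped, [-1.0, 0.5, 1.0])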
train
_fetch_atari_metrics
Atari games have multiple logical episodes, one per life. However for metrics reporting we count full episodes all lives included.
python/ray/rllib/evaluation/sampler.py
def _fetch_atari_metrics(base_env): """Atari games have multiple logical episodes, one per life. However for metrics reporting we count full episodes all lives included. """ unwrapped = base_env.get_unwrapped() if not unwrapped: return None atari_out = [] for u in unwrapped: monitor = get_wrapper_by_cls(u, MonitorEnv) if not monitor: return None for eps_rew, eps_len in monitor.next_episode_results(): atari_out.append(RolloutMetrics(eps_len, eps_rew, {}, {}, {})) return atari_out
def _fetch_atari_metrics(base_env): """Atari games have multiple logical episodes, one per life. However for metrics reporting we count full episodes all lives included. """ unwrapped = base_env.get_unwrapped() if not unwrapped: return None atari_out = [] for u in unwrapped: monitor = get_wrapper_by_cls(u, MonitorEnv) if not monitor: return None for eps_rew, eps_len in monitor.next_episode_results(): atari_out.append(RolloutMetrics(eps_len, eps_rew, {}, {}, {})) return atari_out
[ "Atari", "games", "have", "multiple", "logical", "episodes", "one", "per", "life", "." ]
ray-project/ray
python
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/rllib/evaluation/sampler.py#L610-L625
[ "def", "_fetch_atari_metrics", "(", "base_env", ")", ":", "unwrapped", "=", "base_env", ".", "get_unwrapped", "(", ")", "if", "not", "unwrapped", ":", "return", "None", "atari_out", "=", "[", "]", "for", "u", "in", "unwrapped", ":", "monitor", "=", "get_wr...
4eade036a0505e244c976f36aaa2d64386b5129b
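get_wrapper_by_cls presumably walks a gym-style wrapper chain looking for a wrapper of the given class, returning None if it is absent (which is why _fetch_atari_metrics bails out for non-Atari envs). A dependency-free sketch of that traversal with toy wrapper classes; the class names here are assumptions for illustration:

class Wrapper:
    def __init__(self, env):
        self.env = env

class MonitorLike(Wrapper):
    pass

def get_wrapper_by_cls(env, cls):
    # Walk the .env chain until an instance of cls is found.
    while env is not None:
        if isinstance(env, cls):
            return env
        env = getattr(env, "env", None)
    return None

base = object()
wrapped = Wrapper(MonitorLike(base))
assert isinstance(get_wrapper_by_cls(wrapped, MonitorLike), MonitorLike)
assert get_wrapper_by_cls(wrapped, dict) is None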
train
compare_version
Compare two version number strings of the form W.X.Y.Z. The numbers are compared most-significant to least-significant. For example, 12.345.67.89 > 2.987.88.99. Args: a: First version number string to compare b: Second version number string to compare Returns: 0 if the numbers are identical, a positive number if 'a' is larger, and a negative number if 'b' is larger.
android/jni/msbuild.py
def compare_version(a, b): """Compare two version number strings of the form W.X.Y.Z. The numbers are compared most-significant to least-significant. For example, 12.345.67.89 > 2.987.88.99. Args: a: First version number string to compare b: Second version number string to compare Returns: 0 if the numbers are identical, a positive number if 'a' is larger, and a negative number if 'b' is larger. """ aa = string.split(a, ".") bb = string.split(b, ".") for i in range(0, 4): if aa[i] != bb[i]: return cmp(int(aa[i]), int(bb[i])) return 0
def compare_version(a, b): """Compare two version number strings of the form W.X.Y.Z. The numbers are compared most-significant to least-significant. For example, 12.345.67.89 > 2.987.88.99. Args: a: First version number string to compare b: Second version number string to compare Returns: 0 if the numbers are identical, a positive number if 'a' is larger, and a negative number if 'b' is larger. """ aa = string.split(a, ".") bb = string.split(b, ".") for i in range(0, 4): if aa[i] != bb[i]: return cmp(int(aa[i]), int(bb[i])) return 0
[ "Compare", "two", "version", "number", "strings", "of", "the", "form", "W", ".", "X", ".", "Y", ".", "Z", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/android/jni/msbuild.py#L37-L56
[ "def", "compare_version", "(", "a", ",", "b", ")", ":", "aa", "=", "string", ".", "split", "(", "a", ",", "\".\"", ")", "bb", "=", "string", ".", "split", "(", "b", ",", "\".\"", ")", "for", "i", "in", "range", "(", "0", ",", "4", ")", ":", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
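The compare_version snippet above is Python 2 only: string.split and cmp were both removed in Python 3. A hedged Python 3 equivalent with the same most-significant-first semantics and the same negative/zero/positive return convention:

def compare_version(a, b):
    """Return <0, 0, or >0 as version string a is less, equal, or greater."""
    aa = [int(x) for x in a.split(".")]
    bb = [int(x) for x in b.split(".")]
    # List comparison is already most-significant-first, so emulate cmp().
    return (aa > bb) - (aa < bb)

assert compare_version("12.345.67.89", "2.987.88.99") > 0
assert compare_version("1.2.3.4", "1.2.3.4") == 0
assert compare_version("1.2.3.4", "1.10.0.0") < 0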
train
FlatbuffersConan.configure_cmake
Create CMake instance and execute configure step
conanfile.py
def configure_cmake(self): """Create CMake instance and execute configure step """ cmake = CMake(self) cmake.definitions["FLATBUFFERS_BUILD_TESTS"] = False cmake.definitions["FLATBUFFERS_BUILD_SHAREDLIB"] = self.options.shared cmake.definitions["FLATBUFFERS_BUILD_FLATLIB"] = not self.options.shared cmake.configure() return cmake
def configure_cmake(self): """Create CMake instance and execute configure step """ cmake = CMake(self) cmake.definitions["FLATBUFFERS_BUILD_TESTS"] = False cmake.definitions["FLATBUFFERS_BUILD_SHAREDLIB"] = self.options.shared cmake.definitions["FLATBUFFERS_BUILD_FLATLIB"] = not self.options.shared cmake.configure() return cmake
[ "Create", "CMake", "instance", "and", "execute", "configure", "step" ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/conanfile.py#L38-L46
[ "def", "configure_cmake", "(", "self", ")", ":", "cmake", "=", "CMake", "(", "self", ")", "cmake", ".", "definitions", "[", "\"FLATBUFFERS_BUILD_TESTS\"", "]", "=", "False", "cmake", ".", "definitions", "[", "\"FLATBUFFERS_BUILD_SHAREDLIB\"", "]", "=", "self", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
FlatbuffersConan.package
Copy Flatbuffers' artifacts to package folder
conanfile.py
def package(self): """Copy Flatbuffers' artifacts to package folder """ cmake = self.configure_cmake() cmake.install() self.copy(pattern="LICENSE.txt", dst="licenses") self.copy(pattern="FindFlatBuffers.cmake", dst=os.path.join("lib", "cmake", "flatbuffers"), src="CMake") self.copy(pattern="flathash*", dst="bin", src="bin") self.copy(pattern="flatc*", dst="bin", src="bin") if self.settings.os == "Windows" and self.options.shared: if self.settings.compiler == "Visual Studio": shutil.move(os.path.join(self.package_folder, "lib", "%s.dll" % self.name), os.path.join(self.package_folder, "bin", "%s.dll" % self.name)) elif self.settings.compiler == "gcc": shutil.move(os.path.join(self.package_folder, "lib", "lib%s.dll" % self.name), os.path.join(self.package_folder, "bin", "lib%s.dll" % self.name))
def package(self): """Copy Flatbuffers' artifacts to package folder """ cmake = self.configure_cmake() cmake.install() self.copy(pattern="LICENSE.txt", dst="licenses") self.copy(pattern="FindFlatBuffers.cmake", dst=os.path.join("lib", "cmake", "flatbuffers"), src="CMake") self.copy(pattern="flathash*", dst="bin", src="bin") self.copy(pattern="flatc*", dst="bin", src="bin") if self.settings.os == "Windows" and self.options.shared: if self.settings.compiler == "Visual Studio": shutil.move(os.path.join(self.package_folder, "lib", "%s.dll" % self.name), os.path.join(self.package_folder, "bin", "%s.dll" % self.name)) elif self.settings.compiler == "gcc": shutil.move(os.path.join(self.package_folder, "lib", "lib%s.dll" % self.name), os.path.join(self.package_folder, "bin", "lib%s.dll" % self.name))
[ "Copy", "Flatbuffers", "artifacts", "to", "package", "folder" ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/conanfile.py#L54-L69
[ "def", "package", "(", "self", ")", ":", "cmake", "=", "self", ".", "configure_cmake", "(", ")", "cmake", ".", "install", "(", ")", "self", ".", "copy", "(", "pattern", "=", "\"LICENSE.txt\"", ",", "dst", "=", "\"licenses\"", ")", "self", ".", "copy", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
FlatbuffersConan.package_info
Collect built libraries names and solve flatc path.
conanfile.py
def package_info(self): """Collect built libraries names and solve flatc path. """ self.cpp_info.libs = tools.collect_libs(self) self.user_info.flatc = os.path.join(self.package_folder, "bin", "flatc")
def package_info(self): """Collect built libraries names and solve flatc path. """ self.cpp_info.libs = tools.collect_libs(self) self.user_info.flatc = os.path.join(self.package_folder, "bin", "flatc")
[ "Collect", "built", "libraries", "names", "and", "solve", "flatc", "path", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/conanfile.py#L71-L75
[ "def", "package_info", "(", "self", ")", ":", "self", ".", "cpp_info", ".", "libs", "=", "tools", ".", "collect_libs", "(", "self", ")", "self", ".", "user_info", ".", "flatc", "=", "os", ".", "path", ".", "join", "(", "self", ".", "package_folder", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Table.Offset
Offset provides access into the Table's vtable. Deprecated fields are ignored by checking the vtable's length.
python/flatbuffers/table.py
def Offset(self, vtableOffset): """Offset provides access into the Table's vtable. Deprecated fields are ignored by checking the vtable's length.""" vtable = self.Pos - self.Get(N.SOffsetTFlags, self.Pos) vtableEnd = self.Get(N.VOffsetTFlags, vtable) if vtableOffset < vtableEnd: return self.Get(N.VOffsetTFlags, vtable + vtableOffset) return 0
def Offset(self, vtableOffset): """Offset provides access into the Table's vtable. Deprecated fields are ignored by checking the vtable's length.""" vtable = self.Pos - self.Get(N.SOffsetTFlags, self.Pos) vtableEnd = self.Get(N.VOffsetTFlags, vtable) if vtableOffset < vtableEnd: return self.Get(N.VOffsetTFlags, vtable + vtableOffset) return 0
[ "Offset", "provides", "access", "into", "the", "Table", "s", "vtable", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L32-L41
[ "def", "Offset", "(", "self", ",", "vtableOffset", ")", ":", "vtable", "=", "self", ".", "Pos", "-", "self", ".", "Get", "(", "N", ".", "SOffsetTFlags", ",", "self", ".", "Pos", ")", "vtableEnd", "=", "self", ".", "Get", "(", "N", ".", "VOffsetTFla...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
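Table.Offset resolves a field through the vtable: the table position holds a signed 32-bit offset back to its vtable, the vtable's first uint16 is its own byte length, and a zero (or out-of-range) entry means the field is absent or deprecated. A self-contained demonstration of that exact lookup on a hand-built little-endian buffer; the layout follows the FlatBuffers wire format as implied by the method above, and the field values are illustrative:

import struct

buf = bytearray(16)
# vtable at byte 0: [vtable_len=8][object_size=8][field0_off=4][field1_off=0]
struct.pack_into("<4H", buf, 0, 8, 8, 4, 0)
# table at byte 8: int32 soffset back to the vtable (pos - soffset == vtable)
struct.pack_into("<i", buf, 8, 8)

def offset(buf, pos, vtable_offset):
    vtable = pos - struct.unpack_from("<i", buf, pos)[0]
    vtable_end = struct.unpack_from("<H", buf, vtable)[0]
    if vtable_offset < vtable_end:
        return struct.unpack_from("<H", buf, vtable + vtable_offset)[0]
    return 0

assert offset(buf, 8, 4) == 4  # field 0 is present at table+4
assert offset(buf, 8, 6) == 0  # field 1 was written as absent
assert offset(buf, 8, 8) == 0  # slot beyond the vtable: default applies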
train
Table.Indirect
Indirect retrieves the relative offset stored at `offset`.
python/flatbuffers/table.py
def Indirect(self, off): """Indirect retrieves the relative offset stored at `offset`.""" N.enforce_number(off, N.UOffsetTFlags) return off + encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
def Indirect(self, off): """Indirect retrieves the relative offset stored at `offset`.""" N.enforce_number(off, N.UOffsetTFlags) return off + encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
[ "Indirect", "retrieves", "the", "relative", "offset", "stored", "at", "offset", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L43-L46
[ "def", "Indirect", "(", "self", ",", "off", ")", ":", "N", ".", "enforce_number", "(", "off", ",", "N", ".", "UOffsetTFlags", ")", "return", "off", "+", "encode", ".", "Get", "(", "N", ".", "UOffsetTFlags", ".", "packer_type", ",", "self", ".", "Byte...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Table.String
String gets a string from data stored inside the flatbuffer.
python/flatbuffers/table.py
def String(self, off): """String gets a string from data stored inside the flatbuffer.""" N.enforce_number(off, N.UOffsetTFlags) off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) start = off + N.UOffsetTFlags.bytewidth length = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) return bytes(self.Bytes[start:start+length])
def String(self, off): """String gets a string from data stored inside the flatbuffer.""" N.enforce_number(off, N.UOffsetTFlags) off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) start = off + N.UOffsetTFlags.bytewidth length = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) return bytes(self.Bytes[start:start+length])
[ "String", "gets", "a", "string", "from", "data", "stored", "inside", "the", "flatbuffer", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L48-L54
[ "def", "String", "(", "self", ",", "off", ")", ":", "N", ".", "enforce_number", "(", "off", ",", "N", ".", "UOffsetTFlags", ")", "off", "+=", "encode", ".", "Get", "(", "N", ".", "UOffsetTFlags", ".", "packer_type", ",", "self", ".", "Bytes", ",", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
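Table.String is a two-hop decode: follow the uoffset stored at off, then read a [uint32 length][bytes] payload. A hand-built demonstration of the same steps (layout mirrors the method above; the string value is illustrative):

import struct

# At byte 0: uoffset 4 -> string object at byte 4: [len=5][b"hello"]
buf = struct.pack("<I", 4) + struct.pack("<I", 5) + b"hello"

def read_string(buf, off):
    off += struct.unpack_from("<I", buf, off)[0]   # follow the uoffset
    length = struct.unpack_from("<I", buf, off)[0]
    start = off + 4                                # skip the length word
    return bytes(buf[start:start + length])

assert read_string(buf, 0) == b"hello"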
train
Table.VectorLen
VectorLen retrieves the length of the vector whose offset is stored at "off" in this object.
python/flatbuffers/table.py
def VectorLen(self, off): """VectorLen retrieves the length of the vector whose offset is stored at "off" in this object.""" N.enforce_number(off, N.UOffsetTFlags) off += self.Pos off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) return ret
def VectorLen(self, off): """VectorLen retrieves the length of the vector whose offset is stored at "off" in this object.""" N.enforce_number(off, N.UOffsetTFlags) off += self.Pos off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off) return ret
[ "VectorLen", "retrieves", "the", "length", "of", "the", "vector", "whose", "offset", "is", "stored", "at", "off", "in", "this", "object", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L56-L64
[ "def", "VectorLen", "(", "self", ",", "off", ")", ":", "N", ".", "enforce_number", "(", "off", ",", "N", ".", "UOffsetTFlags", ")", "off", "+=", "self", ".", "Pos", "off", "+=", "encode", ".", "Get", "(", "N", ".", "UOffsetTFlags", ".", "packer_type"...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Table.Vector
Vector retrieves the start of data of the vector whose offset is stored at "off" in this object.
python/flatbuffers/table.py
def Vector(self, off): """Vector retrieves the start of data of the vector whose offset is stored at "off" in this object.""" N.enforce_number(off, N.UOffsetTFlags) off += self.Pos x = off + self.Get(N.UOffsetTFlags, off) # data starts after metadata containing the vector length x += N.UOffsetTFlags.bytewidth return x
def Vector(self, off): """Vector retrieves the start of data of the vector whose offset is stored at "off" in this object.""" N.enforce_number(off, N.UOffsetTFlags) off += self.Pos x = off + self.Get(N.UOffsetTFlags, off) # data starts after metadata containing the vector length x += N.UOffsetTFlags.bytewidth return x
[ "Vector", "retrieves", "the", "start", "of", "data", "of", "the", "vector", "whose", "offset", "is", "stored", "at", "off", "in", "this", "object", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L66-L75
[ "def", "Vector", "(", "self", ",", "off", ")", ":", "N", ".", "enforce_number", "(", "off", ",", "N", ".", "UOffsetTFlags", ")", "off", "+=", "self", ".", "Pos", "x", "=", "off", "+", "self", ".", "Get", "(", "N", ".", "UOffsetTFlags", ",", "off"...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
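VectorLen and Vector decode the same [uint32 count][elements] layout: follow the uoffset stored in the table slot, read the count, and the element data starts one uoffset-width past the count. A self-contained sketch over a hand-built int32 vector; unlike the methods above, the table position is folded directly into off here for brevity:

import struct

# At byte 0: uoffset 4 -> vector at byte 4: [count=3][10, 20, 30] as int32s
buf = struct.pack("<I", 4) + struct.pack("<I", 3) + struct.pack("<3i", 10, 20, 30)

def vector_len(buf, off):
    off += struct.unpack_from("<I", buf, off)[0]
    return struct.unpack_from("<I", buf, off)[0]

def vector_data_start(buf, off):
    start = off + struct.unpack_from("<I", buf, off)[0]
    return start + 4  # element data begins after the length word

n = vector_len(buf, 0)
start = vector_data_start(buf, 0)
assert n == 3
assert list(struct.unpack_from("<3i", buf, start)) == [10, 20, 30]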
train
Table.Union
Union initializes any Table-derived type to point to the union at the given offset.
python/flatbuffers/table.py
def Union(self, t2, off): """Union initializes any Table-derived type to point to the union at the given offset.""" assert type(t2) is Table N.enforce_number(off, N.UOffsetTFlags) off += self.Pos t2.Pos = off + self.Get(N.UOffsetTFlags, off) t2.Bytes = self.Bytes
def Union(self, t2, off): """Union initializes any Table-derived type to point to the union at the given offset.""" assert type(t2) is Table N.enforce_number(off, N.UOffsetTFlags) off += self.Pos t2.Pos = off + self.Get(N.UOffsetTFlags, off) t2.Bytes = self.Bytes
[ "Union", "initializes", "any", "Table", "-", "derived", "type", "to", "point", "to", "the", "union", "at", "the", "given", "offset", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L77-L85
[ "def", "Union", "(", "self", ",", "t2", ",", "off", ")", ":", "assert", "type", "(", "t2", ")", "is", "Table", "N", ".", "enforce_number", "(", "off", ",", "N", ".", "UOffsetTFlags", ")", "off", "+=", "self", ".", "Pos", "t2", ".", "Pos", "=", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Table.Get
Get retrieves a value of the type specified by `flags` at the given offset.
python/flatbuffers/table.py
def Get(self, flags, off): """ Get retrieves a value of the type specified by `flags` at the given offset. """ N.enforce_number(off, N.UOffsetTFlags) return flags.py_type(encode.Get(flags.packer_type, self.Bytes, off))
def Get(self, flags, off): """ Get retrieves a value of the type specified by `flags` at the given offset. """ N.enforce_number(off, N.UOffsetTFlags) return flags.py_type(encode.Get(flags.packer_type, self.Bytes, off))
[ "Get", "retrieves", "a", "value", "of", "the", "type", "specified", "by", "flags", "at", "the", "given", "offset", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L87-L93
[ "def", "Get", "(", "self", ",", "flags", ",", "off", ")", ":", "N", ".", "enforce_number", "(", "off", ",", "N", ".", "UOffsetTFlags", ")", "return", "flags", ".", "py_type", "(", "encode", ".", "Get", "(", "flags", ".", "packer_type", ",", "self", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Table.GetVectorAsNumpy
GetVectorAsNumpy returns the vector that starts at `Vector(off)` as a numpy array with the type specified by `flags`. The array is a `view` into Bytes, so modifying the returned array will modify Bytes in place.
python/flatbuffers/table.py
def GetVectorAsNumpy(self, flags, off): """ GetVectorAsNumpy returns the vector that starts at `Vector(off)` as a numpy array with the type specified by `flags`. The array is a `view` into Bytes, so modifying the returned array will modify Bytes in place. """ offset = self.Vector(off) length = self.VectorLen(off) # TODO: length accounts for bytewidth, right? numpy_dtype = N.to_numpy_type(flags) return encode.GetVectorAsNumpy(numpy_dtype, self.Bytes, length, offset)
def GetVectorAsNumpy(self, flags, off): """ GetVectorAsNumpy returns the vector that starts at `Vector(off)` as a numpy array with the type specified by `flags`. The array is a `view` into Bytes, so modifying the returned array will modify Bytes in place. """ offset = self.Vector(off) length = self.VectorLen(off) # TODO: length accounts for bytewidth, right? numpy_dtype = N.to_numpy_type(flags) return encode.GetVectorAsNumpy(numpy_dtype, self.Bytes, length, offset)
[ "GetVectorAsNumpy", "returns", "the", "vector", "that", "starts", "at", "Vector", "(", "off", ")", "as", "a", "numpy", "array", "with", "the", "type", "specified", "by", "flags", ".", "The", "array", "is", "a", "view", "into", "Bytes", "so", "modifying", ...
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L104-L114
[ "def", "GetVectorAsNumpy", "(", "self", ",", "flags", ",", "off", ")", ":", "offset", "=", "self", ".", "Vector", "(", "off", ")", "length", "=", "self", ".", "VectorLen", "(", "off", ")", "# TODO: length accounts for bytewidth, right?", "numpy_dtype", "=", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Table.GetVOffsetTSlot
GetVOffsetTSlot retrieves the VOffsetT that the given vtable location points to. If the vtable value is zero, the default value `d` will be returned.
python/flatbuffers/table.py
def GetVOffsetTSlot(self, slot, d): """ GetVOffsetTSlot retrieves the VOffsetT that the given vtable location points to. If the vtable value is zero, the default value `d` will be returned. """ N.enforce_number(slot, N.VOffsetTFlags) N.enforce_number(d, N.VOffsetTFlags) off = self.Offset(slot) if off == 0: return d return off
def GetVOffsetTSlot(self, slot, d): """ GetVOffsetTSlot retrieves the VOffsetT that the given vtable location points to. If the vtable value is zero, the default value `d` will be returned. """ N.enforce_number(slot, N.VOffsetTFlags) N.enforce_number(d, N.VOffsetTFlags) off = self.Offset(slot) if off == 0: return d return off
[ "GetVOffsetTSlot", "retrieves", "the", "VOffsetT", "that", "the", "given", "vtable", "location", "points", "to", ".", "If", "the", "vtable", "value", "is", "zero", "the", "default", "value", "d", "will", "be", "returned", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L116-L129
[ "def", "GetVOffsetTSlot", "(", "self", ",", "slot", ",", "d", ")", ":", "N", ".", "enforce_number", "(", "slot", ",", "N", ".", "VOffsetTFlags", ")", "N", ".", "enforce_number", "(", "d", ",", "N", ".", "VOffsetTFlags", ")", "off", "=", "self", ".", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
GetVectorAsNumpy
GetVecAsNumpy decodes values starting at buf[head] as `numpy_type`, where `numpy_type` is a numpy dtype.
python/flatbuffers/encode.py
def GetVectorAsNumpy(numpy_type, buf, count, offset): """ GetVecAsNumpy decodes values starting at buf[head] as `numpy_type`, where `numpy_type` is a numpy dtype. """ if np is not None: # TODO: could set .flags.writeable = False to make users jump through # hoops before modifying... return np.frombuffer(buf, dtype=numpy_type, count=count, offset=offset) else: raise NumpyRequiredForThisFeature('Numpy was not found.')
def GetVectorAsNumpy(numpy_type, buf, count, offset): """ GetVecAsNumpy decodes values starting at buf[head] as `numpy_type`, where `numpy_type` is a numpy dtype. """ if np is not None: # TODO: could set .flags.writeable = False to make users jump through # hoops before modifying... return np.frombuffer(buf, dtype=numpy_type, count=count, offset=offset) else: raise NumpyRequiredForThisFeature('Numpy was not found.')
[ "GetVecAsNumpy", "decodes", "values", "starting", "at", "buf", "[", "head", "]", "as", "numpy_type", "where", "numpy_type", "is", "a", "numpy", "dtype", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/encode.py#L27-L35
[ "def", "GetVectorAsNumpy", "(", "numpy_type", ",", "buf", ",", "count", ",", "offset", ")", ":", "if", "np", "is", "not", "None", ":", "# TODO: could set .flags.writeable = False to make users jump through", "# hoops before modifying...", "return", "np", ".", "fro...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
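np.frombuffer with count and offset produces a zero-copy view over the buffer, which is exactly why the TODO above notes that mutating the returned array writes through to Bytes. A short demonstration (requires numpy; the values are illustrative):

import struct
import numpy as np

buf = bytearray(struct.pack("<4i", 1, 2, 3, 4))
view = np.frombuffer(buf, dtype=np.int32, count=2, offset=4)
assert view.tolist() == [2, 3]

view[0] = 99  # writes through to the backing bytearray
assert struct.unpack_from("<i", buf, 4)[0] == 99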
train
Write
Write encodes `n` at buf[head] using `packer_type`.
python/flatbuffers/encode.py
def Write(packer_type, buf, head, n): """ Write encodes `n` at buf[head] using `packer_type`. """ packer_type.pack_into(buf, head, n)
def Write(packer_type, buf, head, n): """ Write encodes `n` at buf[head] using `packer_type`. """ packer_type.pack_into(buf, head, n)
[ "Write", "encodes", "n", "at", "buf", "[", "head", "]", "using", "packer_type", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/encode.py#L38-L40
[ "def", "Write", "(", "packer_type", ",", "buf", ",", "head", ",", "n", ")", ":", "packer_type", ".", "pack_into", "(", "buf", ",", "head", ",", "n", ")" ]
6cc30b3272d79c85db7d4871ac0aa69541dc89de
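Write is a thin wrapper over struct packing, so the same effect can be shown with the standard library directly; the format string and offset here are illustrative stand-ins for the module's packer types.

import struct

buf = bytearray(8)
int32_packer = struct.Struct("<i")  # little-endian int32, like packer.int32
int32_packer.pack_into(buf, 4, -1)  # encode -1 at buf[4]
print(buf.hex())                    # 00000000ffffffff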
train
main
Script that finds and runs flatc built from source.
android/jni/run_flatc.py
def main():
    """Script that finds and runs flatc built from source."""
    if len(sys.argv) < 2:
        sys.stderr.write('Usage: run_flatc.py flatbuffers_dir [flatc_args]\n')
        return 1
    cwd = os.getcwd()
    flatc = ''
    flatbuffers_dir = sys.argv[1]
    for path in FLATC_SEARCH_PATHS:
        current = os.path.join(flatbuffers_dir, path,
                               'flatc' + EXECUTABLE_EXTENSION)
        if os.path.exists(current):
            flatc = current
            break
    if not flatc:
        sys.stderr.write('flatc not found\n')
        return 1
    command = [flatc] + sys.argv[2:]
    return subprocess.call(command)
def main():
    """Script that finds and runs flatc built from source."""
    if len(sys.argv) < 2:
        sys.stderr.write('Usage: run_flatc.py flatbuffers_dir [flatc_args]\n')
        return 1
    cwd = os.getcwd()
    flatc = ''
    flatbuffers_dir = sys.argv[1]
    for path in FLATC_SEARCH_PATHS:
        current = os.path.join(flatbuffers_dir, path,
                               'flatc' + EXECUTABLE_EXTENSION)
        if os.path.exists(current):
            flatc = current
            break
    if not flatc:
        sys.stderr.write('flatc not found\n')
        return 1
    command = [flatc] + sys.argv[2:]
    return subprocess.call(command)
[ "Script", "that", "finds", "and", "runs", "flatc", "built", "from", "source", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/android/jni/run_flatc.py#L25-L43
[ "def", "main", "(", ")", ":", "if", "len", "(", "sys", ".", "argv", ")", "<", "2", ":", "sys", ".", "stderr", ".", "write", "(", "'Usage: run_flatc.py flatbuffers_dir [flatc_args]\\n'", ")", "return", "1", "cwd", "=", "os", ".", "getcwd", "(", ")", "fl...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
import_numpy
Returns the numpy module if it exists on the system, otherwise returns None.
python/flatbuffers/compat.py
def import_numpy():
    """
    Returns the numpy module if it exists on the system,
    otherwise returns None.
    """
    try:
        imp.find_module('numpy')
        numpy_exists = True
    except ImportError:
        numpy_exists = False

    if numpy_exists:
        # We do this outside of try/except block in case numpy exists
        # but is not installed correctly. We do not want to catch an
        # incorrect installation which would manifest as an
        # ImportError.
        import numpy as np
    else:
        np = None

    return np
def import_numpy():
    """
    Returns the numpy module if it exists on the system,
    otherwise returns None.
    """
    try:
        imp.find_module('numpy')
        numpy_exists = True
    except ImportError:
        numpy_exists = False

    if numpy_exists:
        # We do this outside of try/except block in case numpy exists
        # but is not installed correctly. We do not want to catch an
        # incorrect installation which would manifest as an
        # ImportError.
        import numpy as np
    else:
        np = None

    return np
[ "Returns", "the", "numpy", "module", "if", "it", "exists", "on", "the", "system", "otherwise", "returns", "None", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/compat.py#L50-L70
[ "def", "import_numpy", "(", ")", ":", "try", ":", "imp", ".", "find_module", "(", "'numpy'", ")", "numpy_exists", "=", "True", "except", "ImportError", ":", "numpy_exists", "=", "False", "if", "numpy_exists", ":", "# We do this outside of try/except block in case nu...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
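The same optional-dependency pattern can be written with a plain guarded import; this sketch is illustrative only and sidesteps the deprecated imp module used above.

try:
    import numpy as np  # optional dependency
except ImportError:
    np = None

if np is None:
    print("numpy not available; numpy-backed features are disabled")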
train
vtableEqual
vtableEqual compares an unwritten vtable to a written vtable.
python/flatbuffers/builder.py
def vtableEqual(a, objectStart, b):
    """vtableEqual compares an unwritten vtable to a written vtable."""

    N.enforce_number(objectStart, N.UOffsetTFlags)

    if len(a) * N.VOffsetTFlags.bytewidth != len(b):
        return False

    for i, elem in enumerate(a):
        x = encode.Get(packer.voffset, b, i * N.VOffsetTFlags.bytewidth)

        # Skip vtable entries that indicate a default value.
        if x == 0 and elem == 0:
            pass
        else:
            y = objectStart - elem
            if x != y:
                return False
    return True
def vtableEqual(a, objectStart, b):
    """vtableEqual compares an unwritten vtable to a written vtable."""

    N.enforce_number(objectStart, N.UOffsetTFlags)

    if len(a) * N.VOffsetTFlags.bytewidth != len(b):
        return False

    for i, elem in enumerate(a):
        x = encode.Get(packer.voffset, b, i * N.VOffsetTFlags.bytewidth)

        # Skip vtable entries that indicate a default value.
        if x == 0 and elem == 0:
            pass
        else:
            y = objectStart - elem
            if x != y:
                return False
    return True
[ "vtableEqual", "compares", "an", "unwritten", "vtable", "to", "a", "written", "vtable", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L735-L753
[ "def", "vtableEqual", "(", "a", ",", "objectStart", ",", "b", ")", ":", "N", ".", "enforce_number", "(", "objectStart", ",", "N", ".", "UOffsetTFlags", ")", "if", "len", "(", "a", ")", "*", "N", ".", "VOffsetTFlags", ".", "bytewidth", "!=", "len", "(...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
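A hand-worked instance of the comparison rule, with hypothetical offsets: each written entry stores objectStart minus the field's offset, and zero entries on both sides count as matching defaults.

import struct

object_start = 100
unwritten = [92, 0, 88]                 # pending field offsets; 0 means "default"
written = struct.pack("<3H", 8, 0, 12)  # stored as 100-92, 0, 100-88
# vtableEqual(unwritten, object_start, written) evaluates to True for these values.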
train
Builder.StartObject
StartObject initializes bookkeeping for writing a new object.
python/flatbuffers/builder.py
def StartObject(self, numfields):
        """StartObject initializes bookkeeping for writing a new object."""

        self.assertNotNested()

        # use 32-bit offsets so that arithmetic doesn't overflow.
        self.current_vtable = [0 for _ in range_func(numfields)]
        self.objectEnd = self.Offset()
        self.nested = True
def StartObject(self, numfields):
        """StartObject initializes bookkeeping for writing a new object."""

        self.assertNotNested()

        # use 32-bit offsets so that arithmetic doesn't overflow.
        self.current_vtable = [0 for _ in range_func(numfields)]
        self.objectEnd = self.Offset()
        self.nested = True
[ "StartObject", "initializes", "bookkeeping", "for", "writing", "a", "new", "object", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L156-L164
[ "def", "StartObject", "(", "self", ",", "numfields", ")", ":", "self", ".", "assertNotNested", "(", ")", "# use 32-bit offsets so that arithmetic doesn't overflow.", "self", ".", "current_vtable", "=", "[", "0", "for", "_", "in", "range_func", "(", "numfields", ")...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.WriteVtable
WriteVtable serializes the vtable for the current object, if needed. Before writing out the vtable, this checks pre-existing vtables for equality to this one. If an equal vtable is found, point the object to the existing vtable and return. Because vtable values are sensitive to alignment of object data, not all logically-equal vtables will be deduplicated. A vtable has the following format: <VOffsetT: size of the vtable in bytes, including this value> <VOffsetT: size of the object in bytes, including the vtable offset> <VOffsetT: offset for a field> * N, where N is the number of fields in the schema for this type. Includes deprecated fields. Thus, a vtable is made of 2 + N elements, each VOffsetT bytes wide. An object has the following format: <SOffsetT: offset to this object's vtable (may be negative)> <byte: data>+
python/flatbuffers/builder.py
def WriteVtable(self):
        """
        WriteVtable serializes the vtable for the current object, if needed.

        Before writing out the vtable, this checks pre-existing vtables for
        equality to this one. If an equal vtable is found, point the object to
        the existing vtable and return.

        Because vtable values are sensitive to alignment of object data, not
        all logically-equal vtables will be deduplicated.

        A vtable has the following format:
          <VOffsetT: size of the vtable in bytes, including this value>
          <VOffsetT: size of the object in bytes, including the vtable offset>
          <VOffsetT: offset for a field> * N, where N is the number of fields
           in the schema for this type. Includes deprecated fields.
        Thus, a vtable is made of 2 + N elements, each VOffsetT bytes wide.

        An object has the following format:
          <SOffsetT: offset to this object's vtable (may be negative)>
          <byte: data>+
        """

        # Prepend a zero scalar to the object. Later in this function we'll
        # write an offset here that points to the object's vtable:
        self.PrependSOffsetTRelative(0)

        objectOffset = self.Offset()

        existingVtable = None

        # Trim trailing 0 offsets.
        while self.current_vtable and self.current_vtable[-1] == 0:
            self.current_vtable.pop()

        # Search backwards through existing vtables, because similar vtables
        # are likely to have been recently appended. See
        # BenchmarkVtableDeduplication for a case in which this heuristic
        # saves about 30% of the time used in writing objects with duplicate
        # tables.
        i = len(self.vtables) - 1
        while i >= 0:
            # Find the other vtable, which is associated with `i`:
            vt2Offset = self.vtables[i]
            vt2Start = len(self.Bytes) - vt2Offset
            vt2Len = encode.Get(packer.voffset, self.Bytes, vt2Start)

            metadata = VtableMetadataFields * N.VOffsetTFlags.bytewidth
            vt2End = vt2Start + vt2Len
            vt2 = self.Bytes[vt2Start+metadata:vt2End]

            # Compare the other vtable to the one under consideration.
            # If they are equal, store the offset and break:
            if vtableEqual(self.current_vtable, objectOffset, vt2):
                existingVtable = vt2Offset
                break

            i -= 1

        if existingVtable is None:
            # Did not find a vtable, so write this one to the buffer.

            # Write out the current vtable in reverse, because
            # serialization occurs in last-first order:
            i = len(self.current_vtable) - 1
            while i >= 0:
                off = 0
                if self.current_vtable[i] != 0:
                    # Forward reference to field;
                    # use 32bit number to ensure no overflow:
                    off = objectOffset - self.current_vtable[i]

                self.PrependVOffsetT(off)
                i -= 1

            # The two metadata fields are written last.

            # First, store the object bytesize:
            objectSize = UOffsetTFlags.py_type(objectOffset - self.objectEnd)
            self.PrependVOffsetT(VOffsetTFlags.py_type(objectSize))

            # Second, store the vtable bytesize:
            vBytes = len(self.current_vtable) + VtableMetadataFields
            vBytes *= N.VOffsetTFlags.bytewidth
            self.PrependVOffsetT(VOffsetTFlags.py_type(vBytes))

            # Next, write the offset to the new vtable in the
            # already-allocated SOffsetT at the beginning of this object:
            objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset)
            encode.Write(packer.soffset, self.Bytes, objectStart,
                         SOffsetTFlags.py_type(self.Offset() - objectOffset))

            # Finally, store this vtable in memory for future
            # deduplication:
            self.vtables.append(self.Offset())
        else:
            # Found a duplicate vtable.
            objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset)
            self.head = UOffsetTFlags.py_type(objectStart)

            # Write the offset to the found vtable in the
            # already-allocated SOffsetT at the beginning of this object:
            encode.Write(packer.soffset, self.Bytes, self.Head(),
                         SOffsetTFlags.py_type(existingVtable - objectOffset))

        self.current_vtable = None
        return objectOffset
def WriteVtable(self):
        """
        WriteVtable serializes the vtable for the current object, if needed.

        Before writing out the vtable, this checks pre-existing vtables for
        equality to this one. If an equal vtable is found, point the object to
        the existing vtable and return.

        Because vtable values are sensitive to alignment of object data, not
        all logically-equal vtables will be deduplicated.

        A vtable has the following format:
          <VOffsetT: size of the vtable in bytes, including this value>
          <VOffsetT: size of the object in bytes, including the vtable offset>
          <VOffsetT: offset for a field> * N, where N is the number of fields
           in the schema for this type. Includes deprecated fields.
        Thus, a vtable is made of 2 + N elements, each VOffsetT bytes wide.

        An object has the following format:
          <SOffsetT: offset to this object's vtable (may be negative)>
          <byte: data>+
        """

        # Prepend a zero scalar to the object. Later in this function we'll
        # write an offset here that points to the object's vtable:
        self.PrependSOffsetTRelative(0)

        objectOffset = self.Offset()

        existingVtable = None

        # Trim trailing 0 offsets.
        while self.current_vtable and self.current_vtable[-1] == 0:
            self.current_vtable.pop()

        # Search backwards through existing vtables, because similar vtables
        # are likely to have been recently appended. See
        # BenchmarkVtableDeduplication for a case in which this heuristic
        # saves about 30% of the time used in writing objects with duplicate
        # tables.
        i = len(self.vtables) - 1
        while i >= 0:
            # Find the other vtable, which is associated with `i`:
            vt2Offset = self.vtables[i]
            vt2Start = len(self.Bytes) - vt2Offset
            vt2Len = encode.Get(packer.voffset, self.Bytes, vt2Start)

            metadata = VtableMetadataFields * N.VOffsetTFlags.bytewidth
            vt2End = vt2Start + vt2Len
            vt2 = self.Bytes[vt2Start+metadata:vt2End]

            # Compare the other vtable to the one under consideration.
            # If they are equal, store the offset and break:
            if vtableEqual(self.current_vtable, objectOffset, vt2):
                existingVtable = vt2Offset
                break

            i -= 1

        if existingVtable is None:
            # Did not find a vtable, so write this one to the buffer.

            # Write out the current vtable in reverse, because
            # serialization occurs in last-first order:
            i = len(self.current_vtable) - 1
            while i >= 0:
                off = 0
                if self.current_vtable[i] != 0:
                    # Forward reference to field;
                    # use 32bit number to ensure no overflow:
                    off = objectOffset - self.current_vtable[i]

                self.PrependVOffsetT(off)
                i -= 1

            # The two metadata fields are written last.

            # First, store the object bytesize:
            objectSize = UOffsetTFlags.py_type(objectOffset - self.objectEnd)
            self.PrependVOffsetT(VOffsetTFlags.py_type(objectSize))

            # Second, store the vtable bytesize:
            vBytes = len(self.current_vtable) + VtableMetadataFields
            vBytes *= N.VOffsetTFlags.bytewidth
            self.PrependVOffsetT(VOffsetTFlags.py_type(vBytes))

            # Next, write the offset to the new vtable in the
            # already-allocated SOffsetT at the beginning of this object:
            objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset)
            encode.Write(packer.soffset, self.Bytes, objectStart,
                         SOffsetTFlags.py_type(self.Offset() - objectOffset))

            # Finally, store this vtable in memory for future
            # deduplication:
            self.vtables.append(self.Offset())
        else:
            # Found a duplicate vtable.
            objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset)
            self.head = UOffsetTFlags.py_type(objectStart)

            # Write the offset to the found vtable in the
            # already-allocated SOffsetT at the beginning of this object:
            encode.Write(packer.soffset, self.Bytes, self.Head(),
                         SOffsetTFlags.py_type(existingVtable - objectOffset))

        self.current_vtable = None
        return objectOffset
[ "WriteVtable", "serializes", "the", "vtable", "for", "the", "current", "object", "if", "needed", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L166-L273
[ "def", "WriteVtable", "(", "self", ")", ":", "# Prepend a zero scalar to the object. Later in this function we'll", "# write an offset here that points to the object's vtable:", "self", ".", "PrependSOffsetTRelative", "(", "0", ")", "objectOffset", "=", "self", ".", "Offset", "...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
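An end-to-end sketch through the public Builder API (assumes the flatbuffers pip package is available); EndObject triggers WriteVtable internally, and the field index and values here are illustrative.

import flatbuffers

b = flatbuffers.Builder(0)
b.StartObject(1)                # a table with one field in its schema
b.PrependInt32Slot(0, 42, 0)    # slot 0 holds a non-default int32
table = b.EndObject()           # EndObject calls WriteVtable internally
b.Finish(table)
print(bytes(b.Output()).hex())  # root offset, vtable, then the object data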
train
Builder.growByteBuffer
Doubles the size of the byteslice, and copies the old data towards the end of the new buffer (since we build the buffer backwards).
python/flatbuffers/builder.py
def growByteBuffer(self):
        """Doubles the size of the byteslice, and copies the old data towards
           the end of the new buffer (since we build the buffer backwards)."""
        if len(self.Bytes) == Builder.MAX_BUFFER_SIZE:
            msg = "flatbuffers: cannot grow buffer beyond 2 gigabytes"
            raise BuilderSizeError(msg)

        newSize = min(len(self.Bytes) * 2, Builder.MAX_BUFFER_SIZE)
        if newSize == 0:
            newSize = 1
        bytes2 = bytearray(newSize)
        bytes2[newSize-len(self.Bytes):] = self.Bytes
        self.Bytes = bytes2
def growByteBuffer(self):
        """Doubles the size of the byteslice, and copies the old data towards
           the end of the new buffer (since we build the buffer backwards)."""
        if len(self.Bytes) == Builder.MAX_BUFFER_SIZE:
            msg = "flatbuffers: cannot grow buffer beyond 2 gigabytes"
            raise BuilderSizeError(msg)

        newSize = min(len(self.Bytes) * 2, Builder.MAX_BUFFER_SIZE)
        if newSize == 0:
            newSize = 1
        bytes2 = bytearray(newSize)
        bytes2[newSize-len(self.Bytes):] = self.Bytes
        self.Bytes = bytes2
[ "Doubles", "the", "size", "of", "the", "byteslice", "and", "copies", "the", "old", "data", "towards", "the", "end", "of", "the", "new", "buffer", "(", "since", "we", "build", "the", "buffer", "backwards", ")", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L281-L293
[ "def", "growByteBuffer", "(", "self", ")", ":", "if", "len", "(", "self", ".", "Bytes", ")", "==", "Builder", ".", "MAX_BUFFER_SIZE", ":", "msg", "=", "\"flatbuffers: cannot grow buffer beyond 2 gigabytes\"", "raise", "BuilderSizeError", "(", "msg", ")", "newSize"...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
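A small demonstration of the copy-toward-the-end behaviour, written against a plain bytearray rather than the Builder internals; the sizes are illustrative.

old = bytearray(b"\x01\x02\x03\x04")
new_size = len(old) * 2
grown = bytearray(new_size)
grown[new_size - len(old):] = old  # old data keeps its position relative to the end
print(grown)  # bytearray(b'\x00\x00\x00\x00\x01\x02\x03\x04')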
train
Builder.Pad
Pad places zeros at the current offset.
python/flatbuffers/builder.py
def Pad(self, n):
        """Pad places zeros at the current offset."""
        for i in range_func(n):
            self.Place(0, N.Uint8Flags)
def Pad(self, n):
        """Pad places zeros at the current offset."""
        for i in range_func(n):
            self.Place(0, N.Uint8Flags)
[ "Pad", "places", "zeros", "at", "the", "current", "offset", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L311-L314
[ "def", "Pad", "(", "self", ",", "n", ")", ":", "for", "i", "in", "range_func", "(", "n", ")", ":", "self", ".", "Place", "(", "0", ",", "N", ".", "Uint8Flags", ")" ]
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.Prep
Prep prepares to write an element of `size` after `additional_bytes` have been written, e.g. if you write a string, you need to align such that the int length field is aligned to SizeInt32, and the string data follows it directly. If all you need to do is align, `additionalBytes` will be 0.
python/flatbuffers/builder.py
def Prep(self, size, additionalBytes):
        """
        Prep prepares to write an element of `size` after `additional_bytes`
        have been written, e.g. if you write a string, you need to align
        such that the int length field is aligned to SizeInt32, and the string
        data follows it directly.
        If all you need to do is align, `additionalBytes` will be 0.
        """

        # Track the biggest thing we've ever aligned to.
        if size > self.minalign:
            self.minalign = size

        # Find the amount of alignment needed such that `size` is properly
        # aligned after `additionalBytes`:
        alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1
        alignSize &= (size - 1)

        # Reallocate the buffer if needed:
        while self.Head() < alignSize+size+additionalBytes:
            oldBufSize = len(self.Bytes)
            self.growByteBuffer()
            updated_head = self.head + len(self.Bytes) - oldBufSize
            self.head = UOffsetTFlags.py_type(updated_head)
        self.Pad(alignSize)
def Prep(self, size, additionalBytes):
        """
        Prep prepares to write an element of `size` after `additional_bytes`
        have been written, e.g. if you write a string, you need to align
        such that the int length field is aligned to SizeInt32, and the string
        data follows it directly.
        If all you need to do is align, `additionalBytes` will be 0.
        """

        # Track the biggest thing we've ever aligned to.
        if size > self.minalign:
            self.minalign = size

        # Find the amount of alignment needed such that `size` is properly
        # aligned after `additionalBytes`:
        alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1
        alignSize &= (size - 1)

        # Reallocate the buffer if needed:
        while self.Head() < alignSize+size+additionalBytes:
            oldBufSize = len(self.Bytes)
            self.growByteBuffer()
            updated_head = self.head + len(self.Bytes) - oldBufSize
            self.head = UOffsetTFlags.py_type(updated_head)
        self.Pad(alignSize)
[ "Prep", "prepares", "to", "write", "an", "element", "of", "size", "after", "additional_bytes", "have", "been", "written", "e", ".", "g", ".", "if", "you", "write", "a", "string", "you", "need", "to", "align", "such", "the", "int", "length", "field", "is"...
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L316-L340
[ "def", "Prep", "(", "self", ",", "size", ",", "additionalBytes", ")", ":", "# Track the biggest thing we've ever aligned to.", "if", "size", ">", "self", ".", "minalign", ":", "self", ".", "minalign", "=", "size", "# Find the amount of alignment needed such that `size` ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
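A worked instance of the alignment arithmetic in Prep, with hypothetical buffer state: nothing written so far, 4 additional bytes pending, and an 8-byte element to align.

buf_len, head, additional, size = 1024, 1024, 4, 8  # empty 1 KiB buffer
written_so_far = buf_len - head                     # 0 bytes written
align_size = (~(written_so_far + additional) + 1) & (size - 1)
print(align_size)  # 4 -> after 4 pad bytes, the 8-byte element lands 8-aligned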
train
Builder.PrependSOffsetTRelative
PrependSOffsetTRelative prepends an SOffsetT, relative to where it will be written.
python/flatbuffers/builder.py
def PrependSOffsetTRelative(self, off):
        """
        PrependSOffsetTRelative prepends an SOffsetT, relative to where it
        will be written.
        """

        # Ensure alignment is already done:
        self.Prep(N.SOffsetTFlags.bytewidth, 0)
        if not (off <= self.Offset()):
            msg = "flatbuffers: Offset arithmetic error."
            raise OffsetArithmeticError(msg)
        off2 = self.Offset() - off + N.SOffsetTFlags.bytewidth
        self.PlaceSOffsetT(off2)
def PrependSOffsetTRelative(self, off):
        """
        PrependSOffsetTRelative prepends an SOffsetT, relative to where it
        will be written.
        """

        # Ensure alignment is already done:
        self.Prep(N.SOffsetTFlags.bytewidth, 0)
        if not (off <= self.Offset()):
            msg = "flatbuffers: Offset arithmetic error."
            raise OffsetArithmeticError(msg)
        off2 = self.Offset() - off + N.SOffsetTFlags.bytewidth
        self.PlaceSOffsetT(off2)
[ "PrependSOffsetTRelative", "prepends", "an", "SOffsetT", "relative", "to", "where", "it", "will", "be", "written", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L342-L354
[ "def", "PrependSOffsetTRelative", "(", "self", ",", "off", ")", ":", "# Ensure alignment is already done:", "self", ".", "Prep", "(", "N", ".", "SOffsetTFlags", ".", "bytewidth", ",", "0", ")", "if", "not", "(", "off", "<=", "self", ".", "Offset", "(", ")"...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.PrependUOffsetTRelative
Prepends an unsigned offset into vector data, relative to where it will be written.
python/flatbuffers/builder.py
def PrependUOffsetTRelative(self, off):
        """Prepends an unsigned offset into vector data, relative to where it
        will be written.
        """

        # Ensure alignment is already done:
        self.Prep(N.UOffsetTFlags.bytewidth, 0)
        if not (off <= self.Offset()):
            msg = "flatbuffers: Offset arithmetic error."
            raise OffsetArithmeticError(msg)
        off2 = self.Offset() - off + N.UOffsetTFlags.bytewidth
        self.PlaceUOffsetT(off2)
def PrependUOffsetTRelative(self, off):
        """Prepends an unsigned offset into vector data, relative to where it
        will be written.
        """

        # Ensure alignment is already done:
        self.Prep(N.UOffsetTFlags.bytewidth, 0)
        if not (off <= self.Offset()):
            msg = "flatbuffers: Offset arithmetic error."
            raise OffsetArithmeticError(msg)
        off2 = self.Offset() - off + N.UOffsetTFlags.bytewidth
        self.PlaceUOffsetT(off2)
[ "Prepends", "an", "unsigned", "offset", "into", "vector", "data", "relative", "to", "where", "it", "will", "be", "written", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L357-L368
[ "def", "PrependUOffsetTRelative", "(", "self", ",", "off", ")", ":", "# Ensure alignment is already done:", "self", ".", "Prep", "(", "N", ".", "UOffsetTFlags", ".", "bytewidth", ",", "0", ")", "if", "not", "(", "off", "<=", "self", ".", "Offset", "(", ")"...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.StartVector
StartVector initializes bookkeeping for writing a new vector. A vector has the following format: - <UOffsetT: number of elements in this vector> - <T: data>+, where T is the type of elements of this vector.
python/flatbuffers/builder.py
def StartVector(self, elemSize, numElems, alignment):
        """
        StartVector initializes bookkeeping for writing a new vector.

        A vector has the following format:
          - <UOffsetT: number of elements in this vector>
          - <T: data>+, where T is the type of elements of this vector.
        """

        self.assertNotNested()
        self.nested = True
        self.Prep(N.Uint32Flags.bytewidth, elemSize*numElems)
        self.Prep(alignment, elemSize*numElems)  # In case alignment > int.
        return self.Offset()
def StartVector(self, elemSize, numElems, alignment):
        """
        StartVector initializes bookkeeping for writing a new vector.

        A vector has the following format:
          - <UOffsetT: number of elements in this vector>
          - <T: data>+, where T is the type of elements of this vector.
        """

        self.assertNotNested()
        self.nested = True
        self.Prep(N.Uint32Flags.bytewidth, elemSize*numElems)
        self.Prep(alignment, elemSize*numElems)  # In case alignment > int.
        return self.Offset()
[ "StartVector", "initializes", "bookkeeping", "for", "writing", "a", "new", "vector", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L371-L384
[ "def", "StartVector", "(", "self", ",", "elemSize", ",", "numElems", ",", "alignment", ")", ":", "self", ".", "assertNotNested", "(", ")", "self", ".", "nested", "=", "True", "self", ".", "Prep", "(", "N", ".", "Uint32Flags", ".", "bytewidth", ",", "el...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.EndVector
EndVector writes data necessary to finish vector construction.
python/flatbuffers/builder.py
def EndVector(self, vectorNumElems):
        """EndVector writes data necessary to finish vector construction."""

        self.assertNested()
        ## @cond FLATBUFFERS_INTERNAL
        self.nested = False
        ## @endcond
        # we already made space for this, so write without PrependUint32
        self.PlaceUOffsetT(vectorNumElems)
        return self.Offset()
def EndVector(self, vectorNumElems):
        """EndVector writes data necessary to finish vector construction."""

        self.assertNested()
        ## @cond FLATBUFFERS_INTERNAL
        self.nested = False
        ## @endcond
        # we already made space for this, so write without PrependUint32
        self.PlaceUOffsetT(vectorNumElems)
        return self.Offset()
[ "EndVector", "writes", "data", "necessary", "to", "finish", "vector", "construction", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L387-L396
[ "def", "EndVector", "(", "self", ",", "vectorNumElems", ")", ":", "self", ".", "assertNested", "(", ")", "## @cond FLATBUFFERS_INTERNAL", "self", ".", "nested", "=", "False", "## @endcond", "# we already made space for this, so write without PrependUint32", "self", ".", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
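A sketch of the StartVector/EndVector pairing through the public API (assumes the flatbuffers pip package; at this commit EndVector takes the element count, while current releases infer it). Elements are prepended, so they are pushed in reverse.

import flatbuffers

b = flatbuffers.Builder(0)
b.StartVector(4, 3, 4)         # elemSize=4, numElems=3, alignment=4
for v in reversed([1, 2, 3]):
    b.PrependInt32(v)          # the buffer is built back-to-front
vec = b.EndVector(3)           # returns the vector's UOffsetT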
train
Builder.CreateString
CreateString writes a null-terminated byte string as a vector.
python/flatbuffers/builder.py
def CreateString(self, s, encoding='utf-8', errors='strict'):
        """CreateString writes a null-terminated byte string as a vector."""

        self.assertNotNested()
        ## @cond FLATBUFFERS_INTERNAL
        self.nested = True
        ## @endcond

        if isinstance(s, compat.string_types):
            x = s.encode(encoding, errors)
        elif isinstance(s, compat.binary_types):
            x = s
        else:
            raise TypeError("non-string passed to CreateString")

        self.Prep(N.UOffsetTFlags.bytewidth, (len(x)+1)*N.Uint8Flags.bytewidth)
        self.Place(0, N.Uint8Flags)

        # Use the encoded byte length; len(s) would under-count strings
        # containing multi-byte characters.
        l = UOffsetTFlags.py_type(len(x))
        ## @cond FLATBUFFERS_INTERNAL
        self.head = UOffsetTFlags.py_type(self.Head() - l)
        ## @endcond
        self.Bytes[self.Head():self.Head()+l] = x

        return self.EndVector(len(x))
def CreateString(self, s, encoding='utf-8', errors='strict'):
        """CreateString writes a null-terminated byte string as a vector."""

        self.assertNotNested()
        ## @cond FLATBUFFERS_INTERNAL
        self.nested = True
        ## @endcond

        if isinstance(s, compat.string_types):
            x = s.encode(encoding, errors)
        elif isinstance(s, compat.binary_types):
            x = s
        else:
            raise TypeError("non-string passed to CreateString")

        self.Prep(N.UOffsetTFlags.bytewidth, (len(x)+1)*N.Uint8Flags.bytewidth)
        self.Place(0, N.Uint8Flags)

        # Use the encoded byte length; len(s) would under-count strings
        # containing multi-byte characters.
        l = UOffsetTFlags.py_type(len(x))
        ## @cond FLATBUFFERS_INTERNAL
        self.head = UOffsetTFlags.py_type(self.Head() - l)
        ## @endcond
        self.Bytes[self.Head():self.Head()+l] = x

        return self.EndVector(len(x))
[ "CreateString", "writes", "a", "null", "-", "terminated", "byte", "string", "as", "a", "vector", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L398-L422
[ "def", "CreateString", "(", "self", ",", "s", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "self", ".", "assertNotNested", "(", ")", "## @cond FLATBUFFERS_INTERNAL", "self", ".", "nested", "=", "True", "## @endcond", "if", "isin...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
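Usage sketch (assumes the flatbuffers pip package): the returned UOffsetT is what generated table code later stores with PrependUOffsetTRelativeSlot.

import flatbuffers

b = flatbuffers.Builder(0)
name = b.CreateString("Ahab")  # utf-8 encoded and null-terminated in the buffer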
train
Builder.CreateByteVector
CreateByteVector writes a byte vector.
python/flatbuffers/builder.py
def CreateByteVector(self, x):
        """CreateByteVector writes a byte vector."""

        self.assertNotNested()
        ## @cond FLATBUFFERS_INTERNAL
        self.nested = True
        ## @endcond

        if not isinstance(x, compat.binary_types):
            raise TypeError("non-byte vector passed to CreateByteVector")

        self.Prep(N.UOffsetTFlags.bytewidth, len(x)*N.Uint8Flags.bytewidth)

        l = UOffsetTFlags.py_type(len(x))
        ## @cond FLATBUFFERS_INTERNAL
        self.head = UOffsetTFlags.py_type(self.Head() - l)
        ## @endcond
        self.Bytes[self.Head():self.Head()+l] = x

        return self.EndVector(len(x))
def CreateByteVector(self, x):
        """CreateByteVector writes a byte vector."""

        self.assertNotNested()
        ## @cond FLATBUFFERS_INTERNAL
        self.nested = True
        ## @endcond

        if not isinstance(x, compat.binary_types):
            raise TypeError("non-byte vector passed to CreateByteVector")

        self.Prep(N.UOffsetTFlags.bytewidth, len(x)*N.Uint8Flags.bytewidth)

        l = UOffsetTFlags.py_type(len(x))
        ## @cond FLATBUFFERS_INTERNAL
        self.head = UOffsetTFlags.py_type(self.Head() - l)
        ## @endcond
        self.Bytes[self.Head():self.Head()+l] = x

        return self.EndVector(len(x))
[ "CreateString", "writes", "a", "byte", "vector", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L424-L443
[ "def", "CreateByteVector", "(", "self", ",", "x", ")", ":", "self", ".", "assertNotNested", "(", ")", "## @cond FLATBUFFERS_INTERNAL", "self", ".", "nested", "=", "True", "## @endcond", "if", "not", "isinstance", "(", "x", ",", "compat", ".", "binary_types", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.CreateNumpyVector
CreateNumpyVector writes a numpy array into the buffer.
python/flatbuffers/builder.py
def CreateNumpyVector(self, x):
        """CreateNumpyVector writes a numpy array into the buffer."""

        if np is None:
            # Numpy is required for this feature
            raise NumpyRequiredForThisFeature("Numpy was not found.")

        if not isinstance(x, np.ndarray):
            raise TypeError("non-numpy-ndarray passed to CreateNumpyVector")

        if x.dtype.kind not in ['b', 'i', 'u', 'f']:
            raise TypeError("numpy-ndarray holds elements of unsupported datatype")

        if x.ndim > 1:
            raise TypeError("multidimensional-ndarray passed to CreateNumpyVector")

        self.StartVector(x.itemsize, x.size, x.dtype.alignment)

        # Ensure little endian byte ordering
        if x.dtype.str[0] == "<":
            x_lend = x
        else:
            x_lend = x.byteswap(inplace=False)

        # Calculate total length
        l = UOffsetTFlags.py_type(x_lend.itemsize * x_lend.size)
        ## @cond FLATBUFFERS_INTERNAL
        self.head = UOffsetTFlags.py_type(self.Head() - l)
        ## @endcond

        # tobytes ensures c_contiguous ordering
        self.Bytes[self.Head():self.Head()+l] = x_lend.tobytes(order='C')

        return self.EndVector(x.size)
def CreateNumpyVector(self, x):
        """CreateNumpyVector writes a numpy array into the buffer."""

        if np is None:
            # Numpy is required for this feature
            raise NumpyRequiredForThisFeature("Numpy was not found.")

        if not isinstance(x, np.ndarray):
            raise TypeError("non-numpy-ndarray passed to CreateNumpyVector")

        if x.dtype.kind not in ['b', 'i', 'u', 'f']:
            raise TypeError("numpy-ndarray holds elements of unsupported datatype")

        if x.ndim > 1:
            raise TypeError("multidimensional-ndarray passed to CreateNumpyVector")

        self.StartVector(x.itemsize, x.size, x.dtype.alignment)

        # Ensure little endian byte ordering
        if x.dtype.str[0] == "<":
            x_lend = x
        else:
            x_lend = x.byteswap(inplace=False)

        # Calculate total length
        l = UOffsetTFlags.py_type(x_lend.itemsize * x_lend.size)
        ## @cond FLATBUFFERS_INTERNAL
        self.head = UOffsetTFlags.py_type(self.Head() - l)
        ## @endcond

        # tobytes ensures c_contiguous ordering
        self.Bytes[self.Head():self.Head()+l] = x_lend.tobytes(order='C')

        return self.EndVector(x.size)
[ "CreateNumpyVector", "writes", "a", "numpy", "array", "into", "the", "buffer", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L445-L478
[ "def", "CreateNumpyVector", "(", "self", ",", "x", ")", ":", "if", "np", "is", "None", ":", "# Numpy is required for this feature", "raise", "NumpyRequiredForThisFeature", "(", "\"Numpy was not found.\"", ")", "if", "not", "isinstance", "(", "x", ",", "np", ".", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
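Usage sketch (assumes flatbuffers and numpy are installed): the array is serialized in little-endian order regardless of host byte order, matching the byteswap branch above.

import flatbuffers
import numpy as np

b = flatbuffers.Builder(0)
arr = np.array([1, 2, 3], dtype=np.int32)
vec = b.CreateNumpyVector(arr)  # one bulk copy instead of per-element Prepends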
train
Builder.assertStructIsInline
Structs are always stored inline, so need to be created right where they are used. You'll get this error if you created it elsewhere.
python/flatbuffers/builder.py
def assertStructIsInline(self, obj):
        """
        Structs are always stored inline, so need to be created right
        where they are used. You'll get this error if you created it
        elsewhere.
        """

        N.enforce_number(obj, N.UOffsetTFlags)
        if obj != self.Offset():
            msg = ("flatbuffers: Tried to write a Struct at an Offset that "
                   "is different from the current Offset of the Builder.")
            raise StructIsNotInlineError(msg)
def assertStructIsInline(self, obj):
        """
        Structs are always stored inline, so need to be created right
        where they are used. You'll get this error if you created it
        elsewhere.
        """

        N.enforce_number(obj, N.UOffsetTFlags)
        if obj != self.Offset():
            msg = ("flatbuffers: Tried to write a Struct at an Offset that "
                   "is different from the current Offset of the Builder.")
            raise StructIsNotInlineError(msg)
[ "Structs", "are", "always", "stored", "inline", "so", "need", "to", "be", "created", "right", "where", "they", "are", "used", ".", "You", "ll", "get", "this", "error", "if", "you", "created", "it", "elsewhere", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L498-L509
[ "def", "assertStructIsInline", "(", "self", ",", "obj", ")", ":", "N", ".", "enforce_number", "(", "obj", ",", "N", ".", "UOffsetTFlags", ")", "if", "obj", "!=", "self", ".", "Offset", "(", ")", ":", "msg", "=", "(", "\"flatbuffers: Tried to write a Struct...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.Slot
Slot sets the vtable key `voffset` to the current location in the buffer.
python/flatbuffers/builder.py
def Slot(self, slotnum):
        """
        Slot sets the vtable key `voffset` to the current location in the
        buffer.
        """
        self.assertNested()
        self.current_vtable[slotnum] = self.Offset()
def Slot(self, slotnum):
        """
        Slot sets the vtable key `voffset` to the current location in the
        buffer.
        """
        self.assertNested()
        self.current_vtable[slotnum] = self.Offset()
[ "Slot", "sets", "the", "vtable", "key", "voffset", "to", "the", "current", "location", "in", "the", "buffer", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L511-L518
[ "def", "Slot", "(", "self", ",", "slotnum", ")", ":", "self", ".", "assertNested", "(", ")", "self", ".", "current_vtable", "[", "slotnum", "]", "=", "self", ".", "Offset", "(", ")" ]
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.__Finish
Finish finalizes a buffer, pointing to the given `rootTable`.
python/flatbuffers/builder.py
def __Finish(self, rootTable, sizePrefix):
        """Finish finalizes a buffer, pointing to the given `rootTable`."""
        N.enforce_number(rootTable, N.UOffsetTFlags)

        prepSize = N.UOffsetTFlags.bytewidth
        if sizePrefix:
            prepSize += N.Int32Flags.bytewidth
        self.Prep(self.minalign, prepSize)

        self.PrependUOffsetTRelative(rootTable)
        if sizePrefix:
            size = len(self.Bytes) - self.Head()
            N.enforce_number(size, N.Int32Flags)
            self.PrependInt32(size)
        self.finished = True
        return self.Head()
def __Finish(self, rootTable, sizePrefix):
        """Finish finalizes a buffer, pointing to the given `rootTable`."""
        N.enforce_number(rootTable, N.UOffsetTFlags)

        prepSize = N.UOffsetTFlags.bytewidth
        if sizePrefix:
            prepSize += N.Int32Flags.bytewidth
        self.Prep(self.minalign, prepSize)

        self.PrependUOffsetTRelative(rootTable)
        if sizePrefix:
            size = len(self.Bytes) - self.Head()
            N.enforce_number(size, N.Int32Flags)
            self.PrependInt32(size)
        self.finished = True
        return self.Head()
[ "Finish", "finalizes", "a", "buffer", "pointing", "to", "the", "given", "rootTable", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L521-L534
[ "def", "__Finish", "(", "self", ",", "rootTable", ",", "sizePrefix", ")", ":", "N", ".", "enforce_number", "(", "rootTable", ",", "N", ".", "UOffsetTFlags", ")", "prepSize", "=", "N", ".", "UOffsetTFlags", ".", "bytewidth", "if", "sizePrefix", ":", "prepSi...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
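A minimal end-to-end sketch (assumes the flatbuffers pip package): the public Finish wrapper reaches __Finish, and Output returns the finished region of the buffer.

import flatbuffers

b = flatbuffers.Builder(0)
b.StartObject(0)   # an empty table
root = b.EndObject()
b.Finish(root)     # writes the root UOffsetT and marks the buffer finished
data = b.Output()  # bytes from Head() to the end of the buffer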
train
Builder.PrependUOffsetTRelativeSlot
PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at vtable slot `o`. If value `x` equals default `d`, then the slot will be set to zero and no other data will be written.
python/flatbuffers/builder.py
def PrependUOffsetTRelativeSlot(self, o, x, d):
        """
        PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at
        vtable slot `o`. If value `x` equals default `d`, then the slot will
        be set to zero and no other data will be written.
        """

        if x != d:
            self.PrependUOffsetTRelative(x)
            self.Slot(o)
def PrependUOffsetTRelativeSlot(self, o, x, d):
        """
        PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at
        vtable slot `o`. If value `x` equals default `d`, then the slot will
        be set to zero and no other data will be written.
        """

        if x != d:
            self.PrependUOffsetTRelative(x)
            self.Slot(o)
[ "PrependUOffsetTRelativeSlot", "prepends", "an", "UOffsetT", "onto", "the", "object", "at", "vtable", "slot", "o", ".", "If", "value", "x", "equals", "default", "d", "then", "the", "slot", "will", "be", "set", "to", "zero", "and", "no", "other", "data", "w...
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L585-L594
[ "def", "PrependUOffsetTRelativeSlot", "(", "self", ",", "o", ",", "x", ",", "d", ")", ":", "if", "x", "!=", "d", ":", "self", ".", "PrependUOffsetTRelative", "(", "x", ")", "self", ".", "Slot", "(", "o", ")" ]
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.PrependStructSlot
PrependStructSlot prepends a struct onto the object at vtable slot `v`. Structs are stored inline, so nothing additional is being added. In generated code, `d` is always 0.
python/flatbuffers/builder.py
def PrependStructSlot(self, v, x, d):
        """
        PrependStructSlot prepends a struct onto the object at vtable slot `v`.
        Structs are stored inline, so nothing additional is being added.
        In generated code, `d` is always 0.
        """

        N.enforce_number(d, N.UOffsetTFlags)
        if x != d:
            self.assertStructIsInline(x)
            self.Slot(v)
def PrependStructSlot(self, v, x, d):
        """
        PrependStructSlot prepends a struct onto the object at vtable slot `v`.
        Structs are stored inline, so nothing additional is being added.
        In generated code, `d` is always 0.
        """

        N.enforce_number(d, N.UOffsetTFlags)
        if x != d:
            self.assertStructIsInline(x)
            self.Slot(v)
[ "PrependStructSlot", "prepends", "a", "struct", "onto", "the", "object", "at", "vtable", "slot", "o", ".", "Structs", "are", "stored", "inline", "so", "nothing", "additional", "is", "being", "added", ".", "In", "generated", "code", "d", "is", "always", "0", ...
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L596-L606
[ "def", "PrependStructSlot", "(", "self", ",", "v", ",", "x", ",", "d", ")", ":", "N", ".", "enforce_number", "(", "d", ",", "N", ".", "UOffsetTFlags", ")", "if", "x", "!=", "d", ":", "self", ".", "assertStructIsInline", "(", "x", ")", "self", ".", ...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.Place
Place prepends a value specified by `flags` to the Builder, without checking for available space.
python/flatbuffers/builder.py
def Place(self, x, flags):
        """
        Place prepends a value specified by `flags` to the Builder,
        without checking for available space.
        """

        N.enforce_number(x, flags)
        self.head = self.head - flags.bytewidth
        encode.Write(flags.packer_type, self.Bytes, self.Head(), x)
def Place(self, x, flags):
        """
        Place prepends a value specified by `flags` to the Builder,
        without checking for available space.
        """

        N.enforce_number(x, flags)
        self.head = self.head - flags.bytewidth
        encode.Write(flags.packer_type, self.Bytes, self.Head(), x)
[ "Place", "prepends", "a", "value", "specified", "by", "flags", "to", "the", "Builder", "without", "checking", "for", "available", "space", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L699-L707
[ "def", "Place", "(", "self", ",", "x", ",", "flags", ")", ":", "N", ".", "enforce_number", "(", "x", ",", "flags", ")", "self", ".", "head", "=", "self", ".", "head", "-", "flags", ".", "bytewidth", "encode", ".", "Write", "(", "flags", ".", "pac...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.PlaceVOffsetT
PlaceVOffsetT prepends a VOffsetT to the Builder, without checking for space.
python/flatbuffers/builder.py
def PlaceVOffsetT(self, x):
        """PlaceVOffsetT prepends a VOffsetT to the Builder, without checking
        for space.
        """
        N.enforce_number(x, N.VOffsetTFlags)
        self.head = self.head - N.VOffsetTFlags.bytewidth
        encode.Write(packer.voffset, self.Bytes, self.Head(), x)
def PlaceVOffsetT(self, x):
        """PlaceVOffsetT prepends a VOffsetT to the Builder, without checking
        for space.
        """
        N.enforce_number(x, N.VOffsetTFlags)
        self.head = self.head - N.VOffsetTFlags.bytewidth
        encode.Write(packer.voffset, self.Bytes, self.Head(), x)
[ "PlaceVOffsetT", "prepends", "a", "VOffsetT", "to", "the", "Builder", "without", "checking", "for", "space", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L709-L715
[ "def", "PlaceVOffsetT", "(", "self", ",", "x", ")", ":", "N", ".", "enforce_number", "(", "x", ",", "N", ".", "VOffsetTFlags", ")", "self", ".", "head", "=", "self", ".", "head", "-", "N", ".", "VOffsetTFlags", ".", "bytewidth", "encode", ".", "Write...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.PlaceSOffsetT
PlaceSOffsetT prepends a SOffsetT to the Builder, without checking for space.
python/flatbuffers/builder.py
def PlaceSOffsetT(self, x):
        """PlaceSOffsetT prepends a SOffsetT to the Builder, without checking
        for space.
        """
        N.enforce_number(x, N.SOffsetTFlags)
        self.head = self.head - N.SOffsetTFlags.bytewidth
        encode.Write(packer.soffset, self.Bytes, self.Head(), x)
def PlaceSOffsetT(self, x):
        """PlaceSOffsetT prepends a SOffsetT to the Builder, without checking
        for space.
        """
        N.enforce_number(x, N.SOffsetTFlags)
        self.head = self.head - N.SOffsetTFlags.bytewidth
        encode.Write(packer.soffset, self.Bytes, self.Head(), x)
[ "PlaceSOffsetT", "prepends", "a", "SOffsetT", "to", "the", "Builder", "without", "checking", "for", "space", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L717-L723
[ "def", "PlaceSOffsetT", "(", "self", ",", "x", ")", ":", "N", ".", "enforce_number", "(", "x", ",", "N", ".", "SOffsetTFlags", ")", "self", ".", "head", "=", "self", ".", "head", "-", "N", ".", "SOffsetTFlags", ".", "bytewidth", "encode", ".", "Write...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
Builder.PlaceUOffsetT
PlaceUOffsetT prepends a UOffsetT to the Builder, without checking for space.
python/flatbuffers/builder.py
def PlaceUOffsetT(self, x):
        """PlaceUOffsetT prepends a UOffsetT to the Builder, without checking
        for space.
        """
        N.enforce_number(x, N.UOffsetTFlags)
        self.head = self.head - N.UOffsetTFlags.bytewidth
        encode.Write(packer.uoffset, self.Bytes, self.Head(), x)
def PlaceUOffsetT(self, x):
        """PlaceUOffsetT prepends a UOffsetT to the Builder, without checking
        for space.
        """
        N.enforce_number(x, N.UOffsetTFlags)
        self.head = self.head - N.UOffsetTFlags.bytewidth
        encode.Write(packer.uoffset, self.Bytes, self.Head(), x)
[ "PlaceUOffsetT", "prepends", "a", "UOffsetT", "to", "the", "Builder", "without", "checking", "for", "space", "." ]
google/flatbuffers
python
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/builder.py#L725-L731
[ "def", "PlaceUOffsetT", "(", "self", ",", "x", ")", ":", "N", ".", "enforce_number", "(", "x", ",", "N", ".", "UOffsetTFlags", ")", "self", ".", "head", "=", "self", ".", "head", "-", "N", ".", "UOffsetTFlags", ".", "bytewidth", "encode", ".", "Write...
6cc30b3272d79c85db7d4871ac0aa69541dc89de
train
site_data_dir
r"""Return full path to the user-shared data dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "multipath" is an optional parameter only applicable to *nix which indicates that the entire list of data dirs should be returned. By default, the first item from XDG_DATA_DIRS is returned, or '/usr/local/share/<AppName>', if XDG_DATA_DIRS is not set Typical site data directories are: Mac OS X: /Library/Application Support/<AppName> Unix: /usr/local/share/<AppName> or /usr/share/<AppName> Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName> Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7. For Unix, this is using the $XDG_DATA_DIRS[0] default. WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
pipenv/vendor/appdirs.py
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "multipath" is an optional parameter only applicable to *nix
            which indicates that the entire list of data dirs should be
            returned. By default, the first item from XDG_DATA_DIRS is
            returned, or '/usr/local/share/<AppName>',
            if XDG_DATA_DIRS is not set

    Typical site data directories are:
        Mac OS X:   /Library/Application Support/<AppName>
        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.

    For Unix, this is using the $XDG_DATA_DIRS[0] default.

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_DATA_DIRS
        # only first, if multipath is False
        path = os.getenv('XDG_DATA_DIRS',
                         os.pathsep.join(['/usr/local/share', '/usr/share']))
        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            pathlist = [os.sep.join([x, appname]) for x in pathlist]

        if multipath:
            path = os.pathsep.join(pathlist)
        else:
            path = pathlist[0]
        return path

    if appname and version:
        path = os.path.join(path, version)
    return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "multipath" is an optional parameter only applicable to *nix
            which indicates that the entire list of data dirs should be
            returned. By default, the first item from XDG_DATA_DIRS is
            returned, or '/usr/local/share/<AppName>',
            if XDG_DATA_DIRS is not set

    Typical site data directories are:
        Mac OS X:   /Library/Application Support/<AppName>
        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.

    For Unix, this is using the $XDG_DATA_DIRS[0] default.

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_DATA_DIRS
        # only first, if multipath is False
        path = os.getenv('XDG_DATA_DIRS',
                         os.pathsep.join(['/usr/local/share', '/usr/share']))
        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            pathlist = [os.sep.join([x, appname]) for x in pathlist]

        if multipath:
            path = os.pathsep.join(pathlist)
        else:
            path = pathlist[0]
        return path

    if appname and version:
        path = os.path.join(path, version)
    return path
[ "r", "Return", "full", "path", "to", "the", "user", "-", "shared", "data", "dir", "for", "this", "application", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/appdirs.py#L100-L163
[ "def", "site_data_dir", "(", "appname", "=", "None", ",", "appauthor", "=", "None", ",", "version", "=", "None", ",", "multipath", "=", "False", ")", ":", "if", "system", "==", "\"win32\"", ":", "if", "appauthor", "is", "None", ":", "appauthor", "=", "...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
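Usage sketch, assuming the standalone appdirs package that this vendored copy mirrors; the application and company names are made up.

from appdirs import site_data_dir

print(site_data_dir("MyApp", "MyCompany"))
# e.g. '/usr/local/share/MyApp' on Linux, '/Library/Application Support/MyApp' on macOS
print(site_data_dir("MyApp", multipath=True))
# e.g. '/usr/local/share/MyApp:/usr/share/MyApp' on Linux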
train
user_config_dir
r"""Return full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "appauthor" (only used on Windows) is the name of the appauthor or distributing body for this application. Typically it is the owning company name. This falls back to appname. You may pass False to disable it. "version" is an optional version path element to append to the path. You might want to use this if you want multiple versions of your app to be able to run independently. If used, this would typically be "<major>.<minor>". Only applied when appname is present. "roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user config directories are: Mac OS X: same as user_data_dir Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. That means, by default "~/.config/<AppName>".
pipenv/vendor/appdirs.py
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific config dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user config directories are:
        Mac OS X:   same as user_data_dir
        Unix:       ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
        Win *:      same as user_data_dir

    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    """
    if system in ["win32", "darwin"]:
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific config dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user config directories are:
        Mac OS X:   same as user_data_dir
        Unix:       ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
        Win *:      same as user_data_dir

    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    """
    if system in ["win32", "darwin"]:
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
[ "r", "Return", "full", "path", "to", "the", "user", "-", "specific", "config", "dir", "for", "this", "application", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/appdirs.py#L166-L203
[ "def", "user_config_dir", "(", "appname", "=", "None", ",", "appauthor", "=", "None", ",", "version", "=", "None", ",", "roaming", "=", "False", ")", ":", "if", "system", "in", "[", "\"win32\"", ",", "\"darwin\"", "]", ":", "path", "=", "user_data_dir", ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
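Usage sketch under the same assumption (standalone appdirs package):

from appdirs import user_config_dir

print(user_config_dir("MyApp"))
# e.g. '/home/<user>/.config/MyApp' on Linux (honours $XDG_CONFIG_HOME if set)
print(user_config_dir("MyApp", version="1.0"))
# e.g. '/home/<user>/.config/MyApp/1.0'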
train
request
Constructs and sends a :class:`Request <Request>`. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send in the body of the :class:`Request`. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How many seconds to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :return: :class:`Response <Response>` object :rtype: requests.Response Usage:: >>> import requests >>> req = requests.request('GET', 'https://httpbin.org/get') <Response [200]>
pipenv/vendor/requests/api.py
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``)
        for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object containing
        additional headers to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'https://httpbin.org/get')
      <Response [200]>
    """

    # By using the 'with' statement we are sure the session is closed, thus we
    # avoid leaving sockets open which can trigger a ResourceWarning in some
    # cases, and look like a memory leak in others.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``)
        for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object containing
        additional headers to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'https://httpbin.org/get')
      <Response [200]>
    """

    # By using the 'with' statement we are sure the session is closed, thus we
    # avoid leaving sockets open which can trigger a ResourceWarning in some
    # cases, and look like a memory leak in others.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
[ "Constructs", "and", "sends", "a", ":", "class", ":", "Request", "<Request", ">", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/api.py#L16-L60
[ "def", "request", "(", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "# By using the 'with' statement we are sure the session is closed, thus we", "# avoid leaving sockets open which can trigger a ResourceWarning in some", "# cases, and look like a memory leak in others.", "...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
get
r"""Sends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response
pipenv/vendor/requests/api.py
def get(url, params=None, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    kwargs.setdefault('allow_redirects', True)
    return request('get', url, params=params, **kwargs)
def get(url, params=None, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """

    kwargs.setdefault('allow_redirects', True)
    return request('get', url, params=params, **kwargs)
[ "r", "Sends", "a", "GET", "request", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/api.py#L63-L75
[ "def", "get", "(", "url", ",", "params", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'allow_redirects'", ",", "True", ")", "return", "request", "(", "'get'", ",", "url", ",", "params", "=", "params", ",", "*", ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
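Editor's note: a minimal usage sketch tying the two requests records above together. The URL mirrors the doctest already in the docstring, and the params end up in the query string:

import requests

# get() forwards to request('get', ...), which runs inside a throwaway
# Session so the underlying socket is closed when the call returns.
resp = requests.get("https://httpbin.org/get", params={"q": "pipenv"}, timeout=5)
print(resp.status_code)     # 200 if httpbin is reachable
print(resp.json()["args"])  # {'q': 'pipenv'} -- params became the query string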
train
dump
Writes out dict as toml to a file Args: o: Object to dump into toml f: File descriptor where the toml should be stored Returns: String containing the toml corresponding to dictionary Raises: TypeError: When anything other than file descriptor is passed
pipenv/vendor/toml/encoder.py
def dump(o, f): """Writes out dict as toml to a file Args: o: Object to dump into toml f: File descriptor where the toml should be stored Returns: String containing the toml corresponding to dictionary Raises: TypeError: When anything other than file descriptor is passed """ if not f.write: raise TypeError("You can only dump an object to a file descriptor") d = dumps(o) f.write(d) return d
def dump(o, f): """Writes out dict as toml to a file Args: o: Object to dump into toml f: File descriptor where the toml should be stored Returns: String containing the toml corresponding to dictionary Raises: TypeError: When anything other than file descriptor is passed """ if not f.write: raise TypeError("You can only dump an object to a file descriptor") d = dumps(o) f.write(d) return d
[ "Writes", "out", "dict", "as", "toml", "to", "a", "file" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/toml/encoder.py#L11-L29
[ "def", "dump", "(", "o", ",", "f", ")", ":", "if", "not", "f", ".", "write", ":", "raise", "TypeError", "(", "\"You can only dump an object to a file descriptor\"", ")", "d", "=", "dumps", "(", "o", ")", "f", ".", "write", "(", "d", ")", "return", "d" ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
dumps
Stringifies input dict as toml

Args:
    o: Object to dump into toml
    encoder: The ``TomlEncoder`` to use for serializing values (a new
        ``TomlEncoder`` is created when omitted)

Returns:
    String containing the toml corresponding to dict
pipenv/vendor/toml/encoder.py
def dumps(o, encoder=None):
    """Stringifies input dict as toml

    Args:
        o: Object to dump into toml
        encoder: The ``TomlEncoder`` to use for serializing values (a new
            ``TomlEncoder`` is created when omitted)

    Returns:
        String containing the toml corresponding to dict
    """

    retval = ""
    if encoder is None:
        encoder = TomlEncoder(o.__class__)
    addtoretval, sections = encoder.dump_sections(o, "")
    retval += addtoretval
    while sections:
        newsections = encoder.get_empty_table()
        for section in sections:
            addtoretval, addtosections = encoder.dump_sections(
                sections[section], section)

            if addtoretval or (not addtoretval and not addtosections):
                if retval and retval[-2:] != "\n\n":
                    retval += "\n"
                retval += "[" + section + "]\n"
                if addtoretval:
                    retval += addtoretval
            for s in addtosections:
                newsections[section + "." + s] = addtosections[s]
        sections = newsections

    return retval
def dumps(o, encoder=None):
    """Stringifies input dict as toml

    Args:
        o: Object to dump into toml
        encoder: The ``TomlEncoder`` to use for serializing values (a new
            ``TomlEncoder`` is created when omitted)

    Returns:
        String containing the toml corresponding to dict
    """

    retval = ""
    if encoder is None:
        encoder = TomlEncoder(o.__class__)
    addtoretval, sections = encoder.dump_sections(o, "")
    retval += addtoretval
    while sections:
        newsections = encoder.get_empty_table()
        for section in sections:
            addtoretval, addtosections = encoder.dump_sections(
                sections[section], section)

            if addtoretval or (not addtoretval and not addtosections):
                if retval and retval[-2:] != "\n\n":
                    retval += "\n"
                retval += "[" + section + "]\n"
                if addtoretval:
                    retval += addtoretval
            for s in addtosections:
                newsections[section + "." + s] = addtosections[s]
        sections = newsections

    return retval
[ "Stringifies", "input", "dict", "as", "toml" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/toml/encoder.py#L32-L64
[ "def", "dumps", "(", "o", ",", "encoder", "=", "None", ")", ":", "retval", "=", "\"\"", "if", "encoder", "is", "None", ":", "encoder", "=", "TomlEncoder", "(", "o", ".", "__class__", ")", "addtoretval", ",", "sections", "=", "encoder", ".", "dump_secti...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
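Editor's note: a usage sketch for the two toml records above; the output file name is a hypothetical placeholder:

import toml

config = {"tool": {"demo": {"name": "example", "count": 3}}}

# dumps() serializes top-level scalars first, then emits [section] headers
# one nesting level at a time (the `while sections` loop above).
text = toml.dumps(config)
print(text)
# [tool.demo]
# name = "example"
# count = 3

with open("demo.toml", "w") as f:   # hypothetical path
    toml.dump(config, f)            # same text, written through the file object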
train
TomlEncoder.dump_inline_table
Preserve inline table in its compact syntax instead of expanding into subsection. https://github.com/toml-lang/toml#user-content-inline-table
pipenv/vendor/toml/encoder.py
def dump_inline_table(self, section): """Preserve inline table in its compact syntax instead of expanding into subsection. https://github.com/toml-lang/toml#user-content-inline-table """ retval = "" if isinstance(section, dict): val_list = [] for k, v in section.items(): val = self.dump_inline_table(v) val_list.append(k + " = " + val) retval += "{ " + ", ".join(val_list) + " }\n" return retval else: return unicode(self.dump_value(section))
def dump_inline_table(self, section): """Preserve inline table in its compact syntax instead of expanding into subsection. https://github.com/toml-lang/toml#user-content-inline-table """ retval = "" if isinstance(section, dict): val_list = [] for k, v in section.items(): val = self.dump_inline_table(v) val_list.append(k + " = " + val) retval += "{ " + ", ".join(val_list) + " }\n" return retval else: return unicode(self.dump_value(section))
[ "Preserve", "inline", "table", "in", "its", "compact", "syntax", "instead", "of", "expanding", "into", "subsection", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/toml/encoder.py#L137-L152
[ "def", "dump_inline_table", "(", "self", ",", "section", ")", ":", "retval", "=", "\"\"", "if", "isinstance", "(", "section", ",", "dict", ")", ":", "val_list", "=", "[", "]", "for", "k", ",", "v", "in", "section", ".", "items", "(", ")", ":", "val...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
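Editor's note: a self-contained sketch of the inline-table recursion in the record above, reduced to ``repr`` for scalar values (the real method defers scalars to ``TomlEncoder.dump_value``, which produces proper TOML literals):

def inline_table(section):
    # Dicts collapse to "{ k = v, ... }" recursively; anything else is a scalar.
    if isinstance(section, dict):
        items = ", ".join(
            "%s = %s" % (k, inline_table(v).strip()) for k, v in section.items()
        )
        return "{ " + items + " }\n"
    return repr(section)

print(inline_table({"version": "1.0", "extras": {"dev": True}}), end="")
# { version = '1.0', extras = { dev = True } }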
train
_is_env_truthy
An environment variable is truthy if it exists and isn't one of (0, false, no, off)
pipenv/environments.py
def _is_env_truthy(name): """An environment variable is truthy if it exists and isn't one of (0, false, no, off) """ if name not in os.environ: return False return os.environ.get(name).lower() not in ("0", "false", "no", "off")
def _is_env_truthy(name): """An environment variable is truthy if it exists and isn't one of (0, false, no, off) """ if name not in os.environ: return False return os.environ.get(name).lower() not in ("0", "false", "no", "off")
[ "An", "environment", "variable", "is", "truthy", "if", "it", "exists", "and", "isn", "t", "one", "of", "(", "0", "false", "no", "off", ")" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environments.py#L17-L22
[ "def", "_is_env_truthy", "(", "name", ")", ":", "if", "name", "not", "in", "os", ".", "environ", ":", "return", "False", "return", "os", ".", "environ", ".", "get", "(", "name", ")", ".", "lower", "(", ")", "not", "in", "(", "\"0\"", ",", "\"false\...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
is_in_virtualenv
Check virtualenv membership dynamically :return: True or false depending on whether we are in a regular virtualenv or not :rtype: bool
pipenv/environments.py
def is_in_virtualenv(): """ Check virtualenv membership dynamically :return: True or false depending on whether we are in a regular virtualenv or not :rtype: bool """ pipenv_active = os.environ.get("PIPENV_ACTIVE", False) virtual_env = None use_system = False ignore_virtualenvs = bool(os.environ.get("PIPENV_IGNORE_VIRTUALENVS", False)) if not pipenv_active and not ignore_virtualenvs: virtual_env = os.environ.get("VIRTUAL_ENV") use_system = bool(virtual_env) return (use_system or virtual_env) and not (pipenv_active or ignore_virtualenvs)
def is_in_virtualenv(): """ Check virtualenv membership dynamically :return: True or false depending on whether we are in a regular virtualenv or not :rtype: bool """ pipenv_active = os.environ.get("PIPENV_ACTIVE", False) virtual_env = None use_system = False ignore_virtualenvs = bool(os.environ.get("PIPENV_IGNORE_VIRTUALENVS", False)) if not pipenv_active and not ignore_virtualenvs: virtual_env = os.environ.get("VIRTUAL_ENV") use_system = bool(virtual_env) return (use_system or virtual_env) and not (pipenv_active or ignore_virtualenvs)
[ "Check", "virtualenv", "membership", "dynamically" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environments.py#L293-L309
[ "def", "is_in_virtualenv", "(", ")", ":", "pipenv_active", "=", "os", ".", "environ", ".", "get", "(", "\"PIPENV_ACTIVE\"", ",", "False", ")", "virtual_env", "=", "None", "use_system", "=", "False", "ignore_virtualenvs", "=", "bool", "(", "os", ".", "environ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
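Editor's note: a small demonstration of the truthiness rule from the ``_is_env_truthy`` record above, re-declared locally so it runs without pipenv installed:

import os

def is_env_truthy(name):
    # Mirrors pipenv: unset is falsey; set-but-in-the-deny-list is falsey too.
    if name not in os.environ:
        return False
    return os.environ[name].lower() not in ("0", "false", "no", "off")

os.environ["DEMO_FLAG"] = "Off"          # hypothetical variable name
print(is_env_truthy("DEMO_FLAG"))        # False -- "off" is in the deny list
os.environ["DEMO_FLAG"] = "1"
print(is_env_truthy("DEMO_FLAG"))        # True
print(is_env_truthy("DEMO_UNSET_FLAG"))  # False -- not set at all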
train
unpackb
Unpack an object from `packed`. Raises `ExtraData` when `packed` contains extra bytes. See :class:`Unpacker` for options.
pipenv/patched/notpip/_vendor/msgpack/fallback.py
def unpackb(packed, **kwargs): """ Unpack an object from `packed`. Raises `ExtraData` when `packed` contains extra bytes. See :class:`Unpacker` for options. """ unpacker = Unpacker(None, **kwargs) unpacker.feed(packed) try: ret = unpacker._unpack() except OutOfData: raise UnpackValueError("Data is not enough.") if unpacker._got_extradata(): raise ExtraData(ret, unpacker._get_extradata()) return ret
def unpackb(packed, **kwargs): """ Unpack an object from `packed`. Raises `ExtraData` when `packed` contains extra bytes. See :class:`Unpacker` for options. """ unpacker = Unpacker(None, **kwargs) unpacker.feed(packed) try: ret = unpacker._unpack() except OutOfData: raise UnpackValueError("Data is not enough.") if unpacker._got_extradata(): raise ExtraData(ret, unpacker._get_extradata()) return ret
[ "Unpack", "an", "object", "from", "packed", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/msgpack/fallback.py#L111-L126
[ "def", "unpackb", "(", "packed", ",", "*", "*", "kwargs", ")", ":", "unpacker", "=", "Unpacker", "(", "None", ",", "*", "*", "kwargs", ")", "unpacker", ".", "feed", "(", "packed", ")", "try", ":", "ret", "=", "unpacker", ".", "_unpack", "(", ")", ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
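Editor's note: a round-trip sketch for the ``unpackb`` record, shown with the standalone ``msgpack`` package rather than pipenv's vendored copy (the calls used here exist in both):

import msgpack

packed = msgpack.packb({"name": "demo", "version": 3})
print(msgpack.unpackb(packed, raw=False))   # {'name': 'demo', 'version': 3}

# Trailing bytes after a complete object raise ExtraData, as the record notes.
try:
    msgpack.unpackb(packed + b"\x00", raw=False)
except msgpack.exceptions.ExtraData as exc:
    print("extra bytes:", exc.extra)        # extra bytes: b'\x00'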
train
Unpacker._consume
Gets rid of the used parts of the buffer.
pipenv/patched/notpip/_vendor/msgpack/fallback.py
def _consume(self): """ Gets rid of the used parts of the buffer. """ self._stream_offset += self._buff_i - self._buf_checkpoint self._buf_checkpoint = self._buff_i
def _consume(self): """ Gets rid of the used parts of the buffer. """ self._stream_offset += self._buff_i - self._buf_checkpoint self._buf_checkpoint = self._buff_i
[ "Gets", "rid", "of", "the", "used", "parts", "of", "the", "buffer", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/msgpack/fallback.py#L300-L303
[ "def", "_consume", "(", "self", ")", ":", "self", ".", "_stream_offset", "+=", "self", ".", "_buff_i", "-", "self", ".", "_buf_checkpoint", "self", ".", "_buf_checkpoint", "=", "self", ".", "_buff_i" ]
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
HTTPConnection._new_conn
Establish a socket connection and set nodelay settings on it. :return: New socket connection.
pipenv/vendor/urllib3/connection.py
def _new_conn(self): """ Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ extra_kw = {} if self.source_address: extra_kw['source_address'] = self.source_address if self.socket_options: extra_kw['socket_options'] = self.socket_options try: conn = connection.create_connection( (self._dns_host, self.port), self.timeout, **extra_kw) except SocketTimeout as e: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout)) except SocketError as e: raise NewConnectionError( self, "Failed to establish a new connection: %s" % e) return conn
def _new_conn(self): """ Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ extra_kw = {} if self.source_address: extra_kw['source_address'] = self.source_address if self.socket_options: extra_kw['socket_options'] = self.socket_options try: conn = connection.create_connection( (self._dns_host, self.port), self.timeout, **extra_kw) except SocketTimeout as e: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout)) except SocketError as e: raise NewConnectionError( self, "Failed to establish a new connection: %s" % e) return conn
[ "Establish", "a", "socket", "connection", "and", "set", "nodelay", "settings", "on", "it", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/connection.py#L145-L170
[ "def", "_new_conn", "(", "self", ")", ":", "extra_kw", "=", "{", "}", "if", "self", ".", "source_address", ":", "extra_kw", "[", "'source_address'", "]", "=", "self", ".", "source_address", "if", "self", ".", "socket_options", ":", "extra_kw", "[", "'socke...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
HTTPConnection.request_chunked
Alternative to the common request method, which sends the body with chunked encoding and not as one block
pipenv/vendor/urllib3/connection.py
def request_chunked(self, method, url, body=None, headers=None): """ Alternative to the common request method, which sends the body with chunked encoding and not as one block """ headers = HTTPHeaderDict(headers if headers is not None else {}) skip_accept_encoding = 'accept-encoding' in headers skip_host = 'host' in headers self.putrequest( method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) for header, value in headers.items(): self.putheader(header, value) if 'transfer-encoding' not in headers: self.putheader('Transfer-Encoding', 'chunked') self.endheaders() if body is not None: stringish_types = six.string_types + (bytes,) if isinstance(body, stringish_types): body = (body,) for chunk in body: if not chunk: continue if not isinstance(chunk, bytes): chunk = chunk.encode('utf8') len_str = hex(len(chunk))[2:] self.send(len_str.encode('utf-8')) self.send(b'\r\n') self.send(chunk) self.send(b'\r\n') # After the if clause, to always have a closed body self.send(b'0\r\n\r\n')
def request_chunked(self, method, url, body=None, headers=None): """ Alternative to the common request method, which sends the body with chunked encoding and not as one block """ headers = HTTPHeaderDict(headers if headers is not None else {}) skip_accept_encoding = 'accept-encoding' in headers skip_host = 'host' in headers self.putrequest( method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) for header, value in headers.items(): self.putheader(header, value) if 'transfer-encoding' not in headers: self.putheader('Transfer-Encoding', 'chunked') self.endheaders() if body is not None: stringish_types = six.string_types + (bytes,) if isinstance(body, stringish_types): body = (body,) for chunk in body: if not chunk: continue if not isinstance(chunk, bytes): chunk = chunk.encode('utf8') len_str = hex(len(chunk))[2:] self.send(len_str.encode('utf-8')) self.send(b'\r\n') self.send(chunk) self.send(b'\r\n') # After the if clause, to always have a closed body self.send(b'0\r\n\r\n')
[ "Alternative", "to", "the", "common", "request", "method", "which", "sends", "the", "body", "with", "chunked", "encoding", "and", "not", "as", "one", "block" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/connection.py#L184-L220
[ "def", "request_chunked", "(", "self", ",", "method", ",", "url", ",", "body", "=", "None", ",", "headers", "=", "None", ")", ":", "headers", "=", "HTTPHeaderDict", "(", "headers", "if", "headers", "is", "not", "None", "else", "{", "}", ")", "skip_acce...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
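Editor's note: the loop in ``request_chunked`` above is plain HTTP/1.1 chunked transfer framing; this standalone sketch reproduces the exact bytes it sends for a two-chunk body:

def frame_chunks(chunks):
    # Per chunk: hex length (no 0x prefix), CRLF, payload, CRLF;
    # a zero-length chunk terminates the body.
    out = b""
    for chunk in chunks:
        if not chunk:
            continue
        out += hex(len(chunk))[2:].encode("utf-8") + b"\r\n" + chunk + b"\r\n"
    return out + b"0\r\n\r\n"

print(frame_chunks([b"hello ", b"world"]))
# b'6\r\nhello \r\n5\r\nworld\r\n0\r\n\r\n'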
train
VerifiedHTTPSConnection.set_cert
This method should only be called once, before the connection is used.
pipenv/vendor/urllib3/connection.py
def set_cert(self, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None): """ This method should only be called once, before the connection is used. """ # If cert_reqs is not provided, we can try to guess. If the user gave # us a cert database, we assume they want to use it: otherwise, if # they gave us an SSL Context object we should use whatever is set for # it. if cert_reqs is None: if ca_certs or ca_cert_dir: cert_reqs = 'CERT_REQUIRED' elif self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
def set_cert(self, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None): """ This method should only be called once, before the connection is used. """ # If cert_reqs is not provided, we can try to guess. If the user gave # us a cert database, we assume they want to use it: otherwise, if # they gave us an SSL Context object we should use whatever is set for # it. if cert_reqs is None: if ca_certs or ca_cert_dir: cert_reqs = 'CERT_REQUIRED' elif self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
[ "This", "method", "should", "only", "be", "called", "once", "before", "the", "connection", "is", "used", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/connection.py#L274-L297
[ "def", "set_cert", "(", "self", ",", "key_file", "=", "None", ",", "cert_file", "=", "None", ",", "cert_reqs", "=", "None", ",", "ca_certs", "=", "None", ",", "assert_hostname", "=", "None", ",", "assert_fingerprint", "=", "None", ",", "ca_cert_dir", "=", ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
prettify_exc
Catch known errors and prettify them instead of showing the entire traceback, for better UX
pipenv/exceptions.py
def prettify_exc(error): """Catch known errors and prettify them instead of showing the entire traceback, for better UX""" matched_exceptions = [k for k in KNOWN_EXCEPTIONS.keys() if k in error] if not matched_exceptions: return "{}".format(vistir.misc.decode_for_output(error)) errors = [] for match in matched_exceptions: _, error, info = error.rpartition(KNOWN_EXCEPTIONS[match]) errors.append("{} {}".format(error, info)) return "\n".join(errors)
def prettify_exc(error): """Catch known errors and prettify them instead of showing the entire traceback, for better UX""" matched_exceptions = [k for k in KNOWN_EXCEPTIONS.keys() if k in error] if not matched_exceptions: return "{}".format(vistir.misc.decode_for_output(error)) errors = [] for match in matched_exceptions: _, error, info = error.rpartition(KNOWN_EXCEPTIONS[match]) errors.append("{} {}".format(error, info)) return "\n".join(errors)
[ "Catch", "known", "errors", "and", "prettify", "them", "instead", "of", "showing", "the", "entire", "traceback", "for", "better", "UX" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/exceptions.py#L412-L423
[ "def", "prettify_exc", "(", "error", ")", ":", "matched_exceptions", "=", "[", "k", "for", "k", "in", "KNOWN_EXCEPTIONS", ".", "keys", "(", ")", "if", "k", "in", "error", "]", "if", "not", "matched_exceptions", ":", "return", "\"{}\"", ".", "format", "("...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
get_stream_handle
Get the OS appropriate handle for the corresponding output stream.

:param stream: The stream to get the handle for
:return: A handle to the appropriate stream, either a ctypes buffer
    or **sys.stdout** or **sys.stderr**.
pipenv/vendor/vistir/cursor.py
def get_stream_handle(stream=sys.stdout):
    """
    Get the OS appropriate handle for the corresponding output stream.

    :param stream: The stream to get the handle for
    :return: A handle to the appropriate stream, either a ctypes buffer
        or **sys.stdout** or **sys.stderr**.
    """
    handle = stream
    if os.name == "nt":
        from ctypes import windll

        handle_id = WIN_STDOUT_HANDLE_ID
        handle = windll.kernel32.GetStdHandle(handle_id)
    return handle
def get_stream_handle(stream=sys.stdout):
    """
    Get the OS appropriate handle for the corresponding output stream.

    :param stream: The stream to get the handle for
    :return: A handle to the appropriate stream, either a ctypes buffer
        or **sys.stdout** or **sys.stderr**.
    """
    handle = stream
    if os.name == "nt":
        from ctypes import windll

        handle_id = WIN_STDOUT_HANDLE_ID
        handle = windll.kernel32.GetStdHandle(handle_id)
    return handle
[ "Get", "the", "OS", "appropriate", "handle", "for", "the", "corresponding", "output", "stream", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/cursor.py#L19-L33
[ "def", "get_stream_handle", "(", "stream", "=", "sys", ".", "stdout", ")", ":", "handle", "=", "stream", "if", "os", ".", "name", "==", "\"nt\"", ":", "from", "ctypes", "import", "windll", "handle_id", "=", "WIN_STDOUT_HANDLE_ID", "handle", "=", "windll", ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
hide_cursor
Hide the console cursor on the given stream

:param stream: The stream to hide the cursor on
:return: None
:rtype: None
pipenv/vendor/vistir/cursor.py
def hide_cursor(stream=sys.stdout):
    """
    Hide the console cursor on the given stream

    :param stream: The stream to hide the cursor on
    :return: None
    :rtype: None
    """
    handle = get_stream_handle(stream=stream)
    if os.name == "nt":
        from ctypes import windll

        cursor_info = CONSOLE_CURSOR_INFO()
        windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    else:
        handle.write("\033[?25l")
        handle.flush()
def hide_cursor(stream=sys.stdout):
    """
    Hide the console cursor on the given stream

    :param stream: The stream to hide the cursor on
    :return: None
    :rtype: None
    """
    handle = get_stream_handle(stream=stream)
    if os.name == "nt":
        from ctypes import windll

        cursor_info = CONSOLE_CURSOR_INFO()
        windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    else:
        handle.write("\033[?25l")
        handle.flush()
[ "Hide", "the", "console", "cursor", "on", "the", "given", "stream" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/cursor.py#L36-L55
[ "def", "hide_cursor", "(", "stream", "=", "sys", ".", "stdout", ")", ":", "handle", "=", "get_stream_handle", "(", "stream", "=", "stream", ")", "if", "os", ".", "name", "==", "\"nt\"", ":", "from", "ctypes", "import", "windll", "cursor_info", "=", "CONS...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
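Editor's note: on POSIX the two cursor records above reduce to ANSI/DECTCEM escape sequences. A minimal sketch follows; the show-cursor sequence ("\033[?25h") is the standard inverse of what ``hide_cursor`` writes and is not taken from the record itself:

import sys

HIDE = "\033[?25l"   # DECTCEM: hide cursor (what hide_cursor writes on POSIX)
SHOW = "\033[?25h"   # DECTCEM: show cursor -- the standard inverse sequence

def with_hidden_cursor(draw, stream=sys.stdout):
    stream.write(HIDE)
    stream.flush()
    try:
        draw()
    finally:
        stream.write(SHOW)   # restore even if draw() raises
        stream.flush()

with_hidden_cursor(lambda: print("rendering a spinner frame..."))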
train
choice_complete
Returns the completion results for click.core.Choice

Parameters
----------
ctx : click.core.Context
    The current context
incomplete : str
    The string to complete

Returns
-------
[(str, str)]
    A list of completion results
pipenv/vendor/click_completion/patch.py
def choice_complete(self, ctx, incomplete):
    """Returns the completion results for click.core.Choice

    Parameters
    ----------
    ctx : click.core.Context
        The current context
    incomplete : str
        The string to complete

    Returns
    -------
    [(str, str)]
        A list of completion results
    """
    return [
        (c, None) for c in self.choices
        if completion_configuration.match_incomplete(c, incomplete)
    ]
def choice_complete(self, ctx, incomplete):
    """Returns the completion results for click.core.Choice

    Parameters
    ----------
    ctx : click.core.Context
        The current context
    incomplete : str
        The string to complete

    Returns
    -------
    [(str, str)]
        A list of completion results
    """
    return [
        (c, None) for c in self.choices
        if completion_configuration.match_incomplete(c, incomplete)
    ]
[ "Returns", "the", "completion", "results", "for", "click", ".", "core", ".", "Choice" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click_completion/patch.py#L39-L57
[ "def", "choice_complete", "(", "self", ",", "ctx", ",", "incomplete", ")", ":", "return", "[", "(", "c", ",", "None", ")", "for", "c", "in", "self", ".", "choices", "if", "completion_configuration", ".", "match_incomplete", "(", "c", ",", "incomplete", "...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
_shellcomplete
Internal handler for the bash completion support. Parameters ---------- cli : click.Command The main click Command of the program prog_name : str The program name on the command line complete_var : str The environment variable name used to control the completion behavior (Default value = None)
pipenv/vendor/click_completion/patch.py
def _shellcomplete(cli, prog_name, complete_var=None): """Internal handler for the bash completion support. Parameters ---------- cli : click.Command The main click Command of the program prog_name : str The program name on the command line complete_var : str The environment variable name used to control the completion behavior (Default value = None) """ if complete_var is None: complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper() complete_instr = os.environ.get(complete_var) if not complete_instr: return if complete_instr == 'source': echo(get_code(prog_name=prog_name, env_name=complete_var)) elif complete_instr == 'source-bash': echo(get_code('bash', prog_name, complete_var)) elif complete_instr == 'source-fish': echo(get_code('fish', prog_name, complete_var)) elif complete_instr == 'source-powershell': echo(get_code('powershell', prog_name, complete_var)) elif complete_instr == 'source-zsh': echo(get_code('zsh', prog_name, complete_var)) elif complete_instr in ['complete', 'complete-bash']: # keep 'complete' for bash for backward compatibility do_bash_complete(cli, prog_name) elif complete_instr == 'complete-fish': do_fish_complete(cli, prog_name) elif complete_instr == 'complete-powershell': do_powershell_complete(cli, prog_name) elif complete_instr == 'complete-zsh': do_zsh_complete(cli, prog_name) elif complete_instr == 'install': shell, path = install(prog_name=prog_name, env_name=complete_var) click.echo('%s completion installed in %s' % (shell, path)) elif complete_instr == 'install-bash': shell, path = install(shell='bash', prog_name=prog_name, env_name=complete_var) click.echo('%s completion installed in %s' % (shell, path)) elif complete_instr == 'install-fish': shell, path = install(shell='fish', prog_name=prog_name, env_name=complete_var) click.echo('%s completion installed in %s' % (shell, path)) elif complete_instr == 'install-zsh': shell, path = install(shell='zsh', prog_name=prog_name, env_name=complete_var) click.echo('%s completion installed in %s' % (shell, path)) elif complete_instr == 'install-powershell': shell, path = install(shell='powershell', prog_name=prog_name, env_name=complete_var) click.echo('%s completion installed in %s' % (shell, path)) sys.exit()
def _shellcomplete(cli, prog_name, complete_var=None): """Internal handler for the bash completion support. Parameters ---------- cli : click.Command The main click Command of the program prog_name : str The program name on the command line complete_var : str The environment variable name used to control the completion behavior (Default value = None) """ if complete_var is None: complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper() complete_instr = os.environ.get(complete_var) if not complete_instr: return if complete_instr == 'source': echo(get_code(prog_name=prog_name, env_name=complete_var)) elif complete_instr == 'source-bash': echo(get_code('bash', prog_name, complete_var)) elif complete_instr == 'source-fish': echo(get_code('fish', prog_name, complete_var)) elif complete_instr == 'source-powershell': echo(get_code('powershell', prog_name, complete_var)) elif complete_instr == 'source-zsh': echo(get_code('zsh', prog_name, complete_var)) elif complete_instr in ['complete', 'complete-bash']: # keep 'complete' for bash for backward compatibility do_bash_complete(cli, prog_name) elif complete_instr == 'complete-fish': do_fish_complete(cli, prog_name) elif complete_instr == 'complete-powershell': do_powershell_complete(cli, prog_name) elif complete_instr == 'complete-zsh': do_zsh_complete(cli, prog_name) elif complete_instr == 'install': shell, path = install(prog_name=prog_name, env_name=complete_var) click.echo('%s completion installed in %s' % (shell, path)) elif complete_instr == 'install-bash': shell, path = install(shell='bash', prog_name=prog_name, env_name=complete_var) click.echo('%s completion installed in %s' % (shell, path)) elif complete_instr == 'install-fish': shell, path = install(shell='fish', prog_name=prog_name, env_name=complete_var) click.echo('%s completion installed in %s' % (shell, path)) elif complete_instr == 'install-zsh': shell, path = install(shell='zsh', prog_name=prog_name, env_name=complete_var) click.echo('%s completion installed in %s' % (shell, path)) elif complete_instr == 'install-powershell': shell, path = install(shell='powershell', prog_name=prog_name, env_name=complete_var) click.echo('%s completion installed in %s' % (shell, path)) sys.exit()
[ "Internal", "handler", "for", "the", "bash", "completion", "support", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click_completion/patch.py#L81-L133
[ "def", "_shellcomplete", "(", "cli", ",", "prog_name", ",", "complete_var", "=", "None", ")", ":", "if", "complete_var", "is", "None", ":", "complete_var", "=", "'_%s_COMPLETE'", "%", "(", "prog_name", ".", "replace", "(", "'-'", ",", "'_'", ")", ")", "....
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
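Editor's note: the dispatch in ``_shellcomplete`` is driven by a single environment variable derived from the program name. A sketch of that derivation, with shell invocations shown as comments (``my-tool`` is a hypothetical program):

# How the control variable is derived for a program named "my-tool":
prog_name = "my-tool"
complete_var = "_%s_COMPLETE" % prog_name.replace("-", "_").upper()
print(complete_var)   # _MY_TOOL_COMPLETE

# The same variable then selects the mode from a shell, e.g.:
#   _MY_TOOL_COMPLETE=source-bash my-tool    # print bash completion code
#   _MY_TOOL_COMPLETE=complete-zsh my-tool   # answer a zsh completion request
#   _MY_TOOL_COMPLETE=install-fish my-tool   # install fish completion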
train
patch
Patch click
pipenv/vendor/click_completion/patch.py
def patch(): """Patch click""" import click click.types.ParamType.complete = param_type_complete click.types.Choice.complete = choice_complete click.core.MultiCommand.get_command_short_help = multicommand_get_command_short_help click.core._bashcomplete = _shellcomplete
def patch(): """Patch click""" import click click.types.ParamType.complete = param_type_complete click.types.Choice.complete = choice_complete click.core.MultiCommand.get_command_short_help = multicommand_get_command_short_help click.core._bashcomplete = _shellcomplete
[ "Patch", "click" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click_completion/patch.py#L136-L142
[ "def", "patch", "(", ")", ":", "import", "click", "click", ".", "types", ".", "ParamType", ".", "complete", "=", "param_type_complete", "click", ".", "types", ".", "Choice", ".", "complete", "=", "choice_complete", "click", ".", "core", ".", "MultiCommand", ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
parse_expr
expr ::= seq ( '|' seq )* ;
pipenv/vendor/docopt.py
def parse_expr(tokens, options): """expr ::= seq ( '|' seq )* ;""" seq = parse_seq(tokens, options) if tokens.current() != '|': return seq result = [Required(*seq)] if len(seq) > 1 else seq while tokens.current() == '|': tokens.move() seq = parse_seq(tokens, options) result += [Required(*seq)] if len(seq) > 1 else seq return [Either(*result)] if len(result) > 1 else result
def parse_expr(tokens, options): """expr ::= seq ( '|' seq )* ;""" seq = parse_seq(tokens, options) if tokens.current() != '|': return seq result = [Required(*seq)] if len(seq) > 1 else seq while tokens.current() == '|': tokens.move() seq = parse_seq(tokens, options) result += [Required(*seq)] if len(seq) > 1 else seq return [Either(*result)] if len(result) > 1 else result
[ "expr", "::", "=", "seq", "(", "|", "seq", ")", "*", ";" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/docopt.py#L379-L389
[ "def", "parse_expr", "(", "tokens", ",", "options", ")", ":", "seq", "=", "parse_seq", "(", "tokens", ",", "options", ")", "if", "tokens", ".", "current", "(", ")", "!=", "'|'", ":", "return", "seq", "result", "=", "[", "Required", "(", "*", "seq", ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
parse_seq
seq ::= ( atom [ '...' ] )* ;
pipenv/vendor/docopt.py
def parse_seq(tokens, options): """seq ::= ( atom [ '...' ] )* ;""" result = [] while tokens.current() not in [None, ']', ')', '|']: atom = parse_atom(tokens, options) if tokens.current() == '...': atom = [OneOrMore(*atom)] tokens.move() result += atom return result
def parse_seq(tokens, options): """seq ::= ( atom [ '...' ] )* ;""" result = [] while tokens.current() not in [None, ']', ')', '|']: atom = parse_atom(tokens, options) if tokens.current() == '...': atom = [OneOrMore(*atom)] tokens.move() result += atom return result
[ "seq", "::", "=", "(", "atom", "[", "...", "]", ")", "*", ";" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/docopt.py#L392-L401
[ "def", "parse_seq", "(", "tokens", ",", "options", ")", ":", "result", "=", "[", "]", "while", "tokens", ".", "current", "(", ")", "not", "in", "[", "None", ",", "']'", ",", "')'", ",", "'|'", "]", ":", "atom", "=", "parse_atom", "(", "tokens", "...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
parse_argv
Parse command-line argument vector. If options_first: argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ; else: argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;
pipenv/vendor/docopt.py
def parse_argv(tokens, options, options_first=False): """Parse command-line argument vector. If options_first: argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ; else: argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ; """ parsed = [] while tokens.current() is not None: if tokens.current() == '--': return parsed + [Argument(None, v) for v in tokens] elif tokens.current().startswith('--'): parsed += parse_long(tokens, options) elif tokens.current().startswith('-') and tokens.current() != '-': parsed += parse_shorts(tokens, options) elif options_first: return parsed + [Argument(None, v) for v in tokens] else: parsed.append(Argument(None, tokens.move())) return parsed
def parse_argv(tokens, options, options_first=False): """Parse command-line argument vector. If options_first: argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ; else: argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ; """ parsed = [] while tokens.current() is not None: if tokens.current() == '--': return parsed + [Argument(None, v) for v in tokens] elif tokens.current().startswith('--'): parsed += parse_long(tokens, options) elif tokens.current().startswith('-') and tokens.current() != '-': parsed += parse_shorts(tokens, options) elif options_first: return parsed + [Argument(None, v) for v in tokens] else: parsed.append(Argument(None, tokens.move())) return parsed
[ "Parse", "command", "-", "line", "argument", "vector", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/docopt.py#L430-L451
[ "def", "parse_argv", "(", "tokens", ",", "options", ",", "options_first", "=", "False", ")", ":", "parsed", "=", "[", "]", "while", "tokens", ".", "current", "(", ")", "is", "not", "None", ":", "if", "tokens", ".", "current", "(", ")", "==", "'--'", ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
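Editor's note: the three grammar records above are docopt internals; the public entry point exercises them as below. The usage string is the project's classic naval-fate example, trimmed to two patterns:

from docopt import docopt

USAGE = """Naval Fate.

Usage:
  naval_fate ship new <name>...
  naval_fate ship <name> move <x> <y> [--speed=<kn>]

Options:
  --speed=<kn>  Speed in knots [default: 10].
"""

# parse_argv tokenizes argv; parse_expr/parse_seq build the pattern tree
# from the Usage: section, and the best-matching branch fills the dict.
args = docopt(USAGE, argv=["ship", "Guardian", "move", "10", "50", "--speed=20"])
print(args["<name>"], args["--speed"])   # ['Guardian'] 20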
train
unnest
Flatten an arbitrarily nested iterable :param elem: An iterable to flatten :type elem: :class:`~collections.Iterable` >>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599))))) >>> list(vistir.misc.unnest(nested_iterable)) [1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599]
pipenv/vendor/vistir/misc.py
def unnest(elem): """Flatten an arbitrarily nested iterable :param elem: An iterable to flatten :type elem: :class:`~collections.Iterable` >>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599))))) >>> list(vistir.misc.unnest(nested_iterable)) [1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599] """ if isinstance(elem, Iterable) and not isinstance(elem, six.string_types): elem, target = tee(elem, 2) else: target = elem for el in target: if isinstance(el, Iterable) and not isinstance(el, six.string_types): el, el_copy = tee(el, 2) for sub in unnest(el_copy): yield sub else: yield el
def unnest(elem): """Flatten an arbitrarily nested iterable :param elem: An iterable to flatten :type elem: :class:`~collections.Iterable` >>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599))))) >>> list(vistir.misc.unnest(nested_iterable)) [1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599] """ if isinstance(elem, Iterable) and not isinstance(elem, six.string_types): elem, target = tee(elem, 2) else: target = elem for el in target: if isinstance(el, Iterable) and not isinstance(el, six.string_types): el, el_copy = tee(el, 2) for sub in unnest(el_copy): yield sub else: yield el
[ "Flatten", "an", "arbitrarily", "nested", "iterable" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/misc.py#L74-L95
[ "def", "unnest", "(", "elem", ")", ":", "if", "isinstance", "(", "elem", ",", "Iterable", ")", "and", "not", "isinstance", "(", "elem", ",", "six", ".", "string_types", ")", ":", "elem", ",", "target", "=", "tee", "(", "elem", ",", "2", ")", "else"...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
run
Use `subprocess.Popen` to get the output of a command and decode it.

:param list cmd: A list representing the command you want to run.
:param dict env: Additional environment settings to pass through to the subprocess.
:param bool return_object: When True, returns the whole subprocess instance
:param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
:param str cwd: Current working directory context to use for spawning the subprocess.
:param bool verbose: Whether to print stdout in real time when non-blocking.
:param bool nospin: Whether to disable the cli spinner.
:param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
:param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
:param int display_limit: The max width of output lines to display when using a spinner.
:param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
:returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.

.. Warning:: Merging standard out and standard error in a nonblocking subprocess
    can cause errors in some cases and may not be ideal. Consider disabling
    this functionality.
pipenv/vendor/vistir/misc.py
def run(
    cmd,
    env=None,
    return_object=False,
    block=True,
    cwd=None,
    verbose=False,
    nospin=False,
    spinner_name=None,
    combine_stderr=True,
    display_limit=200,
    write_to_stdout=True,
):
    """Use `subprocess.Popen` to get the output of a command and decode it.

    :param list cmd: A list representing the command you want to run.
    :param dict env: Additional environment settings to pass through to the subprocess.
    :param bool return_object: When True, returns the whole subprocess instance
    :param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
    :param bool verbose: Whether to print stdout in real time when non-blocking.
    :param bool nospin: Whether to disable the cli spinner.
    :param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
    :param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
    :param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
    :returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.

    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
        can cause errors in some cases and may not be ideal. Consider disabling
        this functionality.
    """

    _env = os.environ.copy()
    if env:
        _env.update(env)
    if six.PY2:
        fs_encode = partial(to_bytes, encoding=locale_encoding)
        _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
    else:
        _env = {k: fs_str(v) for k, v in _env.items()}
    if not spinner_name:
        spinner_name = "bouncingBar"
    if six.PY2:
        if isinstance(cmd, six.string_types):
            cmd = cmd.encode("utf-8")
        elif isinstance(cmd, (list, tuple)):
            cmd = [c.encode("utf-8") for c in cmd]
    if not isinstance(cmd, Script):
        cmd = Script.parse(cmd)
    if block or not return_object:
        combine_stderr = False
    start_text = ""
    with spinner(
        spinner_name=spinner_name,
        start_text=start_text,
        nospin=nospin,
        write_to_stdout=write_to_stdout,
    ) as sp:
        return _create_subprocess(
            cmd,
            env=_env,
            return_object=return_object,
            block=block,
            cwd=cwd,
            verbose=verbose,
            spinner=sp,
            combine_stderr=combine_stderr,
            start_text=start_text,
            write_to_stdout=True,
        )
def run(
    cmd,
    env=None,
    return_object=False,
    block=True,
    cwd=None,
    verbose=False,
    nospin=False,
    spinner_name=None,
    combine_stderr=True,
    display_limit=200,
    write_to_stdout=True,
):
    """Use `subprocess.Popen` to get the output of a command and decode it.

    :param list cmd: A list representing the command you want to run.
    :param dict env: Additional environment settings to pass through to the subprocess.
    :param bool return_object: When True, returns the whole subprocess instance
    :param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
    :param bool verbose: Whether to print stdout in real time when non-blocking.
    :param bool nospin: Whether to disable the cli spinner.
    :param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
    :param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
    :param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
    :returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.

    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
        can cause errors in some cases and may not be ideal. Consider disabling
        this functionality.
    """

    _env = os.environ.copy()
    if env:
        _env.update(env)
    if six.PY2:
        fs_encode = partial(to_bytes, encoding=locale_encoding)
        _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
    else:
        _env = {k: fs_str(v) for k, v in _env.items()}
    if not spinner_name:
        spinner_name = "bouncingBar"
    if six.PY2:
        if isinstance(cmd, six.string_types):
            cmd = cmd.encode("utf-8")
        elif isinstance(cmd, (list, tuple)):
            cmd = [c.encode("utf-8") for c in cmd]
    if not isinstance(cmd, Script):
        cmd = Script.parse(cmd)
    if block or not return_object:
        combine_stderr = False
    start_text = ""
    with spinner(
        spinner_name=spinner_name,
        start_text=start_text,
        nospin=nospin,
        write_to_stdout=write_to_stdout,
    ) as sp:
        return _create_subprocess(
            cmd,
            env=_env,
            return_object=return_object,
            block=block,
            cwd=cwd,
            verbose=verbose,
            spinner=sp,
            combine_stderr=combine_stderr,
            start_text=start_text,
            write_to_stdout=True,
        )
[ "Use", "subprocess", ".", "Popen", "to", "get", "the", "output", "of", "a", "command", "and", "decode", "it", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/misc.py#L265-L335
[ "def", "run", "(", "cmd", ",", "env", "=", "None", ",", "return_object", "=", "False", ",", "block", "=", "True", ",", "cwd", "=", "None", ",", "verbose", "=", "False", ",", "nospin", "=", "False", ",", "spinner_name", "=", "None", ",", "combine_stde...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
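Editor's note: a blocking usage sketch for ``run``, matching how ``load_path`` in the next record calls it:

from vistir.misc import run

# Blocking call: returns the decoded (stdout, stderr) pair once the
# command exits; nospin=True disables the CLI spinner.
out, err = run(["python", "-c", "print('hello from a subprocess')"], nospin=True)
print(out)   # hello from a subprocess

# Per the docstring, block=False instead hands back a potentially
# still-running subprocess object.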
train
load_path
Load the :mod:`sys.path` from the given python executable's environment as json :param str python: Path to a valid python executable :return: A python representation of the `sys.path` value of the given python executable. :rtype: list >>> load_path("/home/user/.virtualenvs/requirementslib-5MhGuG3C/bin/python") ['', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python37.zip', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/lib-dynload', '/home/user/.pyenv/versions/3.7.0/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/site-packages', '/home/user/git/requirementslib/src']
pipenv/vendor/vistir/misc.py
def load_path(python): """Load the :mod:`sys.path` from the given python executable's environment as json :param str python: Path to a valid python executable :return: A python representation of the `sys.path` value of the given python executable. :rtype: list >>> load_path("/home/user/.virtualenvs/requirementslib-5MhGuG3C/bin/python") ['', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python37.zip', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/lib-dynload', '/home/user/.pyenv/versions/3.7.0/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/site-packages', '/home/user/git/requirementslib/src'] """ python = Path(python).as_posix() out, err = run( [python, "-c", "import json, sys; print(json.dumps(sys.path))"], nospin=True ) if out: return json.loads(out) else: return []
def load_path(python): """Load the :mod:`sys.path` from the given python executable's environment as json :param str python: Path to a valid python executable :return: A python representation of the `sys.path` value of the given python executable. :rtype: list >>> load_path("/home/user/.virtualenvs/requirementslib-5MhGuG3C/bin/python") ['', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python37.zip', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/lib-dynload', '/home/user/.pyenv/versions/3.7.0/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/site-packages', '/home/user/git/requirementslib/src'] """ python = Path(python).as_posix() out, err = run( [python, "-c", "import json, sys; print(json.dumps(sys.path))"], nospin=True ) if out: return json.loads(out) else: return []
[ "Load", "the", ":", "mod", ":", "sys", ".", "path", "from", "the", "given", "python", "executable", "s", "environment", "as", "json" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/misc.py#L338-L356
[ "def", "load_path", "(", "python", ")", ":", "python", "=", "Path", "(", "python", ")", ".", "as_posix", "(", ")", "out", ",", "err", "=", "run", "(", "[", "python", ",", "\"-c\"", ",", "\"import json, sys; print(json.dumps(sys.path))\"", "]", ",", "nospin...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
train
to_bytes
Force a value to bytes.

:param string: Some input that can be converted to bytes.
:type string: str, bytes, or a memoryview subclass
:param encoding: The encoding to use for conversions, defaults to "utf-8"
:type encoding: str, optional
:return: Corresponding byte representation (for use in filesystem operations)
:rtype: bytes
pipenv/vendor/vistir/misc.py
def to_bytes(string, encoding="utf-8", errors="ignore"):
    """Force a value to bytes.

    :param string: Some input that can be converted to bytes.
    :type string: str, bytes, or a memoryview subclass
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :type encoding: str, optional
    :return: Corresponding byte representation (for use in filesystem operations)
    :rtype: bytes
    """

    if not errors:
        if encoding.lower() == "utf-8":
            errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if isinstance(string, bytes):
        if encoding.lower() == "utf-8":
            return string
        else:
            return string.decode("utf-8").encode(encoding, errors)
    elif isinstance(string, memoryview):
        return bytes(string)
    elif not isinstance(string, six.string_types):
        try:
            if six.PY3:
                return six.text_type(string).encode(encoding, errors)
            else:
                return bytes(string)
        except UnicodeEncodeError:
            if isinstance(string, Exception):
                return b" ".join(to_bytes(arg, encoding, errors) for arg in string)
            return six.text_type(string).encode(encoding, errors)
    else:
        return string.encode(encoding, errors)
def to_bytes(string, encoding="utf-8", errors="ignore"):
    """Force a value to bytes.

    :param string: Some input that can be converted to bytes.
    :type string: str, bytes, or a memoryview subclass
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :type encoding: str, optional
    :return: Corresponding byte representation (for use in filesystem operations)
    :rtype: bytes
    """

    if not errors:
        if encoding.lower() == "utf-8":
            errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if isinstance(string, bytes):
        if encoding.lower() == "utf-8":
            return string
        else:
            return string.decode("utf-8").encode(encoding, errors)
    elif isinstance(string, memoryview):
        return bytes(string)
    elif not isinstance(string, six.string_types):
        try:
            if six.PY3:
                return six.text_type(string).encode(encoding, errors)
            else:
                return bytes(string)
        except UnicodeEncodeError:
            if isinstance(string, Exception):
                return b" ".join(to_bytes(arg, encoding, errors) for arg in string)
            return six.text_type(string).encode(encoding, errors)
    else:
        return string.encode(encoding, errors)
[ "Force", "a", "value", "to", "bytes", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/misc.py#L397-L431
[ "def", "to_bytes", "(", "string", ",", "encoding", "=", "\"utf-8\"", ",", "errors", "=", "\"ignore\"", ")", ":", "if", "not", "errors", ":", "if", "encoding", ".", "lower", "(", ")", "==", "\"utf-8\"", ":", "errors", "=", "\"surrogateescape\"", "if", "si...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
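A minimal sketch exercising the three input branches of to_bytes (text, bytes, and memoryview); the import path is an assumption as above:

from vistir.misc import to_bytes  # assumed standalone install of vistir

# Text is encoded with the given encoding (errors default to "ignore").
assert to_bytes(u"caf\u00e9") == b"caf\xc3\xa9"
# Bytes whose encoding already matches are returned unchanged.
assert to_bytes(b"abc") == b"abc"
# A memoryview is simply materialized into bytes.
assert to_bytes(memoryview(b"abc")) == b"abc"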
train
to_text
Force a value to a text-type. :param string: Some input that can be converted to a unicode representation. :type string: str, bytes, or unicode :param encoding: The encoding to use for conversions, defaults to "utf-8" :type encoding: str, optional :return: The unicode representation of the string :rtype: str
pipenv/vendor/vistir/misc.py
def to_text(string, encoding="utf-8", errors=None):
    """Force a value to a text-type.

    :param string: Some input that can be converted to a unicode representation.
    :type string: str, bytes, or unicode
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :type encoding: str, optional
    :return: The unicode representation of the string
    :rtype: str
    """
    if not errors:
        if encoding.lower() == "utf-8":
            errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if issubclass(type(string), six.text_type):
        return string
    try:
        if not issubclass(type(string), six.string_types):
            if six.PY3:
                if isinstance(string, bytes):
                    string = six.text_type(string, encoding, errors)
                else:
                    string = six.text_type(string)
            elif hasattr(string, "__unicode__"):
                string = six.text_type(string)
            else:
                string = six.text_type(bytes(string), encoding, errors)
        else:
            string = string.decode(encoding, errors)
    except UnicodeDecodeError:
        string = " ".join(to_text(arg, encoding, errors) for arg in string)
    return string
def to_text(string, encoding="utf-8", errors=None):
    """Force a value to a text-type.

    :param string: Some input that can be converted to a unicode representation.
    :type string: str, bytes, or unicode
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :type encoding: str, optional
    :return: The unicode representation of the string
    :rtype: str
    """
    if not errors:
        if encoding.lower() == "utf-8":
            errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if issubclass(type(string), six.text_type):
        return string
    try:
        if not issubclass(type(string), six.string_types):
            if six.PY3:
                if isinstance(string, bytes):
                    string = six.text_type(string, encoding, errors)
                else:
                    string = six.text_type(string)
            elif hasattr(string, "__unicode__"):
                string = six.text_type(string)
            else:
                string = six.text_type(bytes(string), encoding, errors)
        else:
            string = string.decode(encoding, errors)
    except UnicodeDecodeError:
        string = " ".join(to_text(arg, encoding, errors) for arg in string)
    return string
[ "Force", "a", "value", "to", "a", "text", "-", "type", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/misc.py#L434-L467
[ "def", "to_text", "(", "string", ",", "encoding", "=", "\"utf-8\"", ",", "errors", "=", "None", ")", ":", "if", "not", "errors", ":", "if", "encoding", ".", "lower", "(", ")", "==", "\"utf-8\"", ":", "errors", "=", "\"surrogateescape\"", "if", "six", "...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
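A minimal sketch of to_text, the decoding counterpart; on Python 3 the default errors value resolves to "surrogateescape" so undecodable bytes survive a round-trip (import path assumed as above):

from vistir.misc import to_text  # assumed standalone install of vistir

# Bytes decode back to text.
assert to_text(b"caf\xc3\xa9") == u"caf\u00e9"
# Arbitrary objects are coerced through their text representation.
assert to_text(3.14) == u"3.14"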
train
divide
Split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping :param int n: Number of unique groups :param iter iterable: An iterable to split up :return: A list of new iterables derived from the original iterable :rtype: list
pipenv/vendor/vistir/misc.py
def divide(n, iterable):
    """
    Split an iterable into n groups, per
    https://more-itertools.readthedocs.io/en/latest/api.html#grouping

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up
    :return: A list of new iterables derived from the original iterable
    :rtype: list
    """
    seq = tuple(iterable)
    q, r = divmod(len(seq), n)
    ret = []
    for i in range(n):
        start = (i * q) + (i if i < r else r)
        stop = ((i + 1) * q) + (i + 1 if i + 1 < r else r)
        ret.append(iter(seq[start:stop]))
    return ret
def divide(n, iterable):
    """
    Split an iterable into n groups, per
    https://more-itertools.readthedocs.io/en/latest/api.html#grouping

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up
    :return: A list of new iterables derived from the original iterable
    :rtype: list
    """
    seq = tuple(iterable)
    q, r = divmod(len(seq), n)
    ret = []
    for i in range(n):
        start = (i * q) + (i if i < r else r)
        stop = ((i + 1) * q) + (i + 1 if i + 1 < r else r)
        ret.append(iter(seq[start:stop]))
    return ret
[ "split", "an", "iterable", "into", "n", "groups", "per", "https", ":", "//", "more", "-", "itertools", ".", "readthedocs", ".", "io", "/", "en", "/", "latest", "/", "api", ".", "html#grouping" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/misc.py#L470-L489
[ "def", "divide", "(", "n", ",", "iterable", ")", ":", "seq", "=", "tuple", "(", "iterable", ")", "q", ",", "r", "=", "divmod", "(", "len", "(", "seq", ")", ",", "n", ")", "ret", "=", "[", "]", "for", "i", "in", "range", "(", "n", ")", ":", ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
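A worked sketch of divide showing how the divmod remainder front-loads the larger groups: the first r groups get q + 1 items and the rest get q (import path assumed as above):

from vistir.misc import divide  # assumed standalone install of vistir

# len(seq) == 10, n == 3 -> q == 3, r == 1, so group sizes are 4, 3, 3.
groups = [list(g) for g in divide(3, range(10))]
assert groups == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]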
train
getpreferredencoding
Determine the proper output encoding for terminal rendering
pipenv/vendor/vistir/misc.py
def getpreferredencoding():
    """Determine the proper output encoding for terminal rendering"""
    # Borrowed from Invoke
    # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
    _encoding = locale.getpreferredencoding(False)
    if six.PY2 and not sys.platform == "win32":
        _default_encoding = locale.getdefaultlocale()[1]
        if _default_encoding is not None:
            _encoding = _default_encoding
    return _encoding
def getpreferredencoding():
    """Determine the proper output encoding for terminal rendering"""
    # Borrowed from Invoke
    # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
    _encoding = locale.getpreferredencoding(False)
    if six.PY2 and not sys.platform == "win32":
        _default_encoding = locale.getdefaultlocale()[1]
        if _default_encoding is not None:
            _encoding = _default_encoding
    return _encoding
[ "Determine", "the", "proper", "output", "encoding", "for", "terminal", "rendering" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/misc.py#L522-L532
[ "def", "getpreferredencoding", "(", ")", ":", "# Borrowed from Invoke", "# (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)", "_encoding", "=", "locale", ".", "getpreferredencoding", "(", "False", ")", "if", "six", ".", "PY2", "and", "not", "sys"...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
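A minimal sketch of getpreferredencoding; the result is locale-dependent, so the values in the comment are only typical examples (import path assumed as above):

from vistir.misc import getpreferredencoding  # assumed standalone install

# Commonly "UTF-8" on modern Linux/macOS terminals; often a code page such
# as "cp1252" on Windows. Python 2 off Windows falls back to the default locale.
print(getpreferredencoding())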
train
decode_for_output
Given a string, decode it for output to a terminal. :param str output: A string to print to a terminal :param target_stream: A stream to write to; we will encode to target this stream's encoding if possible. :param dict translation_map: A mapping of unicode character ordinals to replacement strings. :return: A re-encoded string using the preferred encoding :rtype: str
pipenv/vendor/vistir/misc.py
def decode_for_output(output, target_stream=None, translation_map=None):
    """Given a string, decode it for output to a terminal.

    :param str output: A string to print to a terminal
    :param target_stream: A stream to write to; we will encode to target this
        stream's encoding if possible.
    :param dict translation_map: A mapping of unicode character ordinals to
        replacement strings.
    :return: A re-encoded string using the preferred encoding
    :rtype: str
    """
    if not isinstance(output, six.string_types):
        return output
    encoding = None
    if target_stream is not None:
        encoding = getattr(target_stream, "encoding", None)
    encoding = get_output_encoding(encoding)
    try:
        output = _encode(output, encoding=encoding, translation_map=translation_map)
    except (UnicodeDecodeError, UnicodeEncodeError):
        output = to_native_string(output)
        output = _encode(
            output, encoding=encoding, errors="replace", translation_map=translation_map
        )
    return to_text(output, encoding=encoding, errors="replace")
def decode_for_output(output, target_stream=None, translation_map=None):
    """Given a string, decode it for output to a terminal.

    :param str output: A string to print to a terminal
    :param target_stream: A stream to write to; we will encode to target this
        stream's encoding if possible.
    :param dict translation_map: A mapping of unicode character ordinals to
        replacement strings.
    :return: A re-encoded string using the preferred encoding
    :rtype: str
    """
    if not isinstance(output, six.string_types):
        return output
    encoding = None
    if target_stream is not None:
        encoding = getattr(target_stream, "encoding", None)
    encoding = get_output_encoding(encoding)
    try:
        output = _encode(output, encoding=encoding, translation_map=translation_map)
    except (UnicodeDecodeError, UnicodeEncodeError):
        output = to_native_string(output)
        output = _encode(
            output, encoding=encoding, errors="replace", translation_map=translation_map
        )
    return to_text(output, encoding=encoding, errors="replace")
[ "Given", "a", "string", "decode", "it", "for", "output", "to", "a", "terminal" ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/misc.py#L574-L597
[ "def", "decode_for_output", "(", "output", ",", "target_stream", "=", "None", ",", "translation_map", "=", "None", ")", ":", "if", "not", "isinstance", "(", "output", ",", "six", ".", "string_types", ")", ":", "return", "output", "encoding", "=", "None", "...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
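A minimal sketch of decode_for_output targeting the current stdout; characters the terminal encoding cannot represent are replaced rather than raising UnicodeEncodeError (import path assumed as above):

import sys

from vistir.misc import decode_for_output  # assumed standalone install

# Safe to print even on terminals whose encoding lacks the snowman glyph.
print(decode_for_output(u"snowman: \u2603", target_stream=sys.stdout))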
train
get_canonical_encoding_name
Given an encoding name, get the canonical name from a codec lookup. :param str name: The name of the codec to lookup :return: The canonical version of the codec name :rtype: str
pipenv/vendor/vistir/misc.py
def get_canonical_encoding_name(name):
    # type: (str) -> str
    """
    Given an encoding name, get the canonical name from a codec lookup.

    :param str name: The name of the codec to lookup
    :return: The canonical version of the codec name
    :rtype: str
    """
    import codecs
    try:
        codec = codecs.lookup(name)
    except LookupError:
        return name
    else:
        return codec.name
def get_canonical_encoding_name(name):
    # type: (str) -> str
    """
    Given an encoding name, get the canonical name from a codec lookup.

    :param str name: The name of the codec to lookup
    :return: The canonical version of the codec name
    :rtype: str
    """
    import codecs
    try:
        codec = codecs.lookup(name)
    except LookupError:
        return name
    else:
        return codec.name
[ "Given", "an", "encoding", "name", "get", "the", "canonical", "name", "from", "a", "codec", "lookup", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/misc.py#L600-L617
[ "def", "get_canonical_encoding_name", "(", "name", ")", ":", "# type: (str) -> str", "import", "codecs", "try", ":", "codec", "=", "codecs", ".", "lookup", "(", "name", ")", "except", "LookupError", ":", "return", "name", "else", ":", "return", "codec", ".", ...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
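A minimal sketch of get_canonical_encoding_name; note the LookupError branch means unknown names pass through unchanged instead of raising (import path assumed as above):

from vistir.misc import get_canonical_encoding_name  # assumed standalone install

assert get_canonical_encoding_name("UTF8") == "utf-8"
# Unknown names fall through unchanged rather than raising LookupError.
assert get_canonical_encoding_name("no-such-codec") == "no-such-codec"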
train
get_wrapped_stream
Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream. :param stream: A stream instance to wrap :returns: A new, wrapped stream :rtype: :class:`StreamWrapper`
pipenv/vendor/vistir/misc.py
def get_wrapped_stream(stream):
    """
    Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.

    :param stream: A stream instance to wrap
    :returns: A new, wrapped stream
    :rtype: :class:`StreamWrapper`
    """
    if stream is None:
        raise TypeError("must provide a stream to wrap")
    encoding = getattr(stream, "encoding", None)
    encoding = get_output_encoding(encoding)
    return StreamWrapper(stream, encoding, "replace", line_buffering=True)
def get_wrapped_stream(stream):
    """
    Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.

    :param stream: A stream instance to wrap
    :returns: A new, wrapped stream
    :rtype: :class:`StreamWrapper`
    """
    if stream is None:
        raise TypeError("must provide a stream to wrap")
    encoding = getattr(stream, "encoding", None)
    encoding = get_output_encoding(encoding)
    return StreamWrapper(stream, encoding, "replace", line_buffering=True)
[ "Given", "a", "stream", "wrap", "it", "in", "a", "StreamWrapper", "instance", "and", "return", "the", "wrapped", "stream", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/vistir/misc.py#L620-L633
[ "def", "get_wrapped_stream", "(", "stream", ")", ":", "if", "stream", "is", "None", ":", "raise", "TypeError", "(", "\"must provide a stream to wrap\"", ")", "encoding", "=", "getattr", "(", "stream", ",", "\"encoding\"", ",", "None", ")", "encoding", "=", "ge...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
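A minimal sketch of get_wrapped_stream, assuming StreamWrapper behaves like io.TextIOWrapper over a bytes-oriented stream, so an io.BytesIO stands in for a console buffer (import path assumed as above):

import io

from vistir.misc import get_wrapped_stream  # assumed standalone install

buf = io.BytesIO()  # stands in for a console's underlying byte buffer
wrapped = get_wrapped_stream(buf)
wrapped.write(u"snowman: \u2603\n")  # encoded with errors="replace"
wrapped.flush()
assert b"snowman" in buf.getvalue()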
train
is_connection_dropped
Returns True if the connection is dropped and should be closed. :param conn: :class:`httplib.HTTPConnection` object. Note: For platforms like AppEngine, this will always return ``False`` to let the platform handle connection recycling transparently for us.
pipenv/vendor/urllib3/util/connection.py
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if sock is False:  # Platform-specific: AppEngine
        return False
    if sock is None:  # Connection already closed (such as by httplib).
        return True
    try:
        # Returns True if readable, which here means it's been dropped
        return wait_for_read(sock, timeout=0.0)
    except NoWayToWaitForSocketError:  # Platform-specific: AppEngine
        return False
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if sock is False:  # Platform-specific: AppEngine
        return False
    if sock is None:  # Connection already closed (such as by httplib).
        return True
    try:
        # Returns True if readable, which here means it's been dropped
        return wait_for_read(sock, timeout=0.0)
    except NoWayToWaitForSocketError:  # Platform-specific: AppEngine
        return False
[ "Returns", "True", "if", "the", "connection", "is", "dropped", "and", "should", "be", "closed", "." ]
pypa/pipenv
python
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/util/connection.py#L7-L26
[ "def", "is_connection_dropped", "(", "conn", ")", ":", "# Platform-specific", "sock", "=", "getattr", "(", "conn", ",", "'sock'", ",", "False", ")", "if", "sock", "is", "False", ":", "# Platform-specific: AppEngine", "return", "False", "if", "sock", "is", "Non...
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
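A minimal sketch of probing a pooled connection before reuse, assuming a urllib3 1.x-era install that matches this vendored copy (the helper was removed in urllib3 2.x); the host and network access are assumptions:

import http.client

from urllib3.util.connection import is_connection_dropped  # urllib3 1.x

conn = http.client.HTTPConnection("example.com", 80, timeout=5)
conn.request("HEAD", "/")
conn.getresponse().read()  # drain the response so the socket could be reused
if is_connection_dropped(conn):
    conn.close()  # the keep-alive socket went away; reconnect before reuse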