Dataset schema (per-column type and value range):

  column            type    range
  id                int32   0 .. 252k
  repo              string  7 .. 55 chars
  path              string  4 .. 127 chars
  func_name         string  1 .. 88 chars
  original_string   string  75 .. 19.8k chars
  language          string  1 distinct value ("python")
  code              string  75 .. 19.8k chars
  code_tokens       list
  docstring         string  3 .. 17.3k chars
  docstring_tokens  list
  sha               string  40 chars (fixed)
  url               string  87 .. 242 chars

Sample records follow, one field per line in the column order above.
13,300
gamechanger/mongothon
mongothon/model.py
Model.apply_defaults
python
def apply_defaults(self):
    """Apply schema defaults to this document."""
    self.emit('will_apply_defaults')
    self.schema.apply_defaults(self)
    self.emit('did_apply_defaults')
[ "def", "apply_defaults", "(", "self", ")", ":", "self", ".", "emit", "(", "'will_apply_defaults'", ")", "self", ".", "schema", ".", "apply_defaults", "(", "self", ")", "self", ".", "emit", "(", "'did_apply_defaults'", ")" ]
Apply schema defaults to this document.
[ "Apply", "schema", "defaults", "to", "this", "document", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L106-L110
13,301
gamechanger/mongothon
mongothon/model.py
Model.reload
python
def reload(self):
    """Reloads the current model's data from the underlying database record,
    updating it in-place."""
    self.emit('will_reload')
    self.populate(self.collection.find_one(type(self)._id_spec(self['_id'])))
    self.emit('did_reload')
[ "def", "reload", "(", "self", ")", ":", "self", ".", "emit", "(", "'will_reload'", ")", "self", ".", "populate", "(", "self", ".", "collection", ".", "find_one", "(", "type", "(", "self", ")", ".", "_id_spec", "(", "self", "[", "'_id'", "]", ")", ")", ")", "self", ".", "emit", "(", "'did_reload'", ")" ]
Reloads the current model's data from the underlying database record, updating it in-place.
[ "Reloads", "the", "current", "model", "s", "data", "from", "the", "underlying", "database", "record", "updating", "it", "in", "-", "place", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L195-L200
13,302
gamechanger/mongothon
mongothon/model.py
Model.on
python
def on(cls, event, handler_func=None):
    """
    Registers a handler function to be called whenever an instance of the
    model emits the given event.

    This method can either be called directly, passing a function reference:

        MyModel.on('did_save', my_function)

    ...or as a decorator of the function to be registered.

        @MyModel.on('did_save')
        def myfunction(my_model):
            pass
    """
    if handler_func:
        cls.handler_registrar().register(event, handler_func)
        return

    def register(fn):
        cls.handler_registrar().register(event, fn)
        return fn

    return register
[ "def", "on", "(", "cls", ",", "event", ",", "handler_func", "=", "None", ")", ":", "if", "handler_func", ":", "cls", ".", "handler_registrar", "(", ")", ".", "register", "(", "event", ",", "handler_func", ")", "return", "def", "register", "(", "fn", ")", ":", "cls", ".", "handler_registrar", "(", ")", ".", "register", "(", "event", ",", "fn", ")", "return", "fn", "return", "register" ]
Registers a handler function to be called whenever an instance of the model emits the given event. This method can either be called directly, passing a function reference: MyModel.on('did_save', my_function) ...or as a decorator of the function to be registered. @MyModel.on('did_save') def myfunction(my_model): pass
[ "Registers", "a", "handler", "function", "whenever", "an", "instance", "of", "the", "model", "emits", "the", "given", "event", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L203-L227
13,303
gamechanger/mongothon
mongothon/model.py
Model._emit
python
def _emit(self, event, document, *args, **kwargs):
    """
    Inner version of emit which passes the given document as the primary
    argument to handler functions.
    """
    self.handler_registrar().apply(event, document, *args, **kwargs)
[ "def", "_emit", "(", "self", ",", "event", ",", "document", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "handler_registrar", "(", ")", ".", "apply", "(", "event", ",", "document", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Inner version of emit which passes the given document as the primary argument to handler functions.
[ "Inner", "version", "of", "emit", "which", "passes", "the", "given", "document", "as", "the", "primary", "argument", "to", "handler", "functions", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L229-L234
13,304
gamechanger/mongothon
mongothon/model.py
Model.emit
python
def emit(self, event, *args, **kwargs):
    """
    Emits an event call to all handler functions registered against this
    model's class and the given event type.
    """
    self._emit(event, self, *args, **kwargs)
[ "def", "emit", "(", "self", ",", "event", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_emit", "(", "event", ",", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Emits an event call to all handler functions registered against this model's class and the given event type.
[ "Emits", "an", "event", "call", "to", "all", "handler", "functions", "registered", "against", "this", "model", "s", "class", "and", "the", "given", "event", "type", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L237-L242
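Taken together, on and emit give the model class a simple pub/sub layer: on registers handlers, emit invokes them with the emitting instance as the first argument. A minimal sketch of the flow, where MyModel stands in for any class produced by create_model (covered later in this section):

# MyModel is a hypothetical create_model product, used only for illustration.
@MyModel.on('did_save')
def log_save(model, *args, **kwargs):
    print("saved:", dict(model), args, kwargs)

doc = MyModel({'name': 'bob'})
doc.emit('did_save')          # handler is called with `doc` as its first argument
doc.emit('did_save', 1, x=2)  # extra args/kwargs are forwarded to handlers unchanged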
13,305
gamechanger/mongothon
mongothon/model.py
Model.static_method
python
def static_method(cls, f):
    """Decorator which dynamically binds static methods to the model for later use."""
    setattr(cls, f.__name__, staticmethod(f))
    return f
[ "def", "static_method", "(", "cls", ",", "f", ")", ":", "setattr", "(", "cls", ",", "f", ".", "__name__", ",", "staticmethod", "(", "f", ")", ")", "return", "f" ]
Decorator which dynamically binds static methods to the model for later use.
[ "Decorator", "which", "dynamically", "binds", "static", "methods", "to", "the", "model", "for", "later", "use", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L267-L270
13,306
gamechanger/mongothon
mongothon/model.py
Model.class_method
python
def class_method(cls, f):
    """Decorator which dynamically binds class methods to the model for later use."""
    setattr(cls, f.__name__, classmethod(f))
    return f
[ "def", "class_method", "(", "cls", ",", "f", ")", ":", "setattr", "(", "cls", ",", "f", ".", "__name__", ",", "classmethod", "(", "f", ")", ")", "return", "f" ]
Decorator which dynamically binds class methods to the model for later use.
[ "Decorator", "which", "dynamically", "binds", "class", "methods", "to", "the", "model", "for", "later", "use", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L273-L276
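A short sketch of both decorators in use; MyModel is again a hypothetical create_model product, and find_by_make/normalize_make are illustrative names only (the pymongo-style find() call is an assumption about the model API):

@MyModel.class_method
def find_by_make(cls, make):
    # assumption: the model exposes a pymongo-style find() classmethod
    return cls.find({'make': make})

@MyModel.static_method
def normalize_make(make):
    return make.strip().lower()

MyModel.normalize_make('  Honda ')  # => 'honda'
MyModel.find_by_make('honda')       # callable as an ordinary classmethod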
13,307
gamechanger/mongothon
mongothon/model.py
Model.scope
python
def scope(cls, f):
    """Decorator which can dynamically attach a query scope to the model."""
    if not hasattr(cls, "scopes"):
        cls.scopes = copy(STANDARD_SCOPES)
    cls.scopes.append(f)

    def create_builder(self, *args, **kwargs):
        bldr = ScopeBuilder(cls, cls.scopes)
        return getattr(bldr, f.__name__)(*args, **kwargs)

    setattr(cls, f.__name__, classmethod(create_builder))
    return f
[ "def", "scope", "(", "cls", ",", "f", ")", ":", "if", "not", "hasattr", "(", "cls", ",", "\"scopes\"", ")", ":", "cls", ".", "scopes", "=", "copy", "(", "STANDARD_SCOPES", ")", "cls", ".", "scopes", ".", "append", "(", "f", ")", "def", "create_builder", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "bldr", "=", "ScopeBuilder", "(", "cls", ",", "cls", ".", "scopes", ")", "return", "getattr", "(", "bldr", ",", "f", ".", "__name__", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "setattr", "(", "cls", ",", "f", ".", "__name__", ",", "classmethod", "(", "create_builder", ")", ")", "return", "f" ]
Decorator which can dynamically attach a query scope to the model.
[ "Decorator", "which", "can", "dynamically", "attach", "a", "query", "scope", "to", "the", "model", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/model.py#L285-L297
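A sketch of declaring and chaining scopes. Per the code above, each decorated function becomes a classmethod returning a ScopeBuilder; the query-fragment return values and the final execute() call are assumptions based on mongothon's scope design, not guarantees:

@MyModel.scope
def hondas():
    return {'make': 'Honda'}

@MyModel.scope
def newer_than(year):
    return {'year': {'$gte': year}}

# Each scope call returns a ScopeBuilder, so scopes can be chained;
# execute() (assumed) runs the combined query.
results = MyModel.hondas().newer_than(2015).execute()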
13,308
gamechanger/mongothon
mongothon/__init__.py
_module_name_from_previous_frame
python
def _module_name_from_previous_frame(num_frames_back):
    """
    Returns the module name associated with a frame `num_frames_back` in
    the call stack. This function adds 1 to account for itself, so
    `num_frames_back` should be given relative to the caller.
    """
    frm = inspect.stack()[num_frames_back + 1]
    return inspect.getmodule(frm[0]).__name__
[ "def", "_module_name_from_previous_frame", "(", "num_frames_back", ")", ":", "frm", "=", "inspect", ".", "stack", "(", ")", "[", "num_frames_back", "+", "1", "]", "return", "inspect", ".", "getmodule", "(", "frm", "[", "0", "]", ")", ".", "__name__" ]
Returns the module name associated with a frame `num_frames_back` in the call stack. This function adds 1 to account for itself, so `num_frames_back` should be given relative to the caller.
[ "Returns", "the", "module", "name", "associated", "with", "a", "frame", "num_frames_back", "in", "the", "call", "stack", ".", "This", "function", "adds", "1", "to", "account", "for", "itself", "so", "num_frames_back", "should", "be", "given", "relative", "to", "the", "caller", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/__init__.py#L9-L16
13,309
gamechanger/mongothon
mongothon/__init__.py
create_model
python
def create_model(schema, collection, class_name=None):
    """
    Main entry point for creating a new mongothon model. Both schema and
    Pymongo collection objects must be provided.

    Returns a new class which can be used as a model class.

    The class name of the model class is by default inferred from the
    provided collection (converted to camel case). Optionally, a
    class_name argument can be provided to override this.
    """
    if not class_name:
        class_name = camelize(str(collection.name))

    model_class = type(class_name, (Model,),
                       dict(schema=schema,
                            _collection_factory=staticmethod(lambda: collection)))

    # Since we are dynamically creating this class here, we modify __module__ on the
    # created class to point back to the module from which `create_model` was called
    model_class.__module__ = _module_name_from_previous_frame(1)
    return model_class
[ "def", "create_model", "(", "schema", ",", "collection", ",", "class_name", "=", "None", ")", ":", "if", "not", "class_name", ":", "class_name", "=", "camelize", "(", "str", "(", "collection", ".", "name", ")", ")", "model_class", "=", "type", "(", "class_name", ",", "(", "Model", ",", ")", ",", "dict", "(", "schema", "=", "schema", ",", "_collection_factory", "=", "staticmethod", "(", "lambda", ":", "collection", ")", ")", ")", "# Since we are dynamically creating this class here, we modify __module__ on the", "# created class to point back to the module from which `create_model` was called", "model_class", ".", "__module__", "=", "_module_name_from_previous_frame", "(", "1", ")", "return", "model_class" ]
Main entry point for creating a new mongothon model. Both schema and Pymongo collection objects must be provided. Returns a new class which can be used as a model class. The class name of the model class is by default inferred from the provided collection (converted to camel case). Optionally, a class_name argument can be provided to override this.
[ "Main", "entry", "point", "to", "creating", "a", "new", "mongothon", "model", ".", "Both", "schema", "and", "Pymongo", "collection", "objects", "must", "be", "provided", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/__init__.py#L19-L42
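A usage sketch, assuming mongothon is installed together with schemer (the schema library mongothon builds on) and that a local MongoDB is reachable; the field definitions are illustrative:

from pymongo import MongoClient
from schemer import Schema
from mongothon import create_model

car_schema = Schema({'make': {'type': str}, 'model': {'type': str}})
cars = MongoClient().mydb.cars

Car = create_model(car_schema, cars)                        # name camelized from 'cars'
Sedan = create_model(car_schema, cars, class_name='Sedan')  # explicit override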
13,310
gamechanger/mongothon
mongothon/__init__.py
create_model_offline
python
def create_model_offline(schema, collection_factory, class_name):
    """
    Entry point for creating a new Mongothon model without instantiating
    a database connection. The collection is instead provided through a
    closure that is resolved upon the model's first database access.
    """
    model_class = type(class_name, (Model,),
                       dict(schema=schema,
                            _collection_factory=staticmethod(collection_factory)))

    # Since we are dynamically creating this class here, we modify __module__ on the
    # created class to point back to the module from which `create_model_offline` was called
    model_class.__module__ = _module_name_from_previous_frame(1)
    return model_class
[ "def", "create_model_offline", "(", "schema", ",", "collection_factory", ",", "class_name", ")", ":", "model_class", "=", "type", "(", "class_name", ",", "(", "Model", ",", ")", ",", "dict", "(", "schema", "=", "schema", ",", "_collection_factory", "=", "staticmethod", "(", "collection_factory", ")", ")", ")", "# Since we are dynamically creating this class here, we modify __module__ on the", "# created class to point back to the module from which `create_model_offline` was called", "model_class", ".", "__module__", "=", "_module_name_from_previous_frame", "(", "1", ")", "return", "model_class" ]
Entry point for creating a new Mongothon model without instantiating a database connection. The collection is instead provided through a closure that is resolved upon the model's first database access.
[ "Entry", "point", "for", "creating", "a", "new", "Mongothon", "model", "without", "instantiating", "a", "database", "connection", ".", "The", "collection", "is", "instead", "provided", "through", "a", "closure", "that", "is", "resolved", "upon", "the", "model", "s", "first", "database", "access", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/__init__.py#L45-L59
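The offline variant defers the connection until first use. A sketch reusing car_schema from the previous example; the lambda is not invoked at definition time, only on the model's first database access:

from pymongo import MongoClient
from mongothon import create_model_offline

# No connection is opened here; the factory runs lazily.
Car = create_model_offline(car_schema,
                           lambda: MongoClient().mydb.cars,
                           'Car')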
13,311
gamechanger/mongothon
mongothon/document.py
wrap
python
def wrap(value):
    """
    Wraps the given value in a Document or DocumentList as applicable.
    """
    if isinstance(value, Document) or isinstance(value, DocumentList):
        return value
    elif isinstance(value, dict):
        return Document(value)
    elif isinstance(value, list):
        return DocumentList(value)
    else:
        return value
[ "def", "wrap", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Document", ")", "or", "isinstance", "(", "value", ",", "DocumentList", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "Document", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "return", "DocumentList", "(", "value", ")", "else", ":", "return", "value" ]
Wraps the given value in a Document or DocumentList as applicable.
[ "Wraps", "the", "given", "value", "in", "a", "Document", "or", "DocumentList", "as", "applicable", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L3-L14
13,312
gamechanger/mongothon
mongothon/document.py
unwrap
python
def unwrap(value):
    """
    Unwraps the given Document or DocumentList as applicable.
    """
    if isinstance(value, Document):
        return value.to_dict()
    elif isinstance(value, DocumentList):
        return value.to_list()
    else:
        return value
[ "def", "unwrap", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Document", ")", ":", "return", "value", ".", "to_dict", "(", ")", "elif", "isinstance", "(", "value", ",", "DocumentList", ")", ":", "return", "value", ".", "to_list", "(", ")", "else", ":", "return", "value" ]
Unwraps the given Document or DocumentList as applicable.
[ "Unwraps", "the", "given", "Document", "or", "DocumentList", "as", "applicable", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L17-L26
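A round-trip sketch of the two helpers; each assertion follows directly from the branches shown above:

from mongothon.document import Document, DocumentList, wrap, unwrap

doc = wrap({'name': 'bob'})
assert isinstance(doc, Document)
assert unwrap(doc) == {'name': 'bob'}  # to_dict() restores a plain dict

lst = wrap([1, 2, 3])
assert isinstance(lst, DocumentList)

assert wrap(42) == 42     # non-dict/list values pass through unchanged
assert wrap(doc) is doc   # already-wrapped values are returned as-is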
13,313
gamechanger/mongothon
mongothon/document.py
ChangeTracker.note_change
python
def note_change(self, key, value):
    """
    Updates change state to reflect a change to a field. Takes care of
    ignoring no-ops, reversions and takes appropriate steps if the field
    was previously deleted or added to ensure the change state purely
    reflects the diff since last reset.
    """
    # If we're changing the value and we haven't done so already, note it.
    if value != self._instance[key] and key not in self._previous and key not in self._added:
        self._previous[key] = self._instance[key]
    # If we're setting the value back to the original value, discard the change note
    if key in self._previous and value == self._previous[key]:
        del self._previous[key]
[ "def", "note_change", "(", "self", ",", "key", ",", "value", ")", ":", "# If we're changing the value and we haven't done so already, note it.", "if", "value", "!=", "self", ".", "_instance", "[", "key", "]", "and", "key", "not", "in", "self", ".", "_previous", "and", "key", "not", "in", "self", ".", "_added", ":", "self", ".", "_previous", "[", "key", "]", "=", "self", ".", "_instance", "[", "key", "]", "# If we're setting the value back to the original value, discard the change note", "if", "key", "in", "self", ".", "_previous", "and", "value", "==", "self", ".", "_previous", "[", "key", "]", ":", "del", "self", ".", "_previous", "[", "key", "]" ]
Updates change state to reflect a change to a field. Takes care of ignoring no-ops, reversions and takes appropriate steps if the field was previously deleted or added to ensure the change state purely reflects the diff since last reset.
[ "Updates", "change", "state", "to", "reflect", "a", "change", "to", "a", "field", ".", "Takes", "care", "of", "ignoring", "no", "-", "ops", "reversions", "and", "takes", "appropriate", "steps", "if", "the", "field", "was", "previously", "deleted", "or", "added", "to", "ensure", "the", "change", "state", "purely", "reflects", "the", "diff", "since", "last", "reset", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L49-L62
13,314
gamechanger/mongothon
mongothon/document.py
ChangeTracker.note_addition
python
def note_addition(self, key, value):
    """
    Updates the change state to reflect the addition of a field. Detects
    previous changes and deletions of the field and acts accordingly.
    """
    # If we're adding a field we previously deleted, remove the deleted note.
    if key in self._deleted:
        # If the key we're adding back has a different value, then it's a change
        if value != self._deleted[key]:
            self._previous[key] = self._deleted[key]
        del self._deleted[key]
    else:
        self._added.append(key)
[ "def", "note_addition", "(", "self", ",", "key", ",", "value", ")", ":", "# If we're adding a field we previously deleted, remove the deleted note.", "if", "key", "in", "self", ".", "_deleted", ":", "# If the key we're adding back has a different value, then it's a change", "if", "value", "!=", "self", ".", "_deleted", "[", "key", "]", ":", "self", ".", "_previous", "[", "key", "]", "=", "self", ".", "_deleted", "[", "key", "]", "del", "self", ".", "_deleted", "[", "key", "]", "else", ":", "self", ".", "_added", ".", "append", "(", "key", ")" ]
Updates the change state to reflect the addition of a field. Detects previous changes and deletions of the field and acts accordingly.
[ "Updates", "the", "change", "state", "to", "reflect", "the", "addition", "of", "a", "field", ".", "Detects", "previous", "changes", "and", "deletions", "of", "the", "field", "and", "acts", "accordingly", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L64-L76
13,315
gamechanger/mongothon
mongothon/document.py
ChangeTracker.note_deletion
python
def note_deletion(self, key):
    """
    Notes the deletion of a field.
    """
    # If we're deleting a key we previously added, then there is no diff
    if key in self._added:
        self._added.remove(key)
    else:
        # If the deleted key was previously changed, use the original value
        if key in self._previous:
            self._deleted[key] = self._previous[key]
            del self._previous[key]
        else:
            self._deleted[key] = self._instance[key]
[ "def", "note_deletion", "(", "self", ",", "key", ")", ":", "# If we'rew deleting a key we previously added, then there is no diff", "if", "key", "in", "self", ".", "_added", ":", "self", ".", "_added", ".", "remove", "(", "key", ")", "else", ":", "# If the deleted key was previously changed, use the original value", "if", "key", "in", "self", ".", "_previous", ":", "self", ".", "_deleted", "[", "key", "]", "=", "self", ".", "_previous", "[", "key", "]", "del", "self", ".", "_previous", "[", "key", "]", "else", ":", "self", ".", "_deleted", "[", "key", "]", "=", "self", ".", "_instance", "[", "key", "]" ]
Notes the deletion of a field.
[ "Notes", "the", "deletion", "of", "a", "field", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L78-L91
13,316
gamechanger/mongothon
mongothon/document.py
ChangeTracker.changes
python
def changes(self):
    """
    Returns a dict containing just the fields which have changed on this
    Document since it was created or last saved, together with both their
    previous and current values.

        doc['name']  # => 'bob'
        doc['name'] = 'clive'
        doc.changes  # => {'name': ('bob', 'clive')}
    """
    return {key: (self._previous[key], self._instance[key]) for key in self._previous}
[ "def", "changes", "(", "self", ")", ":", "return", "{", "key", ":", "(", "self", ".", "_previous", "[", "key", "]", ",", "self", ".", "_instance", "[", "key", "]", ")", "for", "key", "in", "self", ".", "_previous", "}" ]
Returns a dict containing just the fields which have changed on this Document since it was created or last saved, together with both their previous and current values doc['name'] # => 'bob' doc['name'] = 'clive' doc.changes # => {'name': ('bob', 'clive')}
[ "Returns", "a", "dict", "containing", "just", "the", "fields", "which", "have", "changed", "on", "this", "Document", "since", "it", "was", "created", "or", "last", "saved", "together", "with", "both", "their", "previous", "and", "current", "values" ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L107-L118
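Putting the change-tracking pieces together, a sketch of the reversion behavior described in note_change; it assumes (as the docstring example suggests) that Document exposes changes as a property:

doc = Document({'name': 'bob', 'age': 30})
doc['name'] = 'clive'
doc.changes          # => {'name': ('bob', 'clive')}
doc['name'] = 'bob'  # reverting to the original value...
doc.changes          # => {} ...discards the earlier change note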
13,317
gamechanger/mongothon
mongothon/document.py
Document.reset_all_changes
python
def reset_all_changes(self):
    """
    Resets change tracking in this document, recursing into child
    Documents and DocumentLists.
    """
    self.reset_changes()
    for value in self.values():
        if isinstance(value, Document) or isinstance(value, DocumentList):
            value.reset_all_changes()
[ "def", "reset_all_changes", "(", "self", ")", ":", "self", ".", "reset_changes", "(", ")", "for", "value", "in", "self", ".", "values", "(", ")", ":", "if", "isinstance", "(", "value", ",", "Document", ")", "or", "isinstance", "(", "value", ",", "DocumentList", ")", ":", "value", ".", "reset_all_changes", "(", ")" ]
Resets change tracking in this document, recursing into child Documents and DocumentLists.
[ "Resets", "change", "tracking", "in", "this", "document", "recursing", "into", "child", "Documents", "and", "DocumentLists", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L161-L169
13,318
gamechanger/mongothon
mongothon/document.py
Document.populate
python
def populate(self, other):
    """Like update, but clears the contents first."""
    self.clear()
    self.update(other)
    self.reset_all_changes()
[ "def", "populate", "(", "self", ",", "other", ")", ":", "self", ".", "clear", "(", ")", "self", ".", "update", "(", "other", ")", "self", ".", "reset_all_changes", "(", ")" ]
Like update, but clears the contents first.
[ "Like", "update", "but", "clears", "the", "contents", "first", "." ]
5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/document.py#L215-L219
13,319
msoulier/tftpy
tftpy/TftpPacketFactory.py
TftpPacketFactory.parse
python
def parse(self, buffer):
    """This method is used to parse an existing datagram into its
    corresponding TftpPacket object. The buffer is the raw bytes off of
    the network."""
    log.debug("parsing a %d byte packet" % len(buffer))
    (opcode,) = struct.unpack(str("!H"), buffer[:2])
    log.debug("opcode is %d" % opcode)
    packet = self.__create(opcode)
    packet.buffer = buffer
    return packet.decode()
[ "def", "parse", "(", "self", ",", "buffer", ")", ":", "log", ".", "debug", "(", "\"parsing a %d byte packet\"", "%", "len", "(", "buffer", ")", ")", "(", "opcode", ",", ")", "=", "struct", ".", "unpack", "(", "str", "(", "\"!H\"", ")", ",", "buffer", "[", ":", "2", "]", ")", "log", ".", "debug", "(", "\"opcode is %d\"", "%", "opcode", ")", "packet", "=", "self", ".", "__create", "(", "opcode", ")", "packet", ".", "buffer", "=", "buffer", "return", "packet", ".", "decode", "(", ")" ]
This method is used to parse an existing datagram into its corresponding TftpPacket object. The buffer is the raw bytes off of the network.
[ "This", "method", "is", "used", "to", "parse", "an", "existing", "datagram", "into", "its", "corresponding", "TftpPacket", "object", ".", "The", "buffer", "is", "the", "raw", "bytes", "off", "of", "the", "network", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketFactory.py#L28-L37
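A sketch of feeding the factory a hand-built datagram. The wire layout (opcode 1 for RRQ, then NUL-terminated filename and mode strings) is per RFC 1350; the filename/mode attributes on the decoded packet match the TftpPacketRRQ usage in the download start() entry below:

import struct
from tftpy.TftpPacketFactory import TftpPacketFactory

# opcode 1 = RRQ, followed by NUL-terminated filename and mode (RFC 1350)
datagram = struct.pack('!H', 1) + b'remote.txt\x00octet\x00'

factory = TftpPacketFactory()
pkt = factory.parse(datagram)
print(pkt.filename, pkt.mode)  # fields of the decoded TftpPacketRRQ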
13,320
msoulier/tftpy
tftpy/TftpPacketFactory.py
TftpPacketFactory.__create
python
def __create(self, opcode):
    """This method returns the appropriate class object corresponding to
    the passed opcode."""
    tftpassert(opcode in self.classes,
               "Unsupported opcode: %d" % opcode)
    packet = self.classes[opcode]()
    return packet
[ "def", "__create", "(", "self", ",", "opcode", ")", ":", "tftpassert", "(", "opcode", "in", "self", ".", "classes", ",", "\"Unsupported opcode: %d\"", "%", "opcode", ")", "packet", "=", "self", ".", "classes", "[", "opcode", "]", "(", ")", "return", "packet" ]
This method returns the appropriate class object corresponding to the passed opcode.
[ "This", "method", "returns", "the", "appropriate", "class", "object", "corresponding", "to", "the", "passed", "opcode", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketFactory.py#L39-L47
13,321
msoulier/tftpy
tftpy/TftpContexts.py
TftpMetrics.add_dup
python
def add_dup(self, pkt):
    """This method adds a dup for a packet to the metrics."""
    log.debug("Recording a dup of %s", pkt)
    s = str(pkt)
    if s in self.dups:
        self.dups[s] += 1
    else:
        self.dups[s] = 1
    tftpassert(self.dups[s] < MAX_DUPS, "Max duplicates reached")
[ "def", "add_dup", "(", "self", ",", "pkt", ")", ":", "log", ".", "debug", "(", "\"Recording a dup of %s\"", ",", "pkt", ")", "s", "=", "str", "(", "pkt", ")", "if", "s", "in", "self", ".", "dups", ":", "self", ".", "dups", "[", "s", "]", "+=", "1", "else", ":", "self", ".", "dups", "[", "s", "]", "=", "1", "tftpassert", "(", "self", ".", "dups", "[", "s", "]", "<", "MAX_DUPS", ",", "\"Max duplicates reached\"", ")" ]
This method adds a dup for a packet to the metrics.
[ "This", "method", "adds", "a", "dup", "for", "a", "packet", "to", "the", "metrics", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L62-L70
13,322
msoulier/tftpy
tftpy/TftpContexts.py
TftpContext.checkTimeout
python
def checkTimeout(self, now):
    """Compare current time with last_update time, and raise an exception
    if we're over the timeout time."""
    log.debug("checking for timeout on session %s", self)
    if now - self.last_update > self.timeout:
        raise TftpTimeout("Timeout waiting for traffic")
[ "def", "checkTimeout", "(", "self", ",", "now", ")", ":", "log", ".", "debug", "(", "\"checking for timeout on session %s\"", ",", "self", ")", "if", "now", "-", "self", ".", "last_update", ">", "self", ".", "timeout", ":", "raise", "TftpTimeout", "(", "\"Timeout waiting for traffic\"", ")" ]
Compare current time with last_update time, and raise an exception if we're over the timeout time.
[ "Compare", "current", "time", "with", "last_update", "time", "and", "raise", "an", "exception", "if", "we", "re", "over", "the", "timeout", "time", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L121-L126
13,323
msoulier/tftpy
tftpy/TftpContexts.py
TftpContext.end
python
def end(self, close_fileobj=True):
    """Perform session cleanup. Since the end method should always be
    called explicitly by the calling code, this works better than the
    destructor. Set close_fileobj to False so fileobj can be returned
    open."""
    log.debug("in TftpContext.end - closing socket")
    self.sock.close()
    if close_fileobj and self.fileobj is not None and not self.fileobj.closed:
        log.debug("self.fileobj is open - closing")
        self.fileobj.close()
[ "def", "end", "(", "self", ",", "close_fileobj", "=", "True", ")", ":", "log", ".", "debug", "(", "\"in TftpContext.end - closing socket\"", ")", "self", ".", "sock", ".", "close", "(", ")", "if", "close_fileobj", "and", "self", ".", "fileobj", "is", "not", "None", "and", "not", "self", ".", "fileobj", ".", "closed", ":", "log", ".", "debug", "(", "\"self.fileobj is open - closing\"", ")", "self", ".", "fileobj", ".", "close", "(", ")" ]
Perform session cleanup. Since the end method should always be called explicitly by the calling code, this works better than the destructor. Set close_fileobj to False so fileobj can be returned open.
[ "Perform", "session", "cleanup", "since", "the", "end", "method", "should", "always", "be", "called", "explicitely", "by", "the", "calling", "code", "this", "works", "better", "than", "the", "destructor", ".", "Set", "close_fileobj", "to", "False", "so", "fileobj", "can", "be", "returned", "open", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L131-L140
13,324
msoulier/tftpy
tftpy/TftpContexts.py
TftpContext.sethost
python
def sethost(self, host):
    """Setter method that also sets the address property as a result of
    the host that is set."""
    self.__host = host
    self.address = socket.gethostbyname(host)
[ "def", "sethost", "(", "self", ",", "host", ")", ":", "self", ".", "__host", "=", "host", "self", ".", "address", "=", "socket", ".", "gethostbyname", "(", "host", ")" ]
Setter method that also sets the address property as a result of the host that is set.
[ "Setter", "method", "that", "also", "sets", "the", "address", "property", "as", "a", "result", "of", "the", "host", "that", "is", "set", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L146-L150
13,325
msoulier/tftpy
tftpy/TftpContexts.py
TftpContext.cycle
python
def cycle(self):
    """Here we wait for a response from the server after sending it
    something, and dispatch appropriate action to that response."""
    try:
        (buffer, (raddress, rport)) = self.sock.recvfrom(MAX_BLKSIZE)
    except socket.timeout:
        log.warning("Timeout waiting for traffic, retrying...")
        raise TftpTimeout("Timed-out waiting for traffic")

    # Ok, we've received a packet. Log it.
    log.debug("Received %d bytes from %s:%s", len(buffer), raddress, rport)
    # And update our last updated time.
    self.last_update = time.time()

    # Decode it.
    recvpkt = self.factory.parse(buffer)

    # Check for known "connection".
    if raddress != self.address:
        log.warning("Received traffic from %s, expected host %s. Discarding"
                    % (raddress, self.host))

    if self.tidport and self.tidport != rport:
        log.warning("Received traffic from %s:%s but we're "
                    "connected to %s:%s. Discarding."
                    % (raddress, rport, self.host, self.tidport))

    # If there is a packethook defined, call it. We unconditionally
    # pass all packets, it's up to the client to screen out different
    # kinds of packets. This way, the client is privy to things like
    # negotiated options.
    if self.packethook:
        self.packethook(recvpkt)

    # And handle it, possibly changing state.
    self.state = self.state.handle(recvpkt, raddress, rport)

    # If we didn't throw any exceptions here, reset the retry_count to
    # zero.
    self.retry_count = 0
[ "def", "cycle", "(", "self", ")", ":", "try", ":", "(", "buffer", ",", "(", "raddress", ",", "rport", ")", ")", "=", "self", ".", "sock", ".", "recvfrom", "(", "MAX_BLKSIZE", ")", "except", "socket", ".", "timeout", ":", "log", ".", "warning", "(", "\"Timeout waiting for traffic, retrying...\"", ")", "raise", "TftpTimeout", "(", "\"Timed-out waiting for traffic\"", ")", "# Ok, we've received a packet. Log it.", "log", ".", "debug", "(", "\"Received %d bytes from %s:%s\"", ",", "len", "(", "buffer", ")", ",", "raddress", ",", "rport", ")", "# And update our last updated time.", "self", ".", "last_update", "=", "time", ".", "time", "(", ")", "# Decode it.", "recvpkt", "=", "self", ".", "factory", ".", "parse", "(", "buffer", ")", "# Check for known \"connection\".", "if", "raddress", "!=", "self", ".", "address", ":", "log", ".", "warning", "(", "\"Received traffic from %s, expected host %s. Discarding\"", "%", "(", "raddress", ",", "self", ".", "host", ")", ")", "if", "self", ".", "tidport", "and", "self", ".", "tidport", "!=", "rport", ":", "log", ".", "warning", "(", "\"Received traffic from %s:%s but we're \"", "\"connected to %s:%s. Discarding.\"", "%", "(", "raddress", ",", "rport", ",", "self", ".", "host", ",", "self", ".", "tidport", ")", ")", "# If there is a packethook defined, call it. We unconditionally", "# pass all packets, it's up to the client to screen out different", "# kinds of packets. This way, the client is privy to things like", "# negotiated options.", "if", "self", ".", "packethook", ":", "self", ".", "packethook", "(", "recvpkt", ")", "# And handle it, possibly changing state.", "self", ".", "state", "=", "self", ".", "state", ".", "handle", "(", "recvpkt", ",", "raddress", ",", "rport", ")", "# If we didn't throw any exceptions here, reset the retry_count to", "# zero.", "self", ".", "retry_count", "=", "0" ]
Here we wait for a response from the server after sending it something, and dispatch appropriate action to that response.
[ "Here", "we", "wait", "for", "a", "response", "from", "the", "server", "after", "sending", "it", "something", "and", "dispatch", "appropriate", "action", "to", "that", "response", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L165-L205
13,326
msoulier/tftpy
tftpy/TftpContexts.py
TftpContextClientDownload.start
python
def start(self):
    """Initiate the download."""
    log.info("Sending tftp download request to %s" % self.host)
    log.info("    filename -> %s" % self.file_to_transfer)
    log.info("    options -> %s" % self.options)

    self.metrics.start_time = time.time()
    log.debug("Set metrics.start_time to %s" % self.metrics.start_time)

    # FIXME: put this in a sendRRQ method?
    pkt = TftpPacketRRQ()
    pkt.filename = self.file_to_transfer
    pkt.mode = "octet" # FIXME - shouldn't hardcode this
    pkt.options = self.options
    self.sock.sendto(pkt.encode().buffer, (self.host, self.port))
    self.next_block = 1
    self.last_pkt = pkt

    self.state = TftpStateSentRRQ(self)

    while self.state:
        try:
            log.debug("State is %s" % self.state)
            self.cycle()
        except TftpTimeout as err:
            log.error(str(err))
            self.retry_count += 1
            if self.retry_count >= TIMEOUT_RETRIES:
                log.debug("hit max retries, giving up")
                raise
            else:
                log.warning("resending last packet")
                self.state.resendLast()
        except TftpFileNotFoundError as err:
            # If we received file not found, then we should not save the open
            # output file or we'll be left with a size zero file. Delete it,
            # if it exists.
            log.error("Received File not found error")
            if self.fileobj is not None and not self.filelike_fileobj:
                if os.path.exists(self.fileobj.name):
                    log.debug("unlinking output file of %s", self.fileobj.name)
                    os.unlink(self.fileobj.name)
            raise
[ "def", "start", "(", "self", ")", ":", "log", ".", "info", "(", "\"Sending tftp download request to %s\"", "%", "self", ".", "host", ")", "log", ".", "info", "(", "\" filename -> %s\"", "%", "self", ".", "file_to_transfer", ")", "log", ".", "info", "(", "\" options -> %s\"", "%", "self", ".", "options", ")", "self", ".", "metrics", ".", "start_time", "=", "time", ".", "time", "(", ")", "log", ".", "debug", "(", "\"Set metrics.start_time to %s\"", "%", "self", ".", "metrics", ".", "start_time", ")", "# FIXME: put this in a sendRRQ method?", "pkt", "=", "TftpPacketRRQ", "(", ")", "pkt", ".", "filename", "=", "self", ".", "file_to_transfer", "pkt", ".", "mode", "=", "\"octet\"", "# FIXME - shouldn't hardcode this", "pkt", ".", "options", "=", "self", ".", "options", "self", ".", "sock", ".", "sendto", "(", "pkt", ".", "encode", "(", ")", ".", "buffer", ",", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "self", ".", "next_block", "=", "1", "self", ".", "last_pkt", "=", "pkt", "self", ".", "state", "=", "TftpStateSentRRQ", "(", "self", ")", "while", "self", ".", "state", ":", "try", ":", "log", ".", "debug", "(", "\"State is %s\"", "%", "self", ".", "state", ")", "self", ".", "cycle", "(", ")", "except", "TftpTimeout", "as", "err", ":", "log", ".", "error", "(", "str", "(", "err", ")", ")", "self", ".", "retry_count", "+=", "1", "if", "self", ".", "retry_count", ">=", "TIMEOUT_RETRIES", ":", "log", ".", "debug", "(", "\"hit max retries, giving up\"", ")", "raise", "else", ":", "log", ".", "warning", "(", "\"resending last packet\"", ")", "self", ".", "state", ".", "resendLast", "(", ")", "except", "TftpFileNotFoundError", "as", "err", ":", "# If we received file not found, then we should not save the open", "# output file or we'll be left with a size zero file. Delete it,", "# if it exists.", "log", ".", "error", "(", "\"Received File not found error\"", ")", "if", "self", ".", "fileobj", "is", "not", "None", "and", "not", "self", ".", "filelike_fileobj", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "fileobj", ".", "name", ")", ":", "log", ".", "debug", "(", "\"unlinking output file of %s\"", ",", "self", ".", "fileobj", ".", "name", ")", "os", ".", "unlink", "(", "self", ".", "fileobj", ".", "name", ")", "raise" ]
Initiate the download.
[ "Initiate", "the", "download", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L379-L422
13,327
msoulier/tftpy
tftpy/TftpContexts.py
TftpContextClientDownload.end
python
def end(self):
    """Finish up the context."""
    TftpContext.end(self, not self.filelike_fileobj)
    self.metrics.end_time = time.time()
    log.debug("Set metrics.end_time to %s" % self.metrics.end_time)
    self.metrics.compute()
[ "def", "end", "(", "self", ")", ":", "TftpContext", ".", "end", "(", "self", ",", "not", "self", ".", "filelike_fileobj", ")", "self", ".", "metrics", ".", "end_time", "=", "time", ".", "time", "(", ")", "log", ".", "debug", "(", "\"Set metrics.end_time to %s\"", "%", "self", ".", "metrics", ".", "end_time", ")", "self", ".", "metrics", ".", "compute", "(", ")" ]
Finish up the context.
[ "Finish", "up", "the", "context", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpContexts.py#L424-L429
13,328
msoulier/tftpy
tftpy/TftpClient.py
TftpClient.download
python
def download(self, filename, output, packethook=None, timeout=SOCK_TIMEOUT):
    """This method initiates a tftp download from the configured remote
    host, requesting the filename passed. It writes the file to output,
    which can be a file-like object or a path to a local file. If a
    packethook is provided, it must be a function that takes a single
    parameter, which will be a copy of each DAT packet received in the
    form of a TftpPacketDAT object. The timeout parameter may be used to
    override the default SOCK_TIMEOUT setting, which is the amount of
    time that the client will wait for a receive packet to arrive.

    Note: If output is a hyphen, stdout is used."""
    # We're downloading.
    log.debug("Creating download context with the following params:")
    log.debug("host = %s, port = %s, filename = %s"
              % (self.host, self.iport, filename))
    log.debug("options = %s, packethook = %s, timeout = %s"
              % (self.options, packethook, timeout))
    self.context = TftpContextClientDownload(self.host,
                                             self.iport,
                                             filename,
                                             output,
                                             self.options,
                                             packethook,
                                             timeout,
                                             localip=self.localip)
    self.context.start()
    # Download happens here
    self.context.end()

    metrics = self.context.metrics

    log.info('')
    log.info("Download complete.")
    if metrics.duration == 0:
        log.info("Duration too short, rate undetermined")
    else:
        log.info("Downloaded %.2f bytes in %.2f seconds" % (metrics.bytes, metrics.duration))
        log.info("Average rate: %.2f kbps" % metrics.kbps)
    log.info("%.2f bytes in resent data" % metrics.resent_bytes)
    log.info("Received %d duplicate packets" % metrics.dupcount)
[ "def", "download", "(", "self", ",", "filename", ",", "output", ",", "packethook", "=", "None", ",", "timeout", "=", "SOCK_TIMEOUT", ")", ":", "# We're downloading.", "log", ".", "debug", "(", "\"Creating download context with the following params:\"", ")", "log", ".", "debug", "(", "\"host = %s, port = %s, filename = %s\"", "%", "(", "self", ".", "host", ",", "self", ".", "iport", ",", "filename", ")", ")", "log", ".", "debug", "(", "\"options = %s, packethook = %s, timeout = %s\"", "%", "(", "self", ".", "options", ",", "packethook", ",", "timeout", ")", ")", "self", ".", "context", "=", "TftpContextClientDownload", "(", "self", ".", "host", ",", "self", ".", "iport", ",", "filename", ",", "output", ",", "self", ".", "options", ",", "packethook", ",", "timeout", ",", "localip", "=", "self", ".", "localip", ")", "self", ".", "context", ".", "start", "(", ")", "# Download happens here", "self", ".", "context", ".", "end", "(", ")", "metrics", "=", "self", ".", "context", ".", "metrics", "log", ".", "info", "(", "''", ")", "log", ".", "info", "(", "\"Download complete.\"", ")", "if", "metrics", ".", "duration", "==", "0", ":", "log", ".", "info", "(", "\"Duration too short, rate undetermined\"", ")", "else", ":", "log", ".", "info", "(", "\"Downloaded %.2f bytes in %.2f seconds\"", "%", "(", "metrics", ".", "bytes", ",", "metrics", ".", "duration", ")", ")", "log", ".", "info", "(", "\"Average rate: %.2f kbps\"", "%", "metrics", ".", "kbps", ")", "log", ".", "info", "(", "\"%.2f bytes in resent data\"", "%", "metrics", ".", "resent_bytes", ")", "log", ".", "info", "(", "\"Received %d duplicate packets\"", "%", "metrics", ".", "dupcount", ")" ]
This method initiates a tftp download from the configured remote host, requesting the filename passed. It writes the file to output, which can be a file-like object or a path to a local file. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet received in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a receive packet to arrive. Note: If output is a hyphen, stdout is used.
[ "This", "method", "initiates", "a", "tftp", "download", "from", "the", "configured", "remote", "host", "requesting", "the", "filename", "passed", ".", "It", "writes", "the", "file", "to", "output", "which", "can", "be", "a", "file", "-", "like", "object", "or", "a", "path", "to", "a", "local", "file", ".", "If", "a", "packethook", "is", "provided", "it", "must", "be", "a", "function", "that", "takes", "a", "single", "parameter", "which", "will", "be", "a", "copy", "of", "each", "DAT", "packet", "received", "in", "the", "form", "of", "a", "TftpPacketDAT", "object", ".", "The", "timeout", "parameter", "may", "be", "used", "to", "override", "the", "default", "SOCK_TIMEOUT", "setting", "which", "is", "the", "amount", "of", "time", "that", "the", "client", "will", "wait", "for", "a", "receive", "packet", "to", "arrive", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpClient.py#L35-L72
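A minimal usage sketch for the download API above. The endpoint, filenames, and blksize value are illustrative, not taken from the source; tftpy exposes TftpClient at the package level.

import tftpy

# Hypothetical server address and filenames, for illustration only.
client = tftpy.TftpClient('192.0.2.1', 69, options={'blksize': 1024})

# Write the remote file to a local path; per the docstring, passing '-'
# as output streams the file to stdout instead.
client.download('remote.txt', 'local.txt')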
13,329
msoulier/tftpy
tftpy/TftpClient.py
TftpClient.upload
def upload(self, filename, input, packethook=None, timeout=SOCK_TIMEOUT): """This method initiates a tftp upload to the configured remote host, uploading the filename passed. It reads the file from input, which can be a file-like object or a path to a local file. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet sent in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a DAT packet to be ACKd by the server. Note: If input is a hyphen, stdin is used.""" self.context = TftpContextClientUpload(self.host, self.iport, filename, input, self.options, packethook, timeout, localip = self.localip) self.context.start() # Upload happens here self.context.end() metrics = self.context.metrics log.info('') log.info("Upload complete.") if metrics.duration == 0: log.info("Duration too short, rate undetermined") else: log.info("Uploaded %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration)) log.info("Average rate: %.2f kbps" % metrics.kbps) log.info("%.2f bytes in resent data" % metrics.resent_bytes) log.info("Resent %d packets" % metrics.dupcount)
python
def upload(self, filename, input, packethook=None, timeout=SOCK_TIMEOUT): """This method initiates a tftp upload to the configured remote host, uploading the filename passed. It reads the file from input, which can be a file-like object or a path to a local file. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet sent in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a DAT packet to be ACKd by the server. Note: If input is a hyphen, stdin is used.""" self.context = TftpContextClientUpload(self.host, self.iport, filename, input, self.options, packethook, timeout, localip = self.localip) self.context.start() # Upload happens here self.context.end() metrics = self.context.metrics log.info('') log.info("Upload complete.") if metrics.duration == 0: log.info("Duration too short, rate undetermined") else: log.info("Uploaded %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration)) log.info("Average rate: %.2f kbps" % metrics.kbps) log.info("%.2f bytes in resent data" % metrics.resent_bytes) log.info("Resent %d packets" % metrics.dupcount)
[ "def", "upload", "(", "self", ",", "filename", ",", "input", ",", "packethook", "=", "None", ",", "timeout", "=", "SOCK_TIMEOUT", ")", ":", "self", ".", "context", "=", "TftpContextClientUpload", "(", "self", ".", "host", ",", "self", ".", "iport", ",", "filename", ",", "input", ",", "self", ".", "options", ",", "packethook", ",", "timeout", ",", "localip", "=", "self", ".", "localip", ")", "self", ".", "context", ".", "start", "(", ")", "# Upload happens here", "self", ".", "context", ".", "end", "(", ")", "metrics", "=", "self", ".", "context", ".", "metrics", "log", ".", "info", "(", "''", ")", "log", ".", "info", "(", "\"Upload complete.\"", ")", "if", "metrics", ".", "duration", "==", "0", ":", "log", ".", "info", "(", "\"Duration too short, rate undetermined\"", ")", "else", ":", "log", ".", "info", "(", "\"Uploaded %d bytes in %.2f seconds\"", "%", "(", "metrics", ".", "bytes", ",", "metrics", ".", "duration", ")", ")", "log", ".", "info", "(", "\"Average rate: %.2f kbps\"", "%", "metrics", ".", "kbps", ")", "log", ".", "info", "(", "\"%.2f bytes in resent data\"", "%", "metrics", ".", "resent_bytes", ")", "log", ".", "info", "(", "\"Resent %d packets\"", "%", "metrics", ".", "dupcount", ")" ]
This method initiates a tftp upload to the configured remote host, uploading the filename passed. It reads the file from input, which can be a file-like object or a path to a local file. If a packethook is provided, it must be a function that takes a single parameter, which will be a copy of each DAT packet sent in the form of a TftpPacketDAT object. The timeout parameter may be used to override the default SOCK_TIMEOUT setting, which is the amount of time that the client will wait for a DAT packet to be ACKd by the server. Note: If input is a hyphen, stdin is used.
[ "This", "method", "initiates", "a", "tftp", "upload", "to", "the", "configured", "remote", "host", "uploading", "the", "filename", "passed", ".", "It", "reads", "the", "file", "from", "input", "which", "can", "be", "a", "file", "-", "like", "object", "or", "a", "path", "to", "a", "local", "file", ".", "If", "a", "packethook", "is", "provided", "it", "must", "be", "a", "function", "that", "takes", "a", "single", "parameter", "which", "will", "be", "a", "copy", "of", "each", "DAT", "packet", "sent", "in", "the", "form", "of", "a", "TftpPacketDAT", "object", ".", "The", "timeout", "parameter", "may", "be", "used", "to", "override", "the", "default", "SOCK_TIMEOUT", "setting", "which", "is", "the", "amount", "of", "time", "that", "the", "client", "will", "wait", "for", "a", "DAT", "packet", "to", "be", "ACKd", "by", "the", "server", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpClient.py#L74-L107
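A similar hedged sketch for the upload API, showing the packethook callback; the callback name and endpoint are again illustrative.

import tftpy

def on_dat(pkt):
    # Called with a copy of each TftpPacketDAT sent, per the docstring.
    print("sent block %d (%d bytes)" % (pkt.blocknumber, len(pkt.data)))

client = tftpy.TftpClient('192.0.2.1', 69)
client.upload('remote.txt', 'local.txt', packethook=on_dat)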
13,330
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketWithOptions.decode_options
def decode_options(self, buffer): """This method decodes the section of the buffer that contains an unknown number of options. It returns a dictionary of option names and values.""" fmt = b"!" options = {} log.debug("decode_options: buffer is: %s", repr(buffer)) log.debug("size of buffer is %d bytes", len(buffer)) if len(buffer) == 0: log.debug("size of buffer is zero, returning empty hash") return {} # Count the nulls in the buffer. Each one terminates a string. log.debug("about to iterate options buffer counting nulls") length = 0 for i in range(len(buffer)): if ord(buffer[i:i+1]) == 0: log.debug("found a null at length %d", length) if length > 0: fmt += b"%dsx" % length length = -1 else: raise TftpException("Invalid options in buffer") length += 1 log.debug("about to unpack, fmt is: %s", fmt) mystruct = struct.unpack(fmt, buffer) tftpassert(len(mystruct) % 2 == 0, "packet with odd number of option/value pairs") for i in range(0, len(mystruct), 2): key = mystruct[i].decode('ascii') val = mystruct[i+1].decode('ascii') log.debug("setting option %s to %s", key, val) log.debug("types are %s and %s", type(key), type(val)) options[key] = val return options
python
def decode_options(self, buffer): """This method decodes the section of the buffer that contains an unknown number of options. It returns a dictionary of option names and values.""" fmt = b"!" options = {} log.debug("decode_options: buffer is: %s", repr(buffer)) log.debug("size of buffer is %d bytes", len(buffer)) if len(buffer) == 0: log.debug("size of buffer is zero, returning empty hash") return {} # Count the nulls in the buffer. Each one terminates a string. log.debug("about to iterate options buffer counting nulls") length = 0 for i in range(len(buffer)): if ord(buffer[i:i+1]) == 0: log.debug("found a null at length %d", length) if length > 0: fmt += b"%dsx" % length length = -1 else: raise TftpException("Invalid options in buffer") length += 1 log.debug("about to unpack, fmt is: %s", fmt) mystruct = struct.unpack(fmt, buffer) tftpassert(len(mystruct) % 2 == 0, "packet with odd number of option/value pairs") for i in range(0, len(mystruct), 2): key = mystruct[i].decode('ascii') val = mystruct[i+1].decode('ascii') log.debug("setting option %s to %s", key, val) log.debug("types are %s and %s", type(key), type(val)) options[key] = val return options
[ "def", "decode_options", "(", "self", ",", "buffer", ")", ":", "fmt", "=", "b\"!\"", "options", "=", "{", "}", "log", ".", "debug", "(", "\"decode_options: buffer is: %s\"", ",", "repr", "(", "buffer", ")", ")", "log", ".", "debug", "(", "\"size of buffer is %d bytes\"", ",", "len", "(", "buffer", ")", ")", "if", "len", "(", "buffer", ")", "==", "0", ":", "log", ".", "debug", "(", "\"size of buffer is zero, returning empty hash\"", ")", "return", "{", "}", "# Count the nulls in the buffer. Each one terminates a string.", "log", ".", "debug", "(", "\"about to iterate options buffer counting nulls\"", ")", "length", "=", "0", "for", "i", "in", "range", "(", "len", "(", "buffer", ")", ")", ":", "if", "ord", "(", "buffer", "[", "i", ":", "i", "+", "1", "]", ")", "==", "0", ":", "log", ".", "debug", "(", "\"found a null at length %d\"", ",", "length", ")", "if", "length", ">", "0", ":", "fmt", "+=", "b\"%dsx\"", "%", "length", "length", "=", "-", "1", "else", ":", "raise", "TftpException", "(", "\"Invalid options in buffer\"", ")", "length", "+=", "1", "log", ".", "debug", "(", "\"about to unpack, fmt is: %s\"", ",", "fmt", ")", "mystruct", "=", "struct", ".", "unpack", "(", "fmt", ",", "buffer", ")", "tftpassert", "(", "len", "(", "mystruct", ")", "%", "2", "==", "0", ",", "\"packet with odd number of option/value pairs\"", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "mystruct", ")", ",", "2", ")", ":", "key", "=", "mystruct", "[", "i", "]", ".", "decode", "(", "'ascii'", ")", "val", "=", "mystruct", "[", "i", "+", "1", "]", ".", "decode", "(", "'ascii'", ")", "log", ".", "debug", "(", "\"setting option %s to %s\"", ",", "key", ",", "val", ")", "log", ".", "debug", "(", "\"types are %s and %s\"", ",", "type", "(", "key", ")", ",", "type", "(", "val", ")", ")", "options", "[", "key", "]", "=", "val", "return", "options" ]
This method decodes the section of the buffer that contains an unknown number of options. It returns a dictionary of option names and values.
[ "This", "method", "decodes", "the", "section", "of", "the", "buffer", "that", "contains", "an", "unknown", "number", "of", "options", ".", "It", "returns", "a", "dictionary", "of", "option", "names", "and", "values", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L56-L95
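The wire format parsed above is a run of null-terminated name/value strings. Below is a standalone sketch of the same parsing written with split() instead of a built-up struct format string; it illustrates the format and is not the library's implementation.

def decode_options_simple(buffer):
    # An empty buffer means no options were sent.
    if not buffer:
        return {}
    if not buffer.endswith(b'\x00'):
        raise ValueError("Options buffer not null-terminated")
    # Every string is null-terminated, so drop the empty trailing field.
    fields = buffer.split(b'\x00')[:-1]
    # Zero-length strings or an odd field count mean a malformed buffer.
    if len(fields) % 2 != 0 or b'' in fields:
        raise ValueError("Invalid options in buffer")
    decoded = (f.decode('ascii') for f in fields)
    return dict(zip(decoded, decoded))

assert decode_options_simple(b'blksize\x001024\x00') == {'blksize': '1024'}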
13,331
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketInitial.encode
def encode(self): """Encode the packet's buffer from the instance variables.""" tftpassert(self.filename, "filename required in initial packet") tftpassert(self.mode, "mode required in initial packet") # Make sure filename and mode are bytestrings. filename = self.filename mode = self.mode if not isinstance(filename, bytes): filename = filename.encode('ascii') if not isinstance(self.mode, bytes): mode = mode.encode('ascii') ptype = None if self.opcode == 1: ptype = "RRQ" else: ptype = "WRQ" log.debug("Encoding %s packet, filename = %s, mode = %s", ptype, filename, mode) for key in self.options: log.debug(" Option %s = %s", key, self.options[key]) fmt = b"!H" fmt += b"%dsx" % len(filename) if mode == b"octet": fmt += b"5sx" else: raise AssertionError("Unsupported mode: %s" % mode) # Add options. Note that the options list must be bytes. options_list = [] if len(list(self.options.keys())) > 0: log.debug("there are options to encode") for key in self.options: # Populate the option name name = key if not isinstance(name, bytes): name = name.encode('ascii') options_list.append(name) fmt += b"%dsx" % len(name) # Populate the option value value = self.options[key] # Work with all strings. if isinstance(value, int): value = str(value) if not isinstance(value, bytes): value = value.encode('ascii') options_list.append(value) fmt += b"%dsx" % len(value) log.debug("fmt is %s", fmt) log.debug("options_list is %s", options_list) log.debug("size of struct is %d", struct.calcsize(fmt)) self.buffer = struct.pack(fmt, self.opcode, filename, mode, *options_list) log.debug("buffer is %s", repr(self.buffer)) return self
python
def encode(self): """Encode the packet's buffer from the instance variables.""" tftpassert(self.filename, "filename required in initial packet") tftpassert(self.mode, "mode required in initial packet") # Make sure filename and mode are bytestrings. filename = self.filename mode = self.mode if not isinstance(filename, bytes): filename = filename.encode('ascii') if not isinstance(self.mode, bytes): mode = mode.encode('ascii') ptype = None if self.opcode == 1: ptype = "RRQ" else: ptype = "WRQ" log.debug("Encoding %s packet, filename = %s, mode = %s", ptype, filename, mode) for key in self.options: log.debug(" Option %s = %s", key, self.options[key]) fmt = b"!H" fmt += b"%dsx" % len(filename) if mode == b"octet": fmt += b"5sx" else: raise AssertionError("Unsupported mode: %s" % mode) # Add options. Note that the options list must be bytes. options_list = [] if len(list(self.options.keys())) > 0: log.debug("there are options to encode") for key in self.options: # Populate the option name name = key if not isinstance(name, bytes): name = name.encode('ascii') options_list.append(name) fmt += b"%dsx" % len(name) # Populate the option value value = self.options[key] # Work with all strings. if isinstance(value, int): value = str(value) if not isinstance(value, bytes): value = value.encode('ascii') options_list.append(value) fmt += b"%dsx" % len(value) log.debug("fmt is %s", fmt) log.debug("options_list is %s", options_list) log.debug("size of struct is %d", struct.calcsize(fmt)) self.buffer = struct.pack(fmt, self.opcode, filename, mode, *options_list) log.debug("buffer is %s", repr(self.buffer)) return self
[ "def", "encode", "(", "self", ")", ":", "tftpassert", "(", "self", ".", "filename", ",", "\"filename required in initial packet\"", ")", "tftpassert", "(", "self", ".", "mode", ",", "\"mode required in initial packet\"", ")", "# Make sure filename and mode are bytestrings.", "filename", "=", "self", ".", "filename", "mode", "=", "self", ".", "mode", "if", "not", "isinstance", "(", "filename", ",", "bytes", ")", ":", "filename", "=", "filename", ".", "encode", "(", "'ascii'", ")", "if", "not", "isinstance", "(", "self", ".", "mode", ",", "bytes", ")", ":", "mode", "=", "mode", ".", "encode", "(", "'ascii'", ")", "ptype", "=", "None", "if", "self", ".", "opcode", "==", "1", ":", "ptype", "=", "\"RRQ\"", "else", ":", "ptype", "=", "\"WRQ\"", "log", ".", "debug", "(", "\"Encoding %s packet, filename = %s, mode = %s\"", ",", "ptype", ",", "filename", ",", "mode", ")", "for", "key", "in", "self", ".", "options", ":", "log", ".", "debug", "(", "\" Option %s = %s\"", ",", "key", ",", "self", ".", "options", "[", "key", "]", ")", "fmt", "=", "b\"!H\"", "fmt", "+=", "b\"%dsx\"", "%", "len", "(", "filename", ")", "if", "mode", "==", "b\"octet\"", ":", "fmt", "+=", "b\"5sx\"", "else", ":", "raise", "AssertionError", "(", "\"Unsupported mode: %s\"", "%", "mode", ")", "# Add options. Note that the options list must be bytes.", "options_list", "=", "[", "]", "if", "len", "(", "list", "(", "self", ".", "options", ".", "keys", "(", ")", ")", ")", ">", "0", ":", "log", ".", "debug", "(", "\"there are options to encode\"", ")", "for", "key", "in", "self", ".", "options", ":", "# Populate the option name", "name", "=", "key", "if", "not", "isinstance", "(", "name", ",", "bytes", ")", ":", "name", "=", "name", ".", "encode", "(", "'ascii'", ")", "options_list", ".", "append", "(", "name", ")", "fmt", "+=", "b\"%dsx\"", "%", "len", "(", "name", ")", "# Populate the option value", "value", "=", "self", ".", "options", "[", "key", "]", "# Work with all strings.", "if", "isinstance", "(", "value", ",", "int", ")", ":", "value", "=", "str", "(", "value", ")", "if", "not", "isinstance", "(", "value", ",", "bytes", ")", ":", "value", "=", "value", ".", "encode", "(", "'ascii'", ")", "options_list", ".", "append", "(", "value", ")", "fmt", "+=", "b\"%dsx\"", "%", "len", "(", "value", ")", "log", ".", "debug", "(", "\"fmt is %s\"", ",", "fmt", ")", "log", ".", "debug", "(", "\"options_list is %s\"", ",", "options_list", ")", "log", ".", "debug", "(", "\"size of struct is %d\"", ",", "struct", ".", "calcsize", "(", "fmt", ")", ")", "self", ".", "buffer", "=", "struct", ".", "pack", "(", "fmt", ",", "self", ".", "opcode", ",", "filename", ",", "mode", ",", "*", "options_list", ")", "log", ".", "debug", "(", "\"buffer is %s\"", ",", "repr", "(", "self", ".", "buffer", ")", ")", "return", "self" ]
Encode the packet's buffer from the instance variables.
[ "Encode", "the", "packet", "s", "buffer", "from", "the", "instance", "variables", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L132-L190
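A worked sketch of the buffer layout encode() builds for an RRQ with no options: a 2-byte network-order opcode, then a null-terminated filename and mode. The format string mirrors the fmt assembled above; the filename is illustrative.

import struct

filename = b'remote.txt'
mode = b'octet'
# '!H' is the network-order opcode; each '%dsx' is a string plus its null.
fmt = b'!H%dsx%dsx' % (len(filename), len(mode))
buf = struct.pack(fmt, 1, filename, mode)  # opcode 1 = RRQ
assert buf == b'\x00\x01remote.txt\x00octet\x00'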
13,332
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketDAT.encode
def encode(self): """Encode the DAT packet. This method populates self.buffer, and returns self for easy method chaining.""" if len(self.data) == 0: log.debug("Encoding an empty DAT packet") data = self.data if not isinstance(self.data, bytes): data = self.data.encode('ascii') fmt = b"!HH%ds" % len(data) self.buffer = struct.pack(fmt, self.opcode, self.blocknumber, data) return self
python
def encode(self): """Encode the DAT packet. This method populates self.buffer, and returns self for easy method chaining.""" if len(self.data) == 0: log.debug("Encoding an empty DAT packet") data = self.data if not isinstance(self.data, bytes): data = self.data.encode('ascii') fmt = b"!HH%ds" % len(data) self.buffer = struct.pack(fmt, self.opcode, self.blocknumber, data) return self
[ "def", "encode", "(", "self", ")", ":", "if", "len", "(", "self", ".", "data", ")", "==", "0", ":", "log", ".", "debug", "(", "\"Encoding an empty DAT packet\"", ")", "data", "=", "self", ".", "data", "if", "not", "isinstance", "(", "self", ".", "data", ",", "bytes", ")", ":", "data", "=", "self", ".", "data", ".", "encode", "(", "'ascii'", ")", "fmt", "=", "b\"!HH%ds\"", "%", "len", "(", "data", ")", "self", ".", "buffer", "=", "struct", ".", "pack", "(", "fmt", ",", "self", ".", "opcode", ",", "self", ".", "blocknumber", ",", "data", ")", "return", "self" ]
Encode the DAT packet. This method populates self.buffer, and returns self for easy method chaining.
[ "Encode", "the", "DAT", "packet", ".", "This", "method", "populates", "self", ".", "buffer", "and", "returns", "self", "for", "easy", "method", "chaining", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L292-L305
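A small check of the header layout described above, assuming the class is importable from the module path given in this record.

import struct
from tftpy.TftpPacketTypes import TftpPacketDAT

dat = TftpPacketDAT()  # opcode defaults to 3 (DAT)
dat.blocknumber = 1
dat.data = b'abc'
# Header is two network-order shorts (opcode, block number), then raw data.
assert dat.encode().buffer == struct.pack(b'!HH3s', 3, 1, b'abc')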
13,333
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketDAT.decode
def decode(self): """Decode self.buffer into instance variables. It returns self for easy method chaining.""" # We know the first 2 bytes are the opcode. The second two are the # block number. (self.blocknumber,) = struct.unpack(str("!H"), self.buffer[2:4]) log.debug("decoding DAT packet, block number %d", self.blocknumber) log.debug("should be %d bytes in the packet total", len(self.buffer)) # Everything else is data. self.data = self.buffer[4:] log.debug("found %d bytes of data", len(self.data)) return self
python
def decode(self): """Decode self.buffer into instance variables. It returns self for easy method chaining.""" # We know the first 2 bytes are the opcode. The second two are the # block number. (self.blocknumber,) = struct.unpack(str("!H"), self.buffer[2:4]) log.debug("decoding DAT packet, block number %d", self.blocknumber) log.debug("should be %d bytes in the packet total", len(self.buffer)) # Everything else is data. self.data = self.buffer[4:] log.debug("found %d bytes of data", len(self.data)) return self
[ "def", "decode", "(", "self", ")", ":", "# We know the first 2 bytes are the opcode. The second two are the", "# block number.", "(", "self", ".", "blocknumber", ",", ")", "=", "struct", ".", "unpack", "(", "str", "(", "\"!H\"", ")", ",", "self", ".", "buffer", "[", "2", ":", "4", "]", ")", "log", ".", "debug", "(", "\"decoding DAT packet, block number %d\"", ",", "self", ".", "blocknumber", ")", "log", ".", "debug", "(", "\"should be %d bytes in the packet total\"", ",", "len", "(", "self", ".", "buffer", ")", ")", "# Everything else is data.", "self", ".", "data", "=", "self", ".", "buffer", "[", "4", ":", "]", "log", ".", "debug", "(", "\"found %d bytes of data\"", ",", "len", "(", "self", ".", "data", ")", ")", "return", "self" ]
Decode self.buffer into instance variables. It returns self for easy method chaining.
[ "Decode", "self", ".", "buffer", "into", "instance", "variables", ".", "It", "returns", "self", "for", "easy", "method", "chaining", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L307-L318
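And the inverse direction, decoding a hand-built buffer into block number and payload; the bytes are illustrative.

from tftpy.TftpPacketTypes import TftpPacketDAT

pkt = TftpPacketDAT()
pkt.buffer = b'\x00\x03\x00\x07hello'  # opcode 3, block 7, 5-byte payload
pkt.decode()
assert pkt.blocknumber == 7
assert pkt.data == b'hello'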
13,334
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketERR.encode
def encode(self):
    """Encode the ERR packet based on instance variables, populating
    self.buffer, returning self."""
    fmt = b"!HH%dsx" % len(self.errmsgs[self.errorcode])
    log.debug("encoding ERR packet with fmt %s", fmt)
    self.buffer = struct.pack(fmt,
                              self.opcode,
                              self.errorcode,
                              self.errmsgs[self.errorcode])
    return self
python
def encode(self):
    """Encode the ERR packet based on instance variables, populating
    self.buffer, returning self."""
    fmt = b"!HH%dsx" % len(self.errmsgs[self.errorcode])
    log.debug("encoding ERR packet with fmt %s", fmt)
    self.buffer = struct.pack(fmt,
                              self.opcode,
                              self.errorcode,
                              self.errmsgs[self.errorcode])
    return self
[ "def", "encode", "(", "self", ")", ":", "fmt", "=", "b\"!HH%dsx\"", "%", "len", "(", "self", ".", "errmsgs", "[", "self", ".", "errorcode", "]", ")", "log", ".", "debug", "(", "\"encoding ERR packet with fmt %s\"", ",", "fmt", ")", "self", ".", "buffer", "=", "struct", ".", "pack", "(", "fmt", ",", "self", ".", "opcode", ",", "self", ".", "errorcode", ",", "self", ".", "errmsgs", "[", "self", ".", "errorcode", "]", ")", "return", "self" ]
Encode the ERR packet based on instance variables, populating self.buffer, returning self.
[ "Encode", "the", "DAT", "packet", "based", "on", "instance", "variables", "populating", "self", ".", "buffer", "returning", "self", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L399-L408
13,335
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketERR.decode
def decode(self): "Decode self.buffer, populating instance variables and return self." buflen = len(self.buffer) tftpassert(buflen >= 4, "malformed ERR packet, too short") log.debug("Decoding ERR packet, length %s bytes", buflen) if buflen == 4: log.debug("Allowing this affront to the RFC of a 4-byte packet") fmt = b"!HH" log.debug("Decoding ERR packet with fmt: %s", fmt) self.opcode, self.errorcode = struct.unpack(fmt, self.buffer) else: log.debug("Good ERR packet > 4 bytes") fmt = b"!HH%dsx" % (len(self.buffer) - 5) log.debug("Decoding ERR packet with fmt: %s", fmt) self.opcode, self.errorcode, self.errmsg = struct.unpack(fmt, self.buffer) log.error("ERR packet - errorcode: %d, message: %s" % (self.errorcode, self.errmsg)) return self
python
def decode(self): "Decode self.buffer, populating instance variables and return self." buflen = len(self.buffer) tftpassert(buflen >= 4, "malformed ERR packet, too short") log.debug("Decoding ERR packet, length %s bytes", buflen) if buflen == 4: log.debug("Allowing this affront to the RFC of a 4-byte packet") fmt = b"!HH" log.debug("Decoding ERR packet with fmt: %s", fmt) self.opcode, self.errorcode = struct.unpack(fmt, self.buffer) else: log.debug("Good ERR packet > 4 bytes") fmt = b"!HH%dsx" % (len(self.buffer) - 5) log.debug("Decoding ERR packet with fmt: %s", fmt) self.opcode, self.errorcode, self.errmsg = struct.unpack(fmt, self.buffer) log.error("ERR packet - errorcode: %d, message: %s" % (self.errorcode, self.errmsg)) return self
[ "def", "decode", "(", "self", ")", ":", "buflen", "=", "len", "(", "self", ".", "buffer", ")", "tftpassert", "(", "buflen", ">=", "4", ",", "\"malformed ERR packet, too short\"", ")", "log", ".", "debug", "(", "\"Decoding ERR packet, length %s bytes\"", ",", "buflen", ")", "if", "buflen", "==", "4", ":", "log", ".", "debug", "(", "\"Allowing this affront to the RFC of a 4-byte packet\"", ")", "fmt", "=", "b\"!HH\"", "log", ".", "debug", "(", "\"Decoding ERR packet with fmt: %s\"", ",", "fmt", ")", "self", ".", "opcode", ",", "self", ".", "errorcode", "=", "struct", ".", "unpack", "(", "fmt", ",", "self", ".", "buffer", ")", "else", ":", "log", ".", "debug", "(", "\"Good ERR packet > 4 bytes\"", ")", "fmt", "=", "b\"!HH%dsx\"", "%", "(", "len", "(", "self", ".", "buffer", ")", "-", "5", ")", "log", ".", "debug", "(", "\"Decoding ERR packet with fmt: %s\"", ",", "fmt", ")", "self", ".", "opcode", ",", "self", ".", "errorcode", ",", "self", ".", "errmsg", "=", "struct", ".", "unpack", "(", "fmt", ",", "self", ".", "buffer", ")", "log", ".", "error", "(", "\"ERR packet - errorcode: %d, message: %s\"", "%", "(", "self", ".", "errorcode", ",", "self", ".", "errmsg", ")", ")", "return", "self" ]
Decode self.buffer, populating instance variables and return self.
[ "Decode", "self", ".", "buffer", "populating", "instance", "variables", "and", "return", "self", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L410-L429
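A round-trip sketch tying the two ERR methods together; it assumes errorcode 1 maps to the library's stock "File not found" entry in the errmsgs table.

from tftpy.TftpPacketTypes import TftpPacketERR

err = TftpPacketERR()
err.errorcode = 1  # File not found
wire = err.encode().buffer

echo = TftpPacketERR()
echo.buffer = wire
echo.decode()
assert echo.errorcode == 1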
13,336
msoulier/tftpy
tftpy/TftpPacketTypes.py
TftpPacketOACK.match_options
def match_options(self, options):
    """This method takes a set of options, and tries to match them with
    its own. It can accept some changes in those options from the server as
    part of a negotiation. It updates the passed-in options dict in place
    with the negotiated values and returns True, so that the session can
    update itself to the negotiated options."""
    for name in self.options:
        if name in options:
            if name == 'blksize':
                # We can accept anything between the min and max values.
                size = int(self.options[name])
                if size >= MIN_BLKSIZE and size <= MAX_BLKSIZE:
                    log.debug("negotiated blksize of %d bytes", size)
                    options['blksize'] = size
                else:
                    raise TftpException("blksize %s option outside allowed range" % size)
            elif name == 'tsize':
                size = int(self.options[name])
                if size < 0:
                    raise TftpException("Negative file sizes not supported")
            else:
                raise TftpException("Unsupported option: %s" % name)
    return True
python
def match_options(self, options):
    """This method takes a set of options, and tries to match them with
    its own. It can accept some changes in those options from the server as
    part of a negotiation. It updates the passed-in options dict in place
    with the negotiated values and returns True, so that the session can
    update itself to the negotiated options."""
    for name in self.options:
        if name in options:
            if name == 'blksize':
                # We can accept anything between the min and max values.
                size = int(self.options[name])
                if size >= MIN_BLKSIZE and size <= MAX_BLKSIZE:
                    log.debug("negotiated blksize of %d bytes", size)
                    options['blksize'] = size
                else:
                    raise TftpException("blksize %s option outside allowed range" % size)
            elif name == 'tsize':
                size = int(self.options[name])
                if size < 0:
                    raise TftpException("Negative file sizes not supported")
            else:
                raise TftpException("Unsupported option: %s" % name)
    return True
[ "def", "match_options", "(", "self", ",", "options", ")", ":", "for", "name", "in", "self", ".", "options", ":", "if", "name", "in", "options", ":", "if", "name", "==", "'blksize'", ":", "# We can accept anything between the min and max values.", "size", "=", "int", "(", "self", ".", "options", "[", "name", "]", ")", "if", "size", ">=", "MIN_BLKSIZE", "and", "size", "<=", "MAX_BLKSIZE", ":", "log", ".", "debug", "(", "\"negotiated blksize of %d bytes\"", ",", "size", ")", "options", "[", "'blksize'", "]", "=", "size", "else", ":", "raise", "TftpException", "(", "\"blksize %s option outside allowed range\"", "%", "size", ")", "elif", "name", "==", "'tsize'", ":", "size", "=", "int", "(", "self", ".", "options", "[", "name", "]", ")", "if", "size", "<", "0", ":", "raise", "TftpException", "(", "\"Negative file sizes not supported\"", ")", "else", ":", "raise", "TftpException", "(", "\"Unsupported option: %s\"", "%", "name", ")", "return", "True" ]
This method takes a set of options, and tries to match them with its own. It can accept some changes in those options from the server as part of a negotiation. It updates the passed-in options dict in place with the negotiated values and returns True, so that the session can update itself to the negotiated options.
[ "This", "method", "takes", "a", "set", "of", "options", "and", "tries", "to", "match", "them", "with", "its", "own", ".", "It", "can", "accept", "some", "changes", "in", "those", "options", "from", "the", "server", "as", "part", "of", "a", "negotiation", ".", "Changed", "or", "unchanged", "it", "will", "return", "a", "dict", "of", "the", "options", "so", "that", "the", "session", "can", "update", "itself", "to", "the", "negotiated", "options", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L472-L494
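A sketch of the negotiation from the client's side: the value granted in the OACK overwrites the client's requested value in the passed-in dict. The numbers are illustrative.

from tftpy.TftpPacketTypes import TftpPacketOACK

oack = TftpPacketOACK()
oack.options = {'blksize': '1024'}     # what the server granted
negotiated = {'blksize': 1400}         # what the client had requested
assert oack.match_options(negotiated)  # accepted and synced
assert negotiated['blksize'] == 1024   # updated in place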
13,337
msoulier/tftpy
tftpy/TftpStates.py
TftpState.handleOACK
def handleOACK(self, pkt): """This method handles an OACK from the server, syncing any accepted options.""" if len(pkt.options.keys()) > 0: if pkt.match_options(self.context.options): log.info("Successful negotiation of options") # Set options to OACK options self.context.options = pkt.options for key in self.context.options: log.info(" %s = %s" % (key, self.context.options[key])) else: log.error("Failed to negotiate options") raise TftpException("Failed to negotiate options") else: raise TftpException("No options found in OACK")
python
def handleOACK(self, pkt): """This method handles an OACK from the server, syncing any accepted options.""" if len(pkt.options.keys()) > 0: if pkt.match_options(self.context.options): log.info("Successful negotiation of options") # Set options to OACK options self.context.options = pkt.options for key in self.context.options: log.info(" %s = %s" % (key, self.context.options[key])) else: log.error("Failed to negotiate options") raise TftpException("Failed to negotiate options") else: raise TftpException("No options found in OACK")
[ "def", "handleOACK", "(", "self", ",", "pkt", ")", ":", "if", "len", "(", "pkt", ".", "options", ".", "keys", "(", ")", ")", ">", "0", ":", "if", "pkt", ".", "match_options", "(", "self", ".", "context", ".", "options", ")", ":", "log", ".", "info", "(", "\"Successful negotiation of options\"", ")", "# Set options to OACK options", "self", ".", "context", ".", "options", "=", "pkt", ".", "options", "for", "key", "in", "self", ".", "context", ".", "options", ":", "log", ".", "info", "(", "\" %s = %s\"", "%", "(", "key", ",", "self", ".", "context", ".", "options", "[", "key", "]", ")", ")", "else", ":", "log", ".", "error", "(", "\"Failed to negotiate options\"", ")", "raise", "TftpException", "(", "\"Failed to negotiate options\"", ")", "else", ":", "raise", "TftpException", "(", "\"No options found in OACK\"", ")" ]
This method handles an OACK from the server, syncing any accepted options.
[ "This", "method", "handles", "an", "OACK", "from", "the", "server", "syncing", "any", "accepted", "options", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L39-L53
13,338
msoulier/tftpy
tftpy/TftpStates.py
TftpState.returnSupportedOptions
def returnSupportedOptions(self, options): """This method takes a requested options list from a client, and returns the ones that are supported.""" # We support the options blksize and tsize right now. # FIXME - put this somewhere else? accepted_options = {} for option in options: if option == 'blksize': # Make sure it's valid. if int(options[option]) > MAX_BLKSIZE: log.info("Client requested blksize greater than %d " "setting to maximum" % MAX_BLKSIZE) accepted_options[option] = MAX_BLKSIZE elif int(options[option]) < MIN_BLKSIZE: log.info("Client requested blksize less than %d " "setting to minimum" % MIN_BLKSIZE) accepted_options[option] = MIN_BLKSIZE else: accepted_options[option] = options[option] elif option == 'tsize': log.debug("tsize option is set") accepted_options['tsize'] = 0 else: log.info("Dropping unsupported option '%s'" % option) log.debug("Returning these accepted options: %s", accepted_options) return accepted_options
python
def returnSupportedOptions(self, options): """This method takes a requested options list from a client, and returns the ones that are supported.""" # We support the options blksize and tsize right now. # FIXME - put this somewhere else? accepted_options = {} for option in options: if option == 'blksize': # Make sure it's valid. if int(options[option]) > MAX_BLKSIZE: log.info("Client requested blksize greater than %d " "setting to maximum" % MAX_BLKSIZE) accepted_options[option] = MAX_BLKSIZE elif int(options[option]) < MIN_BLKSIZE: log.info("Client requested blksize less than %d " "setting to minimum" % MIN_BLKSIZE) accepted_options[option] = MIN_BLKSIZE else: accepted_options[option] = options[option] elif option == 'tsize': log.debug("tsize option is set") accepted_options['tsize'] = 0 else: log.info("Dropping unsupported option '%s'" % option) log.debug("Returning these accepted options: %s", accepted_options) return accepted_options
[ "def", "returnSupportedOptions", "(", "self", ",", "options", ")", ":", "# We support the options blksize and tsize right now.", "# FIXME - put this somewhere else?", "accepted_options", "=", "{", "}", "for", "option", "in", "options", ":", "if", "option", "==", "'blksize'", ":", "# Make sure it's valid.", "if", "int", "(", "options", "[", "option", "]", ")", ">", "MAX_BLKSIZE", ":", "log", ".", "info", "(", "\"Client requested blksize greater than %d \"", "\"setting to maximum\"", "%", "MAX_BLKSIZE", ")", "accepted_options", "[", "option", "]", "=", "MAX_BLKSIZE", "elif", "int", "(", "options", "[", "option", "]", ")", "<", "MIN_BLKSIZE", ":", "log", ".", "info", "(", "\"Client requested blksize less than %d \"", "\"setting to minimum\"", "%", "MIN_BLKSIZE", ")", "accepted_options", "[", "option", "]", "=", "MIN_BLKSIZE", "else", ":", "accepted_options", "[", "option", "]", "=", "options", "[", "option", "]", "elif", "option", "==", "'tsize'", ":", "log", ".", "debug", "(", "\"tsize option is set\"", ")", "accepted_options", "[", "'tsize'", "]", "=", "0", "else", ":", "log", ".", "info", "(", "\"Dropping unsupported option '%s'\"", "%", "option", ")", "log", ".", "debug", "(", "\"Returning these accepted options: %s\"", ",", "accepted_options", ")", "return", "accepted_options" ]
This method takes a requested options list from a client, and returns the ones that are supported.
[ "This", "method", "takes", "a", "requested", "options", "list", "from", "a", "client", "and", "returns", "the", "ones", "that", "are", "supported", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L55-L80
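The blksize branch above amounts to clamping the request into the library's allowed range. A standalone sketch of that clamping, assuming the bounds are importable from tftpy.TftpShared as the method's references suggest.

from tftpy.TftpShared import MIN_BLKSIZE, MAX_BLKSIZE

def clamp_blksize(requested):
    # Out-of-range requests are pulled back to the nearest bound,
    # mirroring the accept-with-adjustment behaviour above.
    return max(MIN_BLKSIZE, min(MAX_BLKSIZE, int(requested)))

assert clamp_blksize(MAX_BLKSIZE + 1) == MAX_BLKSIZE
assert clamp_blksize(MIN_BLKSIZE - 1) == MIN_BLKSIZE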
13,339
msoulier/tftpy
tftpy/TftpStates.py
TftpState.sendDAT
def sendDAT(self): """This method sends the next DAT packet based on the data in the context. It returns a boolean indicating whether the transfer is finished.""" finished = False blocknumber = self.context.next_block # Test hook if DELAY_BLOCK and DELAY_BLOCK == blocknumber: import time log.debug("Deliberately delaying 10 seconds...") time.sleep(10) dat = None blksize = self.context.getBlocksize() buffer = self.context.fileobj.read(blksize) log.debug("Read %d bytes into buffer", len(buffer)) if len(buffer) < blksize: log.info("Reached EOF on file %s" % self.context.file_to_transfer) finished = True dat = TftpPacketDAT() dat.data = buffer dat.blocknumber = blocknumber self.context.metrics.bytes += len(dat.data) log.debug("Sending DAT packet %d", dat.blocknumber) self.context.sock.sendto(dat.encode().buffer, (self.context.host, self.context.tidport)) if self.context.packethook: self.context.packethook(dat) self.context.last_pkt = dat return finished
python
def sendDAT(self): """This method sends the next DAT packet based on the data in the context. It returns a boolean indicating whether the transfer is finished.""" finished = False blocknumber = self.context.next_block # Test hook if DELAY_BLOCK and DELAY_BLOCK == blocknumber: import time log.debug("Deliberately delaying 10 seconds...") time.sleep(10) dat = None blksize = self.context.getBlocksize() buffer = self.context.fileobj.read(blksize) log.debug("Read %d bytes into buffer", len(buffer)) if len(buffer) < blksize: log.info("Reached EOF on file %s" % self.context.file_to_transfer) finished = True dat = TftpPacketDAT() dat.data = buffer dat.blocknumber = blocknumber self.context.metrics.bytes += len(dat.data) log.debug("Sending DAT packet %d", dat.blocknumber) self.context.sock.sendto(dat.encode().buffer, (self.context.host, self.context.tidport)) if self.context.packethook: self.context.packethook(dat) self.context.last_pkt = dat return finished
[ "def", "sendDAT", "(", "self", ")", ":", "finished", "=", "False", "blocknumber", "=", "self", ".", "context", ".", "next_block", "# Test hook", "if", "DELAY_BLOCK", "and", "DELAY_BLOCK", "==", "blocknumber", ":", "import", "time", "log", ".", "debug", "(", "\"Deliberately delaying 10 seconds...\"", ")", "time", ".", "sleep", "(", "10", ")", "dat", "=", "None", "blksize", "=", "self", ".", "context", ".", "getBlocksize", "(", ")", "buffer", "=", "self", ".", "context", ".", "fileobj", ".", "read", "(", "blksize", ")", "log", ".", "debug", "(", "\"Read %d bytes into buffer\"", ",", "len", "(", "buffer", ")", ")", "if", "len", "(", "buffer", ")", "<", "blksize", ":", "log", ".", "info", "(", "\"Reached EOF on file %s\"", "%", "self", ".", "context", ".", "file_to_transfer", ")", "finished", "=", "True", "dat", "=", "TftpPacketDAT", "(", ")", "dat", ".", "data", "=", "buffer", "dat", ".", "blocknumber", "=", "blocknumber", "self", ".", "context", ".", "metrics", ".", "bytes", "+=", "len", "(", "dat", ".", "data", ")", "log", ".", "debug", "(", "\"Sending DAT packet %d\"", ",", "dat", ".", "blocknumber", ")", "self", ".", "context", ".", "sock", ".", "sendto", "(", "dat", ".", "encode", "(", ")", ".", "buffer", ",", "(", "self", ".", "context", ".", "host", ",", "self", ".", "context", ".", "tidport", ")", ")", "if", "self", ".", "context", ".", "packethook", ":", "self", ".", "context", ".", "packethook", "(", "dat", ")", "self", ".", "context", ".", "last_pkt", "=", "dat", "return", "finished" ]
This method sends the next DAT packet based on the data in the context. It returns a boolean indicating whether the transfer is finished.
[ "This", "method", "sends", "the", "next", "DAT", "packet", "based", "on", "the", "data", "in", "the", "context", ".", "It", "returns", "a", "boolean", "indicating", "whether", "the", "transfer", "is", "finished", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L82-L111
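The EOF convention used by sendDAT: a transfer finishes as soon as a read returns fewer bytes than the negotiated block size. A sketch with an in-memory file; the 1000-byte payload and 512-byte blksize are illustrative.

import io

blksize = 512
fileobj = io.BytesIO(b'x' * 1000)
blocks = []
while True:
    chunk = fileobj.read(blksize)
    blocks.append(chunk)
    if len(chunk) < blksize:
        break  # a short (possibly empty) block signals end of file
assert [len(b) for b in blocks] == [512, 488]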
13,340
msoulier/tftpy
tftpy/TftpStates.py
TftpState.sendACK
def sendACK(self, blocknumber=None): """This method sends an ack packet to the block number specified. If none is specified, it defaults to the next_block property in the parent context.""" log.debug("In sendACK, passed blocknumber is %s", blocknumber) if blocknumber is None: blocknumber = self.context.next_block log.info("Sending ack to block %d" % blocknumber) ackpkt = TftpPacketACK() ackpkt.blocknumber = blocknumber self.context.sock.sendto(ackpkt.encode().buffer, (self.context.host, self.context.tidport)) self.context.last_pkt = ackpkt
python
def sendACK(self, blocknumber=None): """This method sends an ack packet to the block number specified. If none is specified, it defaults to the next_block property in the parent context.""" log.debug("In sendACK, passed blocknumber is %s", blocknumber) if blocknumber is None: blocknumber = self.context.next_block log.info("Sending ack to block %d" % blocknumber) ackpkt = TftpPacketACK() ackpkt.blocknumber = blocknumber self.context.sock.sendto(ackpkt.encode().buffer, (self.context.host, self.context.tidport)) self.context.last_pkt = ackpkt
[ "def", "sendACK", "(", "self", ",", "blocknumber", "=", "None", ")", ":", "log", ".", "debug", "(", "\"In sendACK, passed blocknumber is %s\"", ",", "blocknumber", ")", "if", "blocknumber", "is", "None", ":", "blocknumber", "=", "self", ".", "context", ".", "next_block", "log", ".", "info", "(", "\"Sending ack to block %d\"", "%", "blocknumber", ")", "ackpkt", "=", "TftpPacketACK", "(", ")", "ackpkt", ".", "blocknumber", "=", "blocknumber", "self", ".", "context", ".", "sock", ".", "sendto", "(", "ackpkt", ".", "encode", "(", ")", ".", "buffer", ",", "(", "self", ".", "context", ".", "host", ",", "self", ".", "context", ".", "tidport", ")", ")", "self", ".", "context", ".", "last_pkt", "=", "ackpkt" ]
This method sends an ack packet to the block number specified. If none is specified, it defaults to the next_block property in the parent context.
[ "This", "method", "sends", "an", "ack", "packet", "to", "the", "block", "number", "specified", ".", "If", "none", "is", "specified", "it", "defaults", "to", "the", "next_block", "property", "in", "the", "parent", "context", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L113-L126
13,341
msoulier/tftpy
tftpy/TftpStates.py
TftpState.sendError
def sendError(self, errorcode):
    """This method composes an error packet from the given errorcode and
    sends it over the context's socket."""
    log.debug("In sendError, being asked to send error %d", errorcode)
    errpkt = TftpPacketERR()
    errpkt.errorcode = errorcode
    if self.context.tidport == None:
        log.debug("Attempt to send error packet outside session. Discarding")
    else:
        self.context.sock.sendto(errpkt.encode().buffer,
                                 (self.context.host,
                                  self.context.tidport))
        self.context.last_pkt = errpkt
python
def sendError(self, errorcode):
    """This method composes an error packet from the given errorcode and
    sends it over the context's socket."""
    log.debug("In sendError, being asked to send error %d", errorcode)
    errpkt = TftpPacketERR()
    errpkt.errorcode = errorcode
    if self.context.tidport == None:
        log.debug("Attempt to send error packet outside session. Discarding")
    else:
        self.context.sock.sendto(errpkt.encode().buffer,
                                 (self.context.host,
                                  self.context.tidport))
        self.context.last_pkt = errpkt
[ "def", "sendError", "(", "self", ",", "errorcode", ")", ":", "log", ".", "debug", "(", "\"In sendError, being asked to send error %d\"", ",", "errorcode", ")", "errpkt", "=", "TftpPacketERR", "(", ")", "errpkt", ".", "errorcode", "=", "errorcode", "if", "self", ".", "context", ".", "tidport", "==", "None", ":", "log", ".", "debug", "(", "\"Error packet received outside session. Discarding\"", ")", "else", ":", "self", ".", "context", ".", "sock", ".", "sendto", "(", "errpkt", ".", "encode", "(", ")", ".", "buffer", ",", "(", "self", ".", "context", ".", "host", ",", "self", ".", "context", ".", "tidport", ")", ")", "self", ".", "context", ".", "last_pkt", "=", "errpkt" ]
This method composes an error packet from the given errorcode and sends it over the context's socket.
[ "This", "method", "uses", "the", "socket", "passed", "and", "uses", "the", "errorcode", "to", "compose", "and", "send", "an", "error", "packet", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L128-L140
13,342
msoulier/tftpy
tftpy/TftpStates.py
TftpState.sendOACK
def sendOACK(self): """This method sends an OACK packet with the options from the current context.""" log.debug("In sendOACK with options %s", self.context.options) pkt = TftpPacketOACK() pkt.options = self.context.options self.context.sock.sendto(pkt.encode().buffer, (self.context.host, self.context.tidport)) self.context.last_pkt = pkt
python
def sendOACK(self): """This method sends an OACK packet with the options from the current context.""" log.debug("In sendOACK with options %s", self.context.options) pkt = TftpPacketOACK() pkt.options = self.context.options self.context.sock.sendto(pkt.encode().buffer, (self.context.host, self.context.tidport)) self.context.last_pkt = pkt
[ "def", "sendOACK", "(", "self", ")", ":", "log", ".", "debug", "(", "\"In sendOACK with options %s\"", ",", "self", ".", "context", ".", "options", ")", "pkt", "=", "TftpPacketOACK", "(", ")", "pkt", ".", "options", "=", "self", ".", "context", ".", "options", "self", ".", "context", ".", "sock", ".", "sendto", "(", "pkt", ".", "encode", "(", ")", ".", "buffer", ",", "(", "self", ".", "context", ".", "host", ",", "self", ".", "context", ".", "tidport", ")", ")", "self", ".", "context", ".", "last_pkt", "=", "pkt" ]
This method sends an OACK packet with the options from the current context.
[ "This", "method", "sends", "an", "OACK", "packet", "with", "the", "options", "from", "the", "current", "context", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L142-L151
13,343
msoulier/tftpy
tftpy/TftpStates.py
TftpState.resendLast
def resendLast(self): "Resend the last sent packet due to a timeout." log.warning("Resending packet %s on sessions %s" % (self.context.last_pkt, self)) self.context.metrics.resent_bytes += len(self.context.last_pkt.buffer) self.context.metrics.add_dup(self.context.last_pkt) sendto_port = self.context.tidport if not sendto_port: # If the tidport wasn't set, then the remote end hasn't even # started talking to us yet. That's not good. Maybe it's not # there. sendto_port = self.context.port self.context.sock.sendto(self.context.last_pkt.encode().buffer, (self.context.host, sendto_port)) if self.context.packethook: self.context.packethook(self.context.last_pkt)
python
def resendLast(self): "Resend the last sent packet due to a timeout." log.warning("Resending packet %s on sessions %s" % (self.context.last_pkt, self)) self.context.metrics.resent_bytes += len(self.context.last_pkt.buffer) self.context.metrics.add_dup(self.context.last_pkt) sendto_port = self.context.tidport if not sendto_port: # If the tidport wasn't set, then the remote end hasn't even # started talking to us yet. That's not good. Maybe it's not # there. sendto_port = self.context.port self.context.sock.sendto(self.context.last_pkt.encode().buffer, (self.context.host, sendto_port)) if self.context.packethook: self.context.packethook(self.context.last_pkt)
[ "def", "resendLast", "(", "self", ")", ":", "log", ".", "warning", "(", "\"Resending packet %s on sessions %s\"", "%", "(", "self", ".", "context", ".", "last_pkt", ",", "self", ")", ")", "self", ".", "context", ".", "metrics", ".", "resent_bytes", "+=", "len", "(", "self", ".", "context", ".", "last_pkt", ".", "buffer", ")", "self", ".", "context", ".", "metrics", ".", "add_dup", "(", "self", ".", "context", ".", "last_pkt", ")", "sendto_port", "=", "self", ".", "context", ".", "tidport", "if", "not", "sendto_port", ":", "# If the tidport wasn't set, then the remote end hasn't even", "# started talking to us yet. That's not good. Maybe it's not", "# there.", "sendto_port", "=", "self", ".", "context", ".", "port", "self", ".", "context", ".", "sock", ".", "sendto", "(", "self", ".", "context", ".", "last_pkt", ".", "encode", "(", ")", ".", "buffer", ",", "(", "self", ".", "context", ".", "host", ",", "sendto_port", ")", ")", "if", "self", ".", "context", ".", "packethook", ":", "self", ".", "context", ".", "packethook", "(", "self", ".", "context", ".", "last_pkt", ")" ]
Resend the last sent packet due to a timeout.
[ "Resend", "the", "last", "sent", "packet", "due", "to", "a", "timeout", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L153-L168
13,344
msoulier/tftpy
tftpy/TftpStates.py
TftpState.handleDat
def handleDat(self, pkt): """This method handles a DAT packet during a client download, or a server upload.""" log.info("Handling DAT packet - block %d" % pkt.blocknumber) log.debug("Expecting block %s", self.context.next_block) if pkt.blocknumber == self.context.next_block: log.debug("Good, received block %d in sequence", pkt.blocknumber) self.sendACK() self.context.next_block += 1 log.debug("Writing %d bytes to output file", len(pkt.data)) self.context.fileobj.write(pkt.data) self.context.metrics.bytes += len(pkt.data) # Check for end-of-file, any less than full data packet. if len(pkt.data) < self.context.getBlocksize(): log.info("End of file detected") return None elif pkt.blocknumber < self.context.next_block: if pkt.blocknumber == 0: log.warning("There is no block zero!") self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("There is no block zero!") log.warning("Dropping duplicate block %d" % pkt.blocknumber) self.context.metrics.add_dup(pkt) log.debug("ACKing block %d again, just in case", pkt.blocknumber) self.sendACK(pkt.blocknumber) else: # FIXME: should we be more tolerant and just discard instead? msg = "Whoa! Received future block %d but expected %d" \ % (pkt.blocknumber, self.context.next_block) log.error(msg) raise TftpException(msg) # Default is to ack return TftpStateExpectDAT(self.context)
python
def handleDat(self, pkt): """This method handles a DAT packet during a client download, or a server upload.""" log.info("Handling DAT packet - block %d" % pkt.blocknumber) log.debug("Expecting block %s", self.context.next_block) if pkt.blocknumber == self.context.next_block: log.debug("Good, received block %d in sequence", pkt.blocknumber) self.sendACK() self.context.next_block += 1 log.debug("Writing %d bytes to output file", len(pkt.data)) self.context.fileobj.write(pkt.data) self.context.metrics.bytes += len(pkt.data) # Check for end-of-file, any less than full data packet. if len(pkt.data) < self.context.getBlocksize(): log.info("End of file detected") return None elif pkt.blocknumber < self.context.next_block: if pkt.blocknumber == 0: log.warning("There is no block zero!") self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("There is no block zero!") log.warning("Dropping duplicate block %d" % pkt.blocknumber) self.context.metrics.add_dup(pkt) log.debug("ACKing block %d again, just in case", pkt.blocknumber) self.sendACK(pkt.blocknumber) else: # FIXME: should we be more tolerant and just discard instead? msg = "Whoa! Received future block %d but expected %d" \ % (pkt.blocknumber, self.context.next_block) log.error(msg) raise TftpException(msg) # Default is to ack return TftpStateExpectDAT(self.context)
[ "def", "handleDat", "(", "self", ",", "pkt", ")", ":", "log", ".", "info", "(", "\"Handling DAT packet - block %d\"", "%", "pkt", ".", "blocknumber", ")", "log", ".", "debug", "(", "\"Expecting block %s\"", ",", "self", ".", "context", ".", "next_block", ")", "if", "pkt", ".", "blocknumber", "==", "self", ".", "context", ".", "next_block", ":", "log", ".", "debug", "(", "\"Good, received block %d in sequence\"", ",", "pkt", ".", "blocknumber", ")", "self", ".", "sendACK", "(", ")", "self", ".", "context", ".", "next_block", "+=", "1", "log", ".", "debug", "(", "\"Writing %d bytes to output file\"", ",", "len", "(", "pkt", ".", "data", ")", ")", "self", ".", "context", ".", "fileobj", ".", "write", "(", "pkt", ".", "data", ")", "self", ".", "context", ".", "metrics", ".", "bytes", "+=", "len", "(", "pkt", ".", "data", ")", "# Check for end-of-file, any less than full data packet.", "if", "len", "(", "pkt", ".", "data", ")", "<", "self", ".", "context", ".", "getBlocksize", "(", ")", ":", "log", ".", "info", "(", "\"End of file detected\"", ")", "return", "None", "elif", "pkt", ".", "blocknumber", "<", "self", ".", "context", ".", "next_block", ":", "if", "pkt", ".", "blocknumber", "==", "0", ":", "log", ".", "warning", "(", "\"There is no block zero!\"", ")", "self", ".", "sendError", "(", "TftpErrors", ".", "IllegalTftpOp", ")", "raise", "TftpException", "(", "\"There is no block zero!\"", ")", "log", ".", "warning", "(", "\"Dropping duplicate block %d\"", "%", "pkt", ".", "blocknumber", ")", "self", ".", "context", ".", "metrics", ".", "add_dup", "(", "pkt", ")", "log", ".", "debug", "(", "\"ACKing block %d again, just in case\"", ",", "pkt", ".", "blocknumber", ")", "self", ".", "sendACK", "(", "pkt", ".", "blocknumber", ")", "else", ":", "# FIXME: should we be more tolerant and just discard instead?", "msg", "=", "\"Whoa! Received future block %d but expected %d\"", "%", "(", "pkt", ".", "blocknumber", ",", "self", ".", "context", ".", "next_block", ")", "log", ".", "error", "(", "msg", ")", "raise", "TftpException", "(", "msg", ")", "# Default is to ack", "return", "TftpStateExpectDAT", "(", "self", ".", "context", ")" ]
This method handles a DAT packet during a client download, or a server upload.
[ "This", "method", "handles", "a", "DAT", "packet", "during", "a", "client", "download", "or", "a", "server", "upload", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L170-L207
13,345
msoulier/tftpy
tftpy/TftpStates.py
TftpServerState.serverInitial
def serverInitial(self, pkt, raddress, rport): """This method performs initial setup for a server context transfer, put here to refactor code out of the TftpStateServerRecvRRQ and TftpStateServerRecvWRQ classes, since their initial setup is identical. The method returns a boolean, sendoack, to indicate whether it is required to send an OACK to the client.""" options = pkt.options sendoack = False if not self.context.tidport: self.context.tidport = rport log.info("Setting tidport to %s" % rport) log.debug("Setting default options, blksize") self.context.options = { 'blksize': DEF_BLKSIZE } if options: log.debug("Options requested: %s", options) supported_options = self.returnSupportedOptions(options) self.context.options.update(supported_options) sendoack = True # FIXME - only octet mode is supported at this time. if pkt.mode != 'octet': #self.sendError(TftpErrors.IllegalTftpOp) #raise TftpException("Only octet transfers are supported at this time.") log.warning("Received non-octet mode request. I'll reply with binary data.") # test host/port of client end if self.context.host != raddress or self.context.port != rport: self.sendError(TftpErrors.UnknownTID) log.error("Expected traffic from %s:%s but received it " "from %s:%s instead." % (self.context.host, self.context.port, raddress, rport)) # FIXME: increment an error count? # Return same state, we're still waiting for valid traffic. return self log.debug("Requested filename is %s", pkt.filename) # Build the filename on this server and ensure it is contained # in the specified root directory. # # Filenames that begin with server root are accepted. It's # assumed the client and server are tightly connected and this # provides backwards compatibility. # # Filenames otherwise are relative to the server root. If they # begin with a '/' strip it off as otherwise os.path.join will # treat it as absolute (regardless of whether it is ntpath or # posixpath module if pkt.filename.startswith(self.context.root): full_path = pkt.filename else: full_path = os.path.join(self.context.root, pkt.filename.lstrip('/')) # Use abspath to eliminate any remaining relative elements # (e.g. '..') and ensure that is still within the server's # root directory self.full_path = os.path.abspath(full_path) log.debug("full_path is %s", full_path) if self.full_path.startswith(self.context.root): log.info("requested file is in the server root - good") else: log.warning("requested file is not within the server root - bad") self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("bad file path") self.context.file_to_transfer = pkt.filename return sendoack
python
def serverInitial(self, pkt, raddress, rport): """This method performs initial setup for a server context transfer, put here to refactor code out of the TftpStateServerRecvRRQ and TftpStateServerRecvWRQ classes, since their initial setup is identical. The method returns a boolean, sendoack, to indicate whether it is required to send an OACK to the client.""" options = pkt.options sendoack = False if not self.context.tidport: self.context.tidport = rport log.info("Setting tidport to %s" % rport) log.debug("Setting default options, blksize") self.context.options = { 'blksize': DEF_BLKSIZE } if options: log.debug("Options requested: %s", options) supported_options = self.returnSupportedOptions(options) self.context.options.update(supported_options) sendoack = True # FIXME - only octet mode is supported at this time. if pkt.mode != 'octet': #self.sendError(TftpErrors.IllegalTftpOp) #raise TftpException("Only octet transfers are supported at this time.") log.warning("Received non-octet mode request. I'll reply with binary data.") # test host/port of client end if self.context.host != raddress or self.context.port != rport: self.sendError(TftpErrors.UnknownTID) log.error("Expected traffic from %s:%s but received it " "from %s:%s instead." % (self.context.host, self.context.port, raddress, rport)) # FIXME: increment an error count? # Return same state, we're still waiting for valid traffic. return self log.debug("Requested filename is %s", pkt.filename) # Build the filename on this server and ensure it is contained # in the specified root directory. # # Filenames that begin with server root are accepted. It's # assumed the client and server are tightly connected and this # provides backwards compatibility. # # Filenames otherwise are relative to the server root. If they # begin with a '/' strip it off as otherwise os.path.join will # treat it as absolute (regardless of whether it is ntpath or # posixpath module if pkt.filename.startswith(self.context.root): full_path = pkt.filename else: full_path = os.path.join(self.context.root, pkt.filename.lstrip('/')) # Use abspath to eliminate any remaining relative elements # (e.g. '..') and ensure that is still within the server's # root directory self.full_path = os.path.abspath(full_path) log.debug("full_path is %s", full_path) if self.full_path.startswith(self.context.root): log.info("requested file is in the server root - good") else: log.warning("requested file is not within the server root - bad") self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("bad file path") self.context.file_to_transfer = pkt.filename return sendoack
[ "def", "serverInitial", "(", "self", ",", "pkt", ",", "raddress", ",", "rport", ")", ":", "options", "=", "pkt", ".", "options", "sendoack", "=", "False", "if", "not", "self", ".", "context", ".", "tidport", ":", "self", ".", "context", ".", "tidport", "=", "rport", "log", ".", "info", "(", "\"Setting tidport to %s\"", "%", "rport", ")", "log", ".", "debug", "(", "\"Setting default options, blksize\"", ")", "self", ".", "context", ".", "options", "=", "{", "'blksize'", ":", "DEF_BLKSIZE", "}", "if", "options", ":", "log", ".", "debug", "(", "\"Options requested: %s\"", ",", "options", ")", "supported_options", "=", "self", ".", "returnSupportedOptions", "(", "options", ")", "self", ".", "context", ".", "options", ".", "update", "(", "supported_options", ")", "sendoack", "=", "True", "# FIXME - only octet mode is supported at this time.", "if", "pkt", ".", "mode", "!=", "'octet'", ":", "#self.sendError(TftpErrors.IllegalTftpOp)", "#raise TftpException(\"Only octet transfers are supported at this time.\")", "log", ".", "warning", "(", "\"Received non-octet mode request. I'll reply with binary data.\"", ")", "# test host/port of client end", "if", "self", ".", "context", ".", "host", "!=", "raddress", "or", "self", ".", "context", ".", "port", "!=", "rport", ":", "self", ".", "sendError", "(", "TftpErrors", ".", "UnknownTID", ")", "log", ".", "error", "(", "\"Expected traffic from %s:%s but received it \"", "\"from %s:%s instead.\"", "%", "(", "self", ".", "context", ".", "host", ",", "self", ".", "context", ".", "port", ",", "raddress", ",", "rport", ")", ")", "# FIXME: increment an error count?", "# Return same state, we're still waiting for valid traffic.", "return", "self", "log", ".", "debug", "(", "\"Requested filename is %s\"", ",", "pkt", ".", "filename", ")", "# Build the filename on this server and ensure it is contained", "# in the specified root directory.", "#", "# Filenames that begin with server root are accepted. It's", "# assumed the client and server are tightly connected and this", "# provides backwards compatibility.", "#", "# Filenames otherwise are relative to the server root. If they", "# begin with a '/' strip it off as otherwise os.path.join will", "# treat it as absolute (regardless of whether it is ntpath or", "# posixpath module", "if", "pkt", ".", "filename", ".", "startswith", "(", "self", ".", "context", ".", "root", ")", ":", "full_path", "=", "pkt", ".", "filename", "else", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "context", ".", "root", ",", "pkt", ".", "filename", ".", "lstrip", "(", "'/'", ")", ")", "# Use abspath to eliminate any remaining relative elements", "# (e.g. '..') and ensure that is still within the server's", "# root directory", "self", ".", "full_path", "=", "os", ".", "path", ".", "abspath", "(", "full_path", ")", "log", ".", "debug", "(", "\"full_path is %s\"", ",", "full_path", ")", "if", "self", ".", "full_path", ".", "startswith", "(", "self", ".", "context", ".", "root", ")", ":", "log", ".", "info", "(", "\"requested file is in the server root - good\"", ")", "else", ":", "log", ".", "warning", "(", "\"requested file is not within the server root - bad\"", ")", "self", ".", "sendError", "(", "TftpErrors", ".", "IllegalTftpOp", ")", "raise", "TftpException", "(", "\"bad file path\"", ")", "self", ".", "context", ".", "file_to_transfer", "=", "pkt", ".", "filename", "return", "sendoack" ]
This method performs initial setup for a server context transfer, put here to refactor code out of the TftpStateServerRecvRRQ and TftpStateServerRecvWRQ classes, since their initial setup is identical. The method returns a boolean, sendoack, to indicate whether it is required to send an OACK to the client.
[ "This", "method", "performs", "initial", "setup", "for", "a", "server", "context", "transfer", "put", "here", "to", "refactor", "code", "out", "of", "the", "TftpStateServerRecvRRQ", "and", "TftpStateServerRecvWRQ", "classes", "since", "their", "initial", "setup", "is", "identical", ".", "The", "method", "returns", "a", "boolean", "sendoack", "to", "indicate", "whether", "it", "is", "required", "to", "send", "an", "OACK", "to", "the", "client", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L219-L291
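The heart of serverInitial above is the path-containment check: the requested name is joined under the server root and then normalized, so that '..' components cannot escape the root. A minimal standalone sketch of the same rule, with hypothetical root and filenames (an illustration, not tftpy API):

import os

def contain_path(root, filename):
    # Names that already start with the root are accepted as-is (backwards
    # compatibility); everything else is taken relative to the root, with a
    # leading '/' stripped so os.path.join does not treat it as absolute.
    if filename.startswith(root):
        full_path = filename
    else:
        full_path = os.path.join(root, filename.lstrip('/'))
    # abspath collapses any remaining relative elements such as '..'.
    full_path = os.path.abspath(full_path)
    if not full_path.startswith(root):
        raise ValueError('bad file path: %s' % filename)
    return full_path

print(contain_path('/srv/tftp', 'firmware.bin'))      # /srv/tftp/firmware.bin
print(contain_path('/srv/tftp', '../../etc/passwd'))  # raises ValueError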
13,346
msoulier/tftpy
tftpy/TftpStates.py
TftpStateServerRecvRRQ.handle
def handle(self, pkt, raddress, rport): "Handle an initial RRQ packet as a server." log.debug("In TftpStateServerRecvRRQ.handle") sendoack = self.serverInitial(pkt, raddress, rport) path = self.full_path log.info("Opening file %s for reading" % path) if os.path.exists(path): # Note: Open in binary mode for win32 portability, since win32 # blows. self.context.fileobj = open(path, "rb") elif self.context.dyn_file_func: log.debug("No such file %s but using dyn_file_func", path) self.context.fileobj = \ self.context.dyn_file_func(self.context.file_to_transfer, raddress=raddress, rport=rport) if self.context.fileobj is None: log.debug("dyn_file_func returned 'None', treating as " "FileNotFound") self.sendError(TftpErrors.FileNotFound) raise TftpException("File not found: %s" % path) else: log.warn("File not found: %s", path) self.sendError(TftpErrors.FileNotFound) raise TftpException("File not found: {}".format(path)) # Options negotiation. if sendoack and 'tsize' in self.context.options: # getting the file size for the tsize option. As we handle # file-like objects and not only real files, we use this seeking # method instead of asking the OS self.context.fileobj.seek(0, os.SEEK_END) tsize = str(self.context.fileobj.tell()) self.context.fileobj.seek(0, 0) self.context.options['tsize'] = tsize if sendoack: # Note, next_block is 0 here since that's the proper # acknowledgement to an OACK. # FIXME: perhaps we do need a TftpStateExpectOACK class... self.sendOACK() # Note, self.context.next_block is already 0. else: self.context.next_block = 1 log.debug("No requested options, starting send...") self.context.pending_complete = self.sendDAT() # Note, we expect an ack regardless of whether we sent a DAT or an # OACK. return TftpStateExpectACK(self.context)
python
def handle(self, pkt, raddress, rport): "Handle an initial RRQ packet as a server." log.debug("In TftpStateServerRecvRRQ.handle") sendoack = self.serverInitial(pkt, raddress, rport) path = self.full_path log.info("Opening file %s for reading" % path) if os.path.exists(path): # Note: Open in binary mode for win32 portability, since win32 # blows. self.context.fileobj = open(path, "rb") elif self.context.dyn_file_func: log.debug("No such file %s but using dyn_file_func", path) self.context.fileobj = \ self.context.dyn_file_func(self.context.file_to_transfer, raddress=raddress, rport=rport) if self.context.fileobj is None: log.debug("dyn_file_func returned 'None', treating as " "FileNotFound") self.sendError(TftpErrors.FileNotFound) raise TftpException("File not found: %s" % path) else: log.warn("File not found: %s", path) self.sendError(TftpErrors.FileNotFound) raise TftpException("File not found: {}".format(path)) # Options negotiation. if sendoack and 'tsize' in self.context.options: # getting the file size for the tsize option. As we handle # file-like objects and not only real files, we use this seeking # method instead of asking the OS self.context.fileobj.seek(0, os.SEEK_END) tsize = str(self.context.fileobj.tell()) self.context.fileobj.seek(0, 0) self.context.options['tsize'] = tsize if sendoack: # Note, next_block is 0 here since that's the proper # acknowledgement to an OACK. # FIXME: perhaps we do need a TftpStateExpectOACK class... self.sendOACK() # Note, self.context.next_block is already 0. else: self.context.next_block = 1 log.debug("No requested options, starting send...") self.context.pending_complete = self.sendDAT() # Note, we expect an ack regardless of whether we sent a DAT or an # OACK. return TftpStateExpectACK(self.context)
[ "def", "handle", "(", "self", ",", "pkt", ",", "raddress", ",", "rport", ")", ":", "log", ".", "debug", "(", "\"In TftpStateServerRecvRRQ.handle\"", ")", "sendoack", "=", "self", ".", "serverInitial", "(", "pkt", ",", "raddress", ",", "rport", ")", "path", "=", "self", ".", "full_path", "log", ".", "info", "(", "\"Opening file %s for reading\"", "%", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "# Note: Open in binary mode for win32 portability, since win32", "# blows.", "self", ".", "context", ".", "fileobj", "=", "open", "(", "path", ",", "\"rb\"", ")", "elif", "self", ".", "context", ".", "dyn_file_func", ":", "log", ".", "debug", "(", "\"No such file %s but using dyn_file_func\"", ",", "path", ")", "self", ".", "context", ".", "fileobj", "=", "self", ".", "context", ".", "dyn_file_func", "(", "self", ".", "context", ".", "file_to_transfer", ",", "raddress", "=", "raddress", ",", "rport", "=", "rport", ")", "if", "self", ".", "context", ".", "fileobj", "is", "None", ":", "log", ".", "debug", "(", "\"dyn_file_func returned 'None', treating as \"", "\"FileNotFound\"", ")", "self", ".", "sendError", "(", "TftpErrors", ".", "FileNotFound", ")", "raise", "TftpException", "(", "\"File not found: %s\"", "%", "path", ")", "else", ":", "log", ".", "warn", "(", "\"File not found: %s\"", ",", "path", ")", "self", ".", "sendError", "(", "TftpErrors", ".", "FileNotFound", ")", "raise", "TftpException", "(", "\"File not found: {}\"", ".", "format", "(", "path", ")", ")", "# Options negotiation.", "if", "sendoack", "and", "'tsize'", "in", "self", ".", "context", ".", "options", ":", "# getting the file size for the tsize option. As we handle", "# file-like objects and not only real files, we use this seeking", "# method instead of asking the OS", "self", ".", "context", ".", "fileobj", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "tsize", "=", "str", "(", "self", ".", "context", ".", "fileobj", ".", "tell", "(", ")", ")", "self", ".", "context", ".", "fileobj", ".", "seek", "(", "0", ",", "0", ")", "self", ".", "context", ".", "options", "[", "'tsize'", "]", "=", "tsize", "if", "sendoack", ":", "# Note, next_block is 0 here since that's the proper", "# acknowledgement to an OACK.", "# FIXME: perhaps we do need a TftpStateExpectOACK class...", "self", ".", "sendOACK", "(", ")", "# Note, self.context.next_block is already 0.", "else", ":", "self", ".", "context", ".", "next_block", "=", "1", "log", ".", "debug", "(", "\"No requested options, starting send...\"", ")", "self", ".", "context", ".", "pending_complete", "=", "self", ".", "sendDAT", "(", ")", "# Note, we expect an ack regardless of whether we sent a DAT or an", "# OACK.", "return", "TftpStateExpectACK", "(", "self", ".", "context", ")" ]
Handle an initial RRQ packet as a server.
[ "Handle", "an", "initial", "RRQ", "packet", "as", "a", "server", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L297-L344
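The dyn_file_func branch above implies an informal contract: the callable receives the requested filename plus raddress and rport keyword arguments, and returns either a binary, seekable file-like object (the handler above seeks it when computing the tsize option) or None to signal file-not-found. A hedged sketch of such a callable; the magic name and content are invented for illustration:

import io

def dyn_file_func(name, raddress=None, rport=None):
    # Serve generated content for one known name; None means "not found".
    if name == 'hello.txt':
        data = 'hello from %s:%s\n' % (raddress, rport)
        return io.BytesIO(data.encode('ascii'))  # binary and seekable
    return None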
13,347
msoulier/tftpy
tftpy/TftpStates.py
TftpStateServerRecvWRQ.make_subdirs
def make_subdirs(self):
    """The purpose of this method is to, if necessary, create all of the
    subdirectories leading up to the file to be written."""
    # Pull off everything below the root.
    subpath = self.full_path[len(self.context.root):]
    log.debug("make_subdirs: subpath is %s", subpath)
    # Split on directory separators, but drop the last one, as it should
    # be the filename.
    dirs = subpath.split(os.sep)[:-1]
    log.debug("dirs is %s", dirs)
    current = self.context.root
    for dir in dirs:
        if dir:
            current = os.path.join(current, dir)
            if os.path.isdir(current):
                log.debug("%s is already an existing directory", current)
            else:
                os.mkdir(current, 0o700)
python
def make_subdirs(self):
    """The purpose of this method is to, if necessary, create all of the
    subdirectories leading up to the file to be written."""
    # Pull off everything below the root.
    subpath = self.full_path[len(self.context.root):]
    log.debug("make_subdirs: subpath is %s", subpath)
    # Split on directory separators, but drop the last one, as it should
    # be the filename.
    dirs = subpath.split(os.sep)[:-1]
    log.debug("dirs is %s", dirs)
    current = self.context.root
    for dir in dirs:
        if dir:
            current = os.path.join(current, dir)
            if os.path.isdir(current):
                log.debug("%s is already an existing directory", current)
            else:
                os.mkdir(current, 0o700)
[ "def", "make_subdirs", "(", "self", ")", ":", "# Pull off everything below the root.", "subpath", "=", "self", ".", "full_path", "[", "len", "(", "self", ".", "context", ".", "root", ")", ":", "]", "log", ".", "debug", "(", "\"make_subdirs: subpath is %s\"", ",", "subpath", ")", "# Split on directory separators, but drop the last one, as it should", "# be the filename.", "dirs", "=", "subpath", ".", "split", "(", "os", ".", "sep", ")", "[", ":", "-", "1", "]", "log", ".", "debug", "(", "\"dirs is %s\"", ",", "dirs", ")", "current", "=", "self", ".", "context", ".", "root", "for", "dir", "in", "dirs", ":", "if", "dir", ":", "current", "=", "os", ".", "path", ".", "join", "(", "current", ",", "dir", ")", "if", "os", ".", "path", ".", "isdir", "(", "current", ")", ":", "log", ".", "debug", "(", "\"%s is already an existing directory\"", ",", "current", ")", "else", ":", "os", ".", "mkdir", "(", "current", ",", "0o700", ")" ]
The purpose of this method is to, if necessary, create all of the subdirectories leading up to the file to be written.
[ "The", "purpose", "of", "this", "method", "is", "to", "if", "necessary", "create", "all", "of", "the", "subdirectories", "leading", "up", "to", "the", "file", "to", "the", "written", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L352-L369
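make_subdirs walks the path one component at a time so that every intermediate directory is created with mode 0o700. Python 3 offers a one-call alternative, sketched below with a hypothetical target path; note, however, that since Python 3.7 os.makedirs applies its mode argument only to the final directory, which is one reason the explicit loop above can be preferable:

import os

full_path = '/srv/tftp/uploads/2024/image.bin'  # hypothetical upload target
os.makedirs(os.path.dirname(full_path), mode=0o700, exist_ok=True)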
13,348
msoulier/tftpy
tftpy/TftpStates.py
TftpStateServerRecvWRQ.handle
def handle(self, pkt, raddress, rport): "Handle an initial WRQ packet as a server." log.debug("In TftpStateServerRecvWRQ.handle") sendoack = self.serverInitial(pkt, raddress, rport) path = self.full_path if self.context.upload_open: f = self.context.upload_open(path, self.context) if f is None: self.sendError(TftpErrors.AccessViolation) raise TftpException("Dynamic path %s not permitted" % path) else: self.context.fileobj = f else: log.info("Opening file %s for writing" % path) if os.path.exists(path): # FIXME: correct behavior? log.warning("File %s exists already, overwriting..." % ( self.context.file_to_transfer)) # FIXME: I think we should upload to a temp file and not overwrite # the existing file until the file is successfully uploaded. self.make_subdirs() self.context.fileobj = open(path, "wb") # Options negotiation. if sendoack: log.debug("Sending OACK to client") self.sendOACK() else: log.debug("No requested options, expecting transfer to begin...") self.sendACK() # Whether we're sending an oack or not, we're expecting a DAT for # block 1 self.context.next_block = 1 # We may have sent an OACK, but we're expecting a DAT as the response # to either the OACK or an ACK, so lets unconditionally use the # TftpStateExpectDAT state. return TftpStateExpectDAT(self.context)
python
def handle(self, pkt, raddress, rport): "Handle an initial WRQ packet as a server." log.debug("In TftpStateServerRecvWRQ.handle") sendoack = self.serverInitial(pkt, raddress, rport) path = self.full_path if self.context.upload_open: f = self.context.upload_open(path, self.context) if f is None: self.sendError(TftpErrors.AccessViolation) raise TftpException("Dynamic path %s not permitted" % path) else: self.context.fileobj = f else: log.info("Opening file %s for writing" % path) if os.path.exists(path): # FIXME: correct behavior? log.warning("File %s exists already, overwriting..." % ( self.context.file_to_transfer)) # FIXME: I think we should upload to a temp file and not overwrite # the existing file until the file is successfully uploaded. self.make_subdirs() self.context.fileobj = open(path, "wb") # Options negotiation. if sendoack: log.debug("Sending OACK to client") self.sendOACK() else: log.debug("No requested options, expecting transfer to begin...") self.sendACK() # Whether we're sending an oack or not, we're expecting a DAT for # block 1 self.context.next_block = 1 # We may have sent an OACK, but we're expecting a DAT as the response # to either the OACK or an ACK, so lets unconditionally use the # TftpStateExpectDAT state. return TftpStateExpectDAT(self.context)
[ "def", "handle", "(", "self", ",", "pkt", ",", "raddress", ",", "rport", ")", ":", "log", ".", "debug", "(", "\"In TftpStateServerRecvWRQ.handle\"", ")", "sendoack", "=", "self", ".", "serverInitial", "(", "pkt", ",", "raddress", ",", "rport", ")", "path", "=", "self", ".", "full_path", "if", "self", ".", "context", ".", "upload_open", ":", "f", "=", "self", ".", "context", ".", "upload_open", "(", "path", ",", "self", ".", "context", ")", "if", "f", "is", "None", ":", "self", ".", "sendError", "(", "TftpErrors", ".", "AccessViolation", ")", "raise", "TftpException", "(", "\"Dynamic path %s not permitted\"", "%", "path", ")", "else", ":", "self", ".", "context", ".", "fileobj", "=", "f", "else", ":", "log", ".", "info", "(", "\"Opening file %s for writing\"", "%", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "# FIXME: correct behavior?", "log", ".", "warning", "(", "\"File %s exists already, overwriting...\"", "%", "(", "self", ".", "context", ".", "file_to_transfer", ")", ")", "# FIXME: I think we should upload to a temp file and not overwrite", "# the existing file until the file is successfully uploaded.", "self", ".", "make_subdirs", "(", ")", "self", ".", "context", ".", "fileobj", "=", "open", "(", "path", ",", "\"wb\"", ")", "# Options negotiation.", "if", "sendoack", ":", "log", ".", "debug", "(", "\"Sending OACK to client\"", ")", "self", ".", "sendOACK", "(", ")", "else", ":", "log", ".", "debug", "(", "\"No requested options, expecting transfer to begin...\"", ")", "self", ".", "sendACK", "(", ")", "# Whether we're sending an oack or not, we're expecting a DAT for", "# block 1", "self", ".", "context", ".", "next_block", "=", "1", "# We may have sent an OACK, but we're expecting a DAT as the response", "# to either the OACK or an ACK, so lets unconditionally use the", "# TftpStateExpectDAT state.", "return", "TftpStateExpectDAT", "(", "self", ".", "context", ")" ]
Handle an initial WRQ packet as a server.
[ "Handle", "an", "initial", "WRQ", "packet", "as", "a", "server", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L371-L407
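Like dyn_file_func on the read side, upload_open is a hook with an informal contract: it is called with the resolved path and the transfer context, and must return a writable binary file object, or None to have the request rejected with AccessViolation. A sketch with an invented policy (only .bin files accepted):

def upload_open(path, context):
    # Returning None makes the server reject the request with AccessViolation.
    if not path.endswith('.bin'):
        return None
    return open(path, 'wb')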
13,349
msoulier/tftpy
tftpy/TftpStates.py
TftpStateExpectACK.handle
def handle(self, pkt, raddress, rport): "Handle a packet, hopefully an ACK since we just sent a DAT." if isinstance(pkt, TftpPacketACK): log.debug("Received ACK for packet %d" % pkt.blocknumber) # Is this an ack to the one we just sent? if self.context.next_block == pkt.blocknumber: if self.context.pending_complete: log.info("Received ACK to final DAT, we're done.") return None else: log.debug("Good ACK, sending next DAT") self.context.next_block += 1 log.debug("Incremented next_block to %d", self.context.next_block) self.context.pending_complete = self.sendDAT() elif pkt.blocknumber < self.context.next_block: log.warning("Received duplicate ACK for block %d" % pkt.blocknumber) self.context.metrics.add_dup(pkt) else: log.warning("Oooh, time warp. Received ACK to packet we " "didn't send yet. Discarding.") self.context.metrics.errors += 1 return self elif isinstance(pkt, TftpPacketERR): log.error("Received ERR packet from peer: %s" % str(pkt)) raise TftpException("Received ERR packet from peer: %s" % str(pkt)) else: log.warning("Discarding unsupported packet: %s" % str(pkt)) return self
python
def handle(self, pkt, raddress, rport): "Handle a packet, hopefully an ACK since we just sent a DAT." if isinstance(pkt, TftpPacketACK): log.debug("Received ACK for packet %d" % pkt.blocknumber) # Is this an ack to the one we just sent? if self.context.next_block == pkt.blocknumber: if self.context.pending_complete: log.info("Received ACK to final DAT, we're done.") return None else: log.debug("Good ACK, sending next DAT") self.context.next_block += 1 log.debug("Incremented next_block to %d", self.context.next_block) self.context.pending_complete = self.sendDAT() elif pkt.blocknumber < self.context.next_block: log.warning("Received duplicate ACK for block %d" % pkt.blocknumber) self.context.metrics.add_dup(pkt) else: log.warning("Oooh, time warp. Received ACK to packet we " "didn't send yet. Discarding.") self.context.metrics.errors += 1 return self elif isinstance(pkt, TftpPacketERR): log.error("Received ERR packet from peer: %s" % str(pkt)) raise TftpException("Received ERR packet from peer: %s" % str(pkt)) else: log.warning("Discarding unsupported packet: %s" % str(pkt)) return self
[ "def", "handle", "(", "self", ",", "pkt", ",", "raddress", ",", "rport", ")", ":", "if", "isinstance", "(", "pkt", ",", "TftpPacketACK", ")", ":", "log", ".", "debug", "(", "\"Received ACK for packet %d\"", "%", "pkt", ".", "blocknumber", ")", "# Is this an ack to the one we just sent?", "if", "self", ".", "context", ".", "next_block", "==", "pkt", ".", "blocknumber", ":", "if", "self", ".", "context", ".", "pending_complete", ":", "log", ".", "info", "(", "\"Received ACK to final DAT, we're done.\"", ")", "return", "None", "else", ":", "log", ".", "debug", "(", "\"Good ACK, sending next DAT\"", ")", "self", ".", "context", ".", "next_block", "+=", "1", "log", ".", "debug", "(", "\"Incremented next_block to %d\"", ",", "self", ".", "context", ".", "next_block", ")", "self", ".", "context", ".", "pending_complete", "=", "self", ".", "sendDAT", "(", ")", "elif", "pkt", ".", "blocknumber", "<", "self", ".", "context", ".", "next_block", ":", "log", ".", "warning", "(", "\"Received duplicate ACK for block %d\"", "%", "pkt", ".", "blocknumber", ")", "self", ".", "context", ".", "metrics", ".", "add_dup", "(", "pkt", ")", "else", ":", "log", ".", "warning", "(", "\"Oooh, time warp. Received ACK to packet we \"", "\"didn't send yet. Discarding.\"", ")", "self", ".", "context", ".", "metrics", ".", "errors", "+=", "1", "return", "self", "elif", "isinstance", "(", "pkt", ",", "TftpPacketERR", ")", ":", "log", ".", "error", "(", "\"Received ERR packet from peer: %s\"", "%", "str", "(", "pkt", ")", ")", "raise", "TftpException", "(", "\"Received ERR packet from peer: %s\"", "%", "str", "(", "pkt", ")", ")", "else", ":", "log", ".", "warning", "(", "\"Discarding unsupported packet: %s\"", "%", "str", "(", "pkt", ")", ")", "return", "self" ]
Handle a packet, hopefully an ACK since we just sent a DAT.
[ "Handle", "a", "packet", "hopefully", "an", "ACK", "since", "we", "just", "sent", "a", "DAT", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L438-L469
13,350
msoulier/tftpy
tftpy/TftpStates.py
TftpStateExpectDAT.handle
def handle(self, pkt, raddress, rport): """Handle the packet in response to an ACK, which should be a DAT.""" if isinstance(pkt, TftpPacketDAT): return self.handleDat(pkt) # Every other packet type is a problem. elif isinstance(pkt, TftpPacketACK): # Umm, we ACK, you don't. self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received ACK from peer when expecting DAT") elif isinstance(pkt, TftpPacketWRQ): self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received WRQ from peer when expecting DAT") elif isinstance(pkt, TftpPacketERR): self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received ERR from peer: " + str(pkt)) else: self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received unknown packet type from peer: " + str(pkt))
python
def handle(self, pkt, raddress, rport): """Handle the packet in response to an ACK, which should be a DAT.""" if isinstance(pkt, TftpPacketDAT): return self.handleDat(pkt) # Every other packet type is a problem. elif isinstance(pkt, TftpPacketACK): # Umm, we ACK, you don't. self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received ACK from peer when expecting DAT") elif isinstance(pkt, TftpPacketWRQ): self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received WRQ from peer when expecting DAT") elif isinstance(pkt, TftpPacketERR): self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received ERR from peer: " + str(pkt)) else: self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received unknown packet type from peer: " + str(pkt))
[ "def", "handle", "(", "self", ",", "pkt", ",", "raddress", ",", "rport", ")", ":", "if", "isinstance", "(", "pkt", ",", "TftpPacketDAT", ")", ":", "return", "self", ".", "handleDat", "(", "pkt", ")", "# Every other packet type is a problem.", "elif", "isinstance", "(", "pkt", ",", "TftpPacketACK", ")", ":", "# Umm, we ACK, you don't.", "self", ".", "sendError", "(", "TftpErrors", ".", "IllegalTftpOp", ")", "raise", "TftpException", "(", "\"Received ACK from peer when expecting DAT\"", ")", "elif", "isinstance", "(", "pkt", ",", "TftpPacketWRQ", ")", ":", "self", ".", "sendError", "(", "TftpErrors", ".", "IllegalTftpOp", ")", "raise", "TftpException", "(", "\"Received WRQ from peer when expecting DAT\"", ")", "elif", "isinstance", "(", "pkt", ",", "TftpPacketERR", ")", ":", "self", ".", "sendError", "(", "TftpErrors", ".", "IllegalTftpOp", ")", "raise", "TftpException", "(", "\"Received ERR from peer: \"", "+", "str", "(", "pkt", ")", ")", "else", ":", "self", ".", "sendError", "(", "TftpErrors", ".", "IllegalTftpOp", ")", "raise", "TftpException", "(", "\"Received unknown packet type from peer: \"", "+", "str", "(", "pkt", ")", ")" ]
Handle the packet in response to an ACK, which should be a DAT.
[ "Handle", "the", "packet", "in", "response", "to", "an", "ACK", "which", "should", "be", "a", "DAT", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L473-L494
13,351
msoulier/tftpy
tftpy/TftpStates.py
TftpStateSentRRQ.handle
def handle(self, pkt, raddress, rport): """Handle the packet in response to an RRQ to the server.""" if not self.context.tidport: self.context.tidport = rport log.info("Set remote port for session to %s" % rport) # Now check the packet type and dispatch it properly. if isinstance(pkt, TftpPacketOACK): log.info("Received OACK from server") try: self.handleOACK(pkt) except TftpException as err: log.error("Failed to negotiate options: %s" % str(err)) self.sendError(TftpErrors.FailedNegotiation) raise else: log.debug("Sending ACK to OACK") self.sendACK(blocknumber=0) log.debug("Changing state to TftpStateExpectDAT") return TftpStateExpectDAT(self.context) elif isinstance(pkt, TftpPacketDAT): # If there are any options set, then the server didn't honour any # of them. log.info("Received DAT from server") if self.context.options: log.info("Server ignored options, falling back to defaults") self.context.options = { 'blksize': DEF_BLKSIZE } return self.handleDat(pkt) # Every other packet type is a problem. elif isinstance(pkt, TftpPacketACK): # Umm, we ACK, the server doesn't. self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received ACK from server while in download") elif isinstance(pkt, TftpPacketWRQ): self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received WRQ from server while in download") elif isinstance(pkt, TftpPacketERR): self.sendError(TftpErrors.IllegalTftpOp) log.debug("Received ERR packet: %s", pkt) if pkt.errorcode == TftpErrors.FileNotFound: raise TftpFileNotFoundError("File not found") else: raise TftpException("Received ERR from server: {}".format(pkt)) else: self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received unknown packet type from server: %s" % pkt) # By default, no state change. return self
python
def handle(self, pkt, raddress, rport): """Handle the packet in response to an RRQ to the server.""" if not self.context.tidport: self.context.tidport = rport log.info("Set remote port for session to %s" % rport) # Now check the packet type and dispatch it properly. if isinstance(pkt, TftpPacketOACK): log.info("Received OACK from server") try: self.handleOACK(pkt) except TftpException as err: log.error("Failed to negotiate options: %s" % str(err)) self.sendError(TftpErrors.FailedNegotiation) raise else: log.debug("Sending ACK to OACK") self.sendACK(blocknumber=0) log.debug("Changing state to TftpStateExpectDAT") return TftpStateExpectDAT(self.context) elif isinstance(pkt, TftpPacketDAT): # If there are any options set, then the server didn't honour any # of them. log.info("Received DAT from server") if self.context.options: log.info("Server ignored options, falling back to defaults") self.context.options = { 'blksize': DEF_BLKSIZE } return self.handleDat(pkt) # Every other packet type is a problem. elif isinstance(pkt, TftpPacketACK): # Umm, we ACK, the server doesn't. self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received ACK from server while in download") elif isinstance(pkt, TftpPacketWRQ): self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received WRQ from server while in download") elif isinstance(pkt, TftpPacketERR): self.sendError(TftpErrors.IllegalTftpOp) log.debug("Received ERR packet: %s", pkt) if pkt.errorcode == TftpErrors.FileNotFound: raise TftpFileNotFoundError("File not found") else: raise TftpException("Received ERR from server: {}".format(pkt)) else: self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received unknown packet type from server: %s" % pkt) # By default, no state change. return self
[ "def", "handle", "(", "self", ",", "pkt", ",", "raddress", ",", "rport", ")", ":", "if", "not", "self", ".", "context", ".", "tidport", ":", "self", ".", "context", ".", "tidport", "=", "rport", "log", ".", "info", "(", "\"Set remote port for session to %s\"", "%", "rport", ")", "# Now check the packet type and dispatch it properly.", "if", "isinstance", "(", "pkt", ",", "TftpPacketOACK", ")", ":", "log", ".", "info", "(", "\"Received OACK from server\"", ")", "try", ":", "self", ".", "handleOACK", "(", "pkt", ")", "except", "TftpException", "as", "err", ":", "log", ".", "error", "(", "\"Failed to negotiate options: %s\"", "%", "str", "(", "err", ")", ")", "self", ".", "sendError", "(", "TftpErrors", ".", "FailedNegotiation", ")", "raise", "else", ":", "log", ".", "debug", "(", "\"Sending ACK to OACK\"", ")", "self", ".", "sendACK", "(", "blocknumber", "=", "0", ")", "log", ".", "debug", "(", "\"Changing state to TftpStateExpectDAT\"", ")", "return", "TftpStateExpectDAT", "(", "self", ".", "context", ")", "elif", "isinstance", "(", "pkt", ",", "TftpPacketDAT", ")", ":", "# If there are any options set, then the server didn't honour any", "# of them.", "log", ".", "info", "(", "\"Received DAT from server\"", ")", "if", "self", ".", "context", ".", "options", ":", "log", ".", "info", "(", "\"Server ignored options, falling back to defaults\"", ")", "self", ".", "context", ".", "options", "=", "{", "'blksize'", ":", "DEF_BLKSIZE", "}", "return", "self", ".", "handleDat", "(", "pkt", ")", "# Every other packet type is a problem.", "elif", "isinstance", "(", "pkt", ",", "TftpPacketACK", ")", ":", "# Umm, we ACK, the server doesn't.", "self", ".", "sendError", "(", "TftpErrors", ".", "IllegalTftpOp", ")", "raise", "TftpException", "(", "\"Received ACK from server while in download\"", ")", "elif", "isinstance", "(", "pkt", ",", "TftpPacketWRQ", ")", ":", "self", ".", "sendError", "(", "TftpErrors", ".", "IllegalTftpOp", ")", "raise", "TftpException", "(", "\"Received WRQ from server while in download\"", ")", "elif", "isinstance", "(", "pkt", ",", "TftpPacketERR", ")", ":", "self", ".", "sendError", "(", "TftpErrors", ".", "IllegalTftpOp", ")", "log", ".", "debug", "(", "\"Received ERR packet: %s\"", ",", "pkt", ")", "if", "pkt", ".", "errorcode", "==", "TftpErrors", ".", "FileNotFound", ":", "raise", "TftpFileNotFoundError", "(", "\"File not found\"", ")", "else", ":", "raise", "TftpException", "(", "\"Received ERR from server: {}\"", ".", "format", "(", "pkt", ")", ")", "else", ":", "self", ".", "sendError", "(", "TftpErrors", ".", "IllegalTftpOp", ")", "raise", "TftpException", "(", "\"Received unknown packet type from server: %s\"", "%", "pkt", ")", "# By default, no state change.", "return", "self" ]
Handle the packet in response to an RRQ to the server.
[ "Handle", "the", "packet", "in", "response", "to", "an", "RRQ", "to", "the", "server", "." ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L556-L611
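TftpStateSentRRQ is the state a tftpy client enters right after sending its read request; the OACK and DAT branches above implement option negotiation and its fallback. Applications normally drive this machinery indirectly through the client object, roughly as below (host, port and filenames are hypothetical):

import tftpy

client = tftpy.TftpClient('192.168.0.10', 69, options={'blksize': 1400})
client.download('remote_file.bin', 'local_file.bin')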
13,352
msoulier/tftpy
tftpy/TftpServer.py
TftpServer.stop
def stop(self, now=False):
    """Stop the server gracefully. Do not take any new transfers,
    but complete the existing ones. If now is True, drop everything
    and stop immediately. Note, an immediate stop will not interrupt
    the select loop; it takes effect when the server returns on ready
    data, or a timeout. ie. SOCK_TIMEOUT"""
    if now:
        self.shutdown_immediately = True
    else:
        self.shutdown_gracefully = True
python
def stop(self, now=False):
    """Stop the server gracefully. Do not take any new transfers,
    but complete the existing ones. If now is True, drop everything
    and stop immediately. Note, an immediate stop will not interrupt
    the select loop; it takes effect when the server returns on ready
    data, or a timeout. ie. SOCK_TIMEOUT"""
    if now:
        self.shutdown_immediately = True
    else:
        self.shutdown_gracefully = True
[ "def", "stop", "(", "self", ",", "now", "=", "False", ")", ":", "if", "now", ":", "self", ".", "shutdown_immediately", "=", "True", "else", ":", "self", ".", "shutdown_gracefully", "=", "True" ]
Stop the server gracefully. Do not take any new transfers, but complete the existing ones. If now is True, drop everything and stop immediately. Note, an immediate stop will not interrupt the select loop; it takes effect when the server returns on ready data, or a timeout. ie. SOCK_TIMEOUT
[ "Stop", "the", "server", "gracefully", ".", "Do", "not", "take", "any", "new", "transfers", "but", "complete", "the", "existing", "ones", ".", "If", "force", "is", "True", "drop", "everything", "and", "stop", ".", "Note", "immediately", "will", "not", "interrupt", "the", "select", "loop", "it", "will", "happen", "when", "the", "server", "returns", "on", "ready", "data", "or", "a", "timeout", ".", "ie", ".", "SOCK_TIMEOUT" ]
af2f2fe89a3bf45748b78703820efb0986a8207a
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpServer.py#L249-L258
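Because stop() only sets a flag, it is safe to call from another thread; the listening loop notices the flag the next time its select() returns, i.e. within SOCK_TIMEOUT. A usage sketch with a hypothetical root directory and port:

import threading
import tftpy

server = tftpy.TftpServer('/srv/tftp')  # hypothetical server root
thread = threading.Thread(target=server.listen, args=('0.0.0.0', 6969))
thread.start()
# ... later, from the controlling thread:
server.stop(now=False)  # finish active transfers, accept no new ones
thread.join()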
13,353
avelkoski/FRB
fred/helpers/__init__.py
_fetch
def _fetch(url, ssl_verify = True):
    """
    Helper function to fetch content from a given url.
    """
    req = Request(url)
    if ssl_verify:
        page = urlopen(req)
    else:
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        page = urlopen(req, context=ctx)
    content = page.read().decode('utf-8')
    page.close()
    return content
python
def _fetch(url, ssl_verify = True):
    """
    Helper function to fetch content from a given url.
    """
    req = Request(url)
    if ssl_verify:
        page = urlopen(req)
    else:
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        page = urlopen(req, context=ctx)
    content = page.read().decode('utf-8')
    page.close()
    return content
[ "def", "_fetch", "(", "url", ",", "ssl_verify", "=", "True", ")", ":", "req", "=", "Request", "(", "url", ")", "if", "ssl_verify", ":", "page", "=", "urlopen", "(", "req", ")", "else", ":", "ctx", "=", "ssl", ".", "create_default_context", "(", ")", "ctx", ".", "check_hostname", "=", "False", "ctx", ".", "verify_mode", "=", "ssl", ".", "CERT_NONE", "page", "=", "urlopen", "(", "req", ",", "context", "=", "ctx", ")", "content", "=", "page", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "page", ".", "close", "(", ")", "return", "content" ]
Helper function to fetch content from a given url.
[ "Helper", "funcation", "to", "fetch", "content", "from", "a", "given", "url", "." ]
692bcf576e17bd1a81db2b7644f4f61aeb39e5c7
https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L25-L40
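_fetch assumes module-level imports along the lines of from urllib.request import Request, urlopen and import ssl. With ssl_verify=False it builds a context that skips both hostname and certificate checks, so it should only be pointed at hosts that are already trusted. A call with a hypothetical URL:

content = _fetch('https://example.com/data.json', ssl_verify=False)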
13,354
avelkoski/FRB
fred/helpers/__init__.py
_dict
def _dict(content):
    """
    Helper function that converts a text-based GET response to
    a python dictionary for additional manipulation.
    """
    if _has_pandas:
        data = _data_frame(content).to_dict(orient='records')
    else:
        response = loads(content)
        key = [x for x in response.keys() if x in c.response_data][0]
        data = response[key]
    return data
python
def _dict(content):
    """
    Helper function that converts a text-based GET response to
    a python dictionary for additional manipulation.
    """
    if _has_pandas:
        data = _data_frame(content).to_dict(orient='records')
    else:
        response = loads(content)
        key = [x for x in response.keys() if x in c.response_data][0]
        data = response[key]
    return data
[ "def", "_dict", "(", "content", ")", ":", "if", "_has_pandas", ":", "data", "=", "_data_frame", "(", "content", ")", ".", "to_dict", "(", "orient", "=", "'records'", ")", "else", ":", "response", "=", "loads", "(", "content", ")", "key", "=", "[", "x", "for", "x", "in", "response", ".", "keys", "(", ")", "if", "x", "in", "c", ".", "response_data", "]", "[", "0", "]", "data", "=", "response", "[", "key", "]", "return", "data" ]
Helper function that converts a text-based GET response to a python dictionary for additional manipulation.
[ "Helper", "funcation", "that", "converts", "text", "-", "based", "get", "response", "to", "a", "python", "dictionary", "for", "additional", "manipulation", "." ]
692bcf576e17bd1a81db2b7644f4f61aeb39e5c7
https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L61-L72
13,355
avelkoski/FRB
fred/helpers/__init__.py
_data_frame
def _data_frame(content):
    """
    Helper function that converts a text-based GET response to
    a pandas dataframe for additional manipulation.
    """
    response = loads(content)
    key = [x for x in response.keys() if x in c.response_data][0]
    frame = DataFrame(response[key])
    final_frame = _convert(frame)
    return final_frame
python
def _data_frame(content):
    """
    Helper function that converts a text-based GET response to
    a pandas dataframe for additional manipulation.
    """
    response = loads(content)
    key = [x for x in response.keys() if x in c.response_data][0]
    frame = DataFrame(response[key])
    final_frame = _convert(frame)
    return final_frame
[ "def", "_data_frame", "(", "content", ")", ":", "response", "=", "loads", "(", "content", ")", "key", "=", "[", "x", "for", "x", "in", "response", ".", "keys", "(", ")", "if", "x", "in", "c", ".", "response_data", "]", "[", "0", "]", "frame", "=", "DataFrame", "(", "response", "[", "key", "]", ")", "final_frame", "=", "_convert", "(", "frame", ")", "return", "final_frame" ]
Helper function that converts a text-based GET response to a pandas dataframe for additional manipulation.
[ "Helper", "funcation", "that", "converts", "text", "-", "based", "get", "response", "to", "a", "pandas", "dataframe", "for", "additional", "manipulation", "." ]
692bcf576e17bd1a81db2b7644f4f61aeb39e5c7
https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L74-L83
13,356
avelkoski/FRB
fred/helpers/__init__.py
_tab
def _tab(content):
    """
    Helper function that converts a text-based GET response to
    tab separated values for additional manipulation.
    """
    response = _data_frame(content).to_csv(index=False,sep='\t')
    return response
python
def _tab(content):
    """
    Helper function that converts a text-based GET response to
    tab separated values for additional manipulation.
    """
    response = _data_frame(content).to_csv(index=False,sep='\t')
    return response
[ "def", "_tab", "(", "content", ")", ":", "response", "=", "_data_frame", "(", "content", ")", ".", "to_csv", "(", "index", "=", "False", ",", "sep", "=", "'\\t'", ")", "return", "response" ]
Helper function that converts a text-based GET response to tab separated values for additional manipulation.
[ "Helper", "funcation", "that", "converts", "text", "-", "based", "get", "response", "to", "tab", "separated", "values", "for", "additional", "manipulation", "." ]
692bcf576e17bd1a81db2b7644f4f61aeb39e5c7
https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L93-L99
13,357
avelkoski/FRB
fred/helpers/__init__.py
_pipe
def _pipe(content):
    """
    Helper function that converts a text-based GET response to
    pipe separated values for additional manipulation.
    """
    response = _data_frame(content).to_csv(index=False,sep='|')
    return response
python
def _pipe(content):
    """
    Helper function that converts a text-based GET response to
    pipe separated values for additional manipulation.
    """
    response = _data_frame(content).to_csv(index=False,sep='|')
    return response
[ "def", "_pipe", "(", "content", ")", ":", "response", "=", "_data_frame", "(", "content", ")", ".", "to_csv", "(", "index", "=", "False", ",", "sep", "=", "'|'", ")", "return", "response" ]
Helper function that converts a text-based GET response to pipe separated values for additional manipulation.
[ "Helper", "funcation", "that", "converts", "text", "-", "based", "get", "response", "to", "pipe", "separated", "values", "for", "additional", "manipulation", "." ]
692bcf576e17bd1a81db2b7644f4f61aeb39e5c7
https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L101-L107
13,358
avelkoski/FRB
fred/helpers/__init__.py
_get_request
def _get_request(url_root,api_key,path,response_type,params, ssl_verify):
    """
    Helper function that makes a GET request to FRED.
    """
    url = _url_builder(url_root,api_key,path,params)
    content = _fetch(url, ssl_verify)
    response = _dispatch(response_type)(content)
    return response
python
def _get_request(url_root,api_key,path,response_type,params, ssl_verify):
    """
    Helper function that makes a GET request to FRED.
    """
    url = _url_builder(url_root,api_key,path,params)
    content = _fetch(url, ssl_verify)
    response = _dispatch(response_type)(content)
    return response
[ "def", "_get_request", "(", "url_root", ",", "api_key", ",", "path", ",", "response_type", ",", "params", ",", "ssl_verify", ")", ":", "url", "=", "_url_builder", "(", "url_root", ",", "api_key", ",", "path", ",", "params", ")", "content", "=", "_fetch", "(", "url", ",", "ssl_verify", ")", "response", "=", "_dispatch", "(", "response_type", ")", "(", "content", ")", "return", "response" ]
Helper function that makes a GET request to FRED.
[ "Helper", "funcation", "that", "requests", "a", "get", "response", "from", "FRED", "." ]
692bcf576e17bd1a81db2b7644f4f61aeb39e5c7
https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L141-L148
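_get_request composes the helpers in this module: _url_builder (defined elsewhere in the file) renders the query URL, _fetch performs the GET, and _dispatch maps a response_type string to one of the converters above. The dispatch table itself is not shown in this excerpt; the reconstruction below, including the key names, is only an assumption:

# Hypothetical: the real key names used by _dispatch may differ.
converters = {'dict': _dict, 'df': _data_frame, 'tab': _tab, 'pipe': _pipe}

def _dispatch(response_type):
    return converters[response_type]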
13,359
NicolasLM/atoma
atoma/atom.py
parse_atom_file
def parse_atom_file(filename: str) -> AtomFeed: """Parse an Atom feed from a local XML file.""" root = parse_xml(filename).getroot() return _parse_atom(root)
python
def parse_atom_file(filename: str) -> AtomFeed: """Parse an Atom feed from a local XML file.""" root = parse_xml(filename).getroot() return _parse_atom(root)
[ "def", "parse_atom_file", "(", "filename", ":", "str", ")", "->", "AtomFeed", ":", "root", "=", "parse_xml", "(", "filename", ")", ".", "getroot", "(", ")", "return", "_parse_atom", "(", "root", ")" ]
Parse an Atom feed from a local XML file.
[ "Parse", "an", "Atom", "feed", "from", "a", "local", "XML", "file", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/atom.py#L275-L278
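A usage sketch for parse_atom_file; the filename is hypothetical, and the entries and title.value attribute names are recalled from atoma's AtomFeed and AtomEntry dataclasses rather than shown in this excerpt:

feed = parse_atom_file('blog.atom.xml')  # hypothetical local file
for entry in feed.entries:               # assumed AtomFeed attribute
    print(entry.title.value)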
13,360
NicolasLM/atoma
atoma/atom.py
parse_atom_bytes
def parse_atom_bytes(data: bytes) -> AtomFeed: """Parse an Atom feed from a byte-string containing XML data.""" root = parse_xml(BytesIO(data)).getroot() return _parse_atom(root)
python
def parse_atom_bytes(data: bytes) -> AtomFeed: """Parse an Atom feed from a byte-string containing XML data.""" root = parse_xml(BytesIO(data)).getroot() return _parse_atom(root)
[ "def", "parse_atom_bytes", "(", "data", ":", "bytes", ")", "->", "AtomFeed", ":", "root", "=", "parse_xml", "(", "BytesIO", "(", "data", ")", ")", ".", "getroot", "(", ")", "return", "_parse_atom", "(", "root", ")" ]
Parse an Atom feed from a byte-string containing XML data.
[ "Parse", "an", "Atom", "feed", "from", "a", "byte", "-", "string", "containing", "XML", "data", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/atom.py#L281-L284
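The bytes variant pairs naturally with an HTTP fetch; the feed URL below is hypothetical:

import urllib.request

with urllib.request.urlopen('https://example.com/feed.atom') as resp:
    feed = parse_atom_bytes(resp.read())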
13,361
NicolasLM/atoma
atoma/rss.py
_get_link
def _get_link(element: Element) -> Optional[str]: """Attempt to retrieve item link. Use the GUID as a fallback if it is a permalink. """ link = get_text(element, 'link') if link is not None: return link guid = get_child(element, 'guid') if guid is not None and guid.attrib.get('isPermaLink') == 'true': return get_text(element, 'guid') return None
python
def _get_link(element: Element) -> Optional[str]: """Attempt to retrieve item link. Use the GUID as a fallback if it is a permalink. """ link = get_text(element, 'link') if link is not None: return link guid = get_child(element, 'guid') if guid is not None and guid.attrib.get('isPermaLink') == 'true': return get_text(element, 'guid') return None
[ "def", "_get_link", "(", "element", ":", "Element", ")", "->", "Optional", "[", "str", "]", ":", "link", "=", "get_text", "(", "element", ",", "'link'", ")", "if", "link", "is", "not", "None", ":", "return", "link", "guid", "=", "get_child", "(", "element", ",", "'guid'", ")", "if", "guid", "is", "not", "None", "and", "guid", ".", "attrib", ".", "get", "(", "'isPermaLink'", ")", "==", "'true'", ":", "return", "get_text", "(", "element", ",", "'guid'", ")", "return", "None" ]
Attempt to retrieve item link. Use the GUID as a fallback if it is a permalink.
[ "Attempt", "to", "retrieve", "item", "link", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/rss.py#L118-L131
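To make the isPermaLink fallback concrete, the same rule is restated below with plain ElementTree calls in place of atoma's get_text and get_child helpers, applied to a minimal item that has a permalink GUID but no link element:

import xml.etree.ElementTree as ET

def link_or_permalink(item):
    # Same fallback rule as _get_link above.
    link = item.findtext('link')
    if link is not None:
        return link
    guid = item.find('guid')
    if guid is not None and guid.attrib.get('isPermaLink') == 'true':
        return item.findtext('guid')
    return None

item = ET.fromstring(
    '<item><guid isPermaLink="true">http://example.com/post/1</guid></item>')
print(link_or_permalink(item))  # http://example.com/post/1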
13,362
NicolasLM/atoma
atoma/rss.py
parse_rss_file
def parse_rss_file(filename: str) -> RSSChannel: """Parse an RSS feed from a local XML file.""" root = parse_xml(filename).getroot() return _parse_rss(root)
python
def parse_rss_file(filename: str) -> RSSChannel: """Parse an RSS feed from a local XML file.""" root = parse_xml(filename).getroot() return _parse_rss(root)
[ "def", "parse_rss_file", "(", "filename", ":", "str", ")", "->", "RSSChannel", ":", "root", "=", "parse_xml", "(", "filename", ")", ".", "getroot", "(", ")", "return", "_parse_rss", "(", "root", ")" ]
Parse an RSS feed from a local XML file.
[ "Parse", "an", "RSS", "feed", "from", "a", "local", "XML", "file", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/rss.py#L212-L215
13,363
NicolasLM/atoma
atoma/rss.py
parse_rss_bytes
def parse_rss_bytes(data: bytes) -> RSSChannel: """Parse an RSS feed from a byte-string containing XML data.""" root = parse_xml(BytesIO(data)).getroot() return _parse_rss(root)
python
def parse_rss_bytes(data: bytes) -> RSSChannel: """Parse an RSS feed from a byte-string containing XML data.""" root = parse_xml(BytesIO(data)).getroot() return _parse_rss(root)
[ "def", "parse_rss_bytes", "(", "data", ":", "bytes", ")", "->", "RSSChannel", ":", "root", "=", "parse_xml", "(", "BytesIO", "(", "data", ")", ")", ".", "getroot", "(", ")", "return", "_parse_rss", "(", "root", ")" ]
Parse an RSS feed from a byte-string containing XML data.
[ "Parse", "an", "RSS", "feed", "from", "a", "byte", "-", "string", "containing", "XML", "data", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/rss.py#L218-L221
13,364
NicolasLM/atoma
atoma/json_feed.py
parse_json_feed_file
def parse_json_feed_file(filename: str) -> JSONFeed: """Parse a JSON feed from a local json file.""" with open(filename) as f: try: root = json.load(f) except json.decoder.JSONDecodeError: raise FeedJSONError('Not a valid JSON document') return parse_json_feed(root)
python
def parse_json_feed_file(filename: str) -> JSONFeed: """Parse a JSON feed from a local json file.""" with open(filename) as f: try: root = json.load(f) except json.decoder.JSONDecodeError: raise FeedJSONError('Not a valid JSON document') return parse_json_feed(root)
[ "def", "parse_json_feed_file", "(", "filename", ":", "str", ")", "->", "JSONFeed", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "try", ":", "root", "=", "json", ".", "load", "(", "f", ")", "except", "json", ".", "decoder", ".", "JSONDecodeError", ":", "raise", "FeedJSONError", "(", "'Not a valid JSON document'", ")", "return", "parse_json_feed", "(", "root", ")" ]
Parse a JSON feed from a local json file.
[ "Parse", "a", "JSON", "feed", "from", "a", "local", "json", "file", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/json_feed.py#L205-L213
13,365
NicolasLM/atoma
atoma/json_feed.py
parse_json_feed_bytes
def parse_json_feed_bytes(data: bytes) -> JSONFeed: """Parse a JSON feed from a byte-string containing JSON data.""" try: root = json.loads(data) except json.decoder.JSONDecodeError: raise FeedJSONError('Not a valid JSON document') return parse_json_feed(root)
python
def parse_json_feed_bytes(data: bytes) -> JSONFeed: """Parse a JSON feed from a byte-string containing JSON data.""" try: root = json.loads(data) except json.decoder.JSONDecodeError: raise FeedJSONError('Not a valid JSON document') return parse_json_feed(root)
[ "def", "parse_json_feed_bytes", "(", "data", ":", "bytes", ")", "->", "JSONFeed", ":", "try", ":", "root", "=", "json", ".", "loads", "(", "data", ")", "except", "json", ".", "decoder", ".", "JSONDecodeError", ":", "raise", "FeedJSONError", "(", "'Not a valid JSON document'", ")", "return", "parse_json_feed", "(", "root", ")" ]
Parse a JSON feed from a byte-string containing JSON data.
[ "Parse", "a", "JSON", "feed", "from", "a", "byte", "-", "string", "containing", "JSON", "data", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/json_feed.py#L216-L223
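A sketch of parsing JSON Feed data from memory. The minimal document carries the version, title and items fields the JSON Feed format requires, though whether it satisfies all of atoma's further validation is an assumption; FeedJSONError only signals that the bytes were not valid JSON at all:

raw = b'{"version": "https://jsonfeed.org/version/1", "title": "Demo", "items": []}'
try:
    feed = parse_json_feed_bytes(raw)
except FeedJSONError:
    feed = None  # the payload was not even valid JSON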
13,366
NicolasLM/atoma
atoma/opml.py
parse_opml_file
def parse_opml_file(filename: str) -> OPML: """Parse an OPML document from a local XML file.""" root = parse_xml(filename).getroot() return _parse_opml(root)
python
def parse_opml_file(filename: str) -> OPML: """Parse an OPML document from a local XML file.""" root = parse_xml(filename).getroot() return _parse_opml(root)
[ "def", "parse_opml_file", "(", "filename", ":", "str", ")", "->", "OPML", ":", "root", "=", "parse_xml", "(", "filename", ")", ".", "getroot", "(", ")", "return", "_parse_opml", "(", "root", ")" ]
Parse an OPML document from a local XML file.
[ "Parse", "an", "OPML", "document", "from", "a", "local", "XML", "file", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/opml.py#L82-L85
13,367
NicolasLM/atoma
atoma/opml.py
parse_opml_bytes
def parse_opml_bytes(data: bytes) -> OPML: """Parse an OPML document from a byte-string containing XML data.""" root = parse_xml(BytesIO(data)).getroot() return _parse_opml(root)
python
def parse_opml_bytes(data: bytes) -> OPML: """Parse an OPML document from a byte-string containing XML data.""" root = parse_xml(BytesIO(data)).getroot() return _parse_opml(root)
[ "def", "parse_opml_bytes", "(", "data", ":", "bytes", ")", "->", "OPML", ":", "root", "=", "parse_xml", "(", "BytesIO", "(", "data", ")", ")", ".", "getroot", "(", ")", "return", "_parse_opml", "(", "root", ")" ]
Parse an OPML document from a byte-string containing XML data.
[ "Parse", "an", "OPML", "document", "from", "a", "byte", "-", "string", "containing", "XML", "data", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/opml.py#L88-L91
13,368
NicolasLM/atoma
atoma/opml.py
get_feed_list
def get_feed_list(opml_obj: OPML) -> List[str]:
    """Walk an OPML document to extract the list of feeds it contains."""
    rv = list()

    def collect(obj):
        for outline in obj.outlines:
            if outline.type == 'rss' and outline.xml_url:
                rv.append(outline.xml_url)

            if outline.outlines:
                collect(outline)

    collect(opml_obj)
    return rv
python
def get_feed_list(opml_obj: OPML) -> List[str]:
    """Walk an OPML document to extract the list of feeds it contains."""
    rv = list()

    def collect(obj):
        for outline in obj.outlines:
            if outline.type == 'rss' and outline.xml_url:
                rv.append(outline.xml_url)

            if outline.outlines:
                collect(outline)

    collect(opml_obj)
    return rv
[ "def", "get_feed_list", "(", "opml_obj", ":", "OPML", ")", "->", "List", "[", "str", "]", ":", "rv", "=", "list", "(", ")", "def", "collect", "(", "obj", ")", ":", "for", "outline", "in", "obj", ".", "outlines", ":", "if", "outline", ".", "type", "==", "'rss'", "and", "outline", ".", "xml_url", ":", "rv", ".", "append", "(", "outline", ".", "xml_url", ")", "if", "outline", ".", "outlines", ":", "collect", "(", "outline", ")", "collect", "(", "opml_obj", ")", "return", "rv" ]
Walk an OPML document to extract the list of feeds it contains.
[ "Walk", "an", "OPML", "document", "to", "extract", "the", "list", "of", "feed", "it", "contains", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/opml.py#L94-L107
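get_feed_list pairs with the OPML parsers above to turn a subscriptions export into a flat list of feed URLs; the filename is hypothetical:

opml = parse_opml_file('subscriptions.opml')
for url in get_feed_list(opml):
    print(url)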
13,369
NicolasLM/atoma
atoma/simple.py
simple_parse_file
def simple_parse_file(filename: str) -> Feed: """Parse an Atom, RSS or JSON feed from a local file.""" pairs = ( (rss.parse_rss_file, _adapt_rss_channel), (atom.parse_atom_file, _adapt_atom_feed), (json_feed.parse_json_feed_file, _adapt_json_feed) ) return _simple_parse(pairs, filename)
python
def simple_parse_file(filename: str) -> Feed: """Parse an Atom, RSS or JSON feed from a local file.""" pairs = ( (rss.parse_rss_file, _adapt_rss_channel), (atom.parse_atom_file, _adapt_atom_feed), (json_feed.parse_json_feed_file, _adapt_json_feed) ) return _simple_parse(pairs, filename)
[ "def", "simple_parse_file", "(", "filename", ":", "str", ")", "->", "Feed", ":", "pairs", "=", "(", "(", "rss", ".", "parse_rss_file", ",", "_adapt_rss_channel", ")", ",", "(", "atom", ".", "parse_atom_file", ",", "_adapt_atom_feed", ")", ",", "(", "json_feed", ".", "parse_json_feed_file", ",", "_adapt_json_feed", ")", ")", "return", "_simple_parse", "(", "pairs", ",", "filename", ")" ]
Parse an Atom, RSS or JSON feed from a local file.
[ "Parse", "an", "Atom", "RSS", "or", "JSON", "feed", "from", "a", "local", "file", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/simple.py#L207-L214
13,370
NicolasLM/atoma
atoma/simple.py
simple_parse_bytes
def simple_parse_bytes(data: bytes) -> Feed: """Parse an Atom, RSS or JSON feed from a byte-string containing data.""" pairs = ( (rss.parse_rss_bytes, _adapt_rss_channel), (atom.parse_atom_bytes, _adapt_atom_feed), (json_feed.parse_json_feed_bytes, _adapt_json_feed) ) return _simple_parse(pairs, data)
python
def simple_parse_bytes(data: bytes) -> Feed: """Parse an Atom, RSS or JSON feed from a byte-string containing data.""" pairs = ( (rss.parse_rss_bytes, _adapt_rss_channel), (atom.parse_atom_bytes, _adapt_atom_feed), (json_feed.parse_json_feed_bytes, _adapt_json_feed) ) return _simple_parse(pairs, data)
[ "def", "simple_parse_bytes", "(", "data", ":", "bytes", ")", "->", "Feed", ":", "pairs", "=", "(", "(", "rss", ".", "parse_rss_bytes", ",", "_adapt_rss_channel", ")", ",", "(", "atom", ".", "parse_atom_bytes", ",", "_adapt_atom_feed", ")", ",", "(", "json_feed", ".", "parse_json_feed_bytes", ",", "_adapt_json_feed", ")", ")", "return", "_simple_parse", "(", "pairs", ",", "data", ")" ]
Parse an Atom, RSS or JSON feed from a byte-string containing data.
[ "Parse", "an", "Atom", "RSS", "or", "JSON", "feed", "from", "a", "byte", "-", "string", "containing", "data", "." ]
16c6956112f975eb2ce774b2d5f8e9ddffde569f
https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/simple.py#L217-L224
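Per the pairs tuples above, the simple_parse_* functions try RSS first, then Atom, then JSON Feed, adapting whichever succeeds into the unified Feed type. A sketch with a hypothetical file of unknown format (the title attribute on Feed is recalled from atoma.simple, not shown here):

with open('some_feed', 'rb') as f:
    feed = simple_parse_bytes(f.read())  # tries RSS, then Atom, then JSON Feed
print(feed.title)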
13,371
Atomistica/atomistica
src/python/atomistica/deformation.py
get_shear_distance
def get_shear_distance(a):
    """
    Returns the distance a volume has moved during simple shear. Considers
    either Lees-Edwards boundary conditions or sheared cells.
    """
    cx, cy, cz = a.cell
    if 'shear_dx' in a.info:
        assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1])
        assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2])
        assert abs(cy[0]) < 1e-12, 'cy[0] = {0}'.format(cy[0])
        assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2])
        assert abs(cz[0]) < 1e-12, 'cz[0] = {0}'.format(cz[0])
        assert abs(cz[1]) < 1e-12, 'cz[1] = {0}'.format(cz[1])
        dx, dy, dz = a.info['shear_dx']
    else:
        assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1])
        assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2])
        assert abs(cy[0]) < 1e-12, 'cy[0] = {0}'.format(cy[0])
        assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2])
        dx, dy, dz = cz
    return dx, dy
python
def get_shear_distance(a): """ Returns the distance a volume has moved during simple shear. Considers either Lees-Edwards boundary conditions or sheared cells. """ cx, cy, cz = a.cell if 'shear_dx' in a.info: assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1]) assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2]) assert abs(cy[0]) < 1e-12, 'cy[0] = {0}'.format(cy[0]) assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2]) assert abs(cz[0]) < 1e-12, 'cz[0] = {0}'.format(cz[0]) assert abs(cz[1]) < 1e-12, 'cz[1] = {0}'.format(cz[1]) dx, dy, dz = a.info['shear_dx'] else: assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1]) assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2]) assert abs(cy[0]) < 1e-12, 'cy[0] = {0}'.format(cy[0]) assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2]) dx, dy, dz = cz return dx, dy
[ "def", "get_shear_distance", "(", "a", ")", ":", "cx", ",", "cy", ",", "cz", "=", "a", ".", "cell", "if", "'shear_dx'", "in", "a", ".", "info", ":", "assert", "abs", "(", "cx", "[", "1", "]", ")", "<", "1e-12", ",", "'cx[1] = {0}'", ".", "format", "(", "cx", "[", "1", "]", ")", "assert", "abs", "(", "cx", "[", "2", "]", ")", "<", "1e-12", ",", "'cx[2] = {0}'", ".", "format", "(", "cx", "[", "2", "]", ")", "assert", "abs", "(", "cy", "[", "0", "]", ")", "<", "1e-12", ",", "'cx[0] = {0}'", ".", "format", "(", "cy", "[", "0", "]", ")", "assert", "abs", "(", "cy", "[", "2", "]", ")", "<", "1e-12", ",", "'cy[2] = {0}'", ".", "format", "(", "cy", "[", "2", "]", ")", "assert", "abs", "(", "cz", "[", "0", "]", ")", "<", "1e-12", ",", "'cz[0] = {0}'", ".", "format", "(", "cz", "[", "0", "]", ")", "assert", "abs", "(", "cz", "[", "1", "]", ")", "<", "1e-12", ",", "'cz[1] = {0}'", ".", "format", "(", "cz", "[", "1", "]", ")", "dx", ",", "dy", ",", "dz", "=", "a", ".", "info", "[", "'shear_dx'", "]", "else", ":", "assert", "abs", "(", "cx", "[", "1", "]", ")", "<", "1e-12", ",", "'cx[1] = {0}'", ".", "format", "(", "cx", "[", "1", "]", ")", "assert", "abs", "(", "cx", "[", "2", "]", ")", "<", "1e-12", ",", "'cx[2] = {0}'", ".", "format", "(", "cx", "[", "2", "]", ")", "assert", "abs", "(", "cy", "[", "0", "]", ")", "<", "1e-12", ",", "'cy[0] = {0}'", ".", "format", "(", "cy", "[", "0", "]", ")", "assert", "abs", "(", "cy", "[", "2", "]", ")", "<", "1e-12", ",", "'cy[2] = {0}'", ".", "format", "(", "cy", "[", "2", "]", ")", "dx", ",", "dy", ",", "sz", "=", "cz", "return", "dx", ",", "dy" ]
Returns the distance a volume has moved during simple shear. Considers either Lees-Edwards boundary conditions or sheared cells.
[ "Returns", "the", "distance", "a", "volume", "has", "moved", "during", "simple", "shear", ".", "Considers", "either", "Lees", "-", "Edwards", "boundary", "conditions", "or", "sheared", "cells", "." ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/deformation.py#L30-L50
13,372
Atomistica/atomistica
src/python/atomistica/atomic_strain.py
array_inverse
def array_inverse(A): """ Compute inverse for each matrix in a list of matrices. This is faster than calling numpy.linalg.inv for each matrix. """ A = np.ascontiguousarray(A, dtype=float) n_eq = A.shape[1] n_rhs = A.shape[2] identity = np.eye(n_eq) # Solve a X = I for each matrix a via LAPACK dgesv; note that # np.linalg.lapack_lite is a private NumPy module. def lapack_inverse(a): b = np.copy(identity) pivots = np.zeros(n_eq, np.intc) results = np.linalg.lapack_lite.dgesv(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0) if results['info'] > 0: raise np.linalg.LinAlgError('Singular matrix') return b return np.array([lapack_inverse(a) for a in A])
python
def array_inverse(A): """ Compute inverse for each matrix in a list of matrices. This is faster than calling numpy.linalg.inv for each matrix. """ A = np.ascontiguousarray(A, dtype=float) n_eq = A.shape[1] n_rhs = A.shape[2] identity = np.eye(n_eq) # Solve a X = I for each matrix a via LAPACK dgesv; note that # np.linalg.lapack_lite is a private NumPy module. def lapack_inverse(a): b = np.copy(identity) pivots = np.zeros(n_eq, np.intc) results = np.linalg.lapack_lite.dgesv(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0) if results['info'] > 0: raise np.linalg.LinAlgError('Singular matrix') return b return np.array([lapack_inverse(a) for a in A])
[ "def", "array_inverse", "(", "A", ")", ":", "A", "=", "np", ".", "ascontiguousarray", "(", "A", ",", "dtype", "=", "float", ")", "b", "=", "np", ".", "identity", "(", "A", ".", "shape", "[", "2", "]", ",", "dtype", "=", "A", ".", "dtype", ")", "n_eq", "=", "A", ".", "shape", "[", "1", "]", "n_rhs", "=", "A", ".", "shape", "[", "2", "]", "pivots", "=", "np", ".", "zeros", "(", "n_eq", ",", "np", ".", "intc", ")", "identity", "=", "np", ".", "eye", "(", "n_eq", ")", "def", "lapack_inverse", "(", "a", ")", ":", "b", "=", "np", ".", "copy", "(", "identity", ")", "pivots", "=", "np", ".", "zeros", "(", "n_eq", ",", "np", ".", "intc", ")", "results", "=", "np", ".", "linalg", ".", "lapack_lite", ".", "dgesv", "(", "n_eq", ",", "n_rhs", ",", "a", ",", "n_eq", ",", "pivots", ",", "b", ",", "n_eq", ",", "0", ")", "if", "results", "[", "'info'", "]", ">", "0", ":", "raise", "np", ".", "linalg", ".", "LinAlgError", "(", "'Singular matrix'", ")", "return", "b", "return", "np", ".", "array", "(", "[", "lapack_inverse", "(", "a", ")", "for", "a", "in", "A", "]", ")" ]
Compute inverse for each matrix in a list of matrices. This is faster than calling numpy.linalg.inv for each matrix.
[ "Compute", "inverse", "for", "each", "matrix", "in", "a", "list", "of", "matrices", ".", "This", "is", "faster", "than", "calling", "numpy", ".", "linalg", ".", "inv", "for", "each", "matrix", "." ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/atomic_strain.py#L66-L86
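As a hedged aside: on NumPy 1.8 and later, np.linalg.inv itself broadcasts over a stack of matrices, which avoids the private lapack_lite module used above. A minimal sketch with made-up test data:

import numpy as np

A = np.random.rand(10, 3, 3) + 3.0 * np.eye(3)   # well-conditioned stack of 3x3 matrices
inv_batched = np.linalg.inv(A)                   # inverts all ten matrices at once
# Verify: A[n] @ inv_batched[n] should be the identity for every n.
assert np.allclose(np.einsum('nij,njk->nik', A, inv_batched),
                   np.tile(np.eye(3), (10, 1, 1)), atol=1e-10)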
13,373
Atomistica/atomistica
src/python/atomistica/atomic_strain.py
get_delta_plus_epsilon
def get_delta_plus_epsilon(nat, i_now, dr_now, dr_old): """ Calculate delta_ij+epsilon_ij, i.e. the deformation gradient matrix """ XIJ = get_XIJ(nat, i_now, dr_now, dr_old) YIJ = get_YIJ(nat, i_now, dr_old) YIJ_invert = array_inverse(YIJ) # Perform sum_k X_ik Y_jk^-1 epsilon = np.sum(XIJ.reshape(-1,3,1,3)*YIJ_invert.reshape(-1,1,3,3), axis=3) return epsilon
python
def get_delta_plus_epsilon(nat, i_now, dr_now, dr_old): """ Calculate delta_ij+epsilon_ij, i.e. the deformation gradient matrix """ XIJ = get_XIJ(nat, i_now, dr_now, dr_old) YIJ = get_YIJ(nat, i_now, dr_old) YIJ_invert = array_inverse(YIJ) # Perform sum_k X_ik Y_jk^-1 epsilon = np.sum(XIJ.reshape(-1,3,1,3)*YIJ_invert.reshape(-1,1,3,3), axis=3) return epsilon
[ "def", "get_delta_plus_epsilon", "(", "nat", ",", "i_now", ",", "dr_now", ",", "dr_old", ")", ":", "XIJ", "=", "get_XIJ", "(", "nat", ",", "i_now", ",", "dr_now", ",", "dr_old", ")", "YIJ", "=", "get_YIJ", "(", "nat", ",", "i_now", ",", "dr_old", ")", "YIJ_invert", "=", "array_inverse", "(", "YIJ", ")", "# Perform sum_k X_ik Y_jk^-1", "epsilon", "=", "np", ".", "sum", "(", "XIJ", ".", "reshape", "(", "-", "1", ",", "3", ",", "1", ",", "3", ")", "*", "YIJ_invert", ".", "reshape", "(", "-", "1", ",", "1", ",", "3", ",", "3", ")", ",", "axis", "=", "3", ")", "return", "epsilon" ]
Calculate delta_ij+epsilon_ij, i.e. the deformation gradient matrix
[ "Calculate", "delta_ij", "+", "epsilon_ij", "i", ".", "e", ".", "the", "deformation", "gradient", "matrix" ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/atomic_strain.py#L89-L101
13,374
Atomistica/atomistica
src/python/atomistica/atomic_strain.py
get_D_square_min
def get_D_square_min(atoms_now, atoms_old, i_now, j_now, delta_plus_epsilon=None): """ Calculate the D^2_min norm of Falk and Langer """ nat = len(atoms_now) assert len(atoms_now) == len(atoms_old) pos_now = atoms_now.positions pos_old = atoms_old.positions # Compute current and old distance vectors. Note that current distance # vectors cannot be taken from the neighbor calculation, because neighbors # are calculated from the sheared cell while these distances need to come # from the unsheared cell. Taking the distance from the unsheared cell # makes periodic boundary conditions (and flipping of the cell) a lot easier. dr_now = mic(pos_now[i_now] - pos_now[j_now], atoms_now.cell) dr_old = mic(pos_old[i_now] - pos_old[j_now], atoms_old.cell) # Sanity check: Shape needs to be identical! assert dr_now.shape == dr_old.shape if delta_plus_epsilon is None: # Get minimum strain tensor delta_plus_epsilon = get_delta_plus_epsilon(nat, i_now, dr_now, dr_old) # Spread epsilon out for each neighbor index delta_plus_epsilon_n = delta_plus_epsilon[i_now] # Compute D^2_min d_sq_n = np.sum( ( dr_now - np.sum(delta_plus_epsilon_n.reshape(-1,3,3)*dr_old.reshape(-1,1,3), axis=2) )**2, axis=1) # For each atom, sum over all neighbors d_sq = np.bincount(i_now, weights=d_sq_n) return delta_plus_epsilon, d_sq
python
def get_D_square_min(atoms_now, atoms_old, i_now, j_now, delta_plus_epsilon=None): """ Calculate the D^2_min norm of Falk and Langer """ nat = len(atoms_now) assert len(atoms_now) == len(atoms_old) pos_now = atoms_now.positions pos_old = atoms_old.positions # Compute current and old distance vectors. Note that current distance # vectors cannot be taken from the neighbor calculation, because neighbors # are calculated from the sheared cell while these distances need to come # from the unsheared cell. Taking the distance from the unsheared cell # makes periodic boundary conditions (and flipping of the cell) a lot easier. dr_now = mic(pos_now[i_now] - pos_now[j_now], atoms_now.cell) dr_old = mic(pos_old[i_now] - pos_old[j_now], atoms_old.cell) # Sanity check: Shape needs to be identical! assert dr_now.shape == dr_old.shape if delta_plus_epsilon is None: # Get minimum strain tensor delta_plus_epsilon = get_delta_plus_epsilon(nat, i_now, dr_now, dr_old) # Spread epsilon out for each neighbor index delta_plus_epsilon_n = delta_plus_epsilon[i_now] # Compute D^2_min d_sq_n = np.sum( ( dr_now - np.sum(delta_plus_epsilon_n.reshape(-1,3,3)*dr_old.reshape(-1,1,3), axis=2) )**2, axis=1) # For each atom, sum over all neighbors d_sq = np.bincount(i_now, weights=d_sq_n) return delta_plus_epsilon, d_sq
[ "def", "get_D_square_min", "(", "atoms_now", ",", "atoms_old", ",", "i_now", ",", "j_now", ",", "delta_plus_epsilon", "=", "None", ")", ":", "nat", "=", "len", "(", "atoms_now", ")", "assert", "len", "(", "atoms_now", ")", "==", "len", "(", "atoms_old", ")", "pos_now", "=", "atoms_now", ".", "positions", "pos_old", "=", "atoms_old", ".", "positions", "# Compute current and old distance vectors. Note that current distance", "# vectors cannot be taken from the neighbor calculation, because neighbors", "# are calculated from the sheared cell while these distance need to come", "# from the unsheared cell. Taking the distance from the unsheared cell", "# make periodic boundary conditions (and flipping of cell) a lot easier.", "dr_now", "=", "mic", "(", "pos_now", "[", "i_now", "]", "-", "pos_now", "[", "j_now", "]", ",", "atoms_now", ".", "cell", ")", "dr_old", "=", "mic", "(", "pos_old", "[", "i_now", "]", "-", "pos_old", "[", "j_now", "]", ",", "atoms_old", ".", "cell", ")", "# Sanity check: Shape needs to be identical!", "assert", "dr_now", ".", "shape", "==", "dr_old", ".", "shape", "if", "delta_plus_epsilon", "is", "None", ":", "# Get minimum strain tensor", "delta_plus_epsilon", "=", "get_delta_plus_epsilon", "(", "nat", ",", "i_now", ",", "dr_now", ",", "dr_old", ")", "# Spread epsilon out for each neighbor index", "delta_plus_epsilon_n", "=", "delta_plus_epsilon", "[", "i_now", "]", "# Compute D^2_min", "d_sq_n", "=", "np", ".", "sum", "(", "(", "dr_now", "-", "np", ".", "sum", "(", "delta_plus_epsilon_n", ".", "reshape", "(", "-", "1", ",", "3", ",", "3", ")", "*", "dr_old", ".", "reshape", "(", "-", "1", ",", "1", ",", "3", ")", ",", "axis", "=", "2", ")", ")", "**", "2", ",", "axis", "=", "1", ")", "# For each atom, sum over all neighbors", "d_sq", "=", "np", ".", "bincount", "(", "i_now", ",", "weights", "=", "d_sq_n", ")", "return", "delta_plus_epsilon", ",", "d_sq" ]
Calculate the D^2_min norm of Falk and Langer
[ "Calculate", "the", "D^2_min", "norm", "of", "Falk", "and", "Langer" ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/atomic_strain.py#L104-L144
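For reference, a transcription of what the two functions above compute, in the notation of Falk and Langer; the X and Y symbols follow the get_XIJ/get_YIJ helpers referenced in the code, and Y is assumed symmetric (a sum of outer products of old distance vectors):

\[
(\delta+\epsilon)_i = X_i\,Y_i^{-1}, \qquad
X_i = \sum_{j} \mathbf{r}^{\mathrm{now}}_{ij} \otimes \mathbf{r}^{\mathrm{old}}_{ij}, \qquad
Y_i = \sum_{j} \mathbf{r}^{\mathrm{old}}_{ij} \otimes \mathbf{r}^{\mathrm{old}}_{ij},
\]
\[
D^2_{\mathrm{min},i} = \sum_{j} \bigl\lVert \mathbf{r}^{\mathrm{now}}_{ij} - (\delta+\epsilon)_i\,\mathbf{r}^{\mathrm{old}}_{ij} \bigr\rVert^2 .
\]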
13,375
Atomistica/atomistica
src/python/atomistica/hardware.py
dhms
def dhms(secs): """return days, hours, minutes and seconds""" dhms = [0, 0, 0, 0] dhms[0] = int(secs // 86400) s = secs % 86400 dhms[1] = int(s // 3600) s = secs % 3600 dhms[2] = int(s // 60) s = secs % 60 dhms[3] = int(s+.5) return dhms
python
def dhms(secs): """return days, hours, minutes and seconds""" dhms = [0, 0, 0, 0] dhms[0] = int(secs // 86400) s = secs % 86400 dhms[1] = int(s // 3600) s = secs % 3600 dhms[2] = int(s // 60) s = secs % 60 dhms[3] = int(s+.5) return dhms
[ "def", "dhms", "(", "secs", ")", ":", "dhms", "=", "[", "0", ",", "0", ",", "0", ",", "0", "]", "dhms", "[", "0", "]", "=", "int", "(", "secs", "//", "86400", ")", "s", "=", "secs", "%", "86400", "dhms", "[", "1", "]", "=", "int", "(", "s", "//", "3600", ")", "s", "=", "secs", "%", "3600", "dhms", "[", "2", "]", "=", "int", "(", "s", "//", "60", ")", "s", "=", "secs", "%", "60", "dhms", "[", "3", "]", "=", "int", "(", "s", "+", ".5", ")", "return", "dhms" ]
return days, hours, minutes and seconds
[ "return", "days", "hours", "minutes", "and", "seconds" ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/hardware.py#L52-L62
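A couple of hand-worked checks for dhms, assuming the function above is in scope; note the rounding quirk where the seconds slot can come out as 60 rather than carrying into the minutes:

assert dhms(90061) == [1, 1, 1, 1]    # 1 day + 1 hour + 1 minute + 1 second
assert dhms(59.6) == [0, 0, 0, 60]    # seconds are rounded, not carried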
13,376
Atomistica/atomistica
src/python/atomistica/hardware.py
hms
def hms(secs): """return hours, minutes and seconds""" hms = [0, 0, 0] hms[0] = int(secs // 3600) s = secs % 3600 hms[1] = int(s // 60) s = secs % 60 hms[2] = int(s+.5) return hms
python
def hms(secs): """return hours, minutes and seconds""" hms = [0, 0, 0] hms[0] = int(secs // 3600) s = secs % 3600 hms[1] = int(s // 60) s = secs % 60 hms[2] = int(s+.5) return hms
[ "def", "hms", "(", "secs", ")", ":", "hms", "=", "[", "0", ",", "0", ",", "0", "]", "hms", "[", "0", "]", "=", "int", "(", "secs", "//", "3600", ")", "s", "=", "secs", "%", "3600", "hms", "[", "1", "]", "=", "int", "(", "s", "//", "60", ")", "s", "=", "secs", "%", "60", "hms", "[", "2", "]", "=", "int", "(", "s", "+", ".5", ")", "return", "hms" ]
return hours, minutes and seconds
[ "return", "hours", "minutes", "and", "seconds" ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/hardware.py#L65-L73
13,377
Atomistica/atomistica
src/python/atomistica/analysis.py
get_enclosing_orthorhombic_box
def get_enclosing_orthorhombic_box(cell): """ Return lower and upper bounds of the orthorhombic box that encloses the parallelepiped spanned by the three cell vectors of cell. """ # Cell vectors cx, cy, cz = cell # The cell has eight corners, one is at the origin, three at cx, cy, cz # and the last ones are... c1 = cx+cy c2 = cx+cz c3 = cy+cz c4 = cx+cy+cz corners = np.array([[0,0,0],cx,cy,cz,c1,c2,c3,c4]) lower = np.min(corners, axis=0) upper = np.max(corners, axis=0) return lower, upper
python
def get_enclosing_orthorhombic_box(cell): """ Return lower and upper bounds of the orthorhombic box that encloses the parallelepiped spanned by the three cell vectors of cell. """ # Cell vectors cx, cy, cz = cell # The cell has eight corners, one is at the origin, three at cx, cy, cz # and the last ones are... c1 = cx+cy c2 = cx+cz c3 = cy+cz c4 = cx+cy+cz corners = np.array([[0,0,0],cx,cy,cz,c1,c2,c3,c4]) lower = np.min(corners, axis=0) upper = np.max(corners, axis=0) return lower, upper
[ "def", "get_enclosing_orthorhombic_box", "(", "cell", ")", ":", "# Cell vectors", "cx", ",", "cy", ",", "cz", "=", "cell", "# The cell has eight corners, one is at the origin, three at cx, cy, cz", "# and the last ones are...", "c1", "=", "cx", "+", "cy", "c2", "=", "cx", "+", "cz", "c3", "=", "cy", "+", "cz", "c4", "=", "cx", "+", "cy", "+", "cz", "corners", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", ",", "0", "]", ",", "cx", ",", "cy", ",", "cz", ",", "c1", ",", "c2", ",", "c3", ",", "c4", "]", ")", "lower", "=", "np", ".", "min", "(", "corners", ",", "axis", "=", "0", ")", "upper", "=", "np", ".", "max", "(", "corners", ",", "axis", "=", "0", ")", "return", "lower", ",", "upper" ]
Return lower and upper bounds of the orthorhombic box that encloses the parallelepiped spanned by the three cell vectors of cell.
[ "Return", "lower", "and", "upper", "bounds", "of", "the", "orthorhombic", "box", "that", "encloses", "the", "parallelepiped", "spanned", "by", "the", "three", "cell", "vectors", "of", "cell", "." ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/analysis.py#L38-L58
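A small sketch of the bounding-box computation for a tilted (triclinic) cell, assuming the function above is in scope; the cell matrix is a made-up example, with rows as cell vectors as in ASE:

import numpy as np

cell = np.array([[10.0, 0.0, 0.0],
                 [ 2.0, 8.0, 0.0],    # second cell vector tilted along x
                 [ 0.0, 0.0, 5.0]])
lower, upper = get_enclosing_orthorhombic_box(cell)
# lower == [0, 0, 0] and upper == [12, 8, 5]: the tilt widens the x extent.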
13,378
Atomistica/atomistica
src/python/atomistica/analysis.py
stress_invariants
def stress_invariants(s): """Receives a list of stress tensors and returns the three invariants. Return hydrostatic pressure, octahedral shear stress and J3 """ s = np.asarray(s) if s.shape == (6,): s = s.reshape(1,-1) elif s.shape == (3,3): s = s.reshape(1,3,3) if len(s.shape) == 3: s = np.transpose([s[:,0,0],s[:,1,1],s[:,2,2], (s[:,0,1]+s[:,1,0])/2, (s[:,1,2]+s[:,2,1])/2, (s[:,2,0]+s[:,0,2])/2]) I1 = s[:,0]+s[:,1]+s[:,2] I2 = s[:,0]*s[:,1]+s[:,1]*s[:,2]+s[:,2]*s[:,0]-s[:,3]**2-s[:,4]**2-s[:,5]**2 I3 = s[:,0]*s[:,1]*s[:,2]+2*s[:,3]*s[:,4]*s[:,5]-s[:,3]**2*s[:,2]-s[:,4]**2*s[:,0]-s[:,5]**2*s[:,1] J2 = I1**2/3-I2 J3 = 2*I1**3/27-I1*I2/3+I3 # Return hydrostatic pressure, octahedral shear stress and J3 return -I1/3, np.sqrt(2*J2/3), J3
python
def stress_invariants(s): """Receives a list of stress tensors and returns the three invariants. Return hydrostatic pressure, octahedral shear stress and J3 """ s = np.asarray(s) if s.shape == (6,): s = s.reshape(1,-1) elif s.shape == (3,3): s = s.reshape(1,3,3) if len(s.shape) == 3: s = np.transpose([s[:,0,0],s[:,1,1],s[:,2,2], (s[:,0,1]+s[:,1,0])/2, (s[:,1,2]+s[:,2,1])/2, (s[:,2,0]+s[:,0,2])/2]) I1 = s[:,0]+s[:,1]+s[:,2] I2 = s[:,0]*s[:,1]+s[:,1]*s[:,2]+s[:,2]*s[:,0]-s[:,3]**2-s[:,4]**2-s[:,5]**2 I3 = s[:,0]*s[:,1]*s[:,2]+2*s[:,3]*s[:,4]*s[:,5]-s[:,3]**2*s[:,2]-s[:,4]**2*s[:,0]-s[:,5]**2*s[:,1] J2 = I1**2/3-I2 J3 = 2*I1**3/27-I1*I2/3+I3 # Return hydrostatic pressure, octahedral shear stress and J3 return -I1/3, np.sqrt(2*J2/3), J3
[ "def", "stress_invariants", "(", "s", ")", ":", "s", "=", "np", ".", "asarray", "(", "s", ")", "if", "s", ".", "shape", "==", "(", "6", ",", ")", ":", "s", "=", "s", ".", "reshape", "(", "1", ",", "-", "1", ")", "elif", "s", ".", "shape", "==", "(", "3", ",", "3", ")", ":", "s", "=", "s", ".", "reshape", "(", "1", ",", "-", "1", ",", "-", "1", ")", "if", "len", "(", "s", ".", "shape", ")", "==", "3", ":", "s", "=", "np", ".", "transpose", "(", "[", "s", "[", ":", ",", "0", ",", "0", "]", ",", "s", "[", ":", ",", "1", ",", "1", "]", ",", "s", "[", ":", ",", "2", ",", "2", "]", ",", "(", "s", "[", ":", ",", "0", ",", "1", "]", "+", "s", "[", ":", ",", "1", ",", "0", "]", ")", "/", "2", ",", "(", "s", "[", ":", ",", "1", ",", "2", "]", "+", "s", "[", ":", ",", "2", ",", "1", "]", ")", "/", "2", ",", "(", "s", "[", ":", ",", "2", ",", "0", "]", "+", "s", "[", ":", ",", "0", ",", "2", "]", ")", "/", "2", "]", ")", "I1", "=", "s", "[", ":", ",", "0", "]", "+", "s", "[", ":", ",", "1", "]", "+", "s", "[", ":", ",", "2", "]", "I2", "=", "s", "[", ":", ",", "0", "]", "*", "s", "[", ":", ",", "1", "]", "+", "s", "[", ":", ",", "1", "]", "*", "s", "[", ":", ",", "2", "]", "+", "s", "[", ":", ",", "2", "]", "*", "s", "[", ":", ",", "0", "]", "-", "s", "[", ":", ",", "3", "]", "**", "2", "-", "s", "[", ":", ",", "4", "]", "**", "2", "-", "s", "[", ":", ",", "5", "]", "**", "2", "I3", "=", "s", "[", ":", ",", "0", "]", "*", "s", "[", ":", ",", "1", "]", "*", "s", "[", ":", ",", "2", "]", "+", "2", "*", "s", "[", ":", ",", "3", "]", "*", "s", "[", ":", ",", "4", "]", "*", "s", "[", ":", ",", "5", "]", "-", "s", "[", ":", ",", "3", "]", "**", "2", "*", "s", "[", ":", ",", "2", "]", "-", "s", "[", ":", ",", "4", "]", "**", "2", "*", "s", "[", ":", ",", "0", "]", "-", "s", "[", ":", ",", "5", "]", "**", "2", "*", "s", "[", ":", ",", "1", "]", "J2", "=", "I1", "**", "2", "/", "3", "-", "I2", "J3", "=", "2", "*", "I1", "**", "3", "/", "27", "-", "I1", "*", "I2", "/", "3", "+", "I3", "# Return hydrostatic pressure, octahedral shear stress and J3", "return", "-", "I1", "/", "3", ",", "np", ".", "sqrt", "(", "2", "*", "J2", "/", "3", ")", ",", "J3" ]
Receives a list of stress tensors and returns the three invariants. Return hydrostatic pressure, octahedral shear stress and J3
[ "Receives", "a", "list", "of", "stress", "tensors", "and", "returns", "the", "three", "invariants", ".", "Return", "hydrostatic", "pressure", "octahedral", "shear", "stress", "and", "J3" ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/analysis.py#L181-L203
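Written out, with the Voigt ordering (xx, yy, zz, xy, yz, zx) inferred from the symmetrisation step above:

\[
I_1 = s_{xx}+s_{yy}+s_{zz},\qquad
I_2 = s_{xx}s_{yy}+s_{yy}s_{zz}+s_{zz}s_{xx}-s_{xy}^{2}-s_{yz}^{2}-s_{zx}^{2},
\]
\[
I_3 = s_{xx}s_{yy}s_{zz}+2\,s_{xy}s_{yz}s_{zx}-s_{xy}^{2}s_{zz}-s_{yz}^{2}s_{xx}-s_{zx}^{2}s_{yy},
\]
\[
J_2 = \tfrac{1}{3}I_1^{2}-I_2,\qquad
J_3 = \tfrac{2}{27}I_1^{3}-\tfrac{1}{3}I_1 I_2+I_3,
\]
and the function returns the hydrostatic pressure \(p=-I_1/3\), the octahedral shear stress \(\tau_{\mathrm{oct}}=\sqrt{2J_2/3}\), and \(J_3\).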
13,379
Atomistica/atomistica
tools/meta.py
scanmeta
def scanmeta(f): """Scan file headers for @meta ... @endmeta information and store that into a dictionary. """ if isinstance(f, str): f = io.open(f, mode='r', encoding='latin-1') done = False l = f.readline() s = None while l and s is None: i = l.find('!') if i >= 0: l = l[i+1:] i = l.find('@meta') if i >= 0: l = l[i+5:] i = l.find('@endmeta') if i >= 0: s = l[:i] done = True else: s = l l = f.readline() if not done and not l: return {} while l and not done: i = l.find('!') if i >= 0: l = l[i+1:] i = l.find('@endmeta') if i >= 0: s += ' '+l[:i] done = True else: s += ' '+l l = f.readline() s = map(lambda x: x.split(':'), s.split()) d = {} for x in s: if len(x) > 2 or len(x) == 0: raise RuntimeError('Syntax error in meta information.') elif len(x) == 2: d[x[0]] = x[1] else: d[x[0]] = None return d
python
def scanmeta(f): """Scan file headers for @meta ... @endmeta information and store that into a dictionary. """ if isinstance(f, str): f = io.open(f, mode='r', encoding='latin-1') done = False l = f.readline() s = None while l and s is None: i = l.find('!') if i >= 0: l = l[i+1:] i = l.find('@meta') if i >= 0: l = l[i+5:] i = l.find('@endmeta') if i >= 0: s = l[:i] done = True else: s = l l = f.readline() if not done and not l: return {} while l and not done: i = l.find('!') if i >= 0: l = l[i+1:] i = l.find('@endmeta') if i >= 0: s += ' '+l[:i] done = True else: s += ' '+l l = f.readline() s = map(lambda x: x.split(':'), s.split()) d = {} for x in s: if len(x) > 2 or len(x) == 0: raise RuntimeError('Syntax error in meta information.') elif len(x) == 2: d[x[0]] = x[1] else: d[x[0]] = None return d
[ "def", "scanmeta", "(", "f", ")", ":", "print", "(", "f", ")", "if", "isinstance", "(", "f", ",", "str", ")", ":", "f", "=", "io", ".", "open", "(", "f", ",", "mode", "=", "'r'", ",", "encoding", "=", "'latin-1'", ")", "done", "=", "False", "l", "=", "f", ".", "readline", "(", ")", "s", "=", "None", "while", "l", "and", "s", "is", "None", ":", "i", "=", "l", ".", "find", "(", "'!'", ")", "if", "i", ">=", "0", ":", "l", "=", "l", "[", "i", "+", "1", ":", "]", "i", "=", "l", ".", "find", "(", "'@meta'", ")", "if", "i", ">=", "0", ":", "l", "=", "l", "[", "i", "+", "5", ":", "]", "i", "=", "l", ".", "find", "(", "'@endmeta'", ")", "if", "i", ">=", "0", ":", "s", "=", "l", "[", ":", "i", "]", "done", "=", "True", "else", ":", "s", "=", "l", "l", "=", "f", ".", "readline", "(", ")", "if", "not", "done", "and", "not", "l", ":", "return", "{", "}", "while", "l", "and", "not", "done", ":", "i", "=", "l", ".", "find", "(", "'!'", ")", "if", "i", ">=", "0", ":", "l", "=", "l", "[", "i", "+", "1", ":", "]", "i", "=", "l", ".", "find", "(", "'@endmeta'", ")", "if", "i", ">=", "0", ":", "s", "+=", "' '", "+", "l", "[", ":", "i", "]", "done", "=", "True", "else", ":", "s", "+=", "' '", "+", "l", "l", "=", "f", ".", "readline", "(", ")", "s", "=", "map", "(", "lambda", "x", ":", "x", ".", "split", "(", "':'", ")", ",", "s", ".", "split", "(", ")", ")", "d", "=", "{", "}", "for", "x", "in", "s", ":", "if", "len", "(", "x", ")", ">", "2", "or", "len", "(", "x", ")", "==", "0", ":", "raise", "RuntimeError", "(", "'Syntax error in meta information.'", ")", "elif", "len", "(", "x", ")", "==", "2", ":", "d", "[", "x", "[", "0", "]", "]", "=", "x", "[", "1", "]", "else", ":", "d", "[", "x", "[", "0", "]", "]", "=", "None", "return", "d" ]
Scan file headers for @meta ... @endmeta information and store that into a dictionary.
[ "Scan", "file", "headers", "for" ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/tools/meta.py#L14-L67
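An illustrative header that scanmeta understands; the file name and the key/value pairs are hypothetical:

# Contents of a hypothetical Fortran source 'pair_potential.f90':
#
#   ! @meta
#   !    dependencies:core.f90
#   !    classtype:pair_potential_t
#   ! @endmeta
#
# scanmeta('pair_potential.f90') then returns
# {'dependencies': 'core.f90', 'classtype': 'pair_potential_t'}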
13,380
Atomistica/atomistica
src/python/atomistica/snippets.py
mic
def mic(dr, cell, pbc=None): """ Apply minimum image convention to an array of distance vectors. """ # Check where distance larger than 1/2 cell. Particles have crossed # periodic boundaries then and need to be unwrapped. rec = np.linalg.inv(cell) if pbc is not None: rec *= np.array(pbc, dtype=int).reshape(3,1) dri = np.round(np.dot(dr, rec)) # Unwrap return dr - np.dot(dri, cell)
python
def mic(dr, cell, pbc=None): """ Apply minimum image convention to an array of distance vectors. """ # Check where distance larger than 1/2 cell. Particles have crossed # periodic boundaries then and need to be unwrapped. rec = np.linalg.inv(cell) if pbc is not None: rec *= np.array(pbc, dtype=int).reshape(3,1) dri = np.round(np.dot(dr, rec)) # Unwrap return dr - np.dot(dri, cell)
[ "def", "mic", "(", "dr", ",", "cell", ",", "pbc", "=", "None", ")", ":", "# Check where distance larger than 1/2 cell. Particles have crossed", "# periodic boundaries then and need to be unwrapped.", "rec", "=", "np", ".", "linalg", ".", "inv", "(", "cell", ")", "if", "pbc", "is", "not", "None", ":", "rec", "*=", "np", ".", "array", "(", "pbc", ",", "dtype", "=", "int", ")", ".", "reshape", "(", "3", ",", "1", ")", "dri", "=", "np", ".", "round", "(", "np", ".", "dot", "(", "dr", ",", "rec", ")", ")", "# Unwrap", "return", "dr", "-", "np", ".", "dot", "(", "dri", ",", "cell", ")" ]
Apply minimum image convention to an array of distance vectors.
[ "Apply", "minimum", "image", "convention", "to", "an", "array", "of", "distance", "vectors", "." ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/snippets.py#L31-L43
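A quick sketch of mic on a 10 A cubic box, assuming the function above is in scope; the numbers are made up:

import numpy as np

cell = 10.0 * np.eye(3)
dr = np.array([[9.0, 0.2, -9.5]])   # raw difference vector across the boundary
print(mic(dr, cell))                # -> [[-1.0, 0.2, 0.5]]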
13,381
Atomistica/atomistica
src/python/tools/a_run.py
s_from_dhms
def s_from_dhms(time): """return seconds from dhms""" dhms_s = { 's' : 1, 'm' : 60, 'h' : 3600, 'd' : 86400 } time = time.lower() word_list = re.findall(r'\d*[^\d]*', time) seconds = 0 for word in word_list: if word != '': sec = 1 for t in list(dhms_s.keys()): nw = word.replace(t, '') if nw != word: sec = dhms_s[t] word = nw break try: seconds += int(word) * sec except ValueError: raise RuntimeError('unknown format in timestring ' + time) return seconds
python
def s_from_dhms(time): """return seconds from dhms""" dhms_s = { 's' : 1, 'm' : 60, 'h' : 3600, 'd' : 86400 } time = time.lower() word_list = re.findall(r'\d*[^\d]*', time) seconds = 0 for word in word_list: if word != '': sec = 1 for t in list(dhms_s.keys()): nw = word.replace(t, '') if nw != word: sec = dhms_s[t] word = nw break try: seconds += int(word) * sec except ValueError: raise RuntimeError('unknown format in timestring ' + time) return seconds
[ "def", "s_from_dhms", "(", "time", ")", ":", "dhms_s", "=", "{", "'s'", ":", "1", ",", "'m'", ":", "60", ",", "'h'", ":", "3600", ",", "'d'", ":", "86400", "}", "time", "=", "time", ".", "lower", "(", ")", "word_list", "=", "re", ".", "findall", "(", "'\\d*[^\\d]*'", ",", "time", ")", "seconds", "=", "0", "for", "word", "in", "word_list", ":", "if", "word", "!=", "''", ":", "sec", "=", "1", "for", "t", "in", "list", "(", "dhms_s", ".", "keys", "(", ")", ")", ":", "nw", "=", "word", ".", "replace", "(", "t", ",", "''", ")", "if", "nw", "!=", "word", ":", "sec", "=", "dhms_s", "[", "t", "]", "word", "=", "nw", "break", "try", ":", "seconds", "+=", "int", "(", "word", ")", "*", "sec", "except", ":", "raise", "RuntimeError", "(", "'unknown format in timestring '", "+", "time", ")", "return", "seconds" ]
return seconds from dhms
[ "return", "seconds", "from", "dhms" ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/tools/a_run.py#L33-L52
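Illustrative values, worked out by hand from the parser above (bare numbers count as seconds):

assert s_from_dhms('1d2h') == 86400 + 2 * 3600   # 93600
assert s_from_dhms('90m') == 5400
assert s_from_dhms('45') == 45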
13,382
Atomistica/atomistica
src/python/atomistica/join_calculators.py
JoinCalculators.get_stress
def get_stress(self, a): """Calculate stress tensor.""" s = np.zeros( 6, dtype=float ) for c in self.calcs: s += c.get_stress(a) return s
python
def get_stress(self, a): """Calculate stress tensor.""" s = np.zeros( 6, dtype=float ) for c in self.calcs: s += c.get_stress(a) return s
[ "def", "get_stress", "(", "self", ",", "a", ")", ":", "s", "=", "np", ".", "zeros", "(", "6", ",", "dtype", "=", "float", ")", "for", "c", "in", "self", ".", "calcs", ":", "s", "+=", "c", ".", "get_stress", "(", "a", ")", "return", "s" ]
Calculate stress tensor.
[ "Calculate", "stress", "tensor", "." ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/join_calculators.py#L66-L71
13,383
Atomistica/atomistica
src/python/atomistica/join_calculators.py
JoinCalculators.set_atoms
def set_atoms(self, a): """Assign an atoms object.""" for c in self.calcs: if hasattr(c, "set_atoms"): c.set_atoms(a)
python
def set_atoms(self, a): """Assign an atoms object.""" for c in self.calcs: if hasattr(c, "set_atoms"): c.set_atoms(a)
[ "def", "set_atoms", "(", "self", ",", "a", ")", ":", "for", "c", "in", "self", ".", "calcs", ":", "if", "hasattr", "(", "c", ",", "\"set_atoms\"", ")", ":", "c", ".", "set_atoms", "(", "a", ")" ]
Assign an atoms object.
[ "Assign", "an", "atoms", "object", "." ]
5ed79d776c92b91a566be22615bfb304ecc75db7
https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/join_calculators.py#L79-L83
13,384
thieman/py-dag
dag/__init__.py
DAG.rename_edges
def rename_edges(self, old_task_name, new_task_name, graph=None): """ Change references to a task in existing edges. """ if not graph: graph = self.graph # Iterate over a snapshot of the items: the dict is mutated below, which # would otherwise raise a RuntimeError in Python 3. for node, edges in list(graph.items()): if node == old_task_name: graph[new_task_name] = copy(edges) del graph[old_task_name] else: if old_task_name in edges: edges.remove(old_task_name) edges.add(new_task_name)
python
def rename_edges(self, old_task_name, new_task_name, graph=None): """ Change references to a task in existing edges. """ if not graph: graph = self.graph # Iterate over a snapshot of the items: the dict is mutated below, which # would otherwise raise a RuntimeError in Python 3. for node, edges in list(graph.items()): if node == old_task_name: graph[new_task_name] = copy(edges) del graph[old_task_name] else: if old_task_name in edges: edges.remove(old_task_name) edges.add(new_task_name)
[ "def", "rename_edges", "(", "self", ",", "old_task_name", ",", "new_task_name", ",", "graph", "=", "None", ")", ":", "if", "not", "graph", ":", "graph", "=", "self", ".", "graph", "for", "node", ",", "edges", "in", "graph", ".", "items", "(", ")", ":", "if", "node", "==", "old_task_name", ":", "graph", "[", "new_task_name", "]", "=", "copy", "(", "edges", ")", "del", "graph", "[", "old_task_name", "]", "else", ":", "if", "old_task_name", "in", "edges", ":", "edges", ".", "remove", "(", "old_task_name", ")", "edges", ".", "add", "(", "new_task_name", ")" ]
Change references to a task in existing edges.
[ "Change", "references", "to", "a", "task", "in", "existing", "edges", "." ]
5b5eed396c930751576bdf0d45907a665aac000b
https://github.com/thieman/py-dag/blob/5b5eed396c930751576bdf0d45907a665aac000b/dag/__init__.py#L77-L90
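A usage sketch for the rename; the add_node/add_edge calls are assumed from py-dag's public API and the task names are made up:

from dag import DAG

dag = DAG()
dag.add_node('extract')
dag.add_node('load')
dag.add_edge('extract', 'load')
dag.rename_edges('load', 'load_v2')
# Every edge that pointed at 'load' now points at 'load_v2', and any
# outgoing edges of 'load' move under the new name.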
13,385
thieman/py-dag
dag/__init__.py
DAG.predecessors
def predecessors(self, node, graph=None): """ Returns a list of all predecessors of the given node """ if graph is None: graph = self.graph return [key for key in graph if node in graph[key]]
python
def predecessors(self, node, graph=None): """ Returns a list of all predecessors of the given node """ if graph is None: graph = self.graph return [key for key in graph if node in graph[key]]
[ "def", "predecessors", "(", "self", ",", "node", ",", "graph", "=", "None", ")", ":", "if", "graph", "is", "None", ":", "graph", "=", "self", ".", "graph", "return", "[", "key", "for", "key", "in", "graph", "if", "node", "in", "graph", "[", "key", "]", "]" ]
Returns a list of all predecessors of the given node
[ "Returns", "a", "list", "of", "all", "predecessors", "of", "the", "given", "node" ]
5b5eed396c930751576bdf0d45907a665aac000b
https://github.com/thieman/py-dag/blob/5b5eed396c930751576bdf0d45907a665aac000b/dag/__init__.py#L92-L96
13,386
buriburisuri/sugartensor
sugartensor/sg_initializer.py
constant
def constant(name, shape, value=0, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True): r"""Creates a tensor variable whose initial values are `value` and whose shape is `shape`. Args: name: The name of the new variable. shape: A tuple/list of integers or an integer. If shape is an integer, it is converted to a list. value: A Python scalar. All elements of the initialized variable will be set to this value. Default is 0. dtype: The data type. Only floating point types are supported. Default is float32. summary: If True, add this constant to tensor board summary. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. trainable: If True, add this constant to trainable collection. Default is True. Returns: A `Variable`. """ shape = shape if isinstance(shape, (tuple, list)) else [shape] x = tf.get_variable(name, shape, dtype=dtype, initializer=tf.constant_initializer(value), regularizer=regularizer, trainable=trainable) # add summary if summary: tf.sg_summary_param(x) return x
python
def constant(name, shape, value=0, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True): r"""Creates a tensor variable whose initial values are `value` and whose shape is `shape`. Args: name: The name of the new variable. shape: A tuple/list of integers or an integer. If shape is an integer, it is converted to a list. value: A Python scalar. All elements of the initialized variable will be set to this value. Default is 0. dtype: The data type. Only floating point types are supported. Default is float32. summary: If True, add this constant to tensor board summary. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. trainable: If True, add this constant to trainable collection. Default is True. Returns: A `Variable`. """ shape = shape if isinstance(shape, (tuple, list)) else [shape] x = tf.get_variable(name, shape, dtype=dtype, initializer=tf.constant_initializer(value), regularizer=regularizer, trainable=trainable) # add summary if summary: tf.sg_summary_param(x) return x
[ "def", "constant", "(", "name", ",", "shape", ",", "value", "=", "0", ",", "dtype", "=", "tf", ".", "sg_floatx", ",", "summary", "=", "True", ",", "regularizer", "=", "None", ",", "trainable", "=", "True", ")", ":", "shape", "=", "shape", "if", "isinstance", "(", "shape", ",", "(", "tuple", ",", "list", ")", ")", "else", "[", "shape", "]", "x", "=", "tf", ".", "get_variable", "(", "name", ",", "shape", ",", "dtype", "=", "dtype", ",", "initializer", "=", "tf", ".", "constant_initializer", "(", "value", ")", ",", "regularizer", "=", "regularizer", ",", "trainable", "=", "trainable", ")", "# add summary", "if", "summary", ":", "tf", ".", "sg_summary_param", "(", "x", ")", "return", "x" ]
r"""Creates a tensor variable of which initial values are `value` and shape is `shape`. Args: name: The name of new variable. shape: A tuple/list of integers or an integer. If shape is an integer, it is converted to a list. value: A Python scalar. All elements of the initialized variable will be set to this value. Default is 0. dtype: The data type. Only floating point types are supported. Default is float32. summary: If True, add this constant to tensor board summary. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization trainable: If True, add this constant to trainable collection. Default is True. Returns: A `Variable`.
[ "r", "Creates", "a", "tensor", "variable", "of", "which", "initial", "values", "are", "value", "and", "shape", "is", "shape", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_initializer.py#L10-L36
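A hypothetical call, using sugartensor's convention of importing itself as tf; the scope and variable names are made up:

import sugartensor as tf
from sugartensor import sg_initializer

with tf.variable_scope('layer1'):
    bias = sg_initializer.constant('b', 128)    # zero-initialised, shape [128]
    temp = sg_initializer.constant('T', 1, value=0.5, trainable=False)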
13,387
buriburisuri/sugartensor
sugartensor/sg_queue.py
sg_producer_func
def sg_producer_func(func): r"""Decorates a function `func` as sg_producer_func. Args: func: A function to decorate. """ @wraps(func) def wrapper(**kwargs): r"""Manages arguments of `tf.sg_opt`. Args: **kwargs: source: A source queue list to enqueue dtypes: Input data types of each tensor out_dtypes: Output data types of each tensor ( If None, same as dtypes ) capacity: Queue capacity. Default is 32. num_threads: Number of threads. Default is 1. """ # default option opt = tf.sg_opt(kwargs) + tf.sg_opt(dtypes=[tf.sg_floatx], capacity=32, num_threads=1) # source queue list check assert opt.source is not None, 'source is mandatory.' if type(opt.source) is not list and type(opt.source) is not tuple: opt.source = [opt.source] if type(opt.dtypes) is not list and type(opt.dtypes) is not tuple: opt.dtypes = [opt.dtypes] # default out_dtypes if opt.out_dtypes is None: opt.out_dtypes = opt.dtypes if type(opt.out_dtypes) is not list and type(opt.out_dtypes) is not tuple: opt.out_dtypes = [opt.out_dtypes] assert len(opt.source) == len(opt.dtypes), 'Source and dtypes should have same length.' # enqueue function def enqueue_func(sess, op): # read data from source queue data = func(sess.run(opt.source)) # create feeder dict feed_dict = {} for ph, col in zip(placeholders, data): feed_dict[ph] = col # run session sess.run(op, feed_dict=feed_dict) # create place holder list placeholders = [] for dtype in opt.dtypes: placeholders.append(tf.placeholder(dtype=dtype)) # create FIFO queue queue = tf.FIFOQueue(opt.capacity, dtypes=opt.out_dtypes) # enqueue operation enqueue_op = queue.enqueue(placeholders) # create queue runner runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * opt.num_threads) # register to global collection tf.train.add_queue_runner(runner) # return de-queue operation return queue.dequeue() return wrapper
python
def sg_producer_func(func): r"""Decorates a function `func` as sg_producer_func. Args: func: A function to decorate. """ @wraps(func) def wrapper(**kwargs): r"""Manages arguments of `tf.sg_opt`. Args: **kwargs: source: A source queue list to enqueue dtypes: Input data types of each tensor out_dtypes: Output data types of each tensor ( If None, same as dtypes ) capacity: Queue capacity. Default is 32. num_threads: Number of threads. Default is 1. """ # default option opt = tf.sg_opt(kwargs) + tf.sg_opt(dtypes=[tf.sg_floatx], capacity=32, num_threads=1) # source queue list check assert opt.source is not None, 'source is mandatory.' if type(opt.source) is not list and type(opt.source) is not tuple: opt.source = [opt.source] if type(opt.dtypes) is not list and type(opt.dtypes) is not tuple: opt.dtypes = [opt.dtypes] # default out_dtypes if opt.out_dtypes is None: opt.out_dtypes = opt.dtypes if type(opt.out_dtypes) is not list and type(opt.out_dtypes) is not tuple: opt.out_dtypes = [opt.out_dtypes] assert len(opt.source) == len(opt.dtypes), 'Source and dtypes should have same length.' # enqueue function def enqueue_func(sess, op): # read data from source queue data = func(sess.run(opt.source)) # create feeder dict feed_dict = {} for ph, col in zip(placeholders, data): feed_dict[ph] = col # run session sess.run(op, feed_dict=feed_dict) # create place holder list placeholders = [] for dtype in opt.dtypes: placeholders.append(tf.placeholder(dtype=dtype)) # create FIFO queue queue = tf.FIFOQueue(opt.capacity, dtypes=opt.out_dtypes) # enqueue operation enqueue_op = queue.enqueue(placeholders) # create queue runner runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * opt.num_threads) # register to global collection tf.train.add_queue_runner(runner) # return de-queue operation return queue.dequeue() return wrapper
[ "def", "sg_producer_func", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "*", "kwargs", ")", ":", "r\"\"\"Manages arguments of `tf.sg_opt`.\n\n Args:\n **kwargs:\n source: A source queue list to enqueue\n dtypes: Input data types of each tensor\n out_dtypes: Output data types of each tensor ( If None, same as dtypes )\n capacity: Queue capacity. Default is 32.\n num_threads: Number of threads. Default is 1.\n \"\"\"", "# default option", "opt", "=", "tf", ".", "sg_opt", "(", "kwargs", ")", "+", "tf", ".", "sg_opt", "(", "dtypes", "=", "[", "tf", ".", "sg_floatx", "]", ",", "capacity", "=", "32", ",", "num_threads", "=", "1", ")", "# source queue list check", "assert", "opt", ".", "source", "is", "not", "None", ",", "'source is mandatory.'", "if", "type", "(", "opt", ".", "source", ")", "is", "not", "list", "and", "type", "(", "opt", ".", "source", ")", "is", "not", "tuple", ":", "opt", ".", "source", "=", "[", "opt", ".", "source", "]", "if", "type", "(", "opt", ".", "dtypes", ")", "is", "not", "list", "and", "type", "(", "opt", ".", "dtypes", ")", "is", "not", "tuple", ":", "opt", ".", "dtypes", "=", "[", "opt", ".", "dtypes", "]", "# default out_dtypes", "if", "opt", ".", "out_dtypes", "is", "None", ":", "opt", ".", "out_dtypes", "=", "opt", ".", "dtypes", "if", "type", "(", "opt", ".", "out_dtypes", ")", "is", "not", "list", "and", "type", "(", "opt", ".", "out_dtypes", ")", "is", "not", "tuple", ":", "opt", ".", "out_dtypes", "=", "[", "opt", ".", "out_dtypes", "]", "assert", "len", "(", "opt", ".", "source", ")", "==", "len", "(", "opt", ".", "dtypes", ")", ",", "'Source and dtypes should have same length.'", "# enqueue function", "def", "enqueue_func", "(", "sess", ",", "op", ")", ":", "# read data from source queue", "data", "=", "func", "(", "sess", ".", "run", "(", "opt", ".", "source", ")", ")", "# create feeder dict", "feed_dict", "=", "{", "}", "for", "ph", ",", "col", "in", "zip", "(", "placeholders", ",", "data", ")", ":", "feed_dict", "[", "ph", "]", "=", "col", "# run session", "sess", ".", "run", "(", "op", ",", "feed_dict", "=", "feed_dict", ")", "# create place holder list", "placeholders", "=", "[", "]", "for", "dtype", "in", "opt", ".", "dtypes", ":", "placeholders", ".", "append", "(", "tf", ".", "placeholder", "(", "dtype", "=", "dtype", ")", ")", "# create FIFO queue", "queue", "=", "tf", ".", "FIFOQueue", "(", "opt", ".", "capacity", ",", "dtypes", "=", "opt", ".", "out_dtypes", ")", "# enqueue operation", "enqueue_op", "=", "queue", ".", "enqueue", "(", "placeholders", ")", "# create queue runner", "runner", "=", "_FuncQueueRunner", "(", "enqueue_func", ",", "queue", ",", "[", "enqueue_op", "]", "*", "opt", ".", "num_threads", ")", "# register to global collection", "tf", ".", "train", ".", "add_queue_runner", "(", "runner", ")", "# return de-queue operation", "return", "queue", ".", "dequeue", "(", ")", "return", "wrapper" ]
r"""Decorates a function `func` as sg_producer_func. Args: func: A function to decorate.
[ "r", "Decorates", "a", "function", "func", "as", "sg_producer_func", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_queue.py#L11-L77
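A hypothetical producer built with the decorator; the source tensor and the .npy file convention are assumptions for illustration:

import numpy as np
import sugartensor as tf

@tf.sg_producer_func
def load_array(srcs):
    # srcs is the list of values dequeued from `source`; return one numpy
    # array per entry in `dtypes`.
    return [np.load(srcs[0].decode())]

# filenames is assumed to come from e.g. tf.train.string_input_producer(...).dequeue()
x = load_array(source=filenames, dtypes=[tf.sg_floatx],
               capacity=64, num_threads=2)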
13,388
buriburisuri/sugartensor
sugartensor/sg_transform.py
sg_transpose
def sg_transpose(tensor, opt): r"""Permutes the dimensions according to `opt.perm`. See `tf.transpose()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: perm: A permutation of the dimensions of `tensor`. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ assert opt.perm is not None, 'perm is mandatory' return tf.transpose(tensor, opt.perm, name=opt.name)
python
def sg_transpose(tensor, opt): r"""Permutes the dimensions according to `opt.perm`. See `tf.transpose()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: perm: A permutation of the dimensions of `tensor`. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ assert opt.perm is not None, 'perm is mandatory' return tf.transpose(tensor, opt.perm, name=opt.name)
[ "def", "sg_transpose", "(", "tensor", ",", "opt", ")", ":", "assert", "opt", ".", "perm", "is", "not", "None", ",", "'perm is mandatory'", "return", "tf", ".", "transpose", "(", "tensor", ",", "opt", ".", "perm", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Permutes the dimensions according to `opt.perm`. See `tf.transpose()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: perm: A permutation of the dimensions of `tensor`. The target shape. name: If provided, replace current tensor's name. Returns: A `Tensor`.
[ "r", "Permutes", "the", "dimensions", "according", "to", "opt", ".", "perm", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L161-L176
13,389
buriburisuri/sugartensor
sugartensor/sg_transform.py
sg_argmin
def sg_argmin(tensor, opt): r"""Returns the indices of the minimum values along the specified axis. See `tf.argmin()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis: Target axis. Default is the last one. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ opt += tf.sg_opt(axis=tensor.get_shape().ndims - 1) return tf.argmin(tensor, opt.axis, opt.name)
python
def sg_argmin(tensor, opt): r"""Returns the indices of the minimum values along the specified axis. See `tf.argmin()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis: Target axis. Default is the last one. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ opt += tf.sg_opt(axis=tensor.get_shape().ndims - 1) return tf.argmin(tensor, opt.axis, opt.name)
[ "def", "sg_argmin", "(", "tensor", ",", "opt", ")", ":", "opt", "+=", "tf", ".", "sg_opt", "(", "axis", "=", "tensor", ".", "get_shape", "(", ")", ".", "ndims", "-", "1", ")", "return", "tf", ".", "argmin", "(", "tensor", ",", "opt", ".", "axis", ",", "opt", ".", "name", ")" ]
r"""Returns the indices of the minimum values along the specified axis. See `tf.argin()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis: Target axis. Default is the last one. name: If provided, replace current tensor's name. Returns: A `Tensor`.
[ "r", "Returns", "the", "indices", "of", "the", "minimum", "values", "along", "the", "specified", "axis", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L199-L214
13,390
buriburisuri/sugartensor
sugartensor/sg_transform.py
sg_concat
def sg_concat(tensor, opt): r"""Concatenates tensors along an axis. See `tf.concat()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: target: A `Tensor`. Must have the same rank as `tensor`, and all dimensions except `opt.axis` must be equal. axis : Target axis. Default is the last one. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ assert opt.target is not None, 'target is mandatory.' opt += tf.sg_opt(axis=tensor.get_shape().ndims-1) target = opt.target if isinstance(opt.target, (tuple, list)) else [opt.target] return tf.concat([tensor] + target, opt.axis, name=opt.name)
python
def sg_concat(tensor, opt): r"""Concatenates tensors along an axis. See `tf.concat()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: target: A `Tensor`. Must have the same rank as `tensor`, and all dimensions except `opt.axis` must be equal. axis : Target axis. Default is the last one. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ assert opt.target is not None, 'target is mandatory.' opt += tf.sg_opt(axis=tensor.get_shape().ndims-1) target = opt.target if isinstance(opt.target, (tuple, list)) else [opt.target] return tf.concat([tensor] + target, opt.axis, name=opt.name)
[ "def", "sg_concat", "(", "tensor", ",", "opt", ")", ":", "assert", "opt", ".", "target", "is", "not", "None", ",", "'target is mandatory.'", "opt", "+=", "tf", ".", "sg_opt", "(", "axis", "=", "tensor", ".", "get_shape", "(", ")", ".", "ndims", "-", "1", ")", "target", "=", "opt", ".", "target", "if", "isinstance", "(", "opt", ".", "target", ",", "(", "tuple", ",", "list", ")", ")", "else", "[", "opt", ".", "target", "]", "return", "tf", ".", "concat", "(", "[", "tensor", "]", "+", "target", ",", "opt", ".", "axis", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Concatenates tensors along a axis. See `tf.concat()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: target: A `Tensor`. Must have the same rank as `tensor`, and all dimensions except `opt.dim` must be equal. axis : Target axis. Default is the last one. name: If provided, replace current tensor's name. Returns: A `Tensor`.
[ "r", "Concatenates", "tensors", "along", "a", "axis", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L218-L237
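The sg_transform wrappers above are normally chained as tensor methods (sugartensor injects them onto Tensor); a small sketch with made-up shapes:

import sugartensor as tf

a = tf.ones((2, 3))
b = tf.zeros((2, 3))
c = a.sg_concat(target=b)   # last axis by default -> shape (2, 6)
i = c.sg_argmin()           # indices of the minima along the last axis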
13,391
buriburisuri/sugartensor
sugartensor/sg_transform.py
sg_log
def sg_log(tensor, opt): r"""Log transform a dense tensor. See `tf.log()` in tensorflow. Args: tensor: A `Tensor` ( automatically given by chain ) opt: name: If provided, replace current tensor's name. Returns: A `Tensor`. """ return tf.log(tensor + tf.sg_eps, name=opt.name)
python
def sg_log(tensor, opt): r"""Log transform a dense tensor. See `tf.log()` in tensorflow. Args: tensor: A `Tensor` ( automatically given by chain ) opt: name: If provided, replace current tensor's name. Returns: A `Tensor`. """ return tf.log(tensor + tf.sg_eps, name=opt.name)
[ "def", "sg_log", "(", "tensor", ",", "opt", ")", ":", "return", "tf", ".", "log", "(", "tensor", "+", "tf", ".", "sg_eps", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Log transform a dense tensor See `tf.log()` in tensorflow. Args: tensor: A `Tensor` ( automatically given by chain ) opt: name: If provided, replace current tensor's name. Returns: A `Tensor`.
[ "r", "Log", "transform", "a", "dense", "tensor" ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L281-L294
13,392
buriburisuri/sugartensor
sugartensor/sg_transform.py
sg_prod
def sg_prod(tensor, opt): r"""Computes the product of elements across an axis of a tensor. See `tf.reduce_prod()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
python
def sg_prod(tensor, opt): r"""Computes the product of elements across an axis of a tensor. See `tf.reduce_prod()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
[ "def", "sg_prod", "(", "tensor", ",", "opt", ")", ":", "return", "tf", ".", "reduce_prod", "(", "tensor", ",", "axis", "=", "opt", ".", "axis", ",", "keep_dims", "=", "opt", ".", "keep_dims", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Computes the product of elements across axis of a tensor. See `tf.reduce_prod()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`.
[ "r", "Computes", "the", "product", "of", "elements", "across", "axis", "of", "a", "tensor", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L357-L372
13,393
buriburisuri/sugartensor
sugartensor/sg_transform.py
sg_min
def sg_min(tensor, opt): r"""Computes the minimum of elements across an axis of a tensor. See `tf.reduce_min()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ return tf.reduce_min(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
python
def sg_min(tensor, opt): r"""Computes the minimum of elements across an axis of a tensor. See `tf.reduce_min()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ return tf.reduce_min(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
[ "def", "sg_min", "(", "tensor", ",", "opt", ")", ":", "return", "tf", ".", "reduce_min", "(", "tensor", ",", "axis", "=", "opt", ".", "axis", ",", "keep_dims", "=", "opt", ".", "keep_dims", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Computes the minimum of elements across axis of a tensor. See `tf.reduce_min()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`.
[ "r", "Computes", "the", "minimum", "of", "elements", "across", "axis", "of", "a", "tensor", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L376-L391
13,394
buriburisuri/sugartensor
sugartensor/sg_transform.py
sg_max
def sg_max(tensor, opt): r"""Computes the maximum of elements across an axis of a tensor. See `tf.reduce_max()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
python
def sg_max(tensor, opt): r"""Computes the maximum of elements across an axis of a tensor. See `tf.reduce_max()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
[ "def", "sg_max", "(", "tensor", ",", "opt", ")", ":", "return", "tf", ".", "reduce_max", "(", "tensor", ",", "axis", "=", "opt", ".", "axis", ",", "keep_dims", "=", "opt", ".", "keep_dims", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Computes the maximum of elements across axis of a tensor. See `tf.reduce_max()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`.
[ "r", "Computes", "the", "maximum", "of", "elements", "across", "axis", "of", "a", "tensor", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L395-L410
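The same chained pattern applies to `sg_max`; a minimal sketch under the same import assumption:

```
import sugartensor as tf

x = tf.constant([[1., 5.], [3., 2.]])
m = x.sg_max(axis=0)  # maximum down each column -> [3., 5.]
```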
13,395
buriburisuri/sugartensor
sugartensor/sg_transform.py
sg_any
def sg_any(tensor, opt):
    r"""Computes the "logical or" of elements across axis of a tensor.

    See `tf.reduce_any()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis : A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    return tf.reduce_any(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
python
def sg_any(tensor, opt):
    r"""Computes the "logical or" of elements across axis of a tensor.

    See `tf.reduce_any()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis : A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    return tf.reduce_any(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
[ "def", "sg_any", "(", "tensor", ",", "opt", ")", ":", "return", "tf", ".", "reduce_any", "(", "tensor", ",", "axis", "=", "opt", ".", "axis", ",", "keep_dims", "=", "opt", ".", "keep_dims", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Computes the "logical or" of elements across axis of a tensor.

See `tf.reduce_any()` in tensorflow.

Args:
  tensor: A `Tensor` (automatically given by chain).
  opt:
    axis : A tuple/list of integers or an integer. The axis to reduce.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: If provided, replace current tensor's name.

Returns:
  A `Tensor`.
[ "r", "Computes", "the", "logical", "or", "of", "elements", "across", "axis", "of", "a", "tensor", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L433-L448
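A minimal sketch for `sg_any` on a boolean tensor (same import assumption as above):

```
import sugartensor as tf

flags = tf.constant([[True, False, False],
                     [False, False, False]])
row_has_true = flags.sg_any(axis=1)  # logical-or over each row -> [True, False]
```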
13,396
buriburisuri/sugartensor
sugartensor/sg_transform.py
sg_lookup
def sg_lookup(tensor, opt):
    r"""Looks up the `tensor` in the embedding matrix `emb`.

    Args:
      tensor: A tensor (automatically given by chain).
      opt:
        emb: A 2-D `Tensor`. An embedding matrix.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    assert opt.emb is not None, 'emb is mandatory.'
    return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name)
python
def sg_lookup(tensor, opt):
    r"""Looks up the `tensor` in the embedding matrix `emb`.

    Args:
      tensor: A tensor (automatically given by chain).
      opt:
        emb: A 2-D `Tensor`. An embedding matrix.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    assert opt.emb is not None, 'emb is mandatory.'
    return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name)
[ "def", "sg_lookup", "(", "tensor", ",", "opt", ")", ":", "assert", "opt", ".", "emb", "is", "not", "None", ",", "'emb is mandatory.'", "return", "tf", ".", "nn", ".", "embedding_lookup", "(", "opt", ".", "emb", ",", "tensor", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Looks up the `tensor` in the embedding matrix `emb`.

Args:
  tensor: A tensor (automatically given by chain).
  opt:
    emb: A 2-D `Tensor`. An embedding matrix.
    name: If provided, replace current tensor's name.

Returns:
  A `Tensor`.
[ "r", "Looks", "up", "the", "tensor", "in", "the", "embedding", "matrix", "emb", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L533-L547
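A minimal sketch (same import assumption as above); the variable name `emb`, the ids, and the 100 x 16 table size are hypothetical:

```
import sugartensor as tf

emb = tf.get_variable('emb', shape=[100, 16])  # hypothetical 100-word, 16-dim table
ids = tf.constant([[3, 7], [0, 1]])            # a batch of token-id sequences
vectors = ids.sg_lookup(emb=emb)               # shape (2, 2, 16)
```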
13,397
buriburisuri/sugartensor
sugartensor/sg_transform.py
sg_reverse_seq
def sg_reverse_seq(tensor, opt):
    r"""Reverses variable length slices.

    Before applying the pure tensorflow function tf.reverse_sequence,
    this function calculates sequence lengths by counting non-zeros.

    For example,

    ```
    tensor = [[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]]
    tensor.sg_reverse_seq()
    => [[3 2 1 0 0]
        [5 4 0 0 0]]
    ```

    Args:
      tensor: A 2-D `Tensor` (automatically given by chain).
      opt:
        axis: Axis to reverse. Default is 1.
        name : If provided, it replaces current tensor's name.

    Returns:
      A `Tensor` with the same shape and type as `tensor`.
    """
    # default sequence dimension
    opt += tf.sg_opt(axis=1)
    seq_len = tf.not_equal(tensor, tf.zeros_like(tensor)).sg_int().sg_sum(axis=opt.axis)
    return tf.reverse_sequence(tensor, seq_len, opt.axis, name=opt.name)
python
def sg_reverse_seq(tensor, opt):
    r"""Reverses variable length slices.

    Before applying the pure tensorflow function tf.reverse_sequence,
    this function calculates sequence lengths by counting non-zeros.

    For example,

    ```
    tensor = [[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]]
    tensor.sg_reverse_seq()
    => [[3 2 1 0 0]
        [5 4 0 0 0]]
    ```

    Args:
      tensor: A 2-D `Tensor` (automatically given by chain).
      opt:
        axis: Axis to reverse. Default is 1.
        name : If provided, it replaces current tensor's name.

    Returns:
      A `Tensor` with the same shape and type as `tensor`.
    """
    # default sequence dimension
    opt += tf.sg_opt(axis=1)
    seq_len = tf.not_equal(tensor, tf.zeros_like(tensor)).sg_int().sg_sum(axis=opt.axis)
    return tf.reverse_sequence(tensor, seq_len, opt.axis, name=opt.name)
[ "def", "sg_reverse_seq", "(", "tensor", ",", "opt", ")", ":", "# default sequence dimension", "opt", "+=", "tf", ".", "sg_opt", "(", "axis", "=", "1", ")", "seq_len", "=", "tf", ".", "not_equal", "(", "tensor", ",", "tf", ".", "zeros_like", "(", "tensor", ")", ")", ".", "sg_int", "(", ")", ".", "sg_sum", "(", "axis", "=", "opt", ".", "axis", ")", "return", "tf", ".", "reverse_sequence", "(", "tensor", ",", "seq_len", ",", "opt", ".", "axis", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Reverses variable length slices.

Before applying the pure tensorflow function tf.reverse_sequence,
this function calculates sequence lengths by counting non-zeros.

For example,

```
tensor = [[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]]
tensor.sg_reverse_seq()
=> [[3 2 1 0 0]
    [5 4 0 0 0]]
```

Args:
  tensor: A 2-D `Tensor` (automatically given by chain).
  opt:
    axis: Axis to reverse. Default is 1.
    name : If provided, it replaces current tensor's name.

Returns:
  A `Tensor` with the same shape and type as `tensor`.
[ "r", "Reverses", "variable", "length", "slices", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L551-L578
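The docstring's example as a runnable sketch (same import assumption as above):

```
import sugartensor as tf

t = tf.constant([[1, 2, 3, 0, 0],
                 [4, 5, 0, 0, 0]])
r = t.sg_reverse_seq()  # zero padding stays in place: [[3, 2, 1, 0, 0], [5, 4, 0, 0, 0]]
```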
13,398
buriburisuri/sugartensor
sugartensor/sg_main.py
sg_gpus
def sg_gpus():
    r""" Gets the number of currently available GPUs.

    Returns:
      An integer: the total number of GPUs available (at least 1).
    """
    global _gpus

    if _gpus is None:
        local_device_protos = device_lib.list_local_devices()
        _gpus = len([x.name for x in local_device_protos if x.device_type == 'GPU'])

    return max(_gpus, 1)
python
def sg_gpus():
    r""" Gets the number of currently available GPUs.

    Returns:
      An integer: the total number of GPUs available (at least 1).
    """
    global _gpus

    if _gpus is None:
        local_device_protos = device_lib.list_local_devices()
        _gpus = len([x.name for x in local_device_protos if x.device_type == 'GPU'])

    return max(_gpus, 1)
[ "def", "sg_gpus", "(", ")", ":", "global", "_gpus", "if", "_gpus", "is", "None", ":", "local_device_protos", "=", "device_lib", ".", "list_local_devices", "(", ")", "_gpus", "=", "len", "(", "[", "x", ".", "name", "for", "x", "in", "local_device_protos", "if", "x", ".", "device_type", "==", "'GPU'", "]", ")", "return", "max", "(", "_gpus", ",", "1", ")" ]
r""" Gets the number of currently available GPUs.

Returns:
  An integer: the total number of GPUs available (at least 1).
[ "r", "Gets", "the", "number", "of", "currently", "available", "GPUs", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L64-L76
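A minimal sketch (same import assumption as above); because the result is clamped to at least 1, the division below is safe even on CPU-only machines:

```
import sugartensor as tf

n = tf.sg_gpus()        # e.g. 1 on a CPU-only machine
tower_batch = 128 // n  # a typical use: splitting a batch across devices
```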
13,399
buriburisuri/sugartensor
sugartensor/sg_main.py
sg_context
def sg_context(**kwargs):
    r"""Context helper for computational graph building.
    Makes all elements within the `with` block share the parameters.

    For example, in the following code, the default value of parameter `bn`
    will be set to True in all layers within the `with` block.

    ```
    with tf.sg_context(bn=True):
        ...
        ...
    ```

    Args:
      **kwargs:
        in_dim: An integer. The size of the input dimension, which is set to the last one by default.
        dim: An integer. The size of the output dimension. Has the same value as in_dim by default.
        bn: Boolean. If True, batch normalization is applied.
        ln: Boolean. If True, layer normalization is applied.
        dout: A float in the range [0, 100). A dropout rate. Default is 0.
        bias: Boolean. If True (default), biases are added.
        name: A name for the layer. By default, the function name is assigned.
        act: A name of an activation function, e.g. `sigmoid`, `tanh`, etc.
        reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
          as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.

    Returns:
      None
    """
    global _context

    # set options when enter
    context_now = tf.sg_opt(kwargs)
    _context += [context_now]

    # if named context
    if context_now.name:
        context_now.scope_name = context_now.name
        context_now.name = None
        with tf.variable_scope(context_now.scope_name):
            yield
    else:
        yield

    # clear options when exit
    del _context[-1]
python
def sg_context(**kwargs):
    r"""Context helper for computational graph building.
    Makes all elements within the `with` block share the parameters.

    For example, in the following code, the default value of parameter `bn`
    will be set to True in all layers within the `with` block.

    ```
    with tf.sg_context(bn=True):
        ...
        ...
    ```

    Args:
      **kwargs:
        in_dim: An integer. The size of the input dimension, which is set to the last one by default.
        dim: An integer. The size of the output dimension. Has the same value as in_dim by default.
        bn: Boolean. If True, batch normalization is applied.
        ln: Boolean. If True, layer normalization is applied.
        dout: A float in the range [0, 100). A dropout rate. Default is 0.
        bias: Boolean. If True (default), biases are added.
        name: A name for the layer. By default, the function name is assigned.
        act: A name of an activation function, e.g. `sigmoid`, `tanh`, etc.
        reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
          as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.

    Returns:
      None
    """
    global _context

    # set options when enter
    context_now = tf.sg_opt(kwargs)
    _context += [context_now]

    # if named context
    if context_now.name:
        context_now.scope_name = context_now.name
        context_now.name = None
        with tf.variable_scope(context_now.scope_name):
            yield
    else:
        yield

    # clear options when exit
    del _context[-1]
[ "def", "sg_context", "(", "*", "*", "kwargs", ")", ":", "global", "_context", "# set options when enter", "context_now", "=", "tf", ".", "sg_opt", "(", "kwargs", ")", "_context", "+=", "[", "context_now", "]", "# if named context", "if", "context_now", ".", "name", ":", "context_now", ".", "scope_name", "=", "context_now", ".", "name", "context_now", ".", "name", "=", "None", "with", "tf", ".", "variable_scope", "(", "context_now", ".", "scope_name", ")", ":", "yield", "else", ":", "yield", "# clear options when exit", "del", "_context", "[", "-", "1", "]" ]
r"""Context helper for computational graph building.
Makes all elements within the `with` block share the parameters.

For example, in the following code, the default value of parameter `bn`
will be set to True in all layers within the `with` block.

```
with tf.sg_context(bn=True):
    ...
    ...
```

Args:
  **kwargs:
    in_dim: An integer. The size of the input dimension, which is set to the last one by default.
    dim: An integer. The size of the output dimension. Has the same value as in_dim by default.
    bn: Boolean. If True, batch normalization is applied.
    ln: Boolean. If True, layer normalization is applied.
    dout: A float in the range [0, 100). A dropout rate. Default is 0.
    bias: Boolean. If True (default), biases are added.
    name: A name for the layer. By default, the function name is assigned.
    act: A name of an activation function, e.g. `sigmoid`, `tanh`, etc.
    reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
      as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.

Returns:
  None
[ "r", "Context", "helper", "for", "computational", "graph", "building", ".", "Makes", "all", "elements", "within", "the", "with", "block", "share", "the", "parameters", "." ]
d2c039954777c7fbe3eb0c2ae40c45c9854deb40
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L87-L132
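A minimal sketch of the shared-parameter behavior (same import assumption as above). `sg_dense` is sugartensor's dense layer; treat the layer sizes and the scope name `block` here as hypothetical:

```
import sugartensor as tf

x = tf.placeholder(tf.float32, shape=(None, 28))
# layers inside the block inherit act='relu' and bn=True as defaults,
# and their variables live under the 'block' scope
with tf.sg_context(name='block', act='relu', bn=True):
    h = x.sg_dense(dim=100)
    y = h.sg_dense(dim=10, act='sigmoid', bn=False)  # per-call options override the context
```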