Dataset columns (value ranges summarize the full 252k-row split):

id                int32   (0 to 252k)
repo              string  (7 to 55 chars)
path              string  (4 to 127 chars)
func_name         string  (1 to 88 chars)
original_string   string  (75 to 19.8k chars)
language          string  (1 class)
code              string  (75 to 19.8k chars)
code_tokens       list
docstring         string  (3 to 17.3k chars)
docstring_tokens  list
sha               string  (40 chars)
url               string  (87 to 242 chars)
243,500
urbn/Caesium
caesium/document.py
AsyncRevisionStackManager.__get_pending_revisions
python
def __get_pending_revisions(self):
    """
    Get all pending revisions whose time of action is before the current time

    :return: A list of revisions
    :rtype: list
    """
    dttime = time.mktime(datetime.datetime.now().timetuple())

    changes = yield self.revisions.find({
        "toa": {
            "$lt": dttime,
        },
        "processed": False,
        "inProcess": None
    })

    if len(changes) > 0:
        yield self.set_all_revisions_to_in_process([change.get("id") for change in changes])

    raise Return(changes)
[ "def", "__get_pending_revisions", "(", "self", ")", ":", "dttime", "=", "time", ".", "mktime", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timetuple", "(", ")", ")", "changes", "=", "yield", "self", ".", "revisions", ".", "find", "(", "{", "\"toa\"", ":", "{", "\"$lt\"", ":", "dttime", ",", "}", ",", "\"processed\"", ":", "False", ",", "\"inProcess\"", ":", "None", "}", ")", "if", "len", "(", "changes", ")", ">", "0", ":", "yield", "self", ".", "set_all_revisions_to_in_process", "(", "[", "change", ".", "get", "(", "\"id\"", ")", "for", "change", "in", "changes", "]", ")", "raise", "Return", "(", "changes", ")" ]
Get all the pending revisions after the current time :return: A list of revisions :rtype: list
[ "Get", "all", "the", "pending", "revisions", "after", "the", "current", "time" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L67-L86
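For context, the filter above selects only revisions whose epoch `toa` has already passed, that are unprocessed, and that no worker has claimed. A minimal standalone sketch of building that filter, using only the field names visible in the query:

import datetime
import time

# Current time as a UNIX timestamp, computed the same way the manager does
now = time.mktime(datetime.datetime.now().timetuple())

# Revisions whose time of action has passed, not yet processed,
# and not claimed by another scheduler worker
pending_filter = {
    "toa": {"$lt": now},
    "processed": False,
    "inProcess": None,
}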
243,501
urbn/Caesium
caesium/document.py
AsyncRevisionStackManager.publish_for_collection
python
def publish_for_collection(self, collection_name):
    """
    Run the publishing operations for a given collection

    :param str collection_name:
    """
    self.revisions = BaseAsyncMotorDocument("%s_revisions" % collection_name, self.settings)

    changes = yield self.__get_pending_revisions()

    if len(changes) > 0:
        self.logger.info("%s revisions will be actioned" % len(changes))

    for change in changes:
        try:
            self.logger.info("Applying %s action %s - %s to document: %s/%s" % (
                change.get("action"),
                change.get("id"),
                change.get("meta", {}).get("comment", "No Comment"),
                change.get("collection"),
                change.get("master_id")))

            stack = AsyncSchedulableDocumentRevisionStack(
                change.get("collection"),
                self.settings,
                master_id=change.get("master_id")
            )

            revision = yield stack.pop()

            self.logger.debug(revision)

        except Exception as ex:
            self.logger.error(ex)
[ "def", "publish_for_collection", "(", "self", ",", "collection_name", ")", ":", "self", ".", "revisions", "=", "BaseAsyncMotorDocument", "(", "\"%s_revisions\"", "%", "collection_name", ",", "self", ".", "settings", ")", "changes", "=", "yield", "self", ".", "__get_pending_revisions", "(", ")", "if", "len", "(", "changes", ")", ">", "0", ":", "self", ".", "logger", ".", "info", "(", "\"%s revisions will be actioned\"", "%", "len", "(", "changes", ")", ")", "for", "change", "in", "changes", ":", "try", ":", "self", ".", "logger", ".", "info", "(", "\"Applying %s action %s - %s to document: %s/%s\"", "%", "(", "change", ".", "get", "(", "\"action\"", ")", ",", "change", ".", "get", "(", "\"id\"", ")", ",", "change", ".", "get", "(", "\"meta\"", ",", "{", "}", ")", ".", "get", "(", "\"comment\"", ",", "\"No Comment\"", ")", ",", "change", ".", "get", "(", "\"collection\"", ")", ",", "change", ".", "get", "(", "\"master_id\"", ")", ")", ")", "stack", "=", "AsyncSchedulableDocumentRevisionStack", "(", "change", ".", "get", "(", "\"collection\"", ")", ",", "self", ".", "settings", ",", "master_id", "=", "change", ".", "get", "(", "\"master_id\"", ")", ")", "revision", "=", "yield", "stack", ".", "pop", "(", ")", "self", ".", "logger", ".", "debug", "(", "revision", ")", "except", "Exception", "as", "ex", ":", "self", ".", "logger", ".", "error", "(", "ex", ")" ]
Run the publishing operations for a given collection :param str collection_name:
[ "Run", "the", "publishing", "operations", "for", "a", "given", "collection" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L89-L118
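A sketch of driving this coroutine from a Tornado IOLoop. The AsyncRevisionStackManager constructor arguments and the settings contents are assumptions here, since only the method body appears above:

from tornado import gen, ioloop

@gen.coroutine
def publish_once(settings):
    manager = AsyncRevisionStackManager(settings)  # hypothetical constructor call
    yield manager.publish_for_collection("articles")  # "articles" is a placeholder collection

ioloop.IOLoop.current().run_sync(lambda: publish_once({}))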
243,502
urbn/Caesium
caesium/document.py
AsyncSchedulableDocumentRevisionStack.__update_action
python
def __update_action(self, revision):
    """Update a master document and revision history document

    :param dict revision: The revision dictionary
    """
    patch = revision.get("patch")

    if patch.get("_id"):
        del patch["_id"]

    update_response = yield self.collection.patch(revision.get("master_id"),
                                                  self.__make_storeable_patch_patchable(patch))

    if update_response.get("n") == 0:
        raise RevisionNotFoundException()
[ "def", "__update_action", "(", "self", ",", "revision", ")", ":", "patch", "=", "revision", ".", "get", "(", "\"patch\"", ")", "if", "patch", ".", "get", "(", "\"_id\"", ")", ":", "del", "patch", "[", "\"_id\"", "]", "update_response", "=", "yield", "self", ".", "collection", ".", "patch", "(", "revision", ".", "get", "(", "\"master_id\"", ")", ",", "self", ".", "__make_storeable_patch_patchable", "(", "patch", ")", ")", "if", "update_response", ".", "get", "(", "\"n\"", ")", "==", "0", ":", "raise", "RevisionNotFoundException", "(", ")" ]
Update a master document and revision history document :param dict revision: The revision dictionary
[ "Update", "a", "master", "document", "and", "revision", "history", "document" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L181-L194
243,503
urbn/Caesium
caesium/document.py
AsyncSchedulableDocumentRevisionStack.__insert_action
python
def __insert_action(self, revision):
    """
    Handle the insert action type: create the new document in this collection.
    This allows you to stage the creation of an object.

    :param dict revision: The revision dictionary
    """
    revision["patch"]["_id"] = ObjectId(revision.get("master_id"))

    insert_response = yield self.collection.insert(revision.get("patch"))

    if not isinstance(insert_response, str):
        raise DocumentRevisionInsertFailed()
[ "def", "__insert_action", "(", "self", ",", "revision", ")", ":", "revision", "[", "\"patch\"", "]", "[", "\"_id\"", "]", "=", "ObjectId", "(", "revision", ".", "get", "(", "\"master_id\"", ")", ")", "insert_response", "=", "yield", "self", ".", "collection", ".", "insert", "(", "revision", ".", "get", "(", "\"patch\"", ")", ")", "if", "not", "isinstance", "(", "insert_response", ",", "str", ")", ":", "raise", "DocumentRevisionInsertFailed", "(", ")" ]
Handle the insert action type. Creates new document to be created in this collection. This allows you to stage a creation of an object :param dict revision: The revision dictionary
[ "Handle", "the", "insert", "action", "type", "." ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L197-L213
243,504
urbn/Caesium
caesium/document.py
AsyncSchedulableDocumentRevisionStack.__delete_action
python
def __delete_action(self, revision):
    """
    Handle a delete action to a particular master id via the revision.

    :param dict revision:
    :return:
    """
    delete_response = yield self.collection.delete(revision.get("master_id"))

    if delete_response.get("n") == 0:
        raise DocumentRevisionDeleteFailed()
[ "def", "__delete_action", "(", "self", ",", "revision", ")", ":", "delete_response", "=", "yield", "self", ".", "collection", ".", "delete", "(", "revision", ".", "get", "(", "\"master_id\"", ")", ")", "if", "delete_response", ".", "get", "(", "\"n\"", ")", "==", "0", ":", "raise", "DocumentRevisionDeleteFailed", "(", ")" ]
Handle a delete action to a partiular master id via the revision. :param dict revision: :return:
[ "Handle", "a", "delete", "action", "to", "a", "partiular", "master", "id", "via", "the", "revision", "." ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L216-L225
243,505
urbn/Caesium
caesium/document.py
AsyncSchedulableDocumentRevisionStack.pop
python
def pop(self):
    """Pop the top revision off the stack back onto the collection at the given id. This method applies the action.

    Note: This assumes you don't have two revisions scheduled closer than a single scheduling cycle.
    """
    revisions = yield self.list()

    if len(revisions) > 0:

        revision = revisions[0]

        # Update type action
        if revision.get("action") == self.UPDATE_ACTION:
            try:
                yield self.__update_action(revision)
            except Exception as ex:
                self.logger.error(ex)

        # Insert type update
        if revision.get("action") == self.INSERT_ACTION:
            try:
                yield self.__insert_action(revision)
            except Exception as ex:
                self.logger.error(ex)

        # Get the updated object for attachment to the snapshot
        snapshot_object = yield self.collection.find_one_by_id(revision.get("master_id"))

        # Handle delete action here
        if revision.get("action") == self.DELETE_ACTION:
            try:
                yield self.__delete_action(revision)
            except Exception as ex:
                self.logger.error(ex)

            snapshot_object = None

        # Update the revision to be in a post-process state including snapshot
        revision_update_response = yield self.revisions.patch(revision.get("id"), {
            "processed": True,
            "snapshot": snapshot_object,
            "inProcess": False
        })

        if revision_update_response.get("n") == 0:
            raise RevisionUpdateFailed(msg="revision document update failed")

        revision = yield self.revisions.find_one_by_id(revision.get("id"))

        # TODO: Make this callback method something that can be passed in. This was used in
        # the original implementation to send back to the client via websocket
        # revision_success.send('revision_success', type="RevisionSuccess", data=revision)

        raise Return(revision)

    raise Return(None)
[ "def", "pop", "(", "self", ")", ":", "revisions", "=", "yield", "self", ".", "list", "(", ")", "if", "len", "(", "revisions", ")", ">", "0", ":", "revision", "=", "revisions", "[", "0", "]", "# Update type action", "if", "revision", ".", "get", "(", "\"action\"", ")", "==", "self", ".", "UPDATE_ACTION", ":", "try", ":", "yield", "self", ".", "__update_action", "(", "revision", ")", "except", "Exception", "as", "ex", ":", "self", ".", "logger", ".", "error", "(", "ex", ")", "# Insert type update", "if", "revision", ".", "get", "(", "\"action\"", ")", "==", "self", ".", "INSERT_ACTION", ":", "try", ":", "yield", "self", ".", "__insert_action", "(", "revision", ")", "except", "Exception", "as", "ex", ":", "self", ".", "logger", ".", "error", "(", "ex", ")", "#Get the updated object for attachment to the snapshot", "snapshot_object", "=", "yield", "self", ".", "collection", ".", "find_one_by_id", "(", "revision", ".", "get", "(", "\"master_id\"", ")", ")", "#Handle delete action here", "if", "revision", ".", "get", "(", "\"action\"", ")", "==", "self", ".", "DELETE_ACTION", ":", "try", ":", "yield", "self", ".", "__delete_action", "(", "revision", ")", "except", "Exception", "as", "ex", ":", "self", ".", "logger", ".", "error", "(", "ex", ")", "snapshot_object", "=", "None", "#Update the revision to be in a post-process state including snapshot", "revision_update_response", "=", "yield", "self", ".", "revisions", ".", "patch", "(", "revision", ".", "get", "(", "\"id\"", ")", ",", "{", "\"processed\"", ":", "True", ",", "\"snapshot\"", ":", "snapshot_object", ",", "\"inProcess\"", ":", "False", "}", ")", "if", "revision_update_response", ".", "get", "(", "\"n\"", ")", "==", "0", ":", "raise", "RevisionUpdateFailed", "(", "msg", "=", "\"revision document update failed\"", ")", "revision", "=", "yield", "self", ".", "revisions", ".", "find_one_by_id", "(", "revision", ".", "get", "(", "\"id\"", ")", ")", "#TODO: Make this callback method something that can be passed in. This was used in", "#the original implementation to send back to the client via websocket", "#revision_success.send('revision_success', type=\"RevisionSuccess\", data=revision)", "raise", "Return", "(", "revision", ")", "raise", "Return", "(", "None", ")" ]
Pop the top revision off the stack back onto the collection at the given id. This method applies the action. Note: This assumes you don't have two revisions scheduled closer than a single scheduling cycle.
[ "Pop", "the", "top", "revision", "off", "the", "stack", "back", "onto", "the", "collection", "at", "the", "given", "id", ".", "This", "method", "applies", "the", "action", "." ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L228-L285
243,506
urbn/Caesium
caesium/document.py
AsyncSchedulableDocumentRevisionStack.__make_patch_storeable
python
def __make_patch_storeable(self, patch):
    """Replace all dots with pipes in key names; MongoDB does not allow dots in stored key names.

    :param dict patch: The patch that needs to be made storeable and applied in the future
    """
    new_patch = {}

    for key in patch:
        new_patch[key.replace(".", "|")] = patch[key]

    return new_patch
[ "def", "__make_patch_storeable", "(", "self", ",", "patch", ")", ":", "new_patch", "=", "{", "}", "for", "key", "in", "patch", ":", "new_patch", "[", "key", ".", "replace", "(", "\".\"", ",", "\"|\"", ")", "]", "=", "patch", "[", "key", "]", "return", "new_patch" ]
Replace all dots with pipes in key names, mongo doesn't like to store keys with dots. :param dict patch: The patch that needs to be made storeable and applied in the future
[ "Replace", "all", "dots", "with", "pipes", "in", "key", "names", "mongo", "doesn", "t", "like", "to", "store", "keys", "with", "dots", "." ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L287-L296
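A tiny, self-contained illustration of the dot-to-pipe round trip this helper performs; the sample keys are invented, and the inverse transform is what __make_storeable_patch_patchable presumably applies:

# Keys containing dots, as a patch dictionary might hold
patch = {"author.name": "jdoe", "meta.tags": ["news"]}

# Storeable form, as __make_patch_storeable produces it
storeable = {k.replace(".", "|"): v for k, v in patch.items()}
# -> {"author|name": "jdoe", "meta|tags": ["news"]}

# Reversing the substitution recovers the original keys
patchable = {k.replace("|", "."): v for k, v in storeable.items()}
assert patchable == patch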
243,507
urbn/Caesium
caesium/document.py
AsyncSchedulableDocumentRevisionStack.push
python
def push(self, patch=None, toa=None, meta=None):
    """Push a change onto the revision stack for this ObjectId. Pushing onto the stack is how you get revisions to be staged and scheduled for some future time.

    :param dict patch: None denotes delete
    :param int toa: Time of action
    :param dict meta: The meta data for this action
    """
    if not meta:
        meta = {}

    if not toa:
        toa = time.mktime(datetime.datetime.now().timetuple())

    if not isinstance(toa, int):
        toa = int(toa)

    # Documents should be stored in bson formats
    if isinstance(patch, dict):
        patch = self.revisions._dictionary_to_cursor(patch)

    action = None

    if isinstance(patch, type(None)):
        action = self.DELETE_ACTION

    elif self.master_id and isinstance(patch, dict):
        action = self.UPDATE_ACTION
        patch = self.__make_patch_storeable(patch)
        yield self._lazy_migration(meta=copy.deepcopy(meta), toa=toa - 1)

    elif not self.master_id and isinstance(patch, dict):
        # Scheduled inserts will not have an object ID and one should be generated
        action = self.INSERT_ACTION
        patch["_id"] = ObjectId()
        self.master_id = patch["_id"].__str__()

    elif not action:
        raise RevisionActionNotValid()

    # We shall never store the _id in a patch dictionary
    if patch and patch.get("_id"):
        del patch["_id"]

    change = {
        "toa": toa,
        "processed": False,
        "collection": self.collection_name,
        "master_id": self.master_id,
        "action": action,
        "patch": None if action == self.DELETE_ACTION else self.collection._dictionary_to_cursor(patch),
        "meta": meta
    }

    jsonschema.validate(change, self.SCHEMA)

    id = yield self.revisions.insert(change)

    raise Return(id)
[ "def", "push", "(", "self", ",", "patch", "=", "None", ",", "toa", "=", "None", ",", "meta", "=", "None", ")", ":", "if", "not", "meta", ":", "meta", "=", "{", "}", "if", "not", "toa", ":", "toa", "=", "time", ".", "mktime", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timetuple", "(", ")", ")", "if", "not", "isinstance", "(", "toa", ",", "int", ")", ":", "toa", "=", "int", "(", "toa", ")", "#Documents should be stored in bson formats", "if", "isinstance", "(", "patch", ",", "dict", ")", ":", "patch", "=", "self", ".", "revisions", ".", "_dictionary_to_cursor", "(", "patch", ")", "action", "=", "None", "if", "isinstance", "(", "patch", ",", "type", "(", "None", ")", ")", ":", "action", "=", "self", ".", "DELETE_ACTION", "elif", "self", ".", "master_id", "and", "isinstance", "(", "patch", ",", "dict", ")", ":", "action", "=", "self", ".", "UPDATE_ACTION", "patch", "=", "self", ".", "__make_patch_storeable", "(", "patch", ")", "yield", "self", ".", "_lazy_migration", "(", "meta", "=", "copy", ".", "deepcopy", "(", "meta", ")", ",", "toa", "=", "toa", "-", "1", ")", "elif", "not", "self", ".", "master_id", "and", "isinstance", "(", "patch", ",", "dict", ")", ":", "#Scheduled inserts will not have an object ID and one should be generated", "action", "=", "self", ".", "INSERT_ACTION", "patch", "[", "\"_id\"", "]", "=", "ObjectId", "(", ")", "self", ".", "master_id", "=", "patch", "[", "\"_id\"", "]", ".", "__str__", "(", ")", "elif", "not", "action", ":", "raise", "RevisionActionNotValid", "(", ")", "# We shall never store the _id to a patch dictionary", "if", "patch", "and", "patch", ".", "get", "(", "\"_id\"", ")", ":", "del", "patch", "[", "\"_id\"", "]", "change", "=", "{", "\"toa\"", ":", "toa", ",", "\"processed\"", ":", "False", ",", "\"collection\"", ":", "self", ".", "collection_name", ",", "\"master_id\"", ":", "self", ".", "master_id", ",", "\"action\"", ":", "action", ",", "\"patch\"", ":", "None", "if", "action", "==", "self", ".", "DELETE_ACTION", "else", "self", ".", "collection", ".", "_dictionary_to_cursor", "(", "patch", ")", ",", "\"meta\"", ":", "meta", "}", "jsonschema", ".", "validate", "(", "change", ",", "self", ".", "SCHEMA", ")", "id", "=", "yield", "self", ".", "revisions", ".", "insert", "(", "change", ")", "raise", "Return", "(", "id", ")" ]
Push a change on to the revision stack for this ObjectId. Pushing onto the stack is how you get revisions to be staged and scheduled for some future time. :param dict patch: None Denotes Delete :param int toa: Time of action :param dict meta: The meta data for this action
[ "Push", "a", "change", "on", "to", "the", "revision", "stack", "for", "this", "ObjectId", ".", "Pushing", "onto", "the", "stack", "is", "how", "you", "get", "revisions", "to", "be", "staged", "and", "scheduled", "for", "some", "future", "time", "." ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L312-L370
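A hedged usage sketch: scheduling an update one hour out on an existing document. The constructor arguments mirror the call visible in publish_for_collection above; the collection name, settings, and master id are placeholders:

import time
from tornado import gen

@gen.coroutine
def schedule_rename(settings, master_id):
    stack = AsyncSchedulableDocumentRevisionStack("articles", settings, master_id=master_id)

    # A dict patch schedules an UPDATE; passing patch=None would schedule a DELETE
    revision_id = yield stack.push(
        {"title": "New headline"},
        toa=int(time.time()) + 3600,  # time of action: one hour from now
        meta={"comment": "scheduled rename"},
    )
    raise gen.Return(revision_id)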
243,508
urbn/Caesium
caesium/document.py
AsyncSchedulableDocumentRevisionStack.list
python
def list(self, toa=None, show_history=False):
    """Return all revisions for this stack

    :param int toa: The time of action as a UTC timestamp
    :param bool show_history: Whether to show historical revisions
    """
    if not toa:
        toa = time.mktime(datetime.datetime.now().timetuple())

    query = {
        "$query": {
            "master_id": self.master_id,
            "processed": show_history,
            "toa": {"$lte": toa}
        },
        "$orderby": {
            "toa": 1
        }
    }

    revisions = yield self.revisions.find(query)

    raise Return(revisions)
[ "def", "list", "(", "self", ",", "toa", "=", "None", ",", "show_history", "=", "False", ")", ":", "if", "not", "toa", ":", "toa", "=", "time", ".", "mktime", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timetuple", "(", ")", ")", "query", "=", "{", "\"$query\"", ":", "{", "\"master_id\"", ":", "self", ".", "master_id", ",", "\"processed\"", ":", "show_history", ",", "\"toa\"", ":", "{", "\"$lte\"", ":", "toa", "}", "}", ",", "\"$orderby\"", ":", "{", "\"toa\"", ":", "1", "}", "}", "revisions", "=", "yield", "self", ".", "revisions", ".", "find", "(", "query", ")", "raise", "Return", "(", "revisions", ")" ]
Return all revisions for this stack :param int toa: The time of action as a UTC timestamp :param bool show_history: Whether to show historical revisions
[ "Return", "all", "revisions", "for", "this", "stack" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L373-L395
243,509
urbn/Caesium
caesium/document.py
AsyncSchedulableDocumentRevisionStack._lazy_migration
python
def _lazy_migration(self, patch=None, meta=None, toa=None):
    """
    Handle the case where revision scheduling is enabled on a collection that was previously not schedulable. This method will create the first revision for each object before it is ever used in the context of scheduling.

    :param dict patch: The patch that should be used
    :param dict meta: Meta data for this action
    :param int toa: The time of action
    :return: A legacy revision for a document that previously had none
    :rtype: list
    """
    objects = yield self.revisions.find({"master_id": self.master_id}, limit=1)

    if len(objects) > 0:
        raise Return(objects)

    if not patch:
        patch = yield self.collection.find_one_by_id(self.master_id)

    if not toa:
        toa = long(time.mktime(datetime.datetime.now().timetuple()))

    meta["comment"] = "This document was migrated automatically."

    if isinstance(patch, dict) and patch.get("id"):
        del patch["id"]

    if isinstance(patch, dict) and patch.get("_id"):
        del patch["_id"]

    # Here we separate patch and snapshot, and make sure that the snapshot looks like the master document
    snapshot = copy.deepcopy(patch)
    snapshot["id"] = self.master_id
    snapshot["published"] = self.settings.get("scheduler", {}).get("lazy_migrated_published_by_default", False)

    # If no objects are returned, this is some legacy object that needs a first revision
    # Create it here
    legacy_revision = {
        "toa": toa,
        "processed": True,
        "collection": self.collection_name,
        "master_id": self.master_id,
        "action": self.INSERT_ACTION,
        "patch": self.collection._dictionary_to_cursor(patch),
        "snapshot": snapshot,
        "meta": meta,
    }

    response = yield self.revisions.insert(legacy_revision)

    if isinstance(response, str):
        raise Return([legacy_revision])

    raise Return(None)
[ "def", "_lazy_migration", "(", "self", ",", "patch", "=", "None", ",", "meta", "=", "None", ",", "toa", "=", "None", ")", ":", "objects", "=", "yield", "self", ".", "revisions", ".", "find", "(", "{", "\"master_id\"", ":", "self", ".", "master_id", "}", ",", "limit", "=", "1", ")", "if", "len", "(", "objects", ")", ">", "0", ":", "raise", "Return", "(", "objects", ")", "if", "not", "patch", ":", "patch", "=", "yield", "self", ".", "collection", ".", "find_one_by_id", "(", "self", ".", "master_id", ")", "if", "not", "toa", ":", "toa", "=", "long", "(", "time", ".", "mktime", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timetuple", "(", ")", ")", ")", "meta", "[", "\"comment\"", "]", "=", "\"This document was migrated automatically.\"", "if", "isinstance", "(", "patch", ",", "dict", ")", "and", "patch", ".", "get", "(", "\"id\"", ")", ":", "del", "patch", "[", "\"id\"", "]", "if", "isinstance", "(", "patch", ",", "dict", ")", "and", "patch", ".", "get", "(", "\"_id\"", ")", ":", "del", "patch", "[", "\"_id\"", "]", "#Here we separate patch and snapshot, and make sure that the snapshot looks like the master document", "snapshot", "=", "copy", ".", "deepcopy", "(", "patch", ")", "snapshot", "[", "\"id\"", "]", "=", "self", ".", "master_id", "snapshot", "[", "\"published\"", "]", "=", "self", ".", "settings", ".", "get", "(", "\"scheduler\"", ",", "{", "}", ")", ".", "get", "(", "\"lazy_migrated_published_by_default\"", ",", "False", ")", "#If no objects are returned, this is some legacy object that needs a first revision", "#Create it here", "legacy_revision", "=", "{", "\"toa\"", ":", "toa", ",", "\"processed\"", ":", "True", ",", "\"collection\"", ":", "self", ".", "collection_name", ",", "\"master_id\"", ":", "self", ".", "master_id", ",", "\"action\"", ":", "self", ".", "INSERT_ACTION", ",", "\"patch\"", ":", "self", ".", "collection", ".", "_dictionary_to_cursor", "(", "patch", ")", ",", "\"snapshot\"", ":", "snapshot", ",", "\"meta\"", ":", "meta", ",", "}", "response", "=", "yield", "self", ".", "revisions", ".", "insert", "(", "legacy_revision", ")", "if", "isinstance", "(", "response", ",", "str", ")", ":", "raise", "Return", "(", "[", "legacy_revision", "]", ")", "raise", "Return", "(", "None", ")" ]
Handle when a revision scheduling is turned onto a collection that was previously not scheduleable. This method will create the first revision for each object before its every used in the context of scheduling. :param dict patch: The patch that should be used :param dict meta: Meta data for this action :param int toa: The time of action :return: A legacy revision for a document that was previously :rtype: list
[ "Handle", "when", "a", "revision", "scheduling", "is", "turned", "onto", "a", "collection", "that", "was", "previously", "not", "scheduleable", ".", "This", "method", "will", "create", "the", "first", "revision", "for", "each", "object", "before", "its", "every", "used", "in", "the", "context", "of", "scheduling", "." ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L399-L451
243,510
urbn/Caesium
caesium/document.py
AsyncSchedulableDocumentRevisionStack.__create_preview_object_base
python
def __create_preview_object_base(self, dct):
    """
    The starting point for a preview of a future object. This is the object to which future revisions will be iteratively applied.

    :param dict dct: The starting object dictionary
    :return: The preview object id
    :rtype: str
    """
    if dct.get("_id"):
        del dct["_id"]

    preview_object_id = yield self.previews.insert(dct)

    raise Return(preview_object_id)
[ "def", "__create_preview_object_base", "(", "self", ",", "dct", ")", ":", "if", "dct", ".", "get", "(", "\"_id\"", ")", ":", "del", "dct", "[", "\"_id\"", "]", "preview_object_id", "=", "yield", "self", ".", "previews", ".", "insert", "(", "dct", ")", "raise", "Return", "(", "preview_object_id", ")" ]
The starting point for a preview of a future object. This is the object which will have future revisions iterated and applied to. :param dict dct: The starting object dictionary :return: The preview object id :rtype: str
[ "The", "starting", "point", "for", "a", "preview", "of", "a", "future", "object", ".", "This", "is", "the", "object", "which", "will", "have", "future", "revisions", "iterated", "and", "applied", "to", "." ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L454-L469
243,511
urbn/Caesium
caesium/document.py
AsyncSchedulableDocumentRevisionStack.preview
python
def preview(self, revision_id):
    """Get an ephemeral preview of a revision with all revisions applied between it and the current state

    :param str revision_id: The ID of the revision at which you want to preview the master document.
    :return: A snapshot of a future state of the object
    :rtype: dict
    """
    target_revision = yield self.revisions.find_one_by_id(revision_id)

    if isinstance(target_revision.get("snapshot"), dict):
        raise Return(target_revision)

    preview_object = None

    if not isinstance(target_revision, dict):
        raise RevisionNotFound()

    revision_collection_client = BaseAsyncMotorDocument(target_revision.get("collection"), self.settings)

    self.master_id = target_revision.get("master_id")

    action = target_revision.get("action")

    if action == self.DELETE_ACTION:
        raise Return(preview_object)

    if action in [self.INSERT_ACTION, self.UPDATE_ACTION]:

        revisions = yield self.list(toa=target_revision.get("toa"))

        if len(revisions) == 0:
            raise NoRevisionsAvailable()

        first_revision = revisions[0]

        current_document = None

        if first_revision.get("action") == self.UPDATE_ACTION:
            current_document = yield revision_collection_client.find_one_by_id(target_revision.get("master_id"))

        elif first_revision.get("action") == self.INSERT_ACTION:
            # If we are doing an insert, the first revision patch is the current state
            current_document = first_revision.get("patch")

        if not current_document:
            raise RevisionNotFound()

        preview_id = yield self.__create_preview_object_base(current_document)

        for revision in revisions:
            patch = revision.get("patch")

            if patch.get("_id"):
                del patch["_id"]

            yield self.previews.patch(preview_id, self.__make_storeable_patch_patchable(patch))

        preview_object = yield self.previews.find_one_by_id(preview_id)

        preview_object["id"] = target_revision["id"]

        target_revision["snapshot"] = self.collection._obj_cursor_to_dictionary(preview_object)
        target_revision["snapshot"]["id"] = target_revision["master_id"]

        # Delete the last preview
        yield self.previews.delete(preview_id)

    raise Return(target_revision)
[ "def", "preview", "(", "self", ",", "revision_id", ")", ":", "target_revision", "=", "yield", "self", ".", "revisions", ".", "find_one_by_id", "(", "revision_id", ")", "if", "isinstance", "(", "target_revision", ".", "get", "(", "\"snapshot\"", ")", ",", "dict", ")", ":", "raise", "Return", "(", "target_revision", ")", "preview_object", "=", "None", "if", "not", "isinstance", "(", "target_revision", ",", "dict", ")", ":", "raise", "RevisionNotFound", "(", ")", "revision_collection_client", "=", "BaseAsyncMotorDocument", "(", "target_revision", ".", "get", "(", "\"collection\"", ")", ",", "self", ".", "settings", ")", "self", ".", "master_id", "=", "target_revision", ".", "get", "(", "\"master_id\"", ")", "action", "=", "target_revision", ".", "get", "(", "\"action\"", ")", "if", "action", "==", "self", ".", "DELETE_ACTION", ":", "raise", "Return", "(", "preview_object", ")", "if", "action", "in", "[", "self", ".", "INSERT_ACTION", ",", "self", ".", "UPDATE_ACTION", "]", ":", "revisions", "=", "yield", "self", ".", "list", "(", "toa", "=", "target_revision", ".", "get", "(", "\"toa\"", ")", ")", "if", "len", "(", "revisions", ")", "==", "0", ":", "raise", "NoRevisionsAvailable", "(", ")", "first_revision", "=", "revisions", "[", "0", "]", "current_document", "=", "None", "if", "first_revision", ".", "get", "(", "\"action\"", ")", "==", "self", ".", "UPDATE_ACTION", ":", "current_document", "=", "yield", "revision_collection_client", ".", "find_one_by_id", "(", "target_revision", ".", "get", "(", "\"master_id\"", ")", ")", "elif", "first_revision", ".", "get", "(", "\"action\"", ")", "==", "self", ".", "INSERT_ACTION", ":", "# If we are doing an insert, the first revision patch is the current state", "current_document", "=", "first_revision", ".", "get", "(", "\"patch\"", ")", "if", "not", "current_document", ":", "raise", "RevisionNotFound", "(", ")", "preview_id", "=", "yield", "self", ".", "__create_preview_object_base", "(", "current_document", ")", "for", "revision", "in", "revisions", ":", "patch", "=", "revision", ".", "get", "(", "\"patch\"", ")", "if", "patch", ".", "get", "(", "\"_id\"", ")", ":", "del", "patch", "[", "\"_id\"", "]", "yield", "self", ".", "previews", ".", "patch", "(", "preview_id", ",", "self", ".", "__make_storeable_patch_patchable", "(", "patch", ")", ")", "preview_object", "=", "yield", "self", ".", "previews", ".", "find_one_by_id", "(", "preview_id", ")", "preview_object", "[", "\"id\"", "]", "=", "target_revision", "[", "\"id\"", "]", "target_revision", "[", "\"snapshot\"", "]", "=", "self", ".", "collection", ".", "_obj_cursor_to_dictionary", "(", "preview_object", ")", "target_revision", "[", "\"snapshot\"", "]", "[", "\"id\"", "]", "=", "target_revision", "[", "\"master_id\"", "]", "# Delete the last preview", "yield", "self", ".", "previews", ".", "delete", "(", "preview_id", ")", "raise", "Return", "(", "target_revision", ")" ]
Get an ephemeral preview of a revision with all revisions applied between it and the current state :param str revision_id: The ID of the revision state you want to preview the master id at. :return: A snapshot of a future state of the object :rtype: dict
[ "Get", "an", "ephemeral", "preview", "of", "a", "revision", "with", "all", "revisions", "applied", "between", "it", "and", "the", "current", "state" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L472-L540
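A hedged sketch of previewing a scheduled revision before its toa arrives; revision_id would come from an earlier push, and the returned dict carries the ephemeral snapshot described in the docstring:

from tornado import gen

@gen.coroutine
def preview_scheduled(stack, revision_id):
    # The snapshot shows the document as it would look once every
    # revision up to the target's toa has been applied
    target = yield stack.preview(revision_id)
    raise gen.Return(target["snapshot"])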
243,512
urbn/Caesium
caesium/document.py
BaseAsyncMotorDocument.insert
python
def insert(self, dct, toa=None, comment=""):
    """Create a document

    :param dict dct:
    :param toa toa: Optional time of action, triggers this to be handled as a future insert action for a new document
    :param str comment: A comment
    :returns: The BSON id as a string
    :rtype: str
    """
    if self.schema:
        jsonschema.validate(dct, self.schema)

    bson_obj = yield self.collection.insert(dct)

    raise Return(bson_obj.__str__())
[ "def", "insert", "(", "self", ",", "dct", ",", "toa", "=", "None", ",", "comment", "=", "\"\"", ")", ":", "if", "self", ".", "schema", ":", "jsonschema", ".", "validate", "(", "dct", ",", "self", ".", "schema", ")", "bson_obj", "=", "yield", "self", ".", "collection", ".", "insert", "(", "dct", ")", "raise", "Return", "(", "bson_obj", ".", "__str__", "(", ")", ")" ]
Create a document :param dict dct: :param toa toa: Optional time of action, triggers this to be handled as a future insert action for a new document :param str comment: A comment :rtype str: :returns string bson id:
[ "Create", "a", "document" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L583-L597
243,513
urbn/Caesium
caesium/document.py
BaseAsyncMotorDocument.upsert
python
def upsert(self, _id, dct, attribute="_id"):
    """Update or Insert a new document

    :param str _id: The document id
    :param dict dct: The dictionary to set on the document
    :param str attribute: The attribute to query for to find the object to set this data on
    :returns: JSON Mongo client response including the "n" key to show number of objects affected
    """
    mongo_response = yield self.update(_id, dct, upsert=True, attribute=attribute)

    raise Return(mongo_response)
[ "def", "upsert", "(", "self", ",", "_id", ",", "dct", ",", "attribute", "=", "\"_id\"", ")", ":", "mongo_response", "=", "yield", "self", ".", "update", "(", "_id", ",", "dct", ",", "upsert", "=", "True", ",", "attribute", "=", "attribute", ")", "raise", "Return", "(", "mongo_response", ")" ]
Update or Insert a new document :param str _id: The document id :param dict dct: The dictionary to set on the document :param str attribute: The attribute to query for to find the object to set this data on :returns: JSON Mongo client response including the "n" key to show number of objects effected
[ "Update", "or", "Insert", "a", "new", "document" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L600-L611
243,514
urbn/Caesium
caesium/document.py
BaseAsyncMotorDocument.update
python
def update(self, predicate_value, dct, upsert=False, attribute="_id"):
    """Update an existing document

    :param predicate_value: The value of the predicate
    :param dict dct: The dictionary to update with
    :param bool upsert: Whether this is an upsert action
    :param str attribute: The attribute to query for to find the object to set this data on
    :returns: JSON Mongo client response including the "n" key to show number of objects affected
    """
    if self.schema:
        jsonschema.validate(dct, self.schema)

    if attribute == "_id" and not isinstance(predicate_value, ObjectId):
        predicate_value = ObjectId(predicate_value)

    predicate = {attribute: predicate_value}

    dct = self._dictionary_to_cursor(dct)

    mongo_response = yield self.collection.update(predicate, dct, upsert)

    raise Return(self._obj_cursor_to_dictionary(mongo_response))
[ "def", "update", "(", "self", ",", "predicate_value", ",", "dct", ",", "upsert", "=", "False", ",", "attribute", "=", "\"_id\"", ")", ":", "if", "self", ".", "schema", ":", "jsonschema", ".", "validate", "(", "dct", ",", "self", ".", "schema", ")", "if", "attribute", "==", "\"_id\"", "and", "not", "isinstance", "(", "predicate_value", ",", "ObjectId", ")", ":", "predicate_value", "=", "ObjectId", "(", "predicate_value", ")", "predicate", "=", "{", "attribute", ":", "predicate_value", "}", "dct", "=", "self", ".", "_dictionary_to_cursor", "(", "dct", ")", "mongo_response", "=", "yield", "self", ".", "collection", ".", "update", "(", "predicate", ",", "dct", ",", "upsert", ")", "raise", "Return", "(", "self", ".", "_obj_cursor_to_dictionary", "(", "mongo_response", ")", ")" ]
Update an existing document :param predicate_value: The value of the predicate :param dict dct: The dictionary to update with :param bool upsert: Whether this is an upsert action :param str attribute: The attribute to query for to find the object to set this data ond :returns: JSON Mongo client response including the "n" key to show number of objects effected
[ "Update", "an", "existing", "document" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L614-L636
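A short usage sketch of the "n" convention shared by update, upsert, and delete above; doc is an assumed BaseAsyncMotorDocument instance and the failure handling is illustrative:

from tornado import gen

@gen.coroutine
def apply_update(doc, _id, fields):
    # The response follows the legacy Mongo write-result shape,
    # where "n" counts the documents matched by the predicate
    response = yield doc.update(_id, fields)
    if response.get("n") == 0:
        raise Exception("no document matched id %s" % _id)
    raise gen.Return(response)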
243,515
urbn/Caesium
caesium/document.py
BaseAsyncMotorDocument.delete
python
def delete(self, _id):
    """Delete a document or create a DELETE revision

    :param str _id: The ID of the document to be deleted
    :returns: JSON Mongo client response including the "n" key to show number of objects affected
    """
    mongo_response = yield self.collection.remove({"_id": ObjectId(_id)})

    raise Return(mongo_response)
[ "def", "delete", "(", "self", ",", "_id", ")", ":", "mongo_response", "=", "yield", "self", ".", "collection", ".", "remove", "(", "{", "\"_id\"", ":", "ObjectId", "(", "_id", ")", "}", ")", "raise", "Return", "(", "mongo_response", ")" ]
Delete a document or create a DELETE revision :param str _id: The ID of the document to be deleted :returns: JSON Mongo client response including the "n" key to show number of objects effected
[ "Delete", "a", "document", "or", "create", "a", "DELETE", "revision" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L666-L674
243,516
urbn/Caesium
caesium/document.py
BaseAsyncMotorDocument.find_one
python
def find_one(self, query):
    """Find one wrapper with conversion to dictionary

    :param dict query: A Mongo query
    """
    mongo_response = yield self.collection.find_one(query)

    raise Return(self._obj_cursor_to_dictionary(mongo_response))
[ "def", "find_one", "(", "self", ",", "query", ")", ":", "mongo_response", "=", "yield", "self", ".", "collection", ".", "find_one", "(", "query", ")", "raise", "Return", "(", "self", ".", "_obj_cursor_to_dictionary", "(", "mongo_response", ")", ")" ]
Find one wrapper with conversion to dictionary :param dict query: A Mongo query
[ "Find", "one", "wrapper", "with", "conversion", "to", "dictionary" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L677-L683
243,517
urbn/Caesium
caesium/document.py
BaseAsyncMotorDocument.find
python
def find(self, query, orderby=None, order_by_direction=1, page=0, limit=0):
    """Find a document by any criteria

    :param dict query: The query to perform
    :param str orderby: The attribute to order results by
    :param int order_by_direction: 1 or -1
    :param int page: The page to return
    :param int limit: Number of results per page
    :returns: A list of results
    :rtype: list
    """
    cursor = self.collection.find(query)

    if orderby:
        cursor.sort(orderby, order_by_direction)

    cursor.skip(page * limit).limit(limit)

    results = []

    while (yield cursor.fetch_next):
        results.append(self._obj_cursor_to_dictionary(cursor.next_object()))

    raise Return(results)
[ "def", "find", "(", "self", ",", "query", ",", "orderby", "=", "None", ",", "order_by_direction", "=", "1", ",", "page", "=", "0", ",", "limit", "=", "0", ")", ":", "cursor", "=", "self", ".", "collection", ".", "find", "(", "query", ")", "if", "orderby", ":", "cursor", ".", "sort", "(", "orderby", ",", "order_by_direction", ")", "cursor", ".", "skip", "(", "page", "*", "limit", ")", ".", "limit", "(", "limit", ")", "results", "=", "[", "]", "while", "(", "yield", "cursor", ".", "fetch_next", ")", ":", "results", ".", "append", "(", "self", ".", "_obj_cursor_to_dictionary", "(", "cursor", ".", "next_object", "(", ")", ")", ")", "raise", "Return", "(", "results", ")" ]
Find a document by any criteria :param dict query: The query to perform :param str orderby: The attribute to order results by :param int order_by_direction: 1 or -1 :param int page: The page to return :param int limit: Number of results per page :returns: A list of results :rtype: list
[ "Find", "a", "document", "by", "any", "criteria" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L686-L710
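A brief sketch of the paging parameters, grounded in the signature above; the query and collection are placeholders:

from tornado import gen

@gen.coroutine
def second_page_of_pending(doc):
    # Fetch documents 20-39, oldest time of action first;
    # internally the cursor skips page * limit documents
    results = yield doc.find(
        {"processed": False},
        orderby="toa",
        order_by_direction=1,
        page=1,
        limit=20,
    )
    raise gen.Return(results)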
243,518
urbn/Caesium
caesium/document.py
BaseAsyncMotorDocument.find_one_by_id
python
def find_one_by_id(self, _id):
    """
    Find a single document by id

    :param str _id: BSON string representation of the Id
    :return: a single object
    :rtype: dict
    """
    document = (yield self.collection.find_one({"_id": ObjectId(_id)}))

    raise Return(self._obj_cursor_to_dictionary(document))
[ "def", "find_one_by_id", "(", "self", ",", "_id", ")", ":", "document", "=", "(", "yield", "self", ".", "collection", ".", "find_one", "(", "{", "\"_id\"", ":", "ObjectId", "(", "_id", ")", "}", ")", ")", "raise", "Return", "(", "self", ".", "_obj_cursor_to_dictionary", "(", "document", ")", ")" ]
Find a single document by id :param str _id: BSON string repreentation of the Id :return: a signle object :rtype: dict
[ "Find", "a", "single", "document", "by", "id" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L713-L723
243,519
urbn/Caesium
caesium/document.py
BaseAsyncMotorDocument.create_index
python
def create_index(self, index, index_type=GEO2D):
    """Create an index on a given attribute

    :param str index: Attribute to set index on
    :param str index_type: See PyMongo index types for further information, defaults to GEO2D index.
    """
    self.logger.info("Adding %s index to stores on attribute: %s" % (index_type, index))

    yield self.collection.create_index([(index, index_type)])
[ "def", "create_index", "(", "self", ",", "index", ",", "index_type", "=", "GEO2D", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Adding %s index to stores on attribute: %s\"", "%", "(", "index_type", ",", "index", ")", ")", "yield", "self", ".", "collection", ".", "create_index", "(", "[", "(", "index", ",", "index_type", ")", "]", ")" ]
Create an index on a given attribute :param str index: Attribute to set index on :param str index_type: See PyMongo index types for further information, defaults to GEO2D index.
[ "Create", "an", "index", "on", "a", "given", "attribute" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L726-L733
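For example, a 2d index on the loc attribute is what the $within/$center query in location_based_search below depends on; a hedged sketch with doc as an assumed BaseAsyncMotorDocument instance:

from tornado import gen

@gen.coroutine
def ensure_geo_index(doc):
    # Uses the GEO2D default index type from the signature above
    yield doc.create_index("loc")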
243,520
urbn/Caesium
caesium/document.py
BaseAsyncMotorDocument.location_based_search
python
def location_based_search(self, lng, lat, distance, unit="miles", attribute_map=None, page=0, limit=50):
    """Search based on location and other attribute filters

    :param long lng: Longitude parameter
    :param long lat: Latitude parameter
    :param int distance: The radius of the query
    :param str unit: The unit of measure for the query, defaults to miles
    :param dict attribute_map: Additional attributes to apply to the location based query
    :param int page: The page to return
    :param int limit: Number of results per page
    :returns: List of objects
    :rtype: list
    """
    # Determine what type of radian conversion you want based on a unit of measure
    if unit == "miles":
        distance = float(distance/69)
    else:
        distance = float(distance/111.045)

    # Start with geospatial query
    query = {
        "loc": {
            "$within": {
                "$center": [[lng, lat], distance]}
        }
    }

    # Allow querying additional attributes
    if attribute_map:
        query = dict(query.items() + attribute_map.items())

    results = yield self.find(query, page=page, limit=limit)

    raise Return(self._list_cursor_to_json(results))
[ "def", "location_based_search", "(", "self", ",", "lng", ",", "lat", ",", "distance", ",", "unit", "=", "\"miles\"", ",", "attribute_map", "=", "None", ",", "page", "=", "0", ",", "limit", "=", "50", ")", ":", "#Determine what type of radian conversion you want base on a unit of measure", "if", "unit", "==", "\"miles\"", ":", "distance", "=", "float", "(", "distance", "/", "69", ")", "else", ":", "distance", "=", "float", "(", "distance", "/", "111.045", ")", "#Start with geospatial query", "query", "=", "{", "\"loc\"", ":", "{", "\"$within\"", ":", "{", "\"$center\"", ":", "[", "[", "lng", ",", "lat", "]", ",", "distance", "]", "}", "}", "}", "#Allow querying additional attributes", "if", "attribute_map", ":", "query", "=", "dict", "(", "query", ".", "items", "(", ")", "+", "attribute_map", ".", "items", "(", ")", ")", "results", "=", "yield", "self", ".", "find", "(", "query", ",", "page", "=", "page", ",", "limit", "=", "limit", ")", "raise", "Return", "(", "self", ".", "_list_cursor_to_json", "(", "results", ")", ")" ]
Search based on location and other attribute filters

:param long lng: Longitude parameter
:param long lat: Latitude parameter
:param int distance: The radius of the query
:param str unit: The unit of measure for the query, defaults to miles
:param dict attribute_map: Additional attributes to apply to the location based query
:param int page: The page to return
:param int limit: Number of results per page
:returns: List of objects
:rtype: list
[ "Search", "based", "on", "location", "and", "other", "attribute", "filters" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L736-L770
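The unit conversion divides miles by 69 (roughly the miles spanned by one degree of latitude) or kilometres by 111.045, so the $center radius ends up in degrees, which is what a legacy 2d index expects. Note also that dict(query.items() + attribute_map.items()) is a Python 2 idiom; under Python 3 the two dicts would have to be merged differently. The conversion in isolation:

def radius_in_degrees(distance, unit="miles"):
    # ~69 miles, or ~111.045 km, per degree of latitude
    return distance / 69.0 if unit == "miles" else distance / 111.045

print(radius_in_degrees(10))        # ~0.145 degrees for a 10 mile radius
print(radius_in_degrees(10, "km"))  # ~0.090 degrees for a 10 km radius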
243,521
urbn/Caesium
caesium/document.py
BSONEncoder.default
def default(self, obj, **kwargs):
    """Handles the adapting of special types from mongo"""
    if isinstance(obj, datetime.datetime):
        return time.mktime(obj.timetuple())
    if isinstance(obj, Timestamp):
        return obj.time
    if isinstance(obj, ObjectId):
        return obj.__str__()

    return JSONEncoder.default(self, obj)
python
def default(self, obj, **kwargs):
    """Handles the adapting of special types from mongo"""
    if isinstance(obj, datetime.datetime):
        return time.mktime(obj.timetuple())
    if isinstance(obj, Timestamp):
        return obj.time
    if isinstance(obj, ObjectId):
        return obj.__str__()

    return JSONEncoder.default(self, obj)
[ "def", "default", "(", "self", ",", "obj", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", ":", "return", "time", ".", "mktime", "(", "obj", ".", "timetuple", "(", ")", ")", "if", "isinstance", "(", "obj", ",", "Timestamp", ")", ":", "return", "obj", ".", "time", "if", "isinstance", "(", "obj", ",", "ObjectId", ")", ":", "return", "obj", ".", "__str__", "(", ")", "return", "JSONEncoder", ".", "default", "(", "self", ",", "obj", ")" ]
Handles the adapting of special types from mongo
[ "Handles", "the", "adapting", "of", "special", "types", "from", "mongo" ]
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L813-L824
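The encoder plugs into the standard json module through the cls argument; a minimal sketch, assuming bson is installed and BSONEncoder is importable from caesium.document:

import json
import datetime
from bson.objectid import ObjectId
from caesium.document import BSONEncoder

doc = {"_id": ObjectId(), "created": datetime.datetime(2014, 1, 2, 3, 4, 5)}
print(json.dumps(doc, cls=BSONEncoder))
# The ObjectId serializes as its hex string and the datetime as a
# Unix timestamp float.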
243,522
etcher-be/emiz
emiz/avwx/remarks.py
_tdec
def _tdec(code: str, unit: str = 'C') -> str:
    """
    Translates a 4-digit decimal temperature representation

    Ex: 1045 -> -4.5°C
        0237 -> 23.7°C
    """
    ret = f"{'-' if code[0] == '1' else ''}{int(code[1:3])}.{code[3]}"
    if unit:
        ret += f'°{unit}'
    return ret
python
def _tdec(code: str, unit: str = 'C') -> str:
    """
    Translates a 4-digit decimal temperature representation

    Ex: 1045 -> -4.5°C
        0237 -> 23.7°C
    """
    ret = f"{'-' if code[0] == '1' else ''}{int(code[1:3])}.{code[3]}"
    if unit:
        ret += f'°{unit}'
    return ret
[ "def", "_tdec", "(", "code", ":", "str", ",", "unit", ":", "str", "=", "'C'", ")", "->", "str", ":", "ret", "=", "f\"{'-' if code[0] == '1' else ''}{int(code[1:3])}.{code[3]}\"", "if", "unit", ":", "ret", "+=", "f'°{unit}'", "return", "ret" ]
Translates a 4-digit decimal temperature representation

Ex: 1045 -> -4.5°C
    0237 -> 23.7°C
[ "Translates", "a", "4", "-", "digit", "decimal", "temperature", "representation" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/remarks.py#L11-L20
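The 4-digit group packs sign, whole degrees, and tenths: the first digit is a sign flag ('1' means negative), the middle two are whole degrees, and the last is tenths. Tracing the docstring examples:

assert _tdec('1045') == '-4.5°C'      # '1' -> negative, '04' -> 4, '5' -> .5
assert _tdec('0237') == '23.7°C'      # '0' -> positive, '23' -> 23, '7' -> .7
assert _tdec('0237', None) == '23.7'  # unit suppressed, as parse() below does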
243,523
etcher-be/emiz
emiz/avwx/remarks.py
pressure_tendency
def pressure_tendency(code: str, unit: str = 'mb') -> str:
    """
    Translates a 5-digit pressure outlook code

    Ex: 50123 -> 12.3 mb: Increasing, then decreasing
    """
    width, precision = int(code[2:4]), code[4]
    return ('3-hour pressure difference: +/- '
            f'{width}.{precision} {unit} - {PRESSURE_TENDENCIES[code[1]]}')
python
def pressure_tendency(code: str, unit: str = 'mb') -> str:
    """
    Translates a 5-digit pressure outlook code

    Ex: 50123 -> 12.3 mb: Increasing, then decreasing
    """
    width, precision = int(code[2:4]), code[4]
    return ('3-hour pressure difference: +/- '
            f'{width}.{precision} {unit} - {PRESSURE_TENDENCIES[code[1]]}')
[ "def", "pressure_tendency", "(", "code", ":", "str", ",", "unit", ":", "str", "=", "'mb'", ")", "->", "str", ":", "width", ",", "precision", "=", "int", "(", "code", "[", "2", ":", "4", "]", ")", ",", "code", "[", "4", "]", "return", "(", "'3-hour pressure difference: +/- '", "f'{width}.{precision} {unit} - {PRESSURE_TENDENCIES[code[1]]}'", ")" ]
Translates a 5-digit pressure outlook code

Ex: 50123 -> 12.3 mb: Increasing, then decreasing
[ "Translates", "a", "5", "-", "digit", "pressure", "outlook", "code" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/remarks.py#L31-L39
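Tracing the docstring example: in '50123', code[1] ('0') selects the tendency description from the module's PRESSURE_TENDENCIES table and code[2:5] ('123') encodes 12.3 in tenths of a millibar. Assuming only that the table maps '0' to some description:

out = pressure_tendency('50123')
# width = int('12') -> 12, precision = '3' -> tenths
assert out.startswith('3-hour pressure difference: +/- 12.3 mb')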
243,524
etcher-be/emiz
emiz/avwx/remarks.py
parse
def parse(rmk: str) -> RemarksData:
    """
    Finds temperature and dewpoint decimal values from the remarks
    """
    rmkdata = {}
    for item in rmk.split(' '):
        if len(item) in [5, 9] and item[0] == 'T' and item[1:].isdigit():
            rmkdata['temperature_decimal'] = core.make_number(_tdec(item[1:5], None))  # type: ignore
            rmkdata['dewpoint_decimal'] = core.make_number(_tdec(item[5:], None))  # type: ignore
    return RemarksData(**rmkdata)
python
def parse(rmk: str) -> RemarksData:
    """
    Finds temperature and dewpoint decimal values from the remarks
    """
    rmkdata = {}
    for item in rmk.split(' '):
        if len(item) in [5, 9] and item[0] == 'T' and item[1:].isdigit():
            rmkdata['temperature_decimal'] = core.make_number(_tdec(item[1:5], None))  # type: ignore
            rmkdata['dewpoint_decimal'] = core.make_number(_tdec(item[5:], None))  # type: ignore
    return RemarksData(**rmkdata)
[ "def", "parse", "(", "rmk", ":", "str", ")", "->", "RemarksData", ":", "rmkdata", "=", "{", "}", "for", "item", "in", "rmk", ".", "split", "(", "' '", ")", ":", "if", "len", "(", "item", ")", "in", "[", "5", ",", "9", "]", "and", "item", "[", "0", "]", "==", "'T'", "and", "item", "[", "1", ":", "]", ".", "isdigit", "(", ")", ":", "rmkdata", "[", "'temperature_decimal'", "]", "=", "core", ".", "make_number", "(", "_tdec", "(", "item", "[", "1", ":", "5", "]", ",", "None", ")", ")", "# type: ignore", "rmkdata", "[", "'dewpoint_decimal'", "]", "=", "core", ".", "make_number", "(", "_tdec", "(", "item", "[", "5", ":", "]", ",", "None", ")", ")", "# type: ignore", "return", "RemarksData", "(", "*", "*", "rmkdata", ")" ]
Finds temperature and dewpoint decimal values from the remarks
[ "Finds", "temperature", "and", "dewpoint", "decimal", "values", "from", "the", "remarks" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/remarks.py#L74-L83
243,525
etcher-be/emiz
emiz/avwx/remarks.py
translate
def translate(remarks: str) -> typing.Dict[str, str]:  # noqa
    """
    Translates elements in the remarks string
    """
    ret = {}
    # Add and replace static multi-word elements
    for key in REMARKS_GROUPS:
        if key in remarks:
            ret[key.strip()] = REMARKS_GROUPS[key]
            # str.replace returns a new string; reassign so the match is
            # actually removed before the word-by-word pass below
            remarks = remarks.replace(key, ' ')
    # For each remaining element
    for rmk in remarks.split()[1:]:
        rlen = len(rmk)
        # Static single-word elements
        if rmk in REMARKS_ELEMENTS:
            ret[rmk] = REMARKS_ELEMENTS[rmk]
        # Digit-only encoded elements
        elif rmk.isdigit():
            if rlen == 5 and rmk[0] in LEN5_DECODE:
                rmk_ = LEN5_DECODE[rmk[0]](rmk)  # type: ignore
                ret[rmk] = rmk_
            # 24-hour min/max temperature
            elif rlen == 9:
                ret[rmk] = f'24-hour temperature: max {_tdec(rmk[1:5])} min {_tdec(rmk[5:])}'
        # Sea level pressure: SLP218
        elif rmk.startswith('SLP'):
            ret[rmk] = f'Sea level pressure: 10{rmk[3:5]}.{rmk[5]} hPa'
        # Temp/Dew with decimal: T02220183
        elif rlen == 9 and rmk[0] == 'T' and rmk[1:].isdigit():
            ret[rmk] = f'Temperature {_tdec(rmk[1:5])} and dewpoint {_tdec(rmk[5:])}'
        # Precipitation amount: P0123
        elif rlen == 5 and rmk[0] == 'P' and rmk[1:].isdigit():
            ret[rmk] = f'Hourly precipitation: {int(rmk[1:3])}.{rmk[3:]} in.'
        # Weather began/ended
        elif rlen == 5 and rmk[2] in ('B', 'E') and rmk[3:].isdigit() and rmk[:2] in WX_TRANSLATIONS:
            state = 'began' if rmk[2] == 'B' else 'ended'
            ret[rmk] = f'{WX_TRANSLATIONS[rmk[:2]]} {state} at :{rmk[3:]}'
    return ret
python
def translate(remarks: str) -> typing.Dict[str, str]:  # noqa
    """
    Translates elements in the remarks string
    """
    ret = {}
    # Add and replace static multi-word elements
    for key in REMARKS_GROUPS:
        if key in remarks:
            ret[key.strip()] = REMARKS_GROUPS[key]
            # str.replace returns a new string; reassign so the match is
            # actually removed before the word-by-word pass below
            remarks = remarks.replace(key, ' ')
    # For each remaining element
    for rmk in remarks.split()[1:]:
        rlen = len(rmk)
        # Static single-word elements
        if rmk in REMARKS_ELEMENTS:
            ret[rmk] = REMARKS_ELEMENTS[rmk]
        # Digit-only encoded elements
        elif rmk.isdigit():
            if rlen == 5 and rmk[0] in LEN5_DECODE:
                rmk_ = LEN5_DECODE[rmk[0]](rmk)  # type: ignore
                ret[rmk] = rmk_
            # 24-hour min/max temperature
            elif rlen == 9:
                ret[rmk] = f'24-hour temperature: max {_tdec(rmk[1:5])} min {_tdec(rmk[5:])}'
        # Sea level pressure: SLP218
        elif rmk.startswith('SLP'):
            ret[rmk] = f'Sea level pressure: 10{rmk[3:5]}.{rmk[5]} hPa'
        # Temp/Dew with decimal: T02220183
        elif rlen == 9 and rmk[0] == 'T' and rmk[1:].isdigit():
            ret[rmk] = f'Temperature {_tdec(rmk[1:5])} and dewpoint {_tdec(rmk[5:])}'
        # Precipitation amount: P0123
        elif rlen == 5 and rmk[0] == 'P' and rmk[1:].isdigit():
            ret[rmk] = f'Hourly precipitation: {int(rmk[1:3])}.{rmk[3:]} in.'
        # Weather began/ended
        elif rlen == 5 and rmk[2] in ('B', 'E') and rmk[3:].isdigit() and rmk[:2] in WX_TRANSLATIONS:
            state = 'began' if rmk[2] == 'B' else 'ended'
            ret[rmk] = f'{WX_TRANSLATIONS[rmk[:2]]} {state} at :{rmk[3:]}'
    return ret
[ "def", "translate", "(", "remarks", ":", "str", ")", "->", "typing", ".", "Dict", "[", "str", ",", "str", "]", ":", "# noqa", "ret", "=", "{", "}", "# Add and replace static multi-word elements", "for", "key", "in", "REMARKS_GROUPS", ":", "if", "key", "in", "remarks", ":", "ret", "[", "key", ".", "strip", "(", ")", "]", "=", "REMARKS_GROUPS", "[", "key", "]", "remarks", ".", "replace", "(", "key", ",", "' '", ")", "# For each remaining element", "for", "rmk", "in", "remarks", ".", "split", "(", ")", "[", "1", ":", "]", ":", "rlen", "=", "len", "(", "rmk", ")", "# Static single-word elements", "if", "rmk", "in", "REMARKS_ELEMENTS", ":", "ret", "[", "rmk", "]", "=", "REMARKS_ELEMENTS", "[", "rmk", "]", "# Digit-only encoded elements", "elif", "rmk", ".", "isdigit", "(", ")", ":", "if", "rlen", "==", "5", "and", "rmk", "[", "0", "]", "in", "LEN5_DECODE", ":", "rmk_", "=", "LEN5_DECODE", "[", "rmk", "[", "0", "]", "]", "(", "rmk", ")", "# type: ignore", "ret", "[", "rmk", "]", "=", "rmk_", "# 24-hour min/max temperature", "elif", "rlen", "==", "9", ":", "ret", "[", "rmk", "]", "=", "f'24-hour temperature: max {_tdec(rmk[1:5])} min {_tdec(rmk[5:])}'", "# Sea level pressure: SLP218", "elif", "rmk", ".", "startswith", "(", "'SLP'", ")", ":", "ret", "[", "rmk", "]", "=", "f'Sea level pressure: 10{rmk[3:5]}.{rmk[5]} hPa'", "# Temp/Dew with decimal: T02220183", "elif", "rlen", "==", "9", "and", "rmk", "[", "0", "]", "==", "'T'", "and", "rmk", "[", "1", ":", "]", ".", "isdigit", "(", ")", ":", "ret", "[", "rmk", "]", "=", "f'Temperature {_tdec(rmk[1:5])} and dewpoint {_tdec(rmk[5:])}'", "# Precipitation amount: P0123", "elif", "rlen", "==", "5", "and", "rmk", "[", "0", "]", "==", "'P'", "and", "rmk", "[", "1", ":", "]", ".", "isdigit", "(", ")", ":", "ret", "[", "rmk", "]", "=", "f'Hourly precipitation: {int(rmk[1:3])}.{rmk[3:]} in.'", "# Weather began/ended", "elif", "rlen", "==", "5", "and", "rmk", "[", "2", "]", "in", "(", "'B'", ",", "'E'", ")", "and", "rmk", "[", "3", ":", "]", ".", "isdigit", "(", ")", "and", "rmk", "[", ":", "2", "]", "in", "WX_TRANSLATIONS", ":", "state", "=", "'began'", "if", "rmk", "[", "2", "]", "==", "'B'", "else", "'ended'", "ret", "[", "rmk", "]", "=", "f'{WX_TRANSLATIONS[rmk[:2]]} {state} at :{rmk[3:]}'", "return", "ret" ]
Translates elements in the remarks string
[ "Translates", "elements", "in", "the", "remarks", "string" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/remarks.py#L86-L123
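One of the denser branches decodes weather begin/end groups such as 'RAB12'. A standalone sketch of just that branch, with a toy table standing in for the module's WX_TRANSLATIONS:

WX = {'RA': 'Rain', 'SN': 'Snow'}  # toy stand-in for WX_TRANSLATIONS

def decode_wx_event(rmk: str) -> str:
    # 'RAB12' -> rain began at minute 12; 'SNE45' -> snow ended at :45
    state = 'began' if rmk[2] == 'B' else 'ended'
    return f"{WX[rmk[:2]]} {state} at :{rmk[3:]}"

assert decode_wx_event('RAB12') == 'Rain began at :12'
assert decode_wx_event('SNE45') == 'Snow ended at :45'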
243,526
sahilchinoy/django-irs-filings
irs/management/commands/loadIRS.py
RowParser.parse_row
def parse_row(self):
    """
    Parses a row, cell-by-cell, returning a dict
    of field names to the cleaned field values.
    """
    fields = self.mapping
    for i, cell in enumerate(self.row[0:len(fields)]):
        field_name, field_type = fields[str(i)]
        parsed_cell = self.clean_cell(cell, field_type)
        self.parsed_row[field_name] = parsed_cell
python
def parse_row(self):
    """
    Parses a row, cell-by-cell, returning a dict
    of field names to the cleaned field values.
    """
    fields = self.mapping
    for i, cell in enumerate(self.row[0:len(fields)]):
        field_name, field_type = fields[str(i)]
        parsed_cell = self.clean_cell(cell, field_type)
        self.parsed_row[field_name] = parsed_cell
[ "def", "parse_row", "(", "self", ")", ":", "fields", "=", "self", ".", "mapping", "for", "i", ",", "cell", "in", "enumerate", "(", "self", ".", "row", "[", "0", ":", "len", "(", "fields", ")", "]", ")", ":", "field_name", ",", "field_type", "=", "fields", "[", "str", "(", "i", ")", "]", "parsed_cell", "=", "self", ".", "clean_cell", "(", "cell", ",", "field_type", ")", "self", ".", "parsed_row", "[", "field_name", "]", "=", "parsed_cell" ]
Parses a row, cell-by-cell, returning a dict of field names to the cleaned field values.
[ "Parses", "a", "row", "cell", "-", "by", "-", "cell", "returning", "a", "dict", "of", "field", "names", "to", "the", "cleaned", "field", "values", "." ]
efe80cc57ce1d9d8488f4e9496cf2347e29b6d8b
https://github.com/sahilchinoy/django-irs-filings/blob/efe80cc57ce1d9d8488f4e9496cf2347e29b6d8b/irs/management/commands/loadIRS.py#L78-L87
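A self-contained sketch of the mapping-driven pattern parse_row uses: each column position maps to a (field name, field type) pair and every cell is cleaned according to its declared type. The mapping and cleaner here are illustrative, not the command's real ones:

mapping = {'0': ('committee_name', 'text'), '1': ('amount', 'decimal')}
row = ['  ACME PAC  ', '1500.00']

def clean_cell(cell, field_type):
    cell = cell.strip()
    return float(cell) if field_type == 'decimal' else cell

parsed = {mapping[str(i)][0]: clean_cell(cell, mapping[str(i)][1])
          for i, cell in enumerate(row[0:len(mapping)])}
print(parsed)  # {'committee_name': 'ACME PAC', 'amount': 1500.0}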
243,527
sahilchinoy/django-irs-filings
irs/management/commands/loadIRS.py
Command.build_mappings
def build_mappings(self):
    """
    Uses CSV files of field names and positions
    for different filing types to load mappings
    into memory, for use in parsing different
    types of rows.
    """
    self.mappings = {}
    for record_type in ('sa', 'sb', 'F8872'):
        path = os.path.join(
            os.path.dirname(
                os.path.dirname(
                    os.path.dirname(__file__))),
            'mappings',
            '{}.csv'.format(record_type))
        mapping = {}
        with open(path, 'r') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                mapping[row['position']] = (
                    row['model_name'],
                    row['field_type'])
        self.mappings[record_type] = mapping
python
def build_mappings(self):
    """
    Uses CSV files of field names and positions
    for different filing types to load mappings
    into memory, for use in parsing different
    types of rows.
    """
    self.mappings = {}
    for record_type in ('sa', 'sb', 'F8872'):
        path = os.path.join(
            os.path.dirname(
                os.path.dirname(
                    os.path.dirname(__file__))),
            'mappings',
            '{}.csv'.format(record_type))
        mapping = {}
        with open(path, 'r') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                mapping[row['position']] = (
                    row['model_name'],
                    row['field_type'])
        self.mappings[record_type] = mapping
[ "def", "build_mappings", "(", "self", ")", ":", "self", ".", "mappings", "=", "{", "}", "for", "record_type", "in", "(", "'sa'", ",", "'sb'", ",", "'F8872'", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", ")", ",", "'mappings'", ",", "'{}.csv'", ".", "format", "(", "record_type", ")", ")", "mapping", "=", "{", "}", "with", "open", "(", "path", ",", "'r'", ")", "as", "csvfile", ":", "reader", "=", "csv", ".", "DictReader", "(", "csvfile", ")", "for", "row", "in", "reader", ":", "mapping", "[", "row", "[", "'position'", "]", "]", "=", "(", "row", "[", "'model_name'", "]", ",", "row", "[", "'field_type'", "]", ")", "self", ".", "mappings", "[", "record_type", "]", "=", "mapping" ]
Uses CSV files of field names and positions for different filing types to load mappings into memory, for use in parsing different types of rows.
[ "Uses", "CSV", "files", "of", "field", "names", "and", "positions", "for", "different", "filing", "types", "to", "load", "mappings", "into", "memory", "for", "use", "in", "parsing", "different", "types", "of", "rows", "." ]
efe80cc57ce1d9d8488f4e9496cf2347e29b6d8b
https://github.com/sahilchinoy/django-irs-filings/blob/efe80cc57ce1d9d8488f4e9496cf2347e29b6d8b/irs/management/commands/loadIRS.py#L244-L266
243,528
jic-dtool/dtool-info
dtool_info/dataset.py
diff
def diff(full, dataset_uri, reference_dataset_uri):
    """Report the difference between two datasets.

    1. Checks that the identifiers are identical
    2. Checks that the sizes are identical
    3. Checks that the hashes are identical, if the '--full' option is used

    If a difference is detected in step 1, steps 2 and 3 will not be
    carried out. Similarly, if a difference is detected in step 2, step 3
    will not be carried out.

    When checking that the hashes are identical the hashes for the first
    dataset are recalculated using the hashing algorithm of the reference
    dataset.
    """
    def echo_header(desc, ds_name, ref_ds_name, prop):
        click.secho("Different {}".format(desc), fg="red")
        click.secho("ID, {} in '{}', {} in '{}'".format(
            prop, ds_name, prop, ref_ds_name))

    def echo_diff(diff):
        for d in diff:
            line = "{}, {}, {}".format(d[0], d[1], d[2])
            click.secho(line)

    ds = dtoolcore.DataSet.from_uri(dataset_uri)
    ref_ds = dtoolcore.DataSet.from_uri(reference_dataset_uri)

    num_items = len(list(ref_ds.identifiers))

    ids_diff = diff_identifiers(ds, ref_ds)
    if len(ids_diff) > 0:
        echo_header("identifiers", ds.name, ref_ds.name, "present")
        echo_diff(ids_diff)
        sys.exit(1)

    with click.progressbar(length=num_items,
                           label="Comparing sizes") as progressbar:
        sizes_diff = diff_sizes(ds, ref_ds, progressbar)
    if len(sizes_diff) > 0:
        echo_header("sizes", ds.name, ref_ds.name, "size")
        echo_diff(sizes_diff)
        sys.exit(2)

    if full:
        with click.progressbar(length=num_items,
                               label="Comparing hashes") as progressbar:
            content_diff = diff_content(ds, ref_ds, progressbar)
        if len(content_diff) > 0:
            echo_header("content", ds.name, ref_ds.name, "hash")
            echo_diff(content_diff)
            sys.exit(3)
python
def diff(full, dataset_uri, reference_dataset_uri):
    """Report the difference between two datasets.

    1. Checks that the identifiers are identical
    2. Checks that the sizes are identical
    3. Checks that the hashes are identical, if the '--full' option is used

    If a difference is detected in step 1, steps 2 and 3 will not be
    carried out. Similarly, if a difference is detected in step 2, step 3
    will not be carried out.

    When checking that the hashes are identical the hashes for the first
    dataset are recalculated using the hashing algorithm of the reference
    dataset.
    """
    def echo_header(desc, ds_name, ref_ds_name, prop):
        click.secho("Different {}".format(desc), fg="red")
        click.secho("ID, {} in '{}', {} in '{}'".format(
            prop, ds_name, prop, ref_ds_name))

    def echo_diff(diff):
        for d in diff:
            line = "{}, {}, {}".format(d[0], d[1], d[2])
            click.secho(line)

    ds = dtoolcore.DataSet.from_uri(dataset_uri)
    ref_ds = dtoolcore.DataSet.from_uri(reference_dataset_uri)

    num_items = len(list(ref_ds.identifiers))

    ids_diff = diff_identifiers(ds, ref_ds)
    if len(ids_diff) > 0:
        echo_header("identifiers", ds.name, ref_ds.name, "present")
        echo_diff(ids_diff)
        sys.exit(1)

    with click.progressbar(length=num_items,
                           label="Comparing sizes") as progressbar:
        sizes_diff = diff_sizes(ds, ref_ds, progressbar)
    if len(sizes_diff) > 0:
        echo_header("sizes", ds.name, ref_ds.name, "size")
        echo_diff(sizes_diff)
        sys.exit(2)

    if full:
        with click.progressbar(length=num_items,
                               label="Comparing hashes") as progressbar:
            content_diff = diff_content(ds, ref_ds, progressbar)
        if len(content_diff) > 0:
            echo_header("content", ds.name, ref_ds.name, "hash")
            echo_diff(content_diff)
            sys.exit(3)
[ "def", "diff", "(", "full", ",", "dataset_uri", ",", "reference_dataset_uri", ")", ":", "def", "echo_header", "(", "desc", ",", "ds_name", ",", "ref_ds_name", ",", "prop", ")", ":", "click", ".", "secho", "(", "\"Different {}\"", ".", "format", "(", "desc", ")", ",", "fg", "=", "\"red\"", ")", "click", ".", "secho", "(", "\"ID, {} in '{}', {} in '{}'\"", ".", "format", "(", "prop", ",", "ds_name", ",", "prop", ",", "ref_ds_name", ")", ")", "def", "echo_diff", "(", "diff", ")", ":", "for", "d", "in", "diff", ":", "line", "=", "\"{}, {}, {}\"", ".", "format", "(", "d", "[", "0", "]", ",", "d", "[", "1", "]", ",", "d", "[", "2", "]", ")", "click", ".", "secho", "(", "line", ")", "ds", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "dataset_uri", ")", "ref_ds", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "reference_dataset_uri", ")", "num_items", "=", "len", "(", "list", "(", "ref_ds", ".", "identifiers", ")", ")", "ids_diff", "=", "diff_identifiers", "(", "ds", ",", "ref_ds", ")", "if", "len", "(", "ids_diff", ")", ">", "0", ":", "echo_header", "(", "\"identifiers\"", ",", "ds", ".", "name", ",", "ref_ds", ".", "name", ",", "\"present\"", ")", "echo_diff", "(", "ids_diff", ")", "sys", ".", "exit", "(", "1", ")", "with", "click", ".", "progressbar", "(", "length", "=", "num_items", ",", "label", "=", "\"Comparing sizes\"", ")", "as", "progressbar", ":", "sizes_diff", "=", "diff_sizes", "(", "ds", ",", "ref_ds", ",", "progressbar", ")", "if", "len", "(", "sizes_diff", ")", ">", "0", ":", "echo_header", "(", "\"sizes\"", ",", "ds", ".", "name", ",", "ref_ds", ".", "name", ",", "\"size\"", ")", "echo_diff", "(", "sizes_diff", ")", "sys", ".", "exit", "(", "2", ")", "if", "full", ":", "with", "click", ".", "progressbar", "(", "length", "=", "num_items", ",", "label", "=", "\"Comparing hashes\"", ")", "as", "progressbar", ":", "content_diff", "=", "diff_content", "(", "ds", ",", "ref_ds", ",", "progressbar", ")", "if", "len", "(", "content_diff", ")", ">", "0", ":", "echo_header", "(", "\"content\"", ",", "ds", ".", "name", ",", "ref_ds", ".", "name", ",", "\"hash\"", ")", "echo_diff", "(", "content_diff", ")", "sys", ".", "exit", "(", "3", ")" ]
Report the difference between two datasets.

1. Checks that the identifiers are identical
2. Checks that the sizes are identical
3. Checks that the hashes are identical, if the '--full' option is used

If a difference is detected in step 1, steps 2 and 3 will not be carried
out. Similarly, if a difference is detected in step 2, step 3 will not be
carried out.

When checking that the hashes are identical the hashes for the first
dataset are recalculated using the hashing algorithm of the reference
dataset.
[ "Report", "the", "difference", "between", "two", "datasets", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/dataset.py#L38-L90
243,529
jic-dtool/dtool-info
dtool_info/dataset.py
identifiers
def identifiers(dataset_uri):
    """List the item identifiers in the dataset."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    for i in dataset.identifiers:
        click.secho(i)
python
def identifiers(dataset_uri):
    """List the item identifiers in the dataset."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    for i in dataset.identifiers:
        click.secho(i)
[ "def", "identifiers", "(", "dataset_uri", ")", ":", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "dataset_uri", ")", "for", "i", "in", "dataset", ".", "identifiers", ":", "click", ".", "secho", "(", "i", ")" ]
List the item identifiers in the dataset.
[ "List", "the", "item", "identifiers", "in", "the", "dataset", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/dataset.py#L178-L182
243,530
jic-dtool/dtool-info
dtool_info/dataset.py
summary
def summary(dataset_uri, format):
    """Report summary information about a dataset."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    creator_username = dataset._admin_metadata["creator_username"]
    frozen_at = dataset._admin_metadata["frozen_at"]
    num_items = len(dataset.identifiers)
    tot_size = sum([dataset.item_properties(i)["size_in_bytes"]
                    for i in dataset.identifiers])
    if format == "json":
        json_lines = [
            '{',
            ' "name": "{}",'.format(dataset.name),
            ' "uuid": "{}",'.format(dataset.uuid),
            ' "creator_username": "{}",'.format(creator_username),
            ' "number_of_items": {},'.format(num_items),
            ' "size_in_bytes": {},'.format(tot_size),
            ' "frozen_at": {}'.format(frozen_at),
            '}',
        ]
        formatted_json = "\n".join(json_lines)
        colorful_json = pygments.highlight(
            formatted_json,
            pygments.lexers.JsonLexer(),
            pygments.formatters.TerminalFormatter())
        click.secho(colorful_json, nl=False)
    else:
        info = [
            ("name", dataset.name),
            ("uuid", dataset.uuid),
            ("creator_username", creator_username),
            ("number_of_items", str(num_items)),
            ("size", sizeof_fmt(tot_size).strip()),
            ("frozen_at", date_fmt(frozen_at)),
        ]
        for key, value in info:
            click.secho("{}: ".format(key), nl=False)
            click.secho(value, fg="green")
python
def summary(dataset_uri, format):
    """Report summary information about a dataset."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    creator_username = dataset._admin_metadata["creator_username"]
    frozen_at = dataset._admin_metadata["frozen_at"]
    num_items = len(dataset.identifiers)
    tot_size = sum([dataset.item_properties(i)["size_in_bytes"]
                    for i in dataset.identifiers])
    if format == "json":
        json_lines = [
            '{',
            ' "name": "{}",'.format(dataset.name),
            ' "uuid": "{}",'.format(dataset.uuid),
            ' "creator_username": "{}",'.format(creator_username),
            ' "number_of_items": {},'.format(num_items),
            ' "size_in_bytes": {},'.format(tot_size),
            ' "frozen_at": {}'.format(frozen_at),
            '}',
        ]
        formatted_json = "\n".join(json_lines)
        colorful_json = pygments.highlight(
            formatted_json,
            pygments.lexers.JsonLexer(),
            pygments.formatters.TerminalFormatter())
        click.secho(colorful_json, nl=False)
    else:
        info = [
            ("name", dataset.name),
            ("uuid", dataset.uuid),
            ("creator_username", creator_username),
            ("number_of_items", str(num_items)),
            ("size", sizeof_fmt(tot_size).strip()),
            ("frozen_at", date_fmt(frozen_at)),
        ]
        for key, value in info:
            click.secho("{}: ".format(key), nl=False)
            click.secho(value, fg="green")
[ "def", "summary", "(", "dataset_uri", ",", "format", ")", ":", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "dataset_uri", ")", "creator_username", "=", "dataset", ".", "_admin_metadata", "[", "\"creator_username\"", "]", "frozen_at", "=", "dataset", ".", "_admin_metadata", "[", "\"frozen_at\"", "]", "num_items", "=", "len", "(", "dataset", ".", "identifiers", ")", "tot_size", "=", "sum", "(", "[", "dataset", ".", "item_properties", "(", "i", ")", "[", "\"size_in_bytes\"", "]", "for", "i", "in", "dataset", ".", "identifiers", "]", ")", "if", "format", "==", "\"json\"", ":", "json_lines", "=", "[", "'{'", ",", "' \"name\": \"{}\",'", ".", "format", "(", "dataset", ".", "name", ")", ",", "' \"uuid\": \"{}\",'", ".", "format", "(", "dataset", ".", "uuid", ")", ",", "' \"creator_username\": \"{}\",'", ".", "format", "(", "creator_username", ")", ",", "' \"number_of_items\": {},'", ".", "format", "(", "num_items", ")", ",", "' \"size_in_bytes\": {},'", ".", "format", "(", "tot_size", ")", ",", "' \"frozen_at\": {}'", ".", "format", "(", "frozen_at", ")", ",", "'}'", ",", "]", "formatted_json", "=", "\"\\n\"", ".", "join", "(", "json_lines", ")", "colorful_json", "=", "pygments", ".", "highlight", "(", "formatted_json", ",", "pygments", ".", "lexers", ".", "JsonLexer", "(", ")", ",", "pygments", ".", "formatters", ".", "TerminalFormatter", "(", ")", ")", "click", ".", "secho", "(", "colorful_json", ",", "nl", "=", "False", ")", "else", ":", "info", "=", "[", "(", "\"name\"", ",", "dataset", ".", "name", ")", ",", "(", "\"uuid\"", ",", "dataset", ".", "uuid", ")", ",", "(", "\"creator_username\"", ",", "creator_username", ")", ",", "(", "\"number_of_items\"", ",", "str", "(", "num_items", ")", ")", ",", "(", "\"size\"", ",", "sizeof_fmt", "(", "tot_size", ")", ".", "strip", "(", ")", ")", ",", "(", "\"frozen_at\"", ",", "date_fmt", "(", "frozen_at", ")", ")", ",", "]", "for", "key", ",", "value", "in", "info", ":", "click", ".", "secho", "(", "\"{}: \"", ".", "format", "(", "key", ")", ",", "nl", "=", "False", ")", "click", ".", "secho", "(", "value", ",", "fg", "=", "\"green\"", ")" ]
Report summary information about a dataset.
[ "Report", "summary", "information", "about", "a", "dataset", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/dataset.py#L193-L231
243,531
jic-dtool/dtool-info
dtool_info/dataset.py
properties
def properties(dataset_uri, item_identifier):
    """Report item properties."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    try:
        props = dataset.item_properties(item_identifier)
    except KeyError:
        click.secho(
            "No such item in dataset: {}".format(item_identifier),
            fg="red",
            err=True
        )
        sys.exit(20)

    json_lines = [
        '{',
        ' "relpath": "{}",'.format(props["relpath"]),
        ' "size_in_bytes": {},'.format(props["size_in_bytes"]),
        ' "utc_timestamp": {},'.format(props["utc_timestamp"]),
        ' "hash": "{}"'.format(props["hash"]),
        '}',
    ]
    formatted_json = "\n".join(json_lines)
    colorful_json = pygments.highlight(
        formatted_json,
        pygments.lexers.JsonLexer(),
        pygments.formatters.TerminalFormatter())
    click.secho(colorful_json, nl=False)
python
def properties(dataset_uri, item_identifier):
    """Report item properties."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    try:
        props = dataset.item_properties(item_identifier)
    except KeyError:
        click.secho(
            "No such item in dataset: {}".format(item_identifier),
            fg="red",
            err=True
        )
        sys.exit(20)

    json_lines = [
        '{',
        ' "relpath": "{}",'.format(props["relpath"]),
        ' "size_in_bytes": {},'.format(props["size_in_bytes"]),
        ' "utc_timestamp": {},'.format(props["utc_timestamp"]),
        ' "hash": "{}"'.format(props["hash"]),
        '}',
    ]
    formatted_json = "\n".join(json_lines)
    colorful_json = pygments.highlight(
        formatted_json,
        pygments.lexers.JsonLexer(),
        pygments.formatters.TerminalFormatter())
    click.secho(colorful_json, nl=False)
[ "def", "properties", "(", "dataset_uri", ",", "item_identifier", ")", ":", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "dataset_uri", ")", "try", ":", "props", "=", "dataset", ".", "item_properties", "(", "item_identifier", ")", "except", "KeyError", ":", "click", ".", "secho", "(", "\"No such item in dataset: {}\"", ".", "format", "(", "item_identifier", ")", ",", "fg", "=", "\"red\"", ",", "err", "=", "True", ")", "sys", ".", "exit", "(", "20", ")", "json_lines", "=", "[", "'{'", ",", "' \"relpath\": \"{}\",'", ".", "format", "(", "props", "[", "\"relpath\"", "]", ")", ",", "' \"size_in_bytes\": {},'", ".", "format", "(", "props", "[", "\"size_in_bytes\"", "]", ")", ",", "' \"utc_timestamp\": {},'", ".", "format", "(", "props", "[", "\"utc_timestamp\"", "]", ")", ",", "' \"hash\": \"{}\"'", ".", "format", "(", "props", "[", "\"hash\"", "]", ")", ",", "'}'", ",", "]", "formatted_json", "=", "\"\\n\"", ".", "join", "(", "json_lines", ")", "colorful_json", "=", "pygments", ".", "highlight", "(", "formatted_json", ",", "pygments", ".", "lexers", ".", "JsonLexer", "(", ")", ",", "pygments", ".", "formatters", ".", "TerminalFormatter", "(", ")", ")", "click", ".", "secho", "(", "colorful_json", ",", "nl", "=", "False", ")" ]
Report item properties.
[ "Report", "item", "properties", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/dataset.py#L244-L270
243,532
jic-dtool/dtool-info
dtool_info/dataset.py
relpath
def relpath(dataset_uri, item_identifier):
    """Return relpath associated with the item."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    try:
        props = dataset.item_properties(item_identifier)
    except KeyError:
        click.secho(
            "No such item in dataset: {}".format(item_identifier),
            fg="red",
            err=True
        )
        sys.exit(21)
    click.secho(props["relpath"])
python
def relpath(dataset_uri, item_identifier):
    """Return relpath associated with the item."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    try:
        props = dataset.item_properties(item_identifier)
    except KeyError:
        click.secho(
            "No such item in dataset: {}".format(item_identifier),
            fg="red",
            err=True
        )
        sys.exit(21)
    click.secho(props["relpath"])
[ "def", "relpath", "(", "dataset_uri", ",", "item_identifier", ")", ":", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "dataset_uri", ")", "try", ":", "props", "=", "dataset", ".", "item_properties", "(", "item_identifier", ")", "except", "KeyError", ":", "click", ".", "secho", "(", "\"No such item in dataset: {}\"", ".", "format", "(", "item_identifier", ")", ",", "fg", "=", "\"red\"", ",", "err", "=", "True", ")", "sys", ".", "exit", "(", "21", ")", "click", ".", "secho", "(", "props", "[", "\"relpath\"", "]", ")" ]
Return relpath associated with the item.
[ "Return", "relpath", "associated", "with", "the", "item", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/dataset.py#L318-L331
243,533
jic-dtool/dtool-info
dtool_info/dataset.py
verify
def verify(full, dataset_uri):
    """Verify the integrity of a dataset."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    all_okay = True

    generated_manifest = dataset.generate_manifest()
    generated_identifiers = set(generated_manifest["items"].keys())
    manifest_identifiers = set(dataset.identifiers)

    for i in generated_identifiers.difference(manifest_identifiers):
        message = "Unknown item: {} {}".format(
            i,
            generated_manifest["items"][i]["relpath"]
        )
        click.secho(message, fg="red")
        all_okay = False

    for i in manifest_identifiers.difference(generated_identifiers):
        message = "Missing item: {} {}".format(
            i,
            dataset.item_properties(i)["relpath"]
        )
        click.secho(message, fg="red")
        all_okay = False

    for i in manifest_identifiers.intersection(generated_identifiers):
        generated_size = generated_manifest["items"][i]["size_in_bytes"]
        manifest_size = dataset.item_properties(i)["size_in_bytes"]
        if generated_size != manifest_size:
            message = "Altered item size: {} {}".format(
                i,
                dataset.item_properties(i)["relpath"]
            )
            click.secho(message, fg="red")
            all_okay = False

    if full:
        for i in manifest_identifiers.intersection(generated_identifiers):
            generated_hash = generated_manifest["items"][i]["hash"]
            manifest_hash = dataset.item_properties(i)["hash"]
            if generated_hash != manifest_hash:
                message = "Altered item hash: {} {}".format(
                    i,
                    dataset.item_properties(i)["relpath"]
                )
                click.secho(message, fg="red")
                all_okay = False

    if not all_okay:
        sys.exit(1)
    else:
        click.secho("All good :)", fg="green")
python
def verify(full, dataset_uri):
    """Verify the integrity of a dataset."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    all_okay = True

    generated_manifest = dataset.generate_manifest()
    generated_identifiers = set(generated_manifest["items"].keys())
    manifest_identifiers = set(dataset.identifiers)

    for i in generated_identifiers.difference(manifest_identifiers):
        message = "Unknown item: {} {}".format(
            i,
            generated_manifest["items"][i]["relpath"]
        )
        click.secho(message, fg="red")
        all_okay = False

    for i in manifest_identifiers.difference(generated_identifiers):
        message = "Missing item: {} {}".format(
            i,
            dataset.item_properties(i)["relpath"]
        )
        click.secho(message, fg="red")
        all_okay = False

    for i in manifest_identifiers.intersection(generated_identifiers):
        generated_size = generated_manifest["items"][i]["size_in_bytes"]
        manifest_size = dataset.item_properties(i)["size_in_bytes"]
        if generated_size != manifest_size:
            message = "Altered item size: {} {}".format(
                i,
                dataset.item_properties(i)["relpath"]
            )
            click.secho(message, fg="red")
            all_okay = False

    if full:
        for i in manifest_identifiers.intersection(generated_identifiers):
            generated_hash = generated_manifest["items"][i]["hash"]
            manifest_hash = dataset.item_properties(i)["hash"]
            if generated_hash != manifest_hash:
                message = "Altered item hash: {} {}".format(
                    i,
                    dataset.item_properties(i)["relpath"]
                )
                click.secho(message, fg="red")
                all_okay = False

    if not all_okay:
        sys.exit(1)
    else:
        click.secho("All good :)", fg="green")
[ "def", "verify", "(", "full", ",", "dataset_uri", ")", ":", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "dataset_uri", ")", "all_okay", "=", "True", "generated_manifest", "=", "dataset", ".", "generate_manifest", "(", ")", "generated_identifiers", "=", "set", "(", "generated_manifest", "[", "\"items\"", "]", ".", "keys", "(", ")", ")", "manifest_identifiers", "=", "set", "(", "dataset", ".", "identifiers", ")", "for", "i", "in", "generated_identifiers", ".", "difference", "(", "manifest_identifiers", ")", ":", "message", "=", "\"Unknown item: {} {}\"", ".", "format", "(", "i", ",", "generated_manifest", "[", "\"items\"", "]", "[", "i", "]", "[", "\"relpath\"", "]", ")", "click", ".", "secho", "(", "message", ",", "fg", "=", "\"red\"", ")", "all_okay", "=", "False", "for", "i", "in", "manifest_identifiers", ".", "difference", "(", "generated_identifiers", ")", ":", "message", "=", "\"Missing item: {} {}\"", ".", "format", "(", "i", ",", "dataset", ".", "item_properties", "(", "i", ")", "[", "\"relpath\"", "]", ")", "click", ".", "secho", "(", "message", ",", "fg", "=", "\"red\"", ")", "all_okay", "=", "False", "for", "i", "in", "manifest_identifiers", ".", "intersection", "(", "generated_identifiers", ")", ":", "generated_hash", "=", "generated_manifest", "[", "\"items\"", "]", "[", "i", "]", "[", "\"size_in_bytes\"", "]", "manifest_hash", "=", "dataset", ".", "item_properties", "(", "i", ")", "[", "\"size_in_bytes\"", "]", "if", "generated_hash", "!=", "manifest_hash", ":", "message", "=", "\"Altered item size: {} {}\"", ".", "format", "(", "i", ",", "dataset", ".", "item_properties", "(", "i", ")", "[", "\"relpath\"", "]", ")", "click", ".", "secho", "(", "message", ",", "fg", "=", "\"red\"", ")", "all_okay", "=", "False", "if", "full", ":", "for", "i", "in", "manifest_identifiers", ".", "intersection", "(", "generated_identifiers", ")", ":", "generated_hash", "=", "generated_manifest", "[", "\"items\"", "]", "[", "i", "]", "[", "\"hash\"", "]", "manifest_hash", "=", "dataset", ".", "item_properties", "(", "i", ")", "[", "\"hash\"", "]", "if", "generated_hash", "!=", "manifest_hash", ":", "message", "=", "\"Altered item hash: {} {}\"", ".", "format", "(", "i", ",", "dataset", ".", "item_properties", "(", "i", ")", "[", "\"relpath\"", "]", ")", "click", ".", "secho", "(", "message", ",", "fg", "=", "\"red\"", ")", "all_okay", "=", "False", "if", "not", "all_okay", ":", "sys", ".", "exit", "(", "1", ")", "else", ":", "click", ".", "secho", "(", "\"All good :)\"", ",", "fg", "=", "\"green\"", ")" ]
Verify the integrity of a dataset.
[ "Verify", "the", "integrity", "of", "a", "dataset", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/dataset.py#L342-L394
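verify is built on set algebra between the stored manifest and a freshly generated one; the three comparisons reduce to the following, shown here with toy identifier sets:

stored = {'id1', 'id2', 'id3'}   # identifiers recorded in the manifest
fresh = {'id2', 'id3', 'id4'}    # identifiers found on disk right now

unknown = fresh - stored      # {'id4'}: on disk but not in the manifest
missing = stored - fresh      # {'id1'}: in the manifest but gone from disk
candidates = stored & fresh   # {'id2', 'id3'}: compare sizes, then hashes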
243,534
jic-dtool/dtool-info
dtool_info/dataset.py
uuid
def uuid(dataset_uri):
    """Return the UUID of the dataset."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    click.secho(dataset.uuid)
python
def uuid(dataset_uri):
    """Return the UUID of the dataset."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    click.secho(dataset.uuid)
[ "def", "uuid", "(", "dataset_uri", ")", ":", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "dataset_uri", ")", "click", ".", "secho", "(", "dataset", ".", "uuid", ")" ]
Return the UUID of the dataset.
[ "Return", "the", "UUID", "of", "the", "dataset", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/dataset.py#L399-L402
243,535
carlosp420/dataset-creator
dataset_creator/base_dataset.py
DatasetBlock.split_data
def split_data(self):
    """Splits the list of SeqRecordExpanded objects into lists, which are
    kept in a bigger list.

    If the file_format is Nexus, then it is only partitioned by gene. If
    it is FASTA, then it needs partitioning by codon positions if
    required.

    Example:

        >>> blocks = [
        ...     [SeqRecord1, SeqRecord2],  # for gene 1
        ...     [SeqRecord1, SeqRecord2],  # for gene 2
        ...     [SeqRecord1, SeqRecord2],  # for gene 3
        ...     [SeqRecord1, SeqRecord2],  # for gene 4
        ... ]
    """
    this_gene_code = None

    for seq_record in self.data.seq_records:
        if this_gene_code is None or this_gene_code != seq_record.gene_code:
            this_gene_code = seq_record.gene_code
            self._blocks.append([])
        list_length = len(self._blocks)
        self._blocks[list_length - 1].append(seq_record)
python
def split_data(self):
    """Splits the list of SeqRecordExpanded objects into lists, which are
    kept in a bigger list.

    If the file_format is Nexus, then it is only partitioned by gene. If
    it is FASTA, then it needs partitioning by codon positions if
    required.

    Example:

        >>> blocks = [
        ...     [SeqRecord1, SeqRecord2],  # for gene 1
        ...     [SeqRecord1, SeqRecord2],  # for gene 2
        ...     [SeqRecord1, SeqRecord2],  # for gene 3
        ...     [SeqRecord1, SeqRecord2],  # for gene 4
        ... ]
    """
    this_gene_code = None

    for seq_record in self.data.seq_records:
        if this_gene_code is None or this_gene_code != seq_record.gene_code:
            this_gene_code = seq_record.gene_code
            self._blocks.append([])
        list_length = len(self._blocks)
        self._blocks[list_length - 1].append(seq_record)
[ "def", "split_data", "(", "self", ")", ":", "this_gene_code", "=", "None", "for", "seq_record", "in", "self", ".", "data", ".", "seq_records", ":", "if", "this_gene_code", "is", "None", "or", "this_gene_code", "!=", "seq_record", ".", "gene_code", ":", "this_gene_code", "=", "seq_record", ".", "gene_code", "self", ".", "_blocks", ".", "append", "(", "[", "]", ")", "list_length", "=", "len", "(", "self", ".", "_blocks", ")", "self", ".", "_blocks", "[", "list_length", "-", "1", "]", ".", "append", "(", "seq_record", ")" ]
Splits the list of SeqRecordExpanded objects into lists, which are kept
in a bigger list.

If the file_format is Nexus, then it is only partitioned by gene. If it
is FASTA, then it needs partitioning by codon positions if required.

Example:

    >>> blocks = [
    ...     [SeqRecord1, SeqRecord2],  # for gene 1
    ...     [SeqRecord1, SeqRecord2],  # for gene 2
    ...     [SeqRecord1, SeqRecord2],  # for gene 3
    ...     [SeqRecord1, SeqRecord2],  # for gene 4
    ... ]
[ "Splits", "the", "list", "of", "SeqRecordExpanded", "objects", "into", "lists", "which", "are", "kept", "into", "a", "bigger", "list", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L61-L84
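split_data groups consecutive records by gene_code, which itertools.groupby expresses directly; this equivalent sketch assumes, as the method does, that seq_records already arrive ordered gene by gene:

from itertools import groupby
from operator import attrgetter

def split_data_groupby(seq_records):
    # One sub-list per run of records sharing a gene_code
    return [list(group) for _, group in
            groupby(seq_records, key=attrgetter('gene_code'))]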
243,536
carlosp420/dataset-creator
dataset_creator/base_dataset.py
DatasetBlock.convert_to_string
def convert_to_string(self, block):
    """Makes gene_block as str from a list of SeqRecordExpanded objects
    of a gene_code.

    Override this function if the dataset block needs to be different
    due to file format.

    This block will need to be split further if the dataset is FASTA or
    TNT and the partitioning scheme is 1st-2nd, 3rd.

    As the dataset is split into several blocks due to 1st-2nd, 3rd, we
    cannot translate to amino acids or degenerate the sequences.
    """
    if self.partitioning != '1st-2nd, 3rd':
        return self.make_datablock_by_gene(block)
    else:
        if self.format == 'FASTA':
            return self.make_datablock_considering_codon_positions_as_fasta_format(block)
        else:
            return self.make_datablock_by_gene(block)
python
def convert_to_string(self, block):
    """Makes gene_block as str from a list of SeqRecordExpanded objects
    of a gene_code.

    Override this function if the dataset block needs to be different
    due to file format.

    This block will need to be split further if the dataset is FASTA or
    TNT and the partitioning scheme is 1st-2nd, 3rd.

    As the dataset is split into several blocks due to 1st-2nd, 3rd, we
    cannot translate to amino acids or degenerate the sequences.
    """
    if self.partitioning != '1st-2nd, 3rd':
        return self.make_datablock_by_gene(block)
    else:
        if self.format == 'FASTA':
            return self.make_datablock_considering_codon_positions_as_fasta_format(block)
        else:
            return self.make_datablock_by_gene(block)
[ "def", "convert_to_string", "(", "self", ",", "block", ")", ":", "if", "self", ".", "partitioning", "!=", "'1st-2nd, 3rd'", ":", "return", "self", ".", "make_datablock_by_gene", "(", "block", ")", "else", ":", "if", "self", ".", "format", "==", "'FASTA'", ":", "return", "self", ".", "make_datablock_considering_codon_positions_as_fasta_format", "(", "block", ")", "else", ":", "return", "self", ".", "make_datablock_by_gene", "(", "block", ")" ]
Makes gene_block as str from a list of SeqRecordExpanded objects of a
gene_code.

Override this function if the dataset block needs to be different due to
file format.

This block will need to be split further if the dataset is FASTA or TNT
and the partitioning scheme is 1st-2nd, 3rd.

As the dataset is split into several blocks due to 1st-2nd, 3rd, we
cannot translate to amino acids or degenerate the sequences.
[ "Makes", "gene_block", "as", "str", "from", "list", "of", "SeqRecordExpanded", "objects", "of", "a", "gene_code", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L86-L105
243,537
carlosp420/dataset-creator
dataset_creator/base_dataset.py
DatasetBlock.convert_block_dicts_to_string
def convert_block_dicts_to_string(self, block_1st2nd, block_1st, block_2nd, block_3rd):
    """Takes into account whether we need to output all codon positions."""
    out = ""
    # We need 1st and 2nd positions
    if self.codon_positions in ['ALL', '1st-2nd']:
        for gene_code, seqs in block_1st2nd.items():
            out += '>{0}_1st-2nd\n----\n'.format(gene_code)
            for seq in seqs:
                out += seq
    elif self.codon_positions == '1st':
        for gene_code, seqs in block_1st.items():
            out += '>{0}_1st\n----\n'.format(gene_code)
            for seq in seqs:
                out += seq
    elif self.codon_positions == '2nd':
        for gene_code, seqs in block_2nd.items():
            out += '>{0}_2nd\n----\n'.format(gene_code)
            for seq in seqs:
                out += seq

    # We also need 3rd positions
    if self.codon_positions in ['ALL', '3rd']:
        for gene_code, seqs in block_3rd.items():
            out += '\n>{0}_3rd\n----\n'.format(gene_code)
            for seq in seqs:
                out += seq
    return out
python
def convert_block_dicts_to_string(self, block_1st2nd, block_1st, block_2nd, block_3rd):
    """Takes into account whether we need to output all codon positions."""
    out = ""
    # We need 1st and 2nd positions
    if self.codon_positions in ['ALL', '1st-2nd']:
        for gene_code, seqs in block_1st2nd.items():
            out += '>{0}_1st-2nd\n----\n'.format(gene_code)
            for seq in seqs:
                out += seq
    elif self.codon_positions == '1st':
        for gene_code, seqs in block_1st.items():
            out += '>{0}_1st\n----\n'.format(gene_code)
            for seq in seqs:
                out += seq
    elif self.codon_positions == '2nd':
        for gene_code, seqs in block_2nd.items():
            out += '>{0}_2nd\n----\n'.format(gene_code)
            for seq in seqs:
                out += seq

    # We also need 3rd positions
    if self.codon_positions in ['ALL', '3rd']:
        for gene_code, seqs in block_3rd.items():
            out += '\n>{0}_3rd\n----\n'.format(gene_code)
            for seq in seqs:
                out += seq
    return out
[ "def", "convert_block_dicts_to_string", "(", "self", ",", "block_1st2nd", ",", "block_1st", ",", "block_2nd", ",", "block_3rd", ")", ":", "out", "=", "\"\"", "# We need 1st and 2nd positions", "if", "self", ".", "codon_positions", "in", "[", "'ALL'", ",", "'1st-2nd'", "]", ":", "for", "gene_code", ",", "seqs", "in", "block_1st2nd", ".", "items", "(", ")", ":", "out", "+=", "'>{0}_1st-2nd\\n----\\n'", ".", "format", "(", "gene_code", ")", "for", "seq", "in", "seqs", ":", "out", "+=", "seq", "elif", "self", ".", "codon_positions", "==", "'1st'", ":", "for", "gene_code", ",", "seqs", "in", "block_1st", ".", "items", "(", ")", ":", "out", "+=", "'>{0}_1st\\n----\\n'", ".", "format", "(", "gene_code", ")", "for", "seq", "in", "seqs", ":", "out", "+=", "seq", "elif", "self", ".", "codon_positions", "==", "'2nd'", ":", "for", "gene_code", ",", "seqs", "in", "block_2nd", ".", "items", "(", ")", ":", "out", "+=", "'>{0}_2nd\\n----\\n'", ".", "format", "(", "gene_code", ")", "for", "seq", "in", "seqs", ":", "out", "+=", "seq", "# We also need 3rd positions", "if", "self", ".", "codon_positions", "in", "[", "'ALL'", ",", "'3rd'", "]", ":", "for", "gene_code", ",", "seqs", "in", "block_3rd", ".", "items", "(", ")", ":", "out", "+=", "'\\n>{0}_3rd\\n----\\n'", ".", "format", "(", "gene_code", ")", "for", "seq", "in", "seqs", ":", "out", "+=", "seq", "return", "out" ]
Takes into account whether we need to output all codon positions.
[ "Takes", "into", "account", "whether", "we", "need", "to", "output", "all", "codon", "positions", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L139-L165
243,538
carlosp420/dataset-creator
dataset_creator/base_dataset.py
DatasetFooter.make_charsets
def make_charsets(self):
    """
    Override this function for the Phylip dataset, as the content is
    different and goes into a separate file.
    """
    count_start = 1
    out = ''
    for gene_code, lengths in self.data.gene_codes_and_lengths.items():
        count_end = lengths[0] + count_start - 1
        out += self.format_charset_line(gene_code, count_start, count_end)
        count_start = count_end + 1
    return out
python
def make_charsets(self):
    """
    Override this function for the Phylip dataset, as the content is
    different and goes into a separate file.
    """
    count_start = 1
    out = ''
    for gene_code, lengths in self.data.gene_codes_and_lengths.items():
        count_end = lengths[0] + count_start - 1
        out += self.format_charset_line(gene_code, count_start, count_end)
        count_start = count_end + 1
    return out
[ "def", "make_charsets", "(", "self", ")", ":", "count_start", "=", "1", "out", "=", "''", "for", "gene_code", ",", "lengths", "in", "self", ".", "data", ".", "gene_codes_and_lengths", ".", "items", "(", ")", ":", "count_end", "=", "lengths", "[", "0", "]", "+", "count_start", "-", "1", "out", "+=", "self", ".", "format_charset_line", "(", "gene_code", ",", "count_start", ",", "count_end", ")", "count_start", "=", "count_end", "+", "1", "return", "out" ]
Override this function for the Phylip dataset, as the content is
different and goes into a separate file.
[ "Override", "this", "function", "for", "Phylip", "dataset", "as", "the", "content", "is", "different", "and", "goes", "into", "a", "separate", "file", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L308-L319
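make_charsets assigns each gene a contiguous 1-based coordinate range by carrying a running counter. For two hypothetical genes of lengths 600 and 450 the arithmetic works out as:

lengths = {'COI': 600, 'EF1a': 450}  # illustrative gene lengths
count_start = 1
for gene_code, length in lengths.items():
    count_end = length + count_start - 1
    print(gene_code, count_start, count_end)
    count_start = count_end + 1
# COI 1 600
# EF1a 601 1050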
243,539
carlosp420/dataset-creator
dataset_creator/base_dataset.py
DatasetFooter.make_slash_number
def make_slash_number(self):
    """
    Charset lines have \2 or \3 depending on the type of partitioning and
    codon positions requested for our dataset.

    :return: str
    """
    if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd':
        return '\\2'
    elif self.partitioning in ['by codon position', '1st-2nd, 3rd'] and self.codon_positions in ['ALL', None]:
        return '\\3'
    else:
        return ''
python
def make_slash_number(self):
    """
    Charset lines have \2 or \3 depending on the type of partitioning and
    codon positions requested for our dataset.

    :return: str
    """
    if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd':
        return '\\2'
    elif self.partitioning in ['by codon position', '1st-2nd, 3rd'] and self.codon_positions in ['ALL', None]:
        return '\\3'
    else:
        return ''
[ "def", "make_slash_number", "(", "self", ")", ":", "if", "self", ".", "partitioning", "==", "'by codon position'", "and", "self", ".", "codon_positions", "==", "'1st-2nd'", ":", "return", "'\\\\2'", "elif", "self", ".", "partitioning", "in", "[", "'by codon position'", ",", "'1st-2nd, 3rd'", "]", "and", "self", ".", "codon_positions", "in", "[", "'ALL'", ",", "None", "]", ":", "return", "'\\\\3'", "else", ":", "return", "''" ]
Charset lines have \2 or \3 depending on the type of partitioning and
codon positions requested for our dataset.

:return: str
[ "Charset", "lines", "have", "\\", "2", "or", "\\", "3", "depending", "on", "type", "of", "partitioning", "and", "codon", "positions", "requested", "for", "our", "dataset", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L332-L344
243,540
carlosp420/dataset-creator
dataset_creator/base_dataset.py
DatasetFooter.add_suffixes_to_gene_codes
def add_suffixes_to_gene_codes(self):
    """Appends pos1, pos2, etc. to the gene_code if needed."""
    out = []
    for gene_code in self.data.gene_codes:
        for suffix in self.make_gene_code_suffixes():
            out.append('{0}{1}'.format(gene_code, suffix))
    return out
python
def add_suffixes_to_gene_codes(self):
    """Appends pos1, pos2, etc. to the gene_code if needed."""
    out = []
    for gene_code in self.data.gene_codes:
        for suffix in self.make_gene_code_suffixes():
            out.append('{0}{1}'.format(gene_code, suffix))
    return out
[ "def", "add_suffixes_to_gene_codes", "(", "self", ")", ":", "out", "=", "[", "]", "for", "gene_code", "in", "self", ".", "data", ".", "gene_codes", ":", "for", "sufix", "in", "self", ".", "make_gene_code_suffixes", "(", ")", ":", "out", ".", "append", "(", "'{0}{1}'", ".", "format", "(", "gene_code", ",", "sufix", ")", ")", "return", "out" ]
Appends pos1, pos2, etc. to the gene_code if needed.
[ "Appends", "pos1", "pos2", "etc", "to", "the", "gene_code", "if", "needed", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L385-L391
243,541
carlosp420/dataset-creator
dataset_creator/base_dataset.py
DatasetFooter.get_outgroup
def get_outgroup(self):
    """Generates the outgroup line from the voucher code specified by
    the user.
    """
    if self.outgroup is not None:
        outgroup_taxonomy = ''
        for i in self.data.seq_records:
            if self.outgroup == i.voucher_code:
                outgroup_taxonomy = '{0}_{1}'.format(i.taxonomy['genus'],
                                                     i.taxonomy['species'])
                break
        outgroup = '\noutgroup {0}_{1};'.format(self.outgroup, outgroup_taxonomy)
    else:
        outgroup = ''
    return outgroup
python
def get_outgroup(self):
    """Generates the outgroup line from the voucher code specified by
    the user.
    """
    if self.outgroup is not None:
        outgroup_taxonomy = ''
        for i in self.data.seq_records:
            if self.outgroup == i.voucher_code:
                outgroup_taxonomy = '{0}_{1}'.format(i.taxonomy['genus'],
                                                     i.taxonomy['species'])
                break
        outgroup = '\noutgroup {0}_{1};'.format(self.outgroup, outgroup_taxonomy)
    else:
        outgroup = ''
    return outgroup
[ "def", "get_outgroup", "(", "self", ")", ":", "if", "self", ".", "outgroup", "is", "not", "None", ":", "outgroup_taxonomy", "=", "''", "for", "i", "in", "self", ".", "data", ".", "seq_records", ":", "if", "self", ".", "outgroup", "==", "i", ".", "voucher_code", ":", "outgroup_taxonomy", "=", "'{0}_{1}'", ".", "format", "(", "i", ".", "taxonomy", "[", "'genus'", "]", ",", "i", ".", "taxonomy", "[", "'species'", "]", ")", "break", "outgroup", "=", "'\\noutgroup {0}_{1};'", ".", "format", "(", "self", ".", "outgroup", ",", "outgroup_taxonomy", ")", "else", ":", "outgroup", "=", "''", "return", "outgroup" ]
Generates the outgroup line from the voucher code specified by the user.
[ "Generates", "the", "outgroup", "line", "from", "the", "voucher", "code", "specified", "by", "the", "user", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/base_dataset.py#L413-L428
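For reference, the Nexus line the method emits looks like this; the voucher code and taxonomy values below are invented for illustration:

voucher_code = 'CP100-10'                               # hypothetical outgroup voucher
taxonomy = {'genus': 'Melitaea', 'species': 'diamina'}  # hypothetical record
outgroup_taxonomy = '{0}_{1}'.format(taxonomy['genus'], taxonomy['species'])
print('\noutgroup {0}_{1};'.format(voucher_code, outgroup_taxonomy))
# outgroup CP100-10_Melitaea_diamina;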
243,542
etcher-be/emiz
emiz/mission_time.py
MissionTime.from_string
def from_string(input_str) -> 'MissionTime': # noinspection SpellCheckingInspection """ Creates a MissionTime instance from a string Format: YYYYMMDDHHMMSS Args: input_str: string to parse Returns: MissionTime instance """ match = RE_INPUT_STRING.match(input_str) if not match: raise ValueError(f'badly formatted date/time: {input_str}') return MissionTime( datetime.datetime( int(match.group('year')), int(match.group('month')), int(match.group('day')), int(match.group('hour')), int(match.group('minute')), int(match.group('second')), ) )
python
def from_string(input_str) -> 'MissionTime': # noinspection SpellCheckingInspection """ Creates a MissionTime instance from a string Format: YYYYMMDDHHMMSS Args: input_str: string to parse Returns: MissionTime instance """ match = RE_INPUT_STRING.match(input_str) if not match: raise ValueError(f'badly formatted date/time: {input_str}') return MissionTime( datetime.datetime( int(match.group('year')), int(match.group('month')), int(match.group('day')), int(match.group('hour')), int(match.group('minute')), int(match.group('second')), ) )
[ "def", "from_string", "(", "input_str", ")", "->", "'MissionTime'", ":", "# noinspection SpellCheckingInspection", "match", "=", "RE_INPUT_STRING", ".", "match", "(", "input_str", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "f'badly formatted date/time: {input_str}'", ")", "return", "MissionTime", "(", "datetime", ".", "datetime", "(", "int", "(", "match", ".", "group", "(", "'year'", ")", ")", ",", "int", "(", "match", ".", "group", "(", "'month'", ")", ")", ",", "int", "(", "match", ".", "group", "(", "'day'", ")", ")", ",", "int", "(", "match", ".", "group", "(", "'hour'", ")", ")", ",", "int", "(", "match", ".", "group", "(", "'minute'", ")", ")", ",", "int", "(", "match", ".", "group", "(", "'second'", ")", ")", ",", ")", ")" ]
Creates a MissionTime instance from a string Format: YYYYMMDDHHMMSS Args: input_str: string to parse Returns: MissionTime instance
[ "Creates", "a", "MissionTime", "instance", "from", "a", "string" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/mission_time.py#L51-L77
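RE_INPUT_STRING is referenced but not included in this record; the pattern below is a plausible reconstruction for the documented YYYYMMDDHHMMSS format, with the parsing step inlined:

import datetime
import re

# Assumed pattern; the real RE_INPUT_STRING is defined elsewhere in the module.
RE_INPUT_STRING = re.compile(
    r'^(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})'
    r'(?P<hour>\d{2})(?P<minute>\d{2})(?P<second>\d{2})$'
)

match = RE_INPUT_STRING.match('20180512143000')
groups = ('year', 'month', 'day', 'hour', 'minute', 'second')
print(datetime.datetime(*(int(match.group(g)) for g in groups)))
# 2018-05-12 14:30:00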
243,543
dossier/dossier.models
dossier/models/report.py
ReportGenerator.run
def run(self, output): '''Generate the report to the given output. :param output: writable file-like object or file path ''' # Ensure folder exists. if self.folder_id not in self.folders.folders(self.user): print("E: folder not found: %s" % self.folder_name, file=sys.stderr) return # Create workbook. wb = self.workbook = xlsxwriter.Workbook(output) # Create the different styles used by this report generator. self.formats['title'] = wb.add_format({'font_size': '18', 'bold': True}) self.formats['default'] = wb.add_format({'align': 'top'}) self.formats['bold'] = wb.add_format({'bold': True}) self.formats['header'] = wb.add_format({ 'bold': True, 'align': 'center', 'valign': 'top', 'font_size': '14', 'font_color': '#506050', 'bg_color': '#f5f5f5', 'right': 1, 'border_color': 'white'}) self.formats['pre'] = wb.add_format({'font_name': 'Courier', 'valign': 'top'}) self.formats['link'] = wb.add_format({'valign': 'top', 'font_color': 'blue', 'underline': True}) self.formats['type_text'] = wb.add_format({ 'font_color': '#BF8645', 'valign': 'top', 'align': 'center'}) self.formats['type_image'] = wb.add_format({ 'font_color': '#84BF45', 'valign': 'top', 'align': 'center'}) # Generate report for a specific subfolder or *all* subfolders of # self.folder . if self.subfolder_id is None: self._generate_report_all() else: self._generate_report_single(self.subfolder_id) # done and outta here self.workbook.close()
python
def run(self, output): '''Generate the report to the given output. :param output: writable file-like object or file path ''' # Ensure folder exists. if self.folder_id not in self.folders.folders(self.user): print("E: folder not found: %s" % self.folder_name, file=sys.stderr) return # Create workbook. wb = self.workbook = xlsxwriter.Workbook(output) # Create the different styles used by this report generator. self.formats['title'] = wb.add_format({'font_size': '18', 'bold': True}) self.formats['default'] = wb.add_format({'align': 'top'}) self.formats['bold'] = wb.add_format({'bold': True}) self.formats['header'] = wb.add_format({ 'bold': True, 'align': 'center', 'valign': 'top', 'font_size': '14', 'font_color': '#506050', 'bg_color': '#f5f5f5', 'right': 1, 'border_color': 'white'}) self.formats['pre'] = wb.add_format({'font_name': 'Courier', 'valign': 'top'}) self.formats['link'] = wb.add_format({'valign': 'top', 'font_color': 'blue', 'underline': True}) self.formats['type_text'] = wb.add_format({ 'font_color': '#BF8645', 'valign': 'top', 'align': 'center'}) self.formats['type_image'] = wb.add_format({ 'font_color': '#84BF45', 'valign': 'top', 'align': 'center'}) # Generate report for a specific subfolder or *all* subfolders of # self.folder . if self.subfolder_id is None: self._generate_report_all() else: self._generate_report_single(self.subfolder_id) # done and outta here self.workbook.close()
[ "def", "run", "(", "self", ",", "output", ")", ":", "# Ensure folder exists.", "if", "self", ".", "folder_id", "not", "in", "self", ".", "folders", ".", "folders", "(", "self", ".", "user", ")", ":", "print", "(", "\"E: folder not found: %s\"", "%", "self", ".", "folder_name", ",", "file", "=", "sys", ".", "stderr", ")", "return", "# Create workbook.", "wb", "=", "self", ".", "workbook", "=", "xlsxwriter", ".", "Workbook", "(", "output", ")", "# Create the different styles used by this report generator.", "self", ".", "formats", "[", "'title'", "]", "=", "wb", ".", "add_format", "(", "{", "'font_size'", ":", "'18'", ",", "'bold'", ":", "True", "}", ")", "self", ".", "formats", "[", "'default'", "]", "=", "wb", ".", "add_format", "(", "{", "'align'", ":", "'top'", "}", ")", "self", ".", "formats", "[", "'bold'", "]", "=", "wb", ".", "add_format", "(", "{", "'bold'", ":", "True", "}", ")", "self", ".", "formats", "[", "'header'", "]", "=", "wb", ".", "add_format", "(", "{", "'bold'", ":", "True", ",", "'align'", ":", "'center'", ",", "'valign'", ":", "'top'", ",", "'font_size'", ":", "'14'", ",", "'font_color'", ":", "'#506050'", ",", "'bg_color'", ":", "'#f5f5f5'", ",", "'right'", ":", "1", ",", "'border_color'", ":", "'white'", "}", ")", "self", ".", "formats", "[", "'pre'", "]", "=", "wb", ".", "add_format", "(", "{", "'font_name'", ":", "'Courier'", ",", "'valign'", ":", "'top'", "}", ")", "self", ".", "formats", "[", "'link'", "]", "=", "wb", ".", "add_format", "(", "{", "'valign'", ":", "'top'", ",", "'font_color'", ":", "'blue'", ",", "'underline'", ":", "True", "}", ")", "self", ".", "formats", "[", "'type_text'", "]", "=", "wb", ".", "add_format", "(", "{", "'font_color'", ":", "'#BF8645'", ",", "'valign'", ":", "'top'", ",", "'align'", ":", "'center'", "}", ")", "self", ".", "formats", "[", "'type_image'", "]", "=", "wb", ".", "add_format", "(", "{", "'font_color'", ":", "'#84BF45'", ",", "'valign'", ":", "'top'", ",", "'align'", ":", "'center'", "}", ")", "# Generate report for a specific subfolder or *all* subfolders of", "# self.folder .", "if", "self", ".", "subfolder_id", "is", "None", ":", "self", ".", "_generate_report_all", "(", ")", "else", ":", "self", ".", "_generate_report_single", "(", "self", ".", "subfolder_id", ")", "# done and outta here", "self", ".", "workbook", ".", "close", "(", ")" ]
Generate the report to the given output. :param output: writable file-like object or file path
[ "Generate", "the", "report", "to", "the", "given", "output", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/report.py#L84-L140
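The xlsxwriter pattern run() relies on (create a workbook, register named formats, write cells, close) reduces to this self-contained sketch; 'report.xlsx' and the format subset are arbitrary choices for the example:

import xlsxwriter

wb = xlsxwriter.Workbook('report.xlsx')
formats = {
    'title': wb.add_format({'font_size': '18', 'bold': True}),
    'header': wb.add_format({'bold': True, 'align': 'center',
                             'bg_color': '#f5f5f5'}),
}
ws = wb.add_worksheet('example')
ws.write('A1', 'Dossier report', formats['title'])
ws.write('A4', 'Id', formats['header'])
wb.close()  # the file is only written to disk on close()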
243,544
dossier/dossier.models
dossier/models/report.py
ReportGenerator._generate_report_all
def _generate_report_all(self): '''Generate report for all subfolders contained by self.folder_id.''' assert self.workbook is not None count = 0 # Do all subfolders for sid in self.folders.subfolders(self.folder_id, self.user): count += 1 self._generate_for_subfolder(sid) if count == 0: print("I: empty workbook created: no subfolders found")
python
def _generate_report_all(self): '''Generate report for all subfolders contained by self.folder_id.''' assert self.workbook is not None count = 0 # Do all subfolders for sid in self.folders.subfolders(self.folder_id, self.user): count += 1 self._generate_for_subfolder(sid) if count == 0: print("I: empty workbook created: no subfolders found")
[ "def", "_generate_report_all", "(", "self", ")", ":", "assert", "self", ".", "workbook", "is", "not", "None", "count", "=", "0", "# Do all subfolders", "for", "sid", "in", "self", ".", "folders", ".", "subfolders", "(", "self", ".", "folder_id", ",", "self", ".", "user", ")", ":", "count", "+=", "1", "self", ".", "_generate_for_subfolder", "(", "sid", ")", "if", "count", "==", "0", ":", "print", "(", "\"I: empty workbook created: no subfolders found\"", ")" ]
Generate report for all subfolders contained by self.folder_id.
[ "Generate", "report", "for", "all", "subfolders", "contained", "by", "self", ".", "folder_id", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/report.py#L142-L153
243,545
dossier/dossier.models
dossier/models/report.py
ReportGenerator._generate_report_single
def _generate_report_single(self, sid): '''Generate report for subfolder given by sid . The main purpose of this method is to make sure the subfolder given by sid does indeed exist. All real work is delegated to _generate_for_subfolder. :param sid: The subfolder id Private method. ''' assert self.workbook is not None assert sid is not None # Ensure subfolder exists if not sid in self.folders.subfolders(self.folder_id, self.user): subfolder = Folders.id_to_name(sid) print("E: subfolder not found: %s" % subfolder, file=sys.stderr) return self._generate_for_subfolder(sid)
python
def _generate_report_single(self, sid): '''Generate report for subfolder given by sid . The main purpose of this method is to make sure the subfolder given by sid does indeed exist. All real work is delegated to _generate_for_subfolder. :param sid: The subfolder id Private method. ''' assert self.workbook is not None assert sid is not None # Ensure subfolder exists if not sid in self.folders.subfolders(self.folder_id, self.user): subfolder = Folders.id_to_name(sid) print("E: subfolder not found: %s" % subfolder, file=sys.stderr) return self._generate_for_subfolder(sid)
[ "def", "_generate_report_single", "(", "self", ",", "sid", ")", ":", "assert", "self", ".", "workbook", "is", "not", "None", "assert", "sid", "is", "not", "None", "# Ensure subfolder exists", "if", "not", "sid", "in", "self", ".", "folders", ".", "subfolders", "(", "self", ".", "folder_id", ",", "self", ".", "user", ")", ":", "subfolder", "=", "Folders", ".", "id_to_name", "(", "sid", ")", "print", "(", "\"E: subfolder not found: %s\"", "%", "subfolder", ",", "file", "=", "sys", ".", "stderr", ")", "return", "self", ".", "_generate_for_subfolder", "(", "sid", ")" ]
Generate report for subfolder given by sid . The main purpose of this method is to make sure the subfolder given by sid does indeed exist. All real work is delegated to _generate_for_subfolder. :param sid: The subfolder id Private method.
[ "Generate", "report", "for", "subfolder", "given", "by", "sid", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/report.py#L155-L175
243,546
dossier/dossier.models
dossier/models/report.py
ReportGenerator._generate_for_subfolder
def _generate_for_subfolder(self, sid): ''' Generate report for a subfolder. :param sid: The subfolder id; assumed valid ''' # TODO: the following assumes subfolder names can be constructed from a # subfolder id, which might not be the case in the future. name = self._sanitise_sheetname(uni(Folders.id_to_name(sid))) ws = self.workbook.add_worksheet(name) fmt = self.formats ws.write("A1", "Dossier report", fmt['title']) ws.write("A2", "%s | %s" % (uni(self.folder_name), name)) # Column dimensions ws.set_column('A:A', 37) ws.set_column('B:B', 37) ws.set_column('C:C', 37) ws.set_column('D:D', 8) ws.set_column('E:E', 30) ws.set_column('F:F', 37) # Header ws.write("A4", "Id", fmt['header']) ws.write("B4", "URL", fmt['header']) ws.write("C4", "Subtopic Id", fmt['header']) ws.write("D4", "Type", fmt['header']) ws.write("E4", "Content", fmt['header']) ws.write("F4", "Image URL", fmt['header']) # TODO: we probably want to wrap the following in a try-catch block, in # case the call to folders.subtopics fails. row = 4 for i in subtopics(self.store, self.folders, self.folder_id, sid, self.user): Item.construct(self, i).generate_to(ws, row) row += 1
python
def _generate_for_subfolder(self, sid): ''' Generate report for a subfolder. :param sid: The subfolder id; assumed valid ''' # TODO: the following assumes subfolder names can be constructed from a # subfolder id, which might not be the case in the future. name = self._sanitise_sheetname(uni(Folders.id_to_name(sid))) ws = self.workbook.add_worksheet(name) fmt = self.formats ws.write("A1", "Dossier report", fmt['title']) ws.write("A2", "%s | %s" % (uni(self.folder_name), name)) # Column dimensions ws.set_column('A:A', 37) ws.set_column('B:B', 37) ws.set_column('C:C', 37) ws.set_column('D:D', 8) ws.set_column('E:E', 30) ws.set_column('F:F', 37) # Header ws.write("A4", "Id", fmt['header']) ws.write("B4", "URL", fmt['header']) ws.write("C4", "Subtopic Id", fmt['header']) ws.write("D4", "Type", fmt['header']) ws.write("E4", "Content", fmt['header']) ws.write("F4", "Image URL", fmt['header']) # TODO: we probably want to wrap the following in a try-catch block, in # case the call to folders.subtopics fails. row = 4 for i in subtopics(self.store, self.folders, self.folder_id, sid, self.user): Item.construct(self, i).generate_to(ws, row) row += 1
[ "def", "_generate_for_subfolder", "(", "self", ",", "sid", ")", ":", "# TODO: the following assumes subfolder names can be constructed from a", "# subfolder id, which might not be the case in the future.", "name", "=", "self", ".", "_sanitise_sheetname", "(", "uni", "(", "Folders", ".", "id_to_name", "(", "sid", ")", ")", ")", "ws", "=", "self", ".", "workbook", ".", "add_worksheet", "(", "name", ")", "fmt", "=", "self", ".", "formats", "ws", ".", "write", "(", "\"A1\"", ",", "\"Dossier report\"", ",", "fmt", "[", "'title'", "]", ")", "ws", ".", "write", "(", "\"A2\"", ",", "\"%s | %s\"", "%", "(", "uni", "(", "self", ".", "folder_name", ")", ",", "name", ")", ")", "# Column dimensions", "ws", ".", "set_column", "(", "'A:A'", ",", "37", ")", "ws", ".", "set_column", "(", "'B:B'", ",", "37", ")", "ws", ".", "set_column", "(", "'C:C'", ",", "37", ")", "ws", ".", "set_column", "(", "'D:D'", ",", "8", ")", "ws", ".", "set_column", "(", "'E:E'", ",", "30", ")", "ws", ".", "set_column", "(", "'F:F'", ",", "37", ")", "# Header", "ws", ".", "write", "(", "\"A4\"", ",", "\"Id\"", ",", "fmt", "[", "'header'", "]", ")", "ws", ".", "write", "(", "\"B4\"", ",", "\"URL\"", ",", "fmt", "[", "'header'", "]", ")", "ws", ".", "write", "(", "\"C4\"", ",", "\"Subtopic Id\"", ",", "fmt", "[", "'header'", "]", ")", "ws", ".", "write", "(", "\"D4\"", ",", "\"Type\"", ",", "fmt", "[", "'header'", "]", ")", "ws", ".", "write", "(", "\"E4\"", ",", "\"Content\"", ",", "fmt", "[", "'header'", "]", ")", "ws", ".", "write", "(", "\"F4\"", ",", "\"Image URL\"", ",", "fmt", "[", "'header'", "]", ")", "# TODO: we probably want to wrap the following in a try-catch block, in", "# case the call to folders.subtopics fails.", "row", "=", "4", "for", "i", "in", "subtopics", "(", "self", ".", "store", ",", "self", ".", "folders", ",", "self", ".", "folder_id", ",", "sid", ",", "self", ".", "user", ")", ":", "Item", ".", "construct", "(", "self", ",", "i", ")", ".", "generate_to", "(", "ws", ",", "row", ")", "row", "+=", "1" ]
Generate report for a subfolder. :param sid: The subfolder id; assumed valid
[ "Generate", "report", "for", "a", "subfolder", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/report.py#L177-L211
243,547
dossier/dossier.models
dossier/models/report.py
Item.construct
def construct(generator, subtopic): '''Method constructor of Item-derived classes. Given a subtopic tuple, this method attempts to construct an Item-derived class, currently either ItemText or ItemImage, from the subtopic's type, found in its 4th element. :param generator: Reference to the owning ReportGenerator instance :param subtopic: Tuple containing content_id, meta_url, subtopic_id, type and type-specific data. :returns: An instantiated Item-derived class. ''' type = subtopic[3] if type not in Item.constructors: raise LookupError(type) # perhaps customise this exception? return Item.constructors[type](generator, subtopic)
python
def construct(generator, subtopic): '''Method constructor of Item-derived classes. Given a subtopic tuple, this method attempts to construct an Item-derived class, currently either ItemText or ItemImage, from the subtopic's type, found in its 4th element. :param generator: Reference to the owning ReportGenerator instance :param subtopic: Tuple containing content_id, meta_url, subtopic_id, type and type-specific data. :returns: An instantiated Item-derived class. ''' type = subtopic[3] if type not in Item.constructors: raise LookupError(type) # perhaps customise this exception? return Item.constructors[type](generator, subtopic)
[ "def", "construct", "(", "generator", ",", "subtopic", ")", ":", "type", "=", "subtopic", "[", "3", "]", "if", "type", "not", "in", "Item", ".", "constructors", ":", "raise", "LookupError", "(", "type", ")", "# perhaps customise this exception?", "return", "Item", ".", "constructors", "[", "type", "]", "(", "generator", ",", "subtopic", ")" ]
Method constructor of Item-derived classes. Given a subtopic tuple, this method attempts to construct an Item-derived class, currently either ItemText or ItemImage, from the subtopic's type, found in its 4th element. :param generator: Reference to the owning ReportGenerator instance :param subtopic: Tuple containing content_id, meta_url, subtopic_id, type and type-specific data. :returns: An instantiated Item-derived class.
[ "Method", "constructor", "of", "Item", "-", "derived", "classes", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/report.py#L229-L247
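construct() is a registry-based factory. A toy version of the same pattern, with stand-in classes (the real ItemText/ItemImage constructors are not shown in this record):

class ItemText:
    def __init__(self, generator, subtopic):
        self.content = subtopic[4]

class ItemImage:
    def __init__(self, generator, subtopic):
        self.url = subtopic[4]

constructors = {'text': ItemText, 'image': ItemImage}  # stand-in registry

def construct(generator, subtopic):
    type_ = subtopic[3]           # the subtopic's type tag, 4th element
    if type_ not in constructors:
        raise LookupError(type_)
    return constructors[type_](generator, subtopic)

item = construct(None, ('cid', 'http://example.com', 'sid', 'text', 'hello'))
print(type(item).__name__, item.content)  # ItemText hello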
243,548
dossier/dossier.models
dossier/models/report.py
ItemImage.resize_image
def resize_image(self, data): '''Resize image if height over 50 pixels and convert to JPEG. Given a BytesIO or StringIO data input, this method ensures that the image is not over 50 pixels high. If it is over 50 pixels high, the image is resized to precisely 50 pixels in height and the width is adjusted accordingly in keeping with the width/height ratio. The image is always converted to JPEG to minimize any potential issues while embedding the image in the Excel workbook. :param data: BytesIO or StringIO stream containing image data :returns: Reference to a BytesIO instance containing resized image data. ''' image = Image.open(data) stream_out = BytesIO() width, height = image.size[:] if height > 50: width = int(width * 50 / height) height = 50 image = image.resize((width, 50)) image.save(stream_out, format="JPEG", quality=100) stream_out.seek(0) return stream_out
python
def resize_image(self, data): '''Resize image if height over 50 pixels and convert to JPEG. Given a BytesIO or StringIO data input, this method ensures that the image is not over 50 pixels high. If it is over 50 pixels high, the image is resized to precisely 50 pixels in height and the width is adjusted accordingly in keeping with the width/height ratio. The image is always converted to JPEG to minimize any potential issues while embedding the image in the Excel workbook. :param data: BytesIO or StringIO stream containing image data :returns: Reference to a BytesIO instance containing resized image data. ''' image = Image.open(data) stream_out = BytesIO() width, height = image.size[:] if height > 50: width = int(width * 50 / height) height = 50 image = image.resize((width, 50)) image.save(stream_out, format="JPEG", quality=100) stream_out.seek(0) return stream_out
[ "def", "resize_image", "(", "self", ",", "data", ")", ":", "image", "=", "Image", ".", "open", "(", "data", ")", "stream_out", "=", "BytesIO", "(", ")", "width", ",", "height", "=", "image", ".", "size", "[", ":", "]", "if", "height", ">", "50", ":", "width", "=", "int", "(", "width", "*", "50", "/", "height", ")", "height", "=", "50", "image", "=", "image", ".", "resize", "(", "(", "width", ",", "50", ")", ")", "image", ".", "save", "(", "stream_out", ",", "format", "=", "\"JPEG\"", ",", "quality", "=", "100", ")", "stream_out", ".", "seek", "(", "0", ")", "return", "stream_out" ]
Resize image if height over 50 pixels and convert to JPEG. Given a BytesIO or StringIO data input, this method ensures that the image is not over 50 pixels high. If it is over 50 pixels high, the image is resized to precisely 50 pixels in height and the width is adjusted accordingly in keeping with the width/height ratio. The image is always converted to JPEG to minimize any potential issues while embedding the image in the Excel workbook. :param data: BytesIO or StringIO stream containing image data :returns: Reference to a BytesIO instance containing resized image data.
[ "Resize", "image", "if", "height", "over", "50", "pixels", "and", "convert", "to", "JPEG", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/report.py#L316-L341
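The resize logic, run end-to-end against an in-memory test image (requires Pillow; the 200x100 input is generated here rather than read from an external stream):

from io import BytesIO
from PIL import Image

buf_in = BytesIO()
Image.new('RGB', (200, 100), 'white').save(buf_in, format='PNG')
buf_in.seek(0)

image = Image.open(buf_in)
width, height = image.size
if height > 50:
    width = int(width * 50 / height)   # keep the aspect ratio
    image = image.resize((width, 50))

buf_out = BytesIO()
image.save(buf_out, format='JPEG', quality=100)
buf_out.seek(0)
print(Image.open(buf_out).size)  # (100, 50)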
243,549
bfontaine/p7magma
magma/courses.py
Course.desc
def desc(self): """ A textual description of this course """ if 'ects' in self: fmt = '%s (%s, S%d) [%s, %.2f ECTS]' fields = ('title', 'code', 'semester', 'status', 'ects') else: fmt = '%s' fields = ('title',) s = fmt % tuple([self[f] for f in fields]) if self['followed'] and self['session']: res = self['result'] if self.get('jury', 0) > 0: res = self['jury'] s += ' --> %.2f/20 (%s)' % (res, self['session']) return s
python
def desc(self): """ A textual description of this course """ if 'ects' in self: fmt = '%s (%s, S%d) [%s, %.2f ECTS]' fields = ('title', 'code', 'semester', 'status', 'ects') else: fmt = '%s' fields = ('title',) s = fmt % tuple([self[f] for f in fields]) if self['followed'] and self['session']: res = self['result'] if self.get('jury', 0) > 0: res = self['jury'] s += ' --> %.2f/20 (%s)' % (res, self['session']) return s
[ "def", "desc", "(", "self", ")", ":", "if", "'ects'", "in", "self", ":", "fmt", "=", "'%s (%s, S%d) [%s, %.2f ECTS]'", "fields", "=", "(", "'title'", ",", "'code'", ",", "'semester'", ",", "'status'", ",", "'ects'", ")", "else", ":", "fmt", "=", "'%s'", "fields", "=", "(", "'title'", ",", ")", "s", "=", "fmt", "%", "tuple", "(", "[", "self", "[", "f", "]", "for", "f", "in", "fields", "]", ")", "if", "self", "[", "'followed'", "]", "and", "self", "[", "'session'", "]", ":", "res", "=", "self", "[", "'result'", "]", "if", "self", ".", "get", "(", "'jury'", ",", "0", ")", ">", "0", ":", "res", "=", "self", "[", "'jury'", "]", "s", "+=", "' --> %.2f/20 (%s)'", "%", "(", "res", ",", "self", "[", "'session'", "]", ")", "return", "s" ]
A textual description of this course
[ "A", "textual", "description", "of", "this", "course" ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/courses.py#L28-L48
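A worked example of the long format string, with invented course values (note how a positive jury mark would override the result):

course = {'title': 'Compilation', 'code': 'IC5', 'semester': 3,
          'status': 'Obligatoire', 'ects': 6.0,
          'followed': True, 'session': '1', 'result': 14.5, 'jury': 0}

s = '%s (%s, S%d) [%s, %.2f ECTS]' % (
    course['title'], course['code'], course['semester'],
    course['status'], course['ects'])
res = course['jury'] if course.get('jury', 0) > 0 else course['result']
s += ' --> %.2f/20 (%s)' % (res, course['session'])
print(s)  # Compilation (IC5, S3) [Obligatoire, 6.00 ECTS] --> 14.50/20 (1)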
243,550
bfontaine/p7magma
magma/courses.py
CoursesList._populate
def _populate(self, soup): """ Populate the list, assuming ``soup`` is a ``BeautifulSoup`` object. """ tables = soup.select('table[rules=all]') if not tables: return trs = tables[0].select('tr')[1:] if len(trs[0]) == 5: # M1 self._populate_small_table(trs) else: # M2 self._populate_large_table(trs)
python
def _populate(self, soup): """ Populate the list, assuming ``soup`` is a ``BeautifulSoup`` object. """ tables = soup.select('table[rules=all]') if not tables: return trs = tables[0].select('tr')[1:] if len(trs[0]) == 5: # M1 self._populate_small_table(trs) else: # M2 self._populate_large_table(trs)
[ "def", "_populate", "(", "self", ",", "soup", ")", ":", "tables", "=", "soup", ".", "select", "(", "'table[rules=all]'", ")", "if", "not", "tables", ":", "return", "trs", "=", "tables", "[", "0", "]", ".", "select", "(", "'tr'", ")", "[", "1", ":", "]", "if", "len", "(", "trs", "[", "0", "]", ")", "==", "5", ":", "# M1", "self", ".", "_populate_small_table", "(", "trs", ")", "else", ":", "# M2", "self", ".", "_populate_large_table", "(", "trs", ")" ]
Populate the list, assuming ``soup`` is a ``BeautifulSoup`` object.
[ "Populate", "the", "list", "assuming", "soup", "is", "a", "BeautifulSoup", "object", "." ]
713647aa9e3187c93c2577ef812f33ec42ae5494
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/courses.py#L70-L84
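What the selector chain expects, on fabricated markup; note that len(trs[0]) counts the row's child nodes, which is how the code tells the 5-column M1 table from the wider M2 one:

from bs4 import BeautifulSoup

html = ('<table rules="all">'
        '<tr><th>a</th><th>b</th><th>c</th><th>d</th><th>e</th></tr>'
        '<tr><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td></tr>'
        '</table>')
soup = BeautifulSoup(html, 'html.parser')
tables = soup.select('table[rules=all]')
trs = tables[0].select('tr')[1:]
print(len(trs), len(trs[0]))  # 1 5 -> one data row with 5 cells (M1 layout)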
243,551
pip-services3-python/pip-services3-components-python
pip_services3_components/connect/ConnectionParams.py
ConnectionParams.get_host
def get_host(self): """ Gets the host name or IP address. :return: the host name or IP address. """ host = self.get_as_nullable_string("host") host = host if host != None else self.get_as_nullable_string("ip") return host
python
def get_host(self): """ Gets the host name or IP address. :return: the host name or IP address. """ host = self.get_as_nullable_string("host") host = host if host != None else self.get_as_nullable_string("ip") return host
[ "def", "get_host", "(", "self", ")", ":", "host", "=", "self", ".", "get_as_nullable_string", "(", "\"host\"", ")", "host", "=", "host", "if", "host", "!=", "None", "else", "self", ".", "get_as_nullable_string", "(", "\"ip\"", ")", "return", "host" ]
Gets the host name or IP address. :return: the host name or IP address.
[ "Gets", "the", "host", "name", "or", "IP", "address", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/connect/ConnectionParams.py#L103-L111
243,552
etcher-be/emiz
emiz/weather/awc/awc.py
AWC.query_icao
def query_icao(icao: str): """ Queries AWC for the METAR of a given station Args: icao: station ID as a four-character ICAO code Returns: AWC result for the station """ params = { 'dataSource': 'metars', 'requestType': 'retrieve', 'format': 'csv', 'hoursBeforeNow': 24, } AWC._validate_icao(icao) params['stationString'] = icao try: return AWC._query(params) except RequestsConnectionError: raise AWCRequestFailed('failed to obtain requested data from AWC')
python
def query_icao(icao: str): """ Queries AWC for the METAR of a given station Args: icao: station ID as a four-character ICAO code Returns: AWC result for the station """ params = { 'dataSource': 'metars', 'requestType': 'retrieve', 'format': 'csv', 'hoursBeforeNow': 24, } AWC._validate_icao(icao) params['stationString'] = icao try: return AWC._query(params) except RequestsConnectionError: raise AWCRequestFailed('failed to obtain requested data from AWC')
[ "def", "query_icao", "(", "icao", ":", "str", ")", ":", "params", "=", "{", "'dataSource'", ":", "'metars'", ",", "'requestType'", ":", "'retrieve'", ",", "'format'", ":", "'csv'", ",", "'hoursBeforeNow'", ":", "24", ",", "}", "AWC", ".", "_validate_icao", "(", "icao", ")", "params", "[", "'stationString'", "]", "=", "icao", "try", ":", "return", "AWC", ".", "_query", "(", "params", ")", "except", "RequestsConnectionError", ":", "raise", "AWCRequestFailed", "(", "'failed to obtain requested data from AWC'", ")" ]
Queries AWC for the METAR of a given station Args: icao: station ID as a four-character ICAO code Returns: AWC result for the station
[ "Queries", "AWC", "for", "the", "METAR", "of", "a", "given", "station" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/awc/awc.py#L48-L69
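AWC._query is not shown in this record; under the assumption that it issues a plain HTTP GET against AWC's data server, the request built from these params would look roughly like this (the endpoint URL below is an assumption, not taken from the repository):

import requests

# Hypothetical endpoint; the real one lives inside AWC._query.
AWC_URL = 'https://www.aviationweather.gov/adds/dataserver_current/httpparam'
params = {
    'dataSource': 'metars',
    'requestType': 'retrieve',
    'format': 'csv',
    'hoursBeforeNow': 24,
    'stationString': 'UGTB',  # sample ICAO
}
resp = requests.get(AWC_URL, params=params)
print(resp.status_code, resp.headers.get('Content-Type'))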
243,553
canonical-web-and-design/asset-mapper
ubuntudesign/__init__.py
AssetMapper.create
def create(self, asset_content, friendly_name, tags='', optimize=False): """ Create an asset on the server You must provide the asset with a friendly name for the server to generate a path from. """ return self._create_asset({ 'asset': b64encode(asset_content), 'friendly-name': friendly_name, 'tags': tags, 'optimize': optimize, 'type': 'base64' })
python
def create(self, asset_content, friendly_name, tags='', optimize=False): """ Create an asset on the server You must provide the asset with a friendly name for the server to generate a path from. """ return self._create_asset({ 'asset': b64encode(asset_content), 'friendly-name': friendly_name, 'tags': tags, 'optimize': optimize, 'type': 'base64' })
[ "def", "create", "(", "self", ",", "asset_content", ",", "friendly_name", ",", "tags", "=", "''", ",", "optimize", "=", "False", ")", ":", "return", "self", ".", "_create_asset", "(", "{", "'asset'", ":", "b64encode", "(", "asset_content", ")", ",", "'friendly-name'", ":", "friendly_name", ",", "'tags'", ":", "tags", ",", "'optimize'", ":", "optimize", ",", "'type'", ":", "'base64'", "}", ")" ]
Create an asset on the server You must provide the asset with a friendly name for the server to generate a path from.
[ "Create", "an", "asset", "on", "the", "server", "You", "must", "provide", "the", "asset", "with", "a", "friendly", "name", "for", "the", "server", "to", "generate", "a", "path", "from", "." ]
5dc438a9cb3290cf54c5607913be02ed9d58b02d
https://github.com/canonical-web-and-design/asset-mapper/blob/5dc438a9cb3290cf54c5607913be02ed9d58b02d/ubuntudesign/__init__.py#L64-L77
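What ends up in the 'asset' field of the payload; note that on Python 3, b64encode takes and returns bytes, so asset_content must already be bytes:

from base64 import b64encode

asset_content = b'<svg/>'        # sample asset body (bytes, not str)
print(b64encode(asset_content))  # b'PHN2Zy8+'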
243,554
canonical-web-and-design/asset-mapper
ubuntudesign/__init__.py
AssetMapper.create_at_path
def create_at_path(self, asset_content, url_path, tags=''): """ Create asset at a specific URL path on the server """ return self._create_asset({ 'asset': b64encode(asset_content), 'url-path': url_path, 'tags': tags, 'type': 'base64' })
python
def create_at_path(self, asset_content, url_path, tags=''): """ Create asset at a specific URL path on the server """ return self._create_asset({ 'asset': b64encode(asset_content), 'url-path': url_path, 'tags': tags, 'type': 'base64' })
[ "def", "create_at_path", "(", "self", ",", "asset_content", ",", "url_path", ",", "tags", "=", "''", ")", ":", "return", "self", ".", "_create_asset", "(", "{", "'asset'", ":", "b64encode", "(", "asset_content", ")", ",", "'url-path'", ":", "url_path", ",", "'tags'", ":", "tags", ",", "'type'", ":", "'base64'", "}", ")" ]
Create asset at a specific URL path on the server
[ "Create", "asset", "at", "a", "specific", "URL", "path", "on", "the", "server" ]
5dc438a9cb3290cf54c5607913be02ed9d58b02d
https://github.com/canonical-web-and-design/asset-mapper/blob/5dc438a9cb3290cf54c5607913be02ed9d58b02d/ubuntudesign/__init__.py#L79-L89
243,555
lwcook/horsetail-matching
horsetailmatching/hm.py
_matrix_integration
def _matrix_integration(q, h, t): ''' Returns the dp metric for a single horsetail curve at a given value of the epistemic uncertainties''' N = len(q) # correction if CDF has gone out of trapezium range if h[-1] < 0.9: h[-1] = 1.0 W = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)]) dp = (q - t).T.dot(W).dot(q - t) return dp
python
def _matrix_integration(q, h, t): ''' Returns the dp metric for a single horsetail curve at a given value of the epistemic uncertainties''' N = len(q) # correction if CDF has gone out of trapezium range if h[-1] < 0.9: h[-1] = 1.0 W = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)]) dp = (q - t).T.dot(W).dot(q - t) return dp
[ "def", "_matrix_integration", "(", "q", ",", "h", ",", "t", ")", ":", "N", "=", "len", "(", "q", ")", "# correction if CDF has gone out of trapezium range", "if", "h", "[", "-", "1", "]", "<", "0.9", ":", "h", "[", "-", "1", "]", "=", "1.0", "W", "=", "np", ".", "zeros", "(", "[", "N", ",", "N", "]", ")", "for", "i", "in", "range", "(", "N", ")", ":", "W", "[", "i", ",", "i", "]", "=", "0.5", "*", "(", "h", "[", "min", "(", "i", "+", "1", ",", "N", "-", "1", ")", "]", "-", "h", "[", "max", "(", "i", "-", "1", ",", "0", ")", "]", ")", "dp", "=", "(", "q", "-", "t", ")", ".", "T", ".", "dot", "(", "W", ")", ".", "dot", "(", "q", "-", "t", ")", "return", "dp" ]
Returns the dp metric for a single horsetail curve at a given value of the epistemic uncertainties
[ "Returns", "the", "dp", "metric", "for", "a", "single", "horsetail", "curve", "at", "a", "given", "value", "of", "the", "epistemic", "uncertainties" ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L921-L936
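A tiny numeric check of the weighted inner product dp = (q - t)^T W (q - t): W carries centred-difference weights of h, so dp approximates the integral of (q - t)^2 dh. The sample vectors are arbitrary:

import numpy as np

q = np.array([1.0, 2.0, 3.0])
h = np.array([0.0, 0.5, 1.0])  # already reaches 1.0, so no endpoint correction
t = np.array([1.0, 1.0, 1.0])

N = len(q)
W = np.zeros([N, N])
for i in range(N):
    W[i, i] = 0.5 * (h[min(i + 1, N - 1)] - h[max(i - 1, 0)])  # diag: 0.25, 0.5, 0.25

print((q - t).T.dot(W).dot(q - t))  # 0.25*0 + 0.5*1 + 0.25*4 = 1.5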
243,556
lwcook/horsetail-matching
horsetailmatching/hm.py
_matrix_grad
def _matrix_grad(q, h, h_dx, t, t_prime): ''' Returns the gradient with respect to a single variable''' N = len(q) W = np.zeros([N, N]) Wprime = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)]) Wprime[i, i] = \ 0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)]) tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)]) grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \ + (q - t).T.dot(Wprime).dot(q - t) return grad
python
def _matrix_grad(q, h, h_dx, t, t_prime): ''' Returns the gradient with respect to a single variable''' N = len(q) W = np.zeros([N, N]) Wprime = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)]) Wprime[i, i] = \ 0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)]) tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)]) grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \ + (q - t).T.dot(Wprime).dot(q - t) return grad
[ "def", "_matrix_grad", "(", "q", ",", "h", ",", "h_dx", ",", "t", ",", "t_prime", ")", ":", "N", "=", "len", "(", "q", ")", "W", "=", "np", ".", "zeros", "(", "[", "N", ",", "N", "]", ")", "Wprime", "=", "np", ".", "zeros", "(", "[", "N", ",", "N", "]", ")", "for", "i", "in", "range", "(", "N", ")", ":", "W", "[", "i", ",", "i", "]", "=", "0.5", "*", "(", "h", "[", "min", "(", "i", "+", "1", ",", "N", "-", "1", ")", "]", "-", "h", "[", "max", "(", "i", "-", "1", ",", "0", ")", "]", ")", "Wprime", "[", "i", ",", "i", "]", "=", "0.5", "*", "(", "h_dx", "[", "min", "(", "i", "+", "1", ",", "N", "-", "1", ")", "]", "-", "h_dx", "[", "max", "(", "i", "-", "1", ",", "0", ")", "]", ")", "tgrad", "=", "np", ".", "array", "(", "[", "t_prime", "[", "i", "]", "*", "h_dx", "[", "i", "]", "for", "i", "in", "np", ".", "arange", "(", "N", ")", "]", ")", "grad", "=", "2.0", "*", "(", "q", "-", "t", ")", ".", "T", ".", "dot", "(", "W", ")", ".", "dot", "(", "-", "1.0", "*", "tgrad", ")", "+", "(", "q", "-", "t", ")", ".", "T", ".", "dot", "(", "Wprime", ")", ".", "dot", "(", "q", "-", "t", ")", "return", "grad" ]
Returns the gradient with respect to a single variable
[ "Returns", "the", "gradient", "with", "respect", "to", "a", "single", "variable" ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L938-L954
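Spelled out, the product-rule expansion this code applies is (in the code's notation, writing h_x for h_dx and t' for t_prime):

\[
\frac{\partial d_p}{\partial x}
  = 2\,(q - t)^{T} W \bigl(-\,t' \odot h_x\bigr)
  + (q - t)^{T} W'\,(q - t),
\qquad
W'_{ii} = \tfrac{1}{2}\bigl[(h_x)_{\min(i+1,\,N-1)} - (h_x)_{\max(i-1,\,0)}\bigr]
\]

where \odot is the elementwise product (the tgrad vector in the code) and W is the same diagonal weight matrix as in _matrix_integration.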
243,557
lwcook/horsetail-matching
horsetailmatching/hm.py
HorsetailMatching.evalMetric
def evalMetric(self, x, method=None): '''Evaluates the horsetail matching metric at given values of the design variables. :param iterable x: values of the design variables, this is passed as the first argument to the function fqoi :param str method: method to use to evaluate the metric ('empirical' or 'kernel') :return: metric_value - value of the metric evaluated at the design point given by x :rtype: float *Example Usage*:: >>> def myFunc(x, u): return x[0]*x[1] + u >>> u1 = UniformParameter() >>> theHM = HorsetailMatching(myFunc, u) >>> x0 = [1, 2] >>> theHM.evalMetric(x0) ''' # Make sure dimensions are correct # u_sample_dimensions = self._processDimensions() if self.verbose: print('----------') print('At design: ' + str(x)) q_samples, grad_samples = self.evalSamples(x) if self.verbose: print('Evaluating metric') return self.evalMetricFromSamples(q_samples, grad_samples, method)
python
def evalMetric(self, x, method=None): '''Evaluates the horsetail matching metric at given values of the design variables. :param iterable x: values of the design variables, this is passed as the first argument to the function fqoi :param str method: method to use to evaluate the metric ('empirical' or 'kernel') :return: metric_value - value of the metric evaluated at the design point given by x :rtype: float *Example Usage*:: >>> def myFunc(x, u): return x[0]*x[1] + u >>> u1 = UniformParameter() >>> theHM = HorsetailMatching(myFunc, u) >>> x0 = [1, 2] >>> theHM.evalMetric(x0) ''' # Make sure dimensions are correct # u_sample_dimensions = self._processDimensions() if self.verbose: print('----------') print('At design: ' + str(x)) q_samples, grad_samples = self.evalSamples(x) if self.verbose: print('Evaluating metric') return self.evalMetricFromSamples(q_samples, grad_samples, method)
[ "def", "evalMetric", "(", "self", ",", "x", ",", "method", "=", "None", ")", ":", "# Make sure dimensions are correct", "# u_sample_dimensions = self._processDimensions()", "if", "self", ".", "verbose", ":", "print", "(", "'----------'", ")", "print", "(", "'At design: '", "+", "str", "(", "x", ")", ")", "q_samples", ",", "grad_samples", "=", "self", ".", "evalSamples", "(", "x", ")", "if", "self", ".", "verbose", ":", "print", "(", "'Evaluating metric'", ")", "return", "self", ".", "evalMetricFromSamples", "(", "q_samples", ",", "grad_samples", ",", "method", ")" ]
Evaluates the horsetail matching metric at given values of the design variables. :param iterable x: values of the design variables, this is passed as the first argument to the function fqoi :param str method: method to use to evaluate the metric ('empirical' or 'kernel') :return: metric_value - value of the metric evaluated at the design point given by x :rtype: float *Example Usage*:: >>> def myFunc(x, u): return x[0]*x[1] + u >>> u1 = UniformParameter() >>> theHM = HorsetailMatching(myFunc, u) >>> x0 = [1, 2] >>> theHM.evalMetric(x0)
[ "Evaluates", "the", "horsetail", "matching", "metric", "at", "given", "values", "of", "the", "design", "variables", "." ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L328-L363
243,558
lwcook/horsetail-matching
horsetailmatching/hm.py
HorsetailMatching.evalMetricFromSamples
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None): '''Evaluates the horsetail matching metric from given samples of the quantity of interest and gradient instead of evaluating them at a design. :param np.ndarray q_samples: samples of the quantity of interest, size (M_int, M_prob) :param np.ndarray grad_samples: samples of the gradient, size (M_int, M_prob, n_x) :return: metric_value - value of the metric :rtype: float ''' # Make sure dimensions are correct # u_sample_dimensions = self._processDimensions() q_samples = np.array(q_samples) if not (q_samples.shape[0] == self.samples_int and q_samples.shape[1] == self.samples_prob): raise ValueError('Shape of q_samples should be [M_int, M_prob]') if grad_samples is not None: grad_samples = np.array(grad_samples) if not (grad_samples.shape[0] == self.samples_int and grad_samples.shape[1] == self.samples_prob): raise ValueError('''Shape of grad_samples should be [M_int, M_prob, n_dv]''') if method is None: method = self.method if method.lower() == 'empirical': return self._evalMetricEmpirical(q_samples, grad_samples) elif method.lower() == 'kernel': return self._evalMetricKernel(q_samples, grad_samples) else: raise ValueError('Unsupported metric evaluation method')
python
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None): '''Evaluates the horsetail matching metric from given samples of the quantity of interest and gradient instead of evaluating them at a design. :param np.ndarray q_samples: samples of the quantity of interest, size (M_int, M_prob) :param np.ndarray grad_samples: samples of the gradient, size (M_int, M_prob, n_x) :return: metric_value - value of the metric :rtype: float ''' # Make sure dimensions are correct # u_sample_dimensions = self._processDimensions() q_samples = np.array(q_samples) if not (q_samples.shape[0] == self.samples_int and q_samples.shape[1] == self.samples_prob): raise ValueError('Shape of q_samples should be [M_int, M_prob]') if grad_samples is not None: grad_samples = np.array(grad_samples) if not (grad_samples.shape[0] == self.samples_int and grad_samples.shape[1] == self.samples_prob): raise ValueError('''Shape of grad_samples should be [M_int, M_prob, n_dv]''') if method is None: method = self.method if method.lower() == 'empirical': return self._evalMetricEmpirical(q_samples, grad_samples) elif method.lower() == 'kernel': return self._evalMetricKernel(q_samples, grad_samples) else: raise ValueError('Unsupported metric evaluation method')
[ "def", "evalMetricFromSamples", "(", "self", ",", "q_samples", ",", "grad_samples", "=", "None", ",", "method", "=", "None", ")", ":", "# Make sure dimensions are correct", "# u_sample_dimensions = self._processDimensions()", "q_samples", "=", "np", ".", "array", "(", "q_samples", ")", "if", "not", "(", "q_samples", ".", "shape", "[", "0", "]", "==", "self", ".", "samples_int", "and", "q_samples", ".", "shape", "[", "1", "]", "==", "self", ".", "samples_prob", ")", ":", "raise", "ValueError", "(", "'Shape of q_samples should be [M_int, M_prob]'", ")", "if", "grad_samples", "is", "not", "None", ":", "grad_samples", "=", "np", ".", "array", "(", "grad_samples", ")", "if", "not", "(", "grad_samples", ".", "shape", "[", "0", "]", "==", "self", ".", "samples_int", "and", "grad_samples", ".", "shape", "[", "1", "]", "==", "self", ".", "samples_prob", ")", ":", "raise", "ValueError", "(", "'''Shape of grad_samples\n should be [M_int, M_prob, n_dv]'''", ")", "if", "method", "is", "None", ":", "method", "=", "self", ".", "method", "if", "method", ".", "lower", "(", ")", "==", "'empirical'", ":", "return", "self", ".", "_evalMetricEmpirical", "(", "q_samples", ",", "grad_samples", ")", "elif", "method", ".", "lower", "(", ")", "==", "'kernel'", ":", "return", "self", ".", "_evalMetricKernel", "(", "q_samples", ",", "grad_samples", ")", "else", ":", "raise", "ValueError", "(", "'Unsupported metric evaluation method'", ")" ]
Evaluates the horsetail matching metric from given samples of the quantity of interest and gradient instead of evaluating them at a design. :param np.ndarray q_samples: samples of the quantity of interest, size (M_int, M_prob) :param np.ndarray grad_samples: samples of the gradient, size (M_int, M_prob, n_x) :return: metric_value - value of the metric :rtype: float
[ "Evaluates", "the", "horsetail", "matching", "metric", "from", "given", "samples", "of", "the", "quantity", "of", "interest", "and", "gradient", "instead", "of", "evaluating", "them", "at", "a", "design", "." ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L365-L402
243,559
lwcook/horsetail-matching
horsetailmatching/hm.py
HorsetailMatching.getHorsetail
def getHorsetail(self): '''Function that gets vectors of the horsetail plot at the last design evaluated. :return: upper_curve, lower_curve, CDFs - returns three parameters, the first two are tuples containing pairs of x/y vectors of the upper and lower bounds on the CDFs (the horsetail plot). The third parameter is a list of x/y tuples for individual CDFs propagated at each sampled value of the interval uncertainties *Example Usage*:: >>> def myFunc(x, u): return x[0]*x[1] + u >>> u = UniformParameter() >>> theHM = HorsetailMatching(myFunc, u) >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail() >>> matplotlib.pyplot.plot(x1, y1, 'b') >>> matplotlib.pyplot.plot(x2, y2, 'b') >>> for (x, y) in CDFs: ... matplotlib.pyplot.plot(x, y, 'k:') >>> matplotlib.pyplot.show() ''' if hasattr(self, '_ql'): ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu qh, hh = self._qh, self._hh if self._qis is not None: ql, hl = _appendPlotArrays(ql, hl, self._qis) qu, hu = _appendPlotArrays(qu, hu, self._qis) CDFs = [] for qi, hi in zip(qh, hh): CDFs.append((qi, hi)) upper_target = [self._ftarg_u(h) for h in hu] upper_curve = (qu, hu, upper_target) lower_target = [self._ftarg_l(h) for h in hl] lower_curve = (ql, hl, lower_target) return upper_curve, lower_curve, CDFs else: raise ValueError('''The metric has not been evaluated at any design point so the horsetail does not exist''')
python
def getHorsetail(self): '''Function that gets vectors of the horsetail plot at the last design evaluated. :return: upper_curve, lower_curve, CDFs - returns three parameters, the first two are tuples containing pairs of x/y vectors of the upper and lower bounds on the CDFs (the horsetail plot). The third parameter is a list of x/y tuples for individual CDFs propagated at each sampled value of the interval uncertainties *Example Usage*:: >>> def myFunc(x, u): return x[0]*x[1] + u >>> u = UniformParameter() >>> theHM = HorsetailMatching(myFunc, u) >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail() >>> matplotlib.pyplot.plot(x1, y1, 'b') >>> matplotlib.pyplot.plot(x2, y2, 'b') >>> for (x, y) in CDFs: ... matplotlib.pyplot.plot(x, y, 'k:') >>> matplotlib.pyplot.show() ''' if hasattr(self, '_ql'): ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu qh, hh = self._qh, self._hh if self._qis is not None: ql, hl = _appendPlotArrays(ql, hl, self._qis) qu, hu = _appendPlotArrays(qu, hu, self._qis) CDFs = [] for qi, hi in zip(qh, hh): CDFs.append((qi, hi)) upper_target = [self._ftarg_u(h) for h in hu] upper_curve = (qu, hu, upper_target) lower_target = [self._ftarg_l(h) for h in hl] lower_curve = (ql, hl, lower_target) return upper_curve, lower_curve, CDFs else: raise ValueError('''The metric has not been evaluated at any design point so the horsetail does not exist''')
[ "def", "getHorsetail", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_ql'", ")", ":", "ql", ",", "qu", ",", "hl", ",", "hu", "=", "self", ".", "_ql", ",", "self", ".", "_qu", ",", "self", ".", "_hl", ",", "self", ".", "_hu", "qh", ",", "hh", "=", "self", ".", "_qh", ",", "self", ".", "_hh", "if", "self", ".", "_qis", "is", "not", "None", ":", "ql", ",", "hl", "=", "_appendPlotArrays", "(", "ql", ",", "hl", ",", "self", ".", "_qis", ")", "qu", ",", "hu", "=", "_appendPlotArrays", "(", "qu", ",", "hu", ",", "self", ".", "_qis", ")", "CDFs", "=", "[", "]", "for", "qi", ",", "hi", "in", "zip", "(", "qh", ",", "hh", ")", ":", "CDFs", ".", "append", "(", "(", "qi", ",", "hi", ")", ")", "upper_target", "=", "[", "self", ".", "_ftarg_u", "(", "h", ")", "for", "h", "in", "hu", "]", "upper_curve", "=", "(", "qu", ",", "hu", ",", "upper_target", ")", "lower_target", "=", "[", "self", ".", "_ftarg_l", "(", "h", ")", "for", "h", "in", "hl", "]", "lower_curve", "=", "(", "ql", ",", "hl", ",", "lower_target", ")", "return", "upper_curve", ",", "lower_curve", ",", "CDFs", "else", ":", "raise", "ValueError", "(", "'''The metric has not been evaluated at any\n design point so the horsetail does not exist'''", ")" ]
Function that gets vectors of the horsetail plot at the last design evaluated. :return: upper_curve, lower_curve, CDFs - returns three parameters, the first two are tuples containing pairs of x/y vectors of the upper and lower bounds on the CDFs (the horsetail plot). The third parameter is a list of x/y tuples for individual CDFs propagated at each sampled value of the interval uncertainties *Example Usage*:: >>> def myFunc(x, u): return x[0]*x[1] + u >>> u = UniformParameter() >>> theHM = HorsetailMatching(myFunc, u) >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail() >>> matplotlib.pyplot.plot(x1, y1, 'b') >>> matplotlib.pyplot.plot(x2, y2, 'b') >>> for (x, y) in CDFs: ... matplotlib.pyplot.plot(x, y, 'k:') >>> matplotlib.pyplot.show()
[ "Function", "that", "gets", "vectors", "of", "the", "horsetail", "plot", "at", "the", "last", "design", "evaluated", "." ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L405-L450
243,560
goldhand/django-nupages
nupages/views.py
PageList.get_queryset
def get_queryset(self): ''' If MultiTenantMiddleware is used, filter queryset by request.site_id ''' queryset = super(PageList, self).get_queryset() if hasattr(self.request, 'site_id'): queryset = queryset.filter(site_id=self.request.site_id) return queryset
python
def get_queryset(self): ''' If MultiTenantMiddleware is used, filter queryset by request.site_id ''' queryset = super(PageList, self).get_queryset() if hasattr(self.request, 'site_id'): queryset = queryset.filter(site_id=self.request.site_id) return queryset
[ "def", "get_queryset", "(", "self", ")", ":", "queryset", "=", "super", "(", "PageList", ",", "self", ")", ".", "get_queryset", "(", ")", "if", "hasattr", "(", "self", ".", "request", ",", "'site_id'", ")", ":", "queryset", "=", "queryset", ".", "filter", "(", "site_id", "=", "self", ".", "request", ".", "site_id", ")", "return", "queryset" ]
If MultiTenantMiddleware is used, filter queryset by request.site_id
[ "If", "MultiTenantMiddleware", "is", "used", "filter", "queryset", "by", "request", ".", "site_id" ]
4e54fae7e057f9530c22dc30c03812fd660cb7f4
https://github.com/goldhand/django-nupages/blob/4e54fae7e057f9530c22dc30c03812fd660cb7f4/nupages/views.py#L54-L61
243,561
etcher-be/emiz
emiz/weather/noaa/__init__.py
retrieve_taf
def retrieve_taf(station_icao) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]: """ Retrieves a TAF string from an online database Args: station_icao: ICAO of the station Returns: tuple of error, taf_str """ url = _BASE_TAF_URL.format(station=station_icao) with requests.get(url) as resp: if not resp.ok: return f'unable to obtain TAF for station {station_icao}\n' \ f'Go to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \ f'for a list of valid stations', None return None, resp.content.decode().split('\n')[1]
python
def retrieve_taf(station_icao) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]: """ Retrieves a TAF string from an online database Args: station_icao: ICAO of the station Returns: tuple of error, taf_str """ url = _BASE_TAF_URL.format(station=station_icao) with requests.get(url) as resp: if not resp.ok: return f'unable to obtain TAF for station {station_icao}\n' \ f'Go to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \ f'for a list of valid stations', None return None, resp.content.decode().split('\n')[1]
[ "def", "retrieve_taf", "(", "station_icao", ")", "->", "typing", ".", "Tuple", "[", "typing", ".", "Union", "[", "str", ",", "None", "]", ",", "typing", ".", "Union", "[", "str", ",", "None", "]", "]", ":", "url", "=", "_BASE_TAF_URL", ".", "format", "(", "station", "=", "station_icao", ")", "with", "requests", ".", "get", "(", "url", ")", "as", "resp", ":", "if", "not", "resp", ".", "ok", ":", "return", "f'unable to obtain TAF for station {station_icao}\\n'", "f'Go to \"http://tgftp.nws.noaa.gov/data/observations/metar/stations\" '", "f'for a list of valid stations'", ",", "None", "return", "None", ",", "resp", ".", "content", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", "[", "1", "]" ]
Retrieves a TAF string from an online database Args: station_icao: ICAO of the station Returns: tuple of error, taf_str
[ "Retrieves", "a", "TAF", "string", "from", "an", "online", "database" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/noaa/__init__.py#L13-L29
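How the (error, taf) tuple is meant to be consumed, assuming retrieve_taf above is importable; 'UGTB' is just a sample ICAO. (The with-statement around the response relies on requests.Response being a context manager, supported since requests 2.18.)

error, taf = retrieve_taf('UGTB')  # sample station
if error:
    print(error)   # human-readable failure message
else:
    print(taf)     # raw TAF line from the NOAA file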
243,562
etcher-be/emiz
emiz/weather/noaa/__init__.py
retrieve_metar
def retrieve_metar(station_icao) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]: """ Retrieves a METAR string from an online database Args: station_icao: ICAO of the station Returns: tuple of error, metar_str """ url = _BASE_METAR_URL.format(station=station_icao) with requests.get(url) as resp: if not resp.ok: return f'unable to obtain METAR for station {station_icao}\n' \ f'Go to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \ f'for a list of valid stations', None return None, resp.content.decode().split('\n')[1]
python
def retrieve_metar(station_icao) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]: """ Retrieves a METAR string from an online database Args: station_icao: ICAO of the station Returns: tuple of error, metar_str """ url = _BASE_METAR_URL.format(station=station_icao) with requests.get(url) as resp: if not resp.ok: return f'unable to obtain METAR for station {station_icao}\n' \ f'Go to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \ f'for a list of valid stations', None return None, resp.content.decode().split('\n')[1]
[ "def", "retrieve_metar", "(", "station_icao", ")", "->", "typing", ".", "Tuple", "[", "typing", ".", "Optional", "[", "str", "]", ",", "typing", ".", "Optional", "[", "str", "]", "]", ":", "url", "=", "_BASE_METAR_URL", ".", "format", "(", "station", "=", "station_icao", ")", "with", "requests", ".", "get", "(", "url", ")", "as", "resp", ":", "if", "not", "resp", ".", "ok", ":", "return", "f'unable to obtain METAR for station {station_icao}\\n'", "f'Go to \"http://tgftp.nws.noaa.gov/data/observations/metar/stations\" '", "f'for a list of valid stations'", ",", "None", "return", "None", ",", "resp", ".", "content", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", "[", "1", "]" ]
Retrieves a METAR string from an online database Args: station_icao: ICAO of the station Returns: tuple of error, metar_str
[ "Retrieves", "a", "METAR", "string", "from", "an", "online", "database" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/noaa/__init__.py#L32-L48
243,563
etcher-be/emiz
emiz/weather/custom_metar/custom_metar_pressure.py
CustomPressure.value
def value(self, units=None): """Return the pressure in the specified units.""" if units is None: return self._value if not units.upper() in CustomPressure.legal_units: raise UnitsError("unrecognized pressure unit: '" + units + "'") units = units.upper() if units == self._units: return self._value if self._units == "IN": mb_value = self._value * 33.86398 elif self._units == "MM": mb_value = self._value * 1.3332239 else: mb_value = self._value if units in ("MB", "HPA"): return mb_value if units == "IN": return mb_value / 33.86398 if units == "MM": return mb_value / 1.3332239 raise UnitsError("unrecognized pressure unit: '" + units + "'")
python
def value(self, units=None): """Return the pressure in the specified units.""" if units is None: return self._value if not units.upper() in CustomPressure.legal_units: raise UnitsError("unrecognized pressure unit: '" + units + "'") units = units.upper() if units == self._units: return self._value if self._units == "IN": mb_value = self._value * 33.86398 elif self._units == "MM": mb_value = self._value * 1.3332239 else: mb_value = self._value if units in ("MB", "HPA"): return mb_value if units == "IN": return mb_value / 33.86398 if units == "MM": return mb_value / 1.3332239 raise UnitsError("unrecognized pressure unit: '" + units + "'")
[ "def", "value", "(", "self", ",", "units", "=", "None", ")", ":", "if", "units", "is", "None", ":", "return", "self", ".", "_value", "if", "not", "units", ".", "upper", "(", ")", "in", "CustomPressure", ".", "legal_units", ":", "raise", "UnitsError", "(", "\"unrecognized pressure unit: '\"", "+", "units", "+", "\"'\"", ")", "units", "=", "units", ".", "upper", "(", ")", "if", "units", "==", "self", ".", "_units", ":", "return", "self", ".", "_value", "if", "self", ".", "_units", "==", "\"IN\"", ":", "mb_value", "=", "self", ".", "_value", "*", "33.86398", "elif", "self", ".", "_units", "==", "\"MM\"", ":", "mb_value", "=", "self", ".", "_value", "*", "1.3332239", "else", ":", "mb_value", "=", "self", ".", "_value", "if", "units", "in", "(", "\"MB\"", ",", "\"HPA\"", ")", ":", "return", "mb_value", "if", "units", "==", "\"IN\"", ":", "return", "mb_value", "/", "33.86398", "if", "units", "==", "\"MM\"", ":", "return", "mb_value", "/", "1.3332239", "raise", "UnitsError", "(", "\"unrecognized pressure unit: '\"", "+", "units", "+", "\"'\"", ")" ]
Return the pressure in the specified units.
[ "Return", "the", "pressure", "in", "the", "specified", "units", "." ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/custom_metar/custom_metar_pressure.py#L16-L39
243,564
etcher-be/emiz
emiz/weather/custom_metar/custom_metar_pressure.py
CustomPressure.string
def string(self, units: typing.Optional[str] = None) -> str: """Return a string representation of the pressure, using the given units.""" if not units: _units: str = self._units else: if not units.upper() in CustomPressure.legal_units: raise UnitsError("unrecognized pressure unit: '" + units + "'") _units = units.upper() val = self.value(units) if _units == "MB": return "%.0f mb" % val if _units == "HPA": return "%.0f hPa" % val if _units == "IN": return "%.2f inches" % val if _units == "MM": return "%.0f mmHg" % val raise ValueError(_units)
python
def string(self, units: typing.Optional[str] = None) -> str: """Return a string representation of the pressure, using the given units.""" if not units: _units: str = self._units else: if not units.upper() in CustomPressure.legal_units: raise UnitsError("unrecognized pressure unit: '" + units + "'") _units = units.upper() val = self.value(units) if _units == "MB": return "%.0f mb" % val if _units == "HPA": return "%.0f hPa" % val if _units == "IN": return "%.2f inches" % val if _units == "MM": return "%.0f mmHg" % val raise ValueError(_units)
[ "def", "string", "(", "self", ",", "units", ":", "typing", ".", "Optional", "[", "str", "]", "=", "None", ")", "->", "str", ":", "if", "not", "units", ":", "_units", ":", "str", "=", "self", ".", "_units", "else", ":", "if", "not", "units", ".", "upper", "(", ")", "in", "CustomPressure", ".", "legal_units", ":", "raise", "UnitsError", "(", "\"unrecognized pressure unit: '\"", "+", "units", "+", "\"'\"", ")", "_units", "=", "units", ".", "upper", "(", ")", "val", "=", "self", ".", "value", "(", "units", ")", "if", "_units", "==", "\"MB\"", ":", "return", "\"%.0f mb\"", "%", "val", "if", "_units", "==", "\"HPA\"", ":", "return", "\"%.0f hPa\"", "%", "val", "if", "_units", "==", "\"IN\"", ":", "return", "\"%.2f inches\"", "%", "val", "if", "_units", "==", "\"MM\"", ":", "return", "\"%.0f mmHg\"", "%", "val", "raise", "ValueError", "(", "_units", ")" ]
Return a string representation of the pressure, using the given units.
[ "Return", "a", "string", "representation", "of", "the", "pressure", "using", "the", "given", "units", "." ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/custom_metar/custom_metar_pressure.py#L41-L59
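A standalone sketch of the conversion path the two CustomPressure records above implement: every value is first normalized to millibars/hPa, then converted to the target unit. The constants are the same ones used in CustomPressure.value; the variable names here are illustrative only.

IN_TO_MB = 33.86398    # 1 inHg in mb/hPa
MM_TO_MB = 1.3332239   # 1 mmHg in mb/hPa

value_in = 29.92               # standard sea-level pressure in inches of mercury
mb = value_in * IN_TO_MB       # ~1013.21 mb
mm = mb / MM_TO_MB             # ~760 mmHg
print("%.0f hPa" % mb, "%.0f mmHg" % mm)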
243,565
toastdriven/alligator
alligator/utils.py
determine_name
def determine_name(func): """ Given a function, returns the name of the function. Ex:: from random import choice determine_name(choice) # Returns 'choice' :param func: The callable :type func: function :returns: Name string """ if hasattr(func, '__name__'): return func.__name__ elif hasattr(func, '__class__'): return func.__class__.__name__ # This shouldn't be possible, but blow up if so. raise AttributeError("Provided callable '{}' has no name.".format( func ))
python
def determine_name(func): """ Given a function, returns the name of the function. Ex:: from random import choice determine_name(choice) # Returns 'choice' :param func: The callable :type func: function :returns: Name string """ if hasattr(func, '__name__'): return func.__name__ elif hasattr(func, '__class__'): return func.__class__.__name__ # This shouldn't be possible, but blow up if so. raise AttributeError("Provided callable '{}' has no name.".format( func ))
[ "def", "determine_name", "(", "func", ")", ":", "if", "hasattr", "(", "func", ",", "'__name__'", ")", ":", "return", "func", ".", "__name__", "elif", "hasattr", "(", "func", ",", "'__class__'", ")", ":", "return", "func", ".", "__class__", ".", "__name__", "# This shouldn't be possible, but blow up if so.", "raise", "AttributeError", "(", "\"Provided callable '{}' has no name.\"", ".", "format", "(", "func", ")", ")" ]
Given a function, returns the name of the function. Ex:: from random import choice determine_name(choice) # Returns 'choice' :param func: The callable :type func: function :returns: Name string
[ "Given", "a", "function", "returns", "the", "name", "of", "the", "function", "." ]
f18bcb35b350fc6b0886393f5246d69c892b36c7
https://github.com/toastdriven/alligator/blob/f18bcb35b350fc6b0886393f5246d69c892b36c7/alligator/utils.py#L24-L46
243,566
toastdriven/alligator
alligator/utils.py
import_module
def import_module(module_name): """ Given a dotted Python path, imports & returns the module. If not found, raises ``UnknownModuleError``. Ex:: mod = import_module('random') :param module_name: The dotted Python path :type module_name: string :returns: module """ try: return importlib.import_module(module_name) except ImportError as err: raise UnknownModuleError(str(err))
python
def import_module(module_name): """ Given a dotted Python path, imports & returns the module. If not found, raises ``UnknownModuleError``. Ex:: mod = import_module('random') :param module_name: The dotted Python path :type module_name: string :returns: module """ try: return importlib.import_module(module_name) except ImportError as err: raise UnknownModuleError(str(err))
[ "def", "import_module", "(", "module_name", ")", ":", "try", ":", "return", "importlib", ".", "import_module", "(", "module_name", ")", "except", "ImportError", "as", "err", ":", "raise", "UnknownModuleError", "(", "str", "(", "err", ")", ")" ]
Given a dotted Python path, imports & returns the module. If not found, raises ``UnknownModuleError``. Ex:: mod = import_module('random') :param module_name: The dotted Python path :type module_name: string :returns: module
[ "Given", "a", "dotted", "Python", "path", "imports", "&", "returns", "the", "module", "." ]
f18bcb35b350fc6b0886393f5246d69c892b36c7
https://github.com/toastdriven/alligator/blob/f18bcb35b350fc6b0886393f5246d69c892b36c7/alligator/utils.py#L49-L67
243,567
toastdriven/alligator
alligator/utils.py
import_attr
def import_attr(module_name, attr_name): """ Given a dotted Python path & an attribute name, imports the module & returns the attribute. If not found, raises ``UnknownCallableError``. Ex:: choice = import_attr('random', 'choice') :param module_name: The dotted Python path :type module_name: string :param attr_name: The attribute name :type attr_name: string :returns: attribute """ module = import_module(module_name) try: return getattr(module, attr_name) except AttributeError as err: raise UnknownCallableError(str(err))
python
def import_attr(module_name, attr_name): """ Given a dotted Python path & an attribute name, imports the module & returns the attribute. If not found, raises ``UnknownCallableError``. Ex:: choice = import_attr('random', 'choice') :param module_name: The dotted Python path :type module_name: string :param attr_name: The attribute name :type attr_name: string :returns: attribute """ module = import_module(module_name) try: return getattr(module, attr_name) except AttributeError as err: raise UnknownCallableError(str(err))
[ "def", "import_attr", "(", "module_name", ",", "attr_name", ")", ":", "module", "=", "import_module", "(", "module_name", ")", "try", ":", "return", "getattr", "(", "module", ",", "attr_name", ")", "except", "AttributeError", "as", "err", ":", "raise", "UnknownCallableError", "(", "str", "(", "err", ")", ")" ]
Given a dotted Python path & an attribute name, imports the module & returns the attribute. If not found, raises ``UnknownCallableError``. Ex:: choice = import_attr('random', 'choice') :param module_name: The dotted Python path :type module_name: string :param attr_name: The attribute name :type attr_name: string :returns: attribute
[ "Given", "a", "dotted", "Python", "path", "&", "an", "attribute", "name", "imports", "the", "module", "&", "returns", "the", "attribute", "." ]
f18bcb35b350fc6b0886393f5246d69c892b36c7
https://github.com/toastdriven/alligator/blob/f18bcb35b350fc6b0886393f5246d69c892b36c7/alligator/utils.py#L70-L94
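The three alligator.utils records above compose naturally: import_attr resolves a dotted path to a callable, and determine_name recovers its name. A short sketch using only the standard library as the target, mirroring the doctest-style examples in the docstrings:

from alligator.utils import determine_name, import_attr

choice = import_attr('random', 'choice')   # resolves random.choice dynamically
print(determine_name(choice))              # -> 'choice'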
243,568
nefarioustim/parker
parker/configloader.py
load_site_config
def load_site_config(name): """Load and return site configuration as a dict.""" return _load_config_json( os.path.join( CONFIG_PATH, CONFIG_SITES_PATH, name + CONFIG_EXT ) )
python
def load_site_config(name): """Load and return site configuration as a dict.""" return _load_config_json( os.path.join( CONFIG_PATH, CONFIG_SITES_PATH, name + CONFIG_EXT ) )
[ "def", "load_site_config", "(", "name", ")", ":", "return", "_load_config_json", "(", "os", ".", "path", ".", "join", "(", "CONFIG_PATH", ",", "CONFIG_SITES_PATH", ",", "name", "+", "CONFIG_EXT", ")", ")" ]
Load and return site configuration as a dict.
[ "Load", "and", "return", "site", "configuration", "as", "a", "dict", "." ]
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/configloader.py#L37-L45
243,569
carlosp420/dataset-creator
dataset_creator/phylip.py
PhylipDatasetFooter.make_charsets
def make_charsets(self): """ Overridden function for Phylip dataset as the content is different and goes into a separate file. """ count_start = 1 out = '' for gene_code, lengths in self.data.gene_codes_and_lengths.items(): count_end = lengths[0] + count_start - 1 formatted_line = self.format_charset_line(gene_code, count_start, count_end) converted_line = formatted_line.replace(' charset', 'DNA,').replace(';', '') out += converted_line count_start = count_end + 1 return out
python
def make_charsets(self): """ Overridden function for Phylip dataset as the content is different and goes into a separate file. """ count_start = 1 out = '' for gene_code, lengths in self.data.gene_codes_and_lengths.items(): count_end = lengths[0] + count_start - 1 formatted_line = self.format_charset_line(gene_code, count_start, count_end) converted_line = formatted_line.replace(' charset', 'DNA,').replace(';', '') out += converted_line count_start = count_end + 1 return out
[ "def", "make_charsets", "(", "self", ")", ":", "count_start", "=", "1", "out", "=", "''", "for", "gene_code", ",", "lengths", "in", "self", ".", "data", ".", "gene_codes_and_lengths", ".", "items", "(", ")", ":", "count_end", "=", "lengths", "[", "0", "]", "+", "count_start", "-", "1", "formatted_line", "=", "self", ".", "format_charset_line", "(", "gene_code", ",", "count_start", ",", "count_end", ")", "converted_line", "=", "formatted_line", ".", "replace", "(", "' charset'", ",", "'DNA,'", ")", ".", "replace", "(", "';'", ",", "''", ")", "out", "+=", "converted_line", "count_start", "=", "count_end", "+", "1", "return", "out" ]
Overridden function for Phylip dataset as the content is different and goes into a separate file.
[ "Overridden", "function", "for", "Phylip", "dataset", "as", "the", "content", "is", "different", "and", "goes", "into", "a", "separate", "file", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/phylip.py#L13-L26
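A sketch of the line rewrite make_charsets performs, assuming format_charset_line (not shown in this record) yields Nexus-style ' charset name = start-end;' lines; the gene name and range below are made up:

line = ' charset COI = 1-658;'
print(line.replace(' charset', 'DNA,').replace(';', ''))   # -> 'DNA, COI = 1-658'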
243,570
kstark/months
months/months.py
Month.from_date
def from_date(cls, date): """ Returns a Month instance from the given datetime.date or datetime.datetime object """ try: date = date.date() except AttributeError: pass return cls(date.year, date.month)
python
def from_date(cls, date): """ Returns a Month instance from the given datetime.date or datetime.datetime object """ try: date = date.date() except AttributeError: pass return cls(date.year, date.month)
[ "def", "from_date", "(", "cls", ",", "date", ")", ":", "try", ":", "date", "=", "date", ".", "date", "(", ")", "except", "AttributeError", ":", "pass", "return", "cls", "(", "date", ".", "year", ",", "date", ".", "month", ")" ]
Returns a Month instance from the given datetime.date or datetime.datetime object
[ "Returns", "a", "Month", "instance", "from", "the", "given", "datetime", ".", "date", "or", "datetime", ".", "datetime", "object" ]
676910a7328c3204f7366558b038644057d34561
https://github.com/kstark/months/blob/676910a7328c3204f7366558b038644057d34561/months/months.py#L66-L75
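A usage sketch for Month.from_date, assuming Month is importable from the months package as the record's path suggests; thanks to the try/except it accepts both date and datetime inputs:

import datetime
from months import Month

m1 = Month.from_date(datetime.date(2019, 7, 15))
m2 = Month.from_date(datetime.datetime.now())   # .date() is taken internally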
243,571
mbodenhamer/syn
syn/tree/b/tree.py
Tree.rebuild
def rebuild(self, **kwargs): '''Repopulate the node-tracking data structures. Shouldn't really ever be needed. ''' self.nodes = [] self.node_types = [] self.id_dict = {} self.type_dict = {} self.add_node(self.root)
python
def rebuild(self, **kwargs): '''Repopulate the node-tracking data structures. Shouldn't really ever be needed. ''' self.nodes = [] self.node_types = [] self.id_dict = {} self.type_dict = {} self.add_node(self.root)
[ "def", "rebuild", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "nodes", "=", "[", "]", "self", ".", "node_types", "=", "[", "]", "self", ".", "id_dict", "=", "{", "}", "self", ".", "type_dict", "=", "{", "}", "self", ".", "add_node", "(", "self", ".", "root", ")" ]
Repopulate the node-tracking data structures. Shouldn't really ever be needed.
[ "Repopulate", "the", "node", "-", "tracking", "data", "structures", ".", "Shouldn", "t", "really", "ever", "be", "needed", "." ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/tree/b/tree.py#L125-L134
243,572
mbodenhamer/syn
syn/tree/b/tree.py
Tree._check_search_kwarg_types
def _check_search_kwarg_types(self, kwargs): '''Checks that every element of kwargs is a valid type in this tree.''' for key in kwargs: if key not in self.node_types: raise TypeError("Invalid search type: {}".format(key))
python
def _check_search_kwarg_types(self, kwargs): '''Checks that every element of kwargs is a valid type in this tree.''' for key in kwargs: if key not in self.node_types: raise TypeError("Invalid search type: {}".format(key))
[ "def", "_check_search_kwarg_types", "(", "self", ",", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "if", "key", "not", "in", "self", ".", "node_types", ":", "raise", "TypeError", "(", "\"Invalid search type: {}\"", ".", "format", "(", "key", ")", ")" ]
Checks that every element of kwargs is a valid type in this tree.
[ "Checks", "that", "every", "element", "of", "kwargs", "is", "a", "valid", "type", "in", "this", "tree", "." ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/tree/b/tree.py#L147-L151
243,573
lambdalisue/maidenhair
src/maidenhair/statistics/__init__.py
average
def average(x): """ Return a numpy array of column average. It has no effect if the array is one-dimensional Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column average Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(average(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(average(a), [1, 2, 3]) True """ if x.ndim > 1 and len(x[0]) > 1: return np.average(x, axis=1) return x
python
def average(x): """ Return a numpy array of column average. It has no effect if the array is one-dimensional Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column average Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(average(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(average(a), [1, 2, 3]) True """ if x.ndim > 1 and len(x[0]) > 1: return np.average(x, axis=1) return x
[ "def", "average", "(", "x", ")", ":", "if", "x", ".", "ndim", ">", "1", "and", "len", "(", "x", "[", "0", "]", ")", ">", "1", ":", "return", "np", ".", "average", "(", "x", ",", "axis", "=", "1", ")", "return", "x" ]
Return a numpy array of column average. It has no effect if the array is one-dimensional Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column average Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(average(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(average(a), [1, 2, 3]) True
[ "Return", "a", "numpy", "array", "of", "column", "average", ".", "It", "does", "not", "affect", "if", "the", "array", "is", "one", "dimension" ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/statistics/__init__.py#L8-L35
243,574
lambdalisue/maidenhair
src/maidenhair/statistics/__init__.py
mean
def mean(x): """ Return a numpy array of column mean. It has no effect if the array is one-dimensional Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column mean Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(mean(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(mean(a), [1, 2, 3]) True """ if x.ndim > 1 and len(x[0]) > 1: return np.mean(x, axis=1) return x
python
def mean(x): """ Return a numpy array of column mean. It has no effect if the array is one-dimensional Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column mean Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(mean(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(mean(a), [1, 2, 3]) True """ if x.ndim > 1 and len(x[0]) > 1: return np.mean(x, axis=1) return x
[ "def", "mean", "(", "x", ")", ":", "if", "x", ".", "ndim", ">", "1", "and", "len", "(", "x", "[", "0", "]", ")", ">", "1", ":", "return", "np", ".", "mean", "(", "x", ",", "axis", "=", "1", ")", "return", "x" ]
Return a numpy array of column mean. It has no effect if the array is one-dimensional Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column mean Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(mean(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(mean(a), [1, 2, 3]) True
[ "Return", "a", "numpy", "array", "of", "column", "mean", ".", "It", "does", "not", "affect", "if", "the", "array", "is", "one", "dimension" ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/statistics/__init__.py#L37-L64
243,575
lambdalisue/maidenhair
src/maidenhair/statistics/__init__.py
median
def median(x): """ Return a numpy array of column median. It has no effect if the array is one-dimensional Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column median Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(median(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(median(a), [1, 2, 3]) True """ if x.ndim > 1 and len(x[0]) > 1: return np.median(x, axis=1) return x
python
def median(x): """ Return a numpy array of column median. It has no effect if the array is one-dimensional Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column median Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(median(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(median(a), [1, 2, 3]) True """ if x.ndim > 1 and len(x[0]) > 1: return np.median(x, axis=1) return x
[ "def", "median", "(", "x", ")", ":", "if", "x", ".", "ndim", ">", "1", "and", "len", "(", "x", "[", "0", "]", ")", ">", "1", ":", "return", "np", ".", "median", "(", "x", ",", "axis", "=", "1", ")", "return", "x" ]
Return a numpy array of column median. It has no effect if the array is one-dimensional Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column median Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.array_equal(median(a), [2, 5, 8]) True >>> a = np.array([1, 2, 3]) >>> np.array_equal(median(a), [1, 2, 3]) True
[ "Return", "a", "numpy", "array", "of", "column", "median", ".", "It", "does", "not", "affect", "if", "the", "array", "is", "one", "dimension" ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/statistics/__init__.py#L66-L93
243,576
lambdalisue/maidenhair
src/maidenhair/statistics/__init__.py
variance
def variance(x): """ Return a numpy array of column variance Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column variance Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.testing.assert_array_almost_equal( ... variance(a), ... [0.666666, 0.666666, 0.666666]) >>> a = np.array([1, 2, 3]) >>> np.testing.assert_array_almost_equal( ... variance(a), ... 0.666666) """ if x.ndim > 1 and len(x[0]) > 1: return np.var(x, axis=1) return np.var(x)
python
def variance(x): """ Return a numpy array of column variance Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column variance Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.testing.assert_array_almost_equal( ... variance(a), ... [0.666666, 0.666666, 0.666666]) >>> a = np.array([1, 2, 3]) >>> np.testing.assert_array_almost_equal( ... variance(a), ... 0.666666) """ if x.ndim > 1 and len(x[0]) > 1: return np.var(x, axis=1) return np.var(x)
[ "def", "variance", "(", "x", ")", ":", "if", "x", ".", "ndim", ">", "1", "and", "len", "(", "x", "[", "0", "]", ")", ">", "1", ":", "return", "np", ".", "var", "(", "x", ",", "axis", "=", "1", ")", "return", "np", ".", "var", "(", "x", ")" ]
Return a numpy array of column variance Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column variance Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.testing.assert_array_almost_equal( ... variance(a), ... [0.666666, 0.666666, 0.666666]) >>> a = np.array([1, 2, 3]) >>> np.testing.assert_array_almost_equal( ... variance(a), ... 0.666666)
[ "Return", "a", "numpy", "array", "of", "column", "variance" ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/statistics/__init__.py#L95-L123
243,577
lambdalisue/maidenhair
src/maidenhair/statistics/__init__.py
standard_deviation
def standard_deviation(x): """ Return a numpy array of column standard deviation Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column standard deviation Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.testing.assert_array_almost_equal( ... standard_deviation(a), ... [0.816496, 0.816496, 0.816496]) >>> a = np.array([1, 2, 3]) >>> np.testing.assert_array_almost_equal( ... standard_deviation(a), ... 0.816496) """ if x.ndim > 1 and len(x[0]) > 1: return np.std(x, axis=1) return np.std(x)
python
def standard_deviation(x): """ Return a numpy array of column standard deviation Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column standard deviation Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.testing.assert_array_almost_equal( ... standard_deviation(a), ... [0.816496, 0.816496, 0.816496]) >>> a = np.array([1, 2, 3]) >>> np.testing.assert_array_almost_equal( ... standard_deviation(a), ... 0.816496) """ if x.ndim > 1 and len(x[0]) > 1: return np.std(x, axis=1) return np.std(x)
[ "def", "standard_deviation", "(", "x", ")", ":", "if", "x", ".", "ndim", ">", "1", "and", "len", "(", "x", "[", "0", "]", ")", ">", "1", ":", "return", "np", ".", "std", "(", "x", ",", "axis", "=", "1", ")", "return", "np", ".", "std", "(", "x", ")" ]
Return a numpy array of column standard deviation Parameters ---------- x : ndarray A numpy array instance Returns ------- ndarray A 1 x n numpy array instance of column standard deviation Examples -------- >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> np.testing.assert_array_almost_equal( ... standard_deviation(a), ... [0.816496, 0.816496, 0.816496]) >>> a = np.array([1, 2, 3]) >>> np.testing.assert_array_almost_equal( ... standard_deviation(a), ... 0.816496)
[ "Return", "a", "numpy", "array", "of", "column", "standard", "deviation" ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/statistics/__init__.py#L125-L153
243,578
lambdalisue/maidenhair
src/maidenhair/statistics/__init__.py
confidential_interval
def confidential_interval(x, alpha=0.98): """ Return a numpy array of column confidential interval Parameters ---------- x : ndarray A numpy array instance alpha : float Alpha value of confidential interval Returns ------- ndarray A 1 x n numpy array which indicates the difference from each sample average point to its confidential interval point """ from scipy.stats import t if x.ndim == 1: df = len(x) - 1 # calculate positive critical value of student's T distribution cv = t.interval(alpha, df)[1] # calculate sample standard deviation std = np.std(x) else: # calculate degree of freedom df = len(x[0]) - 1 # calculate positive critical value of student's T distribution cv = t.interval(alpha, df)[1] # calculate sample standard deviation std = np.std(x, axis=1) # calculate positive difference from # sample average to confidential interval return std * cv / np.sqrt(df)
python
def confidential_interval(x, alpha=0.98): """ Return a numpy array of column confidential interval Parameters ---------- x : ndarray A numpy array instance alpha : float Alpha value of confidential interval Returns ------- ndarray A 1 x n numpy array which indicates the difference from each sample average point to its confidential interval point """ from scipy.stats import t if x.ndim == 1: df = len(x) - 1 # calculate positive critical value of student's T distribution cv = t.interval(alpha, df)[1] # calculate sample standard deviation std = np.std(x) else: # calculate degree of freedom df = len(x[0]) - 1 # calculate positive critical value of student's T distribution cv = t.interval(alpha, df)[1] # calculate sample standard deviation std = np.std(x, axis=1) # calculate positive difference from # sample average to confidential interval return std * cv / np.sqrt(df)
[ "def", "confidential_interval", "(", "x", ",", "alpha", "=", "0.98", ")", ":", "from", "scipy", ".", "stats", "import", "t", "if", "x", ".", "ndim", "==", "1", ":", "df", "=", "len", "(", "x", ")", "-", "1", "# calculate positive critical value of student's T distribution", "cv", "=", "t", ".", "interval", "(", "alpha", ",", "df", ")", "# calculate sample standard distribution", "std", "=", "np", ".", "std", "(", "x", ")", "else", ":", "# calculate degree of freedom", "df", "=", "len", "(", "x", "[", "0", "]", ")", "-", "1", "# calculate positive critical value of student's T distribution", "cv", "=", "t", ".", "interval", "(", "alpha", ",", "df", ")", "[", "1", "]", "# calculate sample standard distribution", "std", "=", "np", ".", "std", "(", "x", ",", "axis", "=", "1", ")", "# calculate positive difference from", "# sample average to confidential interval", "return", "std", "*", "cv", "/", "np", ".", "sqrt", "(", "df", ")" ]
Return a numpy array of column confidential interval Parameters ---------- x : ndarray A numpy array instance alpha : float Alpha value of confidential interval Returns ------- ndarray A 1 x n numpy array which indicates the difference from each sample average point to its confidential interval point
[ "Return", "a", "numpy", "array", "of", "column", "confidential", "interval" ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/statistics/__init__.py#L155-L188
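A sketch of calling confidential_interval on a 2 x 5 sample matrix, assuming the module imports as maidenhair.statistics per the record's path; the returned values are the half-widths to add to and subtract from each row's mean:

import numpy as np
from maidenhair.statistics import confidential_interval

x = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
              [2.0, 4.0, 6.0, 8.0, 10.0]])
ci = confidential_interval(x, alpha=0.98)
means = np.mean(x, axis=1)
print(means - ci, means + ci)   # lower and upper 98% bounds per row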
243,579
lambdalisue/maidenhair
src/maidenhair/statistics/__init__.py
simple_moving_matrix
def simple_moving_matrix(x, n=10): """ Create simple moving matrix. Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make average Returns ------- ndarray An m x n numpy array (one length-n window per row) which will be useful for calculating the confidential interval of a simple moving average """ if x.ndim > 1 and len(x[0]) > 1: x = np.average(x, axis=1) h = n // 2 o = 0 if h * 2 == n else 1 xx = [] for i in range(h, len(x) - h): xx.append(x[i-h:i+h+o]) return np.array(xx)
python
def simple_moving_matrix(x, n=10): """ Create simple moving matrix. Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make average Returns ------- ndarray An m x n numpy array (one length-n window per row) which will be useful for calculating the confidential interval of a simple moving average """ if x.ndim > 1 and len(x[0]) > 1: x = np.average(x, axis=1) h = n // 2 o = 0 if h * 2 == n else 1 xx = [] for i in range(h, len(x) - h): xx.append(x[i-h:i+h+o]) return np.array(xx)
[ "def", "simple_moving_matrix", "(", "x", ",", "n", "=", "10", ")", ":", "if", "x", ".", "ndim", ">", "1", "and", "len", "(", "x", "[", "0", "]", ")", ">", "1", ":", "x", "=", "np", ".", "average", "(", "x", ",", "axis", "=", "1", ")", "h", "=", "n", "/", "2", "o", "=", "0", "if", "h", "*", "2", "==", "n", "else", "1", "xx", "=", "[", "]", "for", "i", "in", "range", "(", "h", ",", "len", "(", "x", ")", "-", "h", ")", ":", "xx", ".", "append", "(", "x", "[", "i", "-", "h", ":", "i", "+", "h", "+", "o", "]", ")", "return", "np", ".", "array", "(", "xx", ")" ]
Create simple moving matrix. Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make average Returns ------- ndarray An m x n numpy array (one length-n window per row) which will be useful for calculating the confidential interval of a simple moving average
[ "Create", "simple", "moving", "matrix", "." ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/statistics/__init__.py#L190-L215
243,580
lambdalisue/maidenhair
src/maidenhair/statistics/__init__.py
simple_moving_average
def simple_moving_average(x, n=10): """ Calculate simple moving average Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make average Returns ------- ndarray A 1 x n numpy array instance """ if x.ndim > 1 and len(x[0]) > 1: x = np.average(x, axis=1) a = np.ones(n) / float(n) return np.convolve(x, a, 'valid')
python
def simple_moving_average(x, n=10): """ Calculate simple moving average Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make average Returns ------- ndarray A 1 x n numpy array instance """ if x.ndim > 1 and len(x[0]) > 1: x = np.average(x, axis=1) a = np.ones(n) / float(n) return np.convolve(x, a, 'valid')
[ "def", "simple_moving_average", "(", "x", ",", "n", "=", "10", ")", ":", "if", "x", ".", "ndim", ">", "1", "and", "len", "(", "x", "[", "0", "]", ")", ">", "1", ":", "x", "=", "np", ".", "average", "(", "x", ",", "axis", "=", "1", ")", "a", "=", "np", ".", "ones", "(", "n", ")", "/", "float", "(", "n", ")", "return", "np", ".", "convolve", "(", "x", ",", "a", ",", "'valid'", ")" ]
Calculate simple moving average Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make average Returns ------- ndarray A 1 x n numpy array instance
[ "Calculate", "simple", "moving", "average" ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/statistics/__init__.py#L217-L236
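A quick check of simple_moving_average (import path assumed from the record): a window of n=3 over 0..9 yields the averages of consecutive triples:

import numpy as np
from maidenhair.statistics import simple_moving_average

x = np.arange(10, dtype=float)
print(simple_moving_average(x, n=3))   # -> [1. 2. 3. 4. 5. 6. 7. 8.]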
243,581
rbarrois/volatildap
volatildap/server.py
find_available_port
def find_available_port(): """Find an available port. Simple trick: open a socket to localhost, see what port was allocated. Could fail in highly concurrent setups, though. """ s = socket.socket() s.bind(('localhost', 0)) _address, port = s.getsockname() s.close() return port
python
def find_available_port(): """Find an available port. Simple trick: open a socket to localhost, see what port was allocated. Could fail in highly concurrent setups, though. """ s = socket.socket() s.bind(('localhost', 0)) _address, port = s.getsockname() s.close() return port
[ "def", "find_available_port", "(", ")", ":", "s", "=", "socket", ".", "socket", "(", ")", "s", ".", "bind", "(", "(", "'localhost'", ",", "0", ")", ")", "_address", ",", "port", "=", "s", ".", "getsockname", "(", ")", "s", ".", "close", "(", ")", "return", "port" ]
Find an available port. Simple trick: open a socket to localhost, see what port was allocated. Could fail in highly concurrent setups, though.
[ "Find", "an", "available", "port", "." ]
08df9d5f2f65400e51eae7a50e64759bdbdce6f2
https://github.com/rbarrois/volatildap/blob/08df9d5f2f65400e51eae7a50e64759bdbdce6f2/volatildap/server.py#L378-L389
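Usage sketch for find_available_port; as its docstring warns, the port is only probably still free by the time you bind it, so treat the result as a best-effort hint:

from volatildap.server import find_available_port

port = find_available_port()
print('starting test slapd on port', port)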
243,582
rbarrois/volatildap
volatildap/server.py
OpenLdapPaths._find_file
def _find_file(self, needle, candidates): """Find the first directory containing a given candidate file.""" for candidate in candidates: fullpath = os.path.join(candidate, needle) if os.path.isfile(fullpath): return fullpath raise PathError("Unable to locate file %s; tried %s" % (needle, candidates))
python
def _find_file(self, needle, candidates): """Find the first directory containing a given candidate file.""" for candidate in candidates: fullpath = os.path.join(candidate, needle) if os.path.isfile(fullpath): return fullpath raise PathError("Unable to locate file %s; tried %s" % (needle, candidates))
[ "def", "_find_file", "(", "self", ",", "needle", ",", "candidates", ")", ":", "for", "candidate", "in", "candidates", ":", "fullpath", "=", "os", ".", "path", ".", "join", "(", "candidate", ",", "needle", ")", "if", "os", ".", "path", ".", "isfile", "(", "fullpath", ")", ":", "return", "fullpath", "raise", "PathError", "(", "\"Unable to locate file %s; tried %s\"", "%", "(", "needle", ",", "candidates", ")", ")" ]
Find the first directory containing a given candidate file.
[ "Find", "the", "first", "directory", "containing", "a", "given", "candidate", "file", "." ]
08df9d5f2f65400e51eae7a50e64759bdbdce6f2
https://github.com/rbarrois/volatildap/blob/08df9d5f2f65400e51eae7a50e64759bdbdce6f2/volatildap/server.py#L53-L59
243,583
rbarrois/volatildap
volatildap/server.py
LdapServer._poll_slapd
def _poll_slapd(self, timeout=DEFAULT_STARTUP_DELAY): """Poll slapd port until available.""" begin = time.time() time.sleep(0.5) while time.time() < begin + timeout: if self._process.poll() is not None: raise RuntimeError("LDAP server has exited before starting listen.") s = socket.socket() try: s.connect(('localhost', self.port)) except socket.error: # Not ready yet, sleep time.sleep(0.5) else: return finally: s.close() raise RuntimeError("LDAP server not responding within %s seconds." % timeout)
python
def _poll_slapd(self, timeout=DEFAULT_STARTUP_DELAY): """Poll slapd port until available.""" begin = time.time() time.sleep(0.5) while time.time() < begin + timeout: if self._process.poll() is not None: raise RuntimeError("LDAP server has exited before starting listen.") s = socket.socket() try: s.connect(('localhost', self.port)) except socket.error: # Not ready yet, sleep time.sleep(0.5) else: return finally: s.close() raise RuntimeError("LDAP server not responding within %s seconds." % timeout)
[ "def", "_poll_slapd", "(", "self", ",", "timeout", "=", "DEFAULT_STARTUP_DELAY", ")", ":", "begin", "=", "time", ".", "time", "(", ")", "time", ".", "sleep", "(", "0.5", ")", "while", "time", ".", "time", "(", ")", "<", "begin", "+", "timeout", ":", "if", "self", ".", "_process", ".", "poll", "(", ")", "is", "not", "None", ":", "raise", "RuntimeError", "(", "\"LDAP server has exited before starting listen.\"", ")", "s", "=", "socket", ".", "socket", "(", ")", "try", ":", "s", ".", "connect", "(", "(", "'localhost'", ",", "self", ".", "port", ")", ")", "except", "socket", ".", "error", ":", "# Not ready yet, sleep", "time", ".", "sleep", "(", "0.5", ")", "else", ":", "return", "finally", ":", "s", ".", "close", "(", ")", "raise", "RuntimeError", "(", "\"LDAP server not responding within %s seconds.\"", "%", "timeout", ")" ]
Poll slapd port until available.
[ "Poll", "slapd", "port", "until", "available", "." ]
08df9d5f2f65400e51eae7a50e64759bdbdce6f2
https://github.com/rbarrois/volatildap/blob/08df9d5f2f65400e51eae7a50e64759bdbdce6f2/volatildap/server.py#L335-L355
243,584
collectiveacuity/labPack
labpack/databases/couchbase.py
syncGatewayClient.list_users
def list_users(self): ''' a method to list all the user ids of all users in the bucket ''' # construct url url = self.bucket_url + '/_user/' # send request and unwrap response response = requests.get(url) response = response.json() return response
python
def list_users(self): ''' a method to list all the user ids of all users in the bucket ''' # construct url url = self.bucket_url + '/_user/' # send request and unwrap response response = requests.get(url) response = response.json() return response
[ "def", "list_users", "(", "self", ")", ":", "# construct url", "url", "=", "self", ".", "bucket_url", "+", "'/_user/'", "# send request and unwrap response", "response", "=", "requests", ".", "get", "(", "url", ")", "response", "=", "response", ".", "json", "(", ")", "return", "response" ]
a method to list all the user ids of all users in the bucket
[ "a", "method", "to", "list", "all", "the", "user", "ids", "of", "all", "users", "in", "the", "bucket" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/couchbase.py#L456-L467
243,585
collectiveacuity/labPack
labpack/databases/couchbase.py
syncGatewayClient.create
def create(self, doc_details): ''' a method to create a new document in the collection :param doc_details: dictionary with document details and user id value :return: dictionary with document details and _id and _rev values ''' # https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/document/post__db___doc_ title = '%s.create' % self.__class__.__name__ # validate input if self.model: doc_details = self.model.validate(doc_details, path_to_root='', object_title='%s(doc_details={...}' % title) # define request fields from copy import deepcopy new_record = deepcopy(doc_details) url = self.bucket_url + '/' # send request and construct output response = requests.post(url, json=new_record) if response.status_code not in (200, 201): response = response.json() raise Exception('%s() error: %s' % (title, response)) response = response.json() new_record['_id'] = response['id'] new_record['_rev'] = response['rev'] return new_record
python
def create(self, doc_details): ''' a method to create a new document in the collection :param doc_details: dictionary with document details and user id value :return: dictionary with document details and _id and _rev values ''' # https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/document/post__db___doc_ title = '%s.create' % self.__class__.__name__ # validate input if self.model: doc_details = self.model.validate(doc_details, path_to_root='', object_title='%s(doc_details={...}' % title) # define request fields from copy import deepcopy new_record = deepcopy(doc_details) url = self.bucket_url + '/' # send request and construct output response = requests.post(url, json=new_record) if response.status_code not in (200, 201): response = response.json() raise Exception('%s() error: %s' % (title, response)) response = response.json() new_record['_id'] = response['id'] new_record['_rev'] = response['rev'] return new_record
[ "def", "create", "(", "self", ",", "doc_details", ")", ":", "# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/document/post__db___doc_", "title", "=", "'%s.create'", "%", "self", ".", "__class__", ".", "__name__", "# validate input", "if", "self", ".", "model", ":", "doc_details", "=", "self", ".", "model", ".", "validate", "(", "doc_details", ",", "path_to_root", "=", "''", ",", "object_title", "=", "'%s(doc_details={...}'", "%", "title", ")", "# define request fields", "from", "copy", "import", "deepcopy", "new_record", "=", "deepcopy", "(", "doc_details", ")", "url", "=", "self", ".", "bucket_url", "+", "'/'", "# send request and construct output", "response", "=", "requests", ".", "post", "(", "url", ",", "json", "=", "new_record", ")", "if", "response", ".", "status_code", "not", "in", "(", "200", ",", "201", ")", ":", "response", "=", "response", ".", "json", "(", ")", "raise", "Exception", "(", "'%s() error: %s'", "%", "(", "title", ",", "response", ")", ")", "response", "=", "response", ".", "json", "(", ")", "new_record", "[", "'_id'", "]", "=", "response", "[", "'id'", "]", "new_record", "[", "'_rev'", "]", "=", "response", "[", "'rev'", "]", "return", "new_record" ]
a method to create a new document in the collection :param doc_details: dictionary with document details and user id value :return: dictionary with document details and _id and _rev values
[ "a", "method", "to", "create", "a", "new", "document", "in", "the", "collection" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/couchbase.py#L905-L936
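A hedged sketch of create(); here `client` stands for a syncGatewayClient built elsewhere (its constructor is not shown in these records), and the document body is made up:

record = client.create({'dt': 1500000000.0, 'place': 'home'})
print(record['_id'], record['_rev'])   # identifiers assigned by Sync Gateway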
243,586
collectiveacuity/labPack
labpack/databases/couchbase.py
syncGatewayClient.remove
def remove(self): ''' a method to remove the entire bucket from the database :return: string with confirmation message ''' # https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/database/delete__db__ title = '%s.remove' % self.__class__.__name__ # validate admin access if not self.admin_access: raise Exception('%s requires admin access.' % title) # flush files on server if self.configs['server'].find('http:') > -1: flush_url = self.configs['server'] + '/pools/%s/buckets/%s/controller/doFlush' % (self.configs['pool'], self.bucket_name) response = requests.post(flush_url) try: print(response.json()) except: print(response.status_code) # else (in dev) iterate over files and users and purge elif self.configs['server'].find('walrus') > -1: for doc in self.list(purge_deleted=True): self.purge(doc['_id']) user_list = self.list_users() for uid in user_list: self.delete_user(uid) self.delete_view() # delete bucket from configs delete_url = self.bucket_url + '/' requests.delete(delete_url) # report outcome exit_msg = 'Bucket "%s" removed from database.' % self.bucket_name self.printer(exit_msg) return exit_msg
python
def remove(self): ''' a method to remove the entire bucket from the database :return: string with confirmation message ''' # https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/database/delete__db__ title = '%s.remove' % self.__class__.__name__ # validate admin access if not self.admin_access: raise Exception('%s requires admin access.' % title) # flush files on server if self.configs['server'].find('http:') > -1: flush_url = self.configs['server'] + '/pools/%s/buckets/%s/controller/doFlush' % (self.configs['pool'], self.bucket_name) response = requests.post(flush_url) try: print(response.json()) except: print(response.status_code) # else (in dev) iterate over files and users and purge elif self.configs['server'].find('walrus') > -1: for doc in self.list(purge_deleted=True): self.purge(doc['_id']) user_list = self.list_users() for uid in user_list: self.delete_user(uid) self.delete_view() # delete bucket from configs delete_url = self.bucket_url + '/' requests.delete(delete_url) # report outcome exit_msg = 'Bucket "%s" removed from database.' % self.bucket_name self.printer(exit_msg) return exit_msg
[ "def", "remove", "(", "self", ")", ":", "# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/database/delete__db__", "title", "=", "'%s.remove'", "%", "self", ".", "__class__", ".", "__name__", "# validate admin access", "if", "not", "self", ".", "admin_access", ":", "raise", "Exception", "(", "'%s requires admin access.'", "%", "title", ")", "# flush files on server", "if", "self", ".", "configs", "[", "'server'", "]", ".", "find", "(", "'http:'", ")", ">", "-", "1", ":", "flush_url", "=", "self", ".", "configs", "[", "'server'", "]", "+", "'/pools/%s/buckets/%s/controller/doFlush'", "%", "(", "self", ".", "configs", "[", "'pool'", "]", ",", "self", ".", "bucket_name", ")", "response", "=", "requests", ".", "post", "(", "flush_url", ")", "try", ":", "print", "(", "response", ".", "json", "(", ")", ")", "except", ":", "print", "(", "response", ".", "status_code", ")", "# else (in dev) iterate over files and users and purge ", "elif", "self", ".", "configs", "[", "'server'", "]", ".", "find", "(", "'walrus'", ")", ">", "-", "1", ":", "for", "doc", "in", "self", ".", "list", "(", "purge_deleted", "=", "True", ")", ":", "self", ".", "purge", "(", "doc", "[", "'_id'", "]", ")", "user_list", "=", "self", ".", "list_users", "(", ")", "for", "uid", "in", "user_list", ":", "self", ".", "delete_user", "(", "uid", ")", "self", ".", "delete_view", "(", ")", "# delete bucket from configs", "delete_url", "=", "self", ".", "bucket_url", "+", "'/'", "requests", ".", "delete", "(", "delete_url", ")", "# report outcome", "exit_msg", "=", "'Bucket \"%s\" removed from database.'", "%", "self", ".", "bucket_name", "self", ".", "printer", "(", "exit_msg", ")", "return", "exit_msg" ]
a method to remove the entire bucket from the database :return: string with confirmation message
[ "a", "method", "to", "remove", "the", "entire", "bucket", "from", "the", "database" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/couchbase.py#L1083-L1125
243,587
Zsailer/yerkes
yerkes/base.py
EmptyPalette.to_chart
def to_chart(self): """Write color palette to Altair Chart. """ encoding, properties = {}, {} if self.orientation == "horizontal": # Set the axis encoding["x"] = alt.X( "hex", axis=None, scale=alt.Scale(zero=False, padding=0) ) # Set the rectangle size. properties["width"] = len(self.df)*self.square_size properties["height"] = self.square_size elif self.orientation == "vertical": # Set the axis encoding["y"] = alt.Y( "hex", axis=None, scale=alt.Scale(zero=False, padding=0) ) # Set the rectangle size. properties["height"] = len(self.df)*self.square_size properties["width"] = self.square_size # Build a chart. tooltip = list(self.df.columns) chart = alt.Chart(self.df).mark_rect().encode( color=alt.Color("hex", legend=None, scale=alt.Scale(range=self.colors)), tooltip=tooltip, **encoding ).properties( **properties ) return chart
python
def to_chart(self): """Write color palette to Altair Chart. """ encoding, properties = {}, {} if self.orientation == "horizontal": # Set the axis encoding["x"] = alt.X( "hex", axis=None, scale=alt.Scale(zero=False, padding=0) ) # Set the rectangle size. properties["width"] = len(self.df)*self.square_size properties["height"] = self.square_size elif self.orientation == "vertical": # Set the axis encoding["y"] = alt.Y( "hex", axis=None, scale=alt.Scale(zero=False, padding=0) ) # Set the rectangle size. properties["height"] = len(self.df)*self.square_size properties["width"] = self.square_size # Build a chart. tooltip = list(self.df.columns) chart = alt.Chart(self.df).mark_rect().encode( color=alt.Color("hex", legend=None, scale=alt.Scale(range=self.colors)), tooltip=tooltip, **encoding ).properties( **properties ) return chart
[ "def", "to_chart", "(", "self", ")", ":", "encoding", ",", "properties", "=", "{", "}", ",", "{", "}", "if", "self", ".", "orientation", "==", "\"horizontal\"", ":", "# Set the axis", "encoding", "[", "\"x\"", "]", "=", "alt", ".", "X", "(", "\"hex\"", ",", "axis", "=", "None", ",", "scale", "=", "alt", ".", "Scale", "(", "zero", "=", "False", ",", "padding", "=", "0", ")", ")", "# Set the rectangle size.", "properties", "[", "\"width\"", "]", "=", "len", "(", "self", ".", "df", ")", "*", "self", ".", "square_size", "properties", "[", "\"height\"", "]", "=", "self", ".", "square_size", "elif", "self", ".", "orientation", "==", "\"vertical\"", ":", "# Set the axis", "encoding", "[", "\"y\"", "]", "=", "alt", ".", "Y", "(", "\"hex\"", ",", "axis", "=", "None", ",", "scale", "=", "alt", ".", "Scale", "(", "zero", "=", "False", ",", "padding", "=", "0", ")", ")", "# Set the rectangle size.", "properties", "[", "\"height\"", "]", "=", "len", "(", "self", ".", "df", ")", "*", "self", ".", "square_size", "properties", "[", "\"width\"", "]", "=", "self", ".", "square_size", "# Build a chart.", "tooltip", "=", "list", "(", "self", ".", "df", ".", "columns", ")", "chart", "=", "alt", ".", "Chart", "(", "self", ".", "df", ")", ".", "mark_rect", "(", ")", ".", "encode", "(", "color", "=", "alt", ".", "Color", "(", "\"hex\"", ",", "legend", "=", "None", ",", "scale", "=", "alt", ".", "Scale", "(", "range", "=", "self", ".", "colors", ")", ")", ",", "tooltip", "=", "tooltip", ",", "*", "*", "encoding", ")", ".", "properties", "(", "*", "*", "properties", ")", "return", "chart" ]
Write color palette to Altair Chart.
[ "Write", "color", "palette", "to", "Altair", "Chart", "." ]
0434e3b2ee124736e6ece63077430eac8eb6ac09
https://github.com/Zsailer/yerkes/blob/0434e3b2ee124736e6ece63077430eac8eb6ac09/yerkes/base.py#L60-L98
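A standalone Altair sketch of the pattern to_chart uses, independent of the EmptyPalette class (whose constructor is not shown): the trick is mapping the 'hex' column through a color scale whose range is the hex values themselves, so each rectangle is painted its own color:

import altair as alt
import pandas as pd

df = pd.DataFrame({'hex': ['#1f77b4', '#ff7f0e', '#2ca02c']})
chart = alt.Chart(df).mark_rect().encode(
    x=alt.X('hex', axis=None, scale=alt.Scale(zero=False, padding=0)),
    color=alt.Color('hex', legend=None, scale=alt.Scale(range=list(df['hex']))),
    tooltip=list(df.columns),
).properties(width=3 * 80, height=80)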
243,588
Zsailer/yerkes
yerkes/base.py
EmptyPalette._repr_mimebundle_
def _repr_mimebundle_(self, *args, **kwargs): """Return a MIME bundle for display in Jupyter frontends.""" chart = self.to_chart() dct = chart.to_dict() return alt.renderers.get()(dct)
python
def _repr_mimebundle_(self, *args, **kwargs): """Return a MIME bundle for display in Jupyter frontends.""" chart = self.to_chart() dct = chart.to_dict() return alt.renderers.get()(dct)
[ "def", "_repr_mimebundle_", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "chart", "=", "self", ".", "to_chart", "(", ")", "dct", "=", "chart", ".", "to_dict", "(", ")", "return", "alt", ".", "renderers", ".", "get", "(", ")", "(", "dct", ")" ]
Return a MIME bundle for display in Jupyter frontends.
[ "Return", "a", "MIME", "bundle", "for", "display", "in", "Jupyter", "frontends", "." ]
0434e3b2ee124736e6ece63077430eac8eb6ac09
https://github.com/Zsailer/yerkes/blob/0434e3b2ee124736e6ece63077430eac8eb6ac09/yerkes/base.py#L100-L104
243,589
jason2506/HTMLMinifier
HTMLMinifier/parser.py
Parser._enter_newline
def _enter_newline(self): """ Remove the trailing spaces in the current line, and then mark that the leading spaces of the next line need to be removed. .. seealso:: `CSS Text Module Level 3 - The White Space Processing Rules <http://www.w3.org/TR/css3-text/#white-space-phase-2>`_ """ last_text_idx = self._last_text_idx if last_text_idx >= 0: buf = self._buffer buf[last_text_idx] = buf[last_text_idx].rstrip() self._remove_begining_ws = True
python
def _enter_newline(self): """ Remove the trailing spaces in the current line, and then mark that the leading spaces of the next line need to be removed. .. seealso:: `CSS Text Module Level 3 - The White Space Processing Rules <http://www.w3.org/TR/css3-text/#white-space-phase-2>`_ """ last_text_idx = self._last_text_idx if last_text_idx >= 0: buf = self._buffer buf[last_text_idx] = buf[last_text_idx].rstrip() self._remove_begining_ws = True
[ "def", "_enter_newline", "(", "self", ")", ":", "last_text_idx", "=", "self", ".", "_last_text_idx", "if", "last_text_idx", ">=", "0", ":", "buf", "=", "self", ".", "_buffer", "buf", "[", "last_text_idx", "]", "=", "buf", "[", "last_text_idx", "]", ".", "rstrip", "(", ")", "self", ".", "_remove_begining_ws", "=", "True" ]
Remove the trailing spaces in the current line, and then mark that the leading spaces of the next line need to be removed. .. seealso:: `CSS Text Module Level 3 - The White Space Processing Rules <http://www.w3.org/TR/css3-text/#white-space-phase-2>`_
[ "Remove", "the", "trailing", "spaces", "in", "the", "current", "line", "and", "then", "mark", "that", "the", "leading", "spaces", "of", "the", "next", "line", "need", "to", "be", "removed", "." ]
9a6a41834b7753ac465f301f0d6b470b90920fad
https://github.com/jason2506/HTMLMinifier/blob/9a6a41834b7753ac465f301f0d6b470b90920fad/HTMLMinifier/parser.py#L102-L117
243,590
jason2506/HTMLMinifier
HTMLMinifier/parser.py
Parser.handle_comment
def handle_comment(self, comment): """ Remove comment except IE conditional comment. .. seealso:: `About conditional comments <http://msdn.microsoft.com/en-us/library/ms537512.ASPX>`_ """ match = _COND_COMMENT_PATTERN.match(comment) if match is not None: cond = match.group(1) content = match.group(2) self._buffer.append(_COND_COMMENT_START_FORMAT % cond) self._push_status() self.feed(content) self._pop_status() self._buffer.append(_COND_COMMENT_END_FORMAT) elif not self.remove_comments: self._buffer.append(_COMMENT_FORMAT % comment)
python
def handle_comment(self, comment): """ Remove comment except IE conditional comment. .. seealso:: `About conditional comments <http://msdn.microsoft.com/en-us/library/ms537512.ASPX>`_ """ match = _COND_COMMENT_PATTERN.match(comment) if match is not None: cond = match.group(1) content = match.group(2) self._buffer.append(_COND_COMMENT_START_FORMAT % cond) self._push_status() self.feed(content) self._pop_status() self._buffer.append(_COND_COMMENT_END_FORMAT) elif not self.remove_comments: self._buffer.append(_COMMENT_FORMAT % comment)
[ "def", "handle_comment", "(", "self", ",", "comment", ")", ":", "match", "=", "_COND_COMMENT_PATTERN", ".", "match", "(", "comment", ")", "if", "match", "is", "not", "None", ":", "cond", "=", "match", ".", "group", "(", "1", ")", "content", "=", "match", ".", "group", "(", "2", ")", "self", ".", "_buffer", ".", "append", "(", "_COND_COMMENT_START_FORMAT", "%", "cond", ")", "self", ".", "_push_status", "(", ")", "self", ".", "feed", "(", "content", ")", "self", ".", "_pop_status", "(", ")", "self", ".", "_buffer", ".", "append", "(", "_COND_COMMENT_END_FORMAT", ")", "elif", "not", "self", ".", "remove_comments", ":", "self", ".", "_buffer", ".", "append", "(", "_COMMENT_FORMAT", "%", "comment", ")" ]
Remove comment except IE conditional comment. .. seealso:: `About conditional comments <http://msdn.microsoft.com/en-us/library/ms537512.ASPX>`_
[ "Remove", "comment", "except", "IE", "conditional", "comment", "." ]
9a6a41834b7753ac465f301f0d6b470b90920fad
https://github.com/jason2506/HTMLMinifier/blob/9a6a41834b7753ac465f301f0d6b470b90920fad/HTMLMinifier/parser.py#L136-L156
243,591
jason2506/HTMLMinifier
HTMLMinifier/parser.py
Parser.handle_data
def handle_data(self, data): """ Any space immediately following another collapsible space will be collapsed. .. seealso:: `CSS Text Module Level 3 - The White Space Processing Rules <http://www.w3.org/TR/css3-text/#egbidiwscollapse>`_ """ tag_stack = self._tag_stack if tag_stack and tag_stack[-1] in _RM_WS_ELEMENTS: # just ignore the content of this element assert data.strip() == '' return if self._preserve == 0: if self._remove_begining_ws: data = data.lstrip() if not data: return self._remove_begining_ws = False self._last_text_idx = len(self._buffer) data = _WS_PATTERN.sub(' ', data) if data and data[-1] == ' ': # immediately followed spaces will be collapsed self._remove_begining_ws = True else: # the content cannot be stripped self._reset_newline_status() self._buffer.append(data)
python
def handle_data(self, data): """ Any space immediately following another collapsible space will be collapsed. .. seealso:: `CSS Text Module Level 3 - The White Space Processing Rules <http://www.w3.org/TR/css3-text/#egbidiwscollapse>`_ """ tag_stack = self._tag_stack if tag_stack and tag_stack[-1] in _RM_WS_ELEMENTS: # just ignore the content of this element assert data.strip() == '' return if self._preserve == 0: if self._remove_begining_ws: data = data.lstrip() if not data: return self._remove_begining_ws = False self._last_text_idx = len(self._buffer) data = _WS_PATTERN.sub(' ', data) if data and data[-1] == ' ': # immediately followed spaces will be collapsed self._remove_begining_ws = True else: # the content cannot be stripped self._reset_newline_status() self._buffer.append(data)
[ "def", "handle_data", "(", "self", ",", "data", ")", ":", "tag_stack", "=", "self", ".", "_tag_stack", "if", "tag_stack", "and", "tag_stack", "[", "-", "1", "]", "in", "_RM_WS_ELEMENTS", ":", "# just ignore the content of this element", "assert", "data", ".", "strip", "(", ")", "==", "''", "return", "if", "self", ".", "_preserve", "==", "0", ":", "if", "self", ".", "_remove_begining_ws", ":", "data", "=", "data", ".", "lstrip", "(", ")", "if", "not", "data", ":", "return", "self", ".", "_remove_begining_ws", "=", "False", "self", ".", "_last_text_idx", "=", "len", "(", "self", ".", "_buffer", ")", "data", "=", "_WS_PATTERN", ".", "sub", "(", "' '", ",", "data", ")", "if", "data", "and", "data", "[", "-", "1", "]", "==", "' '", ":", "# immediately followed spaces will be collapsed", "self", ".", "_remove_begining_ws", "=", "True", "else", ":", "# the content cannot be stripped", "self", ".", "_reset_newline_status", "(", ")", "self", ".", "_buffer", ".", "append", "(", "data", ")" ]
Any space immediately following another collapsible space will be collapsed. .. seealso:: `CSS Text Module Level 3 - The White Space Processing Rules <http://www.w3.org/TR/css3-text/#egbidiwscollapse>`_
[ "Any", "space", "immediately", "following", "another", "collapsible", "space", "will", "be", "collapsed", "." ]
9a6a41834b7753ac465f301f0d6b470b90920fad
https://github.com/jason2506/HTMLMinifier/blob/9a6a41834b7753ac465f301f0d6b470b90920fad/HTMLMinifier/parser.py#L207-L241
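Since the docstring leans on the CSS white-space rules, a self-contained sketch of the collapsing behaviour (with illustrative names, not the parser's internals) shows what the `_remove_begining_ws` flag achieves across successive data chunks:

import re

_WS = re.compile(r'\s+')

def collapse_chunks(chunks):
    out = []
    strip_leading = False  # plays the role of _remove_begining_ws
    for data in chunks:
        if strip_leading:
            data = data.lstrip()
            if not data:
                continue
        data = _WS.sub(' ', data)
        # a trailing space swallows the next chunk's leading whitespace
        strip_leading = data.endswith(' ')
        out.append(data)
    return ''.join(out)

print(repr(collapse_chunks(['Hello   ', '   world\n', '  !'])))  # 'Hello world !'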
243,592
tommilligan/pypersonalassistant
pypersonalassistant/secure.py
secure.twilio_SMS
def twilio_SMS(self, from_, to, body): """ Send an SMS message from your `twilio`_ account. .. _twilio: https://www.twilio.com/ Login will be performed using stored credentials. * *stored credential name: TWILIO_ACCOUNT_SID* * *stored credential name: TWILIO_AUTH_TOKEN* :param string from_: The phone number in your twilio account to send the SMS message from. Full international format. :param string to: The phone number to send the SMS message to. Full international format. :param string body: The content of the SMS message. """ logging.debug('Texting from Twilio') client = TwilioRestClient(self._credentials['TWILIO_ACCOUNT_SID'], self._credentials['TWILIO_AUTH_TOKEN']) response = client.messages.create( to=to, from_=from_, body=body, ) logging.debug('Response from Twilio: {0}'.format(response)) return response
python
def twilio_SMS(self, from_, to, body): """ Send an SMS message from your `twilio`_ account. .. _twilio: https://www.twilio.com/ Login will be performed using stored credentials. * *stored credential name: TWILIO_ACCOUNT_SID* * *stored credential name: TWILIO_AUTH_TOKEN* :param string from_: The phone number in your twilio account to send the SMS message from. Full international format. :param string to: The phone number to send the SMS message to. Full international format. :param string body: The content of the SMS message. """ logging.debug('Texting from Twilio') client = TwilioRestClient(self._credentials['TWILIO_ACCOUNT_SID'], self._credentials['TWILIO_AUTH_TOKEN']) response = client.messages.create( to=to, from_=from_, body=body, ) logging.debug('Response from Twilio: {0}'.format(response)) return response
[ "def", "twilio_SMS", "(", "self", ",", "from_", ",", "to", ",", "body", ")", ":", "logging", ".", "debug", "(", "'Texting from Twilio'", ")", "client", "=", "TwilioRestClient", "(", "self", ".", "_credentials", "[", "'TWILIO_ACCOUNT_SID'", "]", ",", "self", ".", "_credentials", "[", "'TWILIO_AUTH_TOKEN'", "]", ")", "response", "=", "client", ".", "messages", ".", "create", "(", "to", "=", "to", ",", "from_", "=", "from_", ",", "body", "=", "body", ",", ")", "logging", ".", "debug", "(", "'Response from Twilio: {0}'", ".", "format", "(", "response", ")", ")", "return", "response" ]
Send an SMS message from your `twilio`_ account. .. _twilio: https://www.twilio.com/ Login will be performed using stored credentials. * *stored credential name: TWILIO_ACCOUNT_SID* * *stored credential name: TWILIO_AUTH_TOKEN* :param string from_: The phone number in your twilio account to send the SMS message from. Full international format. :param string to: The phone number to send the SMS message to. Full international format. :param string body: The content of the SMS message.
[ "Send", "an", "SMS", "message", "from", "your", "twilio", "_", "account", "." ]
123903189be3f3d73a6f480de5e4442ffd099058
https://github.com/tommilligan/pypersonalassistant/blob/123903189be3f3d73a6f480de5e4442ffd099058/pypersonalassistant/secure.py#L188-L211
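A hedged standalone equivalent of the call above, with placeholder credentials and phone numbers; TwilioRestClient is the pre-6.x twilio-python client this method imports, and the returned message resource is assumed to expose a sid:

from twilio.rest import TwilioRestClient

client = TwilioRestClient('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',  # account SID (placeholder)
                          'your_auth_token')                     # auth token (placeholder)
message = client.messages.create(
    to='+447700900123',    # recipient, full international (E.164) format
    from_='+15005550006',  # a number owned by the twilio account
    body='Build finished: all tests green.',
)
print(message.sid)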
243,593
delfick/aws_syncr
aws_syncr/formatter.py
MergedOptionStringFormatter.special_get_field
def special_get_field(self, value, args, kwargs, format_spec=None): """Also take the spec into account""" if value in self.chain: raise BadOptionFormat("Recursive option", chain=self.chain + [value])
python
def special_get_field(self, value, args, kwargs, format_spec=None): """Also take the spec into account""" if value in self.chain: raise BadOptionFormat("Recursive option", chain=self.chain + [value])
[ "def", "special_get_field", "(", "self", ",", "value", ",", "args", ",", "kwargs", ",", "format_spec", "=", "None", ")", ":", "if", "value", "in", "self", ".", "chain", ":", "raise", "BadOptionFormat", "(", "\"Recursive option\"", ",", "chain", "=", "self", ".", "chain", "+", "[", "value", "]", ")" ]
Also take the spec into account
[ "Also", "take", "the", "spec", "into", "account" ]
8cd214b27c1eee98dfba4632cbb8bc0ae36356bd
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/formatter.py#L68-L71
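The point of the chain check is to refuse cyclic option references; a toy resolver (a stand-in, not aws_syncr's MergedOptionStringFormatter) makes the guard concrete:

import re

class RecursiveOption(Exception):
    pass

def resolve(options, key, chain=()):
    if key in chain:
        # same idea as BadOptionFormat("Recursive option", chain=...)
        raise RecursiveOption(' -> '.join(chain + (key,)))
    # expand nested {refs}, threading the chain through each lookup
    return re.sub(r'\{(\w+)\}',
                  lambda m: resolve(options, m.group(1), chain + (key,)),
                  options[key])

opts = {'greeting': 'hello {name}', 'name': 'world'}
print(resolve(opts, 'greeting'))  # hello world

opts['name'] = '{greeting}'       # introduce a cycle
try:
    resolve(opts, 'greeting')
except RecursiveOption as err:
    print('caught cycle:', err)   # greeting -> name -> greeting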
243,594
incuna/incuna-auth
incuna_auth/middleware/permission_feincms.py
FeinCMSPermissionMiddleware._get_page_from_path
def _get_page_from_path(self, path): """ Fetches the FeinCMS Page object that the path points to. Override this to deal with different types of object from Page. """ from feincms.module.page.models import Page try: return Page.objects.best_match_for_path(path) except Page.DoesNotExist: return None
python
def _get_page_from_path(self, path): """ Fetches the FeinCMS Page object that the path points to. Override this to deal with different types of object from Page. """ from feincms.module.page.models import Page try: return Page.objects.best_match_for_path(path) except Page.DoesNotExist: return None
[ "def", "_get_page_from_path", "(", "self", ",", "path", ")", ":", "from", "feincms", ".", "module", ".", "page", ".", "models", "import", "Page", "try", ":", "return", "Page", ".", "objects", ".", "best_match_for_path", "(", "path", ")", "except", "Page", ".", "DoesNotExist", ":", "return", "None" ]
Fetches the FeinCMS Page object that the path points to. Override this to deal with different types of object from Page.
[ "Fetches", "the", "FeinCMS", "Page", "object", "that", "the", "path", "points", "to", "." ]
949ccd922da15a4b5de17b9595cc8f5114d5385c
https://github.com/incuna/incuna-auth/blob/949ccd922da15a4b5de17b9595cc8f5114d5385c/incuna_auth/middleware/permission_feincms.py#L31-L41
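The docstring invites overriding; a hypothetical subclass might look like the following (only the middleware import path comes from this record, while the Article model and app name are illustrative):

from incuna_auth.middleware.permission_feincms import FeinCMSPermissionMiddleware

class ArticlePermissionMiddleware(FeinCMSPermissionMiddleware):
    def _get_page_from_path(self, path):
        # hypothetical model assumed to offer the same manager API as Page
        from myapp.models import Article
        try:
            return Article.objects.best_match_for_path(path)
        except Article.DoesNotExist:
            return None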
243,595
incuna/incuna-auth
incuna_auth/middleware/permission_feincms.py
FeinCMSPermissionMiddleware._get_resource_access_state
def _get_resource_access_state(self, request): """ Returns the FeinCMS resource's access_state, following any INHERITed values. Will return None if the resource has an access state that should never be protected. It should not be possible to protect a resource with an access_state of STATE_ALL_ALLOWED, or an access_state of STATE_INHERIT and no parent. Will also return None if the accessed URL doesn't contain a Page. """ feincms_page = self._get_page_from_path(request.path_info.lstrip('/')) if not feincms_page: return None # Chase inherited values up the tree of inheritance. INHERIT = AccessState.STATE_INHERIT while feincms_page.access_state == INHERIT and feincms_page.parent: feincms_page = feincms_page.parent # Resources with STATE_ALL_ALLOWED or STATE_INHERIT and no parent should never be # access-restricted. This code is here rather than in is_resource_protected to # emphasise its importance and help avoid accidentally overriding it. never_restricted = (INHERIT, AccessState.STATE_ALL_ALLOWED) if feincms_page.access_state in never_restricted: return None # Return the found value. return feincms_page.access_state
python
def _get_resource_access_state(self, request): """ Returns the FeinCMS resource's access_state, following any INHERITed values. Will return None if the resource has an access state that should never be protected. It should not be possible to protect a resource with an access_state of STATE_ALL_ALLOWED, or an access_state of STATE_INHERIT and no parent. Will also return None if the accessed URL doesn't contain a Page. """ feincms_page = self._get_page_from_path(request.path_info.lstrip('/')) if not feincms_page: return None # Chase inherited values up the tree of inheritance. INHERIT = AccessState.STATE_INHERIT while feincms_page.access_state == INHERIT and feincms_page.parent: feincms_page = feincms_page.parent # Resources with STATE_ALL_ALLOWED or STATE_INHERIT and no parent should never be # access-restricted. This code is here rather than in is_resource_protected to # emphasise its importance and help avoid accidentally overriding it. never_restricted = (INHERIT, AccessState.STATE_ALL_ALLOWED) if feincms_page.access_state in never_restricted: return None # Return the found value. return feincms_page.access_state
[ "def", "_get_resource_access_state", "(", "self", ",", "request", ")", ":", "feincms_page", "=", "self", ".", "_get_page_from_path", "(", "request", ".", "path_info", ".", "lstrip", "(", "'/'", ")", ")", "if", "not", "feincms_page", ":", "return", "None", "# Chase inherited values up the tree of inheritance.", "INHERIT", "=", "AccessState", ".", "STATE_INHERIT", "while", "feincms_page", ".", "access_state", "==", "INHERIT", "and", "feincms_page", ".", "parent", ":", "feincms_page", "=", "feincms_page", ".", "parent", "# Resources with STATE_ALL_ALLOWED or STATE_INHERIT and no parent should never be", "# access-restricted. This code is here rather than in is_resource_protected to", "# emphasise its importance and help avoid accidentally overriding it.", "never_restricted", "=", "(", "INHERIT", ",", "AccessState", ".", "STATE_ALL_ALLOWED", ")", "if", "feincms_page", ".", "access_state", "in", "never_restricted", ":", "return", "None", "# Return the found value.", "return", "feincms_page", ".", "access_state" ]
Returns the FeinCMS resource's access_state, following any INHERITed values. Will return None if the resource has an access state that should never be protected. It should not be possible to protect a resource with an access_state of STATE_ALL_ALLOWED, or an access_state of STATE_INHERIT and no parent. Will also return None if the accessed URL doesn't contain a Page.
[ "Returns", "the", "FeinCMS", "resource", "s", "access_state", "following", "any", "INHERITed", "values", "." ]
949ccd922da15a4b5de17b9595cc8f5114d5385c
https://github.com/incuna/incuna-auth/blob/949ccd922da15a4b5de17b9595cc8f5114d5385c/incuna_auth/middleware/permission_feincms.py#L43-L70
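A model-free sketch of the INHERIT-chasing loop, using plain objects so the walk can be tested in isolation (the state constants are stand-ins for AccessState's values):

INHERIT, ALL_ALLOWED, AUTH_ONLY = 'inherit', 'all', 'auth'

class Node(object):
    def __init__(self, access_state, parent=None):
        self.access_state = access_state
        self.parent = parent

def effective_state(node):
    # chase inherited values up the tree, exactly like the loop above
    while node.access_state == INHERIT and node.parent:
        node = node.parent
    if node.access_state in (INHERIT, ALL_ALLOWED):
        return None  # never restricted
    return node.access_state

root = Node(AUTH_ONLY)
child = Node(INHERIT, parent=root)
print(effective_state(child))          # 'auth' -- inherited from root
print(effective_state(Node(INHERIT)))  # None -- INHERIT with no parent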
243,596
incuna/incuna-auth
incuna_auth/middleware/permission_feincms.py
FeinCMSPermissionMiddleware.is_resource_protected
def is_resource_protected(self, request, **kwargs): """ Determines if a resource should be protected. Returns true if and only if the resource's access_state matches an entry in the return value of get_protected_states(). """ access_state = self._get_resource_access_state(request) protected_states = self.get_protected_states() return access_state in protected_states
python
def is_resource_protected(self, request, **kwargs): """ Determines if a resource should be protected. Returns true if and only if the resource's access_state matches an entry in the return value of get_protected_states(). """ access_state = self._get_resource_access_state(request) protected_states = self.get_protected_states() return access_state in protected_states
[ "def", "is_resource_protected", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "access_state", "=", "self", ".", "_get_resource_access_state", "(", "request", ")", "protected_states", "=", "self", ".", "get_protected_states", "(", ")", "return", "access_state", "in", "protected_states" ]
Determines if a resource should be protected. Returns true if and only if the resource's access_state matches an entry in the return value of get_protected_states().
[ "Determines", "if", "a", "resource", "should", "be", "protected", "." ]
949ccd922da15a4b5de17b9595cc8f5114d5385c
https://github.com/incuna/incuna-auth/blob/949ccd922da15a4b5de17b9595cc8f5114d5385c/incuna_auth/middleware/permission_feincms.py#L72-L81
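A hypothetical concrete middleware built on this hook; STATE_AUTH_ONLY is a placeholder, since only the restricted states you define ever reach this membership test:

from incuna_auth.middleware.permission_feincms import FeinCMSPermissionMiddleware

class AuthOnlyPermissionMiddleware(FeinCMSPermissionMiddleware):
    def get_protected_states(self):
        # AccessState's import is not shown in this record, and
        # STATE_AUTH_ONLY stands in for your real restricted state(s);
        # STATE_INHERIT / STATE_ALL_ALLOWED never make it this far.
        return [AccessState.STATE_AUTH_ONLY]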
243,597
delfick/aws_syncr
aws_syncr/differ.py
Differ.compare_two_documents
def compare_two_documents(kls, doc1, doc2): """Compare two documents by converting them into json objects and comparing them""" first = doc1 if isinstance(doc1, string_types): try: first = json.loads(doc1) except (ValueError, TypeError) as error: log.warning("Failed to convert doc into a json object\terror=%s", error) yield error.args[0] return second = doc2 if isinstance(doc2, string_types): try: second = json.loads(doc2) except (ValueError, TypeError) as error: log.warning("Failed to convert doc into a json object\terror=%s", error) yield error.args[0] return # Ordering the principals because the ordering amazon gives me hates me def sort_statement(statement): for principal in (statement.get("Principal", None), statement.get("NotPrincipal", None)): if principal: for principal_type in ("AWS", "Federated", "Service"): if principal_type in principal and type(principal[principal_type]) is list: principal[principal_type] = sorted(principal[principal_type]) def sort_key(statement, key): if key in statement and type(statement[key]) is list: statement[key] = sorted(statement[key]) for document in (first, second): if "Statement" in document: if type(document["Statement"]) is dict: sort_statement(document["Statement"]) sort_key(document["Statement"], "Action") sort_key(document["Statement"], "NotAction") sort_key(document["Statement"], "Resource") sort_key(document["Statement"], "NotResource") else: for statement in document["Statement"]: sort_statement(statement) sort_key(statement, "Action") sort_key(statement, "NotAction") sort_key(statement, "Resource") sort_key(statement, "NotResource") difference = diff(first, second, fromfile="current", tofile="new").stringify() if difference: lines = difference.split('\n') if not first or not second or first != second: for line in lines: yield line
python
def compare_two_documents(kls, doc1, doc2): """Compare two documents by converting them into json objects and comparing them""" first = doc1 if isinstance(doc1, string_types): try: first = json.loads(doc1) except (ValueError, TypeError) as error: log.warning("Failed to convert doc into a json object\terror=%s", error) yield error.args[0] return second = doc2 if isinstance(doc2, string_types): try: second = json.loads(doc2) except (ValueError, TypeError) as error: log.warning("Failed to convert doc into a json object\terror=%s", error) yield error.args[0] return # Ordering the principals because the ordering amazon gives me hates me def sort_statement(statement): for principal in (statement.get("Principal", None), statement.get("NotPrincipal", None)): if principal: for principal_type in ("AWS", "Federated", "Service"): if principal_type in principal and type(principal[principal_type]) is list: principal[principal_type] = sorted(principal[principal_type]) def sort_key(statement, key): if key in statement and type(statement[key]) is list: statement[key] = sorted(statement[key]) for document in (first, second): if "Statement" in document: if type(document["Statement"]) is dict: sort_statement(document["Statement"]) sort_key(document["Statement"], "Action") sort_key(document["Statement"], "NotAction") sort_key(document["Statement"], "Resource") sort_key(document["Statement"], "NotResource") else: for statement in document["Statement"]: sort_statement(statement) sort_key(statement, "Action") sort_key(statement, "NotAction") sort_key(statement, "Resource") sort_key(statement, "NotResource") difference = diff(first, second, fromfile="current", tofile="new").stringify() if difference: lines = difference.split('\n') if not first or not second or first != second: for line in lines: yield line
[ "def", "compare_two_documents", "(", "kls", ",", "doc1", ",", "doc2", ")", ":", "first", "=", "doc1", "if", "isinstance", "(", "doc1", ",", "string_types", ")", ":", "try", ":", "first", "=", "json", ".", "loads", "(", "doc1", ")", "except", "(", "ValueError", ",", "TypeError", ")", "as", "error", ":", "log", ".", "warning", "(", "\"Failed to convert doc into a json object\\terror=%s\"", ",", "error", ")", "yield", "error", ".", "args", "[", "0", "]", "return", "second", "=", "doc2", "if", "isinstance", "(", "doc2", ",", "string_types", ")", ":", "try", ":", "second", "=", "json", ".", "loads", "(", "doc2", ")", "except", "(", "ValueError", ",", "TypeError", ")", "as", "error", ":", "log", ".", "warning", "(", "\"Failed to convert doc into a json object\\terror=%s\"", ",", "error", ")", "yield", "error", ".", "args", "[", "0", "]", "return", "# Ordering the principals because the ordering amazon gives me hates me", "def", "sort_statement", "(", "statement", ")", ":", "for", "principal", "in", "(", "statement", ".", "get", "(", "\"Principal\"", ",", "None", ")", ",", "statement", ".", "get", "(", "\"NotPrincipal\"", ",", "None", ")", ")", ":", "if", "principal", ":", "for", "principal_type", "in", "(", "\"AWS\"", ",", "\"Federated\"", ",", "\"Service\"", ")", ":", "if", "principal_type", "in", "principal", "and", "type", "(", "principal", "[", "principal_type", "]", ")", "is", "list", ":", "principal", "[", "principal_type", "]", "=", "sorted", "(", "principal", "[", "principal_type", "]", ")", "def", "sort_key", "(", "statement", ",", "key", ")", ":", "if", "key", "in", "statement", "and", "type", "(", "statement", "[", "key", "]", ")", "is", "list", ":", "statement", "[", "key", "]", "=", "sorted", "(", "statement", "[", "key", "]", ")", "for", "document", "in", "(", "first", ",", "second", ")", ":", "if", "\"Statement\"", "in", "document", ":", "if", "type", "(", "document", "[", "\"Statement\"", "]", ")", "is", "dict", ":", "sort_statement", "(", "document", "[", "\"Statement\"", "]", ")", "sort_key", "(", "document", "[", "\"Statement\"", "]", ",", "\"Action\"", ")", "sort_key", "(", "document", "[", "\"Statement\"", "]", ",", "\"NotAction\"", ")", "sort_key", "(", "document", "[", "\"Statement\"", "]", ",", "\"Resource\"", ")", "sort_key", "(", "document", "[", "\"Statement\"", "]", ",", "\"NotResource\"", ")", "else", ":", "for", "statement", "in", "document", "[", "\"Statement\"", "]", ":", "sort_statement", "(", "statement", ")", "sort_key", "(", "statement", ",", "\"Action\"", ")", "sort_key", "(", "statement", ",", "\"NotAction\"", ")", "sort_key", "(", "statement", ",", "\"Resource\"", ")", "sort_key", "(", "statement", ",", "\"NotResource\"", ")", "difference", "=", "diff", "(", "first", ",", "second", ",", "fromfile", "=", "\"current\"", ",", "tofile", "=", "\"new\"", ")", ".", "stringify", "(", ")", "if", "difference", ":", "lines", "=", "difference", ".", "split", "(", "'\\n'", ")", "if", "not", "first", "or", "not", "second", "or", "first", "!=", "second", ":", "for", "line", "in", "lines", ":", "yield", "line" ]
Compare two documents by converting them into json objects and comparing them
[ "Compare", "two", "documents", "by", "converting", "them", "into", "json", "objects", "and", "back", "to", "strings", "and", "compare" ]
8cd214b27c1eee98dfba4632cbb8bc0ae36356bd
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/differ.py#L11-L62
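A hedged usage sketch: two policies that differ only in list ordering should yield no diff lines once the sort_* helpers normalise them, while a real change yields unified-diff output between "current" and "new". The import path follows this record; the expected outputs are assumptions:

from aws_syncr.differ import Differ

doc_a = '{"Statement": [{"Action": ["s3:Get*", "s3:List*"], "Effect": "Allow"}]}'
doc_b = '{"Statement": [{"Action": ["s3:List*", "s3:Get*"], "Effect": "Allow"}]}'
print(list(Differ.compare_two_documents(doc_a, doc_b)))  # expected: [] -- same policy once sorted

doc_c = '{"Statement": [{"Action": ["s3:Put*"], "Effect": "Allow"}]}'
for line in Differ.compare_two_documents(doc_a, doc_c):
    print(line)  # diff lines describing the Action change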
243,598
johnnoone/facts
facts/contribs/__init__.py
facter_info
def facter_info(): """Returns data from facter. """ with suppress(FileNotFoundError): # facter may not be installed proc = subprocess.Popen(['facter', '--yaml'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() if not proc.returncode: data = serializer.load(stdout) return {'facter': data}
python
def facter_info(): """Returns data from facter. """ with suppress(FileNotFoundError): # facter may not be installed proc = subprocess.Popen(['facter', '--yaml'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() if not proc.returncode: data = serializer.load(stdout) return {'facter': data}
[ "def", "facter_info", "(", ")", ":", "with", "suppress", "(", "FileNotFoundError", ")", ":", "# facter may not be installed", "proc", "=", "subprocess", ".", "Popen", "(", "[", "'facter'", ",", "'--yaml'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "stdout", ",", "stderr", "=", "proc", ".", "communicate", "(", ")", "if", "not", "proc", ".", "returncode", ":", "data", "=", "serializer", ".", "load", "(", "stdout", ")", "return", "{", "'facter'", ":", "data", "}" ]
Returns data from facter.
[ "Returns", "data", "from", "facter", "." ]
82d38a46c15d9c01200445526f4c0d1825fc1e51
https://github.com/johnnoone/facts/blob/82d38a46c15d9c01200445526f4c0d1825fc1e51/facts/contribs/__init__.py#L8-L19
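Worth noting at the call site: the function returns a {'facter': {...}} dict only when the binary exists and exits cleanly, and falls through to None otherwise (the FileNotFoundError is suppressed). A small usage sketch:

from facts.contribs import facter_info

facts = facter_info()
if facts is None:
    print('facter unavailable or failed; fall back to other sources')
else:
    print(sorted(facts['facter']))  # top-level fact names reported by facter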
243,599
jrheard/madison_wcb
madison_wcb/wcb.py
get_color
def get_color(index): """Dips the brush in paint. Arguments: index - an integer between 0 and 7, inclusive. Tells the bot which color you want. """ if index in range(0, 8): # Send the turtle to the top-left corner of the window to imitate the position of the WCB's brush. state['turtle'].goto(-WCB_WIDTH / 2, -WCB_HEIGHT / 2) _make_cnc_request("tool.color./" + str(index)) # This is the order of the colors in the palette in our classroom's bot; yours may vary! colors = ["black", "red", "orange", "yellow", "green", "blue", "purple", "brown"] state['turtle'].color(colors[index]) state['distance_traveled'] = 0 else: print("Color indexes must be between 0 and 7, but you gave me: " + str(index))
python
def get_color(index): """Dips the brush in paint. Arguments: index - an integer between 0 and 7, inclusive. Tells the bot which color you want. """ if index in range(0, 8): # Send the turtle to the top-left corner of the window to imitate the position of the WCB's brush. state['turtle'].goto(-WCB_WIDTH / 2, -WCB_HEIGHT / 2) _make_cnc_request("tool.color./" + str(index)) # This is the order of the colors in the palette in our classroom's bot; yours may vary! colors = ["black", "red", "orange", "yellow", "green", "blue", "purple", "brown"] state['turtle'].color(colors[index]) state['distance_traveled'] = 0 else: print("Color indexes must be between 0 and 7, but you gave me: " + str(index))
[ "def", "get_color", "(", "index", ")", ":", "if", "index", "in", "range", "(", "0", ",", "8", ")", ":", "# Send the turtle to the top-left corner of the window to imitate the position of the WCB's brush.", "state", "[", "'turtle'", "]", ".", "goto", "(", "-", "WCB_WIDTH", "/", "2", ",", "-", "WCB_HEIGHT", "/", "2", ")", "_make_cnc_request", "(", "\"tool.color./\"", "+", "str", "(", "index", ")", ")", "# This is the order of the colors in the palette in our classroom's bot; yours may vary!", "colors", "=", "[", "\"black\"", ",", "\"red\"", ",", "\"orange\"", ",", "\"yellow\"", ",", "\"green\"", ",", "\"blue\"", ",", "\"purple\"", ",", "\"brown\"", "]", "state", "[", "'turtle'", "]", ".", "color", "(", "colors", "[", "index", "]", ")", "state", "[", "'distance_traveled'", "]", "=", "0", "else", ":", "print", "(", "\"Color indexes must be between 0 and 7, but you gave me: \"", "+", "index", ")" ]
Dips the brush in paint. Arguments: index - an integer between 0 and 7, inclusive. Tells the bot which color you want.
[ "Dips", "the", "brush", "in", "paint", "." ]
545e92e13c8fb46e0d805edef6b6146ab25373e9
https://github.com/jrheard/madison_wcb/blob/545e92e13c8fb46e0d805edef6b6146ab25373e9/madison_wcb/wcb.py#L66-L85
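Hypothetical usage, assuming the module's turtle window and bot connection have already been set up elsewhere:

from madison_wcb.wcb import get_color

get_color(2)  # valid index: homes the turtle, requests tool.color./2, switches to orange
get_color(9)  # out of range: prints the error message and changes nothing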