column            dtype          lengths / values
id                int32          0 .. 252k
repo              stringlengths  7 .. 55
path              stringlengths  4 .. 127
func_name         stringlengths  1 .. 88
original_string   stringlengths  75 .. 19.8k
language          stringclasses  1 value
code              stringlengths  75 .. 19.8k
code_tokens       list
docstring         stringlengths  3 .. 17.3k
docstring_tokens  list
sha               stringlengths  40 .. 40
url               stringlengths  87 .. 242
242,200
briancappello/flask-sqlalchemy-bundle
flask_sqlalchemy_bundle/meta/model_meta_factory.py
ModelMetaFactory._get_model_meta_options
def _get_model_meta_options(self) -> List[MetaOption]: """ Define fields allowed in the Meta class on end-user models, and the behavior of each. Custom ModelMetaOptions classes should override this method to customize the options supported on class Meta of end-user models. """ # we can't use current_app to determine if we're under test, because it # doesn't exist yet testing_options = ([] if os.getenv('FLASK_ENV', False) != TEST else [_TestingMetaOption()]) # when options require another option, its dependent must be listed. # options in this list are not order-dependent, except where noted. # all ColumnMetaOptions subclasses require PolymorphicMetaOption return testing_options + [ AbstractMetaOption(), # required; must be first LazyMappedMetaOption(), RelationshipsMetaOption(), # requires lazy_mapped TableMetaOption(), MaterializedViewForMetaOption(), PolymorphicMetaOption(), # must be first of all polymorphic options PolymorphicOnColumnMetaOption(), PolymorphicIdentityMetaOption(), PolymorphicBaseTablenameMetaOption(), PolymorphicJoinedPkColumnMetaOption(), # requires _BaseTablename # must be after PolymorphicJoinedPkColumnMetaOption PrimaryKeyColumnMetaOption(), CreatedAtColumnMetaOption(), UpdatedAtColumnMetaOption(), ]
python
def _get_model_meta_options(self) -> List[MetaOption]: """ Define fields allowed in the Meta class on end-user models, and the behavior of each. Custom ModelMetaOptions classes should override this method to customize the options supported on class Meta of end-user models. """ # we can't use current_app to determine if we're under test, because it # doesn't exist yet testing_options = ([] if os.getenv('FLASK_ENV', False) != TEST else [_TestingMetaOption()]) # when options require another option, its dependent must be listed. # options in this list are not order-dependent, except where noted. # all ColumnMetaOptions subclasses require PolymorphicMetaOption return testing_options + [ AbstractMetaOption(), # required; must be first LazyMappedMetaOption(), RelationshipsMetaOption(), # requires lazy_mapped TableMetaOption(), MaterializedViewForMetaOption(), PolymorphicMetaOption(), # must be first of all polymorphic options PolymorphicOnColumnMetaOption(), PolymorphicIdentityMetaOption(), PolymorphicBaseTablenameMetaOption(), PolymorphicJoinedPkColumnMetaOption(), # requires _BaseTablename # must be after PolymorphicJoinedPkColumnMetaOption PrimaryKeyColumnMetaOption(), CreatedAtColumnMetaOption(), UpdatedAtColumnMetaOption(), ]
[ "def", "_get_model_meta_options", "(", "self", ")", "->", "List", "[", "MetaOption", "]", ":", "# we can't use current_app to determine if we're under test, because it", "# doesn't exist yet", "testing_options", "=", "(", "[", "]", "if", "os", ".", "getenv", "(", "'FLASK_ENV'", ",", "False", ")", "!=", "TEST", "else", "[", "_TestingMetaOption", "(", ")", "]", ")", "# when options require another option, its dependent must be listed.", "# options in this list are not order-dependent, except where noted.", "# all ColumnMetaOptions subclasses require PolymorphicMetaOption", "return", "testing_options", "+", "[", "AbstractMetaOption", "(", ")", ",", "# required; must be first", "LazyMappedMetaOption", "(", ")", ",", "RelationshipsMetaOption", "(", ")", ",", "# requires lazy_mapped", "TableMetaOption", "(", ")", ",", "MaterializedViewForMetaOption", "(", ")", ",", "PolymorphicMetaOption", "(", ")", ",", "# must be first of all polymorphic options", "PolymorphicOnColumnMetaOption", "(", ")", ",", "PolymorphicIdentityMetaOption", "(", ")", ",", "PolymorphicBaseTablenameMetaOption", "(", ")", ",", "PolymorphicJoinedPkColumnMetaOption", "(", ")", ",", "# requires _BaseTablename", "# must be after PolymorphicJoinedPkColumnMetaOption", "PrimaryKeyColumnMetaOption", "(", ")", ",", "CreatedAtColumnMetaOption", "(", ")", ",", "UpdatedAtColumnMetaOption", "(", ")", ",", "]" ]
Define fields allowed in the Meta class on end-user models, and the behavior of each. Custom ModelMetaOptions classes should override this method to customize the options supported on class Meta of end-user models.
[ "Define", "fields", "allowed", "in", "the", "Meta", "class", "on", "end", "-", "user", "models", "and", "the", "behavior", "of", "each", "." ]
8150896787907ef0001839b5a6ef303edccb9b6c
https://github.com/briancappello/flask-sqlalchemy-bundle/blob/8150896787907ef0001839b5a6ef303edccb9b6c/flask_sqlalchemy_bundle/meta/model_meta_factory.py#L30-L63
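A minimal customization sketch for the method above, assuming ModelMetaFactory is importable from the module in this record; MyCustomMetaOption stands in for a hypothetical MetaOption subclass:

    from flask_sqlalchemy_bundle.meta.model_meta_factory import ModelMetaFactory

    class MyModelMetaFactory(ModelMetaFactory):
        def _get_model_meta_options(self):
            # keep the defaults and their ordering (AbstractMetaOption stays first),
            # then append a custom option; MyCustomMetaOption is hypothetical
            return super()._get_model_meta_options() + [MyCustomMetaOption()]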
242,201
rjw57/throw
throw/minus/minus.py
CreateGallery
def CreateGallery(): """Creates a Gallery on the server. Returns a Gallery object with the editor_id and reader_id. """ url = 'http://min.us/api/CreateGallery' response = _dopost(url) _editor_id = response["editor_id"] _reader_id = response["reader_id"] return Gallery(_reader_id, editor_id=_editor_id)
python
def CreateGallery(): """Creates a Gallery on the server. Returns a Gallery object with the editor_id and reader_id. """ url = 'http://min.us/api/CreateGallery' response = _dopost(url) _editor_id = response["editor_id"] _reader_id = response["reader_id"] return Gallery(_reader_id, editor_id=_editor_id)
[ "def", "CreateGallery", "(", ")", ":", "url", "=", "'http://min.us/api/CreateGallery'", "response", "=", "_dopost", "(", "url", ")", "_editor_id", "=", "response", "[", "\"editor_id\"", "]", "_reader_id", "=", "response", "[", "\"reader_id\"", "]", "return", "Gallery", "(", "_reader_id", ",", "editor_id", "=", "_editor_id", ")" ]
Creates a Gallery on the server. Returns a Gallery object with the editor_id and reader_id.
[ "Creates", "a", "Gallery", "on", "the", "server", ".", "Returns", "a", "Gallery", "object", "with", "the", "editor_id", "and", "reader_id", "." ]
74a7116362ba5b45635ab247472b25cfbdece4ee
https://github.com/rjw57/throw/blob/74a7116362ba5b45635ab247472b25cfbdece4ee/throw/minus/minus.py#L102-L114
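A quick usage sketch for CreateGallery; it needs network access to min.us, and the reader_id/editor_id attribute names on Gallery are assumptions inferred from the constructor call above:

    from throw.minus.minus import CreateGallery

    gallery = CreateGallery()   # POSTs to http://min.us/api/CreateGallery
    print(gallery.reader_id)    # read-only id
    print(gallery.editor_id)    # id that grants write access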
242,202
renzon/gaepermission
gaepermission/facade.py
logged_user
def logged_user(request): """ Returns a command that retrieves the currently logged-in user based on a secure cookie. If there is no logged-in user, the result from the command is None. """ dct = cookie_facade.retrive_cookie_data(request, USER_COOKIE_NAME).execute().result if dct is None: return Command() return NodeSearch(dct['id'])
python
def logged_user(request): """ Returns a command that retrieves the currently logged-in user based on a secure cookie. If there is no logged-in user, the result from the command is None. """ dct = cookie_facade.retrive_cookie_data(request, USER_COOKIE_NAME).execute().result if dct is None: return Command() return NodeSearch(dct['id'])
[ "def", "logged_user", "(", "request", ")", ":", "dct", "=", "cookie_facade", ".", "retrive_cookie_data", "(", "request", ",", "USER_COOKIE_NAME", ")", ".", "execute", "(", ")", ".", "result", "if", "dct", "is", "None", ":", "return", "Command", "(", ")", "return", "NodeSearch", "(", "dct", "[", "'id'", "]", ")" ]
Returns a command that retrieves the currently logged-in user based on a secure cookie. If there is no logged-in user, the result from the command is None.
[ "Returns", "a", "command", "that", "retrieves", "the", "current", "logged", "user", "based", "on", "secure", "cookie", "If", "there", "is", "no", "logged", "user", "the", "result", "from", "command", "is", "None" ]
1a3534a7ef150ba31fa8df3bc8445557cab3d79d
https://github.com/renzon/gaepermission/blob/1a3534a7ef150ba31fa8df3bc8445557cab3d79d/gaepermission/facade.py#L57-L65
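A usage sketch for logged_user inside a request handler; the execute().result command protocol mirrors the call the function itself makes on the cookie command, and request is assumed to be in scope:

    from gaepermission import facade

    cmd = facade.logged_user(request)
    user = cmd.execute().result  # None when nobody is logged in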
242,203
renzon/gaepermission
gaepermission/facade.py
find_users_by_email_starting_with
def find_users_by_email_starting_with(email_prefix=None, cursor=None, page_size=30): """ Returns a command that retrieves users whose email starts with email_prefix, ordered by email. It returns at most the number of users defined by the page_size arg. The next results can be retrieved in a subsequent call using the cursor, which is provided in the cursor attribute of the command. """ email_prefix = email_prefix or '' return ModelSearchCommand(MainUser.query_email_starts_with(email_prefix), page_size, cursor, cache_begin=None)
python
def find_users_by_email_starting_with(email_prefix=None, cursor=None, page_size=30): """ Returns a command that retrieves users whose email starts with email_prefix, ordered by email. It returns at most the number of users defined by the page_size arg. The next results can be retrieved in a subsequent call using the cursor, which is provided in the cursor attribute of the command. """ email_prefix = email_prefix or '' return ModelSearchCommand(MainUser.query_email_starts_with(email_prefix), page_size, cursor, cache_begin=None)
[ "def", "find_users_by_email_starting_with", "(", "email_prefix", "=", "None", ",", "cursor", "=", "None", ",", "page_size", "=", "30", ")", ":", "email_prefix", "=", "email_prefix", "or", "''", "return", "ModelSearchCommand", "(", "MainUser", ".", "query_email_starts_with", "(", "email_prefix", ")", ",", "page_size", ",", "cursor", ",", "cache_begin", "=", "None", ")" ]
Returns a command that retrieves users whose email starts with email_prefix, ordered by email. It returns at most the number of users defined by the page_size arg. The next results can be retrieved in a subsequent call using the cursor, which is provided in the cursor attribute of the command.
[ "Returns", "a", "command", "that", "retrieves", "users", "by", "its", "email_prefix", "ordered", "by", "email", ".", "It", "returns", "a", "max", "number", "of", "users", "defined", "by", "page_size", "arg", ".", "Next", "result", "can", "be", "retrieved", "using", "cursor", "in", "a", "next", "call", ".", "It", "is", "provided", "in", "cursor", "attribute", "from", "command", "." ]
1a3534a7ef150ba31fa8df3bc8445557cab3d79d
https://github.com/renzon/gaepermission/blob/1a3534a7ef150ba31fa8df3bc8445557cab3d79d/gaepermission/facade.py#L108-L117
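A pagination sketch for the finder above; per the docstring, the cursor for the next page is exposed on the command's cursor attribute, and execute().result is the same command protocol used elsewhere in this module:

    from gaepermission import facade

    cmd = facade.find_users_by_email_starting_with('john', page_size=10)
    first_page = cmd.execute().result
    # pass the cursor back in to fetch the following page
    next_cmd = facade.find_users_by_email_starting_with('john', cursor=cmd.cursor, page_size=10)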
242,204
renzon/gaepermission
gaepermission/facade.py
find_users_by_email_and_group
def find_users_by_email_and_group(email_prefix=None, group=None, cursor=None, page_size=30): """ Returns a command that retrieves users whose email starts with email_prefix, ordered by email and by group. If group is None, only users without any group are searched. It returns at most the number of users defined by the page_size arg. The next results can be retrieved in a subsequent call using the cursor, which is provided in the cursor attribute of the command. """ email_prefix = email_prefix or '' return ModelSearchCommand(MainUser.query_email_and_group(email_prefix, group), page_size, cursor, cache_begin=None)
python
def find_users_by_email_and_group(email_prefix=None, group=None, cursor=None, page_size=30): """ Returns a command that retrieves users whose email starts with email_prefix, ordered by email and by group. If group is None, only users without any group are searched. It returns at most the number of users defined by the page_size arg. The next results can be retrieved in a subsequent call using the cursor, which is provided in the cursor attribute of the command. """ email_prefix = email_prefix or '' return ModelSearchCommand(MainUser.query_email_and_group(email_prefix, group), page_size, cursor, cache_begin=None)
[ "def", "find_users_by_email_and_group", "(", "email_prefix", "=", "None", ",", "group", "=", "None", ",", "cursor", "=", "None", ",", "page_size", "=", "30", ")", ":", "email_prefix", "=", "email_prefix", "or", "''", "return", "ModelSearchCommand", "(", "MainUser", ".", "query_email_and_group", "(", "email_prefix", ",", "group", ")", ",", "page_size", ",", "cursor", ",", "cache_begin", "=", "None", ")" ]
Returns a command that retrieves users whose email starts with email_prefix, ordered by email and by group. If group is None, only users without any group are searched. It returns at most the number of users defined by the page_size arg. The next results can be retrieved in a subsequent call using the cursor, which is provided in the cursor attribute of the command.
[ "Returns", "a", "command", "that", "retrieves", "users", "by", "its", "email_prefix", "ordered", "by", "email", "and", "by", "Group", ".", "If", "Group", "is", "None", "only", "users", "without", "any", "group", "are", "going", "to", "be", "searched", "It", "returns", "a", "max", "number", "of", "users", "defined", "by", "page_size", "arg", ".", "Next", "result", "can", "be", "retrieved", "using", "cursor", "in", "a", "next", "call", ".", "It", "is", "provided", "in", "cursor", "attribute", "from", "command", "." ]
1a3534a7ef150ba31fa8df3bc8445557cab3d79d
https://github.com/renzon/gaepermission/blob/1a3534a7ef150ba31fa8df3bc8445557cab3d79d/gaepermission/facade.py#L120-L130
242,205
rehandalal/buchner
buchner/project-template/PROJECTMODULE/errors.py
json_error
def json_error(code, message): """Returns a JSON-ified error object""" # Message can be an unserializable object. message = repr(message) return jsonify(dict(request=request.path, message=message)), code
python
def json_error(code, message): """Returns a JSON-ified error object""" # Message can be an unserializable object. message = repr(message) return jsonify(dict(request=request.path, message=message)), code
[ "def", "json_error", "(", "code", ",", "message", ")", ":", "# Message can be an unserializable object.", "message", "=", "repr", "(", "message", ")", "return", "jsonify", "(", "dict", "(", "request", "=", "request", ".", "path", ",", "message", "=", "message", ")", ")", ",", "code" ]
Returns a JSON-ified error object
[ "Returns", "a", "JSON", "-", "ified", "error", "object" ]
dc22a61c493b9d4a74d76e8b42a319aa13e385f3
https://github.com/rehandalal/buchner/blob/dc22a61c493b9d4a74d76e8b42a319aa13e385f3/buchner/project-template/PROJECTMODULE/errors.py#L12-L16
242,206
rehandalal/buchner
buchner/project-template/PROJECTMODULE/errors.py
error
def error(code, message, template): """A generic error handler""" if json_requested(): return json_error(code, message) else: return render_template(template, message=message), code
python
def error(code, message, template): """A generic error handler""" if json_requested(): return json_error(code, message) else: return render_template(template, message=message), code
[ "def", "error", "(", "code", ",", "message", ",", "template", ")", ":", "if", "json_requested", "(", ")", ":", "return", "json_error", "(", "code", ",", "message", ")", "else", ":", "return", "render_template", "(", "template", ",", "message", "=", "message", ")", ",", "code" ]
A generic error handler
[ "A", "generic", "error", "handler" ]
dc22a61c493b9d4a74d76e8b42a319aa13e385f3
https://github.com/rehandalal/buchner/blob/dc22a61c493b9d4a74d76e8b42a319aa13e385f3/buchner/project-template/PROJECTMODULE/errors.py#L19-L24
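A sketch of wiring the two handlers above into a Flask app; the errors module path is project-template specific (PROJECTMODULE is a placeholder), and 404.html is an assumed template name:

    from flask import Flask
    from errors import error  # stands in for PROJECTMODULE.errors

    app = Flask(__name__)

    @app.errorhandler(404)
    def not_found(exc):
        # renders 404.html, or a JSON body when the client asked for JSON
        return error(404, exc, '404.html')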
242,207
MacHu-GWU/angora-project
angora/text/strtemplate.py
StrTemplate.straight_line_show
def straight_line_show(title, length=100, linestyle="=", pad=0): """Print a formatted straight line. """ print(StrTemplate.straight_line( title=title, length=length, linestyle=linestyle, pad=pad))
python
def straight_line_show(title, length=100, linestyle="=", pad=0): """Print a formatted straight line. """ print(StrTemplate.straight_line( title=title, length=length, linestyle=linestyle, pad=pad))
[ "def", "straight_line_show", "(", "title", ",", "length", "=", "100", ",", "linestyle", "=", "\"=\"", ",", "pad", "=", "0", ")", ":", "print", "(", "StrTemplate", ".", "straight_line", "(", "title", "=", "title", ",", "length", "=", "length", ",", "linestyle", "=", "linestyle", ",", "pad", "=", "pad", ")", ")" ]
Print a formatted straight line.
[ "Print", "a", "formatted", "straight", "line", "." ]
689a60da51cd88680ddbe26e28dbe81e6b01d275
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/text/strtemplate.py#L42-L46
242,208
MacHu-GWU/angora-project
angora/text/strtemplate.py
StrTemplate.indented_show
def indented_show(text, howmany=1): """Print a formatted indented text. """ print(StrTemplate.pad_indent(text=text, howmany=howmany))
python
def indented_show(text, howmany=1): """Print a formatted indented text. """ print(StrTemplate.pad_indent(text=text, howmany=howmany))
[ "def", "indented_show", "(", "text", ",", "howmany", "=", "1", ")", ":", "print", "(", "StrTemplate", ".", "pad_indent", "(", "text", "=", "text", ",", "howmany", "=", "howmany", ")", ")" ]
Print a formatted indented text.
[ "Print", "a", "formatted", "indented", "text", "." ]
689a60da51cd88680ddbe26e28dbe81e6b01d275
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/text/strtemplate.py#L60-L63
242,209
MacHu-GWU/angora-project
angora/text/strtemplate.py
StrTemplate.box_show
def box_show(text, width=100, height=3, corner="+", horizontal="-", vertical="|"): """Print a formatted ascii text box. """ print(StrTemplate.box(text=text, width=width, height=height, corner=corner, horizontal=horizontal, vertical=vertical))
python
def box_show(text, width=100, height=3, corner="+", horizontal="-", vertical="|"): """Print a formatted ascii text box. """ print(StrTemplate.box(text=text, width=width, height=height, corner=corner, horizontal=horizontal, vertical=vertical))
[ "def", "box_show", "(", "text", ",", "width", "=", "100", ",", "height", "=", "3", ",", "corner", "=", "\"+\"", ",", "horizontal", "=", "\"-\"", ",", "vertical", "=", "\"|\"", ")", ":", "print", "(", "StrTemplate", ".", "box", "(", "text", "=", "text", ",", "width", "=", "width", ",", "height", "=", "height", ",", "corner", "=", "corner", ",", "horizontal", "=", "horizontal", ",", "vertical", "=", "vertical", ")", ")" ]
Print a formatted ascii text box.
[ "Print", "a", "formatted", "ascii", "text", "box", "." ]
689a60da51cd88680ddbe26e28dbe81e6b01d275
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/text/strtemplate.py#L98-L102
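A usage sketch for the three printing helpers above, called on StrTemplate as in the records; output shapes are illustrative:

    from angora.text.strtemplate import StrTemplate

    StrTemplate.straight_line_show("Section 1", length=40, linestyle="-")
    StrTemplate.indented_show("some text", howmany=2)
    StrTemplate.box_show("hello", width=24, height=3)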
242,210
kevinsprong23/aperture
aperture/util.py
increment
def increment(d, key, val=1): """ increment dict d at key by amount val. No need to return since d is mutable. """ if key in d: d[key] += val else: d[key] = val
python
def increment(d, key, val=1): """ increment dict d at key by amount val. No need to return since d is mutable. """ if key in d: d[key] += val else: d[key] = val
[ "def", "increment", "(", "d", ",", "key", ",", "val", "=", "1", ")", ":", "if", "key", "in", "d", ":", "d", "[", "key", "]", "+=", "val", "else", ":", "d", "[", "key", "]", "=", "val" ]
increment dict d at key by amount val. No need to return since d is mutable.
[ "increment", "dict", "d", "at", "key", "by", "amount", "val", "no", "need", "to", "return", "since", "d", "is", "mutable" ]
d0420fef3b25d8afc0e5ddcfb6fe5f0ff42b9799
https://github.com/kevinsprong23/aperture/blob/d0420fef3b25d8afc0e5ddcfb6fe5f0ff42b9799/aperture/util.py#L7-L15
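A usage sketch for increment, tallying counts in place:

    from aperture.util import increment

    counts = {}
    for word in ["a", "b", "a"]:
        increment(counts, word)    # default step of 1
    increment(counts, "b", val=2)  # explicit step
    # counts is now {'a': 2, 'b': 3}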
242,211
kevinsprong23/aperture
aperture/util.py
floor_nearest
def floor_nearest(x, dx=1): """ floor a number to within a given rounding accuracy """ precision = get_sig_digits(dx) return round(math.floor(float(x) / dx) * dx, precision)
python
def floor_nearest(x, dx=1): """ floor a number to within a given rounding accuracy """ precision = get_sig_digits(dx) return round(math.floor(float(x) / dx) * dx, precision)
[ "def", "floor_nearest", "(", "x", ",", "dx", "=", "1", ")", ":", "precision", "=", "get_sig_digits", "(", "dx", ")", "return", "round", "(", "math", ".", "floor", "(", "float", "(", "x", ")", "/", "dx", ")", "*", "dx", ",", "precision", ")" ]
floor a number to within a given rounding accuracy
[ "floor", "a", "number", "to", "within", "a", "given", "rounding", "accuracy" ]
d0420fef3b25d8afc0e5ddcfb6fe5f0ff42b9799
https://github.com/kevinsprong23/aperture/blob/d0420fef3b25d8afc0e5ddcfb6fe5f0ff42b9799/aperture/util.py#L25-L30
242,212
kevinsprong23/aperture
aperture/util.py
ceil_nearest
def ceil_nearest(x, dx=1): """ ceil a number to within a given rounding accuracy """ precision = get_sig_digits(dx) return round(math.ceil(float(x) / dx) * dx, precision)
python
def ceil_nearest(x, dx=1): """ ceil a number to within a given rounding accuracy """ precision = get_sig_digits(dx) return round(math.ceil(float(x) / dx) * dx, precision)
[ "def", "ceil_nearest", "(", "x", ",", "dx", "=", "1", ")", ":", "precision", "=", "get_sig_digits", "(", "dx", ")", "return", "round", "(", "math", ".", "ceil", "(", "float", "(", "x", ")", "/", "dx", ")", "*", "dx", ",", "precision", ")" ]
ceil a number to within a given rounding accuracy
[ "ceil", "a", "number", "to", "within", "a", "given", "rounding", "accuracy" ]
d0420fef3b25d8afc0e5ddcfb6fe5f0ff42b9799
https://github.com/kevinsprong23/aperture/blob/d0420fef3b25d8afc0e5ddcfb6fe5f0ff42b9799/aperture/util.py#L33-L38
242,213
kevinsprong23/aperture
aperture/util.py
frange
def frange(x, y, jump=1): """ range for floats """ precision = get_sig_digits(jump) while x < y: yield round(x, precision) x += jump
python
def frange(x, y, jump=1): """ range for floats """ precision = get_sig_digits(jump) while x < y: yield round(x, precision) x += jump
[ "def", "frange", "(", "x", ",", "y", ",", "jump", "=", "1", ")", ":", "precision", "=", "get_sig_digits", "(", "jump", ")", "while", "x", "<", "y", ":", "yield", "round", "(", "x", ",", "precision", ")", "x", "+=", "jump" ]
range for floats
[ "range", "for", "floats" ]
d0420fef3b25d8afc0e5ddcfb6fe5f0ff42b9799
https://github.com/kevinsprong23/aperture/blob/d0420fef3b25d8afc0e5ddcfb6fe5f0ff42b9799/aperture/util.py#L41-L48
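A usage sketch for the three numeric helpers above; the results assume get_sig_digits(0.25) == 2, consistent with the rounding the functions perform:

    from aperture.util import floor_nearest, ceil_nearest, frange

    floor_nearest(7.13, dx=0.25)  # 7.0
    ceil_nearest(7.13, dx=0.25)   # 7.25
    list(frange(0, 1, 0.25))      # [0, 0.25, 0.5, 0.75]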
242,214
MacHu-GWU/angora-project
angora/dtypes/orderedset.py
OrderedSet.discard
def discard(self, key): """Remove an item from the set if it is a member. Usage:: >>> s = OrderedSet([1, 2, 3]) >>> s.discard(2) >>> s OrderedSet([1, 3]) **Chinese docs**: Remove an element from the ordered set while keeping the set in order. """ if key in self.map: key, prev, next_item = self.map.pop(key) prev[2] = next_item next_item[1] = prev
python
def discard(self, key): """Remove an item from the set if it is a member. Usage:: >>> s = OrderedSet([1, 2, 3]) >>> s.discard(2) >>> s OrderedSet([1, 3]) **Chinese docs**: Remove an element from the ordered set while keeping the set in order. """ if key in self.map: key, prev, next_item = self.map.pop(key) prev[2] = next_item next_item[1] = prev
[ "def", "discard", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "map", ":", "key", ",", "prev", ",", "next_item", "=", "self", ".", "map", ".", "pop", "(", "key", ")", "prev", "[", "2", "]", "=", "next_item", "next_item", "[", "1", "]", "=", "prev" ]
Remove an item from the set if it is a member. Usage:: >>> s = OrderedSet([1, 2, 3]) >>> s.discard(2) >>> s OrderedSet([1, 3]) **Chinese docs**: Remove an element from the ordered set while keeping the set in order.
[ "Remove", "a", "item", "from", "its", "member", "if", "it", "is", "a", "member", "." ]
689a60da51cd88680ddbe26e28dbe81e6b01d275
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dtypes/orderedset.py#L113-L130
242,215
MacHu-GWU/angora-project
angora/dtypes/orderedset.py
OrderedSet.intersection
def intersection(*argv): """Returns the intersection of multiple sets. Items are ordered by set1, set2, ... **Chinese docs**: Compute the intersection of multiple ordered sets, ordered by the first set, then the second, and so on. """ res = OrderedSet(argv[0]) for ods in argv: res = ods & res return res
python
def intersection(*argv): """Returns the intersection of multiple sets. Items are ordered by set1, set2, ... **Chinese docs**: Compute the intersection of multiple ordered sets, ordered by the first set, then the second, and so on. """ res = OrderedSet(argv[0]) for ods in argv: res = ods & res return res
[ "def", "intersection", "(", "*", "argv", ")", ":", "res", "=", "OrderedSet", "(", "argv", "[", "0", "]", ")", "for", "ods", "in", "argv", ":", "res", "=", "ods", "&", "res", "return", "res" ]
Returns the intersection of multiple sets. Items are ordered by set1, set2, ... **Chinese docs**: Compute the intersection of multiple ordered sets, ordered by the first set, then the second, and so on.
[ "Returns", "the", "intersection", "of", "multiple", "sets", ".", "Items", "are", "ordered", "by", "set1", "set2", "..." ]
689a60da51cd88680ddbe26e28dbe81e6b01d275
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dtypes/orderedset.py#L192-L203
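A usage sketch for the two OrderedSet methods above; intersection is assumed callable as a static method, and keeps only the common elements with the ordering convention its docstring describes:

    from angora.dtypes.orderedset import OrderedSet

    s = OrderedSet([1, 2, 3, 4])
    s.discard(3)  # s is now OrderedSet([1, 2, 4])
    common = OrderedSet.intersection(OrderedSet([3, 1, 2]), OrderedSet([2, 3]))
    # common contains 2 and 3 only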
242,216
saintic/SpliceURL
SpliceURL.py
Splice.do
def do(self): "run it, you can get a good stitching of the complete URL." return urlunparse((self.scheme, self.netloc, self.path, self.params, self.query, self.fragment))
python
def do(self): "run it, you can get a good stitching of the complete URL." return urlunparse((self.scheme, self.netloc, self.path, self.params, self.query, self.fragment))
[ "def", "do", "(", "self", ")", ":", "return", "urlunparse", "(", "(", "self", ".", "scheme", ",", "self", ".", "netloc", ",", "self", ".", "path", ",", "self", ".", "params", ",", "self", ".", "query", ",", "self", ".", "fragment", ")", ")" ]
Run it to get the fully assembled URL.
[ "run", "it", "you", "can", "get", "a", "good", "stitching", "of", "the", "complete", "URL", "." ]
ac2d1e854cbdfcf984bce3682b7f05ccba20938e
https://github.com/saintic/SpliceURL/blob/ac2d1e854cbdfcf984bce3682b7f05ccba20938e/SpliceURL.py#L46-L48
242,217
saintic/SpliceURL
SpliceURL.py
Modify.do
def do(self): "run it, get a new url" scheme, netloc, path, params, query, fragment = Split(self.url).do() if isinstance(self.query, dict): query = query + "&" + urllib.urlencode(self.query) if query else urllib.urlencode(self.query) path = urljoin(path, self.path).replace('\\', '/') if self.path else path return Splice(scheme=scheme, netloc=netloc, path=path, params=params, query=query, fragment=fragment).geturl
python
def do(self): "run it, get a new url" scheme, netloc, path, params, query, fragment = Split(self.url).do() if isinstance(self.query, dict): query = query + "&" + urllib.urlencode(self.query) if query else urllib.urlencode(self.query) path = urljoin(path, self.path).replace('\\', '/') if self.path else path return Splice(scheme=scheme, netloc=netloc, path=path, params=params, query=query, fragment=fragment).geturl
[ "def", "do", "(", "self", ")", ":", "scheme", ",", "netloc", ",", "path", ",", "params", ",", "query", ",", "fragment", "=", "Split", "(", "self", ".", "url", ")", ".", "do", "(", ")", "if", "isinstance", "(", "self", ".", "query", ",", "dict", ")", ":", "query", "=", "query", "+", "\"&\"", "+", "urllib", ".", "urlencode", "(", "self", ".", "query", ")", "if", "query", "else", "urllib", ".", "urlencode", "(", "self", ".", "query", ")", "path", "=", "urljoin", "(", "path", ",", "self", ".", "path", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "if", "self", ".", "path", "else", "path", "return", "Splice", "(", "scheme", "=", "scheme", ",", "netloc", "=", "netloc", ",", "path", "=", "path", ",", "params", "=", "params", ",", "query", "=", "query", ",", "fragment", "=", "fragment", ")", ".", "geturl" ]
run it, get a new url
[ "run", "it", "get", "a", "new", "url" ]
ac2d1e854cbdfcf984bce3682b7f05ccba20938e
https://github.com/saintic/SpliceURL/blob/ac2d1e854cbdfcf984bce3682b7f05ccba20938e/SpliceURL.py#L89-L98
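A usage sketch for the two do() methods above; this is Python 2 code (urllib.urlencode), and the constructor signatures are assumptions inferred from the attributes the methods read:

    from SpliceURL import Splice, Modify

    Splice(scheme='https', netloc='example.com', path='/a').do()
    # -> 'https://example.com/a'
    Modify('https://example.com/a', query={'page': 1}).do()
    # -> 'https://example.com/a?page=1'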
242,218
luismasuelli/django-trackmodels-ritual
grimoire/django/tracked/admin.py
TrackedLiveAdmin.changelist_view
def changelist_view(self, request, extra_context=None): """ Updates the changelist view to include settings from this admin. """ return super(TrackedLiveAdmin, self).changelist_view( request, dict(extra_context or {}, url_name='admin:%s_%s_tracking_report' % (self.model._meta.app_label, self.model._meta.model_name), period_options=self.get_period_options(), report_options=self.get_report_options()) )
python
def changelist_view(self, request, extra_context=None): """ Updates the changelist view to include settings from this admin. """ return super(TrackedLiveAdmin, self).changelist_view( request, dict(extra_context or {}, url_name='admin:%s_%s_tracking_report' % (self.model._meta.app_label, self.model._meta.model_name), period_options=self.get_period_options(), report_options=self.get_report_options()) )
[ "def", "changelist_view", "(", "self", ",", "request", ",", "extra_context", "=", "None", ")", ":", "return", "super", "(", "TrackedLiveAdmin", ",", "self", ")", ".", "changelist_view", "(", "request", ",", "dict", "(", "extra_context", "or", "{", "}", ",", "url_name", "=", "'admin:%s_%s_tracking_report'", "%", "(", "self", ".", "model", ".", "_meta", ".", "app_label", ",", "self", ".", "model", ".", "_meta", ".", "model_name", ")", ",", "period_options", "=", "self", ".", "get_period_options", "(", ")", ",", "report_options", "=", "self", ".", "get_report_options", "(", ")", ")", ")" ]
Updates the changelist view to include settings from this admin.
[ "Updates", "the", "changelist", "view", "to", "include", "settings", "from", "this", "admin", "." ]
ee0a6e07a5851ed477c9c1e3b9f8aafd9da35657
https://github.com/luismasuelli/django-trackmodels-ritual/blob/ee0a6e07a5851ed477c9c1e3b9f8aafd9da35657/grimoire/django/tracked/admin.py#L154-L164
242,219
luismasuelli/django-trackmodels-ritual
grimoire/django/tracked/admin.py
TrackedLiveAdmin.render_report_error
def render_report_error(self, request, error, status): """ Renders the report errors template. """ opts = self.model._meta app_label = opts.app_label request.current_app = self.admin_site.name context = dict( self.admin_site.each_context(request), module_name=force_text(opts.verbose_name_plural), title=(_('Tracking report error for %s') % force_text(opts.verbose_name)), opts=opts, app_label=app_label, error=error ) return TemplateResponse(request, self.report_error_template or [ "admin/{}/{}/tracking_report_error.html".format(app_label, opts.model_name), "admin/{}/tracking_report_error.html".format(app_label), "admin/tracking_report_error.html" ], context, status=status)
python
def render_report_error(self, request, error, status): """ Renders the report errors template. """ opts = self.model._meta app_label = opts.app_label request.current_app = self.admin_site.name context = dict( self.admin_site.each_context(request), module_name=force_text(opts.verbose_name_plural), title=(_('Tracking report error for %s') % force_text(opts.verbose_name)), opts=opts, app_label=app_label, error=error ) return TemplateResponse(request, self.report_error_template or [ "admin/{}/{}/tracking_report_error.html".format(app_label, opts.model_name), "admin/{}/tracking_report_error.html".format(app_label), "admin/tracking_report_error.html" ], context, status=status)
[ "def", "render_report_error", "(", "self", ",", "request", ",", "error", ",", "status", ")", ":", "opts", "=", "self", ".", "model", ".", "_meta", "app_label", "=", "opts", ".", "app_label", "request", ".", "current_app", "=", "self", ".", "admin_site", ".", "name", "context", "=", "dict", "(", "self", ".", "admin_site", ".", "each_context", "(", "request", ")", ",", "module_name", "=", "force_text", "(", "opts", ".", "verbose_name_plural", ")", ",", "title", "=", "(", "_", "(", "'Tracking report error for %s'", ")", "%", "force_text", "(", "opts", ".", "verbose_name", ")", ")", ",", "opts", "=", "opts", ",", "app_label", "=", "app_label", ",", "error", "=", "error", ")", "return", "TemplateResponse", "(", "request", ",", "self", ".", "report_error_template", "or", "[", "\"admin/{}/{}/tracking_report_error.html\"", ".", "format", "(", "app_label", ",", "opts", ".", "model_name", ")", ",", "\"admin/{}/tracking_report_error.html\"", ".", "format", "(", "app_label", ")", ",", "\"admin/tracking_report_error.html\"", "]", ",", "context", ",", "status", "=", "status", ")" ]
Renders the report errors template.
[ "Renders", "the", "report", "errors", "template", "." ]
ee0a6e07a5851ed477c9c1e3b9f8aafd9da35657
https://github.com/luismasuelli/django-trackmodels-ritual/blob/ee0a6e07a5851ed477c9c1e3b9f8aafd9da35657/grimoire/django/tracked/admin.py#L166-L185
242,220
luismasuelli/django-trackmodels-ritual
grimoire/django/tracked/admin.py
TrackedLiveAdmin.report_view
def report_view(self, request, key, period): """ Processes the reporting action. """ if not self.has_change_permission(request, None): raise PermissionDenied reporters = self.get_reporters() try: reporter = reporters[key] except KeyError: return self.render_report_error(request, _('Report not found'), 404) allowed_periods = [k for (k, v) in self.get_period_options()] if period == 'A': period = '' if period and period not in allowed_periods: return self.render_report_error(request, _('Invalid report type'), 400) try: return reporter.process(request, self.get_period_queryset(request, period), period) except: logger.exception('Tracking Reports could not generate the report due to an internal error') return self.render_report_error(request, _('An unexpected error has occurred'), 500)
python
def report_view(self, request, key, period): """ Processes the reporting action. """ if not self.has_change_permission(request, None): raise PermissionDenied reporters = self.get_reporters() try: reporter = reporters[key] except KeyError: return self.render_report_error(request, _('Report not found'), 404) allowed_periods = [k for (k, v) in self.get_period_options()] if period == 'A': period = '' if period and period not in allowed_periods: return self.render_report_error(request, _('Invalid report type'), 400) try: return reporter.process(request, self.get_period_queryset(request, period), period) except: logger.exception('Tracking Reports could not generate the report due to an internal error') return self.render_report_error(request, _('An unexpected error has occurred'), 500)
[ "def", "report_view", "(", "self", ",", "request", ",", "key", ",", "period", ")", ":", "if", "not", "self", ".", "has_change_permission", "(", "request", ",", "None", ")", ":", "raise", "PermissionDenied", "reporters", "=", "self", ".", "get_reporters", "(", ")", "try", ":", "reporter", "=", "reporters", "[", "key", "]", "except", "KeyError", ":", "return", "self", ".", "render_report_error", "(", "request", ",", "_", "(", "'Report not found'", ")", ",", "404", ")", "allowed_periods", "=", "[", "k", "for", "(", "k", ",", "v", ")", "in", "self", ".", "get_period_options", "(", ")", "]", "if", "period", "==", "'A'", ":", "period", "=", "''", "if", "period", "and", "period", "not", "in", "allowed_periods", ":", "return", "self", ".", "render_report_error", "(", "request", ",", "_", "(", "'Invalid report type'", ")", ",", "400", ")", "try", ":", "return", "reporter", ".", "process", "(", "request", ",", "self", ".", "get_period_queryset", "(", "request", ",", "period", ")", ",", "period", ")", "except", ":", "logger", ".", "exception", "(", "'Tracking Reports could not generate the report due to an internal error'", ")", "return", "self", ".", "render_report_error", "(", "request", ",", "_", "(", "'An unexpected error has occurred'", ")", ",", "500", ")" ]
Processes the reporting action.
[ "Processes", "the", "reporting", "action", "." ]
ee0a6e07a5851ed477c9c1e3b9f8aafd9da35657
https://github.com/luismasuelli/django-trackmodels-ritual/blob/ee0a6e07a5851ed477c9c1e3b9f8aafd9da35657/grimoire/django/tracked/admin.py#L207-L232
242,221
almcc/cinder-data
cinder_data/store.py
Store.find_record
def find_record(self, model_class, record_id, reload=False): """Return an instance of model_class from the API or the local cache. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. reload (bool, optional): Don't return the cached version if reload==True. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None. """ cached_model = self.peek_record(model_class, record_id) if cached_model is not None and reload is False: return cached_model else: return self._get_record(model_class, record_id)
python
def find_record(self, model_class, record_id, reload=False): """Return an instance of model_class from the API or the local cache. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. reload (bool, optional): Don't return the cached version if reload==True. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None. """ cached_model = self.peek_record(model_class, record_id) if cached_model is not None and reload is False: return cached_model else: return self._get_record(model_class, record_id)
[ "def", "find_record", "(", "self", ",", "model_class", ",", "record_id", ",", "reload", "=", "False", ")", ":", "cached_model", "=", "self", ".", "peek_record", "(", "model_class", ",", "record_id", ")", "if", "cached_model", "is", "not", "None", "and", "reload", "is", "False", ":", "return", "cached_model", "else", ":", "return", "self", ".", "_get_record", "(", "model_class", ",", "record_id", ")" ]
Return an instance of model_class from the API or the local cache. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. reload (bool, optional): Don't return the cached version if reload==True. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None.
[ "Return", "a", "instance", "of", "model_class", "from", "the", "API", "or", "the", "local", "cache", "." ]
4159a5186c4b4fc32354749892e86130530f6ec5
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/cinder_data/store.py#L23-L39
242,222
almcc/cinder-data
cinder_data/store.py
Store.peek_record
def peek_record(self, model_class, record_id): """Return an instance of the model_class from the cache if it is present. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None. """ if self._cache: return self._cache.get_record(model_class.__name__, record_id) else: return None
python
def peek_record(self, model_class, record_id): """Return an instance of the model_class from the cache if it is present. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None. """ if self._cache: return self._cache.get_record(model_class.__name__, record_id) else: return None
[ "def", "peek_record", "(", "self", ",", "model_class", ",", "record_id", ")", ":", "if", "self", ".", "_cache", ":", "return", "self", ".", "_cache", ".", "get_record", "(", "model_class", ".", "__name__", ",", "record_id", ")", "else", ":", "return", "None" ]
Return an instance of the model_class from the cache if it is present. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None.
[ "Return", "an", "instance", "of", "the", "model_class", "from", "the", "cache", "if", "it", "is", "present", "." ]
4159a5186c4b4fc32354749892e86130530f6ec5
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/cinder_data/store.py#L41-L55
242,223
almcc/cinder-data
cinder_data/store.py
Store.find_all
def find_all(self, model_class, params={}): """Return a list of models from the API and cache the result. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. params (dict, optional): Query params to append to the request URL. Returns: list: A list of instances of your model_class, or an empty list. """ url = '{host}/{namespace}/{model}{params}'.format( host=self._host, namespace=self._namespace, model=self._translate_name(model_class.__name__), params=self._build_param_string(params) ) data = self._get_json(url)['data'] fresh_models = [] for item in data: fresh_model = model_class(item['attributes']) fresh_model.id = item['id'] fresh_model.validate() fresh_models.append(fresh_model) if self._cache is not None: self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model) return fresh_models
python
def find_all(self, model_class, params={}): """Return a list of models from the API and cache the result. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. params (dict, optional): Query params to append to the request URL. Returns: list: A list of instances of your model_class, or an empty list. """ url = '{host}/{namespace}/{model}{params}'.format( host=self._host, namespace=self._namespace, model=self._translate_name(model_class.__name__), params=self._build_param_string(params) ) data = self._get_json(url)['data'] fresh_models = [] for item in data: fresh_model = model_class(item['attributes']) fresh_model.id = item['id'] fresh_model.validate() fresh_models.append(fresh_model) if self._cache is not None: self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model) return fresh_models
[ "def", "find_all", "(", "self", ",", "model_class", ",", "params", "=", "{", "}", ")", ":", "url", "=", "'{host}/{namespace}/{model}{params}'", ".", "format", "(", "host", "=", "self", ".", "_host", ",", "namespace", "=", "self", ".", "_namespace", ",", "model", "=", "self", ".", "_translate_name", "(", "model_class", ".", "__name__", ")", ",", "params", "=", "self", ".", "_build_param_string", "(", "params", ")", ")", "data", "=", "self", ".", "_get_json", "(", "url", ")", "[", "'data'", "]", "fresh_models", "=", "[", "]", "for", "item", "in", "data", ":", "fresh_model", "=", "model_class", "(", "item", "[", "'attributes'", "]", ")", "fresh_model", ".", "id", "=", "item", "[", "'id'", "]", "fresh_model", ".", "validate", "(", ")", "fresh_models", ".", "append", "(", "fresh_model", ")", "if", "self", ".", "_cache", "is", "not", "None", ":", "self", ".", "_cache", ".", "set_record", "(", "model_class", ".", "__name__", ",", "fresh_model", ".", "id", ",", "fresh_model", ")", "return", "fresh_models" ]
Return a list of models from the API and cache the result. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. params (dict, optional): Query params to append to the request URL. Returns: list: A list of instances of your model_class, or an empty list.
[ "Return", "an", "list", "of", "models", "from", "the", "API", "and", "caches", "the", "result", "." ]
4159a5186c4b4fc32354749892e86130530f6ec5
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/cinder_data/store.py#L57-L83
242,224
almcc/cinder-data
cinder_data/store.py
Store.peek_all
def peek_all(self, model_class): """Return a list of models from the local cache. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. Returns: list: A list of instances of your model_class, or an empty list. """ if self._cache: return self._cache.get_records(model_class.__name__) else: return []
python
def peek_all(self, model_class): """Return a list of models from the local cache. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. Returns: list: A list of instances of your model_class, or an empty list. """ if self._cache: return self._cache.get_records(model_class.__name__) else: return []
[ "def", "peek_all", "(", "self", ",", "model_class", ")", ":", "if", "self", ".", "_cache", ":", "return", "self", ".", "_cache", ".", "get_records", "(", "model_class", ".", "__name__", ")", "else", ":", "return", "[", "]" ]
Return a list of models from the local cache. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. Returns: list: A list of instances of your model_class, or an empty list.
[ "Return", "a", "list", "of", "models", "from", "the", "local", "cache", "." ]
4159a5186c4b4fc32354749892e86130530f6ec5
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/cinder_data/store.py#L85-L98
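A combined usage sketch for the four Store methods above; the constructor arguments are assumptions based on the _host and _namespace attributes the methods use, and Car stands in for a hypothetical CinderModel subclass:

    from cinder_data.store import Store

    store = Store(host='https://api.example.com', namespace='v1')  # args assumed
    car = store.find_record(Car, 1)    # cache hit, or HTTP fetch that is then cached
    car = store.peek_record(Car, 1)    # cache only, no HTTP; None on a miss
    cars = store.find_all(Car, {'page': 2})  # list fetch, each result cached
    cached = store.peek_all(Car)       # cache only; [] when nothing is cached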
242,225
almcc/cinder-data
cinder_data/store.py
Store._get_record
def _get_record(self, model_class, record_id): """Get a single record from the API. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None. """ url = '{host}/{namespace}/{model}/{id}'.format( host=self._host, namespace=self._namespace, model=self._translate_name(model_class.__name__), id=record_id ) data = self._get_json(url)['data'] fresh_model = model_class(data['attributes']) fresh_model.id = data['id'] fresh_model.validate() if self._cache is not None: self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model) return fresh_model
python
def _get_record(self, model_class, record_id): """Get a single record from the API. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None. """ url = '{host}/{namespace}/{model}/{id}'.format( host=self._host, namespace=self._namespace, model=self._translate_name(model_class.__name__), id=record_id ) data = self._get_json(url)['data'] fresh_model = model_class(data['attributes']) fresh_model.id = data['id'] fresh_model.validate() if self._cache is not None: self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model) return fresh_model
[ "def", "_get_record", "(", "self", ",", "model_class", ",", "record_id", ")", ":", "url", "=", "'{host}/{namespace}/{model}/{id}'", ".", "format", "(", "host", "=", "self", ".", "_host", ",", "namespace", "=", "self", ".", "_namespace", ",", "model", "=", "self", ".", "_translate_name", "(", "model_class", ".", "__name__", ")", ",", "id", "=", "record_id", ")", "data", "=", "self", ".", "_get_json", "(", "url", ")", "[", "'data'", "]", "fresh_model", "=", "model_class", "(", "data", "[", "'attributes'", "]", ")", "fresh_model", ".", "id", "=", "data", "[", "'id'", "]", "fresh_model", ".", "validate", "(", ")", "if", "self", ".", "_cache", "is", "not", "None", ":", "self", ".", "_cache", ".", "set_record", "(", "model_class", ".", "__name__", ",", "fresh_model", ".", "id", ",", "fresh_model", ")", "return", "fresh_model" ]
Get a single record from the API. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None.
[ "Get", "a", "single", "record", "from", "the", "API", "." ]
4159a5186c4b4fc32354749892e86130530f6ec5
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/cinder_data/store.py#L100-L123
242,226
almcc/cinder-data
cinder_data/store.py
Store._translate_name
def _translate_name(name): """Translate the class name to the API endpoint. For example, Car would become cars, FastCar would become fast-cars. Args: name (string): Camel case name (singular) Returns: string: A pluralized, dasherized string. """ underscored = inflection.underscore(name) dasherized = inflection.dasherize(underscored) words = dasherized.split('-') last_word = words.pop() words.append(inflection.pluralize(last_word)) return '-'.join(words)
python
def _translate_name(name): """Translate the class name to the API endpoint. For example, Car would become cars, FastCar would become fast-cars. Args: name (string): Camel case name (singular) Returns: string: A pluralized, dasherized string. """ underscored = inflection.underscore(name) dasherized = inflection.dasherize(underscored) words = dasherized.split('-') last_word = words.pop() words.append(inflection.pluralize(last_word)) return '-'.join(words)
[ "def", "_translate_name", "(", "name", ")", ":", "underscored", "=", "inflection", ".", "underscore", "(", "name", ")", "dasherized", "=", "inflection", ".", "dasherize", "(", "underscored", ")", "words", "=", "dasherized", ".", "split", "(", "'-'", ")", "last_word", "=", "words", ".", "pop", "(", ")", "words", ".", "append", "(", "inflection", ".", "pluralize", "(", "last_word", ")", ")", "return", "'-'", ".", "join", "(", "words", ")" ]
Translate the class name to the API endpoint. For example, Car would become cars, FastCar would become fast-cars. Args: name (string): Camel case name (singular) Returns: string: A pluralized, dasherized string.
[ "Translate", "the", "class", "name", "to", "the", "API", "endpoint", "." ]
4159a5186c4b4fc32354749892e86130530f6ec5
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/cinder_data/store.py#L139-L155
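A sketch of the name translation, matching the docstring's own examples:

    from cinder_data.store import Store

    Store._translate_name('Car')      # 'cars'
    Store._translate_name('FastCar')  # 'fast-cars'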
242,227
almcc/cinder-data
cinder_data/store.py
Store._build_param_string
def _build_param_string(params): """Build query params string from a dictionary. Args: params (dict): A dictionary of params Returns: string: A valid url query params string. """ pairs = [] for key, value in params.iteritems(): if value is None: value = '' pairs.append('{0}={1}'.format(key, value)) if len(pairs) > 0: return '?{0}'.format('&'.join(pairs)) return ''
python
def _build_param_string(params): """Build query params string from a dictionary. Args: params (dict): A dictionary of params Returns: string: A valid url query params string. """ pairs = [] for key, value in params.iteritems(): if value is None: value = '' pairs.append('{0}={1}'.format(key, value)) if len(pairs) > 0: return '?{0}'.format('&'.join(pairs)) return ''
[ "def", "_build_param_string", "(", "params", ")", ":", "pairs", "=", "[", "]", "for", "key", ",", "value", "in", "params", ".", "iteritems", "(", ")", ":", "if", "value", "is", "None", ":", "value", "=", "''", "pairs", ".", "append", "(", "'{0}={1}'", ".", "format", "(", "key", ",", "value", ")", ")", "if", "len", "(", "pairs", ")", ">", "0", ":", "return", "'?{0}'", ".", "format", "(", "'&'", ".", "join", "(", "pairs", ")", ")", "return", "''" ]
Build query params string from a dictionary. Args: params (dict): A dictionary of params Returns: string: A valid url query params string.
[ "Build", "query", "params", "string", "from", "a", "dictionary", "." ]
4159a5186c4b4fc32354749892e86130530f6ec5
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/cinder_data/store.py#L158-L174
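A sketch of the query-string builder above; None values become empty strings, pair order follows dict iteration order (iteritems marks this as Python 2 code):

    from cinder_data.store import Store

    Store._build_param_string({'page': 2, 'q': None})  # e.g. '?page=2&q='
    Store._build_param_string({})                      # ''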
242,228
selenol/selenol-python
selenol_python/connections.py
SelenolWSConnection.send
def send(self, message): """Send the given message to the backend. :param message: Message to be sent, usually a Python dictionary. """ try: self.ws.send(json.dumps(message)) except websocket._exceptions.WebSocketConnectionClosedException: raise SelenolWebSocketClosedException()
python
def send(self, message): """Send the given message to the backend. :param message: Message to be sent, usually a Python dictionary. """ try: self.ws.send(json.dumps(message)) except websocket._exceptions.WebSocketConnectionClosedException: raise SelenolWebSocketClosedException()
[ "def", "send", "(", "self", ",", "message", ")", ":", "try", ":", "self", ".", "ws", ".", "send", "(", "json", ".", "dumps", "(", "message", ")", ")", "except", "websocket", ".", "_exceptions", ".", "WebSocketConnectionClosedException", ":", "raise", "SelenolWebSocketClosedException", "(", ")" ]
Send the given message to the backend. :param message: Message to be sent, usually a Python dictionary.
[ "Send", "a", "the", "defined", "message", "to", "the", "backend", "." ]
53775fdfc95161f4aca350305cb3459e6f2f808d
https://github.com/selenol/selenol-python/blob/53775fdfc95161f4aca350305cb3459e6f2f808d/selenol_python/connections.py#L37-L45
242,229
selenol/selenol-python
selenol_python/connections.py
SelenolWSConnection.recv
def recv(self): """Receive a message from the backend or wait until the next message.""" try: message = self.ws.recv() return json.loads(message) except websocket._exceptions.WebSocketConnectionClosedException as ex: raise SelenolWebSocketClosedException() from ex
python
def recv(self): """Receive a message from the backend or wait until the next message.""" try: message = self.ws.recv() return json.loads(message) except websocket._exceptions.WebSocketConnectionClosedException as ex: raise SelenolWebSocketClosedException() from ex
[ "def", "recv", "(", "self", ")", ":", "try", ":", "message", "=", "self", ".", "ws", ".", "recv", "(", ")", "return", "json", ".", "loads", "(", "message", ")", "except", "websocket", ".", "_exceptions", ".", "WebSocketConnectionClosedException", "as", "ex", ":", "raise", "SelenolWebSocketClosedException", "(", ")", "from", "ex" ]
Receive a message from the backend or wait until the next message.
[ "Receive", "message", "from", "the", "backend", "or", "wait", "unilt", "next", "message", "." ]
53775fdfc95161f4aca350305cb3459e6f2f808d
https://github.com/selenol/selenol-python/blob/53775fdfc95161f4aca350305cb3459e6f2f808d/selenol_python/connections.py#L47-L53
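A round-trip sketch for send and recv; the connection constructor and the exception import path are assumptions, only the two methods come from the records above:

    from selenol_python.connections import SelenolWSConnection
    from selenol_python.exceptions import SelenolWebSocketClosedException  # path assumed

    conn = SelenolWSConnection('ws://localhost:8765')  # constructor args assumed
    conn.send({'method': 'ping'})
    try:
        reply = conn.recv()  # blocks until the next message arrives
    except SelenolWebSocketClosedException:
        reply = None         # backend closed the socket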
242,230
krukas/Trionyx
trionyx/trionyx/management/commands/create_app.py
Command.handle
def handle(self, *args, **options): """Create new app""" quickstart = Quickstart() try: quickstart.create_app(os.path.join(settings.BASE_DIR, 'apps'), options.get('name')) self.stdout.write( self.style.SUCCESS("Successfully created app ({name}), don't forget to add 'apps.{name}' to INSTALLED_APPS".format( name=options.get('name') )) ) except FileExistsError as e: print(e) raise CommandError("App with same name already exists")
python
def handle(self, *args, **options): """Create new app""" quickstart = Quickstart() try: quickstart.create_app(os.path.join(settings.BASE_DIR, 'apps'), options.get('name')) self.stdout.write( self.style.SUCCESS("Successfully created app ({name}), don't forget to add 'apps.{name}' to INSTALLED_APPS".format( name=options.get('name') )) ) except FileExistsError as e: print(e) raise CommandError("App with same name already exists")
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "quickstart", "=", "Quickstart", "(", ")", "try", ":", "quickstart", ".", "create_app", "(", "os", ".", "path", ".", "join", "(", "settings", ".", "BASE_DIR", ",", "'apps'", ")", ",", "options", ".", "get", "(", "'name'", ")", ")", "self", ".", "stdout", ".", "write", "(", "self", ".", "style", ".", "SUCCESS", "(", "\"Successfully created app ({name}), don't forget to add 'apps.{name}' to INSTALLED_APPS\"", ".", "format", "(", "name", "=", "options", ".", "get", "(", "'name'", ")", ")", ")", ")", "except", "FileExistsError", "as", "e", ":", "print", "(", "e", ")", "raise", "CommandError", "(", "\"App with same name already exists\"", ")" ]
Create new app
[ "Create", "new", "app" ]
edac132cc0797190153f2e60bc7e88cb50e80da6
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/management/commands/create_app.py#L25-L38
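An invocation sketch for the management command above; the app name is a placeholder:

    $ python manage.py create_app blog
    # afterwards, add 'apps.blog' to INSTALLED_APPS, as the success message reminds you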
242,231
ANCIR/granoloader
granoloader/command.py
make_client
def make_client(host, project_name, api_key, create_project): """ Instantiate the grano client based on environment variables or command line settings. """ if host is None: raise click.BadParameter('No grano server host is set', param=host) if project_name is None: raise click.BadParameter('No grano project slug is set', param=project_name) if api_key is None: raise click.BadParameter('No grano API key is set', param=api_key) client = Grano(api_host=host, api_key=api_key) try: return client.get(project_name) except NotFound: if not create_project: sys.exit(-1) data = {'slug': project_name, 'label': project_name} return client.projects.create(data)
python
def make_client(host, project_name, api_key, create_project): """ Instantiate the grano client based on environment variables or command line settings. """ if host is None: raise click.BadParameter('No grano server host is set', param=host) if project_name is None: raise click.BadParameter('No grano project slug is set', param=project_name) if api_key is None: raise click.BadParameter('No grano API key is set', param=api_key) client = Grano(api_host=host, api_key=api_key) try: return client.get(project_name) except NotFound: if not create_project: sys.exit(-1) data = {'slug': project_name, 'label': project_name} return client.projects.create(data)
[ "def", "make_client", "(", "host", ",", "project_name", ",", "api_key", ",", "create_project", ")", ":", "if", "host", "is", "None", ":", "raise", "click", ".", "BadParameter", "(", "'No grano server host is set'", ",", "param", "=", "host", ")", "if", "project_name", "is", "None", ":", "raise", "click", ".", "BadParameter", "(", "'No grano project slug is set'", ",", "param", "=", "project_name", ")", "if", "api_key", "is", "None", ":", "raise", "click", ".", "BadParameter", "(", "'No grano API key is set'", ",", "param", "=", "api_key", ")", "client", "=", "Grano", "(", "api_host", "=", "host", ",", "api_key", "=", "api_key", ")", "try", ":", "return", "client", ".", "get", "(", "project_name", ")", "except", "NotFound", ":", "if", "not", "create_project", ":", "sys", ".", "exit", "(", "-", "1", ")", "data", "=", "{", "'slug'", ":", "project_name", ",", "'label'", ":", "project_name", "}", "return", "client", ".", "projects", ".", "create", "(", "data", ")" ]
Instantiate the grano client based on environment variables or command line settings.
[ "Instantiate", "the", "grano", "client", "based", "on", "environment", "variables", "or", "command", "line", "settings", "." ]
c48b1bd50403dd611340c5f51637f7c5ca54059c
https://github.com/ANCIR/granoloader/blob/c48b1bd50403dd611340c5f51637f7c5ca54059c/granoloader/command.py#L21-L40
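A hedged usage sketch for the record above; the host, project slug, and key are illustrative placeholders, not values from the source repo.

# Hypothetical settings; with create_project=True a missing project is
# created on the server instead of aborting via sys.exit(-1).
project = make_client(host='https://grano.example.org',
                      project_name='my-network',
                      api_key='0123456789abcdef',
                      create_project=True)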
242,232
ANCIR/granoloader
granoloader/command.py
csv
def csv(ctx, force, threads, mapping, data): """ Load CSV data into a grano instance using a mapping specification. """ # Find out how many lines there are (for the progress bar). lines = 0 for line in DictReader(data): lines += 1 data.seek(0) # set up objects mapping = yaml.load(mapping) mapping_loader = MappingLoader(ctx.obj['grano'], mapping) def process_row(row): try: mapping_loader.load(row) except GranoException, ge: msg = '\nServer error: %s' % ge.message click.secho(msg, fg='red', bold=True) if not force: os._exit(1) except RowException, re: if not force: msg = '\nRow %s: %s' % (row['__row_id__'], re.message) click.secho(msg, fg='red', bold=True) os._exit(1) def generate(): with click.progressbar(DictReader(data), label=data.name, length=lines) as bar: for i, row in enumerate(bar): row['__row_id__'] = i yield row threaded(generate(), process_row, num_threads=threads, max_queue=1)
python
def csv(ctx, force, threads, mapping, data): """ Load CSV data into a grano instance using a mapping specification. """ # Find out how many lines there are (for the progress bar). lines = 0 for line in DictReader(data): lines += 1 data.seek(0) # set up objects mapping = yaml.load(mapping) mapping_loader = MappingLoader(ctx.obj['grano'], mapping) def process_row(row): try: mapping_loader.load(row) except GranoException, ge: msg = '\nServer error: %s' % ge.message click.secho(msg, fg='red', bold=True) if not force: os._exit(1) except RowException, re: if not force: msg = '\nRow %s: %s' % (row['__row_id__'], re.message) click.secho(msg, fg='red', bold=True) os._exit(1) def generate(): with click.progressbar(DictReader(data), label=data.name, length=lines) as bar: for i, row in enumerate(bar): row['__row_id__'] = i yield row threaded(generate(), process_row, num_threads=threads, max_queue=1)
[ "def", "csv", "(", "ctx", ",", "force", ",", "threads", ",", "mapping", ",", "data", ")", ":", "# Find out how many lines there are (for the progress bar).", "lines", "=", "0", "for", "line", "in", "DictReader", "(", "data", ")", ":", "lines", "+=", "1", "data", ".", "seek", "(", "0", ")", "# set up objects", "mapping", "=", "yaml", ".", "load", "(", "mapping", ")", "mapping_loader", "=", "MappingLoader", "(", "ctx", ".", "obj", "[", "'grano'", "]", ",", "mapping", ")", "def", "process_row", "(", "row", ")", ":", "try", ":", "mapping_loader", ".", "load", "(", "row", ")", "except", "GranoException", ",", "ge", ":", "msg", "=", "'\\nServer error: %s'", "%", "ge", ".", "message", "click", ".", "secho", "(", "msg", ",", "fg", "=", "'red'", ",", "bold", "=", "True", ")", "if", "not", "force", ":", "os", ".", "_exit", "(", "1", ")", "except", "RowException", ",", "re", ":", "if", "not", "force", ":", "msg", "=", "'\\nRow %s: %s'", "%", "(", "row", "[", "'__row_id__'", "]", ",", "re", ".", "message", ")", "click", ".", "secho", "(", "msg", ",", "fg", "=", "'red'", ",", "bold", "=", "True", ")", "os", ".", "_exit", "(", "1", ")", "def", "generate", "(", ")", ":", "with", "click", ".", "progressbar", "(", "DictReader", "(", "data", ")", ",", "label", "=", "data", ".", "name", ",", "length", "=", "lines", ")", "as", "bar", ":", "for", "i", ",", "row", "in", "enumerate", "(", "bar", ")", ":", "row", "[", "'__row_id__'", "]", "=", "i", "yield", "row", "threaded", "(", "generate", "(", ")", ",", "process_row", ",", "num_threads", "=", "threads", ",", "max_queue", "=", "1", ")" ]
Load CSV data into a grano instance using a mapping specification.
[ "Load", "CSV", "data", "into", "a", "grano", "instance", "using", "a", "mapping", "specification", "." ]
c48b1bd50403dd611340c5f51637f7c5ca54059c
https://github.com/ANCIR/granoloader/blob/c48b1bd50403dd611340c5f51637f7c5ca54059c/granoloader/command.py#L69-L105
242,233
ANCIR/granoloader
granoloader/command.py
schema
def schema(ctx, schema): """ Load schema definitions from a YAML file. """ data = yaml.load(schema) if not isinstance(data, (list, tuple)): data = [data] with click.progressbar(data, label=schema.name) as bar: for schema in bar: ctx.obj['grano'].schemata.upsert(schema)
python
def schema(ctx, schema): """ Load schema definitions from a YAML file. """ data = yaml.load(schema) if not isinstance(data, (list, tuple)): data = [data] with click.progressbar(data, label=schema.name) as bar: for schema in bar: ctx.obj['grano'].schemata.upsert(schema)
[ "def", "schema", "(", "ctx", ",", "schema", ")", ":", "data", "=", "yaml", ".", "load", "(", "schema", ")", "if", "not", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ")", ")", ":", "data", "=", "[", "data", "]", "with", "click", ".", "progressbar", "(", "data", ",", "label", "=", "schema", ".", "name", ")", "as", "bar", ":", "for", "schema", "in", "bar", ":", "ctx", ".", "obj", "[", "'grano'", "]", ".", "schemata", ".", "upsert", "(", "schema", ")" ]
Load schema definitions from a YAML file.
[ "Load", "schema", "definitions", "from", "a", "YAML", "file", "." ]
c48b1bd50403dd611340c5f51637f7c5ca54059c
https://github.com/ANCIR/granoloader/blob/c48b1bd50403dd611340c5f51637f7c5ca54059c/granoloader/command.py#L111-L118
242,234
JNRowe/jnrbase
jnrbase/json_datetime.py
json_using_iso8601
def json_using_iso8601(__obj: Dict) -> Dict: """Parse ISO-8601 values from JSON databases. See :class:`json.JSONDecoder` Args: __obj: Object to decode """ for key, value in __obj.items(): with suppress(TypeError, ValueError): __obj[key] = parse_datetime(value) with suppress(TypeError, ValueError): __obj[key] = parse_delta(value) return __obj
python
def json_using_iso8601(__obj: Dict) -> Dict: """Parse ISO-8601 values from JSON databases. See :class:`json.JSONDecoder` Args: __obj: Object to decode """ for key, value in __obj.items(): with suppress(TypeError, ValueError): __obj[key] = parse_datetime(value) with suppress(TypeError, ValueError): __obj[key] = parse_delta(value) return __obj
[ "def", "json_using_iso8601", "(", "__obj", ":", "Dict", ")", "->", "Dict", ":", "for", "key", ",", "value", "in", "__obj", ".", "items", "(", ")", ":", "with", "suppress", "(", "TypeError", ",", "ValueError", ")", ":", "__obj", "[", "key", "]", "=", "parse_datetime", "(", "value", ")", "with", "suppress", "(", "TypeError", ",", "ValueError", ")", ":", "__obj", "[", "key", "]", "=", "parse_delta", "(", "value", ")", "return", "__obj" ]
Parse ISO-8601 values from JSON databases. See :class:`json.JSONDecoder` Args: __obj: Object to decode
[ "Parse", "ISO", "-", "8601", "values", "from", "JSON", "databases", "." ]
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/json_datetime.py#L61-L74
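Because the docstring points at :class:`json.JSONDecoder`, a plausible wiring is as an ``object_hook`` for ``json.loads``; the payload below is illustrative.

import json

# Every decoded JSON object passes through the hook, so ISO-8601 strings
# come back as datetime/timedelta values where parsing succeeds and are
# left untouched otherwise (TypeError/ValueError are suppressed).
doc = json.loads('{"start": "2018-01-02T03:04:05", "name": "run-1"}',
                 object_hook=json_using_iso8601)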
242,235
lddubeau/glerbl
glerbl/__init__.py
get_tmpdir
def get_tmpdir(): """ On first invocation, creates a temporary directory and returns its path. Subsequent invocations use the same directory. :returns: A temporary directory created for this run of glerbl. :rtype: :class:`str` """ global __tmpdir if __tmpdir is not None: return __tmpdir __tmpdir = tempfile.mkdtemp(prefix='.tmp.glerbl.', dir=".") atexit.register(__clean_tmpdir) return __tmpdir

python
def get_tmpdir(): """ On first invocation, creates a temporary directory and returns its path. Subsequent invocations use the same directory. :returns: A temporary directory created for this run of glerbl. :rtype: :class:`str` """ global __tmpdir if __tmpdir is not None: return __tmpdir __tmpdir = tempfile.mkdtemp(prefix='.tmp.glerbl.', dir=".") atexit.register(__clean_tmpdir) return __tmpdir
[ "def", "get_tmpdir", "(", ")", ":", "global", "__tmpdir", "if", "__tmpdir", "is", "not", "None", ":", "return", "__tmpdir", "__tmpdir", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'.tmp.glerbl.'", ",", "dir", "=", "\".\"", ")", "atexit", ".", "register", "(", "__clean_tmpdir", ")", "return", "__tmpdir" ]
On first invocation, creates a temporary directory and returns its path. Subsequent invocations use the same directory. :returns: A temporary directory created for this run of glerbl. :rtype: :class:`str`
[ "On", "first", "invocation", "creates", "a", "temporary", "directory", "and", "returns", "its", "path", ".", "Subsequent", "invocations", "use", "the", "same", "directory", "." ]
b43adf851f8ce4c5b1dba22ee3556a169681bce1
https://github.com/lddubeau/glerbl/blob/b43adf851f8ce4c5b1dba22ee3556a169681bce1/glerbl/__init__.py#L17-L32
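A short sketch of the documented caching behaviour.

# The first call creates '.tmp.glerbl.*' in the working directory and
# registers cleanup via atexit; later calls return the same path.
first = get_tmpdir()
assert get_tmpdir() == first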
242,236
lddubeau/glerbl
glerbl/__init__.py
get_against
def get_against(): """ Determines the revision against which the staged data ought to be checked. :returns: The revision. :rtype: :class:`str` """ global __cached_against if __cached_against is not None: return __cached_against status = subprocess.call(["git", "rev-parse", "--verify", "HEAD"], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT) if not status: against = 'HEAD' else: # Initial commit: diff against an empty tree object against = '4b825dc642cb6eb9a060e54bf8d69288fbee4904' __cached_against = against return against
python
def get_against(): """ Determines the revision against which the staged data ought to be checked. :returns: The revision. :rtype: :class:`str` """ global __cached_against if __cached_against is not None: return __cached_against status = subprocess.call(["git", "rev-parse", "--verify", "HEAD"], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT) if not status: against = 'HEAD' else: # Initial commit: diff against an empty tree object against = '4b825dc642cb6eb9a060e54bf8d69288fbee4904' __cached_against = against return against
[ "def", "get_against", "(", ")", ":", "global", "__cached_against", "if", "__cached_against", "is", "not", "None", ":", "return", "__cached_against", "status", "=", "subprocess", ".", "call", "(", "[", "\"git\"", ",", "\"rev-parse\"", ",", "\"--verify\"", ",", "\"HEAD\"", "]", ",", "stdout", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "if", "not", "status", ":", "against", "=", "'HEAD'", "else", ":", "# Initial commit: diff against an empty tree object", "against", "=", "'4b825dc642cb6eb9a060e54bf8d69288fbee4904'", "__cached_against", "=", "against", "return", "against" ]
Determines the revision against which the staged data ought to be checked. :returns: The revision. :rtype: :class:`str`
[ "Determines", "the", "revision", "against", "which", "the", "staged", "data", "ought", "to", "be", "checked", "." ]
b43adf851f8ce4c5b1dba22ee3556a169681bce1
https://github.com/lddubeau/glerbl/blob/b43adf851f8ce4c5b1dba22ee3556a169681bce1/glerbl/__init__.py#L36-L57
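A sketch of how a pre-commit hook might consume the returned revision; the ``git diff-index`` invocation is illustrative, not taken from the repo.

import subprocess

against = get_against()
# A non-zero exit status means the staged tree differs from the base
# revision (HEAD, or the empty-tree object on an initial commit).
changed = subprocess.call(['git', 'diff-index', '--cached', '--quiet', against])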
242,237
JNRowe/jnrbase
jnrbase/entry.py
entry_point
def entry_point(__func: Callable) -> Callable: """Execute function when module is run directly. Note: This allows fall through for importing modules that use it. Args: __func: Function to run """ if __func.__module__ == '__main__': import sys sys.exit(__func()) else: return __func
python
def entry_point(__func: Callable) -> Callable: """Execute function when module is run directly. Note: This allows fall through for importing modules that use it. Args: __func: Function to run """ if __func.__module__ == '__main__': import sys sys.exit(__func()) else: return __func
[ "def", "entry_point", "(", "__func", ":", "Callable", ")", "->", "Callable", ":", "if", "__func", ".", "__module__", "==", "'__main__'", ":", "import", "sys", "sys", ".", "exit", "(", "__func", "(", ")", ")", "else", ":", "return", "__func" ]
Execute function when module is run directly. Note: This allows fall through for importing modules that use it. Args: __func: Function to run
[ "Execute", "function", "when", "module", "is", "run", "directly", "." ]
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/entry.py#L24-L37
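A sketch of the decorator in use; ``main`` is a hypothetical console function.

@entry_point
def main() -> int:
    # Runs immediately (and its return value becomes the exit status)
    # only when the defining module is executed as __main__; importers
    # get the undecorated function back.
    print('hello')
    return 0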
242,238
DallasMorningNews/django-datafreezer
datafreezer/models.py
create_col_nums
def create_col_nums(): """Return column numbers and letters that repeat up to NUM_REPEATS. I.e., NUM_REPEATS = 2 would return a list of 26 * 26 = 676 2-tuples. """ NUM_REPEATS = 2 column_letters = list( string.ascii_uppercase ) + map( ''.join, itertools.product( string.ascii_uppercase, repeat=NUM_REPEATS ) ) letter_numbers = [] count = 1 for letter in column_letters: letter_numbers.append((count, str(count) + ' (' + letter + ')')) count += 1 return tuple(letter_numbers)
python
def create_col_nums(): """Return column numbers and letters that repeat up to NUM_REPEATS. I.e., NUM_REPEATS = 2 would return a list of 26 * 26 = 676 2-tuples. """ NUM_REPEATS = 2 column_letters = list( string.ascii_uppercase ) + map( ''.join, itertools.product( string.ascii_uppercase, repeat=NUM_REPEATS ) ) letter_numbers = [] count = 1 for letter in column_letters: letter_numbers.append((count, str(count) + ' (' + letter + ')')) count += 1 return tuple(letter_numbers)
[ "def", "create_col_nums", "(", ")", ":", "NUM_REPEATS", "=", "2", "column_letters", "=", "list", "(", "string", ".", "ascii_uppercase", ")", "+", "map", "(", "''", ".", "join", ",", "itertools", ".", "product", "(", "string", ".", "ascii_uppercase", ",", "repeat", "=", "NUM_REPEATS", ")", ")", "letter_numbers", "=", "[", "]", "count", "=", "1", "for", "letter", "in", "column_letters", ":", "letter_numbers", ".", "append", "(", "(", "count", ",", "str", "(", "count", ")", "+", "' ('", "+", "letter", "+", "')'", ")", ")", "count", "+=", "1", "return", "tuple", "(", "letter_numbers", ")" ]
Return column numbers and letters that repeat up to NUM_REPEATS. I.e., NUM_REPEATS = 2 would return a list of 26 * 26 = 676 2-tuples.
[ "Return", "column", "numbers", "and", "letters", "that", "repeat", "up", "to", "NUM_REPEATS", "." ]
982dcf2015c80a280f1a093e32977cb71d4ea7aa
https://github.com/DallasMorningNews/django-datafreezer/blob/982dcf2015c80a280f1a093e32977cb71d4ea7aa/datafreezer/models.py#L18-L41
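A sketch of the expected choices tuple; note the concatenation relies on Python 2's list-returning ``map``, so under Python 3 the ``map(...)`` result would need a ``list()`` wrapper first.

choices = create_col_nums()
assert choices[0] == (1, '1 (A)')
assert choices[26] == (27, '27 (AA)')   # double letters start after Z
assert len(choices) == 26 + 26 * 26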
242,239
emin63/eyap
eyap/core/redis_comments.py
RedisCommentThread.add_comment
def add_comment(self, body, allow_create=False, allow_hashes=False, summary=None): """Add comment as required by comments.CommentThread parent class. """ thread_id = self.lookup_thread_id() if not allow_create and not self.redis.exists(thread_id): raise ValueError('Tried to add comment to nonexistent thread %s' % ( thread_id)) comment = comments.SingleComment( self.user, datetime.datetime.now(datetime.timezone.utc), body, summary=summary) lpush = self.redis.lpush(thread_id, comment.to_json()) logging.debug('Pushing comment to redis returned %s', str(lpush)) if self.ltrim: ltrim = self.redis.ltrim(thread_id, 0, self.ltrim) logging.debug('Redis ltrim returned %s', str(ltrim)) else: ltrim = None return {'status': 'OK', 'lpush': lpush, 'ltrim': ltrim}
python
def add_comment(self, body, allow_create=False, allow_hashes=False, summary=None): """Add comment as required by comments.CommentThread parent class. """ thread_id = self.lookup_thread_id() if not allow_create and not self.redis.exists(thread_id): raise ValueError('Tried to add comment to nonexistent thread %s' % ( thread_id)) comment = comments.SingleComment( self.user, datetime.datetime.now(datetime.timezone.utc), body, summary=summary) lpush = self.redis.lpush(thread_id, comment.to_json()) logging.debug('Pushing comment to redis returned %s', str(lpush)) if self.ltrim: ltrim = self.redis.ltrim(thread_id, 0, self.ltrim) logging.debug('Redis ltrim returned %s', str(ltrim)) else: ltrim = None return {'status': 'OK', 'lpush': lpush, 'ltrim': ltrim}
[ "def", "add_comment", "(", "self", ",", "body", ",", "allow_create", "=", "False", ",", "allow_hashes", "=", "False", ",", "summary", "=", "None", ")", ":", "thread_id", "=", "self", ".", "lookup_thread_id", "(", ")", "if", "not", "allow_create", "and", "not", "self", ".", "redis", ".", "exists", "(", "thread_id", ")", ":", "raise", "ValueError", "(", "'Tried to add comment to nonexistent thread %s'", "%", "(", "thread_id", ")", ")", "comment", "=", "comments", ".", "SingleComment", "(", "self", ".", "user", ",", "datetime", ".", "datetime", ".", "now", "(", "datetime", ".", "timezone", ".", "utc", ")", ",", "body", ",", "summary", "=", "summary", ")", "lpush", "=", "self", ".", "redis", ".", "lpush", "(", "thread_id", ",", "comment", ".", "to_json", "(", ")", ")", "logging", ".", "debug", "(", "'Pushing comment to redis returned %s'", ",", "str", "(", "lpush", ")", ")", "if", "self", ".", "ltrim", ":", "ltrim", "=", "self", ".", "redis", ".", "ltrim", "(", "thread_id", ",", "0", ",", "self", ".", "ltrim", ")", "logging", ".", "debug", "(", "'Redis ltrim returned %s'", ",", "str", "(", "ltrim", ")", ")", "else", ":", "ltrim", "=", "None", "return", "{", "'status'", ":", "'OK'", ",", "'lpush'", ":", "lpush", ",", "'ltrim'", ":", "ltrim", "}" ]
Add comment as required by comments.CommentThread parent class.
[ "Add", "comment", "as", "required", "by", "comments", ".", "CommentThread", "parent", "class", "." ]
a610761973b478ca0e864e970be05ce29d5994a5
https://github.com/emin63/eyap/blob/a610761973b478ca0e864e970be05ce29d5994a5/eyap/core/redis_comments.py#L25-L45
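A hedged sketch; the constructor arguments are hypothetical because the record does not show ``RedisCommentThread.__init__``.

thread = RedisCommentThread('alice', 'demo-topic')   # hypothetical args
result = thread.add_comment('first comment', allow_create=True)
assert result['status'] == 'OK'   # lpush/ltrim results are returned too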
242,240
CivicSpleen/ckcache
ckcache/async.py
submit_task
def submit_task(rel_path, cache_string, buffer): """Put an upload job on the queue, and start the thread if required""" global upload_queue global upload_thread upload_queue.put((rel_path, cache_string, buffer)) if upload_thread is None or not upload_thread.is_alive(): upload_thread = UploaderThread() upload_thread.start()
python
def submit_task(rel_path, cache_string, buffer): """Put an upload job on the queue, and start the thread if required""" global upload_queue global upload_thread upload_queue.put((rel_path, cache_string, buffer)) if upload_thread is None or not upload_thread.is_alive(): upload_thread = UploaderThread() upload_thread.start()
[ "def", "submit_task", "(", "rel_path", ",", "cache_string", ",", "buffer", ")", ":", "global", "upload_queue", "global", "upload_thread", "upload_queue", ".", "put", "(", "(", "rel_path", ",", "cache_string", ",", "buffer", ")", ")", "if", "upload_thread", "is", "None", "or", "not", "upload_thread", ".", "is_alive", "(", ")", ":", "upload_thread", "=", "UploaderThread", "(", ")", "upload_thread", ".", "start", "(", ")" ]
Put an upload job on the queue, and start the thread if required
[ "Put", "an", "upload", "job", "on", "the", "queue", "and", "start", "the", "thread", "if", "required" ]
0c699b6ba97ff164e9702504f0e1643dd4cd39e1
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/async.py#L53-L61
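A sketch of queueing an upload; the path, cache string, and buffer type are placeholders, since the uploader's expectations live outside this record.

with open('example.db', 'rb') as f:
    # Non-blocking: the job lands on the module-level queue and a single
    # UploaderThread drains it, (re)started lazily on first use.
    submit_task('library/example.db', 's3://bucket/cache', f.read())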
242,241
herereadthis/anyone
anyone/anyone.py
Poem.get_verse
def get_verse(self, v=1): """Get a specific verse.""" verse_count = len(self.verses) if v - 1 < verse_count: return self.verses[v - 1]
python
def get_verse(self, v=1): """Get a specific verse.""" verse_count = len(self.verses) if v - 1 < verse_count: return self.verses[v - 1]
[ "def", "get_verse", "(", "self", ",", "v", "=", "1", ")", ":", "verse_count", "=", "len", "(", "self", ".", "verses", ")", "if", "v", "-", "1", "<", "verse_count", ":", "return", "self", ".", "verses", "[", "v", "-", "1", "]" ]
Get a specific verse.
[ "Get", "a", "specific", "verse", "." ]
ac9917014ec5d4882df89b1757416e9ceca86edb
https://github.com/herereadthis/anyone/blob/ac9917014ec5d4882df89b1757416e9ceca86edb/anyone/anyone.py#L35-L39
242,242
herereadthis/anyone
anyone/anyone.py
Poem.get_line
def get_line(self, line=1): """Return a specific line.""" verse_size = len(self.get_verse()) + 1 if line > 1: verse = math.floor((line - 1) / verse_size) line_in_verse = (line - 1) % verse_size try: return self.verses[verse][line_in_verse] except IndexError: return '' else: return self.verses[0][0]
python
def get_line(self, line=1): """Return a specific line.""" verse_size = len(self.get_verse()) + 1 if line > 1: verse = math.floor((line - 1) / verse_size) line_in_verse = (line - 1) % verse_size try: return self.verses[verse][line_in_verse] except IndexError: return '' else: return self.verses[0][0]
[ "def", "get_line", "(", "self", ",", "line", "=", "1", ")", ":", "verse_size", "=", "len", "(", "self", ".", "get_verse", "(", ")", ")", "+", "1", "if", "line", ">", "1", ":", "verse", "=", "math", ".", "floor", "(", "(", "line", "-", "1", ")", "/", "verse_size", ")", "line_in_verse", "=", "(", "line", "-", "1", ")", "%", "verse_size", "try", ":", "return", "self", ".", "verses", "[", "verse", "]", "[", "line_in_verse", "]", "except", "IndexError", ":", "return", "''", "else", ":", "return", "self", ".", "verses", "[", "0", "]", "[", "0", "]" ]
Return a specific line.
[ "Return", "a", "specific", "line", "." ]
ac9917014ec5d4882df89b1757416e9ceca86edb
https://github.com/herereadthis/anyone/blob/ac9917014ec5d4882df89b1757416e9ceca86edb/anyone/anyone.py#L41-L52
242,243
herereadthis/anyone
anyone/anyone.py
Poem.print_poem
def print_poem(self): """Print all the verses.""" for index, verse in enumerate(self.verses): for line in verse: print(line) if index != len(self.verses) - 1: print('')
python
def print_poem(self): """Print all the verses.""" for index, verse in enumerate(self.verses): for line in verse: print(line) if index != len(self.verses) - 1: print('')
[ "def", "print_poem", "(", "self", ")", ":", "for", "index", ",", "verse", "in", "enumerate", "(", "self", ".", "verses", ")", ":", "for", "line", "in", "verse", ":", "print", "(", "line", ")", "if", "index", "!=", "len", "(", "self", ".", "verses", ")", "-", "1", ":", "print", "(", "''", ")" ]
Print all the verses.
[ "Print", "all", "the", "verses", "." ]
ac9917014ec5d4882df89b1757416e9ceca86edb
https://github.com/herereadthis/anyone/blob/ac9917014ec5d4882df89b1757416e9ceca86edb/anyone/anyone.py#L54-L60
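Pulling the three Poem methods together; the sketch assumes ``verses`` holds a list of verses, each a list of line strings, which matches how the methods index it.

poem = Poem()   # hypothetical construction
poem.verses = [['line 1', 'line 2'], ['line 3', 'line 4']]
assert poem.get_verse(2) == ['line 3', 'line 4']
assert poem.get_line(1) == 'line 1'
assert poem.get_line(3) == ''        # blank separators count as lines
poem.print_poem()                    # verses separated by blank lines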
242,244
Othernet-Project/squery-pg
squery_pg/migrations.py
get_mods
def get_mods(package): """ List all loadable python modules in a directory This function looks inside the specified directory for all files that look like Python modules with a numeric prefix and returns them. It will omit any duplicates and return file names without extension. :param package: package object :returns: list of tuples containing filename without extension, major_version and minor_version """ pkgdir = package.__path__[0] matches = filter(None, [PYMOD_RE.match(f) for f in os.listdir(pkgdir)]) parse_match = lambda groups: (groups[0], int(groups[1]), int(groups[2])) return sorted(list(set([parse_match(m.groups()) for m in matches])), key=lambda x: (x[1], x[2]))
python
def get_mods(package): """ List all loadable python modules in a directory This function looks inside the specified directory for all files that look like Python modules with a numeric prefix and returns them. It will omit any duplicates and return file names without extension. :param package: package object :returns: list of tuples containing filename without extension, major_version and minor_version """ pkgdir = package.__path__[0] matches = filter(None, [PYMOD_RE.match(f) for f in os.listdir(pkgdir)]) parse_match = lambda groups: (groups[0], int(groups[1]), int(groups[2])) return sorted(list(set([parse_match(m.groups()) for m in matches])), key=lambda x: (x[1], x[2]))
[ "def", "get_mods", "(", "package", ")", ":", "pkgdir", "=", "package", ".", "__path__", "[", "0", "]", "matches", "=", "filter", "(", "None", ",", "[", "PYMOD_RE", ".", "match", "(", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "pkgdir", ")", "]", ")", "parse_match", "=", "lambda", "groups", ":", "(", "groups", "[", "0", "]", ",", "int", "(", "groups", "[", "1", "]", ")", ",", "int", "(", "groups", "[", "2", "]", ")", ")", "return", "sorted", "(", "list", "(", "set", "(", "[", "parse_match", "(", "m", ".", "groups", "(", ")", ")", "for", "m", "in", "matches", "]", ")", ")", ",", "key", "=", "lambda", "x", ":", "(", "x", "[", "1", "]", ",", "x", "[", "2", "]", ")", ")" ]
List all loadable python modules in a directory This function looks inside the specified directory for all files that look like Python modules with a numeric prefix and returns them. It will omit any duplicates and return file names without extension. :param package: package object :returns: list of tuples containing filename without extension, major_version and minor_version
[ "List", "all", "loadable", "python", "modules", "in", "a", "directory" ]
eaa695c3719e2d2b7e1b049bb58c987c132b6b34
https://github.com/Othernet-Project/squery-pg/blob/eaa695c3719e2d2b7e1b049bb58c987c132b6b34/squery_pg/migrations.py#L45-L60
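A sketch of the return shape for a hypothetical migrations package whose file names match ``PYMOD_RE`` (e.g. ``01_02_add_users.py``); the regex's capture groups live outside this record.

import myapp.migrations as pkg   # hypothetical package

# .py/.pyc duplicates collapse via set(); entries sort by (major, minor).
mods = get_mods(pkg)
# e.g. [('01_01_init', 1, 1), ('01_02_add_users', 1, 2)]   (illustrative)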
242,245
Othernet-Project/squery-pg
squery_pg/migrations.py
get_new
def get_new(modules, min_major_version, min_minor_version): """ Get list of migrations that haven't been run yet :param modules: iterable containing module names :param min_major_version: minimum major version :param min_minor_version: minimum minor version :returns: an iterator that yields only items whose versions are >= the given minimum version """ for mod_data in modules: (modname, mod_major_version, mod_minor_version) = mod_data if (mod_major_version > min_major_version or (mod_major_version == min_major_version and mod_minor_version >= min_minor_version)): yield mod_data
python
def get_new(modules, min_major_version, min_minor_version): """ Get list of migrations that haven't been run yet :param modules: iterable containing module names :param min_major_version: minimum major version :param min_minor_version: minimum minor version :returns: an iterator that yields only items whose versions are >= the given minimum version """ for mod_data in modules: (modname, mod_major_version, mod_minor_version) = mod_data if (mod_major_version > min_major_version or (mod_major_version == min_major_version and mod_minor_version >= min_minor_version)): yield mod_data
[ "def", "get_new", "(", "modules", ",", "min_major_version", ",", "min_minor_version", ")", ":", "for", "mod_data", "in", "modules", ":", "(", "modname", ",", "mod_major_version", ",", "mod_minor_version", ")", "=", "mod_data", "if", "(", "mod_major_version", ">", "min_major_version", "or", "(", "mod_major_version", "==", "min_major_version", "and", "mod_minor_version", ">=", "min_minor_version", ")", ")", ":", "yield", "mod_data" ]
Get list of migrations that haven't been run yet :param modules: iterable containing module names :param min_major_version: minimum major version :param min_minor_version: minimum minor version :returns: an iterator that yields only items whose versions are >= the given minimum version
[ "Get", "list", "of", "migrations", "that", "haven", "t", "been", "run", "yet" ]
eaa695c3719e2d2b7e1b049bb58c987c132b6b34
https://github.com/Othernet-Project/squery-pg/blob/eaa695c3719e2d2b7e1b049bb58c987c132b6b34/squery_pg/migrations.py#L63-L77
242,246
Othernet-Project/squery-pg
squery_pg/migrations.py
load_mod
def load_mod(module, package): """ Load a module named ``module`` from the given search ``path``. The module path prefix is set according to the ``package`` argument. By default the module is loaded as if it comes from a global 'db_migrations' package. As such, it may conflict with any 'db_migration' package. The module can be looked up in ``sys.modules`` as ``db_migration.MODNAME`` where ``MODNAME`` is the name supplied as the ``module`` argument. Keep in mind that relative imports from within the module depend on this prefix. This function raises an ``ImportError`` exception if the module is not found. :param module: name of the module to load :param package: package object :returns: module object """ name = '%s.%s' % (package.__name__, module) if name in sys.modules: return sys.modules[name] return importlib.import_module(name, package=package.__name__)
python
def load_mod(module, package): """ Load a module named ``module`` from the given search ``path``. The module path prefix is set according to the ``package`` argument. By default the module is loaded as if it comes from a global 'db_migrations' package. As such, it may conflict with any 'db_migration' package. The module can be looked up in ``sys.modules`` as ``db_migration.MODNAME`` where ``MODNAME`` is the name supplied as the ``module`` argument. Keep in mind that relative imports from within the module depend on this prefix. This function raises an ``ImportError`` exception if the module is not found. :param module: name of the module to load :param package: package object :returns: module object """ name = '%s.%s' % (package.__name__, module) if name in sys.modules: return sys.modules[name] return importlib.import_module(name, package=package.__name__)
[ "def", "load_mod", "(", "module", ",", "package", ")", ":", "name", "=", "'%s.%s'", "%", "(", "package", ".", "__name__", ",", "module", ")", "if", "name", "in", "sys", ".", "modules", ":", "return", "sys", ".", "modules", "[", "name", "]", "return", "importlib", ".", "import_module", "(", "name", ",", "package", "=", "package", ".", "__name__", ")" ]
Load a module named ``module`` from the given search ``path``. The module path prefix is set according to the ``package`` argument. By default the module is loaded as if it comes from a global 'db_migrations' package. As such, it may conflict with any 'db_migration' package. The module can be looked up in ``sys.modules`` as ``db_migration.MODNAME`` where ``MODNAME`` is the name supplied as the ``module`` argument. Keep in mind that relative imports from within the module depend on this prefix. This function raises an ``ImportError`` exception if the module is not found. :param module: name of the module to load :param package: package object :returns: module object
[ "Load", "a", "module", "named", "module", "from", "the", "given", "search", "path" ]
eaa695c3719e2d2b7e1b049bb58c987c132b6b34
https://github.com/Othernet-Project/squery-pg/blob/eaa695c3719e2d2b7e1b049bb58c987c132b6b34/squery_pg/migrations.py#L80-L100
242,247
Othernet-Project/squery-pg
squery_pg/migrations.py
unpack_version
def unpack_version(version): """Unpack a single version integer into the two major and minor components.""" minor_version = version % VERSION_MULTIPLIER major_version = (version - minor_version) / VERSION_MULTIPLIER return (major_version, minor_version)
python
def unpack_version(version): """Unpack a single version integer into the two major and minor components.""" minor_version = version % VERSION_MULTIPLIER major_version = (version - minor_version) / VERSION_MULTIPLIER return (major_version, minor_version)
[ "def", "unpack_version", "(", "version", ")", ":", "minor_version", "=", "version", "%", "VERSION_MULTIPLIER", "major_version", "=", "(", "version", "-", "minor_version", ")", "/", "VERSION_MULTIPLIER", "return", "(", "major_version", ",", "minor_version", ")" ]
Unpack a single version integer into the two major and minor components.
[ "Unpack", "a", "single", "version", "integer", "into", "the", "two", "major", "and", "minor", "components", "." ]
eaa695c3719e2d2b7e1b049bb58c987c132b6b34
https://github.com/Othernet-Project/squery-pg/blob/eaa695c3719e2d2b7e1b049bb58c987c132b6b34/squery_pg/migrations.py#L108-L113
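A worked example assuming ``VERSION_MULTIPLIER = 1000`` (the constant itself is defined outside this record).

# 2 * 1000 + 5 == 2005 unpacks to major 2, minor 5; on Python 3 the
# division yields a float major (2.0), which still compares equal.
assert unpack_version(2005) == (2, 5)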
242,248
Othernet-Project/squery-pg
squery_pg/migrations.py
set_version
def set_version(db, name, major_version, minor_version): """ Set database migration version :param db: connection object :param name: associated name :param major_version: integer major version of migration :param minor_version: integer minor version of migration """ version = pack_version(major_version, minor_version) db.execute(SET_VERSION_SQL, dict(name=name, version=version))
python
def set_version(db, name, major_version, minor_version): """ Set database migration version :param db: connection object :param name: associated name :param major_version: integer major version of migration :param minor_version: integer minor version of migration """ version = pack_version(major_version, minor_version) db.execute(SET_VERSION_SQL, dict(name=name, version=version))
[ "def", "set_version", "(", "db", ",", "name", ",", "major_version", ",", "minor_version", ")", ":", "version", "=", "pack_version", "(", "major_version", ",", "minor_version", ")", "db", ".", "execute", "(", "SET_VERSION_SQL", ",", "dict", "(", "name", "=", "name", ",", "version", "=", "version", ")", ")" ]
Set database migration version :param db: connection object :param name: associated name :param major_version: integer major version of migration :param minor_version: integer minor version of migration
[ "Set", "database", "migration", "version" ]
eaa695c3719e2d2b7e1b049bb58c987c132b6b34
https://github.com/Othernet-Project/squery-pg/blob/eaa695c3719e2d2b7e1b049bb58c987c132b6b34/squery_pg/migrations.py#L146-L155
242,249
Othernet-Project/squery-pg
squery_pg/migrations.py
run_migration
def run_migration(name, major_version, minor_version, db, mod, conf={}): """ Run migration script :param major_version: major version number of the migration :param minor_version: minor version number of the migration :param db: database connection object :param mod: loaded migration module to run :param conf: application configuration (if any) """ with db.transaction(): mod.up(db, conf) set_version(db, name, major_version, minor_version)
python
def run_migration(name, major_version, minor_version, db, mod, conf={}): """ Run migration script :param major_version: major version number of the migration :param minor_version: minor version number of the migration :param db: database connection object :param mod: loaded migration module to run :param conf: application configuration (if any) """ with db.transaction(): mod.up(db, conf) set_version(db, name, major_version, minor_version)
[ "def", "run_migration", "(", "name", ",", "major_version", ",", "minor_version", ",", "db", ",", "mod", ",", "conf", "=", "{", "}", ")", ":", "with", "db", ".", "transaction", "(", ")", ":", "mod", ".", "up", "(", "db", ",", "conf", ")", "set_version", "(", "db", ",", "name", ",", "major_version", ",", "minor_version", ")" ]
Run migration script :param major_version: major version number of the migration :param minor_version: minor version number of the migration :param db: database connection object :param mod: loaded migration module to run :param conf: application configuration (if any)
[ "Run", "migration", "script" ]
eaa695c3719e2d2b7e1b049bb58c987c132b6b34
https://github.com/Othernet-Project/squery-pg/blob/eaa695c3719e2d2b7e1b049bb58c987c132b6b34/squery_pg/migrations.py#L158-L169
242,250
Othernet-Project/squery-pg
squery_pg/migrations.py
migrate
def migrate(db, name, package, conf={}): """ Run all migrations that have not been run Migrations will be run inside a transaction. :param db: database connection object :param name: name associated with the migrations :param package: package that contains the migrations :param conf: application configuration object """ (current_major_version, current_minor_version) = get_version(db, name) package = importlib.import_module(package) logging.debug('Migration version for %s is %s.%s', package.__name__, current_major_version, current_minor_version) mods = get_mods(package) migrations = get_new(mods, current_major_version, current_minor_version + 1) for (modname, major_version, minor_version) in migrations: mod = load_mod(modname, package) run_migration(name, major_version, minor_version, db, mod, conf) logging.debug("Finished migrating to %s", modname)
python
def migrate(db, name, package, conf={}): """ Run all migrations that have not been run Migrations will be run inside a transaction. :param db: database connection object :param name: name associated with the migrations :param package: package that contains the migrations :param conf: application configuration object """ (current_major_version, current_minor_version) = get_version(db, name) package = importlib.import_module(package) logging.debug('Migration version for %s is %s.%s', package.__name__, current_major_version, current_minor_version) mods = get_mods(package) migrations = get_new(mods, current_major_version, current_minor_version + 1) for (modname, major_version, minor_version) in migrations: mod = load_mod(modname, package) run_migration(name, major_version, minor_version, db, mod, conf) logging.debug("Finished migrating to %s", modname)
[ "def", "migrate", "(", "db", ",", "name", ",", "package", ",", "conf", "=", "{", "}", ")", ":", "(", "current_major_version", ",", "current_minor_version", ")", "=", "get_version", "(", "db", ",", "name", ")", "package", "=", "importlib", ".", "import_module", "(", "package", ")", "logging", ".", "debug", "(", "'Migration version for %s is %s.%s'", ",", "package", ".", "__name__", ",", "current_major_version", ",", "current_minor_version", ")", "mods", "=", "get_mods", "(", "package", ")", "migrations", "=", "get_new", "(", "mods", ",", "current_major_version", ",", "current_minor_version", "+", "1", ")", "for", "(", "modname", ",", "major_version", ",", "minor_version", ")", "in", "migrations", ":", "mod", "=", "load_mod", "(", "modname", ",", "package", ")", "run_migration", "(", "name", ",", "major_version", ",", "minor_version", ",", "db", ",", "mod", ",", "conf", ")", "logging", ".", "debug", "(", "\"Finished migrating to %s\"", ",", "modname", ")" ]
Run all migrations that have not been run Migrations will be run inside a transaction. :param db: database connection object :param name: name associated with the migrations :param package: package that contains the migrations :param conf: application configuration object
[ "Run", "all", "migrations", "that", "have", "not", "been", "run" ]
eaa695c3719e2d2b7e1b049bb58c987c132b6b34
https://github.com/Othernet-Project/squery-pg/blob/eaa695c3719e2d2b7e1b049bb58c987c132b6b34/squery_pg/migrations.py#L172-L195
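A sketch tying the migration helpers above together; ``db`` and the dotted package path are stand-ins for a real connection object and migrations package.

db = connect_to_database()   # hypothetical connection factory
# Runs every migration newer than the recorded version, each inside a
# transaction, bumping the stored version as it goes.
migrate(db, name='myapp', package='myapp.migrations', conf={'debug': True})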
242,251
mayfield/shellish
shellish/rendering/html.py
html2vtml
def html2vtml(vtmarkup): """ Convert hypertext markup into vt markup. The output can be given to `vtmlrender` for conversion to VT100 sequences. """ try: htmlconv.feed(vtmarkup) htmlconv.close() return htmlconv.getvalue() finally: htmlconv.reset()
python
def html2vtml(vtmarkup): """ Convert hypertext markup into vt markup. The output can be given to `vtmlrender` for conversion to VT100 sequences. """ try: htmlconv.feed(vtmarkup) htmlconv.close() return htmlconv.getvalue() finally: htmlconv.reset()
[ "def", "html2vtml", "(", "vtmarkup", ")", ":", "try", ":", "htmlconv", ".", "feed", "(", "vtmarkup", ")", "htmlconv", ".", "close", "(", ")", "return", "htmlconv", ".", "getvalue", "(", ")", "finally", ":", "htmlconv", ".", "reset", "(", ")" ]
Convert hypertext markup into vt markup. The output can be given to `vtmlrender` for conversion to VT100 sequences.
[ "Convert", "hypertext", "markup", "into", "vt", "markup", ".", "The", "output", "can", "be", "given", "to", "vtmlrender", "for", "conversion", "to", "VT100", "sequences", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/rendering/html.py#L206-L215
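A small sketch of the conversion; the markup is illustrative and the exact VT output depends on the module-level ``htmlconv`` parser.

vt_markup = html2vtml('<b>bold</b> and <i>italic</i>')
# The result can then be handed to vtmlrender for VT100 sequences.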
242,252
OiNutter/lean
lean/template.py
Template.render
def render(self,scope=None,local_vars=None,block=None): ''' Render the template in the given scope with the locals specified. If a block is given, it is typically available within the template via +yield+. ''' if not scope: class Scope(object): pass scope = Scope() return self.evaluate(scope,local_vars or {}, block)
python
def render(self,scope=None,local_vars=None,block=None): ''' Render the template in the given scope with the locals specified. If a block is given, it is typically available within the template via +yield+. ''' if not scope: class Scope(object): pass scope = Scope() return self.evaluate(scope,local_vars or {}, block)
[ "def", "render", "(", "self", ",", "scope", "=", "None", ",", "local_vars", "=", "None", ",", "block", "=", "None", ")", ":", "if", "not", "scope", ":", "class", "Scope", "(", "object", ")", ":", "pass", "scope", "=", "Scope", "(", ")", "return", "self", ".", "evaluate", "(", "scope", ",", "local_vars", "or", "{", "}", ",", "block", ")" ]
Render the template in the given scope with the locals specified. If a block is given, it is typically available within the template via +yield+.
[ "Render", "the", "template", "in", "the", "given", "scope", "with", "the", "locals", "specified", ".", "If", "a", "block", "is", "given", "it", "is", "typically", "available", "within", "the", "template", "via", "+", "yield", "+", "." ]
5d251f923acd44265ed401de14a9ead6752c543f
https://github.com/OiNutter/lean/blob/5d251f923acd44265ed401de14a9ead6752c543f/lean/template.py#L66-L78
242,253
OiNutter/lean
lean/template.py
Template.basename
def basename(self, suffix=''): ''' The basename of the template file.''' if not self._file: return None base = os.path.basename(self._file) return base[:-len(suffix)] if suffix and base.endswith(suffix) else base
python
def basename(self, suffix=''): ''' The basename of the template file.''' if not self._file: return None base = os.path.basename(self._file) return base[:-len(suffix)] if suffix and base.endswith(suffix) else base
[ "def", "basename", "(", "self", ",", "suffix", "=", "''", ")", ":", "if", "not", "self", ".", "_file", ":", "return", "None", "base", "=", "os", ".", "path", ".", "basename", "(", "self", ".", "_file", ")", "return", "base", "[", ":", "-", "len", "(", "suffix", ")", "]", "if", "suffix", "and", "base", ".", "endswith", "(", "suffix", ")", "else", "base" ]
The basename of the template file.
[ "The", "basename", "of", "the", "template", "file", "." ]
5d251f923acd44265ed401de14a9ead6752c543f
https://github.com/OiNutter/lean/blob/5d251f923acd44265ed401de14a9ead6752c543f/lean/template.py#L80-L82
242,254
firstprayer/monsql
monsql/queryset.py
QuerySet.distinct
def distinct(self): """ Only return distinct rows. Return a new query set with the distinct mark """ new_query_set = self.clone() new_query_set.query.distinct = True return new_query_set
python
def distinct(self): """ Only return distinct rows. Return a new query set with the distinct mark """ new_query_set = self.clone() new_query_set.query.distinct = True return new_query_set
[ "def", "distinct", "(", "self", ")", ":", "new_query_set", "=", "self", ".", "clone", "(", ")", "new_query_set", ".", "query", ".", "distinct", "=", "True", "return", "new_query_set" ]
Only return distinct rows. Return a new query set with the distinct mark
[ "Only", "return", "distinct", "rows", ".", "Return", "a", "new", "query", "set", "with", "the", "distinct", "mark" ]
6285c15b574c8664046eae2edfeb548c7b173efd
https://github.com/firstprayer/monsql/blob/6285c15b574c8664046eae2edfeb548c7b173efd/monsql/queryset.py#L110-L117
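A sketch of the copy-on-write chaining; the surrounding table/filter API is assumed, not shown in this record.

rows = table.find({'active': True})   # hypothetical query-set source
unique_rows = rows.distinct()         # clone with the distinct flag set
assert rows is not unique_rows        # the original set is untouched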
242,255
uw-it-aca/uw-restclients-grad
uw_grad/degree.py
_process_json
def _process_json(json_data): """ return a list of GradDegree objects. """ requests = [] for item in json_data: degree = GradDegree() degree.degree_title = item["degreeTitle"] degree.exam_place = item["examPlace"] degree.exam_date = parse_datetime(item.get("examDate")) degree.req_type = item["requestType"] degree.major_full_name = item["majorFullName"] degree.submit_date = parse_datetime(item.get("requestSubmitDate")) degree.decision_date = parse_datetime(item.get('decisionDate')) degree.status = item["status"] degree.target_award_year = item["targetAwardYear"] if item.get("targetAwardQuarter")and\ len(item.get("targetAwardQuarter")): degree.target_award_quarter = item["targetAwardQuarter"].lower() requests.append(degree) return requests
python
def _process_json(json_data): """ return a list of GradDegree objects. """ requests = [] for item in json_data: degree = GradDegree() degree.degree_title = item["degreeTitle"] degree.exam_place = item["examPlace"] degree.exam_date = parse_datetime(item.get("examDate")) degree.req_type = item["requestType"] degree.major_full_name = item["majorFullName"] degree.submit_date = parse_datetime(item.get("requestSubmitDate")) degree.decision_date = parse_datetime(item.get('decisionDate')) degree.status = item["status"] degree.target_award_year = item["targetAwardYear"] if item.get("targetAwardQuarter")and\ len(item.get("targetAwardQuarter")): degree.target_award_quarter = item["targetAwardQuarter"].lower() requests.append(degree) return requests
[ "def", "_process_json", "(", "json_data", ")", ":", "requests", "=", "[", "]", "for", "item", "in", "json_data", ":", "degree", "=", "GradDegree", "(", ")", "degree", ".", "degree_title", "=", "item", "[", "\"degreeTitle\"", "]", "degree", ".", "exam_place", "=", "item", "[", "\"examPlace\"", "]", "degree", ".", "exam_date", "=", "parse_datetime", "(", "item", ".", "get", "(", "\"examDate\"", ")", ")", "degree", ".", "req_type", "=", "item", "[", "\"requestType\"", "]", "degree", ".", "major_full_name", "=", "item", "[", "\"majorFullName\"", "]", "degree", ".", "submit_date", "=", "parse_datetime", "(", "item", ".", "get", "(", "\"requestSubmitDate\"", ")", ")", "degree", ".", "decision_date", "=", "parse_datetime", "(", "item", ".", "get", "(", "'decisionDate'", ")", ")", "degree", ".", "status", "=", "item", "[", "\"status\"", "]", "degree", ".", "target_award_year", "=", "item", "[", "\"targetAwardYear\"", "]", "if", "item", ".", "get", "(", "\"targetAwardQuarter\"", ")", "and", "len", "(", "item", ".", "get", "(", "\"targetAwardQuarter\"", ")", ")", ":", "degree", ".", "target_award_quarter", "=", "item", "[", "\"targetAwardQuarter\"", "]", ".", "lower", "(", ")", "requests", ".", "append", "(", "degree", ")", "return", "requests" ]
return a list of GradDegree objects.
[ "return", "a", "list", "of", "GradDegree", "objects", "." ]
ca06ed2f24f3683314a5690f6078e97d37fc8e52
https://github.com/uw-it-aca/uw-restclients-grad/blob/ca06ed2f24f3683314a5690f6078e97d37fc8e52/uw_grad/degree.py#L23-L44
242,256
ryanjdillon/pylleo
pylleo/lleoio.py
read_meta
def read_meta(path_dir, tag_model, tag_id): '''Read meta data from Little Leonardo data header rows Args ---- path_dir: str Parent directory containing lleo data files tag_model: str Little Leonardo tag model name tag_id: str, int Little Leonardo tag ID number Returns ------- meta: dict dictionary with meta data from header lines of lleo data files ''' from collections import OrderedDict import os import yamlord from . import utils def _parse_meta_line(line): '''Return key, value pair parsed from data header line''' # Parse the key and its value from the line key, val = line.replace(':', '').replace('"', '').split(',') return key.strip(), val.strip() def _read_meta_all(f, meta, n_header): '''Read all meta data from header rows of data file''' # Skip 'File name' line f.seek(0) _ = f.readline() # Create child dictionary for channel / file line = f.readline() key_ch, val_ch = _parse_meta_line(line) val_ch = utils.posix_string(val_ch) meta['parameters'][val_ch] = OrderedDict() # Write header values to channel dict for _ in range(n_header-2): line = f.readline() key, val = _parse_meta_line(line) meta['parameters'][val_ch][key] = val.strip() return meta def _create_meta(path_dir, tag_model, tag_id): '''Create meta data dictionary''' import datetime from . import utils param_strs = utils.get_tag_params(tag_model) # Create dictionary of meta data meta = OrderedDict() # Create fields for the parameters in data directory name exp_name = os.path.split(path_dir)[1] params_tag = utils.parse_experiment_params(exp_name) for key, value in params_tag.items(): meta[key] = value fmt = "%Y-%m-%d %H:%M:%S" meta['date_modified'] = datetime.datetime.now().strftime(fmt) meta['parameters'] = OrderedDict() for param_str in param_strs: print('Create meta entry for {}'.format(param_str)) path_file = utils.find_file(path_dir, param_str, '.TXT') # Get number of header rows enc = utils.predict_encoding(path_file, n_lines=20) with open(path_file, 'r', encoding=enc) as f: n_header = utils.get_n_header(f) f.seek(0) meta = _read_meta_all(f, meta, n_header=n_header) return meta # Load meta data from YAML file if it already exists meta_yaml_path = os.path.join(path_dir, 'meta.yml') # Load file if exists else create if os.path.isfile(meta_yaml_path): meta = yamlord.read_yaml(meta_yaml_path) # Else create meta dictionary and save to YAML else: meta = _create_meta(path_dir, tag_model, tag_id) yamlord.write_yaml(meta, meta_yaml_path) return meta
python
def read_meta(path_dir, tag_model, tag_id): '''Read meta data from Little Leonardo data header rows Args ---- path_dir: str Parent directory containing lleo data files tag_model: str Little Leonardo tag model name tag_id: str, int Little Leonardo tag ID number Returns ------- meta: dict dictionary with meta data from header lines of lleo data files ''' from collections import OrderedDict import os import yamlord from . import utils def _parse_meta_line(line): '''Return key, value pair parsed from data header line''' # Parse the key and its value from the line key, val = line.replace(':', '').replace('"', '').split(',') return key.strip(), val.strip() def _read_meta_all(f, meta, n_header): '''Read all meta data from header rows of data file''' # Skip 'File name' line f.seek(0) _ = f.readline() # Create child dictionary for channel / file line = f.readline() key_ch, val_ch = _parse_meta_line(line) val_ch = utils.posix_string(val_ch) meta['parameters'][val_ch] = OrderedDict() # Write header values to channel dict for _ in range(n_header-2): line = f.readline() key, val = _parse_meta_line(line) meta['parameters'][val_ch][key] = val.strip() return meta def _create_meta(path_dir, tag_model, tag_id): '''Create meta data dictionary''' import datetime from . import utils param_strs = utils.get_tag_params(tag_model) # Create dictionary of meta data meta = OrderedDict() # Create fields for the parameters in data directory name exp_name = os.path.split(path_dir)[1] params_tag = utils.parse_experiment_params(exp_name) for key, value in params_tag.items(): meta[key] = value fmt = "%Y-%m-%d %H:%M:%S" meta['date_modified'] = datetime.datetime.now().strftime(fmt) meta['parameters'] = OrderedDict() for param_str in param_strs: print('Create meta entry for {}'.format(param_str)) path_file = utils.find_file(path_dir, param_str, '.TXT') # Get number of header rows enc = utils.predict_encoding(path_file, n_lines=20) with open(path_file, 'r', encoding=enc) as f: n_header = utils.get_n_header(f) f.seek(0) meta = _read_meta_all(f, meta, n_header=n_header) return meta # Load meta data from YAML file if it already exists meta_yaml_path = os.path.join(path_dir, 'meta.yml') # Load file if exists else create if os.path.isfile(meta_yaml_path): meta = yamlord.read_yaml(meta_yaml_path) # Else create meta dictionary and save to YAML else: meta = _create_meta(path_dir, tag_model, tag_id) yamlord.write_yaml(meta, meta_yaml_path) return meta
[ "def", "read_meta", "(", "path_dir", ",", "tag_model", ",", "tag_id", ")", ":", "from", "collections", "import", "OrderedDict", "import", "os", "import", "yamlord", "from", ".", "import", "utils", "def", "_parse_meta_line", "(", "line", ")", ":", "'''Return key, value pair parsed from data header line'''", "# Parse the key and its value from the line", "key", ",", "val", "=", "line", ".", "replace", "(", "':'", ",", "''", ")", ".", "replace", "(", "'\"'", ",", "''", ")", ".", "split", "(", "','", ")", "return", "key", ".", "strip", "(", ")", ",", "val", ".", "strip", "(", ")", "def", "_read_meta_all", "(", "f", ",", "meta", ",", "n_header", ")", ":", "'''Read all meta data from header rows of data file'''", "# Skip 'File name' line", "f", ".", "seek", "(", "0", ")", "_", "=", "f", ".", "readline", "(", ")", "# Create child dictionary for channel / file", "line", "=", "f", ".", "readline", "(", ")", "key_ch", ",", "val_ch", "=", "_parse_meta_line", "(", "line", ")", "val_ch", "=", "utils", ".", "posix_string", "(", "val_ch", ")", "meta", "[", "'parameters'", "]", "[", "val_ch", "]", "=", "OrderedDict", "(", ")", "# Write header values to channel dict", "for", "_", "in", "range", "(", "n_header", "-", "2", ")", ":", "line", "=", "f", ".", "readline", "(", ")", "key", ",", "val", "=", "_parse_meta_line", "(", "line", ")", "meta", "[", "'parameters'", "]", "[", "val_ch", "]", "[", "key", "]", "=", "val", ".", "strip", "(", ")", "return", "meta", "def", "_create_meta", "(", "path_dir", ",", "tag_model", ",", "tag_id", ")", ":", "'''Create meta data dictionary'''", "import", "datetime", "from", ".", "import", "utils", "param_strs", "=", "utils", ".", "get_tag_params", "(", "tag_model", ")", "# Create dictionary of meta data", "meta", "=", "OrderedDict", "(", ")", "# Create fields for the parameters in data directory name", "exp_name", "=", "os", ".", "path", ".", "split", "(", "path_dir", ")", "[", "1", "]", "params_tag", "=", "utils", ".", "parse_experiment_params", "(", "exp_name", ")", "for", "key", ",", "value", "in", "params_tag", ".", "items", "(", ")", ":", "meta", "[", "key", "]", "=", "value", "fmt", "=", "\"%Y-%m-%d %H:%M:%S\"", "meta", "[", "'date_modified'", "]", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "fmt", ")", "meta", "[", "'parameters'", "]", "=", "OrderedDict", "(", ")", "for", "param_str", "in", "param_strs", ":", "print", "(", "'Create meta entry for {}'", ".", "format", "(", "param_str", ")", ")", "path_file", "=", "utils", ".", "find_file", "(", "path_dir", ",", "param_str", ",", "'.TXT'", ")", "# Get number of header rows", "enc", "=", "utils", ".", "predict_encoding", "(", "path_file", ",", "n_lines", "=", "20", ")", "with", "open", "(", "path_file", ",", "'r'", ",", "encoding", "=", "enc", ")", "as", "f", ":", "n_header", "=", "utils", ".", "get_n_header", "(", "f", ")", "f", ".", "seek", "(", "0", ")", "meta", "=", "_read_meta_all", "(", "f", ",", "meta", ",", "n_header", "=", "n_header", ")", "return", "meta", "# Load meta data from YAML file if it already exists", "meta_yaml_path", "=", "os", ".", "path", ".", "join", "(", "path_dir", ",", "'meta.yml'", ")", "# Load file if exists else create", "if", "os", ".", "path", ".", "isfile", "(", "meta_yaml_path", ")", ":", "meta", "=", "yamlord", ".", "read_yaml", "(", "meta_yaml_path", ")", "# Else create meta dictionary and save to YAML", "else", ":", "meta", "=", "_create_meta", "(", "path_dir", ",", "tag_model", ",", "tag_id", ")", "yamlord", ".", "write_yaml", "(", "meta", ",", 
"meta_yaml_path", ")", "return", "meta" ]
Read meta data from Little Leonardo data header rows Args ---- path_dir: str Parent directory containing lleo data files tag_model: str Little Leonardo tag model name tag_id: str, int Little Leonardo tag ID number Returns ------- meta: dict dictionary with meta data from header lines of lleo data files
[ "Read", "meta", "data", "from", "Little", "Leonardo", "data", "header", "rows" ]
b9b999fef19eaeccce4f207ab1b6198287c1bfec
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleoio.py#L2-L103
242,257
ikumen/flask-cfg
flask_cfg/core.py
AbstractConfig._merge_values
def _merge_values(self, to_values, from_values): """Merges two dictionaries of values recursively. This is a very naive implementation that expects the two dictionaries to be fairly similar in structure. @param to_values destination dictionary @param from_values dictionary with values to copy """ if from_values is not None: for k, v in from_values.items(): if k in to_values and isinstance(to_values[k], dict): self._merge_values(to_values[k], v) # merge else: to_values[k] = v # replaces instead of merge return to_values
python
def _merge_values(self, to_values, from_values): """Merges two dictionaries of values recursively. This is a very naive implementation that expects the two dictionaries to be fairly similar in structure. @param to_values destination dictionary @param from_values dictionary with values to copy """ if from_values is not None: for k, v in from_values.items(): if k in to_values and isinstance(to_values[k], dict): self._merge_values(to_values[k], v) # merge else: to_values[k] = v # replaces instead of merge return to_values
[ "def", "_merge_values", "(", "self", ",", "to_values", ",", "from_values", ")", ":", "if", "from_values", "is", "not", "None", ":", "for", "k", ",", "v", "in", "from_values", ".", "items", "(", ")", ":", "if", "k", "in", "to_values", "and", "isinstance", "(", "to_values", "[", "k", "]", ",", "dict", ")", ":", "self", ".", "_merge_values", "(", "to_values", "[", "k", "]", ",", "v", ")", "# merge", "else", ":", "to_values", "[", "k", "]", "=", "v", "# replaces instead of merge", "return", "to_values" ]
Merges two dictionaries of values recursively. This is a very naive implementation that expects the two dictionaries to be fairly similar in structure. @param to_values destination dictionary @param from_values dictionary with values to copy
[ "Merges", "two", "dictionaries", "of", "values", "recursively", ".", "This", "is", "a", "very", "naive", "implementation", "that", "expects", "the", "two", "dictionaries", "to", "be", "fairly", "similar", "in", "structure", "." ]
28f3370121419d22b6a5a3713ab3cb8bb2da6e43
https://github.com/ikumen/flask-cfg/blob/28f3370121419d22b6a5a3713ab3cb8bb2da6e43/flask_cfg/core.py#L160-L174
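A worked example of the recursive merge, with ``config`` standing in for any AbstractConfig instance.

merged = config._merge_values(
    {'db': {'host': 'localhost', 'port': 5432}},
    {'db': {'port': 6432}, 'debug': True})
# -> {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}
# Nested dicts merge key-by-key; non-dict values replace outright.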
242,258
ikumen/flask-cfg
flask_cfg/core.py
AbstractConfig._load_config
def _load_config(self, path): """Return YAML values from given config file. @param path file to load """ try: with open(path) as f: values = yaml.safe_load(f) if isinstance(values, dict): return values else: raise yaml.YAMLError('Unable to parse/load {}'.format(path)) except(IOError, yaml.YAMLError) as e: if self.ignore_errors: return None else: raise e
python
def _load_config(self, path): """Return YAML values from given config file. @param path file to load """ try: with open(path) as f: values = yaml.safe_load(f) if isinstance(values, dict): return values else: raise yaml.YAMLError('Unable to parse/load {}'.format(path)) except(IOError, yaml.YAMLError) as e: if self.ignore_errors: return None else: raise e
[ "def", "_load_config", "(", "self", ",", "path", ")", ":", "try", ":", "with", "open", "(", "path", ")", "as", "f", ":", "values", "=", "yaml", ".", "safe_load", "(", "f", ")", "if", "isinstance", "(", "values", ",", "dict", ")", ":", "return", "values", "else", ":", "raise", "yaml", ".", "YAMLError", "(", "'Unable to parse/load {}'", ".", "format", "(", "path", ")", ")", "except", "(", "IOError", ",", "yaml", ".", "YAMLError", ")", "as", "e", ":", "if", "self", ".", "ignore_errors", ":", "return", "None", "else", ":", "raise", "e" ]
Return YAML values from given config file. @param path file to load
[ "Return", "YAML", "values", "from", "given", "config", "file", "." ]
28f3370121419d22b6a5a3713ab3cb8bb2da6e43
https://github.com/ikumen/flask-cfg/blob/28f3370121419d22b6a5a3713ab3cb8bb2da6e43/flask_cfg/core.py#L177-L193
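A self-contained sketch of the load-or-swallow pattern in `_load_config` above, assuming PyYAML is installed; `config.yml` and the standalone function name are hypothetical.

import yaml

def load_config(path, ignore_errors=True):
    # Return the parsed dict, or None (when ignoring errors) on any failure.
    try:
        with open(path) as f:
            values = yaml.safe_load(f)
        if isinstance(values, dict):
            return values
        raise yaml.YAMLError('Unable to parse/load {}'.format(path))
    except (IOError, yaml.YAMLError):
        if ignore_errors:
            return None
        raise

print(load_config('config.yml'))   # None if the file is missing or malformed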
242,259
ikumen/flask-cfg
flask_cfg/core.py
AbstractConfig._normalize_file_paths
def _normalize_file_paths(self, *args): """Returns all given configuration file paths as one list.""" paths = [] for arg in args: if arg is None: continue elif self._is_valid_file(arg): paths.append(arg) elif isinstance(arg, list) and all(self._is_valid_file(_) for _ in arg): paths = paths + arg elif not self.ignore_errors: raise TypeError('Config file paths must be string path or list of paths!') return paths
python
def _normalize_file_paths(self, *args): """Returns all given configuration file paths as one list.""" paths = [] for arg in args: if arg is None: continue elif self._is_valid_file(arg): paths.append(arg) elif isinstance(arg, list) and all(self._is_valid_file(_) for _ in arg): paths = paths + arg elif not self.ignore_errors: raise TypeError('Config file paths must be string path or list of paths!') return paths
[ "def", "_normalize_file_paths", "(", "self", ",", "*", "args", ")", ":", "paths", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "arg", "is", "None", ":", "continue", "elif", "self", ".", "_is_valid_file", "(", "arg", ")", ":", "paths", ".", "append", "(", "arg", ")", "elif", "isinstance", "(", "arg", ",", "list", ")", "and", "all", "(", "self", ".", "_is_valid_file", "(", "_", ")", "for", "_", "in", "arg", ")", ":", "paths", "=", "paths", "+", "arg", "elif", "not", "self", ".", "ignore_errors", ":", "raise", "TypeError", "(", "'Config file paths must be string path or list of paths!'", ")", "return", "paths" ]
Returns all given configuration file paths as one list.
[ "Returns", "all", "given", "configuration", "file", "paths", "as", "one", "list", "." ]
28f3370121419d22b6a5a3713ab3cb8bb2da6e43
https://github.com/ikumen/flask-cfg/blob/28f3370121419d22b6a5a3713ab3cb8bb2da6e43/flask_cfg/core.py#L196-L208
242,260
ikumen/flask-cfg
flask_cfg/core.py
AbstractConfig._is_valid_file
def _is_valid_file(self, path): """Simple check to see if file path exists. Does not check for valid YAML format.""" return isinstance(path, basestring) and os.path.isfile(path)
python
def _is_valid_file(self, path): """Simple check to see if file path exists. Does not check for valid YAML format.""" return isinstance(path, basestring) and os.path.isfile(path)
[ "def", "_is_valid_file", "(", "self", ",", "path", ")", ":", "return", "isinstance", "(", "path", ",", "basestring", ")", "and", "os", ".", "path", ".", "isfile", "(", "path", ")" ]
Simple check to see if file path exists. Does not check for valid YAML format.
[ "Simple", "check", "to", "see", "if", "file", "path", "exists", ".", "Does", "not", "check", "for", "valid", "YAML", "format", "." ]
28f3370121419d22b6a5a3713ab3cb8bb2da6e43
https://github.com/ikumen/flask-cfg/blob/28f3370121419d22b6a5a3713ab3cb8bb2da6e43/flask_cfg/core.py#L211-L213
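`_is_valid_file` above relies on Python 2's `basestring`, which does not exist on Python 3. A sketch of the same check in Python 3 terms (not part of the flask-cfg API):

import os

def is_valid_file(path):
    # str replaces basestring on Python 3; the YAML content is still not validated here.
    return isinstance(path, str) and os.path.isfile(path)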
242,261
almcc/cinder-data
docs/conf.py
run_apidoc
def run_apidoc(_): """Heler function for run apidoc as part of the build.""" current_directory = os.path.abspath(os.path.dirname(__file__)) output_path = os.path.join(current_directory, 'source') cmd_path = 'sphinx-apidoc' if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv # If we are, assemble the path manually cmd_path = os.path.abspath(os.path.join(sys.prefix, 'bin', 'sphinx-apidoc')) main([cmd_path, '-e', '-o', output_path, '../cinder_data', '--force'])
python
def run_apidoc(_): """Heler function for run apidoc as part of the build.""" current_directory = os.path.abspath(os.path.dirname(__file__)) output_path = os.path.join(current_directory, 'source') cmd_path = 'sphinx-apidoc' if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv # If we are, assemble the path manually cmd_path = os.path.abspath(os.path.join(sys.prefix, 'bin', 'sphinx-apidoc')) main([cmd_path, '-e', '-o', output_path, '../cinder_data', '--force'])
[ "def", "run_apidoc", "(", "_", ")", ":", "current_directory", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "output_path", "=", "os", ".", "path", ".", "join", "(", "current_directory", ",", "'source'", ")", "cmd_path", "=", "'sphinx-apidoc'", "if", "hasattr", "(", "sys", ",", "'real_prefix'", ")", ":", "# Check to see if we are in a virtualenv", "# If we are, assemble the path manually", "cmd_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "sys", ".", "prefix", ",", "'bin'", ",", "'sphinx-apidoc'", ")", ")", "main", "(", "[", "cmd_path", ",", "'-e'", ",", "'-o'", ",", "output_path", ",", "'../cinder_data'", ",", "'--force'", "]", ")" ]
Helper function to run apidoc as part of the build.
[ "Heler", "function", "for", "run", "apidoc", "as", "part", "of", "the", "build", "." ]
4159a5186c4b4fc32354749892e86130530f6ec5
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/docs/conf.py#L23-L31
242,262
mayfield/shellish
shellish/layout/column.py
columnize
def columnize(items, width=None, file=sys.stdout): """ Smart display width handling when showing a list of stuff. """ if not items: return if width is None: width = shutil.get_terminal_size()[0] if file is sys.stdout else 80 items = [rendering.vtmlrender(x) for x in items] maxcol = max(items, key=len) colsize = len(maxcol) + 2 cols = width // colsize if cols < 2: for x in items: print(x, file=file) return lines = math.ceil(len(items) / cols) for i in range(lines): row = items[i:None:lines] print(*[x.ljust(colsize) for x in row], sep='', file=file)
python
def columnize(items, width=None, file=sys.stdout): """ Smart display width handling when showing a list of stuff. """ if not items: return if width is None: width = shutil.get_terminal_size()[0] if file is sys.stdout else 80 items = [rendering.vtmlrender(x) for x in items] maxcol = max(items, key=len) colsize = len(maxcol) + 2 cols = width // colsize if cols < 2: for x in items: print(x, file=file) return lines = math.ceil(len(items) / cols) for i in range(lines): row = items[i:None:lines] print(*[x.ljust(colsize) for x in row], sep='', file=file)
[ "def", "columnize", "(", "items", ",", "width", "=", "None", ",", "file", "=", "sys", ".", "stdout", ")", ":", "if", "not", "items", ":", "return", "if", "width", "is", "None", ":", "width", "=", "shutil", ".", "get_terminal_size", "(", ")", "[", "0", "]", "if", "file", "is", "sys", ".", "stdout", "else", "80", "items", "=", "[", "rendering", ".", "vtmlrender", "(", "x", ")", "for", "x", "in", "items", "]", "maxcol", "=", "max", "(", "items", ",", "key", "=", "len", ")", "colsize", "=", "len", "(", "maxcol", ")", "+", "2", "cols", "=", "width", "//", "colsize", "if", "cols", "<", "2", ":", "for", "x", "in", "items", ":", "print", "(", "x", ",", "file", "=", "file", ")", "return", "lines", "=", "math", ".", "ceil", "(", "len", "(", "items", ")", "/", "cols", ")", "for", "i", "in", "range", "(", "lines", ")", ":", "row", "=", "items", "[", "i", ":", "None", ":", "lines", "]", "print", "(", "*", "[", "x", ".", "ljust", "(", "colsize", ")", "for", "x", "in", "row", "]", ",", "sep", "=", "''", ",", "file", "=", "file", ")" ]
Smart display width handling when showing a list of stuff.
[ "Smart", "display", "width", "handling", "when", "showing", "a", "list", "of", "stuff", "." ]
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/column.py#L11-L28
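The layout arithmetic from `columnize` above, stripped of the VTML rendering step: column width is the longest item plus two spaces of padding, and rows are filled column-major via the `items[i::lines]` stride. Sample data is invented.

import math

items = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
width = 40
colsize = len(max(items, key=len)) + 2      # widest item plus padding
cols = max(width // colsize, 1)             # original falls back to one-per-line when cols < 2
lines = math.ceil(len(items) / cols)
for i in range(lines):
    print(''.join(x.ljust(colsize) for x in items[i::lines]))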
242,263
drongo-framework/drongo
drongo/managers/url.py
UrlManager.find_call
def find_call(self, path, method): """Find callable for the specified URL path and HTTP method. Args: path (:obj:`str`): URL path to match method (:obj:`str`): HTTP method Note: A trailing '/' is always assumed in the path. """ if not path.endswith('/'): path += '/' path = path.split('/')[1:] return self._recursive_route_match(self._routes, path, method, [])
python
def find_call(self, path, method): """Find callable for the specified URL path and HTTP method. Args: path (:obj:`str`): URL path to match method (:obj:`str`): HTTP method Note: A trailing '/' is always assumed in the path. """ if not path.endswith('/'): path += '/' path = path.split('/')[1:] return self._recursive_route_match(self._routes, path, method, [])
[ "def", "find_call", "(", "self", ",", "path", ",", "method", ")", ":", "if", "not", "path", ".", "endswith", "(", "'/'", ")", ":", "path", "+=", "'/'", "path", "=", "path", ".", "split", "(", "'/'", ")", "[", "1", ":", "]", "return", "self", ".", "_recursive_route_match", "(", "self", ".", "_routes", ",", "path", ",", "method", ",", "[", "]", ")" ]
Find callable for the specified URL path and HTTP method. Args: path (:obj:`str`): URL path to match method (:obj:`str`): HTTP method Note: A trailing '/' is always assumed in the path.
[ "Find", "callable", "for", "the", "specified", "URL", "path", "and", "HTTP", "method", "." ]
487edb370ae329f370bcf3b433ed3f28ba4c1d8c
https://github.com/drongo-framework/drongo/blob/487edb370ae329f370bcf3b433ed3f28ba4c1d8c/drongo/managers/url.py#L50-L63
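The normalization step at the top of `find_call` above: a trailing slash is forced, so splitting always yields the path segments plus a final empty string that the recursive matcher can treat as the terminal marker. The sample path is invented.

path = '/users/42'
if not path.endswith('/'):
    path += '/'
print(path.split('/')[1:])   # ['users', '42', '']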
242,264
edwards-lab/libGWAS
libgwas/standardizer.py
StandardizedVariable.get_variables
def get_variables(self, missing_in_geno=None): """Extract the complete set of data based on missingness over all for the current locus. :param missing_in_geno: mask associated with missingness in genotype :return: (phenotypes, covariates, nonmissing used for this set of vars) """ count = 0 mismatch = 0 if missing_in_geno is None: nonmissing = numpy.invert(self.missing[self.idx]) else: nonmissing = numpy.invert(self.missing[self.idx] | missing_in_geno) nmcount = sum(nonmissing) covars = numpy.zeros((self.covar_count, nmcount)) for idx in range(0, self.covar_count): covars[idx] = self.covariates[idx][nonmissing] min = covars[idx][covars[idx] != pheno_covar.PhenoCovar.missing_encoding].min() max = covars[idx][covars[idx] != pheno_covar.PhenoCovar.missing_encoding].max() if min == max: raise InvariantVar("Covar %s doesn't have enough variation to continue" % (self.datasource.covariate_labels[idx])) min = self.phenotypes[self.idx][nonmissing].min() max = self.phenotypes[self.idx][nonmissing].max() if min == max: raise InvariantVar("Phenotype %s doesn't have enough variation to continue" % (self.datasource.phenotype_names[self.idx])) return (self.phenotypes[self.idx][nonmissing], covars, nonmissing)
python
def get_variables(self, missing_in_geno=None): """Extract the complete set of data based on missingness over all for the current locus. :param missing_in_geno: mask associated with missingness in genotype :return: (phenotypes, covariates, nonmissing used for this set of vars) """ count = 0 mismatch = 0 if missing_in_geno is None: nonmissing = numpy.invert(self.missing[self.idx]) else: nonmissing = numpy.invert(self.missing[self.idx] | missing_in_geno) nmcount = sum(nonmissing) covars = numpy.zeros((self.covar_count, nmcount)) for idx in range(0, self.covar_count): covars[idx] = self.covariates[idx][nonmissing] min = covars[idx][covars[idx] != pheno_covar.PhenoCovar.missing_encoding].min() max = covars[idx][covars[idx] != pheno_covar.PhenoCovar.missing_encoding].max() if min == max: raise InvariantVar("Covar %s doesn't have enough variation to continue" % (self.datasource.covariate_labels[idx])) min = self.phenotypes[self.idx][nonmissing].min() max = self.phenotypes[self.idx][nonmissing].max() if min == max: raise InvariantVar("Phenotype %s doesn't have enough variation to continue" % (self.datasource.phenotype_names[self.idx])) return (self.phenotypes[self.idx][nonmissing], covars, nonmissing)
[ "def", "get_variables", "(", "self", ",", "missing_in_geno", "=", "None", ")", ":", "count", "=", "0", "mismatch", "=", "0", "if", "missing_in_geno", "is", "None", ":", "nonmissing", "=", "numpy", ".", "invert", "(", "self", ".", "missing", "[", "self", ".", "idx", "]", ")", "else", ":", "nonmissing", "=", "numpy", ".", "invert", "(", "self", ".", "missing", "[", "self", ".", "idx", "]", "|", "missing_in_geno", ")", "nmcount", "=", "sum", "(", "nonmissing", ")", "covars", "=", "numpy", ".", "zeros", "(", "(", "self", ".", "covar_count", ",", "nmcount", ")", ")", "for", "idx", "in", "range", "(", "0", ",", "self", ".", "covar_count", ")", ":", "covars", "[", "idx", "]", "=", "self", ".", "covariates", "[", "idx", "]", "[", "nonmissing", "]", "min", "=", "covars", "[", "idx", "]", "[", "covars", "[", "idx", "]", "!=", "pheno_covar", ".", "PhenoCovar", ".", "missing_encoding", "]", ".", "min", "(", ")", "max", "=", "covars", "[", "idx", "]", "[", "covars", "[", "idx", "]", "!=", "pheno_covar", ".", "PhenoCovar", ".", "missing_encoding", "]", ".", "max", "(", ")", "if", "min", "==", "max", ":", "raise", "InvariantVar", "(", "\"Covar %s doesn't have enough variation to continue\"", "%", "(", "self", ".", "datasource", ".", "covariate_labels", "[", "idx", "]", ")", ")", "min", "=", "self", ".", "phenotypes", "[", "self", ".", "idx", "]", "[", "nonmissing", "]", ".", "min", "(", ")", "max", "=", "self", ".", "phenotypes", "[", "self", ".", "idx", "]", "[", "nonmissing", "]", ".", "max", "(", ")", "if", "min", "==", "max", ":", "raise", "InvariantVar", "(", "\"Phenotype %s doesn't have enough variation to continue\"", "%", "(", "self", ".", "datasource", ".", "phenotype_names", "[", "self", ".", "idx", "]", ")", ")", "return", "(", "self", ".", "phenotypes", "[", "self", ".", "idx", "]", "[", "nonmissing", "]", ",", "covars", ",", "nonmissing", ")" ]
Extract the complete set of data based on missingness over all for the current locus. :param missing_in_geno: mask associated with missingness in genotype :return: (phenotypes, covariates, nonmissing used for this set of vars)
[ "Extract", "the", "complete", "set", "of", "data", "based", "on", "missingness", "over", "all", "for", "the", "current", "locus", "." ]
d68c9a083d443dfa5d7c5112de29010909cfe23f
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/standardizer.py#L65-L92
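The mask construction at the heart of `get_variables` above: per-subject missingness flags are OR-ed with the genotype mask and inverted to select usable rows. A tiny NumPy illustration with invented flags:

import numpy

missing = numpy.array([False, True, False])
missing_in_geno = numpy.array([False, False, True])
nonmissing = numpy.invert(missing | missing_in_geno)
print(nonmissing)   # [ True False False]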
242,265
gambogi/CSHLDAP
CSHLDAP.py
CSHLDAP.member
def member(self, user, objects=False): """ Returns a user as a dict of attributes """ try: member = self.search(uid=user, objects=objects)[0] except IndexError: return None if objects: return member return member[1]
python
def member(self, user, objects=False): """ Returns a user as a dict of attributes """ try: member = self.search(uid=user, objects=objects)[0] except IndexError: return None if objects: return member return member[1]
[ "def", "member", "(", "self", ",", "user", ",", "objects", "=", "False", ")", ":", "try", ":", "member", "=", "self", ".", "search", "(", "uid", "=", "user", ",", "objects", "=", "objects", ")", "[", "0", "]", "except", "IndexError", ":", "return", "None", "if", "objects", ":", "return", "member", "return", "member", "[", "1", "]" ]
Returns a user as a dict of attributes
[ "Returns", "a", "user", "as", "a", "dict", "of", "attributes" ]
09cb754b1e72437834e0d8cb4c7ac1830cfa6829
https://github.com/gambogi/CSHLDAP/blob/09cb754b1e72437834e0d8cb4c7ac1830cfa6829/CSHLDAP.py#L49-L58
242,266
gambogi/CSHLDAP
CSHLDAP.py
CSHLDAP.drinkAdmins
def drinkAdmins(self, objects=False): """ Returns a list of drink admins uids """ admins = self.group('drink', objects=objects) return admins
python
def drinkAdmins(self, objects=False): """ Returns a list of drink admins uids """ admins = self.group('drink', objects=objects) return admins
[ "def", "drinkAdmins", "(", "self", ",", "objects", "=", "False", ")", ":", "admins", "=", "self", ".", "group", "(", "'drink'", ",", "objects", "=", "objects", ")", "return", "admins" ]
Returns a list of drink admins uids
[ "Returns", "a", "list", "of", "drink", "admins", "uids" ]
09cb754b1e72437834e0d8cb4c7ac1830cfa6829
https://github.com/gambogi/CSHLDAP/blob/09cb754b1e72437834e0d8cb4c7ac1830cfa6829/CSHLDAP.py#L99-L103
242,267
gambogi/CSHLDAP
CSHLDAP.py
Member.isBirthday
def isBirthday(self): """ Is it the user's birthday today? """ if not self.birthday: return False birthday = self.birthdate() today = date.today() return (birthday.month == today.month and birthday.day == today.day)
python
def isBirthday(self): """ Is it the user's birthday today? """ if not self.birthday: return False birthday = self.birthdate() today = date.today() return (birthday.month == today.month and birthday.day == today.day)
[ "def", "isBirthday", "(", "self", ")", ":", "if", "not", "self", ".", "birthday", ":", "return", "False", "birthday", "=", "self", ".", "birthdate", "(", ")", "today", "=", "date", ".", "today", "(", ")", "return", "(", "birthday", ".", "month", "==", "today", ".", "month", "and", "birthday", ".", "day", "==", "today", ".", "day", ")" ]
Is it the user's birthday today?
[ "Is", "it", "the", "user", "s", "birthday", "today?" ]
09cb754b1e72437834e0d8cb4c7ac1830cfa6829
https://github.com/gambogi/CSHLDAP/blob/09cb754b1e72437834e0d8cb4c7ac1830cfa6829/CSHLDAP.py#L263-L271
242,268
gambogi/CSHLDAP
CSHLDAP.py
Member.reload
def reload(self): """ If there is an LDAP connection, query it for another instance of this member and set its internal dictionary to that result. """ if not self.ldap: return self.memberDict = self.ldap.member(self.uid)
python
def reload(self): """ If there is an LDAP connection, query it for another instance of this member and set its internal dictionary to that result. """ if not self.ldap: return self.memberDict = self.ldap.member(self.uid)
[ "def", "reload", "(", "self", ")", ":", "if", "not", "self", ".", "ldap", ":", "return", "self", ".", "memberDict", "=", "self", ".", "ldap", ".", "member", "(", "self", ".", "uid", ")" ]
If there is an LDAP connection, query it for another instance of this member and set its internal dictionary to that result.
[ "If", "there", "is", "an", "LDAP", "connection", "query", "it", "for", "another", "instance", "of", "this", "member", "and", "set", "its", "internal", "dictionary", "to", "that", "result", "." ]
09cb754b1e72437834e0d8cb4c7ac1830cfa6829
https://github.com/gambogi/CSHLDAP/blob/09cb754b1e72437834e0d8cb4c7ac1830cfa6829/CSHLDAP.py#L305-L312
242,269
skitazaki/python-clitool
clitool/processor.py
Streamer.consume
def consume(self, stream, source=None, chunksize=1): """ Consume the given stream object and return processing stats. :param stream: streaming object to consume :type stream: iterable :param source: source of stream to consume :type source: string :param chunksize: chunk size for multiprocessing :type chunksize: integer :rtype: dict """ stats = { PROCESSING_TOTAL: 0, PROCESSING_SKIPPED: 0, PROCESSING_SUCCESS: 0, PROCESSING_ERROR: 0 } if source: stats['source'] = source def skip_unless(r): if r: return r stats[PROCESSING_SKIPPED] += 1 stats[PROCESSING_TOTAL] += 1 rs = ifilter(skip_unless, stream) if self.processes: pool = multiprocessing.Pool(processes=self.processes) for f in self.procedures: rs = pool.imap_unordered(f, ifilter(skip_unless, rs), chunksize=chunksize) else: for f in self.procedures: rs = imap(f, ifilter(skip_unless, rs)) start = time.time() i = 0 try: while 1: processed = next(rs) if processed is None: stats[PROCESSING_SKIPPED] += 1 elif processed is False: stats[PROCESSING_ERROR] += 1 else: stats[PROCESSING_SUCCESS] += 1 self.collect(processed) i += 1 stats[PROCESSING_TOTAL] += 1 if i % self.reporting_interval == 0: logging.info(" ===> Processed %dth item <=== ", i) except StopIteration: pass except KeyboardInterrupt: logging.info("Stopped by user interruption at %dth item.", i) raise except: e = sys.exc_info()[1] logging.error(e) raise finally: if self.processes: pool.close() pool.join() stats[PROCESSING_TIME] = time.time() - start logging.info( 'STATS: total=%d, skipped=%d, success=%d, error=%d on %f[sec]' ' from "%s"', stats[PROCESSING_TOTAL], stats[PROCESSING_SKIPPED], stats[PROCESSING_SUCCESS], stats[PROCESSING_ERROR], stats[PROCESSING_TIME], stats.get('source', 'unknown')) return stats
python
def consume(self, stream, source=None, chunksize=1): """ Consume the given stream object and return processing stats. :param stream: streaming object to consume :type stream: iterable :param source: source of stream to consume :type source: string :param chunksize: chunk size for multiprocessing :type chunksize: integer :rtype: dict """ stats = { PROCESSING_TOTAL: 0, PROCESSING_SKIPPED: 0, PROCESSING_SUCCESS: 0, PROCESSING_ERROR: 0 } if source: stats['source'] = source def skip_unless(r): if r: return r stats[PROCESSING_SKIPPED] += 1 stats[PROCESSING_TOTAL] += 1 rs = ifilter(skip_unless, stream) if self.processes: pool = multiprocessing.Pool(processes=self.processes) for f in self.procedures: rs = pool.imap_unordered(f, ifilter(skip_unless, rs), chunksize=chunksize) else: for f in self.procedures: rs = imap(f, ifilter(skip_unless, rs)) start = time.time() i = 0 try: while 1: processed = next(rs) if processed is None: stats[PROCESSING_SKIPPED] += 1 elif processed is False: stats[PROCESSING_ERROR] += 1 else: stats[PROCESSING_SUCCESS] += 1 self.collect(processed) i += 1 stats[PROCESSING_TOTAL] += 1 if i % self.reporting_interval == 0: logging.info(" ===> Processed %dth item <=== ", i) except StopIteration: pass except KeyboardInterrupt: logging.info("Stopped by user interruption at %dth item.", i) raise except: e = sys.exc_info()[1] logging.error(e) raise finally: if self.processes: pool.close() pool.join() stats[PROCESSING_TIME] = time.time() - start logging.info( 'STATS: total=%d, skipped=%d, success=%d, error=%d on %f[sec]' ' from "%s"', stats[PROCESSING_TOTAL], stats[PROCESSING_SKIPPED], stats[PROCESSING_SUCCESS], stats[PROCESSING_ERROR], stats[PROCESSING_TIME], stats.get('source', 'unknown')) return stats
[ "def", "consume", "(", "self", ",", "stream", ",", "source", "=", "None", ",", "chunksize", "=", "1", ")", ":", "stats", "=", "{", "PROCESSING_TOTAL", ":", "0", ",", "PROCESSING_SKIPPED", ":", "0", ",", "PROCESSING_SUCCESS", ":", "0", ",", "PROCESSING_ERROR", ":", "0", "}", "if", "source", ":", "stats", "[", "'source'", "]", "=", "source", "def", "skip_unless", "(", "r", ")", ":", "if", "r", ":", "return", "r", "stats", "[", "PROCESSING_SKIPPED", "]", "+=", "1", "stats", "[", "PROCESSING_TOTAL", "]", "+=", "1", "rs", "=", "ifilter", "(", "skip_unless", ",", "stream", ")", "if", "self", ".", "processes", ":", "pool", "=", "multiprocessing", ".", "Pool", "(", "processes", "=", "self", ".", "processes", ")", "for", "f", "in", "self", ".", "procedures", ":", "rs", "=", "pool", ".", "imap_unordered", "(", "f", ",", "ifilter", "(", "skip_unless", ",", "rs", ")", ",", "chunksize", "=", "chunksize", ")", "else", ":", "for", "f", "in", "self", ".", "procedures", ":", "rs", "=", "imap", "(", "f", ",", "ifilter", "(", "skip_unless", ",", "rs", ")", ")", "start", "=", "time", ".", "time", "(", ")", "i", "=", "0", "try", ":", "while", "1", ":", "processed", "=", "next", "(", "rs", ")", "if", "processed", "is", "None", ":", "stats", "[", "PROCESSING_SKIPPED", "]", "+=", "1", "elif", "processed", "is", "False", ":", "stats", "[", "PROCESSING_ERROR", "]", "+=", "1", "else", ":", "stats", "[", "PROCESSING_SUCCESS", "]", "+=", "1", "self", ".", "collect", "(", "processed", ")", "i", "+=", "1", "stats", "[", "PROCESSING_TOTAL", "]", "+=", "1", "if", "i", "%", "self", ".", "reporting_interval", "==", "0", ":", "logging", ".", "info", "(", "\" ===> Processed %dth item <=== \"", ",", "i", ")", "except", "StopIteration", ":", "pass", "except", "KeyboardInterrupt", ":", "logging", ".", "info", "(", "\"Stopped by user interruption at %dth item.\"", ",", "i", ")", "raise", "except", ":", "e", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "logging", ".", "error", "(", "e", ")", "raise", "finally", ":", "if", "self", ".", "processes", ":", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "stats", "[", "PROCESSING_TIME", "]", "=", "time", ".", "time", "(", ")", "-", "start", "logging", ".", "info", "(", "'STATS: total=%d, skipped=%d, success=%d, error=%d on %f[sec]'", "' from \"%s\"'", ",", "stats", "[", "PROCESSING_TOTAL", "]", ",", "stats", "[", "PROCESSING_SKIPPED", "]", ",", "stats", "[", "PROCESSING_SUCCESS", "]", ",", "stats", "[", "PROCESSING_ERROR", "]", ",", "stats", "[", "PROCESSING_TIME", "]", ",", "stats", ".", "get", "(", "'source'", ",", "'unknown'", ")", ")", "return", "stats" ]
Consume the given stream object and return processing stats. :param stream: streaming object to consume :type stream: iterable :param source: source of stream to consume :type source: string :param chunksize: chunk size for multiprocessing :type chunksize: integer :rtype: dict
[ "Consuming", "given", "strem", "object", "and", "returns", "processing", "stats", "." ]
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
https://github.com/skitazaki/python-clitool/blob/4971f8d093d51c6fd0e6cc536bbb597f78b570ab/clitool/processor.py#L169-L240
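A stripped-down, single-process sketch of the pipeline in `consume` above: falsy records are filtered out, each procedure is chained lazily over the stream, and the yielded values are tallied by sentinel (`None` = skipped, `False` = error, anything else = success). `ifilter`/`imap` in the record are presumably Python 2 names; plain `filter`/`map` do the same on Python 3. The multiprocessing branch and logging are omitted.

def consume(stream, procedures):
    stats = {'success': 0, 'error': 0, 'skipped': 0}
    rs = filter(None, stream)              # drop falsy records up front
    for f in procedures:
        rs = map(f, filter(None, rs))      # chain each stage lazily
    for processed in rs:
        if processed is None:
            stats['skipped'] += 1
        elif processed is False:
            stats['error'] += 1
        else:
            stats['success'] += 1
    return stats

print(consume([1, 2, 0, 3], [lambda x: x * 10 if x != 2 else None]))
# {'success': 2, 'error': 0, 'skipped': 1}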
242,270
skitazaki/python-clitool
clitool/processor.py
CliHandler.reader
def reader(self, fp, encoding): """ Simple `open` wrapper for several file types. This supports ``.gz``, ``.json``, ``.csv``, and ``.tsv``. :param fp: opened file :type fp: file pointer :param encoding: encoding of opened file :type encoding: string :rtype: file pointer """ _, suffix = os.path.splitext(fp.name) if suffix == '.gz': fp.close() return gzip.open(fp.name) elif suffix == '.json': return json.load(fp) elif suffix == '.csv' or self.delimiter: return csvreader(fp, encoding, delimiter=self.delimiter or ',') elif suffix == '.tsv': return csvreader(fp, encoding, delimiter='\t') return fp
python
def reader(self, fp, encoding): """ Simple `open` wrapper for several file types. This supports ``.gz``, ``.json``, ``.csv``, and ``.tsv``. :param fp: opened file :type fp: file pointer :param encoding: encoding of opened file :type encoding: string :rtype: file pointer """ _, suffix = os.path.splitext(fp.name) if suffix == '.gz': fp.close() return gzip.open(fp.name) elif suffix == '.json': return json.load(fp) elif suffix == '.csv' or self.delimiter: return csvreader(fp, encoding, delimiter=self.delimiter or ',') elif suffix == '.tsv': return csvreader(fp, encoding, delimiter='\t') return fp
[ "def", "reader", "(", "self", ",", "fp", ",", "encoding", ")", ":", "_", ",", "suffix", "=", "os", ".", "path", ".", "splitext", "(", "fp", ".", "name", ")", "if", "suffix", "==", "'.gz'", ":", "fp", ".", "close", "(", ")", "return", "gzip", ".", "open", "(", "fp", ".", "name", ")", "elif", "suffix", "==", "'.json'", ":", "return", "json", ".", "load", "(", "fp", ")", "elif", "suffix", "==", "'.csv'", "or", "self", ".", "delimiter", ":", "return", "csvreader", "(", "fp", ",", "encoding", ",", "delimiter", "=", "self", ".", "delimiter", "or", "','", ")", "elif", "suffix", "==", "'.tsv'", ":", "return", "csvreader", "(", "fp", ",", "encoding", ",", "delimiter", "=", "'\\t'", ")", "return", "fp" ]
Simple `open` wrapper for several file types. This supports ``.gz``, ``.json``, ``.csv``, and ``.tsv``. :param fp: opened file :type fp: file pointer :param encoding: encoding of opened file :type encoding: string :rtype: file pointer
[ "Simple", "open", "wrapper", "for", "several", "file", "types", ".", "This", "supports", ".", "gz", "and", ".", "json", "." ]
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
https://github.com/skitazaki/python-clitool/blob/4971f8d093d51c6fd0e6cc536bbb597f78b570ab/clitool/processor.py#L257-L277
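The suffix dispatch in `reader` above, pared down to the stdlib branches; `open_any` is a hypothetical name, and the `.csv`/`.tsv` routes through the project's `csvreader` helper are omitted. Note the original reopens an already-open file object for the gzip case, as sketched here.

import gzip
import json
import os

def open_any(path):
    _, suffix = os.path.splitext(path)
    if suffix == '.gz':
        return gzip.open(path, 'rt')       # reopen transparently decompressed
    if suffix == '.json':
        with open(path) as f:
            return json.load(f)            # parsed object, not a file pointer
    return open(path)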
242,271
skitazaki/python-clitool
clitool/processor.py
CliHandler.handle
def handle(self, files, encoding, chunksize=1): """ Handle given files with given encoding. :param files: opened files. :type files: list :param encoding: encoding of opened file :type encoding: string :param chunksize: chunk size for multiprocessing :type chunksize: int :rtype: list """ stats = [] if files: logging.info("Input file count: %d", len(files)) for fp in files: stream = self.reader(fp, encoding) parsed = self.streamer.consume(stream, source=fp.name, chunksize=chunksize) stats.append(parsed) if not fp.closed: fp.close() else: stream = sys.stdin if self.delimiter: stream = csvreader(stream, encoding, delimiter=self.delimiter) parsed = self.streamer.consume(stream, chunksize=chunksize) stats.append(parsed) return stats
python
def handle(self, files, encoding, chunksize=1): """ Handle given files with given encoding. :param files: opened files. :type files: list :param encoding: encoding of opened file :type encoding: string :param chunksize: chunk size for multiprocessing :type chunksize: int :rtype: list """ stats = [] if files: logging.info("Input file count: %d", len(files)) for fp in files: stream = self.reader(fp, encoding) parsed = self.streamer.consume(stream, source=fp.name, chunksize=chunksize) stats.append(parsed) if not fp.closed: fp.close() else: stream = sys.stdin if self.delimiter: stream = csvreader(stream, encoding, delimiter=self.delimiter) parsed = self.streamer.consume(stream, chunksize=chunksize) stats.append(parsed) return stats
[ "def", "handle", "(", "self", ",", "files", ",", "encoding", ",", "chunksize", "=", "1", ")", ":", "stats", "=", "[", "]", "if", "files", ":", "logging", ".", "info", "(", "\"Input file count: %d\"", ",", "len", "(", "files", ")", ")", "for", "fp", "in", "files", ":", "stream", "=", "self", ".", "reader", "(", "fp", ",", "encoding", ")", "parsed", "=", "self", ".", "streamer", ".", "consume", "(", "stream", ",", "source", "=", "fp", ".", "name", ",", "chunksize", "=", "chunksize", ")", "stats", ".", "append", "(", "parsed", ")", "if", "not", "fp", ".", "closed", ":", "fp", ".", "close", "(", ")", "else", ":", "stream", "=", "sys", ".", "stdin", "if", "self", ".", "delimiter", ":", "stream", "=", "csvreader", "(", "stream", ",", "encoding", ",", "delimiter", "=", "self", ".", "delimiter", ")", "parsed", "=", "self", ".", "streamer", ".", "consume", "(", "stream", ",", "chunksize", "=", "chunksize", ")", "stats", ".", "append", "(", "parsed", ")", "return", "stats" ]
Handle given files with given encoding. :param files: opened files. :type files: list :param encoding: encoding of opened file :type encoding: string :param chunksize: chunk size for multiprocessing :type chunksize: int :rtype: list
[ "Handle", "given", "files", "with", "given", "encoding", "." ]
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
https://github.com/skitazaki/python-clitool/blob/4971f8d093d51c6fd0e6cc536bbb597f78b570ab/clitool/processor.py#L279-L306
242,272
vivainio/argp
argp.py
init
def init(parser = None): """ module needs to be initialized by 'init'. Can be called with parser to use a pre-built parser, otherwise a simple default parser is created """ global p,subparsers if parser is None: p = argparse.ArgumentParser() else: p = parser arg = p.add_argument subparsers = p.add_subparsers()
python
def init(parser = None): """ module needs to be initialized by 'init'. Can be called with parser to use a pre-built parser, otherwise a simple default parser is created """ global p,subparsers if parser is None: p = argparse.ArgumentParser() else: p = parser arg = p.add_argument subparsers = p.add_subparsers()
[ "def", "init", "(", "parser", "=", "None", ")", ":", "global", "p", ",", "subparsers", "if", "parser", "is", "None", ":", "p", "=", "argparse", ".", "ArgumentParser", "(", ")", "else", ":", "p", "=", "parser", "arg", "=", "p", ".", "add_argument", "subparsers", "=", "p", ".", "add_subparsers", "(", ")" ]
module needs to be initialized by 'init'. Can be called with parser to use a pre-built parser, otherwise a simple default parser is created
[ "module", "needs", "to", "be", "initialized", "by", "init", "." ]
ad90edbecd5f84d7dce09a2bd98e1e9d98140f1e
https://github.com/vivainio/argp/blob/ad90edbecd5f84d7dce09a2bd98e1e9d98140f1e/argp.py#L21-L36
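An end-to-end illustration of the module-level parser/subparsers pattern that `init` above sets up, with one invented subcommand (`dest='command'` is added here for the demo; the record's call omits it):

import argparse

p = argparse.ArgumentParser()
subparsers = p.add_subparsers(dest='command')

greet = subparsers.add_parser('greet')
greet.add_argument('name')

args = p.parse_args(['greet', 'world'])
print(args.command, args.name)   # greet world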
242,273
ncc-tools/python-pa-api
paapi/paapi.py
PaAuth.authenticate
def authenticate(self): """ Authenticates with the PA OAuth system """ if self._auth_token is None or self._token_expiry < time.time(): self._perform_auth() yield self._auth_token
python
def authenticate(self): """ Authenticates with the PA OAuth system """ if self._auth_token is None or self._token_expiry < time.time(): self._perform_auth() yield self._auth_token
[ "def", "authenticate", "(", "self", ")", ":", "if", "self", ".", "_auth_token", "is", "None", "or", "self", ".", "_token_expiry", "<", "time", ".", "time", "(", ")", ":", "self", ".", "_perform_auth", "(", ")", "yield", "self", ".", "_auth_token" ]
Authenticates with the PA OAuth system
[ "Authenticates", "with", "the", "PA", "Oauth", "system" ]
a27481dd323d282d0f4457586198d9faec896f11
https://github.com/ncc-tools/python-pa-api/blob/a27481dd323d282d0f4457586198d9faec896f11/paapi/paapi.py#L65-L72
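The `yield` here, together with the `with self.auth.authenticate() as token:` call in `_query_api` below, suggests the method is wrapped with `contextlib.contextmanager`. A self-contained sketch of that token-caching pattern, with all names and the dummy token invented:

import time
from contextlib import contextmanager

class Auth:
    def __init__(self):
        self._auth_token = None
        self._token_expiry = 0

    def _perform_auth(self):
        # Stand-in for the real OAuth round trip.
        self._auth_token = 'token-123'
        self._token_expiry = time.time() + 3600

    @contextmanager
    def authenticate(self):
        # Refresh only when there is no token or it has expired.
        if self._auth_token is None or self._token_expiry < time.time():
            self._perform_auth()
        yield self._auth_token

with Auth().authenticate() as token:
    print(token)   # token-123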
242,274
ncc-tools/python-pa-api
paapi/paapi.py
PaApi._query_api
def _query_api(self, method, url, fields=None, extra_headers=None, req_body=None): """ Abstracts http queries to the API """ with self.auth.authenticate() as token: logging.debug('PA Authentication returned token %s', token) headers = { 'Authorization': 'Bearer %s' % (token,), 'Realm': self.auth_realm } if extra_headers is not None: headers.update(extra_headers) logging.info('[%s] %s', method, url) if req_body is not None: response = self.http.request(method, url, fields, headers, body=req_body) else: response = self.http.request(method, url, fields, headers) if response.status != 200: print(response.data) logging.warning('Got non-200 HTTP status from API: %d', response.status) raise ApiQueryError("Failed to get API data", response.status) return json.loads(response.data.decode())
python
def _query_api(self, method, url, fields=None, extra_headers=None, req_body=None): """ Abstracts http queries to the API """ with self.auth.authenticate() as token: logging.debug('PA Authentication returned token %s', token) headers = { 'Authorization': 'Bearer %s' % (token,), 'Realm': self.auth_realm } if extra_headers is not None: headers.update(extra_headers) logging.info('[%s] %s', method, url) if req_body is not None: response = self.http.request(method, url, fields, headers, body=req_body) else: response = self.http.request(method, url, fields, headers) if response.status != 200: print(response.data) logging.warning('Got non-200 HTTP status from API: %d', response.status) raise ApiQueryError("Failed to get API data", response.status) return json.loads(response.data.decode())
[ "def", "_query_api", "(", "self", ",", "method", ",", "url", ",", "fields", "=", "None", ",", "extra_headers", "=", "None", ",", "req_body", "=", "None", ")", ":", "with", "self", ".", "auth", ".", "authenticate", "(", ")", "as", "token", ":", "logging", ".", "debug", "(", "'PA Authentication returned token %s'", ",", "token", ")", "headers", "=", "{", "'Authorization'", ":", "'Bearer %s'", "%", "(", "token", ",", ")", ",", "'Realm'", ":", "self", ".", "auth_realm", "}", "if", "extra_headers", "is", "not", "None", ":", "headers", ".", "update", "(", "extra_headers", ")", "logging", ".", "info", "(", "'[%s] %s'", ",", "method", ",", "url", ")", "if", "req_body", "is", "not", "None", ":", "response", "=", "self", ".", "http", ".", "request", "(", "method", ",", "url", ",", "fields", ",", "headers", ",", "body", "=", "req_body", ")", "else", ":", "response", "=", "self", ".", "http", ".", "request", "(", "method", ",", "url", ",", "fields", ",", "headers", ")", "if", "response", ".", "status", "!=", "200", ":", "print", "(", "response", ".", "data", ")", "logging", ".", "warning", "(", "'Got non-200 HTTP status from API: %d'", ",", "response", ".", "status", ")", "raise", "ApiQueryError", "(", "\"Failed to get API data\"", ",", "response", ".", "status", ")", "return", "json", ".", "loads", "(", "response", ".", "data", ".", "decode", "(", ")", ")" ]
Abstracts http queries to the API
[ "Abstracts", "http", "queries", "to", "the", "API" ]
a27481dd323d282d0f4457586198d9faec896f11
https://github.com/ncc-tools/python-pa-api/blob/a27481dd323d282d0f4457586198d9faec896f11/paapi/paapi.py#L121-L143
242,275
ncc-tools/python-pa-api
paapi/paapi.py
PaApi.get_all_jobtemplates
def get_all_jobtemplates(self): """ Retrieves the list of jobTemplates for the current realm. """ endpoint = self._build_url('jobTemplates', { 'paginationPageSize': self.PAGE_SIZE }) data = self._query_api('GET', endpoint) return data['results']
python
def get_all_jobtemplates(self): """ Retrieves the list of jobTemplates for the current realm. """ endpoint = self._build_url('jobTemplates', { 'paginationPageSize': self.PAGE_SIZE }) data = self._query_api('GET', endpoint) return data['results']
[ "def", "get_all_jobtemplates", "(", "self", ")", ":", "endpoint", "=", "self", ".", "_build_url", "(", "'jobTemplates'", ",", "{", "'paginationPageSize'", ":", "self", ".", "PAGE_SIZE", "}", ")", "data", "=", "self", ".", "_query_api", "(", "'GET'", ",", "endpoint", ")", "return", "data", "[", "'results'", "]" ]
Retrieves the list of jobTemplates for the current realm.
[ "Retrieves", "the", "list", "of", "jobTemplates", "for", "the", "current", "realm", "." ]
a27481dd323d282d0f4457586198d9faec896f11
https://github.com/ncc-tools/python-pa-api/blob/a27481dd323d282d0f4457586198d9faec896f11/paapi/paapi.py#L151-L159
242,276
ncc-tools/python-pa-api
paapi/paapi.py
PaApi.create_job_template
def create_job_template(self, template): """ Creates a job template """ endpoint = self._build_url('jobTemplates') data = self._query_api('POST', endpoint, None, {'Content-Type': 'application/json'}, json.dumps(template)) return data['results']
python
def create_job_template(self, template): """ Creates a job template """ endpoint = self._build_url('jobTemplates') data = self._query_api('POST', endpoint, None, {'Content-Type': 'application/json'}, json.dumps(template)) return data['results']
[ "def", "create_job_template", "(", "self", ",", "template", ")", ":", "endpoint", "=", "self", ".", "_build_url", "(", "'jobTemplates'", ")", "data", "=", "self", ".", "_query_api", "(", "'POST'", ",", "endpoint", ",", "None", ",", "{", "'Content-Type'", ":", "'application/json'", "}", ",", "json", ".", "dumps", "(", "template", ")", ")", "return", "data", "[", "'results'", "]" ]
Creates a job template
[ "Creates", "a", "job", "template" ]
a27481dd323d282d0f4457586198d9faec896f11
https://github.com/ncc-tools/python-pa-api/blob/a27481dd323d282d0f4457586198d9faec896f11/paapi/paapi.py#L182-L192
242,277
ncc-tools/python-pa-api
paapi/paapi.py
PaApi.create_job
def create_job(self, job_template_uri): """ Creates a job """ endpoint = self._build_url('jobs') data = self._query_api('POST', endpoint, None, {'Content-Type': 'application/json'}, json.dumps({'jobTemplateUri': job_template_uri})) return data['results']
python
def create_job(self, job_template_uri): """ Creates a job """ endpoint = self._build_url('jobs') data = self._query_api('POST', endpoint, None, {'Content-Type': 'application/json'}, json.dumps({'jobTemplateUri': job_template_uri})) return data['results']
[ "def", "create_job", "(", "self", ",", "job_template_uri", ")", ":", "endpoint", "=", "self", ".", "_build_url", "(", "'jobs'", ")", "data", "=", "self", ".", "_query_api", "(", "'POST'", ",", "endpoint", ",", "None", ",", "{", "'Content-Type'", ":", "'application/json'", "}", ",", "json", ".", "dumps", "(", "{", "'jobTemplateUri'", ":", "job_template_uri", "}", ")", ")", "return", "data", "[", "'results'", "]" ]
Creates a job
[ "Creates", "a", "job" ]
a27481dd323d282d0f4457586198d9faec896f11
https://github.com/ncc-tools/python-pa-api/blob/a27481dd323d282d0f4457586198d9faec896f11/paapi/paapi.py#L193-L203
242,278
wdbm/datavision
datavision.py
normalize_to_range
def normalize_to_range( values, minimum = 0.0, maximum = 1.0 ): """ This function normalizes the values of a list to a specified range. """ normalized_values = [] minimum_value = min(values) maximum_value = max(values) for value in values: numerator = value - minimum_value denominator = maximum_value - minimum_value value_normalized = (maximum - minimum) * numerator/denominator + minimum normalized_values.append(value_normalized) return normalized_values
python
def normalize_to_range( values, minimum = 0.0, maximum = 1.0 ): """ This function normalizes the values of a list to a specified range. """ normalized_values = [] minimum_value = min(values) maximum_value = max(values) for value in values: numerator = value - minimum_value denominator = maximum_value - minimum_value value_normalized = (maximum - minimum) * numerator/denominator + minimum normalized_values.append(value_normalized) return normalized_values
[ "def", "normalize_to_range", "(", "values", ",", "minimum", "=", "0.0", ",", "maximum", "=", "1.0", ")", ":", "normalized_values", "=", "[", "]", "minimum_value", "=", "min", "(", "values", ")", "maximum_value", "=", "max", "(", "values", ")", "for", "value", "in", "values", ":", "numerator", "=", "value", "-", "minimum_value", "denominator", "=", "maximum_value", "-", "minimum_value", "value_normalized", "=", "(", "maximum", "-", "minimum", ")", "*", "numerator", "/", "denominator", "+", "minimum", "normalized_values", ".", "append", "(", "value_normalized", ")", "return", "normalized_values" ]
This function normalizes the values of a list to a specified range.
[ "This", "function", "normalizes", "values", "of", "a", "list", "to", "a", "specified", "range", "and", "returns", "the", "original", "object", "if", "the", "values", "are", "not", "of", "the", "types", "integer", "or", "float", "." ]
b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5
https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L1206-L1225
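A worked instance of the min-max scaling in `normalize_to_range` above, v' = (maximum - minimum) * (v - v_min) / (v_max - v_min) + minimum; note the shown code divides by zero when all values are equal.

values = [2.0, 4.0, 6.0]
lo, hi = 0.0, 1.0
v_min, v_max = min(values), max(values)
print([(hi - lo) * (v - v_min) / (v_max - v_min) + lo for v in values])
# [0.0, 0.5, 1.0]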
242,279
wdbm/datavision
datavision.py
list_element_combinations_variadic
def list_element_combinations_variadic( elements_specification ): """ This function accepts a specification of lists of elements for each place in lists in the form of a list, the elements of which are lists of possible elements and returns a list of lists corresponding to the combinations of elements of the specification with varying numbers of elements. For example, the list elements specification [[10, 20], [30, 40], [50, 60]] yields the following lists: [10] [20] [10, 30] [10, 40] [20, 30] [20, 40] [10, 30, 50] [10, 30, 60] [10, 40, 50] [10, 40, 60] [20, 30, 50] [20, 30, 60] [20, 40, 50] [20, 40, 60] """ lists = [list(list_generated) for index, element_specification in enumerate(elements_specification) for list_generated in itertools.product(*elements_specification[:index + 1])] return lists
python
def list_element_combinations_variadic( elements_specification ): """ This function accepts a specification of lists of elements for each place in lists in the form of a list, the elements of which are lists of possible elements and returns a list of lists corresponding to the combinations of elements of the specification with varying numbers of elements. For example, the list elements specification [[10, 20], [30, 40], [50, 60]] yields the following lists: [10] [20] [10, 30] [10, 40] [20, 30] [20, 40] [10, 30, 50] [10, 30, 60] [10, 40, 50] [10, 40, 60] [20, 30, 50] [20, 30, 60] [20, 40, 50] [20, 40, 60] """ lists = [list(list_generated) for index, element_specification in enumerate(elements_specification) for list_generated in itertools.product(*elements_specification[:index + 1])] return lists
[ "def", "list_element_combinations_variadic", "(", "elements_specification", ")", ":", "lists", "=", "[", "list", "(", "list_generated", ")", "for", "index", ",", "element_specification", "in", "enumerate", "(", "elements_specification", ")", "for", "list_generated", "in", "itertools", ".", "product", "(", "*", "elements_specification", "[", ":", "index", "+", "1", "]", ")", "]", "return", "lists" ]
This function accepts a specification of lists of elements for each place in lists in the form of a list, the elements of which are lists of possible elements and returns a list of lists corresponding to the combinations of elements of the specification with varying numbers of elements. For example, the list elements specification [[10, 20], [30, 40], [50, 60]] yields the following lists: [10] [20] [10, 30] [10, 40] [20, 30] [20, 40] [10, 30, 50] [10, 30, 60] [10, 40, 50] [10, 40, 60] [20, 30, 50] [20, 30, 60] [20, 40, 50] [20, 40, 60]
[ "This", "function", "accepts", "a", "specification", "of", "lists", "of", "elements", "for", "each", "place", "in", "lists", "in", "the", "form", "of", "a", "list", "the", "elements", "of", "which", "are", "lists", "of", "possible", "elements", "and", "returns", "a", "list", "of", "lists", "corresponding", "to", "the", "combinations", "of", "elements", "of", "the", "specification", "with", "varying", "numbers", "of", "elements", "." ]
b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5
https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L1263-L1293
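The growing-prefix products from the record above, reproduced directly with `itertools.product`; each pass extends the prefix of the specification by one place.

import itertools

spec = [[10, 20], [30, 40], [50, 60]]
lists = [list(combo)
         for i in range(len(spec))
         for combo in itertools.product(*spec[:i + 1])]
print(lists[:6])   # [[10], [20], [10, 30], [10, 40], [20, 30], [20, 40]]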
242,280
wdbm/datavision
datavision.py
correlation_linear
def correlation_linear( values_1, values_2, printout = None ): """ This function calculates the Pearson product-moment correlation coefficient. This is a measure of the linear correlation of two variables. The value can be between +1 and -1 inclusive, where 1 is total positive correlation, 0 is no correlation and -1 is total negative correlation. It is a measure of the linear dependence between two variables. This function also calculates the significance (2-tailed p-value) of the correlation coefficient given the sample size. """ r, p_value = scipy.stats.pearsonr(values_1, values_2) if printout is not True: return r, p_value else: text = ( "Pearson linear correlation coefficient: {r}\n" "2-tailed p-value: {p_value}" ).format( r = r, p_value = p_value ) return text
python
def correlation_linear( values_1, values_2, printout = None ): """ This function calculates the Pearson product-moment correlation coefficient. This is a measure of the linear correlation of two variables. The value can be between +1 and -1 inclusive, where 1 is total positive correlation, 0 is no correlation and -1 is total negative correlation. It is a measure of the linear dependence between two variables. This function also calculates the significance (2-tailed p-value) of the correlation coefficient given the sample size. """ r, p_value = scipy.stats.pearsonr(values_1, values_2) if printout is not True: return r, p_value else: text = ( "Pearson linear correlation coefficient: {r}\n" "2-tailed p-value: {p_value}" ).format( r = r, p_value = p_value ) return text
[ "def", "correlation_linear", "(", "values_1", ",", "values_2", ",", "printout", "=", "None", ")", ":", "r", ",", "p_value", "=", "scipy", ".", "stats", ".", "pearsonr", "(", "values_1", ",", "values_2", ")", "if", "printout", "is", "not", "True", ":", "return", "r", ",", "p_value", "else", ":", "text", "=", "(", "\"Pearson linear correlation coefficient: {r}\\n\"", "\"2-tailed p-value: {p_value}\"", ")", ".", "format", "(", "r", "=", "r", ",", "p_value", "=", "p_value", ")", "return", "text" ]
This function calculates the Pearson product-moment correlation coefficient. This is a measure of the linear correlation of two variables. The value can be between +1 and -1 inclusive, where 1 is total positive correlation, 0 is no correlation and -1 is total negative correlation. It is a measure of the linear dependence between two variables. This function also calculates the significance (2-tailed p-value) of the correlation coefficient given the sample size.
[ "This", "function", "calculates", "the", "Pearson", "product", "-", "moment", "correlation", "coefficient", ".", "This", "is", "a", "measure", "of", "the", "linear", "collelation", "of", "two", "variables", ".", "The", "value", "can", "be", "between", "+", "1", "and", "-", "1", "inclusive", "where", "1", "is", "total", "positive", "correlation", "0", "is", "no", "correlation", "and", "-", "1", "is", "total", "negative", "correlation", ".", "It", "is", "a", "measure", "of", "the", "linear", "dependence", "between", "two", "variables", "." ]
b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5
https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L1295-L1323
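A quick check of the Pearson r computed by `correlation_linear` above, using SciPy on a perfectly linear (invented) sample:

import scipy.stats

r, p_value = scipy.stats.pearsonr([1, 2, 3, 4], [2, 4, 6, 8])
print(r)   # 1.0, i.e. total positive linear correlation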
242,281
wdbm/datavision
datavision.py
propose_number_of_bins
def propose_number_of_bins( values, binning_logic_system = None, ): """ This function returns a proposal for binning for a histogram of a specified list using an optional specified binning logic system. Freedman-Diaconis: bin width is proportional to the interquartile range of the data divided by the cube root of the size of the data Scott: bin width is proportional to the standard deviation of the values divided by the cube root of the size of the data """ # Set the default binning logic system. if binning_logic_system is None: binning_logic_system = "Scott" # Engage the requested logic system. if binning_logic_system == "Freedman-Diaconis": #log.debug("engage Freedman-Diaconis binning logic") bin_size =\ 2 * interquartile_range(values) * \ len(values) ** (-1/3) elif binning_logic_system == "Scott": #log.debug("engage Scott binning logic") bin_size =\ 3.5 * standard_deviation(values) * \ len(values) ** (-1/3) else: log.error("undefined binning logic system requested") raise(ValueError) number_of_bins = (max(values) - min(values)) / bin_size if numpy.isinf(number_of_bins) or numpy.isnan(number_of_bins): number_of_bins = len(set(values)) # number of unique values #log.debug( # "binning algorithms ineffective -- " + # "propose binning by unique values" #) return int(round(number_of_bins))
python
def propose_number_of_bins( values, binning_logic_system = None, ): """ This function returns a proposal for binning for a histogram of a specified list using an optional specified binning logic system. Freedman-Diaconis: bin width is proportional to the interquartile range of the data divided by the cube root of the size of the data Scott: bin width is proportional to the standard deviation of the values divided by the cube root of the size of the data """ # Set the default binning logic system. if binning_logic_system is None: binning_logic_system = "Scott" # Engage the requested logic system. if binning_logic_system == "Freedman-Diaconis": #log.debug("engage Freedman-Diaconis binning logic") bin_size =\ 2 * interquartile_range(values) * \ len(values) ** (-1/3) elif binning_logic_system == "Scott": #log.debug("engage Scott binning logic") bin_size =\ 3.5 * standard_deviation(values) * \ len(values) ** (-1/3) else: log.error("undefined binning logic system requested") raise(ValueError) number_of_bins = (max(values) - min(values)) / bin_size if numpy.isinf(number_of_bins) or numpy.isnan(number_of_bins): number_of_bins = len(set(values)) # number of unique values #log.debug( # "binning algorithms ineffective -- " + # "propose binning by unique values" #) return int(round(number_of_bins))
[ "def", "propose_number_of_bins", "(", "values", ",", "binning_logic_system", "=", "None", ",", ")", ":", "# Set the default binning logic system.", "if", "binning_logic_system", "is", "None", ":", "binning_logic_system", "=", "\"Scott\"", "# Engage the requested logic system.", "if", "binning_logic_system", "==", "\"Freedman-Diaconis\"", ":", "#log.debug(\"engage Freedman-Diaconis binning logic\")", "bin_size", "=", "2", "*", "interquartile_range", "(", "values", ")", "*", "len", "(", "values", ")", "**", "(", "-", "1", "/", "3", ")", "elif", "binning_logic_system", "==", "\"Scott\"", ":", "#log.debug(\"engage Scott binning logic\")", "bin_size", "=", "3.5", "*", "standard_deviation", "(", "values", ")", "*", "len", "(", "values", ")", "**", "(", "-", "1", "/", "3", ")", "else", ":", "log", ".", "error", "(", "\"undefined binning logic system requested\"", ")", "raise", "(", "ValueError", ")", "number_of_bins", "=", "(", "max", "(", "values", ")", "-", "min", "(", "values", ")", ")", "/", "bin_size", "if", "numpy", ".", "isinf", "(", "number_of_bins", ")", "or", "numpy", ".", "isnan", "(", "number_of_bins", ")", ":", "number_of_bins", "=", "len", "(", "set", "(", "values", ")", ")", "# number of unique values", "#log.debug(", "# \"binning algorithms ineffective -- \" +", "# \"propose binning by unique values\"", "#)", "return", "int", "(", "round", "(", "number_of_bins", ")", ")" ]
This function returns a proposal for binning for a histogram of a specified list using an optional specified binning logic system. Freedman-Diaconis: bin width is proportional to the interquartile range of the data divided by the cube root of the size of the data Scott: bin width is proportional to the standard deviation of the values divided by the cube root of the size of the data
[ "This", "function", "returns", "a", "proposal", "for", "binning", "for", "a", "histogram", "of", "a", "specified", "list", "using", "an", "optional", "specified", "binning", "logic", "system", "." ]
b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5
https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L1401-L1442
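The two bin-width rules used above, written out for a sample of size n: Freedman-Diaconis sets h = 2 * IQR(x) * n ** (-1/3) and Scott sets h = 3.5 * std(x) * n ** (-1/3); the bin count is then (max(x) - min(x)) / h. A sketch of the Scott branch on invented data:

import numpy

x = numpy.random.normal(size=1000)
h = 3.5 * x.std() * len(x) ** (-1 / 3)            # Scott rule bin width
print(int(round((x.max() - x.min()) / h)))        # proposed number of bins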
242,282
wdbm/datavision
datavision.py
TTYFigureData.extent
def extent(self): """ return range of 2D data """ return [min(self.x), max(self.x), min(self.y), max(self.y)]
python
def extent(self): """ return range of 2D data """ return [min(self.x), max(self.x), min(self.y), max(self.y)]
[ "def", "extent", "(", "self", ")", ":", "return", "[", "min", "(", "self", ".", "x", ")", ",", "max", "(", "self", ".", "x", ")", ",", "min", "(", "self", ".", "y", ")", ",", "max", "(", "self", ".", "y", ")", "]" ]
return range of 2D data
[ "return", "range", "of", "2D", "data" ]
b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5
https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L2030-L2036
242,283
wdbm/datavision
datavision.py
TTYFigure._get_symbol_by_slope
def _get_symbol_by_slope( self, slope, default_symbol ): """ return line oriented approximately along the slope value """ if slope > math.tan(3 * math.pi / 8): draw_symbol = "|" elif math.tan(math.pi / 8) < slope < math.tan(3 * math.pi / 8): draw_symbol = u"\u27cb" # "/" elif abs(slope) < math.tan(math.pi / 8): draw_symbol = "-" elif slope < math.tan(-math.pi / 8) and\ slope > math.tan(-3 * math.pi / 8): draw_symbol = u"\u27CD" # "\\" elif slope < math.tan(-3 * math.pi / 8): draw_symbol = "|" else: draw_symbol = default_symbol return draw_symbol
python
def _get_symbol_by_slope( self, slope, default_symbol ): """ return line oriented approximately along the slope value """ if slope > math.tan(3 * math.pi / 8): draw_symbol = "|" elif math.tan(math.pi / 8) < slope < math.tan(3 * math.pi / 8): draw_symbol = u"\u27cb" # "/" elif abs(slope) < math.tan(math.pi / 8): draw_symbol = "-" elif slope < math.tan(-math.pi / 8) and\ slope > math.tan(-3 * math.pi / 8): draw_symbol = u"\u27CD" # "\\" elif slope < math.tan(-3 * math.pi / 8): draw_symbol = "|" else: draw_symbol = default_symbol return draw_symbol
[ "def", "_get_symbol_by_slope", "(", "self", ",", "slope", ",", "default_symbol", ")", ":", "if", "slope", ">", "math", ".", "tan", "(", "3", "*", "math", ".", "pi", "/", "8", ")", ":", "draw_symbol", "=", "\"|\"", "elif", "math", ".", "tan", "(", "math", ".", "pi", "/", "8", ")", "<", "slope", "<", "math", ".", "tan", "(", "3", "*", "math", ".", "pi", "/", "8", ")", ":", "draw_symbol", "=", "u\"\\u27cb\"", "# \"/\"", "elif", "abs", "(", "slope", ")", "<", "math", ".", "tan", "(", "math", ".", "pi", "/", "8", ")", ":", "draw_symbol", "=", "\"-\"", "elif", "slope", "<", "math", ".", "tan", "(", "-", "math", ".", "pi", "/", "8", ")", "and", "slope", ">", "math", ".", "tan", "(", "-", "3", "*", "math", ".", "pi", "/", "8", ")", ":", "draw_symbol", "=", "u\"\\u27CD\"", "# \"\\\\\"", "elif", "slope", "<", "math", ".", "tan", "(", "-", "3", "*", "math", ".", "pi", "/", "8", ")", ":", "draw_symbol", "=", "\"|\"", "else", ":", "draw_symbol", "=", "default_symbol", "return", "draw_symbol" ]
return line oriented approximately along the slope value
[ "return", "line", "oriented", "approximatively", "along", "the", "slope", "value" ]
b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5
https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L2124-L2147
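The thresholds in the record above are tan(pi/8) ~= 0.414 and tan(3*pi/8) ~= 2.414, which split slope directions into 45-degree sectors centred on horizontal, diagonal, and vertical. A standalone re-derivation of the bucket logic (not the class method itself):

import math

for angle_deg in (0, 30, 60, 85):
    slope = math.tan(math.radians(angle_deg))
    if abs(slope) > math.tan(3 * math.pi / 8):
        symbol = "|"   # steeper than 67.5 degrees: vertical
    elif abs(slope) > math.tan(math.pi / 8):
        symbol = "/" if slope > 0 else "\\"  # between 22.5 and 67.5 degrees
    else:
        symbol = "-"   # flatter than 22.5 degrees: horizontal
    print("{0:2d} deg -> {1}".format(angle_deg, symbol))

This prints "-", "/", "/", "|" for the four sample angles.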
242,284
wdbm/datavision
datavision.py
TTYCanvas.limit_x
def limit_x( self, limit_lower = None, # float limit_upper = None # float ): """ get or set x limits of the current axes x_min, x_max = limit_x() # return the current limit_x limit_x(x_min, x_max) # set the limit_x to x_min, x_max """ if limit_lower is None and limit_upper is None: return self._limit_x elif hasattr(limit_lower, "__iter__"): self._limit_x = limit_lower[:2] else: self._limit_x = [limit_lower, limit_upper] if self._limit_x[0] == self._limit_x[1]: self._limit_x[1] += 1 self._limit_x[0] -= self.mod_x self._limit_x[1] += self.mod_x
python
def limit_x( self, limit_lower = None, # float limit_upper = None # float ): """ get or set x limits of the current axes x_min, x_max = limit_x() # return the current limit_x limit_x(x_min, x_max) # set the limit_x to x_min, x_max """ if limit_lower is None and limit_upper is None: return self._limit_x elif hasattr(limit_lower, "__iter__"): self._limit_x = limit_lower[:2] else: self._limit_x = [limit_lower, limit_upper] if self._limit_x[0] == self._limit_x[1]: self._limit_x[1] += 1 self._limit_x[0] -= self.mod_x self._limit_x[1] += self.mod_x
[ "def", "limit_x", "(", "self", ",", "limit_lower", "=", "None", ",", "# float", "limit_upper", "=", "None", "# float", ")", ":", "if", "limit_lower", "is", "None", "and", "limit_upper", "is", "None", ":", "return", "self", ".", "_limit_x", "elif", "hasattr", "(", "limit_lower", ",", "\"__iter__\"", ")", ":", "self", ".", "_limit_x", "=", "limit_lower", "[", ":", "2", "]", "else", ":", "self", ".", "_limit_x", "=", "[", "limit_lower", ",", "limit_upper", "]", "if", "self", ".", "_limit_x", "[", "0", "]", "==", "self", ".", "_limit_x", "[", "1", "]", ":", "self", ".", "_limit_x", "[", "1", "]", "+=", "1", "self", ".", "_limit_x", "[", "0", "]", "-=", "self", ".", "mod_x", "self", ".", "_limit_x", "[", "1", "]", "+=", "self", ".", "mod_x" ]
get or set x limits of the current axes x_min, x_max = limit_x() # return the current limit_x limit_x(x_min, x_max) # set the limit_x to x_min, x_max
[ "get", "or", "set", "x", "limits", "of", "the", "current", "axes" ]
b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5
https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L2538-L2560
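limit_x follows the matplotlib-style combined getter/setter convention: with no arguments it returns the stored pair, otherwise it stores one. Note that the setter pads both ends by self.mod_x and widens a degenerate range, so a subsequent get does not return the exact values passed in. A usage sketch against a hypothetical canvas instance:

# canvas is assumed to be an initialised TTYCanvas
canvas.limit_x(0.0, 10.0)            # set lower and upper limits
canvas.limit_x([0.0, 10.0, 99.0])    # iterable form; only the first two values are used
x_min, x_max = canvas.limit_x()      # getter; values include the mod_x padding

The limit_y record that follows implements the same pattern for the y axis.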
242,285
wdbm/datavision
datavision.py
TTYCanvas.limit_y
def limit_y( self, limit_lower = None, limit_upper = None ): """ get or set y limits of the current axes y_min, y_max = limit_y() # return the current limit_y limit_y(y_min, y_max) # set the limit_y to y_min, y_max """ if limit_lower is None and limit_upper is None: return self._limit_y elif hasattr(limit_lower, "__iter__"): self._limit_y = limit_lower[:2] else: self._limit_y = [limit_lower, limit_upper] if self._limit_y[0] == self._limit_y[1]: self._limit_y[1] += 1 self._limit_y[0] -= self.mod_y self._limit_y[1] += self.mod_y
python
def limit_y( self, limit_lower = None, limit_upper = None ): """ get or set y limits of the current axes y_min, y_max = limit_y() # return the current limit_y limit_y(y_min, y_max) # set the limit_y to y_min, y_max """ if limit_lower is None and limit_upper is None: return self._limit_y elif hasattr(limit_lower, "__iter__"): self._limit_y = limit_lower[:2] else: self._limit_y = [limit_lower, limit_upper] if self._limit_y[0] == self._limit_y[1]: self._limit_y[1] += 1 self._limit_y[0] -= self.mod_y self._limit_y[1] += self.mod_y
[ "def", "limit_y", "(", "self", ",", "limit_lower", "=", "None", ",", "limit_upper", "=", "None", ")", ":", "if", "limit_lower", "is", "None", "and", "limit_upper", "is", "None", ":", "return", "self", ".", "_limit_y", "elif", "hasattr", "(", "limit_lower", ",", "\"__iter__\"", ")", ":", "self", ".", "_limit_y", "=", "limit_lower", "[", ":", "2", "]", "else", ":", "self", ".", "_limit_y", "=", "[", "limit_lower", ",", "limit_upper", "]", "if", "self", ".", "_limit_y", "[", "0", "]", "==", "self", ".", "_limit_y", "[", "1", "]", ":", "self", ".", "_limit_y", "[", "1", "]", "+=", "1", "self", ".", "_limit_y", "[", "0", "]", "-=", "self", ".", "mod_y", "self", ".", "_limit_y", "[", "1", "]", "+=", "self", ".", "mod_y" ]
get or set y limits of the current axes y_min, y_max = limit_y() # return the current limit_y limit_y(y_min, y_max) # set the limit_y to y_min, y_max
[ "get", "or", "set", "y", "limits", "of", "the", "current", "axes" ]
b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5
https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L2562-L2584
242,286
wdbm/datavision
datavision.py
TTYCanvas._clip_line
def _clip_line( self, line_pt_1, line_pt_2 ): """ clip line to canvas """ x_min = min(line_pt_1[0], line_pt_2[0]) x_max = max(line_pt_1[0], line_pt_2[0]) y_min = min(line_pt_1[1], line_pt_2[1]) y_max = max(line_pt_1[1], line_pt_2[1]) extent = self.extent() if line_pt_1[0] == line_pt_2[0]: return ( (line_pt_1[0], max(y_min, extent[1])), (line_pt_1[0], min(y_max, extent[3])) ) if line_pt_1[1] == line_pt_2[1]: return ( (max(x_min, extent[0]), line_pt_1[1]), (min(x_max, extent[2]), line_pt_1[1]) ) if ((extent[0] <= line_pt_1[0] < extent[2]) and (extent[1] <= line_pt_1[1] < extent[3]) and (extent[0] <= line_pt_2[0] < extent[2]) and (extent[1] <= line_pt_2[1] < extent[3])): return line_pt_1, line_pt_2 ts = [0.0, 1.0, float(extent[0] - line_pt_1[0]) / (line_pt_2[0] - line_pt_1[0]), float(extent[2] - line_pt_1[0]) / (line_pt_2[0] - line_pt_1[0]), float(extent[1] - line_pt_1[1]) / (line_pt_2[1] - line_pt_1[1]), float(extent[3] - line_pt_1[1]) / (line_pt_2[1] - line_pt_1[1]) ] ts.sort() if (ts[2] < 0) or (ts[2] >= 1) or (ts[3] < 0) or (ts[3] >= 1): return None result =\ [(pt_1 + t * (pt_2 - pt_1))\ for t in (ts[2], ts[3])\ for (pt_1, pt_2) in zip(line_pt_1, line_pt_2)] return (result[:2], result[2:])
python
def _clip_line( self, line_pt_1, line_pt_2 ): """ clip line to canvas """ x_min = min(line_pt_1[0], line_pt_2[0]) x_max = max(line_pt_1[0], line_pt_2[0]) y_min = min(line_pt_1[1], line_pt_2[1]) y_max = max(line_pt_1[1], line_pt_2[1]) extent = self.extent() if line_pt_1[0] == line_pt_2[0]: return ( (line_pt_1[0], max(y_min, extent[1])), (line_pt_1[0], min(y_max, extent[3])) ) if line_pt_1[1] == line_pt_2[1]: return ( (max(x_min, extent[0]), line_pt_1[1]), (min(x_max, extent[2]), line_pt_1[1]) ) if ((extent[0] <= line_pt_1[0] < extent[2]) and (extent[1] <= line_pt_1[1] < extent[3]) and (extent[0] <= line_pt_2[0] < extent[2]) and (extent[1] <= line_pt_2[1] < extent[3])): return line_pt_1, line_pt_2 ts = [0.0, 1.0, float(extent[0] - line_pt_1[0]) / (line_pt_2[0] - line_pt_1[0]), float(extent[2] - line_pt_1[0]) / (line_pt_2[0] - line_pt_1[0]), float(extent[1] - line_pt_1[1]) / (line_pt_2[1] - line_pt_1[1]), float(extent[3] - line_pt_1[1]) / (line_pt_2[1] - line_pt_1[1]) ] ts.sort() if (ts[2] < 0) or (ts[2] >= 1) or (ts[3] < 0) or (ts[3] >= 1): return None result =\ [(pt_1 + t * (pt_2 - pt_1))\ for t in (ts[2], ts[3])\ for (pt_1, pt_2) in zip(line_pt_1, line_pt_2)] return (result[:2], result[2:])
[ "def", "_clip_line", "(", "self", ",", "line_pt_1", ",", "line_pt_2", ")", ":", "x_min", "=", "min", "(", "line_pt_1", "[", "0", "]", ",", "line_pt_2", "[", "0", "]", ")", "x_max", "=", "max", "(", "line_pt_1", "[", "0", "]", ",", "line_pt_2", "[", "0", "]", ")", "y_min", "=", "min", "(", "line_pt_1", "[", "1", "]", ",", "line_pt_2", "[", "1", "]", ")", "y_max", "=", "max", "(", "line_pt_1", "[", "1", "]", ",", "line_pt_2", "[", "1", "]", ")", "extent", "=", "self", ".", "extent", "(", ")", "if", "line_pt_1", "[", "0", "]", "==", "line_pt_2", "[", "0", "]", ":", "return", "(", "(", "line_pt_1", "[", "0", "]", ",", "max", "(", "y_min", ",", "extent", "[", "1", "]", ")", ")", ",", "(", "line_pt_1", "[", "0", "]", ",", "min", "(", "y_max", ",", "extent", "[", "3", "]", ")", ")", ")", "if", "line_pt_1", "[", "1", "]", "==", "line_pt_2", "[", "1", "]", ":", "return", "(", "(", "max", "(", "x_min", ",", "extent", "[", "0", "]", ")", ",", "line_pt_1", "[", "1", "]", ")", ",", "(", "min", "(", "x_max", ",", "extent", "[", "2", "]", ")", ",", "line_pt_1", "[", "1", "]", ")", ")", "if", "(", "(", "extent", "[", "0", "]", "<=", "line_pt_1", "[", "0", "]", "<", "extent", "[", "2", "]", ")", "and", "(", "extent", "[", "1", "]", "<=", "line_pt_1", "[", "1", "]", "<", "extent", "[", "3", "]", ")", "and", "(", "extent", "[", "0", "]", "<=", "line_pt_2", "[", "0", "]", "<", "extent", "[", "2", "]", ")", "and", "(", "extent", "[", "1", "]", "<=", "line_pt_2", "[", "1", "]", "<", "extent", "[", "3", "]", ")", ")", ":", "return", "line_pt_1", ",", "line_pt_2", "ts", "=", "[", "0.0", ",", "1.0", ",", "float", "(", "extent", "[", "0", "]", "-", "line_pt_1", "[", "0", "]", ")", "/", "(", "line_pt_2", "[", "0", "]", "-", "line_pt_1", "[", "0", "]", ")", ",", "float", "(", "extent", "[", "2", "]", "-", "line_pt_1", "[", "0", "]", ")", "/", "(", "line_pt_2", "[", "0", "]", "-", "line_pt_1", "[", "0", "]", ")", ",", "float", "(", "extent", "[", "1", "]", "-", "line_pt_1", "[", "1", "]", ")", "/", "(", "line_pt_2", "[", "1", "]", "-", "line_pt_1", "[", "1", "]", ")", ",", "float", "(", "extent", "[", "3", "]", "-", "line_pt_1", "[", "1", "]", ")", "/", "(", "line_pt_2", "[", "1", "]", "-", "line_pt_1", "[", "1", "]", ")", "]", "ts", ".", "sort", "(", ")", "if", "(", "ts", "[", "2", "]", "<", "0", ")", "or", "(", "ts", "[", "2", "]", ">=", "1", ")", "or", "(", "ts", "[", "3", "]", "<", "0", ")", "or", "(", "ts", "[", "2", "]", ">=", "1", ")", ":", "return", "None", "result", "=", "[", "(", "pt_1", "+", "t", "*", "(", "pt_2", "-", "pt_1", ")", ")", "for", "t", "in", "(", "ts", "[", "2", "]", ",", "ts", "[", "3", "]", ")", "for", "(", "pt_1", ",", "pt_2", ")", "in", "zip", "(", "line_pt_1", ",", "line_pt_2", ")", "]", "return", "(", "result", "[", ":", "2", "]", ",", "result", "[", "2", ":", "]", ")" ]
clip line to canvas
[ "clip", "line", "to", "canvas" ]
b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5
https://github.com/wdbm/datavision/blob/b6f26287264632d6f8c9f8911aaf3a8e4fc4dcf5/datavision.py#L2710-L2762
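_clip_line is a parametric (Liang-Barsky-style) clip: each boundary crossing of the segment p1 + t * (p2 - p1) yields a t value, and after sorting those four crossings together with the sentinels 0.0 and 1.0, the middle pair ts[2], ts[3] delimits the visible portion. Note the rejection test in the record above originally checked ts[2] >= 1 twice and never ts[3]; the fourth condition has been corrected to ts[3] >= 1. A hedged, self-contained sketch of the same parametric idea (not the datavision code; the rectangle ordering x_min, y_min, x_max, y_max is an assumption of this sketch):

def clip_parametric(p1, p2, rect):
    """Clip segment p1->p2 to rect = (x_min, y_min, x_max, y_max); Liang-Barsky."""
    (x1, y1), (x2, y2) = p1, p2
    t0, t1 = 0.0, 1.0
    for p, q in (
        (-(x2 - x1), x1 - rect[0]),  # left edge
        (x2 - x1, rect[2] - x1),     # right edge
        (-(y2 - y1), y1 - rect[1]),  # bottom edge
        (y2 - y1, rect[3] - y1),     # top edge
    ):
        if p == 0:
            if q < 0:
                return None          # parallel to this edge and outside it
            continue
        t = q / p
        if p < 0:
            t0 = max(t0, t)          # entering the rectangle
        else:
            t1 = min(t1, t)          # leaving the rectangle
        if t0 > t1:
            return None              # segment misses the rectangle
    return (
        (x1 + t0 * (x2 - x1), y1 + t0 * (y2 - y1)),
        (x1 + t1 * (x2 - x1), y1 + t1 * (y2 - y1)),
    )

For example, clip_parametric((0, 0), (10, 0), (2, -1, 5, 1)) returns ((2.0, 0.0), (5.0, 0.0)).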
242,287
atdt/afraid
afraid/__init__.py
get_dyndns_records
def get_dyndns_records(login, password): """Gets the set of dynamic DNS records associated with this account""" params = dict(action='getdyndns', sha=get_auth_key(login, password)) response = requests.get('http://freedns.afraid.org/api/', params=params, timeout=timeout) raw_records = (line.split('|') for line in response.content.split()) try: records = frozenset(DnsRecord(*record) for record in raw_records) except TypeError: raise ApiError("Couldn't parse the server's response", response.content) return records
python
def get_dyndns_records(login, password): """Gets the set of dynamic DNS records associated with this account""" params = dict(action='getdyndns', sha=get_auth_key(login, password)) response = requests.get('http://freedns.afraid.org/api/', params=params, timeout=timeout) raw_records = (line.split('|') for line in response.content.split()) try: records = frozenset(DnsRecord(*record) for record in raw_records) except TypeError: raise ApiError("Couldn't parse the server's response", response.content) return records
[ "def", "get_dyndns_records", "(", "login", ",", "password", ")", ":", "params", "=", "dict", "(", "action", "=", "'getdyndns'", ",", "sha", "=", "get_auth_key", "(", "login", ",", "password", ")", ")", "response", "=", "requests", ".", "get", "(", "'http://freedns.afraid.org/api/'", ",", "params", "=", "params", ",", "timeout", "=", "timeout", ")", "raw_records", "=", "(", "line", ".", "split", "(", "'|'", ")", "for", "line", "in", "response", ".", "content", ".", "split", "(", ")", ")", "try", ":", "records", "=", "frozenset", "(", "DnsRecord", "(", "*", "record", ")", "for", "record", "in", "raw_records", ")", "except", "TypeError", ":", "raise", "ApiError", "(", "\"Couldn't parse the server's response\"", ",", "response", ".", "content", ")", "return", "records" ]
Gets the set of dynamic DNS records associated with this account
[ "Gets", "the", "set", "of", "dynamic", "DNS", "records", "associated", "with", "this", "account" ]
d74b2d4e41ed14e420da2793a89bef5d9b26ea26
https://github.com/atdt/afraid/blob/d74b2d4e41ed14e420da2793a89bef5d9b26ea26/afraid/__init__.py#L91-L103
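The parser above assumes the getdyndns endpoint returns one whitespace-delimited record per line with |-separated fields; the exact field order is not shown in this excerpt, so the layout in the sketch below is an assumption, and the namedtuple is an illustrative stand-in for the real DnsRecord class:

from collections import namedtuple

# Hypothetical field order, for illustration only.
DnsRecord = namedtuple("DnsRecord", "hostname ip update_url")

sample_line = "example.mooo.com|203.0.113.7|http://freedns.afraid.org/dynamic/update.php?XXXX"
record = DnsRecord(*sample_line.split("|"))
print(record.hostname, record.ip)  # example.mooo.com 203.0.113.7

Note that a line with the wrong number of fields makes DnsRecord(*record) raise TypeError inside the frozenset comprehension, which is exactly what the except clause converts into ApiError.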
242,288
atdt/afraid
afraid/__init__.py
update_continuously
def update_continuously(records, update_interval=600): """Update `records` every `update_interval` seconds""" while True: for record in records: try: record.update() except (ApiError, RequestException): pass time.sleep(update_interval)
python
def update_continuously(records, update_interval=600): """Update `records` every `update_interval` seconds""" while True: for record in records: try: record.update() except (ApiError, RequestException): pass time.sleep(update_interval)
[ "def", "update_continuously", "(", "records", ",", "update_interval", "=", "600", ")", ":", "while", "True", ":", "for", "record", "in", "records", ":", "try", ":", "record", ".", "update", "(", ")", "except", "(", "ApiError", ",", "RequestException", ")", ":", "pass", "time", ".", "sleep", "(", "update_interval", ")" ]
Update `records` every `update_interval` seconds
[ "Update", "records", "every", "update_interval", "seconds" ]
d74b2d4e41ed14e420da2793a89bef5d9b26ea26
https://github.com/atdt/afraid/blob/d74b2d4e41ed14e420da2793a89bef5d9b26ea26/afraid/__init__.py#L106-L114
242,289
atdt/afraid
afraid/__init__.py
DnsRecord.update
def update(self): """Updates remote DNS record by requesting its special endpoint URL""" response = requests.get(self.update_url, timeout=timeout) match = ip_pattern.search(response.content) # response must contain an ip address, or else we can't parse it if not match: raise ApiError("Couldn't parse the server's response", response.content) self.ip = match.group(0)
python
def update(self): """Updates remote DNS record by requesting its special endpoint URL""" response = requests.get(self.update_url, timeout=timeout) match = ip_pattern.search(response.content) # response must contain an ip address, or else we can't parse it if not match: raise ApiError("Couldn't parse the server's response", response.content) self.ip = match.group(0)
[ "def", "update", "(", "self", ")", ":", "response", "=", "requests", ".", "get", "(", "self", ".", "update_url", ",", "timeout", "=", "timeout", ")", "match", "=", "ip_pattern", ".", "search", "(", "response", ".", "content", ")", "# response must contain an ip address, or else we can't parse it", "if", "not", "match", ":", "raise", "ApiError", "(", "\"Couldn't parse the server's response\"", ",", "response", ".", "content", ")", "self", ".", "ip", "=", "match", ".", "group", "(", "0", ")" ]
Updates remote DNS record by requesting its special endpoint URL
[ "Updates", "remote", "DNS", "record", "by", "requesting", "its", "special", "endpoint", "URL" ]
d74b2d4e41ed14e420da2793a89bef5d9b26ea26
https://github.com/atdt/afraid/blob/d74b2d4e41ed14e420da2793a89bef5d9b26ea26/afraid/__init__.py#L72-L82
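The module-level ip_pattern is not included in this excerpt; a plausible stand-in, shown purely for illustration, is a simple dotted-quad regex:

import re

# Assumed shape of ip_pattern; the real definition is outside this excerpt.
ip_pattern = re.compile(r"\b(?:\d{1,3}\.){3}\d{1,3}\b")

match = ip_pattern.search("Updated example.mooo.com to 203.0.113.7")
print(match.group(0))  # 203.0.113.7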
242,290
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/restful.py
setup_app
def setup_app(app, api): """Set up the resource URLs.""" api.add_resource( KnwKBAllResource, '/api/knowledge' ) api.add_resource( KnwKBResource, '/api/knowledge/<string:slug>' ) api.add_resource( KnwKBMappingsResource, '/api/knowledge/<string:slug>/mappings' ) api.add_resource( KnwKBMappingsToResource, '/api/knowledge/<string:slug>/mappings/to' ) api.add_resource( KnwKBMappingsFromResource, '/api/knowledge/<string:slug>/mappings/from' ) # for other urls, return "Method Not Allowed" api.add_resource( NotImplementedKnowledegeResource, '/api/knowledge/<string:slug>/<path:foo>' )
python
def setup_app(app, api): """Set up the resource URLs.""" api.add_resource( KnwKBAllResource, '/api/knowledge' ) api.add_resource( KnwKBResource, '/api/knowledge/<string:slug>' ) api.add_resource( KnwKBMappingsResource, '/api/knowledge/<string:slug>/mappings' ) api.add_resource( KnwKBMappingsToResource, '/api/knowledge/<string:slug>/mappings/to' ) api.add_resource( KnwKBMappingsFromResource, '/api/knowledge/<string:slug>/mappings/from' ) # for other urls, return "Method Not Allowed" api.add_resource( NotImplementedKnowledegeResource, '/api/knowledge/<string:slug>/<path:foo>' )
[ "def", "setup_app", "(", "app", ",", "api", ")", ":", "api", ".", "add_resource", "(", "KnwKBAllResource", ",", "'/api/knowledge'", ")", "api", ".", "add_resource", "(", "KnwKBResource", ",", "'/api/knowledge/<string:slug>'", ")", "api", ".", "add_resource", "(", "KnwKBMappingsResource", ",", "'/api/knowledge/<string:slug>/mappings'", ")", "api", ".", "add_resource", "(", "KnwKBMappingsToResource", ",", "'/api/knowledge/<string:slug>/mappings/to'", ")", "api", ".", "add_resource", "(", "KnwKBMappingsFromResource", ",", "'/api/knowledge/<string:slug>/mappings/from'", ")", "# for other urls, return \"Method Not Allowed\"", "api", ".", "add_resource", "(", "NotImplementedKnowledegeResource", ",", "'/api/knowledge/<string:slug>/<path:foo>'", ")" ]
Set up the resource URLs.
[ "setup", "the", "resources", "urls", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/restful.py#L487-L514
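setup_app relies on Flask-RESTful's routing: each add_resource call binds a Resource subclass to a URL rule, and the final catch-all <path:foo> rule funnels any unknown sub-path to a resource that answers 405. A minimal sketch of the same registration pattern with placeholder names (not the invenio-knowledge classes):

from flask import Flask
from flask_restful import Api, Resource

class SlugResource(Resource):
    # Placeholder resource; GET echoes the captured slug.
    def get(self, slug):
        return {"slug": slug}

app = Flask(__name__)
api = Api(app)
api.add_resource(SlugResource, "/api/things/<string:slug>")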
242,291
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/restful.py
KnwKBResource.get
def get(self, slug): """Get KnwKB. Url parameters: - from: filter "mappings from" - to: filter "mappings to" - page - per_page - match_type: s=substring, e=exact, sw=startswith - sortby: 'from' or 'to' """ kb = api.get_kb_by_slug(slug) # check if is accessible from api check_knowledge_access(kb) parser = reqparse.RequestParser() parser.add_argument( 'from', type=str, help="Return only entries where key matches this.") parser.add_argument( 'to', type=str, help="Return only entries where value matches this.") parser.add_argument('page', type=int, help="Require a specific page") parser.add_argument('per_page', type=int, help="Set how much result per page") parser.add_argument('match_type', type=str, help="s=substring, e=exact, sw=startswith") parser.add_argument('sortby', type=str, help="the sorting criteria ('from' or 'to')") args = parser.parse_args() kb_dict = kb.to_dict() kb_dict['mappings'] = KnwKBMappingsResource \ .search_mappings(kb=kb, key=args['from'], value=args['to'], match_type=args['match_type'], sortby=args['sortby'], page=args['page'], per_page=args['per_page']) return kb_dict
python
def get(self, slug): """Get KnwKB. Url parameters: - from: filter "mappings from" - to: filter "mappings to" - page - per_page - match_type: s=substring, e=exact, sw=startswith - sortby: 'from' or 'to' """ kb = api.get_kb_by_slug(slug) # check if is accessible from api check_knowledge_access(kb) parser = reqparse.RequestParser() parser.add_argument( 'from', type=str, help="Return only entries where key matches this.") parser.add_argument( 'to', type=str, help="Return only entries where value matches this.") parser.add_argument('page', type=int, help="Require a specific page") parser.add_argument('per_page', type=int, help="Set how much result per page") parser.add_argument('match_type', type=str, help="s=substring, e=exact, sw=startswith") parser.add_argument('sortby', type=str, help="the sorting criteria ('from' or 'to')") args = parser.parse_args() kb_dict = kb.to_dict() kb_dict['mappings'] = KnwKBMappingsResource \ .search_mappings(kb=kb, key=args['from'], value=args['to'], match_type=args['match_type'], sortby=args['sortby'], page=args['page'], per_page=args['per_page']) return kb_dict
[ "def", "get", "(", "self", ",", "slug", ")", ":", "kb", "=", "api", ".", "get_kb_by_slug", "(", "slug", ")", "# check if is accessible from api", "check_knowledge_access", "(", "kb", ")", "parser", "=", "reqparse", ".", "RequestParser", "(", ")", "parser", ".", "add_argument", "(", "'from'", ",", "type", "=", "str", ",", "help", "=", "\"Return only entries where key matches this.\"", ")", "parser", ".", "add_argument", "(", "'to'", ",", "type", "=", "str", ",", "help", "=", "\"Return only entries where value matches this.\"", ")", "parser", ".", "add_argument", "(", "'page'", ",", "type", "=", "int", ",", "help", "=", "\"Require a specific page\"", ")", "parser", ".", "add_argument", "(", "'per_page'", ",", "type", "=", "int", ",", "help", "=", "\"Set how much result per page\"", ")", "parser", ".", "add_argument", "(", "'match_type'", ",", "type", "=", "str", ",", "help", "=", "\"s=substring, e=exact, sw=startswith\"", ")", "parser", ".", "add_argument", "(", "'sortby'", ",", "type", "=", "str", ",", "help", "=", "\"the sorting criteria ('from' or 'to')\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "kb_dict", "=", "kb", ".", "to_dict", "(", ")", "kb_dict", "[", "'mappings'", "]", "=", "KnwKBMappingsResource", ".", "search_mappings", "(", "kb", "=", "kb", ",", "key", "=", "args", "[", "'from'", "]", ",", "value", "=", "args", "[", "'to'", "]", ",", "match_type", "=", "args", "[", "'match_type'", "]", ",", "sortby", "=", "args", "[", "'sortby'", "]", ",", "page", "=", "args", "[", "'page'", "]", ",", "per_page", "=", "args", "[", "'per_page'", "]", ")", "return", "kb_dict" ]
Get KnwKB. Url parameters: - from: filter "mappings from" - to: filter "mappings to" - page - per_page - match_type: s=substring, e=exact, sw=startswith - sortby: 'from' or 'to'
[ "Get", "KnwKB", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/restful.py#L106-L144
242,292
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/restful.py
KnwKBMappingsResource.search_mappings
def search_mappings(kb, key=None, value=None, match_type=None, sortby=None, page=None, per_page=None): """Search tags for knowledge.""" if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']: return pagination.RestfulSQLAlchemyPagination( api.query_kb_mappings( kbid=kb.id, key=key or '', value=value or '', match_type=match_type or 's', sortby=sortby or 'to', ), page=page or 1, per_page=per_page or 10 ).items return []
python
def search_mappings(kb, key=None, value=None, match_type=None, sortby=None, page=None, per_page=None): """Search tags for knowledge.""" if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']: return pagination.RestfulSQLAlchemyPagination( api.query_kb_mappings( kbid=kb.id, key=key or '', value=value or '', match_type=match_type or 's', sortby=sortby or 'to', ), page=page or 1, per_page=per_page or 10 ).items return []
[ "def", "search_mappings", "(", "kb", ",", "key", "=", "None", ",", "value", "=", "None", ",", "match_type", "=", "None", ",", "sortby", "=", "None", ",", "page", "=", "None", ",", "per_page", "=", "None", ")", ":", "if", "kb", ".", "kbtype", "==", "models", ".", "KnwKB", ".", "KNWKB_TYPES", "[", "'written_as'", "]", ":", "return", "pagination", ".", "RestfulSQLAlchemyPagination", "(", "api", ".", "query_kb_mappings", "(", "kbid", "=", "kb", ".", "id", ",", "key", "=", "key", "or", "''", ",", "value", "=", "value", "or", "''", ",", "match_type", "=", "match_type", "or", "'s'", ",", "sortby", "=", "sortby", "or", "'to'", ",", ")", ",", "page", "=", "page", "or", "1", ",", "per_page", "=", "per_page", "or", "10", ")", ".", "items", "return", "[", "]" ]
Search tags for knowledge.
[ "Search", "tags", "for", "knowledge", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/restful.py#L185-L198
242,293
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/restful.py
KnwKBMappingsResource.get
def get(self, slug): """Get list of mappings. Url parameters: - from: filter "mappings from" - to: filter "mappings to" - page - per_page - match_type: s=substring, e=exact, sw=startswith - sortby: 'from' or 'to' """ kb = api.get_kb_by_slug(slug) # check if is accessible from api check_knowledge_access(kb) parser = reqparse.RequestParser() parser.add_argument( 'from', type=str, help="Return only entries where 'from' matches this.") parser.add_argument( 'to', type=str, help="Return only entries where 'to' matches this.") parser.add_argument('page', type=int, help="Require a specific page") parser.add_argument('per_page', type=int, help="Set how much result per page") parser.add_argument('match_type', type=str, help="s=substring, e=exact, sw=startswith") parser.add_argument('sortby', type=str, help="the sorting criteria ('from' or 'to')") args = parser.parse_args() return KnwKBMappingsResource \ .search_mappings(kb, args['from'], args['to'], args['match_type'], args['sortby'], args['page'], args['per_page'])
python
def get(self, slug): """Get list of mappings. Url parameters: - from: filter "mappings from" - to: filter "mappings to" - page - per_page - match_type: s=substring, e=exact, sw=startswith - sortby: 'from' or 'to' """ kb = api.get_kb_by_slug(slug) # check if is accessible from api check_knowledge_access(kb) parser = reqparse.RequestParser() parser.add_argument( 'from', type=str, help="Return only entries where 'from' matches this.") parser.add_argument( 'to', type=str, help="Return only entries where 'to' matches this.") parser.add_argument('page', type=int, help="Require a specific page") parser.add_argument('per_page', type=int, help="Set how much result per page") parser.add_argument('match_type', type=str, help="s=substring, e=exact, sw=startswith") parser.add_argument('sortby', type=str, help="the sorting criteria ('from' or 'to')") args = parser.parse_args() return KnwKBMappingsResource \ .search_mappings(kb, args['from'], args['to'], args['match_type'], args['sortby'], args['page'], args['per_page'])
[ "def", "get", "(", "self", ",", "slug", ")", ":", "kb", "=", "api", ".", "get_kb_by_slug", "(", "slug", ")", "# check if is accessible from api", "check_knowledge_access", "(", "kb", ")", "parser", "=", "reqparse", ".", "RequestParser", "(", ")", "parser", ".", "add_argument", "(", "'from'", ",", "type", "=", "str", ",", "help", "=", "\"Return only entries where 'from' matches this.\"", ")", "parser", ".", "add_argument", "(", "'to'", ",", "type", "=", "str", ",", "help", "=", "\"Return only entries where 'to' matches this.\"", ")", "parser", ".", "add_argument", "(", "'page'", ",", "type", "=", "int", ",", "help", "=", "\"Require a specific page\"", ")", "parser", ".", "add_argument", "(", "'per_page'", ",", "type", "=", "int", ",", "help", "=", "\"Set how much result per page\"", ")", "parser", ".", "add_argument", "(", "'match_type'", ",", "type", "=", "str", ",", "help", "=", "\"s=substring, e=exact, sw=startswith\"", ")", "parser", ".", "add_argument", "(", "'sortby'", ",", "type", "=", "str", ",", "help", "=", "\"the sorting criteria ('from' or 'to')\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "return", "KnwKBMappingsResource", ".", "search_mappings", "(", "kb", ",", "args", "[", "'from'", "]", ",", "args", "[", "'to'", "]", ",", "args", "[", "'match_type'", "]", ",", "args", "[", "'sortby'", "]", ",", "args", "[", "'page'", "]", ",", "args", "[", "'per_page'", "]", ")" ]
Get list of mappings. Url parameters: - from: filter "mappings from" - to: filter "mappings to" - page - per_page - match_type: s=substring, e=exact, sw=startswith - sortby: 'from' or 'to'
[ "Get", "list", "of", "mappings", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/restful.py#L201-L236
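A hypothetical client call against a running instance shows how the query string maps onto the reqparse arguments above (host, slug, and values are placeholders):

import requests

response = requests.get(
    "http://localhost:5000/api/knowledge/journals/mappings",
    params={"from": "Phys", "match_type": "sw", "page": 1, "per_page": 5},
)
print(response.json())  # at most five mappings whose key starts with "Phys"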
242,294
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/restful.py
KnwKBMappingsToResource.search_list
def search_list(kb, value=None, match_type=None, page=None, per_page=None, unique=False): """Search "mappings to" for knowledge.""" # init page = page or 1 per_page = per_page or 10 if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']: # get the base query query = api.query_kb_mappings( kbid=kb.id, value=value or '', match_type=match_type or 's' ).with_entities(models.KnwKBRVAL.m_value) # if you want a 'unique' list if unique: query = query.distinct() # run query and paginate return [item.m_value for item in pagination.RestfulSQLAlchemyPagination( query, page=page or 1, per_page=per_page or 10 ).items] elif kb.kbtype == models.KnwKB.KNWKB_TYPES['dynamic']: items = api.get_kbd_values(kb.name, value) return pagination.RestfulPagination( page=page, per_page=per_page, total_count=len(items) ).slice(items) return []
python
def search_list(kb, value=None, match_type=None, page=None, per_page=None, unique=False): """Search "mappings to" for knowledge.""" # init page = page or 1 per_page = per_page or 10 if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']: # get the base query query = api.query_kb_mappings( kbid=kb.id, value=value or '', match_type=match_type or 's' ).with_entities(models.KnwKBRVAL.m_value) # if you want a 'unique' list if unique: query = query.distinct() # run query and paginate return [item.m_value for item in pagination.RestfulSQLAlchemyPagination( query, page=page or 1, per_page=per_page or 10 ).items] elif kb.kbtype == models.KnwKB.KNWKB_TYPES['dynamic']: items = api.get_kbd_values(kb.name, value) return pagination.RestfulPagination( page=page, per_page=per_page, total_count=len(items) ).slice(items) return []
[ "def", "search_list", "(", "kb", ",", "value", "=", "None", ",", "match_type", "=", "None", ",", "page", "=", "None", ",", "per_page", "=", "None", ",", "unique", "=", "False", ")", ":", "# init", "page", "=", "page", "or", "1", "per_page", "=", "per_page", "or", "10", "if", "kb", ".", "kbtype", "==", "models", ".", "KnwKB", ".", "KNWKB_TYPES", "[", "'written_as'", "]", ":", "# get the base query", "query", "=", "api", ".", "query_kb_mappings", "(", "kbid", "=", "kb", ".", "id", ",", "value", "=", "value", "or", "''", ",", "match_type", "=", "match_type", "or", "'s'", ")", ".", "with_entities", "(", "models", ".", "KnwKBRVAL", ".", "m_value", ")", "# if you want a 'unique' list", "if", "unique", ":", "query", "=", "query", ".", "distinct", "(", ")", "# run query and paginate", "return", "[", "item", ".", "m_value", "for", "item", "in", "pagination", ".", "RestfulSQLAlchemyPagination", "(", "query", ",", "page", "=", "page", "or", "1", ",", "per_page", "=", "per_page", "or", "10", ")", ".", "items", "]", "elif", "kb", ".", "kbtype", "==", "models", ".", "KnwKB", ".", "KNWKB_TYPES", "[", "'dynamic'", "]", ":", "items", "=", "api", ".", "get_kbd_values", "(", "kb", ".", "name", ",", "value", ")", "return", "pagination", ".", "RestfulPagination", "(", "page", "=", "page", ",", "per_page", "=", "per_page", ",", "total_count", "=", "len", "(", "items", ")", ")", ".", "slice", "(", "items", ")", "return", "[", "]" ]
Search "mappings to" for knowledge.
[ "Search", "mappings", "to", "for", "knowledge", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/restful.py#L277-L306
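For dynamic knowledge bases the method pulls the full value list and slices it in memory; RestfulPagination.slice is not shown in this excerpt, but its effect is presumably the standard 1-based page slice, sketched here as a hypothetical helper:

def paginate_list(items, page=1, per_page=10):
    # Assumed behaviour of RestfulPagination.slice: 1-based page numbering.
    start = (page - 1) * per_page
    return items[start:start + per_page]

print(paginate_list(list(range(25)), page=3, per_page=10))  # [20, 21, 22, 23, 24]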
242,295
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/restful.py
KnwKBMappingsFromResource.search_list
def search_list(kb, from_=None, match_type=None, page=None, per_page=None, unique=False): """Search "mapping from" for knowledge.""" # init page = page or 1 per_page = per_page or 10 if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']: # get the base query query = api.query_kb_mappings( kbid=kb.id, key=from_ or '', match_type=match_type or 's' ).with_entities(models.KnwKBRVAL.m_key) # if you want a 'unique' list if unique: query = query.distinct() # run query and paginate return [item.m_key for item in pagination.RestfulSQLAlchemyPagination( query, page=page or 1, per_page=per_page or 10 ).items] return []
python
def search_list(kb, from_=None, match_type=None, page=None, per_page=None, unique=False): """Search "mapping from" for knowledge.""" # init page = page or 1 per_page = per_page or 10 if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']: # get the base query query = api.query_kb_mappings( kbid=kb.id, key=from_ or '', match_type=match_type or 's' ).with_entities(models.KnwKBRVAL.m_key) # if you want a 'unique' list if unique: query = query.distinct() # run query and paginate return [item.m_key for item in pagination.RestfulSQLAlchemyPagination( query, page=page or 1, per_page=per_page or 10 ).items] return []
[ "def", "search_list", "(", "kb", ",", "from_", "=", "None", ",", "match_type", "=", "None", ",", "page", "=", "None", ",", "per_page", "=", "None", ",", "unique", "=", "False", ")", ":", "# init", "page", "=", "page", "or", "1", "per_page", "=", "per_page", "or", "10", "if", "kb", ".", "kbtype", "==", "models", ".", "KnwKB", ".", "KNWKB_TYPES", "[", "'written_as'", "]", ":", "# get the base query", "query", "=", "api", ".", "query_kb_mappings", "(", "kbid", "=", "kb", ".", "id", ",", "key", "=", "from_", "or", "''", ",", "match_type", "=", "match_type", "or", "'s'", ")", ".", "with_entities", "(", "models", ".", "KnwKBRVAL", ".", "m_key", ")", "# if you want a 'unique' list", "if", "unique", ":", "query", "=", "query", ".", "distinct", "(", ")", "# run query and paginate", "return", "[", "item", ".", "m_key", "for", "item", "in", "pagination", ".", "RestfulSQLAlchemyPagination", "(", "query", ",", "page", "=", "page", "or", "1", ",", "per_page", "=", "per_page", "or", "10", ")", ".", "items", "]", "return", "[", "]" ]
Search "mapping from" for knowledge.
[ "Search", "mapping", "from", "for", "knowledge", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/restful.py#L382-L405
242,296
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/restful.py
KnwKBMappingsFromResource.get
def get(self, slug): """Get list of "mappings from". Url parameters - unique: if set, return a unique list - filter: filter "mappings from" - page - per_page - match_type: s=substring, e=exact, sw=startswith """ kb = api.get_kb_by_slug(slug) # check if is accessible from api check_knowledge_access(kb) parser = reqparse.RequestParser() parser.add_argument( 'unique', type=bool, help="The list contains unique names of 'mapping to'") parser.add_argument( 'filter', type=str, help="Return only entries where 'from' matches this.") parser.add_argument('page', type=int, help="Require a specific page") parser.add_argument('per_page', type=int, help="Set how much result per page") parser.add_argument('match_type', type=str, help="s=substring, e=exact, sw=startswith") args = parser.parse_args() return KnwKBMappingsFromResource \ .search_list(kb, args['filter'], args['match_type'], args['page'], args['per_page'], args['unique'])
python
def get(self, slug): """Get list of "mappings from". Url parameters - unique: if set, return a unique list - filter: filter "mappings from" - page - per_page - match_type: s=substring, e=exact, sw=startswith """ kb = api.get_kb_by_slug(slug) # check if is accessible from api check_knowledge_access(kb) parser = reqparse.RequestParser() parser.add_argument( 'unique', type=bool, help="The list contains unique names of 'mapping to'") parser.add_argument( 'filter', type=str, help="Return only entries where 'from' matches this.") parser.add_argument('page', type=int, help="Require a specific page") parser.add_argument('per_page', type=int, help="Set how much result per page") parser.add_argument('match_type', type=str, help="s=substring, e=exact, sw=startswith") args = parser.parse_args() return KnwKBMappingsFromResource \ .search_list(kb, args['filter'], args['match_type'], args['page'], args['per_page'], args['unique'])
[ "def", "get", "(", "self", ",", "slug", ")", ":", "kb", "=", "api", ".", "get_kb_by_slug", "(", "slug", ")", "# check if is accessible from api", "check_knowledge_access", "(", "kb", ")", "parser", "=", "reqparse", ".", "RequestParser", "(", ")", "parser", ".", "add_argument", "(", "'unique'", ",", "type", "=", "bool", ",", "help", "=", "\"The list contains unique names of 'mapping to'\"", ")", "parser", ".", "add_argument", "(", "'filter'", ",", "type", "=", "str", ",", "help", "=", "\"Return only entries where 'from' matches this.\"", ")", "parser", ".", "add_argument", "(", "'page'", ",", "type", "=", "int", ",", "help", "=", "\"Require a specific page\"", ")", "parser", ".", "add_argument", "(", "'per_page'", ",", "type", "=", "int", ",", "help", "=", "\"Set how much result per page\"", ")", "parser", ".", "add_argument", "(", "'match_type'", ",", "type", "=", "str", ",", "help", "=", "\"s=substring, e=exact, sw=startswith\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "return", "KnwKBMappingsFromResource", ".", "search_list", "(", "kb", ",", "args", "[", "'filter'", "]", ",", "args", "[", "'match_type'", "]", ",", "args", "[", "'page'", "]", ",", "args", "[", "'per_page'", "]", ",", "args", "[", "'unique'", "]", ")" ]
Get list of "mappings from". Url parameters - unique: if set, return a unique list - filter: filter "mappings from" - page - per_page - match_type: s=substring, e=exact, sw=startswith
[ "Get", "list", "of", "mappings", "from", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/restful.py#L407-L439
242,297
political-memory/django-representatives
representatives/contrib/parltrack/import_representatives.py
ParltrackImporter.manage_mep
def manage_mep(self, mep_json): ''' Import a mep as a representative from the json dict fetched from parltrack ''' # Some versions of memopol will connect to this and skip inactive meps. responses = representative_pre_import.send(sender=self, representative_data=mep_json) for receiver, response in responses: if response is False: logger.debug( 'Skipping MEP %s', mep_json['Name']['full']) return changed = False slug = slugify('%s-%s' % ( mep_json["Name"]["full"] if 'full' in mep_json["Name"] else mep_json["Name"]["sur"] + " " + mep_json["Name"]["family"], _parse_date(mep_json["Birth"]["date"]) )) try: representative = Representative.objects.get(slug=slug) except Representative.DoesNotExist: representative = Representative(slug=slug) changed = True # Save representative attributes self.import_representative_details(representative, mep_json, changed) self.add_mandates(representative, mep_json) self.add_contacts(representative, mep_json) logger.debug('Imported MEP %s', unicode(representative)) return representative
python
def manage_mep(self, mep_json): ''' Import a mep as a representative from the json dict fetched from parltrack ''' # Some versions of memopol will connect to this and skip inactive meps. responses = representative_pre_import.send(sender=self, representative_data=mep_json) for receiver, response in responses: if response is False: logger.debug( 'Skipping MEP %s', mep_json['Name']['full']) return changed = False slug = slugify('%s-%s' % ( mep_json["Name"]["full"] if 'full' in mep_json["Name"] else mep_json["Name"]["sur"] + " " + mep_json["Name"]["family"], _parse_date(mep_json["Birth"]["date"]) )) try: representative = Representative.objects.get(slug=slug) except Representative.DoesNotExist: representative = Representative(slug=slug) changed = True # Save representative attributes self.import_representative_details(representative, mep_json, changed) self.add_mandates(representative, mep_json) self.add_contacts(representative, mep_json) logger.debug('Imported MEP %s', unicode(representative)) return representative
[ "def", "manage_mep", "(", "self", ",", "mep_json", ")", ":", "# Some versions of memopol will connect to this and skip inactive meps.", "responses", "=", "representative_pre_import", ".", "send", "(", "sender", "=", "self", ",", "representative_data", "=", "mep_json", ")", "for", "receiver", ",", "response", "in", "responses", ":", "if", "response", "is", "False", ":", "logger", ".", "debug", "(", "'Skipping MEP %s'", ",", "mep_json", "[", "'Name'", "]", "[", "'full'", "]", ")", "return", "changed", "=", "False", "slug", "=", "slugify", "(", "'%s-%s'", "%", "(", "mep_json", "[", "\"Name\"", "]", "[", "\"full\"", "]", "if", "'full'", "in", "mep_json", "[", "\"Name\"", "]", "else", "mep_json", "[", "\"Name\"", "]", "[", "\"sur\"", "]", "+", "\" \"", "+", "mep_json", "[", "\"Name\"", "]", "[", "\"family\"", "]", ",", "_parse_date", "(", "mep_json", "[", "\"Birth\"", "]", "[", "\"date\"", "]", ")", ")", ")", "try", ":", "representative", "=", "Representative", ".", "objects", ".", "get", "(", "slug", "=", "slug", ")", "except", "Representative", ".", "DoesNotExist", ":", "representative", "=", "Representative", "(", "slug", "=", "slug", ")", "changed", "=", "True", "# Save representative attributes", "self", ".", "import_representative_details", "(", "representative", ",", "mep_json", ",", "changed", ")", "self", ".", "add_mandates", "(", "representative", ",", "mep_json", ")", "self", ".", "add_contacts", "(", "representative", ",", "mep_json", ")", "logger", ".", "debug", "(", "'Imported MEP %s'", ",", "unicode", "(", "representative", ")", ")", "return", "representative" ]
Import a mep as a representative from the json dict fetched from parltrack
[ "Import", "a", "mep", "as", "a", "representative", "from", "the", "json", "dict", "fetched", "from", "parltrack" ]
811c90d0250149e913e6196f0ab11c97d396be39
https://github.com/political-memory/django-representatives/blob/811c90d0250149e913e6196f0ab11c97d396be39/representatives/contrib/parltrack/import_representatives.py#L77-L114
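The slug doubles as the stable identity key for a representative: the full name (or surname plus family name) joined with the parsed birth date. An illustrative reconstruction with an invented mep_json fragment (Django's slugify is assumed here; _parse_date is outside this excerpt):

from django.utils.text import slugify

mep_json = {"Name": {"full": "Ada Lovelace"}, "Birth": {"date": "1815-12-10T00:00:00"}}
birth = "1815-12-10"  # stands in for _parse_date(mep_json["Birth"]["date"])
slug = slugify("%s-%s" % (mep_json["Name"]["full"], birth))
print(slug)  # ada-lovelace-1815-12-10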
242,298
edeposit/edeposit.amqp.ltp
src/edeposit/amqp/ltp/info_composer.py
_calc_dir_size
def _calc_dir_size(path): """ Calculate size of all files in `path`. Args: path (str): Path to the directory. Returns: int: Size of the directory in bytes. """ dir_size = 0 for (root, dirs, files) in os.walk(path): for fn in files: full_fn = os.path.join(root, fn) dir_size += os.path.getsize(full_fn) return dir_size
python
def _calc_dir_size(path): """ Calculate size of all files in `path`. Args: path (str): Path to the directory. Returns: int: Size of the directory in bytes. """ dir_size = 0 for (root, dirs, files) in os.walk(path): for fn in files: full_fn = os.path.join(root, fn) dir_size += os.path.getsize(full_fn) return dir_size
[ "def", "_calc_dir_size", "(", "path", ")", ":", "dir_size", "=", "0", "for", "(", "root", ",", "dirs", ",", "files", ")", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "fn", "in", "files", ":", "full_fn", "=", "os", ".", "path", ".", "join", "(", "root", ",", "fn", ")", "dir_size", "+=", "os", ".", "path", ".", "getsize", "(", "full_fn", ")", "return", "dir_size" ]
Calculate size of all files in `path`. Args: path (str): Path to the directory. Returns: int: Size of the directory in bytes.
[ "Calculate", "size", "of", "all", "files", "in", "path", "." ]
df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e
https://github.com/edeposit/edeposit.amqp.ltp/blob/df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e/src/edeposit/amqp/ltp/info_composer.py#L38-L54
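A quick self-contained check of the walk-and-sum behaviour (Python 3's tempfile.TemporaryDirectory is used here for convenience, although the source module targets an older Python):

import os
import tempfile

with tempfile.TemporaryDirectory() as path:
    with open(os.path.join(path, "a.bin"), "wb") as handle:
        handle.write(b"\x00" * 1024)
    print(_calc_dir_size(path))  # 1024

Nested directories and hidden files are included automatically because os.walk visits every directory under path.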
242,299
edeposit/edeposit.amqp.ltp
src/edeposit/amqp/ltp/info_composer.py
_get_localized_fn
def _get_localized_fn(path, root_dir): """ Return absolute `path` relative to `root_dir`. When `path` == ``/home/xex/somefile.txt`` and `root_dir` == ``/home``, returned path will be ``/xex/somefile.txt``. Args: path (str): Absolute path beginning in `root_dir`. root_dir (str): Absolute path containing `path` argument. Returns: str: Local `path` when `root_dir` is considered as root of FS. """ local_fn = path if path.startswith(root_dir): local_fn = path.replace(root_dir, "", 1) if not local_fn.startswith("/"): return "/" + local_fn return local_fn
python
def _get_localized_fn(path, root_dir): """ Return absolute `path` relative to `root_dir`. When `path` == ``/home/xex/somefile.txt`` and `root_dir` == ``/home``, returned path will be ``/xex/somefile.txt``. Args: path (str): Absolute path beginning in `root_dir`. root_dir (str): Absolute path containing `path` argument. Returns: str: Local `path` when `root_dir` is considered as root of FS. """ local_fn = path if path.startswith(root_dir): local_fn = path.replace(root_dir, "", 1) if not local_fn.startswith("/"): return "/" + local_fn return local_fn
[ "def", "_get_localized_fn", "(", "path", ",", "root_dir", ")", ":", "local_fn", "=", "path", "if", "path", ".", "startswith", "(", "root_dir", ")", ":", "local_fn", "=", "path", ".", "replace", "(", "root_dir", ",", "\"\"", ",", "1", ")", "if", "not", "local_fn", ".", "startswith", "(", "\"/\"", ")", ":", "return", "\"/\"", "+", "local_fn", "return", "local_fn" ]
Return absolute `path` relative to `root_dir`. When `path` == ``/home/xex/somefile.txt`` and `root_dir` == ``/home``, returned path will be ``/xex/somefile.txt``. Args: path (str): Absolute path beginning in `root_dir`. root_dir (str): Absolute path containing `path` argument. Returns: str: Local `path` when `root_dir` is considered as root of FS.
[ "Return", "absolute", "path", "relative", "to", "root_dir", "." ]
df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e
https://github.com/edeposit/edeposit.amqp.ltp/blob/df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e/src/edeposit/amqp/ltp/info_composer.py#L57-L78