| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (stringclasses, 1 value: python) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
243,800
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/client.py
|
_Binding.ReceiveFault
|
def ReceiveFault(self, **kw):
'''Parse incoming message as a fault. Raise TypeError if no
fault found.
'''
self.ReceiveSOAP(**kw)
if not self.ps.IsAFault():
raise TypeError("Expected SOAP Fault not found")
return FaultFromFaultMessage(self.ps)
|
python
|
def ReceiveFault(self, **kw):
'''Parse incoming message as a fault. Raise TypeError if no
fault found.
'''
self.ReceiveSOAP(**kw)
if not self.ps.IsAFault():
raise TypeError("Expected SOAP Fault not found")
return FaultFromFaultMessage(self.ps)
|
[
"def",
"ReceiveFault",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"self",
".",
"ReceiveSOAP",
"(",
"*",
"*",
"kw",
")",
"if",
"not",
"self",
".",
"ps",
".",
"IsAFault",
"(",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected SOAP Fault not found\"",
")",
"return",
"FaultFromFaultMessage",
"(",
"self",
".",
"ps",
")"
] |
Parse incoming message as a fault. Raise TypeError if no
fault found.
|
[
"Parse",
"incoming",
"message",
"as",
"a",
"fault",
".",
"Raise",
"TypeError",
"if",
"no",
"fault",
"found",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/client.py#L435-L442
|
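A minimal sketch of the `ReceiveFault` contract in the row above, using a stand-in for the parsed-SOAP object (`ps`), since ZSI itself is Python 2 era; `FakePS` and `receive_fault` are illustrative names, not part of the library:

```python
class FakePS:
    """Stand-in for ZSI's parsed-SOAP object; only IsAFault() is mimicked."""
    def __init__(self, is_fault):
        self._is_fault = is_fault
    def IsAFault(self):
        return self._is_fault

def receive_fault(ps):
    # Same contract as _Binding.ReceiveFault: return a fault or raise TypeError.
    if not ps.IsAFault():
        raise TypeError("Expected SOAP Fault not found")
    return "fault-object"  # stands in for FaultFromFaultMessage(ps)

print(receive_fault(FakePS(True)))   # -> fault-object
# receive_fault(FakePS(False))       # raises TypeError
```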
243,801
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/client.py
|
Binding.__parse_child
|
def __parse_child(self, node):
'''for rpc-style map each message part to a class in typesmodule
'''
try:
tc = self.gettypecode(self.typesmodule, node)
except:
self.logger.debug('didnt find typecode for "%s" in typesmodule: %s',
node.localName, self.typesmodule)
tc = TC.Any(aslist=1)
return tc.parse(node, self.ps)
self.logger.debug('parse child with typecode : %s', tc)
try:
return tc.parse(node, self.ps)
except Exception:
self.logger.debug('parse failed try Any : %s', tc)
tc = TC.Any(aslist=1)
return tc.parse(node, self.ps)
|
python
|
def __parse_child(self, node):
'''for rpc-style map each message part to a class in typesmodule
'''
try:
tc = self.gettypecode(self.typesmodule, node)
except:
self.logger.debug('didnt find typecode for "%s" in typesmodule: %s',
node.localName, self.typesmodule)
tc = TC.Any(aslist=1)
return tc.parse(node, self.ps)
self.logger.debug('parse child with typecode : %s', tc)
try:
return tc.parse(node, self.ps)
except Exception:
self.logger.debug('parse failed try Any : %s', tc)
tc = TC.Any(aslist=1)
return tc.parse(node, self.ps)
|
[
"def",
"__parse_child",
"(",
"self",
",",
"node",
")",
":",
"try",
":",
"tc",
"=",
"self",
".",
"gettypecode",
"(",
"self",
".",
"typesmodule",
",",
"node",
")",
"except",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'didnt find typecode for \"%s\" in typesmodule: %s'",
",",
"node",
".",
"localName",
",",
"self",
".",
"typesmodule",
")",
"tc",
"=",
"TC",
".",
"Any",
"(",
"aslist",
"=",
"1",
")",
"return",
"tc",
".",
"parse",
"(",
"node",
",",
"self",
".",
"ps",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'parse child with typecode : %s'",
",",
"tc",
")",
"try",
":",
"return",
"tc",
".",
"parse",
"(",
"node",
",",
"self",
".",
"ps",
")",
"except",
"Exception",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'parse failed try Any : %s'",
",",
"tc",
")",
"tc",
"=",
"TC",
".",
"Any",
"(",
"aslist",
"=",
"1",
")",
"return",
"tc",
".",
"parse",
"(",
"node",
",",
"self",
".",
"ps",
")"
] |
for rpc-style map each message part to a class in typesmodule
|
[
"for",
"rpc",
"-",
"style",
"map",
"each",
"message",
"part",
"to",
"a",
"class",
"in",
"typesmodule"
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/client.py#L503-L521
|
243,802
|
sassoo/goldman
|
goldman/middleware/basicauth/__init__.py
|
Middleware._get_creds
|
def _get_creds(self, req):
""" Get the username & password from the Authorization header
If the header is actually malformed where Basic Auth was
indicated by the request then an InvalidAuthSyntax exception
is raised. Otherwise an AuthRequired exception since it's
unclear in this scenario if the requestor was even aware
Authentication was required & if so which "scheme".
Calls _validate_auth_scheme first & bubbles up its
exceptions.
:return:
tuple (username, password)
:raise:
AuthRequired, InvalidAuthSyntax
"""
self._validate_auth_scheme(req)
try:
creds = naked(req.auth.split(' ')[1])
creds = b64decode(creds)
username, password = creds.split(':')
return username, password
except IndexError:
raise InvalidAuthSyntax(**{
'detail': 'You are using the Basic Authentication scheme as '
'required to login but your Authorization header is '
'completely missing the login credentials.',
'links': 'tools.ietf.org/html/rfc2617#section-2',
})
except TypeError:
raise InvalidAuthSyntax(**{
'detail': 'Our API failed to base64 decode your Basic '
'Authentication login credentials in the '
'Authorization header. They seem to be malformed.',
'links': 'tools.ietf.org/html/rfc2617#section-2',
})
except ValueError:
raise InvalidAuthSyntax(**{
'detail': 'Our API failed to identify a username & password '
'in your Basic Authentication Authorization header '
'after decoding them. The username or password is '
'either missing or not separated by a ":" per the '
'spec. Either way the credentials are malformed.',
'links': 'tools.ietf.org/html/rfc2617#section-2',
})
|
python
|
def _get_creds(self, req):
""" Get the username & password from the Authorization header
If the header is actually malformed where Basic Auth was
indicated by the request then an InvalidAuthSyntax exception
is raised. Otherwise an AuthRequired exception since it's
unclear in this scenario if the requestor was even aware
Authentication was required & if so which "scheme".
Calls _validate_auth_scheme first & bubbles up its
exceptions.
:return:
tuple (username, password)
:raise:
AuthRequired, InvalidAuthSyntax
"""
self._validate_auth_scheme(req)
try:
creds = naked(req.auth.split(' ')[1])
creds = b64decode(creds)
username, password = creds.split(':')
return username, password
except IndexError:
raise InvalidAuthSyntax(**{
'detail': 'You are using the Basic Authentication scheme as '
'required to login but your Authorization header is '
'completely missing the login credentials.',
'links': 'tools.ietf.org/html/rfc2617#section-2',
})
except TypeError:
raise InvalidAuthSyntax(**{
'detail': 'Our API failed to base64 decode your Basic '
'Authentication login credentials in the '
'Authorization header. They seem to be malformed.',
'links': 'tools.ietf.org/html/rfc2617#section-2',
})
except ValueError:
raise InvalidAuthSyntax(**{
'detail': 'Our API failed to identify a username & password '
'in your Basic Authentication Authorization header '
'after decoding them. The username or password is '
'either missing or not separated by a ":" per the '
'spec. Either way the credentials are malformed.',
'links': 'tools.ietf.org/html/rfc2617#section-2',
})
|
[
"def",
"_get_creds",
"(",
"self",
",",
"req",
")",
":",
"self",
".",
"_validate_auth_scheme",
"(",
"req",
")",
"try",
":",
"creds",
"=",
"naked",
"(",
"req",
".",
"auth",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
")",
"creds",
"=",
"b64decode",
"(",
"creds",
")",
"username",
",",
"password",
"=",
"creds",
".",
"split",
"(",
"':'",
")",
"return",
"username",
",",
"password",
"except",
"IndexError",
":",
"raise",
"InvalidAuthSyntax",
"(",
"*",
"*",
"{",
"'detail'",
":",
"'You are using the Basic Authentication scheme as '",
"'required to login but your Authorization header is '",
"'completely missing the login credentials.'",
",",
"'links'",
":",
"'tools.ietf.org/html/rfc2617#section-2'",
",",
"}",
")",
"except",
"TypeError",
":",
"raise",
"InvalidAuthSyntax",
"(",
"*",
"*",
"{",
"'detail'",
":",
"'Our API failed to base64 decode your Basic '",
"'Authentication login credentials in the '",
"'Authorization header. They seem to be malformed.'",
",",
"'links'",
":",
"'tools.ietf.org/html/rfc2617#section-2'",
",",
"}",
")",
"except",
"ValueError",
":",
"raise",
"InvalidAuthSyntax",
"(",
"*",
"*",
"{",
"'detail'",
":",
"'Our API failed to identify a username & password '",
"'in your Basic Authentication Authorization header '",
"'after decoding them. The username or password is '",
"'either missing or not separated by a \":\" per the '",
"'spec. Either way the credentials are malformed.'",
",",
"'links'",
":",
"'tools.ietf.org/html/rfc2617#section-2'",
",",
"}",
")"
] |
Get the username & password from the Authorization header
If the header is actually malformed where Basic Auth was
indicated by the request then an InvalidAuthSyntax exception
is raised. Otherwise an AuthRequired exception since it's
unclear in this scenario if the requestor was even aware
Authentication was required & if so which "scheme".
Calls _validate_auth_scheme first & bubbles up its
exceptions.
:return:
tuple (username, password)
:raise:
AuthRequired, InvalidAuthSyntax
|
[
"Get",
"the",
"username",
"&",
"password",
"from",
"the",
"Authorization",
"header"
] |
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
|
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/middleware/basicauth/__init__.py#L81-L129
|
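The decoding steps in `_get_creds` can be exercised standalone; this sketch reimplements them without goldman's `naked` helper or its exception classes (all names here are illustrative):

```python
from base64 import b64decode

def parse_basic_auth(auth_header):
    """Return (username, password) from an 'Authorization: Basic ...' value."""
    scheme, _, b64creds = auth_header.partition(' ')
    if scheme.lower() != 'basic' or not b64creds:
        raise ValueError('missing or non-Basic credentials')
    # binascii.Error on bad base64 (TypeError on Python 2, which goldman catches)
    creds = b64decode(b64creds).decode('utf-8')
    username, sep, password = creds.partition(':')
    if not sep:
        raise ValueError('credentials not separated by ":"')
    return username, password

print(parse_basic_auth('Basic dXNlcjpwYXNz'))   # -> ('user', 'pass')
```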
243,803
|
nanvel/c2p2
|
c2p2/models.py
|
Site._update_page
|
def _update_page(self, uri, path):
"""Update page content."""
if uri in self._pages:
self._pages[uri].update()
else:
self._pages[uri] = Page(uri=uri, path=path)
|
python
|
def _update_page(self, uri, path):
"""Update page content."""
if uri in self._pages:
self._pages[uri].update()
else:
self._pages[uri] = Page(uri=uri, path=path)
|
[
"def",
"_update_page",
"(",
"self",
",",
"uri",
",",
"path",
")",
":",
"if",
"uri",
"in",
"self",
".",
"_pages",
":",
"self",
".",
"_pages",
"[",
"uri",
"]",
".",
"update",
"(",
")",
"else",
":",
"self",
".",
"_pages",
"[",
"uri",
"]",
"=",
"Page",
"(",
"uri",
"=",
"uri",
",",
"path",
"=",
"path",
")"
] |
Update page content.
|
[
"Update",
"page",
"content",
"."
] |
3900a9bb54d35e1332b92d6560f3cb1e77943209
|
https://github.com/nanvel/c2p2/blob/3900a9bb54d35e1332b92d6560f3cb1e77943209/c2p2/models.py#L189-L194
|
243,804
|
nanvel/c2p2
|
c2p2/models.py
|
Site._update_labels
|
def _update_labels(self):
"""Updates list of available labels."""
labels = set()
for page in self.get_pages():
for label in page.labels:
labels.add(label)
to_delete = self._labels - labels
for label in labels:
self._labels.add(label)
for label in to_delete:
self._labels.discard(label)
|
python
|
def _update_labels(self):
"""Updates list of available labels."""
labels = set()
for page in self.get_pages():
for label in page.labels:
labels.add(label)
to_delete = self._labels - labels
for label in labels:
self._labels.add(label)
for label in to_delete:
self._labels.discard(label)
|
[
"def",
"_update_labels",
"(",
"self",
")",
":",
"labels",
"=",
"set",
"(",
")",
"for",
"page",
"in",
"self",
".",
"get_pages",
"(",
")",
":",
"for",
"label",
"in",
"page",
".",
"labels",
":",
"labels",
".",
"add",
"(",
"label",
")",
"to_delete",
"=",
"self",
".",
"_labels",
"-",
"labels",
"for",
"label",
"in",
"labels",
":",
"self",
".",
"_labels",
".",
"add",
"(",
"label",
")",
"for",
"label",
"in",
"to_delete",
":",
"self",
".",
"_labels",
".",
"discard",
"(",
"label",
")"
] |
Updates list of available labels.
|
[
"Updates",
"list",
"of",
"available",
"labels",
"."
] |
3900a9bb54d35e1332b92d6560f3cb1e77943209
|
https://github.com/nanvel/c2p2/blob/3900a9bb54d35e1332b92d6560f3cb1e77943209/c2p2/models.py#L201-L211
|
243,805
|
nanvel/c2p2
|
c2p2/models.py
|
Site.get_pages
|
def get_pages(self, label=None):
"""Returns list of pages with specified label."""
return (
page for page in sorted(
self._pages.values(), key=lambda i: i.created, reverse=True
) if ((not label or label in page.labels) and page.visible)
)
|
python
|
def get_pages(self, label=None):
"""Returns list of pages with specified label."""
return (
page for page in sorted(
self._pages.values(), key=lambda i: i.created, reverse=True
) if ((not label or label in page.labels) and page.visible)
)
|
[
"def",
"get_pages",
"(",
"self",
",",
"label",
"=",
"None",
")",
":",
"return",
"(",
"page",
"for",
"page",
"in",
"sorted",
"(",
"self",
".",
"_pages",
".",
"values",
"(",
")",
",",
"key",
"=",
"lambda",
"i",
":",
"i",
".",
"created",
",",
"reverse",
"=",
"True",
")",
"if",
"(",
"(",
"not",
"label",
"or",
"label",
"in",
"page",
".",
"labels",
")",
"and",
"page",
".",
"visible",
")",
")"
] |
Returns list of pages with specified label.
|
[
"Returns",
"list",
"of",
"pages",
"with",
"specified",
"label",
"."
] |
3900a9bb54d35e1332b92d6560f3cb1e77943209
|
https://github.com/nanvel/c2p2/blob/3900a9bb54d35e1332b92d6560f3cb1e77943209/c2p2/models.py#L216-L222
|
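A toy run of the same filter-and-sort logic as `Site.get_pages`, with a minimal stand-in for the `Page` class (the real one lives elsewhere in c2p2/models.py):

```python
from collections import namedtuple

Page = namedtuple('Page', 'created labels visible')

pages = {
    '/a': Page(created=1, labels={'python'}, visible=True),
    '/b': Page(created=2, labels={'go'}, visible=True),
    '/c': Page(created=3, labels={'python'}, visible=False),  # hidden
}

label = 'python'
matching = [
    page for page in sorted(pages.values(), key=lambda i: i.created, reverse=True)
    if (not label or label in page.labels) and page.visible
]
print(matching)   # only '/a': visible and labelled 'python'
```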
243,806
|
collectiveacuity/labPack
|
labpack/banking/capitalone.py
|
depositsClient._requests
|
def _requests(self, url, method='GET', headers=None, params=None, data=None, errors=None):
''' a helper method for relaying requests from client to api '''
title = '%s._requests' % self.__class__.__name__
# import dependencies
from time import time
import requests
# validate access token
if not self._access_token:
self.access_token()
if self.retrieve_details:
self._get_products()
# refresh token
current_time = time()
if current_time > self.expires_at:
self.access_token()
if self.retrieve_details:
self._get_products()
# construct request kwargs
request_kwargs = {
'url': url,
'headers': {
'Authorization': 'Bearer %s' % self._access_token,
'Accept': 'application/json;v=2'
},
'params': {},
'data': {}
}
if headers:
request_kwargs['headers'].update(headers)
if params:
request_kwargs['params'].update(params)
if data:
request_kwargs['data'].update(data)
# send request
if method == 'POST':
try:
response = requests.post(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'POST'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
elif method == 'GET':
try:
response = requests.get(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'GET'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
else:
raise ValueError('%s(method='') must be either GET or POST' % title)
# handle response
response_details = self.response_handler.handle(response, errors)
return response_details
|
python
|
def _requests(self, url, method='GET', headers=None, params=None, data=None, errors=None):
''' a helper method for relaying requests from client to api '''
title = '%s._requests' % self.__class__.__name__
# import dependencies
from time import time
import requests
# validate access token
if not self._access_token:
self.access_token()
if self.retrieve_details:
self._get_products()
# refresh token
current_time = time()
if current_time > self.expires_at:
self.access_token()
if self.retrieve_details:
self._get_products()
# construct request kwargs
request_kwargs = {
'url': url,
'headers': {
'Authorization': 'Bearer %s' % self._access_token,
'Accept': 'application/json;v=2'
},
'params': {},
'data': {}
}
if headers:
request_kwargs['headers'].update(headers)
if params:
request_kwargs['params'].update(params)
if data:
request_kwargs['data'].update(data)
# send request
if method == 'POST':
try:
response = requests.post(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'POST'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
elif method == 'GET':
try:
response = requests.get(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'GET'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
else:
raise ValueError('%s(method='') must be either GET or POST' % title)
# handle response
response_details = self.response_handler.handle(response, errors)
return response_details
|
[
"def",
"_requests",
"(",
"self",
",",
"url",
",",
"method",
"=",
"'GET'",
",",
"headers",
"=",
"None",
",",
"params",
"=",
"None",
",",
"data",
"=",
"None",
",",
"errors",
"=",
"None",
")",
":",
"title",
"=",
"'%s._requests'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# import dependencies",
"from",
"time",
"import",
"time",
"import",
"requests",
"# validate access token",
"if",
"not",
"self",
".",
"_access_token",
":",
"self",
".",
"access_token",
"(",
")",
"if",
"self",
".",
"retrieve_details",
":",
"self",
".",
"_get_products",
"(",
")",
"# refresh token",
"current_time",
"=",
"time",
"(",
")",
"if",
"current_time",
">",
"self",
".",
"expires_at",
":",
"self",
".",
"access_token",
"(",
")",
"if",
"self",
".",
"retrieve_details",
":",
"self",
".",
"_get_products",
"(",
")",
"# construct request kwargs",
"request_kwargs",
"=",
"{",
"'url'",
":",
"url",
",",
"'headers'",
":",
"{",
"'Authorization'",
":",
"'Bearer %s'",
"%",
"self",
".",
"_access_token",
",",
"'Accept'",
":",
"'application/json;v=2'",
"}",
",",
"'params'",
":",
"{",
"}",
",",
"'data'",
":",
"{",
"}",
"}",
"if",
"headers",
":",
"request_kwargs",
"[",
"'headers'",
"]",
".",
"update",
"(",
"headers",
")",
"if",
"params",
":",
"request_kwargs",
"[",
"'params'",
"]",
".",
"update",
"(",
"params",
")",
"if",
"data",
":",
"request_kwargs",
"[",
"'data'",
"]",
".",
"update",
"(",
"data",
")",
"# send request",
"if",
"method",
"==",
"'POST'",
":",
"try",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"*",
"*",
"request_kwargs",
")",
"except",
"Exception",
":",
"if",
"self",
".",
"requests_handler",
":",
"request_kwargs",
"[",
"'method'",
"]",
"=",
"'POST'",
"request_object",
"=",
"requests",
".",
"Request",
"(",
"*",
"*",
"request_kwargs",
")",
"return",
"self",
".",
"requests_handler",
"(",
"request_object",
")",
"else",
":",
"raise",
"elif",
"method",
"==",
"'GET'",
":",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"*",
"*",
"request_kwargs",
")",
"except",
"Exception",
":",
"if",
"self",
".",
"requests_handler",
":",
"request_kwargs",
"[",
"'method'",
"]",
"=",
"'GET'",
"request_object",
"=",
"requests",
".",
"Request",
"(",
"*",
"*",
"request_kwargs",
")",
"return",
"self",
".",
"requests_handler",
"(",
"request_object",
")",
"else",
":",
"raise",
"else",
":",
"raise",
"ValueError",
"(",
"'%s(method='",
"') must be either GET or POST'",
"%",
"title",
")",
"# handle response",
"response_details",
"=",
"self",
".",
"response_handler",
".",
"handle",
"(",
"response",
",",
"errors",
")",
"return",
"response_details"
] |
a helper method for relaying requests from client to api
|
[
"a",
"helper",
"method",
"for",
"relaying",
"requests",
"from",
"client",
"to",
"api"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/banking/capitalone.py#L230-L297
|
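The interesting part of `_requests` is the fallback: when the HTTP call itself fails, the prepared call is handed to a user-supplied `requests_handler` (for example, an offline queue) instead of erroring out. A pared-down sketch of that pattern; the URL and handler are made up, and where the original catches bare `Exception`, this narrows to connection errors:

```python
import requests

def send_or_queue(url, requests_handler=None):
    request_kwargs = {'url': url, 'headers': {}, 'params': {}, 'data': {}}
    try:
        return requests.get(**request_kwargs)
    except requests.exceptions.ConnectionError:
        if requests_handler:
            # hand the unsent call to the fallback, as _requests does above
            request_kwargs['method'] = 'GET'
            return requests_handler(requests.Request(**request_kwargs))
        raise

# e.g. send_or_queue('https://api.example.com/x', requests_handler=offline_queue.put)
```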
243,807
|
collectiveacuity/labPack
|
labpack/banking/capitalone.py
|
depositsClient._get_products
|
def _get_products(self):
''' a method to retrieve account product details at initialization '''
# request product list
products_request = self.account_products()
if products_request['error']:
raise Exception(products_request['error'])
# construct list of product ids
product_ids = []
for product in products_request["json"]["entries"]:
product_ids.append(product['productId'])
# construct default product map
self.products = {}
# request product details
for id in product_ids:
product_request = self.account_product(id)
if product_request['error']:
raise Exception(product_request['error'])
self.products[id] = product_request['json']
return self.products
|
python
|
def _get_products(self):
''' a method to retrieve account product details at initialization '''
# request product list
products_request = self.account_products()
if products_request['error']:
raise Exception(products_request['error'])
# construct list of product ids
product_ids = []
for product in products_request["json"]["entries"]:
product_ids.append(product['productId'])
# construct default product map
self.products = {}
# request product details
for id in product_ids:
product_request = self.account_product(id)
if product_request['error']:
raise Exception(product_request['error'])
self.products[id] = product_request['json']
return self.products
|
[
"def",
"_get_products",
"(",
"self",
")",
":",
"# request product list",
"products_request",
"=",
"self",
".",
"account_products",
"(",
")",
"if",
"products_request",
"[",
"'error'",
"]",
":",
"raise",
"Exception",
"(",
"products_request",
"[",
"'error'",
"]",
")",
"# construct list of product ids",
"product_ids",
"=",
"[",
"]",
"for",
"product",
"in",
"products_request",
"[",
"\"json\"",
"]",
"[",
"\"entries\"",
"]",
":",
"product_ids",
".",
"append",
"(",
"product",
"[",
"'productId'",
"]",
")",
"# construct default product map",
"self",
".",
"products",
"=",
"{",
"}",
"# request product details",
"for",
"id",
"in",
"product_ids",
":",
"product_request",
"=",
"self",
".",
"account_product",
"(",
"id",
")",
"if",
"product_request",
"[",
"'error'",
"]",
":",
"raise",
"Exception",
"(",
"product_request",
"[",
"'error'",
"]",
")",
"self",
".",
"products",
"[",
"id",
"]",
"=",
"product_request",
"[",
"'json'",
"]",
"return",
"self",
".",
"products"
] |
a method to retrieve account product details at initialization
|
[
"a",
"method",
"to",
"retrieve",
"account",
"product",
"details",
"at",
"initialization"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/banking/capitalone.py#L299-L323
|
243,808
|
collectiveacuity/labPack
|
labpack/banking/capitalone.py
|
depositsClient.access_token
|
def access_token(self):
''' a method to acquire an oauth access token '''
title = '%s.access_token' % self.__class__.__name__
# import dependencies
from time import time
import requests
# construct request kwargs
request_kwargs = {
'url': self.token_endpoint,
'data': {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'client_credentials'
}
}
# send request
try:
current_time = time()
response = requests.post(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'POST'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
response_details = self.response_handler.handle(response)
if response_details['json']:
self._access_token = response_details['json']['access_token']
expires_in = response_details['json']['expires_in']
self.expires_at = current_time + expires_in
return self._access_token
|
python
|
def access_token(self):
''' a method to acquire an oauth access token '''
title = '%s.access_token' % self.__class__.__name__
# import dependencies
from time import time
import requests
# construct request kwargs
request_kwargs = {
'url': self.token_endpoint,
'data': {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'client_credentials'
}
}
# send request
try:
current_time = time()
response = requests.post(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'POST'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
response_details = self.response_handler.handle(response)
if response_details['json']:
self._access_token = response_details['json']['access_token']
expires_in = response_details['json']['expires_in']
self.expires_at = current_time + expires_in
return self._access_token
|
[
"def",
"access_token",
"(",
"self",
")",
":",
"title",
"=",
"'%s.access_token'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# import dependencies",
"from",
"time",
"import",
"time",
"import",
"requests",
"# construct request kwargs",
"request_kwargs",
"=",
"{",
"'url'",
":",
"self",
".",
"token_endpoint",
",",
"'data'",
":",
"{",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'client_secret'",
":",
"self",
".",
"client_secret",
",",
"'grant_type'",
":",
"'client_credentials'",
"}",
"}",
"# send request",
"try",
":",
"current_time",
"=",
"time",
"(",
")",
"response",
"=",
"requests",
".",
"post",
"(",
"*",
"*",
"request_kwargs",
")",
"except",
"Exception",
":",
"if",
"self",
".",
"requests_handler",
":",
"request_kwargs",
"[",
"'method'",
"]",
"=",
"'POST'",
"request_object",
"=",
"requests",
".",
"Request",
"(",
"*",
"*",
"request_kwargs",
")",
"return",
"self",
".",
"requests_handler",
"(",
"request_object",
")",
"else",
":",
"raise",
"response_details",
"=",
"self",
".",
"response_handler",
".",
"handle",
"(",
"response",
")",
"if",
"response_details",
"[",
"'json'",
"]",
":",
"self",
".",
"_access_token",
"=",
"response_details",
"[",
"'json'",
"]",
"[",
"'access_token'",
"]",
"expires_in",
"=",
"response_details",
"[",
"'json'",
"]",
"[",
"'expires_in'",
"]",
"self",
".",
"expires_at",
"=",
"current_time",
"+",
"expires_in",
"return",
"self",
".",
"_access_token"
] |
a method to acquire an oauth access token
|
[
"a",
"method",
"to",
"acquire",
"an",
"oauth",
"access",
"token"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/banking/capitalone.py#L325-L364
|
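The method above is a standard OAuth2 client-credentials grant; a generic sketch of the same flow (the token endpoint is a placeholder argument, not Capital One's real URL):

```python
from time import time
import requests

def fetch_token(token_endpoint, client_id, client_secret):
    now = time()
    resp = requests.post(token_endpoint, data={
        'client_id': client_id,
        'client_secret': client_secret,
        'grant_type': 'client_credentials',
    })
    body = resp.json()
    # Returning an absolute expiry lets callers refresh proactively,
    # exactly as _requests does with self.expires_at above.
    return body['access_token'], now + body['expires_in']
```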
243,809
|
collectiveacuity/labPack
|
labpack/banking/capitalone.py
|
depositsClient.account_products
|
def account_products(self):
''' a method to retrieve a list of the account products
returns:
{
"error": "",
"code": 200,
"method": "GET",
"url": "https://...",
"headers": { },
"json": {
"entries": [
{
"productId": "3000",
"productName": "Capital One 360 Savings Account"
}
]
}
}
'''
title = '%s.account_products' % self.__class__.__name__
# construct url
url = self.deposits_endpoint + 'account-products'
# send request
details = self._requests(url)
return details
|
python
|
def account_products(self):
''' a method to retrieve a list of the account products
returns:
{
"error": "",
"code": 200,
"method": "GET",
"url": "https://...",
"headers": { },
"json": {
"entries": [
{
"productId": "3000",
"productName": "Capital One 360 Savings Account"
}
]
}
}
'''
title = '%s.account_products' % self.__class__.__name__
# construct url
url = self.deposits_endpoint + 'account-products'
# send request
details = self._requests(url)
return details
|
[
"def",
"account_products",
"(",
"self",
")",
":",
"title",
"=",
"'%s.account_products'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# construct url",
"url",
"=",
"self",
".",
"deposits_endpoint",
"+",
"'account-products'",
"# send request",
"details",
"=",
"self",
".",
"_requests",
"(",
"url",
")",
"return",
"details"
] |
a method to retrieve a list of the account products
returns:
{
"error": "",
"code": 200,
"method": "GET",
"url": "https://...",
"headers": { },
"json": {
"entries": [
{
"productId": "3000",
"productName": "Capital One 360 Savings Account"
}
]
}
}
|
[
"a",
"method",
"to",
"retrieve",
"a",
"list",
"of",
"the",
"account",
"products"
] |
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
|
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/banking/capitalone.py#L366-L396
|
243,810
|
uw-it-aca/uw-restclients-mailman
|
uw_mailman/instructor_term_list.py
|
get_instructor_term_list_name
|
def get_instructor_term_list_name(instructor_netid, year, quarter):
"""
Return the list address of UW instructor email list for
the given year and quarter
"""
return "{uwnetid}_{quarter}{year}".format(
uwnetid=instructor_netid,
quarter=quarter.lower()[:2],
year=str(year)[-2:])
|
python
|
def get_instructor_term_list_name(instructor_netid, year, quarter):
"""
Return the list address of UW instructor email list for
the given year and quarter
"""
return "{uwnetid}_{quarter}{year}".format(
uwnetid=instructor_netid,
quarter=quarter.lower()[:2],
year=str(year)[-2:])
|
[
"def",
"get_instructor_term_list_name",
"(",
"instructor_netid",
",",
"year",
",",
"quarter",
")",
":",
"return",
"\"{uwnetid}_{quarter}{year}\"",
".",
"format",
"(",
"uwnetid",
"=",
"instructor_netid",
",",
"quarter",
"=",
"quarter",
".",
"lower",
"(",
")",
"[",
":",
"2",
"]",
",",
"year",
"=",
"str",
"(",
"year",
")",
"[",
"-",
"2",
":",
"]",
")"
] |
Return the list address of UW instructor email list for
the given year and quarter
|
[
"Return",
"the",
"list",
"address",
"of",
"UW",
"instructor",
"email",
"list",
"for",
"the",
"given",
"year",
"and",
"quarter"
] |
ef077f2cc945871422fcd66391e82264e2384b2c
|
https://github.com/uw-it-aca/uw-restclients-mailman/blob/ef077f2cc945871422fcd66391e82264e2384b2c/uw_mailman/instructor_term_list.py#L8-L16
|
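Worked example: the quarter keeps its first two letters and the year its last two digits ('javerage' is a hypothetical netid):

```python
print(get_instructor_term_list_name('javerage', 2013, 'Summer'))
# -> 'javerage_su13'
```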
243,811
|
wooyek/django-pascal-templates
|
src/pascal_templates/views.py
|
TemplatePathMixin.get_template_path
|
def get_template_path(self, meta=None, **kwargs):
"""
Formats template_name_path_pattern with kwargs given.
"""
if 'template_name_suffix' not in kwargs or kwargs.get('template_name_suffix') is None:
kwargs['template_name_suffix'] = self.get_template_name_suffix()
return self.template_name_path_pattern.format(**kwargs)
|
python
|
def get_template_path(self, meta=None, **kwargs):
"""
Formats template_name_path_pattern with kwargs given.
"""
if 'template_name_suffix' not in kwargs or kwargs.get('template_name_suffix') is None:
kwargs['template_name_suffix'] = self.get_template_name_suffix()
return self.template_name_path_pattern.format(**kwargs)
|
[
"def",
"get_template_path",
"(",
"self",
",",
"meta",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'template_name_suffix'",
"not",
"in",
"kwargs",
"or",
"kwargs",
".",
"get",
"(",
"'template_name_suffix'",
")",
"is",
"None",
":",
"kwargs",
"[",
"'template_name_suffix'",
"]",
"=",
"self",
".",
"get_template_name_suffix",
"(",
")",
"return",
"self",
".",
"template_name_path_pattern",
".",
"format",
"(",
"*",
"*",
"kwargs",
")"
] |
Formats template_name_path_pattern with kwargs given.
|
[
"Formats",
"template_name_path_pattern",
"with",
"kwargs",
"given",
"."
] |
4c2216de7f7e53c7db6c2ab1f75f0b9065ce580e
|
https://github.com/wooyek/django-pascal-templates/blob/4c2216de7f7e53c7db6c2ab1f75f0b9065ce580e/src/pascal_templates/views.py#L14-L20
|
243,812
|
sliem/barrett
|
barrett/util.py
|
threenum
|
def threenum(h5file, var, post_col='mult'):
""" Calculates the three number summary for a variable.
The three number summary is the minimum, maximum and the mean
of the data. Traditionally one would summarise data with the
five number summary: max, min, 1st, 2nd (median), 3rd quartile.
But quantiles are hard to calculate without sorting the data,
which is hard to do out-of-core.
"""
f = h5py.File(h5file, 'r')
d = f[var]
w = f[post_col]
s = d.chunks[0]
n = d.shape[0]
maxval = -np.abs(d[0])
minval = np.abs(d[0])
total = 0
wsum = 0
for x in range(0, n, s):
aN = ~np.logical_or(np.isnan(d[x:x+s]), np.isinf(d[x:x+s]))
d_c = d[x:x+s][aN]
w_c = w[x:x+s][aN]
chunk_max = np.max(d_c)
chunk_min = np.min(d_c)
maxval = chunk_max if chunk_max > maxval else maxval
minval = chunk_min if chunk_min < minval else minval
total += np.sum(w_c*d_c)
wsum += np.sum(w_c)
f.close()
mean = total/float(wsum)
return (minval, maxval, mean)
|
python
|
def threenum(h5file, var, post_col='mult'):
""" Calculates the three number summary for a variable.
The three number summary is the minimum, maximum and the mean
of the data. Traditionally one would summarise data with the
five number summary: max, min, 1st, 2nd (median), 3rd quartile.
But quantiles are hard to calculate without sorting the data,
which is hard to do out-of-core.
"""
f = h5py.File(h5file, 'r')
d = f[var]
w = f[post_col]
s = d.chunks[0]
n = d.shape[0]
maxval = -np.abs(d[0])
minval = np.abs(d[0])
total = 0
wsum = 0
for x in range(0, n, s):
aN = ~np.logical_or(np.isnan(d[x:x+s]), np.isinf(d[x:x+s]))
d_c = d[x:x+s][aN]
w_c = w[x:x+s][aN]
chunk_max = np.max(d_c)
chunk_min = np.min(d_c)
maxval = chunk_max if chunk_max > maxval else maxval
minval = chunk_min if chunk_min < minval else minval
total += np.sum(w_c*d_c)
wsum += np.sum(w_c)
f.close()
mean = total/float(wsum)
return (minval, maxval, mean)
|
[
"def",
"threenum",
"(",
"h5file",
",",
"var",
",",
"post_col",
"=",
"'mult'",
")",
":",
"f",
"=",
"h5py",
".",
"File",
"(",
"h5file",
",",
"'r'",
")",
"d",
"=",
"f",
"[",
"var",
"]",
"w",
"=",
"f",
"[",
"post_col",
"]",
"s",
"=",
"d",
".",
"chunks",
"[",
"0",
"]",
"n",
"=",
"d",
".",
"shape",
"[",
"0",
"]",
"maxval",
"=",
"-",
"np",
".",
"abs",
"(",
"d",
"[",
"0",
"]",
")",
"minval",
"=",
"np",
".",
"abs",
"(",
"d",
"[",
"0",
"]",
")",
"total",
"=",
"0",
"wsum",
"=",
"0",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"n",
",",
"s",
")",
":",
"aN",
"=",
"~",
"np",
".",
"logical_or",
"(",
"np",
".",
"isnan",
"(",
"d",
"[",
"x",
":",
"x",
"+",
"s",
"]",
")",
",",
"np",
".",
"isinf",
"(",
"d",
"[",
"x",
":",
"x",
"+",
"s",
"]",
")",
")",
"d_c",
"=",
"d",
"[",
"x",
":",
"x",
"+",
"s",
"]",
"[",
"aN",
"]",
"w_c",
"=",
"w",
"[",
"x",
":",
"x",
"+",
"s",
"]",
"[",
"aN",
"]",
"chunk_max",
"=",
"np",
".",
"max",
"(",
"d_c",
")",
"chunk_min",
"=",
"np",
".",
"min",
"(",
"d_c",
")",
"maxval",
"=",
"chunk_max",
"if",
"chunk_max",
">",
"maxval",
"else",
"maxval",
"minval",
"=",
"chunk_min",
"if",
"chunk_min",
"<",
"minval",
"else",
"minval",
"total",
"+=",
"np",
".",
"sum",
"(",
"w_c",
"*",
"d_c",
")",
"wsum",
"+=",
"np",
".",
"sum",
"(",
"w_c",
")",
"f",
".",
"close",
"(",
")",
"mean",
"=",
"total",
"/",
"float",
"(",
"wsum",
")",
"return",
"(",
"minval",
",",
"maxval",
",",
"mean",
")"
] |
Calculates the three number summary for a variable.
The three number summary is the minimum, maximum and the mean
of the data. Traditionally one would summarise data with the
five number summary: max, min, 1st, 2nd (median), 3rd quartile.
But quantiles are hard to calculate without sorting the data,
which is hard to do out-of-core.
|
[
"Calculates",
"the",
"three",
"number",
"summary",
"for",
"a",
"variable",
"."
] |
d48e96591577d1fcecd50c21a9be71573218cde7
|
https://github.com/sliem/barrett/blob/d48e96591577d1fcecd50c21a9be71573218cde7/barrett/util.py#L6-L45
|
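A quick check of `threenum` on a synthetic chunked HDF5 file; the dataset names 'x' and 'mult' are made up for the test:

```python
import h5py
import numpy as np

with h5py.File('toy.h5', 'w') as f:
    f.create_dataset('x', data=np.array([1.0, 2.0, 3.0, 4.0]), chunks=(2,))
    f.create_dataset('mult', data=np.ones(4), chunks=(2,))

print(threenum('toy.h5', 'x'))   # min=1.0, max=4.0, weighted mean=2.5
```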
243,813
|
sliem/barrett
|
barrett/util.py
|
filechunk
|
def filechunk(f, chunksize):
"""Iterator that allow for piecemeal processing of a file."""
while True:
chunk = tuple(itertools.islice(f, chunksize))
if not chunk:
return
yield np.loadtxt(iter(chunk), dtype=np.float64)
|
python
|
def filechunk(f, chunksize):
"""Iterator that allow for piecemeal processing of a file."""
while True:
chunk = tuple(itertools.islice(f, chunksize))
if not chunk:
return
yield np.loadtxt(iter(chunk), dtype=np.float64)
|
[
"def",
"filechunk",
"(",
"f",
",",
"chunksize",
")",
":",
"while",
"True",
":",
"chunk",
"=",
"tuple",
"(",
"itertools",
".",
"islice",
"(",
"f",
",",
"chunksize",
")",
")",
"if",
"not",
"chunk",
":",
"return",
"yield",
"np",
".",
"loadtxt",
"(",
"iter",
"(",
"chunk",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")"
] |
Iterator that allow for piecemeal processing of a file.
|
[
"Iterator",
"that",
"allow",
"for",
"piecemeal",
"processing",
"of",
"a",
"file",
"."
] |
d48e96591577d1fcecd50c21a9be71573218cde7
|
https://github.com/sliem/barrett/blob/d48e96591577d1fcecd50c21a9be71573218cde7/barrett/util.py#L48-L54
|
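`filechunk` works on any line iterator, not just files; here a small in-memory text "file" is processed two rows at a time (assumes `filechunk` and its `itertools`/`numpy` imports are in scope):

```python
import io

f = io.StringIO('1 2\n3 4\n5 6\n')
for arr in filechunk(f, chunksize=2):
    print(arr.shape)   # (2, 2) for the full chunk, then (2,) for the lone last row
```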
243,814
|
sliem/barrett
|
barrett/util.py
|
convert_chain
|
def convert_chain(
txtfiles,
headers,
h5file,
chunksize):
"""Converts chain in plain text format into HDF5 format.
Keyword arguments:
txtfiles -- list of paths to the plain text chains.
headers -- name of each column.
h5file -- where to put the resulting HDF5 file.
chunksize -- how large the HDF5 chunk, i.e. number of rows.
Chunking - How to pick a chunksize
TODO Optimal chunk size unknown; our usage makes caching irrelevant, and
we use every variable we read. A larger size should make compression more
efficient and require fewer IO reads. Measurements needed.
"""
h5 = h5py.File(h5file, 'w')
for h in headers:
h5.create_dataset(h,
shape=(0,),
maxshape=(None,),
dtype=np.float64,
chunks=(chunksize,),
compression='gzip',
shuffle=True)
for txtfile in txtfiles:
d = np.loadtxt(txtfile, dtype=np.float64)
if len(d.shape) == 1:
d = np.array([d])
dnrows = d.shape[0]
for pos, h in enumerate(headers):
x = h5[h]
xnrows = x.shape[0]
x.resize(dnrows+xnrows, axis=0)
x[xnrows:] = d[:,pos]
h5.close()
|
python
|
def convert_chain(
txtfiles,
headers,
h5file,
chunksize):
"""Converts chain in plain text format into HDF5 format.
Keyword arguments:
txtfiles -- list of paths to the plain text chains.
headers -- name of each column.
h5file -- where to put the resulting HDF5 file.
chunksize -- how large the HDF5 chunk, i.e. number of rows.
Chunking - How to pick a chunksize
TODO Optimal chunk size unknown; our usage makes caching irrelevant, and
we use every variable we read. A larger size should make compression more
efficient and require fewer IO reads. Measurements needed.
"""
h5 = h5py.File(h5file, 'w')
for h in headers:
h5.create_dataset(h,
shape=(0,),
maxshape=(None,),
dtype=np.float64,
chunks=(chunksize,),
compression='gzip',
shuffle=True)
for txtfile in txtfiles:
d = np.loadtxt(txtfile, dtype=np.float64)
if len(d.shape) == 1:
d = np.array([d])
dnrows = d.shape[0]
for pos, h in enumerate(headers):
x = h5[h]
xnrows = x.shape[0]
x.resize(dnrows+xnrows, axis=0)
x[xnrows:] = d[:,pos]
h5.close()
|
[
"def",
"convert_chain",
"(",
"txtfiles",
",",
"headers",
",",
"h5file",
",",
"chunksize",
")",
":",
"h5",
"=",
"h5py",
".",
"File",
"(",
"h5file",
",",
"'w'",
")",
"for",
"h",
"in",
"headers",
":",
"h5",
".",
"create_dataset",
"(",
"h",
",",
"shape",
"=",
"(",
"0",
",",
")",
",",
"maxshape",
"=",
"(",
"None",
",",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
",",
"chunks",
"=",
"(",
"chunksize",
",",
")",
",",
"compression",
"=",
"'gzip'",
",",
"shuffle",
"=",
"True",
")",
"for",
"txtfile",
"in",
"txtfiles",
":",
"d",
"=",
"np",
".",
"loadtxt",
"(",
"txtfile",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"if",
"len",
"(",
"d",
".",
"shape",
")",
"==",
"1",
":",
"d",
"=",
"np",
".",
"array",
"(",
"[",
"d",
"]",
")",
"dnrows",
"=",
"d",
".",
"shape",
"[",
"0",
"]",
"for",
"pos",
",",
"h",
"in",
"enumerate",
"(",
"headers",
")",
":",
"x",
"=",
"h5",
"[",
"h",
"]",
"xnrows",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
"x",
".",
"resize",
"(",
"dnrows",
"+",
"xnrows",
",",
"axis",
"=",
"0",
")",
"x",
"[",
"xnrows",
":",
"]",
"=",
"d",
"[",
":",
",",
"pos",
"]",
"h5",
".",
"close",
"(",
")"
] |
Converts chain in plain text format into HDF5 format.
Keyword arguments:
txtfiles -- list of paths to the plain text chains.
headers -- name of each column.
h5file -- where to put the resulting HDF5 file.
chunksize -- how large the HDF5 chunk, i.e. number of rows.
Chunking - How to pick a chunksize
TODO Optimal chunk size unknown; our usage makes caching irrelevant, and
we use every variable we read. A larger size should make compression more
efficient and require fewer IO reads. Measurements needed.
|
[
"Converts",
"chain",
"in",
"plain",
"text",
"format",
"into",
"HDF5",
"format",
"."
] |
d48e96591577d1fcecd50c21a9be71573218cde7
|
https://github.com/sliem/barrett/blob/d48e96591577d1fcecd50c21a9be71573218cde7/barrett/util.py#L57-L102
|
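A hypothetical call converting two plain-text chains with three named columns (file and column names are illustrative, not from the project):

```python
convert_chain(
    txtfiles=['chain_1.txt', 'chain_2.txt'],
    headers=['mult', 'loglike', 'm0'],
    h5file='chains.h5',
    chunksize=10000,
)
```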
243,815
|
radjkarl/fancyTools
|
fancytools/pystructure/getMembers.py
|
getAvailableClassesInModule
|
def getAvailableClassesInModule(prooveModule):
"""
return a list of all classes in the given module
that dont begin with '_'
"""
l = tuple(x[1] for x in inspect.getmembers(prooveModule, inspect.isclass))
l = [x for x in l if x.__name__[0] != "_"]
return l
|
python
|
def getAvailableClassesInModule(prooveModule):
"""
return a list of all classes in the given module
that dont begin with '_'
"""
l = tuple(x[1] for x in inspect.getmembers(prooveModule, inspect.isclass))
l = [x for x in l if x.__name__[0] != "_"]
return l
|
[
"def",
"getAvailableClassesInModule",
"(",
"prooveModule",
")",
":",
"l",
"=",
"tuple",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"inspect",
".",
"getmembers",
"(",
"prooveModule",
",",
"inspect",
".",
"isclass",
")",
")",
"l",
"=",
"[",
"x",
"for",
"x",
"in",
"l",
"if",
"x",
".",
"__name__",
"[",
"0",
"]",
"!=",
"\"_\"",
"]",
"return",
"l"
] |
return a list of all classes in the given module
that dont begin with '_'
|
[
"return",
"a",
"list",
"of",
"all",
"classes",
"in",
"the",
"given",
"module",
"that",
"dont",
"begin",
"with",
"_"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/pystructure/getMembers.py#L11-L18
|
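Example against a stdlib module; note that `inspect.getmembers` also picks up classes the module merely imports:

```python
import fractions

print(getAvailableClassesInModule(fractions))
# includes fractions.Fraction (plus imported classes such as decimal.Decimal)
```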
243,816
|
radjkarl/fancyTools
|
fancytools/pystructure/getMembers.py
|
getAvailableClassesInPackage
|
def getAvailableClassesInPackage(package):
"""
return a list of all classes in the given package
whose modules dont begin with '_'
"""
l = list(x[1] for x in inspect.getmembers(package, inspect.isclass))
modules = list(x[1] for x in inspect.getmembers(package, inspect.ismodule))
for m in modules:
l.extend(list(x[1] for x in inspect.getmembers(m, inspect.isclass)))
l = [x for x in l if x.__name__[0] != "_"]
n = 0
while n < len(l):
cls = l[n]
if not cls.__module__.startswith(package.__name__):
l.pop(n)
n -= 1
n += 1
return l
|
python
|
def getAvailableClassesInPackage(package):
"""
return a list of all classes in the given package
whose modules dont begin with '_'
"""
l = list(x[1] for x in inspect.getmembers(package, inspect.isclass))
modules = list(x[1] for x in inspect.getmembers(package, inspect.ismodule))
for m in modules:
l.extend(list(x[1] for x in inspect.getmembers(m, inspect.isclass)))
l = [x for x in l if x.__name__[0] != "_"]
n = 0
while n < len(l):
cls = l[n]
if not cls.__module__.startswith(package.__name__):
l.pop(n)
n -= 1
n += 1
return l
|
[
"def",
"getAvailableClassesInPackage",
"(",
"package",
")",
":",
"l",
"=",
"list",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"inspect",
".",
"getmembers",
"(",
"package",
",",
"inspect",
".",
"isclass",
")",
")",
"modules",
"=",
"list",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"inspect",
".",
"getmembers",
"(",
"package",
",",
"inspect",
".",
"ismodule",
")",
")",
"for",
"m",
"in",
"modules",
":",
"l",
".",
"extend",
"(",
"list",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"inspect",
".",
"getmembers",
"(",
"m",
",",
"inspect",
".",
"isclass",
")",
")",
")",
"l",
"=",
"[",
"x",
"for",
"x",
"in",
"l",
"if",
"x",
".",
"__name__",
"[",
"0",
"]",
"!=",
"\"_\"",
"]",
"n",
"=",
"0",
"while",
"n",
"<",
"len",
"(",
"l",
")",
":",
"cls",
"=",
"l",
"[",
"n",
"]",
"if",
"not",
"cls",
".",
"__module__",
".",
"startswith",
"(",
"package",
".",
"__name__",
")",
":",
"l",
".",
"pop",
"(",
"n",
")",
"n",
"-=",
"1",
"n",
"+=",
"1",
"return",
"l"
] |
return a list of all classes in the given package
whose modules dont begin with '_'
|
[
"return",
"a",
"list",
"of",
"all",
"classes",
"in",
"the",
"given",
"package",
"whose",
"modules",
"dont",
"begin",
"with",
"_"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/pystructure/getMembers.py#L21-L39
|
243,817
|
radjkarl/fancyTools
|
fancytools/pystructure/getMembers.py
|
getClassInPackageFromName
|
def getClassInPackageFromName(className, pkg):
"""
get a class from name within a package
"""
# TODO: more efficiency!
n = getAvClassNamesInPackage(pkg)
i = n.index(className)
c = getAvailableClassesInPackage(pkg)
return c[i]
|
python
|
def getClassInPackageFromName(className, pkg):
"""
get a class from name within a package
"""
# TODO: more efficiency!
n = getAvClassNamesInPackage(pkg)
i = n.index(className)
c = getAvailableClassesInPackage(pkg)
return c[i]
|
[
"def",
"getClassInPackageFromName",
"(",
"className",
",",
"pkg",
")",
":",
"# TODO: more efficiency!",
"n",
"=",
"getAvClassNamesInPackage",
"(",
"pkg",
")",
"i",
"=",
"n",
".",
"index",
"(",
"className",
")",
"c",
"=",
"getAvailableClassesInPackage",
"(",
"pkg",
")",
"return",
"c",
"[",
"i",
"]"
] |
get a class from name within a package
|
[
"get",
"a",
"class",
"from",
"name",
"within",
"a",
"package"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/pystructure/getMembers.py#L54-L62
|
243,818
|
radjkarl/fancyTools
|
fancytools/pystructure/getMembers.py
|
getClassInModuleFromName
|
def getClassInModuleFromName(className, module):
"""
get a class from name within a module
"""
n = getAvClassNamesInModule(module)
i = n.index(className)
c = getAvailableClassesInModule(module)
return c[i]
|
python
|
def getClassInModuleFromName(className, module):
"""
get a class from name within a module
"""
n = getAvClassNamesInModule(module)
i = n.index(className)
c = getAvailableClassesInModule(module)
return c[i]
|
[
"def",
"getClassInModuleFromName",
"(",
"className",
",",
"module",
")",
":",
"n",
"=",
"getAvClassNamesInModule",
"(",
"module",
")",
"i",
"=",
"n",
".",
"index",
"(",
"className",
")",
"c",
"=",
"getAvailableClassesInModule",
"(",
"module",
")",
"return",
"c",
"[",
"i",
"]"
] |
get a class from name within a module
|
[
"get",
"a",
"class",
"from",
"name",
"within",
"a",
"module"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/pystructure/getMembers.py#L65-L72
|
243,819
|
rikrd/inspire
|
inspirespeech/htk_model_utils.py
|
create_prototype
|
def create_prototype(sample_dimension,
parameter_kind_base='user',
parameter_kind_options=[],
state_stay_probabilities=[0.6, 0.6, 0.7]):
"""Create a prototype HTK model file using a feature file.
"""
parameter_kind = create_parameter_kind(base=parameter_kind_base,
options=parameter_kind_options)
transition = create_transition(state_stay_probabilities)
state_count = len(state_stay_probabilities)
states = []
for i in range(state_count):
state = create_gmm(np.zeros(sample_dimension),
np.ones(sample_dimension),
weights=None,
gconsts=None)
states.append(state)
hmms = [create_hmm(states, transition)]
macros = [create_options(vector_size=sample_dimension,
parameter_kind=parameter_kind)]
model = create_model(macros, hmms)
return model
|
python
|
def create_prototype(sample_dimension,
parameter_kind_base='user',
parameter_kind_options=[],
state_stay_probabilities=[0.6, 0.6, 0.7]):
"""Create a prototype HTK model file using a feature file.
"""
parameter_kind = create_parameter_kind(base=parameter_kind_base,
options=parameter_kind_options)
transition = create_transition(state_stay_probabilities)
state_count = len(state_stay_probabilities)
states = []
for i in range(state_count):
state = create_gmm(np.zeros(sample_dimension),
np.ones(sample_dimension),
weights=None,
gconsts=None)
states.append(state)
hmms = [create_hmm(states, transition)]
macros = [create_options(vector_size=sample_dimension,
parameter_kind=parameter_kind)]
model = create_model(macros, hmms)
return model
|
[
"def",
"create_prototype",
"(",
"sample_dimension",
",",
"parameter_kind_base",
"=",
"'user'",
",",
"parameter_kind_options",
"=",
"[",
"]",
",",
"state_stay_probabilities",
"=",
"[",
"0.6",
",",
"0.6",
",",
"0.7",
"]",
")",
":",
"parameter_kind",
"=",
"create_parameter_kind",
"(",
"base",
"=",
"parameter_kind_base",
",",
"options",
"=",
"parameter_kind_options",
")",
"transition",
"=",
"create_transition",
"(",
"state_stay_probabilities",
")",
"state_count",
"=",
"len",
"(",
"state_stay_probabilities",
")",
"states",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"state_count",
")",
":",
"state",
"=",
"create_gmm",
"(",
"np",
".",
"zeros",
"(",
"sample_dimension",
")",
",",
"np",
".",
"ones",
"(",
"sample_dimension",
")",
",",
"weights",
"=",
"None",
",",
"gconsts",
"=",
"None",
")",
"states",
".",
"append",
"(",
"state",
")",
"hmms",
"=",
"[",
"create_hmm",
"(",
"states",
",",
"transition",
")",
"]",
"macros",
"=",
"[",
"create_options",
"(",
"vector_size",
"=",
"sample_dimension",
",",
"parameter_kind",
"=",
"parameter_kind",
")",
"]",
"model",
"=",
"create_model",
"(",
"macros",
",",
"hmms",
")",
"return",
"model"
] |
Create a prototype HTK model file using a feature file.
|
[
"Create",
"a",
"prototype",
"HTK",
"model",
"file",
"using",
"a",
"feature",
"file",
"."
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk_model_utils.py#L140-L168
|
243,820
|
rikrd/inspire
|
inspirespeech/htk_model_utils.py
|
map_hmms
|
def map_hmms(input_model, mapping):
"""Create a new HTK HMM model given a model and a mapping dictionary.
:param input_model: The model to transform of type dict
:param mapping: A dictionary from string -> list(string)
:return: The transformed model of type dict
"""
output_model = copy.copy(input_model)
o_hmms = []
for i_hmm in input_model['hmms']:
i_hmm_name = i_hmm['name']
o_hmm_names = mapping.get(i_hmm_name, [i_hmm_name])
for o_hmm_name in o_hmm_names:
o_hmm = copy.copy(i_hmm)
o_hmm['name'] = o_hmm_name
o_hmms.append(o_hmm)
output_model['hmms'] = o_hmms
return output_model
|
python
|
def map_hmms(input_model, mapping):
"""Create a new HTK HMM model given a model and a mapping dictionary.
:param input_model: The model to transform of type dict
:param mapping: A dictionary from string -> list(string)
:return: The transformed model of type dict
"""
output_model = copy.copy(input_model)
o_hmms = []
for i_hmm in input_model['hmms']:
i_hmm_name = i_hmm['name']
o_hmm_names = mapping.get(i_hmm_name, [i_hmm_name])
for o_hmm_name in o_hmm_names:
o_hmm = copy.copy(i_hmm)
o_hmm['name'] = o_hmm_name
o_hmms.append(o_hmm)
output_model['hmms'] = o_hmms
return output_model
|
[
"def",
"map_hmms",
"(",
"input_model",
",",
"mapping",
")",
":",
"output_model",
"=",
"copy",
".",
"copy",
"(",
"input_model",
")",
"o_hmms",
"=",
"[",
"]",
"for",
"i_hmm",
"in",
"input_model",
"[",
"'hmms'",
"]",
":",
"i_hmm_name",
"=",
"i_hmm",
"[",
"'name'",
"]",
"o_hmm_names",
"=",
"mapping",
".",
"get",
"(",
"i_hmm_name",
",",
"[",
"i_hmm_name",
"]",
")",
"for",
"o_hmm_name",
"in",
"o_hmm_names",
":",
"o_hmm",
"=",
"copy",
".",
"copy",
"(",
"i_hmm",
")",
"o_hmm",
"[",
"'name'",
"]",
"=",
"o_hmm_name",
"o_hmms",
".",
"append",
"(",
"o_hmm",
")",
"output_model",
"[",
"'hmms'",
"]",
"=",
"o_hmms",
"return",
"output_model"
] |
Create a new HTK HMM model given a model and a mapping dictionary.
:param input_model: The model to transform of type dict
:param mapping: A dictionary from string -> list(string)
:return: The transformed model of type dict
|
[
"Create",
"a",
"new",
"HTK",
"HMM",
"model",
"given",
"a",
"model",
"and",
"a",
"mapping",
"dictionary",
"."
] |
e281c0266a9a9633f34ab70f9c3ad58036c19b59
|
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk_model_utils.py#L185-L208
|
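A toy run, assuming `map_hmms` and its `copy` import are in scope: the 'aa' model is cloned under two new names while 'b' passes through. Since `copy.copy` is shallow, the clones share everything except 'name':

```python
model = {'macros': [], 'hmms': [{'name': 'aa'}, {'name': 'b'}]}
mapped = map_hmms(model, {'aa': ['aa1', 'aa2']})
print([h['name'] for h in mapped['hmms']])   # -> ['aa1', 'aa2', 'b']
```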
243,821
|
fusionbox/django-decoratormixins
|
decoratormixins/__init__.py
|
DecoratorMixin
|
def DecoratorMixin(decorator):
"""
Converts a decorator written for a function view into a mixin for a
class-based view.
::
LoginRequiredMixin = DecoratorMixin(login_required)
class MyView(LoginRequiredMixin):
pass
class SomeView(DecoratorMixin(some_decorator),
DecoratorMixin(something_else)):
pass
"""
class Mixin(object):
__doc__ = decorator.__doc__
@classmethod
def as_view(cls, *args, **kwargs):
view = super(Mixin, cls).as_view(*args, **kwargs)
return decorator(view)
Mixin.__name__ = str('DecoratorMixin(%s)' % decorator.__name__)
return Mixin
|
python
|
def DecoratorMixin(decorator):
"""
Converts a decorator written for a function view into a mixin for a
class-based view.
::
LoginRequiredMixin = DecoratorMixin(login_required)
class MyView(LoginRequiredMixin):
pass
class SomeView(DecoratorMixin(some_decorator),
DecoratorMixin(something_else)):
pass
"""
class Mixin(object):
__doc__ = decorator.__doc__
@classmethod
def as_view(cls, *args, **kwargs):
view = super(Mixin, cls).as_view(*args, **kwargs)
return decorator(view)
Mixin.__name__ = str('DecoratorMixin(%s)' % decorator.__name__)
return Mixin
|
[
"def",
"DecoratorMixin",
"(",
"decorator",
")",
":",
"class",
"Mixin",
"(",
"object",
")",
":",
"__doc__",
"=",
"decorator",
".",
"__doc__",
"@",
"classmethod",
"def",
"as_view",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"view",
"=",
"super",
"(",
"Mixin",
",",
"cls",
")",
".",
"as_view",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorator",
"(",
"view",
")",
"Mixin",
".",
"__name__",
"=",
"str",
"(",
"'DecoratorMixin(%s)'",
"%",
"decorator",
".",
"__name__",
")",
"return",
"Mixin"
] |
Converts a decorator written for a function view into a mixin for a
class-based view.
::
LoginRequiredMixin = DecoratorMixin(login_required)
class MyView(LoginRequiredMixin):
pass
class SomeView(DecoratorMixin(some_decorator),
DecoratorMixin(something_else)):
pass
|
[
"Converts",
"a",
"decorator",
"written",
"for",
"a",
"function",
"view",
"into",
"a",
"mixin",
"for",
"a",
"class",
"-",
"based",
"view",
"."
] |
d1c9b193532d9904e288bb6471f2e7abc4a5b707
|
https://github.com/fusionbox/django-decoratormixins/blob/d1c9b193532d9904e288bb6471f2e7abc4a5b707/decoratormixins/__init__.py#L3-L30
|
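Typical Django usage, as the docstring suggests (assumes a configured Django project; `View` and `login_required` are real Django imports):

```python
from django.contrib.auth.decorators import login_required
from django.views.generic import View

LoginRequiredMixin = DecoratorMixin(login_required)

class DashboardView(LoginRequiredMixin, View):
    pass   # DashboardView.as_view() now returns a login_required-wrapped view
```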
243,822
|
mbodenhamer/syn
|
syn/base_utils/tree.py
|
seq_list_nested
|
def seq_list_nested(b, d, x=0, top_level=True):
'''
Create a nested list of iteratively increasing values.
b: branching factor
d: max depth
x: starting value (default = 0)
'''
x += 1
if d == 0:
ret = [x]
else:
val = x
ret = []
for i in range(b):
lst, x = seq_list_nested(b, d-1, x, False)
ret.extend(lst)
ret = [val, ret]
if top_level:
return ret
else:
return ret, x
|
python
|
def seq_list_nested(b, d, x=0, top_level=True):
'''
Create a nested list of iteratively increasing values.
b: branching factor
d: max depth
x: starting value (default = 0)
'''
x += 1
if d == 0:
ret = [x]
else:
val = x
ret = []
for i in range(b):
lst, x = seq_list_nested(b, d-1, x, False)
ret.extend(lst)
ret = [val, ret]
if top_level:
return ret
else:
return ret, x
|
[
"def",
"seq_list_nested",
"(",
"b",
",",
"d",
",",
"x",
"=",
"0",
",",
"top_level",
"=",
"True",
")",
":",
"x",
"+=",
"1",
"if",
"d",
"==",
"0",
":",
"ret",
"=",
"[",
"x",
"]",
"else",
":",
"val",
"=",
"x",
"ret",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"b",
")",
":",
"lst",
",",
"x",
"=",
"seq_list_nested",
"(",
"b",
",",
"d",
"-",
"1",
",",
"x",
",",
"False",
")",
"ret",
".",
"extend",
"(",
"lst",
")",
"ret",
"=",
"[",
"val",
",",
"ret",
"]",
"if",
"top_level",
":",
"return",
"ret",
"else",
":",
"return",
"ret",
",",
"x"
] |
Create a nested list of iteratively increasing values.
b: branching factor
d: max depth
x: starting value (default = 0)
|
[
"Create",
"a",
"nested",
"list",
"of",
"iteratively",
"increasing",
"values",
"."
] |
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
|
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base_utils/tree.py#L4-L27
|
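Worked examples: each node stores its own value followed by the list of its children, numbered depth-first:

```python
print(seq_list_nested(2, 1))   # -> [1, [2, 3]]
print(seq_list_nested(2, 2))   # -> [1, [2, [3, 4], 5, [6, 7]]]
```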
243,823
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/resolvers.py
|
Opaque
|
def Opaque(uri, tc, ps, **keywords):
'''Resolve a URI and return its content as a string.
'''
source = urllib.urlopen(uri, **keywords)
enc = source.info().getencoding()
if enc in ['7bit', '8bit', 'binary']: return source.read()
data = StringIO.StringIO()
mimetools.decode(source, data, enc)
return data.getvalue()
|
python
|
def Opaque(uri, tc, ps, **keywords):
'''Resolve a URI and return its content as a string.
'''
source = urllib.urlopen(uri, **keywords)
enc = source.info().getencoding()
if enc in ['7bit', '8bit', 'binary']: return source.read()
data = StringIO.StringIO()
mimetools.decode(source, data, enc)
return data.getvalue()
|
[
"def",
"Opaque",
"(",
"uri",
",",
"tc",
",",
"ps",
",",
"*",
"*",
"keywords",
")",
":",
"source",
"=",
"urllib",
".",
"urlopen",
"(",
"uri",
",",
"*",
"*",
"keywords",
")",
"enc",
"=",
"source",
".",
"info",
"(",
")",
".",
"getencoding",
"(",
")",
"if",
"enc",
"in",
"[",
"'7bit'",
",",
"'8bit'",
",",
"'binary'",
"]",
":",
"return",
"source",
".",
"read",
"(",
")",
"data",
"=",
"StringIO",
".",
"StringIO",
"(",
")",
"mimetools",
".",
"decode",
"(",
"source",
",",
"data",
",",
"enc",
")",
"return",
"data",
".",
"getvalue",
"(",
")"
] |
Resolve a URI and return its content as a string.
|
[
"Resolve",
"a",
"URI",
"and",
"return",
"its",
"content",
"as",
"a",
"string",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/resolvers.py#L12-L21
|
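`Opaque` relies on Python 2's `urllib.urlopen` and the long-removed `mimetools` module. A rough Python 3 analogue, as a sketch rather than a drop-in replacement (transfer decoding is handled by `urllib.request` itself):

```python
from urllib.request import urlopen

def opaque(uri):
    """Resolve a URI and return its body as bytes."""
    with urlopen(uri) as source:
        return source.read()
```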
243,824
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/resolvers.py
|
XML
|
def XML(uri, tc, ps, **keywords):
'''Resolve a URI and return its content as an XML DOM.
'''
source = urllib.urlopen(uri, **keywords)
enc = source.info().getencoding()
if enc in ['7bit', '8bit', 'binary']:
data = source
else:
data = StringIO.StringIO()
mimetools.decode(source, data, enc)
data.seek(0)
dom = ps.readerclass().fromStream(data)
return _child_elements(dom)[0]
|
python
|
def XML(uri, tc, ps, **keywords):
'''Resolve a URI and return its content as an XML DOM.
'''
source = urllib.urlopen(uri, **keywords)
enc = source.info().getencoding()
if enc in ['7bit', '8bit', 'binary']:
data = source
else:
data = StringIO.StringIO()
mimetools.decode(source, data, enc)
data.seek(0)
dom = ps.readerclass().fromStream(data)
return _child_elements(dom)[0]
|
[
"def",
"XML",
"(",
"uri",
",",
"tc",
",",
"ps",
",",
"*",
"*",
"keywords",
")",
":",
"source",
"=",
"urllib",
".",
"urlopen",
"(",
"uri",
",",
"*",
"*",
"keywords",
")",
"enc",
"=",
"source",
".",
"info",
"(",
")",
".",
"getencoding",
"(",
")",
"if",
"enc",
"in",
"[",
"'7bit'",
",",
"'8bit'",
",",
"'binary'",
"]",
":",
"data",
"=",
"source",
"else",
":",
"data",
"=",
"StringIO",
".",
"StringIO",
"(",
")",
"mimetools",
".",
"decode",
"(",
"source",
",",
"data",
",",
"enc",
")",
"data",
".",
"seek",
"(",
"0",
")",
"dom",
"=",
"ps",
".",
"readerclass",
"(",
")",
".",
"fromStream",
"(",
"data",
")",
"return",
"_child_elements",
"(",
"dom",
")",
"[",
"0",
"]"
] |
Resolve a URI and return its content as an XML DOM.
|
[
"Resolve",
"a",
"URI",
"and",
"return",
"its",
"content",
"as",
"an",
"XML",
"DOM",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/resolvers.py#L24-L36
|
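The same fetch-and-parse idea in modern Python, as a hedged standard-library sketch (this is not part of ZSI, which predates Python 3):

import urllib.request
import xml.dom.minidom

def resolve_xml(uri):
    # Fetch the URI and hand the stream to a DOM parser; Python 3's
    # urllib handles the transfer decoding that mimetools covered above.
    with urllib.request.urlopen(uri) as source:
        dom = xml.dom.minidom.parse(source)
    return dom.documentElement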
243,825
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/resolvers.py
|
MIMEResolver.GetSOAPPart
|
def GetSOAPPart(self):
'''Get the SOAP body part.
'''
head, part = self.parts[0]
return StringIO.StringIO(part.getvalue())
|
python
|
def GetSOAPPart(self):
'''Get the SOAP body part.
'''
head, part = self.parts[0]
return StringIO.StringIO(part.getvalue())
|
[
"def",
"GetSOAPPart",
"(",
"self",
")",
":",
"head",
",",
"part",
"=",
"self",
".",
"parts",
"[",
"0",
"]",
"return",
"StringIO",
".",
"StringIO",
"(",
"part",
".",
"getvalue",
"(",
")",
")"
] |
Get the SOAP body part.
|
[
"Get",
"the",
"SOAP",
"body",
"part",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/resolvers.py#L105-L109
|
243,826
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/resolvers.py
|
MIMEResolver.get
|
def get(self, uri):
'''Get the content for the bodypart identified by the uri.
'''
if uri.startswith('cid:'):
# Content-ID, so raise exception if not found.
head, part = self.id_dict[uri[4:]]
return StringIO.StringIO(part.getvalue())
if self.loc_dict.has_key(uri):
head, part = self.loc_dict[uri]
return StringIO.StringIO(part.getvalue())
return None
|
python
|
def get(self, uri):
'''Get the content for the bodypart identified by the uri.
'''
if uri.startswith('cid:'):
# Content-ID, so raise exception if not found.
head, part = self.id_dict[uri[4:]]
return StringIO.StringIO(part.getvalue())
if self.loc_dict.has_key(uri):
head, part = self.loc_dict[uri]
return StringIO.StringIO(part.getvalue())
return None
|
[
"def",
"get",
"(",
"self",
",",
"uri",
")",
":",
"if",
"uri",
".",
"startswith",
"(",
"'cid:'",
")",
":",
"# Content-ID, so raise exception if not found.",
"head",
",",
"part",
"=",
"self",
".",
"id_dict",
"[",
"uri",
"[",
"4",
":",
"]",
"]",
"return",
"StringIO",
".",
"StringIO",
"(",
"part",
".",
"getvalue",
"(",
")",
")",
"if",
"self",
".",
"loc_dict",
".",
"has_key",
"(",
"uri",
")",
":",
"head",
",",
"part",
"=",
"self",
".",
"loc_dict",
"[",
"uri",
"]",
"return",
"StringIO",
".",
"StringIO",
"(",
"part",
".",
"getvalue",
"(",
")",
")",
"return",
"None"
] |
Get the content for the bodypart identified by the uri.
|
[
"Get",
"the",
"content",
"for",
"the",
"bodypart",
"identified",
"by",
"the",
"uri",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/resolvers.py#L111-L121
|
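Behavior worth noting: a 'cid:' URI is resolved through id_dict, so an unknown Content-ID raises KeyError, while an unknown Content-Location simply returns None. A hypothetical Python 2 usage sketch:

# `resolver` is assumed to be a MIMEResolver over a multipart message
# whose first attachment carries Content-ID <part1>.
body = resolver.get('cid:part1')         # KeyError if the Content-ID is unknown
other = resolver.get('http://x/loc1')    # None if no part has that Content-Location
if body is not None:
    print body.read()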
243,827
|
ravenac95/lxc4u
|
lxc4u/meta.py
|
LXCMeta.load_from_file
|
def load_from_file(cls, file_path):
"""Load the meta data given a file_path or empty meta data"""
data = None
if os.path.exists(file_path):
metadata_file = open(file_path)
data = json.loads(metadata_file.read())
return cls(initial=data)
|
python
|
def load_from_file(cls, file_path):
"""Load the meta data given a file_path or empty meta data"""
data = None
if os.path.exists(file_path):
metadata_file = open(file_path)
data = json.loads(metadata_file.read())
return cls(initial=data)
|
[
"def",
"load_from_file",
"(",
"cls",
",",
"file_path",
")",
":",
"data",
"=",
"None",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"metadata_file",
"=",
"open",
"(",
"file_path",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"metadata_file",
".",
"read",
"(",
")",
")",
"return",
"cls",
"(",
"initial",
"=",
"data",
")"
] |
Load the meta data given a file_path or empty meta data
|
[
"Load",
"the",
"meta",
"data",
"given",
"a",
"file_path",
"or",
"empty",
"meta",
"data"
] |
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
|
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/meta.py#L10-L16
|
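Worth noting: the file handle above is left for the garbage collector to close. A hedged sketch of the same logic with an explicit context manager (the metadata path is hypothetical):

import json
import os

def load_metadata(file_path):
    data = None
    if os.path.exists(file_path):
        with open(file_path) as metadata_file:
            data = json.load(metadata_file)   # json.load reads the file directly
    return data

meta = LXCMeta(initial=load_metadata('/var/lib/lxc/mycontainer/meta.json'))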
243,828
|
ravenac95/lxc4u
|
lxc4u/meta.py
|
LXCMeta.bind_and_save
|
def bind_and_save(self, lxc):
"""Binds metadata to an LXC and saves it"""
bound_meta = self.bind(lxc)
bound_meta.save()
return bound_meta
|
python
|
def bind_and_save(self, lxc):
"""Binds metadata to an LXC and saves it"""
bound_meta = self.bind(lxc)
bound_meta.save()
return bound_meta
|
[
"def",
"bind_and_save",
"(",
"self",
",",
"lxc",
")",
":",
"bound_meta",
"=",
"self",
".",
"bind",
"(",
"lxc",
")",
"bound_meta",
".",
"save",
"(",
")",
"return",
"bound_meta"
] |
Binds metadata to an LXC and saves it
|
[
"Binds",
"metadata",
"to",
"an",
"LXC",
"and",
"saves",
"it"
] |
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
|
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/meta.py#L41-L45
|
243,829
|
jeroyang/txttk
|
txttk/report.py
|
Report.update
|
def update(self, report):
"""
Add the items from the given report.
"""
self.tp.extend(pack_boxes(report.tp, self.title))
self.fp.extend(pack_boxes(report.fp, self.title))
self.fn.extend(pack_boxes(report.fn, self.title))
|
python
|
def update(self, report):
"""
Add the items from the given report.
"""
self.tp.extend(pack_boxes(report.tp, self.title))
self.fp.extend(pack_boxes(report.fp, self.title))
self.fn.extend(pack_boxes(report.fn, self.title))
|
[
"def",
"update",
"(",
"self",
",",
"report",
")",
":",
"self",
".",
"tp",
".",
"extend",
"(",
"pack_boxes",
"(",
"report",
".",
"tp",
",",
"self",
".",
"title",
")",
")",
"self",
".",
"fp",
".",
"extend",
"(",
"pack_boxes",
"(",
"report",
".",
"fp",
",",
"self",
".",
"title",
")",
")",
"self",
".",
"fn",
".",
"extend",
"(",
"pack_boxes",
"(",
"report",
".",
"fn",
",",
"self",
".",
"title",
")",
")"
] |
Add the items from the given report.
|
[
"Add",
"the",
"items",
"from",
"the",
"given",
"report",
"."
] |
8e6daf9cbb7dfbc4900870fb365add17929bd4ab
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/report.py#L117-L123
|
243,830
|
jeroyang/txttk
|
txttk/report.py
|
Report.from_scale
|
def from_scale(cls, gold_number, precision, recall, title):
"""
deprecated, for backward compatibility
try to use from_score
"""
tp_count = get_numerator(recall, gold_number)
positive_count = get_denominator(precision, tp_count)
fp_count = positive_count - tp_count
fn_count = gold_number - tp_count
scale_report = cls(['tp'] * tp_count,
['fp'] * fp_count,
['fn'] * fn_count,
title)
return scale_report
|
python
|
def from_scale(cls, gold_number, precision, recall, title):
"""
deprecated, for backward compatibility
try to use from_score
"""
tp_count = get_numerator(recall, gold_number)
positive_count = get_denominator(precision, tp_count)
fp_count = positive_count - tp_count
fn_count = gold_number - tp_count
scale_report = cls(['tp'] * tp_count,
['fp'] * fp_count,
['fn'] * fn_count,
title)
return scale_report
|
[
"def",
"from_scale",
"(",
"cls",
",",
"gold_number",
",",
"precision",
",",
"recall",
",",
"title",
")",
":",
"tp_count",
"=",
"get_numerator",
"(",
"recall",
",",
"gold_number",
")",
"positive_count",
"=",
"get_denominator",
"(",
"precision",
",",
"tp_count",
")",
"fp_count",
"=",
"positive_count",
"-",
"tp_count",
"fn_count",
"=",
"gold_number",
"-",
"tp_count",
"scale_report",
"=",
"cls",
"(",
"[",
"'tp'",
"]",
"*",
"tp_count",
",",
"[",
"'fp'",
"]",
"*",
"fp_count",
",",
"[",
"'fn'",
"]",
"*",
"fn_count",
",",
"title",
")",
"return",
"scale_report"
] |
deprecated, for backward compatibility
try to use from_score
|
[
"deprecated",
"for",
"backward",
"compactbility",
"try",
"to",
"use",
"from_score"
] |
8e6daf9cbb7dfbc4900870fb365add17929bd4ab
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/report.py#L150-L163
|
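Assuming get_numerator(rate, denom) rounds rate * denom and get_denominator(rate, num) rounds num / rate (the names suggest as much, but that is an inference), the counts work out like this:

gold_number, precision, recall = 100, 0.8, 0.6

tp_count = round(recall * gold_number)         # 60 true positives
positive_count = round(tp_count / precision)   # 75 predicted positives
fp_count = positive_count - tp_count           # 15 false positives
fn_count = gold_number - tp_count              # 40 false negatives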
243,831
|
dossier/dossier.models
|
dossier/models/linker/worker.py
|
worker
|
def worker(work_unit):
'''Expects a WorkUnit from coordinated, obtains a config, and runs
traverse_extract_fetch
'''
if 'config' not in work_unit.spec:
raise coordinate.exceptions.ProgrammerError(
'could not run extraction without global config')
web_conf = Config()
unitconf = work_unit.spec['config']
#logger.info(unitconf)
with yakonfig.defaulted_config([coordinate, kvlayer, dblogger, web_conf],
config=unitconf):
traverse_extract_fetch(web_conf, work_unit.key)
|
python
|
def worker(work_unit):
'''Expects a WorkUnit from coordinated, obtains a config, and runs
traverse_extract_fetch
'''
if 'config' not in work_unit.spec:
raise coordinate.exceptions.ProgrammerError(
'could not run extraction without global config')
web_conf = Config()
unitconf = work_unit.spec['config']
#logger.info(unitconf)
with yakonfig.defaulted_config([coordinate, kvlayer, dblogger, web_conf],
config=unitconf):
traverse_extract_fetch(web_conf, work_unit.key)
|
[
"def",
"worker",
"(",
"work_unit",
")",
":",
"if",
"'config'",
"not",
"in",
"work_unit",
".",
"spec",
":",
"raise",
"coordinate",
".",
"exceptions",
".",
"ProgrammerError",
"(",
"'could not run extraction without global config'",
")",
"web_conf",
"=",
"Config",
"(",
")",
"unitconf",
"=",
"work_unit",
".",
"spec",
"[",
"'config'",
"]",
"#logger.info(unitconf)",
"with",
"yakonfig",
".",
"defaulted_config",
"(",
"[",
"coordinate",
",",
"kvlayer",
",",
"dblogger",
",",
"web_conf",
"]",
",",
"config",
"=",
"unitconf",
")",
":",
"traverse_extract_fetch",
"(",
"web_conf",
",",
"work_unit",
".",
"key",
")"
] |
Expects a WorkUnit from coordinated, obtains a config, and runs
traverse_extract_fetch
|
[
"Expects",
"a",
"WorkUnit",
"from",
"coordinated",
"obtains",
"a",
"config",
"and",
"runs",
"traverse_extract_fetch"
] |
c9e282f690eab72963926329efe1600709e48b13
|
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/linker/worker.py#L38-L52
|
243,832
|
dossier/dossier.models
|
dossier/models/linker/worker.py
|
name_filter
|
def name_filter(keywords, names):
'''
Returns the first keyword from the list, unless
that keyword is one of the names in names, in which case
it continues to the next keyword.
Since keywords consist of tuples, it just returns the first
element of the tuple, the keyword. It also adds double
quotes around the keywords, as is appropriate for Google queries.
Input Arguments:
keywords -- a list of (keyword, strength) tuples
names -- a list of names to be skipped
'''
name_set = set(name.lower() for name in names)
for key_tuple in keywords:
if not key_tuple[0] in name_set:
return '\"' + key_tuple[0] +'\"'
## returns empty string if we run out, which we shouldn't
return ''
|
python
|
def name_filter(keywords, names):
'''
Returns the first keyword from the list, unless
that keyword is one of the names in names, in which case
it continues to the next keyword.
Since keywords consist of tuples, it just returns the first
element of the tuple, the keyword. It also adds double
quotes around the keywords, as is appropriate for Google queries.
Input Arguments:
keywords -- a list of (keyword, strength) tuples
names -- a list of names to be skipped
'''
name_set = set(name.lower() for name in names)
for key_tuple in keywords:
if not key_tuple[0] in name_set:
return '\"' + key_tuple[0] +'\"'
## returns empty string if we run out, which we shouldn't
return ''
|
[
"def",
"name_filter",
"(",
"keywords",
",",
"names",
")",
":",
"name_set",
"=",
"set",
"(",
"name",
".",
"lower",
"(",
")",
"for",
"name",
"in",
"names",
")",
"for",
"key_tuple",
"in",
"keywords",
":",
"if",
"not",
"key_tuple",
"[",
"0",
"]",
"in",
"name_set",
":",
"return",
"'\\\"'",
"+",
"key_tuple",
"[",
"0",
"]",
"+",
"'\\\"'",
"## returns empty string if we run out, which we shouldn't",
"return",
"''"
] |
Returns the first keyword from the list, unless
that keyword is one of the names in names, in which case
it continues to the next keyword.
Since keywords consist of tuples, it just returns the first
element of the tuple, the keyword. It also adds double
quotes around the keywords, as is appropriate for Google queries.
Input Arguments:
keywords -- a list of (keyword, strength) tuples
names -- a list of names to be skipped
|
[
"Returns",
"the",
"first",
"keyword",
"from",
"the",
"list",
"unless",
"that",
"keyword",
"is",
"one",
"of",
"the",
"names",
"in",
"names",
"in",
"which",
"case",
"it",
"continues",
"to",
"the",
"next",
"keyword",
"."
] |
c9e282f690eab72963926329efe1600709e48b13
|
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/linker/worker.py#L300-L321
|
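A small self-contained example. Note that the names are lowercased but the keyword itself is not, so a mixed-case keyword slips past the filter; that is an observable quirk of the code above, not a documented feature.

keywords = [('john', 0.9), ('budget', 0.7)]
assert name_filter(keywords, ['John', 'Mary']) == '"budget"'

# Case-sensitivity quirk: 'John' is not in {'john', 'mary'}, so it is returned.
assert name_filter([('John', 0.9)], ['John']) == '"John"'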
243,833
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/__init__.py
|
_resolve_prefix
|
def _resolve_prefix(celt, prefix):
'''resolve prefix to a namespaceURI. If None or
empty str, return default namespace or None.
Parameters:
celt -- element node
prefix -- xmlns:prefix, or empty str or None
'''
namespace = None
while _is_element(celt):
if prefix:
namespaceURI = _find_xmlns_prefix(celt, prefix)
else:
namespaceURI = _find_default_namespace(celt)
if namespaceURI: break
celt = celt.parentNode
else:
if prefix:
raise EvaluateException, 'cant resolve xmlns:%s' %prefix
return namespaceURI
|
python
|
def _resolve_prefix(celt, prefix):
'''resolve prefix to a namespaceURI. If None or
empty str, return default namespace or None.
Parameters:
celt -- element node
prefix -- xmlns:prefix, or empty str or None
'''
namespace = None
while _is_element(celt):
if prefix:
namespaceURI = _find_xmlns_prefix(celt, prefix)
else:
namespaceURI = _find_default_namespace(celt)
if namespaceURI: break
celt = celt.parentNode
else:
if prefix:
raise EvaluateException, 'cant resolve xmlns:%s' %prefix
return namespaceURI
|
[
"def",
"_resolve_prefix",
"(",
"celt",
",",
"prefix",
")",
":",
"namespace",
"=",
"None",
"while",
"_is_element",
"(",
"celt",
")",
":",
"if",
"prefix",
":",
"namespaceURI",
"=",
"_find_xmlns_prefix",
"(",
"celt",
",",
"prefix",
")",
"else",
":",
"namespaceURI",
"=",
"_find_default_namespace",
"(",
"celt",
")",
"if",
"namespaceURI",
":",
"break",
"celt",
"=",
"celt",
".",
"parentNode",
"else",
":",
"if",
"prefix",
":",
"raise",
"EvaluateException",
",",
"'cant resolve xmlns:%s'",
"%",
"prefix",
"return",
"namespaceURI"
] |
resolve prefix to a namespaceURI. If None or
empty str, return default namespace or None.
Parameters:
celt -- element node
prefix -- xmlns:prefix, or empty str or None
|
[
"resolve",
"prefix",
"to",
"a",
"namespaceURI",
".",
"If",
"None",
"or",
"empty",
"str",
"return",
"default",
"namespace",
"or",
"None",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/__init__.py#L235-L254
|
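The while/else construct is doing real work here: the else branch runs only when the loop condition goes false without a break, i.e. when the walk falls off the document root without finding a binding. A self-contained Python illustration of the same pattern:

class Node(object):
    def __init__(self, value, parent=None):
        self.value = value
        self.parent = parent

def find_up(node, value):
    # else runs only if the loop ends without `break` -- the walk
    # reached past the root without a match, mirroring _resolve_prefix.
    while node is not None:
        if node.value == value:
            break
        node = node.parent
    else:
        raise LookupError('reached past the root without a match')
    return node

root = Node('root')
leaf = Node('leaf', parent=root)
assert find_up(leaf, 'root') is root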
243,834
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/__init__.py
|
_valid_encoding
|
def _valid_encoding(elt):
'''Does this node have a valid encoding?
'''
enc = _find_encstyle(elt)
if not enc or enc == _SOAP.ENC: return 1
for e in enc.split():
if e.startswith(_SOAP.ENC):
# XXX Is this correct? Once we find a Sec5 compatible
# XXX encoding, should we check that all the rest are from
# XXX that same base? Perhaps. But since the if test above
# XXX will surely get 99% of the cases, leave it for now.
return 1
return 0
|
python
|
def _valid_encoding(elt):
'''Does this node have a valid encoding?
'''
enc = _find_encstyle(elt)
if not enc or enc == _SOAP.ENC: return 1
for e in enc.split():
if e.startswith(_SOAP.ENC):
# XXX Is this correct? Once we find a Sec5 compatible
# XXX encoding, should we check that all the rest are from
# XXX that same base? Perhaps. But since the if test above
# XXX will surely get 99% of the cases, leave it for now.
return 1
return 0
|
[
"def",
"_valid_encoding",
"(",
"elt",
")",
":",
"enc",
"=",
"_find_encstyle",
"(",
"elt",
")",
"if",
"not",
"enc",
"or",
"enc",
"==",
"_SOAP",
".",
"ENC",
":",
"return",
"1",
"for",
"e",
"in",
"enc",
".",
"split",
"(",
")",
":",
"if",
"e",
".",
"startswith",
"(",
"_SOAP",
".",
"ENC",
")",
":",
"# XXX Is this correct? Once we find a Sec5 compatible",
"# XXX encoding, should we check that all the rest are from",
"# XXX that same base? Perhaps. But since the if test above",
"# XXX will surely get 99% of the cases, leave it for now.",
"return",
"1",
"return",
"0"
] |
Does this node have a valid encoding?
|
[
"Does",
"this",
"node",
"have",
"a",
"valid",
"encoding?"
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/__init__.py#L256-L268
|
243,835
|
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/__init__.py
|
_backtrace
|
def _backtrace(elt, dom):
'''Return a "backtrace" from the given element to the DOM root,
in XPath syntax.
'''
s = ''
while elt != dom:
name, parent = elt.nodeName, elt.parentNode
if parent is None: break
matches = [ c for c in _child_elements(parent)
if c.nodeName == name ]
if len(matches) == 1:
s = '/' + name + s
else:
i = matches.index(elt) + 1
s = ('/%s[%d]' % (name, i)) + s
elt = parent
return s
|
python
|
def _backtrace(elt, dom):
'''Return a "backtrace" from the given element to the DOM root,
in XPath syntax.
'''
s = ''
while elt != dom:
name, parent = elt.nodeName, elt.parentNode
if parent is None: break
matches = [ c for c in _child_elements(parent)
if c.nodeName == name ]
if len(matches) == 1:
s = '/' + name + s
else:
i = matches.index(elt) + 1
s = ('/%s[%d]' % (name, i)) + s
elt = parent
return s
|
[
"def",
"_backtrace",
"(",
"elt",
",",
"dom",
")",
":",
"s",
"=",
"''",
"while",
"elt",
"!=",
"dom",
":",
"name",
",",
"parent",
"=",
"elt",
".",
"nodeName",
",",
"elt",
".",
"parentNode",
"if",
"parent",
"is",
"None",
":",
"break",
"matches",
"=",
"[",
"c",
"for",
"c",
"in",
"_child_elements",
"(",
"parent",
")",
"if",
"c",
".",
"nodeName",
"==",
"name",
"]",
"if",
"len",
"(",
"matches",
")",
"==",
"1",
":",
"s",
"=",
"'/'",
"+",
"name",
"+",
"s",
"else",
":",
"i",
"=",
"matches",
".",
"index",
"(",
"elt",
")",
"+",
"1",
"s",
"=",
"(",
"'/%s[%d]'",
"%",
"(",
"name",
",",
"i",
")",
")",
"+",
"s",
"elt",
"=",
"parent",
"return",
"s"
] |
Return a "backtrace" from the given element to the DOM root,
in XPath syntax.
|
[
"Return",
"a",
"backtrace",
"from",
"the",
"given",
"element",
"to",
"the",
"DOM",
"root",
"in",
"XPath",
"syntax",
"."
] |
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/__init__.py#L270-L286
|
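A short Python 2 sketch of the output format (importing the private helper purely for illustration):

from xml.dom.minidom import parseString
from pyremotevbox.ZSI import _backtrace

dom = parseString('<Envelope><Body><Add/><Add/></Body></Envelope>')
second_add = dom.getElementsByTagName('Add')[1]

# Siblings sharing a name get a 1-based index; unique names get none.
print _backtrace(second_add, dom)   # -> /Envelope/Body/Add[2]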
243,836
|
sarenji/pyrc
|
pyrc/bots.py
|
Bot.connect
|
def connect(self):
'''
Connects to the IRC server with the options defined in `config`
'''
self._connect()
try:
self._listen()
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.close()
|
python
|
def connect(self):
'''
Connects to the IRC server with the options defined in `config`
'''
self._connect()
try:
self._listen()
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.close()
|
[
"def",
"connect",
"(",
"self",
")",
":",
"self",
".",
"_connect",
"(",
")",
"try",
":",
"self",
".",
"_listen",
"(",
")",
"except",
"(",
"KeyboardInterrupt",
",",
"SystemExit",
")",
":",
"pass",
"finally",
":",
"self",
".",
"close",
"(",
")"
] |
Connects to the IRC server with the options defined in `config`
|
[
"Connects",
"to",
"the",
"IRC",
"server",
"with",
"the",
"options",
"defined",
"in",
"config"
] |
5e8377ddcda6e0ef4ba7d66cf400e243b1fb8f68
|
https://github.com/sarenji/pyrc/blob/5e8377ddcda6e0ef4ba7d66cf400e243b1fb8f68/pyrc/bots.py#L43-L54
|
243,837
|
sarenji/pyrc
|
pyrc/bots.py
|
Bot._listen
|
def _listen(self):
"""
Constantly listens to the input from the server. Since the messages come
in pieces, we wait until we receive 1 or more full lines to start parsing.
A new line is defined as ending in \r\n in the RFC, but some servers
separate by \n. This script takes care of both.
"""
while True:
self._inbuffer = self._inbuffer + self.socket.recv(1024)
# Some IRC servers disregard the RFC and split lines by \n rather than \r\n.
temp = self._inbuffer.split("\n")
self._inbuffer = temp.pop()
for line in temp:
# Strip \r from \r\n for RFC-compliant IRC servers.
line = line.rstrip('\r')
if self.config['verbose']: print line
self._run_listeners(line)
|
python
|
def _listen(self):
"""
Constantly listens to the input from the server. Since the messages come
in pieces, we wait until we receive 1 or more full lines to start parsing.
A new line is defined as ending in \r\n in the RFC, but some servers
separate by \n. This script takes care of both.
"""
while True:
self._inbuffer = self._inbuffer + self.socket.recv(1024)
# Some IRC servers disregard the RFC and split lines by \n rather than \r\n.
temp = self._inbuffer.split("\n")
self._inbuffer = temp.pop()
for line in temp:
# Strip \r from \r\n for RFC-compliant IRC servers.
line = line.rstrip('\r')
if self.config['verbose']: print line
self._run_listeners(line)
|
[
"def",
"_listen",
"(",
"self",
")",
":",
"while",
"True",
":",
"self",
".",
"_inbuffer",
"=",
"self",
".",
"_inbuffer",
"+",
"self",
".",
"socket",
".",
"recv",
"(",
"1024",
")",
"# Some IRC servers disregard the RFC and split lines by \\n rather than \\r\\n.",
"temp",
"=",
"self",
".",
"_inbuffer",
".",
"split",
"(",
"\"\\n\"",
")",
"self",
".",
"_inbuffer",
"=",
"temp",
".",
"pop",
"(",
")",
"for",
"line",
"in",
"temp",
":",
"# Strip \\r from \\r\\n for RFC-compliant IRC servers.",
"line",
"=",
"line",
".",
"rstrip",
"(",
"'\\r'",
")",
"if",
"self",
".",
"config",
"[",
"'verbose'",
"]",
":",
"print",
"line",
"self",
".",
"_run_listeners",
"(",
"line",
")"
] |
Constantly listens to the input from the server. Since the messages come
in pieces, we wait until we receive 1 or more full lines to start parsing.
A new line is defined as ending in \r\n in the RFC, but some servers
separate by \n. This script takes care of both.
|
[
"Constantly",
"listens",
"to",
"the",
"input",
"from",
"the",
"server",
".",
"Since",
"the",
"messages",
"come",
"in",
"pieces",
"we",
"wait",
"until",
"we",
"receive",
"1",
"or",
"more",
"full",
"lines",
"to",
"start",
"parsing",
"."
] |
5e8377ddcda6e0ef4ba7d66cf400e243b1fb8f68
|
https://github.com/sarenji/pyrc/blob/5e8377ddcda6e0ef4ba7d66cf400e243b1fb8f68/pyrc/bots.py#L62-L81
|
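The buffering trick is easiest to see on a partial read: split on \n, keep the (possibly incomplete) final piece for the next recv, and strip any trailing \r from the complete lines.

_inbuffer = 'PING :server\r\nNOTICE AUTH :hi\r\nPART'

temp = _inbuffer.split('\n')
_inbuffer = temp.pop()          # 'PART' is incomplete; it waits for more data
lines = [line.rstrip('\r') for line in temp]

assert _inbuffer == 'PART'
assert lines == ['PING :server', 'NOTICE AUTH :hi']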
243,838
|
sarenji/pyrc
|
pyrc/bots.py
|
Bot._run_listeners
|
def _run_listeners(self, line):
"""
Each listener's associated regular expression is matched against raw IRC
input. If there is a match, the listener's associated function is called
with all the regular expression's matched subgroups.
"""
for regex, callbacks in self.listeners.iteritems():
match = regex.match(line)
if not match:
continue
for callback in callbacks:
callback(*match.groups())
|
python
|
def _run_listeners(self, line):
"""
Each listener's associated regular expression is matched against raw IRC
input. If there is a match, the listener's associated function is called
with all the regular expression's matched subgroups.
"""
for regex, callbacks in self.listeners.iteritems():
match = regex.match(line)
if not match:
continue
for callback in callbacks:
callback(*match.groups())
|
[
"def",
"_run_listeners",
"(",
"self",
",",
"line",
")",
":",
"for",
"regex",
",",
"callbacks",
"in",
"self",
".",
"listeners",
".",
"iteritems",
"(",
")",
":",
"match",
"=",
"regex",
".",
"match",
"(",
"line",
")",
"if",
"not",
"match",
":",
"continue",
"for",
"callback",
"in",
"callbacks",
":",
"callback",
"(",
"*",
"match",
".",
"groups",
"(",
")",
")"
] |
Each listener's associated regular expression is matched against raw IRC
input. If there is a match, the listener's associated function is called
with all the regular expression's matched subgroups.
|
[
"Each",
"listener",
"s",
"associated",
"regular",
"expression",
"is",
"matched",
"against",
"raw",
"IRC",
"input",
".",
"If",
"there",
"is",
"a",
"match",
"the",
"listener",
"s",
"associated",
"function",
"is",
"called",
"with",
"all",
"the",
"regular",
"expression",
"s",
"matched",
"subgroups",
"."
] |
5e8377ddcda6e0ef4ba7d66cf400e243b1fb8f68
|
https://github.com/sarenji/pyrc/blob/5e8377ddcda6e0ef4ba7d66cf400e243b1fb8f68/pyrc/bots.py#L83-L96
|
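A hedged, self-contained sketch of the dispatch pattern (Python 3 spelling; the original iterates with iteritems under Python 2):

import re

listeners = {
    re.compile(r'^PING :(.+)$'): [lambda server: print('PONG', server)],
}

def run_listeners(line):
    # Each compiled regex maps to callbacks that receive the matched groups.
    for regex, callbacks in listeners.items():
        match = regex.match(line)
        if not match:
            continue
        for callback in callbacks:
            callback(*match.groups())

run_listeners('PING :irc.example.net')   # prints: PONG irc.example.net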
243,839
|
sarenji/pyrc
|
pyrc/bots.py
|
Bot._strip_prefix
|
def _strip_prefix(self, message):
"""
Checks if the bot was called by a user.
Returns the suffix if so.
Prefixes include the bot's nick as well as a set symbol.
"""
if not hasattr(self, "name_regex"):
"""
regex example:
^(((BotA|BotB)[,:]?\s+)|%)(.+)$
names = [BotA, BotB]
prefix = %
"""
names = self.config['names']
prefix = self.config['prefix']
name_regex_str = r'^(?:(?:(%s)[,:]?\s+)|%s)(.+)$' % (re.escape("|".join(names)), prefix)
self.name_regex = re.compile(name_regex_str, re.IGNORECASE)
search = self.name_regex.search(message)
if search:
return search.groups()[1]
return None
|
python
|
def _strip_prefix(self, message):
"""
Checks if the bot was called by a user.
Returns the suffix if so.
Prefixes include the bot's nick as well as a set symbol.
"""
if not hasattr(self, "name_regex"):
"""
regex example:
^(((BotA|BotB)[,:]?\s+)|%)(.+)$
names = [BotA, BotB]
prefix = %
"""
names = self.config['names']
prefix = self.config['prefix']
name_regex_str = r'^(?:(?:(%s)[,:]?\s+)|%s)(.+)$' % (re.escape("|".join(names)), prefix)
self.name_regex = re.compile(name_regex_str, re.IGNORECASE)
search = self.name_regex.search(message)
if search:
return search.groups()[1]
return None
|
[
"def",
"_strip_prefix",
"(",
"self",
",",
"message",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"name_regex\"",
")",
":",
"\"\"\"\n regex example:\n ^(((BotA|BotB)[,:]?\\s+)|%)(.+)$\n \n names = [BotA, BotB]\n prefix = %\n \"\"\"",
"names",
"=",
"self",
".",
"config",
"[",
"'names'",
"]",
"prefix",
"=",
"self",
".",
"config",
"[",
"'prefix'",
"]",
"name_regex_str",
"=",
"r'^(?:(?:(%s)[,:]?\\s+)|%s)(.+)$'",
"%",
"(",
"re",
".",
"escape",
"(",
"\"|\"",
".",
"join",
"(",
"names",
")",
")",
",",
"prefix",
")",
"self",
".",
"name_regex",
"=",
"re",
".",
"compile",
"(",
"name_regex_str",
",",
"re",
".",
"IGNORECASE",
")",
"search",
"=",
"self",
".",
"name_regex",
".",
"search",
"(",
"message",
")",
"if",
"search",
":",
"return",
"search",
".",
"groups",
"(",
")",
"[",
"1",
"]",
"return",
"None"
] |
Checks if the bot was called by a user.
Returns the suffix if so.
Prefixes include the bot's nick as well as a set symbol.
|
[
"Checks",
"if",
"the",
"bot",
"was",
"called",
"by",
"a",
"user",
".",
"Returns",
"the",
"suffix",
"if",
"so",
"."
] |
5e8377ddcda6e0ef4ba7d66cf400e243b1fb8f68
|
https://github.com/sarenji/pyrc/blob/5e8377ddcda6e0ef4ba7d66cf400e243b1fb8f68/pyrc/bots.py#L144-L171
|
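One subtlety worth flagging: re.escape is applied to the already-joined string, so the '|' separators are escaped along with the names and the intended alternation is lost; the name branch then only matches the literal text 'BotA|BotB'. Escaping each name before joining restores the alternation (a corrected sketch, not the original behavior):

import re

names = ['BotA', 'BotB']
prefix = '%'

# Escape each name individually, then join, so '|' stays an alternation.
name_pattern = '|'.join(re.escape(name) for name in names)
name_regex = re.compile(r'^(?:(?:(%s)[,:]?\s+)|%s)(.+)$' % (name_pattern, prefix),
                        re.IGNORECASE)

assert name_regex.search('BotA: hello').groups()[1] == 'hello'
assert name_regex.search('%hello').groups()[1] == 'hello'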
243,840
|
sarenji/pyrc
|
pyrc/bots.py
|
Bot._connect
|
def _connect(self):
"Connects a socket to the server using options defined in `config`."
self.socket = socket.socket()
self.socket.connect((self.config['host'], self.config['port']))
self.cmd("NICK %s" % self.config['nick'])
self.cmd("USER %s %s bla :%s" %
(self.config['ident'], self.config['host'], self.config['realname']))
|
python
|
def _connect(self):
"Connects a socket to the server using options defined in `config`."
self.socket = socket.socket()
self.socket.connect((self.config['host'], self.config['port']))
self.cmd("NICK %s" % self.config['nick'])
self.cmd("USER %s %s bla :%s" %
(self.config['ident'], self.config['host'], self.config['realname']))
|
[
"def",
"_connect",
"(",
"self",
")",
":",
"self",
".",
"socket",
"=",
"socket",
".",
"socket",
"(",
")",
"self",
".",
"socket",
".",
"connect",
"(",
"(",
"self",
".",
"config",
"[",
"'host'",
"]",
",",
"self",
".",
"config",
"[",
"'port'",
"]",
")",
")",
"self",
".",
"cmd",
"(",
"\"NICK %s\"",
"%",
"self",
".",
"config",
"[",
"'nick'",
"]",
")",
"self",
".",
"cmd",
"(",
"\"USER %s %s bla :%s\"",
"%",
"(",
"self",
".",
"config",
"[",
"'ident'",
"]",
",",
"self",
".",
"config",
"[",
"'host'",
"]",
",",
"self",
".",
"config",
"[",
"'realname'",
"]",
")",
")"
] |
Connects a socket to the server using options defined in `config`.
|
[
"Connects",
"a",
"socket",
"to",
"the",
"server",
"using",
"options",
"defined",
"in",
"config",
"."
] |
5e8377ddcda6e0ef4ba7d66cf400e243b1fb8f68
|
https://github.com/sarenji/pyrc/blob/5e8377ddcda6e0ef4ba7d66cf400e243b1fb8f68/pyrc/bots.py#L173-L179
|
243,841
|
sampottinger/pycotracer
|
pycotracer/report_interpreters.py
|
interpret_contribution_entry
|
def interpret_contribution_entry(entry):
"""Interpret data fields within a CO-TRACER contributions report.
Interpret the contribution amount, contribution date, filed date, amended,
and amendment fields of the provided entry. All dates (contribution and
filed) are interpreted together and, if any fails, all will retain their
original value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Entry may be
edited in place and side-effects are possible in coupled code. However,
client code should use the return value to guard against future changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
(ContributionAmount) or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
(ContributionDate and FiledDate) or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
(Amended and Amendment) or not respectively.
@param entry: The contribution report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
"""
try:
new_contribution_amount = float(entry['ContributionAmount'])
entry['AmountsInterpreted'] = True
entry['ContributionAmount'] = new_contribution_amount
except ValueError:
entry['AmountsInterpreted'] = False
except TypeError:
entry['AmountsInterpreted'] = False
except AttributeError:
entry['AmountsInterpreted'] = False
try:
contribution_date = parse_iso_str(entry['ContributionDate'])
filed_date = parse_iso_str(entry['FiledDate'])
entry['DatesInterpreted'] = True
entry['ContributionDate'] = contribution_date
entry['FiledDate'] = filed_date
except ValueError:
entry['DatesInterpreted'] = False
except TypeError:
entry['DatesInterpreted'] = False
except AttributeError:
entry['DatesInterpreted'] = False
try:
amended = parse_yes_no_str(entry['Amended'])
amendment = parse_yes_no_str(entry['Amendment'])
entry['BooleanFieldsInterpreted'] = True
entry['Amended'] = amended
entry['Amendment'] = amendment
except ValueError:
entry['BooleanFieldsInterpreted'] = False
except TypeError:
entry['BooleanFieldsInterpreted'] = False
except AttributeError:
entry['BooleanFieldsInterpreted'] = False
return entry
|
python
|
def interpret_contribution_entry(entry):
"""Interpret data fields within a CO-TRACER contributions report.
Interpret the contribution amount, contribution date, filed date, amended,
and amendment fields of the provided entry. All dates (contribution and
filed) are interpreted together and, if any fails, all will retain their
original value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Entry may be
edited in place and side-effects are possible in coupled code. However,
client code should use the return value to guard against future changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
(ContributionAmount) or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
(ContributionDate and FiledDate) or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
(Amended and Amendment) or not respectively.
@param entry: The contribution report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
"""
try:
new_contribution_amount = float(entry['ContributionAmount'])
entry['AmountsInterpreted'] = True
entry['ContributionAmount'] = new_contribution_amount
except ValueError:
entry['AmountsInterpreted'] = False
except TypeError:
entry['AmountsInterpreted'] = False
except AttributeError:
entry['AmountsInterpreted'] = False
try:
contribution_date = parse_iso_str(entry['ContributionDate'])
filed_date = parse_iso_str(entry['FiledDate'])
entry['DatesInterpreted'] = True
entry['ContributionDate'] = contribution_date
entry['FiledDate'] = filed_date
except ValueError:
entry['DatesInterpreted'] = False
except TypeError:
entry['DatesInterpreted'] = False
except AttributeError:
entry['DatesInterpreted'] = False
try:
amended = parse_yes_no_str(entry['Amended'])
amendment = parse_yes_no_str(entry['Amendment'])
entry['BooleanFieldsInterpreted'] = True
entry['Amended'] = amended
entry['Amendment'] = amendment
except ValueError:
entry['BooleanFieldsInterpreted'] = False
except TypeError:
entry['BooleanFieldsInterpreted'] = False
except AttributeError:
entry['BooleanFieldsInterpreted'] = False
return entry
|
[
"def",
"interpret_contribution_entry",
"(",
"entry",
")",
":",
"try",
":",
"new_contribution_amount",
"=",
"float",
"(",
"entry",
"[",
"'ContributionAmount'",
"]",
")",
"entry",
"[",
"'AmountsInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'ContributionAmount'",
"]",
"=",
"new_contribution_amount",
"except",
"ValueError",
":",
"entry",
"[",
"'AmountsInterpreted'",
"]",
"=",
"False",
"except",
"TypeError",
":",
"entry",
"[",
"'AmountsInterpreted'",
"]",
"=",
"False",
"except",
"AttributeError",
":",
"entry",
"[",
"'AmountsInterpreted'",
"]",
"=",
"False",
"try",
":",
"contribution_date",
"=",
"parse_iso_str",
"(",
"entry",
"[",
"'ContributionDate'",
"]",
")",
"filed_date",
"=",
"parse_iso_str",
"(",
"entry",
"[",
"'FiledDate'",
"]",
")",
"entry",
"[",
"'DatesInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'ContributionDate'",
"]",
"=",
"contribution_date",
"entry",
"[",
"'FiledDate'",
"]",
"=",
"filed_date",
"except",
"ValueError",
":",
"entry",
"[",
"'DatesInterpreted'",
"]",
"=",
"False",
"except",
"TypeError",
":",
"entry",
"[",
"'DatesInterpreted'",
"]",
"=",
"False",
"except",
"AttributeError",
":",
"entry",
"[",
"'DatesInterpreted'",
"]",
"=",
"False",
"try",
":",
"amended",
"=",
"parse_yes_no_str",
"(",
"entry",
"[",
"'Amended'",
"]",
")",
"amendment",
"=",
"parse_yes_no_str",
"(",
"entry",
"[",
"'Amendment'",
"]",
")",
"entry",
"[",
"'BooleanFieldsInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'Amended'",
"]",
"=",
"amended",
"entry",
"[",
"'Amendment'",
"]",
"=",
"amendment",
"except",
"ValueError",
":",
"entry",
"[",
"'BooleanFieldsInterpreted'",
"]",
"=",
"False",
"except",
"TypeError",
":",
"entry",
"[",
"'BooleanFieldsInterpreted'",
"]",
"=",
"False",
"except",
"AttributeError",
":",
"entry",
"[",
"'BooleanFieldsInterpreted'",
"]",
"=",
"False",
"return",
"entry"
] |
Interpret data fields within a CO-TRACER contributions report.
Interpret the contribution amount, contribution date, filed date, amended,
and amendment fields of the provided entry. All dates (contribution and
filed) are interpreted together and, if any fails, all will retain their
original value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Entry may be
edited in place and side-effects are possible in coupled code. However,
client code should use the return value to guard against future changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
(ContributionAmount) or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
(ContributionDate and FiledDate) or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
(Amended and Amendment) or not respectively.
@param entry: The contribution report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
|
[
"Interpret",
"data",
"fields",
"within",
"a",
"CO",
"-",
"TRACER",
"contributions",
"report",
"."
] |
c66c3230949b7bee8c9fec5fc00ab392865a0c8b
|
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/report_interpreters.py#L54-L119
|
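A hedged illustration with a minimal entry; real TRACER rows carry many more fields, and this assumes parse_iso_str accepts this ISO 8601 form and parse_yes_no_str maps 'Y'/'N' to booleans:

entry = {
    'ContributionAmount': '250.00',
    'ContributionDate': '2013-05-01T00:00:00',
    'FiledDate': '2013-05-03T00:00:00',
    'Amended': 'N',
    'Amendment': 'N',
}
result = interpret_contribution_entry(entry)

assert result['AmountsInterpreted'] and result['ContributionAmount'] == 250.0
assert result['DatesInterpreted'] and result['BooleanFieldsInterpreted']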
243,842
|
sampottinger/pycotracer
|
pycotracer/report_interpreters.py
|
interpret_expenditure_entry
|
def interpret_expenditure_entry(entry):
"""Interpret data fields within a CO-TRACER expediture report.
Interpret the expenditure amount, expenditure date, filed date, amended,
and amendment fields of the provided entry. All dates (expenditure and
filed) are interpreted together and, if any fails, all will retain their
original value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Entry may be
edited in place and side-effects are possible in coupled code. However,
client code should use the return value to guard against future changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
(ExpenditureAmount) or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
(ExpenditureDate and FiledDate) or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
(Amended and Amendment) or not respectively.
@param entry: The expenditure report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
"""
try:
expenditure_amount = float(entry['ExpenditureAmount'])
entry['AmountsInterpreted'] = True
entry['ExpenditureAmount'] = expenditure_amount
except ValueError:
entry['AmountsInterpreted'] = False
try:
expenditure_date = parse_iso_str(entry['ExpenditureDate'])
filed_date = parse_iso_str(entry['FiledDate'])
entry['DatesInterpreted'] = True
entry['ExpenditureDate'] = expenditure_date
entry['FiledDate'] = filed_date
except ValueError:
entry['DatesInterpreted'] = False
try:
amended = parse_yes_no_str(entry['Amended'])
amendment = parse_yes_no_str(entry['Amendment'])
entry['BooleanFieldsInterpreted'] = True
entry['Amended'] = amended
entry['Amendment'] = amendment
except ValueError:
entry['BooleanFieldsInterpreted'] = False
return entry
|
python
|
def interpret_expenditure_entry(entry):
"""Interpret data fields within a CO-TRACER expediture report.
Interpret the expenditure amount, expenditure date, filed date, amended,
and amendment fields of the provided entry. All dates (expenditure and
filed) are interpreted together and, if any fails, all will retain their
original value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Entry may be
edited in place and side-effects are possible in coupled code. However,
client code should use the return value to guard against future changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
(ExpenditureAmount) or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
(ExpenditureDate and FiledDate) or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
(Amended and Amendment) or not respectively.
@param entry: The expenditure report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
"""
try:
expenditure_amount = float(entry['ExpenditureAmount'])
entry['AmountsInterpreted'] = True
entry['ExpenditureAmount'] = expenditure_amount
except ValueError:
entry['AmountsInterpreted'] = False
try:
expenditure_date = parse_iso_str(entry['ExpenditureDate'])
filed_date = parse_iso_str(entry['FiledDate'])
entry['DatesInterpreted'] = True
entry['ExpenditureDate'] = expenditure_date
entry['FiledDate'] = filed_date
except ValueError:
entry['DatesInterpreted'] = False
try:
amended = parse_yes_no_str(entry['Amended'])
amendment = parse_yes_no_str(entry['Amendment'])
entry['BooleanFieldsInterpreted'] = True
entry['Amended'] = amended
entry['Amendment'] = amendment
except ValueError:
entry['BooleanFieldsInterpreted'] = False
return entry
|
[
"def",
"interpret_expenditure_entry",
"(",
"entry",
")",
":",
"try",
":",
"expenditure_amount",
"=",
"float",
"(",
"entry",
"[",
"'ExpenditureAmount'",
"]",
")",
"entry",
"[",
"'AmountsInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'ExpenditureAmount'",
"]",
"=",
"expenditure_amount",
"except",
"ValueError",
":",
"entry",
"[",
"'AmountsInterpreted'",
"]",
"=",
"False",
"try",
":",
"expenditure_date",
"=",
"parse_iso_str",
"(",
"entry",
"[",
"'ExpenditureDate'",
"]",
")",
"filed_date",
"=",
"parse_iso_str",
"(",
"entry",
"[",
"'FiledDate'",
"]",
")",
"entry",
"[",
"'DatesInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'ExpenditureDate'",
"]",
"=",
"expenditure_date",
"entry",
"[",
"'FiledDate'",
"]",
"=",
"filed_date",
"except",
"ValueError",
":",
"entry",
"[",
"'DatesInterpreted'",
"]",
"=",
"False",
"try",
":",
"amended",
"=",
"parse_yes_no_str",
"(",
"entry",
"[",
"'Amended'",
"]",
")",
"amendment",
"=",
"parse_yes_no_str",
"(",
"entry",
"[",
"'Amendment'",
"]",
")",
"entry",
"[",
"'BooleanFieldsInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'Amended'",
"]",
"=",
"amended",
"entry",
"[",
"'Amendment'",
"]",
"=",
"amendment",
"except",
"ValueError",
":",
"entry",
"[",
"'BooleanFieldsInterpreted'",
"]",
"=",
"False",
"return",
"entry"
] |
Interpret data fields within a CO-TRACER expenditure report.
Interpret the expenditure amount, expenditure date, filed date, amended,
and amendment fields of the provided entry. All dates (expenditure and
filed) are interpreted together and, if any fails, all will retain their
original value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Entry may be
edited in place and side-effects are possible in coupled code. However,
client code should use the return value to guard against future changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
(ExpenditureAmount) or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
(ExpenditureDate and FiledDate) or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
(Amended and Amendment) or not respectively.
@param entry: The expenditure report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
|
[
"Interpret",
"data",
"fields",
"within",
"a",
"CO",
"-",
"TRACER",
"expediture",
"report",
"."
] |
c66c3230949b7bee8c9fec5fc00ab392865a0c8b
|
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/report_interpreters.py#L156-L209
|
243,843
|
sampottinger/pycotracer
|
pycotracer/report_interpreters.py
|
interpret_loan_entry
|
def interpret_loan_entry(entry):
"""Interpret data fields within a CO-TRACER loan report.
Interpret the payment amount, loan amount, interest rate, interest payment,
loan balance, payment date, filed date, loan date, amended, and amendment
fields of the provided entry. All dates (payment, filed, and loan) are
interpreted together and, if any fails, all will retain their original
value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Finally,
the payment amount, loan amount, interest rate, interest payment, and
loan balance will be interpreted transactionally and, if any fail, all
will retain their original value.
Entry may be edited in place and side-effects are possible in coupled code.
However, client code should use the return value to guard against future
changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
or not respectively.
@param entry: The loan report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
"""
try:
payment_amount = float(entry['PaymentAmount'])
loan_amount = float(entry['LoanAmount'])
interest_rate = float(entry['InterestRate'])
interest_payment = float(entry['InterestPayment'])
loan_balance = float(entry['LoanBalance'])
entry['AmountsInterpreted'] = True
entry['PaymentAmount'] = payment_amount
entry['LoanAmount'] = loan_amount
entry['InterestRate'] = interest_rate
entry['InterestPayment'] = interest_payment
entry['LoanBalance'] = loan_balance
except ValueError:
entry['AmountsInterpreted'] = False
try:
payment_date = parse_iso_str(entry['PaymentDate'])
filed_date = parse_iso_str(entry['FiledDate'])
loan_date = parse_iso_str(entry['LoanDate'])
entry['DatesInterpreted'] = True
entry['PaymentDate'] = payment_date
entry['FiledDate'] = filed_date
entry['LoanDate'] = loan_date
except ValueError:
entry['DatesInterpreted'] = False
try:
amended = parse_yes_no_str(entry['Amended'])
amendment = parse_yes_no_str(entry['Amendment'])
entry['BooleanFieldsInterpreted'] = True
entry['Amended'] = amended
entry['Amendment'] = amendment
except ValueError:
entry['BooleanFieldsInterpreted'] = False
return entry
|
python
|
def interpret_loan_entry(entry):
"""Interpret data fields within a CO-TRACER loan report.
Interpret the payment amount, loan amount, interest rate, interest payment,
loan balance, payment date, filed date, loan date, amended, and amendment
fields of the provided entry. All dates (payment, filed, and loan) are
interpreted together and, if any fails, all will retain their original
value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Finally,
the payment amount, loan amount, interest rate, interest payment, and
loan balance will be interpreted transactionally and, if any fail, all
will retain their original value.
Entry may be edited in place and side-effects are possible in coupled code.
However, client code should use the return value to guard against future
changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
or not respectively.
@param entry: The loan report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
"""
try:
payment_amount = float(entry['PaymentAmount'])
loan_amount = float(entry['LoanAmount'])
interest_rate = float(entry['InterestRate'])
interest_payment = float(entry['InterestPayment'])
loan_balance = float(entry['LoanBalance'])
entry['AmountsInterpreted'] = True
entry['PaymentAmount'] = payment_amount
entry['LoanAmount'] = loan_amount
entry['InterestRate'] = interest_rate
entry['InterestPayment'] = interest_payment
entry['LoanBalance'] = loan_balance
except ValueError:
entry['AmountsInterpreted'] = False
try:
payment_date = parse_iso_str(entry['PaymentDate'])
filed_date = parse_iso_str(entry['FiledDate'])
loan_date = parse_iso_str(entry['LoanDate'])
entry['DatesInterpreted'] = True
entry['PaymentDate'] = payment_date
entry['FiledDate'] = filed_date
entry['LoanDate'] = loan_date
except ValueError:
entry['DatesInterpreted'] = False
try:
amended = parse_yes_no_str(entry['Amended'])
amendment = parse_yes_no_str(entry['Amendment'])
entry['BooleanFieldsInterpreted'] = True
entry['Amended'] = amended
entry['Amendment'] = amendment
except ValueError:
entry['BooleanFieldsInterpreted'] = False
return entry
|
[
"def",
"interpret_loan_entry",
"(",
"entry",
")",
":",
"try",
":",
"payment_amount",
"=",
"float",
"(",
"entry",
"[",
"'PaymentAmount'",
"]",
")",
"loan_amount",
"=",
"float",
"(",
"entry",
"[",
"'LoanAmount'",
"]",
")",
"interest_rate",
"=",
"float",
"(",
"entry",
"[",
"'InterestRate'",
"]",
")",
"interest_payment",
"=",
"float",
"(",
"entry",
"[",
"'InterestPayment'",
"]",
")",
"loan_balance",
"=",
"float",
"(",
"entry",
"[",
"'LoanBalance'",
"]",
")",
"entry",
"[",
"'AmountsInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'PaymentAmount'",
"]",
"=",
"payment_amount",
"entry",
"[",
"'LoanAmount'",
"]",
"=",
"loan_amount",
"entry",
"[",
"'InterestRate'",
"]",
"=",
"interest_rate",
"entry",
"[",
"'InterestPayment'",
"]",
"=",
"interest_payment",
"entry",
"[",
"'LoanBalance'",
"]",
"=",
"loan_balance",
"except",
"ValueError",
":",
"entry",
"[",
"'AmountsInterpreted'",
"]",
"=",
"False",
"try",
":",
"payment_date",
"=",
"parse_iso_str",
"(",
"entry",
"[",
"'PaymentDate'",
"]",
")",
"filed_date",
"=",
"parse_iso_str",
"(",
"entry",
"[",
"'FiledDate'",
"]",
")",
"loan_date",
"=",
"parse_iso_str",
"(",
"entry",
"[",
"'LoanDate'",
"]",
")",
"entry",
"[",
"'DatesInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'PaymentDate'",
"]",
"=",
"payment_date",
"entry",
"[",
"'FiledDate'",
"]",
"=",
"filed_date",
"entry",
"[",
"'LoanDate'",
"]",
"=",
"loan_date",
"except",
"ValueError",
":",
"entry",
"[",
"'DatesInterpreted'",
"]",
"=",
"False",
"try",
":",
"amended",
"=",
"parse_yes_no_str",
"(",
"entry",
"[",
"'Amended'",
"]",
")",
"amendment",
"=",
"parse_yes_no_str",
"(",
"entry",
"[",
"'Amendment'",
"]",
")",
"entry",
"[",
"'BooleanFieldsInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'Amended'",
"]",
"=",
"amended",
"entry",
"[",
"'Amendment'",
"]",
"=",
"amendment",
"except",
"ValueError",
":",
"entry",
"[",
"'BooleanFieldsInterpreted'",
"]",
"=",
"False",
"return",
"entry"
] |
Interpret data fields within a CO-TRACER loan report.
Interpret the payment amount, loan amount, interest rate, interest payment,
loan balance, payment date, filed date, loan date, amended, and amendment
fields of the provided entry. All dates (payment, filed, and loan) are
interpreted together and, if any fails, all will retain their original
value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Finally,
the payment amount, loan amount, interest rate, interest payment, and
loan balance will be interpreted transactionally and, if any fail, all
will retain their original value.
Entry may be edited in place and side-effects are possible in coupled code.
However, client code should use the return value to guard against future
changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
or not respectively.
@param entry: The loan report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
|
[
"Interpret",
"data",
"fields",
"within",
"a",
"CO",
"-",
"TRACER",
"loan",
"report",
"."
] |
c66c3230949b7bee8c9fec5fc00ab392865a0c8b
|
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/report_interpreters.py#L246-L315
|
243,844
|
RonenNess/Fileter
|
fileter/iterators/add_header.py
|
AddHeader.process_file
|
def process_file(self, path, dryrun):
"""
Add header to all files.
"""
if dryrun:
return path
# get file's current header
with open(path, "r") as infile:
head = infile.read(len(self.__header))
# normalize line breaks
if self.__normalize_br:
head = head.replace("\r\n", "\n")
# already contain header? skip
if head == self.__header:
return path
# add header to file
self.push_header(path)
# return processed file
return path
|
python
|
def process_file(self, path, dryrun):
"""
Add header to all files.
"""
if dryrun:
return path
# get file's current header
with open(path, "r") as infile:
head = infile.read(len(self.__header))
# normalize line breaks
if self.__normalize_br:
head = head.replace("\r\n", "\n")
# already contain header? skip
if head == self.__header:
return path
# add header to file
self.push_header(path)
# return processed file
return path
|
[
"def",
"process_file",
"(",
"self",
",",
"path",
",",
"dryrun",
")",
":",
"if",
"dryrun",
":",
"return",
"path",
"# get file's current header",
"with",
"open",
"(",
"path",
",",
"\"r\"",
")",
"as",
"infile",
":",
"head",
"=",
"infile",
".",
"read",
"(",
"len",
"(",
"self",
".",
"__header",
")",
")",
"# normalize line breaks",
"if",
"self",
".",
"__normalize_br",
":",
"head",
"=",
"head",
".",
"replace",
"(",
"\"\\r\\n\"",
",",
"\"\\n\"",
")",
"# already contain header? skip",
"if",
"head",
"==",
"self",
".",
"__header",
":",
"return",
"path",
"# add header to file",
"self",
".",
"push_header",
"(",
"path",
")",
"# return processed file",
"return",
"path"
] |
Add header to all files.
|
[
"Add",
"header",
"to",
"all",
"files",
"."
] |
5372221b4049d5d46a9926573b91af17681c81f3
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/iterators/add_header.py#L40-L63
|
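A standalone sketch of the check-then-prepend idea; push_header above is the class's own writer, so this stand-in is hypothetical:

def ensure_header(path, header):
    with open(path, 'r') as infile:
        head = infile.read(len(header)).replace('\r\n', '\n')
    if head == header:
        return path                       # header already present; skip
    with open(path, 'r') as infile:
        body = infile.read()
    with open(path, 'w') as outfile:
        outfile.write(header + body)      # prepend the header
    return path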
243,845
|
ppo/django-guitar
|
guitar/utils/admin.py
|
csv_list
|
def csv_list(models, attr, link=False, separator=", "):
"""Return a comma-separated list of models, optionaly with a link."""
values = []
for model in models:
value = getattr(model, attr)
if link and hasattr(model, "get_admin_url") and callable(model.get_admin_url):
value = get_admin_html_link(model, label=value)
values.append(value)
return separator.join(values)
|
python
|
def csv_list(models, attr, link=False, separator=", "):
"""Return a comma-separated list of models, optionaly with a link."""
values = []
for model in models:
value = getattr(model, attr)
if link and hasattr(model, "get_admin_url") and callable(model.get_admin_url):
value = get_admin_html_link(model, label=value)
values.append(value)
return separator.join(values)
|
[
"def",
"csv_list",
"(",
"models",
",",
"attr",
",",
"link",
"=",
"False",
",",
"separator",
"=",
"\", \"",
")",
":",
"values",
"=",
"[",
"]",
"for",
"model",
"in",
"models",
":",
"value",
"=",
"getattr",
"(",
"model",
",",
"attr",
")",
"if",
"link",
"and",
"hasattr",
"(",
"model",
",",
"\"get_admin_url\"",
")",
"and",
"callable",
"(",
"model",
".",
"get_admin_url",
")",
":",
"value",
"=",
"get_admin_html_link",
"(",
"model",
",",
"label",
"=",
"value",
")",
"values",
".",
"append",
"(",
"value",
")",
"return",
"separator",
".",
"join",
"(",
"values",
")"
] |
Return a comma-separated list of models, optionally with a link.
|
[
"Return",
"a",
"comma",
"-",
"separated",
"list",
"of",
"models",
"optionaly",
"with",
"a",
"link",
"."
] |
857282219c0c4ff5907c3ad04ef012281d245348
|
https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/utils/admin.py#L15-L23
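For illustration, any objects exposing the named attribute will do; the Tag type below is a hypothetical stand-in, assuming csv_list is in scope:

from collections import namedtuple

Tag = namedtuple("Tag", "name")
# Without link=True this simply joins the attribute values:
assert csv_list([Tag("django"), Tag("guitar")], "name") == "django, guitar"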
|
243,846
|
ppo/django-guitar
|
guitar/utils/admin.py
|
get_admin_url
|
def get_admin_url(obj, page=None):
"""Return the URL to admin pages for this object."""
if obj is None:
return None
if page is None:
page = "change"
if page not in ADMIN_ALL_PAGES:
raise ValueError("Invalid page name '{}'. Available pages are: {}.".format(page, ADMIN_ALL_PAGES))
app_label = obj.__class__._meta.app_label
object_name = obj.__class__._meta.object_name.lower()
if page in ADMIN_GLOBAL_PAGES:
url_name = page
else:
url_name = "{}_{}_{}".format(app_label, object_name, page)
if page == "app_list":
url_args = (app_label,)
elif page == "view_on_site":
content_type = ContentType.objects.get_for_model(obj.__class__)
url_args = (content_type, obj._get_pk_val())
elif page in ADMIN_DETAIL_PAGES:
url_args = (obj._get_pk_val(),)
else:
url_args = None
return reverse("admin:{}".format(url_name), args=url_args)
|
python
|
def get_admin_url(obj, page=None):
"""Return the URL to admin pages for this object."""
if obj is None:
return None
if page is None:
page = "change"
if page not in ADMIN_ALL_PAGES:
raise ValueError("Invalid page name '{}'. Available pages are: {}.".format(page, ADMIN_ALL_PAGES))
app_label = obj.__class__._meta.app_label
object_name = obj.__class__._meta.object_name.lower()
if page in ADMIN_GLOBAL_PAGES:
url_name = page
else:
url_name = "{}_{}_{}".format(app_label, object_name, page)
if page == "app_list":
url_args = (app_label,)
elif page == "view_on_site":
content_type = ContentType.objects.get_for_model(obj.__class__)
url_args = (content_type, obj._get_pk_val())
elif page in ADMIN_DETAIL_PAGES:
url_args = (obj._get_pk_val(),)
else:
url_args = None
return reverse("admin:{}".format(url_name), args=url_args)
|
[
"def",
"get_admin_url",
"(",
"obj",
",",
"page",
"=",
"None",
")",
":",
"if",
"obj",
"is",
"None",
":",
"return",
"None",
"if",
"page",
"is",
"None",
":",
"page",
"=",
"\"change\"",
"if",
"page",
"not",
"in",
"ADMIN_ALL_PAGES",
":",
"raise",
"ValueError",
"(",
"\"Invalid page name '{}'. Available pages are: {}.\"",
".",
"format",
"(",
"page",
",",
"ADMIN_ALL_PAGES",
")",
")",
"app_label",
"=",
"obj",
".",
"__class__",
".",
"_meta",
".",
"app_label",
"object_name",
"=",
"obj",
".",
"__class__",
".",
"_meta",
".",
"object_name",
".",
"lower",
"(",
")",
"if",
"page",
"in",
"ADMIN_GLOBAL_PAGES",
":",
"url_name",
"=",
"page",
"else",
":",
"url_name",
"=",
"\"{}_{}_{}\"",
".",
"format",
"(",
"app_label",
",",
"object_name",
",",
"page",
")",
"if",
"page",
"==",
"\"app_list\"",
":",
"url_args",
"=",
"(",
"app_label",
",",
")",
"elif",
"page",
"==",
"\"view_on_site\"",
":",
"content_type",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"obj",
".",
"__class__",
")",
"url_args",
"=",
"(",
"content_type",
",",
"obj",
".",
"_get_pk_val",
"(",
")",
")",
"elif",
"page",
"in",
"ADMIN_DETAIL_PAGES",
":",
"url_args",
"=",
"(",
"obj",
".",
"_get_pk_val",
"(",
")",
",",
")",
"else",
":",
"url_args",
"=",
"None",
"return",
"reverse",
"(",
"\"admin:{}\"",
".",
"format",
"(",
"url_name",
")",
",",
"args",
"=",
"url_args",
")"
] |
Return the URL to admin pages for this object.
|
[
"Return",
"the",
"URL",
"to",
"admin",
"pages",
"for",
"this",
"object",
"."
] |
857282219c0c4ff5907c3ad04ef012281d245348
|
https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/utils/admin.py#L26-L54
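A hypothetical trace of the dispatch, assuming "change" is among ADMIN_DETAIL_PAGES (those constants are defined elsewhere in the module):

# for page="change" on a Book instance with pk=7 in app "library":
#   url_name = "library_book_change"
#   url_args = (7,)
#   return reverse("admin:library_book_change", args=(7,))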
|
243,847
|
ppo/django-guitar
|
guitar/utils/admin.py
|
get_fieldset_index
|
def get_fieldset_index(fieldsets, index_or_name):
"""
Return the index of a fieldset in the ``fieldsets`` list.
Args:
fieldsets (list): The original ``fieldsets`` list.
index_or_name (int or str): The value of the reference element, or directly its numeric index.
Returns:
(int) The index of the fieldset in the ``fieldsets`` list.
"""
if isinstance(index_or_name, six.integer_types):
return index_or_name
for key, value in enumerate(fieldsets):
if value[0] == index_or_name:
return key
raise KeyError("Key not found: '{}'.".format(index_or_name))
|
python
|
def get_fieldset_index(fieldsets, index_or_name):
"""
Return the index of a fieldset in the ``fieldsets`` list.
Args:
fieldsets (list): The original ``fieldsets`` list.
index_or_name (int or str): The value of the reference element, or directly its numeric index.
Returns:
(int) The index of the fieldset in the ``fieldsets`` list.
"""
if isinstance(index_or_name, six.integer_types):
return index_or_name
for key, value in enumerate(fieldsets):
if value[0] == index_or_name:
return key
raise KeyError("Key not found: '{}'.".format(index_or_name))
|
[
"def",
"get_fieldset_index",
"(",
"fieldsets",
",",
"index_or_name",
")",
":",
"if",
"isinstance",
"(",
"index_or_name",
",",
"six",
".",
"integer_types",
")",
":",
"return",
"index_or_name",
"for",
"key",
",",
"value",
"in",
"enumerate",
"(",
"fieldsets",
")",
":",
"if",
"value",
"[",
"0",
"]",
"==",
"index_or_name",
":",
"return",
"key",
"raise",
"KeyError",
"(",
"\"Key not found: '{}'.\"",
".",
"format",
"(",
"index_or_name",
")",
")"
] |
Return the index of a fieldset in the ``fieldsets`` list.
Args:
fieldsets (list): The original ``fieldsets`` list.
index_or_name (int or str): The value of the reference element, or directly its numeric index.
Returns:
(int) The index of the fieldset in the ``fieldsets`` list.
|
[
"Return",
"the",
"index",
"of",
"a",
"fieldset",
"in",
"the",
"fieldsets",
"list",
"."
] |
857282219c0c4ff5907c3ad04ef012281d245348
|
https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/utils/admin.py#L93-L111
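A quick usage sketch, with fieldsets following Django admin's (name, options) tuple convention and get_fieldset_index in scope:

fieldsets = [
    ("General", {"fields": ["name"]}),
    ("Dates", {"fields": ["created"]}),
]
assert get_fieldset_index(fieldsets, "Dates") == 1
assert get_fieldset_index(fieldsets, 0) == 0  # integers pass straight through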
|
243,848
|
ppo/django-guitar
|
guitar/utils/admin.py
|
get_list_index
|
def get_list_index(lst, index_or_name):
"""
Return the index of an element in the list.
Args:
lst (list): The list.
index_or_name (int or str): The value of the reference element, or directly its numeric index.
Returns:
(int) The index of the element in the list.
"""
if isinstance(index_or_name, six.integer_types):
return index_or_name
return lst.index(index_or_name)
|
python
|
def get_list_index(lst, index_or_name):
"""
Return the index of an element in the list.
Args:
lst (list): The list.
index_or_name (int or str): The value of the reference element, or directly its numeric index.
Returns:
(int) The index of the element in the list.
"""
if isinstance(index_or_name, six.integer_types):
return index_or_name
return lst.index(index_or_name)
|
[
"def",
"get_list_index",
"(",
"lst",
",",
"index_or_name",
")",
":",
"if",
"isinstance",
"(",
"index_or_name",
",",
"six",
".",
"integer_types",
")",
":",
"return",
"index_or_name",
"return",
"lst",
".",
"index",
"(",
"index_or_name",
")"
] |
Return the index of an element in the list.
Args:
lst (list): The list.
index_or_name (int or str): The value of the reference element, or directly its numeric index.
Returns:
(int) The index of the element in the list.
|
[
"Return",
"the",
"index",
"of",
"an",
"element",
"in",
"the",
"list",
"."
] |
857282219c0c4ff5907c3ad04ef012281d245348
|
https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/utils/admin.py#L114-L128
|
243,849
|
pip-services3-python/pip-services3-components-python
|
pip_services3_components/cache/CacheEntry.py
|
CacheEntry.set_value
|
def set_value(self, value, timeout):
"""
Sets a new value and extends its expiration.
:param value: a new cached value.
    :param timeout: an expiration timeout in milliseconds.
"""
self.value = value
self.expiration = time.perf_counter() * 1000 + timeout
|
python
|
def set_value(self, value, timeout):
"""
Sets a new value and extends its expiration.
:param value: a new cached value.
    :param timeout: an expiration timeout in milliseconds.
"""
self.value = value
self.expiration = time.perf_counter() * 1000 + timeout
|
[
"def",
"set_value",
"(",
"self",
",",
"value",
",",
"timeout",
")",
":",
"self",
".",
"value",
"=",
"value",
"self",
".",
"expiration",
"=",
"time",
".",
"perf_counter",
"(",
")",
"*",
"1000",
"+",
"timeout"
] |
Sets a new value and extends its expiration.
:param value: a new cached value.
:param timeout: an expiration timeout in milliseconds.
|
[
"Sets",
"a",
"new",
"value",
"and",
"extends",
"its",
"expiration",
"."
] |
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
|
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/cache/CacheEntry.py#L36-L45
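set_value stores an absolute deadline in milliseconds on the time.perf_counter() clock; a matching expiry check, as a minimal sketch (is_expired is a hypothetical helper, not part of CacheEntry, and must use the same clock):

import time

def is_expired(entry):
    # deadline and "now" are both milliseconds on the perf_counter clock
    return time.perf_counter() * 1000 > entry.expiration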
|
243,850
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
_generate_password
|
def _generate_password():
"""Create a random password
    The password is made by taking a uuid and passing it through sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
|
python
|
def _generate_password():
"""Create a random password
    The password is made by taking a uuid and passing it through sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
"""
uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
return hashlib.sha1(uuid_str).hexdigest()
|
[
"def",
"_generate_password",
"(",
")",
":",
"uuid_str",
"=",
"six",
".",
"text_type",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
".",
"encode",
"(",
"\"UTF-8\"",
")",
"return",
"hashlib",
".",
"sha1",
"(",
"uuid_str",
")",
".",
"hexdigest",
"(",
")"
] |
Create a random password
The password is made by taking a uuid and passing it through sha1sum.
We may change this in future to gain more entropy.
This is based on the tripleo command os-make-password
|
[
"Create",
"a",
"random",
"password"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L51-L60
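The recipe can be reproduced directly; SHA-1 of a UUID string yields a 40-character hex password:

import hashlib
import uuid

digest = hashlib.sha1(str(uuid.uuid4()).encode("UTF-8")).hexdigest()
assert len(digest) == 40  # hex encoding of a 160-bit digest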
|
243,851
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
generate_overcloud_passwords
|
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
    file already exists the existing passwords will be returned instead.
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
|
python
|
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
    file already exists the existing passwords will be returned instead.
"""
if os.path.isfile(output_file):
with open(output_file) as f:
return dict(line.split('=') for line in f.read().splitlines())
password_names = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
)
passwords = dict((p, _generate_password()) for p in password_names)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
|
[
"def",
"generate_overcloud_passwords",
"(",
"output_file",
"=",
"\"tripleo-overcloud-passwords\"",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"output_file",
")",
":",
"with",
"open",
"(",
"output_file",
")",
"as",
"f",
":",
"return",
"dict",
"(",
"line",
".",
"split",
"(",
"'='",
")",
"for",
"line",
"in",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
")",
"password_names",
"=",
"(",
"\"OVERCLOUD_ADMIN_PASSWORD\"",
",",
"\"OVERCLOUD_ADMIN_TOKEN\"",
",",
"\"OVERCLOUD_CEILOMETER_PASSWORD\"",
",",
"\"OVERCLOUD_CEILOMETER_SECRET\"",
",",
"\"OVERCLOUD_CINDER_PASSWORD\"",
",",
"\"OVERCLOUD_DEMO_PASSWORD\"",
",",
"\"OVERCLOUD_GLANCE_PASSWORD\"",
",",
"\"OVERCLOUD_HEAT_PASSWORD\"",
",",
"\"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD\"",
",",
"\"OVERCLOUD_NEUTRON_PASSWORD\"",
",",
"\"OVERCLOUD_NOVA_PASSWORD\"",
",",
"\"OVERCLOUD_SWIFT_HASH\"",
",",
"\"OVERCLOUD_SWIFT_PASSWORD\"",
",",
")",
"passwords",
"=",
"dict",
"(",
"(",
"p",
",",
"_generate_password",
"(",
")",
")",
"for",
"p",
"in",
"password_names",
")",
"with",
"open",
"(",
"output_file",
",",
"'w'",
")",
"as",
"f",
":",
"for",
"name",
",",
"password",
"in",
"passwords",
".",
"items",
"(",
")",
":",
"f",
".",
"write",
"(",
"\"{0}={1}\\n\"",
".",
"format",
"(",
"name",
",",
"password",
")",
")",
"return",
"passwords"
] |
Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
file already exists the existing passwords will be returned instead.
|
[
"Create",
"the",
"passwords",
"needed",
"for",
"the",
"overcloud"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L63-L97
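The file format is plain NAME=value lines, so the early-return branch round-trips exactly what the write branch produced; assuming the file exists:

with open("tripleo-overcloud-passwords") as f:
    passwords = dict(line.split("=") for line in f.read().splitlines())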
|
243,852
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
check_hypervisor_stats
|
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
|
python
|
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
|
[
"def",
"check_hypervisor_stats",
"(",
"compute_client",
",",
"nodes",
"=",
"1",
",",
"memory",
"=",
"0",
",",
"vcpu",
"=",
"0",
")",
":",
"statistics",
"=",
"compute_client",
".",
"hypervisors",
".",
"statistics",
"(",
")",
".",
"to_dict",
"(",
")",
"if",
"all",
"(",
"[",
"statistics",
"[",
"'count'",
"]",
">=",
"nodes",
",",
"statistics",
"[",
"'memory_mb'",
"]",
">=",
"memory",
",",
"statistics",
"[",
"'vcpus'",
"]",
">=",
"vcpu",
"]",
")",
":",
"return",
"statistics",
"else",
":",
"return",
"None"
] |
Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
|
[
"Check",
"the",
"Hypervisor",
"stats",
"meet",
"a",
"minimum",
"value"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L100-L126
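Callers typically poll until a non-None statistics dict comes back; a hedged sketch of such a wrapper (compute_client construction is out of scope here, and the helper name is illustrative):

import time

def wait_for_hypervisor_stats(compute_client, nodes=1, loops=30, sleep=10):
    # hypothetical polling wrapper around check_hypervisor_stats
    for _ in range(loops):
        stats = check_hypervisor_stats(compute_client, nodes=nodes)
        if stats is not None:
            return stats
        time.sleep(sleep)
    return None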
|
243,853
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
wait_for_stack_ready
|
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
|
python
|
def wait_for_stack_ready(orchestration_client, stack_name):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
"""
SUCCESSFUL_MATCH_OUTPUT = "(CREATE|UPDATE)_COMPLETE"
FAIL_MATCH_OUTPUT = "(CREATE|UPDATE)_FAILED"
while True:
stack = orchestration_client.stacks.get(stack_name)
if not stack:
return False
status = stack.stack_status
if re.match(SUCCESSFUL_MATCH_OUTPUT, status):
return True
if re.match(FAIL_MATCH_OUTPUT, status):
print("Stack failed with status: {}".format(
stack.stack_status_reason, file=sys.stderr))
return False
time.sleep(10)
|
[
"def",
"wait_for_stack_ready",
"(",
"orchestration_client",
",",
"stack_name",
")",
":",
"SUCCESSFUL_MATCH_OUTPUT",
"=",
"\"(CREATE|UPDATE)_COMPLETE\"",
"FAIL_MATCH_OUTPUT",
"=",
"\"(CREATE|UPDATE)_FAILED\"",
"while",
"True",
":",
"stack",
"=",
"orchestration_client",
".",
"stacks",
".",
"get",
"(",
"stack_name",
")",
"if",
"not",
"stack",
":",
"return",
"False",
"status",
"=",
"stack",
".",
"stack_status",
"if",
"re",
".",
"match",
"(",
"SUCCESSFUL_MATCH_OUTPUT",
",",
"status",
")",
":",
"return",
"True",
"if",
"re",
".",
"match",
"(",
"FAIL_MATCH_OUTPUT",
",",
"status",
")",
":",
"print",
"(",
"\"Stack failed with status: {}\"",
".",
"format",
"(",
"stack",
".",
"stack_status_reason",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
")",
"return",
"False",
"time",
".",
"sleep",
"(",
"10",
")"
] |
Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
|
[
"Check",
"the",
"status",
"of",
"an",
"orchestration",
"stack"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L129-L159
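Because re.match anchors at the start of the string, the two patterns accept exactly the CREATE/UPDATE outcomes:

import re

assert re.match("(CREATE|UPDATE)_COMPLETE", "CREATE_COMPLETE")
assert re.match("(CREATE|UPDATE)_FAILED", "UPDATE_FAILED")
assert not re.match("(CREATE|UPDATE)_COMPLETE", "DELETE_COMPLETE")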
|
243,854
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
wait_for_provision_state
|
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
|
python
|
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
for _ in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return True
if node.provision_state == provision_state:
return True
time.sleep(sleep)
return False
|
[
"def",
"wait_for_provision_state",
"(",
"baremetal_client",
",",
"node_uuid",
",",
"provision_state",
",",
"loops",
"=",
"10",
",",
"sleep",
"=",
"1",
")",
":",
"for",
"_",
"in",
"range",
"(",
"0",
",",
"loops",
")",
":",
"node",
"=",
"baremetal_client",
".",
"node",
".",
"get",
"(",
"node_uuid",
")",
"if",
"node",
"is",
"None",
":",
"# The node can't be found in ironic, so we don't need to wait for",
"# the provision state",
"return",
"True",
"if",
"node",
".",
"provision_state",
"==",
"provision_state",
":",
"return",
"True",
"time",
".",
"sleep",
"(",
"sleep",
")",
"return",
"False"
] |
Wait for a given Provisioning state in Ironic Discoverd
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
|
[
"Wait",
"for",
"a",
"given",
"Provisioning",
"state",
"in",
"Ironic",
"Discoverd"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L162-L199
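With the defaults this polls for roughly loops x sleep = 10 seconds; a typical call site might stretch that window (client construction omitted, names assumed):

ok = wait_for_provision_state(baremetal_client, node_uuid,
                              "available", loops=60, sleep=1)
if not ok:
    raise RuntimeError("node %s never reached 'available'" % node_uuid)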
|
243,855
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
wait_for_node_discovery
|
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
|
python
|
def wait_for_node_discovery(discoverd_client, auth_token, discoverd_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_discovery")
node_uuids = node_uuids[:]
for _ in range(0, loops):
for node_uuid in node_uuids:
status = discoverd_client.get_status(
node_uuid,
base_url=discoverd_url,
auth_token=auth_token)
if status['finished']:
log.debug("Discover finished for node {0} (Error: {1})".format(
node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
raise StopIteration
time.sleep(sleep)
if len(node_uuids):
log.error("Discovery didn't finish for nodes {0}".format(
','.join(node_uuids)))
|
[
"def",
"wait_for_node_discovery",
"(",
"discoverd_client",
",",
"auth_token",
",",
"discoverd_url",
",",
"node_uuids",
",",
"loops",
"=",
"220",
",",
"sleep",
"=",
"10",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
"+",
"\".wait_for_node_discovery\"",
")",
"node_uuids",
"=",
"node_uuids",
"[",
":",
"]",
"for",
"_",
"in",
"range",
"(",
"0",
",",
"loops",
")",
":",
"for",
"node_uuid",
"in",
"node_uuids",
":",
"status",
"=",
"discoverd_client",
".",
"get_status",
"(",
"node_uuid",
",",
"base_url",
"=",
"discoverd_url",
",",
"auth_token",
"=",
"auth_token",
")",
"if",
"status",
"[",
"'finished'",
"]",
":",
"log",
".",
"debug",
"(",
"\"Discover finished for node {0} (Error: {1})\"",
".",
"format",
"(",
"node_uuid",
",",
"status",
"[",
"'error'",
"]",
")",
")",
"node_uuids",
".",
"remove",
"(",
"node_uuid",
")",
"yield",
"node_uuid",
",",
"status",
"if",
"not",
"len",
"(",
"node_uuids",
")",
":",
"raise",
"StopIteration",
"time",
".",
"sleep",
"(",
"sleep",
")",
"if",
"len",
"(",
"node_uuids",
")",
":",
"log",
".",
"error",
"(",
"\"Discovery didn't finish for nodes {0}\"",
".",
"format",
"(",
"','",
".",
"join",
"(",
"node_uuids",
")",
")",
")"
] |
Check the status of Node discovery in Ironic discoverd
Gets the status and waits for them to complete.
:param discoverd_client: Ironic Discoverd client
:type discoverd_client: ironic_discoverd.client
:param auth_token: Authorisation token used by discoverd client
:type auth_token: string
:param discoverd_url: URL used by the discoverd client
:type discoverd_url: string
:param node_uuids: List of Node UUID's to wait for discovery
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
|
[
"Check",
"the",
"status",
"of",
"Node",
"discovery",
"in",
"Ironic",
"discoverd"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L202-L251
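Since this is a generator, nothing runs until it is iterated; a consuming loop might look like this (client and URL setup assumed):

for node_uuid, status in wait_for_node_discovery(
        discoverd_client, auth_token, discoverd_url, node_uuids):
    if status['error']:
        print("discovery failed for %s: %s" % (node_uuid, status['error']))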
|
243,856
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
create_environment_file
|
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
|
python
|
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameters": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
|
[
"def",
"create_environment_file",
"(",
"path",
"=",
"\"~/overcloud-env.json\"",
",",
"control_scale",
"=",
"1",
",",
"compute_scale",
"=",
"1",
",",
"ceph_storage_scale",
"=",
"0",
",",
"block_storage_scale",
"=",
"0",
",",
"swift_storage_scale",
"=",
"0",
")",
":",
"env_path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"with",
"open",
"(",
"env_path",
",",
"'w+'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"{",
"\"parameters\"",
":",
"{",
"\"ControllerCount\"",
":",
"control_scale",
",",
"\"ComputeCount\"",
":",
"compute_scale",
",",
"\"CephStorageCount\"",
":",
"ceph_storage_scale",
",",
"\"BlockStorageCount\"",
":",
"block_storage_scale",
",",
"\"ObjectStorageCount\"",
":",
"swift_storage_scale",
"}",
"}",
")",
")",
"return",
"env_path"
] |
Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
|
[
"Create",
"a",
"heat",
"environment",
"file"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L254-L289
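With the default arguments, the file written to ~/overcloud-env.json contains (key order may vary):

{"parameters": {"ControllerCount": 1, "ComputeCount": 1,
                "CephStorageCount": 0, "BlockStorageCount": 0,
                "ObjectStorageCount": 0}}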
|
243,857
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
set_nodes_state
|
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
|
python
|
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
log.debug(
"Setting provision state from {0} to '{1} for Node {2}"
.format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
if not wait_for_provision_state(baremetal_client, node.uuid,
target_state):
print("FAIL: State not updated for Node {0}".format(
node.uuid, file=sys.stderr))
else:
yield node.uuid
|
[
"def",
"set_nodes_state",
"(",
"baremetal_client",
",",
"nodes",
",",
"transition",
",",
"target_state",
",",
"skipped_states",
"=",
"(",
")",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
"+",
"\".set_nodes_state\"",
")",
"for",
"node",
"in",
"nodes",
":",
"if",
"node",
".",
"provision_state",
"in",
"skipped_states",
":",
"continue",
"log",
".",
"debug",
"(",
"\"Setting provision state from {0} to '{1} for Node {2}\"",
".",
"format",
"(",
"node",
".",
"provision_state",
",",
"transition",
",",
"node",
".",
"uuid",
")",
")",
"baremetal_client",
".",
"node",
".",
"set_provision_state",
"(",
"node",
".",
"uuid",
",",
"transition",
")",
"if",
"not",
"wait_for_provision_state",
"(",
"baremetal_client",
",",
"node",
".",
"uuid",
",",
"target_state",
")",
":",
"print",
"(",
"\"FAIL: State not updated for Node {0}\"",
".",
"format",
"(",
"node",
".",
"uuid",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
")",
"else",
":",
"yield",
"node",
".",
"uuid"
] |
Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
|
[
"Make",
"all",
"nodes",
"available",
"in",
"the",
"baremetal",
"service",
"for",
"a",
"deployment"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L292-L338
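This too is a generator and performs no work until consumed; e.g. to move every enrolled node to manageable (names assumed):

moved = list(set_nodes_state(baremetal_client, nodes,
                             "manage", "manageable",
                             skipped_states=("active",)))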
|
243,858
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
get_hiera_key
|
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
    :param key_name: Name of the key to retrieve
    :type key_name: str
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
|
python
|
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
    :param key_name: Name of the key to retrieve
    :type key_name: str
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
|
[
"def",
"get_hiera_key",
"(",
"key_name",
")",
":",
"command",
"=",
"[",
"\"hiera\"",
",",
"key_name",
"]",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"out",
",",
"err",
"=",
"p",
".",
"communicate",
"(",
")",
"return",
"out"
] |
Retrieve a key from the hiera store
:param key_name: Name of the key to retrieve
:type key_name: str
|
[
"Retrieve",
"a",
"key",
"from",
"the",
"hiera",
"store"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L341-L351
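Note that the return value is the raw stdout of the hiera CLI (bytes on Python 3, including the trailing newline), so callers usually strip it; the key name below is illustrative:

key = get_hiera_key("admin_password").strip()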
|
243,859
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
remove_known_hosts
|
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
|
python
|
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
|
[
"def",
"remove_known_hosts",
"(",
"overcloud_ip",
")",
":",
"known_hosts",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.ssh/known_hosts\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"known_hosts",
")",
":",
"command",
"=",
"[",
"'ssh-keygen'",
",",
"'-R'",
",",
"overcloud_ip",
",",
"'-f'",
",",
"known_hosts",
"]",
"subprocess",
".",
"check_call",
"(",
"command",
")"
] |
For a given IP address remove SSH keys from the known_hosts file
|
[
"For",
"a",
"given",
"IP",
"address",
"remove",
"SSH",
"keys",
"from",
"the",
"known_hosts",
"file"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L361-L368
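This is the programmatic equivalent of invoking ssh-keygen directly, guarded by a file-existence check (192.0.2.10 is a placeholder address):

remove_known_hosts("192.0.2.10")  # runs: ssh-keygen -R 192.0.2.10 -f ~/.ssh/known_hosts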
|
243,860
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/utils.py
|
file_checksum
|
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
        for fragment in iter(lambda: f.read(65536), b''):
checksum.update(fragment)
return checksum.hexdigest()
|
python
|
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
        for fragment in iter(lambda: f.read(65536), b''):
checksum.update(fragment)
return checksum.hexdigest()
|
[
"def",
"file_checksum",
"(",
"filepath",
")",
":",
"checksum",
"=",
"hashlib",
".",
"md5",
"(",
")",
"with",
"open",
"(",
"filepath",
",",
"'rb'",
")",
"as",
"f",
":",
"for",
"fragment",
"in",
"iter",
"(",
"lambda",
":",
"f",
".",
"read",
"(",
"65536",
")",
",",
"''",
")",
":",
"checksum",
".",
"update",
"(",
"fragment",
")",
"return",
"checksum",
".",
"hexdigest",
"(",
")"
] |
Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
|
[
"Calculate",
"md5",
"checksum",
"on",
"file"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L388-L399
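A usage sketch; the b'' sentinel above matters on Python 3 because the file is opened in binary mode, so read() returns bytes at EOF (the path is illustrative):

digest = file_checksum("/home/stack/image.qcow2")  # 32-character hex MD5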
|
243,861
|
lsst-sqre/BitlyOAuth2ProxySession
|
examples/example.py
|
main
|
def main():
"""Do the thing"""
username = os.environ["GITHUB_USERNAME"]
password = os.environ["GITHUB_PASSWORD"]
# Unless you have an LSST account you'd want to change these
baseurl = "https://logging.lsst.codes/oauth2/start"
authpage = "https://logging.lsst.codes/app/kibana"
session = bses.Session(oauth2_username=username,
oauth2_password=password,
authentication_base_url=baseurl)
resp = session.get(authpage)
print(resp)
if resp.status_code == 200:
print(resp.content)
|
python
|
def main():
"""Do the thing"""
username = os.environ["GITHUB_USERNAME"]
password = os.environ["GITHUB_PASSWORD"]
# Unless you have an LSST account you'd want to change these
baseurl = "https://logging.lsst.codes/oauth2/start"
authpage = "https://logging.lsst.codes/app/kibana"
session = bses.Session(oauth2_username=username,
oauth2_password=password,
authentication_base_url=baseurl)
resp = session.get(authpage)
print(resp)
if resp.status_code == 200:
print(resp.content)
|
[
"def",
"main",
"(",
")",
":",
"username",
"=",
"os",
".",
"environ",
"[",
"\"GITHUB_USERNAME\"",
"]",
"password",
"=",
"os",
".",
"environ",
"[",
"\"GITHUB_PASSWORD\"",
"]",
"# Unless you have an LSST account you'd want to change these",
"baseurl",
"=",
"\"https://logging.lsst.codes/oauth2/start\"",
"authpage",
"=",
"\"https://logging.lsst.codes/app/kibana\"",
"session",
"=",
"bses",
".",
"Session",
"(",
"oauth2_username",
"=",
"username",
",",
"oauth2_password",
"=",
"password",
",",
"authentication_base_url",
"=",
"baseurl",
")",
"resp",
"=",
"session",
".",
"get",
"(",
"authpage",
")",
"print",
"(",
"resp",
")",
"if",
"resp",
".",
"status_code",
"==",
"200",
":",
"print",
"(",
"resp",
".",
"content",
")"
] |
Do the thing
|
[
"Do",
"the",
"thing"
] |
4d3839cfb9b897f46cffc41a5f6ff7c645a5f202
|
https://github.com/lsst-sqre/BitlyOAuth2ProxySession/blob/4d3839cfb9b897f46cffc41a5f6ff7c645a5f202/examples/example.py#L8-L21
|
243,862
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker._assert_correct_model
|
def _assert_correct_model(model_to_check, model_reference, obj_name):
"""
Helper that asserts the model_to_check is the model_reference or one of
    its subclasses. If not, raise a ConfigurationException, using "obj_name"
to describe the name of the argument.
"""
if not issubclass(model_to_check, model_reference):
raise ConfigurationException('The %s model must be a subclass of %s'
% (obj_name, model_reference.__name__))
|
python
|
def _assert_correct_model(model_to_check, model_reference, obj_name):
"""
Helper that asserts the model_to_check is the model_reference or one of
    its subclasses. If not, raise a ConfigurationException, using "obj_name"
to describe the name of the argument.
"""
if not issubclass(model_to_check, model_reference):
raise ConfigurationException('The %s model must be a subclass of %s'
% (obj_name, model_reference.__name__))
|
[
"def",
"_assert_correct_model",
"(",
"model_to_check",
",",
"model_reference",
",",
"obj_name",
")",
":",
"if",
"not",
"issubclass",
"(",
"model_to_check",
",",
"model_reference",
")",
":",
"raise",
"ConfigurationException",
"(",
"'The %s model must be a subclass of %s'",
"%",
"(",
"obj_name",
",",
"model_reference",
".",
"__name__",
")",
")"
] |
Helper that asserts the model_to_check is the model_reference or one of
its subclasses. If not, raise a ConfigurationException, using "obj_name"
to describe the name of the argument.
|
[
"Helper",
"that",
"asserts",
"the",
"model_to_check",
"is",
"the",
"model_reference",
"or",
"one",
"of",
"its",
"subclasses",
".",
"If",
"not",
"raise",
"an",
"ImplementationError",
"using",
"obj_name",
"to",
"describe",
"the",
"name",
"of",
"the",
"argument",
"."
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L146-L154
|
243,863
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.handle_end_signal
|
def handle_end_signal(self):
"""
    Catch some system signals to handle them internally
"""
try:
signal.signal(signal.SIGTERM, self.catch_end_signal)
signal.signal(signal.SIGINT, self.catch_end_signal)
except ValueError:
self.log('Signals cannot be caught in a Thread', level='warning')
|
python
|
def handle_end_signal(self):
"""
    Catch some system signals to handle them internally
"""
try:
signal.signal(signal.SIGTERM, self.catch_end_signal)
signal.signal(signal.SIGINT, self.catch_end_signal)
except ValueError:
self.log('Signals cannot be caught in a Thread', level='warning')
|
[
"def",
"handle_end_signal",
"(",
"self",
")",
":",
"try",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"self",
".",
"catch_end_signal",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"self",
".",
"catch_end_signal",
")",
"except",
"ValueError",
":",
"self",
".",
"log",
"(",
"'Signals cannot be caught in a Thread'",
",",
"level",
"=",
"'warning'",
")"
] |
Catch some system signals to handle them internally
|
[
"Catch",
"some",
"system",
"signals",
"to",
"handle",
"them",
"internaly"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L156-L164
|
243,864
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.stop_handling_end_signal
|
def stop_handling_end_signal(self):
"""
Stop handling the SIGINT and SIGTERM signals
"""
try:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
except ValueError:
self.log('Signals cannot be caught in a Thread', level='warning')
|
python
|
def stop_handling_end_signal(self):
"""
Stop handling the SIGINT and SIGTERM signals
"""
try:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
except ValueError:
self.log('Signals cannot be caught in a Thread', level='warning')
|
[
"def",
"stop_handling_end_signal",
"(",
"self",
")",
":",
"try",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"signal",
".",
"SIG_DFL",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIG_DFL",
")",
"except",
"ValueError",
":",
"self",
".",
"log",
"(",
"'Signals cannot be caught in a Thread'",
",",
"level",
"=",
"'warning'",
")"
] |
Stop handling the SIGINT and SIGTERM signals
|
[
"Stop",
"handling",
"the",
"SIGINT",
"and",
"SIGTERM",
"signals"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L166-L174
|
243,865
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.set_logger
|
def set_logger(self):
"""
Prepare the logger, using self.logger_name and self.logger_level
"""
self.logger = logging.getLogger(self.logger_name)
self.logger.setLevel(self.logger_level)
|
python
|
def set_logger(self):
"""
Prepare the logger, using self.logger_name and self.logger_level
"""
self.logger = logging.getLogger(self.logger_name)
self.logger.setLevel(self.logger_level)
|
[
"def",
"set_logger",
"(",
"self",
")",
":",
"self",
".",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"self",
".",
"logger_name",
")",
"self",
".",
"logger",
".",
"setLevel",
"(",
"self",
".",
"logger_level",
")"
] |
Prepare the logger, using self.logger_name and self.logger_level
|
[
"Prepare",
"the",
"logger",
"using",
"self",
".",
"logger_name",
"and",
"self",
".",
"logger_level"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L176-L181
|
243,866
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.id
|
def id(self):
"""
Return an identifier for the worker to use in logging
"""
if not hasattr(self, '_id'):
self._id = str(threading.current_thread().ident + id(self))[-6:]
return self._id
|
python
|
def id(self):
"""
Return an identifier for the worker to use in logging
"""
if not hasattr(self, '_id'):
self._id = str(threading.current_thread().ident + id(self))[-6:]
return self._id
|
[
"def",
"id",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_id'",
")",
":",
"self",
".",
"_id",
"=",
"str",
"(",
"threading",
".",
"current_thread",
"(",
")",
".",
"ident",
"+",
"id",
"(",
"self",
")",
")",
"[",
"-",
"6",
":",
"]",
"return",
"self",
".",
"_id"
] |
Return an identifier for the worker to use in logging
|
[
"Return",
"an",
"identifier",
"for",
"the",
"worker",
"to",
"use",
"in",
"logging"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L184-L190
|
243,867
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.must_stop
|
def must_stop(self):
"""
Return True if the worker must stop when the current loop is over.
"""
return bool(self.terminate_gracefuly and self.end_signal_caught
or self.num_loops >= self.max_loops or self.end_forced
or self.wanted_end_date and datetime.utcnow() >= self.wanted_end_date)
|
python
|
def must_stop(self):
"""
Return True if the worker must stop when the current loop is over.
"""
return bool(self.terminate_gracefuly and self.end_signal_caught
or self.num_loops >= self.max_loops or self.end_forced
or self.wanted_end_date and datetime.utcnow() >= self.wanted_end_date)
|
[
"def",
"must_stop",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"terminate_gracefuly",
"and",
"self",
".",
"end_signal_caught",
"or",
"self",
".",
"num_loops",
">=",
"self",
".",
"max_loops",
"or",
"self",
".",
"end_forced",
"or",
"self",
".",
"wanted_end_date",
"and",
"datetime",
".",
"utcnow",
"(",
")",
">=",
"self",
".",
"wanted_end_date",
")"
] |
Return True if the worker must stop when the current loop is over.
|
[
"Return",
"True",
"if",
"the",
"worker",
"must",
"stop",
"when",
"the",
"current",
"loop",
"is",
"over",
"."
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L205-L211
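The expression relies on and binding tighter than or; fully parenthesized, the implicit grouping is:

# equivalent form with the grouping made explicit:
#   bool((self.terminate_gracefuly and self.end_signal_caught)
#        or (self.num_loops >= self.max_loops)
#        or self.end_forced
#        or (self.wanted_end_date
#            and datetime.utcnow() >= self.wanted_end_date))
assert (True and False or True) == ((True and False) or True)  # precedence demo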
|
243,868
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.set_status
|
def set_status(self, status):
"""
Save the new status and call all defined callbacks
"""
self.status = status
for callback in self._update_status_callbacks:
callback(self)
|
python
|
def set_status(self, status):
"""
Save the new status and call all defined callbacks
"""
self.status = status
for callback in self._update_status_callbacks:
callback(self)
|
[
"def",
"set_status",
"(",
"self",
",",
"status",
")",
":",
"self",
".",
"status",
"=",
"status",
"for",
"callback",
"in",
"self",
".",
"_update_status_callbacks",
":",
"callback",
"(",
"self",
")"
] |
Save the new status and call all defined callbacks
|
[
"Save",
"the",
"new",
"status",
"and",
"call",
"all",
"defined",
"callbacks"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L213-L219
|
243,869
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.wait_for_job
|
def wait_for_job(self):
"""
Use a redis blocking list call to wait for a job, and return it.
"""
blpop_result = self.connection.blpop(self.keys, self.timeout)
if blpop_result is None:
return None
queue_redis_key, job_ident = blpop_result
self.set_status('running')
return self.get_queue(queue_redis_key), self.get_job(job_ident)
|
python
|
def wait_for_job(self):
"""
Use a redis blocking list call to wait for a job, and return it.
"""
blpop_result = self.connection.blpop(self.keys, self.timeout)
if blpop_result is None:
return None
queue_redis_key, job_ident = blpop_result
self.set_status('running')
return self.get_queue(queue_redis_key), self.get_job(job_ident)
|
[
"def",
"wait_for_job",
"(",
"self",
")",
":",
"blpop_result",
"=",
"self",
".",
"connection",
".",
"blpop",
"(",
"self",
".",
"keys",
",",
"self",
".",
"timeout",
")",
"if",
"blpop_result",
"is",
"None",
":",
"return",
"None",
"queue_redis_key",
",",
"job_ident",
"=",
"blpop_result",
"self",
".",
"set_status",
"(",
"'running'",
")",
"return",
"self",
".",
"get_queue",
"(",
"queue_redis_key",
")",
",",
"self",
".",
"get_job",
"(",
"job_ident",
")"
] |
Use a redis blocking list call to wait for a job, and return it.
|
[
"Use",
"a",
"redis",
"blocking",
"list",
"call",
"to",
"wait",
"for",
"a",
"job",
"and",
"return",
"it",
"."
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L227-L236
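redis-py's blpop takes a sequence of keys plus a timeout in seconds and returns either None or a (key, value) pair, which is exactly what the unpacking above relies on; a standalone sketch with illustrative key names:

import redis

conn = redis.StrictRedis()
result = conn.blpop(["queue:1:waiting", "queue:0:waiting"], timeout=30)
if result is not None:
    queue_redis_key, job_ident = result  # same unpacking as wait_for_job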
|
243,870
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.get_queue
|
def get_queue(self, queue_redis_key):
"""
Return a queue based on the key used in redis to store the list
"""
try:
queue_pk = int(queue_redis_key.split(':')[-2])
except:
raise DoesNotExist('Unable to get the queue from the key %s' % queue_redis_key)
return self.queue_model.get(queue_pk)
|
python
|
def get_queue(self, queue_redis_key):
"""
Return a queue based on the key used in redis to store the list
"""
try:
queue_pk = int(queue_redis_key.split(':')[-2])
except:
raise DoesNotExist('Unable to get the queue from the key %s' % queue_redis_key)
return self.queue_model.get(queue_pk)
|
[
"def",
"get_queue",
"(",
"self",
",",
"queue_redis_key",
")",
":",
"try",
":",
"queue_pk",
"=",
"int",
"(",
"queue_redis_key",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"2",
"]",
")",
"except",
":",
"raise",
"DoesNotExist",
"(",
"'Unable to get the queue from the key %s'",
"%",
"queue_redis_key",
")",
"return",
"self",
".",
"queue_model",
".",
"get",
"(",
"queue_pk",
")"
] |
Return a queue based on the key used in redis to store the list
|
[
"Return",
"a",
"queue",
"based",
"on",
"the",
"key",
"used",
"in",
"redis",
"to",
"store",
"the",
"list"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L244-L252
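The parsing assumes a limpyd-style key layout in which the primary key is the second-to-last colon-separated segment; for an illustrative key:

key = "limpyd_jobs.models.Queue:42:waiting"  # layout assumed for illustration
assert int(key.split(":")[-2]) == 42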
|
243,871
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.update_keys
|
def update_keys(self):
"""
Update the redis keys to listen for new jobs priorities.
"""
self.keys = self.queue_model.get_waiting_keys(self.queues)
if not self.keys:
self.log('No queues yet', level='warning')
self.last_update_keys = datetime.utcnow()
|
python
|
def update_keys(self):
"""
Update the redis keys to listen for new jobs priorities.
"""
self.keys = self.queue_model.get_waiting_keys(self.queues)
if not self.keys:
self.log('No queues yet', level='warning')
self.last_update_keys = datetime.utcnow()
|
[
"def",
"update_keys",
"(",
"self",
")",
":",
"self",
".",
"keys",
"=",
"self",
".",
"queue_model",
".",
"get_waiting_keys",
"(",
"self",
".",
"queues",
")",
"if",
"not",
"self",
".",
"keys",
":",
"self",
".",
"log",
"(",
"'No queues yet'",
",",
"level",
"=",
"'warning'",
")",
"self",
".",
"last_update_keys",
"=",
"datetime",
".",
"utcnow",
"(",
")"
] |
Update the redis keys to listen for new jobs priorities.
|
[
"Update",
"the",
"redis",
"keys",
"to",
"listen",
"for",
"new",
"jobs",
"priorities",
"."
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L285-L293
|
243,872
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.run
|
def run(self):
"""
The main method of the worker. Will ask redis for list items via
blocking calls, get jobs from them, try to execute these jobs, and end
when needed.
"""
# if status is not None, we already had a run !
if self.status:
self.set_status('aborted')
raise LimpydJobsException('This worker run is already terminated')
self.set_status('starting')
self.start_date = datetime.utcnow()
if self.max_duration:
self.wanted_end_date = self.start_date + self.max_duration
must_stop = self.must_stop()
if not must_stop:
# get keys
while not self.keys and not must_stop:
self.update_keys()
if not self.keys:
sleep(self.fetch_priorities_delay)
must_stop = self.must_stop()
if not must_stop:
# wait for queues available if no ones are yet
self.requeue_delayed_jobs()
self.run_started()
self._main_loop()
self.set_status('terminated')
self.end_date = datetime.utcnow()
self.run_ended()
if self.terminate_gracefuly:
self.stop_handling_end_signal()
|
python
|
def run(self):
"""
The main method of the worker. Will ask redis for list items via
blocking calls, get jobs from them, try to execute these jobs, and end
when needed.
"""
# if status is not None, we already had a run !
if self.status:
self.set_status('aborted')
raise LimpydJobsException('This worker run is already terminated')
self.set_status('starting')
self.start_date = datetime.utcnow()
if self.max_duration:
self.wanted_end_date = self.start_date + self.max_duration
must_stop = self.must_stop()
if not must_stop:
# get keys
while not self.keys and not must_stop:
self.update_keys()
if not self.keys:
sleep(self.fetch_priorities_delay)
must_stop = self.must_stop()
if not must_stop:
# wait for queues available if no ones are yet
self.requeue_delayed_jobs()
self.run_started()
self._main_loop()
self.set_status('terminated')
self.end_date = datetime.utcnow()
self.run_ended()
if self.terminate_gracefuly:
self.stop_handling_end_signal()
|
[
"def",
"run",
"(",
"self",
")",
":",
"# if status is not None, we already had a run !",
"if",
"self",
".",
"status",
":",
"self",
".",
"set_status",
"(",
"'aborted'",
")",
"raise",
"LimpydJobsException",
"(",
"'This worker run is already terminated'",
")",
"self",
".",
"set_status",
"(",
"'starting'",
")",
"self",
".",
"start_date",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"self",
".",
"max_duration",
":",
"self",
".",
"wanted_end_date",
"=",
"self",
".",
"start_date",
"+",
"self",
".",
"max_duration",
"must_stop",
"=",
"self",
".",
"must_stop",
"(",
")",
"if",
"not",
"must_stop",
":",
"# get keys",
"while",
"not",
"self",
".",
"keys",
"and",
"not",
"must_stop",
":",
"self",
".",
"update_keys",
"(",
")",
"if",
"not",
"self",
".",
"keys",
":",
"sleep",
"(",
"self",
".",
"fetch_priorities_delay",
")",
"must_stop",
"=",
"self",
".",
"must_stop",
"(",
")",
"if",
"not",
"must_stop",
":",
"# wait for queues available if no ones are yet",
"self",
".",
"requeue_delayed_jobs",
"(",
")",
"self",
".",
"run_started",
"(",
")",
"self",
".",
"_main_loop",
"(",
")",
"self",
".",
"set_status",
"(",
"'terminated'",
")",
"self",
".",
"end_date",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"self",
".",
"run_ended",
"(",
")",
"if",
"self",
".",
"terminate_gracefuly",
":",
"self",
".",
"stop_handling_end_signal",
"(",
")"
] |
The main method of the worker. Will ask redis for list items via
blocking calls, get jobs from them, try to execute these jobs, and end
when needed.
|
[
"The",
"main",
"method",
"of",
"the",
"worker",
".",
"Will",
"ask",
"redis",
"for",
"list",
"items",
"via",
"blocking",
"calls",
"get",
"jobs",
"from",
"them",
"try",
"to",
"execute",
"these",
"jobs",
"and",
"end",
"when",
"needed",
"."
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L313-L351
|
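run above refuses to be called twice on the same worker: a non-None status means a run already happened, so the call is aborted. A standalone sketch of that single-run guard (the class name is illustrative, not the real Worker):

class OneShotRunner(object):
    def __init__(self):
        self.status = None

    def run(self):
        if self.status:  # any previous run leaves a status behind
            self.status = 'aborted'
            raise RuntimeError('This runner already ran')
        self.status = 'starting'
        # ... main loop would go here ...
        self.status = 'terminated'

runner = OneShotRunner()
runner.run()       # first call: runs to completion
try:
    runner.run()   # second call: refused
except RuntimeError:
    pass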
243,873
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.elapsed
|
def elapsed(self):
"""
Return a timedelta representation of the time passed since the worker
was running.
"""
if not self.start_date:
return None
return (self.end_date or datetime.utcnow()) - self.start_date
|
python
|
def elapsed(self):
"""
Return a timedelta representation of the time passed since the worker
was running.
"""
if not self.start_date:
return None
return (self.end_date or datetime.utcnow()) - self.start_date
|
[
"def",
"elapsed",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"start_date",
":",
"return",
"None",
"return",
"(",
"self",
".",
"end_date",
"or",
"datetime",
".",
"utcnow",
"(",
")",
")",
"-",
"self",
".",
"start_date"
] |
Return a timedelta representation of the time passed since the worker
was running.
|
[
"Return",
"a",
"timedelta",
"representation",
"of",
"the",
"time",
"passed",
"sine",
"the",
"worker",
"was",
"running",
"."
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L354-L361
|
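elapsed above uses the stored end_date once the run is over and falls back to the current time while the run is still in progress. The same expression in isolation:

from datetime import datetime, timedelta

start_date = datetime.utcnow() - timedelta(seconds=5)
end_date = None  # still running: fall back to utcnow()
elapsed = (end_date or datetime.utcnow()) - start_date
assert elapsed >= timedelta(seconds=5)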
243,874
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.requeue_delayed_jobs
|
def requeue_delayed_jobs(self):
"""
Requeue each delayed job that is now ready to be executed
"""
failures = []
for queue in self.queue_model.get_all_by_priority(self.queues):
failures.extend(queue.requeue_delayed_jobs())
self.last_requeue_delayed = datetime.utcnow()
for failure in failures:
self.log('Unable to requeue %s: %s' % failure)
|
python
|
def requeue_delayed_jobs(self):
"""
Requeue each delayed job that is now ready to be executed
"""
failures = []
for queue in self.queue_model.get_all_by_priority(self.queues):
failures.extend(queue.requeue_delayed_jobs())
self.last_requeue_delayed = datetime.utcnow()
for failure in failures:
self.log('Unable to requeue %s: %s' % failure)
|
[
"def",
"requeue_delayed_jobs",
"(",
"self",
")",
":",
"failures",
"=",
"[",
"]",
"for",
"queue",
"in",
"self",
".",
"queue_model",
".",
"get_all_by_priority",
"(",
"self",
".",
"queues",
")",
":",
"failures",
".",
"extend",
"(",
"queue",
".",
"requeue_delayed_jobs",
"(",
")",
")",
"self",
".",
"last_requeue_delayed",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"for",
"failure",
"in",
"failures",
":",
"self",
".",
"log",
"(",
"'Unable to requeue %s: %s'",
"%",
"failure",
")"
] |
Requeue each delayed job that is now ready to be executed
|
[
"Requeue",
"each",
"delayed",
"job",
"that",
"are",
"now",
"ready",
"to",
"be",
"executed"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L363-L374
|
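The log call at the end of requeue_delayed_jobs formats each failure with two placeholders and a single % operand, which only works if every failure is a 2-tuple; that shape is an assumption drawn from the formatting itself:

failure = ('job:12', 'connection lost')  # assumed (ident, error) shape
print('Unable to requeue %s: %s' % failure)
# -> Unable to requeue job:12: connection lost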
243,875
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker._main_loop
|
def _main_loop(self):
"""
Run jobs until must_stop returns True
"""
fetch_priorities_delay = timedelta(seconds=self.fetch_priorities_delay)
fetch_delayed_delay = timedelta(seconds=self.fetch_delayed_delay)
while not self.must_stop():
self.set_status('waiting')
if self.last_update_keys + fetch_priorities_delay < datetime.utcnow():
self.update_keys()
if self.last_requeue_delayed + fetch_delayed_delay < datetime.utcnow():
self.requeue_delayed_jobs()
try:
queue_and_job = self.wait_for_job()
if queue_and_job is None:
# timeout for blpop
continue
queue, job = queue_and_job
except Exception as e:
self.log('Unable to get job: %s\n%s'
% (str(e), traceback.format_exc()), level='error')
else:
self.num_loops += 1
try:
identifier = 'pk:%s' % job.pk.get()
except Exception as e:
identifier = '??'
try:
self.set_status('running')
identifier, status = job.hmget('identifier', 'status')
# some cache, don't count on it on subclasses
job._cached_identifier = identifier
job._cached_status = status
queue._cached_name = queue.name.hget()
if status == STATUSES.DELAYED:
self.job_delayed(job, queue)
elif status != STATUSES.WAITING:
self.job_skipped(job, queue)
else:
try:
self.job_started(job, queue)
job_result = self.callback(job, queue)
except Exception as e:
trace = None
if self.save_tracebacks:
trace = traceback.format_exc()
self.job_error(job, queue, e, trace)
else:
job._cached_status = job.status.hget()
if job._cached_status == STATUSES.DELAYED:
self.job_delayed(job, queue)
elif job._cached_status == STATUSES.CANCELED:
self.job_skipped(job, queue)
else:
self.job_success(job, queue, job_result)
except Exception as e:
self.log('[%s] unexpected error: %s\n%s'
% (identifier, str(e), traceback.format_exc()), level='error')
try:
queue.errors.rpush(job.ident)
except Exception as e:
self.log('[%s] unable to add the error in the queue: %s\n%s'
% (identifier, str(e), traceback.format_exc()), level='error')
|
python
|
def _main_loop(self):
"""
Run jobs until must_stop returns True
"""
fetch_priorities_delay = timedelta(seconds=self.fetch_priorities_delay)
fetch_delayed_delay = timedelta(seconds=self.fetch_delayed_delay)
while not self.must_stop():
self.set_status('waiting')
if self.last_update_keys + fetch_priorities_delay < datetime.utcnow():
self.update_keys()
if self.last_requeue_delayed + fetch_delayed_delay < datetime.utcnow():
self.requeue_delayed_jobs()
try:
queue_and_job = self.wait_for_job()
if queue_and_job is None:
# timeout for blpop
continue
queue, job = queue_and_job
except Exception as e:
self.log('Unable to get job: %s\n%s'
% (str(e), traceback.format_exc()), level='error')
else:
self.num_loops += 1
try:
identifier = 'pk:%s' % job.pk.get()
except Exception as e:
identifier = '??'
try:
self.set_status('running')
identifier, status = job.hmget('identifier', 'status')
# some cache, don't count on it on subclasses
job._cached_identifier = identifier
job._cached_status = status
queue._cached_name = queue.name.hget()
if status == STATUSES.DELAYED:
self.job_delayed(job, queue)
elif status != STATUSES.WAITING:
self.job_skipped(job, queue)
else:
try:
self.job_started(job, queue)
job_result = self.callback(job, queue)
except Exception as e:
trace = None
if self.save_tracebacks:
trace = traceback.format_exc()
self.job_error(job, queue, e, trace)
else:
job._cached_status = job.status.hget()
if job._cached_status == STATUSES.DELAYED:
self.job_delayed(job, queue)
elif job._cached_status == STATUSES.CANCELED:
self.job_skipped(job, queue)
else:
self.job_success(job, queue, job_result)
except Exception as e:
self.log('[%s] unexpected error: %s\n%s'
% (identifier, str(e), traceback.format_exc()), level='error')
try:
queue.errors.rpush(job.ident)
except Exception as e:
self.log('[%s] unable to add the error in the queue: %s\n%s'
% (identifier, str(e), traceback.format_exc()), level='error')
|
[
"def",
"_main_loop",
"(",
"self",
")",
":",
"fetch_priorities_delay",
"=",
"timedelta",
"(",
"seconds",
"=",
"self",
".",
"fetch_priorities_delay",
")",
"fetch_delayed_delay",
"=",
"timedelta",
"(",
"seconds",
"=",
"self",
".",
"fetch_delayed_delay",
")",
"while",
"not",
"self",
".",
"must_stop",
"(",
")",
":",
"self",
".",
"set_status",
"(",
"'waiting'",
")",
"if",
"self",
".",
"last_update_keys",
"+",
"fetch_priorities_delay",
"<",
"datetime",
".",
"utcnow",
"(",
")",
":",
"self",
".",
"update_keys",
"(",
")",
"if",
"self",
".",
"last_requeue_delayed",
"+",
"fetch_delayed_delay",
"<",
"datetime",
".",
"utcnow",
"(",
")",
":",
"self",
".",
"requeue_delayed_jobs",
"(",
")",
"try",
":",
"queue_and_job",
"=",
"self",
".",
"wait_for_job",
"(",
")",
"if",
"queue_and_job",
"is",
"None",
":",
"# timeout for blpop",
"continue",
"queue",
",",
"job",
"=",
"queue_and_job",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"'Unable to get job: %s\\n%s'",
"%",
"(",
"str",
"(",
"e",
")",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
",",
"level",
"=",
"'error'",
")",
"else",
":",
"self",
".",
"num_loops",
"+=",
"1",
"try",
":",
"identifier",
"=",
"'pk:%s'",
"%",
"job",
".",
"pk",
".",
"get",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"identifier",
"=",
"'??'",
"try",
":",
"self",
".",
"set_status",
"(",
"'running'",
")",
"identifier",
",",
"status",
"=",
"job",
".",
"hmget",
"(",
"'identifier'",
",",
"'status'",
")",
"# some cache, don't count on it on subclasses",
"job",
".",
"_cached_identifier",
"=",
"identifier",
"job",
".",
"_cached_status",
"=",
"status",
"queue",
".",
"_cached_name",
"=",
"queue",
".",
"name",
".",
"hget",
"(",
")",
"if",
"status",
"==",
"STATUSES",
".",
"DELAYED",
":",
"self",
".",
"job_delayed",
"(",
"job",
",",
"queue",
")",
"elif",
"status",
"!=",
"STATUSES",
".",
"WAITING",
":",
"self",
".",
"job_skipped",
"(",
"job",
",",
"queue",
")",
"else",
":",
"try",
":",
"self",
".",
"job_started",
"(",
"job",
",",
"queue",
")",
"job_result",
"=",
"self",
".",
"callback",
"(",
"job",
",",
"queue",
")",
"except",
"Exception",
"as",
"e",
":",
"trace",
"=",
"None",
"if",
"self",
".",
"save_tracebacks",
":",
"trace",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"self",
".",
"job_error",
"(",
"job",
",",
"queue",
",",
"e",
",",
"trace",
")",
"else",
":",
"job",
".",
"_cached_status",
"=",
"job",
".",
"status",
".",
"hget",
"(",
")",
"if",
"job",
".",
"_cached_status",
"==",
"STATUSES",
".",
"DELAYED",
":",
"self",
".",
"job_delayed",
"(",
"job",
",",
"queue",
")",
"elif",
"job",
".",
"_cached_status",
"==",
"STATUSES",
".",
"CANCELED",
":",
"self",
".",
"job_skipped",
"(",
"job",
",",
"queue",
")",
"else",
":",
"self",
".",
"job_success",
"(",
"job",
",",
"queue",
",",
"job_result",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"'[%s] unexpected error: %s\\n%s'",
"%",
"(",
"identifier",
",",
"str",
"(",
"e",
")",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
",",
"level",
"=",
"'error'",
")",
"try",
":",
"queue",
".",
"errors",
".",
"rpush",
"(",
"job",
".",
"ident",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"'[%s] unable to add the error in the queue: %s\\n%s'",
"%",
"(",
"identifier",
",",
"str",
"(",
"e",
")",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
",",
"level",
"=",
"'error'",
")"
] |
Run jobs until must_stop returns True
|
[
"Run",
"jobs",
"until",
"must_stop",
"returns",
"True"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L376-L443
|
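Besides running jobs, _main_loop above refreshes its keys and requeues delayed jobs on fixed schedules, by checking whether a last-run timestamp plus a delay is already in the past. The pattern in isolation:

from datetime import datetime, timedelta

last_update_keys = datetime.utcnow() - timedelta(seconds=30)
fetch_priorities_delay = timedelta(seconds=25)

if last_update_keys + fetch_priorities_delay < datetime.utcnow():
    # 30s have passed, the 25s delay expired: refresh now
    last_update_keys = datetime.utcnow()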
243,876
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_error
|
def job_error(self, job, queue, exception, trace=None):
"""
Called when an exception was raised during the execute call for a job.
"""
to_be_requeued = not job.must_be_cancelled_on_error and self.requeue_times and self.requeue_times >= int(job.tries.hget() or 0)
if not to_be_requeued:
job.queued.delete()
job.hmset(end=str(datetime.utcnow()), status=STATUSES.ERROR)
queue.errors.rpush(job.ident)
if self.save_errors:
additional_fields = self.additional_error_fields(job, queue, exception)
self.error_model.add_error(queue_name=queue._cached_name,
job=job,
error=exception,
trace=trace,
**additional_fields)
self.log(self.job_error_message(job, queue, to_be_requeued, exception, trace), level='error')
if hasattr(job, 'on_error'):
job.on_error(queue, exception, trace)
# requeue the job if needed
if to_be_requeued:
priority = queue.priority.hget()
if self.requeue_priority_delta:
priority = int(priority) + self.requeue_priority_delta
self.requeue_job(job, queue, priority, delayed_for=self.requeue_delay_delta)
|
python
|
def job_error(self, job, queue, exception, trace=None):
"""
Called when an exception was raised during the execute call for a job.
"""
to_be_requeued = not job.must_be_cancelled_on_error and self.requeue_times and self.requeue_times >= int(job.tries.hget() or 0)
if not to_be_requeued:
job.queued.delete()
job.hmset(end=str(datetime.utcnow()), status=STATUSES.ERROR)
queue.errors.rpush(job.ident)
if self.save_errors:
additional_fields = self.additional_error_fields(job, queue, exception)
self.error_model.add_error(queue_name=queue._cached_name,
job=job,
error=exception,
trace=trace,
**additional_fields)
self.log(self.job_error_message(job, queue, to_be_requeued, exception, trace), level='error')
if hasattr(job, 'on_error'):
job.on_error(queue, exception, trace)
# requeue the job if needed
if to_be_requeued:
priority = queue.priority.hget()
if self.requeue_priority_delta:
priority = int(priority) + self.requeue_priority_delta
self.requeue_job(job, queue, priority, delayed_for=self.requeue_delay_delta)
|
[
"def",
"job_error",
"(",
"self",
",",
"job",
",",
"queue",
",",
"exception",
",",
"trace",
"=",
"None",
")",
":",
"to_be_requeued",
"=",
"not",
"job",
".",
"must_be_cancelled_on_error",
"and",
"self",
".",
"requeue_times",
"and",
"self",
".",
"requeue_times",
">=",
"int",
"(",
"job",
".",
"tries",
".",
"hget",
"(",
")",
"or",
"0",
")",
"if",
"not",
"to_be_requeued",
":",
"job",
".",
"queued",
".",
"delete",
"(",
")",
"job",
".",
"hmset",
"(",
"end",
"=",
"str",
"(",
"datetime",
".",
"utcnow",
"(",
")",
")",
",",
"status",
"=",
"STATUSES",
".",
"ERROR",
")",
"queue",
".",
"errors",
".",
"rpush",
"(",
"job",
".",
"ident",
")",
"if",
"self",
".",
"save_errors",
":",
"additional_fields",
"=",
"self",
".",
"additional_error_fields",
"(",
"job",
",",
"queue",
",",
"exception",
")",
"self",
".",
"error_model",
".",
"add_error",
"(",
"queue_name",
"=",
"queue",
".",
"_cached_name",
",",
"job",
"=",
"job",
",",
"error",
"=",
"exception",
",",
"trace",
"=",
"trace",
",",
"*",
"*",
"additional_fields",
")",
"self",
".",
"log",
"(",
"self",
".",
"job_error_message",
"(",
"job",
",",
"queue",
",",
"to_be_requeued",
",",
"exception",
",",
"trace",
")",
",",
"level",
"=",
"'error'",
")",
"if",
"hasattr",
"(",
"job",
",",
"'on_error'",
")",
":",
"job",
".",
"on_error",
"(",
"queue",
",",
"exception",
",",
"trace",
")",
"# requeue the job if needed",
"if",
"to_be_requeued",
":",
"priority",
"=",
"queue",
".",
"priority",
".",
"hget",
"(",
")",
"if",
"self",
".",
"requeue_priority_delta",
":",
"priority",
"=",
"int",
"(",
"priority",
")",
"+",
"self",
".",
"requeue_priority_delta",
"self",
".",
"requeue_job",
"(",
"job",
",",
"queue",
",",
"priority",
",",
"delayed_for",
"=",
"self",
".",
"requeue_delay_delta",
")"
] |
Called when an exception was raised during the execute call for a job.
|
[
"Called",
"when",
"an",
"exception",
"was",
"raised",
"during",
"the",
"execute",
"call",
"for",
"a",
"job",
"."
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L458-L490
|
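The to_be_requeued expression in job_error combines three conditions: the job is not flagged for cancellation on error, the worker has a requeue budget, and the tries counter (read back from redis as a string, possibly unset) has not exceeded that budget. A sketch with the same short-circuit logic (the function name is invented for illustration):

def should_requeue(cancel_on_error, requeue_times, tries):
    # tries comes back from redis as a string or None, hence int(... or 0)
    return bool((not cancel_on_error) and requeue_times
                and requeue_times >= int(tries or 0))

assert should_requeue(False, 3, '2') is True    # budget left: retry
assert should_requeue(False, 3, '4') is False   # budget exhausted
assert should_requeue(True, 3, None) is False   # cancelled on error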
243,877
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.requeue_job
|
def requeue_job(self, job, queue, priority, delayed_for=None):
"""
Requeue a job in a queue with the given priority, possibly delayed
"""
job.requeue(queue_name=queue._cached_name,
priority=priority,
delayed_for=delayed_for,
queue_model=self.queue_model)
if hasattr(job, 'on_requeued'):
job.on_requeued(queue)
self.log(self.job_requeue_message(job, queue))
|
python
|
def requeue_job(self, job, queue, priority, delayed_for=None):
"""
Requeue a job in a queue with the given priority, possibly delayed
"""
job.requeue(queue_name=queue._cached_name,
priority=priority,
delayed_for=delayed_for,
queue_model=self.queue_model)
if hasattr(job, 'on_requeued'):
job.on_requeued(queue)
self.log(self.job_requeue_message(job, queue))
|
[
"def",
"requeue_job",
"(",
"self",
",",
"job",
",",
"queue",
",",
"priority",
",",
"delayed_for",
"=",
"None",
")",
":",
"job",
".",
"requeue",
"(",
"queue_name",
"=",
"queue",
".",
"_cached_name",
",",
"priority",
"=",
"priority",
",",
"delayed_for",
"=",
"delayed_for",
",",
"queue_model",
"=",
"self",
".",
"queue_model",
")",
"if",
"hasattr",
"(",
"job",
",",
"'on_requeued'",
")",
":",
"job",
".",
"on_requeued",
"(",
"queue",
")",
"self",
".",
"log",
"(",
"self",
".",
"job_requeue_message",
"(",
"job",
",",
"queue",
")",
")"
] |
Requeue a job in a queue with the given priority, possibly delayed
|
[
"Requeue",
"a",
"job",
"in",
"a",
"queue",
"with",
"the",
"given",
"priority",
"possibly",
"delayed"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L492-L504
|
243,878
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_error_message
|
def job_error_message(self, job, queue, to_be_requeued, exception, trace=None):
"""
Return the message to log when a job raised an error
"""
return '[%s|%s|%s] error: %s [%s]' % (queue._cached_name,
job.pk.get(),
job._cached_identifier,
str(exception),
'requeued' if to_be_requeued else 'NOT requeued')
|
python
|
def job_error_message(self, job, queue, to_be_requeued, exception, trace=None):
"""
Return the message to log when a job raised an error
"""
return '[%s|%s|%s] error: %s [%s]' % (queue._cached_name,
job.pk.get(),
job._cached_identifier,
str(exception),
'requeued' if to_be_requeued else 'NOT requeued')
|
[
"def",
"job_error_message",
"(",
"self",
",",
"job",
",",
"queue",
",",
"to_be_requeued",
",",
"exception",
",",
"trace",
"=",
"None",
")",
":",
"return",
"'[%s|%s|%s] error: %s [%s]'",
"%",
"(",
"queue",
".",
"_cached_name",
",",
"job",
".",
"pk",
".",
"get",
"(",
")",
",",
"job",
".",
"_cached_identifier",
",",
"str",
"(",
"exception",
")",
",",
"'requeued'",
"if",
"to_be_requeued",
"else",
"'NOT requeued'",
")"
] |
Return the message to log when a job raised an error
|
[
"Return",
"the",
"message",
"to",
"log",
"when",
"a",
"job",
"raised",
"an",
"error"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L506-L514
|
243,879
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_requeue_message
|
def job_requeue_message(self, job, queue):
"""
Return the message to log when a job is requeued
"""
priority, delayed_until = job.hmget('priority', 'delayed_until')
msg = '[%s|%s|%s] requeued with priority %s'
args = [queue._cached_name, job.pk.get(), job._cached_identifier, priority]
if delayed_until:
msg += ', delayed until %s'
args.append(delayed_until)
return msg % tuple(args)
|
python
|
def job_requeue_message(self, job, queue):
"""
Return the message to log when a job is requeued
"""
priority, delayed_until = job.hmget('priority', 'delayed_until')
msg = '[%s|%s|%s] requeued with priority %s'
args = [queue._cached_name, job.pk.get(), job._cached_identifier, priority]
if delayed_until:
msg += ', delayed until %s'
args.append(delayed_until)
return msg % tuple(args)
|
[
"def",
"job_requeue_message",
"(",
"self",
",",
"job",
",",
"queue",
")",
":",
"priority",
",",
"delayed_until",
"=",
"job",
".",
"hmget",
"(",
"'priority'",
",",
"'delayed_until'",
")",
"msg",
"=",
"'[%s|%s|%s] requeued with priority %s'",
"args",
"=",
"[",
"queue",
".",
"_cached_name",
",",
"job",
".",
"pk",
".",
"get",
"(",
")",
",",
"job",
".",
"_cached_identifier",
",",
"priority",
"]",
"if",
"delayed_until",
":",
"msg",
"+=",
"', delayed until %s'",
"args",
".",
"append",
"(",
"delayed_until",
")",
"return",
"msg",
"%",
"tuple",
"(",
"args",
")"
] |
Return the message to log when a job is requeued
|
[
"Return",
"the",
"message",
"to",
"log",
"when",
"a",
"job",
"is",
"requeued"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L516-L529
|
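job_requeue_message above grows both the format string and the argument list when a delay is present, then applies them in a single % call. The same two-branch formatting in isolation (sample values invented):

msg = '[%s|%s|%s] requeued with priority %s'
args = ['emails', 3, 'send-email-3', 1]
delayed_until = '2020-01-01 00:00:00'
if delayed_until:
    msg += ', delayed until %s'
    args.append(delayed_until)
print(msg % tuple(args))
# -> [emails|3|send-email-3] requeued with priority 1, delayed until 2020-01-01 00:00:00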
243,880
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_success
|
def job_success(self, job, queue, job_result):
"""
Called just after an execute call was successful.
job_result is the value returned by the callback, if any.
"""
job.queued.delete()
job.hmset(end=str(datetime.utcnow()), status=STATUSES.SUCCESS)
queue.success.rpush(job.ident)
self.log(self.job_success_message(job, queue, job_result))
if hasattr(job, 'on_success'):
job.on_success(queue, job_result)
|
python
|
def job_success(self, job, queue, job_result):
"""
Called just after an execute call was successful.
job_result is the value returned by the callback, if any.
"""
job.queued.delete()
job.hmset(end=str(datetime.utcnow()), status=STATUSES.SUCCESS)
queue.success.rpush(job.ident)
self.log(self.job_success_message(job, queue, job_result))
if hasattr(job, 'on_success'):
job.on_success(queue, job_result)
|
[
"def",
"job_success",
"(",
"self",
",",
"job",
",",
"queue",
",",
"job_result",
")",
":",
"job",
".",
"queued",
".",
"delete",
"(",
")",
"job",
".",
"hmset",
"(",
"end",
"=",
"str",
"(",
"datetime",
".",
"utcnow",
"(",
")",
")",
",",
"status",
"=",
"STATUSES",
".",
"SUCCESS",
")",
"queue",
".",
"success",
".",
"rpush",
"(",
"job",
".",
"ident",
")",
"self",
".",
"log",
"(",
"self",
".",
"job_success_message",
"(",
"job",
",",
"queue",
",",
"job_result",
")",
")",
"if",
"hasattr",
"(",
"job",
",",
"'on_success'",
")",
":",
"job",
".",
"on_success",
"(",
"queue",
",",
"job_result",
")"
] |
Called just after an execute call was successful.
job_result is the value returned by the callback, if any.
|
[
"Called",
"just",
"after",
"an",
"execute",
"call",
"was",
"successful",
".",
"job_result",
"is",
"the",
"value",
"returned",
"by",
"the",
"callback",
"if",
"any",
"."
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L531-L541
|
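job_success above (like the other job_* callbacks) invokes an on_success hook only if the job class defines one, so plain jobs need no stub methods. The hasattr pattern in isolation (class names invented):

class JobWithHook(object):
    def on_success(self, queue, result):
        print('done on %s: %r' % (queue, result))

class PlainJob(object):
    pass

for job in (JobWithHook(), PlainJob()):
    if hasattr(job, 'on_success'):  # silently skipped for PlainJob
        job.on_success('emails', 42)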
243,881
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_success_message
|
def job_success_message(self, job, queue, job_result):
"""
Return the message to log when a job is successful
"""
return '[%s|%s|%s] success, in %s' % (queue._cached_name, job.pk.get(),
job._cached_identifier, job.duration)
|
python
|
def job_success_message(self, job, queue, job_result):
"""
Return the message to log when a job is successful
"""
return '[%s|%s|%s] success, in %s' % (queue._cached_name, job.pk.get(),
job._cached_identifier, job.duration)
|
[
"def",
"job_success_message",
"(",
"self",
",",
"job",
",",
"queue",
",",
"job_result",
")",
":",
"return",
"'[%s|%s|%s] success, in %s'",
"%",
"(",
"queue",
".",
"_cached_name",
",",
"job",
".",
"pk",
".",
"get",
"(",
")",
",",
"job",
".",
"_cached_identifier",
",",
"job",
".",
"duration",
")"
] |
Return the message to log when a job is successful
|
[
"Return",
"the",
"message",
"to",
"log",
"when",
"a",
"job",
"is",
"successful"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L543-L548
|
243,882
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_started
|
def job_started(self, job, queue):
"""
Called just before the execution of the job
"""
job.hmset(start=str(datetime.utcnow()), status=STATUSES.RUNNING)
job.tries.hincrby(1)
self.log(self.job_started_message(job, queue))
if hasattr(job, 'on_started'):
job.on_started(queue)
|
python
|
def job_started(self, job, queue):
"""
Called just before the execution of the job
"""
job.hmset(start=str(datetime.utcnow()), status=STATUSES.RUNNING)
job.tries.hincrby(1)
self.log(self.job_started_message(job, queue))
if hasattr(job, 'on_started'):
job.on_started(queue)
|
[
"def",
"job_started",
"(",
"self",
",",
"job",
",",
"queue",
")",
":",
"job",
".",
"hmset",
"(",
"start",
"=",
"str",
"(",
"datetime",
".",
"utcnow",
"(",
")",
")",
",",
"status",
"=",
"STATUSES",
".",
"RUNNING",
")",
"job",
".",
"tries",
".",
"hincrby",
"(",
"1",
")",
"self",
".",
"log",
"(",
"self",
".",
"job_started_message",
"(",
"job",
",",
"queue",
")",
")",
"if",
"hasattr",
"(",
"job",
",",
"'on_started'",
")",
":",
"job",
".",
"on_started",
"(",
"queue",
")"
] |
Called just before the execution of the job
|
[
"Called",
"just",
"before",
"the",
"execution",
"of",
"the",
"job"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L550-L558
|
243,883
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_started_message
|
def job_started_message(self, job, queue):
"""
Return the message to log just before the execution of the job
"""
return '[%s|%s|%s] starting' % (queue._cached_name, job.pk.get(),
job._cached_identifier)
|
python
|
def job_started_message(self, job, queue):
"""
Return the message to log just before the execution of the job
"""
return '[%s|%s|%s] starting' % (queue._cached_name, job.pk.get(),
job._cached_identifier)
|
[
"def",
"job_started_message",
"(",
"self",
",",
"job",
",",
"queue",
")",
":",
"return",
"'[%s|%s|%s] starting'",
"%",
"(",
"queue",
".",
"_cached_name",
",",
"job",
".",
"pk",
".",
"get",
"(",
")",
",",
"job",
".",
"_cached_identifier",
")"
] |
Return the message to log just before the execution of the job
|
[
"Return",
"the",
"message",
"to",
"log",
"just",
"befre",
"the",
"execution",
"of",
"the",
"job"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L560-L565
|
243,884
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_skipped
|
def job_skipped(self, job, queue):
"""
Called if a job, before trying to run it, does not have the "waiting" status,
or, after run, if its status was set to "canceled"
"""
job.queued.delete()
self.log(self.job_skipped_message(job, queue), level='warning')
if hasattr(job, 'on_skipped'):
job.on_skipped(queue)
|
python
|
def job_skipped(self, job, queue):
"""
Called if a job, before trying to run it, does not have the "waiting" status,
or, after run, if its status was set to "canceled"
"""
job.queued.delete()
self.log(self.job_skipped_message(job, queue), level='warning')
if hasattr(job, 'on_skipped'):
job.on_skipped(queue)
|
[
"def",
"job_skipped",
"(",
"self",
",",
"job",
",",
"queue",
")",
":",
"job",
".",
"queued",
".",
"delete",
"(",
")",
"self",
".",
"log",
"(",
"self",
".",
"job_skipped_message",
"(",
"job",
",",
"queue",
")",
",",
"level",
"=",
"'warning'",
")",
"if",
"hasattr",
"(",
"job",
",",
"'on_skipped'",
")",
":",
"job",
".",
"on_skipped",
"(",
"queue",
")"
] |
Called if a job, before trying to run it, does not have the "waiting" status,
or, after run, if its status was set to "canceled"
|
[
"Called",
"if",
"a",
"job",
"before",
"trying",
"to",
"run",
"it",
"has",
"not",
"the",
"waiting",
"status",
"or",
"after",
"run",
"if",
"its",
"status",
"was",
"set",
"to",
"canceled"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L567-L575
|
243,885
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_skipped_message
|
def job_skipped_message(self, job, queue):
"""
Return the message to log when a job was skipped
"""
return '[%s|%s|%s] job skipped (current status: %s)' % (
queue._cached_name,
job.pk.get(),
job._cached_identifier,
STATUSES.by_value(job._cached_status, 'UNKNOWN'))
|
python
|
def job_skipped_message(self, job, queue):
"""
Return the message to log when a job was skipped
"""
return '[%s|%s|%s] job skipped (current status: %s)' % (
queue._cached_name,
job.pk.get(),
job._cached_identifier,
STATUSES.by_value(job._cached_status, 'UNKNOWN'))
|
[
"def",
"job_skipped_message",
"(",
"self",
",",
"job",
",",
"queue",
")",
":",
"return",
"'[%s|%s|%s] job skipped (current status: %s)'",
"%",
"(",
"queue",
".",
"_cached_name",
",",
"job",
".",
"pk",
".",
"get",
"(",
")",
",",
"job",
".",
"_cached_identifier",
",",
"STATUSES",
".",
"by_value",
"(",
"job",
".",
"_cached_status",
",",
"'UNKNOWN'",
")",
")"
] |
Return the message to log when a job was skipped
|
[
"Return",
"the",
"message",
"to",
"log",
"when",
"a",
"job",
"was",
"skipped"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L577-L585
|
243,886
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_delayed
|
def job_delayed(self, job, queue):
"""
Called if a job, before trying to run it, has the "delayed" status, or,
after run, if its status was set to "delayed"
If delayed_until was not set, or is invalid, set it to 60sec in the future
"""
delayed_until = job.delayed_until.hget()
if delayed_until:
try:
delayed_until = compute_delayed_until(delayed_until=parse(delayed_until))
except (ValueError, TypeError):
delayed_until = None
if not delayed_until:
# by default delay it for 60 seconds
delayed_until = compute_delayed_until(delayed_for=60)
job.enqueue_or_delay(
queue_name=queue._cached_name,
delayed_until=delayed_until,
queue_model=queue.__class__,
)
self.log(self.job_delayed_message(job, queue), level='warning')
if hasattr(job, 'on_delayed'):
job.on_delayed(queue)
|
python
|
def job_delayed(self, job, queue):
"""
Called if a job, before trying to run it, has the "delayed" status, or,
after run, if its status was set to "delayed"
If delayed_until was not set, or is invalid, set it to 60sec in the future
"""
delayed_until = job.delayed_until.hget()
if delayed_until:
try:
delayed_until = compute_delayed_until(delayed_until=parse(delayed_until))
except (ValueError, TypeError):
delayed_until = None
if not delayed_until:
# by default delay it for 60 seconds
delayed_until = compute_delayed_until(delayed_for=60)
job.enqueue_or_delay(
queue_name=queue._cached_name,
delayed_until=delayed_until,
queue_model=queue.__class__,
)
self.log(self.job_delayed_message(job, queue), level='warning')
if hasattr(job, 'on_delayed'):
job.on_delayed(queue)
|
[
"def",
"job_delayed",
"(",
"self",
",",
"job",
",",
"queue",
")",
":",
"delayed_until",
"=",
"job",
".",
"delayed_until",
".",
"hget",
"(",
")",
"if",
"delayed_until",
":",
"try",
":",
"delayed_until",
"=",
"compute_delayed_until",
"(",
"delayed_until",
"=",
"parse",
"(",
"delayed_until",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"delayed_until",
"=",
"None",
"if",
"not",
"delayed_until",
":",
"# by default delay it for 60 seconds",
"delayed_until",
"=",
"compute_delayed_until",
"(",
"delayed_for",
"=",
"60",
")",
"job",
".",
"enqueue_or_delay",
"(",
"queue_name",
"=",
"queue",
".",
"_cached_name",
",",
"delayed_until",
"=",
"delayed_until",
",",
"queue_model",
"=",
"queue",
".",
"__class__",
",",
")",
"self",
".",
"log",
"(",
"self",
".",
"job_delayed_message",
"(",
"job",
",",
"queue",
")",
",",
"level",
"=",
"'warning'",
")",
"if",
"hasattr",
"(",
"job",
",",
"'on_delayed'",
")",
":",
"job",
".",
"on_delayed",
"(",
"queue",
")"
] |
Called if a job, before trying to run it, has the "delayed" status, or,
after run, if its status was set to "delayed"
If delayed_until was not set, or is invalid, set it to 60sec in the future
|
[
"Called",
"if",
"a",
"job",
"before",
"trying",
"to",
"run",
"it",
"has",
"the",
"delayed",
"status",
"or",
"after",
"run",
"if",
"its",
"status",
"was",
"set",
"to",
"delayed",
"If",
"delayed_until",
"was",
"not",
"set",
"or",
"is",
"invalid",
"set",
"it",
"to",
"60sec",
"in",
"the",
"future"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L587-L613
|
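job_delayed above parses the stored delayed_until with dateutil's parse and falls back to a 60-second delay when the value is missing or unparseable. A sketch of that fallback, swapping the source's compute_delayed_until helper for a plain datetime computation (so the helper's exact behaviour is not assumed):

from datetime import datetime, timedelta
from dateutil.parser import parse

def resolve_delayed_until(stored_value, default_seconds=60):
    delayed_until = None
    if stored_value:
        try:
            delayed_until = parse(stored_value)
        except (ValueError, TypeError):
            delayed_until = None
    if not delayed_until:
        # missing or invalid: delay for 60 seconds by default
        delayed_until = datetime.utcnow() + timedelta(seconds=default_seconds)
    return delayed_until

print(resolve_delayed_until('2030-01-01 10:00:00'))  # parsed as given
print(resolve_delayed_until('not-a-date'))           # ~60s from now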
243,887
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
Worker.job_delayed_message
|
def job_delayed_message(self, job, queue):
"""
Return the message to log when a job was delayed just before or during
its execution
"""
return '[%s|%s|%s] job delayed until %s' % (
queue._cached_name,
job.pk.get(),
job._cached_identifier,
job.delayed_until.hget())
|
python
|
def job_delayed_message(self, job, queue):
"""
Return the message to log when a job was delayed just before or during
its execution
"""
return '[%s|%s|%s] job delayed until %s' % (
queue._cached_name,
job.pk.get(),
job._cached_identifier,
job.delayed_until.hget())
|
[
"def",
"job_delayed_message",
"(",
"self",
",",
"job",
",",
"queue",
")",
":",
"return",
"'[%s|%s|%s] job delayed until %s'",
"%",
"(",
"queue",
".",
"_cached_name",
",",
"job",
".",
"pk",
".",
"get",
"(",
")",
",",
"job",
".",
"_cached_identifier",
",",
"job",
".",
"delayed_until",
".",
"hget",
"(",
")",
")"
] |
Return the message to log when a job was delayed just before or during
its execution
|
[
"Return",
"the",
"message",
"to",
"log",
"when",
"a",
"job",
"was",
"delayed",
"just",
"before",
"or",
"during",
"its",
"execution"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L615-L624
|
243,888
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
WorkerConfig.create_parser
|
def create_parser(self):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to the worker.
"""
return OptionParser(prog=self.prog_name,
usage=self.usage(),
version='%%prog %s' % self.get_version(),
option_list=self.option_list)
|
python
|
def create_parser(self):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to the worker.
"""
return OptionParser(prog=self.prog_name,
usage=self.usage(),
version='%%prog %s' % self.get_version(),
option_list=self.option_list)
|
[
"def",
"create_parser",
"(",
"self",
")",
":",
"return",
"OptionParser",
"(",
"prog",
"=",
"self",
".",
"prog_name",
",",
"usage",
"=",
"self",
".",
"usage",
"(",
")",
",",
"version",
"=",
"'%%prog %s'",
"%",
"self",
".",
"get_version",
"(",
")",
",",
"option_list",
"=",
"self",
".",
"option_list",
")"
] |
Create and return the ``OptionParser`` which will be used to
parse the arguments to the worker.
|
[
"Create",
"and",
"return",
"the",
"OptionParser",
"which",
"will",
"be",
"used",
"to",
"parse",
"the",
"arguments",
"to",
"the",
"worker",
"."
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L740-L749
|
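create_parser above wires an optparse OptionParser from attributes defined elsewhere on WorkerConfig. A minimal sketch of the same wiring with a one-entry option_list (the actual option list is not shown in this record):

from optparse import OptionParser, make_option

option_list = [
    make_option('--dry-run', action='store_true', dest='dry_run', default=False),
]
parser = OptionParser(prog='worker', usage='%prog [options]',
                      version='%prog 1.0', option_list=option_list)
options, args = parser.parse_args(['--dry-run'])
assert options.dry_run is True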
243,889
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
WorkerConfig.manage_options
|
def manage_options(self):
"""
Create a parser and parse the command-line arguments.
Return True if the program must exit.
"""
self.parser = self.create_parser()
self.options, self.args = self.parser.parse_args(self.argv)
self.do_imports()
if self.options.callback and not callable(self.options.callback):
self.parser.error('The callback is not callable')
self.logger_level = None
if self.options.logger_level:
if self.options.logger_level.isdigit():
self.options.logger_level = int(self.options.logger_level)
else:
try:
self.options.logger_level = getattr(logging, self.options.logger_level.upper())
except:
self.parser.error('Invalid logger-level %s' % self.options.logger_level)
if self.options.max_loops is not None and self.options.max_loops < 0:
self.parser.error('The max-loops argument (%s) must be a positive integer' % self.options.max_loops)
if self.options.max_duration is not None and self.options.max_duration < 0:
self.parser.error('The max-duration argument (%s) must be a positive integer' % self.options.max_duration)
if self.options.timeout is not None and self.options.timeout < 0:
self.parser.error('The timeout argument (%s) must be a positive integer (including 0)' % self.options.timeout)
if self.options.fetch_priorities_delay is not None and self.options.fetch_priorities_delay <= 0:
self.parser.error('The fetch-priorities-delay argument (%s) must be a positive integer' % self.options.fetch_priorities_delay)
if self.options.fetch_delayed_delay is not None and self.options.fetch_delayed_delay <= 0:
self.parser.error('The fetch-delayed-delay argument (%s) must be a positive integer' % self.options.fetch_delayed_delay)
if self.options.requeue_times is not None and self.options.requeue_times < 0:
self.parser.error('The requeue-times argument (%s) must be a positive integer (including 0)' % self.options.requeue_times)
if self.options.requeue_delay_delta is not None and self.options.requeue_delay_delta < 0:
self.parser.error('The requeue-delay-delta argument (%s) must be a positive integer (including 0)' % self.options.requeue_delay_delta)
self.database_config = None
if self.options.database:
host, port, db = self.options.database.split(':')
self.database_config = dict(host=host, port=int(port), db=int(db))
self.update_title = self.options.update_title
|
python
|
def manage_options(self):
"""
Create a parser and parse the command-line arguments.
Return True if the program must exit.
"""
self.parser = self.create_parser()
self.options, self.args = self.parser.parse_args(self.argv)
self.do_imports()
if self.options.callback and not callable(self.options.callback):
self.parser.error('The callback is not callable')
self.logger_level = None
if self.options.logger_level:
if self.options.logger_level.isdigit():
self.options.logger_level = int(self.options.logger_level)
else:
try:
self.options.logger_level = getattr(logging, self.options.logger_level.upper())
except:
self.parser.error('Invalid logger-level %s' % self.options.logger_level)
if self.options.max_loops is not None and self.options.max_loops < 0:
self.parser.error('The max-loops argument (%s) must be a positive integer' % self.options.max_loops)
if self.options.max_duration is not None and self.options.max_duration < 0:
self.parser.error('The max-duration argument (%s) must be a positive integer' % self.options.max_duration)
if self.options.timeout is not None and self.options.timeout < 0:
self.parser.error('The timeout argument (%s) must be a positive integer (including 0)' % self.options.timeout)
if self.options.fetch_priorities_delay is not None and self.options.fetch_priorities_delay <= 0:
self.parser.error('The fetch-priorities-delay argument (%s) must be a positive integer' % self.options.fetch_priorities_delay)
if self.options.fetch_delayed_delay is not None and self.options.fetch_delayed_delay <= 0:
self.parser.error('The fetch-delayed-delay argument (%s) must be a positive integer' % self.options.fetch_delayed_delay)
if self.options.requeue_times is not None and self.options.requeue_times < 0:
self.parser.error('The requeue-times argument (%s) must be a positive integer (including 0)' % self.options.requeue_times)
if self.options.requeue_delay_delta is not None and self.options.requeue_delay_delta < 0:
self.parser.error('The rrequeue-delay-delta argument (%s) must be a positive integer (including 0)' % self.options.requeue_delay_delta)
self.database_config = None
if self.options.database:
host, port, db = self.options.database.split(':')
self.database_config = dict(host=host, port=int(port), db=int(db))
self.update_title = self.options.update_title
|
[
"def",
"manage_options",
"(",
"self",
")",
":",
"self",
".",
"parser",
"=",
"self",
".",
"create_parser",
"(",
")",
"self",
".",
"options",
",",
"self",
".",
"args",
"=",
"self",
".",
"parser",
".",
"parse_args",
"(",
"self",
".",
"argv",
")",
"self",
".",
"do_imports",
"(",
")",
"if",
"self",
".",
"options",
".",
"callback",
"and",
"not",
"callable",
"(",
"self",
".",
"options",
".",
"callback",
")",
":",
"self",
".",
"parser",
".",
"error",
"(",
"'The callback is not callable'",
")",
"self",
".",
"logger_level",
"=",
"None",
"if",
"self",
".",
"options",
".",
"logger_level",
":",
"if",
"self",
".",
"options",
".",
"logger_level",
".",
"isdigit",
"(",
")",
":",
"self",
".",
"options",
".",
"logger_level",
"=",
"int",
"(",
"self",
".",
"options",
".",
"logger_level",
")",
"else",
":",
"try",
":",
"self",
".",
"options",
".",
"logger_level",
"=",
"getattr",
"(",
"logging",
",",
"self",
".",
"options",
".",
"logger_level",
".",
"upper",
"(",
")",
")",
"except",
":",
"self",
".",
"parser",
".",
"error",
"(",
"'Invalid logger-level %s'",
"%",
"self",
".",
"options",
".",
"logger_level",
")",
"if",
"self",
".",
"options",
".",
"max_loops",
"is",
"not",
"None",
"and",
"self",
".",
"options",
".",
"max_loops",
"<",
"0",
":",
"self",
".",
"parser",
".",
"error",
"(",
"'The max-loops argument (%s) must be a <positive></positive> integer'",
"%",
"self",
".",
"options",
".",
"max_loops",
")",
"if",
"self",
".",
"options",
".",
"max_duration",
"is",
"not",
"None",
"and",
"self",
".",
"options",
".",
"max_duration",
"<",
"0",
":",
"self",
".",
"parser",
".",
"error",
"(",
"'The max-duration argument (%s) must be a positive integer'",
"%",
"self",
".",
"options",
".",
"max_duration",
")",
"if",
"self",
".",
"options",
".",
"timeout",
"is",
"not",
"None",
"and",
"self",
".",
"options",
".",
"timeout",
"<",
"0",
":",
"self",
".",
"parser",
".",
"error",
"(",
"'The timeout argument (%s) must be a positive integer (including 0)'",
"%",
"self",
".",
"options",
".",
"timeout",
")",
"if",
"self",
".",
"options",
".",
"fetch_priorities_delay",
"is",
"not",
"None",
"and",
"self",
".",
"options",
".",
"fetch_priorities_delay",
"<=",
"0",
":",
"self",
".",
"parser",
".",
"error",
"(",
"'The fetch-priorities-delay argument (%s) must be a positive integer'",
"%",
"self",
".",
"options",
".",
"fetch_priorities_delay",
")",
"if",
"self",
".",
"options",
".",
"fetch_delayed_delay",
"is",
"not",
"None",
"and",
"self",
".",
"options",
".",
"fetch_delayed_delay",
"<=",
"0",
":",
"self",
".",
"parser",
".",
"error",
"(",
"'The fetch-delayed-delay argument (%s) must be a positive integer'",
"%",
"self",
".",
"options",
".",
"fetch_delayed_delay",
")",
"if",
"self",
".",
"options",
".",
"requeue_times",
"is",
"not",
"None",
"and",
"self",
".",
"options",
".",
"requeue_times",
"<",
"0",
":",
"self",
".",
"parser",
".",
"error",
"(",
"'The requeue-times argument (%s) must be a positive integer (including 0)'",
"%",
"self",
".",
"options",
".",
"requeue_times",
")",
"if",
"self",
".",
"options",
".",
"requeue_delay_delta",
"is",
"not",
"None",
"and",
"self",
".",
"options",
".",
"requeue_delay_delta",
"<",
"0",
":",
"self",
".",
"parser",
".",
"error",
"(",
"'The rrequeue-delay-delta argument (%s) must be a positive integer (including 0)'",
"%",
"self",
".",
"options",
".",
"requeue_delay_delta",
")",
"self",
".",
"database_config",
"=",
"None",
"if",
"self",
".",
"options",
".",
"database",
":",
"host",
",",
"port",
",",
"db",
"=",
"self",
".",
"options",
".",
"database",
".",
"split",
"(",
"':'",
")",
"self",
".",
"database_config",
"=",
"dict",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"int",
"(",
"port",
")",
",",
"db",
"=",
"int",
"(",
"db",
")",
")",
"self",
".",
"update_title",
"=",
"self",
".",
"options",
".",
"update_title"
] |
Create a parser and parse the command-line arguments.
Return True if the program must exit.
|
[
"Create",
"a",
"parser",
"given",
"the",
"command",
"-",
"line",
"arguments",
"creates",
"a",
"parser",
"Return",
"True",
"if",
"the",
"programme",
"must",
"exit",
"."
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L751-L800
|
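The logger-level handling in manage_options accepts either a numeric string or a level name looked up on the logging module. The same conversion in isolation (raising ValueError where the source calls parser.error):

import logging

def parse_logger_level(value):
    if value.isdigit():
        return int(value)       # e.g. '10' -> 10
    try:
        return getattr(logging, value.upper())  # e.g. 'debug' -> logging.DEBUG
    except AttributeError:
        raise ValueError('Invalid logger-level %s' % value)

assert parse_logger_level('10') == 10
assert parse_logger_level('debug') == logging.DEBUG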
243,890
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
WorkerConfig.do_imports
|
def do_imports(self):
"""
Import all importable options
"""
self.do_import('worker_class', Worker)
self.do_import('queue_model', self.options.worker_class.queue_model)
self.do_import('error_model', self.options.worker_class.error_model)
self.do_import('callback', self.options.worker_class.callback)
|
python
|
def do_imports(self):
"""
Import all importable options
"""
self.do_import('worker_class', Worker)
self.do_import('queue_model', self.options.worker_class.queue_model)
self.do_import('error_model', self.options.worker_class.error_model)
self.do_import('callback', self.options.worker_class.callback)
|
[
"def",
"do_imports",
"(",
"self",
")",
":",
"self",
".",
"do_import",
"(",
"'worker_class'",
",",
"Worker",
")",
"self",
".",
"do_import",
"(",
"'queue_model'",
",",
"self",
".",
"options",
".",
"worker_class",
".",
"queue_model",
")",
"self",
".",
"do_import",
"(",
"'error_model'",
",",
"self",
".",
"options",
".",
"worker_class",
".",
"error_model",
")",
"self",
".",
"do_import",
"(",
"'callback'",
",",
"self",
".",
"options",
".",
"worker_class",
".",
"callback",
")"
] |
Import all importable options
|
[
"Import",
"all",
"importable",
"options"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L816-L823
|
243,891
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
WorkerConfig.print_options
|
def print_options(self):
"""
Print all options as parsed by the script
"""
options = []
print("The script is running with the following options:")
options.append(("dry_run", self.options.dry_run))
options.append(("worker_config", self.__class__))
database_config = self.database_config or \
self.options.queue_model.database.connection_settings
options.append(("database", '%s:%s:%s' % (database_config['host'],
database_config['port'],
database_config['db'])))
if self.options.worker_class is not None:
options.append(("worker-class", self.options.worker_class))
for name, value in options:
print(" - %s = %s" % (name.replace('_', '-'), value))
print("The worker will run with the following options:")
for name in self.options.worker_class.parameters:
option = getattr(self.worker, name)
if name == 'callback' and \
self.options.worker_class.execute == Worker.execute:
option = '<jobs "run" method>'
elif isinstance(option, (list, tuple, set)):
option = ','.join(option)
print(" - %s = %s" % (name.replace('_', '-'), option))
|
python
|
def print_options(self):
"""
Print all options as parsed by the script
"""
options = []
print("The script is running with the following options:")
options.append(("dry_run", self.options.dry_run))
options.append(("worker_config", self.__class__))
database_config = self.database_config or \
self.options.queue_model.database.connection_settings
options.append(("database", '%s:%s:%s' % (database_config['host'],
database_config['port'],
database_config['db'])))
if self.options.worker_class is not None:
options.append(("worker-class", self.options.worker_class))
for name, value in options:
print(" - %s = %s" % (name.replace('_', '-'), value))
print("The worker will run with the following options:")
for name in self.options.worker_class.parameters:
option = getattr(self.worker, name)
if name == 'callback' and \
self.options.worker_class.execute == Worker.execute:
option = '<jobs "run" method>'
elif isinstance(option, (list, tuple, set)):
option = ','.join(option)
print(" - %s = %s" % (name.replace('_', '-'), option))
|
[
"def",
"print_options",
"(",
"self",
")",
":",
"options",
"=",
"[",
"]",
"print",
"(",
"\"The script is running with the following options:\"",
")",
"options",
".",
"append",
"(",
"(",
"\"dry_run\"",
",",
"self",
".",
"options",
".",
"dry_run",
")",
")",
"options",
".",
"append",
"(",
"(",
"\"worker_config\"",
",",
"self",
".",
"__class__",
")",
")",
"database_config",
"=",
"self",
".",
"database_config",
"or",
"self",
".",
"options",
".",
"queue_model",
".",
"database",
".",
"connection_settings",
"options",
".",
"append",
"(",
"(",
"\"database\"",
",",
"'%s:%s:%s'",
"%",
"(",
"database_config",
"[",
"'host'",
"]",
",",
"database_config",
"[",
"'port'",
"]",
",",
"database_config",
"[",
"'db'",
"]",
")",
")",
")",
"if",
"self",
".",
"options",
".",
"worker_class",
"is",
"not",
"None",
":",
"options",
".",
"append",
"(",
"(",
"\"worker-class\"",
",",
"self",
".",
"options",
".",
"worker_class",
")",
")",
"for",
"name",
",",
"value",
"in",
"options",
":",
"print",
"(",
"\" - %s = %s\"",
"%",
"(",
"name",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
",",
"value",
")",
")",
"print",
"(",
"\"The worker will run with the following options:\"",
")",
"for",
"name",
"in",
"self",
".",
"options",
".",
"worker_class",
".",
"parameters",
":",
"option",
"=",
"getattr",
"(",
"self",
".",
"worker",
",",
"name",
")",
"if",
"name",
"==",
"'callback'",
"and",
"self",
".",
"options",
".",
"worker_class",
".",
"execute",
"==",
"Worker",
".",
"execute",
":",
"option",
"=",
"'<jobs \"run\" method>'",
"elif",
"isinstance",
"(",
"option",
",",
"(",
"list",
",",
"tuple",
",",
"set",
")",
")",
":",
"option",
"=",
"','",
".",
"join",
"(",
"option",
")",
"print",
"(",
"\" - %s = %s\"",
"%",
"(",
"name",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
",",
"option",
")",
")"
] |
Print all options as parsed by the script
|
[
"Print",
"all",
"options",
"as",
"parsed",
"by",
"the",
"script"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L825-L857
|
243,892
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
WorkerConfig.execute
|
def execute(self):
"""
Main method to call to run the worker
"""
self.prepare_models()
self.prepare_worker()
if self.options.print_options:
self.print_options()
self.run()
|
python
|
def execute(self):
"""
Main method to call to run the worker
"""
self.prepare_models()
self.prepare_worker()
if self.options.print_options:
self.print_options()
self.run()
|
[
"def",
"execute",
"(",
"self",
")",
":",
"self",
".",
"prepare_models",
"(",
")",
"self",
".",
"prepare_worker",
"(",
")",
"if",
"self",
".",
"options",
".",
"print_options",
":",
"self",
".",
"print_options",
"(",
")",
"self",
".",
"run",
"(",
")"
] |
Main method to call to run the worker
|
[
"Main",
"method",
"to",
"call",
"to",
"run",
"the",
"worker"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L859-L867
|
243,893
|
limpyd/redis-limpyd-jobs
|
limpyd_jobs/workers.py
|
WorkerConfig.prepare_models
|
def prepare_models(self):
"""
If a database config was given as argument, apply it to our models
"""
if self.database_config:
for model in (self.options.queue_model, self.options.error_model):
model.database.reset(**self.database_config)
|
python
|
def prepare_models(self):
"""
If a database config was given as argument, apply it to our models
"""
if self.database_config:
for model in (self.options.queue_model, self.options.error_model):
model.database.reset(**self.database_config)
|
[
"def",
"prepare_models",
"(",
"self",
")",
":",
"if",
"self",
".",
"database_config",
":",
"for",
"model",
"in",
"(",
"self",
".",
"options",
".",
"queue_model",
",",
"self",
".",
"options",
".",
"error_model",
")",
":",
"model",
".",
"database",
".",
"reset",
"(",
"*",
"*",
"self",
".",
"database_config",
")"
] |
If a database config was given as argument, apply it to our models
|
[
"If",
"a",
"database",
"config",
"ws",
"given",
"as",
"argument",
"apply",
"it",
"to",
"our",
"models"
] |
264c71029bad4377d6132bf8bb9c55c44f3b03a2
|
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L869-L875
|
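prepare_models above applies the database_config that manage_options built earlier from a host:port:db string. The parsing step in isolation:

def parse_database(value):
    host, port, db = value.split(':')
    return dict(host=host, port=int(port), db=int(db))

assert parse_database('localhost:6379:0') == {'host': 'localhost', 'port': 6379, 'db': 0}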
243,894
|
Robpol86/docoptcfg
|
docoptcfg.py
|
settable_options
|
def settable_options(doc, argv, ignore, options_first):
"""Determine which options we can set, which ones are boolean, and which ones are repeatable.
All set items are option long names.
:param str doc: Docstring from docoptcfg().
:param iter argv: CLI arguments from docoptcfg().
:param iter ignore: Options to ignore from docoptcfg().
:param bool options_first: docopt argument from docoptcfg().
:return: Settable options, boolean options, repeatable options, and short to long option name mapping.
:rtype: tuple
"""
settable, booleans, repeatable, short_map = set(), set(), set(), dict()
# Determine which options are settable by docoptcfg and which ones are flags/booleans.
options = docopt.parse_defaults(doc)
short_map.update((o.short, o.long) for o in options)
parsed_argv = docopt.parse_argv(docopt.TokenStream(argv, docopt.DocoptExit), list(options), options_first)
overridden = [o.long for o in parsed_argv if hasattr(o, 'long')]
for option in options:
if option.long in overridden or (option.long in ignore or option.short in ignore):
continue
if option.argcount == 0:
booleans.add(option.long)
settable.add(option.long)
# Determine which options are repeatable.
if settable and '...' in doc:
pattern = docopt.parse_pattern(docopt.formal_usage(docopt.DocoptExit.usage), options)
for option in pattern.fix().flat():
if not hasattr(option, 'long'):
continue # Positional argument or sub-command.
if getattr(option, 'long') not in settable:
continue # Don't care about this if we can't set it.
if getattr(option, 'long') in booleans and getattr(option, 'value') == 0:
repeatable.add(getattr(option, 'long'))
elif hasattr(getattr(option, 'value'), '__iter__'):
repeatable.add(getattr(option, 'long'))
return settable, booleans, repeatable, short_map
|
python
|
def settable_options(doc, argv, ignore, options_first):
"""Determine which options we can set, which ones are boolean, and which ones are repeatable.
All set items are option long names.
:param str doc: Docstring from docoptcfg().
:param iter argv: CLI arguments from docoptcfg().
:param iter ignore: Options to ignore from docoptcfg().
:param bool options_first: docopt argument from docoptcfg().
:return: Settable options, boolean options, repeatable options, and short to long option name mapping.
:rtype: tuple
"""
settable, booleans, repeatable, short_map = set(), set(), set(), dict()
# Determine which options are settable by docoptcfg and which ones are flags/booleans.
options = docopt.parse_defaults(doc)
short_map.update((o.short, o.long) for o in options)
parsed_argv = docopt.parse_argv(docopt.TokenStream(argv, docopt.DocoptExit), list(options), options_first)
overridden = [o.long for o in parsed_argv if hasattr(o, 'long')]
for option in options:
if option.long in overridden or (option.long in ignore or option.short in ignore):
continue
if option.argcount == 0:
booleans.add(option.long)
settable.add(option.long)
# Determine which options are repeatable.
if settable and '...' in doc:
pattern = docopt.parse_pattern(docopt.formal_usage(docopt.DocoptExit.usage), options)
for option in pattern.fix().flat():
if not hasattr(option, 'long'):
continue # Positional argument or sub-command.
if getattr(option, 'long') not in settable:
continue # Don't care about this if we can't set it.
if getattr(option, 'long') in booleans and getattr(option, 'value') == 0:
repeatable.add(getattr(option, 'long'))
elif hasattr(getattr(option, 'value'), '__iter__'):
repeatable.add(getattr(option, 'long'))
return settable, booleans, repeatable, short_map
|
[
"def",
"settable_options",
"(",
"doc",
",",
"argv",
",",
"ignore",
",",
"options_first",
")",
":",
"settable",
",",
"booleans",
",",
"repeatable",
",",
"short_map",
"=",
"set",
"(",
")",
",",
"set",
"(",
")",
",",
"set",
"(",
")",
",",
"dict",
"(",
")",
"# Determine which options are settable by docoptcfg and which ones are flags/booleans.",
"options",
"=",
"docopt",
".",
"parse_defaults",
"(",
"doc",
")",
"short_map",
".",
"update",
"(",
"(",
"o",
".",
"short",
",",
"o",
".",
"long",
")",
"for",
"o",
"in",
"options",
")",
"parsed_argv",
"=",
"docopt",
".",
"parse_argv",
"(",
"docopt",
".",
"TokenStream",
"(",
"argv",
",",
"docopt",
".",
"DocoptExit",
")",
",",
"list",
"(",
"options",
")",
",",
"options_first",
")",
"overridden",
"=",
"[",
"o",
".",
"long",
"for",
"o",
"in",
"parsed_argv",
"if",
"hasattr",
"(",
"o",
",",
"'long'",
")",
"]",
"for",
"option",
"in",
"options",
":",
"if",
"option",
".",
"long",
"in",
"overridden",
"or",
"(",
"option",
".",
"long",
"in",
"ignore",
"or",
"option",
".",
"short",
"in",
"ignore",
")",
":",
"continue",
"if",
"option",
".",
"argcount",
"==",
"0",
":",
"booleans",
".",
"add",
"(",
"option",
".",
"long",
")",
"settable",
".",
"add",
"(",
"option",
".",
"long",
")",
"# Determine which options are repeatable.",
"if",
"settable",
"and",
"'...'",
"in",
"doc",
":",
"pattern",
"=",
"docopt",
".",
"parse_pattern",
"(",
"docopt",
".",
"formal_usage",
"(",
"docopt",
".",
"DocoptExit",
".",
"usage",
")",
",",
"options",
")",
"for",
"option",
"in",
"pattern",
".",
"fix",
"(",
")",
".",
"flat",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"option",
",",
"'long'",
")",
":",
"continue",
"# Positional argument or sub-command.",
"if",
"getattr",
"(",
"option",
",",
"'long'",
")",
"not",
"in",
"settable",
":",
"continue",
"# Don't care about this if we can't set it.",
"if",
"getattr",
"(",
"option",
",",
"'long'",
")",
"in",
"booleans",
"and",
"getattr",
"(",
"option",
",",
"'value'",
")",
"==",
"0",
":",
"repeatable",
".",
"add",
"(",
"getattr",
"(",
"option",
",",
"'long'",
")",
")",
"elif",
"hasattr",
"(",
"getattr",
"(",
"option",
",",
"'value'",
")",
",",
"'__iter__'",
")",
":",
"repeatable",
".",
"add",
"(",
"getattr",
"(",
"option",
",",
"'long'",
")",
")",
"return",
"settable",
",",
"booleans",
",",
"repeatable",
",",
"short_map"
] |
Determine which options we can set, which ones are boolean, and which ones are repeatable.
All set items are option long names.
:param str doc: Docstring from docoptcfg().
:param iter argv: CLI arguments from docoptcfg().
:param iter ignore: Options to ignore from docoptcfg().
:param bool options_first: docopt argument from docoptcfg().
:return: Settable options, boolean options, repeatable options, and short to long option name mapping.
:rtype: tuple
|
[
"Determine",
"which",
"options",
"we",
"can",
"set",
"which",
"ones",
"are",
"boolean",
"and",
"which",
"ones",
"are",
"repeatable",
"."
] |
3746dc263549f7f3ef5a86e739d588546b084bde
|
https://github.com/Robpol86/docoptcfg/blob/3746dc263549f7f3ef5a86e739d588546b084bde/docoptcfg.py#L39-L79
|
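A hedged sketch of calling this helper directly (docoptcfg() normally invokes it internally). The docstring below is hypothetical and deliberately avoids '...', so the repeatable-detection branch, which relies on docopt.DocoptExit.usage having been set by a prior docopt run, is skipped:

import docopt  # assumed available, as in docoptcfg itself

DOC = '''Usage: prog [options]

Options:
    --flag         A boolean option.
    --name=NAME    A value option.
'''

settable, booleans, repeatable, short_map = settable_options(DOC, [], (), False)
# settable == {'--flag', '--name'}; booleans == {'--flag'}; repeatable == set()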
243,895
|
Robpol86/docoptcfg
|
docoptcfg.py
|
values_from_env
|
def values_from_env(env_prefix, settable, booleans, repeatable):
"""Get all values from environment variables.
:param str env_prefix: Argument from docoptcfg().
:param iter settable: Option long names available to set by config file.
:param iter booleans: Option long names of boolean/flag types.
:param iter repeatable: Option long names of repeatable options.
:return: Settable values.
:rtype: dict
"""
defaults_env = dict()
for key in settable:
try:
defaults_env[key] = get_env(key, env_prefix, key in booleans, key in repeatable)
except KeyError:
pass
return defaults_env
|
python
|
def values_from_env(env_prefix, settable, booleans, repeatable):
"""Get all values from environment variables.
:param str env_prefix: Argument from docoptcfg().
:param iter settable: Option long names available to set by config file.
:param iter booleans: Option long names of boolean/flag types.
:param iter repeatable: Option long names of repeatable options.
:return: Settable values.
:rtype: dict
"""
defaults_env = dict()
for key in settable:
try:
defaults_env[key] = get_env(key, env_prefix, key in booleans, key in repeatable)
except KeyError:
pass
return defaults_env
|
[
"def",
"values_from_env",
"(",
"env_prefix",
",",
"settable",
",",
"booleans",
",",
"repeatable",
")",
":",
"defaults_env",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"settable",
":",
"try",
":",
"defaults_env",
"[",
"key",
"]",
"=",
"get_env",
"(",
"key",
",",
"env_prefix",
",",
"key",
"in",
"booleans",
",",
"key",
"in",
"repeatable",
")",
"except",
"KeyError",
":",
"pass",
"return",
"defaults_env"
] |
Get all values from environment variables.
:param str env_prefix: Argument from docoptcfg().
:param iter settable: Option long names available to set by config file.
:param iter booleans: Option long names of boolean/flag types.
:param iter repeatable: Option long names of repeatable options.
:return: Settable values.
:rtype: dict
|
[
"Get",
"all",
"values",
"from",
"environment",
"variables",
"."
] |
3746dc263549f7f3ef5a86e739d588546b084bde
|
https://github.com/Robpol86/docoptcfg/blob/3746dc263549f7f3ef5a86e739d588546b084bde/docoptcfg.py#L126-L143
|
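A sketch under the assumption that get_env (defined elsewhere in this module) maps an option such as --name with prefix 'MYAPP_' to the environment variable MYAPP_NAME and raises KeyError when it is unset; the prefix and option names are hypothetical:

import os

os.environ['MYAPP_NAME'] = 'demo'  # hypothetical variable for --name

defaults = values_from_env('MYAPP_', {'--name', '--flag'}, {'--flag'}, set())
# Only the variable that exists is picked up: {'--name': 'demo'}.
# The missing MYAPP_FLAG raises KeyError inside get_env and is silently skipped.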
243,896
|
Robpol86/docoptcfg
|
docoptcfg.py
|
get_opt
|
def get_opt(key, config, section, booleans, repeatable):
"""Get one value from config file.
:raise DocoptcfgFileError: If an option is the wrong type.
:param str key: Option long name (e.g. --config).
:param ConfigParser config: ConfigParser instance with config file data already loaded.
:param str section: Section in config file to focus on.
:param iter booleans: Option long names of boolean/flag types.
:param iter repeatable: Option long names of repeatable options.
:return: Value to set in the defaults dict.
"""
# Handle repeatable non-boolean options (e.g. --file=file1.txt --file=file2.txt).
if key in repeatable and key not in booleans:
return config.get(section, key[2:]).strip('\n').splitlines()
# Handle repeatable booleans.
if key in repeatable and key in booleans:
try:
return config.getint(section, key[2:])
except ValueError as exc:
raise DocoptcfgFileError('Repeatable boolean option "{0}" invalid.'.format(key[2:]), str(exc))
# Handle non-repeatable booleans.
if key in booleans:
try:
return config.getboolean(section, key[2:])
except ValueError as exc:
raise DocoptcfgFileError('Boolean option "{0}" invalid.'.format(key[2:]), str(exc))
# Handle the rest.
return str(config.get(section, key[2:]))
|
python
|
def get_opt(key, config, section, booleans, repeatable):
"""Get one value from config file.
:raise DocoptcfgFileError: If an option is the wrong type.
:param str key: Option long name (e.g. --config).
:param ConfigParser config: ConfigParser instance with config file data already loaded.
:param str section: Section in config file to focus on.
:param iter booleans: Option long names of boolean/flag types.
:param iter repeatable: Option long names of repeatable options.
:return: Value to set in the defaults dict.
"""
# Handle repeatable non-boolean options (e.g. --file=file1.txt --file=file2.txt).
if key in repeatable and key not in booleans:
return config.get(section, key[2:]).strip('\n').splitlines()
# Handle repeatable booleans.
if key in repeatable and key in booleans:
try:
return config.getint(section, key[2:])
except ValueError as exc:
raise DocoptcfgFileError('Repeatable boolean option "{0}" invalid.'.format(key[2:]), str(exc))
# Handle non-repeatable booleans.
if key in booleans:
try:
return config.getboolean(section, key[2:])
except ValueError as exc:
raise DocoptcfgFileError('Boolean option "{0}" invalid.'.format(key[2:]), str(exc))
# Handle the rest.
return str(config.get(section, key[2:]))
|
[
"def",
"get_opt",
"(",
"key",
",",
"config",
",",
"section",
",",
"booleans",
",",
"repeatable",
")",
":",
"# Handle repeatable non-boolean options (e.g. --file=file1.txt --file=file2.txt).",
"if",
"key",
"in",
"repeatable",
"and",
"key",
"not",
"in",
"booleans",
":",
"return",
"config",
".",
"get",
"(",
"section",
",",
"key",
"[",
"2",
":",
"]",
")",
".",
"strip",
"(",
"'\\n'",
")",
".",
"splitlines",
"(",
")",
"# Handle repeatable booleans.",
"if",
"key",
"in",
"repeatable",
"and",
"key",
"in",
"booleans",
":",
"try",
":",
"return",
"config",
".",
"getint",
"(",
"section",
",",
"key",
"[",
"2",
":",
"]",
")",
"except",
"ValueError",
"as",
"exc",
":",
"raise",
"DocoptcfgFileError",
"(",
"'Repeatable boolean option \"{0}\" invalid.'",
".",
"format",
"(",
"key",
"[",
"2",
":",
"]",
")",
",",
"str",
"(",
"exc",
")",
")",
"# Handle non-repeatable booleans.",
"if",
"key",
"in",
"booleans",
":",
"try",
":",
"return",
"config",
".",
"getboolean",
"(",
"section",
",",
"key",
"[",
"2",
":",
"]",
")",
"except",
"ValueError",
"as",
"exc",
":",
"raise",
"DocoptcfgFileError",
"(",
"'Boolean option \"{0}\" invalid.'",
".",
"format",
"(",
"key",
"[",
"2",
":",
"]",
")",
",",
"str",
"(",
"exc",
")",
")",
"# Handle the rest.",
"return",
"str",
"(",
"config",
".",
"get",
"(",
"section",
",",
"key",
"[",
"2",
":",
"]",
")",
")"
] |
Get one value from config file.
:raise DocoptcfgFileError: If an option is the wrong type.
:param str key: Option long name (e.g. --config).
:param ConfigParser config: ConfigParser instance with config file data already loaded.
:param str section: Section in config file to focus on.
:param iter booleans: Option long names of boolean/flag types.
:param iter repeatable: Option long names of repeatable options.
:return: Value to set in the defaults dict.
|
[
"Get",
"one",
"value",
"from",
"config",
"file",
"."
] |
3746dc263549f7f3ef5a86e739d588546b084bde
|
https://github.com/Robpol86/docoptcfg/blob/3746dc263549f7f3ef5a86e739d588546b084bde/docoptcfg.py#L146-L178
|
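A minimal sketch exercising all three branches against an in-memory config (Python 3 configparser; the section and option names are hypothetical):

from configparser import ConfigParser

config = ConfigParser()
config.read_string('''[prog]
file = a.txt
    b.txt
flag = true
verbose = 3
''')

# Repeatable non-boolean: the multi-line value is split into a list.
get_opt('--file', config, 'prog', booleans=set(), repeatable={'--file'})      # ['a.txt', 'b.txt']
# Non-repeatable boolean: parsed with getboolean().
get_opt('--flag', config, 'prog', booleans={'--flag'}, repeatable=set())      # True
# Repeatable boolean: parsed as a count with getint().
get_opt('--verbose', config, 'prog', booleans={'--verbose'}, repeatable={'--verbose'})  # 3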
243,897
|
Robpol86/docoptcfg
|
docoptcfg.py
|
values_from_file
|
def values_from_file(docopt_dict, config_option, settable, booleans, repeatable):
"""Parse config file and read settable values.
Can be overridden by both command line arguments and environment variables.
:raise DocoptcfgError: If `config_option` isn't found in docstring.
:raise DocoptcfgFileError: On any error while trying to read and parse config file.
:param dict docopt_dict: Dictionary from docopt with environment variable defaults merged in by docoptcfg().
:param str config_option: Config option long name with file path as its value.
:param iter settable: Option long names available to set by config file.
:param iter booleans: Option long names of boolean/flag types.
:param iter repeatable: Option long names of repeatable options.
:return: Settable values.
:rtype: dict
"""
section = docopt.DocoptExit.usage.split()[1]
settable = set(o for o in settable if o != config_option)
config = ConfigParser()
defaults = dict()
# Sanity checks.
if config_option not in docopt_dict:
raise DocoptcfgError
if docopt_dict[config_option] is None or not settable:
return defaults
# Read config file.
path = DocoptcfgFileError.FILE_PATH = docopt_dict[config_option]
try:
with open(path) as handle:
if hasattr(config, 'read_file'):
config.read_file(handle)
else:
getattr(config, 'readfp')(handle)
except Error as exc:
raise DocoptcfgFileError('Unable to parse config file.', str(exc))
except IOError as exc:
raise DocoptcfgFileError('Unable to read config file.', str(exc))
# Make sure section is in config file.
if not config.has_section(section):
raise DocoptcfgFileError('Section [{0}] not in config file.'.format(section))
# Parse config file.
for key in settable:
if config.has_option(section, key[2:]):
defaults[key] = get_opt(key, config, section, booleans, repeatable)
return defaults
|
python
|
def values_from_file(docopt_dict, config_option, settable, booleans, repeatable):
"""Parse config file and read settable values.
Can be overridden by both command line arguments and environment variables.
:raise DocoptcfgError: If `config_option` isn't found in docstring.
:raise DocoptcfgFileError: On any error while trying to read and parse config file.
:param dict docopt_dict: Dictionary from docopt with environment variable defaults merged in by docoptcfg().
:param str config_option: Config option long name with file path as its value.
:param iter settable: Option long names available to set by config file.
:param iter booleans: Option long names of boolean/flag types.
:param iter repeatable: Option long names of repeatable options.
:return: Settable values.
:rtype: dict
"""
section = docopt.DocoptExit.usage.split()[1]
settable = set(o for o in settable if o != config_option)
config = ConfigParser()
defaults = dict()
# Sanity checks.
if config_option not in docopt_dict:
raise DocoptcfgError
if docopt_dict[config_option] is None or not settable:
return defaults
# Read config file.
path = DocoptcfgFileError.FILE_PATH = docopt_dict[config_option]
try:
with open(path) as handle:
if hasattr(config, 'read_file'):
config.read_file(handle)
else:
getattr(config, 'readfp')(handle)
except Error as exc:
raise DocoptcfgFileError('Unable to parse config file.', str(exc))
except IOError as exc:
raise DocoptcfgFileError('Unable to read config file.', str(exc))
# Make sure section is in config file.
if not config.has_section(section):
raise DocoptcfgFileError('Section [{0}] not in config file.'.format(section))
# Parse config file.
for key in settable:
if config.has_option(section, key[2:]):
defaults[key] = get_opt(key, config, section, booleans, repeatable)
return defaults
|
[
"def",
"values_from_file",
"(",
"docopt_dict",
",",
"config_option",
",",
"settable",
",",
"booleans",
",",
"repeatable",
")",
":",
"section",
"=",
"docopt",
".",
"DocoptExit",
".",
"usage",
".",
"split",
"(",
")",
"[",
"1",
"]",
"settable",
"=",
"set",
"(",
"o",
"for",
"o",
"in",
"settable",
"if",
"o",
"!=",
"config_option",
")",
"config",
"=",
"ConfigParser",
"(",
")",
"defaults",
"=",
"dict",
"(",
")",
"# Sanity checks.",
"if",
"config_option",
"not",
"in",
"docopt_dict",
":",
"raise",
"DocoptcfgError",
"if",
"docopt_dict",
"[",
"config_option",
"]",
"is",
"None",
"or",
"not",
"settable",
":",
"return",
"defaults",
"# Read config file.",
"path",
"=",
"DocoptcfgFileError",
".",
"FILE_PATH",
"=",
"docopt_dict",
"[",
"config_option",
"]",
"try",
":",
"with",
"open",
"(",
"path",
")",
"as",
"handle",
":",
"if",
"hasattr",
"(",
"config",
",",
"'read_file'",
")",
":",
"config",
".",
"read_file",
"(",
"handle",
")",
"else",
":",
"getattr",
"(",
"config",
",",
"'readfp'",
")",
"(",
"handle",
")",
"except",
"Error",
"as",
"exc",
":",
"raise",
"DocoptcfgFileError",
"(",
"'Unable to parse config file.'",
",",
"str",
"(",
"exc",
")",
")",
"except",
"IOError",
"as",
"exc",
":",
"raise",
"DocoptcfgFileError",
"(",
"'Unable to read config file.'",
",",
"str",
"(",
"exc",
")",
")",
"# Make sure section is in config file.",
"if",
"not",
"config",
".",
"has_section",
"(",
"section",
")",
":",
"raise",
"DocoptcfgFileError",
"(",
"'Section [{0}] not in config file.'",
".",
"format",
"(",
"section",
")",
")",
"# Parse config file.",
"for",
"key",
"in",
"settable",
":",
"if",
"config",
".",
"has_option",
"(",
"section",
",",
"key",
"[",
"2",
":",
"]",
")",
":",
"defaults",
"[",
"key",
"]",
"=",
"get_opt",
"(",
"key",
",",
"config",
",",
"section",
",",
"booleans",
",",
"repeatable",
")",
"return",
"defaults"
] |
Parse config file and read settable values.
Can be overridden by both command line arguments and environment variables.
:raise DocoptcfgError: If `config_option` isn't found in docstring.
:raise DocoptcfgFileError: On any error while trying to read and parse config file.
:param dict docopt_dict: Dictionary from docopt with environment variable defaults merged in by docoptcfg().
:param str config_option: Config option long name with file path as its value.
:param iter settable: Option long names available to set by config file.
:param iter booleans: Option long names of boolean/flag types.
:param iter repeatable: Option long names of repeatable options.
:return: Settable values.
:rtype: dict
|
[
"Parse",
"config",
"file",
"and",
"read",
"settable",
"values",
"."
] |
3746dc263549f7f3ef5a86e739d588546b084bde
|
https://github.com/Robpol86/docoptcfg/blob/3746dc263549f7f3ef5a86e739d588546b084bde/docoptcfg.py#L181-L231
|
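A hedged sketch; in real use docoptcfg() runs docopt() first, which sets docopt.DocoptExit.usage, whose second token ('prog' here) names the config-file section. The file name and option are hypothetical:

import docopt

docopt.DocoptExit.usage = 'Usage: prog [options]'  # normally set by docopt() itself

docopt_dict = {'--config': 'prog.ini', '--name': None}
defaults = values_from_file(docopt_dict, '--config', {'--config', '--name'}, set(), set())
# With a prog.ini containing a [prog] section and 'name = demo',
# this returns {'--name': 'demo'}; a missing or malformed file raises DocoptcfgFileError.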
243,898
|
TC01/calcpkg
|
calcrepo/repo.py
|
CalcRepository.setRepoData
|
def setRepoData(self, searchString, category="", extension="", math=False, game=False, searchFiles=False):
"""Call this function with all the settings to use for future operations on a repository, must be called FIRST"""
self.searchString = searchString
self.category = category
self.math = math
self.game = game
self.searchFiles = searchFiles
self.extension = extension
|
python
|
def setRepoData(self, searchString, category="", extension="", math=False, game=False, searchFiles=False):
"""Call this function with all the settings to use for future operations on a repository, must be called FIRST"""
self.searchString = searchString
self.category = category
self.math = math
self.game = game
self.searchFiles = searchFiles
self.extension = extension
|
[
"def",
"setRepoData",
"(",
"self",
",",
"searchString",
",",
"category",
"=",
"\"\"",
",",
"extension",
"=",
"\"\"",
",",
"math",
"=",
"False",
",",
"game",
"=",
"False",
",",
"searchFiles",
"=",
"False",
")",
":",
"self",
".",
"searchString",
"=",
"searchString",
"self",
".",
"category",
"=",
"category",
"self",
".",
"math",
"=",
"math",
"self",
".",
"game",
"=",
"game",
"self",
".",
"searchFiles",
"=",
"searchFiles",
"self",
".",
"extension",
"=",
"extension"
] |
Call this function with all the settings to use for future operations on a repository, must be called FIRST
|
[
"Call",
"this",
"function",
"with",
"all",
"the",
"settings",
"to",
"use",
"for",
"future",
"operations",
"on",
"a",
"repository",
"must",
"be",
"called",
"FIRST"
] |
5168f606264620a090b42a64354331d208b00d5f
|
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L37-L44
|
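A sketch, assuming a CalcRepository instance has already been constructed (construction arguments are repository-specific and not shown in this record):

# Hypothetical search settings: games matching 'tetris' with a given extension.
repo.setRepoData('tetris', category='games', extension='.8xp', game=True)

# Per the docstring, this must run before any search or download operation on repo.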
243,899
|
TC01/calcpkg
|
calcrepo/repo.py
|
CalcRepository.setOutputObject
|
def setOutputObject(self, newOutput=output.CalcpkgOutput(True, True)):
"""Set an object where all output from calcpkg will be redirected to for this repository"""
self.output = newOutput
|
python
|
def setOutputObject(self, newOutput=output.CalcpkgOutput(True, True)):
"""Set an object where all output from calcpkg will be redirected to for this repository"""
self.output = newOutput
|
[
"def",
"setOutputObject",
"(",
"self",
",",
"newOutput",
"=",
"output",
".",
"CalcpkgOutput",
"(",
"True",
",",
"True",
")",
")",
":",
"self",
".",
"output",
"=",
"newOutput"
] |
Set an object where all output from calcpkg will be redirected to for this repository
|
[
"Set",
"an",
"object",
"where",
"all",
"output",
"from",
"calcpkg",
"will",
"be",
"redirected",
"to",
"for",
"this",
"repository"
] |
5168f606264620a090b42a64354331d208b00d5f
|
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L46-L48
|
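A sketch of overriding the default output sink. Note that the default output.CalcpkgOutput(True, True) is evaluated once at function-definition time, so every call that omits the argument shares a single object; callers wanting isolated state should pass their own instance. The constructor flags below mirror the record, but their meaning is assumed:

quiet = output.CalcpkgOutput(False, False)  # hypothetical 'silent' configuration
repo.setOutputObject(quiet)  # subsequent calcpkg output for repo goes to quiet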