| _id (string) | title (string) | partition (string) | text (string) | language (string) | meta_information (dict) |
|---|---|---|---|---|---|
q10400
|
Request.body
|
train
|
def body(self):
    """return the raw version of the body"""
    body = None
    if self.body_input:
        body = self.body_input.read(int(self.get_header('content-length', -1)))
    return body
|
python
|
{
"resource": ""
}
|
q10401
|
Request.body_kwargs
|
train
|
def body_kwargs(self):
    """
    the request body, if this is a POST request

    this tries to do the right thing with the body, so if you have set the body and
    the content type is json, then it will return the body json decoded, if you need
    the original string body, use body

    example --
        self.body = '{"foo":{"name":"bar"}}'
        b = self.body_kwargs # dict with: {"foo": { "name": "bar"}}
        print self.body # string with: '{"foo":{"name":"bar"}}'
    """
    body_kwargs = {}
    ct = self.get_header("content-type")
    if ct:
        ct = ct.lower()
        if ct.rfind("json") >= 0:
            body = self.body
            if body:
                body_kwargs = json.loads(body)
        else:
            if self.body_input:
                body = RequestBody(
                    fp=self.body_input,
                    headers=self.headers,
                    environ=self.environ
                    #environ=self.raw_request
                )
                body_kwargs = dict(body)
            else:
                body = self.body
                if body:
                    body_kwargs = self._parse_query_str(body)
    return body_kwargs
|
python
|
{
"resource": ""
}
|
q10402
|
Request.kwargs
|
train
|
def kwargs(self):
    """combine GET and POST params to be passed to the controller"""
    kwargs = dict(self.query_kwargs)
    kwargs.update(self.body_kwargs)
    return kwargs
|
python
|
{
"resource": ""
}
|
q10403
|
Request.get_auth_bearer
|
train
|
def get_auth_bearer(self):
    """return the bearer token in the authorization header if it exists"""
    access_token = ''
    auth_header = self.get_header('authorization')
    if auth_header:
        m = re.search(r"^Bearer\s+(\S+)$", auth_header, re.I)
        if m: access_token = m.group(1)
    return access_token
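
# --- illustrative usage sketch (added, not part of the original source) ---
# The same pattern can be exercised standalone to show what this method extracts
# from an incoming header; only the stdlib `re` module is assumed and the header
# value below is made up for demonstration.
import re

_example_header = "Bearer abc.def.ghi"
_m = re.search(r"^Bearer\s+(\S+)$", _example_header, re.I)
_token = _m.group(1) if _m else ''  # -> "abc.def.ghi"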
|
python
|
{
"resource": ""
}
|
q10404
|
Request.get_auth_basic
|
train
|
def get_auth_basic(self):
    """return the username and password of a basic auth header if it exists"""
    username = ''
    password = ''
    auth_header = self.get_header('authorization')
    if auth_header:
        m = re.search(r"^Basic\s+(\S+)$", auth_header, re.I)
        if m:
            auth_str = Base64.decode(m.group(1))
            username, password = auth_str.split(':', 1)
    return username, password
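
# --- illustrative usage sketch (added, not part of the original source) ---
# A minimal standalone equivalent of the decode step, using the stdlib `base64`
# module in place of the project's `Base64` helper (that substitution is an
# assumption made purely for illustration).
import base64

_encoded = base64.b64encode(b"alice:s3cret").decode("ascii")
_auth_str = base64.b64decode(_encoded).decode("ascii")
_username, _password = _auth_str.split(':', 1)  # -> ("alice", "s3cret")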
|
python
|
{
"resource": ""
}
|
q10405
|
Response.code
|
train
|
def code(self):
    """the http status code to return to the client, by default, 200 if a body is present otherwise 204"""
    code = getattr(self, '_code', None)
    if not code:
        if self.has_body():
            code = 200
        else:
            code = 204
    return code
|
python
|
{
"resource": ""
}
|
q10406
|
Response.normalize_body
|
train
|
def normalize_body(self, b):
    """return the body as a string, formatted to the appropriate content type

    :param b: mixed, the current raw body
    :returns: unicode string
    """
    if b is None: return ''
    if self.is_json():
        # TODO ???
        # I don't like this, if we have a content type but it isn't one
        # of the supported ones we were returning the exception, which threw
        # Jarid off, but now it just returns a string, which is not best either
        # my thought is we could have a body_type_subtype method that would
        # make it possible to easily handle custom types
        # eg, "application/json" would become: self.body_application_json(b, is_error)
        b = json.dumps(b, cls=ResponseBody)
    else:
        # just return a string representation of body if no content type
        b = String(b, self.encoding)
    return b
|
python
|
{
"resource": ""
}
|
q10407
|
TargetDecorator.normalize_target_params
|
train
|
def normalize_target_params(self, request, controller_args, controller_kwargs):
    """get params ready for calling target

    this method exists because child classes might only really need certain params
    passed to the method, this allows the child classes to decide what their
    target methods need

    :param request: the http.Request instance for this specific request
    :param controller_args: the arguments that will be passed to the controller
    :param controller_kwargs: the key/val arguments that will be passed to the
        controller, these usually come from query strings and post bodies
    :returns: a tuple (list, dict) that correspond to the *args, **kwargs that
        will be passed to the target() method
    """
    return [], dict(
        request=request,
        controller_args=controller_args,
        controller_kwargs=controller_kwargs
    )
|
python
|
{
"resource": ""
}
|
q10408
|
TargetDecorator.handle_target
|
train
|
def handle_target(self, request, controller_args, controller_kwargs):
    """Internal method for this class

    handles normalizing the passed in values from the decorator using
    .normalize_target_params() and then passes them to the set .target()
    """
    try:
        param_args, param_kwargs = self.normalize_target_params(
            request=request,
            controller_args=controller_args,
            controller_kwargs=controller_kwargs
        )
        ret = self.target(*param_args, **param_kwargs)
        if not ret:
            raise ValueError("{} check failed".format(self.__class__.__name__))
    except CallError:
        raise
    except Exception as e:
        self.handle_error(e)
|
python
|
{
"resource": ""
}
|
q10409
|
TargetDecorator.decorate
|
train
|
def decorate(self, func, target, *anoop, **kwnoop):
    """decorate the passed in func calling target when func is called

    :param func: the function being decorated
    :param target: the target that will be run when func is called
    :returns: the decorated func
    """
    if target:
        self.target = target

    def decorated(decorated_self, *args, **kwargs):
        self.handle_target(
            request=decorated_self.request,
            controller_args=args,
            controller_kwargs=kwargs
        )
        return func(decorated_self, *args, **kwargs)

    return decorated
|
python
|
{
"resource": ""
}
|
q10410
|
param.normalize_flags
|
train
|
def normalize_flags(self, flags):
    """normalize the flags to make sure needed values are there

    after this method is called self.flags is available

    :param flags: the flags that will be normalized
    """
    flags['type'] = flags.get('type', None)
    paction = flags.get('action', 'store')
    if paction == 'store_false':
        flags['default'] = True
        flags['type'] = bool
    elif paction == 'store_true':
        flags['default'] = False
        flags['type'] = bool
    prequired = False if 'default' in flags else flags.get('required', True)
    flags["action"] = paction
    flags["required"] = prequired
    self.flags = flags
|
python
|
{
"resource": ""
}
|
q10411
|
param.normalize_type
|
train
|
def normalize_type(self, names):
    """Decide if this param is an arg or a kwarg and set appropriate internal flags"""
    self.name = names[0]
    self.is_kwarg = False
    self.is_arg = False
    self.names = []
    try:
        # http://stackoverflow.com/a/16488383/5006 uses ask forgiveness because
        # of py2/3 differences of integer check
        self.index = int(self.name)
        self.name = ""
        self.is_arg = True
    except ValueError:
        self.is_kwarg = True
        self.names = names
|
python
|
{
"resource": ""
}
|
q10412
|
param.normalize_param
|
train
|
def normalize_param(self, slf, args, kwargs):
    """this is where all the magic happens, this will try and find the param and
    put its value in kwargs if it has a default and stuff"""
    if self.is_kwarg:
        kwargs = self.normalize_kwarg(slf.request, kwargs)
    else:
        args = self.normalize_arg(slf.request, args)
    return slf, args, kwargs
|
python
|
{
"resource": ""
}
|
q10413
|
param.find_kwarg
|
train
|
def find_kwarg(self, request, names, required, default, kwargs):
    """actually try to retrieve names key from params dict

    :param request: the current request instance, handy for child classes
    :param names: the names this kwarg can be
    :param required: True if a name has to be found in kwargs
    :param default: the default value if name isn't found
    :param kwargs: the kwargs that will be used to find the value
    :returns: tuple, found_name, val where found_name is the actual name kwargs contained
    """
    val = default
    found_name = ''
    for name in names:
        if name in kwargs:
            val = kwargs[name]
            found_name = name
            break
    if not found_name and required:
        raise ValueError("required param {} does not exist".format(self.name))
    return found_name, val
|
python
|
{
"resource": ""
}
|
q10414
|
WebsocketClient.open
|
train
|
def open(cls, *args, **kwargs):
    """just something to make it easier to quickly open a connection, do something
    and then close it"""
    c = cls(*args, **kwargs)
    c.connect()
    try:
        yield c
    finally:
        c.close()
|
python
|
{
"resource": ""
}
|
q10415
|
WebsocketClient.connect
|
train
|
def connect(self, path="", headers=None, query=None, timeout=0, **kwargs):
"""
make the actual connection to the websocket
:param headers: dict, key/val pairs of any headers to add to connection, if
you would like to override headers just pass in an empty value
:param query: dict, any query string params you want to send up with the connection
url
:returns: Payload, this will return the CONNECT response from the websocket
"""
ret = None
ws_url = self.get_fetch_url(path, query)
ws_headers = self.get_fetch_headers("GET", headers)
ws_headers = ['{}: {}'.format(h[0], h[1]) for h in ws_headers.items() if h[1]]
timeout = self.get_timeout(timeout=timeout, **kwargs)
self.set_trace(kwargs.pop("trace", False))
#pout.v(websocket_url, websocket_headers, self.query_kwargs, self.headers)
try:
logger.debug("{} connecting to {}".format(self.client_id, ws_url))
self.ws = websocket.create_connection(
ws_url,
header=ws_headers,
timeout=timeout,
sslopt={'cert_reqs':ssl.CERT_NONE},
)
ret = self.recv_callback(callback=lambda r: r.uuid == "CONNECT")
if ret.code >= 400:
raise IOError("Failed to connect with code {}".format(ret.code))
# self.headers = headers
# self.query_kwargs = query_kwargs
except websocket.WebSocketTimeoutException:
raise IOError("Failed to connect within {} seconds".format(timeout))
except websocket.WebSocketException as e:
raise IOError("Failed to connect with error: {}".format(e))
except socket.error as e:
# this is an IOError, I just wanted to be aware of that, most common
# problem is: [Errno 111] Connection refused
raise
return ret
|
python
|
{
"resource": ""
}
|
q10416
|
WebsocketClient.fetch
|
train
|
def fetch(self, method, path, query=None, body=None, timeout=0, **kwargs):
"""send a Message
:param method: string, something like "POST" or "GET"
:param path: string, the path part of a uri (eg, /foo/bar)
:param body: dict, what you want to send to "method path"
:param timeout: integer, how long to wait before failing trying to send
"""
ret = None
if not query: query = {}
if not body: body = {}
query.update(body) # body takes precedence
body = query
self.send_count += 1
payload = self.get_fetch_request(method, path, body)
attempts = 1
max_attempts = self.attempts
success = False
while not success:
kwargs['timeout'] = timeout
try:
try:
if not self.connected: self.connect(path)
with self.wstimeout(**kwargs) as timeout:
kwargs['timeout'] = timeout
logger.debug('{} send {} attempt {}/{} with timeout {}'.format(
self.client_id,
payload.uuid,
attempts,
max_attempts,
timeout
))
sent_bits = self.ws.send(payload.payload)
logger.debug('{} sent {} bytes'.format(self.client_id, sent_bits))
if sent_bits:
ret = self.fetch_response(payload, **kwargs)
if ret:
success = True
except websocket.WebSocketConnectionClosedException as e:
self.ws.shutdown()
raise IOError("connection is not open but reported it was open: {}".format(e))
except (IOError, TypeError) as e:
logger.debug('{} error on send attempt {}: {}'.format(self.client_id, attempts, e))
success = False
finally:
if not success:
attempts += 1
if attempts > max_attempts:
raise
else:
timeout *= 2
if (attempts / max_attempts) > 0.50:
logger.debug(
"{} closing and re-opening connection for next attempt".format(self.client_id)
)
self.close()
return ret
|
python
|
{
"resource": ""
}
|
q10417
|
WebsocketClient.ping
|
train
|
def ping(self, timeout=0, **kwargs):
"""THIS DOES NOT WORK, UWSGI DOES NOT RESPOND TO PINGS"""
# http://stackoverflow.com/a/2257449/5006
def rand_id(size=8, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
payload = rand_id()
self.ws.ping(payload)
opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_PONG], **kwargs)
if data != payload:
raise IOError("Pinged server but did not receive correct pong")
|
python
|
{
"resource": ""
}
|
q10418
|
WebsocketClient.recv_raw
|
train
|
def recv_raw(self, timeout, opcodes, **kwargs):
"""this is very internal, it will return the raw opcode and data if they
match the passed in opcodes"""
orig_timeout = self.get_timeout(timeout)
timeout = orig_timeout
while timeout > 0.0:
start = time.time()
if not self.connected: self.connect(timeout=timeout, **kwargs)
with self.wstimeout(timeout, **kwargs) as timeout:
logger.debug('{} waiting to receive for {} seconds'.format(self.client_id, timeout))
try:
opcode, data = self.ws.recv_data()
if opcode in opcodes:
timeout = 0.0
break
else:
if opcode == websocket.ABNF.OPCODE_CLOSE:
raise websocket.WebSocketConnectionClosedException()
except websocket.WebSocketTimeoutException:
pass
except websocket.WebSocketConnectionClosedException:
# bug in Websocket.recv_data(), this should be done by Websocket
try:
self.ws.shutdown()
except AttributeError:
pass
#raise EOFError("websocket closed by server and reconnection did nothing")
if timeout:
stop = time.time()
timeout -= (stop - start)
else:
break
if timeout < 0.0:
raise IOError("recv timed out in {} seconds".format(orig_timeout))
return opcode, data
|
python
|
{
"resource": ""
}
|
q10419
|
WebsocketClient.get_fetch_response
|
train
|
def get_fetch_response(self, raw):
    """This just makes the payload instance more HTTPClient like"""
    p = Payload(raw)
    p._body = p.body
    return p
|
python
|
{
"resource": ""
}
|
q10420
|
WebsocketClient.recv
|
train
|
def recv(self, timeout=0, **kwargs):
    """this will receive data and convert it into a message, really this is more
    of an internal method, it is used in recv_callback and recv_msg"""
    opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_TEXT], **kwargs)
    return self.get_fetch_response(data)
|
python
|
{
"resource": ""
}
|
q10421
|
WebsocketClient.recv_callback
|
train
|
def recv_callback(self, callback, **kwargs):
"""receive messages and validate them with the callback, if the callback
returns True then the message is valid and will be returned, if False then
this will try and receive another message until timeout is 0"""
payload = None
timeout = self.get_timeout(**kwargs)
full_timeout = timeout
while timeout > 0.0:
kwargs['timeout'] = timeout
start = time.time()
payload = self.recv(**kwargs)
if callback(payload):
break
payload = None
stop = time.time()
elapsed = stop - start
timeout -= elapsed
if not payload:
raise IOError("recv_callback timed out in {}".format(full_timeout))
return payload
|
python
|
{
"resource": ""
}
|
q10422
|
Call.create_controller
|
train
|
def create_controller(self):
"""Create a controller to handle the request
:returns: Controller, this Controller instance should be able to handle
the request
"""
body = None
req = self.request
res = self.response
rou = self.router
con = None
controller_info = {}
try:
controller_info = rou.find(req, res)
except IOError as e:
logger.warning(str(e), exc_info=True)
raise CallError(
408,
"The client went away before the request body was retrieved."
)
except (ImportError, AttributeError, TypeError) as e:
exc_info = sys.exc_info()
logger.warning(str(e), exc_info=exc_info)
raise CallError(
404,
"{} not found because of {} \"{}\" on {}:{}".format(
req.path,
exc_info[0].__name__,
str(e),
os.path.basename(exc_info[2].tb_frame.f_code.co_filename),
exc_info[2].tb_lineno
)
)
else:
con = controller_info['class_instance']
return con
|
python
|
{
"resource": ""
}
|
q10423
|
Call.handle
|
train
|
def handle(self):
"""Called from the interface to actually handle the request."""
body = None
req = self.request
res = self.response
rou = self.router
con = None
start = time.time()
try:
con = self.create_controller()
con.call = self
self.controller = con
if not self.quiet:
con.log_start(start)
# the controller handle method will manipulate self.response, it first
# tries to find a handle_HTTP_METHOD method, if it can't find that it
# will default to the handle method (which is implemented on Controller).
# method arguments are passed in so child classes can add decorators
# just like the HTTP_METHOD that will actually handle the request
controller_args, controller_kwargs = con.find_method_params()
controller_method = getattr(con, "handle_{}".format(req.method), None)
if not controller_method:
controller_method = getattr(con, "handle")
if not self.quiet:
logger.debug("Using handle method: {}.{}".format(
con.__class__.__name__,
controller_method.__name__
))
controller_method(*controller_args, **controller_kwargs)
except Exception as e:
self.handle_error(e) # this will manipulate self.response
finally:
if res.code == 204:
res.headers.pop('Content-Type', None)
res.body = None # just to be sure since body could've been ""
if con:
if not self.quiet:
con.log_stop(start)
return res
|
python
|
{
"resource": ""
}
|
q10424
|
Call.handle_error
|
train
|
def handle_error(self, e, **kwargs):
"""if an exception is raised while trying to handle the request it will
go through this method
This method will set the response body and then also call Controller.handle_error
for further customization if the Controller is available
:param e: Exception, the error that was raised
:param **kwargs: dict, any other information that might be handy
"""
req = self.request
res = self.response
con = self.controller
if isinstance(e, CallStop):
logger.info(str(e), exc_info=True)
res.code = e.code
res.add_headers(e.headers)
res.body = e.body
elif isinstance(e, Redirect):
logger.info(str(e), exc_info=True)
res.code = e.code
res.add_headers(e.headers)
res.body = None
elif isinstance(e, (AccessDenied, CallError)):
logger.warning(str(e), exc_info=True)
res.code = e.code
res.add_headers(e.headers)
res.body = e
elif isinstance(e, NotImplementedError):
logger.warning(str(e), exc_info=True)
res.code = 501
res.body = e
elif isinstance(e, TypeError):
e_msg = unicode(e)
if e_msg.startswith(req.method) and 'argument' in e_msg:
logger.debug(e_msg, exc_info=True)
logger.warning(
" ".join([
"Either the path arguments ({} args) or the keyword arguments",
"({} args) for {}.{} do not match the {} handling method's",
"definition"
]).format(
len(req.controller_info["method_args"]),
len(req.controller_info["method_kwargs"]),
req.controller_info['module_name'],
req.controller_info['class_name'],
req.method
)
)
res.code = 405
else:
logger.exception(e)
res.code = 500
res.body = e
else:
logger.exception(e)
res.code = 500
res.body = e
if con:
error_method = getattr(con, "handle_{}_error".format(req.method), None)
if not error_method:
error_method = getattr(con, "handle_error")
logger.debug("Using error method: {}.{}".format(
con.__class__.__name__,
error_method.__name__
))
error_method(e, **kwargs)
|
python
|
{
"resource": ""
}
|
q10425
|
Router.module_names
|
train
|
def module_names(self):
"""get all the modules in the controller_prefix
:returns: set, a set of string module names
"""
controller_prefix = self.controller_prefix
_module_name_cache = self._module_name_cache
if controller_prefix in _module_name_cache:
return _module_name_cache[controller_prefix]
module = self.get_module(controller_prefix)
if hasattr(module, "__path__"):
# path attr exists so this is a package
modules = self.find_modules(module.__path__[0], controller_prefix)
else:
# we have a lonely .py file
modules = set([controller_prefix])
_module_name_cache.setdefault(controller_prefix, {})
_module_name_cache[controller_prefix] = modules
return modules
|
python
|
{
"resource": ""
}
|
q10426
|
Router.modules
|
train
|
def modules(self):
    """Returns an iterator of the actual modules, not just their names

    :returns: generator, each module under self.controller_prefix
    """
    for modname in self.module_names:
        module = importlib.import_module(modname)
        yield module
|
python
|
{
"resource": ""
}
|
q10427
|
Router.find_modules
|
train
|
def find_modules(self, path, prefix):
"""recursive method that will find all the submodules of the given module
at prefix with path"""
modules = set([prefix])
# https://docs.python.org/2/library/pkgutil.html#pkgutil.iter_modules
for module_info in pkgutil.iter_modules([path]):
# we want to ignore any "private" modules
if module_info[1].startswith('_'): continue
module_prefix = ".".join([prefix, module_info[1]])
if module_info[2]:
# module is a package
submodules = self.find_modules(os.path.join(path, module_info[1]), module_prefix)
modules.update(submodules)
else:
modules.add(module_prefix)
return modules
|
python
|
{
"resource": ""
}
|
q10428
|
Router.get_module_name
|
train
|
def get_module_name(self, path_args):
"""returns the module_name and remaining path args.
return -- tuple -- (module_name, path_args)"""
controller_prefix = self.controller_prefix
cset = self.module_names
module_name = controller_prefix
mod_name = module_name
while path_args:
mod_name += "." + path_args[0]
if mod_name in cset:
module_name = mod_name
path_args.pop(0)
else:
break
return module_name, path_args
|
python
|
{
"resource": ""
}
|
q10429
|
Router.get_class
|
train
|
def get_class(self, module, class_name):
    """try and get the class_name from the module and make sure it is a valid
    controller"""
    # let's get the class
    class_object = getattr(module, class_name, None)
    if not class_object or not issubclass(class_object, Controller):
        class_object = None
    return class_object
|
python
|
{
"resource": ""
}
|
q10430
|
Controller.OPTIONS
|
train
|
def OPTIONS(self, *args, **kwargs):
"""Handles CORS requests for this controller
if self.cors is False then this will raise a 405, otherwise it sets everything
necessary to satisfy the request in self.response
"""
if not self.cors:
raise CallError(405)
req = self.request
origin = req.get_header('origin')
if not origin:
raise CallError(400, 'Need Origin header')
call_headers = [
('Access-Control-Request-Headers', 'Access-Control-Allow-Headers'),
('Access-Control-Request-Method', 'Access-Control-Allow-Methods')
]
for req_header, res_header in call_headers:
v = req.get_header(req_header)
if v:
self.response.set_header(res_header, v)
else:
raise CallError(400, 'Need {} header'.format(req_header))
other_headers = {
'Access-Control-Allow-Credentials': 'true',
'Access-Control-Max-Age': 3600
}
self.response.add_headers(other_headers)
|
python
|
{
"resource": ""
}
|
q10431
|
Controller.handle
|
train
|
def handle(self, *controller_args, **controller_kwargs):
"""handles the request and returns the response
This should set any response information directly onto self.response
this method has the same signature as the request handling methods
(eg, GET, POST) so subclasses can override this method and add decorators
:param *controller_args: tuple, the path arguments that will be passed to
the request handling method (eg, GET, POST)
:param **controller_kwargs: dict, the query and body params merged together
"""
req = self.request
res = self.response
res.set_header('Content-Type', "{};charset={}".format(
self.content_type,
self.encoding
))
encoding = req.accept_encoding
res.encoding = encoding if encoding else self.encoding
res_method_name = ""
controller_methods = self.find_methods()
#controller_args, controller_kwargs = self.find_method_params()
for controller_method_name, controller_method in controller_methods:
try:
logger.debug("Attempting to handle request with {}.{}.{}".format(
req.controller_info['module_name'],
req.controller_info['class_name'],
controller_method_name
))
res.body = controller_method(
*controller_args,
**controller_kwargs
)
res_method_name = controller_method_name
break
except VersionError as e:
logger.debug("Request {}.{}.{} failed version check [{} not in {}]".format(
req.controller_info['module_name'],
req.controller_info['class_name'],
controller_method_name,
e.request_version,
e.versions
))
except RouteError:
logger.debug("Request {}.{}.{} failed routing check".format(
req.controller_info['module_name'],
req.controller_info['class_name'],
controller_method_name
))
if not res_method_name:
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1
# An origin server SHOULD return the status code 405 (Method Not Allowed)
# if the method is known by the origin server but not allowed for the
# requested resource
raise CallError(405, "Could not find a method to satisfy {}".format(
req.path
))
|
python
|
{
"resource": ""
}
|
q10432
|
Controller.find_methods
|
train
|
def find_methods(self):
    """Find the methods that could satisfy this request

    This will go through and find any method that starts with the request.method,
    so if the request was GET /foo then this would find any methods that start
    with GET

    https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html

    :returns: list of tuples (method_name, method), all the found methods
    """
    methods = []
    req = self.request
    method_name = req.method.upper()
    method_names = set()
    members = inspect.getmembers(self)
    for member_name, member in members:
        if member_name.startswith(method_name):
            if member:
                methods.append((member_name, member))
                method_names.add(member_name)
    if len(methods) == 0:
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1
        # and 501 (Not Implemented) if the method is unrecognized or not
        # implemented by the origin server
        logger.warning("No methods to handle {} found".format(method_name), exc_info=True)
        raise CallError(501, "{} {} not implemented".format(req.method, req.path))
    elif len(methods) > 1 and method_name in method_names:
        raise ValueError(
            " ".join([
                "A multi method {} request should not have any methods named {}.",
                "Instead, all {} methods should use an appropriate decorator",
                "like @route or @version and have a unique name starting with {}_"
            ]).format(
                method_name,
                method_name,
                method_name,
                method_name
            )
        )
    return methods
|
python
|
{
"resource": ""
}
|
q10433
|
Controller.find_method_params
|
train
|
def find_method_params(self):
    """Return the method params

    :returns: tuple (args, kwargs) that will be passed as *args, **kwargs
    """
    req = self.request
    args = req.controller_info["method_args"]
    kwargs = req.controller_info["method_kwargs"]
    return args, kwargs
|
python
|
{
"resource": ""
}
|
q10434
|
Controller.log_start
|
train
|
def log_start(self, start):
"""log all the headers and stuff at the start of the request"""
if not logger.isEnabledFor(logging.INFO): return
try:
req = self.request
logger.info("REQUEST {} {}?{}".format(req.method, req.path, req.query))
logger.info(datetime.datetime.strftime(datetime.datetime.utcnow(), "DATE %Y-%m-%dT%H:%M:%S.%f"))
ip = req.ip
if ip:
logger.info("\tIP ADDRESS: {}".format(ip))
if 'authorization' in req.headers:
logger.info('AUTH {}'.format(req.headers['authorization']))
ignore_hs = set([
'accept-language',
'accept-encoding',
'connection',
'authorization',
'host',
'x-forwarded-for'
])
hs = ["Request Headers..."]
for k, v in req.headers.items():
if k not in ignore_hs:
hs.append("\t{}: {}".format(k, v))
logger.info(os.linesep.join(hs))
except Exception as e:
logger.warn(e, exc_info=True)
|
python
|
{
"resource": ""
}
|
q10435
|
Controller.log_stop
|
train
|
def log_stop(self, start):
    """log a summary line on how the request went"""
    if not logger.isEnabledFor(logging.INFO): return
    stop = time.time()
    get_elapsed = lambda start, stop, multiplier, rnd: round(abs(stop - start) * float(multiplier), rnd)
    elapsed = get_elapsed(start, stop, 1000.00, 1)
    total = "%0.1f ms" % (elapsed)
    logger.info("RESPONSE {} {} in {}".format(self.response.code, self.response.status, total))
|
python
|
{
"resource": ""
}
|
q10436
|
build_lane_from_yaml
|
train
|
def build_lane_from_yaml(path):
"""Builds a `sparklanes.Lane` object from a YAML definition file.
Parameters
----------
path: str
Path to the YAML definition file
Returns
-------
Lane
Lane, built according to definition in YAML file
"""
# Open
with open(path, 'rb') as yaml_definition:
definition = yaml.load(yaml_definition)
# Validate schema
try:
validate_schema(definition)
except SchemaError as exc:
raise LaneSchemaError(**exc.__dict__)
def build(lb_def, branch=False):
"""Function to recursively build the `sparklanes.Lane` object from a YAML definition"""
init_kwargs = {k: lb_def[k] for k in (a for a in ('run_parallel', 'name') if a in lb_def)}
lane_or_branch = Lane(**init_kwargs) if not branch else Branch(**init_kwargs)
for task in lb_def['tasks']:
if 'branch' in task:
branch_def = task['branch']
lane_or_branch.add(build(branch_def, True))
else:
sep = task['class'].rfind('.')
if sep == -1:
raise LaneImportError('Class must include its parent module')
mdl = task['class'][:sep]
cls_ = task['class'][sep + 1:]
try:
cls = getattr(import_module(mdl), cls_)
except ImportError:
raise LaneImportError('Could not find module %s' % mdl)
except AttributeError:
raise LaneImportError('Could not find class %s' % cls_)
args = task['args'] if 'args' in task else []
args = [args] if not isinstance(args, list) else args
kwargs = task['kwargs'] if 'kwargs' in task else {}
lane_or_branch.add(cls, *args, **kwargs)
return lane_or_branch
return build(definition['lane'])
|
python
|
{
"resource": ""
}
|
q10437
|
Lane.add
|
train
|
def add(self, cls_or_branch, *args, **kwargs):
"""Adds a task or branch to the lane.
Parameters
----------
cls_or_branch : Class
*args
Variable length argument list to be passed to `cls_or_branch` during instantiation
**kwargs
Variable length keyword arguments to be passed to `cls_or_branch` during instantiation
Returns
-------
self: Returns `self` to allow method chaining
"""
if isinstance(cls_or_branch, Branch):
self.tasks.append(cls_or_branch) # Add branch with already validated tasks
else:
# Validate
self.__validate_task(cls_or_branch, '__init__', args, kwargs)
# Append
self.tasks.append({'cls_or_branch': cls_or_branch, 'args': args, 'kwargs': kwargs})
return self
|
python
|
{
"resource": ""
}
|
q10438
|
get_historical_data
|
train
|
def get_historical_data(nmr_problems):
    """Get the historical tank data.

    Args:
        nmr_problems (int): the number of problems

    Returns:
        tuple: (observations, nmr_tanks_ground_truth)
    """
    observations = np.tile(np.array([[10, 256, 202, 97]]), (nmr_problems, 1))
    nmr_tanks_ground_truth = np.ones((nmr_problems,)) * 276
    return observations, nmr_tanks_ground_truth
|
python
|
{
"resource": ""
}
|
q10439
|
get_simulated_data
|
train
|
def get_simulated_data(nmr_problems):
"""Simulate some data.
This returns the simulated tank observations and the corresponding ground truth maximum number of tanks.
Args:
nmr_problems (int): the number of problems
Returns:
tuple: (observations, nmr_tanks_ground_truth)
"""
# The number of tanks we observe per problem
nmr_observed_tanks = 10
# Generate some maximum number of tanks. Basically the ground truth of the estimation problem.
nmr_tanks_ground_truth = normal(nmr_problems, 1, mean=250, std=30, ctype='uint')
# Generate some random tank observations
observations = uniform(nmr_problems, nmr_observed_tanks, low=0, high=nmr_tanks_ground_truth, ctype='uint')
return observations, nmr_tanks_ground_truth
|
python
|
{
"resource": ""
}
|
q10440
|
_get_initial_step
|
train
|
def _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes):
    """Get an initial step size to use for every parameter.

    This chooses the step sizes based on the maximum step size and the lower and upper bounds.

    Args:
        parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems,
            p parameters and n samples.
        lower_bounds (list): lower bounds
        upper_bounds (list): upper bounds
        max_step_sizes (list or None): the maximum step size, or the maximum step size per parameter. Defaults to 0.1

    Returns:
        ndarray: for every problem instance the vector with the initial step size for each parameter.
    """
    nmr_params = parameters.shape[1]
    initial_step = np.zeros_like(parameters)

    if max_step_sizes is None:
        max_step_sizes = 0.1
    if isinstance(max_step_sizes, Number):
        max_step_sizes = [max_step_sizes] * nmr_params
    max_step_sizes = np.array(max_step_sizes)

    for ind in range(parameters.shape[1]):
        minimum_allowed_step = np.minimum(np.abs(parameters[:, ind] - lower_bounds[ind]),
                                          np.abs(upper_bounds[ind] - parameters[:, ind]))
        initial_step[:, ind] = np.minimum(minimum_allowed_step, max_step_sizes[ind])

    return initial_step / 2.
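
# --- illustrative usage sketch (added, not part of the original source) ---
# A minimal call with made-up numbers; only numpy and the `Number` ABC (which the
# function body references) are assumed.
import numpy as np
from numbers import Number

_params = np.array([[0.5, 2.0], [0.2, 3.0]])  # 2 problems, 2 parameters
_steps = _get_initial_step(_params, lower_bounds=[0.0, 0.0],
                           upper_bounds=[1.0, 5.0], max_step_sizes=None)
# each entry is min(distance to the nearest bound, 0.1) / 2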
|
python
|
{
"resource": ""
}
|
q10441
|
SimpleConfigAction.apply
|
train
|
def apply(self):
    """Apply the current action to the current runtime configuration."""
    self._old_config = {k: v for k, v in _config.items()}
    self._apply()
|
python
|
{
"resource": ""
}
|
q10442
|
SimpleConfigAction.unapply
|
train
|
def unapply(self):
    """Reset the current configuration to the previous state."""
    for key, value in self._old_config.items():
        _config[key] = value
|
python
|
{
"resource": ""
}
|
q10443
|
Task
|
train
|
def Task(entry): # pylint: disable=invalid-name
"""
Decorator with which classes, who act as tasks in a `Lane`, must be decorated. When a class is
being decorated, it becomes a child of `LaneTask`.
Parameters
----------
entry: The name of the task's "main" method, i.e. the method which is executed when task is run
Returns
-------
wrapper (function): The actual decorator function
"""
if not isinstance(entry, string_types):
# In the event that no argument is supplied to the decorator, python passes the decorated
# class itself as an argument. That way, we can detect if no argument (or an argument of
# invalid type) was supplied. This allows passing of `entry` as both a named kwarg, and
# as an arg. Isn't neat, but for now it suffices.
raise TypeError('When decorating a class with `Task`, a single string argument must be '
'supplied, which specifies the "main" task method, i.e. the class\'s entry '
'point to the task.')
else:
def wrapper(cls):
"""The actual decorator function"""
if isclass(cls):
if not hasattr(cls, entry): # Check if cls has the specified entry method
raise TypeError('Method `%s` not found in class `%s`.' % (entry, cls.__name__))
# We will have to inspect the task class's `__init__` method later (by inspecting
# the arg signature, before it is instantiated). In various circumstances, classes
# will not have an unbound `__init__` method. Let's deal with that now already, by
# assigning an empty, unbound `__init__` method manually, in order to prevent
# errors later on during method inspection (not an issue in Python 3):
# - Whenever a class is not defined as a new-style class in Python 2.7, i.e. a
# sub-class of object, and it does not have a `__init__` method definition, the
# class will not have an attribute `__init__`
# - If a class misses a `__init__` method definition, but is defined as a
# new-style class, attribute `__init__` will be of type `slot wrapper`, which
# cannot be inspected (and it also doesn't seem possible to check if a method is of
# type `slot wrapper`, which is why we manually define one).
if not hasattr(cls, '__init__') or cls.__init__ == object.__init__:
init = MethodType(lambda self: None, None, cls) \
if PY2 else MethodType(lambda self: None, cls)
setattr(cls, '__init__', init)
# Check for attributes that will be overwritten, in order to warn the user
reserved_attributes = ('__getattr__', '__call__', '_entry_mtd', 'cache', 'uncache',
'clear_cache', '_log_lock')
for attr in dir(cls):
if attr in reserved_attributes:
make_default_logger(INTERNAL_LOGGER_NAME).warning(
'Attribute `%s` of class `%s` will be overwritten when decorated with '
'`sparklanes.Task`! Avoid assigning any of the following attributes '
'`%s`', attr, cls.__name__, str(reserved_attributes)
)
assignments = {'_entry_mtd': entry,
'__getattr__': lambda self, name: TaskCache.get(name),
'__init__': cls.__init__,
'_log_lock': Lock()}
for attr in WRAPPER_ASSIGNMENTS:
try:
assignments[attr] = getattr(cls, attr)
except AttributeError:
pass
# Build task as a subclass of LaneTask
return type('Task_%s' % cls.__name__, (LaneTask, cls, object), assignments)
else:
raise TypeError('Only classes can be decorated with `Task`')
return wrapper
|
python
|
{
"resource": ""
}
|
q10444
|
LaneTaskThread.run
|
train
|
def run(self):
    """Overwrites `threading.Thread.run`, to allow handling of exceptions thrown by threads
    from within the main app."""
    self.exc = None
    try:
        self.task()
    except BaseException:
        self.exc = sys.exc_info()
|
python
|
{
"resource": ""
}
|
q10445
|
LaneTaskThread.join
|
train
|
def join(self, timeout=None):
    """Overwrites `threading.Thread.join`, to allow handling of exceptions thrown by threads
    from within the main app."""
    Thread.join(self, timeout=timeout)
    if self.exc:
        msg = "Thread '%s' threw an exception `%s`: %s" \
              % (self.getName(), self.exc[0].__name__, self.exc[1])
        new_exc = LaneExecutionError(msg)
        if PY3:
            raise new_exc.with_traceback(self.exc[2])  # pylint: disable=no-member
        else:
            raise (new_exc.__class__, new_exc, self.exc[2])
|
python
|
{
"resource": ""
}
|
q10446
|
mock_decorator
|
train
|
def mock_decorator(*args, **kwargs):
    """Mocked decorator, needed in the case we need to mock a decorator"""
    def _called_decorator(dec_func):
        @wraps(dec_func)
        def _decorator(*args, **kwargs):
            return dec_func()
        return _decorator
    return _called_decorator
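
# --- illustrative usage sketch (added, not part of the original source) ---
# Applying the mocked decorator to a plain function; only functools.wraps (which
# the decorator itself uses) is assumed.
from functools import wraps

@mock_decorator('ignored', also='ignored')
def _greet():
    return "hello"

assert _greet() == "hello"  # the mock passes the call straight through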
|
python
|
{
"resource": ""
}
|
q10447
|
import_mock
|
train
|
def import_mock(name, *args, **kwargs):
    """Mock all modules starting with one of the mock_modules names."""
    if any(name.startswith(s) for s in mock_modules):
        return MockModule()
    return orig_import(name, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q10448
|
SimpleCLFunction._get_parameter_signatures
|
train
|
def _get_parameter_signatures(self):
    """Get the signature of the parameters for the CL function declaration.

    This should return the list of signatures of the parameters for use inside the function signature.

    Returns:
        list: the signatures of the parameters for the use in the CL code.
    """
    declarations = []
    for p in self.get_parameters():
        new_p = p.get_renamed(p.name.replace('.', '_'))
        declarations.append(new_p.get_declaration())
    return declarations
|
python
|
{
"resource": ""
}
|
q10449
|
SimpleCLFunction._get_cl_dependency_code
|
train
|
def _get_cl_dependency_code(self):
    """Get the CL code for all the dependencies.

    Returns:
        str: The CL code with the actual code.
    """
    code = ''
    for d in self._dependencies:
        code += d.get_cl_code() + "\n"
    return code
|
python
|
{
"resource": ""
}
|
q10450
|
_ProcedureWorker._build_kernel
|
train
|
def _build_kernel(self, kernel_source, compile_flags=()):
    """Convenience function for building the kernel for this worker.

    Args:
        kernel_source (str): the kernel source to use for building the kernel

    Returns:
        cl.Program: a compiled CL kernel
    """
    return cl.Program(self._cl_context, kernel_source).build(' '.join(compile_flags))
|
python
|
{
"resource": ""
}
|
q10451
|
_ProcedureWorker._get_kernel_arguments
|
train
|
def _get_kernel_arguments(self):
    """Get the list of kernel arguments for loading the kernel data elements into the kernel.

    This will use the sorted keys for looping through the kernel input items.

    Returns:
        list of str: the list of parameter definitions
    """
    declarations = []
    for name, data in self._kernel_data.items():
        declarations.extend(data.get_kernel_parameters('_' + name))
    return declarations
|
python
|
{
"resource": ""
}
|
q10452
|
_ProcedureWorker.get_scalar_arg_dtypes
|
train
|
def get_scalar_arg_dtypes(self):
    """Get the location and types of the input scalars.

    Returns:
        list: for every kernel input element either None if the data is a buffer or the numpy data type if
            it is a scalar.
    """
    dtypes = []
    for name, data in self._kernel_data.items():
        dtypes.extend(data.get_scalar_arg_dtypes())
    return dtypes
|
python
|
{
"resource": ""
}
|
q10453
|
_package_and_submit
|
train
|
def _package_and_submit(args):
"""
Packages and submits a job, which is defined in a YAML file, to Spark.
Parameters
----------
args (List): Command-line arguments
"""
args = _parse_and_validate_args(args)
logging.debug(args)
dist = __make_tmp_dir()
try:
__package_dependencies(dist_dir=dist, additional_reqs=args['requirements'],
silent=args['silent'])
__package_app(tasks_pkg=args['package'],
dist_dir=dist,
custom_main=args['main'],
extra_data=args['extra_data'])
__run_spark_submit(lane_yaml=args['yaml'],
dist_dir=dist,
spark_home=args['spark_home'],
spark_args=args['spark_args'],
silent=args['silent'])
except Exception as exc:
__clean_up(dist)
raise exc
__clean_up(dist)
|
python
|
{
"resource": ""
}
|
q10454
|
__run_spark_submit
|
train
|
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent):
"""
Submits the packaged application to spark using a `spark-submit` subprocess
Parameters
----------
lane_yaml (str): Path to the YAML lane definition file
dist_dir (str): Path to the directory where the packaged code is located
spark_args (str): String of any additional spark config args to be passed when submitting
silent (bool): Flag indicating whether job output should be printed to console
"""
# spark-submit binary
cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]
# Supplied spark arguments
if spark_args:
cmd += spark_args
# Packaged App & lane
cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']
cmd += ['--lane', lane_yaml]
logging.info('Submitting to Spark')
logging.debug(str(cmd))
# Submit
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
call(cmd, cwd=dist_dir, env=MY_ENV, **outp)
devnull.close()
|
python
|
{
"resource": ""
}
|
q10455
|
ctype_to_dtype
|
train
|
def ctype_to_dtype(cl_type, mot_float_type='float'):
"""Get the numpy dtype of the given cl_type string.
Args:
cl_type (str): the CL data type to match, for example 'float' or 'float4'.
mot_float_type (str): the C name of the ``mot_float_type``. The dtype will be looked up recursively.
Returns:
dtype: the numpy datatype
"""
if is_vector_ctype(cl_type):
raw_type, vector_length = split_vector_ctype(cl_type)
if raw_type == 'mot_float_type':
if is_vector_ctype(mot_float_type):
raw_type, _ = split_vector_ctype(mot_float_type)
else:
raw_type = mot_float_type
vector_type = raw_type + str(vector_length)
return getattr(cl_array.vec, vector_type)
else:
if cl_type == 'mot_float_type':
cl_type = mot_float_type
data_types = [
('char', np.int8),
('uchar', np.uint8),
('short', np.int16),
('ushort', np.uint16),
('int', np.int32),
('uint', np.uint32),
('long', np.int64),
('ulong', np.uint64),
('float', np.float32),
('double', np.float64),
]
for ctype, dtype in data_types:
if ctype == cl_type:
return dtype
|
python
|
{
"resource": ""
}
|
q10456
|
convert_data_to_dtype
|
train
|
def convert_data_to_dtype(data, data_type, mot_float_type='float'):
"""Convert the given input data to the correct numpy type.
Args:
data (ndarray): The value to convert to the correct numpy type
data_type (str): the data type we need to convert the data to
mot_float_type (str): the data type of the current ``mot_float_type``
Returns:
ndarray: the input data but then converted to the desired numpy data type
"""
scalar_dtype = ctype_to_dtype(data_type, mot_float_type)
if isinstance(data, numbers.Number):
data = scalar_dtype(data)
if is_vector_ctype(data_type):
shape = data.shape
dtype = ctype_to_dtype(data_type, mot_float_type)
ve = np.zeros(shape[:-1], dtype=dtype)
if len(shape) == 1:
for vector_ind in range(shape[0]):
ve[0][vector_ind] = data[vector_ind]
elif len(shape) == 2:
for i in range(data.shape[0]):
for vector_ind in range(data.shape[1]):
ve[i][vector_ind] = data[i, vector_ind]
elif len(shape) == 3:
for i in range(data.shape[0]):
for j in range(data.shape[1]):
for vector_ind in range(data.shape[2]):
ve[i, j][vector_ind] = data[i, j, vector_ind]
return np.require(ve, requirements=['C', 'A', 'O'])
return np.require(data, scalar_dtype, ['C', 'A', 'O'])
|
python
|
{
"resource": ""
}
|
q10457
|
split_vector_ctype
|
train
|
def split_vector_ctype(ctype):
    """Split a vector ctype into a raw ctype and the vector length.

    If the given ctype is not a vector type, we raise an error.

    Args:
        ctype (str): the ctype to possibly split into a raw ctype and the vector length

    Returns:
        tuple: the raw ctype and the vector length
    """
    if not is_vector_ctype(ctype):
        raise ValueError('The given ctype is not a vector type.')
    for vector_length in [2, 3, 4, 8, 16]:
        if ctype.endswith(str(vector_length)):
            vector_str_len = len(str(vector_length))
            return ctype[:-vector_str_len], int(ctype[-vector_str_len:])
|
python
|
{
"resource": ""
}
|
q10458
|
device_type_from_string
|
train
|
def device_type_from_string(cl_device_type_str):
    """Converts values like ``gpu`` to a pyopencl device type string.

    Supported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``. If ``all`` is given, None is returned.

    Args:
        cl_device_type_str (str): The string we want to convert to a device type.

    Returns:
        cl.device_type: the pyopencl device type.
    """
    cl_device_type_str = cl_device_type_str.upper()
    if hasattr(cl.device_type, cl_device_type_str):
        return getattr(cl.device_type, cl_device_type_str)
    return None
|
python
|
{
"resource": ""
}
|
q10459
|
topological_sort
|
train
|
def topological_sort(data):
    """Topological sort the given dictionary structure.

    Args:
        data (dict): dictionary structure where the value is a list of dependencies for that given key.
            For example: ``{'a': (), 'b': ('a',)}``, where ``a`` depends on nothing and ``b`` depends on ``a``.

    Returns:
        tuple: the dependencies in constructor order
    """
    def check_self_dependencies(input_data):
        """Check if there are self dependencies within a node.

        Self dependencies are for example: ``{'a': ('a',)}``.

        Args:
            input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}.

        Raises:
            ValueError: if there are indeed self dependencies
        """
        for k, v in input_data.items():
            if k in v:
                raise ValueError('Self-dependency, {} depends on itself.'.format(k))

    def prepare_input_data(input_data):
        """Prepares the input data by making sets of the dependencies. This automatically removes redundant items.

        Args:
            input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}.

        Returns:
            dict: a copy of the input dict but with sets instead of lists for the dependencies.
        """
        return {k: set(v) for k, v in input_data.items()}

    def find_items_without_dependencies(input_data):
        """This searches the dependencies of all the items for items that have no dependencies.

        For example, suppose the input is: ``{'a': ('b',)}``, then ``a`` depends on ``b`` and ``b`` depends on nothing.
        This function returns ``(b,)`` in this example.

        Args:
            input_data (dict): the input data. Of a structure similar to {key: (list of values), ...}.

        Returns:
            list: the list of items without any dependency.
        """
        return list(reduce(set.union, input_data.values()) - set(input_data.keys()))

    def add_empty_dependencies(data):
        items_without_dependencies = find_items_without_dependencies(data)
        data.update({item: set() for item in items_without_dependencies})

    def get_sorted(input_data):
        data = input_data
        while True:
            ordered = set(item for item, dep in data.items() if len(dep) == 0)
            if not ordered:
                break
            yield ordered
            data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered}
        if len(data) != 0:
            raise ValueError('Cyclic dependencies exist '
                             'among these items: {}'.format(', '.join(repr(x) for x in data.items())))

    check_self_dependencies(data)

    if not len(data):
        return []

    data_copy = prepare_input_data(data)
    add_empty_dependencies(data_copy)

    result = []
    for d in get_sorted(data_copy):
        try:
            d = sorted(d)
        except TypeError:
            d = list(d)
        result.extend(d)
    return result
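
# --- illustrative usage sketch (added, not part of the original source) ---
# A small dependency graph; `reduce` is assumed to come from functools, since the
# function body uses it.
from functools import reduce

_graph = {'b': ('a',), 'c': ('a', 'b'), 'a': ()}
print(topological_sort(_graph))  # -> ['a', 'b', 'c']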
|
python
|
{
"resource": ""
}
|
q10460
|
is_scalar
|
train
|
def is_scalar(value):
    """Test if the given value is a scalar.

    This function also works with memory mapped array values, in contrast to the numpy is_scalar method.

    Args:
        value: the value to test for being a scalar value

    Returns:
        boolean: if the given value is a scalar or not
    """
    return np.isscalar(value) or (isinstance(value, np.ndarray) and (len(np.squeeze(value).shape) == 0))
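
# --- illustrative usage sketch (added, not part of the original source) ---
# Only numpy is assumed.
import numpy as np

print(is_scalar(3.0))               # True
print(is_scalar(np.array(3.0)))     # True, a zero-dimensional array counts as a scalar
print(is_scalar(np.array([1, 2])))  # False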
|
python
|
{
"resource": ""
}
|
q10461
|
all_elements_equal
|
train
|
def all_elements_equal(value):
    """Checks if all elements in the given value are equal to each other.

    If the input is a single value the result is trivial. If not, we compare all the values to see
    if they are exactly the same.

    Args:
        value (ndarray or number): a numpy array or a single number.

    Returns:
        bool: true if all elements are equal to each other, false otherwise
    """
    if is_scalar(value):
        return True
    return np.array(value == value.flatten()[0]).all()
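
# --- illustrative usage sketch (added, not part of the original source) ---
# Assumes numpy and the `is_scalar` helper shown earlier in this collection.
import numpy as np

print(all_elements_equal(7))                     # True, a scalar is trivially "all equal"
print(all_elements_equal(np.full((2, 3), 1.5)))  # True
print(all_elements_equal(np.array([1, 2])))      # False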
|
python
|
{
"resource": ""
}
|
q10462
|
get_single_value
|
train
|
def get_single_value(value):
    """Get a single value out of the given value.

    This is meant to be used after a call to :func:`all_elements_equal` that returned True. With this
    function we return a single number from the input value.

    Args:
        value (ndarray or number): a numpy array or a single number.

    Returns:
        number: a single number from the input

    Raises:
        ValueError: if not all elements are equal
    """
    if not all_elements_equal(value):
        raise ValueError('Not all values are equal to each other.')
    if is_scalar(value):
        return value
    return value.item(0)
|
python
|
{
"resource": ""
}
|
q10463
|
all_logging_disabled
|
train
|
def all_logging_disabled(highest_level=logging.CRITICAL):
    """Disable all logging temporarily.

    A context manager that will prevent any logging messages triggered during the body from being processed.

    Args:
        highest_level: the maximum logging level that is being blocked
    """
    previous_level = logging.root.manager.disable
    logging.disable(highest_level)
    try:
        yield
    finally:
        logging.disable(previous_level)
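
# --- illustrative usage sketch (added, not part of the original source) ---
# The docstring describes a context manager, so in the original source this
# generator is presumably wrapped with `contextlib.contextmanager`; that wrapping
# is an assumption made here for the demonstration.
import logging
from contextlib import contextmanager

_quiet = contextmanager(all_logging_disabled)
with _quiet():
    logging.getLogger(__name__).error("this message is suppressed")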
|
python
|
{
"resource": ""
}
|
q10464
|
split_in_batches
|
train
|
def split_in_batches(nmr_elements, max_batch_size):
    """Split the total number of elements into batches of the specified maximum size.

    Examples::

        split_in_batches(30, 8) -> [(0, 8), (8, 16), (16, 24), (24, 30)]

        for batch_start, batch_end in split_in_batches(2000, 100):
            array[batch_start:batch_end]

    Yields:
        tuple: the start and end point of the next batch
    """
    offset = 0
    elements_left = nmr_elements
    while elements_left > 0:
        next_batch = (offset, offset + min(elements_left, max_batch_size))
        yield next_batch

        batch_size = min(elements_left, max_batch_size)
        elements_left -= batch_size
        offset += batch_size
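
# --- illustrative usage sketch (added, not part of the original source) ---
# Pure Python, no extra dependencies.
print(list(split_in_batches(30, 8)))  # -> [(0, 8), (8, 16), (16, 24), (24, 30)]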
|
python
|
{
"resource": ""
}
|
q10465
|
covariance_to_correlations
|
train
|
def covariance_to_correlations(covariance):
    """Transform a covariance matrix into a correlations matrix.

    This can be seen as dividing a covariance matrix by the outer product of the diagonal.

    As post processing we replace the infinities and the NaNs with zeros and clip the result to [-1, 1].

    Args:
        covariance (ndarray): a matrix of shape (n, p, p) with for n problems the covariance matrix of shape (p, p).

    Returns:
        ndarray: the correlations matrix
    """
    diagonal_ind = np.arange(covariance.shape[1])
    diagonal_els = covariance[:, diagonal_ind, diagonal_ind]
    result = covariance / np.sqrt(diagonal_els[:, :, None] * diagonal_els[:, None, :])
    result[np.isinf(result)] = 0
    return np.clip(np.nan_to_num(result), -1, 1)
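
# --- illustrative usage sketch (added, not part of the original source) ---
# One 2x2 covariance matrix with variances 4 and 9 and covariance 3, which should
# give an off-diagonal correlation of 3 / sqrt(4 * 9) = 0.5. Only numpy is assumed.
import numpy as np

_cov = np.array([[[4.0, 3.0],
                  [3.0, 9.0]]])
print(covariance_to_correlations(_cov))  # diagonal 1.0, off-diagonal 0.5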
|
python
|
{
"resource": ""
}
|
q10466
|
multiprocess_mapping
|
train
|
def multiprocess_mapping(func, iterable):
    """Multiprocess mapping the given function on the given iterable.

    This only works in Linux and Mac systems since Windows has no forking capability. On Windows we fall back on
    single processing. Also, if we reach memory limits we fall back on single cpu processing.

    Args:
        func (func): the function to apply
        iterable (iterable): the iterable with the elements we want to apply the function on
    """
    if os.name == 'nt':  # In Windows there is no fork.
        return list(map(func, iterable))
    try:
        p = multiprocessing.Pool()
        return_data = list(p.imap(func, iterable))
        p.close()
        p.join()
        return return_data
    except OSError:
        return list(map(func, iterable))
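
# --- illustrative usage sketch (added, not part of the original source) ---
# A top-level (picklable) function is used because Pool.imap cannot pickle lambdas;
# the __main__ guard keeps the example safe on spawn-based platforms.
import os
import multiprocessing


def _square(x):
    return x * x


if __name__ == '__main__':
    print(multiprocess_mapping(_square, [1, 2, 3, 4]))  # -> [1, 4, 9, 16]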
|
python
|
{
"resource": ""
}
|
q10467
|
parse_cl_function
|
train
|
def parse_cl_function(cl_code, dependencies=()):
"""Parse the given OpenCL string to a single SimpleCLFunction.
If the string contains more than one function, we will return only the last, with all the other added as a
dependency.
Args:
cl_code (str): the input string containing one or more functions.
dependencies (Iterable[CLCodeObject]): The list of CL libraries this function depends on
Returns:
mot.lib.cl_function.SimpleCLFunction: the CL function for the last function in the given strings.
"""
from mot.lib.cl_function import SimpleCLFunction
def separate_cl_functions(input_str):
"""Separate all the OpenCL functions.
This creates a list of strings, with for each function found the OpenCL code.
Args:
input_str (str): the string containing one or more functions.
Returns:
list: a list of strings, with one string per found CL function.
"""
class Semantics:
def __init__(self):
self._functions = []
def result(self, ast):
return self._functions
def arglist(self, ast):
return '({})'.format(', '.join(ast))
def function(self, ast):
def join(items):
result = ''
for item in items:
if isinstance(item, str):
result += item
else:
result += join(item)
return result
self._functions.append(join(ast).strip())
return ast
return _extract_cl_functions_parser.parse(input_str, semantics=Semantics())
functions = separate_cl_functions(cl_code)
return SimpleCLFunction.from_string(functions[-1], dependencies=list(dependencies or []) + [
SimpleCLFunction.from_string(s) for s in functions[:-1]])
|
python
|
{
"resource": ""
}
|
q10468
|
split_cl_function
|
train
|
def split_cl_function(cl_str):
"""Split an CL function into a return type, function name, parameters list and the body.
Args:
cl_str (str): the CL code to parse and plit into components
Returns:
tuple: string elements for the return type, function name, parameter list and the body
"""
class Semantics:
def __init__(self):
self._return_type = ''
self._function_name = ''
self._parameter_list = []
self._cl_body = ''
def result(self, ast):
return self._return_type, self._function_name, self._parameter_list, self._cl_body
def address_space(self, ast):
self._return_type = ast.strip() + ' '
return ast
def data_type(self, ast):
self._return_type += ''.join(ast).strip()
return ast
def function_name(self, ast):
self._function_name = ast.strip()
return ast
def arglist(self, ast):
if ast != '()':
self._parameter_list = ast
return ast
def body(self, ast):
def join(items):
result = ''
for item in items:
if isinstance(item, str):
result += item
else:
result += join(item)
return result
self._cl_body = join(ast).strip()[1:-1]
return ast
return _split_cl_function_parser.parse(cl_str, semantics=Semantics())
|
python
|
{
"resource": ""
}
|
q10469
|
make_default_logger
|
train
|
def make_default_logger(name=INTERNAL_LOGGER_NAME, level=logging.INFO,
                        fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):
    """Create a logger with the default configuration"""
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if not logger.handlers:
        handler = logging.StreamHandler(sys.stderr)
        handler.setLevel(level)
        formatter = logging.Formatter(fmt)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
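
# --- illustrative usage sketch (added, not part of the original source) ---
# Passing an explicit name so the INTERNAL_LOGGER_NAME default (defined elsewhere
# in the package) is not needed; only the stdlib is assumed and the logger name is
# made up for illustration.
import logging
import sys

_log = make_default_logger(name='sparklanes.demo', level=logging.DEBUG)
_log.info('logger configured with a single stderr handler')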
|
python
|
{
"resource": ""
}
|
q10470
|
CLEnvironment.is_gpu
|
train
|
def is_gpu(self):
    """Check if the device associated with this environment is a GPU.

    Returns:
        boolean: True if the device is a GPU, false otherwise.
    """
    return self._device.get_info(cl.device_info.TYPE) == cl.device_type.GPU
|
python
|
{
"resource": ""
}
|
q10471
|
CLEnvironment.is_cpu
|
train
|
def is_cpu(self):
    """Check if the device associated with this environment is a CPU.

    Returns:
        boolean: True if the device is a CPU, false otherwise.
    """
    return self._device.get_info(cl.device_info.TYPE) == cl.device_type.CPU
|
python
|
{
"resource": ""
}
|
q10472
|
CLEnvironmentFactory.single_device
|
train
|
def single_device(cl_device_type='GPU', platform=None, fallback_to_any_device_type=False):
"""Get a list containing a single device environment, for a device of the given type on the given platform.
This will only fetch devices that support double (possibly only double with a pragma
defined, but still, it should support double).
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be a opencl device type or a string matching 'GPU', 'CPU' or 'ALL'.
platform (opencl platform): The opencl platform to select the devices from
fallback_to_any_device_type (boolean): If True, try to fallback to any possible device in the system.
Returns:
list of CLEnvironment: List with one element, the CL runtime environment requested.
"""
if isinstance(cl_device_type, str):
cl_device_type = device_type_from_string(cl_device_type)
device = None
if platform is None:
platforms = cl.get_platforms()
else:
platforms = [platform]
for platform in platforms:
devices = platform.get_devices(device_type=cl_device_type)
for dev in devices:
if device_supports_double(dev):
try:
env = CLEnvironment(platform, dev)
return [env]
except cl.RuntimeError:
pass
if not device:
if fallback_to_any_device_type:
return cl.get_platforms()[0].get_devices()
else:
raise ValueError('No devices of the specified type ({}) found.'.format(
cl.device_type.to_string(cl_device_type)))
raise ValueError('No suitable OpenCL device found.')
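A hedged usage sketch; the class path in the comment is an assumption about the package layout, and a working OpenCL installation is required:
# e.g. from mot.lib.cl_environments import CLEnvironmentFactory  (import path assumed)
envs = CLEnvironmentFactory.single_device(cl_device_type='GPU', fallback_to_any_device_type=True)
print(envs)  # a list holding a single CL environment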
|
python
|
{
"resource": ""
}
|
q10473
|
CLEnvironmentFactory.all_devices
|
train
|
def all_devices(cl_device_type=None, platform=None):
"""Get multiple device environments, optionally only of the indicated type.
This will only fetch devices that support double precision floating point.
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be an OpenCL device type or a string matching 'GPU' or 'CPU'.
platform (opencl platform): The OpenCL platform to select the devices from
Returns:
list of CLEnvironment: List with the CL device environments.
"""
if isinstance(cl_device_type, str):
cl_device_type = device_type_from_string(cl_device_type)
runtime_list = []
if platform is None:
platforms = cl.get_platforms()
else:
platforms = [platform]
for platform in platforms:
if cl_device_type:
devices = platform.get_devices(device_type=cl_device_type)
else:
devices = platform.get_devices()
for device in devices:
if device_supports_double(device):
env = CLEnvironment(platform, device)
runtime_list.append(env)
return runtime_list
|
python
|
{
"resource": ""
}
|
q10474
|
CLEnvironmentFactory.smart_device_selection
|
train
|
def smart_device_selection(preferred_device_type=None):
"""Get a list of device environments that is suitable for use in MOT.
Basically this gets the total list of devices using all_devices() and applies a filter on it.
This filter does the following:
1) if the 'AMD Accelerated Parallel Processing' platform is available, remove all environments using the 'Clover'
platform.
More things may be implemented in the future.
Args:
preferred_device_type (str): the preferred device type, one of 'CPU', 'GPU' or 'APU'.
If no devices of this type can be found, we will use any other device available.
Returns:
list of CLEnvironment: List with the CL device environments.
"""
cl_environments = CLEnvironmentFactory.all_devices(cl_device_type=preferred_device_type)
platform_names = [env.platform.name for env in cl_environments]
has_amd_pro_platform = any('AMD Accelerated Parallel Processing' in name for name in platform_names)
if has_amd_pro_platform:
return list(filter(lambda env: 'Clover' not in env.platform.name, cl_environments))
if preferred_device_type is not None and not len(cl_environments):
return CLEnvironmentFactory.all_devices()
return cl_environments
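A minimal sketch of the intended call pattern (import path assumed, OpenCL required):
# e.g. from mot.lib.cl_environments import CLEnvironmentFactory  (import path assumed)
cl_environments = CLEnvironmentFactory.smart_device_selection(preferred_device_type='GPU')
print([env.platform.name for env in cl_environments])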
|
python
|
{
"resource": ""
}
|
q10475
|
multivariate_ess
|
train
|
def multivariate_ess(samples, batch_size_generator=None):
r"""Estimate the multivariate Effective Sample Size for the samples of every problem.
This essentially applies :func:`estimate_multivariate_ess` to every problem.
Args:
samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and
n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,
a generator function that yields sample arrays of shape (p, n).
batch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many
batches and of which size we use in estimating the minimum ESS.
Returns:
ndarray: the multivariate ESS per problem
"""
samples_generator = _get_sample_generator(samples)
return np.array(multiprocess_mapping(_MultivariateESSMultiProcessing(batch_size_generator), samples_generator()))
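A minimal sketch with synthetic data, assuming this function is importable from its defining module:
import numpy as np
samples = np.random.randn(10, 3, 2000)  # 10 problems, 3 parameters, 2000 samples each
ess = multivariate_ess(samples)
print(ess.shape)  # (10,), one multivariate ESS per problem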
|
python
|
{
"resource": ""
}
|
q10476
|
univariate_ess
|
train
|
def univariate_ess(samples, method='standard_error', **kwargs):
r"""Estimate the univariate Effective Sample Size for the samples of every problem.
This computes the ESS using:
.. math::
ESS(X) = n * \frac{\lambda^{2}}{\sigma^{2}}
Where :math:`\lambda` is the standard deviation of the chain and :math:`\sigma` is estimated using the
monte carlo standard error (which in turn is, by default, estimated using a batch means estimator).
Args:
samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and
n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,
a generator function that yields sample arrays of shape (p, n).
method (str): one of 'autocorrelation' or 'standard_error' defaults to 'standard_error'.
If 'autocorrelation' is chosen we apply the function: :func:`estimate_univariate_ess_autocorrelation`,
if 'standard_error' is chosen we apply the function: :func:`estimate_univariate_ess_standard_error`.
**kwargs: passed to the chosen compute method
Returns:
ndarray: a matrix of size (d, p) with for every problem and every parameter an ESS.
References:
* Flegal, J.M., Haran, M., and Jones, G.L. (2008). "Markov chain Monte Carlo: Can We
Trust the Third Significant Figure?". Statistical Science, 23, p. 250-260.
* Marc S. Meketon and Bruce Schmeiser. 1984. Overlapping batch means: something for nothing?.
In Proceedings of the 16th conference on Winter simulation (WSC '84), Sallie Sheppard (Ed.).
IEEE Press, Piscataway, NJ, USA, 226-230.
"""
samples_generator = _get_sample_generator(samples)
return np.array(multiprocess_mapping(_UnivariateESSMultiProcessing(method, **kwargs), samples_generator()))
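A minimal sketch with synthetic data (the function is assumed to be importable from its defining module):
import numpy as np
samples = np.random.randn(10, 3, 2000)
ess = univariate_ess(samples, method='standard_error')
print(ess.shape)  # (10, 3), one ESS per problem and parameter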
|
python
|
{
"resource": ""
}
|
q10477
|
_get_sample_generator
|
train
|
def _get_sample_generator(samples):
"""Get a sample generator from the given polymorphic input.
Args:
samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and
n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,
a generator function that yields sample arrays of shape (p, n).
Returns:
generator: a generator that yields a matrix of size (p, n) for every problem in the input.
"""
if isinstance(samples, Mapping):
def samples_generator():
for ind in range(samples[list(samples.keys())[0]].shape[0]):
yield np.array([samples[s][ind, :] for s in sorted(samples)])
elif isinstance(samples, np.ndarray):
def samples_generator():
for ind in range(samples.shape[0]):
yield samples[ind]
else:
samples_generator = samples
return samples_generator
|
python
|
{
"resource": ""
}
|
q10478
|
estimate_univariate_ess_standard_error
|
train
|
def estimate_univariate_ess_standard_error(chain, batch_size_generator=None, compute_method=None):
r"""Compute the univariate ESS using the standard error method.
This computes the ESS using:
.. math::
ESS(X) = n * \frac{\lambda^{2}}{\sigma^{2}}
Where :math:`\lambda` is the standard deviation of the chain and :math:`\sigma` is estimated using the monte carlo
standard error (which in turn is, by default, estimated using a batch means estimator).
Args:
chain (ndarray): the Markov chain
batch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates that batch sizes
we will use. Per default it uses the :class:`SquareRootSingleBatch` method.
compute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.
By default we will use the :class:`BatchMeansMCSE` method
Returns:
float: the estimated ESS
"""
sigma = (monte_carlo_standard_error(chain, batch_size_generator=batch_size_generator,
compute_method=compute_method) ** 2 * len(chain))
lambda_ = np.var(chain, dtype=np.float64)
return len(chain) * (lambda_ / sigma)
|
python
|
{
"resource": ""
}
|
q10479
|
minimum_multivariate_ess
|
train
|
def minimum_multivariate_ess(nmr_params, alpha=0.05, epsilon=0.05):
r"""Calculate the minimum multivariate Effective Sample Size you will need to obtain the desired precision.
This implements the inequality from Vats et al. (2016):
.. math::
\widehat{ESS} \geq \frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}} \frac{\chi^{2}_{1-\alpha,p}}{\epsilon^{2}}
Where :math:`p` is the number of free parameters.
Args:
nmr_params (int): the number of free parameters in the model
alpha (float): the level of confidence of the confidence region. For example, an alpha of 0.05 means
that we want to be in a 95% confidence region.
epsilon (float): the level of precision in our multivariate ESS estimate.
An epsilon of 0.05 means that we expect that the Monte Carlo error is 5% of the uncertainty in
the target distribution.
Returns:
float: the minimum multivariate Effective Sample Size that one should aim for in an MCMC sample to
obtain the desired confidence region with the desired precision.
References:
Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo.
arXiv:1512.07713v2 [math.ST]
"""
tmp = 2.0 / nmr_params
log_min_ess = tmp * np.log(2) + np.log(np.pi) - tmp * (np.log(nmr_params) + gammaln(nmr_params / 2)) \
+ np.log(chi2.ppf(1 - alpha, nmr_params)) - 2 * np.log(epsilon)
return int(round(np.exp(log_min_ess)))
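A quick numeric check of the inequality above (the printed value is indicative only):
min_ess = minimum_multivariate_ess(nmr_params=10, alpha=0.05, epsilon=0.05)
print(min_ess)  # roughly 8.8e3 samples for 10 parameters at this confidence level and precision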
|
python
|
{
"resource": ""
}
|
q10480
|
multivariate_ess_precision
|
train
|
def multivariate_ess_precision(nmr_params, multi_variate_ess, alpha=0.05):
r"""Calculate the precision given your multivariate Effective Sample Size.
Given that you obtained :math:`ESS` multivariate effective samples in your estimate you can calculate the
precision with which you approximated your desired confidence region.
This implements the inequality from Vats et al. (2016), slightly restructured to give :math:`\epsilon` back instead
of the minimum ESS.
.. math::
\epsilon = \sqrt{\frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}} \frac{\chi^{2}_{1-\alpha,p}}{\widehat{ESS}}}
Where :math:`p` is the number of free parameters and ESS is the multivariate ESS from your samples.
Args:
nmr_params (int): the number of free parameters in the model
multi_variate_ess (int): the number of iid samples you obtained in your sample results.
alpha (float): the level of confidence of the confidence region. For example, an alpha of 0.05 means
that we want to be in a 95% confidence region.
Returns:
float: the precision (the :math:`\epsilon` above) of the confidence region obtained with the given
multivariate ESS and number of parameters.
References:
Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo.
arXiv:1512.07713v2 [math.ST]
"""
tmp = 2.0 / nmr_params
log_min_ess = tmp * np.log(2) + np.log(np.pi) - tmp * (np.log(nmr_params) + gammaln(nmr_params / 2)) \
+ np.log(chi2.ppf(1 - alpha, nmr_params)) - np.log(multi_variate_ess)
return np.sqrt(np.exp(log_min_ess))
|
python
|
{
"resource": ""
}
|
q10481
|
estimate_multivariate_ess_sigma
|
train
|
def estimate_multivariate_ess_sigma(samples, batch_size):
r"""Calculates the Sigma matrix which is part of the multivariate ESS calculation.
This implementation is based on the Matlab implementation found at: https://github.com/lacerbi/multiESS
The Sigma matrix is defined as:
.. math::
\Sigma = \Lambda + 2 * \sum_{k=1}^{\infty}{Cov(Y_{1}, Y_{1+k})}
Where :math:`Y` are our samples and :math:`\Lambda` is the covariance matrix of the samples.
This implementation computes the :math:`\Sigma` matrix using a Batch Mean estimator using the given batch size.
The batch size has to be :math:`1 \le b_n \le n` and a typical value is either :math:`\lfloor n^{1/2} \rfloor`
for slow mixing chains or :math:`\lfloor n^{1/3} \rfloor` for reasonable mixing chains.
If the length of the chain is longer than the sum of the length of all the batches, this implementation
calculates :math:`\Sigma` for every offset and returns the average of those offsets.
Args:
samples (ndarray): the samples for which we compute the sigma matrix. Expects a (p, n) array with
p the number of parameters and n the sample size
batch_size (int): the batch size used in the approximation of the correlation covariance
Returns:
ndarray: a (p, p) array with p the number of parameters in the samples.
References:
Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo.
arXiv:1512.07713v2 [math.ST]
"""
sample_means = np.mean(samples, axis=1, dtype=np.float64)
nmr_params, chain_length = samples.shape
nmr_batches = int(np.floor(chain_length / batch_size))
sigma = np.zeros((nmr_params, nmr_params))
nmr_offsets = chain_length - nmr_batches * batch_size + 1
for offset in range(nmr_offsets):
batches = np.reshape(samples[:, np.array(offset + np.arange(0, nmr_batches * batch_size), dtype=int)].T,
[batch_size, nmr_batches, nmr_params], order='F')
batch_means = np.squeeze(np.mean(batches, axis=0, dtype=np.float64))
Z = batch_means - sample_means
for x, y in itertools.product(range(nmr_params), range(nmr_params)):
sigma[x, y] += np.sum(Z[:, x] * Z[:, y])
return sigma * batch_size / (nmr_batches - 1) / nmr_offsets
|
python
|
{
"resource": ""
}
|
q10482
|
monte_carlo_standard_error
|
train
|
def monte_carlo_standard_error(chain, batch_size_generator=None, compute_method=None):
"""Compute Monte Carlo standard errors for the expectations
This is a convenience function that calls the compute method for each batch size and returns the lowest
standard error over the used batch sizes.
Args:
chain (ndarray): the Markov chain
batch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates that batch sizes
we will use. Per default it uses the :class:`SquareRootSingleBatch` method.
compute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.
By default we will use the :class:`BatchMeansMCSE` method
"""
batch_size_generator = batch_size_generator or SquareRootSingleBatch()
compute_method = compute_method or BatchMeansMCSE()
batch_sizes = batch_size_generator.get_univariate_ess_batch_sizes(len(chain))
return np.min(list(compute_method.compute_standard_error(chain, b) for b in batch_sizes))
|
python
|
{
"resource": ""
}
|
q10483
|
fit_gaussian
|
train
|
def fit_gaussian(samples, ddof=0):
"""Calculates the mean and the standard deviation of the given samples.
Args:
samples (ndarray): a one or two dimensional array. If one dimensional we calculate the fit using all
values. If two dimensional, we fit the Gaussian for every set of samples over the first dimension.
ddof (int): the difference degrees of freedom in the std calculation. See numpy.
"""
if len(samples.shape) == 1:
return np.mean(samples), np.std(samples, ddof=ddof)
return np.mean(samples, axis=1), np.std(samples, axis=1, ddof=ddof)
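A minimal sketch with synthetic data:
import numpy as np
samples = np.random.normal(loc=2.0, scale=0.5, size=(10, 5000))
means, stds = fit_gaussian(samples, ddof=1)
print(means.shape, stds.shape)  # (10,) and (10,)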
|
python
|
{
"resource": ""
}
|
q10484
|
fit_circular_gaussian
|
train
|
def fit_circular_gaussian(samples, high=np.pi, low=0):
"""Compute the circular mean for samples in a range
Args:
samples (ndarray): a one or two dimensional array. If one dimensional we calculate the fit using all
values. If two dimensional, we fit the Gaussian for every set of samples over the first dimension.
high (float): The maximum wrap point
low (float): The minimum wrap point
"""
cl_func = SimpleCLFunction.from_string('''
void compute(global mot_float_type* samples,
global mot_float_type* means,
global mot_float_type* stds,
int nmr_samples,
mot_float_type low,
mot_float_type high){
double cos_mean = 0;
double sin_mean = 0;
double ang;
for(uint i = 0; i < nmr_samples; i++){
ang = (samples[i] - low)*2*M_PI / (high - low);
cos_mean += (cos(ang) - cos_mean) / (i + 1);
sin_mean += (sin(ang) - sin_mean) / (i + 1);
}
double R = hypot(cos_mean, sin_mean);
if(R > 1){
R = 1;
}
double res = atan2(sin_mean, cos_mean);
if(res < 0){
res += 2 * M_PI;
}
*(means) = res*(high - low)/2.0/M_PI + low;
*(stds) = ((high - low)/2.0/M_PI) * sqrt(-2*log(R));
}
''')
def run_cl(samples):
data = {'samples': Array(samples, 'mot_float_type'),
'means': Zeros(samples.shape[0], 'mot_float_type'),
'stds': Zeros(samples.shape[0], 'mot_float_type'),
'nmr_samples': Scalar(samples.shape[1]),
'low': Scalar(low, ctype='mot_float_type'),
'high': Scalar(high, ctype='mot_float_type'),
}
cl_func.evaluate(data, samples.shape[0])
return data['means'].get_data(), data['stds'].get_data()
if len(samples.shape) == 1:
mean, std = run_cl(samples[None, :])
return mean[0], std[0]
return run_cl(samples)
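A minimal sketch with synthetic angular data; a working OpenCL runtime is required since the fit runs as a CL kernel:
import numpy as np
angles = np.random.vonmises(mu=1.0, kappa=4, size=(10, 5000)) % np.pi  # wrap into [0, pi)
means, stds = fit_circular_gaussian(angles, high=np.pi, low=0)
print(means.shape, stds.shape)  # (10,) and (10,)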
|
python
|
{
"resource": ""
}
|
q10485
|
fit_truncated_gaussian
|
train
|
def fit_truncated_gaussian(samples, lower_bounds, upper_bounds):
"""Fits a truncated gaussian distribution on the given samples.
This will do a maximum likelihood estimation of a truncated Gaussian on the provided samples, with the
truncation points given by the lower and upper bounds.
Args:
samples (ndarray): a one or two dimensional array. If one dimensional we fit the truncated Gaussian on all
values. If two dimensional, we calculate the truncated Gaussian for every set of samples over the
first dimension.
lower_bounds (ndarray or float): the lower bound, either a scalar or a lower bound per problem (first index of
samples)
upper_bounds (ndarray or float): the upper bound, either a scalar or an upper bound per problem (first index of
samples)
Returns:
mean, std: the mean and std of the fitted truncated Gaussian
"""
if len(samples.shape) == 1:
return _TruncatedNormalFitter()((samples, lower_bounds, upper_bounds))
def item_generator():
for ind in range(samples.shape[0]):
if is_scalar(lower_bounds):
lower_bound = lower_bounds
else:
lower_bound = lower_bounds[ind]
if is_scalar(upper_bounds):
upper_bound = upper_bounds
else:
upper_bound = upper_bounds[ind]
yield (samples[ind], lower_bound, upper_bound)
results = np.array(multiprocess_mapping(_TruncatedNormalFitter(), item_generator()))
return results[:, 0], results[:, 1]
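A minimal sketch; the clipping below only serves to keep the synthetic data inside the truncation bounds:
import numpy as np
samples = np.clip(np.random.normal(0.5, 0.2, size=(10, 5000)), 0, 1)
means, stds = fit_truncated_gaussian(samples, lower_bounds=0, upper_bounds=1)
print(means.shape, stds.shape)  # (10,) and (10,)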
|
python
|
{
"resource": ""
}
|
q10486
|
gaussian_overlapping_coefficient
|
train
|
def gaussian_overlapping_coefficient(means_0, stds_0, means_1, stds_1, lower=None, upper=None):
"""Compute the overlapping coefficient of two Gaussian continuous_distributions.
This computes the :math:`\int_{-\infty}^{\infty}{\min(f(x), g(x))\partial x}` where
:math:`f \sim \mathcal{N}(\mu_0, \sigma_0^{2})` and :math:`f \sim \mathcal{N}(\mu_1, \sigma_1^{2})` are normally
distributed variables.
This will compute the overlap for each element in the first dimension.
Args:
means_0 (ndarray): the set of means of the first distribution
stds_0 (ndarray): the set of stds of the first distribution
means_1 (ndarray): the set of means of the second distribution
stds_1 (ndarray): the set of stds of the second distribution
lower (float): the lower limit of the integration. If not set we set it to -inf.
upper (float): the upper limit of the integration. If not set we set it to +inf.
"""
if lower is None:
lower = -np.inf
if upper is None:
upper = np.inf
def point_iterator():
for ind in range(means_0.shape[0]):
yield np.squeeze(means_0[ind]), np.squeeze(stds_0[ind]), np.squeeze(means_1[ind]), np.squeeze(stds_1[ind])
return np.array(list(multiprocess_mapping(_ComputeGaussianOverlap(lower, upper), point_iterator())))
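A minimal sketch comparing two sets of unit-variance Gaussians whose means differ by half a standard deviation:
import numpy as np
means_0 = np.zeros((10, 1))
stds_0 = np.ones((10, 1))
means_1 = np.full((10, 1), 0.5)
stds_1 = np.ones((10, 1))
overlap = gaussian_overlapping_coefficient(means_0, stds_0, means_1, stds_1)
print(overlap.shape)  # (10,), every coefficient lies between 0 and 1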
|
python
|
{
"resource": ""
}
|
q10487
|
_TruncatedNormalFitter.truncated_normal_log_likelihood
|
train
|
def truncated_normal_log_likelihood(params, low, high, data):
"""Calculate the log likelihood of the truncated normal distribution.
Args:
params: tuple with (mean, std), the parameters under which we evaluate the model
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one dimension list of data points for which we want to calculate the likelihood
Returns:
float: the negative log likelihood of observing the given data under the given parameters.
This is meant to be used in minimization routines.
"""
mu = params[0]
sigma = params[1]
if sigma == 0:
return np.inf
ll = np.sum(norm.logpdf(data, mu, sigma))
ll -= len(data) * np.log((norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma)))
return -ll
|
python
|
{
"resource": ""
}
|
q10488
|
_TruncatedNormalFitter.truncated_normal_ll_gradient
|
train
|
def truncated_normal_ll_gradient(params, low, high, data):
"""Return the gradient of the log likelihood of the truncated normal at the given position.
Args:
params: tuple with (mean, std), the parameters under which we evaluate the model
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one dimension list of data points for which we want to calculate the likelihood
Returns:
tuple: the gradient of the log likelihood given as a tuple with (mean, std)
"""
if params[1] == 0:
return np.array([np.inf, np.inf])
return np.array([_TruncatedNormalFitter.partial_derivative_mu(params[0], params[1], low, high, data),
_TruncatedNormalFitter.partial_derivative_sigma(params[0], params[1], low, high, data)])
|
python
|
{
"resource": ""
}
|
q10489
|
_TruncatedNormalFitter.partial_derivative_mu
|
train
|
def partial_derivative_mu(mu, sigma, low, high, data):
"""The partial derivative with respect to the mean.
Args:
mu (float): the mean of the truncated normal
sigma (float): the std of the truncated normal
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one dimension list of data points for which we want to calculate the likelihood
Returns:
float: the partial derivative evaluated at the given point
"""
pd_mu = np.sum(data - mu) / sigma ** 2
pd_mu -= len(data) * ((norm.pdf(low, mu, sigma) - norm.pdf(high, mu, sigma))
/ (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma)))
return -pd_mu
|
python
|
{
"resource": ""
}
|
q10490
|
_TruncatedNormalFitter.partial_derivative_sigma
|
train
|
def partial_derivative_sigma(mu, sigma, low, high, data):
"""The partial derivative with respect to the standard deviation.
Args:
mu (float): the mean of the truncated normal
sigma (float): the std of the truncated normal
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one dimension list of data points for which we want to calculate the likelihood
Returns:
float: the partial derivative evaluated at the given point
"""
pd_sigma = np.sum(-(1 / sigma) + ((data - mu) ** 2 / (sigma ** 3)))
pd_sigma -= len(data) * (((low - mu) * norm.pdf(low, mu, sigma) - (high - mu) * norm.pdf(high, mu, sigma))
/ (sigma * (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma))))
return -pd_sigma
|
python
|
{
"resource": ""
}
|
q10491
|
minimize
|
train
|
def minimize(func, x0, data=None, method=None, lower_bounds=None, upper_bounds=None, constraints_func=None,
nmr_observations=None, cl_runtime_info=None, options=None):
"""Minimization of one or more variables.
For an easy wrapper of function maximization, see :func:`maximize`.
All boundary conditions are enforced using the penalty method. That is, we optimize the objective function:
.. math::
F(x) = f(x) + \mu \sum \max(0, g_i(x))^2
where :math:`F(x)` is the new objective function, :math:`f(x)` is the old objective function, :math:`g_i` are
the boundary functions defined as :math:`g_i(x) \leq 0` and :math:`\mu` is the penalty weight.
The penalty weight is by default :math:`\mu = 1e20` and can be set
using the ``options`` dictionary as ``penalty_weight``.
Args:
func (mot.lib.cl_function.CLFunction): A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* objective_list);
The objective list needs to be filled when the provided pointer is not null. It should contain
the function values for each observation. This list is used by non-linear least-squares routines,
and will be squared by the least-square optimizer. This is only used by the ``Levenberg-Marquardt`` routine.
x0 (ndarray): Initial guess. Array of real elements of size (n, p), for 'n' problems and 'p'
independent variables.
data (mot.lib.kernel_data.KernelData): the kernel data we will load. This is returned to the likelihood function
as the ``void* data`` pointer.
method (str): Type of solver. Should be one of:
- 'Levenberg-Marquardt'
- 'Nelder-Mead'
- 'Powell'
- 'Subplex'
If not given, defaults to 'Powell'.
lower_bounds (tuple): per parameter a lower bound, if given, the optimizer ensures ``a <= x`` with
a the lower bound and x the parameter. If not given, -infinity is assumed for all parameters.
Each tuple element can either be a scalar or a vector. If a vector is given the first dimension length
should match that of the parameters.
upper_bounds (tuple): per parameter an upper bound, if given, the optimizer ensures ``x <= b`` with
b the upper bound and x the parameter. If not given, +infinity is assumed for all parameters.
Each tuple element can either be a scalar or a vector. If a vector is given the first dimension length
should match that of the parameters.
constraints_func (mot.optimize.base.ConstraintFunction): function to compute (inequality) constraints.
Should hold a CL function with the signature:
.. code-block:: c
void <func_name>(local const mot_float_type* const x,
void* data,
local mot_float_type* constraints);
Where ``constraints`` is filled as:
.. code-block:: c
constraints[i] = g_i(x)
That is, for each constraint function :math:`g_i`, formulated as :math:`g_i(x) <= 0`, we should return
the function value of :math:`g_i`.
nmr_observations (int): the number of observations returned by the optimization function.
This is only needed for the ``Levenberg-Marquardt`` method.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the CL runtime information
options (dict): A dictionary of solver options. All methods accept the following generic options:
- patience (int): Maximum number of iterations to perform.
- penalty_weight (float): the weight of the penalty term for the boundary conditions
Returns:
mot.optimize.base.OptimizeResults:
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array.
"""
if not method:
method = 'Powell'
cl_runtime_info = cl_runtime_info or CLRuntimeInfo()
if len(x0.shape) < 2:
x0 = x0[..., None]
lower_bounds = _bounds_to_array(lower_bounds or np.ones(x0.shape[1]) * -np.inf)
upper_bounds = _bounds_to_array(upper_bounds or np.ones(x0.shape[1]) * np.inf)
if method == 'Powell':
return _minimize_powell(func, x0, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
elif method == 'Nelder-Mead':
return _minimize_nmsimplex(func, x0, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
elif method == 'Levenberg-Marquardt':
return _minimize_levenberg_marquardt(func, x0, nmr_observations, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
elif method == 'Subplex':
return _minimize_subplex(func, x0, cl_runtime_info, lower_bounds, upper_bounds,
constraints_func=constraints_func, data=data, options=options)
raise ValueError('Could not find the specified method "{}".'.format(method))
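A hedged end-to-end sketch for a two-parameter Rosenbrock objective; the import paths in the comment are assumptions about the package layout and a working OpenCL runtime is required:
import numpy as np
# assumed paths: from mot.lib.cl_function import SimpleCLFunction; from mot.optimize import minimize
objective = SimpleCLFunction.from_string('''
double rosenbrock(local const mot_float_type* const x, void* data, local mot_float_type* objective_list){
    return 100 * pown(x[1] - x[0] * x[0], 2) + pown(1 - x[0], 2);
}
''')
x0 = np.zeros((1000, 2))  # 1000 independent problems, 2 parameters each
result = minimize(objective, x0, method='Powell')
print(result.x.shape)  # (1000, 2); each row should approach the optimum (1, 1)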
|
python
|
{
"resource": ""
}
|
q10492
|
_bounds_to_array
|
train
|
def _bounds_to_array(bounds):
"""Create a CompositeArray to hold the bounds."""
elements = []
for value in bounds:
if all_elements_equal(value):
elements.append(Scalar(get_single_value(value), ctype='mot_float_type'))
else:
elements.append(Array(value, ctype='mot_float_type', as_scalar=True))
return CompositeArray(elements, 'mot_float_type', address_space='local')
|
python
|
{
"resource": ""
}
|
q10493
|
get_minimizer_options
|
train
|
def get_minimizer_options(method):
"""Return a dictionary with the default options for the given minimization method.
Args:
method (str): the name of the method we want the options off
Returns:
dict: a dictionary with the default options
"""
if method == 'Powell':
return {'patience': 2,
'patience_line_search': None,
'reset_method': 'EXTRAPOLATED_POINT'}
elif method == 'Nelder-Mead':
return {'patience': 200,
'alpha': 1.0, 'beta': 0.5, 'gamma': 2.0, 'delta': 0.5, 'scale': 0.1,
'adaptive_scales': True}
elif method == 'Levenberg-Marquardt':
return {'patience': 250, 'step_bound': 100.0, 'scale_diag': 1, 'usertol_mult': 30}
elif method == 'Subplex':
return {'patience': 10,
'patience_nmsimplex': 100,
'alpha': 1.0, 'beta': 0.5, 'gamma': 2.0, 'delta': 0.5, 'scale': 1.0, 'psi': 0.0001, 'omega': 0.01,
'adaptive_scales': True,
'min_subspace_length': 'auto',
'max_subspace_length': 'auto'}
raise ValueError('Could not find the specified method "{}".'.format(method))
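A small sketch of the intended workflow: fetch the defaults, adjust them, and pass the result as the ``options`` argument of :func:`minimize`:
options = get_minimizer_options('Nelder-Mead')
options['patience'] = 500  # allow more iterations than the default
# minimize(func, x0, method='Nelder-Mead', options=options)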
|
python
|
{
"resource": ""
}
|
q10494
|
_clean_options
|
train
|
def _clean_options(method, provided_options):
"""Clean the given input options.
This will make sure that all options are present, either with their default values or with the given values,
and that no other options are present then those supported.
Args:
method (str): the method name
provided_options (dict): the given options
Returns:
dict: the resulting options dictionary
"""
provided_options = provided_options or {}
default_options = get_minimizer_options(method)
result = {}
for name, default in default_options.items():
if name in provided_options:
result[name] = provided_options[name]
else:
result[name] = default_options[name]
return result
|
python
|
{
"resource": ""
}
|
q10495
|
validate_schema
|
train
|
def validate_schema(yaml_def, branch=False):
"""Validates the schema of a dict
Parameters
----------
yaml_def : dict
dict whose schema shall be validated
branch : bool
Indicates whether `yaml_def` is a dict of a top-level lane, or of a branch
inside a lane (needed for recursion)
Returns
-------
bool
True if validation was successful
"""
schema = Schema({
'lane' if not branch else 'branch': {
Optional('name'): str,
Optional('run_parallel'): bool,
'tasks': list
}
})
schema.validate(yaml_def)
from schema import And, Use
task_schema = Schema({
'class': str,
Optional('kwargs'): Or({str: object}),
Optional('args'): Or([object], And(Use(lambda a: isinstance(a, dict)), False))
})
def validate_tasks(tasks): # pylint: disable=missing-docstring
for task in tasks:
try:
Schema({'branch': dict}).validate(task)
validate_schema(task, True)
except SchemaError:
task_schema.validate(task)
return True
return validate_tasks(yaml_def['lane']['tasks'] if not branch else yaml_def['branch']['tasks'])
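A minimal sketch of a lane definition that passes validation; the task class path is a placeholder:
lane_def = {'lane': {'name': 'example', 'run_parallel': False, 'tasks': [{'class': 'pkg.ExampleTask', 'kwargs': {'path': '/tmp/data'}}]}}
print(validate_schema(lane_def))  # True; a schema.SchemaError is raised for invalid structures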
|
python
|
{
"resource": ""
}
|
q10496
|
arg_spec
|
train
|
def arg_spec(cls, mtd_name):
"""Cross-version argument signature inspection
Parameters
----------
cls : class
mtd_name : str
Name of the method to be inspected
Returns
-------
required_params : list of str
List of required, positional parameters
optional_params : list of str
List of optional parameters, i.e. parameters with a default value
"""
mtd = getattr(cls, mtd_name)
required_params = []
optional_params = []
if hasattr(inspect, 'signature'): # Python 3
params = inspect.signature(mtd).parameters # pylint: disable=no-member
for k in params.keys():
if params[k].default == inspect.Parameter.empty: # pylint: disable=no-member
# Python 3 does not make a difference between unbound methods and functions, so the
# only way to distinguish if the first argument is of a regular method, or a class
# method, is to look for the conventional argument name. Yikes.
if not (params[k].name == 'self' or params[k].name == 'cls'):
required_params.append(k)
else:
optional_params.append(k)
else: # Python 2
params = inspect.getargspec(mtd) # pylint: disable=deprecated-method
num = len(params[0]) if params[0] else 0
n_opt = len(params[3]) if params[3] else 0
n_req = (num - n_opt) if n_opt <= num else 0
for i in range(0, n_req):
required_params.append(params[0][i])
for i in range(n_req, num):
optional_params.append(params[0][i])
if inspect.isroutine(getattr(cls, mtd_name)):
bound_mtd = cls.__dict__[mtd_name]
if not isinstance(bound_mtd, staticmethod):
del required_params[0]
return required_params, optional_params
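A minimal sketch showing how the parameters of a method are split into the two returned lists:
class Example(object):
    def method(self, a, b, c=1, d=2):
        pass
required, optional = arg_spec(Example, 'method')
print(required)  # the positional parameters without defaults
print(optional)  # the parameters that define a default value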
|
python
|
{
"resource": ""
}
|
q10497
|
AbstractSampler.sample
|
train
|
def sample(self, nmr_samples, burnin=0, thinning=1):
"""Take additional samples from the given likelihood and prior, using this sampler.
This method can be called multiple times in which the sample state is stored in between.
Args:
nmr_samples (int): the number of samples to return
burnin (int): the number of samples to discard before returning samples
thinning (int): how many samples we wait before storing a new one. This will draw extra samples such that
the total number of samples generated is ``nmr_samples * thinning`` and the number of samples
stored is ``nmr_samples``. If set to one or lower we store every sample after the burn in.
Returns:
SamplingOutput: the sample output object
"""
if not thinning or thinning < 1:
thinning = 1
if not burnin or burnin < 0:
burnin = 0
max_samples_per_batch = max(1000 // thinning, 100)
with self._logging(nmr_samples, burnin, thinning):
if burnin > 0:
for batch_start, batch_end in split_in_batches(burnin, max_samples_per_batch):
self._sample(batch_end - batch_start, return_output=False)
if nmr_samples > 0:
outputs = []
for batch_start, batch_end in split_in_batches(nmr_samples, max_samples_per_batch):
outputs.append(self._sample(batch_end - batch_start, thinning=thinning))
return SimpleSampleOutput(*[np.concatenate([o[ind] for o in outputs], axis=-1) for ind in range(3)])
|
python
|
{
"resource": ""
}
|
q10498
|
AbstractSampler._sample
|
train
|
def _sample(self, nmr_samples, thinning=1, return_output=True):
"""Sample the given number of samples with the given thinning.
If ``return_output`` is True we will return the samples, log likelihoods and log priors. If not, we will advance the
state of the sampler without storing or returning the samples.
Args:
nmr_samples (int): the number of iterations to advance the sampler
thinning (int): the thinning to apply
return_output (boolean): if we should return the output
Returns:
None or tuple: if ``return_output`` is True three ndarrays as (samples, log_likelihoods, log_priors)
"""
kernel_data = self._get_kernel_data(nmr_samples, thinning, return_output)
sample_func = self._get_compute_func(nmr_samples, thinning, return_output)
sample_func.evaluate(kernel_data, self._nmr_problems,
use_local_reduction=all(env.is_gpu for env in self._cl_runtime_info.cl_environments),
cl_runtime_info=self._cl_runtime_info)
self._sampling_index += nmr_samples * thinning
if return_output:
return (kernel_data['samples'].get_data(),
kernel_data['log_likelihoods'].get_data(),
kernel_data['log_priors'].get_data())
|
python
|
{
"resource": ""
}
|
q10499
|
AbstractSampler._get_kernel_data
|
train
|
def _get_kernel_data(self, nmr_samples, thinning, return_output):
"""Get the kernel data we will input to the MCMC sampler.
This sets the items:
* data: the pointer to the user provided data
* method_data: the data specific to the MCMC method
* nmr_iterations: the number of iterations to sample
* iteration_offset: the current sample index, that is, the offset to the given number of iterations
* rng_state: the random number generator state
* current_chain_position: the current position of the sampled chain
* current_log_likelihood: the log likelihood of the current position on the chain
* current_log_prior: the log prior of the current position on the chain
Additionally, if ``return_output`` is True, we add to that the arrays:
* samples: for the samples
* log_likelihoods: for storing the log likelihoods
* log_priors: for storing the priors
Args:
nmr_samples (int): the number of samples we will draw
thinning (int): the thinning factor we want to use
return_output (boolean): if the kernel should return output
Returns:
dict[str: mot.lib.utils.KernelData]: the kernel input data
"""
kernel_data = {
'data': self._data,
'method_data': self._get_mcmc_method_kernel_data(),
'nmr_iterations': Scalar(nmr_samples * thinning, ctype='ulong'),
'iteration_offset': Scalar(self._sampling_index, ctype='ulong'),
'rng_state': Array(self._rng_state, 'uint', mode='rw', ensure_zero_copy=True),
'current_chain_position': Array(self._current_chain_position, 'mot_float_type',
mode='rw', ensure_zero_copy=True),
'current_log_likelihood': Array(self._current_log_likelihood, 'mot_float_type',
mode='rw', ensure_zero_copy=True),
'current_log_prior': Array(self._current_log_prior, 'mot_float_type',
mode='rw', ensure_zero_copy=True),
}
if return_output:
kernel_data.update({
'samples': Zeros((self._nmr_problems, self._nmr_params, nmr_samples), ctype='mot_float_type'),
'log_likelihoods': Zeros((self._nmr_problems, nmr_samples), ctype='mot_float_type'),
'log_priors': Zeros((self._nmr_problems, nmr_samples), ctype='mot_float_type'),
})
return kernel_data
|
python
|
{
"resource": ""
}
|