| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
ret = None
if not query: query = {}
if not body: body = {}
query.update(body) # body takes precedence
body = query
self.send_count += 1
payload = self.get_fetch_request(method, path, body)
attempts = 1
max_attempts = self.attempts
success = False
while not success:
kwargs['timeout'] = timeout
try:
try:
if not self.connected: self.connect(path)
with self.wstimeout(**kwargs) as timeout:
kwargs['timeout'] = timeout
logger.debug('{} send {} attempt {}/{} with timeout {}'.format(
self.client_id,
payload.uuid,
attempts,
max_attempts,
timeout
))
sent_bits = self.ws.send(payload.payload)
logger.debug('{} sent {} bytes'.format(self.client_id, sent_bits))
if sent_bits:
ret = self.fetch_response(payload, **kwargs)
if ret:
success = True
except websocket.WebSocketConnectionClosedException as e:
self.ws.shutdown()
raise IOError("connection is not open but reported it was open: {}".format(e))
except (IOError, TypeError) as e:
logger.debug('{} error on send attempt {}: {}'.format(self.client_id, attempts, e))
success = False
finally:
if not success:
attempts += 1
if attempts > max_attempts:
raise
else:
timeout *= 2
if (attempts / max_attempts) > 0.50:
logger.debug(
"{} closing and re-opening connection for next attempt".format(self.client_id)
)
self.close()
return ret
|
def fetch(self, method, path, query=None, body=None, timeout=0, **kwargs)
|
Send a message
:param method: string, something like "POST" or "GET"
:param path: string, the path part of a uri (eg, /foo/bar)
:param query: dict, query params that get merged into the body (the body takes precedence)
:param body: dict, what you want to send to "method path"
:param timeout: integer, how long to wait before giving up on the send
| 4.086807
| 4.104345
| 0.995727
|
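The send loop above retries with a doubling timeout until its attempt budget runs out. Below is a minimal, self-contained sketch of that retry pattern; the names send_with_retries and send_once are illustrative and not part of the source.

import logging

logger = logging.getLogger(__name__)

def send_with_retries(send_once, max_attempts=3, timeout=1.0):
    """Retry send_once(timeout) on IOError, doubling the timeout after each
    failed attempt and re-raising once max_attempts is exceeded, mirroring
    the fetch() loop above."""
    attempts = 1
    while True:
        try:
            return send_once(timeout)
        except IOError as e:
            logger.debug("send attempt %s/%s failed: %s", attempts, max_attempts, e)
            attempts += 1
            if attempts > max_attempts:
                raise
            timeout *= 2  # back off before the next attempt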
if req_payload.uuid:
uuids = set([req_payload.uuid, "CONNECT"])
def callback(res_payload):
#pout.v(req_payload, res_payload)
#ret = req_payload.uuid == res_payload.uuid or res_payload.uuid == "CONNECT"
ret = res_payload.uuid in uuids
if ret:
logger.debug('{} received {} response for {}'.format(
self.client_id,
res_payload.code,
res_payload.uuid,
))
return ret
res_payload = self.recv_callback(callback, **kwargs)
return res_payload
|
def fetch_response(self, req_payload, **kwargs)
|
payload has been sent, do anything else you need to do (eg, wait for response?)
:param req_payload: Payload, the payload sent to the server
:returns: Payload, the response payload
| 4.384504
| 4.482994
| 0.97803
|
# http://stackoverflow.com/a/2257449/5006
def rand_id(size=8, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
payload = rand_id()
self.ws.ping(payload)
opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_PONG], **kwargs)
if data != payload:
raise IOError("Pinged server but did not receive correct pong")
|
def ping(self, timeout=0, **kwargs)
|
THIS DOES NOT WORK, UWSGI DOES NOT RESPOND TO PINGS
| 4.701562
| 4.659651
| 1.008994
|
orig_timeout = self.get_timeout(timeout)
timeout = orig_timeout
while timeout > 0.0:
start = time.time()
if not self.connected: self.connect(timeout=timeout, **kwargs)
with self.wstimeout(timeout, **kwargs) as timeout:
logger.debug('{} waiting to receive for {} seconds'.format(self.client_id, timeout))
try:
opcode, data = self.ws.recv_data()
if opcode in opcodes:
timeout = 0.0
break
else:
if opcode == websocket.ABNF.OPCODE_CLOSE:
raise websocket.WebSocketConnectionClosedException()
except websocket.WebSocketTimeoutException:
pass
except websocket.WebSocketConnectionClosedException:
# bug in Websocket.recv_data(), this should be done by Websocket
try:
self.ws.shutdown()
except AttributeError:
pass
#raise EOFError("websocket closed by server and reconnection did nothing")
if timeout:
stop = time.time()
timeout -= (stop - start)
else:
break
if timeout < 0.0:
raise IOError("recv timed out in {} seconds".format(orig_timeout))
return opcode, data
|
def recv_raw(self, timeout, opcodes, **kwargs)
|
this is very internal, it will return the raw opcode and data if the opcode
matches one of the passed in opcodes
| 4.087257
| 4.022437
| 1.016115
|
p = Payload(raw)
p._body = p.body
return p
|
def get_fetch_response(self, raw)
|
This just makes the payload instance more HTTPClient like
| 16.484077
| 9.530744
| 1.729569
|
opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_TEXT], **kwargs)
return self.get_fetch_response(data)
|
def recv(self, timeout=0, **kwargs)
|
this will receive data and convert it into a message. It is really more of an
internal method, used by recv_callback and recv_msg
| 9.861719
| 9.134337
| 1.079632
|
payload = None
timeout = self.get_timeout(**kwargs)
full_timeout = timeout
while timeout > 0.0:
kwargs['timeout'] = timeout
start = time.time()
payload = self.recv(**kwargs)
if callback(payload):
break
payload = None
stop = time.time()
elapsed = stop - start
timeout -= elapsed
if not payload:
raise IOError("recv_callback timed out in {}".format(full_timeout))
return payload
|
def recv_callback(self, callback, **kwargs)
|
receive messages and validate them with the callback. If the callback returns
True then the message is valid and will be returned; if it returns False then
this will try to receive another message until the timeout runs out
| 3.900116
| 3.711998
| 1.050678
|
body = None
req = self.request
res = self.response
rou = self.router
con = None
controller_info = {}
try:
controller_info = rou.find(req, res)
except IOError as e:
logger.warning(str(e), exc_info=True)
raise CallError(
408,
"The client went away before the request body was retrieved."
)
except (ImportError, AttributeError, TypeError) as e:
exc_info = sys.exc_info()
logger.warning(str(e), exc_info=exc_info)
raise CallError(
404,
"{} not found because of {} \"{}\" on {}:{}".format(
req.path,
exc_info[0].__name__,
str(e),
os.path.basename(exc_info[2].tb_frame.f_code.co_filename),
exc_info[2].tb_lineno
)
)
else:
con = controller_info['class_instance']
return con
|
def create_controller(self)
|
Create a controller to handle the request
:returns: Controller, this Controller instance should be able to handle
the request
| 3.695395
| 3.669464
| 1.007067
|
body = None
req = self.request
res = self.response
rou = self.router
con = None
start = time.time()
try:
con = self.create_controller()
con.call = self
self.controller = con
if not self.quiet:
con.log_start(start)
# the controller handle method will manipulate self.response, it first
# tries to find a handle_HTTP_METHOD method, if it can't find that it
# will default to the handle method (which is implemented on Controller).
# method arguments are passed in so child classes can add decorators
# just like the HTTP_METHOD that will actually handle the request
controller_args, controller_kwargs = con.find_method_params()
controller_method = getattr(con, "handle_{}".format(req.method), None)
if not controller_method:
controller_method = getattr(con, "handle")
if not self.quiet:
logger.debug("Using handle method: {}.{}".format(
con.__class__.__name__,
controller_method.__name__
))
controller_method(*controller_args, **controller_kwargs)
except Exception as e:
self.handle_error(e) # this will manipulate self.response
finally:
if res.code == 204:
res.headers.pop('Content-Type', None)
res.body = None # just to be sure since body could've been ""
if con:
if not self.quiet:
con.log_stop(start)
return res
|
def handle(self)
|
Called from the interface to actually handle the request.
| 5.247037
| 5.092117
| 1.030423
|
req = self.request
res = self.response
con = self.controller
if isinstance(e, CallStop):
logger.info(str(e), exc_info=True)
res.code = e.code
res.add_headers(e.headers)
res.body = e.body
elif isinstance(e, Redirect):
logger.info(str(e), exc_info=True)
res.code = e.code
res.add_headers(e.headers)
res.body = None
elif isinstance(e, (AccessDenied, CallError)):
logger.warning(str(e), exc_info=True)
res.code = e.code
res.add_headers(e.headers)
res.body = e
elif isinstance(e, NotImplementedError):
logger.warning(str(e), exc_info=True)
res.code = 501
res.body = e
elif isinstance(e, TypeError):
e_msg = unicode(e)
if e_msg.startswith(req.method) and 'argument' in e_msg:
logger.debug(e_msg, exc_info=True)
logger.warning(
" ".join([
"Either the path arguments ({} args) or the keyword arguments",
"({} args) for {}.{} do not match the {} handling method's",
"definition"
]).format(
len(req.controller_info["method_args"]),
len(req.controller_info["method_kwargs"]),
req.controller_info['module_name'],
req.controller_info['class_name'],
req.method
)
)
res.code = 405
else:
logger.exception(e)
res.code = 500
res.body = e
else:
logger.exception(e)
res.code = 500
res.body = e
if con:
error_method = getattr(con, "handle_{}_error".format(req.method), None)
if not error_method:
error_method = getattr(con, "handle_error")
logger.debug("Using error method: {}.{}".format(
con.__class__.__name__,
error_method.__name__
))
error_method(e, **kwargs)
|
def handle_error(self, e, **kwargs)
|
if an exception is raised while trying to handle the request it will
go through this method
This method will set the response body and then also call Controller.handle_error
for further customization if the Controller is available
:param e: Exception, the error that was raised
:param **kwargs: dict, any other information that might be handy
| 2.487482
| 2.471079
| 1.006638
|
controller_prefix = self.controller_prefix
_module_name_cache = self._module_name_cache
if controller_prefix in _module_name_cache:
return _module_name_cache[controller_prefix]
module = self.get_module(controller_prefix)
if hasattr(module, "__path__"):
# path attr exists so this is a package
modules = self.find_modules(module.__path__[0], controller_prefix)
else:
# we have a lonely .py file
modules = set([controller_prefix])
_module_name_cache.setdefault(controller_prefix, {})
_module_name_cache[controller_prefix] = modules
return modules
|
def module_names(self)
|
get all the modules in the controller_prefix
:returns: set, a set of string module names
| 3.23817
| 2.923228
| 1.107738
|
for modname in self.module_names:
module = importlib.import_module(modname)
yield module
|
def modules(self)
|
Returns an iterator of the actual modules, not just their names
:returns: generator, each module under self.controller_prefix
| 4.190207
| 4.624832
| 0.906024
|
modules = set([prefix])
# https://docs.python.org/2/library/pkgutil.html#pkgutil.iter_modules
for module_info in pkgutil.iter_modules([path]):
# we want to ignore any "private" modules
if module_info[1].startswith('_'): continue
module_prefix = ".".join([prefix, module_info[1]])
if module_info[2]:
# module is a package
submodules = self.find_modules(os.path.join(path, module_info[1]), module_prefix)
modules.update(submodules)
else:
modules.add(module_prefix)
return modules
|
def find_modules(self, path, prefix)
|
recursive method that will find all the submodules of the module at the given
prefix and path
| 2.280946
| 2.247857
| 1.01472
|
controller_prefix = self.controller_prefix
cset = self.module_names
module_name = controller_prefix
mod_name = module_name
while path_args:
mod_name += "." + path_args[0]
if mod_name in cset:
module_name = mod_name
path_args.pop(0)
else:
break
return module_name, path_args
|
def get_module_name(self, path_args)
|
returns the module_name and remaining path args.
return -- tuple -- (module_name, path_args)
| 3.024458
| 2.975533
| 1.016443
|
# let's get the class
class_object = getattr(module, class_name, None)
if not class_object or not issubclass(class_object, Controller):
class_object = None
return class_object
|
def get_class(self, module, class_name)
|
try to get class_name from the module and make sure it is a valid
controller
| 3.609495
| 3.080774
| 1.171619
|
if not self.cors:
raise CallError(405)
req = self.request
origin = req.get_header('origin')
if not origin:
raise CallError(400, 'Need Origin header')
call_headers = [
('Access-Control-Request-Headers', 'Access-Control-Allow-Headers'),
('Access-Control-Request-Method', 'Access-Control-Allow-Methods')
]
for req_header, res_header in call_headers:
v = req.get_header(req_header)
if v:
self.response.set_header(res_header, v)
else:
raise CallError(400, 'Need {} header'.format(req_header))
other_headers = {
'Access-Control-Allow-Credentials': 'true',
'Access-Control-Max-Age': 3600
}
self.response.add_headers(other_headers)
|
def OPTIONS(self, *args, **kwargs)
|
Handles CORS requests for this controller
if self.cors is False then this will raise a 405, otherwise it sets everything
necessary to satisfy the request in self.response
| 2.449525
| 2.3049
| 1.062747
|
if not self.cors: return
req = self.request
origin = req.get_header('origin')
if origin:
self.response.set_header('Access-Control-Allow-Origin', origin)
|
def set_cors_common_headers(self)
|
This will set the headers that are needed for any cors request (OPTIONS or real)
| 3.212393
| 2.963929
| 1.083829
|
req = self.request
res = self.response
res.set_header('Content-Type', "{};charset={}".format(
self.content_type,
self.encoding
))
encoding = req.accept_encoding
res.encoding = encoding if encoding else self.encoding
res_method_name = ""
controller_methods = self.find_methods()
#controller_args, controller_kwargs = self.find_method_params()
for controller_method_name, controller_method in controller_methods:
try:
logger.debug("Attempting to handle request with {}.{}.{}".format(
req.controller_info['module_name'],
req.controller_info['class_name'],
controller_method_name
))
res.body = controller_method(
*controller_args,
**controller_kwargs
)
res_method_name = controller_method_name
break
except VersionError as e:
logger.debug("Request {}.{}.{} failed version check [{} not in {}]".format(
req.controller_info['module_name'],
req.controller_info['class_name'],
controller_method_name,
e.request_version,
e.versions
))
except RouteError:
logger.debug("Request {}.{}.{} failed routing check".format(
req.controller_info['module_name'],
req.controller_info['class_name'],
controller_method_name
))
if not res_method_name:
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1
# An origin server SHOULD return the status code 405 (Method Not Allowed)
# if the method is known by the origin server but not allowed for the
# requested resource
raise CallError(405, "Could not find a method to satisfy {}".format(
req.path
))
|
def handle(self, *controller_args, **controller_kwargs)
|
handles the request and returns the response
This should set any response information directly onto self.response
this method has the same signature as the request handling methods
(eg, GET, POST) so subclasses can override this method and add decorators
:param *controller_args: tuple, the path arguments that will be passed to
the request handling method (eg, GET, POST)
:param **controller_kwargs: dict, the query and body params merged together
| 2.773232
| 2.806272
| 0.988226
|
methods = []
req = self.request
method_name = req.method.upper()
method_names = set()
members = inspect.getmembers(self)
for member_name, member in members:
if member_name.startswith(method_name):
if member:
methods.append((member_name, member))
method_names.add(member_name)
if len(methods) == 0:
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1
# and 501 (Not Implemented) if the method is unrecognized or not
# implemented by the origin server
logger.warning("No methods to handle {} found".format(method_name), exc_info=True)
raise CallError(501, "{} {} not implemented".format(req.method, req.path))
elif len(methods) > 1 and method_name in method_names:
raise ValueError(
" ".join([
"A multi method {} request should not have any methods named {}.",
"Instead, all {} methods should use use an appropriate decorator",
"like @route or @version and have a unique name starting with {}_"
]).format(
method_name,
method_name,
method_name,
method_name
)
)
return methods
|
def find_methods(self)
|
Find the methods that could satisfy this request
This will go through and find any method that starts with the request.method,
so if the request was GET /foo then this would find any methods that start
with GET
https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html
:returns: list of tuples (method_name, method), all the found methods
| 3.915884
| 3.851302
| 1.016769
|
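As a self-contained illustration of the name-prefix dispatch described above (only the inspect.getmembers/startswith technique is taken from the row; the class and method names here are made up):

import inspect

class ExampleHandler(object):
    """Toy handler: any attribute whose name starts with the HTTP method
    is treated as a candidate for handling the request."""

    def GET(self):
        return "plain GET"

    def GET_by_id(self):
        return "a GET variant (disambiguated by decorators in the real framework)"

    def POST(self):
        return "POST"

    def find_methods(self, http_method):
        prefix = http_method.upper()
        return [(name, member) for name, member in inspect.getmembers(self)
                if name.startswith(prefix)]

print([name for name, _ in ExampleHandler().find_methods("get")])
# -> ['GET', 'GET_by_id']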
req = self.request
args = req.controller_info["method_args"]
kwargs = req.controller_info["method_kwargs"]
return args, kwargs
|
def find_method_params(self)
|
Return the method params
:returns: tuple (args, kwargs) that will be passed as *args, **kwargs
| 6.267048
| 5.975758
| 1.048745
|
if not logger.isEnabledFor(logging.INFO): return
try:
req = self.request
logger.info("REQUEST {} {}?{}".format(req.method, req.path, req.query))
logger.info(datetime.datetime.strftime(datetime.datetime.utcnow(), "DATE %Y-%m-%dT%H:%M:%S.%f"))
ip = req.ip
if ip:
logger.info("\tIP ADDRESS: {}".format(ip))
if 'authorization' in req.headers:
logger.info('AUTH {}'.format(req.headers['authorization']))
ignore_hs = set([
'accept-language',
'accept-encoding',
'connection',
'authorization',
'host',
'x-forwarded-for'
])
hs = ["Request Headers..."]
for k, v in req.headers.items():
if k not in ignore_hs:
hs.append("\t{}: {}".format(k, v))
logger.info(os.linesep.join(hs))
except Exception as e:
logger.warn(e, exc_info=True)
|
def log_start(self, start)
|
log all the headers and stuff at the start of the request
| 3.12495
| 2.952615
| 1.058367
|
if not logger.isEnabledFor(logging.INFO): return
stop = time.time()
get_elapsed = lambda start, stop, multiplier, rnd: round(abs(stop - start) * float(multiplier), rnd)
elapsed = get_elapsed(start, stop, 1000.00, 1)
total = "%0.1f ms" % (elapsed)
logger.info("RESPONSE {} {} in {}".format(self.response.code, self.response.status, total))
|
def log_stop(self, start)
|
log a summary line on how the request went
| 4.876401
| 4.619651
| 1.055578
|
# Open
with open(path, 'rb') as yaml_definition:
definition = yaml.load(yaml_definition)
# Validate schema
try:
validate_schema(definition)
except SchemaError as exc:
raise LaneSchemaError(**exc.__dict__)
def build(lb_def, branch=False):
init_kwargs = {k: lb_def[k] for k in (a for a in ('run_parallel', 'name') if a in lb_def)}
lane_or_branch = Lane(**init_kwargs) if not branch else Branch(**init_kwargs)
for task in lb_def['tasks']:
if 'branch' in task:
branch_def = task['branch']
lane_or_branch.add(build(branch_def, True))
else:
sep = task['class'].rfind('.')
if sep == -1:
raise LaneImportError('Class must include its parent module')
mdl = task['class'][:sep]
cls_ = task['class'][sep + 1:]
try:
cls = getattr(import_module(mdl), cls_)
except ImportError:
raise LaneImportError('Could not find module %s' % mdl)
except AttributeError:
raise LaneImportError('Could not find class %s' % cls_)
args = task['args'] if 'args' in task else []
args = [args] if not isinstance(args, list) else args
kwargs = task['kwargs'] if 'kwargs' in task else {}
lane_or_branch.add(cls, *args, **kwargs)
return lane_or_branch
return build(definition['lane'])
|
def build_lane_from_yaml(path)
|
Builds a `sparklanes.Lane` object from a YAML definition file.
Parameters
----------
path: str
Path to the YAML definition file
Returns
-------
Lane
Lane, built according to definition in YAML file
| 2.828614
| 2.869608
| 0.985714
|
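For reference, a minimal lane definition in the shape that yaml.load would hand to the parser above; the module and class names are hypothetical placeholders.

definition = {
    'lane': {
        'name': 'etl_lane',          # optional Lane/Branch init kwarg
        'run_parallel': False,       # optional Lane/Branch init kwarg
        'tasks': [
            {'class': 'my_pkg.tasks.Extract', 'kwargs': {'path': '/tmp/in.csv'}},
            {'branch': {
                'name': 'clean_branch',
                'tasks': [
                    {'class': 'my_pkg.tasks.Clean', 'args': ['col_a']},
                ],
            }},
            {'class': 'my_pkg.tasks.Load'},
        ],
    },
}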
if not isclass(cls) or not issubclass(cls, LaneTask):
raise TypeError('Tried to add non-Task `%s` to a Lane. Are you sure the task was '
'decorated with `sparklanes.Task`?' % str(cls))
validate_params(cls, entry_mtd_name, *args, **kwargs)
|
def __validate_task(self, cls, entry_mtd_name, args, kwargs)
|
Checks if a class is a task, i.e. if it has been decorated with `sparklanes.Task`, and if
the supplied args/kwargs match the signature of the task's entry method.
Parameters
----------
cls : LaneTask
entry_mtd_name : str
Name of the method, which is called when the task is run
args : list
kwargs : dict
| 6.415287
| 4.522811
| 1.418429
|
if isinstance(cls_or_branch, Branch):
self.tasks.append(cls_or_branch) # Add branch with already validated tasks
else:
# Validate
self.__validate_task(cls_or_branch, '__init__', args, kwargs)
# Append
self.tasks.append({'cls_or_branch': cls_or_branch, 'args': args, 'kwargs': kwargs})
return self
|
def add(self, cls_or_branch, *args, **kwargs)
|
Adds a task or branch to the lane.
Parameters
----------
cls_or_branch : Class
*args
Variable length argument list to be passed to `cls_or_branch` during instantiation
**kwargs
Variable length keyword arguments to be passed to `cls_or_branch` during instantiation
Returns
-------
self: Returns `self` to allow method chaining
| 3.621933
| 3.720835
| 0.973419
|
logger = make_default_logger(INTERNAL_LOGGER_NAME)
logger.info('\n%s\nExecuting `%s`\n%s\n', '-'*80, self.name, '-'*80)
logger.info('\n%s', str(self))
threads = []
if not self.tasks:
raise LaneExecutionError('No tasks to execute!')
for task_def_or_branch in self.tasks:
if isinstance(task_def_or_branch, Branch):
task_def_or_branch.run()
elif isinstance(task_def_or_branch['cls_or_branch'], Branch): # Nested Branch
task_def_or_branch['cls_or_branch'].run()
else:
task = task_def_or_branch['cls_or_branch'](*task_def_or_branch['args'],
**task_def_or_branch['kwargs'])
if self.run_parallel:
threads.append(LaneTaskThread(task))
else:
task()
if threads:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
logger.info('\n%s\nFinished executing `%s`\n%s', '-'*80, self.name, '-'*80)
return self
|
def run(self)
|
Executes the tasks in the lane in the order in which they have been added, unless
`self.run_parallel` is True, then a thread is spawned for each task and executed in
parallel (note that task threads are still spawned in the order in which they were added).
| 2.712165
| 2.48984
| 1.089293
|
observations = np.tile(np.array([[10, 256, 202, 97]]), (nmr_problems, 1))
nmr_tanks_ground_truth = np.ones((nmr_problems,)) * 276
return observations, nmr_tanks_ground_truth
|
def get_historical_data(nmr_problems)
|
Get the historical tank data.
Args:
nmr_problems (int): the number of problems
Returns:
tuple: (observations, nmr_tanks_ground_truth)
| 5.227838
| 3.560335
| 1.468355
|
# The number of tanks we observe per problem
nmr_observed_tanks = 10
# Generate some maximum number of tanks. Basically the ground truth of the estimation problem.
nmr_tanks_ground_truth = normal(nmr_problems, 1, mean=250, std=30, ctype='uint')
# Generate some random tank observations
observations = uniform(nmr_problems, nmr_observed_tanks, low=0, high=nmr_tanks_ground_truth, ctype='uint')
return observations, nmr_tanks_ground_truth
|
def get_simulated_data(nmr_problems)
|
Simulate some data.
This returns the simulated tank observations and the corresponding ground truth maximum number of tanks.
Args:
nmr_problems (int): the number of problems
Returns:
tuple: (observations, nmr_tanks_ground_truth)
| 5.914598
| 4.564758
| 1.295709
|
return SimpleCLFunction.from_string('''
/**
* Compute the Hessian using (possibly) multiple steps with various interpolations.
*/
void _numdiff_hessian_element(
void* data, local mot_float_type* x_tmp, mot_float_type f_x_input,
uint px, uint py, global float* initial_step, global double* derivative,
global double* error, local double* scratch){
const uint nmr_steps = ''' + str(nmr_steps) + ''';
uint nmr_steps_remaining = nmr_steps;
local double* scratch_ind = scratch;
local double* steps = scratch_ind; scratch_ind += nmr_steps;
local double* errors = scratch_ind; scratch_ind += nmr_steps - 1;
local double* steps_tmp = scratch_ind; scratch_ind += nmr_steps;
if(get_local_id(0) == 0){
for(int i = 0; i < nmr_steps - 1; i++){
errors[i] = 0;
}
}
barrier(CLK_LOCAL_MEM_FENCE);
_numdiff_hessian_steps(data, x_tmp, f_x_input, px, py, steps, initial_step);
if(nmr_steps_remaining > 1){
nmr_steps_remaining = _numdiff_hessian_richardson_extrapolation(steps);
barrier(CLK_LOCAL_MEM_FENCE);
}
if(nmr_steps_remaining >= 3){
nmr_steps_remaining = _numdiff_wynn_extrapolation(steps, errors, nmr_steps_remaining);
barrier(CLK_LOCAL_MEM_FENCE);
}
if(nmr_steps_remaining > 1){
_numdiff_find_best_step(steps, errors, steps_tmp, nmr_steps_remaining);
barrier(CLK_LOCAL_MEM_FENCE);
}
if(get_local_id(0) == 0){
*derivative = steps[0];
*error = errors[0];
}
}
''', dependencies=[
_get_numdiff_hessian_steps_func(objective_func, nmr_steps, step_ratio),
_get_numdiff_hessian_richardson_extrapolation_func(nmr_steps, step_ratio),
_get_numdiff_wynn_extrapolation_func(),
_get_numdiff_find_best_step_func()
])
|
def _get_numdiff_hessian_element_func(objective_func, nmr_steps, step_ratio)
|
Return a function to compute one element of the Hessian matrix.
| 3.317901
| 3.323212
| 0.998402
|
nmr_params = parameters.shape[1]
initial_step = np.zeros_like(parameters)
if max_step_sizes is None:
max_step_sizes = 0.1
if isinstance(max_step_sizes, Number):
max_step_sizes = [max_step_sizes] * nmr_params
max_step_sizes = np.array(max_step_sizes)
for ind in range(parameters.shape[1]):
minimum_allowed_step = np.minimum(np.abs(parameters[:, ind] - lower_bounds[ind]),
np.abs(upper_bounds[ind] - parameters[:, ind]))
initial_step[:, ind] = np.minimum(minimum_allowed_step, max_step_sizes[ind])
return initial_step / 2.
|
def _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes)
|
Get an initial step size to use for every parameter.
This chooses the step sizes based on the maximum step size and the lower and upper bounds.
Args:
parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems,
p parameters and n samples.
lower_bounds (list): lower bounds
upper_bounds (list): upper bounds
max_step_sizes (list or None): the maximum step size, or the maximum step size per parameter. Defaults to 0.1
Returns:
ndarray: for every problem instance the vector with the initial step size for each parameter.
| 2.259969
| 2.29847
| 0.983249
|
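A small worked example of the step-size rule above; it assumes _get_initial_step and numpy are importable, and the numbers are chosen purely for illustration.

import numpy as np

# One problem with two parameters, both bounded to [0, 1], default max step 0.1.
params = np.array([[0.05, 0.5]])
steps = _get_initial_step(params, lower_bounds=[0, 0], upper_bounds=[1, 1], max_step_sizes=None)
# parameter 0: min(|0.05 - 0|, |1 - 0.05|, 0.1) / 2 = 0.025
# parameter 1: min(|0.5 - 0|,  |1 - 0.5|,  0.1) / 2 = 0.05
print(steps)  # [[0.025 0.05 ]]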
self._old_config = {k: v for k, v in _config.items()}
self._apply()
|
def apply(self)
|
Apply the current action to the current runtime configuration.
| 8.441441
| 6.413828
| 1.316131
|
for key, value in self._old_config.items():
_config[key] = value
|
def unapply(self)
|
Reset the current configuration to the previous state.
| 9.194933
| 5.878249
| 1.56423
|
# In the event that no argument is supplied to the decorator, python passes the decorated
# class itself as an argument. That way, we can detect if no argument (or an argument of
# invalid type) was supplied. This allows passing of `entry` as both a named kwarg, and
# as an arg. Isn't neat, but for now it suffices.
raise TypeError('When decorating a class with `Task`, a single string argument must be '
'supplied, which specifies the "main" task method, i.e. the class\'s entry '
'point to the task.')
else:
def wrapper(cls):
if isclass(cls):
if not hasattr(cls, entry): # Check if cls has the specified entry method
raise TypeError('Method `%s` not found in class `%s`.' % (entry, cls.__name__))
# We will have to inspect the task class's `__init__` method later (by inspecting
# the arg signature, before it is instantiated). In various circumstances, classes
# will not have an unbound `__init__` method. Let's deal with that now already, by
# assigning an empty, unbound `__init__` method manually, in order to prevent
# errors later on during method inspection (not an issue in Python 3):
# - Whenever a class is not defined as a new-style class in Python 2.7, i.e. a
# sub-class of object, and it does not have a `__init__` method definition, the
# class will not have an attribute `__init__`
# - If a class misses a `__init__` method definition, but is defined as a
# new-style class, attribute `__init__` will be of type `slot wrapper`, which
# cannot be inspected (and it also doesn't seem possible to check if a method is of
# type `slot wrapper`, which is why we manually define one).
if not hasattr(cls, '__init__') or cls.__init__ == object.__init__:
init = MethodType(lambda self: None, None, cls) \
if PY2 else MethodType(lambda self: None, cls)
setattr(cls, '__init__', init)
# Check for attributes that will be overwritten, in order to warn the user
reserved_attributes = ('__getattr__', '__call__', '_entry_mtd', 'cache', 'uncache',
'clear_cache', '_log_lock')
for attr in dir(cls):
if attr in reserved_attributes:
make_default_logger(INTERNAL_LOGGER_NAME).warning(
'Attribute `%s` of class `%s` will be overwritten when decorated with '
'`sparklanes.Task`! Avoid assigning any of the following attributes '
'`%s`', attr, cls.__name__, str(reserved_attributes)
)
assignments = {'_entry_mtd': entry,
'__getattr__': lambda self, name: TaskCache.get(name),
'__init__': cls.__init__,
'_log_lock': Lock()}
for attr in WRAPPER_ASSIGNMENTS:
try:
assignments[attr] = getattr(cls, attr)
except AttributeError:
pass
# Build task as a subclass of LaneTask
return type('Task_%s' % cls.__name__, (LaneTask, cls, object), assignments)
else:
raise TypeError('Only classes can be decorated with `Task`')
return wrapper
|
def Task(entry): # pylint: disable=invalid-name
if not isinstance(entry, string_types)
|
Decorator with which classes, who act as tasks in a `Lane`, must be decorated. When a class is
being decorated, it becomes a child of `LaneTask`.
Parameters
----------
entry: The name of the task's "main" method, i.e. the method which is executed when task is run
Returns
-------
wrapper (function): The actual decorator function
| 5.88268
| 5.808586
| 1.012756
|
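A hypothetical end-to-end sketch combining the decorator described above with the Lane API from the earlier rows; the import path and the task classes are assumptions made for illustration.

from sparklanes import Lane, Task

@Task('extract')
class ExtractData(object):
    def __init__(self, path):
        self.path = path

    def extract(self):
        # 'extract' is the entry method named in the decorator; it runs when
        # the task executes and could call self.cache(...) to share data.
        print('extracting from', self.path)

@Task('load')
class LoadData(object):
    def load(self):
        print('loading')

lane = Lane(name='etl', run_parallel=False)
lane.add(ExtractData, path='/tmp/in.csv').add(LoadData)  # add() returns self
lane.run()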
if name in TaskCache.cached and not overwrite:
raise CacheError('Object with name `%s` already in cache.' % name)
TaskCache.cached[name] = val
|
def cache(self, name, val, overwrite=True)
|
Assigns an attribute reference to all subsequent tasks. For example, if a task caches a
DataFrame `df` using `self.cache('some_df', df)`, all tasks that follow can access the
DataFrame using `self.some_df`. Note that manually assigned attributes that share the same
name have precedence over cached attributes.
Parameters
----------
name : str
Name of the attribute
val
Attribute value
overwrite : bool
Indicates if the attribute shall be overwritten, or not (if `False`, and
a cached attribute with the given name already exists, `sparklanes.errors.CacheError`
will be thrown).
| 5.347608
| 5.666649
| 0.943698
|
self.exc = None
try:
self.task()
except BaseException:
self.exc = sys.exc_info()
|
def run(self)
|
Overwrites `threading.Thread.run`, to allow handling of exceptions thrown by threads
from within the main app.
| 4.56365
| 3.438531
| 1.327209
|
Thread.join(self, timeout=timeout)
if self.exc:
msg = "Thread '%s' threw an exception `%s`: %s" \
% (self.getName(), self.exc[0].__name__, self.exc[1])
new_exc = LaneExecutionError(msg)
if PY3:
raise new_exc.with_traceback(self.exc[2]) # pylint: disable=no-member
else:
raise (new_exc.__class__, new_exc, self.exc[2])
|
def join(self, timeout=None)
|
Overwrites `threading.Thread.join`, to allow handling of exceptions thrown by threads
from within the main app.
| 3.512647
| 3.62633
| 0.968651
|
def _called_decorator(dec_func):
@wraps(dec_func)
def _decorator(*args, **kwargs):
return dec_func()
return _decorator
return _called_decorator
|
def mock_decorator(*args, **kwargs)
|
Mocked decorator, needed in case we need to mock a decorator
| 3.181128
| 3.031504
| 1.049356
|
if any(name.startswith(s) for s in mock_modules):
return MockModule()
return orig_import(name, *args, **kwargs)
|
def import_mock(name, *args, **kwargs)
|
Mock all modules starting with one of the mock_modules names.
| 4.47285
| 3.215867
| 1.390869
|
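A sketch of how an import hook like the one above is typically installed during tests (Python 3); the mocked module name 'pyspark' and the MockModule class are assumptions based on the surrounding names.

import builtins
from unittest import mock

orig_import = builtins.__import__
mock_modules = ('pyspark',)

class MockModule(object):
    def __getattr__(self, name):
        return mock.MagicMock()

def import_mock(name, *args, **kwargs):
    if any(name.startswith(s) for s in mock_modules):
        return MockModule()
    return orig_import(name, *args, **kwargs)

with mock.patch('builtins.__import__', side_effect=import_mock):
    import pyspark  # bound to a MockModule instance instead of the real package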
cl_runtime_info = cl_runtime_info or CLRuntimeInfo()
cl_environments = cl_runtime_info.cl_environments
for param in cl_function.get_parameters():
if param.name not in kernel_data:
names = [param.name for param in cl_function.get_parameters()]
missing_names = [name for name in names if name not in kernel_data]
raise ValueError('Some parameters are missing an input value, '
'required parameters are: {}, missing inputs are: {}'.format(names, missing_names))
if cl_function.get_return_type() != 'void':
kernel_data['_results'] = Zeros((nmr_instances,), cl_function.get_return_type())
workers = []
for ind, cl_environment in enumerate(cl_environments):
worker = _ProcedureWorker(cl_environment, cl_runtime_info.compile_flags,
cl_function, kernel_data, cl_runtime_info.double_precision, use_local_reduction)
workers.append(worker)
def enqueue_batch(batch_size, offset):
items_per_worker = [batch_size // len(cl_environments) for _ in range(len(cl_environments) - 1)]
items_per_worker.append(batch_size - sum(items_per_worker))
for ind, worker in enumerate(workers):
worker.calculate(offset, offset + items_per_worker[ind])
offset += items_per_worker[ind]
worker.cl_queue.flush()
for worker in workers:
worker.cl_queue.finish()
return offset
total_offset = 0
for batch_start, batch_end in split_in_batches(nmr_instances, 1e4 * len(workers)):
total_offset = enqueue_batch(batch_end - batch_start, total_offset)
if cl_function.get_return_type() != 'void':
return kernel_data['_results'].get_data()
|
def apply_cl_function(cl_function, kernel_data, nmr_instances, use_local_reduction=False, cl_runtime_info=None)
|
Run the given function/procedure on the given set of data.
This will wrap the given CL function in a kernel call and execute it for every data instance using
the provided kernel data. It will respect the read/write setting of the kernel data elements such that
output can be written back to the corresponding kernel data elements.
Args:
cl_function (mot.lib.cl_function.CLFunction): the function to
run on the datasets. Either a name function tuple or an actual CLFunction object.
kernel_data (dict[str: mot.lib.kernel_data.KernelData]): the data to use as input to the function.
nmr_instances (int): the number of parallel threads to run (used as ``global_size``)
use_local_reduction (boolean): set this to True if you want to use local memory reduction in
your CL procedure. If this is set to True we will multiply the global size (given by the nmr_instances)
by the work group sizes.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information
| 2.791767
| 2.968414
| 0.940491
|
return_type, function_name, parameter_list, body = split_cl_function(cl_function)
return SimpleCLFunction(return_type, function_name, parameter_list, body, dependencies=dependencies)
|
def from_string(cls, cl_function, dependencies=())
|
Parse the given CL function into a SimpleCLFunction object.
Args:
cl_function (str): the function we wish to turn into an object
dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on
Returns:
SimpleCLFunction: the CL function object parsed from the given string
| 4.083089
| 3.523357
| 1.158863
|
declarations = []
for p in self.get_parameters():
new_p = p.get_renamed(p.name.replace('.', '_'))
declarations.append(new_p.get_declaration())
return declarations
|
def _get_parameter_signatures(self)
|
Get the signature of the parameters for the CL function declaration.
This should return the list of signatures of the parameters for use inside the function signature.
Returns:
list: the signatures of the parameters for the use in the CL code.
| 4.749177
| 4.977032
| 0.954219
|
code = ''
for d in self._dependencies:
code += d.get_cl_code() + "\n"
return code
|
def _get_cl_dependency_code(self)
|
Get the CL code of all the dependencies.
Returns:
str: the CL code of all the dependencies, concatenated.
| 4.605629
| 4.781287
| 0.963261
|
return cl.Program(self._cl_context, kernel_source).build(' '.join(compile_flags))
|
def _build_kernel(self, kernel_source, compile_flags=())
|
Convenience function for building the kernel for this worker.
Args:
kernel_source (str): the kernel source to use for building the kernel
Returns:
cl.Program: a compiled CL kernel
| 7.691453
| 9.851299
| 0.780755
|
declarations = []
for name, data in self._kernel_data.items():
declarations.extend(data.get_kernel_parameters('_' + name))
return declarations
|
def _get_kernel_arguments(self)
|
Get the list of kernel arguments for loading the kernel data elements into the kernel.
This will use the sorted keys for looping through the kernel input items.
Returns:
list of str: the list of parameter definitions
| 6.898833
| 6.983984
| 0.987808
|
dtypes = []
for name, data in self._kernel_data.items():
dtypes.extend(data.get_scalar_arg_dtypes())
return dtypes
|
def get_scalar_arg_dtypes(self)
|
Get the location and types of the input scalars.
Returns:
list: for every kernel input element either None if the data is a buffer or the numpy data type
if it is a scalar.
| 3.862915
| 3.921098
| 0.985162
|
if cls.sc is not None:
cls.sc.stop()
cls.sc = SparkContext(master, appName, sparkHome, pyFiles, environment, batchSize,
serializer,
conf, gateway, jsc, profiler_cls)
cls.__init_spark()
|
def set_sc(cls, master=None, appName=None, sparkHome=None, pyFiles=None, environment=None,
batchSize=0, serializer=PickleSerializer(), conf=None, gateway=None, jsc=None,
profiler_cls=BasicProfiler)
|
Creates and initializes a new `SparkContext` (the old one will be stopped).
Argument signature is copied from `pyspark.SparkContext
<https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext>`_.
| 2.571739
| 2.71955
| 0.945649
|
sess = SparkSession.builder
if master:
sess.master(master)
if appName:
sess.appName(appName)
if conf:
sess.config(conf=conf)
if hive_support:
sess.enableHiveSupport()
cls.spark = sess.getOrCreate()
|
def set_spark(cls, master=None, appName=None, conf=None, hive_support=False)
|
Creates and initializes a new `SparkSession`. Argument signature is copied from
`pyspark.sql.SparkSession
<https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession>`_.
| 1.94401
| 2.048386
| 0.949045
|
args = _parse_and_validate_args(args)
logging.debug(args)
dist = __make_tmp_dir()
try:
__package_dependencies(dist_dir=dist, additional_reqs=args['requirements'],
silent=args['silent'])
__package_app(tasks_pkg=args['package'],
dist_dir=dist,
custom_main=args['main'],
extra_data=args['extra_data'])
__run_spark_submit(lane_yaml=args['yaml'],
dist_dir=dist,
spark_home=args['spark_home'],
spark_args=args['spark_args'],
silent=args['silent'])
except Exception as exc:
__clean_up(dist)
raise exc
__clean_up(dist)
|
def _package_and_submit(args)
|
Packages and submits a job, which is defined in a YAML file, to Spark.
Parameters
----------
args (List): Command-line arguments
| 4.602906
| 4.67358
| 0.984878
|
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, [])
getattr(namespace, self.dest).extend(values)
parser = argparse.ArgumentParser(description='Submitting a lane to spark.')
parser.add_argument('-y', '--yaml', type=str, required=True,
help='Path to the yaml definition file.')
parser.add_argument('-p', '--package', type=str, required=True,
help='Path to the python package containing your tasks.')
parser.add_argument('-r', '--requirements', type=str, required=False,
help='Path to a `requirements.txt` specifying any additional dependencies '
'of your tasks.')
parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction,
help='Path to any additional files or directories that should be packaged '
'and sent to Spark.')
parser.add_argument('-m', '--main', type=str, required=False,
help='Path to a custom main python file')
parser.add_argument('-d', '--spark-home', type=str, required=False,
help='Custom path to the directory containing your Spark installation. If '
'none is given, sparklanes will try to use the `spark-submit` command '
'from your PATH')
parser.add_argument('-s', '--spark-args', nargs='*', required=False,
help='Any additional arguments that should be sent to Spark via '
'spark-submit. '
'(e.g. `--spark-args executor-memory=20G total-executor-cores=100`)')
parser.add_argument('--silent', help='If set, no output will be sent to console',
action='store_true')
args = parser.parse_args(args).__dict__
# Check/fix files/dirs
for param in ('package', 'spark_home'):
args[param] = __validate_and_fix_path(args[param], check_dir=True)
for param in ('yaml', 'requirements', 'main'):
args[param] = __validate_and_fix_path(args[param], check_file=True)
if args['extra_data']:
for i in range(len(args['extra_data'])):
args['extra_data'][i] = __validate_and_fix_path(args['extra_data'][i],
check_file=True, check_dir=True)
# Check if python package
if not os.path.isfile(os.path.join(args['package'], '__init__.py')):
raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an '
'`__init__.py`.' % args['package'])
# Check/fix spark args
if args['spark_args']:
args['spark_args'] = __validate_and_fix_spark_args(args['spark_args'])
return args
|
def _parse_and_validate_args(args)
|
Parse and validate arguments. During validation, it is checked whether the given
files/directories exist, while also converting relative paths to absolute ones.
Parameters
----------
args (List): Command-line arguments
| 2.640853
| 2.633002
| 1.002982
|
# pylint: disable=superfluous-parens
if path is None:
return path
else:
if not (os.path.isfile(path) if check_file else False) \
and not (os.path.isdir(path) if check_dir else False):
raise SystemExit('Path `%s` does not exist' % path)
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path))
return path
|
def __validate_and_fix_path(path, check_file=False, check_dir=False)
|
Check if a file/directory exists and converts relative paths to absolute ones
| 2.454223
| 2.335994
| 1.050612
|
pattern = re.compile(r'[\w\-_]+=.+')
fixed_args = []
for arg in spark_args:
if arg not in SPARK_SUBMIT_FLAGS:
if not pattern.match(arg):
raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
'`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the '
'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS)))
eq_pos = arg.find('=')
fixed_args.append('--' + arg[:eq_pos])
fixed_args.append(arg[eq_pos + 1:])
else:
fixed_args.append('--' + arg)
return fixed_args
|
def __validate_and_fix_spark_args(spark_args)
|
Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments
| 3.520775
| 3.536503
| 0.995553
|
logging.info('Packaging dependencies')
libs_dir = os.path.join(dist_dir, 'libs')
if not os.path.isdir(libs_dir):
os.mkdir(libs_dir)
# Get requirements
req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt')
with open(req_txt, 'r') as req:
requirements = req.read().splitlines()
if additional_reqs:
with open(additional_reqs, 'r') as req:
for row in req:
requirements.append(row)
# Remove duplicates
requirements = list(set(requirements))
# Install
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
for pkg in requirements:
cmd = ['pip', 'install', pkg, '-t', libs_dir]
logging.debug('Calling `%s`', str(cmd))
call(cmd, **outp)
devnull.close()
# Package
shutil.make_archive(libs_dir, 'zip', libs_dir, './')
|
def __package_dependencies(dist_dir, additional_reqs, silent)
|
Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark.
Parameters
----------
dist_dir (str): Path to directory where the packaged libs shall be located
additional_reqs (str): Path to a requirements.txt, containing any of the app's additional
requirements
silent (bool): Flag indicating whether pip output should be printed to console
| 2.528637
| 2.546094
| 0.993143
|
logging.info('Packaging application')
# Package tasks
tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg))
shutil.make_archive(os.path.join(dist_dir, 'tasks'),
'zip',
tasks_dir_splits[0],
tasks_dir_splits[1])
# Package main.py
if custom_main is None:
from . import _main
main_path = _main.__file__
if main_path[-3:] == 'pyc':
main_path = main_path[:-1]
shutil.copy(os.path.realpath(main_path),
os.path.join(dist_dir, 'main.py'))
else:
shutil.copy(os.path.realpath(custom_main),
os.path.join(dist_dir, 'main.py'))
# Package _framework
shutil.make_archive(os.path.join(dist_dir, '_framework'),
'zip',
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'),
'./sparklanes/')
# Package extra data
if extra_data:
for dat in extra_data:
real_path = os.path.realpath(dat)
target = os.path.join(dist_dir, os.path.split(real_path)[1])
if os.path.isfile(real_path):
shutil.copy(real_path, target)
elif os.path.isdir(real_path):
shutil.copytree(real_path, target)
else:
raise IOError('File `%s` not found at `%s`.' % (dat, real_path))
|
def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None)
|
Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to
`dist_dir`, to be submitted to spark. Same for `extra_data`.
Parameters
----------
tasks_pkg (str): Path to the python package containing tasks
dist_dir (str): Path to the directory where the packaged code should be stored
custom_main (str): Path to a custom 'main' python file.
extra_data (List[str]): List containing paths to files/directories that should also be packaged
and submitted to spark
| 2.105884
| 2.061528
| 1.021516
|
# spark-submit binary
cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]
# Supplied spark arguments
if spark_args:
cmd += spark_args
# Packaged App & lane
cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']
cmd += ['--lane', lane_yaml]
logging.info('Submitting to Spark')
logging.debug(str(cmd))
# Submit
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
call(cmd, cwd=dist_dir, env=MY_ENV, **outp)
devnull.close()
|
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent)
|
Submits the packaged application to spark using a `spark-submit` subprocess
Parameters
----------
lane_yaml (str): Path to the YAML lane definition file
dist_dir (str): Path to the directory where the packaged code is located
spark_args (str): String of any additional spark config args to be passed when submitting
silent (bool): Flag indicating whether job output should be printed to console
| 4.551974
| 4.529057
| 1.00506
|
if not guard_name:
guard_name = 'GUARD_' + hashlib.md5(cl_str.encode('utf-8')).hexdigest()
return '''
# ifndef {guard_name}
# define {guard_name}
{func_str}
# endif // {guard_name}
'''.format(func_str=cl_str, guard_name=guard_name)
|
def add_include_guards(cl_str, guard_name=None)
|
Add include guards to the given string.
If you are including the same body of CL code multiple times in a Kernel, it is important to add include
guards (https://en.wikipedia.org/wiki/Include_guard) around them to prevent the kernel from registering the function
twice.
Args:
cl_str (str): the piece of CL code as a string to which we add the include guards
guard_name (str): the name of the C pre-processor guard. If not given we use the MD5 hash of the
given cl string.
Returns:
str: the same string but then with include guards around them.
| 2.542974
| 2.652303
| 0.958779
|
if is_vector_ctype(cl_type):
raw_type, vector_length = split_vector_ctype(cl_type)
if raw_type == 'mot_float_type':
if is_vector_ctype(mot_float_type):
raw_type, _ = split_vector_ctype(mot_float_type)
else:
raw_type = mot_float_type
vector_type = raw_type + str(vector_length)
return getattr(cl_array.vec, vector_type)
else:
if cl_type == 'mot_float_type':
cl_type = mot_float_type
data_types = [
('char', np.int8),
('uchar', np.uint8),
('short', np.int16),
('ushort', np.uint16),
('int', np.int32),
('uint', np.uint32),
('long', np.int64),
('ulong', np.uint64),
('float', np.float32),
('double', np.float64),
]
for ctype, dtype in data_types:
if ctype == cl_type:
return dtype
|
def ctype_to_dtype(cl_type, mot_float_type='float')
|
Get the numpy dtype of the given cl_type string.
Args:
cl_type (str): the CL data type to match, for example 'float' or 'float4'.
mot_float_type (str): the C name of the ``mot_float_type``. The dtype will be looked up recursively.
Returns:
dtype: the numpy datatype
| 1.954459
| 1.989164
| 0.982553
|
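A few example lookups, assuming ctype_to_dtype as defined above; the vector case additionally requires pyopencl, since it resolves through pyopencl.array.vec.

import numpy as np

assert ctype_to_dtype('float') is np.float32
assert ctype_to_dtype('double') is np.float64
# 'mot_float_type' resolves through the mot_float_type argument:
assert ctype_to_dtype('mot_float_type', mot_float_type='double') is np.float64
# Vector types such as 'float4' map to pyopencl.array.vec.float4.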
scalar_dtype = ctype_to_dtype(data_type, mot_float_type)
if isinstance(data, numbers.Number):
data = scalar_dtype(data)
if is_vector_ctype(data_type):
shape = data.shape
dtype = ctype_to_dtype(data_type, mot_float_type)
ve = np.zeros(shape[:-1], dtype=dtype)
if len(shape) == 1:
for vector_ind in range(shape[0]):
ve[0][vector_ind] = data[vector_ind]
elif len(shape) == 2:
for i in range(data.shape[0]):
for vector_ind in range(data.shape[1]):
ve[i][vector_ind] = data[i, vector_ind]
elif len(shape) == 3:
for i in range(data.shape[0]):
for j in range(data.shape[1]):
for vector_ind in range(data.shape[2]):
ve[i, j][vector_ind] = data[i, j, vector_ind]
return np.require(ve, requirements=['C', 'A', 'O'])
return np.require(data, scalar_dtype, ['C', 'A', 'O'])
|
def convert_data_to_dtype(data, data_type, mot_float_type='float')
|
Convert the given input data to the correct numpy type.
Args:
data (ndarray): The value to convert to the correct numpy type
data_type (str): the data type we need to convert the data to
mot_float_type (str): the data type of the current ``mot_float_type``
Returns:
ndarray: the input data but then converted to the desired numpy data type
| 2.053555
| 2.190681
| 0.937405
|
if not is_vector_ctype(ctype):
raise ValueError('The given ctype is not a vector type.')
for vector_length in [2, 3, 4, 8, 16]:
if ctype.endswith(str(vector_length)):
vector_str_len = len(str(vector_length))
return ctype[:-vector_str_len], int(ctype[-vector_str_len:])
|
def split_vector_ctype(ctype)
|
Split a vector ctype into a raw ctype and the vector length.
If the given ctype is not a vector type, we raise an error.
Args:
ctype (str): the ctype to possibly split into a raw ctype and the vector length
Returns:
tuple: the raw ctype and the vector length
| 2.853749
| 2.750242
| 1.037636
|
cl_device_type_str = cl_device_type_str.upper()
if hasattr(cl.device_type, cl_device_type_str):
return getattr(cl.device_type, cl_device_type_str)
return None
|
def device_type_from_string(cl_device_type_str)
|
Converts values like ``gpu`` to a pyopencl device type.
Supported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``. If ``all`` is given, None is returned.
Args:
cl_device_type_str (str): The string we want to convert to a device type.
Returns:
cl.device_type: the pyopencl device type.
| 1.910999
| 2.647183
| 0.721899
|
if include_complex:
with open(os.path.abspath(resource_filename('mot', 'data/opencl/complex.h')), 'r') as f:
complex_number_support = f.read()
else:
complex_number_support = ''
scipy_constants = '''
#define MACHEP DBL_EPSILON
#define MAXLOG log(DBL_MAX)
#define LANCZOS_G 6.024680040776729583740234375 /* taken from Scipy */
#define EULER 0.577215664901532860606512090082402431 /* Euler constant, from Scipy */
'''
if double_precision:
return '''
#if __OPENCL_VERSION__ <= CL_VERSION_1_1
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
#endif
#define PYOPENCL_DEFINE_CDOUBLE
typedef double mot_float_type;
typedef double2 mot_float_type2;
typedef double4 mot_float_type4;
typedef double8 mot_float_type8;
typedef double16 mot_float_type16;
#define MOT_EPSILON DBL_EPSILON
#define MOT_MIN DBL_MIN
#define MOT_MAX DBL_MAX
''' + scipy_constants + complex_number_support
else:
return '''
#if __OPENCL_VERSION__ <= CL_VERSION_1_1
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
#endif
typedef float mot_float_type;
typedef float2 mot_float_type2;
typedef float4 mot_float_type4;
typedef float8 mot_float_type8;
typedef float16 mot_float_type16;
#define MOT_EPSILON FLT_EPSILON
#define MOT_MIN FLT_MIN
#define MOT_MAX FLT_MAX
''' + scipy_constants + complex_number_support
|
def get_float_type_def(double_precision, include_complex=True)
|
Get the model floating point type definition.
Args:
double_precision (boolean): if True we will use the double type for the mot_float_type type.
Else, we will use the single precision float type for the mot_float_type type.
include_complex (boolean): if we include support for complex numbers
Returns:
str: defines the mot_float_type types, the epsilon and the MIN and MAX values.
| 2.547222
| 2.469504
| 1.031471
|
def check_self_dependencies(input_data):
for k, v in input_data.items():
if k in v:
raise ValueError('Self-dependency, {} depends on itself.'.format(k))
def prepare_input_data(input_data):
return {k: set(v) for k, v in input_data.items()}
def find_items_without_dependencies(input_data):
return list(reduce(set.union, input_data.values()) - set(input_data.keys()))
def add_empty_dependencies(data):
items_without_dependencies = find_items_without_dependencies(data)
data.update({item: set() for item in items_without_dependencies})
def get_sorted(input_data):
data = input_data
while True:
ordered = set(item for item, dep in data.items() if len(dep) == 0)
if not ordered:
break
yield ordered
data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered}
if len(data) != 0:
raise ValueError('Cyclic dependencies exist '
'among these items: {}'.format(', '.join(repr(x) for x in data.items())))
check_self_dependencies(data)
if not len(data):
return []
data_copy = prepare_input_data(data)
add_empty_dependencies(data_copy)
result = []
for d in get_sorted(data_copy):
try:
d = sorted(d)
except TypeError:
d = list(d)
result.extend(d)
return result
|
def topological_sort(data)
|
Topological sort the given dictionary structure.
Args:
data (dict): dictionary structure where the value is a list of dependencies for that given key.
For example: ``{'a': (), 'b': ('a',)}``, where ``a`` depends on nothing and ``b`` depends on ``a``.
Returns:
tuple: the dependencies in constructor order
| 2.584476
| 2.552967
| 1.012342
|
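A quick worked example, assuming topological_sort as defined above.

# 'b' depends on 'a', 'c' depends on 'a' and 'b'; 'a' has no dependencies.
deps = {'b': ('a',), 'c': ('a', 'b')}
print(topological_sort(deps))  # -> ['a', 'b', 'c']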
return np.isscalar(value) or (isinstance(value, np.ndarray) and (len(np.squeeze(value).shape) == 0))
|
def is_scalar(value)
|
Test if the given value is a scalar.
This function also works with memory mapped array values, in contrast to the numpy is_scalar method.
Args:
value: the value to test for being a scalar value
Returns:
boolean: if the given value is a scalar or not
| 3.135341
| 4.820517
| 0.650416
|
if is_scalar(value):
return True
return np.array(value == value.flatten()[0]).all()
|
def all_elements_equal(value)
|
Checks if all elements in the given value are equal to each other.
If the input is a single value the result is trivial. If not, we compare all the values to see
if they are exactly the same.
Args:
value (ndarray or number): a numpy array or a single number.
Returns:
bool: true if all elements are equal to each other, false otherwise
| 6.996349
| 9.544094
| 0.733055
|
if not all_elements_equal(value):
raise ValueError('Not all values are equal to each other.')
if is_scalar(value):
return value
return value.item(0)
|
def get_single_value(value)
|
Get a single value out of the given value.
This is meant to be used after a call to :func:`all_elements_equal` that returned True. With this
function we return a single number from the input value.
Args:
value (ndarray or number): a numpy array or a single number.
Returns:
number: a single number from the input
Raises:
ValueError: if not all elements are equal
| 5.944807
| 5.114616
| 1.162317
|
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level)
|
def all_logging_disabled(highest_level=logging.CRITICAL)
|
Disable all logging temporarily.
A context manager that will prevent any logging messages triggered during the body from being processed.
Args:
highest_level: the maximum logging level that is being blocked
| 2.675654
| 4.34649
| 0.61559
|
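Example usage, assuming the function above is wrapped with contextlib.contextmanager (which its yield/try/finally structure implies).

import logging

with all_logging_disabled():
    logging.getLogger(__name__).error("suppressed while inside the context")
logging.getLogger(__name__).info("logging behaves normally again out here")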
offset = 0
elements_left = nmr_elements
while elements_left > 0:
next_batch = (offset, offset + min(elements_left, max_batch_size))
yield next_batch
batch_size = min(elements_left, max_batch_size)
elements_left -= batch_size
offset += batch_size
|
def split_in_batches(nmr_elements, max_batch_size)
|
Split the total number of elements into batches of the specified maximum size.
Examples::
split_in_batches(30, 8) -> [(0, 8), (8, 16), (16, 24), (24, 30)]
for batch_start, batch_end in split_in_batches(2000, 100):
array[batch_start:batch_end]
Yields:
tuple: the start and end point of the next batch
| 2.368134
| 2.565249
| 0.923159
|
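A quick check of the batching behaviour, assuming split_in_batches as defined above.

print(list(split_in_batches(30, 8)))    # [(0, 8), (8, 16), (16, 24), (24, 30)]
print(list(split_in_batches(10, 100)))  # [(0, 10)]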
diagonal_ind = np.arange(covariance.shape[1])
diagonal_els = covariance[:, diagonal_ind, diagonal_ind]
result = covariance / np.sqrt(diagonal_els[:, :, None] * diagonal_els[:, None, :])
result[np.isinf(result)] = 0
return np.clip(np.nan_to_num(result), -1, 1)
|
def covariance_to_correlations(covariance)
|
Transform a covariance matrix into a correlations matrix.
This can be seen as dividing a covariance matrix by the outer product of the square roots of its diagonal (the standard deviations).
As post processing we replace the infinities and the NaNs with zeros and clip the result to [-1, 1].
Args:
covariance (ndarray): a matrix of shape (n, p, p) with for n problems the covariance matrix of shape (p, p).
Returns:
ndarray: the correlations matrix
| 2.887324
| 2.948441
| 0.979271
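For a single (p, p) covariance matrix, the same normalisation can be written with plain numpy; a standalone sketch of the equivalent computation::

    import numpy as np

    cov = np.array([[4.0, 1.0],
                    [1.0, 9.0]])
    stds = np.sqrt(np.diag(cov))                # standard deviations from the diagonal
    corr = cov / np.outer(stds, stds)           # divide by the outer product of the stds
    corr = np.clip(np.nan_to_num(corr), -1, 1)  # same post processing as above
    # corr[0, 1] == 1 / (2 * 3), i.e. about 0.167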
|
if os.name == 'nt': # In Windows there is no fork.
return list(map(func, iterable))
try:
p = multiprocessing.Pool()
return_data = list(p.imap(func, iterable))
p.close()
p.join()
return return_data
except OSError:
return list(map(func, iterable))
|
def multiprocess_mapping(func, iterable)
|
Multiprocess mapping the given function on the given iterable.
Multiprocessing is only used on Linux and macOS since Windows has no forking capability; on Windows we fall back
to single-process mapping. Also, if we hit memory limits (an ``OSError``) we fall back to single-CPU processing.
Args:
func (func): the function to apply
iterable (iterable): the iterable with the elements we want to apply the function on
Returns:
list: the result of mapping the function over the elements of the iterable
| 2.897691
| 3.199518
| 0.905665
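A usage sketch (hypothetical; assumes ``multiprocess_mapping`` above is importable; the mapped function must be picklable, so define it at module level and guard the call)::

    def square(x):
        return x * x

    if __name__ == '__main__':
        results = multiprocess_mapping(square, range(10))
        # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81], computed with a process pool where forking is available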
|
from mot.lib.cl_function import SimpleCLFunction
def separate_cl_functions(input_str):
class Semantics:
def __init__(self):
self._functions = []
def result(self, ast):
return self._functions
def arglist(self, ast):
return '({})'.format(', '.join(ast))
def function(self, ast):
def join(items):
result = ''
for item in items:
if isinstance(item, str):
result += item
else:
result += join(item)
return result
self._functions.append(join(ast).strip())
return ast
return _extract_cl_functions_parser.parse(input_str, semantics=Semantics())
functions = separate_cl_functions(cl_code)
return SimpleCLFunction.from_string(functions[-1], dependencies=list(dependencies or []) + [
SimpleCLFunction.from_string(s) for s in functions[:-1]])
|
def parse_cl_function(cl_code, dependencies=())
|
Parse the given OpenCL string to a single SimpleCLFunction.
If the string contains more than one function, we will return only the last one, with all the others added as
dependencies.
Args:
cl_code (str): the input string containing one or more functions.
dependencies (Iterable[CLCodeObject]): The list of CL libraries this function depends on
Returns:
mot.lib.cl_function.SimpleCLFunction: the CL function for the last function in the given strings.
| 4.341578
| 3.84581
| 1.128911
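A usage sketch (hypothetical; assumes ``parse_cl_function`` above is importable from MOT and that the grammar accepts plain C-style functions)::

    cl_func = parse_cl_function('''
        double square(double x){
            return x * x;
        }

        double sum_of_squares(double a, double b){
            return square(a) + square(b);
        }
    ''')
    # cl_func wraps the last function (sum_of_squares); square is attached as a dependency.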
|
class Semantics:
def __init__(self):
self._return_type = ''
self._function_name = ''
self._parameter_list = []
self._cl_body = ''
def result(self, ast):
return self._return_type, self._function_name, self._parameter_list, self._cl_body
def address_space(self, ast):
self._return_type = ast.strip() + ' '
return ast
def data_type(self, ast):
self._return_type += ''.join(ast).strip()
return ast
def function_name(self, ast):
self._function_name = ast.strip()
return ast
def arglist(self, ast):
if ast != '()':
self._parameter_list = ast
return ast
def body(self, ast):
def join(items):
result = ''
for item in items:
if isinstance(item, str):
result += item
else:
result += join(item)
return result
self._cl_body = join(ast).strip()[1:-1]
return ast
return _split_cl_function_parser.parse(cl_str, semantics=Semantics())
|
def split_cl_function(cl_str)
|
Split a CL function into a return type, function name, parameter list and body.
Args:
cl_str (str): the CL code to parse and split into its components
Returns:
tuple: string elements for the return type, function name, parameter list and the body
| 2.717132
| 2.685426
| 1.011807
|
logger = logging.getLogger(name)
logger.setLevel(level)
if not logger.handlers:
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(level)
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
|
def make_default_logger(name=INTERNAL_LOGGER_NAME, level=logging.INFO,
fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
Create a logger with the default configuration
| 1.518613
| 1.575236
| 0.964054
|
return self._device.get_info(cl.device_info.TYPE) == cl.device_type.GPU
|
def is_gpu(self)
|
Check if the device associated with this environment is a GPU.
Returns:
boolean: True if the device is a GPU, False otherwise.
| 4.653194
| 5.323621
| 0.874066
|
return self._device.get_info(cl.device_info.TYPE) == cl.device_type.CPU
|
def is_cpu(self)
|
Check if the device associated with this environment is a CPU.
Returns:
boolean: True if the device is a CPU, False otherwise.
| 5.524373
| 5.945763
| 0.929128
|
if isinstance(cl_device_type, str):
cl_device_type = device_type_from_string(cl_device_type)
if platform is None:
    platforms = cl.get_platforms()
else:
    platforms = [platform]
for platform in platforms:
    devices = platform.get_devices(device_type=cl_device_type)
    for dev in devices:
        if device_supports_double(dev):
            try:
                env = CLEnvironment(platform, dev)
                return [env]
            except cl.RuntimeError:
                pass
if fallback_to_any_device_type:
    return cl.get_platforms()[0].get_devices()
raise ValueError('No devices of the specified type ({}) found.'.format(
    cl.device_type.to_string(cl_device_type)))
|
def single_device(cl_device_type='GPU', platform=None, fallback_to_any_device_type=False)
|
Get a list containing a single device environment, for a device of the given type on the given platform.
This will only fetch devices that support double (possibly only double with a pragma
defined, but still, it should support double).
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be an OpenCL device type or a string matching 'GPU', 'CPU' or 'ALL'.
platform (opencl platform): The opencl platform to select the devices from
fallback_to_any_device_type (boolean): If True, try to fallback to any possible device in the system.
Returns:
list of CLEnvironment: List with one element, the CL runtime environment requested.
| 2.424245
| 2.213524
| 1.095197
|
if isinstance(cl_device_type, str):
cl_device_type = device_type_from_string(cl_device_type)
runtime_list = []
if platform is None:
platforms = cl.get_platforms()
else:
platforms = [platform]
for platform in platforms:
if cl_device_type:
devices = platform.get_devices(device_type=cl_device_type)
else:
devices = platform.get_devices()
for device in devices:
if device_supports_double(device):
env = CLEnvironment(platform, device)
runtime_list.append(env)
return runtime_list
|
def all_devices(cl_device_type=None, platform=None)
|
Get multiple device environments, optionally only of the indicated type.
This will only fetch devices that support double point precision.
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be an OpenCL device type or a string matching 'GPU' or 'CPU'.
platform (opencl platform): The opencl platform to select the devices from
Returns:
list of CLEnvironment: List with the CL device environments.
| 2.134022
| 1.967847
| 1.084445
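A usage sketch (hypothetical; assumes these are exposed as methods of ``CLEnvironmentFactory``, as in the calls further below, and that pyopencl finds at least one double-capable device)::

    envs = CLEnvironmentFactory.all_devices(cl_device_type='GPU')
    for env in envs:
        print(env.platform.name, env.is_gpu())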
|
cl_environments = CLEnvironmentFactory.all_devices(cl_device_type=preferred_device_type)
platform_names = [env.platform.name for env in cl_environments]
has_amd_pro_platform = any('AMD Accelerated Parallel Processing' in name for name in platform_names)
if has_amd_pro_platform:
return list(filter(lambda env: 'Clover' not in env.platform.name, cl_environments))
if preferred_device_type is not None and not len(cl_environments):
return CLEnvironmentFactory.all_devices()
return cl_environments
|
def smart_device_selection(preferred_device_type=None)
|
Get a list of device environments that is suitable for use in MOT.
Basically this gets the total list of devices using all_devices() and applies a filter on it.
This filter does the following:
1) if the 'AMD Accelerated Parallel Processing' platform is available, remove all environments using the 'Clover'
platform.
More things may be implemented in the future.
Args:
preferred_device_type (str): the preferred device type, one of 'CPU', 'GPU' or 'APU'.
If no devices of this type can be found, we will use any other device available.
Returns:
list of CLEnvironment: List with the CL device environments.
| 4.334559
| 3.127276
| 1.386049
|
samples_generator = _get_sample_generator(samples)
return np.array(multiprocess_mapping(_MultivariateESSMultiProcessing(batch_size_generator), samples_generator()))
|
def multivariate_ess(samples, batch_size_generator=None)
|
r"""Estimate the multivariate Effective Sample Size for the samples of every problem.
This essentially applies :func:`estimate_multivariate_ess` to every problem.
Args:
samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and
n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,
a generator function that yields sample arrays of shape (p, n).
batch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many
batches and of which size we use in estimating the minimum ESS.
Returns:
ndarray: the multivariate ESS per problem
| 11.446875
| 15.647846
| 0.73153
|
samples_generator = _get_sample_generator(samples)
return np.array(multiprocess_mapping(_UnivariateESSMultiProcessing(method, **kwargs), samples_generator()))
|
def univariate_ess(samples, method='standard_error', **kwargs)
|
r"""Estimate the univariate Effective Sample Size for the samples of every problem.
This computes the ESS using:
.. math::
ESS(X) = n * \frac{\lambda^{2}}{\sigma^{2}}
Where :math:`\lambda` is the standard deviation of the chain and :math:`\sigma` is estimated using the
monte carlo standard error (which in turn is, by default, estimated using a batch means estimator).
Args:
samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and
n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,
a generator function that yields sample arrays of shape (p, n).
method (str): one of 'autocorrelation' or 'standard_error', defaults to 'standard_error'.
If 'autocorrelation' is chosen we apply the function :func:`estimate_univariate_ess_autocorrelation`,
if 'standard_error' is chosen we apply the function :func:`estimate_univariate_ess_standard_error`.
**kwargs: passed to the chosen compute method
Returns:
ndarray: a matrix of size (d, p) with for every problem and every parameter an ESS.
References:
* Flegal, J.M., Haran, M., and Jones, G.L. (2008). "Markov chain Monte Carlo: Can We
Trust the Third Significant Figure?". Statistical Science, 23, p. 250-260.
* Marc S. Meketon and Bruce Schmeiser. 1984. Overlapping batch means: something for nothing?.
In Proceedings of the 16th conference on Winter simulation (WSC '84), Sallie Sheppard (Ed.).
IEEE Press, Piscataway, NJ, USA, 226-230.
| 13.194909
| 17.459198
| 0.755757
|
if isinstance(samples, Mapping):
def samples_generator():
for ind in range(samples[list(samples.keys())[0]].shape[0]):
yield np.array([samples[s][ind, :] for s in sorted(samples)])
elif isinstance(samples, np.ndarray):
def samples_generator():
for ind in range(samples.shape[0]):
yield samples[ind]
else:
samples_generator = samples
return samples_generator
|
def _get_sample_generator(samples)
|
Get a sample generator from the given polymorphic input.
Args:
samples (ndarray, dict or generator): either a matrix of shape (d, p, n) with d problems, p parameters and
n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,
a generator function that yields sample arrays of shape (p, n).
Returns:
generator: a generator that yields a matrix of size (p, n) for every problem in the input.
| 2.616847
| 2.447851
| 1.069039
|
normalized_chain = chain - np.mean(chain, dtype=np.float64)
lagged_mean = np.mean(normalized_chain[:len(chain) - lag] * normalized_chain[lag:], dtype=np.float64)
return lagged_mean / np.var(chain, dtype=np.float64)
|
def get_auto_correlation(chain, lag)
|
r"""Estimates the auto correlation for the given chain (1d vector) with the given lag.
Given a lag :math:`k`, the auto correlation coefficient :math:`\rho_{k}` is estimated as:
.. math::
\hat{\rho}_{k} = \frac{E[(X_{t} - \mu)(X_{t + k} - \mu)]}{\sigma^{2}}
Please note that this equation only works for lags :math:`k < n` where :math:`n` is the number of samples in
the chain.
Args:
chain (ndarray): the vector with the samples
lag (int): the lag to use in the autocorrelation computation
Returns:
float: the autocorrelation with the given lag
| 3.218133
| 3.501683
| 0.919025
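A standalone sketch of the same estimator on an AR(1) chain, for which the lag-k autocorrelation should be close to phi**k (the simulation setup is illustrative)::

    import numpy as np

    rng = np.random.RandomState(0)
    phi, n = 0.8, 20000
    chain = np.zeros(n)
    for t in range(1, n):                  # simulate an AR(1) process
        chain[t] = phi * chain[t - 1] + rng.randn()

    lag = 3
    centered = chain - np.mean(chain)
    rho_k = np.mean(centered[:n - lag] * centered[lag:]) / np.var(chain)
    # rho_k should be close to phi ** lag = 0.512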
|
max_lag = max_lag or min(len(chain) // 3, 1000)
normalized_chain = chain - np.mean(chain, dtype=np.float64)
previous_accoeff = 0
auto_corr_sum = 0
for lag in range(1, max_lag):
auto_correlation_coeff = np.mean(normalized_chain[:len(chain) - lag] * normalized_chain[lag:], dtype=np.float64)
if lag % 2 == 0:
if previous_accoeff + auto_correlation_coeff <= 0:
break
auto_corr_sum += auto_correlation_coeff
previous_accoeff = auto_correlation_coeff
return auto_corr_sum / np.var(chain, dtype=np.float64)
|
def get_auto_correlation_time(chain, max_lag=None)
|
r"""Compute the auto correlation time up to the given lag for the given chain (1d vector).
This will halt when the maximum lag :math:`m` is reached or when the sum of two consecutive autocorrelation
coefficients (checked at every even lag) is lower than or equal to zero.
The auto correlation sum is estimated as:
.. math::
\tau = 1 + 2 * \sum_{k=1}^{m}{\rho_{k}}
Where :math:`\rho_{k}` is estimated as:
.. math::
\hat{\rho}_{k} = \frac{E[(X_{t} - \mu)(X_{t + k} - \mu)]}{\sigma^{2}}
Args:
chain (ndarray): the vector with the samples
max_lag (int): the maximum lag to use in the autocorrelation computation. If not given we use:
:math:`min(n/3, 1000)`.
Returns:
float: the sum of the estimated autocorrelation coefficients, :math:`\sum_{k=1}^{m}{\rho_{k}}`
| 2.83007
| 2.904671
| 0.974317
|
sigma = (monte_carlo_standard_error(chain, batch_size_generator=batch_size_generator,
compute_method=compute_method) ** 2 * len(chain))
lambda_ = np.var(chain, dtype=np.float64)
return len(chain) * (lambda_ / sigma)
|
def estimate_univariate_ess_standard_error(chain, batch_size_generator=None, compute_method=None)
|
r"""Compute the univariate ESS using the standard error method.
This computes the ESS using:
.. math::
ESS(X) = n * \frac{\lambda^{2}}{\sigma^{2}}
Where :math:`\lambda` is the standard deviation of the chain and :math:`\sigma` is estimated using the monte carlo
standard error (which in turn is, by default, estimated using a batch means estimator).
Args:
chain (ndarray): the Markov chain
batch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates the batch sizes
we will use. Per default it uses the :class:`SquareRootSingleBatch` method.
compute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.
By default we will use the :class:`BatchMeansMCSE` method
Returns:
float: the estimated ESS
| 4.18621
| 4.764599
| 0.878607
|
tmp = 2.0 / nmr_params
log_min_ess = tmp * np.log(2) + np.log(np.pi) - tmp * (np.log(nmr_params) + gammaln(nmr_params / 2)) \
+ np.log(chi2.ppf(1 - alpha, nmr_params)) - 2 * np.log(epsilon)
return int(round(np.exp(log_min_ess)))
|
def minimum_multivariate_ess(nmr_params, alpha=0.05, epsilon=0.05)
|
r"""Calculate the minimum multivariate Effective Sample Size you will need to obtain the desired precision.
This implements the inequality from Vats et al. (2016):
.. math::
\widehat{ESS} \geq \frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}} \frac{\chi^{2}_{1-\alpha,p}}{\epsilon^{2}}
Where :math:`p` is the number of free parameters.
Args:
nmr_params (int): the number of free parameters in the model
alpha (float): the level of confidence of the confidence region. For example, an alpha of 0.05 means
that we want to be in a 95% confidence region.
epsilon (float): the level of precision in our multivariate ESS estimate.
An epsilon of 0.05 means that we expect that the Monte Carlo error is 5% of the uncertainty in
the target distribution.
Returns:
int: the minimum multivariate Effective Sample Size one should aim for in an MCMC sample to
obtain the desired confidence region with the desired precision.
References:
Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo.
arXiv:1512.07713v2 [math.ST]
| 3.544013
| 3.301922
| 1.073318
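A standalone sketch of the Vats et al. lower bound above, mirroring the computation in the code (the numeric result is approximate)::

    import numpy as np
    from scipy.special import gammaln
    from scipy.stats import chi2

    def min_multivariate_ess(p, alpha=0.05, epsilon=0.05):
        tmp = 2.0 / p
        log_min_ess = (tmp * np.log(2) + np.log(np.pi)
                       - tmp * (np.log(p) + gammaln(p / 2))
                       + np.log(chi2.ppf(1 - alpha, p))
                       - 2 * np.log(epsilon))
        return int(round(np.exp(log_min_ess)))

    print(min_multivariate_ess(10))  # roughly 8800 effective samples for p = 10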
|
tmp = 2.0 / nmr_params
log_min_ess = tmp * np.log(2) + np.log(np.pi) - tmp * (np.log(nmr_params) + gammaln(nmr_params / 2)) \
+ np.log(chi2.ppf(1 - alpha, nmr_params)) - np.log(multi_variate_ess)
return np.sqrt(np.exp(log_min_ess))
|
def multivariate_ess_precision(nmr_params, multi_variate_ess, alpha=0.05)
|
r"""Calculate the precision given your multivariate Effective Sample Size.
Given that you obtained :math:`ESS` multivariate effective samples in your estimate you can calculate the
precision with which you approximated your desired confidence region.
This implements the inequality from Vats et al. (2016), slightly restructured to give :math:`\epsilon` back instead
of the minimum ESS.
.. math::
\epsilon = \sqrt{\frac{2^{2/p}\pi}{(p\Gamma(p/2))^{2/p}} \frac{\chi^{2}_{1-\alpha,p}}{\widehat{ESS}}}
Where :math:`p` is the number of free parameters and ESS is the multivariate ESS from your samples.
Args:
nmr_params (int): the number of free parameters in the model
multi_variate_ess (int): the number of iid samples you obtained in your sample results.
alpha (float): the level of confidence of the confidence region. For example, an alpha of 0.05 means
that we want to be in a 95% confidence region.
Returns:
float: the obtainable precision :math:`\epsilon` of the confidence region, given the multivariate ESS
of your MCMC sample.
References:
Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo.
arXiv:1512.07713v2 [math.ST]
| 3.502133
| 2.988564
| 1.171845
|
sample_means = np.mean(samples, axis=1, dtype=np.float64)
nmr_params, chain_length = samples.shape
nmr_batches = int(np.floor(chain_length / batch_size))
sigma = np.zeros((nmr_params, nmr_params))
nmr_offsets = chain_length - nmr_batches * batch_size + 1
for offset in range(nmr_offsets):
    batches = np.reshape(samples[:, np.array(offset + np.arange(0, nmr_batches * batch_size), dtype=int)].T,
                         [batch_size, nmr_batches, nmr_params], order='F')
batch_means = np.squeeze(np.mean(batches, axis=0, dtype=np.float64))
Z = batch_means - sample_means
for x, y in itertools.product(range(nmr_params), range(nmr_params)):
sigma[x, y] += np.sum(Z[:, x] * Z[:, y])
return sigma * batch_size / (nmr_batches - 1) / nmr_offsets
|
def estimate_multivariate_ess_sigma(samples, batch_size)
|
r"""Calculates the Sigma matrix which is part of the multivariate ESS calculation.
This implementation is based on the Matlab implementation found at: https://github.com/lacerbi/multiESS
The Sigma matrix is defined as:
.. math::
\Sigma = \Lambda + 2 * \sum_{k=1}^{\infty}{Cov(Y_{1}, Y_{1+k})}
Where :math:`Y` are our samples and :math:`\Lambda` is the covariance matrix of the samples.
This implementation computes the :math:`\Sigma` matrix using a Batch Mean estimator using the given batch size.
The batch size has to be :math:`1 \le b_n \le n` and a typical value is either :math:`\lfloor n^{1/2} \rfloor`
for slow mixing chains or :math:`\lfloor n^{1/3} \rfloor` for reasonable mixing chains.
If the length of the chain is longer than the sum of the length of all the batches, this implementation
calculates :math:`\Sigma` for every offset and returns the average of those offsets.
Args:
samples (ndarray): the samples for which we compute the sigma matrix. Expects a (p, n) array with
p the number of parameters and n the sample size
batch_size (int): the batch size used in the approximation of the correlation covariance
Returns:
ndarray: a (p, p) array with p the number of parameters in the samples.
References:
Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo.
arXiv:1512.07713v2 [math.ST]
| 2.830335
| 2.744475
| 1.031285
|
batch_size_generator = batch_size_generator or SquareRootSingleBatch()
batch_sizes = batch_size_generator.get_multivariate_ess_batch_sizes(*samples.shape)
nmr_params, chain_length = samples.shape
nmr_batches = len(batch_sizes)
det_lambda = det(np.cov(samples))
ess_estimates = np.zeros(nmr_batches)
sigma_estimates = np.zeros((nmr_params, nmr_params, nmr_batches))
for i in range(0, nmr_batches):
sigma = estimate_multivariate_ess_sigma(samples, int(batch_sizes[i]))
ess = chain_length * (det_lambda**(1.0 / nmr_params) / det(sigma)**(1.0 / nmr_params))
ess_estimates[i] = ess
sigma_estimates[..., i] = sigma
ess_estimates = np.nan_to_num(ess_estimates)
if nmr_batches > 1:
idx = np.argmin(ess_estimates)
else:
idx = 0
if full_output:
return ess_estimates[idx], sigma_estimates[..., idx], batch_sizes[idx]
return ess_estimates[idx]
|
def estimate_multivariate_ess(samples, batch_size_generator=None, full_output=False)
|
r"""Compute the multivariate Effective Sample Size of your (single instance set of) samples.
This multivariate ESS is defined in Vats et al. (2016) and is given by:
.. math::
ESS = n \bigg(\frac{|\Lambda|}{|\Sigma|}\bigg)^{1/p}
Where :math:`n` is the number of samples, :math:`p` the number of parameters, :math:`\Lambda` is the covariance
matrix of the parameters and :math:`\Sigma` captures the covariance structure in the target together with
the covariance due to correlated samples. :math:`\Sigma` is estimated using
:func:`estimate_multivariate_ess_sigma`.
In the case of NaN in any part of the computation the ESS is set to 0.
To compute the multivariate ESS for multiple problems, please use :func:`multivariate_ess`.
Args:
samples (ndarray): a (p, n) matrix with p parameters and n samples.
batch_size_generator (MultiVariateESSBatchSizeGenerator): the batch size generator, tells us how many
batches and of which size we use for estimating the minimum ESS. Defaults to :class:`SquareRootSingleBatch`
full_output (boolean): set to True to return the estimated :math:`\Sigma` and the optimal batch size.
Returns:
float or tuple: when full_output is set to True we return a tuple with the estimated multivariate ESS,
the estimated :math:`\Sigma` matrix and the optimal batch size. When full_output is False (the default)
we only return the ESS.
References:
Vats D, Flegal J, Jones G (2016). Multivariate Output Analysis for Markov Chain Monte Carlo.
arXiv:1512.07713v2 [math.ST]
| 2.842838
| 2.451871
| 1.159456
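A usage sketch on uncorrelated draws, for which the multivariate ESS should be in the vicinity of the number of samples (hypothetical; assumes ``estimate_multivariate_ess`` above is importable from MOT)::

    import numpy as np

    rng = np.random.RandomState(0)
    samples = rng.randn(4, 5000)          # p = 4 parameters, n = 5000 independent draws
    ess = estimate_multivariate_ess(samples)
    # for uncorrelated samples the estimate should be close to n (here about 5000)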
|
batch_size_generator = batch_size_generator or SquareRootSingleBatch()
compute_method = compute_method or BatchMeansMCSE()
batch_sizes = batch_size_generator.get_univariate_ess_batch_sizes(len(chain))
return np.min(list(compute_method.compute_standard_error(chain, b) for b in batch_sizes))
|
def monte_carlo_standard_error(chain, batch_size_generator=None, compute_method=None)
|
Compute the Monte Carlo standard error for the expectation of the given chain.
This is a convenience function that calls the compute method for each batch size and returns the lowest
standard error over the used batch sizes.
Args:
chain (ndarray): the Markov chain
batch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates the batch sizes
we will use. Per default it uses the :class:`SquareRootSingleBatch` method.
compute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.
By default we will use the :class:`BatchMeansMCSE` method
Returns:
float: the Monte Carlo standard error of the expectation of the chain
| 5.586039
| 3.032486
| 1.842066
|
if len(samples.shape) == 1:
return np.mean(samples), np.std(samples, ddof=ddof)
return np.mean(samples, axis=1), np.std(samples, axis=1, ddof=ddof)
|
def fit_gaussian(samples, ddof=0)
|
Calculates the mean and the standard deviation of the given samples.
Args:
samples (ndarray): a one or two dimensional array. If one dimensional we calculate the fit using all
values. If two dimensional, we fit the Gaussian for every set of samples over the first dimension.
ddof (int): the delta degrees of freedom used in the std calculation. See numpy.std.
| 1.700493
| 1.942277
| 0.875515
|
cl_func = SimpleCLFunction.from_string('''
void compute(global mot_float_type* samples,
global mot_float_type* means,
global mot_float_type* stds,
int nmr_samples,
int low,
int high){
double cos_mean = 0;
double sin_mean = 0;
double ang;
for(uint i = 0; i < nmr_samples; i++){
ang = (samples[i] - low)*2*M_PI / (high - low);
cos_mean += (cos(ang) - cos_mean) / (i + 1);
sin_mean += (sin(ang) - sin_mean) / (i + 1);
}
double R = hypot(cos_mean, sin_mean);
if(R > 1){
R = 1;
}
double res = atan2(sin_mean, cos_mean);
if(res < 0){
res += 2 * M_PI;
}
*(means) = res*(high - low)/2.0/M_PI + low;
*(stds) = ((high - low)/2.0/M_PI) * sqrt(-2*log(R));
}
''')
def run_cl(samples):
data = {'samples': Array(samples, 'mot_float_type'),
'means': Zeros(samples.shape[0], 'mot_float_type'),
'stds': Zeros(samples.shape[0], 'mot_float_type'),
'nmr_samples': Scalar(samples.shape[1]),
'low': Scalar(low),
'high': Scalar(high),
}
cl_func.evaluate(data, samples.shape[0])
return data['means'].get_data(), data['stds'].get_data()
if len(samples.shape) == 1:
mean, std = run_cl(samples[None, :])
return mean[0], std[0]
return run_cl(samples)
|
def fit_circular_gaussian(samples, high=np.pi, low=0)
|
Compute the circular mean and standard deviation for samples in a range.
Args:
samples (ndarray): a one or two dimensional array. If one dimensional we calculate the fit using all
values. If two dimensional, we fit the Gaussian for every set of samples over the first dimension.
high (float): The maximum wrap point
low (float): The minimum wrap point
| 2.94824
| 2.974931
| 0.991028
|
if len(samples.shape) == 1:
return _TruncatedNormalFitter()((samples, lower_bounds, upper_bounds))
def item_generator():
for ind in range(samples.shape[0]):
if is_scalar(lower_bounds):
lower_bound = lower_bounds
else:
lower_bound = lower_bounds[ind]
if is_scalar(upper_bounds):
upper_bound = upper_bounds
else:
upper_bound = upper_bounds[ind]
yield (samples[ind], lower_bound, upper_bound)
results = np.array(multiprocess_mapping(_TruncatedNormalFitter(), item_generator()))
return results[:, 0], results[:, 1]
|
def fit_truncated_gaussian(samples, lower_bounds, upper_bounds)
|
Fits a truncated gaussian distribution on the given samples.
This will do a maximum likelihood estimation of a truncated Gaussian on the provided samples, with the
truncation points given by the lower and upper bounds.
Args:
samples (ndarray): a one or two dimensional array. If one dimensional we fit the truncated Gaussian on all
values. If two dimensional, we calculate the truncated Gaussian for every set of samples over the
first dimension.
lower_bounds (ndarray or float): the lower bound, either a scalar or a lower bound per problem (first index of
samples)
upper_bounds (ndarray or float): the upper bound, either a scalar or an upper bound per problem (first index of
samples)
Returns:
mean, std: the mean and std of the fitted truncated Gaussian
| 2.580837
| 2.701527
| 0.955325
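A usage sketch on synthetic truncated-normal data (hypothetical; assumes ``fit_truncated_gaussian`` above is importable)::

    import numpy as np
    from scipy.stats import truncnorm

    low, high = 0.0, 1.0
    mu, sigma = 0.3, 0.2
    a, b = (low - mu) / sigma, (high - mu) / sigma        # scipy's standardized bounds
    samples = truncnorm.rvs(a, b, loc=mu, scale=sigma, size=(5, 10000), random_state=0)
    means, stds = fit_truncated_gaussian(samples, low, high)
    # means should be close to 0.3 and stds close to 0.2 for every row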
|
if lower is None:
lower = -np.inf
if upper is None:
upper = np.inf
def point_iterator():
for ind in range(means_0.shape[0]):
yield np.squeeze(means_0[ind]), np.squeeze(stds_0[ind]), np.squeeze(means_1[ind]), np.squeeze(stds_1[ind])
return np.array(list(multiprocess_mapping(_ComputeGaussianOverlap(lower, upper), point_iterator())))
|
def gaussian_overlapping_coefficient(means_0, stds_0, means_1, stds_1, lower=None, upper=None)
|
Compute the overlapping coefficient of two Gaussian distributions.
This computes the :math:`\int_{-\infty}^{\infty}{\min(f(x), g(x))\partial x}` where
:math:`f \sim \mathcal{N}(\mu_0, \sigma_0^{2})` and :math:`g \sim \mathcal{N}(\mu_1, \sigma_1^{2})` are the
densities of the two normally distributed variables.
This will compute the overlap for each element in the first dimension.
Args:
means_0 (ndarray): the set of means of the first distribution
stds_0 (ndarray): the set of stds of the first distribution
means_1 (ndarray): the set of means of the second distribution
stds_1 (ndarray): the set of stds of the second distribution
lower (float): the lower limit of the integration. If not set we set it to -inf.
upper (float): the upper limit of the integration. If not set we set it to +inf.
| 2.982049
| 3.247653
| 0.918217
|
mean_deviance = -2 * np.mean(ll_per_sample, axis=1)
deviance_at_mean = -2 * mean_posterior_lls
pd_2002 = mean_deviance - deviance_at_mean
pd_2004 = np.var(ll_per_sample, axis=1) / 2.0
return {'DIC_2002': np.nan_to_num(mean_deviance + pd_2002),
'DIC_2004': np.nan_to_num(mean_deviance + pd_2004),
'DIC_Ando_2011': np.nan_to_num(mean_deviance + 2 * pd_2002)}
|
def deviance_information_criterions(mean_posterior_lls, ll_per_sample)
|
r"""Calculates the Deviance Information Criteria (DIC) using three methods.
This returns a dictionary returning the ``DIC_2002``, the ``DIC_2004`` and the ``DIC_Ando_2011`` method.
The first is based on Spiegelhalter et al (2002), the second based on Gelman et al. (2004) and the last on
Ando (2011). All cases differ in how they calculate model complexity, i.e. the effective number of parameters
in the model. In all cases the model with the smallest DIC is preferred.
All these DIC methods measure fitness using the deviance, which is, for a likelihood :math:`p(y | \theta)`
defined as:
.. math::
D(\theta) = -2\log p(y|\theta)
From this, the posterior mean deviance,
.. math::
\bar{D} = \mathbb{E}_{\theta}[D(\theta)]
is then used as a measure of how well the model fits the data.
The complexity, or effective number of parameters, can be measured in several ways, see
Spiegelhalter et al. (2002), Gelman et al. (2004) and Ando (2011). The first method calculates the parameter
deviance as:
.. math::
:nowrap:
\begin{align}
p_{D} &= \mathbb{E}_{\theta}[D(\theta)] - D(\mathbb{E}[\theta)]) \\
&= \bar{D} - D(\bar{\theta})
\end{align}
i.e. posterior mean deviance minus the deviance evaluated at the posterior mean of the parameters.
The second method calculates :math:`p_{D}` as:
.. math::
p_{D} = p_{V} = \frac{1}{2}\hat{var}(D(\theta))
i.e. half the variance of the deviance is used as an estimate of the number of free parameters in the model.
The third method calculates the parameter deviance as:
.. math::
p_{D} = 2 \cdot (\bar{D} - D(\bar{\theta}))
That is, twice the complexity of that of the first method.
Finally, the DIC is (for all cases) defined as:
.. math::
DIC = \bar{D} + p_{D}
Args:
mean_posterior_lls (ndarray): a 1d matrix containing the log likelihood for the average posterior
point estimate. That is, the single log likelihood of the average parameters.
ll_per_sample (ndarray): a (d, n) array with for d problems the n log likelihoods.
This is the log likelihood per sample.
Returns:
dict: a dictionary containing the ``DIC_2002``, the ``DIC_2004`` and the ``DIC_Ando_2011`` information
criterion maps.
| 3.089195
| 2.357617
| 1.310304
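A standalone sketch mirroring the three DIC variants above on toy log-likelihood values (the numbers are made up, not real model output)::

    import numpy as np

    rng = np.random.RandomState(0)
    ll_per_sample = -50 + rng.randn(2, 100)        # d = 2 problems, n = 100 posterior samples
    mean_posterior_lls = np.array([-49.5, -49.8])  # log likelihood at the posterior mean

    mean_deviance = -2 * np.mean(ll_per_sample, axis=1)
    deviance_at_mean = -2 * mean_posterior_lls
    p_d_2002 = mean_deviance - deviance_at_mean
    p_d_2004 = np.var(ll_per_sample, axis=1) / 2.0
    dic_2002 = mean_deviance + p_d_2002
    dic_2004 = mean_deviance + p_d_2004
    dic_ando_2011 = mean_deviance + 2 * p_d_2002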
|
mu = params[0]
sigma = params[1]
if sigma == 0:
return np.inf
ll = np.sum(norm.logpdf(data, mu, sigma))
ll -= len(data) * np.log((norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma)))
return -ll
|
def truncated_normal_log_likelihood(params, low, high, data)
|
Calculate the log likelihood of the truncated normal distribution.
Args:
params: tuple with (mean, std), the parameters under which we evaluate the model
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one dimensional array of data points for which we want to calculate the likelihood
Returns:
float: the negative log likelihood of observing the given data under the given parameters.
This is meant to be used in minimization routines.
| 2.705406
| 3.044348
| 0.888665
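A usage sketch fitting the truncated normal by direct minimization of the returned negative log likelihood (hypothetical; assumes ``truncated_normal_log_likelihood`` above is importable)::

    import numpy as np
    from scipy.optimize import minimize
    from scipy.stats import truncnorm

    low, high = 0.0, 1.0
    data = truncnorm.rvs((low - 0.4) / 0.1, (high - 0.4) / 0.1, loc=0.4, scale=0.1,
                         size=5000, random_state=0)
    result = minimize(truncated_normal_log_likelihood,
                      x0=[np.mean(data), np.std(data)],
                      args=(low, high, data),
                      method='L-BFGS-B', bounds=[(low, high), (1e-4, None)])
    # result.x should be close to the true (mean, std) = (0.4, 0.1)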
|
if params[1] == 0:
return np.array([np.inf, np.inf])
return np.array([_TruncatedNormalFitter.partial_derivative_mu(params[0], params[1], low, high, data),
_TruncatedNormalFitter.partial_derivative_sigma(params[0], params[1], low, high, data)])
|
def truncated_normal_ll_gradient(params, low, high, data)
|
Return the gradient of the negative log likelihood of the truncated normal at the given position.
Args:
params: tuple with (mean, std), the parameters under which we evaluate the model
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one dimensional array of data points for which we want to calculate the likelihood
Returns:
ndarray: the gradient of the negative log likelihood with respect to (mean, std)
| 3.038273
| 3.214513
| 0.945174
|
pd_mu = np.sum(data - mu) / sigma ** 2
pd_mu -= len(data) * ((norm.pdf(low, mu, sigma) - norm.pdf(high, mu, sigma))
/ (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma)))
return -pd_mu
|
def partial_derivative_mu(mu, sigma, low, high, data)
|
The partial derivative with respect to the mean.
Args:
mu (float): the mean of the truncated normal
sigma (float): the std of the truncated normal
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one dimensional array of data points for which we want to calculate the likelihood
Returns:
float: the partial derivative evaluated at the given point
| 2.999682
| 3.657489
| 0.820148
|
pd_sigma = np.sum(-(1 / sigma) + ((data - mu) ** 2 / (sigma ** 3)))
pd_sigma -= len(data) * (((low - mu) * norm.pdf(low, mu, sigma) - (high - mu) * norm.pdf(high, mu, sigma))
/ (sigma * (norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma))))
return -pd_sigma
|
def partial_derivative_sigma(mu, sigma, low, high, data)
|
The partial derivative with respect to the standard deviation.
Args:
mu (float): the mean of the truncated normal
sigma (float): the std of the truncated normal
low (float): the lower truncation bound
high (float): the upper truncation bound
data (ndarray): the one dimensional array of data points for which we want to calculate the likelihood
Returns:
float: the partial derivative evaluated at the given point
| 3.193369
| 3.649824
| 0.874938
|
return {
'nmsimplex_scratch': LocalMemory(
'mot_float_type', self._nmr_parameters * 2 + (self._nmr_parameters + 1) ** 2 + 1),
'initial_simplex_scale': LocalMemory('mot_float_type', self._nmr_parameters)
}
|
def get_kernel_data(self)
|
Get the kernel data needed for this optimization routine to work.
| 13.320233
| 10.907323
| 1.221219
|
return {
'subplex_scratch_float': LocalMemory(
'mot_float_type', 4 + self._var_replace_dict['NMR_PARAMS'] * 2
+ self._var_replace_dict['MAX_SUBSPACE_LENGTH'] * 2
+ (self._var_replace_dict['MAX_SUBSPACE_LENGTH'] * 2
+ self._var_replace_dict['MAX_SUBSPACE_LENGTH']+1)**2 + 1),
'subplex_scratch_int': LocalMemory(
'int', 2 + self._var_replace_dict['NMR_PARAMS']
+ (self._var_replace_dict['NMR_PARAMS'] // self._var_replace_dict['MIN_SUBSPACE_LENGTH'])),
'initial_simplex_scale': LocalMemory('mot_float_type', self._var_replace_dict['NMR_PARAMS'])
}
|
def get_kernel_data(self)
|
Get the kernel data needed for this optimization routine to work.
| 5.315953
| 4.902423
| 1.084352
|
return {
'scratch_mot_float_type': LocalMemory(
'mot_float_type', 8 +
2 * self._var_replace_dict['NMR_OBSERVATIONS'] +
5 * self._var_replace_dict['NMR_PARAMS'] +
self._var_replace_dict['NMR_PARAMS'] * self._var_replace_dict['NMR_OBSERVATIONS']),
'scratch_int': LocalMemory('int', self._var_replace_dict['NMR_PARAMS'])
}
|
def get_kernel_data(self)
|
Get the kernel data needed for this optimization routine to work.
| 6.706178
| 6.133766
| 1.093321
|
elements = []
for value in bounds:
if all_elements_equal(value):
elements.append(Scalar(get_single_value(value), ctype='mot_float_type'))
else:
elements.append(Array(value, ctype='mot_float_type', as_scalar=True))
return CompositeArray(elements, 'mot_float_type', address_space='local')
|
def _bounds_to_array(bounds)
|
Create a CompositeArray to hold the bounds.
| 6.873764
| 5.689745
| 1.208097
|