| sentence1 (string, 52 to 3.87M chars) | sentence2 (string, 1 to 47.2k chars) | label (1 class) |
|---|---|---|
def half_mag_amplitude_ratio(self, mag, avg, weight):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
lower_weight = weight[index]
lower_weight_sum = np.sum(lower_weight)
lower_mag = mag[index]
lower_weighted_std = np.sum((lower_mag
- avg) ** 2 * lower_weight) / \
lower_weight_sum
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
higher_weight = weight[index]
higher_weight_sum = np.sum(higher_weight)
higher_mag = mag[index]
higher_weighted_std = np.sum((higher_mag
- avg) ** 2 * higher_weight) / \
higher_weight_sum
# Return ratio.
return np.sqrt(lower_weighted_std / higher_weighted_std)
|
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
weight : array_like
An array of weight.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
|
entailment
|
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum)
|
Return ratio of amplitude of higher and lower magnitudes.
A ratio of amplitude of higher and lower magnitudes than average,
considering weights. This ratio, by definition, should be higher
for EB than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
|
entailment
|
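As a quick illustration of the feature above, here is a minimal standalone sketch (not part of the original class) that reproduces the unweighted computation on a synthetic, eclipsing-binary-like light curve; only numpy is assumed and the magnitudes are made up for demonstration.
import numpy as np

rng = np.random.default_rng(0)
mag = 15.0 + 0.01 * rng.standard_normal(1000)   # flat baseline with small scatter
mag[::50] += 0.8                                # occasional deep fades (larger magnitude = fainter)
avg = np.median(mag)

fainter = mag[mag > avg]
brighter = mag[mag <= avg]
lower_sum = np.sum((fainter - avg) ** 2) / len(fainter)
higher_sum = np.sum((avg - brighter) ** 2) / len(brighter)
hl_ratio = np.sqrt(lower_sum / higher_sum)
print(hl_ratio)   # well above 1 for this eclipsing-like curve, near 1 for symmetric scatter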
def get_eta(self, mag, std):
"""
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
"""
diff = mag[1:] - mag[:len(mag) - 1]
eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
return eta
|
Return Eta feature.
Parameters
----------
mag : array_like
An array of magnitudes.
std : array_like
A standard deviation of magnitudes.
Returns
-------
eta : float
The value of Eta index.
|
entailment
|
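A small worked example of the Eta index, sketched standalone with numpy only (the method's `std` argument is replaced by `np.std(mag)` here); the value stays close to 2 for uncorrelated noise and drops for smooth, correlated variations.
import numpy as np

def eta_index(mag):
    # Von Neumann-style ratio of successive differences to the variance.
    diff = mag[1:] - mag[:-1]
    return np.sum(diff * diff) / (len(mag) - 1.) / np.std(mag) ** 2

rng = np.random.default_rng(1)
noise = rng.standard_normal(500)
smooth = np.sin(np.linspace(0.0, 4.0 * np.pi, 500)) + 0.05 * rng.standard_normal(500)
print(eta_index(noise))    # ~2 for white noise
print(eta_index(smooth))   # much smaller for a correlated signal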
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
"""
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90
|
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
|
entailment
|
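The sketch below mirrors the same slope-percentile computation outside the class on a synthetic phase-folded sine curve; only numpy is assumed and the data are illustrative.
import numpy as np

rng = np.random.default_rng(2)
phase = np.sort(rng.uniform(0.0, 1.0, 200))        # sorted phase-folded dates
mag = 15.0 + 0.3 * np.sin(2.0 * np.pi * phase)     # magnitudes sorted by phase

date_diff = phase[1:] - phase[:-1]
mag_diff = mag[1:] - mag[:-1]
keep = mag_diff != 0.
slope = date_diff[keep] / mag_diff[keep]
print(np.percentile(slope, 10.), np.percentile(slope, 90.))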
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
"""
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c)
|
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
|
entailment
|
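A standalone sketch of the same cumulative-sum range, with plain mean and standard deviation standing in for the weighted statistics the method reads from `self`; numpy only, synthetic data.
import numpy as np

rng = np.random.default_rng(3)
mag = 15.0 + 0.05 * rng.standard_normal(300)
mean, std = np.mean(mag), np.std(mag)       # stand-ins for weighted_mean / weighted_std
c = np.cumsum(mag - mean) / len(mag) / std
print(np.max(c) - np.min(c))                # small for pure noise, larger for drifting series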
def get_features2(self):
"""
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
"""
feature_names = []
feature_values = []
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
# Omit input variables such as date, mag, err, etc.
if not (name == 'date' or name == 'mag' or name == 'err'
or name == 'n_threads' or name == 'min_period'):
# Filter some other unnecessary features.
if not (name == 'f' or name == 'f_phase'
or name == 'period_log10FAP'
or name == 'weight' or name == 'weighted_sum'
or name == 'median' or name == 'mean' or name == 'std'):
feature_names.append(name)
# Sort by the names.
# Sorting should be done to keep maintaining the same order of features.
feature_names.sort()
# Get feature values.
for name in feature_names:
feature_values.append(all_vars[name])
return feature_names, feature_values
|
Return all features with its names.
Returns
-------
names : list
Feature names.
values : list
Feature values
|
entailment
|
def get_features_all(self):
"""
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
"""
features = {}
# Get all the names of features.
all_vars = vars(self)
for name in all_vars.keys():
if name in feature_names_list_all:
features[name] = all_vars[name]
# Sort by the keys (i.e. feature names).
features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
return features
|
Return all features with its names.
Regardless of being used for train and prediction. Sorted by the names.
Returns
-------
all_features : OrderedDict
Features dictionary.
|
entailment
|
def init(device_id=None, random_seed=None):
"""Initialize Hebel.
This function creates a CUDA context, CUBLAS context and
initializes and seeds the pseudo-random number generator.
**Parameters:**
device_id : integer, optional
The ID of the GPU device to use. If this is omitted, PyCUDA's
default context is used, which by default uses the fastest
available device on the system. Alternatively, you can put the
device id in the environment variable ``CUDA_DEVICE`` or into
the file ``.cuda-device`` in the user's home directory.
random_seed : integer, optional
The seed to use for the pseudo-random number generator. If
this is omitted, the seed is taken from the environment
variable ``RANDOM_SEED`` and if that is not defined, a random
integer is used as a seed.
"""
if device_id is None:
device_id = _os.environ.get('CUDA_DEVICE')
if random_seed is None:
random_seed = _os.environ.get('RANDOM_SEED')
global is_initialized
if not is_initialized:
is_initialized = True
global context
context.init_context(device_id)
from pycuda import gpuarray, driver, curandom
# Initialize memory pool
global memory_pool
memory_pool.init()
# Initialize PRG
global sampler
sampler.set_seed(random_seed)
# Initialize pycuda_ops
from hebel import pycuda_ops
pycuda_ops.init()
|
Initialize Hebel.
This function creates a CUDA context, CUBLAS context and
initializes and seeds the pseudo-random number generator.
**Parameters:**
device_id : integer, optional
The ID of the GPU device to use. If this is omitted, PyCUDA's
default context is used, which by default uses the fastest
available device on the system. Alternatively, you can put the
device id in the environment variable ``CUDA_DEVICE`` or into
the file ``.cuda-device`` in the user's home directory.
random_seed : integer, optional
The seed to use for the pseudo-random number generator. If
this is omitted, the seed is taken from the environment
variable ``RANDOM_SEED`` and if that is not defined, a random
integer is used as a seed.
|
entailment
|
def inflate_context_tuple(ast_rootpath, root_env):
"""Instantiate a Tuple from a TupleNode.
Walking the AST tree upwards, evaluate from the root down again.
"""
with util.LogTime('inflate_context_tuple'):
# We only need to look at tuple members going down.
inflated = ast_rootpath[0].eval(root_env)
current = inflated
env = root_env
try:
for node in ast_rootpath[1:]:
if is_tuple_member_node(node):
assert framework.is_tuple(current)
with util.LogTime('into tuple'):
thunk, env = inflated.get_thunk_env(node.name)
current = framework.eval(thunk, env)
elif framework.is_list(current):
with util.LogTime('eval thing'):
current = framework.eval(node, env)
if framework.is_tuple(current):
inflated = current
except (gcl.EvaluationError, ast.UnparseableAccess):
# Eat evaluation error, probably means the rightmost tuplemember wasn't complete.
# Return what we have so far.
pass
return inflated
|
Instantiate a Tuple from a TupleNode.
Walking the AST tree upwards, evaluate from the root down again.
|
entailment
|
def enumerate_scope(ast_rootpath, root_env=None, include_default_builtins=False):
"""Return a dict of { name => Completions } for the given tuple node.
Enumerates all keys that are in scope in a given tuple. The node
part of the tuple may be None, in case the binding is a built-in.
"""
with util.LogTime('enumerate_scope'):
scope = {}
for node in reversed(ast_rootpath):
if is_tuple_node(node):
for member in node.members:
if member.name not in scope:
scope[member.name] = Completion(member.name, False, member.comment.as_string(), member.location)
if include_default_builtins: # Backwards compat flag
root_env = gcl.default_env
if root_env:
for k in root_env.keys():
if k not in scope and not hide_from_autocomplete(root_env[k]):
v = root_env[k]
scope[k] = Completion(k, True, dedent(v.__doc__ or ''), None)
return scope
|
Return a dict of { name => Completions } for the given tuple node.
Enumerates all keys that are in scope in a given tuple. The node
part of the tuple may be None, in case the binding is a built-in.
|
entailment
|
def find_deref_completions(ast_rootpath, root_env=gcl.default_env):
"""Returns a dict of { name => Completions }."""
with util.LogTime('find_deref_completions'):
tup = inflate_context_tuple(ast_rootpath, root_env)
path = path_until(ast_rootpath, is_deref_node)
if not path:
return {}
deref = path[-1]
haystack = deref.haystack(tup.env(tup))
if not hasattr(haystack, 'keys'):
return {}
return {n: get_completion(haystack, n) for n in haystack.keys()}
|
Returns a dict of { name => Completions }.
|
entailment
|
def is_identifier_position(rootpath):
"""Return whether the cursor is in identifier-position in a member declaration."""
if len(rootpath) >= 2 and is_tuple_member_node(rootpath[-2]) and is_identifier(rootpath[-1]):
return True
if len(rootpath) >= 1 and is_tuple_node(rootpath[-1]):
# No deeper node than tuple? Must be identifier position, otherwise we'd have a TupleMemberNode.
return True
return False
|
Return whether the cursor is in identifier-position in a member declaration.
|
entailment
|
def find_completions_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env):
"""Find completions at the cursor.
Return a dict of { name => Completion } objects.
"""
q = gcl.SourceQuery(filename, line, col - 1)
rootpath = ast_tree.find_tokens(q)
if is_identifier_position(rootpath):
return find_inherited_key_completions(rootpath, root_env)
try:
ret = find_deref_completions(rootpath, root_env) or enumerate_scope(rootpath, root_env=root_env)
assert isinstance(ret, dict)
return ret
except gcl.EvaluationError:
# Probably an unbound value or something--just return an empty list
return {}
|
Find completions at the cursor.
Return a dict of { name => Completion } objects.
|
entailment
|
def find_inherited_key_completions(rootpath, root_env):
"""Return completion keys from INHERITED tuples.
Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple,
then enumerate the keys that are NOT in the rightmost tuple.
"""
tup = inflate_context_tuple(rootpath, root_env)
if isinstance(tup, runtime.CompositeTuple):
keys = set(k for t in tup.tuples[:-1] for k in t.keys())
return {n: get_completion(tup, n) for n in keys}
return {}
|
Return completion keys from INHERITED tuples.
Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple,
then enumerate the keys that are NOT in the rightmost tuple.
|
entailment
|
def find_value_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env):
"""Find the value of the object under the cursor."""
q = gcl.SourceQuery(filename, line, col)
rootpath = ast_tree.find_tokens(q)
rootpath = path_until(rootpath, is_thunk)
if len(rootpath) <= 1:
# Just the file tuple itself, or some non-thunk element at the top level
return None
tup = inflate_context_tuple(rootpath, root_env)
try:
if isinstance(rootpath[-1], ast.Inherit):
# Special case handling of 'Inherit' nodes, show the value that's being
# inherited.
return tup[rootpath[-1].name]
return rootpath[-1].eval(tup.env(tup))
except gcl.EvaluationError as e:
return e
|
Find the value of the object under the cursor.
|
entailment
|
def add_vec_to_mat(mat, vec, axis=None, inplace=False,
target=None, substract=False):
""" Add a vector to a matrix
"""
assert mat.flags.c_contiguous
if axis is None:
if vec.shape[0] == mat.shape[0]:
axis = 0
elif vec.shape[0] == mat.shape[1]:
axis = 1
else:
raise ValueError('Vector length must be equal '
'to one side of the matrix')
n, m = mat.shape
block = (_compilation_constants['add_vec_block_size'],
_compilation_constants['add_vec_block_size'], 1)
gridx = ceil_div(n, block[0])
gridy = ceil_div(m, block[1])
grid = (gridx, gridy, 1)
if inplace:
target = mat
elif target is None:
target = gpuarray.empty_like(mat)
if axis == 0:
assert vec.shape[0] == mat.shape[0]
add_col_vec_kernel.prepared_call(
grid, block,
mat.gpudata,
vec.gpudata,
target.gpudata,
np.uint32(n),
np.uint32(m),
np.int32(substract))
elif axis == 1:
assert vec.shape[0] == mat.shape[1]
add_row_vec_kernel.prepared_call(
grid, block,
mat.gpudata,
vec.gpudata,
target.gpudata,
np.uint32(n),
np.uint32(m),
np.int32(substract))
return target
|
Add a vector to a matrix
|
entailment
|
def vector_normalize(mat, max_vec_norm=1.):
""" Normalize each column vector in mat to length
max_vec_norm if it is longer than max_vec_norm
"""
assert mat.flags.c_contiguous
n, m = mat.shape
vector_normalize_kernel.prepared_call(
(m, 1, 1), (32, 1, 1),
mat.gpudata,
np.float32(max_vec_norm),
np.int32(m),
np.int32(n))
|
Normalize each column vector in mat to length
max_vec_norm if it is longer than max_vec_norm
|
entailment
|
def preprocess(string):
"""
Preprocesses a string, by replacing ${VARNAME} with
os.environ['VARNAME']
Parameters
----------
string: the str object to preprocess
Returns
-------
the preprocessed string
"""
split = string.split('${')
rval = [split[0]]
for candidate in split[1:]:
subsplit = candidate.split('}')
if len(subsplit) < 2:
raise ValueError('Open ${ not followed by } before ' \
+ 'end of string or next ${ in "' \
+ string + '"')
varname = subsplit[0]
if varname == 'PYLEARN2_TRAIN_FILE_NAME':
warnings.warn("PYLEARN2_TRAIN_FILE_NAME is deprecated and may be "
"removed from the library on or after Oct 22, 2013. Switch"
" to PYLEARN2_TRAIN_FILE_FULL_STEM")
try:
val = os.environ[varname]
except KeyError:
if varname == 'PYLEARN2_DATA_PATH':
raise NoDataPathError()
if varname == 'PYLEARN2_VIEWER_COMMAND':
raise EnvironmentVariableError(environment_variable_essay)
raise ValueError('Unrecognized environment variable "' + varname
+ '". Did you mean ' + match(varname, os.environ.keys())
+ '?')
rval.append(val)
rval.append('}'.join(subsplit[1:]))
rval = ''.join(rval)
return rval
|
Preprocesses a string, by replacing ${VARNAME} with
os.environ['VARNAME']
Parameters
----------
string: the str object to preprocess
Returns
-------
the preprocessed string
|
entailment
|
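A hypothetical usage sketch of `preprocess` as defined above, assuming the function is in scope; the environment variable name and path are made up for illustration.
import os

os.environ['MY_DATA_ROOT'] = '/tmp/data'         # hypothetical variable
print(preprocess('${MY_DATA_ROOT}/train.csv'))   # -> '/tmp/data/train.csv'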
def tokenize_by_number(s):
""" splits a string into a list of tokens
each of which is either a string containing no numbers
or a float """
r = find_number(s)
if r == None:
return [ s ]
else:
tokens = []
if r[0] > 0:
tokens.append(s[0:r[0]])
tokens.append( float(s[r[0]:r[1]]) )
if r[1] < len(s):
tokens.extend(tokenize_by_number(s[r[1]:]))
return tokens
assert False
|
splits a string into a list of tokens
each of which is either a string containing no numbers
or a float
|
entailment
|
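An illustrative call, assuming the module's `find_number` helper (not shown in this snippet) locates the first numeric run in the string:
print(tokenize_by_number('layer10'))      # -> ['layer', 10.0]
print(tokenize_by_number('foo2bar'))      # -> ['foo', 2.0, 'bar']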
def number_aware_alphabetical_cmp(str1, str2):
""" cmp function for sorting a list of strings by alphabetical order, but with
numbers sorted numerically.
i.e., foo1, foo2, foo10, foo11
instead of foo1, foo10
"""
def flatten_tokens(tokens):
l = []
for token in tokens:
if isinstance(token, str):
for char in token:
l.append(char)
else:
assert isinstance(token, float)
l.append(token)
return l
seq1 = flatten_tokens(tokenize_by_number(str1))
seq2 = flatten_tokens(tokenize_by_number(str2))
l = min(len(seq1),len(seq2))
i = 0
while i < l:
if seq1[i] < seq2[i]:
return -1
elif seq1[i] > seq2[i]:
return 1
i += 1
if len(seq1) < len(seq2):
return -1
elif len(seq1) > len(seq2):
return 1
return 0
|
cmp function for sorting a list of strings by alphabetical order, but with
numbers sorted numerically.
i.e., foo1, foo2, foo10, foo11
instead of foo1, foo10
|
entailment
|
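Since this is a classic `cmp`-style function, it plugs into sorting either via `cmp=` on Python 2 (which this code base targets) or via `functools.cmp_to_key` on Python 3, as in this sketch:
import functools

names = ['foo10', 'foo2', 'foo1', 'foo11']
print(sorted(names, key=functools.cmp_to_key(number_aware_alphabetical_cmp)))
# -> ['foo1', 'foo2', 'foo10', 'foo11']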
def match(wrong, candidates):
"""
wrong: a misspelling
candidates: a set of correct words
returns a guess of which candidate is the right one
This should be used with a small number of candidates and a high potential
edit distance.
ie, use it to correct a wrong filename in a directory, wrong class name
in a module, etc. Don't use it to correct small typos of freeform natural
language words.
"""
assert len(candidates) > 0
# Current implementation tries all candidates and outputs the one
# with the min score
# Could try to do something smarter
def score(w1,w2):
# Current implementation returns negative dot product of
# the two words mapped into a feature space by mapping phi
# w -> [ phi(w1), .1 phi(first letter of w), .1 phi(last letter of w) ]
# Could try to do something smarter
w1 = w1.lower()
w2 = w2.lower()
def phi(w):
# Current feature mapping is to the vector of counts of
# all letters and two-letter sequences
# Could try to do something smarter
rval = {}
for i in xrange(len(w)):
l = w[i]
rval[l] = rval.get(l,0.) + 1.
if i < len(w)-1:
b = w[i:i+2]
rval[b] = rval.get(b,0.) + 1.
return rval
d1 = phi(w1)
d2 = phi(w2)
def mul(d1, d2):
rval = 0
for key in set(d1).union(d2):
rval += d1.get(key,0) * d2.get(key,0)
return rval
tot_score = mul(phi(w1),phi(w2)) / float(len(w1)*len(w2)) + \
0.1 * mul(phi(w1[0:1]), phi(w2[0:1])) + \
0.1 * mul(phi(w1[-1:]), phi(w2[-1:]))
return tot_score
scored_candidates = [ (-score(wrong, candidate), candidate)
for candidate in candidates ]
scored_candidates.sort()
return scored_candidates[0][1]
|
wrong: a misspelling
candidates: a set of correct words
returns a guess of which candidate is the right one
This should be used with a small number of candidates and a high potential
edit distance.
ie, use it to correct a wrong filename in a directory, wrong class name
in a module, etc. Don't use it to correct small typos of freeform natural
language words.
|
entailment
|
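An illustrative call in the intended spirit (correcting a mistyped filename against a small candidate set); note the helper relies on `xrange` and is therefore Python 2 code, and the exact winner depends on the bigram scoring:
candidates = ['train.yaml', 'test.yaml', 'model.pkl']
print(match('tran.yaml', candidates))   # expected: 'train.yaml'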
def censor_non_alphanum(s):
"""
Returns s with all non-alphanumeric characters replaced with *
"""
def censor(ch):
# Keep ASCII letters and digits; everything else becomes '*'.
if ('A' <= ch <= 'Z') or ('a' <= ch <= 'z') or ('0' <= ch <= '9'):
return ch
return '*'
return ''.join([censor(ch) for ch in s])
|
Returns s with all non-alphanumeric characters replaced with *
|
entailment
|
def is_period_alias(period):
"""
Check if a given period is possibly an alias.
Parameters
----------
period : float
A period to test if it is a possible alias or not.
Returns
-------
is_alias : boolean
True if the given period is in a range of period alias.
"""
# Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014).
# Period alias occurs mostly at ~1 and ~30.
# Check each 1, 2, 3, 4, 5 factors.
for i in range(1, 6):
# One-day and one-month alias
if (.99 / float(i)) < period < (1.004 / float(i)):
return True
if (1.03 / float(i)) < period < (1.04 / float(i)):
return True
if (29.2 / float(i)) < period < (29.9 / float(i)):
return True
# From candidates from the two fields 01, 08.
# All of them are close to one day (or sidereal) alias.
if (0.96465 / float(i)) < period < (0.96485 / float(i)):
return True
if (0.96725 / float(i)) < period < (0.96745 / float(i)):
return True
if (0.98190 / float(i)) < period < (0.98230 / float(i)):
return True
if (1.01034 / float(i)) < period < (1.01076 / float(i)):
return True
if (1.01568 / float(i)) < period < (1.01604 / float(i)):
return True
if (1.01718 / float(i)) < period < (1.01742 / float(i)):
return True
# From the all candidates from the entire LMC fields.
# Some of these could be overlapped with the above cuts.
if (0.50776 / float(i)) < period < (0.50861 / float(i)):
return True
if (0.96434 / float(i)) < period < (0.9652 / float(i)):
return True
if (0.96688 / float(i)) < period < (0.96731 / float(i)):
return True
if (1.0722 / float(i)) < period < (1.0729 / float(i)):
return True
if (27.1 / float(i)) < period < (27.5 / float(i)):
return True
# Not in the range of any alias.
return False
|
Check if a given period is possibly an alias.
Parameters
----------
period : float
A period to test if it is a possible alias or not.
Returns
-------
is_alias : boolean
True if the given period is in a range of period alias.
|
entailment
|
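A few illustrative calls to the function above; the chosen periods fall inside or outside the hard-coded alias windows (one-day, sidereal, and roughly one-month aliases, divided by integer factors):
print(is_period_alias(0.9997))   # True: inside the ~1-day alias window
print(is_period_alias(0.5002))   # True: half of the one-day alias (factor i = 2)
print(is_period_alias(3.7))      # False: not near any of the listed alias ranges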
def save(filepath, obj, on_overwrite = 'ignore'):
"""
Serialize `object` to a file denoted by `filepath`.
Parameters
----------
filepath : str
A filename. If the suffix is `.joblib` and joblib can be
imported, `joblib.dump` is used in place of the regular
pickling mechanisms; this results in much faster saves by
saving arrays as separate .npy files on disk. If the file
suffix is `.npy` then `numpy.save` is attempted on `obj`.
Otherwise, (c)pickle is used.
obj : object
A Python object to be serialized.
on_overwrite: A string specifying what to do if the file already
exists.
ignore: just overwrite it
backup: make a copy of the file (<filepath>.bak) and
delete it when done saving the new copy.
this allows recovery of the old version of
the file if saving the new one fails
"""
filepath = preprocess(filepath)
if os.path.exists(filepath):
if on_overwrite == 'backup':
backup = filepath + '.bak'
shutil.move(filepath, backup)
save(filepath, obj)
try:
os.remove(backup)
except Exception, e:
warnings.warn("Got an error while traing to remove "+backup+":"+str(e))
return
else:
assert on_overwrite == 'ignore'
try:
_save(filepath, obj)
except RuntimeError, e:
""" Sometimes for large theano graphs, pickle/cPickle exceed the
maximum recursion depth. This seems to me like a fundamental
design flaw in pickle/cPickle. The workaround I employ here
is the one recommended to someone who had a similar problem
on stackexchange:
http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
Obviously this does not scale and could cause a crash
but I don't see another solution short of writing our
own implementation of pickle.
"""
if str(e).find('recursion') != -1:
warnings.warn('pylearn2.utils.save encountered the following '
'error: ' + str(e) +
'\nAttempting to resolve this error by calling ' +
'sys.setrecursionlimit and retrying')
old_limit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(50000)
_save(filepath, obj)
finally:
sys.setrecursionlimit(old_limit)
|
Serialize `object` to a file denoted by `filepath`.
Parameters
----------
filepath : str
A filename. If the suffix is `.joblib` and joblib can be
imported, `joblib.dump` is used in place of the regular
pickling mechanisms; this results in much faster saves by
saving arrays as separate .npy files on disk. If the file
suffix is `.npy` then `numpy.save` is attempted on `obj`.
Otherwise, (c)pickle is used.
obj : object
A Python object to be serialized.
on_overwrite: A string specifying what to do if the file already
exists.
ignore: just overwrite it
backup: make a copy of the file (<filepath>.bak) and
delete it when done saving the new copy.
this allows recovery of the old version of
the file if saving the new one fails
|
entailment
|
def get_pickle_protocol():
"""
Allow configuration of the pickle protocol on a per-machine basis.
This way, if you use multiple platforms with different versions of
pickle, you can configure each of them to use the highest protocol
supported by all of the machines that you want to be able to
communicate.
"""
try:
protocol_str = os.environ['PYLEARN2_PICKLE_PROTOCOL']
except KeyError:
# If not defined, we default to 0 because this is the default
# protocol used by cPickle.dump (and because it results in
# maximum portability)
protocol_str = '0'
if protocol_str == 'pickle.HIGHEST_PROTOCOL':
return pickle.HIGHEST_PROTOCOL
return int(protocol_str)
|
Allow configuration of the pickle protocol on a per-machine basis.
This way, if you use multiple platforms with different versions of
pickle, you can configure each of them to use the highest protocol
supported by all of the machines that you want to be able to
communicate.
|
entailment
|
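A small usage sketch, assuming the function and its `os`/`pickle` imports are in scope; the environment variable is the one the function documents:
import os
import pickle

os.environ['PYLEARN2_PICKLE_PROTOCOL'] = 'pickle.HIGHEST_PROTOCOL'
assert get_pickle_protocol() == pickle.HIGHEST_PROTOCOL

del os.environ['PYLEARN2_PICKLE_PROTOCOL']
assert get_pickle_protocol() == 0    # default: maximally portable protocol 0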
def load_train_file(config_file_path):
"""Loads and parses a yaml file for a Train object.
Publishes the relevant training environment variables"""
from pylearn2.config import yaml_parse
suffix_to_strip = '.yaml'
# publish environment variables related to file name
if config_file_path.endswith(suffix_to_strip):
config_file_full_stem = config_file_path[0:-len(suffix_to_strip)]
else:
config_file_full_stem = config_file_path
for varname in ["PYLEARN2_TRAIN_FILE_NAME", #this one is deprecated
"PYLEARN2_TRAIN_FILE_FULL_STEM"]: #this is the new, accepted name
environ.putenv(varname, config_file_full_stem)
directory = config_file_path.split('/')[:-1]
directory = '/'.join(directory)
if directory != '':
directory += '/'
environ.putenv("PYLEARN2_TRAIN_DIR", directory)
environ.putenv("PYLEARN2_TRAIN_BASE_NAME", config_file_path.split('/')[-1] )
environ.putenv("PYLEARN2_TRAIN_FILE_STEM", config_file_full_stem.split('/')[-1] )
return yaml_parse.load_path(config_file_path)
|
Loads and parses a yaml file for a Train object.
Publishes the relevant training environment variables
|
entailment
|
def feed_forward(self, input_data, prediction=False):
"""Propagate forward through the layer
**Parameters:**
input_data : ``GPUArray``
Input data to perform dropout on.
prediction : bool, optional
Whether to use the prediction model. If true, then the data is
scaled by ``1 - dropout_probability`` instead of dropping out units.
**Returns:**
dropout_data : ``GPUArray``
The data after performing dropout.
"""
if input_data.shape[1] != self.n_in:
raise ValueError('Number of outputs from previous layer (%d) '
'does not match number of inputs to this layer (%d)' %
(input_data.shape[1], self.n_in))
if not prediction:
dropout_input = gpuarray.empty_like(input_data)
dropout_mask = sample_dropout_mask(input_data,
self.dropout_probability, target=dropout_input
)
return dropout_input, dropout_mask
else:
return (input_data * (1 - self.dropout_probability),)
|
Propagate forward through the layer
**Parameters:**
input_data : ``GPUArray``
Input data to perform dropout on.
prediction : bool, optional
Whether to use the prediction model. If true, then the data is
scaled by ``1 - dropout_probability`` instead of dropping out units.
**Returns:**
dropout_data : ``GPUArray``
The data after performing dropout.
|
entailment
|
def backprop(self, input_data, df_output, cache=None):
""" Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to perform dropout on.
df_output : ``GPUArray``
Gradients with respect to the output of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : empty tuple
Gradients are empty since this layer has no parameters.
df_input : ``GPUArray``
Gradients with respect to the input.
"""
if self.compute_input_gradients:
# The dropout mask must come from the forward-pass cache, which holds
# the (dropout_input, dropout_mask) pair returned by feed_forward.
_, dropout_mask = cache
apply_dropout_mask(df_output, dropout_mask)
return tuple(), df_output
|
Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to perform dropout on.
df_output : ``GPUArray``
Gradients with respect to the output of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : empty tuple
Gradients are empty since this layer has no parameters.
df_input : ``GPUArray``
Gradients with respect to the input.
|
entailment
|
def POINTER(obj):
"""
Create ctypes pointer to object.
Notes
-----
This function converts None to a real NULL pointer because of a bug
in how ctypes handles None on 64-bit platforms.
"""
p = ctypes.POINTER(obj)
if not isinstance(p.from_param, classmethod):
def from_param(cls, x):
if x is None:
return cls()
else:
return x
p.from_param = classmethod(from_param)
return p
|
Create ctypes pointer to object.
Notes
-----
This function converts None to a real NULL pointer because of a bug
in how ctypes handles None on 64-bit platforms.
|
entailment
|
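A short sketch of why the wrapper exists: the patched `from_param` lets `None` be passed where a C API expects a nullable pointer (assuming the `POINTER` helper above is in scope):
import ctypes

c_float_p = POINTER(ctypes.c_float)

buf = (ctypes.c_float * 4)(1.0, 2.0, 3.0, 4.0)
p = c_float_p.from_param(ctypes.cast(buf, c_float_p))   # real pointers pass through unchanged
null_p = c_float_p.from_param(None)                     # None becomes a genuine NULL pointer
print(bool(p), bool(null_p))                            # True False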
def gpuarray_ptr(g):
"""
Return ctypes pointer to data in GPUArray object.
"""
addr = int(g.gpudata)
if g.dtype == np.int8:
return ctypes.cast(addr, POINTER(ctypes.c_byte))
if g.dtype == np.uint8:
return ctypes.cast(addr, POINTER(ctypes.c_ubyte))
if g.dtype == np.int16:
return ctypes.cast(addr, POINTER(ctypes.c_short))
if g.dtype == np.uint16:
return ctypes.cast(addr, POINTER(ctypes.c_ushort))
if g.dtype == np.int32:
return ctypes.cast(addr, POINTER(ctypes.c_int))
if g.dtype == np.uint32:
return ctypes.cast(addr, POINTER(ctypes.c_uint))
if g.dtype == np.int64:
return ctypes.cast(addr, POINTER(ctypes.c_long))
if g.dtype == np.uint64:
return ctypes.cast(addr, POINTER(ctypes.c_ulong))
if g.dtype == np.float32:
return ctypes.cast(addr, POINTER(ctypes.c_float))
elif g.dtype == np.float64:
return ctypes.cast(addr, POINTER(ctypes.c_double))
elif g.dtype == np.complex64:
return ctypes.cast(addr, POINTER(cuFloatComplex))
elif g.dtype == np.complex128:
return ctypes.cast(addr, POINTER(cuDoubleComplex))
else:
raise ValueError('unrecognized type')
|
Return ctypes pointer to data in GPUArray object.
|
entailment
|
def cudaMalloc(count, ctype=None):
"""
Allocate device memory.
Allocate memory on the device associated with the current active
context.
Parameters
----------
count : int
Number of bytes of memory to allocate
ctype : _ctypes.SimpleType, optional
ctypes type to cast returned pointer.
Returns
-------
ptr : ctypes pointer
Pointer to allocated device memory.
"""
ptr = ctypes.c_void_p()
status = _libcudart.cudaMalloc(ctypes.byref(ptr), count)
cudaCheckStatus(status)
if ctype != None:
ptr = ctypes.cast(ptr, ctypes.POINTER(ctype))
return ptr
|
Allocate device memory.
Allocate memory on the device associated with the current active
context.
Parameters
----------
count : int
Number of bytes of memory to allocate
ctype : _ctypes.SimpleType, optional
ctypes type to cast returned pointer.
Returns
-------
ptr : ctypes pointer
Pointer to allocated device memory.
|
entailment
|
def cudaMallocPitch(pitch, rows, cols, elesize):
"""
Allocate pitched device memory.
Allocate pitched memory on the device associated with the current active
context.
Parameters
----------
pitch : int
Pitch for allocation.
rows : int
Requested pitched allocation height.
cols : int
Requested pitched allocation width.
elesize : int
Size of memory element.
Returns
-------
ptr : ctypes pointer
Pointer to allocated device memory.
"""
ptr = ctypes.c_void_p()
status = _libcudart.cudaMallocPitch(ctypes.byref(ptr),
ctypes.c_size_t(pitch), cols*elesize,
rows)
cudaCheckStatus(status)
return ptr, pitch
|
Allocate pitched device memory.
Allocate pitched memory on the device associated with the current active
context.
Parameters
----------
pitch : int
Pitch for allocation.
rows : int
Requested pitched allocation height.
cols : int
Requested pitched allocation width.
elesize : int
Size of memory element.
Returns
-------
ptr : ctypes pointer
Pointer to allocated device memory.
|
entailment
|
def cudaMemcpy_htod(dst, src, count):
"""
Copy memory from host to device.
Copy data from host memory to device memory.
Parameters
----------
dst : ctypes pointer
Device memory pointer.
src : ctypes pointer
Host memory pointer.
count : int
Number of bytes to copy.
"""
status = _libcudart.cudaMemcpy(dst, src,
ctypes.c_size_t(count),
cudaMemcpyHostToDevice)
cudaCheckStatus(status)
|
Copy memory from host to device.
Copy data from host memory to device memory.
Parameters
----------
dst : ctypes pointer
Device memory pointer.
src : ctypes pointer
Host memory pointer.
count : int
Number of bytes to copy.
|
entailment
|
def cudaMemcpy_dtoh(dst, src, count):
"""
Copy memory from device to host.
Copy data from device memory to host memory.
Parameters
----------
dst : ctypes pointer
Host memory pointer.
src : ctypes pointer
Device memory pointer.
count : int
Number of bytes to copy.
"""
status = _libcudart.cudaMemcpy(dst, src,
ctypes.c_size_t(count),
cudaMemcpyDeviceToHost)
cudaCheckStatus(status)
|
Copy memory from device to host.
Copy data from device memory to host memory.
Parameters
----------
dst : ctypes pointer
Host memory pointer.
src : ctypes pointer
Device memory pointer.
count : int
Number of bytes to copy.
|
entailment
|
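Putting the three wrappers together, a minimal round trip might look like the sketch below; it assumes a CUDA-capable machine with `_libcudart` already loaded by the surrounding module, and omits freeing the allocation (the matching `cudaFree` wrapper is not shown in this excerpt):
import ctypes
import numpy as np

host_in = np.arange(8, dtype=np.float32)
host_out = np.empty_like(host_in)
nbytes = host_in.nbytes

dev_ptr = cudaMalloc(nbytes)
cudaMemcpy_htod(dev_ptr, host_in.ctypes.data_as(ctypes.c_void_p), nbytes)
cudaMemcpy_dtoh(host_out.ctypes.data_as(ctypes.c_void_p), dev_ptr, nbytes)
print(np.array_equal(host_in, host_out))   # True if both copies succeeded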
def cudaMemGetInfo():
"""
Return the amount of free and total device memory.
Returns
-------
free : long
Free memory in bytes.
total : long
Total memory in bytes.
"""
free = ctypes.c_size_t()
total = ctypes.c_size_t()
status = _libcudart.cudaMemGetInfo(ctypes.byref(free),
ctypes.byref(total))
cudaCheckStatus(status)
return free.value, total.value
|
Return the amount of free and total device memory.
Returns
-------
free : long
Free memory in bytes.
total : long
Total memory in bytes.
|
entailment
|
def cudaGetDevice():
"""
Get current CUDA device.
Return the identifying number of the device currently used to
process CUDA operations.
Returns
-------
dev : int
Device number.
"""
dev = ctypes.c_int()
status = _libcudart.cudaGetDevice(ctypes.byref(dev))
cudaCheckStatus(status)
return dev.value
|
Get current CUDA device.
Return the identifying number of the device currently used to
process CUDA operations.
Returns
-------
dev : int
Device number.
|
entailment
|
def cudaDriverGetVersion():
"""
Get installed CUDA driver version.
Return the version of the installed CUDA driver as an integer. If
no driver is detected, 0 is returned.
Returns
-------
version : int
Driver version.
"""
version = ctypes.c_int()
status = _libcudart.cudaDriverGetVersion(ctypes.byref(version))
cudaCheckStatus(status)
return version.value
|
Get installed CUDA driver version.
Return the version of the installed CUDA driver as an integer. If
no driver is detected, 0 is returned.
Returns
-------
version : int
Driver version.
|
entailment
|
def cudaPointerGetAttributes(ptr):
"""
Get memory pointer attributes.
Returns attributes of the specified pointer.
Parameters
----------
ptr : ctypes pointer
Memory pointer to examine.
Returns
-------
memory_type : int
Memory type; 1 indicates host memory, 2 indicates device
memory.
device : int
Number of device associated with pointer.
Notes
-----
This function only works with CUDA 4.0 and later.
"""
attributes = cudaPointerAttributes()
status = \
_libcudart.cudaPointerGetAttributes(ctypes.byref(attributes), ptr)
cudaCheckStatus(status)
return attributes.memoryType, attributes.device
|
Get memory pointer attributes.
Returns attributes of the specified pointer.
Parameters
----------
ptr : ctypes pointer
Memory pointer to examine.
Returns
-------
memory_type : int
Memory type; 1 indicates host memory, 2 indicates device
memory.
device : int
Number of device associated with pointer.
Notes
-----
This function only works with CUDA 4.0 and later.
|
entailment
|
def eval(thunk, env):
"""Evaluate a thunk in an environment.
Will defer the actual evaluation to the thunk itself, but adds two things:
caching and recursion detection.
Since we have to use a global evaluation stack (because there is a variety of functions that may
be invoked, not just eval() but also __getitem__, and not all of them can pass along a context
object), GCL evaluation is not thread safe.
With regard to schemas:
- A schema can be passed in from outside. The returned object will be validated to see that it
conforms to the schema. The schema will be attached to the value if possible.
- Some objects may contain their own schema, such as tuples. This would be out of scope of the
eval() function, were it not for:
- Schema validation can be disabled in an evaluation call stack. This is useful if we're
evaluating a tuple only for its schema information. At that point, we're not interested if the
object is value-complete.
"""
key = Activation.key(thunk, env)
if Activation.activated(key):
raise exceptions.RecursionError('Reference cycle')
with Activation(key):
return eval_cache.get(key, thunk.eval, env)
|
Evaluate a thunk in an environment.
Will defer the actual evaluation to the thunk itself, but adds two things:
caching and recursion detection.
Since we have to use a global evaluation stack (because there is a variety of functions that may
be invoked, not just eval() but also __getitem__, and not all of them can pass along a context
object), GCL evaluation is not thread safe.
With regard to schemas:
- A schema can be passed in from outside. The returned object will be validated to see that it
conforms to the schema. The schema will be attached to the value if possible.
- Some objects may contain their own schema, such as tuples. This would be out of scope of the
eval() function, were it not for:
- Schema validation can be disabled in an evaluation call stack. This is useful if we're
evaluating a tuple only for its schema information. At that point, we're not interested if the
object is value-complete.
|
entailment
|
def get_node(self, key):
"""Delegate to our current "value provider" for the node belonging to this key."""
if key in self.names:
return self.values.get_member_node(key) if hasattr(self.values, 'get_member_node') else None
return self.parent.get_node(key)
|
Delegate to our current "value provider" for the node belonging to this key.
|
entailment
|
def create_table(cls):
"""
create_table
Manually create a temporary table for model in test data base.
:return:
"""
schema_editor = getattr(connection, 'schema_editor', None)
if schema_editor:
with schema_editor() as schema_editor:
schema_editor.create_model(cls)
else:
raw_sql, _ = connection.creation.sql_create_model(
cls,
no_style(),
[])
cls.delete_table()
cursor = connection.cursor()
try:
cursor.execute(*raw_sql)
finally:
cursor.close()
|
create_table
Manually create a temporary table for model in test data base.
:return:
|
entailment
|
def delete_table(cls):
"""
delete_table
Manually delete a temporary table for model in test data base.
:return:
"""
schema_editor = getattr(connection, 'schema_editor', None)
if schema_editor:
with connection.schema_editor() as schema_editor:
schema_editor.delete_model(cls)
else:
cursor = connection.cursor()
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'unknown table')
cursor.execute('DROP TABLE IF EXISTS {0}'.format(cls._meta.db_table))
finally:
cursor.close()
|
delete_table
Manually delete a temporary table for model in test data base.
:return:
|
entailment
|
def fake_me(cls, source):
"""
fake_me
Class or method decorator
Class decorator: create temporary table for all tests in SimpleTestCase.
Method decorator: create temporary model only for given test method.
:param source: SimpleTestCase or test function
:return:
"""
if source and type(source) == type and issubclass(source, SimpleTestCase):
return cls._class_extension(source)
elif hasattr(source, '__call__'):
return cls._decorator(source)
else:
raise AttributeError('source - must be a SimpleTestCase subclass or a function')
|
fake_me
Class or method decorator
Class decorator: create temporary table for all tests in SimpleTestCase.
Method decorator: create temporary model only for given test method.
:param source: SimpleTestCase or test function
:return:
|
entailment
|
def vcr(decorated_func=None, debug=False, overwrite=False, disabled=False,
playback_only=False, tape_name=None):
"""
Decorator for capturing and simulating network communication
``debug`` : bool, optional
Enables debug mode.
``overwrite`` : bool, optional
Will run vcr in recording mode - overwrites any existing vcrtapes.
``playback_only`` : bool, optional
Will run vcr in playback mode - will not create missing vcrtapes.
``disabled`` : bool, optional
Completely disables vcr - same effect as removing the decorator.
``tape_name`` : str, optional
Use given custom file name instead of an auto-generated name for the
tape file.
"""
def _vcr_outer(func):
"""
Wrapper around _vcr_inner allowing optional arguments on decorator
"""
def _vcr_inner(*args, **kwargs):
"""
The actual decorator doing a lot of monkey patching and auto magic
"""
if disabled or VCRSystem.disabled:
# execute decorated function without VCR
return func(*args, **kwargs)
# prepare VCR tape
if func.__module__ == 'doctest':
source_filename = func.__self__._dt_test.filename
file_name = os.path.splitext(
os.path.basename(source_filename))[0]
# check if a tests directory exists
path = os.path.join(os.path.dirname(source_filename),
'tests')
if os.path.exists(path):
# ./test/vcrtapes/tape_name.vcr
path = os.path.join(os.path.dirname(source_filename),
'tests', 'vcrtapes')
else:
# ./vcrtapes/tape_name.vcr
path = os.path.join(os.path.dirname(source_filename),
'vcrtapes')
func_name = func.__self__._dt_test.name.split('.')[-1]
else:
source_filename = func.__code__.co_filename
file_name = os.path.splitext(
os.path.basename(source_filename))[0]
path = os.path.join(
os.path.dirname(source_filename), 'vcrtapes')
func_name = func.__name__
if tape_name:
# tape file name is given - either full path is given or use
# 'vcrtapes' directory
if os.sep in tape_name:
temp = os.path.abspath(tape_name)
path = os.path.dirname(temp)
if not os.path.isdir(path):
os.makedirs(path)
tape = os.path.join(path, '%s' % (tape_name))
else:
# make sure 'vcrtapes' directory exists
if not os.path.isdir(path):
os.makedirs(path)
# auto-generated file name
tape = os.path.join(path, '%s.%s.vcr' % (file_name, func_name))
# enable VCR
with VCRSystem(debug=debug):
# check for tape file and determine mode
if not (playback_only or VCRSystem.playback_only) and (
not os.path.isfile(tape) or
overwrite or VCRSystem.overwrite):
# record mode
if PY2:
msg = 'VCR records only in PY3 to be backward ' + \
'compatible with PY2 - skipping VCR ' + \
'mechanics for %s'
warnings.warn(msg % (func.__name__))
# disable VCR
VCRSystem.stop()
# execute decorated function without VCR
return func(*args, **kwargs)
if VCRSystem.debug:
print('\nVCR RECORDING (%s) ...' % (func_name))
VCRSystem.status = VCR_RECORD
# execute decorated function
value = func(*args, **kwargs)
# check if vcr is actually used at all
if len(VCRSystem.playlist) == 0:
msg = 'no socket activity - @vcr unneeded for %s'
msg = msg % (func.__name__)
if VCRSystem.raise_if_not_needed:
raise Exception(msg)
else:
warnings.warn(msg)
else:
# remove existing tape
try:
os.remove(tape)
except OSError:
pass
# write playlist to file
with gzip.open(tape, 'wb') as fh:
pickle.dump(VCRSystem.playlist, fh, protocol=2)
else:
# playback mode
if VCRSystem.debug:
print('\nVCR PLAYBACK (%s) ...' % (func_name))
VCRSystem.status = VCR_PLAYBACK
# if playback is requested and tape is missing: raise!
if not os.path.exists(tape):
msg = 'Missing VCR tape file for playback: {}'
raise IOError(msg.format(tape))
# load playlist
try:
with gzip.open(tape, 'rb') as fh:
VCRSystem.playlist = pickle.load(fh)
except OSError:
# support for older uncompressed tapes
with open(tape, 'rb') as fh:
VCRSystem.playlist = pickle.load(fh)
if VCRSystem.debug:
print('Loaded playlist:')
for i, item in enumerate(VCRSystem.playlist):
print('{:3d}: {} {} {}'.format(i, *item))
print()
# execute decorated function
value = func(*args, **kwargs)
return value
return _vcr_inner
if decorated_func is None:
# without arguments
return _vcr_outer
else:
# with arguments
return _vcr_outer(decorated_func)
|
Decorator for capturing and simulating network communication
``debug`` : bool, optional
Enables debug mode.
``overwrite`` : bool, optional
Will run vcr in recording mode - overwrites any existing vcrtapes.
``playback_only`` : bool, optional
Will run vcr in playback mode - will not create missing vcrtapes.
``disabled`` : bool, optional
Completely disables vcr - same effect as removing the decorator.
``tape_name`` : str, optional
Use given custom file name instead of an auto-generated name for the
tape file.
|
entailment
|
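A hypothetical test decorated with the `vcr` decorator above; the tape name and URL are illustrative, and recording (as opposed to playback) only happens on Python 3 per the code:
from urllib.request import urlopen

@vcr(tape_name='front_page.vcr')
def test_front_page():
    # First run records socket traffic into vcrtapes/front_page.vcr;
    # subsequent runs replay it without touching the network.
    body = urlopen('https://example.com/').read()
    assert b'Example' in body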
def reset(cls):
"""
Reset to default settings
"""
cls.debug = False
cls.disabled = False
cls.overwrite = False
cls.playback_only = False
cls.recv_timeout = 5
cls.recv_endmarkers = []
cls.recv_size = None
|
Reset to default settings
|
entailment
|
def to_python(value, seen=None):
"""Reify values to their Python equivalents.
Does recursion detection, failing when that happens.
"""
seen = seen or set()
if isinstance(value, framework.TupleLike):
if value.ident in seen:
raise RecursionException('to_python: infinite recursion while evaluating %r' % value)
new_seen = seen.union([value.ident])
return {k: to_python(value[k], seen=new_seen) for k in value.exportable_keys()}
if isinstance(value, dict):
return {k: to_python(value[k], seen=seen) for k in value.keys()}
if isinstance(value, list):
return [to_python(x, seen=seen) for x in value]
return value
|
Reify values to their Python equivalents.
Does recursion detection, failing when that happens.
|
entailment
|
def walk(value, walker, path=None, seen=None):
"""Walks the _evaluated_ tree of the given GCL tuple.
The appropriate methods of walker will be invoked for every element in the
tree.
"""
seen = seen or set()
path = path or []
# Recursion
if id(value) in seen:
walker.visitRecursion(path)
return
# Error
if isinstance(value, Exception):
walker.visitError(path, value)
return
# List
if isinstance(value, list):
# Not actually a tuple, but okay
recurse = walker.enterList(value, path)
if not recurse: return
next_walker = walker if recurse is True else recurse
with TempSetAdd(seen, id(value)):
for i, x in enumerate(value):
walk(x, next_walker, path=path + ['[%d]' % i], seen=seen)
walker.leaveList(value, path)
return
# Scalar
if not isinstance(value, framework.TupleLike):
walker.visitScalar(path, value)
return
# Tuple
recurse = walker.enterTuple(value, path)
if not recurse: return
next_walker = walker if recurse is True else recurse
with TempSetAdd(seen, id(value)):
keys = sorted(value.keys())
for key in keys:
key_path = path + [key]
elm = get_or_error(value, key)
walk(elm, next_walker, path=key_path, seen=seen)
walker.leaveTuple(value, path)
|
Walks the _evaluated_ tree of the given GCL tuple.
The appropriate methods of walker will be invoked for every element in the
tree.
|
entailment
|
def fingerprint(value):
"""Return a hash value that uniquely identifies the GCL value."""
h = hashlib.sha256()
_digest(value, h)
return h.digest().encode('hex')
|
Return a hash value that uniquely identifies the GCL value.
|
entailment
|
def compact_error(err):
"""Return the the last 2 error messages from an error stack.
These error messages turns out to be the most descriptive.
"""
def err2(e):
if isinstance(e, exceptions.EvaluationError) and e.inner:
message, i = err2(e.inner)
if i == 1:
return ', '.join([e.args[0], str(e.inner)]), i + 1
else:
return message, i + 1
else:
return str(e), 1
return err2(err)[0]
|
Return the last 2 error messages from an error stack.
These error messages turn out to be the most descriptive.
|
entailment
|
def backprop(self, input_data, targets,
cache=None):
""" Backpropagate through the logistic layer.
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
targets : ``GPUArray``
The target values of the units.
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
"""
if cache is not None:
activations = cache
else:
activations = self.feed_forward(input_data, prediction=False)
if activations.shape != targets.shape:
raise ValueError('Activations (shape = %s) and targets (shape = %s) are different sizes' %
(activations.shape, targets.shape))
delta = substract_matrix(activations, targets)
nan_to_zeros(delta, delta)
# Gradient wrt weights
df_W = linalg.dot(input_data, delta, transa='T')
# Gradient wrt bias
df_b = matrix_sum_out_axis(delta, 0)
# Gradient wrt input
df_input = linalg.dot(delta, self.W, transb='T')
# L1 penalty
if self.l1_penalty_weight:
df_W += self.l1_penalty_weight * sign(self.W)
# L2 penalty
if self.l2_penalty_weight:
df_W += self.l2_penalty_weight * self.W
return (df_W, df_b), df_input
|
Backpropagate through the logistic layer.
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
targets : ``GPUArray``
The target values of the units.
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
|
entailment
|
def cross_entropy_error(self, input_data, targets, average=True,
cache=None, prediction=False):
""" Return the cross entropy error
"""
if cache is not None:
activations = cache
else:
activations = \
self.feed_forward(input_data, prediction=prediction)
loss = cross_entropy_logistic(activations, targets)
if average: loss /= targets.shape[0]
# assert np.isfinite(loss)
return loss.get()
|
Return the cross entropy error
|
entailment
|
def stylize_comment_block(lines):
"""Parse comment lines and make subsequent indented lines into a code block
block.
"""
normal, sep, in_code = range(3)
state = normal
for line in lines:
indented = line.startswith(' ')
empty_line = line.strip() == ''
if state == normal and empty_line:
state = sep
elif state in [sep, normal] and indented:
yield ''
if indented:
yield '.. code-block:: javascript'
yield ''
yield line
state = in_code
else:
state = normal
elif state == sep and not empty_line:
yield ''
yield line
state = normal
else:
yield line
if state == in_code and not (indented or empty_line):
state = normal
|
Parse comment lines and make subsequent indented lines into a code block.
|
entailment
|
def sort_members(tup, names):
"""Return two pairs of members, scalar and tuple members.
The scalars will be sorted s.t. the unbound members are at the top.
"""
scalars, tuples = partition(lambda x: not is_tuple_node(tup.member[x].value), names)
unbound, bound = partition(lambda x: tup.member[x].value.is_unbound(), scalars)
return usorted(unbound) + usorted(bound), usorted(tuples)
|
Return two pairs of members, scalar and tuple members.
The scalars will be sorted s.t. the unbound members are at the top.
|
entailment
|
def resolve_file(fname, paths):
"""Resolve filename relatively against one of the given paths, if possible."""
fpath = path.abspath(fname)
for p in paths:
spath = path.abspath(p)
if fpath.startswith(spath):
return fpath[len(spath) + 1:]
return fname
|
Resolve filename relatively against one of the given paths, if possible.
|
entailment
|
def generate(self):
"""Generate a list of strings representing the table in RST format."""
header = ' '.join('=' * self.width[i] for i in range(self.w))
lines = [
' '.join(row[i].ljust(self.width[i]) for i in range(self.w))
for row in self.rows]
return [header] + lines + [header]
|
Generate a list of strings representing the table in RST format.
|
entailment
|
def partition(pred, iterable):
'Use a predicate to partition entries into false entries and true entries'
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = itertools.tee(iterable)
return list(filter(negate(pred), t1)), list(filter(pred, t2))
|
Use a predicate to partition entries into false entries and true entries
|
entailment
|
def select(self, model):
"""Select nodes according to the input selector.
This can ALWAYS return multiple root elements.
"""
res = []
def doSelect(value, pre, remaining):
if not remaining:
res.append((pre, value))
else:
# For the other selectors to work, value must be a Tuple or a list at this point.
if not is_tuple(value) and not isinstance(value, list):
return
qhead, qtail = remaining[0], remaining[1:]
if isinstance(qhead, tuple) and is_tuple(value):
for alt in qhead:
if alt in value:
doSelect(value[alt], pre + [alt], qtail)
elif qhead == '*':
if isinstance(value, list):
indices = range(len(value))
reprs = [listKey(i) for i in indices]
else:
indices = value.keys()
reprs = indices
for key, rep in zip(indices, reprs):
doSelect(value[key], pre + [rep], qtail)
elif isinstance(qhead, int) and isinstance(value, list):
doSelect(value[qhead], pre + [listKey(qhead)], qtail)
elif is_tuple(value):
if qhead in value:
doSelect(value[qhead], pre + [qhead], qtail)
for selector in self.selectors:
doSelect(model, [], selector)
return QueryResult(res)
|
Select nodes according to the input selector.
This can ALWAYS return multiple root elements.
|
entailment
|
def deep(self):
"""Return a deep dict of the values selected.
The leaf values may still be gcl Tuples. Use util.to_python() if you want
to reify everything to real Python values.
"""
self.lists = {}
ret = {}
for path, value in self.paths_values():
self.recursiveSet(ret, path, value)
self.removeMissingValuesFromLists()
return ret
|
Return a deep dict of the values selected.
The leaf values may still be gcl Tuples. Use util.to_python() if you want
to reify everything to real Python values.
|
entailment
|
def ldSet(self, what, key, value):
"""List/dictionary-aware set."""
if isListKey(key):
# Make sure we keep the indexes consistent, insert missing_values
# as necessary. We do remember the lists, so that we can remove
# missing values after inserting all values from all selectors.
self.lists[id(what)] = what
ix = listKeyIndex(key)
while len(what) <= ix:
what.append(missing_value)
what[ix] = value
else:
what[key] = value
return value
|
List/dictionary-aware set.
|
entailment
|
def ldGet(self, what, key):
"""List-aware get."""
if isListKey(key):
return what[listKeyIndex(key)]
else:
return what[key]
|
List-aware get.
|
entailment
|
def ldContains(self, what, key):
List/dictionary/missing-aware contains.
If the value is a "missing_value", we'll treat it as non-existent
so it will be overwritten by an empty list/dict when necessary to
assign child keys.
"""
if isListKey(key):
i = listKeyIndex(key)
return i < len(what) and what[i] != missing_value
else:
return key in what and what[key] != missing_value
|
List/dictionary/missing-aware contains.
If the value is a "missing_value", we'll treat it as non-existent
so it will be overwritten by an empty list/dict when necessary to
assign child keys.
|
entailment
|
def find_recursive_dependency(self):
"""Return a list of nodes that have a recursive dependency."""
nodes_on_path = []
def helper(nodes):
for node in nodes:
cycle = node in nodes_on_path
nodes_on_path.append(node)
if cycle or helper(self.deps.get(node, [])):
return True
nodes_on_path.pop()
return False
helper(self.unordered)
return nodes_on_path
|
Return a list of nodes that have a recursive dependency.
|
entailment
|
def enterTuple(self, tuple, path):
"""Called for every tuple.
If this returns False, the elements of the tuple will not be recursed over
and leaveTuple() will not be called.
"""
if skip_name(path):
return False
node = Node(path, tuple)
if self.condition.matches(node):
self.unordered.append(node)
return False
return True
|
Called for every tuple.
If this returns False, the elements of the tuple will not be recursed over
and leaveTuple() will not be called.
|
entailment
|
def convertAndMake(converter, handler):
"""Convert with location."""
def convertAction(loc, value):
return handler(loc, converter(value))
return convertAction
|
Convert with location.
|
entailment
|
def mkApplications(location, *atoms):
"""Make a sequence of applications from a list of tokens.
atoms is a list of atoms, which will be handled left-associatively. E.g:
['foo', [], []] == foo()() ==> Application(Application('foo', []), [])
"""
atoms = list(atoms)
while len(atoms) > 1:
atoms[0:2] = [Application(location, atoms[0], atoms[1])]
# Nothing left to apply
return atoms[0]
|
Make a sequence of applications from a list of tokens.
atoms is a list of atoms, which will be handled left-associatively. E.g:
['foo', [], []] == foo()() ==> Application(Application('foo', []), [])
|
entailment
|
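A worked example of the left-associative folding, with a simplified Application stand-in that only records its two operands (the real node also carries a source location):
class App:
    def __init__(self, fn, arg):
        self.fn, self.arg = fn, arg
    def __repr__(self):
        return 'App(%r, %r)' % (self.fn, self.arg)
def mk_applications_sketch(*atoms):
    atoms = list(atoms)
    while len(atoms) > 1:
        atoms[0:2] = [App(atoms[0], atoms[1])]
    return atoms[0]
print(mk_applications_sketch('foo', [], []))   # App(App('foo', []), [])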
def call_fn(fn, arglist, env):
"""Call a function, respecting all the various types of functions that exist."""
if isinstance(fn, framework.LazyFunction):
# The following looks complicated, but this is necessary because you can't
# construct closures over the loop variable directly.
thunks = [(lambda thunk: lambda: framework.eval(thunk, env))(th) for th in arglist.values]
return fn(*thunks)
evaled_args = framework.eval(arglist, env)
if isinstance(fn, framework.EnvironmentFunction):
return fn(*evaled_args, env=env)
return fn(*evaled_args)
|
Call a function, respecting all the various types of functions that exist.
|
entailment
|
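The double lambda in call_fn works around Python's late-binding closures; a self-contained sketch of the pitfall it avoids (no gcl machinery involved):
# Naive: every thunk sees the final value of the loop variable.
naive = [lambda: th for th in [1, 2, 3]]
print([f() for f in naive])                    # [3, 3, 3]
# Routing the value through an outer lambda captures it per iteration,
# which is what call_fn does with framework.eval.
bound = [(lambda t: lambda: t)(th) for th in [1, 2, 3]]
print([f() for f in bound])                    # [1, 2, 3]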
def schema_spec_from_tuple(tup):
"""Return the schema spec from a run-time tuple."""
if hasattr(tup, 'get_schema_spec'):
# Tuples have a TupleSchema field that contains a model of the schema
return schema.from_spec({
'fields': TupleSchemaAccess(tup),
'required': tup.get_required_fields()})
return schema.AnySchema()
|
Return the schema spec from a run-time tuple.
|
entailment
|
def make_schema_from(value, env):
"""Make a Schema object from the given spec.
The input and output types of this function are super unclear, and are held together by ponies,
wishes, duct tape, and a load of tests. See the comments for horrific entertainment.
"""
# So this thing may not need to evaluate anything[0]
if isinstance(value, framework.Thunk):
value = framework.eval(value, env)
# We're a bit messy. In general, this has evaluated to a Schema object, but not necessarily:
# for tuples and lists, we still need to treat the objects as specs.
if isinstance(value, schema.Schema):
return value
if framework.is_tuple(value):
# If it so happens that the thing is a tuple, we need to pass in the data in a bit of a
# different way into the schema factory (in a dictionary with {fields, required} keys).
return schema_spec_from_tuple(value)
if framework.is_list(value):
# [0] This list may contain tuples, which oughta be treated as specs, or already-resolved schema
# objects (as returned by 'int' and 'string' literals). make_schema_from
# deals with both.
return schema.from_spec([make_schema_from(x, env) for x in value])
raise exceptions.EvaluationError('Can\'t make a schema from %r' % value)
|
Make a Schema object from the given spec.
The input and output types of this function are super unclear, and are held together by ponies,
wishes, duct tape, and a load of tests. See the comments for horrific entertainment.
|
entailment
|
def bracketedList(l, r, sep, expr, allow_missing_close=False):
"""Parse bracketed list.
Empty list is possible, as is a trailing separator.
"""
# We may need to backtrack for lists, because of list comprehension, but not for
# any of the other lists
strict = l != '['
closer = sym(r) if not allow_missing_close else p.Optional(sym(r))
if strict:
return sym(l) - listMembers(sep, expr) - closer
else:
return sym(l) + listMembers(sep, expr) + closer
|
Parse bracketed list.
Empty list is possible, as is a trailing separator.
|
entailment
|
def unquote(s):
"""Unquote the indicated string."""
# Ignore the left- and rightmost chars (which should be quotes).
# Use the Python engine to decode the escape sequence
i, N = 1, len(s) - 1
ret = []
while i < N:
if s[i] == '\\' and i < N - 1:
ret.append(UNQUOTE_MAP.get(s[i+1], s[i+1]))
i += 2
else:
ret.append(s[i])
i += 1
return ''.join(ret)
|
Unquote the indicated string.
|
entailment
|
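A self-contained sketch of the same decoding loop, with a tiny escape map standing in for the module's UNQUOTE_MAP:
UNQUOTE_MAP = {'n': '\n', 't': '\t', '\\': '\\', '"': '"', "'": "'"}
def unquote_sketch(s):
    i, N = 1, len(s) - 1              # skip the surrounding quote characters
    ret = []
    while i < N:
        if s[i] == '\\' and i < N - 1:
            ret.append(UNQUOTE_MAP.get(s[i + 1], s[i + 1]))
            i += 2
        else:
            ret.append(s[i])
            i += 1
    return ''.join(ret)
print(unquote_sketch('"a\\nb"'))      # prints a and b on separate lines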
def pattern(name, pattern):
"""Function to put a name on a pyparsing pattern.
Just for ease of debugging/tracing parse errors.
"""
pattern.setName(name)
astracing.maybe_trace(pattern)
return pattern
|
Function to put a name on a pyparsing pattern.
Just for ease of debugging/tracing parse errors.
|
entailment
|
def make_grammar(allow_errors):
"""Make the part of the grammar that depends on whether we swallow errors or not."""
if allow_errors in GRAMMAR_CACHE:
return GRAMMAR_CACHE[allow_errors]
tuple = p.Forward()
catch_errors = p.Forward()
catch_errors << (p.Regex('[^{};]*') - p.Optional(tuple) - p.Regex('[^;}]*'))
def swallow_remainder():
if allow_errors:
return pattern('swallow_remainder', p.Suppress(catch_errors))
return p.Empty()
def swallow_errors(rule):
"""Extend the production rule by potentially eating errors.
This does not return a p.NoMatch() because that messes up the error messages.
"""
ret = rule
if allow_errors:
# Synchronize on the first semicolon or the first unbalanced closing curly
ret = rule | pattern('catch_errors', parseWithLocation(p.Suppress(catch_errors), UnparseableNode))
return ret
class Grammar:
keywords = ['and', 'or', 'not', 'if', 'then', 'else', 'include', 'inherit', 'null', 'true', 'false',
'for', 'in']
# This is a hack: this condition helps avoid uselessly recursing into the grammar for
# juxtapositions.
early_abort_scan = ~p.oneOf([';', ',', ']', '}', 'for' ])
expression = pattern('expression', p.Forward())
comment = p.Regex('#') + ~p.FollowedBy(sym('.')) + p.restOfLine
doc_comment = pattern('doc_comment', (sym('#.') - p.restOfLine))
quotedIdentifier = pattern('quotedIdentifier', p.QuotedString('`', multiline=False))
# - Must start with a letter or underscore
# - May contain letters, digits, underscores and special characters such as : and -
# - Must not end in a special character
identifier = pattern('identifier', parseWithLocation(quotedIdentifier | p.Regex(r'[a-zA-Z_]([a-zA-Z0-9_:-]*[a-zA-Z0-9_])?'), Identifier))
# Variable identifier (can't be any of the keywords, which may have lower matching priority)
variable = pattern('variable', ~p.MatchFirst(p.oneOf(keywords)) + pattern('identifier', parseWithLocation(identifier.copy(), Var)))
# Constants
integer = pattern('integer', parseWithLocation(p.Word(p.nums), convertAndMake(int, Literal)))
floating = pattern('floating', parseWithLocation(p.Regex(r'\d*\.\d+'), convertAndMake(float, Literal)))
dq_string = pattern('dq_string', parseWithLocation(p.QuotedString('"', escChar='\\', unquoteResults=False, multiline=True), convertAndMake(unquote, Literal)))
sq_string = pattern('sq_string', parseWithLocation(p.QuotedString("'", escChar='\\', unquoteResults=False, multiline=True), convertAndMake(unquote, Literal)))
boolean = pattern('boolean', parseWithLocation(p.Keyword('true') | p.Keyword('false'), convertAndMake(mkBool, Literal)))
null = pattern('null', parseWithLocation(p.Keyword('null'), Null))
# List
list_ = pattern('list', parseWithLocation(bracketedList('[', ']', ',', expression), List))
# Tuple
inherit = pattern('inherit', (kw('inherit') - p.ZeroOrMore(variable)).setParseAction(inheritNodes))
schema_spec = pattern('schema_spec', parseWithLocation(p.Optional(p.Keyword('private').setParseAction(lambda: True), default=False)
- p.Optional(p.Keyword('required').setParseAction(lambda: True), default=False)
- p.Optional(expression, default=any_schema_expr), MemberSchemaNode))
optional_schema = pattern('optional_schema', p.Optional(p.Suppress(':') - schema_spec, default=no_schema))
expression_value = pattern('expression_value', sym('=') - swallow_errors(expression))
void_value = pattern('void_value', parseWithLocation(p.FollowedBy(sym(';') | sym('}')), lambda loc: Void(loc, 'nonameyet')))
member_value = pattern('member_value', swallow_errors(expression_value | void_value))
named_member = pattern('named_member', parseWithLocation(identifier - optional_schema - member_value - swallow_remainder(), TupleMemberNode))
documented_member = pattern('documented_member', parseWithLocation(parseWithLocation(p.ZeroOrMore(doc_comment), DocComment) + named_member, attach_doc_comment))
tuple_member = early_abort_scan + pattern('tuple_member', swallow_errors(inherit | documented_member) - swallow_remainder())
ErrorAwareTupleNode = functools.partial(TupleNode, allow_errors)
tuple_members = pattern('tuple_members', parseWithLocation(listMembers(';', tuple_member), ErrorAwareTupleNode))
tuple << pattern('tuple', parseWithLocation(bracketedList('{', '}', ';', tuple_member, allow_missing_close=allow_errors), ErrorAwareTupleNode))
# Argument list will live by itself as an atom. Actually, it's a tuple, but we
# don't call it that because we use that term for something else already :)
arg_list = pattern('arg_list', bracketedList('(', ')', ',', expression).setParseAction(ArgList))
parenthesized_expr = pattern('parenthesized_expr', (sym('(') - expression - ')').setParseAction(head))
unary_op = pattern('unary_op', (p.oneOf(' '.join(functions.unary_operators.keys())) - expression).setParseAction(mkUnOp))
if_then_else = pattern('if_then_else', parseWithLocation(kw('if') + expression +
kw('then') + expression +
kw('else') + expression, Condition))
list_comprehension = pattern('list_comprehension', parseWithLocation(sym('[') + expression + kw('for') + variable + kw('in') +
expression + p.Optional(kw('if') + expression) + sym(']'), ListComprehension))
# We don't allow space-application here
# Now our grammar is becoming very dirty and hackish
deref = pattern('deref', p.Forward())
include = pattern('include', parseWithLocation(kw('include') - deref, Include))
atom = pattern('atom', (tuple
| sq_string
| dq_string
| variable
| floating
| integer
| boolean
| list_
| null
| unary_op
| parenthesized_expr
| if_then_else
| include
| list_comprehension
))
# We have two different forms of function application, so they can have 2
# different precedences. This one: fn(args), which binds stronger than
# dereferencing (fn(args).attr == (fn(args)).attr)
applic1 = pattern('applic1', parseWithLocation(atom - p.ZeroOrMore(arg_list), mkApplications))
# Dereferencing of an expression (obj.bar)
deref << parseWithLocation(applic1 - p.ZeroOrMore(p.Suppress('.') - swallow_errors(identifier)), mkDerefs)
# All binary operators at various precedence levels go here:
# This piece of code does the moral equivalent of:
#
# T = F*F | F/F | F
# E = T+T | T-T | T
#
# etc.
term = deref
for op_level in functions.binary_operators_before_juxtaposition:
operator_syms = list(op_level.keys())
term = (term - p.ZeroOrMore(p.oneOf(operator_syms) - term)).setParseAction(mkBinOps)
# Juxtaposition function application (fn arg), must be 1-arg every time
applic2 = pattern('applic2', parseWithLocation(term - p.ZeroOrMore(early_abort_scan + term), mkApplications))
term = applic2
for op_level in functions.binary_operators_after_juxtaposition:
operator_syms = list(op_level.keys())
term = (term - p.ZeroOrMore(p.oneOf(operator_syms) - term)).setParseAction(mkBinOps)
expression << term
# Two entry points: start at an arbitrary expression, or expect the top-level
# scope to be a tuple.
start = pattern('start', expression.copy().ignore(comment))
start_tuple = tuple_members.ignore(comment)
GRAMMAR_CACHE[allow_errors] = Grammar
return Grammar
|
Make the part of the grammar that depends on whether we swallow errors or not.
|
entailment
|
def reads(s, filename, loader, implicit_tuple, allow_errors):
"""Load but don't evaluate a GCL expression from a string."""
try:
the_context.filename = filename
the_context.loader = loader
grammar = make_grammar(allow_errors=allow_errors)
root = grammar.start_tuple if implicit_tuple else grammar.start
return root.parseWithTabs().parseString(s, parseAll=True)[0]
except (p.ParseException, p.ParseSyntaxException) as e:
loc = SourceLocation(s, find_offset(s, e.lineno, e.col))
raise exceptions.ParseError(the_context.filename, loc, e.msg)
|
Load but don't evaluate a GCL expression from a string.
|
entailment
|
def find_tokens(self, q):
"""Find all AST nodes at the given filename, line and column."""
found_me = []
if hasattr(self, 'location'):
if self.location.contains(q):
found_me = [self]
elif self._found_by(q):
found_me = [self]
cs = [n.find_tokens(q) for n in self._children()]
return found_me + list(itertools.chain(*cs))
|
Find all AST nodes at the given filename, line and column.
|
entailment
|
def _make_tuple(self, env):
"""Instantiate the Tuple based on this TupleNode."""
t = runtime.Tuple(self, env, dict2tuple)
# A tuple also provides its own schema spec
schema = schema_spec_from_tuple(t)
t.attach_schema(schema)
return t
|
Instantiate the Tuple based on this TupleNode.
|
entailment
|
def applyTuple(self, tuple, right, env):
"""Apply a tuple to something else."""
if len(right) != 1:
raise exceptions.EvaluationError('Tuple (%r) can only be applied to one argument, got %r' % (self.left, self.right))
right = right[0]
return tuple(right)
|
Apply a tuple to something else.
|
entailment
|
def applyIndex(self, lst, right):
"""Apply a list to something else."""
if len(right) != 1:
raise exceptions.EvaluationError('%r can only be applied to one argument, got %r' % (self.left, self.right))
right = right[0]
if isinstance(right, int):
return lst[right]
raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, right))
|
Apply a list to something else.
|
entailment
|
def pre_gradient_update(self):
""" First step of Nesterov momentum method:
take step in direction of accumulated gradient
"""
updates = zip(self.velocity, self.model.n_parameters * [1.])
self.model.update_parameters(updates)
|
First step of Nesterov momentum method:
take step in direction of accumulated gradient
|
entailment
|
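A minimal NumPy sketch of the full Nesterov scheme this step belongs to, on a toy quadratic; the look-ahead move below corresponds to pre_gradient_update, the rest to the post-gradient step (illustration only, not the library's API):
import numpy as np
w = np.zeros(3)                   # parameters
v = np.zeros(3)                   # accumulated velocity
lr, mu = 0.1, 0.9
def grad(w):                      # gradient of 0.5 * ||w - 1||**2
    return w - 1.0
for _ in range(100):
    w = w + mu * v                # pre_gradient_update: step along the velocity
    g = grad(w)                   # gradient evaluated at the look-ahead point
    v = mu * v - lr * g           # post-gradient: update the velocity ...
    w = w - lr * g                # ... and finish the parameter update
print(w)                          # close to [1. 1. 1.]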
def class_error(self, input_data, targets, average=True,
cache=None, prediction=False):
""" Return the classification error rate
"""
if cache is not None:
activations = cache
else:
activations = \
self.feed_forward(input_data, prediction=prediction)
targets = targets.get().argmax(1)
class_error = np.sum(activations.get().argmax(1) != targets)
if average: class_error = float(class_error) / targets.shape[0]
return class_error
|
Return the classification error rate
|
entailment
|
def kl_error(self, input_data, targets, average=True,
cache=None, prediction=True):
""" The KL divergence error
"""
if cache is not None:
activations = cache
else:
activations = \
self.feed_forward(input_data, prediction=prediction)
targets_non_nan = gpuarray.empty_like(targets)
nan_to_zeros(targets, targets_non_nan)
kl_error = gpuarray.sum(targets_non_nan *
(cumath.log(targets_non_nan + eps) -
cumath.log(activations + eps)))
if average:
kl_error /= targets.shape[0]
return kl_error.get()
|
The KL divergence error
|
entailment
|
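The quantity above is sum_i t_i * (log t_i - log y_i) with NaN targets treated as zero; a NumPy sketch of the same computation (eps and shapes are illustrative):
import numpy as np
eps = 1e-8
targets = np.array([[0.7, 0.3], [np.nan, 1.0]])
activations = np.array([[0.6, 0.4], [0.2, 0.8]])
t = np.nan_to_num(targets)                  # NaNs -> 0, as nan_to_zeros does
kl = np.sum(t * (np.log(t + eps) - np.log(activations + eps)))
kl /= targets.shape[0]                      # average over examples
print(kl)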
def dot(x_gpu, y_gpu, transa='N', transb='N', handle=None, target=None):
"""
Dot product of two arrays.
For 1D arrays, this function computes the inner product. For 2D
arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix
product; the result has shape `(m, n)`.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
y_gpu : pycuda.gpuarray.GPUArray
Input array.
transa : char
If 'T', compute the product of the transpose of `x_gpu`.
If 'C', compute the product of the Hermitian of `x_gpu`.
transb : char
If 'T', compute the product of the transpose of `y_gpu`.
If 'C', compute the product of the Hermitian of `y_gpu`.
handle : int
CUBLAS context. If no context is specified, the default handle from
`scikits.cuda.misc._global_cublas_handle` is used.
Returns
-------
c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128}
Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D
arrays, the result will be returned as a scalar.
Notes
-----
The input matrices must all contain elements of the same data type.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> import misc
>>> linalg.init()
>>> a = np.asarray(np.random.rand(4, 2), np.float32)
>>> b = np.asarray(np.random.rand(2, 2), np.float32)
>>> a_gpu = gpuarray.to_gpu(a)
>>> b_gpu = gpuarray.to_gpu(b)
>>> c_gpu = linalg.dot(a_gpu, b_gpu)
>>> np.allclose(np.dot(a, b), c_gpu.get())
True
>>> d = np.asarray(np.random.rand(5), np.float32)
>>> e = np.asarray(np.random.rand(5), np.float32)
>>> d_gpu = gpuarray.to_gpu(d)
>>> e_gpu = gpuarray.to_gpu(e)
>>> f = linalg.dot(d_gpu, e_gpu)
>>> np.allclose(np.dot(d, e), f)
True
"""
if handle is None:
handle = _global_cublas_handle
if len(x_gpu.shape) == 1 and len(y_gpu.shape) == 1:
if x_gpu.size != y_gpu.size:
raise ValueError('arrays must be of same length: '
'x_gpu.size = %d, y_gpu.size = %d' %
(x_gpu.size, y_gpu.size))
# Compute inner product for 1D arrays:
if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64):
cublas_func = cublas.cublasCdotu
elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32):
cublas_func = cublas.cublasSdot
elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128):
cublas_func = cublas.cublasZdotu
elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64):
cublas_func = cublas.cublasDdot
else:
raise ValueError('unsupported combination of input types: '
'x_gpu.dtype = %s, y_gpu.dtype = %s' %
(str(x_gpu.dtype), str(y_gpu.dtype)))
return cublas_func(handle, x_gpu.size, x_gpu.gpudata, 1,
y_gpu.gpudata, 1)
else:
# Get the shapes of the arguments (accounting for the
# possibility that one of them may only have one dimension):
x_shape = x_gpu.shape
y_shape = y_gpu.shape
if len(x_shape) == 1:
x_shape = (1, x_shape[0])
if len(y_shape) == 1:
y_shape = (1, y_shape[0])
# Perform matrix multiplication for 2D arrays:
if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64):
cublas_func = cublas.cublasCgemm
alpha = np.complex64(1.0)
beta = np.complex64(0.0)
elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32):
cublas_func = cublas.cublasSgemm
alpha = np.float32(1.0)
beta = np.float32(0.0)
elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128):
cublas_func = cublas.cublasZgemm
alpha = np.complex128(1.0)
beta = np.complex128(0.0)
elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64):
cublas_func = cublas.cublasDgemm
alpha = np.float64(1.0)
beta = np.float64(0.0)
else:
raise ValueError('unsupported combination of input types: '
'x_gpu.dtype = %s, y_gpu.dtype = %s' %
(str(x_gpu.dtype), str(y_gpu.dtype)))
transa = lower(transa)
transb = lower(transb)
if transb in ['t', 'c']:
m, k = y_shape
elif transb in ['n']:
k, m = y_shape
else:
raise ValueError('invalid value "%s" for transb' % transb)
if transa in ['t', 'c']:
l, n = x_shape
elif transa in ['n']:
n, l = x_shape
else:
raise ValueError('invalid value "%s" for transa' % transa)
if l != k:
raise ValueError('objects are not aligned: x_shape = %s, y_shape = %s' %
(x_shape, y_shape))
if transb == 'n':
lda = max(1, m)
else:
lda = max(1, k)
if transa == 'n':
ldb = max(1, k)
else:
ldb = max(1, n)
ldc = max(1, m)
# Note that the desired shape of the output matrix is the transpose
# of what CUBLAS assumes:
if target is None:
target = gpuarray.empty((n, ldc), x_gpu.dtype, allocator=memory_pool.allocate)
cublas_func(handle, transb, transa, m, n, k, alpha, y_gpu.gpudata,
lda, x_gpu.gpudata, ldb, beta, target.gpudata, ldc)
return target
|
Dot product of two arrays.
For 1D arrays, this function computes the inner product. For 2D
arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix
product; the result has shape `(m, n)`.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
y_gpu : pycuda.gpuarray.GPUArray
Input array.
transa : char
If 'T', compute the product of the transpose of `x_gpu`.
If 'C', compute the product of the Hermitian of `x_gpu`.
transb : char
If 'T', compute the product of the transpose of `y_gpu`.
If 'C', compute the product of the Hermitian of `y_gpu`.
handle : int
CUBLAS context. If no context is specified, the default handle from
`scikits.cuda.misc._global_cublas_handle` is used.
Returns
-------
c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128}
Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D
arrays, the result will be returned as a scalar.
Notes
-----
The input matrices must all contain elements of the same data type.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> import misc
>>> linalg.init()
>>> a = np.asarray(np.random.rand(4, 2), np.float32)
>>> b = np.asarray(np.random.rand(2, 2), np.float32)
>>> a_gpu = gpuarray.to_gpu(a)
>>> b_gpu = gpuarray.to_gpu(b)
>>> c_gpu = linalg.dot(a_gpu, b_gpu)
>>> np.allclose(np.dot(a, b), c_gpu.get())
True
>>> d = np.asarray(np.random.rand(5), np.float32)
>>> e = np.asarray(np.random.rand(5), np.float32)
>>> d_gpu = gpuarray.to_gpu(d)
>>> e_gpu = gpuarray.to_gpu(e)
>>> f = linalg.dot(d_gpu, e_gpu)
>>> np.allclose(np.dot(d, e), f)
True
|
entailment
|
def make_tempfile(data=None):
"Create a temp file, write our PID into it."
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp:
temp.write(six.text_type(data if data is not None else os.getpid()))
return temp.name
|
Create a temp file and write the given data (or our PID if none is given) into it.
|
entailment
|
def parameters(self):
"""Return a list where each element contains the parameters for a task.
"""
parameters = []
for task in self.tasks:
parameters.extend(task.parameters)
return parameters
|
Return a list where each element contains the parameters for a task.
|
entailment
|
def parameters(self, value):
"""Update the parameters.
``value`` must be a flat list/tuple containing the parameters of
all tasks in order; its length must equal the total number of
parameters of the layer.
"""
assert len(value) == self.n_parameters
i = 0
for task in self.tasks:
task.parameters = value[i:i + task.n_parameters]
i += task.n_parameters
|
Update the parameters.
``value`` must be a flat list/tuple containing the parameters of
all tasks in order; its length must equal the total number of
parameters of the layer.
|
entailment
|
def feed_forward(self, input_data, prediction=False):
"""Call ``feed_forward`` for each task and combine the activations.
Passes ``input_data`` to all tasks and returns the activations
as a list.
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
prediction : bool, optional
Whether to use prediction model. Only relevant when using
dropout. If true, then weights are multiplied by
1 - dropout if the layer uses dropout.
**Returns:**
activations : list of ``GPUArray``
The activations of the output units, one element for each task.
"""
activations = []
for task in self.tasks:
activations_task = task.feed_forward(input_data, prediction)
activations.append(activations_task)
return activations
|
Call ``feed_forward`` for each task and combine the activations.
Passes ``input_data`` to all tasks and returns the activations
as a list.
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
prediction : bool, optional
Whether to use prediction model. Only relevant when using
dropout. If true, then weights are multiplied by
1 - dropout if the layer uses dropout.
**Returns:**
activations : list of ``GPUArray``
The activations of the output units, one element for each task.
|
entailment
|
def backprop(self, input_data, targets, cache=None):
"""Compute gradients for each task and combine the results.
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
targets : ``GPUArray``
The target values of the units.
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : list
Gradients with respect to the weights and biases for each task
df_input : ``GPUArray``
Gradients with respect to the input, obtained by adding
the gradients with respect to the inputs from each task,
weighted by ``MultitaskTopLayer.task_weights``.
"""
df_input = gpuarray.zeros_like(input_data)
if cache is None: cache = self.n_tasks * [None]
gradients = []
for targets_task, cache_task, task, task_weight in \
izip(targets, cache, self.tasks, self.task_weights):
gradients_task, df_input_task = \
task.backprop(input_data, targets_task,
cache_task)
df_input = df_input.mul_add(1., df_input_task, task_weight)
gradients.extend(gradients_task)
return gradients, df_input
|
Compute gradients for each task and combine the results.
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
targets : ``GPUArray``
The target values of the units.
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : list
Gradients with respect to the weights and biases for each task
df_input : ``GPUArray``
Gradients with respect to the input, obtained by adding
the gradients with respect to the inputs from each task,
weighted by ``MultitaskTopLayer.task_weights``.
|
entailment
|
def cross_entropy_error(self, input_data, targets, average=True,
cache=None, prediction=False,
sum_errors=True):
""" Computes the cross-entropy error for all tasks.
"""
loss = []
if cache is None:
cache = self.n_tasks * [None]
for targets_task, cache_task, task in \
izip(targets, cache, self.tasks):
loss.append(task.cross_entropy_error(
input_data, targets_task, average=average,
cache=cache_task,
prediction=prediction))
if sum_errors:
return sum(loss)
else:
return loss
|
Computes the cross-entropy error for all tasks.
|
entailment
|
def parameters(self, value):
"""Update the parameters. ``value`` must have the shape
``(weights, biases)``"""
self.W = value[0] if isinstance(value[0], GPUArray) else \
gpuarray.to_gpu(value[0])
self.b = value[1] if isinstance(value[1], GPUArray) else \
gpuarray.to_gpu(value[1])
|
Update the parameters. ``value`` must have the shape
``(weights, biases)``
|
entailment
|
def architecture(self):
"""Returns a dictionary describing the architecture of the layer."""
arch = {'class': self.__class__,
'n_in': self.n_in,
'n_units': self.n_units,
'activation_function': self.activation_function
if hasattr(self, 'activation_function') else None}
return arch
|
Returns a dictionary describing the architecture of the layer.
|
entailment
|
def feed_forward(self, input_data, prediction=False):
"""Propagate forward through the layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
prediction : bool, optional
Whether to use prediction model. Only relevant when using
dropout. If true, then weights are multiplied by
1 - dropout if the layer uses dropout.
**Returns:**
activations : ``GPUArray``
The activations of the hidden units.
"""
if input_data.shape[1] != self.W.shape[0]:
raise ValueError('Number of outputs from previous layer (%d) '
'does not match number of inputs to this layer (%d)' %
(input_data.shape[1], self.W.shape[0]))
activations = linalg.dot(input_data, self.W)
activations = add_vec_to_mat(activations, self.b, inplace=True)
self.f(activations)
if self.dropout > 0:
if prediction:
activations *= 1 - self.dropout
else:
dropout_mask = sample_dropout_mask(activations, self.dropout)
return activations, dropout_mask
return (activations,)
|
Propagate forward through the layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
prediction : bool, optional
Whether to use prediction model. Only relevant when using
dropout. If true, then weights are multiplied by
1 - dropout if the layer uses dropout.
**Returns:**
activations : ``GPUArray``
The activations of the hidden units.
|
entailment
|
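In plain NumPy the forward pass above is an affine map followed by the activation function, with dropout handled differently at training and prediction time; a sketch with hypothetical shapes and a ReLU activation:
import numpy as np
rng = np.random.default_rng(0)
X = rng.standard_normal((4, 5))         # minibatch: 4 examples, 5 inputs
W = rng.standard_normal((5, 3))         # 5 inputs -> 3 hidden units
b = np.zeros(3)
dropout = 0.5
a = np.maximum(X @ W + b, 0.0)          # activations
# training: sample a binary mask and zero out dropped units
mask = rng.random(a.shape) > dropout
a_train = a * mask
# prediction: scale the activations by the keep probability instead
a_pred = a * (1 - dropout)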
def backprop(self, input_data, df_output, cache=None):
""" Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
df_output : ``GPUArray``
Gradients with respect to the activations of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
"""
# Get cache if it wasn't provided
if cache is None:
cache = self.feed_forward(input_data,
prediction=False)
if len(cache) == 2:
activations, dropout_mask = cache
else:
activations = cache[0]
dropout_mask = None
# Multiply the binary mask with the incoming gradients
if self.dropout > 0 and dropout_mask is not None:
apply_dropout_mask(df_output, dropout_mask)
# Get gradient wrt activation function
df_activations = self.df(activations)
delta = mult_matrix(df_activations, df_output)
# Gradient wrt weights
df_W = linalg.dot(input_data, delta, transa='T')
# Gradient wrt bias
df_b = matrix_sum_out_axis(delta, 0)
# Gradient wrt inputs
df_input = linalg.dot(delta, self.W, transb='T')
# L1 weight decay
if self.l1_penalty_weight:
df_W += self.l1_penalty_weight * sign(self.W)
# L2 weight decay
if self.l2_penalty_weight:
df_W += self.l2_penalty_weight * self.W
return (df_W, df_b), df_input
|
Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
df_output : ``GPUArray``
Gradients with respect to the activations of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
|
entailment
|
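The same gradients in NumPy, continuing the forward-pass sketch above (ReLU activation, no dropout, no weight decay):
import numpy as np
rng = np.random.default_rng(0)
X = rng.standard_normal((4, 5))
W = rng.standard_normal((5, 3))
b = np.zeros(3)
df_output = rng.standard_normal((4, 3))    # gradient arriving from the layer above
activations = np.maximum(X @ W + b, 0.0)
df_activations = (activations > 0).astype(float)   # derivative of ReLU
delta = df_activations * df_output
df_W = X.T @ delta              # gradient wrt weights
df_b = delta.sum(axis=0)        # gradient wrt biases
df_input = delta @ W.T          # gradient wrt the layer's input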
def cublasCreate():
"""
Initialize CUBLAS.
Initializes CUBLAS and creates a handle to a structure holding
the CUBLAS library context.
Returns
-------
handle : void_p
CUBLAS context.
"""
handle = ctypes.c_void_p()
status = _libcublas.cublasCreate_v2(ctypes.byref(handle))
cublasCheckStatus(status)
return handle.value
|
Initialize CUBLAS.
Initializes CUBLAS and creates a handle to a structure holding
the CUBLAS library context.
Returns
-------
handle : void_p
CUBLAS context.
|
entailment
|
def cublasDestroy(handle):
"""
Release CUBLAS resources.
Releases hardware resources used by CUBLAS.
Parameters
----------
handle : void_p
CUBLAS context.
"""
status = _libcublas.cublasDestroy_v2(ctypes.c_void_p(handle))
cublasCheckStatus(status)
|
Release CUBLAS resources.
Releases hardware resources used by CUBLAS.
Parameters
----------
handle : void_p
CUBLAS context.
|
entailment
|
def cublasGetVersion(handle):
"""
Get CUBLAS version.
Returns version number of installed CUBLAS libraries.
Parameters
----------
handle : void_p
CUBLAS context.
Returns
-------
version : int
CUBLAS version.
"""
version = ctypes.c_int()
status = _libcublas.cublasGetVersion_v2(handle, ctypes.byref(version))
cublasCheckStatus(status)
return version.value
|
Get CUBLAS version.
Returns version number of installed CUBLAS libraries.
Parameters
----------
handle : void_p
CUBLAS context.
Returns
-------
version : int
CUBLAS version.
|
entailment
|
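A typical create/use/destroy lifecycle for these wrappers, assuming the module is importable as scikits.cuda.cublas and a CUDA context already exists (e.g. via pycuda.autoinit); a usage sketch, not part of the library:
import pycuda.autoinit              # creates a CUDA context as a side effect
import scikits.cuda.cublas as cublas
handle = cublas.cublasCreate()
try:
    print(cublas.cublasGetVersion(handle))
finally:
    cublas.cublasDestroy(handle)    # always release the handle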
def cublasSetStream(handle, id):
"""
Set current CUBLAS library stream.
Parameters
----------
handle : void_p
CUBLAS context.
id : int
Stream ID.
"""
status = _libcublas.cublasSetStream_v2(handle, id)
cublasCheckStatus(status)
|
Set current CUBLAS library stream.
Parameters
----------
handle : void_p
CUBLAS context.
id : int
Stream ID.
|
entailment
|
def cublasGetStream(handle):
"""
Get current CUBLAS library stream.
Parameters
----------
handle : void_p
CUBLAS context.
Returns
-------
id : int
Stream ID.
"""
id = ctypes.c_int()
status = _libcublas.cublasGetStream_v2(handle, ctypes.byref(id))
cublasCheckStatus(status)
return id.value
|
Get current CUBLAS library stream.
Parameters
----------
handle : void_p
CUBLAS context.
Returns
-------
id : int
Stream ID.
|
entailment
|
def cublasSgbmv(handle, trans, m, n, kl, ku, alpha, A, lda,
x, incx, beta, y, incy):
"""
Matrix-vector product for real general banded matrix.
"""
status = _libcublas.cublasSgbmv_v2(handle,
trans, m, n, kl, ku,
ctypes.byref(ctypes.c_float(alpha)),
int(A), lda,
int(x), incx,
ctypes.byref(ctypes.c_float(beta)),
int(y), incy)
cublasCheckStatus(status)
|
Matrix-vector product for real general banded matrix.
|
entailment
|
def cublasCgbmv(handle, trans, m, n, kl, ku, alpha, A, lda,
x, incx, beta, y, incy):
"""
Matrix-vector product for complex general banded matrix.
"""
status = _libcublas.cublasCgbmv_v2(handle,
trans, m, n, kl, ku,
ctypes.byref(cuda.cuFloatComplex(alpha.real,
alpha.imag)),
int(A), lda, int(x), incx,
ctypes.byref(cuda.cuFloatComplex(beta.real,
beta.imag)),
int(y), incy)
cublasCheckStatus(status)
|
Matrix-vector product for complex general banded matrix.
|
entailment
|
def cublasZgbmv(handle, trans, m, n, kl, ku, alpha, A, lda,
x, incx, beta, y, incy):
"""
Matrix-vector product for complex general banded matrix.
"""
status = _libcublas.cublasZgbmv_v2(handle,
trans, m, n, kl, ku,
ctypes.byref(cuda.cuDoubleComplex(alpha.real,
alpha.imag)),
int(A), lda, int(x), incx,
ctypes.byref(cuda.cuDoubleComplex(beta.real,
beta.imag)),
int(y), incy)
cublasCheckStatus(status)
|
Matrix-vector product for complex general banded matrix.
|
entailment
|
def cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy):
"""
Matrix-vector product for real general matrix.
"""
status = _libcublas.cublasSgemv_v2(handle,
_CUBLAS_OP[trans], m, n,
ctypes.byref(ctypes.c_float(alpha)), int(A), lda,
int(x), incx,
ctypes.byref(ctypes.c_float(beta)), int(y), incy)
cublasCheckStatus(status)
|
Matrix-vector product for real general matrix.
|
entailment
|
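For reference, gemv computes y = alpha * op(A) @ x + beta * y, where op(A) is A, its transpose or its conjugate transpose depending on trans (and CUBLAS assumes column-major storage, which wrappers such as the dot function above compensate for). A NumPy equivalent of the non-transposed case, for illustration only:
import numpy as np
m, n = 3, 4
A = np.arange(m * n, dtype=np.float32).reshape(m, n)
x = np.ones(n, dtype=np.float32)
y = np.zeros(m, dtype=np.float32)
alpha, beta = 1.0, 0.0
y = alpha * (A @ x) + beta * y      # the operation cublasSgemv performs
print(y)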