| _id (string, 2–7 chars) | title (string, 1–88 chars) | partition (3 classes) | text (string, 75–19.8k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q21900
|
Curve.plot
|
train
|
def plot(self, ax=None, legend=None, return_fig=False, **kwargs):
"""
Plot a curve.
Args:
ax (ax): A matplotlib axis.
legend (striplog.legend): A legend. Optional.
return_fig (bool): whether to return the matplotlib figure.
Default False.
kwargs: Arguments for ``ax.set()``
Returns:
ax. If you passed in an ax, otherwise None.
"""
if ax is None:
fig = plt.figure(figsize=(2, 10))
ax = fig.add_subplot(111)
return_ax = False
else:
return_ax = True
d = None
if legend is not None:
try:
d = legend.get_decor(self)
except:
pass
if d is not None:
kwargs['color'] = d.colour
kwargs['lw'] = getattr(d, 'lineweight', None) or getattr(d, 'lw', 1)
kwargs['ls'] = getattr(d, 'linestyle', None) or getattr(d, 'ls', '-')
# Attempt to get axis parameters from decor.
axkwargs = {}
xlim = getattr(d, 'xlim', None)
if xlim is not None:
axkwargs['xlim'] = list(map(float, xlim.split(',')))
xticks = getattr(d, 'xticks', None)
if xticks is not None:
axkwargs['xticks'] = list(map(float, xticks.split(',')))
xscale = getattr(d, 'xscale', None)
if xscale is not None:
axkwargs['xscale'] = xscale
ax.set(**axkwargs)
ax.plot(self, self.basis, **kwargs)
ax.set_title(self.mnemonic) # no longer needed
ax.set_xlabel(self.units)
if False: # labeltop of axes?
ax.xaxis.tick_top()
if True: # rotate x-tick labels
labels = ax.get_xticklabels()
for label in labels:
label.set_rotation(90)
ax.set_ylim([self.stop, self.start])
ax.grid('on', color='k', alpha=0.33, lw=0.33, linestyle='-')
if return_ax:
return ax
elif return_fig:
return fig
else:
return None
|
python
|
{
"resource": ""
}
|
q21901
|
Curve.interpolate
|
train
|
def interpolate(self):
"""
Interpolate across any missing zones.
TODO
Allow spline interpolation.
"""
nans, x = utils.nan_idx(self)
self[nans] = np.interp(x(nans), x(~nans), self[~nans])
return self
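# --- Hedged, self-contained sketch (not part of the method above) ---
# The same NaN-interpolation idiom in plain NumPy, assuming utils.nan_idx
# returns a (nan_mask, index_function) pair like the hypothetical helper here.
import numpy as np

def nan_idx(y):
    """Boolean NaN mask plus a helper that turns masks into integer indices."""
    return np.isnan(y), lambda mask: mask.nonzero()[0]

a = np.array([1.0, np.nan, np.nan, 4.0, 5.0])
nans, x = nan_idx(a)
a[nans] = np.interp(x(nans), x(~nans), a[~nans])
# a is now [1., 2., 3., 4., 5.]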
|
python
|
{
"resource": ""
}
|
q21902
|
Curve.interpolate_where
|
train
|
def interpolate_where(self, condition):
"""
        Not implemented yet: remove values matching ``condition``, then interpolate across the gaps.
"""
raise NotImplementedError()
self[self < 0] = np.nan
return self.interpolate()
|
python
|
{
"resource": ""
}
|
q21903
|
Curve.read_at
|
train
|
def read_at(self, d, **kwargs):
"""
Read the log at a specific depth or an array of depths.
Args:
d (float or array-like)
interpolation (str)
index(bool)
return_basis (bool)
Returns:
float or ndarray.
"""
try:
return np.array([self._read_at(depth, **kwargs) for depth in d])
except:
return self._read_at(d, **kwargs)
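# Hedged usage sketch (assumed welly-style API; depths are illustrative):
# curve.read_at(1200.0)                      # one value
# curve.read_at([1200.0, 1250.0, 1300.0])    # ndarray of three values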
|
python
|
{
"resource": ""
}
|
q21904
|
Curve.quality
|
train
|
def quality(self, tests, alias=None):
"""
Run a series of tests and return the corresponding results.
Args:
tests (list): a list of functions.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
Returns:
list. The results. Stick to booleans (True = pass) or ints.
"""
        # Gather the tests.
        # First, anything keyed 'each', 'Each', or 'EACH'.
        # Second, anything with the name of the curve we're in now.
        # Third, anything that the alias list has for this curve.
# (This requires a reverse look-up so it's a bit messy.)
this_tests =\
tests.get('each', [])+tests.get('Each', [])+tests.get('EACH', [])\
+ tests.get(self.mnemonic, [])\
+ utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)])
this_tests = filter(None, this_tests)
# If we explicitly set zero tests for a particular key, then this
# overrides the 'all' and 'alias' tests.
if not tests.get(self.mnemonic, 1):
this_tests = []
return {test.__name__: test(self) for test in this_tests}
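# --- Hedged usage sketch (the tests dict layout below is an assumption) ---
# Tests keyed 'each' run on every curve; tests keyed by a mnemonic run only
# on that curve. The hypothetical test functions just illustrate the shape.
import numpy as np

def not_empty(curve):
    return np.asarray(curve).size > 0

def all_positive(curve):
    return bool(np.all(np.asarray(curve) > 0))

tests = {
    'each': [not_empty],    # applied to every curve
    'GR': [all_positive],   # applied only to the curve with mnemonic 'GR'
}
# gr_curve.quality(tests)   # e.g. {'not_empty': True, 'all_positive': True}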
|
python
|
{
"resource": ""
}
|
q21905
|
Curve.block
|
train
|
def block(self,
cutoffs=None,
values=None,
n_bins=0,
right=False,
function=None):
"""
Block a log based on number of bins, or on cutoffs.
Args:
            cutoffs (array): the cutoff values at which to bin the log.
            values (array): the values to map to. Defaults to [0, 1, 2,...]
            n_bins (int): the number of equal-width bins to use if no cutoffs
                are given.
            right (bool): passed to ``np.digitize``; whether intervals include
                the right bin edge.
            function (function): transform the log if you want.
Returns:
Curve.
"""
# We'll return a copy.
params = self.__dict__.copy()
if (values is not None) and (cutoffs is None):
cutoffs = values[1:]
if (cutoffs is None) and (n_bins == 0):
cutoffs = np.mean(self)
if (n_bins != 0) and (cutoffs is None):
mi, ma = np.amin(self), np.amax(self)
cutoffs = np.linspace(mi, ma, n_bins+1)
cutoffs = cutoffs[:-1]
try: # To use cutoff as a list.
data = np.digitize(self, cutoffs, right)
except ValueError: # It's just a number.
data = np.digitize(self, [cutoffs], right)
if (function is None) and (values is None):
return Curve(data, params=params)
data = data.astype(float)
# Set the function for reducing.
f = function or utils.null
# Find the tops of the 'zones'.
tops, vals = utils.find_edges(data)
# End of array trick... adding this should remove the
# need for the marked lines below. But it doesn't.
# np.append(tops, None)
# np.append(vals, None)
if values is None:
# Transform each segment in turn, then deal with the last segment.
for top, base in zip(tops[:-1], tops[1:]):
data[top:base] = f(np.copy(self[top:base]))
data[base:] = f(np.copy(self[base:])) # See above
else:
for top, base, val in zip(tops[:-1], tops[1:], vals[:-1]):
data[top:base] = values[int(val)]
data[base:] = values[int(vals[-1])] # See above
return Curve(data, params=params)
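# Hedged usage sketch (assumed API; the cutoff and values are illustrative):
# blocked = gr_curve.block(cutoffs=[80], values=[0, 100])  # two zones -> 0 / 100
# binned = gr_curve.block(n_bins=5)                        # five equal-width bins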
|
python
|
{
"resource": ""
}
|
q21906
|
Curve.apply
|
train
|
def apply(self, window_length, samples=True, func1d=None):
"""
Runs any kind of function over a window.
Args:
window_length (int): the window length. Required.
samples (bool): window length is in samples. Use False for a window
length given in metres.
func1d (function): a function that takes a 1D array and returns a
scalar. Default: ``np.mean()``.
Returns:
Curve.
"""
window_length /= 1 if samples else self.step
if func1d is None:
func1d = np.mean
params = self.__dict__.copy()
out = self._rolling_window(int(window_length), func1d)
return Curve(out, params=params)
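# Hedged usage sketch (assumed API): a 7-sample running median, or a window
# given in metres when samples=False.
# smooth = curve.apply(7, func1d=np.median)
# smooth_5m = curve.apply(5.0, samples=False)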
|
python
|
{
"resource": ""
}
|
q21907
|
Header.from_csv
|
train
|
def from_csv(cls, csv_file):
"""
        Not implemented yet. Will provide a route from a CSV file.
"""
try:
param_dict = csv.DictReader(csv_file)
return cls(param_dict)
except:
raise NotImplementedError
|
python
|
{
"resource": ""
}
|
q21908
|
write_row
|
train
|
def write_row(dictionary, card, log):
"""
Processes a single row from the file.
"""
rowhdr = {'card': card, 'log': log}
# Do this as a list of 1-char strings.
# Can't use a string b/c strings are immutable.
row = [' '] * 80
# Make the row header.
for e in ['log', 'card']:
strt, stop, item = _put_field(cols(0), e, rowhdr[e])
if item is not None:
row[strt:stop] = list(item)
# Now make the rest of the row.
for field in cols(card):
strt, stop, item = _put_field(cols(card), field, dictionary.get(field))
if item is not None:
row[strt:stop] = list(item)
return ''.join(row) + '\n'
|
python
|
{
"resource": ""
}
|
q21909
|
Synthetic.basis
|
train
|
def basis(self):
"""
Compute basis rather than storing it.
"""
precision_adj = self.dt / 100
return np.arange(self.start, self.stop - precision_adj, self.dt)
|
python
|
{
"resource": ""
}
|
q21910
|
Synthetic.as_curve
|
train
|
def as_curve(self, start=None, stop=None):
"""
Get the synthetic as a Curve, in depth. Facilitates plotting along-
side other curve data.
"""
params = {'start': start or getattr(self, 'z start', None),
'mnemonic': 'SYN',
'step': 0.1524
}
        data = np.copy(self)  # the synthetic samples themselves
        return Curve(data, params=params)
|
python
|
{
"resource": ""
}
|
q21911
|
Synthetic.plot
|
train
|
def plot(self, ax=None, return_fig=False, **kwargs):
"""
Plot a synthetic.
Args:
ax (ax): A matplotlib axis.
legend (Legend): For now, only here to match API for other plot
methods.
return_fig (bool): whether to return the matplotlib figure.
Default False.
Returns:
ax. If you passed in an ax, otherwise None.
"""
if ax is None:
fig = plt.figure(figsize=(2, 10))
ax = fig.add_subplot(111)
return_ax = False
else:
return_ax = True
hypertime = np.linspace(self.start, self.stop, (10 * self.size - 1) + 1)
hyperamp = np.interp(hypertime, self.basis, self)
ax.plot(hyperamp, hypertime, 'k')
ax.fill_betweenx(hypertime, hyperamp, 0, hyperamp > 0.0, facecolor='k', lw=0)
ax.invert_yaxis()
ax.set_title(self.name)
if return_ax:
return ax
elif return_fig:
return fig
else:
return None
|
python
|
{
"resource": ""
}
|
q21912
|
no_gaps
|
train
|
def no_gaps(curve):
"""
Check for gaps, after ignoring any NaNs at the top and bottom.
"""
tnt = utils.top_and_tail(curve)
return not any(np.isnan(tnt))
|
python
|
{
"resource": ""
}
|
q21913
|
no_spikes
|
train
|
def no_spikes(tolerance):
"""
Arg ``tolerance`` is the number of spiky samples allowed.
"""
def no_spikes(curve):
diff = np.abs(curve - curve.despike())
return np.count_nonzero(diff) < tolerance
return no_spikes
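# Hedged usage sketch: no_spikes is a factory, so call it once to fix the
# tolerance and put the returned closure into a tests dict (no_gaps is the
# plain test defined above).
# tests = {'each': [no_gaps, no_spikes(tolerance=3)]}
# results = well.qc_data(tests)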
|
python
|
{
"resource": ""
}
|
q21914
|
Well.from_lasio
|
train
|
def from_lasio(cls, l, remap=None, funcs=None, data=True, req=None, alias=None, fname=None):
"""
Constructor. If you already have the lasio object, then this makes a
well object from it.
Args:
l (lasio object): a lasio object.
remap (dict): Optional. A dict of 'old': 'new' LAS field names.
funcs (dict): Optional. A dict of 'las field': function() for
implementing a transform before loading. Can be a lambda.
data (bool): Whether to load curves or not.
req (dict): An alias list, giving all required curves. If not
all of the aliases are present, the well is empty.
            alias (dict): Optional. A dict mapping mnemonics to lists of
                mnemonics; used together with ``req``.
            fname (str): Optional. The filename, stored on the well object.
        Returns:
well. The well object.
"""
# Build a dict of curves.
curve_params = {}
for field, (sect, code) in LAS_FIELDS['data'].items():
curve_params[field] = utils.lasio_get(l,
sect,
code,
remap=remap,
funcs=funcs)
# This is annoying, but I need the whole depth array to
# deal with edge cases, eg non-uniform sampling.
# Add all required curves together.
if req:
reqs = utils.flatten_list([v for k, v in alias.items() if k in req])
# Using lasio's idea of depth in metres:
if l.depth_m[0] < l.depth_m[1]:
curve_params['depth'] = l.depth_m
else:
curve_params['depth'] = np.flipud(l.depth_m)
# Make the curve dictionary.
depth_curves = ['DEPT', 'TIME']
if data and req:
curves = {c.mnemonic: Curve.from_lasio_curve(c, **curve_params)
for c in l.curves
if (c.mnemonic[:4] not in depth_curves)
and (c.mnemonic in reqs)}
elif data and not req:
curves = {c.mnemonic: Curve.from_lasio_curve(c, **curve_params)
for c in l.curves
if (c.mnemonic[:4] not in depth_curves)}
elif (not data) and req:
curves = {c.mnemonic: True
for c in l.curves
if (c.mnemonic[:4] not in depth_curves)
and (c.mnemonic in reqs)}
else:
curves = {c.mnemonic: True
for c in l.curves
if (c.mnemonic[:4] not in depth_curves)}
if req:
aliases = utils.flatten_list([c.get_alias(alias)
for m, c
in curves.items()]
)
if len(set(aliases)) < len(req):
return cls(params={})
# Build a dict of the other well data.
params = {'las': l,
'header': Header.from_lasio(l, remap=remap, funcs=funcs),
'location': Location.from_lasio(l, remap=remap, funcs=funcs),
'data': curves,
'fname': fname}
for field, (sect, code) in LAS_FIELDS['well'].items():
params[field] = utils.lasio_get(l,
sect,
code,
remap=remap,
funcs=funcs)
return cls(params)
|
python
|
{
"resource": ""
}
|
q21915
|
Well.to_lasio
|
train
|
def to_lasio(self, keys=None, basis=None):
"""
Makes a lasio object from the current well.
Args:
basis (ndarray): Optional. The basis to export the curves in. If
you don't specify one, it will survey all the curves with
``survey_basis()``.
keys (list): List of strings: the keys of the data items to
include, if not all of them. You can have nested lists, such
as you might use for ``tracks`` in ``well.plot()``.
Returns:
lasio. The lasio object.
"""
# Create an empty lasio object.
l = lasio.LASFile()
l.well.DATE = str(datetime.datetime.today())
# Deal with header.
for obj, dic in LAS_FIELDS.items():
if obj == 'data':
continue
for attr, (sect, item) in dic.items():
value = getattr(getattr(self, obj), attr, None)
try:
getattr(l, sect)[item].value = value
except:
h = lasio.HeaderItem(item, "", value, "")
getattr(l, sect)[item] = h
# Clear curves from header portion.
l.header['Curves'] = []
# Add a depth basis.
if basis is None:
basis = self.survey_basis(keys=keys)
try:
l.add_curve('DEPT', basis)
except:
raise Exception("Please provide a depth basis.")
# Add meta from basis.
setattr(l.well, 'STRT', basis[0])
setattr(l.well, 'STOP', basis[-1])
setattr(l.well, 'STEP', basis[1]-basis[0])
# Add data entities.
other = ''
if keys is None:
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
else:
keys = utils.flatten_list(keys)
for k in keys:
d = self.data[k]
if getattr(d, 'null', None) is not None:
d[np.isnan(d)] = d.null
try:
new_data = np.copy(d.to_basis_like(basis))
except:
# Basis shift failed; is probably not a curve
pass
try:
descr = getattr(d, 'description', '')
l.add_curve(k.upper(), new_data, unit=d.units, descr=descr)
except:
try:
# Treat as OTHER
other += "{}\n".format(k.upper()) + d.to_csv()
except:
pass
# Write OTHER, if any.
if other:
l.other = other
return l
|
python
|
{
"resource": ""
}
|
q21916
|
Well._plot_depth_track
|
train
|
def _plot_depth_track(self, ax, md, kind='MD'):
"""
Private function. Depth track plotting.
Args:
ax (ax): A matplotlib axis.
md (ndarray): The measured depths of the track.
kind (str): The kind of track to plot.
Returns:
ax.
"""
if kind == 'MD':
ax.set_yscale('bounded', vmin=md.min(), vmax=md.max())
# ax.set_ylim([md.max(), md.min()])
elif kind == 'TVD':
tvd = self.location.md2tvd(md)
ax.set_yscale('piecewise', x=tvd, y=md)
# ax.set_ylim([tvd.max(), tvd.min()])
else:
raise Exception("Kind must be MD or TVD")
for sp in ax.spines.values():
sp.set_color('gray')
if ax.is_first_col():
pad = -10
ax.spines['left'].set_color('none')
ax.yaxis.set_ticks_position('right')
for label in ax.get_yticklabels():
label.set_horizontalalignment('right')
elif ax.is_last_col():
pad = -10
ax.spines['right'].set_color('none')
ax.yaxis.set_ticks_position('left')
for label in ax.get_yticklabels():
label.set_horizontalalignment('left')
else:
pad = -30
for label in ax.get_yticklabels():
label.set_horizontalalignment('center')
ax.tick_params(axis='y', colors='gray', labelsize=12, pad=pad)
ax.set_xticks([])
ax.set(xticks=[])
ax.depth_track = True
return ax
|
python
|
{
"resource": ""
}
|
q21917
|
Well.survey_basis
|
train
|
def survey_basis(self, keys=None, alias=None, step=None):
"""
Look at the basis of all the curves in ``well.data`` and return a
basis with the minimum start, maximum depth, and minimum step.
Args:
keys (list): List of strings: the keys of the data items to
survey, if not all of them.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
step (float): a new step, if you want to change it.
Returns:
ndarray. The most complete common basis.
"""
if keys is None:
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
else:
keys = utils.flatten_list(keys)
starts, stops, steps = [], [], []
for k in keys:
d = self.get_curve(k, alias=alias)
if keys and (d is None):
continue
try:
starts.append(d.basis[0])
stops.append(d.basis[-1])
steps.append(d.basis[1] - d.basis[0])
except Exception as e:
pass
if starts and stops and steps:
step = step or min(steps)
return np.arange(min(starts), max(stops)+1e-9, step)
else:
return None
|
python
|
{
"resource": ""
}
|
q21918
|
Well.get_mnemonics_from_regex
|
train
|
def get_mnemonics_from_regex(self, pattern):
"""
Should probably integrate getting curves with regex, vs getting with
aliases, even though mixing them is probably confusing. For now I can't
think of another use case for these wildcards, so I'll just implement
for the curve table and we can worry about a nice solution later if we
ever come back to it.
"""
regex = re.compile(pattern)
keys = list(self.data.keys())
return [m.group(0) for k in keys for m in [regex.search(k)] if m]
|
python
|
{
"resource": ""
}
|
q21919
|
Well.get_mnemonic
|
train
|
def get_mnemonic(self, mnemonic, alias=None):
"""
Instead of picking curves by name directly from the data dict, you
can pick them up with this method, which takes account of the alias
dict you pass it. If you do not pass an alias dict, then you get the
curve you asked for, if it exists, or None. NB Wells do not have alias
dicts, but Projects do.
Args:
mnemonic (str): the name of the curve you want.
alias (dict): an alias dictionary, mapping mnemonics to lists of
mnemonics.
Returns:
            str. The mnemonic of the matching curve in ``well.data``, or None.
"""
alias = alias or {}
aliases = alias.get(mnemonic, [mnemonic])
for a in aliases:
if a in self.data:
return a
return None
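# Hedged usage sketch (the alias names are illustrative): with an alias dict,
# get_mnemonic returns the first alias that actually exists in well.data.
# alias = {'Gamma': ['GR', 'GRC', 'SGR'], 'Density': ['RHOB', 'DEN']}
# well.get_mnemonic('Gamma', alias=alias)   # -> 'GR' if present, else 'GRC', ...
# well.get_curve('Gamma', alias=alias)      # -> the matching Curve object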
|
python
|
{
"resource": ""
}
|
q21920
|
Well.get_curve
|
train
|
def get_curve(self, mnemonic, alias=None):
"""
Wraps get_mnemonic.
Instead of picking curves by name directly from the data dict, you
can pick them up with this method, which takes account of the alias
dict you pass it. If you do not pass an alias dict, then you get the
curve you asked for, if it exists, or None. NB Wells do not have alias
dicts, but Projects do.
Args:
mnemonic (str): the name of the curve you want.
alias (dict): an alias dictionary, mapping mnemonics to lists of
mnemonics.
Returns:
Curve.
"""
return self.data.get(self.get_mnemonic(mnemonic, alias=alias), None)
|
python
|
{
"resource": ""
}
|
q21921
|
Well.count_curves
|
train
|
def count_curves(self, keys=None, alias=None):
"""
Counts the number of curves in the well that will be selected with the
given key list and the given alias dict. Used by Project's curve table.
"""
if keys is None:
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
else:
keys = utils.flatten_list(keys)
return len(list(filter(None, [self.get_mnemonic(k, alias=alias) for k in keys])))
|
python
|
{
"resource": ""
}
|
q21922
|
Well.make_synthetic
|
train
|
def make_synthetic(self,
srd=0,
v_repl_seismic=2000,
v_repl_log=2000,
f=50,
dt=0.001):
"""
Early hack. Use with extreme caution.
        Hands-free. There'll be a more granular version in synthetic.py.
Assumes DT is in µs/m and RHOB is kg/m3.
There is no handling yet for TVD.
The datum handling is probably sketchy.
TODO:
A lot.
"""
kb = getattr(self.location, 'kb', None) or 0
data0 = self.data['DT'].start
log_start_time = ((srd - kb) / v_repl_seismic) + (data0 / v_repl_log)
# Basic log values.
dt_log = self.data['DT'].despike() # assume µs/m
rho_log = self.data['RHOB'].despike() # assume kg/m3
if not np.allclose(dt_log.basis, rho_log.basis):
rho_log = rho_log.to_basis_like(dt_log)
Z = (1e6 / dt_log) * rho_log
# Two-way-time.
scaled_dt = dt_log.step * np.nan_to_num(dt_log) / 1e6
twt = 2 * np.cumsum(scaled_dt)
t = twt + log_start_time
# Move to time.
t_max = t[-1] + 10*dt
t_reg = np.arange(0, t_max+1e-9, dt)
Z_t = np.interp(x=t_reg, xp=t, fp=Z)
# Make RC series.
rc_t = (Z_t[1:] - Z_t[:-1]) / (Z_t[1:] + Z_t[:-1])
rc_t = np.nan_to_num(rc_t)
# Convolve.
_, ricker = utils.ricker(f=f, length=0.128, dt=dt)
synth = np.convolve(ricker, rc_t, mode='same')
params = {'dt': dt,
'z start': dt_log.start,
'z stop': dt_log.stop
}
self.data['Synthetic'] = Synthetic(synth, basis=t_reg, params=params)
return None
|
python
|
{
"resource": ""
}
|
q21923
|
Well.qc_curve_group
|
train
|
def qc_curve_group(self, tests, alias=None):
"""
Run tests on a cohort of curves.
Args:
alias (dict): an alias dictionary, mapping mnemonics to lists of
mnemonics.
Returns:
dict.
"""
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
if not keys:
return {}
all_tests = tests.get('all', tests.get('All', tests.get('ALL', [])))
data = {test.__name__: test(self, keys, alias) for test in all_tests}
results = {}
for i, key in enumerate(keys):
this = {}
for test, result in data.items():
this[test] = result[i]
results[key] = this
return results
|
python
|
{
"resource": ""
}
|
q21924
|
Well.qc_data
|
train
|
def qc_data(self, tests, alias=None):
"""
Run a series of tests against the data and return the corresponding
results.
Args:
tests (list): a list of functions.
Returns:
list. The results. Stick to booleans (True = pass) or ints.
"""
# We'll get a result for each curve here.
r = {m: c.quality(tests, alias) for m, c in self.data.items()}
s = self.qc_curve_group(tests, alias=alias)
for m, results in r.items():
if m in s:
results.update(s[m])
return r
|
python
|
{
"resource": ""
}
|
q21925
|
Well.data_as_matrix
|
train
|
def data_as_matrix(self,
keys=None,
return_basis=False,
basis=None,
alias=None,
start=None,
stop=None,
step=None,
window_length=None,
window_step=1,
):
"""
Provide a feature matrix, given a list of data items.
I think this will probably fail if there are striplogs in the data
dictionary for this well.
TODO:
Deal with striplogs and other data, if present.
Args:
keys (list): List of the logs to export from the data dictionary.
return_basis (bool): Whether or not to return the basis that was
used.
basis (ndarray): The basis to use. Default is to survey all curves
to find a common basis.
alias (dict): A mapping of alias names to lists of mnemonics.
start (float): Optionally override the start of whatever basis
you find or (more likely) is surveyed.
stop (float): Optionally override the stop of whatever basis
you find or (more likely) is surveyed.
step (float): Override the step in the basis from survey_basis.
window_length (int): The number of samples to return around each sample.
This will provide one or more shifted versions of the features.
window_step (int): How much to step the offset versions.
Returns:
ndarray.
or
ndarray, ndarray if return_basis=True
"""
if keys is None:
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
else:
# Only look at the alias list if keys were passed.
if alias is not None:
_keys = []
for k in keys:
if k in alias:
added = False
for a in alias[k]:
if a in self.data:
_keys.append(a)
added = True
break
if not added:
_keys.append(k)
else:
_keys.append(k)
keys = _keys
if basis is None:
basis = self.survey_basis(keys=keys, step=step)
        # Get the data, or None if a curve is missing.
data = [self.data.get(k) for k in keys]
# Now cast to the correct basis, and replace any missing curves with
# an empty Curve. The sklearn imputer will deal with it. We will change
# the elements in place.
for i, d in enumerate(data):
if d is not None:
data[i] = d.to_basis(basis=basis)
# Allow user to override the start and stop from the survey.
if (start is not None) or (stop is not None):
data[i] = data[i].to_basis(start=start, stop=stop, step=step)
basis = data[i].basis
else:
# Empty_like gives unpredictable results
data[i] = Curve(np.full(basis.shape, np.nan), basis=basis)
if window_length is not None:
d_new = []
for d in data:
r = d._rolling_window(window_length,
func1d=utils.null,
step=window_step,
return_rolled=False,
)
d_new.append(r.T)
data = d_new
if return_basis:
return np.vstack(data).T, basis
else:
return np.vstack(data).T
|
python
|
{
"resource": ""
}
|
q21926
|
CRS.from_string
|
train
|
def from_string(cls, prjs):
"""
Turn a PROJ.4 string into a mapping of parameters. Bare parameters
like "+no_defs" are given a value of ``True``. All keys are checked
against the ``all_proj_keys`` list.
Args:
prjs (str): A PROJ4 string.
"""
def parse(v):
try:
return int(v)
except ValueError:
pass
try:
return float(v)
except ValueError:
return v
parts = [o.lstrip('+') for o in prjs.strip().split()]
items = map(
lambda kv: len(kv) == 2 and (kv[0], parse(kv[1])) or (kv[0], True),
(p.split('=') for p in parts))
return cls({k: v for k, v in items if '+'+k in PROJ4_PARAMS.keys()})
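# --- Hedged, self-contained sketch of the same parsing idea ---
# (The real method also filters keys against the known PROJ.4 parameters,
# which is omitted here; parse_proj4 is a hypothetical stand-in.)
def parse_proj4(prjs):
    def parse(v):
        for cast in (int, float):
            try:
                return cast(v)
            except ValueError:
                pass
        return v
    parts = [p.lstrip('+') for p in prjs.strip().split()]
    kvs = (p.split('=') for p in parts)
    return {kv[0]: parse(kv[1]) if len(kv) == 2 else True for kv in kvs}

parse_proj4("+proj=utm +zone=15 +ellps=WGS84 +units=m +no_defs")
# {'proj': 'utm', 'zone': 15, 'ellps': 'WGS84', 'units': 'm', 'no_defs': True}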
|
python
|
{
"resource": ""
}
|
q21927
|
similarity_by_path
|
train
|
def similarity_by_path(sense1: "wn.Synset", sense2: "wn.Synset", option: str = "path") -> float:
"""
Returns maximum path similarity between two senses.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('path', 'wup', 'lch').
:return: A float, similarity measurement.
"""
if option.lower() in ["path", "path_similarity"]: # Path similarities.
return max(wn.path_similarity(sense1, sense2, if_none_return=0),
wn.path_similarity(sense2, sense1, if_none_return=0))
elif option.lower() in ["wup", "wupa", "wu-palmer", "wu-palmer"]: # Wu-Palmer
return max(wn.wup_similarity(sense1, sense2, if_none_return=0),
wn.wup_similarity(sense2, sense1, if_none_return=0))
elif option.lower() in ['lch', "leacock-chordorow"]: # Leacock-Chodorow
if sense1.pos != sense2.pos: # lch can't do diff POS
return 0
return wn.lch_similarity(sense1, sense2, if_none_return=0)
|
python
|
{
"resource": ""
}
|
q21928
|
similarity_by_infocontent
|
train
|
def similarity_by_infocontent(sense1: "wn.Synset", sense2: "wn.Synset", option: str) -> float:
"""
Returns similarity scores by information content.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('res', 'jcn', 'lin').
:return: A float, similarity measurement.
"""
if sense1.pos != sense2.pos: # infocontent sim can't do diff POS.
return 0
if option in ['res', 'resnik']:
if sense1.pos not in wnic_bnc_resnik_add1.ic:
return 0
return wn.res_similarity(sense1, sense2, wnic_bnc_resnik_add1)
#return min(wn.res_similarity(sense1, sense2, wnic.ic(ic)) \
# for ic in info_contents)
elif option in ['jcn', "jiang-conrath"]:
if sense1.pos not in wnic_bnc_add1.ic:
return 0
return wn.jcn_similarity(sense1, sense2, wnic_bnc_add1)
elif option in ['lin']:
if sense1.pos not in wnic_bnc_add1.ic:
return 0
return wn.lin_similarity(sense1, sense2, wnic_bnc_add1)
|
python
|
{
"resource": ""
}
|
q21929
|
sim
|
train
|
def sim(sense1: "wn.Synset", sense2: "wn.Synset", option: str = "path") -> float:
"""
Calculates similarity based on user's choice.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('path', 'wup', 'lch', 'res', 'jcn', 'lin').
:return: A float, similarity measurement.
"""
    option = option.lower()
    if option in ["path", "path_similarity",
                  "wup", "wupa", "wu-palmer",
                  "lch", "leacock-chordorow"]:
        return similarity_by_path(sense1, sense2, option)
    elif option in ["res", "resnik",
                    "jcn", "jiang-conrath",
                    "lin"]:
        return similarity_by_infocontent(sense1, sense2, option)
|
python
|
{
"resource": ""
}
|
q21930
|
lemmatize
|
train
|
def lemmatize(ambiguous_word: str, pos: str = None, neverstem=False,
lemmatizer=wnl, stemmer=porter) -> str:
"""
    Tries to convert a surface word into its lemma; if the lemma is not in
    WordNet, it then tries to convert the surface word into its stem.
    This handles the case where the user inputs a surface word as the
    ambiguous word and that surface word is not a lemma.
"""
# Try to be a little smarter and use most frequent POS.
pos = pos if pos else penn2morphy(pos_tag([ambiguous_word])[0][1],
default_to_noun=True)
lemma = lemmatizer.lemmatize(ambiguous_word, pos=pos)
stem = stemmer.stem(ambiguous_word)
# Ensure that ambiguous word is a lemma.
if not wn.synsets(lemma):
if neverstem:
return ambiguous_word
if not wn.synsets(stem):
return ambiguous_word
else:
return stem
else:
return lemma
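# Hedged usage sketch (requires NLTK WordNet data; outputs indicative only):
# lemmatize('running', pos='v')            # -> 'run'
# lemmatize('dogs')                        # -> 'dog'
# lemmatize('qwertiness', neverstem=True)  # unknown word returned unchanged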
|
python
|
{
"resource": ""
}
|
q21931
|
has_synset
|
train
|
def has_synset(word: str) -> list:
"""" Returns a list of synsets of a word after lemmatization. """
return wn.synsets(lemmatize(word, neverstem=True))
|
python
|
{
"resource": ""
}
|
q21932
|
LinearClassifier.get_label
|
train
|
def get_label(self, x, w):
"""
Computes the label for each data point
"""
scores = np.dot(x,w)
return np.argmax(scores,axis=1).transpose()
|
python
|
{
"resource": ""
}
|
q21933
|
LinearClassifier.add_intercept_term
|
train
|
def add_intercept_term(self, x):
"""
Adds a column of ones to estimate the intercept term for
separation boundary
"""
nr_x,nr_f = x.shape
intercept = np.ones([nr_x,1])
x = np.hstack((intercept,x))
return x
|
python
|
{
"resource": ""
}
|
q21934
|
LinearClassifier.evaluate
|
train
|
def evaluate(self, truth, predicted):
"""
Evaluates the predicted outputs against the gold data.
"""
correct = 0.0
total = 0.0
for i in range(len(truth)):
if(truth[i] == predicted[i]):
correct += 1
total += 1
return 1.0*correct/total
|
python
|
{
"resource": ""
}
|
q21935
|
synset_signatures
|
train
|
def synset_signatures(ss: "wn.Synset", hyperhypo=True, adapted=False,
remove_stopwords=True, to_lemmatize=True, remove_numbers=True,
lowercase=True, original_lesk=False, from_cache=True) -> set:
"""
Takes a Synset and returns its signature words.
:param ss: An instance of wn.Synset.
:return: A set of signature strings
"""
if from_cache:
return synset_signatures_from_cache(ss, hyperhypo, adapted, original_lesk)
# Collects the signatures from WordNet.
signature = []
# Adds the definition, example sentences and lemma_names.
signature += word_tokenize(ss.definition())
# If the original lesk signature is requested, skip the other signatures.
if original_lesk:
return set(signature)
# Adds the examples and lemma names.
signature += chain(*[word_tokenize(eg) for eg in ss.examples()])
signature += ss.lemma_names()
# Includes lemma_names of hyper-/hyponyms.
if hyperhypo:
hyperhyponyms = set(ss.hyponyms() + ss.hypernyms() + ss.instance_hyponyms() + ss.instance_hypernyms())
signature += set(chain(*[i.lemma_names() for i in hyperhyponyms]))
# Includes signatures from related senses as in Adapted Lesk.
if adapted:
# Includes lemma_names from holonyms, meronyms and similar_tos
related_senses = set(ss.member_holonyms() + ss.part_holonyms() + ss.substance_holonyms() + \
ss.member_meronyms() + ss.part_meronyms() + ss.substance_meronyms() + \
ss.similar_tos())
signature += set(chain(*[i.lemma_names() for i in related_senses]))
# Lowercase.
signature = set(s.lower() for s in signature) if lowercase else signature
# Removes stopwords.
signature = set(signature).difference(EN_STOPWORDS) if remove_stopwords else signature
# Lemmatized context is preferred over stemmed context.
if to_lemmatize:
        signature = [lemmatize(s)  # lowercasing, if requested, was applied above
                     for s in signature
                     # Only discard a token when remove_numbers is set and it is a digit.
                     if not (remove_numbers and s.isdigit())]
# Keep only the unique bag-of-words
return set(signature)
|
python
|
{
"resource": ""
}
|
q21936
|
signatures
|
train
|
def signatures(ambiguous_word: str, pos: str = None, hyperhypo=True, adapted=False,
remove_stopwords=True, to_lemmatize=True, remove_numbers=True,
lowercase=True, to_stem=False, original_lesk=False, from_cache=True) -> dict:
"""
Takes an ambiguous word and optionally its Part-Of-Speech and returns
a dictionary where keys are the synsets and values are sets of signatures.
:param ambiguous_word: String, a single word.
:param pos: String, one of 'a', 'r', 's', 'n', 'v', or None.
:return: dict(synset:{signatures}).
"""
# Ensure that the POS is supported.
pos = pos if pos in ['a', 'r', 's', 'n', 'v', None] else None
# If the POS specified isn't found but other POS is in wordnet.
if not wn.synsets(ambiguous_word, pos) and wn.synsets(ambiguous_word):
pos = None
# Holds the synset->signature dictionary.
ss_sign = {}
for ss in wn.synsets(ambiguous_word, pos):
ss_sign[ss] = synset_signatures(ss, hyperhypo=hyperhypo,
adapted=adapted,
remove_stopwords=remove_stopwords,
to_lemmatize=to_lemmatize,
remove_numbers=remove_numbers,
lowercase=lowercase,
original_lesk=original_lesk,
from_cache=from_cache)
    # Matching exact words may cause sparsity, so optionally match stems instead.
    # Not advisable to use, hence left out of synset_signatures().
    if to_stem:
ss_sign = {ss:[porter.stem(s) for s in signature]
for ss, signature in ss_sign.items()}
return ss_sign
|
python
|
{
"resource": ""
}
|
q21937
|
compare_overlaps_greedy
|
train
|
def compare_overlaps_greedy(context: list, synsets_signatures: dict) -> "wn.Synset":
"""
Calculate overlaps between the context sentence and the synset_signatures
and returns the synset with the highest overlap.
Note: Greedy algorithm only keeps the best sense,
see https://en.wikipedia.org/wiki/Greedy_algorithm
    Only used by original_lesk(). The greedy algorithm is kept for
    documentation's sake, because original_lesk() is greedy.
:param context: List of strings, tokenized sentence or document.
:param synsets_signatures: dict of Synsets and the set of their corresponding signatures.
:return: The Synset with the highest number of overlaps with its signatures.
"""
max_overlaps = 0; lesk_sense = None
for ss in synsets_signatures:
overlaps = set(synsets_signatures[ss]).intersection(context)
if len(overlaps) > max_overlaps:
lesk_sense = ss
max_overlaps = len(overlaps)
return lesk_sense
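# Hedged toy example: the function only needs a mapping from candidate senses
# to signature collections, so plain strings stand in for Synsets here.
context = "i went to the bank to deposit my money".split()
toy_signatures = {
    'bank.n.01': {'river', 'slope', 'water', 'land'},
    'bank.n.02': {'money', 'deposit', 'financial', 'institution'},
}
compare_overlaps_greedy(context, toy_signatures)   # -> 'bank.n.02'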
|
python
|
{
"resource": ""
}
|
q21938
|
compare_overlaps
|
train
|
def compare_overlaps(context: list, synsets_signatures: dict,
nbest=False, keepscore=False, normalizescore=False) -> "wn.Synset":
"""
    Calculates overlaps between the context sentence and the synset_signatures
    and returns a ranked list of synsets from highest overlap to lowest.
    :param context: List of strings, tokenized sentence or document.
    :param synsets_signatures: dict of Synsets and the set of their corresponding signatures.
    :return: The best Synset, or a ranked list (optionally with scores) when nbest=True.
"""
overlaplen_synsets = [] # a tuple of (len(overlap), synset).
for ss in synsets_signatures:
overlaps = set(synsets_signatures[ss]).intersection(context)
overlaplen_synsets.append((len(overlaps), ss))
# Rank synsets from highest to lowest overlap.
ranked_synsets = sorted(overlaplen_synsets, reverse=True)
# Normalize scores such that it's between 0 to 1.
if normalizescore:
total = float(sum(i[0] for i in ranked_synsets))
ranked_synsets = [(i/total,j) for i,j in ranked_synsets]
if not keepscore: # Returns a list of ranked synsets without scores
ranked_synsets = [i[1] for i in sorted(overlaplen_synsets, reverse=True)]
# Returns a ranked list of synsets otherwise only the best sense.
return ranked_synsets if nbest else ranked_synsets[0]
|
python
|
{
"resource": ""
}
|
q21939
|
SemEval2007_Coarse_WSD.fileids
|
train
|
def fileids(self):
""" Returns files from SemEval2007 Coarse-grain All-words WSD task. """
return [os.path.join(self.path,i) for i in os.listdir(self.path)]
|
python
|
{
"resource": ""
}
|
q21940
|
SemEval2007_Coarse_WSD.sents
|
train
|
def sents(self, filename=None):
"""
Returns the file, line by line. Use test_file if no filename specified.
"""
filename = filename if filename else self.test_file
with io.open(filename, 'r') as fin:
for line in fin:
yield line.strip()
|
python
|
{
"resource": ""
}
|
q21941
|
SemEval2007_Coarse_WSD.sentences
|
train
|
def sentences(self):
"""
Returns the instances by sentences, and yields a list of tokens,
similar to the pywsd.semcor.sentences.
>>> coarse_wsd = SemEval2007_Coarse_WSD()
>>> for sent in coarse_wsd.sentences():
        ...     for token in sent:
        ...         print(token)
        ...         break
        ...     break
word(id=None, text=u'Your', offset=None, sentid=0, paraid=u'd001', term=None)
"""
for sentid, ys in enumerate(self.yield_sentences()):
sent, context_sent, context_doc, inst2ans, textid = ys
instances = {}
for instance in sent.findAll('instance'):
instid = instance['id']
lemma = instance['lemma']
word = instance.text
instances[instid] = Instance(instid, lemma, word)
tokens = []
for i in sent: # Iterates through BeautifulSoup object.
if str(i).startswith('<instance'): # BeautifulSoup.Tag
instid = sent.find('instance')['id']
inst = instances[instid]
answer = inst2ans[instid]
term = Term(instid, answer.pos, inst.lemma, answer.sensekey,
type='open')
tokens.append(Word(instid, inst.word,
sentid, textid, term))
else: # if BeautifulSoup.NavigableString
tokens+=[Word(None, w, sentid, textid, None)
for w in i.split()]
yield tokens
|
python
|
{
"resource": ""
}
|
q21942
|
random_sense
|
train
|
def random_sense(ambiguous_word: str, pos=None) -> "wn.Synset":
"""
Returns a random sense.
:param ambiguous_word: String, a single word.
:param pos: String, one of 'a', 'r', 's', 'n', 'v', or None.
:return: A random Synset.
"""
if pos is None:
return custom_random.choice(wn.synsets(ambiguous_word))
else:
return custom_random.choice(wn.synsets(ambiguous_word, pos))
|
python
|
{
"resource": ""
}
|
q21943
|
first_sense
|
train
|
def first_sense(ambiguous_word: str, pos: str = None) -> "wn.Synset":
"""
Returns the first sense.
:param ambiguous_word: String, a single word.
:param pos: String, one of 'a', 'r', 's', 'n', 'v', or None.
:return: The first Synset in the wn.synsets(word) list.
"""
if pos is None:
return wn.synsets(ambiguous_word)[0]
else:
return wn.synsets(ambiguous_word, pos)[0]
|
python
|
{
"resource": ""
}
|
q21944
|
estimate_gaussian
|
train
|
def estimate_gaussian(X):
"""
    Returns the mean and the variance of a data set X, assuming that the
    points come from a Gaussian distribution.
"""
mean = np.mean(X,0)
variance = np.var(X,0)
return Gaussian(mean,variance)
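# Hedged demo (assumes Gaussian is a simple (mean, variance) container):
# X = np.array([[1., 2.], [3., 4.], [5., 6.]])
# estimate_gaussian(X)   # -> mean [3., 4.], variance [2.667, 2.667]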
|
python
|
{
"resource": ""
}
|
q21945
|
dict_max
|
train
|
def dict_max(dic):
"""
    Returns the maximum value of a dictionary, together with one key that maps to it.
"""
aux = dict(map(lambda item: (item[1],item[0]),dic.items()))
    if not aux:
        return 0
max_value = max(aux.keys())
return max_value,aux[max_value]
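# Hedged demo of the return convention: (max value, one key mapping to it).
# dict_max({'a': 3, 'b': 7, 'c': 5})   # -> (7, 'b')
# dict_max({})                         # -> 0 (guarded above)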
|
python
|
{
"resource": ""
}
|
q21946
|
l2norm_squared
|
train
|
def l2norm_squared(a):
"""
    Squared L2 (Frobenius) norm: the sum of squared entries over all columns.
"""
value = 0
for i in xrange(a.shape[1]):
value += np.dot(a[:,i],a[:,i])
return value
|
python
|
{
"resource": ""
}
|
q21947
|
KNNIndex.check_metric
|
train
|
def check_metric(self, metric):
"""Check that the metric is supported by the KNNIndex instance."""
if metric not in self.VALID_METRICS:
raise ValueError(
f"`{self.__class__.__name__}` does not support the `{metric}` "
f"metric. Please choose one of the supported metrics: "
f"{', '.join(self.VALID_METRICS)}."
)
|
python
|
{
"resource": ""
}
|
q21948
|
random
|
train
|
def random(X, n_components=2, random_state=None):
"""Initialize an embedding using samples from an isotropic Gaussian.
Parameters
----------
X: np.ndarray
The data matrix.
n_components: int
The dimension of the embedding space.
random_state: Union[int, RandomState]
If the value is an int, random_state is the seed used by the random
number generator. If the value is a RandomState instance, then it will
be used as the random number generator. If the value is None, the random
number generator is the RandomState instance used by `np.random`.
Returns
-------
initialization: np.ndarray
"""
random_state = check_random_state(random_state)
return random_state.normal(0, 1e-2, (X.shape[0], n_components))
|
python
|
{
"resource": ""
}
|
q21949
|
pca
|
train
|
def pca(X, n_components=2, random_state=None):
"""Initialize an embedding using the top principal components.
Parameters
----------
X: np.ndarray
The data matrix.
n_components: int
The dimension of the embedding space.
random_state: Union[int, RandomState]
If the value is an int, random_state is the seed used by the random
number generator. If the value is a RandomState instance, then it will
be used as the random number generator. If the value is None, the random
number generator is the RandomState instance used by `np.random`.
Returns
-------
initialization: np.ndarray
"""
pca_ = PCA(n_components=n_components, random_state=random_state)
embedding = pca_.fit_transform(X)
# The PCA embedding may have high variance, which leads to poor convergence
normalization = np.std(embedding[:, 0]) * 100
embedding /= normalization
return embedding
|
python
|
{
"resource": ""
}
|
q21950
|
weighted_mean
|
train
|
def weighted_mean(X, embedding, neighbors, distances):
"""Initialize points onto an existing embedding by placing them in the
weighted mean position of their nearest neighbors on the reference embedding.
Parameters
----------
X: np.ndarray
embedding: TSNEEmbedding
neighbors: np.ndarray
distances: np.ndarray
Returns
-------
np.ndarray
"""
n_samples = X.shape[0]
n_components = embedding.shape[1]
partial_embedding = np.zeros((n_samples, n_components))
for i in range(n_samples):
partial_embedding[i] = np.average(
embedding[neighbors[i]], axis=0, weights=distances[i],
)
return partial_embedding
|
python
|
{
"resource": ""
}
|
q21951
|
make_heap_initializer
|
train
|
def make_heap_initializer(dist, dist_args):
"""Create a numba accelerated version of heap initialization for the
alternative k-neighbor graph algorithm. This approach builds two heaps
of neighbors simultaneously, one is a heap used to construct a very
approximate k-neighbor graph for searching; the other is the
initialization for searching.
Parameters
----------
dist: function
A numba JITd distance function which, given two arrays computes a
dissimilarity between them.
dist_args: tuple
Any extra arguments that need to be passed to the distance function
beyond the two arrays to be compared.
Returns
-------
    A numba JITd function for heap initialization that is
specialised to the given metric.
"""
@numba.njit(parallel=True)
def initialize_heaps(data, n_neighbors, leaf_array):
graph_heap = make_heap(data.shape[0], 10)
search_heap = make_heap(data.shape[0], n_neighbors * 2)
tried = set([(-1, -1)])
for n in range(leaf_array.shape[0]):
for i in range(leaf_array.shape[1]):
if leaf_array[n, i] < 0:
break
for j in range(i + 1, leaf_array.shape[1]):
if leaf_array[n, j] < 0:
break
if (leaf_array[n, i], leaf_array[n, j]) in tried:
continue
d = dist(data[leaf_array[n, i]], data[leaf_array[n, j]], *dist_args)
unchecked_heap_push(
graph_heap, leaf_array[n, i], d, leaf_array[n, j], 1
)
unchecked_heap_push(
graph_heap, leaf_array[n, j], d, leaf_array[n, i], 1
)
unchecked_heap_push(
search_heap, leaf_array[n, i], d, leaf_array[n, j], 1
)
unchecked_heap_push(
search_heap, leaf_array[n, j], d, leaf_array[n, i], 1
)
tried.add((leaf_array[n, i], leaf_array[n, j]))
return graph_heap, search_heap
return initialize_heaps
|
python
|
{
"resource": ""
}
|
q21952
|
degree_prune
|
train
|
def degree_prune(graph, max_degree=20):
"""Prune the k-neighbors graph back so that nodes have a maximum
degree of ``max_degree``.
Parameters
----------
graph: sparse matrix
The adjacency matrix of the graph
max_degree: int (optional, default 20)
The maximum degree of any node in the pruned graph
Returns
-------
result: sparse matrix
The pruned graph.
"""
result = graph.tolil()
for i, row_data in enumerate(result.data):
if len(row_data) > max_degree:
cut_value = np.argsort(row_data)[max_degree]
row_data = [x if x <= cut_value else 0.0 for x in row_data]
result.data[i] = row_data
result = result.tocsr()
result.eliminate_zeros()
return result
|
python
|
{
"resource": ""
}
|
q21953
|
NNDescent.query
|
train
|
def query(self, query_data, k=10, queue_size=5.0):
"""Query the training data for the k nearest neighbors
Parameters
----------
query_data: array-like, last dimension self.dim
An array of points to query
k: integer (default = 10)
The number of nearest neighbors to return
queue_size: float (default 5.0)
The multiplier of the internal search queue. This controls the
speed/accuracy tradeoff. Low values will search faster but with
more approximate results. High values will search more
accurately, but will require more computation to do so. Values
should generally be in the range 1.0 to 10.0.
Returns
-------
indices, distances: array (n_query_points, k), array (n_query_points, k)
The first array, ``indices``, provides the indices of the data
points in the training set that are the nearest neighbors of
each query point. Thus ``indices[i, j]`` is the index into the
training data of the jth nearest neighbor of the ith query points.
Similarly ``distances`` provides the distances to the neighbors
of the query points such that ``distances[i, j]`` is the distance
from the ith query point to its jth nearest neighbor in the
training data.
"""
# query_data = check_array(query_data, dtype=np.float64, order='C')
query_data = np.asarray(query_data).astype(np.float32)
init = initialise_search(
self._rp_forest,
self._raw_data,
query_data,
int(k * queue_size),
self._random_init,
self._tree_init,
self.rng_state,
)
result = self._search(
self._raw_data,
self._search_graph.indptr,
self._search_graph.indices,
init,
query_data,
)
indices, dists = deheap_sort(result)
return indices[:, :k], dists[:, :k]
|
python
|
{
"resource": ""
}
|
q21954
|
PyNNDescentTransformer.fit
|
train
|
def fit(self, X):
"""Fit the PyNNDescent transformer to build KNN graphs with
neighbors given by the dataset X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Sample data
Returns
-------
transformer : PyNNDescentTransformer
The trained transformer
"""
self.n_samples_fit = X.shape[0]
if self.metric_kwds is None:
metric_kwds = {}
else:
metric_kwds = self.metric_kwds
self.pynndescent_ = NNDescent(
X,
self.metric,
metric_kwds,
self.n_neighbors,
self.n_trees,
self.leaf_size,
self.pruning_level,
self.tree_init,
self.random_state,
self.algorithm,
self.max_candidates,
self.n_iters,
self.early_termination_value,
self.sampling_rate,
)
return self
|
python
|
{
"resource": ""
}
|
q21955
|
weighted_minkowski
|
train
|
def weighted_minkowski(x, y, w=_mock_identity, p=2):
"""A weighted version of Minkowski distance.
    .. math::
        D(x, y) = \left(\sum_i (w_i |x_i - y_i|)^p\right)^{\frac{1}{p}}
    If weights w_i are the inverse standard deviations of the data in each
    dimension, then this represents a standardised Minkowski distance (and is
    equivalent to standardised Euclidean distance for p=2).
"""
result = 0.0
for i in range(x.shape[0]):
result += (w[i] * np.abs(x[i] - y[i])) ** p
return result ** (1.0 / p)
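# --- Hedged check (plain NumPy, no numba) ---
# With w = 1 / std and p = 2 this matches the standardised Euclidean distance.
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(100, 4))
x, y = data[0], data[1]
std = data.std(axis=0)

d_weighted = weighted_minkowski(x, y, w=1.0 / std, p=2)
d_standardised = np.sqrt(np.sum(((x - y) / std) ** 2))
np.allclose(d_weighted, d_standardised)   # True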
|
python
|
{
"resource": ""
}
|
q21956
|
_handle_nice_params
|
train
|
def _handle_nice_params(optim_params: dict) -> None:
"""Convert the user friendly params into something the optimizer can
understand."""
# Handle callbacks
optim_params["callbacks"] = _check_callbacks(optim_params.get("callbacks"))
optim_params["use_callbacks"] = optim_params["callbacks"] is not None
# Handle negative gradient method
negative_gradient_method = optim_params.pop("negative_gradient_method")
if callable(negative_gradient_method):
negative_gradient_method = negative_gradient_method
elif negative_gradient_method in {"bh", "BH", "barnes-hut"}:
negative_gradient_method = kl_divergence_bh
elif negative_gradient_method in {"fft", "FFT", "interpolation"}:
negative_gradient_method = kl_divergence_fft
else:
raise ValueError("Unrecognized gradient method. Please choose one of "
"the supported methods or provide a valid callback.")
# `gradient_descent` uses the more informative name `objective_function`
optim_params["objective_function"] = negative_gradient_method
# Handle number of jobs
n_jobs = optim_params.get("n_jobs", 1)
if n_jobs < 0:
n_cores = multiprocessing.cpu_count()
# Add negative number of n_jobs to the number of cores, but increment by
# one because -1 indicates using all cores, -2 all except one, and so on
n_jobs = n_cores + n_jobs + 1
# If the number of jobs, after this correction is still <= 0, then the user
# probably thought they had more cores, so we'll default to 1
if n_jobs <= 0:
log.warning("`n_jobs` receieved value %d but only %d cores are available. "
"Defaulting to single job." % (optim_params["n_jobs"], n_cores))
n_jobs = 1
optim_params["n_jobs"] = n_jobs
|
python
|
{
"resource": ""
}
|
q21957
|
PartialTSNEEmbedding.optimize
|
train
|
def optimize(self, n_iter, inplace=False, propagate_exception=False,
**gradient_descent_params):
"""Run optmization on the embedding for a given number of steps.
Parameters
----------
n_iter: int
The number of optimization iterations.
learning_rate: float
The learning rate for t-SNE optimization. Typical values range
between 100 to 1000. Setting the learning rate too low or too high
may result in the points forming a "ball". This is also known as the
crowding problem.
exaggeration: float
The exaggeration factor is used to increase the attractive forces of
nearby points, producing more compact clusters.
momentum: float
Momentum accounts for gradient directions from previous iterations,
resulting in faster convergence.
negative_gradient_method: str
Specifies the negative gradient approximation method to use. For
smaller data sets, the Barnes-Hut approximation is appropriate and
can be set using one of the following aliases: ``bh``, ``BH`` or
``barnes-hut``. For larger data sets, the FFT accelerated
interpolation method is more appropriate and can be set using one of
            the following aliases: ``fft``, ``FFT`` or ``interpolation``.
theta: float
This is the trade-off parameter between speed and accuracy of the
tree approximation method. Typical values range from 0.2 to 0.8. The
value 0 indicates that no approximation is to be made and produces
exact results also producing longer runtime.
n_interpolation_points: int
Only used when ``negative_gradient_method="fft"`` or its other
aliases. The number of interpolation points to use within each grid
cell for interpolation based t-SNE. It is highly recommended leaving
this value at the default 3.
min_num_intervals: int
Only used when ``negative_gradient_method="fft"`` or its other
aliases. The minimum number of grid cells to use, regardless of the
``ints_in_interval`` parameter. Higher values provide more accurate
gradient estimations.
inplace: bool
Whether or not to create a copy of the embedding or to perform
updates inplace.
propagate_exception: bool
The optimization process can be interrupted using callbacks. This
flag indicates whether we should propagate that exception or to
simply stop optimization and return the resulting embedding.
random_state: Union[int, RandomState]
The random state parameter follows the convention used in
scikit-learn. If the value is an int, random_state is the seed used
by the random number generator. If the value is a RandomState
instance, then it will be used as the random number generator. If
the value is None, the random number generator is the RandomState
instance used by `np.random`.
n_jobs: int
The number of threads to use while running t-SNE. This follows the
scikit-learn convention, ``-1`` meaning all processors, ``-2``
meaning all but one, etc.
callbacks: Callable[[int, float, np.ndarray] -> bool]
Callbacks, which will be run every ``callbacks_every_iters``
iterations.
callbacks_every_iters: int
How many iterations should pass between each time the callbacks are
invoked.
Returns
-------
PartialTSNEEmbedding
An optimized partial t-SNE embedding.
Raises
------
OptimizationInterrupt
If a callback stops the optimization and the ``propagate_exception``
flag is set, then an exception is raised.
"""
# Typically we want to return a new embedding and keep the old one intact
if inplace:
embedding = self
else:
embedding = PartialTSNEEmbedding(
np.copy(self),
self.reference_embedding,
self.P,
optimizer=self.optimizer.copy(),
**self.gradient_descent_params,
)
        # If optimization parameters were passed to this function, prefer those
# over the defaults specified in the TSNE object
optim_params = dict(self.gradient_descent_params)
optim_params.update(gradient_descent_params)
_handle_nice_params(optim_params)
optim_params["n_iter"] = n_iter
try:
# Run gradient descent with the embedding optimizer so gains are
# properly updated and kept
error, embedding = embedding.optimizer(
embedding=embedding,
reference_embedding=self.reference_embedding,
P=self.P,
**optim_params,
)
except OptimizationInterrupt as ex:
log.info("Optimization was interrupted with callback.")
if propagate_exception:
raise ex
error, embedding = ex.error, ex.final_embedding
embedding.kl_divergence = error
return embedding
|
python
|
{
"resource": ""
}
|
q21958
|
TSNEEmbedding.transform
|
train
|
def transform(self, X, perplexity=5, initialization="median", k=25,
learning_rate=1, n_iter=100, exaggeration=2, momentum=0):
"""Embed new points into the existing embedding.
This procedure optimizes each point only with respect to the existing
embedding i.e. it ignores any interactions between the points in ``X``
among themselves.
Please see the :ref:`parameter-guide` for more information.
Parameters
----------
X: np.ndarray
The data matrix to be added to the existing embedding.
perplexity: float
Perplexity can be thought of as the continuous :math:`k` number of
nearest neighbors, for which t-SNE will attempt to preserve
distances. However, when transforming, we only consider neighbors in
the existing embedding i.e. each data point is placed into the
embedding, independently of other new data points.
initialization: Union[np.ndarray, str]
The initial point positions to be used in the embedding space. Can
be a precomputed numpy array, ``median``, ``weighted`` or
            ``random``. In all cases, ``median`` or ``weighted`` should be
preferred.
k: int
The number of nearest neighbors to consider when initially placing
            the point onto the embedding. This is different from ``perplexity``
because perplexity affects optimization while this only affects the
initial point positions.
learning_rate: float
The learning rate for t-SNE optimization. Typical values range
between 100 to 1000. Setting the learning rate too low or too high
may result in the points forming a "ball". This is also known as the
crowding problem.
n_iter: int
The number of iterations to run in the normal optimization regime.
Typically, the number of iterations needed when adding new data
points is much lower than with regular optimization.
exaggeration: float
The exaggeration factor to use during the normal optimization phase.
This can be used to form more densely packed clusters and is useful
for large data sets.
momentum: float
The momentum to use during optimization phase.
Returns
-------
PartialTSNEEmbedding
The positions of the new points in the embedding space.
"""
# We check if the affinity `to_new` methods takes the `perplexity`
        # parameter and raise an informative error if not. This happens when the
# user uses a non-standard affinity class e.g. multiscale, then attempts
# to add points via `transform`. These classes take `perplexities` and
# fail
affinity_signature = inspect.signature(self.affinities.to_new)
if "perplexity" not in affinity_signature.parameters:
raise TypeError(
"`transform` currently does not support non `%s` type affinity "
"classes. Please use `prepare_partial` and `optimize` to add "
"points to the embedding." % PerplexityBasedNN.__name__
)
embedding = self.prepare_partial(
X, perplexity=perplexity, initialization=initialization, k=k
)
try:
embedding.optimize(
n_iter=n_iter,
learning_rate=learning_rate,
exaggeration=exaggeration,
momentum=momentum,
inplace=True,
propagate_exception=True,
)
except OptimizationInterrupt as ex:
log.info("Optimization was interrupted with callback.")
embedding = ex.final_embedding
return embedding
|
python
|
{
"resource": ""
}
|
q21959
|
TSNEEmbedding.prepare_partial
|
train
|
def prepare_partial(self, X, initialization="median", k=25, **affinity_params):
"""Prepare a partial embedding which can be optimized.
Parameters
----------
X: np.ndarray
The data matrix to be added to the existing embedding.
initialization: Union[np.ndarray, str]
The initial point positions to be used in the embedding space. Can
be a precomputed numpy array, ``median``, ``weighted`` or
            ``random``. In all cases, ``median`` or ``weighted`` should be
preferred.
k: int
The number of nearest neighbors to consider when initially placing
            the point onto the embedding. This is different from ``perplexity``
because perplexity affects optimization while this only affects the
initial point positions.
**affinity_params: dict
Additional params to be passed to the ``Affinities.to_new`` method.
Please see individual :class:`~openTSNE.affinity.Affinities`
implementations as the parameters differ between implementations.
Returns
-------
PartialTSNEEmbedding
An unoptimized :class:`PartialTSNEEmbedding` object, prepared for
optimization.
"""
P, neighbors, distances = self.affinities.to_new(
X, return_distances=True, **affinity_params
)
# If initial positions are given in an array, use a copy of that
if isinstance(initialization, np.ndarray):
init_checks.num_samples(initialization.shape[0], X.shape[0])
init_checks.num_dimensions(initialization.shape[1], self.shape[1])
embedding = np.array(initialization)
# Random initialization with isotropic normal distribution
elif initialization == "random":
embedding = initialization_scheme.random(X, self.shape[1], self.random_state)
elif initialization == "weighted":
embedding = initialization_scheme.weighted_mean(
X, self, neighbors[:, :k], distances[:, :k]
)
elif initialization == "median":
embedding = initialization_scheme.median(self, neighbors[:, :k])
else:
raise ValueError(f"Unrecognized initialization scheme `{initialization}`.")
return PartialTSNEEmbedding(
embedding,
reference_embedding=self,
P=P,
**self.gradient_descent_params,
)
|
python
|
{
"resource": ""
}
|
q21960
|
TSNE.fit
|
train
|
def fit(self, X):
"""Fit a t-SNE embedding for a given data set.
Runs the standard t-SNE optimization, consisting of the early
exaggeration phase and a normal optimization phase.
Parameters
----------
X: np.ndarray
The data matrix to be embedded.
Returns
-------
TSNEEmbedding
A fully optimized t-SNE embedding.
"""
embedding = self.prepare_initial(X)
try:
            # Early exaggeration with lower momentum to allow points to more
            # easily move around and find their neighbors
embedding.optimize(
n_iter=self.early_exaggeration_iter,
exaggeration=self.early_exaggeration,
momentum=self.initial_momentum,
inplace=True,
propagate_exception=True,
)
# Restore actual affinity probabilities and increase momentum to get
# final, optimized embedding
embedding.optimize(
n_iter=self.n_iter,
exaggeration=self.exaggeration,
momentum=self.final_momentum,
inplace=True,
propagate_exception=True,
)
except OptimizationInterrupt as ex:
log.info("Optimization was interrupted with callback.")
embedding = ex.final_embedding
return embedding
|
python
|
{
"resource": ""
}
|
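The `fit` record above is simply early exaggeration followed by a regular phase, so the same schedule can be spelled out against `prepare_initial` and `optimize`. A rough, hand-tuned equivalent (the iteration counts and momenta below are illustrative, not the class defaults):

import numpy as np
from openTSNE import TSNE  # assumed import path

X = np.random.default_rng(0).normal(size=(500, 50))

emb = TSNE(perplexity=30, random_state=0).prepare_initial(X)
# Phase 1: early exaggeration with lower momentum.
emb.optimize(n_iter=250, exaggeration=12, momentum=0.5, inplace=True)
# Phase 2: regular optimization with higher momentum.
emb.optimize(n_iter=500, exaggeration=None, momentum=0.8, inplace=True)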
q21961
|
TSNE.prepare_initial
|
train
|
def prepare_initial(self, X):
"""Prepare the initial embedding which can be optimized as needed.
Parameters
----------
X: np.ndarray
The data matrix to be embedded.
Returns
-------
TSNEEmbedding
An unoptimized :class:`TSNEEmbedding` object, prepared for
optimization.
"""
# If initial positions are given in an array, use a copy of that
if isinstance(self.initialization, np.ndarray):
init_checks.num_samples(self.initialization.shape[0], X.shape[0])
init_checks.num_dimensions(self.initialization.shape[1], self.n_components)
embedding = np.array(self.initialization)
variance = np.var(embedding, axis=0)
if any(variance > 1e-4):
log.warning(
"Variance of embedding is greater than 0.0001. Initial "
"embeddings with high variance may have display poor convergence."
)
elif self.initialization == "pca":
embedding = initialization_scheme.pca(
X, self.n_components, random_state=self.random_state
)
elif self.initialization == "random":
embedding = initialization_scheme.random(
X, self.n_components, random_state=self.random_state
)
else:
raise ValueError(
f"Unrecognized initialization scheme `{self.initialization}`."
)
affinities = PerplexityBasedNN(
X,
self.perplexity,
method=self.neighbors_method,
metric=self.metric,
metric_params=self.metric_params,
n_jobs=self.n_jobs,
random_state=self.random_state,
)
gradient_descent_params = {
# Degrees of freedom of the Student's t-distribution. The
# suggestion degrees_of_freedom = n_components - 1 comes from [3]_.
"dof": max(self.n_components - 1, 1),
"negative_gradient_method": self.negative_gradient_method,
"learning_rate": self.learning_rate,
# By default, use the momentum used in unexaggerated phase
"momentum": self.final_momentum,
# Barnes-Hut params
"theta": self.theta,
# Interpolation params
"n_interpolation_points": self.n_interpolation_points,
"min_num_intervals": self.min_num_intervals,
"ints_in_interval": self.ints_in_interval,
"n_jobs": self.n_jobs,
# Callback params
"callbacks": self.callbacks,
"callbacks_every_iters": self.callbacks_every_iters,
}
return TSNEEmbedding(
embedding,
affinities=affinities,
random_state=self.random_state,
**gradient_descent_params,
)
|
python
|
{
"resource": ""
}
|
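`prepare_initial` warns when a user-supplied initialization has per-dimension variance above 1e-4, so a precomputed layout is usually rescaled first. A small numpy-only sketch of such a rescaling (the helper name is ours, not part of the library):

import numpy as np

def rescale_initialization(init, target_std=1e-4):
    """Center a precomputed layout and shrink each dimension to a tiny spread."""
    init = np.asarray(init, dtype=float)
    init = init - init.mean(axis=0)
    return init / init.std(axis=0) * target_std

layout = np.random.default_rng(0).normal(size=(500, 2))
layout = rescale_initialization(layout)
print(np.var(layout, axis=0))  # ~1e-8 per dimension, well under the 1e-4 threshold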
q21962
|
PerplexityBasedNN.set_perplexity
|
train
|
def set_perplexity(self, new_perplexity):
"""Change the perplexity of the affinity matrix.
Note that we only allow lowering the perplexity or restoring it to its
original value. This restriction exists because setting a higher
perplexity value requires recomputing all the nearest neighbors, which
can take a long time. To avoid potential confusion as to why execution
time is slow, this is not allowed. If you would like to increase the
perplexity above the initial value, simply create a new instance.
Parameters
----------
new_perplexity: float
The new perplexity.
"""
# If the value hasn't changed, there's nothing to do
if new_perplexity == self.perplexity:
return
# Verify that the perplexity isn't too large
new_perplexity = self.check_perplexity(new_perplexity)
# Recompute the affinity matrix
k_neighbors = min(self.n_samples - 1, int(3 * new_perplexity))
if k_neighbors > self.__neighbors.shape[1]:
raise RuntimeError(
"The desired perplexity `%.2f` is larger than the initial one "
"used. This would need to recompute the nearest neighbors, "
"which is not efficient. Please create a new `%s` instance "
"with the increased perplexity."
% (new_perplexity, self.__class__.__name__)
)
self.perplexity = new_perplexity
self.P = joint_probabilities_nn(
self.__neighbors[:, :k_neighbors],
self.__distances[:, :k_neighbors],
[self.perplexity],
symmetrize=True,
n_jobs=self.n_jobs,
)
|
python
|
{
"resource": ""
}
|
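A short sketch of the contract described above: perplexity can only be lowered (or restored) without recomputing neighbors, since roughly 3 * perplexity neighbors are cached at construction time. Assumes `PerplexityBasedNN` is importable from `openTSNE.affinity`:

import numpy as np
from openTSNE.affinity import PerplexityBasedNN  # assumed module path

X = np.random.default_rng(0).normal(size=(1000, 20))

aff = PerplexityBasedNN(X, perplexity=50)  # caches about 3 * 50 neighbors per point
aff.set_perplexity(20)    # fine: needs fewer neighbors than are cached
aff.set_perplexity(50)    # fine: restores the original value
# aff.set_perplexity(100)  # would raise RuntimeError: more neighbors would be needed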
q21963
|
MultiscaleMixture.set_perplexities
|
train
|
def set_perplexities(self, new_perplexities):
"""Change the perplexities of the affinity matrix.
Note that we only allow lowering the perplexities or restoring them to
their original maximum value. This restriction exists because setting a
higher perplexity value requires recomputing all the nearest neighbors,
which can take a long time. To avoid potential confusion as to why
execution time is slow, this is not allowed. If you would like to
increase the perplexity above the initial value, simply create a new
instance.
Parameters
----------
new_perplexities: List[float]
The new list of perplexities.
"""
if np.array_equal(self.perplexities, new_perplexities):
return
new_perplexities = self.check_perplexities(new_perplexities)
max_perplexity = np.max(new_perplexities)
k_neighbors = min(self.n_samples - 1, int(3 * max_perplexity))
if k_neighbors > self.__neighbors.shape[1]:
raise RuntimeError(
"The largest perplexity `%.2f` is larger than the initial one "
"used. This would need to recompute the nearest neighbors, "
"which is not efficient. Please create a new `%s` instance "
"with the increased perplexity."
% (max_perplexity, self.__class__.__name__)
)
self.perplexities = new_perplexities
self.P = self._calculate_P(
self.__neighbors[:, :k_neighbors],
self.__distances[:, :k_neighbors],
self.perplexities,
symmetrize=True,
n_jobs=self.n_jobs,
)
|
python
|
{
"resource": ""
}
|
q21964
|
euclidean_random_projection_split
|
train
|
def euclidean_random_projection_split(data, indices, rng_state):
"""Given a set of ``indices`` for data points from ``data``, create
    a random hyperplane to split the data, returning two arrays of indices
that fall on either side of the hyperplane. This is the basis for a
random projection tree, which simply uses this splitting recursively.
This particular split uses euclidean distance to determine the hyperplane
and which side each data sample falls on.
Parameters
----------
data: array of shape (n_samples, n_features)
The original data to be split
indices: array of shape (tree_node_size,)
The indices of the elements in the ``data`` array that are to
be split in the current operation.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
indices_left: array
The elements of ``indices`` that fall on the "left" side of the
random hyperplane.
indices_right: array
        The elements of ``indices`` that fall on the "right" side of the
random hyperplane.
"""
dim = data.shape[1]
# Select two random points, set the hyperplane between them
left_index = tau_rand_int(rng_state) % indices.shape[0]
right_index = tau_rand_int(rng_state) % indices.shape[0]
right_index += left_index == right_index
right_index = right_index % indices.shape[0]
left = indices[left_index]
right = indices[right_index]
# Compute the normal vector to the hyperplane (the vector between
# the two points) and the offset from the origin
hyperplane_offset = 0.0
hyperplane_vector = np.empty(dim, dtype=np.float32)
for d in range(dim):
hyperplane_vector[d] = data[left, d] - data[right, d]
hyperplane_offset -= (
hyperplane_vector[d] * (data[left, d] + data[right, d]) / 2.0
)
# For each point compute the margin (project into normal vector, add offset)
# If we are on lower side of the hyperplane put in one pile, otherwise
# put it in the other pile (if we hit hyperplane on the nose, flip a coin)
n_left = 0
n_right = 0
side = np.empty(indices.shape[0], np.int8)
for i in range(indices.shape[0]):
margin = hyperplane_offset
for d in range(dim):
margin += hyperplane_vector[d] * data[indices[i], d]
if margin == 0:
side[i] = tau_rand_int(rng_state) % 2
if side[i] == 0:
n_left += 1
else:
n_right += 1
elif margin > 0:
side[i] = 0
n_left += 1
else:
side[i] = 1
n_right += 1
# Now that we have the counts allocate arrays
indices_left = np.empty(n_left, dtype=np.int64)
indices_right = np.empty(n_right, dtype=np.int64)
# Populate the arrays with indices according to which side they fell on
n_left = 0
n_right = 0
for i in range(side.shape[0]):
if side[i] == 0:
indices_left[n_left] = indices[i]
n_left += 1
else:
indices_right[n_right] = indices[i]
n_right += 1
return indices_left, indices_right, hyperplane_vector, hyperplane_offset
|
python
|
{
"resource": ""
}
|
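The geometry above is compact enough to restate in plain numpy: the hyperplane normal is the vector between two sampled points, the offset puts the plane through their midpoint, and the sign of the margin picks the side. The helper below is an illustrative vectorized restatement, not the numba routine itself (ties at margin == 0 are left out):

import numpy as np

def split_by_hyperplane(data, indices, rng):
    left, right = rng.choice(indices, size=2, replace=False)
    normal = data[left] - data[right]
    offset = -normal @ ((data[left] + data[right]) / 2.0)  # plane through the midpoint
    margins = data[indices] @ normal + offset
    return indices[margins > 0], indices[margins <= 0]

rng = np.random.default_rng(0)
data = rng.normal(size=(100, 5)).astype(np.float32)
indices = np.arange(100)
left_idx, right_idx = split_by_hyperplane(data, indices, rng)
print(len(left_idx), len(right_idx))  # roughly balanced on random data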
q21965
|
get_acf
|
train
|
def get_acf(x, axis=0, fast=False):
"""
Estimate the autocorrelation function of a time series using the FFT.
:param x:
The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for every
other axis.
:param axis: (optional)
The time axis of ``x``. Assumed to be the first axis if not specified.
:param fast: (optional)
If ``True``, only use the largest ``2^n`` entries for efficiency.
(default: False)
"""
x = np.atleast_1d(x)
m = [slice(None), ] * len(x.shape)
# For computational efficiency, crop the chain to the largest power of
# two if requested.
if fast:
n = int(2 ** np.floor(np.log2(x.shape[axis])))
m[axis] = slice(0, n)
        x = x[tuple(m)]
else:
n = x.shape[axis]
# Compute the FFT and then (from that) the auto-correlation function.
f = np.fft.fft(x - np.mean(x, axis=axis), n=2 * n, axis=axis)
m[axis] = slice(0, n)
acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
m[axis] = 0
return acf / acf[tuple(m)]
|
python
|
{
"resource": ""
}
|
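A quick sanity check of the estimator above on an AR(1) series, whose true autocorrelation at lag k is phi**k; it assumes `get_acf` from the record above is in scope, and the variable names are local to the example:

import numpy as np

rng = np.random.default_rng(0)
phi, n = 0.9, 200_000
x = np.empty(n)
x[0] = rng.normal()
for t in range(1, n):
    x[t] = phi * x[t - 1] + rng.normal()

acf = get_acf(x)
print(acf[0])        # 1.0 by construction (normalized at lag 0)
print(acf[1], phi)   # lag-1 estimate should sit close to 0.9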
q21966
|
get_integrated_act
|
train
|
def get_integrated_act(x, axis=0, window=50, fast=False):
"""
Estimate the integrated autocorrelation time of a time series.
See `Sokal's notes <http://www.stat.unc.edu/faculty/cji/Sokal.pdf>`_ on
MCMC and sample estimators for autocorrelation times.
:param x:
The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for every
other axis.
:param axis: (optional)
The time axis of ``x``. Assumed to be the first axis if not specified.
:param window: (optional)
The size of the window to use. (default: 50)
:param fast: (optional)
If ``True``, only use the largest ``2^n`` entries for efficiency.
(default: False)
"""
# Compute the autocorrelation function.
f = get_acf(x, axis=axis, fast=fast)
# Special case 1D for simplicity.
if len(f.shape) == 1:
return 1 + 2 * np.sum(f[1:window])
# N-dimensional case.
m = [slice(None), ] * len(f.shape)
m[axis] = slice(1, window)
tau = 1 + 2 * np.sum(f[tuple(m)], axis=axis)
return tau
|
python
|
{
"resource": ""
}
|
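The windowed sum above implements tau = 1 + 2 * (sum of autocorrelations up to the window). For the AR(1) sketch from the previous note the theoretical value is (1 + phi) / (1 - phi), which gives a concrete target; this continues that example:

phi = 0.9
tau_theory = (1 + phi) / (1 - phi)           # 19.0 for phi = 0.9
tau_est = get_integrated_act(x, window=200)  # x from the AR(1) sketch above
print(tau_est, tau_theory)                   # the estimate should land near 19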
q21967
|
thermodynamic_integration_log_evidence
|
train
|
def thermodynamic_integration_log_evidence(betas, logls):
"""
Thermodynamic integration estimate of the evidence.
:param betas: The inverse temperatures to use for the quadrature.
:param logls: The mean log-likelihoods corresponding to ``betas`` to use for
computing the thermodynamic evidence.
:return ``(logZ, dlogZ)``: Returns an estimate of the
log-evidence and the error associated with the finite
number of temperatures at which the posterior has been
sampled.
The evidence is the integral of the un-normalized posterior
over all of parameter space:
.. math::
Z \\equiv \\int d\\theta \\, l(\\theta) p(\\theta)
    Thermodynamic integration is a technique for estimating the
evidence integral using information from the chains at various
temperatures. Let
.. math::
Z(\\beta) = \\int d\\theta \\, l^\\beta(\\theta) p(\\theta)
Then
.. math::
\\frac{d \\log Z}{d \\beta}
= \\frac{1}{Z(\\beta)} \\int d\\theta l^\\beta p \\log l
= \\left \\langle \\log l \\right \\rangle_\\beta
so
.. math::
\\log Z(1) - \\log Z(0)
= \\int_0^1 d\\beta \\left \\langle \\log l \\right\\rangle_\\beta
By computing the average of the log-likelihood at the
    different temperatures, the sampler can approximate the above
integral.
"""
if len(betas) != len(logls):
raise ValueError('Need the same number of log(L) values as temperatures.')
order = np.argsort(betas)[::-1]
betas = betas[order]
logls = logls[order]
betas0 = np.copy(betas)
if betas[-1] != 0:
betas = np.concatenate((betas0, [0]))
betas2 = np.concatenate((betas0[::2], [0]))
# Duplicate mean log-likelihood of hottest chain as a best guess for beta = 0.
logls2 = np.concatenate((logls[::2], [logls[-1]]))
logls = np.concatenate((logls, [logls[-1]]))
else:
betas2 = np.concatenate((betas0[:-1:2], [0]))
logls2 = np.concatenate((logls[:-1:2], [logls[-1]]))
logZ = -np.trapz(logls, betas)
logZ2 = -np.trapz(logls2, betas2)
return logZ, np.abs(logZ - logZ2)
|
python
|
{
"resource": ""
}
|
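The quadrature above can be checked against a case with a closed form: a standard normal prior and a zero-mean Gaussian likelihood of variance sigma2, for which the mean log-likelihood at inverse temperature beta and the true evidence are both analytic. A sketch, assuming the function from the record above is in scope:

import numpy as np

sigma2 = 0.5                        # illustrative likelihood variance
betas = np.linspace(1.0, 0.0, 64)   # inverse temperatures, coldest (beta=1) first

# <log l>_beta for this model: the tempered posterior is Gaussian with variance
# 1 / (1 + beta / sigma2), and log l(theta) = -0.5*log(2*pi*sigma2) - theta^2 / (2*sigma2).
v_beta = 1.0 / (1.0 + betas / sigma2)
mean_logls = -0.5 * np.log(2 * np.pi * sigma2) - v_beta / (2 * sigma2)

logZ, dlogZ = thermodynamic_integration_log_evidence(betas, mean_logls)
print(logZ, -0.5 * np.log(2 * np.pi * (1 + sigma2)))  # the two should agree closely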
q21968
|
find_frequent_patterns
|
train
|
def find_frequent_patterns(transactions, support_threshold):
"""
Given a set of transactions, find the patterns in it
over the specified support threshold.
"""
tree = FPTree(transactions, support_threshold, None, None)
return tree.mine_patterns(support_threshold)
|
python
|
{
"resource": ""
}
|
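A toy run of the entry point above on market-basket style transactions; the threshold is an absolute support count, and the `FPTree`/`FPNode` classes from the following records are assumed to be in scope:

transactions = [
    ["bread", "milk"],
    ["bread", "diapers", "beer", "eggs"],
    ["milk", "diapers", "beer", "cola"],
    ["bread", "milk", "diapers", "beer"],
    ["bread", "milk", "diapers", "cola"],
]

# Maps frequent itemsets (as sorted tuples) to their support counts,
# e.g. ('beer', 'diapers') occurs in three of the five transactions.
patterns = find_frequent_patterns(transactions, support_threshold=3)
for itemset, support in sorted(patterns.items()):
    print(itemset, support)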
q21969
|
FPNode.has_child
|
train
|
def has_child(self, value):
"""
Check if node has a particular child node.
"""
for node in self.children:
if node.value == value:
return True
return False
|
python
|
{
"resource": ""
}
|
q21970
|
FPNode.get_child
|
train
|
def get_child(self, value):
"""
Return a child node with a particular value.
"""
for node in self.children:
if node.value == value:
return node
return None
|
python
|
{
"resource": ""
}
|
q21971
|
FPNode.add_child
|
train
|
def add_child(self, value):
"""
Add a node as a child node.
"""
child = FPNode(value, 1, self)
self.children.append(child)
return child
|
python
|
{
"resource": ""
}
|
q21972
|
FPTree.find_frequent_items
|
train
|
def find_frequent_items(transactions, threshold):
"""
Create a dictionary of items with occurrences above the threshold.
"""
items = {}
for transaction in transactions:
for item in transaction:
if item in items:
items[item] += 1
else:
items[item] = 1
for key in list(items.keys()):
if items[key] < threshold:
del items[key]
return items
|
python
|
{
"resource": ""
}
|
q21973
|
FPTree.build_fptree
|
train
|
def build_fptree(self, transactions, root_value,
root_count, frequent, headers):
"""
Build the FP tree and return the root node.
"""
root = FPNode(root_value, root_count, None)
for transaction in transactions:
sorted_items = [x for x in transaction if x in frequent]
sorted_items.sort(key=lambda x: frequent[x], reverse=True)
if len(sorted_items) > 0:
self.insert_tree(sorted_items, root, headers)
return root
|
python
|
{
"resource": ""
}
|
q21974
|
FPTree.insert_tree
|
train
|
def insert_tree(self, items, node, headers):
"""
Recursively grow FP tree.
"""
first = items[0]
child = node.get_child(first)
if child is not None:
child.count += 1
else:
# Add new child.
child = node.add_child(first)
# Link it to header structure.
if headers[first] is None:
headers[first] = child
else:
current = headers[first]
while current.link is not None:
current = current.link
current.link = child
# Call function recursively.
remaining_items = items[1:]
if len(remaining_items) > 0:
self.insert_tree(remaining_items, child, headers)
|
python
|
{
"resource": ""
}
|
q21975
|
FPTree.tree_has_single_path
|
train
|
def tree_has_single_path(self, node):
"""
If there is a single path in the tree,
return True, else return False.
"""
num_children = len(node.children)
if num_children > 1:
return False
elif num_children == 0:
return True
else:
            return self.tree_has_single_path(node.children[0])
|
python
|
{
"resource": ""
}
|
q21976
|
FPTree.mine_patterns
|
train
|
def mine_patterns(self, threshold):
"""
Mine the constructed FP tree for frequent patterns.
"""
if self.tree_has_single_path(self.root):
return self.generate_pattern_list()
else:
return self.zip_patterns(self.mine_sub_trees(threshold))
|
python
|
{
"resource": ""
}
|
q21977
|
FPTree.zip_patterns
|
train
|
def zip_patterns(self, patterns):
"""
Append suffix to patterns in dictionary if
we are in a conditional FP tree.
"""
suffix = self.root.value
if suffix is not None:
# We are in a conditional tree.
new_patterns = {}
for key in patterns.keys():
new_patterns[tuple(sorted(list(key) + [suffix]))] = patterns[key]
return new_patterns
return patterns
|
python
|
{
"resource": ""
}
|
q21978
|
FPTree.generate_pattern_list
|
train
|
def generate_pattern_list(self):
"""
Generate a list of patterns with support counts.
"""
patterns = {}
items = self.frequent.keys()
# If we are in a conditional tree,
# the suffix is a pattern on its own.
if self.root.value is None:
suffix_value = []
else:
suffix_value = [self.root.value]
patterns[tuple(suffix_value)] = self.root.count
for i in range(1, len(items) + 1):
for subset in itertools.combinations(items, i):
pattern = tuple(sorted(list(subset) + suffix_value))
patterns[pattern] = \
min([self.frequent[x] for x in subset])
return patterns
|
python
|
{
"resource": ""
}
|
q21979
|
FPTree.mine_sub_trees
|
train
|
def mine_sub_trees(self, threshold):
"""
Generate subtrees and mine them for patterns.
"""
patterns = {}
mining_order = sorted(self.frequent.keys(),
key=lambda x: self.frequent[x])
# Get items in tree in reverse order of occurrences.
for item in mining_order:
suffixes = []
conditional_tree_input = []
node = self.headers[item]
# Follow node links to get a list of
# all occurrences of a certain item.
while node is not None:
suffixes.append(node)
node = node.link
# For each occurrence of the item,
# trace the path back to the root node.
for suffix in suffixes:
frequency = suffix.count
path = []
parent = suffix.parent
while parent.parent is not None:
path.append(parent.value)
parent = parent.parent
for i in range(frequency):
conditional_tree_input.append(path)
# Now we have the input for a subtree,
# so construct it and grab the patterns.
subtree = FPTree(conditional_tree_input, threshold,
item, self.frequent[item])
subtree_patterns = subtree.mine_patterns(threshold)
# Insert subtree patterns into main patterns dictionary.
for pattern in subtree_patterns.keys():
if pattern in patterns:
patterns[pattern] += subtree_patterns[pattern]
else:
patterns[pattern] = subtree_patterns[pattern]
return patterns
|
python
|
{
"resource": ""
}
|
q21980
|
rm_subtitles
|
train
|
def rm_subtitles(path):
""" delete all subtitles in path recursively
"""
sub_exts = ['ass', 'srt', 'sub']
count = 0
for root, dirs, files in os.walk(path):
for f in files:
_, ext = os.path.splitext(f)
ext = ext[1:]
if ext in sub_exts:
p = os.path.join(root, f)
count += 1
print('Delete {}'.format(p))
os.remove(p)
return count
|
python
|
{
"resource": ""
}
|
q21981
|
mv_videos
|
train
|
def mv_videos(path):
""" move videos in sub-directory of path to path.
"""
count = 0
for f in os.listdir(path):
f = os.path.join(path, f)
if os.path.isdir(f):
for sf in os.listdir(f):
sf = os.path.join(f, sf)
if os.path.isfile(sf):
new_name = os.path.join(path, os.path.basename(sf))
try:
os.rename(sf, new_name)
                    except OSError as e:
print('mv {} happens error: {}'.format(sf, e))
else:
count += 1
print('mv {} to {}'.format(sf, new_name))
return count
|
python
|
{
"resource": ""
}
|
q21982
|
ZimukuSubSearcher._get_subinfo_list
|
train
|
def _get_subinfo_list(self, videoname):
""" return subinfo_list of videoname
"""
# searching subtitles
res = self.session.get(self.API, params={'q': videoname})
doc = res.content
referer = res.url
subgroups = self._parse_search_results_html(doc)
if not subgroups:
            return [], None
subgroup = self._filter_subgroup(subgroups)
# get subtitles
headers = {
'Referer': referer
}
res = self.session.get(self._join_url(self.API, subgroup['link']), headers=headers)
doc = res.content
referer = res.url
subinfo_list = self._parse_sublist_html(doc)
for subinfo in subinfo_list:
subinfo['link'] = self._join_url(res.url, subinfo['link'])
return subinfo_list, referer
|
python
|
{
"resource": ""
}
|
q21983
|
register_subsearcher
|
train
|
def register_subsearcher(name, subsearcher_cls):
""" register a subsearcher, the `name` is a key used for searching subsearchers.
if the subsearcher named `name` already exists, then it's will overrite the old subsearcher.
"""
if not issubclass(subsearcher_cls, BaseSubSearcher):
raise ValueError(
'{} is not a subclass of BaseSubSearcher'.format(subsearcher_cls))
registered_subsearchers[name] = subsearcher_cls
|
python
|
{
"resource": ""
}
|
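A sketch of plugging a custom searcher into the registry above; `MySubSearcher` is hypothetical and exists only to show the expected subclassing shape, with the real search logic omitted:

class MySubSearcher(BaseSubSearcher):
    """A do-nothing subclass used purely to illustrate registration (hypothetical)."""
    pass

register_subsearcher('my-searcher', MySubSearcher)
# register_subsearcher('bad', dict)  # would raise ValueError: not a BaseSubSearcher subclass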
q21984
|
BaseSubSearcher._get_videoname
|
train
|
def _get_videoname(cls, videofile):
"""parse the `videofile` and return it's basename
"""
name = os.path.basename(videofile)
name = os.path.splitext(name)[0]
return name
|
python
|
{
"resource": ""
}
|
q21985
|
connect
|
train
|
def connect(
database: Union[str, Path], *, loop: asyncio.AbstractEventLoop = None, **kwargs: Any
) -> Connection:
"""Create and return a connection proxy to the sqlite database."""
if loop is None:
loop = asyncio.get_event_loop()
def connector() -> sqlite3.Connection:
if isinstance(database, str):
loc = database
elif isinstance(database, bytes):
loc = database.decode("utf-8")
else:
loc = str(database)
return sqlite3.connect(loc, **kwargs)
return Connection(connector, loop)
|
python
|
{
"resource": ""
}
|
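An end-to-end sketch for the proxy above against an in-memory database. It leans only on methods shown in the surrounding records (`cursor`, `executemany`, `execute_fetchall`) plus the assumption, as in the released aiosqlite package, that the returned `Connection` can be used as an async context manager:

import asyncio

async def main():
    async with connect(":memory:") as db:  # assumed async-context-manager support
        cur = await db.cursor()
        await cur.execute("CREATE TABLE t (x INTEGER)")
        await db.executemany("INSERT INTO t VALUES (?)", [(1,), (2,), (3,)])
        rows = await db.execute_fetchall("SELECT x FROM t ORDER BY x")
        print([tuple(r) for r in rows])  # [(1,), (2,), (3,)]

asyncio.run(main())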
q21986
|
Cursor._execute
|
train
|
async def _execute(self, fn, *args, **kwargs):
"""Execute the given function on the shared connection's thread."""
return await self._conn._execute(fn, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q21987
|
Cursor.execute
|
train
|
async def execute(self, sql: str, parameters: Iterable[Any] = None) -> None:
"""Execute the given query."""
if parameters is None:
parameters = []
await self._execute(self._cursor.execute, sql, parameters)
|
python
|
{
"resource": ""
}
|
q21988
|
Cursor.executemany
|
train
|
async def executemany(self, sql: str, parameters: Iterable[Iterable[Any]]) -> None:
"""Execute the given multiquery."""
await self._execute(self._cursor.executemany, sql, parameters)
|
python
|
{
"resource": ""
}
|
q21989
|
Cursor.executescript
|
train
|
async def executescript(self, sql_script: str) -> None:
"""Execute a user script."""
await self._execute(self._cursor.executescript, sql_script)
|
python
|
{
"resource": ""
}
|
q21990
|
Cursor.fetchone
|
train
|
async def fetchone(self) -> Optional[sqlite3.Row]:
"""Fetch a single row."""
return await self._execute(self._cursor.fetchone)
|
python
|
{
"resource": ""
}
|
q21991
|
Cursor.fetchmany
|
train
|
async def fetchmany(self, size: int = None) -> Iterable[sqlite3.Row]:
"""Fetch up to `cursor.arraysize` number of rows."""
args = () # type: Tuple[int, ...]
if size is not None:
args = (size,)
return await self._execute(self._cursor.fetchmany, *args)
|
python
|
{
"resource": ""
}
|
q21992
|
Cursor.fetchall
|
train
|
async def fetchall(self) -> Iterable[sqlite3.Row]:
"""Fetch all remaining rows."""
return await self._execute(self._cursor.fetchall)
|
python
|
{
"resource": ""
}
|
q21993
|
Connection.run
|
train
|
def run(self) -> None:
"""Execute function calls on a separate thread."""
while self._running:
try:
future, function = self._tx.get(timeout=0.1)
except Empty:
continue
try:
LOG.debug("executing %s", function)
result = function()
LOG.debug("returning %s", result)
self._loop.call_soon_threadsafe(future.set_result, result)
except BaseException as e:
LOG.exception("returning exception %s", e)
self._loop.call_soon_threadsafe(future.set_exception, e)
|
python
|
{
"resource": ""
}
|
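The record above is the heart of the proxy: a worker thread pulls (future, function) pairs off a queue, runs the blocking call, and hands the result back to the event loop with call_soon_threadsafe. A stripped-down, standalone illustration of that pattern (not aiosqlite code):

import asyncio
import threading
from functools import partial
from queue import Empty, Queue

tx = Queue()

def worker(loop):
    while True:  # a real implementation would also honour a shutdown flag
        try:
            future, function = tx.get(timeout=0.1)
        except Empty:
            continue
        try:
            loop.call_soon_threadsafe(future.set_result, function())
        except BaseException as e:
            loop.call_soon_threadsafe(future.set_exception, e)

async def run_in_worker(fn, *args):
    loop = asyncio.get_running_loop()
    future = loop.create_future()
    tx.put_nowait((future, partial(fn, *args)))
    return await future

async def main():
    threading.Thread(target=worker, args=(asyncio.get_running_loop(),), daemon=True).start()
    print(await run_in_worker(sum, [1, 2, 3]))  # 6

asyncio.run(main())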
q21994
|
Connection._execute
|
train
|
async def _execute(self, fn, *args, **kwargs):
"""Queue a function with the given arguments for execution."""
function = partial(fn, *args, **kwargs)
future = self._loop.create_future()
self._tx.put_nowait((future, function))
return await future
|
python
|
{
"resource": ""
}
|
q21995
|
Connection._connect
|
train
|
async def _connect(self) -> "Connection":
"""Connect to the actual sqlite database."""
if self._connection is None:
self._connection = await self._execute(self._connector)
return self
|
python
|
{
"resource": ""
}
|
q21996
|
Connection.cursor
|
train
|
async def cursor(self) -> Cursor:
"""Create an aiosqlite cursor wrapping a sqlite3 cursor object."""
return Cursor(self, await self._execute(self._conn.cursor))
|
python
|
{
"resource": ""
}
|
q21997
|
Connection.execute_insert
|
train
|
async def execute_insert(
self, sql: str, parameters: Iterable[Any] = None
) -> Optional[sqlite3.Row]:
"""Helper to insert and get the last_insert_rowid."""
if parameters is None:
parameters = []
return await self._execute(self._execute_insert, sql, parameters)
|
python
|
{
"resource": ""
}
|
q21998
|
Connection.execute_fetchall
|
train
|
async def execute_fetchall(
self, sql: str, parameters: Iterable[Any] = None
) -> Iterable[sqlite3.Row]:
"""Helper to execute a query and return all the data."""
if parameters is None:
parameters = []
return await self._execute(self._execute_fetchall, sql, parameters)
|
python
|
{
"resource": ""
}
|
q21999
|
Connection.executemany
|
train
|
async def executemany(
self, sql: str, parameters: Iterable[Iterable[Any]]
) -> Cursor:
"""Helper to create a cursor and execute the given multiquery."""
cursor = await self._execute(self._conn.executemany, sql, parameters)
return Cursor(self, cursor)
|
python
|
{
"resource": ""
}
|