source_code stringlengths 45 7.24k | function_name stringlengths 1 111 | messages list |
|---|---|---|
def x(subj_motions, messages=True):
dr = data_resources['cmu_mocap_full']
cmu_url = dr['urls'][0]
subjects_num = subj_motions[0]
motions_num = subj_motions[1]
resource = {'urls': [], 'files': []}
subjects = []
motions = [list() for _ in range(len(subjects_num))]
for i in range(len(subjects_num)):
curSubj = str(int(subjects_num[i]))
if int(subjects_num[i]) < 10:
curSubj = '0' + curSubj
subjects.append(curSubj)
for j in range(len(motions_num[i])):
curMot = str(int(motions_num[i][j]))
if int(motions_num[i][j]) < 10:
curMot = '0' + curMot
motions[i].append(curMot)
all_skels = []
assert len(subjects) == len(motions)
all_motions = []
for i in range(len(subjects)):
skel_dir = os.path.join(data_path, 'cmu_mocap')
cur_skel_file = os.path.join(skel_dir, subjects[i] + '.asf')
url_required = False
file_download = []
if not os.path.exists(cur_skel_file):
if not os.path.isdir(skel_dir):
os.makedirs(skel_dir)
url_required = True
file_download.append(subjects[i] + '.asf')
for j in range(len(motions[i])):
file_name = subjects[i] + '_' + motions[i][j] + '.amc'
cur_motion_file = os.path.join(skel_dir, file_name)
if not os.path.exists(cur_motion_file):
url_required = True
file_download.append(subjects[i] + '_' + motions[i][j] + '.amc'
)
if url_required:
resource['urls'].append(cmu_url + '/' + subjects[i] + '/')
resource['files'].append(file_download)
return resource
| cmu_urls_files | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(subj_motions, messages=True):\n dr = data_resources['cmu_mocap_full']\n cmu_url = dr['urls'][0]\n subjects_num = subj_motions[0]\n motions_num = su... |
def x(season='1314', data_set='football_data'):
def x(string):
league_dict = {'E0': 0, 'E1': 1, 'E2': 2, 'E3': 3, 'EC': 4}
return league_dict[string]
def x(string):
if string in football_dict:
return football_dict[string]
else:
football_dict[string] = len(football_dict) + 1
return len(football_dict) + 1
data_set_season = data_set + '_' + season
data_resources[data_set_season] = copy.deepcopy(data_resources[data_set])
data_resources[data_set_season]['urls'][0] += season + '/'
start_year = int(season[0:2])
end_year = int(season[2:4])
files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv']
if start_year > 4 and start_year < 93:
files += ['EC.csv']
data_resources[data_set_season]['files'] = [files]
if not data_available(data_set_season):
download_data(data_set_season)
from matplotlib import pyplot as pb
for file in reversed(files):
filename = os.path.join(data_path, data_set_season, file)
writename = os.path.join(data_path, data_set_season, 'temp.csv')
input = open(filename, 'rb')
output = open(writename, 'wb')
writer = csv.writer(output)
for row in csv.reader(input):
if any(field.strip() for field in row):
writer.writerow(row)
input.close()
output.close()
table = np.loadtxt(writename, skiprows=1, usecols=(0, 1, 2, 3, 4, 5
), converters={(0): league2num, (1): pb.datestr2num, (2):
football2num, (3): football2num}, delimiter=',')
X = table[:, :4]
Y = table[:, 4:]
return data_details_return({'X': X, 'Y': Y}, data_set)
| football_data | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(season='1314', data_set='football_data'):\n\n def x(string):\n league_dict = {'E0': 0, 'E1': 1, 'E2': 2, 'E3': 3, 'EC': 4}\n return league_dic... |
def x(data_set='three_phase_oil_flow'):
if not data_available(data_set):
download_data(data_set)
oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')
oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')
oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')
oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')
oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')
oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')
fid = open(oil_train_file)
X = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2.0 - 1.0
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2.0 - 1.0
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2.0 - 1.0
fid.close()
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest':
Ytest, 'Xtest': Xtest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set)
| oil | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data_set='three_phase_oil_flow'):\n if not data_available(data_set):\n download_data(data_set)\n oil_train_file = os.path.join(data_path, data_set... |
def x(seed=default_seed, num_samples=500):
np.random.seed(seed=seed)
num_in = 1
X = np.random.uniform(low=-1.0, high=1.0, size=(num_samples, num_in))
X.sort(axis=0)
rbf = GPy.kern.RBF(num_in, variance=1.0, lengthscale=np.array((0.25,)))
white = GPy.kern.White(num_in, variance=0.01)
kernel = rbf + white
K = kernel.K(X)
y = np.reshape(np.random.multivariate_normal(np.zeros(num_samples), K),
(num_samples, 1))
return {'X': X, 'Y': y, 'info': 'Sampled ' + str(num_samples) +
' values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1.'
}
| toy_rbf_1d | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(seed=default_seed, num_samples=500):\n np.random.seed(seed=seed)\n num_in = 1\n X = np.random.uniform(low=-1.0, high=1.0, size=(num_samples, num_in))\... |
def x(num_data=200, seed=default_seed):
np.random.seed(seed=seed)
sqrt2 = np.sqrt(2)
R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])
scales = []
scales.append(np.array([[3, 0], [0, 1]]))
scales.append(np.array([[3, 0], [0, 1]]))
scales.append([[1, 0], [0, 3]])
scales.append([[1, 0], [0, 3]])
means = []
means.append(np.array([4, 4]))
means.append(np.array([0, 4]))
means.append(np.array([-4, -4]))
means.append(np.array([0, -4]))
Xparts = []
num_data_part = []
num_data_total = 0
for i in range(0, 4):
num_data_part.append(round((i + 1) * num_data / 4.0))
num_data_part[i] -= num_data_total
part = np.random.normal(size=(num_data_part[i], 2))
part = np.dot(np.dot(part, scales[i]), R) + means[i]
Xparts.append(part)
num_data_total += num_data_part[i]
X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))
Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)), -np.
ones((num_data_part[2] + num_data_part[3], 1))))
return {'X': X, 'Y': Y, 'info':
'Two separate classes of data formed approximately in the shape of two crescents.'
}
| crescent_data | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(num_data=200, seed=default_seed):\n np.random.seed(seed=seed)\n sqrt2 = np.sqrt(2)\n R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])\n... |
def x(data_set='creep_rupture'):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar_file = os.path.join(path, 'creeprupt.tar')
tar = tarfile.open(tar_file)
print('Extracting file.')
tar.extractall(path=path)
tar.close()
all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka'))
y = all_data[:, 1:2].copy()
features = [0]
features.extend(range(2, 31))
X = all_data[:, features].copy()
return data_details_return({'X': X, 'y': y}, data_set)
| creep_data | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data_set='creep_rupture'):\n if not data_available(data_set):\n download_data(data_set)\n path = os.path.join(data_path, data_set)\n ta... |
def x(data_set='cifar-10'):
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'cifar-10-python.tar.gz')
if not data_available(data_set):
download_data(data_set)
import tarfile
tfile = tarfile.open(filename, 'r:gz')
tfile.extractall(dir_path)
with open(os.path.join(dir_path, 'cifar-10-batches-py', 'data_batch_1'),
'rb') as f:
data = pickle.load(f)
images = data['data'].reshape((-1, 3, 32, 32)).astype('float32') / 255
images = np.rollaxis(images, 1, 4)
patches = np.zeros((0, 5, 5, 3))
for x in range(0, 32 - 5, 5):
for y in range(0, 32 - 5, 5):
patches = np.concatenate((patches, images[:, x:x + 5, y:y + 5,
:]), axis=0)
patches = patches.reshape((patches.shape[0], -1))
return data_details_return({'Y': patches, 'info':
'32x32 pixel patches extracted from the CIFAR-10 data by Boris Babenko to demonstrate k-means features.'
}, data_set)
| cifar10_patches | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data_set='cifar-10'):\n dir_path = os.path.join(data_path, data_set)\n filename = os.path.join(dir_path, 'cifar-10-python.tar.gz')\n if not data_avail... |
def x(data_set='cmu_mocap'):
train_motions = ['18', '19']
test_motions = ['20']
data = cmu_mocap('49', train_motions, test_motions, sample_every=4,
data_set=data_set)
data['info'] = (
'One legged balancing motions from CMU data base subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of '
+ data['info'])
return data
| cmu_mocap_49_balance | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data_set='cmu_mocap'):\n train_motions = ['18', '19']\n test_motions = ['20']\n data = cmu_mocap('49', train_motions, test_motions, sample_every=4,\n ... |
def x(data_set='cmu_mocap'):
train_motions = ['01', '02', '03', '04', '05', '06', '07', '08', '09',
'10', '11', '12', '13', '14', '15', '16', '17', '19', '20', '21',
'22', '23', '24', '25', '26', '28', '30', '31', '32', '33', '34']
test_motions = ['18', '29']
data = cmu_mocap('35', train_motions, test_motions, sample_every=4,
data_set=data_set)
data['info'] = (
'Walk and jog data from CMU data base subject 35. As used in Tayor, Roweis and Hinton at NIPS 2007, but without their pre-processing (i.e. as used by Lawrence at AISTATS 2007). It consists of '
+ data['info'])
return data
| cmu_mocap_35_walk_jog | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(data_set='cmu_mocap'):\n train_motions = ['01', '02', '03', '04', '05', '06', '07', '08', '09',\n '10', '11', '12', '13', '14', '15', '16', '17', '19... |
def x(subject, train_motions, test_motions=[], sample_every=4, data_set=
'cmu_mocap'):
subject_dir = os.path.join(data_path, data_set)
all_motions = train_motions + test_motions
resource = cmu_urls_files(([subject], [all_motions]))
data_resources[data_set] = data_resources['cmu_mocap_full'].copy()
data_resources[data_set]['files'] = resource['files']
data_resources[data_set]['urls'] = resource['urls']
if resource['urls']:
download_data(data_set)
skel = GPy.util.mocap.acclaim_skeleton(os.path.join(subject_dir,
subject + '.asf'))
exlbls = np.eye(len(train_motions))
tot_length = 0
temp_Y = []
temp_lbls = []
for i in range(len(train_motions)):
temp_chan = skel.load_channels(os.path.join(subject_dir, subject +
'_' + train_motions[i] + '.amc'))
temp_Y.append(temp_chan[::sample_every, :])
temp_lbls.append(np.tile(exlbls[i, :], (temp_Y[i].shape[0], 1)))
tot_length += temp_Y[i].shape[0]
Y = np.zeros((tot_length, temp_Y[0].shape[1]))
lbls = np.zeros((tot_length, temp_lbls[0].shape[1]))
end_ind = 0
for i in range(len(temp_Y)):
start_ind = end_ind
end_ind += temp_Y[i].shape[0]
Y[start_ind:end_ind, :] = temp_Y[i]
lbls[start_ind:end_ind, :] = temp_lbls[i]
if len(test_motions) > 0:
temp_Ytest = []
temp_lblstest = []
testexlbls = np.eye(len(test_motions))
tot_test_length = 0
for i in range(len(test_motions)):
temp_chan = skel.load_channels(os.path.join(subject_dir,
subject + '_' + test_motions[i] + '.amc'))
temp_Ytest.append(temp_chan[::sample_every, :])
temp_lblstest.append(np.tile(testexlbls[i, :], (temp_Ytest[i].
shape[0], 1)))
tot_test_length += temp_Ytest[i].shape[0]
Ytest = np.zeros((tot_test_length, temp_Ytest[0].shape[1]))
lblstest = np.zeros((tot_test_length, temp_lblstest[0].shape[1]))
end_ind = 0
for i in range(len(temp_Ytest)):
start_ind = end_ind
end_ind += temp_Ytest[i].shape[0]
Ytest[start_ind:end_ind, :] = temp_Ytest[i]
lblstest[start_ind:end_ind, :] = temp_lblstest[i]
else:
Ytest = None
lblstest = None
info = 'Subject: ' + subject + '. Training motions: '
for motion in train_motions:
info += motion + ', '
info = info[:-2]
if len(test_motions) > 0:
info += '. Test motions: '
for motion in test_motions:
info += motion + ', '
info = info[:-2] + '.'
else:
info += '.'
if sample_every != 1:
info += ' Data is sub-sampled to every ' + str(sample_every
) + ' frames.'
return data_details_return({'Y': Y, 'lbls': lbls, 'Ytest': Ytest,
'lblstest': lblstest, 'info': info, 'skel': skel}, data_set)
| cmu_mocap | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(subject, train_motions, test_motions=[], sample_every=4, data_set=\n 'cmu_mocap'):\n subject_dir = os.path.join(data_path, data_set)\n all_motions = t... |
def x(self, X, X_grad):
X.mean.gradient, X.variance.gradient = X_grad
| set_X_gradients | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, X_grad):\n X.mean.gradient, X.variance.gradient = X_grad\n",
"role": "user"
},
{
"content": "set_X_gradients",
"role": "assistant"
... |
def x(self, link_f, y, Y_metadata=None):
assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
alpha = link_f * self.beta
objective = y ** (alpha - 1.0) * np.exp(-self.beta * y
) * self.beta ** alpha / special.gamma(alpha)
return np.exp(np.sum(np.log(objective)))
| pdf_link | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link_f, y, Y_metadata=None):\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\n alpha = link_f * self.beta\n objective = y ** (alp... |
def x(self, link_f, y, Y_metadata=None):
alpha = link_f * self.beta
log_objective = alpha * np.log(self.beta) - np.log(special.gamma(alpha)
) + (alpha - 1) * np.log(y) - self.beta * y
return log_objective
| logpdf_link | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link_f, y, Y_metadata=None):\n alpha = link_f * self.beta\n log_objective = alpha * np.log(self.beta) - np.log(special.gamma(alpha)\n ) + (a... |
def x(self, link_f, y, Y_metadata=None):
grad = self.beta * np.log(self.beta * y) - special.psi(self.beta * link_f
) * self.beta
return grad
| dlogpdf_dlink | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link_f, y, Y_metadata=None):\n grad = self.beta * np.log(self.beta * y) - special.psi(self.beta * link_f\n ) * self.beta\n return grad\n",
... |
def x(self, link_f, y, Y_metadata=None):
hess = -special.polygamma(1, self.beta * link_f) * self.beta ** 2
return hess
| d2logpdf_dlink2 | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link_f, y, Y_metadata=None):\n hess = -special.polygamma(1, self.beta * link_f) * self.beta ** 2\n return hess\n",
"role": "user"
},
{
... |
def x(self, link_f, y, Y_metadata=None):
d3lik_dlink3 = -special.polygamma(2, self.beta * link_f) * self.beta ** 3
return d3lik_dlink3
| d3logpdf_dlink3 | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link_f, y, Y_metadata=None):\n d3lik_dlink3 = -special.polygamma(2, self.beta * link_f) * self.beta ** 3\n return d3lik_dlink3\n",
"role": "use... |
def x(self):
self.X = self.transform_data(self.X_untransformed)
super(InputWarpedGP, self).parameters_changed()
dL_dX = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X)
self.warping_function.update_grads(self.X_untransformed, dL_dX)
| parameters_changed | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self.X = self.transform_data(self.X_untransformed)\n super(InputWarpedGP, self).parameters_changed()\n dL_dX = self.kern.gradients_X(self.gra... |
def x(self, X, test_data=False):
return self.warping_function.f(X, test_data)
| transform_data | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, test_data=False):\n return self.warping_function.f(X, test_data)\n",
"role": "user"
},
{
"content": "transform_data",
"role": "assi... |
def x(self, Xnew):
Xnew_warped = self.transform_data(Xnew, test_data=True)
mean, var = super(InputWarpedGP, self).predict(Xnew_warped, kern=self.
kernel, full_cov=False)
return mean, var
| predict | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, Xnew):\n Xnew_warped = self.transform_data(Xnew, test_data=True)\n mean, var = super(InputWarpedGP, self).predict(Xnew_warped, kern=self.\n ... |
def x(self, namespaces):
self.namespace = {}
for m in namespaces[::-1]:
buf = _get_namespace(m)
self.namespace.update(buf)
self.namespace.update(self.__dict__)
| set_namespace | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, namespaces):\n self.namespace = {}\n for m in namespaces[::-1]:\n buf = _get_namespace(m)\n self.namespace.update(buf)\n self.name... |
def x(self, expressions):
self.expressions = {}
for key, item in expressions.items():
self.expressions[key] = {'function': item}
| set_expressions | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, expressions):\n self.expressions = {}\n for key, item in expressions.items():\n self.expressions[key] = {'function': item}\n",
"role": "... |
def x(self, cacheable):
def x(expr):
return [e for e in expr.atoms() if e.is_Symbol and e not in vars]
self.cacheable = cacheable
self.variables = {}
vars = []
for expression in self.expressions.values():
vars += extract_vars(expression['function'])
self.cacheable_vars = []
for var in cacheable:
self.variables[var] = [e for e in vars if e.name.split('_')[0] ==
var.lower()]
self.cacheable_vars += self.variables[var]
for var in cacheable:
if not self.variables[var]:
raise ValueError('Variable ' + var +
' was specified as cacheable but is not in expression. Expected to find symbols of the form '
+ var.lower() + '_0 to represent ' + var)
self.variables['theta'] = sorted([e for e in vars if not e in self.
cacheable_vars], key=lambda e: e.name)
| set_variables | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, cacheable):\n\n def x(expr):\n return [e for e in expr.atoms() if e.is_Symbol and e not in vars]\n self.cacheable = cacheable\n self.vari... |
def x(self, parameters):
for theta in self.variables['theta']:
val = 1.0
if parameters is not None:
if theta.name in parameters:
val = parameters[theta.name]
self.link_parameters(Param(theta.name, val, None))
| set_parameters | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, parameters):\n for theta in self.variables['theta']:\n val = 1.0\n if parameters is not None:\n if theta.name in parameters:\... |
def x(self, name, value):
setattr(self, name, value)
self.namespace.update({name: getattr(self, name)})
| set_attribute | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, name, value):\n setattr(self, name, value)\n self.namespace.update({name: getattr(self, name)})\n",
"role": "user"
},
{
"content": "set... |
def x(self):
self.expression_list = []
self.expression_keys = []
self.expression_order = []
for fname, fexpressions in self.expressions.items():
for type, texpressions in fexpressions.items():
if type == 'function':
self.expression_list.append(texpressions)
self.expression_keys.append([fname, type])
self.expression_order.append(1)
elif type[-10:] == 'derivative':
for dtype, expression in texpressions.items():
self.expression_list.append(expression)
self.expression_keys.append([fname, type, dtype])
if type[:-10] == 'first_' or type[:-10] == '':
self.expression_order.append(3)
elif type[:-10] == 'second_':
self.expression_order.append(4)
elif type[:-10] == 'third_':
self.expression_order.append(5)
else:
self.expression_list.append(fexpressions[type])
self.expression_keys.append([fname, type])
self.expression_order.append(2)
self.expression_order, self.expression_list, self.expression_keys = zip(*
sorted(zip(self.expression_order, self.expression_list, self.
expression_keys)))
| update_expression_list | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self.expression_list = []\n self.expression_keys = []\n self.expression_order = []\n for fname, fexpressions in self.expressions.items():\... |
def x(self):
self.code = {}
def x(expr):
if type(expr) is dict:
code = {}
for key in expr.keys():
code[key] = match_key(expr[key])
else:
arg_list = [e for e in expr.atoms() if e.is_Symbol]
code = self._expr2code(arg_list, expr)
return code
self.code = match_key(self.expressions)
| gen_code | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self.code = {}\n\n def x(expr):\n if type(expr) is dict:\n code = {}\n for key in expr.keys():\n cod... |
def x(self, arg_list, expr):
code = lambdastr(arg_list, expr)
function_code = code.split(':')[1].strip()
return function_code
| expr2code | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, arg_list, expr):\n code = lambdastr(arg_list, expr)\n function_code = code.split(':')[1].strip()\n return function_code\n",
"role": "user"
... |
def x(self, code):
for key in self.variables.keys():
for arg in self.variables[key]:
code = code.replace(arg.name, 'self.' + arg.name)
return code
| print_code | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, code):\n for key in self.variables.keys():\n for arg in self.variables[key]:\n code = code.replace(arg.name, 'self.' + arg.name)\n ... |
def x(self, keys, user_substitutes={}):
sigma, alpha, nu, omega, l, variance = sym.var(
"""\\sigma, lpha,
u, \\omega, \\ell, \\sigma^2""")
substitutes = {'scale': sigma, 'shape': alpha, 'lengthscale': l,
'variance': variance}
substitutes.update(user_substitutes)
function_substitutes = {normcdfln: lambda arg: sym.log(normcdf(arg)),
logisticln: lambda arg: -sym.log(1 + sym.exp(-arg)), logistic: lambda
arg: 1 / (1 + sym.exp(-arg)), erfcx: lambda arg: erfc(arg) / sym.
exp(arg * arg), gammaln: lambda arg: sym.log(sym.gamma(arg))}
expr = getFromDict(self.expressions, keys)
for var_name, sub in self.variable_sort(self.expressions['update_cache'
], reverse=True):
for var in self.variables['cache']:
if var_name == var.name:
expr = expr.subs(var, sub)
break
for var_name, sub in self.variable_sort(self.expressions[
'parameters_changed'], reverse=True):
for var in self.variables['sub']:
if var_name == var.name:
expr = expr.subs(var, sub)
break
for var_name, sub in self.variable_sort(substitutes, reverse=True):
for var in self.variables['theta']:
if var_name == var.name:
expr = expr.subs(var, sub)
break
for m, r in function_substitutes.items():
expr = expr.replace(m, r)
return expr.simplify()
| display_expression | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, keys, user_substitutes={}):\n sigma, alpha, nu, omega, l, variance = sym.var(\n \"\"\"\\\\sigma, \u0007lpha, \nu, \\\\omega, \\\\ell, \\\\sigma... |
def x(self, full_values):
super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values)
if self.has_uncertain_inputs():
meangrad_tmp, vargrad_tmp = self.kern.gradients_qX_expectations(
variational_posterior=self.X, Z=self.Z, dL_dpsi0=full_values[
'dL_dpsi0'], dL_dpsi1=full_values['dL_dpsi1'], dL_dpsi2=
full_values['dL_dpsi2'], psi0=self.psi0, psi1=self.psi1, psi2=
self.psi2)
self.X.mean.gradient = meangrad_tmp
self.X.variance.gradient = vargrad_tmp
else:
self.X.gradient = self.kern.gradients_X(full_values['dL_dKnm'],
self.X, self.Z)
self.X.gradient += self.kern.gradients_X_diag(full_values[
'dL_dKdiag'], self.X)
| outer_values_update | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, full_values):\n super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values)\n if self.has_uncertain_inputs():\n meangrad_tmp, varg... |
def x(self, X, X2=None):
X, slices = X[:, :-1], index_to_slices(X[:, -1])
if X2 is None:
X2, slices2 = X, slices
K = np.zeros((X.shape[0], X.shape[0]))
else:
X2, slices2 = X2[:, :-1], index_to_slices(X2[:, -1])
K = np.zeros((X.shape[0], X2.shape[0]))
tdist = (X[:, 0][:, None] - X2[:, 0][None, :]) ** 2
ttdist = X[:, 0][:, None] - X2[:, 0][None, :]
vyt = self.variance_Yt
lyt = 1 / (2 * self.lengthscale_Yt)
a = -self.a
c = self.c
kyy = lambda tdist: np.exp(-lyt * tdist)
k1 = lambda tdist: 2 * lyt - 4 * lyt ** 2 * tdist
k4 = lambda tdist: 2 * lyt * tdist
for i, s1 in enumerate(slices):
for j, s2 in enumerate(slices2):
for ss1 in s1:
for ss2 in s2:
if i == 0 and j == 0:
K[ss1, ss2] = vyt * kyy(tdist[ss1, ss2])
elif i == 0 and j == 1:
K[ss1, ss2] = (k4(ttdist[ss1, ss2]) + 1) * vyt * kyy(
tdist[ss1, ss2])
elif i == 1 and j == 1:
K[ss1, ss2] = (k1(tdist[ss1, ss2]) + 1.0) * vyt * kyy(
tdist[ss1, ss2]) + self.ubias
else:
K[ss1, ss2] = (-k4(ttdist[ss1, ss2]) + 1) * vyt * kyy(
tdist[ss1, ss2])
return K
| K | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, X2=None):\n X, slices = X[:, :-1], index_to_slices(X[:, -1])\n if X2 is None:\n X2, slices2 = X, slices\n K = np.zeros((X.shape[0]... |
def x(self, dL_dK, X, X2=None):
X, slices = X[:, :-1], index_to_slices(X[:, -1])
if X2 is None:
X2, slices2 = X, slices
K = np.zeros((X.shape[0], X.shape[0]))
else:
X2, slices2 = X2[:, :-1], index_to_slices(X2[:, -1])
vyt = self.variance_Yt
lyt = 1.0 / (2 * self.lengthscale_Yt)
tdist = (X[:, 0][:, None] - X2[:, 0][None, :]) ** 2
ttdist = X[:, 0][:, None] - X2[:, 0][None, :]
rd = tdist.shape[0]
dka = np.zeros([rd, rd])
dkc = np.zeros([rd, rd])
dkYdvart = np.zeros([rd, rd])
dkYdlent = np.zeros([rd, rd])
dkdubias = np.zeros([rd, rd])
kyy = lambda tdist: np.exp(-lyt * tdist)
dkyydlyt = lambda tdist: kyy(tdist) * -tdist
k1 = lambda tdist: 2 * lyt - 4 * lyt ** 2 * tdist
k4 = lambda ttdist: 2 * lyt * ttdist
dk1dlyt = lambda tdist: 2.0 - 4 * 2.0 * lyt * tdist
dk4dlyt = lambda ttdist: 2 * ttdist
for i, s1 in enumerate(slices):
for j, s2 in enumerate(slices2):
for ss1 in s1:
for ss2 in s2:
if i == 0 and j == 0:
dkYdvart[ss1, ss2] = kyy(tdist[ss1, ss2])
dkYdlent[ss1, ss2] = vyt * dkyydlyt(tdist[ss1, ss2])
dkdubias[ss1, ss2] = 0
elif i == 0 and j == 1:
dkYdvart[ss1, ss2] = (k4(ttdist[ss1, ss2]) + 1) * kyy(
tdist[ss1, ss2])
dkYdlent[ss1, ss2] = vyt * dkyydlyt(tdist[ss1, ss2]
) * (k4(ttdist[ss1, ss2]) + 1.0) + vyt * kyy(tdist
[ss1, ss2]) * dk4dlyt(ttdist[ss1, ss2])
dkdubias[ss1, ss2] = 0
elif i == 1 and j == 1:
dkYdvart[ss1, ss2] = (k1(tdist[ss1, ss2]) + 1.0) * kyy(
tdist[ss1, ss2])
dkYdlent[ss1, ss2] = vyt * dkyydlyt(tdist[ss1, ss2]
) * (k1(tdist[ss1, ss2]) + 1.0) + vyt * kyy(tdist
[ss1, ss2]) * dk1dlyt(tdist[ss1, ss2])
dkdubias[ss1, ss2] = 1
else:
dkYdvart[ss1, ss2] = (-k4(ttdist[ss1, ss2]) + 1) * kyy(
tdist[ss1, ss2])
dkYdlent[ss1, ss2] = vyt * dkyydlyt(tdist[ss1, ss2]
) * (-k4(ttdist[ss1, ss2]) + 1.0) + vyt * kyy(tdist
[ss1, ss2]) * -dk4dlyt(ttdist[ss1, ss2])
dkdubias[ss1, ss2] = 0
self.variance_Yt.gradient = np.sum(dkYdvart * dL_dK)
self.lengthscale_Yt.gradient = np.sum(dkYdlent * (-0.5 * self.
lengthscale_Yt ** -2) * dL_dK)
self.ubias.gradient = np.sum(dkdubias * dL_dK)
| update_gradients_full | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, dL_dK, X, X2=None):\n X, slices = X[:, :-1], index_to_slices(X[:, -1])\n if X2 is None:\n X2, slices2 = X, slices\n K = np.zeros((X.s... |
def x(self, target):
t_ode = self._t[self._index > 0]
dL_dK_ode = self._dL_dK[self._index > 0, :]
index_ode = self._index[self._index > 0] - 1
if self._t2 is None:
if t_ode.size == 0:
return
t2_ode = t_ode
dL_dK_ode = dL_dK_ode[:, self._index > 0]
index2_ode = index_ode
else:
t2_ode = self._t2[self._index2 > 0]
dL_dK_ode = dL_dK_ode[:, self._index2 > 0]
if t_ode.size == 0 or t2_ode.size == 0:
return
index2_ode = self._index2[self._index2 > 0] - 1
h1 = self._compute_H(t_ode, index_ode, t2_ode, index2_ode, stationary=
self.is_stationary, update_derivatives=True)
self._dK_dsigma = self._dh_dsigma
if self._t2 is None:
h2 = h1
else:
h2 = self._compute_H(t2_ode, index2_ode, t_ode, index_ode,
stationary=self.is_stationary, update_derivatives=True)
self._dK_dsigma += self._dh_dsigma.T
(dL_dK_ode[:, :, None] * self._dh_ddelay[:, None, :]).sum(2)
target[-1] += (dL_dK_ode * self._dK_dsigma / np.sqrt(2)).sum()
| dK_ode_dtheta | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, target):\n t_ode = self._t[self._index > 0]\n dL_dK_ode = self._dL_dK[self._index > 0, :]\n index_ode = self._index[self._index > 0] - 1\n if... |
def x(self, X, X2=None, dL_dK=None):
self._t = X[:, 0]
if not X.shape[1] == 2:
raise ValueError(
'Input matrix for ode1 covariance should have two columns, one containing times, the other output indices'
)
self._index = np.asarray(X[:, 1], dtype=np.int)
self._order = self._index.argsort()
self._index = self._index[self._order]
self._t = self._t[self._order]
self._rorder = self._order.argsort()
if X2 is None:
self._t2 = None
self._index2 = None
self._order2 = self._order
self._rorder2 = self._rorder
else:
if not X2.shape[1] == 2:
raise ValueError(
'Input matrix for ode1 covariance should have two columns, one containing times, the other output indices'
)
self._t2 = X2[:, 0]
self._index2 = np.asarray(X2[:, 1], dtype=np.int)
self._order2 = self._index2.argsort()
self._index2 = self._index2[self._order2]
self._t2 = self._t2[self._order2]
self._rorder2 = self._order2.argsort()
if dL_dK is not None:
self._dL_dK = dL_dK[self._order, :]
self._dL_dK = self._dL_dK[:, self._order2]
| extract_t_indices | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, X2=None, dL_dK=None):\n self._t = X[:, 0]\n if not X.shape[1] == 2:\n raise ValueError(\n 'Input matrix for ode1 covariance sh... |
def x(self, X, X2):
    """Assemble the full covariance matrix for this multi-output kernel.

    Computes the EQ/EQ, ODE/EQ and ODE/ODE blocks, stitches them into
    ``self._K_dvar`` (reordered back to the callers' input order) and fills
    ``self._scale`` with the coregionalisation weights ``self.B`` for each
    pair of output indices.
    """
    # Sort inputs by output index and cache times/orderings on self.
    self._extract_t_indices(X, X2)
    self._K_compute_eq()
    self._K_compute_ode_eq()
    if X2 is None:
        # Symmetric case: the EQ/ODE block is the transpose of ODE/EQ.
        self._K_eq_ode = self._K_ode_eq.T
    else:
        self._K_compute_ode_eq(transpose=True)
    self._K_compute_ode()
    # NOTE(review): this zeros allocation is immediately overwritten by the
    # vstack below — it looks like a dead store.
    if X2 is None:
        self._K_dvar = np.zeros((self._t.shape[0], self._t.shape[0]))
    else:
        self._K_dvar = np.zeros((self._t.shape[0], self._t2.shape[0]))
    # Stitch the four blocks together, then undo the sort-by-index ordering
    # so rows/columns match the original X (and X2) row order.
    self._K_dvar = np.vstack((np.hstack((self._K_eq, self._K_eq_ode)), np.
        hstack((self._K_ode_eq, self._K_ode))))
    self._K_dvar = self._K_dvar[self._rorder, :]
    self._K_dvar = self._K_dvar[:, self._rorder2]
    # NOTE(review): scipy.weave is Python 2 only; this scaling path needs a
    # port (e.g. plain numpy fancy indexing B[index[:, None], index2[None, :]])
    # to run on Python 3 — confirm before relying on it.
    if X2 is None:
        self._scale = np.zeros((self._t.size, self._t.size))
        code = """
        for(int i=0;i<N; i++){
          scale_mat[i+i*N] = B[index[i]+output_dim*(index[i])];
          for(int j=0; j<i; j++){
            scale_mat[j+i*N] = B[index[i]+output_dim*index[j]];
            scale_mat[i+j*N] = scale_mat[j+i*N];
          }
        }
        """
        scale_mat, B, index = self._scale, self.B, self._index
        N, output_dim = self._t.size, self.output_dim
        weave.inline(code, ['index', 'scale_mat', 'B', 'N', 'output_dim'])
    else:
        self._scale = np.zeros((self._t.size, self._t2.size))
        code = """
        for(int i=0; i<N; i++){
          for(int j=0; j<N2; j++){
            scale_mat[i+j*N] = B[index[i]+output_dim*index2[j]];
          }
        }
        """
        scale_mat, B, index, index2 = (self._scale, self.B, self._index,
            self._index2)
        N, N2, output_dim = self._t.size, self._t2.size, self.output_dim
        weave.inline(code, ['index', 'index2', 'scale_mat', 'B', 'N', 'N2',
            'output_dim'])
| K_computations | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, X2):\n self._extract_t_indices(X, X2)\n self._K_compute_eq()\n self._K_compute_ode_eq()\n if X2 is None:\n self._K_eq_ode = self._K... |
def x(self):
    """Compute the EQ (RBF) block of the covariance for the points whose
    output index is 0, storing it in ``self._K_eq`` (and the squared
    distances in ``self._dist2``)."""
    times = self._t[self._index == 0]
    if self._t2 is None:
        # Symmetric block against itself.
        if times.size == 0:
            self._K_eq = np.zeros((0, 0))
            return
        times2 = times
    else:
        times2 = self._t2[self._index2 == 0]
        # Either side empty -> empty (possibly rectangular) block.
        if times.size == 0 or times2.size == 0:
            self._K_eq = np.zeros((times.size, times2.size))
            return
    self._dist2 = np.square(times[:, None] - times2[None, :])
    ell = self.lengthscale
    self._K_eq = np.exp(-self._dist2 / (2 * ell * ell))
    if self.is_normalized:
        # Normalise so the kernel integrates to one.
        self._K_eq /= np.sqrt(2 * np.pi) * ell
| K_compute_eq | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n t_eq = self._t[self._index == 0]\n if self._t2 is None:\n if t_eq.size == 0:\n self._K_eq = np.zeros((0, 0))\n retu... |
def x(self, transpose=False):
    """Compute the cross-covariance block between the ODE-driven outputs
    (output index > 0) and the driving EQ process (output index 0), storing
    it in ``self._K_ode_eq`` (or its transpose in ``self._K_eq_ode`` when
    ``transpose=True``).
    """
    # Select times for the EQ process and the ODE outputs; with two input
    # sets the roles swap when computing the transposed block. ODE indices
    # are shifted down by one so they index decay/delay parameter vectors.
    if self._t2 is not None:
        if transpose:
            t_eq = self._t[self._index == 0]
            t_ode = self._t2[self._index2 > 0]
            index_ode = self._index2[self._index2 > 0] - 1
        else:
            t_eq = self._t2[self._index2 == 0]
            t_ode = self._t[self._index > 0]
            index_ode = self._index[self._index > 0] - 1
    else:
        t_eq = self._t[self._index == 0]
        t_ode = self._t[self._index > 0]
        index_ode = self._index[self._index > 0] - 1
    # Degenerate case: one of the factors has no points -> empty block.
    if t_ode.size == 0 or t_eq.size == 0:
        if transpose:
            self._K_eq_ode = np.zeros((t_eq.shape[0], t_ode.shape[0]))
        else:
            self._K_ode_eq = np.zeros((t_ode.shape[0], t_eq.shape[0]))
    else:
        t_ode_mat = t_ode[:, None]
        t_eq_mat = t_eq[None, :]
        if self.delay is not None:
            # Shift ODE times by each output's delay. Safe in place: the
            # boolean masking above made t_ode a copy of self._t / self._t2.
            t_ode_mat -= self.delay[index_ode, None]
        diff_t = t_ode_mat - t_eq_mat
        inv_sigma_diff_t = 1.0 / self.sigma * diff_t
        decay_vals = self.decay[index_ode][:, None]
        half_sigma_d_i = 0.5 * self.sigma * decay_vals
        # ln_diff_erfs keeps log|erf(a) - erf(b)| and its sign separately so
        # the exponentiation below stays numerically stable.
        if self.is_stationary:
            ln_part, signs = ln_diff_erfs(inf, half_sigma_d_i -
                inv_sigma_diff_t, return_sign=True)
        else:
            ln_part, signs = ln_diff_erfs(half_sigma_d_i + t_eq_mat / self.
                sigma, half_sigma_d_i - inv_sigma_diff_t, return_sign=True)
        sK = signs * np.exp(half_sigma_d_i * half_sigma_d_i - decay_vals *
            diff_t + ln_part)
        sK *= 0.5
        if not self.is_normalized:
            sK *= np.sqrt(np.pi) * self.sigma
        if transpose:
            self._K_eq_ode = sK.T
        else:
            self._K_ode_eq = sK
| K_compute_ode_eq | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, transpose=False):\n if self._t2 is not None:\n if transpose:\n t_eq = self._t[self._index == 0]\n t_ode = self._t2[self._... |
def x(input_dict):
    """Instantiate a mapping object from its dictionary representation.

    The dictionary must carry a 'class' entry naming the concrete mapping
    class (e.g. 'GPy.mappings.Linear') and a 'name' entry; the input is
    deep-copied so the caller's dict is left untouched.
    """
    from copy import deepcopy
    spec = deepcopy(input_dict)
    class_path = spec.pop('class')
    spec['name'] = str(spec['name'])
    import GPy
    # NOTE(review): eval of the stored class path — fine for trusted model
    # files, unsafe on untrusted input.
    cls = eval(class_path)
    # The class object is passed explicitly as the first argument; the
    # builder here appears to be called unbound — confirm against the base
    # class definition.
    return cls._build_from_input_dict(cls, spec)
| from_dict | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(input_dict):\n import copy\n input_dict = copy.deepcopy(input_dict)\n mapping_class = input_dict.pop('class')\n input_dict['name'] = str(input_dict... |
def x(X, axis=0):
    """Group identical rows (axis=0) or columns (axis=1) of a 2D array.

    :param X: 2D array-like.
    :param axis: 0 to group rows, 1 to group columns.
    :returns: dict mapping tuple(subarray) -> list of positions at which
        that row/column occurs, in order of appearance.
    """
    from collections import defaultdict
    assert X.ndim == 2 and axis in (0, 1), 'Only implemented for 2D arrays'
    # Rewritten from a side-effecting list comprehension over an inner
    # helper (which was also mis-referenced) to a plain, explicit loop.
    subarrays = defaultdict(list)
    vectors = X if axis == 0 else X.T
    for position, vec in enumerate(vectors):
        subarrays[tuple(vec)].append(position)
    return subarrays
| common_subarrays | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(X, axis=0):\n from collections import defaultdict\n from itertools import count\n from operator import iadd\n assert X.ndim == 2 and axis in (0, 1)... |
def x(self, Xnew, full_cov=False, Y_metadata=None, kern=None, Yindex=0):
    """Predict outputs at Xnew through the Yindex-th view of the MRD model.

    Points self.posterior / self.kern / self.likelihood at the selected
    view before delegating to the parent class predict.
    """
    view = self.bgplvms[Yindex]
    self.posterior, self.kern, self.likelihood = (view.posterior, view.
        kern, view.likelihood)
    return super(MRD, self).predict(Xnew, full_cov, Y_metadata, kern)
| predict | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, Xnew, full_cov=False, Y_metadata=None, kern=None, Yindex=0):\n b = self.bgplvms[Yindex]\n self.posterior = b.posterior\n self.kern = b.kern\n ... |
def x(self, titles=None, fig_kwargs={}, **kwargs):
    """Plot the ARD scales of every view side by side in one figure row.

    :param titles: optional per-view titles; defaults to '$<name>$'.
    :param fig_kwargs: forwarded to the plotting library's figure().
    :returns: the canvas of the last view plotted.
    """
    from ..plotting import plotting_library as pl
    names = (titles if titles is not None else
             ['${}$'.format(n) for n in self.names])
    n_views = len(self.bgplvms)
    fig = pl().figure(rows=1, cols=n_views, **fig_kwargs)
    for view_idx in range(n_views):
        canvas = self.bgplvms[view_idx].kern.plot_ARD(title=names[
            view_idx], figure=fig, col=view_idx + 1, **kwargs)
    return canvas
| plot_scales | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, titles=None, fig_kwargs={}, **kwargs):\n from ..plotting import plotting_library as pl\n if titles is None:\n titles = ['${}$'.format(name) ... |
def x(self, labels=None, which_indices=None, resolution=60, legend=True,
    plot_limits=None, updates=False, kern=None, marker='<>^vsd',
    num_samples=1000, projection='2d', predict_kwargs={}, scatter_kwargs=
    None, **imshow_kwargs):
    """Plot the latent space through the view chosen by
    predict_kwargs['Yindex'] (default 0).

    Temporarily points self.kern / self.likelihood at the selected view so
    the generic plot_latent helper predicts through that view.

    NOTE(review): predict_kwargs entries other than 'Yindex' and the
    **imshow_kwargs are accepted but never forwarded to plot_latent —
    confirm whether that is intentional.
    NOTE(review): writing into the mutable default dict predict_kwargs
    persists 'Yindex' across calls.
    """
    from ..plotting.gpy_plot.latent_plots import plot_latent
    if 'Yindex' not in predict_kwargs:
        predict_kwargs['Yindex'] = 0
    Yindex = predict_kwargs['Yindex']
    # Borrow the selected view's kernel and likelihood for plotting.
    self.kern = self.bgplvms[Yindex].kern
    self.likelihood = self.bgplvms[Yindex].likelihood
    return plot_latent(self, labels, which_indices, resolution, legend,
        plot_limits, updates, kern, marker, num_samples, projection,
        scatter_kwargs)
| plot_latent | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, labels=None, which_indices=None, resolution=60, legend=True,\n plot_limits=None, updates=False, kern=None, marker='<>^vsd',\n num_samples=1000, pro... |
def x(self, threshold=0.005, printOut=False, views=None):
    """Split the latent dimensions into shared and private subsets.

    Dimensions whose normalised ARD scale exceeds ``threshold`` in a view
    are "on" for that view; dimensions on in every observed view and in the
    inferred view are shared, the rest are private per view.

    :param threshold: relevance cutoff on the normalised ARD scales.
    :param printOut: if True, print the shared/private dimension lists.
    :param views: optional pair (list of observed view indices, inferred
        view index); defaults to view 0 observed, view 1 inferred.
    :returns: (sharedDims, privateDims) — a sorted list of shared dimension
        indices and the list of sorted private dimension lists.
    :raises NotImplementedError: if views is None and there are not 2 views.
    """
    M = len(self.bgplvms)
    if views is None:
        # Bug fix: was `if M is not 2` — identity comparison on an int,
        # which is unreliable and a SyntaxWarning on modern Python.
        if M != 2:
            raise NotImplementedError('Not implemented for M > 2')
        obsMod = [0]
        infMod = 1
    else:
        obsMod = views[0]
        infMod = views[1]
    # Normalised ARD sensitivities per observed view. Use out-of-place
    # division so the kernels' own sensitivity arrays are not mutated
    # (the original divided in place on the returned array).
    scObs = []
    for mod in obsMod:
        sc = np.atleast_2d(self.bgplvms[mod].kern.input_sensitivity(
            summarize=False))[0]
        scObs.append(sc / np.max(sc))
    scInf = np.atleast_2d(self.bgplvms[infMod].kern.input_sensitivity(
        summarize=False))[0]
    scInf = scInf / np.max(scInf)
    # Dimensions each view retains (scale above threshold).
    retainedScales = [None] * (len(obsMod) + 1)
    for i, mod in enumerate(obsMod):
        retainedScales[mod] = [int(k) for k in np.where(scObs[i] >
            threshold)[0]]
    retainedScales[infMod] = [int(k) for k in np.where(scInf > threshold)[0]]
    # Shared = retained by every observed view AND the inferred view.
    sharedDims = set(retainedScales[obsMod[0]]).intersection(retainedScales
        [infMod])
    for mod in obsMod[1:]:
        sharedDims = sharedDims.intersection(retainedScales[mod])
    # Private = what a view retains beyond the shared set.
    privateDims = [None] * M
    for i in range(len(retainedScales)):
        privateDims[i] = sorted(set(retainedScales[i]).difference(sharedDims))
    sharedDims = sorted(sharedDims)
    if printOut:
        print('# Shared dimensions: ' + str(sharedDims))
        for i in range(len(retainedScales)):
            print('# Private dimensions model ' + str(i) + ':' + str(
                privateDims[i]))
    return sharedDims, privateDims
| factorize_space | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, threshold=0.005, printOut=False, views=None):\n M = len(self.bgplvms)\n if views is None:\n if M is not 2:\n raise NotImplemented... |
def x(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=
    False, apply_link=False, visible_dims=None, which_data_ycols='all',
    levels=20, projection='2d', label='gp mean', predict_kw=None, **kwargs):
    """Plot the predictive mean of the GP on a fresh canvas and return the
    finished canvas."""
    canvas, kwargs = pl().new_canvas(projection=projection, **kwargs)
    inputs = get_x_y_var(self)[0]
    grid_info = helper_for_plot_data(self, inputs, plot_limits,
        visible_dims, fixed_inputs, resolution)
    ycols = get_which_data_ycols(self, which_data_ycols)
    prediction = helper_predict_with_model(self, grid_info[2], plot_raw,
        apply_link, None, ycols, predict_kw)
    mean_plots = _plot_mean(self, canvas, grid_info, prediction, levels,
        projection, label, **kwargs)
    return pl().add_to_canvas(canvas, mean_plots)
| plot_mean | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=\n False, apply_link=False, visible_dims=None, which_data_ycols='all',\n levels=20, ... |
def x(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=None,
    resolution=None, plot_raw=False, apply_link=False, visible_dims=None,
    which_data_ycols='all', label='gp confidence', predict_kw=None, **kwargs):
    """Plot the [lower, upper] percentile confidence band of the GP on a
    fresh canvas and return the finished canvas."""
    canvas, kwargs = pl().new_canvas(**kwargs)
    inputs = get_x_y_var(self)[0]
    grid_info = helper_for_plot_data(self, inputs, plot_limits,
        visible_dims, fixed_inputs, resolution)
    ycols = get_which_data_ycols(self, which_data_ycols)
    prediction = helper_predict_with_model(self, grid_info[2], plot_raw,
        apply_link, (lower, upper), ycols, predict_kw)
    band_plots = _plot_confidence(self, canvas, grid_info, prediction,
        label, **kwargs)
    # Only show a legend when the band actually carries a label.
    return pl().add_to_canvas(canvas, band_plots, legend=label is not None)
| plot_confidence | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=None,\n resolution=None, plot_raw=False, apply_link=False, visible_dims=None,\n which_data_yc... |
def x(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=
    True, apply_link=False, visible_dims=None, which_data_ycols='all',
    samples=3, projection='2d', label='gp_samples', predict_kw=None, **kwargs):
    """Draw `samples` posterior sample paths of the GP on a fresh canvas and
    return the finished canvas."""
    canvas, kwargs = pl().new_canvas(projection=projection, **kwargs)
    inputs = get_x_y_var(self)[0]
    grid_info = helper_for_plot_data(self, inputs, plot_limits,
        visible_dims, fixed_inputs, resolution)
    ycols = get_which_data_ycols(self, which_data_ycols)
    prediction = helper_predict_with_model(self, grid_info[2], plot_raw,
        apply_link, None, ycols, predict_kw, samples)
    sample_plots = _plot_samples(self, canvas, grid_info, prediction,
        projection, label, **kwargs)
    return pl().add_to_canvas(canvas, sample_plots)
| plot_samples | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=\n True, apply_link=False, visible_dims=None, which_data_ycols='all',\n samples=3, p... |
def x(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=
    False, apply_link=False, visible_dims=None, which_data_ycols='all',
    levels=35, label='gp density', predict_kw=None, **kwargs):
    """Plot the predictive density of the GP as stacked percentile shading
    on a fresh canvas and return the finished canvas."""
    canvas, kwargs = pl().new_canvas(**kwargs)
    inputs = get_x_y_var(self)[0]
    grid_info = helper_for_plot_data(self, inputs, plot_limits,
        visible_dims, fixed_inputs, resolution)
    # 2*levels percentiles between 2.5 and 97.5 define the shading bands.
    percentiles = np.linspace(2.5, 97.5, levels * 2)
    ycols = get_which_data_ycols(self, which_data_ycols)
    prediction = helper_predict_with_model(self, grid_info[2], plot_raw,
        apply_link, percentiles, ycols, predict_kw)
    density_plots = _plot_density(self, canvas, grid_info, prediction,
        label, **kwargs)
    return pl().add_to_canvas(canvas, density_plots)
| plot_density | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=\n False, apply_link=False, visible_dims=None, which_data_ycols='all',\n levels=35, ... |
def x(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=
    False, apply_link=False, which_data_ycols='all', which_data_rows='all',
    visible_dims=None, levels=20, samples=0, samples_likelihood=0, lower=
    2.5, upper=97.5, plot_data=True, plot_inducing=True, plot_density=False,
    predict_kw=None, projection='2d', legend=True, **kwargs):
    """Convenience plot of the model on one canvas: prediction (mean with
    confidence band, or density shading), training data with error bars,
    inducing inputs when the model has them, and optional posterior /
    likelihood samples.

    :param plot_raw: plot the latent function f instead of the output.
    :param plot_density: shade stacked percentiles instead of a single
        [lower, upper] band.
    :param samples / samples_likelihood: number of posterior / likelihood
        sample paths to overlay.
    :returns: the finished canvas from the plotting library.
    """
    X = get_x_y_var(self)[0]
    helper_data = helper_for_plot_data(self, X, plot_limits, visible_dims,
        fixed_inputs, resolution)
    # helper_data[5:7] are the plot bounds, helper_data[1] the free
    # (non-fixed) input dimensions.
    xmin, xmax = helper_data[5:7]
    free_dims = helper_data[1]
    # Default the axis limits to the data bounds unless the caller set them.
    if not 'xlim' in kwargs:
        kwargs['xlim'] = xmin[0], xmax[0]
    if not 'ylim' in kwargs and len(free_dims) == 2:
        kwargs['ylim'] = xmin[1], xmax[1]
    canvas, _ = pl().new_canvas(projection=projection, **kwargs)
    # Density mode needs a whole ladder of percentiles; band mode just two.
    helper_prediction = helper_predict_with_model(self, helper_data[2],
        plot_raw, apply_link, np.linspace(2.5, 97.5, levels * 2) if
        plot_density else (lower, upper), get_which_data_ycols(self,
        which_data_ycols), predict_kw, samples)
    # Raw latent values without the link are not on the data scale, so the
    # data overlay would be misleading.
    if plot_raw and not apply_link:
        plot_data = False
    plots = {}
    if hasattr(self, 'Z') and plot_inducing:
        plots.update(_plot_inducing(self, canvas, free_dims, projection,
            'Inducing'))
    if plot_data:
        plots.update(_plot_data(self, canvas, which_data_rows,
            which_data_ycols, free_dims, projection, 'Data'))
        plots.update(_plot_data_error(self, canvas, which_data_rows,
            which_data_ycols, free_dims, projection, 'Data Error'))
    plots.update(_plot(self, canvas, plots, helper_data, helper_prediction,
        levels, plot_inducing, plot_density, projection))
    # Likelihood samples need a fresh (non-raw) prediction pass.
    if plot_raw and samples_likelihood > 0:
        helper_prediction = helper_predict_with_model(self, helper_data[2],
            False, apply_link, None, get_which_data_ycols(self,
            which_data_ycols), predict_kw, samples_likelihood)
        plots.update(_plot_samples(canvas, helper_data, helper_prediction,
            projection, 'Lik Samples'))
    return pl().add_to_canvas(canvas, plots, legend=legend)
| plot | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=\n False, apply_link=False, which_data_ycols='all', which_data_rows='all',\n visible... |
def x(self, plot_limits=None, fixed_inputs=None, resolution=None,
    apply_link=False, which_data_ycols='all', which_data_rows='all',
    visible_dims=None, levels=20, samples=0, lower=2.5, upper=97.5,
    plot_density=False, plot_data=True, plot_inducing=True, projection='2d',
    legend=True, predict_kw=None, **kwargs):
    """Plot the latent function f of the model.

    Thin wrapper around plot() with plot_raw forced on and no likelihood
    samples; all other options are forwarded unchanged.
    """
    plot_raw = True
    samples_likelihood = 0
    return plot(self, plot_limits, fixed_inputs, resolution, plot_raw,
        apply_link, which_data_ycols, which_data_rows, visible_dims,
        levels, samples, samples_likelihood, lower, upper, plot_data,
        plot_inducing, plot_density, predict_kw, projection, legend, **kwargs)
| plot_f | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, plot_limits=None, fixed_inputs=None, resolution=None,\n apply_link=False, which_data_ycols='all', which_data_rows='all',\n visible_dims=None, level... |
def x(input_dict, data=None):
    """Re-create a model from its dictionary representation.

    :param input_dict: serialized model; must contain a 'class' entry
        naming the concrete model class and a 'name' entry. Deep-copied, so
        the caller's dict is left untouched.
    :param data: optional data to hand to the model builder.
    """
    from copy import deepcopy
    spec = deepcopy(input_dict)
    class_path = spec.pop('class')
    spec['name'] = str(spec['name'])
    import GPy
    # NOTE(review): eval of the stored class path — fine for trusted model
    # files, unsafe on untrusted input.
    model_cls = eval(class_path)
    return model_cls._build_from_input_dict(spec, data)
| from_dict | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(input_dict, data=None):\n import copy\n input_dict = copy.deepcopy(input_dict)\n model_class = input_dict.pop('class')\n input_dict['name'] = str(i... |
def x(self):
    """Serialize this Linear mapping to a plain dictionary, tagging it with
    its class path and storing A as a nested list."""
    input_dict = super(Linear, self)._save_to_input_dict()
    input_dict.update({'class': 'GPy.mappings.Linear', 'A': self.A.values
        .tolist()})
    return input_dict
| to_dict | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n input_dict = super(Linear, self)._save_to_input_dict()\n input_dict['class'] = 'GPy.mappings.Linear'\n input_dict['A'] = self.A.values.tolist... |
def x(input_dim, num_outputs, kernel, W_rank=1, W=None, kappa=None, name='ICM'
    ):
    """Build an intrinsic coregionalization model (ICM) kernel: the product
    of `kernel` over the inputs with a rank-W_rank Coregionalize kernel over
    the output index (carried in column `input_dim`).

    :returns: the product kernel.
    """
    if kernel.input_dim != input_dim:
        # Force the base kernel onto the requested input dimensionality.
        kernel.input_dim = input_dim
        warnings.warn(
            "kernel's input dimension overwritten to fit input_dim parameter.")
    coreg = GPy.kern.Coregionalize(1, num_outputs, active_dims=[input_dim],
        rank=W_rank, W=W, kappa=kappa, name='B')
    return kernel.prod(coreg, name=name)
| ICM | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(input_dim, num_outputs, kernel, W_rank=1, W=None, kappa=None, name='ICM'\n ):\n if kernel.input_dim != input_dim:\n kernel.input_dim = input_dim\n... |
def x(input_dim, num_outputs, kernels_list, W_rank=1, name='ICM'):
    """Build a linear coregionalization model (LCM) kernel: the sum of one
    ICM kernel per entry of kernels_list, each named '<name><index>'.

    :param kernels_list: non-empty list of base kernels, one per ICM term.
    :returns: the summed kernel.
    """
    # Removed the unused local `Nk`; enumerate replaces the manual counter.
    K = ICM(input_dim, num_outputs, kernels_list[0], W_rank, name='%s%s' %
        (name, 0))
    for j, kernel in enumerate(kernels_list[1:], start=1):
        K += ICM(input_dim, num_outputs, kernel, W_rank, name='%s%s' % (
            name, j))
    return K
| LCM | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(input_dim, num_outputs, kernels_list, W_rank=1, name='ICM'):\n Nk = len(kernels_list)\n K = ICM(input_dim, num_outputs, kernels_list[0], W_rank, name='%s... |
def x(input_dim, num_outputs, kernel, output, kappa=None, name='X'):
    """Build an ICM kernel that is private to a single output: the mixing
    matrix W is fixed to 0 and kappa is fixed to 0 for every output except
    `output`, so only that output keeps a free variance.

    :param output: index of the output the kernel remains active for.
    :returns: the constrained ICM kernel.
    """
    K = ICM(input_dim, num_outputs, kernel, W_rank=1, kappa=kappa, name=name)
    K.B.W.fix(0)
    # Bug fix: range objects are immutable on Python 3 (`range(...).pop(...)`
    # raises AttributeError); skip `output` inside the loop instead.
    for j in range(num_outputs):
        if j == output:
            continue
        K.B.kappa[j] = 0
        K.B.kappa[j].fix()
    return K
| Private | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(input_dim, num_outputs, kernel, output, kappa=None, name='X'):\n K = ICM(input_dim, num_outputs, kernel, W_rank=1, kappa=kappa, name=name)\n K.B.W.fix(0)... |
def x(self, X, X2=None):
    """Kernel matrix K(X, X2), evaluated through the stationary radial form
    k(r) at the lengthscale-scaled distances."""
    return self.K_of_r(self._scaled_dist(X, X2))
| K | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, X2=None):\n r = self._scaled_dist(X, X2)\n return self.K_of_r(r)\n",
"role": "user"
},
{
"content": "K",
"role": "assistant"
}... |
def x(self, X, X2):
    """Evaluate dK/dr at the scaled distances between X and X2."""
    r = self._scaled_dist(X, X2)
    return self.dK_dr(r)
| dK_dr_via_X | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, X2):\n return self.dK_dr(self._scaled_dist(X, X2))\n",
"role": "user"
},
{
"content": "dK_dr_via_X",
"role": "assistant"
}
] |
def x(self, X, X2=None):
    """Euclidean distances between the rows of X (and X2), with no
    lengthscale scaling applied.

    Uses the ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b expansion; squared
    distances are clipped at zero to absorb negative round-off before the
    square root.
    """
    if X2 is None:
        # Symmetric case: tdot computes X X^T; force an exactly-zero
        # diagonal since round-off would otherwise leave tiny residues.
        sq = np.sum(np.square(X), 1)
        r2 = sq[:, None] + sq[None, :] - 2.0 * tdot(X)
        util.diag.view(r2)[:,] = 0.0
        return np.sqrt(np.clip(r2, 0, np.inf))
    sq1 = np.sum(np.square(X), 1)
    sq2 = np.sum(np.square(X2), 1)
    r2 = sq1[:, None] + sq2[None, :] - 2.0 * np.dot(X, X2.T)
    return np.sqrt(np.clip(r2, 0, np.inf))
| unscaled_dist | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, X2=None):\n if X2 is None:\n Xsq = np.sum(np.square(X), 1)\n r2 = -2.0 * tdot(X) + (Xsq[:, None] + Xsq[None, :])\n util.diag.v... |
def x(self, X, X2=None):
    """Distances divided by the lengthscale.

    With ARD the inputs are scaled per dimension before computing the
    distance; otherwise the whole distance matrix is divided by the single
    lengthscale afterwards.
    """
    if not self.ARD:
        return self._unscaled_dist(X, X2) / self.lengthscale
    scaled_X2 = None if X2 is None else X2 / self.lengthscale
    return self._unscaled_dist(X / self.lengthscale, scaled_X2)
| scaled_dist | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, X2=None):\n if self.ARD:\n if X2 is not None:\n X2 = X2 / self.lengthscale\n return self._unscaled_dist(X / self.lengthsca... |
def x(self, dL_dK, X, X2=None, reset=True):
    """Set the gradients of the variance and lengthscale given dL/dK.

    :param dL_dK: gradient of the objective w.r.t. the kernel matrix K(X, X2).
    :param reset: unused here — presumably consumed by overrides; TODO confirm.
    """
    # dK/dvariance = K/variance for a stationary kernel, so the variance
    # gradient is a plain weighted sum.
    self.variance.gradient = np.sum(self.K(X, X2) * dL_dK) / self.variance
    # Chain rule through the radial distance r.
    dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
    if self.ARD:
        tmp = dL_dr * self._inv_dist(X, X2)
        if X2 is None:
            X2 = X
        # Compiled path when the cython extensions are available.
        if use_stationary_cython:
            self.lengthscale.gradient = self._lengthscale_grads_cython(tmp,
                X, X2)
        else:
            self.lengthscale.gradient = self._lengthscale_grads_pure(tmp, X, X2
                )
    else:
        # Isotropic case: dr/dlengthscale = -r/lengthscale.
        r = self._scaled_dist(X, X2)
        self.lengthscale.gradient = -np.sum(dL_dr * r) / self.lengthscale
| update_gradients_full | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, dL_dK, X, X2=None, reset=True):\n self.variance.gradient = np.sum(self.K(X, X2) * dL_dK) / self.variance\n dL_dr = self.dK_dr_via_X(X, X2) * dL_dK\... |
def x(self, dL_dVar, dL_dLen):
    """Set the variance and lengthscale gradients directly from
    precomputed values, bypassing any kernel evaluation."""
    self.variance.gradient, self.lengthscale.gradient = dL_dVar, dL_dLen
| update_gradients_direct | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, dL_dVar, dL_dLen):\n self.variance.gradient = dL_dVar\n self.lengthscale.gradient = dL_dLen\n",
"role": "user"
},
{
"content": "update_... |
def x(self, X, X2=None):
    """Elementwise reciprocal of the scaled distance; zero distances map to
    0 (via 1/inf) so they never produce a division warning."""
    dist = self._scaled_dist(X, X2).copy()
    nonzero = dist != 0.0
    return 1.0 / np.where(nonzero, dist, np.inf)
| inv_dist | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, X, X2=None):\n dist = self._scaled_dist(X, X2).copy()\n return 1.0 / np.where(dist != 0.0, dist, np.inf)\n",
"role": "user"
},
{
"conte... |
def x(self, dL_dK, X, X2=None):
    """Gradient of the objective w.r.t. X, dispatched to the cython backend
    when the compiled extensions are available, else the pure-python one."""
    backend = (self._gradients_X_cython if use_stationary_cython else
        self._gradients_X_pure)
    return backend(dL_dK, X, X2)
| gradients_X | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, dL_dK, X, X2=None):\n if use_stationary_cython:\n return self._gradients_X_cython(dL_dK, X, X2)\n else:\n return self._gradients_X_pu... |
def x(self, dL_dK, X, X2=None):
    """Second derivatives of the objective w.r.t. X through the kernel.

    Combines the first (dK/dr) and second (d2K/dr2) radial derivatives via
    the chain rule and returns an array of shape
    (X.shape[0], X2.shape[0], X2.shape[1], X.shape[1]).
    """
    invdist = self._inv_dist(X, X2)
    invdist2 = invdist ** 2
    dL_dr = self.dK_dr_via_X(X, X2)
    tmp1 = dL_dr * invdist
    dL_drdr = self.dK2_drdr_via_X(X, X2)
    tmp2 = dL_drdr * invdist2
    # Per-dimension squared lengthscales (broadcast when isotropic).
    l2 = np.ones(X.shape[1]) * self.lengthscale ** 2
    if X2 is None:
        X2 = X
        # Self-pairs have r == 0; subtract variance there — presumably the
        # analytic limit of the radial term at r -> 0; TODO confirm.
        tmp1 -= np.eye(X.shape[0]) * self.variance
    else:
        tmp1[invdist2 == 0.0] -= self.variance
    # Outer products of pairwise differences:
    # dist[i, j, q, q'] = (X_i - X2_j)_q * (X_i - X2_j)_q'
    dist = X[:, None, :] - X2[None, :, :]
    dist = dist[:, :, :, None] * dist[:, :, None, :]
    # Identity broadcast over all (i, j) pairs for the diagonal term.
    I = np.ones((X.shape[0], X2.shape[0], X2.shape[1], X.shape[1])) * np.eye(X2
        .shape[1])
    grad = ((dL_dK * (tmp1 * invdist2 - tmp2))[:, :, None, None] * dist /
        l2[None, None, :, None] - (dL_dK * tmp1)[:, :, None, None] * I) / l2[
        None, None, None, :]
    return grad
| gradients_XX | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, dL_dK, X, X2=None):\n invdist = self._inv_dist(X, X2)\n invdist2 = invdist ** 2\n dL_dr = self.dK_dr_via_X(X, X2)\n tmp1 = dL_dr * invdist\n ... |
def x(self, dL_dK_diag, X):
    """Diagonal (self-pair) part of the second derivatives w.r.t. X.

    :param dL_dK_diag: gradient w.r.t. the diagonal of K; length N or a
        single broadcastable value.
    :returns: array of shape (N, Q, Q).
    """
    diag = dL_dK_diag.copy().reshape(-1, 1, 1)
    assert diag.size == X.shape[0
        ] or diag.size == 1, 'dL_dK_diag has to be given as row [N] or column vector [Nx1]'
    # Per-dimension squared lengthscales (broadcast when isotropic).
    sq_len = self.lengthscale ** 2 * np.ones(X.shape[1])
    hess = -self.dK2_drdr_diag() * np.eye(X.shape[1]) / sq_len
    return diag * hess[None, :, :]
| gradients_XX_diag | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, dL_dK_diag, X):\n dL_dK_diag = dL_dK_diag.copy().reshape(-1, 1, 1)\n assert dL_dK_diag.size == X.shape[0\n ] or dL_dK_diag.size == 1, 'dL_dK... |
def x(self):
    """State-space (SDE) representation of the Matern-3/2 kernel.

    :returns: F, L, Qc, H, Pinf plus the derivative stacks dF, dQc, dPinf,
        each with the last axis indexing (variance, lengthscale).
    """
    variance = float(self.variance.values)
    lengthscale = float(self.lengthscale.values)
    lam = np.sqrt(3.0) / lengthscale
    F = np.array([[0.0, 1.0], [-lam * lam, -2.0 * lam]])
    L = np.array([[0.0], [1.0]])
    Qc = np.array([[12.0 * np.sqrt(3.0) / lengthscale ** 3 * variance]])
    H = np.array([[1.0, 0.0]])
    Pinf = np.array([[variance, 0.0],
                     [0.0, 3.0 * variance / lengthscale ** 2]])
    # Derivatives stacked on the last axis: [..., 0] w.r.t. variance,
    # [..., 1] w.r.t. lengthscale.
    dF = np.dstack([np.zeros((2, 2)),
                    np.array([[0.0, 0.0],
                              [6.0 / lengthscale ** 3,
                               2.0 * np.sqrt(3.0) / lengthscale ** 2]])])
    dQc = np.dstack([np.array([[12.0 * np.sqrt(3.0) / lengthscale ** 3]]),
                     np.array([[-3.0 * 12.0 * np.sqrt(3.0) /
                                lengthscale ** 4 * variance]])])
    dPinf = np.dstack([np.array([[1.0, 0.0],
                                 [0.0, 3.0 / lengthscale ** 2]]),
                       np.array([[0.0, 0.0],
                                 [0.0, -6.0 * variance / lengthscale ** 3]])])
    return F, L, Qc, H, Pinf, dF, dQc, dPinf
| sde | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n variance = float(self.variance.values)\n lengthscale = float(self.lengthscale.values)\n foo = np.sqrt(3.0) / lengthscale\n F = np.array([[... |
def x(self, F, F1, F2, F3, lower, upper):
    """Gram matrix of basis functions under the Matern-5/2 differential
    operator, computed by numerical quadrature over [lower, upper] plus the
    boundary terms at `lower`.

    :param F, F1, F2, F3: arrays of callables — the basis functions and
        their first three derivatives.
    :returns: n x n symmetric Gram matrix (n = F.shape[0]).
    """
    assert self.input_dim == 1

    # Bug fix: this inner operator was defined under a wrong name while the
    # quadrature below refers to it as `L`, which raised NameError.
    def L(x, i):
        # Matern-5/2 differential operator applied to basis function i at x.
        return 5 * np.sqrt(5) / self.lengthscale ** 3 * F[i](x
            ) + 15.0 / self.lengthscale ** 2 * F1[i](x) + 3 * np.sqrt(5
            ) / self.lengthscale * F2[i](x) + F3[i](x)
    n = F.shape[0]
    G = np.zeros((n, n))
    # Quadrature of the operator products; the matrix is symmetric so only
    # the upper triangle is integrated.
    for i in range(n):
        for j in range(i, n):
            G[i, j] = G[j, i] = integrate.quad(lambda x: L(x, i) * L(x, j),
                lower, upper)[0]
    G_coef = 3.0 * self.lengthscale ** 5 / (400 * np.sqrt(5))
    # Boundary values of the basis functions and derivatives at `lower`.
    Flower = np.array([f(lower) for f in F])[:, None]
    F1lower = np.array([f(lower) for f in F1])[:, None]
    F2lower = np.array([f(lower) for f in F2])[:, None]
    orig = 9.0 / 8 * np.dot(Flower, Flower.T
        ) + 9.0 * self.lengthscale ** 4 / 200 * np.dot(F2lower, F2lower.T)
    orig2 = 3.0 / 5 * self.lengthscale ** 2 * (np.dot(F1lower, F1lower.T) +
        1.0 / 8 * np.dot(Flower, F2lower.T) + 1.0 / 8 * np.dot(F2lower,
        Flower.T))
    return 1.0 / self.variance * (G_coef * G + orig + orig2)
| Gram_matrix | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, F, F1, F2, F3, lower, upper):\n assert self.input_dim == 1\n\n def x(x, i):\n return 5 * np.sqrt(5) / self.lengthscale ** 3 * F[i](x\n ... |
def x(self):
    """Serialize this RatQuad kernel to a plain dictionary, tagging it with
    its class path and storing the power parameter as a list."""
    input_dict = super(RatQuad, self)._save_to_input_dict()
    input_dict.update({'class': 'GPy.kern.RatQuad', 'power': self.power.
        values.tolist()})
    return input_dict
| to_dict | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n input_dict = super(RatQuad, self)._save_to_input_dict()\n input_dict['class'] = 'GPy.kern.RatQuad'\n input_dict['power'] = self.power.values.... |
def x(model, non_fixed_inputs, fix_routine='median', as_list=True, X_all=False
    ):
    """Fix every input dimension not listed in non_fixed_inputs to a
    representative value.

    :param fix_routine: 'mean', 'median', or anything else for zero.
    :param as_list: if True return [(dim, value), ...]; otherwise return a
        copy of X with the fixed columns overwritten.
    :param X_all: use model.X_all instead of model.X (when neither latent
        representation applies).
    """
    from ...inference.latent_function_inference.posterior import VariationalPosterior
    f_inputs = []
    # Pick the deterministic representation of the inputs.
    if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
        X = model.X.mean.values.copy()
    elif isinstance(model.X, VariationalPosterior):
        X = model.X.values.copy()
    elif X_all:
        X = model.X_all.copy()
    else:
        X = model.X.copy()
    for i in range(X.shape[1]):
        if i not in non_fixed_inputs:
            # Bug fix: the second check was a separate `if`, so the 'mean'
            # routine appended the mean AND then a spurious (i, 0) entry
            # (which also clobbered the column when as_list=False). The
            # chain must be if/elif/else.
            if fix_routine == 'mean':
                f_inputs.append((i, np.mean(X[:, i])))
            elif fix_routine == 'median':
                f_inputs.append((i, np.median(X[:, i])))
            else:
                f_inputs.append((i, 0))
            if not as_list:
                X[:, i] = f_inputs[-1][1]
    if as_list:
        return f_inputs
    else:
        return X
| fixed_inputs | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(model, non_fixed_inputs, fix_routine='median', as_list=True, X_all=False\n ):\n from ...inference.latent_function_inference.posterior import VariationalP... |
def x(model, Y_new, optimize=True, init='L2'):
    """Infer latent positions X for new observations Y_new under a trained
    model, optionally optimizing the inference model first.

    :returns: (inferred X, the InferenceX model used).
    """
    inference_model = InferenceX(model, Y_new, init=init)
    if optimize:
        inference_model.optimize()
    return inference_model.X, inference_model
| infer_newX | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(model, Y_new, optimize=True, init='L2'):\n infr_m = InferenceX(model, Y_new, init=init)\n if optimize:\n infr_m.optimize()\n return infr_m.X, i... |
def x(self):
    """State-space (SDE) approximation of the RBF kernel.

    Builds an order-N polynomial approximation of the RBF spectrum
    (coefficients from a factorial series), keeps the stable (negative real
    part) roots to form the transfer function, and returns the matched
    state-space matrices and their derivatives w.r.t. (variance,
    lengthscale): F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0.
    """
    # Approximation order of the spectral expansion.
    if self.approx_order is not None:
        N = self.approx_order
    else:
        N = 10
    roots_rounding_decimals = 6
    # NOTE(review): np.math was an undocumented alias of the stdlib math
    # module and has been removed from recent NumPy; port to math.factorial.
    fn = np.math.factorial(N)
    p_lengthscale = float(self.lengthscale)
    p_variance = float(self.variance)
    kappa = 1.0 / 2.0 / p_lengthscale ** 2
    # Spectral density scale of the white-noise driving process.
    Qc = np.array(((p_variance * np.sqrt(np.pi / kappa) * fn * (4 * kappa) **
        N,),))
    eps = 1e-12
    if float(Qc) > 1.0 / eps or float(Qc) < eps:
        warnings.warn(
            """sde_RBF kernel: the noise variance Qc is either very large or very small.
It influece conditioning of P_inf: {0:e}"""
            .format(float(Qc)))
    # Even-order coefficients of the spectrum's denominator polynomial.
    pp1 = np.zeros((2 * N + 1,))
    for n in range(0, N + 1):
        pp1[2 * (N - n)] = fn * (4.0 * kappa) ** (N - n) / np.math.factorial(n
            ) * (-1) ** n
    # NOTE(review): sp.poly1d / sp.roots — presumably scipy imported as sp
    # (numpy exposes the same API); confirm the module alias when porting.
    pp = sp.poly1d(pp1)
    roots = sp.roots(pp)
    # Keep only the stable roots (negative real part after rounding).
    neg_real_part_roots = roots[np.round(np.real(roots),
        roots_rounding_decimals) < 0]
    aa = sp.poly1d(neg_real_part_roots, r=True).coeffs
    # Companion-form dynamics: superdiagonal of ones, last row from the
    # stable polynomial coefficients.
    F = np.diag(np.ones((N - 1,)), 1)
    F[-1, :] = -aa[-1:0:-1]
    L = np.zeros((N, 1))
    L[N - 1, 0] = 1
    H = np.zeros((1, N))
    H[0, 0] = 1
    # Stationary covariance from the Lyapunov solver, then symmetrized to
    # kill round-off asymmetry.
    Pinf = lyap(F, -np.dot(L, np.dot(Qc[0, 0], L.T)))
    Pinf = 0.5 * (Pinf + Pinf.T)
    # Derivative stacks: last axis indexes (variance, lengthscale).
    dF = np.empty([F.shape[0], F.shape[1], 2])
    dQc = np.empty([Qc.shape[0], Qc.shape[1], 2])
    dPinf = np.empty([Pinf.shape[0], Pinf.shape[1], 2])
    dFvariance = np.zeros(F.shape)
    dFlengthscale = np.zeros(F.shape)
    dFlengthscale[-1, :] = -aa[-1:0:-1] / p_lengthscale * np.arange(-N, 0, 1)
    dQcvariance = Qc / p_variance
    dQclengthscale = np.array(((p_variance * np.sqrt(2 * np.pi) * fn * 2 **
        N * p_lengthscale ** (-2 * N) * (1 - 2 * N),),))
    dPinf_variance = Pinf / p_variance
    lp = Pinf.shape[0]
    # Power-counting coefficients; odd-order entries do not depend on the
    # lengthscale and are zeroed.
    coeff = np.arange(1, lp + 1).reshape(lp, 1) + np.arange(1, lp + 1).reshape(
        1, lp) - 2
    coeff[np.mod(coeff, 2) != 0] = 0
    dPinf_lengthscale = -1 / p_lengthscale * Pinf * coeff
    dF[:, :, 0] = dFvariance
    dF[:, :, 1] = dFlengthscale
    dQc[:, :, 0] = dQcvariance
    dQc[:, :, 1] = dQclengthscale
    dPinf[:, :, 0] = dPinf_variance
    dPinf[:, :, 1] = dPinf_lengthscale
    P0 = Pinf.copy()
    dP0 = dPinf.copy()
    # Optionally balance the state-space model for numerical conditioning.
    if self.balance:
        import GPy.models.state_space_main as ssm
        F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0 = ssm.balance_ss_model(F,
            L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0)
    return F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0
| sde | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self.approx_order is not None:\n N = self.approx_order\n else:\n N = 10\n roots_rounding_decimals = 6\n fn = np.math.fact... |
def x(self):
    """State-space (SDE) form of the exponential (Matern-1/2) kernel.

    :returns: tuple ``(F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0)`` where
        the ``d*`` arrays stack derivatives w.r.t. (variance, lengthscale)
        on the last axis.
    """
    ell = float(self.lengthscale)
    var = float(self.variance.values)

    # One-dimensional LTI system: dx = F x dt + L dB, observed through H.
    F = np.array([[-1.0 / ell]])
    L = np.array([[1.0]])
    Qc = np.array([[2.0 * var / ell]])
    H = np.array([[1.0]])

    # Stationary covariance doubles as the initial covariance.
    Pinf = np.array([[var]])
    P0 = Pinf.copy()

    # Gradients: slice 0 is d/d(variance), slice 1 is d/d(lengthscale).
    dF = np.zeros((1, 1, 2))
    dF[0, 0, 1] = 1.0 / ell ** 2
    dQc = np.zeros((1, 1, 2))
    dQc[0, 0, 0] = 2.0 / ell
    dQc[0, 0, 1] = -2.0 * var / ell ** 2
    dPinf = np.zeros((1, 1, 2))
    dPinf[0, 0, 0] = 1.0
    dP0 = dPinf.copy()
    return F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0
| sde | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n variance = float(self.variance.values)\n lengthscale = float(self.lengthscale)\n F = np.array(((-1.0 / lengthscale,),))\n L = np.array(((1... |
def x(self, link_f, y, Y_metadata=None):
    """Likelihood p(y|f) evaluated at link-space values.

    Computed as ``exp(logpdf_link(...))`` so the density and log-density
    share one implementation. (An unused local array allocation was removed.)

    :param link_f: latent values after the link function, same shape as `y`
    :param y: observed outputs
    :param Y_metadata: optional metadata dict forwarded to ``logpdf_link``
    :returns: element-wise likelihood values
    """
    assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
    log_objective = self.logpdf_link(link_f, y, Y_metadata)
    return np.exp(log_objective)
| pdf_link | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link_f, y, Y_metadata=None):\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\n c = np.zeros((link_f.shape[0],))\n log_objective =... |
def x(self, link_f, y, Y_metadata=None):
    """Element-wise log-likelihood of a Weibull model with optional censoring.

    Fully observed points contribute the complete Weibull log-density;
    censored points (indicator 1 in ``Y_metadata['censored']``) contribute
    only the log-survival term.
    """
    assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
    cens = np.zeros_like(y)
    if Y_metadata is not None and 'censored' in Y_metadata.keys():
        cens = Y_metadata['censored']
    shape = self.r
    y_pow = y ** shape
    observed_term = (1 - cens) * (np.log(shape) - np.log(link_f)
                                  + (shape - 1) * np.log(y) - y_pow / link_f)
    censored_term = -cens * y_pow / link_f
    return observed_term + censored_term
| logpdf_link | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link_f, y, Y_metadata=None):\n assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape\n c = np.zeros_like(y)\n if Y_metadata is not None a... |
def x(self, link_f, y, Y_metadata=None):
    """First derivative of the Weibull log-likelihood w.r.t. link values.

    Censored points keep only the survival-term gradient.
    """
    cens = np.zeros_like(y)
    if Y_metadata is not None and 'censored' in Y_metadata.keys():
        cens = Y_metadata['censored']
    ratio = y ** self.r / link_f ** 2
    observed_term = (1 - cens) * (ratio - 1 / link_f)
    censored_term = cens * ratio
    return observed_term + censored_term
| dlogpdf_dlink | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link_f, y, Y_metadata=None):\n c = np.zeros_like(y)\n if Y_metadata is not None and 'censored' in Y_metadata.keys():\n c = Y_metadata['censo... |
def x(self, link_f, y, Y_metadata=None):
    """Second derivative of the Weibull log-likelihood w.r.t. link values."""
    cens = np.zeros_like(y)
    if Y_metadata is not None and 'censored' in Y_metadata.keys():
        cens = Y_metadata['censored']
    ratio = y ** self.r / link_f ** 3
    observed_term = (1 - cens) * (1 / link_f ** 2 - 2 * ratio)
    censored_term = -2 * cens * ratio
    return observed_term + censored_term
| d2logpdf_dlink2 | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link_f, y, Y_metadata=None):\n c = np.zeros_like(y)\n if Y_metadata is not None and 'censored' in Y_metadata.keys():\n c = Y_metadata['censo... |
def x(self, link_f, y, Y_metadata=None):
    """Third derivative of the Weibull log-likelihood w.r.t. link values."""
    cens = np.zeros_like(y)
    if Y_metadata is not None and 'censored' in Y_metadata.keys():
        cens = Y_metadata['censored']
    ratio = y ** self.r / link_f ** 4
    observed_term = (1 - cens) * (6 * ratio - 2 / link_f ** 3)
    censored_term = 6 * cens * ratio
    return observed_term + censored_term
| d3logpdf_dlink3 | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link_f, y, Y_metadata=None):\n c = np.zeros_like(y)\n if Y_metadata is not None and 'censored' in Y_metadata.keys():\n c = Y_metadata['censo... |
def x(self, inv_link_f, y, Y_metadata=None):
    """Mixed derivative d/dr of dlogpdf_dlink for the Weibull likelihood.

    The censored and uncensored contributions in the original were the
    identical expression T, so (1 - c)*T + c*T == T and the censoring
    indicator cancels. ``Y_metadata`` is kept for interface compatibility
    but does not affect the result.

    :param inv_link_f: latent values after the (inverse) link, same shape as `y`
    :param y: observed outputs
    :returns: element-wise derivative ``y**r * log(y) / inv_link_f**2``
    """
    link_f = inv_link_f
    return y ** self.r * np.log(y) / link_f ** 2
| dlogpdf_dlink_dr | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, inv_link_f, y, Y_metadata=None):\n c = np.zeros_like(y)\n if Y_metadata is not None and 'censored' in Y_metadata.keys():\n c = Y_metadata['c... |
def x(self, gp, Y_metadata=None):
    """Draw one Weibull sample per latent value in `gp`.

    Each latent value is pushed through the GP link to obtain the Weibull
    scale; ``self.r`` is the shape parameter. Output has the shape of `gp`.
    """
    flat = gp.flatten()
    draws = [sp.stats.weibull_min.rvs(self.r, loc=0,
             scale=self.gp_link.transf(f)) for f in flat]
    return np.array(draws).reshape(gp.shape)
| samples | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, gp, Y_metadata=None):\n orig_shape = gp.shape\n gp = gp.flatten()\n weibull_samples = np.array([sp.stats.weibull_min.rvs(self.r, loc=0,\n ... |
def x(self, kern, X, Z, likelihood, Y):
    """Global phase of variational sparse-GP inference.

    Computes the variational lower bound (log-likelihood), its gradient
    w.r.t. the inducing-point covariance Kmm, and the posterior object;
    intermediate quantities needed by the per-minibatch gradient pass are
    cached in ``self.midRes``.

    :param kern: kernel providing K / psi statistics
    :param X: inputs (a VariationalPosterior signals uncertain inputs)
    :param Z: inducing inputs
    :param likelihood: Gaussian likelihood supplying the noise variance
    :param Y: observed outputs, shape (num_data, output_dim)
    :returns: (logL, dL_dKmm, post) — bound, gradient, Posterior (or None)
    """
    num_data, output_dim = Y.shape
    input_dim = Z.shape[0]
    # Sum the per-worker data counts when running under MPI.
    # NOTE(review): `is not None` would be the idiomatic comparison here.
    if self.mpi_comm != None:
        from mpi4py import MPI
        num_data_all = np.array(num_data, dtype=np.int32)
        self.mpi_comm.Allreduce([np.int32(num_data), MPI.INT], [
            num_data_all, MPI.INT])
        num_data = num_data_all
    if isinstance(X, VariationalPosterior):
        uncertain_inputs = True
    else:
        uncertain_inputs = False
    # Noise precision, floored to avoid division by ~0 variance.
    beta = 1.0 / np.fmax(likelihood.variance, 1e-06)
    # More than one precision value means heteroscedastic noise; that path
    # forces single-point minibatches.
    het_noise = beta.size > 1
    if het_noise:
        self.batchsize = 1
    # Accumulated psi statistics over the full dataset (possibly via MPI).
    psi0_full, psi1Y_full, psi2_full, YRY_full = self.gatherPsiStat(kern, X,
        Z, Y, beta, uncertain_inputs)
    Kmm = kern.K(Z).copy()
    diag.add(Kmm, self.const_jitter)
    # Debug aid: dump Kmm if it contains non-finite entries before Cholesky.
    if not np.isfinite(Kmm).all():
        print(Kmm)
    Lm = jitchol(Kmm)
    LmInv = dtrtri(Lm)
    # Whitened psi2 statistic and the "Lambda = I + Lm^-1 Psi2 Lm^-T" system.
    LmInvPsi2LmInvT = LmInv.dot(psi2_full.dot(LmInv.T))
    Lambda = np.eye(Kmm.shape[0]) + LmInvPsi2LmInvT
    LL = jitchol(Lambda)
    LLInv = dtrtri(LL)
    logdet_L = 2.0 * np.sum(np.log(np.diag(LL)))
    LmLLInv = LLInv.dot(LmInv)
    b = psi1Y_full.dot(LmLLInv.T)
    bbt = np.square(b).sum()
    # v is the woodbury vector of the posterior.
    v = b.dot(LmLLInv).T
    LLinvPsi1TYYTPsi1LLinvT = tdot(b.T)
    tmp = -LLInv.T.dot(LLinvPsi1TYYTPsi1LLinvT + output_dim * np.eye(input_dim)
        ).dot(LLInv)
    # Gradient contribution w.r.t. the psi2 statistic, reused per minibatch.
    dL_dpsi2R = LmInv.T.dot(tmp + output_dim * np.eye(input_dim)).dot(LmInv
        ) / 2.0
    self.midRes['dL_dpsi2R'] = dL_dpsi2R
    self.midRes['v'] = v
    # Log-determinant term of the noise model.
    if het_noise:
        logL_R = -np.log(beta).sum()
    else:
        logL_R = -num_data * np.log(beta)
    # Variational lower bound.
    logL = -(output_dim * (num_data * log_2_pi + logL_R + psi0_full - np.
        trace(LmInvPsi2LmInvT)) + YRY_full - bbt
        ) / 2.0 - output_dim * logdet_L / 2.0
    dL_dKmm = dL_dpsi2R - output_dim * LmInv.T.dot(LmInvPsi2LmInvT).dot(LmInv
        ) / 2.0
    # Build the posterior unless the Y-speedup shortcut (homoscedastic only)
    # defers it.
    if not self.Y_speedup or het_noise:
        wd_inv = backsub_both_sides(Lm, np.eye(input_dim) -
            backsub_both_sides(LL, np.identity(input_dim), transpose='left'
            ), transpose='left')
        post = Posterior(woodbury_inv=wd_inv, woodbury_vector=v, K=Kmm,
            mean=None, cov=None, K_chol=Lm)
    else:
        post = None
    # Homoscedastic noise: gradient w.r.t. the likelihood variance is global,
    # so it is computed once here and cached for the minibatch pass.
    if not het_noise:
        dL_dthetaL = (YRY_full * beta + beta * output_dim * psi0_full -
            num_data * output_dim * beta) / 2.0 - beta * (dL_dpsi2R * psi2_full
            ).sum() - beta * (v.T * psi1Y_full).sum()
        self.midRes['dL_dthetaL'] = dL_dthetaL
    return logL, dL_dKmm, post
| inference_likelihood | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, kern, X, Z, likelihood, Y):\n num_data, output_dim = Y.shape\n input_dim = Z.shape[0]\n if self.mpi_comm != None:\n from mpi4py import MP... |
def x(self, kern, X, Z, likelihood, Y):
    """Per-minibatch gradient phase of variational sparse-GP inference.

    Walks the data in batches (state kept in ``self.batch_pos``) and returns
    the gradients of the bound w.r.t. the kernel/psi statistics for the
    current slice, using intermediates cached by the global phase in
    ``self.midRes``.

    :returns: (isEnd, (n_start, n_end), grad_dict) where isEnd flags the
        final batch of the epoch.
    """
    num_data, output_dim = Y.shape
    if isinstance(X, VariationalPosterior):
        uncertain_inputs = True
    else:
        uncertain_inputs = False
    # Noise precision, floored to avoid division by ~0 variance.
    beta = 1.0 / np.fmax(likelihood.variance, 1e-06)
    het_noise = beta.size > 1
    if self.Y_speedup and not het_noise:
        YYT_factor = self.get_YYTfactor(Y)
    else:
        YYT_factor = Y
    # Advance the batch window; wrap to 0 after the last batch.
    n_start = self.batch_pos
    batchsize = num_data if self.batchsize is None else self.batchsize
    n_end = min(batchsize + n_start, num_data)
    if n_end == num_data:
        isEnd = True
        self.batch_pos = 0
    else:
        isEnd = False
        self.batch_pos = n_end
    if batchsize == num_data:
        Y_slice = YYT_factor
        X_slice = X
    else:
        Y_slice = YYT_factor[n_start:n_end]
        X_slice = X[n_start:n_end]
    # Kernel/psi statistics for this slice.
    # NOTE(review): when inputs are uncertain but the noise is homoscedastic
    # neither branch runs, leaving psi0/psi1/psi2/betapsi1 unset; the later
    # uncertain-inputs path happens not to read them — verify this is
    # intentional.
    if not uncertain_inputs:
        psi0 = kern.Kdiag(X_slice)
        psi1 = kern.K(X_slice, Z)
        psi2 = None
        betapsi1 = np.einsum('n,nm->nm', beta, psi1)
    elif het_noise:
        psi0 = kern.psi0(Z, X_slice)
        psi1 = kern.psi1(Z, X_slice)
        psi2 = kern.psi2(Z, X_slice)
        betapsi1 = np.einsum('n,nm->nm', beta, psi1)
    # Heteroscedastic batches hold a single point (batchsize forced to 1),
    # so a scalar precision is selected here.
    if het_noise:
        beta = beta[n_start]
    betaY = beta * Y_slice
    # Intermediates cached by the global inference pass.
    dL_dpsi2R = self.midRes['dL_dpsi2R']
    v = self.midRes['v']
    dL_dpsi0 = -output_dim * (beta * np.ones((n_end - n_start,))) / 2.0
    dL_dpsi1 = np.dot(betaY, v.T)
    if uncertain_inputs:
        dL_dpsi2 = beta * dL_dpsi2R
    else:
        # Certain inputs: fold the psi2 gradient into the psi1 gradient.
        dL_dpsi1 += np.dot(betapsi1, dL_dpsi2R) * 2.0
        dL_dpsi2 = None
    # Gradient w.r.t. the likelihood variance: per-point for heteroscedastic
    # noise, cached global value (emitted once, at epoch end) otherwise.
    if het_noise:
        if uncertain_inputs:
            psiR = np.einsum('mo,mo->', dL_dpsi2R, psi2)
        else:
            psiR = np.einsum('nm,no,mo->', psi1, psi1, dL_dpsi2R)
        dL_dthetaL = (np.square(betaY).sum(axis=-1) + np.square(beta) * (
            output_dim * psi0) - output_dim * beta) / 2.0 - np.square(beta
            ) * psiR - (betaY * np.dot(betapsi1, v)).sum(axis=-1)
    elif isEnd:
        dL_dthetaL = self.midRes['dL_dthetaL']
    else:
        dL_dthetaL = 0.0
    # Key names differ by input type: psi-statistics vs plain kernel terms.
    if uncertain_inputs:
        grad_dict = {'dL_dpsi0': dL_dpsi0, 'dL_dpsi1': dL_dpsi1, 'dL_dpsi2':
            dL_dpsi2, 'dL_dthetaL': dL_dthetaL}
    else:
        grad_dict = {'dL_dKdiag': dL_dpsi0, 'dL_dKnm': dL_dpsi1,
            'dL_dthetaL': dL_dthetaL}
    return isEnd, (n_start, n_end), grad_dict
| inference_minibatch | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, kern, X, Z, likelihood, Y):\n num_data, output_dim = Y.shape\n if isinstance(X, VariationalPosterior):\n uncertain_inputs = True\n else:\... |
def x(self, kern, X, Z, likelihood, Y, Zp):
    """Per-minibatch gradient phase for the spike-and-slab (Zp) variant.

    Like the standard minibatch pass, but additionally differentiates the
    bound w.r.t. the posterior inclusion probabilities ``Zp.gamma``
    (returned as 'dL_dgamma'). Batch state lives in ``self.batch_pos``;
    intermediates come from ``self.midRes``.

    :returns: (isEnd, (n_start, n_end), grad_dict)
    """
    num_data, output_dim = Y.shape
    # Noise precision, floored to avoid division by ~0 variance.
    beta = 1.0 / np.fmax(likelihood.variance, 1e-06)
    het_noise = beta.size > 1
    if self.Y_speedup and not het_noise:
        YYT_factor = self.get_YYTfactor(Y)
    else:
        YYT_factor = Y
    # Advance the batch window; wrap to 0 after the last batch.
    n_start = self.batch_pos
    batchsize = num_data if self.batchsize is None else self.batchsize
    n_end = min(batchsize + n_start, num_data)
    if n_end == num_data:
        isEnd = True
        self.batch_pos = 0
    else:
        isEnd = False
        self.batch_pos = n_end
    if batchsize == num_data:
        Y_slice = YYT_factor
        X_slice = X
    else:
        Y_slice = YYT_factor[n_start:n_end]
        X_slice = X[n_start:n_end]
    psi0 = kern._Kdiag(X_slice)
    psi1 = kern.K(X_slice, Z)
    betapsi1 = np.einsum('n,nm->nm', beta, psi1)
    # Drop parameterized wrappers; work on raw arrays from here on.
    X_slice = X_slice.values
    Z = Z.values
    Zp = Zp.gamma.values
    # Last column of X/Z encodes integer indices: output index for data,
    # (offset) inducing index for Z — presumably; verify against the caller.
    indX = np.int_(X_slice[:, -1])
    indZ = np.int_(Z[:, -1]) - Zp.shape[0]
    betaY = beta * Y_slice
    # Intermediates cached by the global inference pass.
    dL_dpsi2R = self.midRes['dL_dpsi2R']
    v = self.midRes['v']
    dL_dpsi0 = -0.5 * output_dim * (beta * Zp[indX, :])
    dL_dpsi1 = np.dot(betaY, v.T)
    # Gradient w.r.t. the expected inclusion probabilities via psi1.
    dL_dEZp = psi1 * dL_dpsi1
    dL_dpsi1 = Zp[np.ix_(indX, indZ)] * dL_dpsi1
    dL_dgamma = np.zeros(Zp.shape)
    # Accumulate per output dimension d present in this batch.
    for d in np.unique(indX):
        indd = indX == d
        betapsi1d = betapsi1[indd, :]
        psi1d = psi1[indd, :]
        Zpd = Zp[d, :]
        # Second-moment matrix of the Bernoulli inclusion variables:
        # E[z_i z_j] = g_i g_j off-diagonal, g_i on the diagonal.
        Zp2 = Zpd[:, None] * Zpd[None, :] - np.diag(np.power(Zpd, 2)
            ) + np.diag(Zpd)
        dL_dpsi1[indd, :] += np.dot(betapsi1d, Zp2[np.ix_(indZ, indZ)] *
            dL_dpsi2R) * 2.0
        dL_EZp2 = dL_dpsi2R * (np.dot(psi1d.T, psi1d) * beta) * 2.0
        EZ = Zp[d, indZ]
        # Chain rule through the second moment: the q-th variable's own
        # diagonal contributes with factor 0.5 (handled via EZt).
        for q in range(Zp.shape[1]):
            EZt = EZ.copy()
            indq = indZ == q
            EZt[indq] = 0.5
            dL_dgamma[d, q] = np.sum(dL_dEZp[np.ix_(indd, indq)]) + np.sum(
                dL_EZp2[:, indq] * EZt[:, None]) - 0.5 * beta * np.sum(psi0
                [indd, q])
    # Likelihood-variance gradient is global; emitted once at epoch end.
    if isEnd:
        dL_dthetaL = self.midRes['dL_dthetaL']
    else:
        dL_dthetaL = 0.0
    grad_dict = {'dL_dKdiag': dL_dpsi0, 'dL_dKnm': dL_dpsi1, 'dL_dthetaL':
        dL_dthetaL, 'dL_dgamma': dL_dgamma}
    return isEnd, (n_start, n_end), grad_dict
| inference_minibatch | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, kern, X, Z, likelihood, Y, Zp):\n num_data, output_dim = Y.shape\n beta = 1.0 / np.fmax(likelihood.variance, 1e-06)\n het_noise = beta.size > 1\... |
def x(self):
    """Serialize this RBF kernel into an input dict.

    Extends the base-class dict with the class tag and the lengthscale
    parameterization flag; when the kernel stores the inverse squared
    lengthscale, the serialized 'lengthscale' is recovered as sqrt(1/inv_l).

    :returns: dict suitable for reconstruction via the GPy kernel loader
    """
    input_dict = super(RBF, self)._save_to_input_dict()
    input_dict['class'] = 'GPy.kern.RBF'
    input_dict['inv_l'] = self.use_invLengthscale
    # Truthiness test instead of `== True` (idiomatic; same outcome for the
    # boolean flag stored above).
    if input_dict['inv_l']:
        input_dict['lengthscale'] = np.sqrt(1 / float(self.inv_l))
    return input_dict
| to_dict | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n input_dict = super(RBF, self)._save_to_input_dict()\n input_dict['class'] = 'GPy.kern.RBF'\n input_dict['inv_l'] = self.use_invLengthscale\n ... |
def x(self, dim):
    """Return the one-dimensional grid-RBF factor for grid dimension `dim`.

    The factor shares (a copy of) this kernel's variance and records the
    original number of dimensions.
    """
    return GridRBF(input_dim=1, variance=self.variance.copy(),
                   originalDimensions=dim)
| get_one_dimensional_kernel | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, dim):\n oneDkernel = GridRBF(input_dim=1, variance=self.variance.copy(),\n originalDimensions=dim)\n return oneDkernel\n",
"role": "user... |
def x(dataset, directory, centre=True):
    """Load a marker-point motion dataset from ``<directory>/<dataset>.txt``.

    Drops marker columns containing NaNs, optionally centres each coordinate
    block per frame, rescales by a fixed factor, and loads the marker
    connectivity matrix from 'connections.txt'.

    :param dataset: base name of the data file (without extension)
    :param directory: directory holding the data and 'connections.txt'
    :param centre: subtract the per-frame mean from each coordinate block
    :returns: (Y, connect) — stacked x/y/z coordinates and boolean adjacency
    """
    points, point_names = parse_text(os.path.join(directory, dataset + '.txt')
        )[0:2]
    # Keep only markers whose x, y AND z tracks are fully observed.
    # (Fixes a copy-paste bug: the original tested points[0] three times,
    # letting NaNs in the y or z coordinates slip through.)
    present_index = [i for i in range(points[0].shape[1]) if not (np.any(np
        .isnan(points[0][:, i])) or np.any(np.isnan(points[1][:, i])) or np
        .any(np.isnan(points[2][:, i])))]
    point_names = point_names[present_index]
    for i in range(3):
        points[i] = points[i][:, present_index]
        if centre:
            # Remove the per-frame mean across markers for this coordinate.
            points[i] = (points[i].T - points[i].mean(axis=1)).T
    Y = np.concatenate((points[0], points[1], points[2]), axis=1)
    Y = Y / 400.0  # fixed scale used by this loader
    connect = read_connections(os.path.join(directory, 'connections.txt'),
        point_names)
    return Y, connect
| load_text_data | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(dataset, directory, centre=True):\n points, point_names = parse_text(os.path.join(directory, dataset + '.txt')\n )[0:2]\n present_index = [i for i... |
def x(file_name):
    """Parse a whitespace-delimited motion-capture text file.

    The header line names tracked points as name_x/name_y/name_z triplets
    starting at column 2; data rows hold a field id, a time stamp, then the
    coordinate columns. The sentinel -9999.99 marks missing samples and is
    converted to NaN. Uses a context manager for the header read (the
    original left handle closing manual) and ``np.nan`` (``np.NaN`` was
    removed in NumPy 2.0); an unused ``field`` local was dropped.

    :param file_name: path of the text file
    :returns: (points, point_names, times) where points = [x, y, z] arrays,
        each of shape (num_frames, num_points)
    """
    with open(file_name, 'r') as fid:
        point_names = np.array(fid.readline().split())[2:-1:3]
    # Strip the coordinate suffix ('_x' etc.) to get bare point names.
    for i in range(len(point_names)):
        point_names[i] = point_names[i][0:-2]
    S = np.loadtxt(file_name, skiprows=1)
    times = S[:, 1]
    S = S[:, 2:]
    S[S == -9999.99] = np.nan  # sentinel for missing samples
    points = [S[:, 0:-1:3], S[:, 1:-1:3], S[:, 2:-1:3]]
    return points, point_names, times
| parse_text | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(file_name):\n fid = open(file_name, 'r')\n point_names = np.array(fid.readline().split())[2:-1:3]\n fid.close()\n for i in range(len(point_names)):... |
def x(file_name, point_names):
    """Build a symmetric boolean adjacency matrix over `point_names`.

    Each line of `file_name` lists a connected pair as 'name_a, name_b';
    extra comma fields are ignored, as before. Fixes a file-handle leak
    (the original never closed the file) and replaces the cubic nested
    scan with a set lookup.

    :param file_name: path of the connections file
    :param point_names: array of point names defining matrix order
    :returns: boolean array of shape (len(point_names), len(point_names))
    """
    # Collect the ordered (a, b) name pairs, whitespace-stripped.
    pairs = set()
    with open(file_name, 'r') as fid:
        for line in fid:
            parts = line.split(',')
            pairs.add((parts[0].strip(), parts[1].strip()))
    n = len(point_names)
    connect = np.zeros((n, n), dtype=bool)
    for i in range(n):
        for j in range(n):
            if (point_names[i], point_names[j]) in pairs:
                # Mark both directions: the matrix is symmetric.
                connect[i, j] = True
                connect[j, i] = True
    return connect
| read_connections | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(file_name, point_names):\n connections = []\n fid = open(file_name, 'r')\n line = fid.readline()\n while line:\n connections.append(np.array... |
def x(self):
    """Rebuild every vertex's `children` list from the `parents` lists."""
    # Start from a clean slate so stale entries never survive.
    for v in self.vertices:
        v.children = []
    # Register each vertex with all of its parents, avoiding duplicates.
    for child_idx, v in enumerate(self.vertices):
        for parent_idx in v.parents:
            parent = self.vertices[parent_idx]
            if child_idx not in parent.children:
                parent.children.append(child_idx)
| find_children | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n for i in range(len(self.vertices)):\n self.vertices[i].children = []\n for i in range(len(self.vertices)):\n for parent in self.ve... |
def x(self):
    """Rebuild every vertex's `parents` list from the `children` lists."""
    # Start from a clean slate so stale entries never survive.
    for v in self.vertices:
        v.parents = []
    # Register each vertex as the parent of all its children, no duplicates.
    for parent_idx, v in enumerate(self.vertices):
        for child_idx in v.children:
            child = self.vertices[child_idx]
            if parent_idx not in child.parents:
                child.parents.append(parent_idx)
| find_parents | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n for i in range(len(self.vertices)):\n self.vertices[i].parents = []\n for i in range(len(self.vertices)):\n for child in self.vert... |
def x(self):
    """Return the index of the root vertex (one with no parents).

    Refreshes the parent lists first, then walks upward from vertex 0
    through first parents until a parentless vertex is reached.
    """
    self.find_parents()
    idx = 0
    while self.vertices[idx].parents:
        idx = self.vertices[idx].parents[0]
    return idx
| find_root | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n self.find_parents()\n index = 0\n while len(self.vertices[index].parents) > 0:\n index = self.vertices[index].parents[0]\n return i... |
def x(self, id):
    """Return the index of the vertex whose `id` matches; raise if absent."""
    for idx, v in enumerate(self.vertices):
        if v.id == id:
            return idx
    raise ValueError('Reverse look up of id failed.')
| get_index_by_id | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, id):\n for i in range(len(self.vertices)):\n if self.vertices[i].id == id:\n return i\n raise ValueError('Reverse look up of id f... |
def x(self, name):
    """Return the index of the vertex whose `name` matches; raise if absent."""
    for idx, v in enumerate(self.vertices):
        if v.name == name:
            return idx
    raise ValueError('Reverse look up of name failed.')
| get_index_by_name | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, name):\n for i in range(len(self.vertices)):\n if self.vertices[i].name == name:\n return i\n raise ValueError('Reverse look up o... |
def x(self):
    """Reorder vertices so that parents appear before their children.

    Repeatedly sweeps the vertex list; whenever a parent index exceeds the
    child's index the two are swapped via `swap_vertices`, and sweeping
    continues until no swap is triggered.

    NOTE(review): the completion flag is reset at every index, so only the
    tail of a sweep decides termination — behavior preserved exactly from
    the original.
    """
    in_order = False
    while not in_order:
        for idx in range(len(self.vertices)):
            in_order = True
            for parent_idx in self.vertices[idx].parents:
                if parent_idx > idx:
                    in_order = False
                    self.swap_vertices(idx, parent_idx)
| order_vertices | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n ordered = False\n while ordered == False:\n for i in range(len(self.vertices)):\n ordered = True\n for parent in se... |
def x(self, i, j):
store_vertex_i = self.vertices[i]
store_vertex_j = self.vertices[j]
self.vertices[j] = store_vertex_i
self.vertices[i] = store_vertex_j
for k in range(len(self.vertices)):
for swap_list in [self.vertices[k].children, self.vertices[k].parents]:
if i in swap_list:
swap_list[swap_list.index(i)] = -1
if j in swap_list:
swap_list[swap_list.index(j)] = i
if -1 in swap_list:
swap_list[swap_list.index(-1)] = j
| swap_vertices | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, i, j):\n store_vertex_i = self.vertices[i]\n store_vertex_j = self.vertices[j]\n self.vertices[j] = store_vertex_i\n self.vertices[i] = store... |
def x(self, channels):
    """Unwrap rotation channels in place to avoid ~360-degree frame jumps.

    For every rotation column, whenever the frame-to-frame difference is
    closer to +-360 than to 0, the remainder of the column is shifted by
    the compensating 360 degrees.

    NOTE(review): a rot_ind of 0 (falsy) is skipped, as in the original.
    """
    for v in self.vertices:
        for col in v.meta['rot_ind']:
            if not col:
                continue
            for frame in range(1, channels.shape[0]):
                step = channels[frame, col] - channels[frame - 1, col]
                if abs(step + 360.0) < abs(step):
                    channels[frame:, col] += 360.0
                elif abs(step - 360.0) < abs(step):
                    channels[frame:, col] -= 360.0
| smooth_angle_channels | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, channels):\n for vertex in self.vertices:\n for col in vertex.meta['rot_ind']:\n if col:\n for k in range(1, channels... |
def x(self, file_name):
    """Load an ASF skeleton description from `file_name`.

    Delegates parsing to ``read_skel`` and records the file name on the
    instance. A context manager replaces the manual open/close so the
    handle is released even if parsing raises (the original leaked it on
    error).

    :param file_name: path of the skeleton (.asf) file
    """
    with open(file_name, 'r') as fid:
        self.read_skel(fid)
    self.name = file_name
| load_skel | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, file_name):\n fid = open(file_name, 'r')\n self.read_skel(fid)\n fid.close()\n self.name = file_name\n",
"role": "user"
},
{
"con... |
def x(self, fid):
    """Parse the ':bonedata' section of an ASF skeleton file.

    Reads bone records delimited by 'begin'/'end', creating one vertex per
    bone and filling its metadata (offset, axis, dof channels, limits).
    Returns the first line of the next section (one starting with ':').

    NOTE(review): `bone_count` is incremented before the append, so bone k
    lands at index k of `self.vertices` — this presumes a root vertex
    already occupies index 0; verify against the caller. `direction` and
    `lgth` are only assigned by their own keywords, so a malformed file
    without them would raise at the 'end' keyword.
    """
    bone_count = 0
    lin = self.read_line(fid)
    # A line starting with ':' marks the beginning of the next section.
    while lin[0] != ':':
        parts = lin.split()
        if parts[0] == 'begin':
            # New bone: allocate a vertex with empty/default metadata.
            bone_count += 1
            self.vertices.append(vertex(name='', id=np.NaN, meta={'name': [
                ], 'id': [], 'offset': [], 'orientation': [], 'axis': [0.0,
                0.0, 0.0], 'axis_order': [], 'C': np.eye(3), 'Cinv': np.eye
                (3), 'channels': [], 'bodymass': [], 'confmass': [],
                'order': [], 'rot_ind': [], 'pos_ind': [], 'limits': [],
                'xyz': np.array([0.0, 0.0, 0.0]), 'rot': np.eye(3)}))
            lin = self.read_line(fid)
        elif parts[0] == 'id':
            self.vertices[bone_count].id = int(parts[1])
            lin = self.read_line(fid)
            self.vertices[bone_count].children = []
        elif parts[0] == 'name':
            self.vertices[bone_count].name = parts[1]
            lin = self.read_line(fid)
        elif parts[0] == 'direction':
            # Unit direction of the bone; combined with 'length' at 'end'.
            direction = np.array([float(parts[1]), float(parts[2]), float(
                parts[3])])
            lin = self.read_line(fid)
        elif parts[0] == 'length':
            lgth = float(parts[1])
            lin = self.read_line(fid)
        elif parts[0] == 'axis':
            self.vertices[bone_count].meta['axis'] = np.array([float(parts[
                1]), float(parts[2]), float(parts[3])])
            # Axis order token (e.g. 'XYZ') is reversed and lower-cased.
            self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1
                ].lower()
            lin = self.read_line(fid)
        elif parts[0] == 'dof':
            # Map ASF degree-of-freedom tokens to channel names; rotation
            # tokens also record the rotation order (reversed at the end).
            order = []
            for i in range(1, len(parts)):
                if parts[i] == 'rx':
                    chan = 'Xrotation'
                    order.append('x')
                elif parts[i] == 'ry':
                    chan = 'Yrotation'
                    order.append('y')
                elif parts[i] == 'rz':
                    chan = 'Zrotation'
                    order.append('z')
                elif parts[i] == 'tx':
                    chan = 'Xposition'
                elif parts[i] == 'ty':
                    chan = 'Yposition'
                elif parts[i] == 'tz':
                    chan = 'Zposition'
                elif parts[i] == 'l':
                    chan = 'length'
                self.vertices[bone_count].meta['channels'].append(chan)
                self.vertices[bone_count].meta['order'] = order[::-1]
            lin = self.read_line(fid)
        elif parts[0] == 'limits':
            # First limits pair is on the keyword line; further pairs follow
            # until 'end'. Brackets are stripped from each bound.
            self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]
                ), float(parts[2][:-1])]]
            lin = self.read_line(fid)
            while lin != 'end':
                parts = lin.split()
                self.vertices[bone_count].meta['limits'].append([float(
                    parts[0][1:]), float(parts[1][:-1])])
                lin = self.read_line(fid)
            self.vertices[bone_count].meta['limits'] = np.array(self.
                vertices[bone_count].meta['limits'])
        elif parts[0] == 'end':
            # Close the bone record: offset = direction * length.
            self.vertices[bone_count].meta['offset'] = direction * lgth
            lin = self.read_line(fid)
    return lin
| read_bonedata | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, fid):\n bone_count = 0\n lin = self.read_line(fid)\n while lin[0] != ':':\n parts = lin.split()\n if parts[0] == 'begin':\n ... |
def x(self, fid):
    """Read per-frame channel values from an AMC motion file.

    Skips ahead to the ':DEGREES' marker, then reads frames: a lone number
    starts a new frame (numbers must be consecutive), and each following
    line names a bone plus its channel values. The values are packed into a
    (num_frames, num_channels) array laid out per vertex in vertex order,
    index bookkeeping is resolved per vertex, and the angle channels are
    unwrapped before returning.

    :param fid: open file object for the AMC file
    :returns: channels array of shape (num_frames, num_channels)
    :raises ValueError: on a missing ':DEGREES' marker, a non-consecutive
        frame number, or a one-token line that is not a frame number
    """
    bones = [[] for i in self.vertices]
    num_channels = 0
    for vertex in self.vertices:
        num_channels = num_channels + len(vertex.meta['channels'])
    lin = self.read_line(fid)
    # Skip the preamble; read_line returns '' at EOF.
    while lin != ':DEGREES':
        lin = self.read_line(fid)
        if lin == '':
            raise ValueError('Could not find :DEGREES in ' + fid.name)
    counter = 0
    lin = self.read_line(fid)
    while lin:
        parts = lin.split()
        if len(parts) == 1:
            # A single token is a frame number; frames must be consecutive.
            frame_no = int(parts[0])
            if frame_no:
                counter += 1
                if counter != frame_no:
                    raise ValueError('Unexpected frame number.')
            else:
                raise ValueError('Single bone name ...')
        else:
            # Bone name followed by its channel values for this frame.
            ind = self.get_index_by_name(parts[0])
            bones[ind].append(np.array([float(channel) for channel in parts
                [1:]]))
        lin = self.read_line(fid)
    num_frames = counter
    channels = np.zeros((num_frames, num_channels))
    end_val = 0
    # Pack per-bone frame lists into contiguous column ranges, vertex by
    # vertex, and record where each vertex's channels start.
    for i in range(len(self.vertices)):
        vertex = self.vertices[i]
        if len(vertex.meta['channels']) > 0:
            start_val = end_val
            end_val = end_val + len(vertex.meta['channels'])
            for j in range(num_frames):
                channels[j, start_val:end_val] = bones[i][j]
            self.resolve_indices(i, start_val)
    # Unwrap rotation channels so consecutive frames never jump ~360 deg.
    self.smooth_angle_channels(channels)
    return channels
| read_channels | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, fid):\n bones = [[] for i in self.vertices]\n num_channels = 0\n for vertex in self.vertices:\n num_channels = num_channels + len(vertex.... |
def x(self, fid):
    """Accumulate documentation lines until a ':' section header is seen.

    Appends every read line to ``self.documentation`` and returns the
    header line that terminated the block.
    """
    current = self.read_line(fid)
    # Everything up to the next ':' section marker is free-form text.
    while current[0] != ':':
        self.documentation.append(current)
        current = self.read_line(fid)
    return current
| read_documentation | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, fid):\n lin = self.read_line(fid)\n while lin[0] != ':':\n self.documentation.append(lin)\n lin = self.read_line(fid)\n return lin... |
def x(self, fid):
    """Parse the ASF ':hierarchy' section, linking parents to children.

    Each non-'begin' line names a parent followed by its children; every
    child's index is appended to the parent's `children` list. Returns the
    first line read after the closing 'end'.
    """
    current = self.read_line(fid)
    while current != 'end':
        if current != 'begin':
            names = current.split()
            parent = self.get_index_by_name(names[0])
            children = self.vertices[parent].children
            for child_name in names[1:]:
                children.append(self.get_index_by_name(child_name))
        current = self.read_line(fid)
    # Consume one more line past 'end' and hand it to the caller.
    return self.read_line(fid)
| read_hierarchy | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, fid):\n lin = self.read_line(fid)\n while lin != 'end':\n parts = lin.split()\n if lin != 'begin':\n ind = self.get_index_... |
def x(self, fid):
lin = '#'
while lin[0] == '#':
lin = fid.readline().strip()
if lin == '':
return lin
return lin
| read_line | [
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, fid):\n lin = '#'\n while lin[0] == '#':\n lin = fid.readline().strip()\n if lin == '':\n return lin\n return lin\n",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.